/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (C) 2009 PaX Team <[email protected]> */
#ifndef _ASM_X86_ATOMIC64_64_H
#define _ASM_X86_ATOMIC64_64_H
#include <linux/types.h>
#include <asm/alternative.h>
#include <asm/cmpxchg.h>
/* The 64-bit atomic type */
#define ATOMIC64_INIT(i) { (i) }
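/**
 * arch_atomic64_read - read atomic64 variable
 * @v: pointer of type atomic64_t
 *
 * Atomically reads the value of @v.
 */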
static __always_inline s64 arch_atomic64_read(const atomic64_t *v)
{
return __READ_ONCE((v)->counter);
}
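/**
 * arch_atomic64_read_unchecked - read atomic64 variable without REFCOUNT checking
 * @v: pointer of type atomic64_unchecked_t
 *
 * Atomically reads the value of @v.  The *_unchecked variants operate on
 * counters that are exempt from the PaX REFCOUNT overflow/underflow
 * detection used by the checked operations below.
 */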
static __always_inline s64 __turn_off_size_overflow arch_atomic64_read_unchecked(const atomic64_unchecked_t *v)
{
	return __READ_ONCE((v)->counter);
}
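/**
 * arch_atomic64_set - set atomic64 variable
 * @v: pointer to type atomic64_t
 * @i: required value
 *
 * Atomically sets the value of @v to @i.
 */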
static __always_inline void arch_atomic64_set(atomic64_t *v, s64 i)
{
__WRITE_ONCE(v->counter, i);
}
static __always_inline void arch_atomic64_set_unchecked(atomic64_unchecked_t *v, s64 i)
{
__WRITE_ONCE(v->counter, i);
}
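/**
 * arch_atomic64_add - add integer to atomic64 variable
 * @i: integer value to add
 * @v: pointer to type atomic64_t
 *
 * Atomically adds @i to @v.  PAX_REFCOUNT_OVERFLOW(64) arms the REFCOUNT
 * hardening so that a signed overflow of the counter is detected.
 */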
static __always_inline void arch_atomic64_add(s64 i, atomic64_t *v)
{
asm volatile(LOCK_PREFIX "addq %1,%0\n\t"
PAX_REFCOUNT_OVERFLOW(64)
: [counter] "=m" (v->counter)
: "er" (i), "m" (v->counter)
: "cc", "cx", "memory");
}
static __always_inline void arch_atomic64_add_unchecked(s64 i, atomic64_unchecked_t *v)
{
asm volatile(LOCK_PREFIX "addq %1,%0"
: "=m" (v->counter)
: "er" (i), "m" (v->counter) : "memory");
}
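/**
 * arch_atomic64_sub - subtract the atomic64 variable
 * @i: integer value to subtract
 * @v: pointer to type atomic64_t
 *
 * Atomically subtracts @i from @v.  PAX_REFCOUNT_UNDERFLOW(64) arms the
 * REFCOUNT hardening so that a signed underflow of the counter is detected.
 */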
static __always_inline void arch_atomic64_sub(s64 i, atomic64_t *v)
{
asm volatile(LOCK_PREFIX "subq %1,%0\n\t"
PAX_REFCOUNT_UNDERFLOW(64)
: [counter] "=m" (v->counter)
: "er" (i), "m" (v->counter)
: "cc", "cx", "memory");
}
#define arch_atomic64_sub arch_atomic64_sub
static __always_inline void arch_atomic64_sub_unchecked(s64 i, atomic64_unchecked_t *v)
{
asm volatile(LOCK_PREFIX "subq %1,%0\n"
: "=m" (v->counter)
: "er" (i), "m" (v->counter) : "memory");
}
#define arch_atomic64_sub_unchecked arch_atomic64_sub_unchecked
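/**
 * arch_atomic64_sub_and_test - subtract value from variable and test result
 * @i: integer value to subtract
 * @v: pointer to type atomic64_t
 *
 * Atomically subtracts @i from @v and returns
 * true if the result is zero, or false for all
 * other cases.
 */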
static __always_inline bool arch_atomic64_sub_and_test(s64 i, atomic64_t *v)
{
return GEN_BINARY_RMWcc(LOCK_PREFIX "subq", v->counter, underflow, 64, e, "er", i);
}
#define arch_atomic64_sub_and_test arch_atomic64_sub_and_test
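/**
 * arch_atomic64_inc - increment atomic64 variable
 * @v: pointer to type atomic64_t
 *
 * Atomically increments @v by 1.
 */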
static __always_inline void arch_atomic64_inc(atomic64_t *v)
{
asm volatile(LOCK_PREFIX "incq %0\n\t"
PAX_REFCOUNT_OVERFLOW(64)
: [counter] "=m" (v->counter)
: "m" (v->counter)
: "cc", "cx", "memory");
}
#define arch_atomic64_inc arch_atomic64_inc
static __always_inline void arch_atomic64_inc_unchecked(atomic64_unchecked_t *v)
{
asm volatile(LOCK_PREFIX "incq %0"
: "=m" (v->counter)
: "m" (v->counter) : "memory");
}
#define arch_atomic64_inc_unchecked arch_atomic64_inc_unchecked
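/**
 * arch_atomic64_dec - decrement atomic64 variable
 * @v: pointer to type atomic64_t
 *
 * Atomically decrements @v by 1.
 */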
static __always_inline void arch_atomic64_dec(atomic64_t *v)
{
asm volatile(LOCK_PREFIX "decq %0\n\t"
PAX_REFCOUNT_UNDERFLOW(64)
: [counter] "=m" (v->counter)
: "m" (v->counter)
: "cc", "cx", "memory");
}
#define arch_atomic64_dec arch_atomic64_dec
static __always_inline void arch_atomic64_dec_unchecked(atomic64_unchecked_t *v)
{
asm volatile(LOCK_PREFIX "decq %0\n"
: "=m" (v->counter)
: "m" (v->counter) : "memory");
}
#define arch_atomic64_dec_unchecked arch_atomic64_dec_unchecked
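/**
 * arch_atomic64_dec_and_test - decrement and test
 * @v: pointer to type atomic64_t
 *
 * Atomically decrements @v by 1 and
 * returns true if the result is 0, or false for all other
 * cases.
 */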
static __always_inline bool arch_atomic64_dec_and_test(atomic64_t *v)
{
return GEN_UNARY_RMWcc(LOCK_PREFIX "decq", v->counter, underflow, 64, e);
}
#define arch_atomic64_dec_and_test arch_atomic64_dec_and_test
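/**
 * arch_atomic64_inc_and_test - increment and test
 * @v: pointer to type atomic64_t
 *
 * Atomically increments @v by 1
 * and returns true if the result is zero, or false for all
 * other cases.
 */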
static __always_inline bool arch_atomic64_inc_and_test(atomic64_t *v)
{
return GEN_UNARY_RMWcc(LOCK_PREFIX "incq", v->counter, overflow, 64, e);
}
#define arch_atomic64_inc_and_test arch_atomic64_inc_and_test
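/**
 * arch_atomic64_add_negative - add and test if negative
 * @i: integer value to add
 * @v: pointer to type atomic64_t
 *
 * Atomically adds @i to @v and returns true
 * if the result is negative, or false when
 * result is greater than or equal to zero.
 */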
static __always_inline bool arch_atomic64_add_negative(s64 i, atomic64_t *v)
{
return GEN_BINARY_RMWcc(LOCK_PREFIX "addq", v->counter, overflow, 64, s, "er", i);
}
#define arch_atomic64_add_negative arch_atomic64_add_negative
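/**
 * arch_atomic64_add_return - add and return
 * @i: integer value to add
 * @v: pointer to type atomic64_t
 *
 * Atomically adds @i to @v and returns @i + @v
 */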
static __always_inline s64 arch_atomic64_add_return(s64 i, atomic64_t *v)
{
return i + xadd_check_overflow(&v->counter, i);
}
#define arch_atomic64_add_return arch_atomic64_add_return
/**
* arch_atomic64_add_return_unchecked - add and return
* @i: integer value to add
* @v: pointer to type atomic64_unchecked_t
*
* Atomically adds @i to @v and returns @i + @v
*/
static __always_inline s64 arch_atomic64_add_return_unchecked(s64 i, atomic64_unchecked_t *v)
{
return i + xadd(&v->counter, i);
}
#define arch_atomic64_add_return_unchecked arch_atomic64_add_return_unchecked
static __always_inline s64 arch_atomic64_sub_return(s64 i, atomic64_t *v)
{
return arch_atomic64_add_return(-i, v);
}
#define arch_atomic64_sub_return arch_atomic64_sub_return
static __always_inline s64 arch_atomic64_sub_return_unchecked(s64 i, atomic64_unchecked_t *v)
{
return arch_atomic64_add_return_unchecked(-i, v);
}
#define arch_atomic64_sub_return_unchecked arch_atomic64_sub_return_unchecked
static __always_inline s64 arch_atomic64_fetch_add(s64 i, atomic64_t *v)
{
return xadd_check_overflow(&v->counter, i);
}
#define arch_atomic64_fetch_add arch_atomic64_fetch_add
static __always_inline s64 arch_atomic64_fetch_add_unchecked(s64 i, atomic64_unchecked_t *v)
{
return xadd(&v->counter, i);
}
#define arch_atomic64_fetch_add_unchecked arch_atomic64_fetch_add_unchecked
static __always_inline s64 arch_atomic64_fetch_sub(s64 i, atomic64_t *v)
{
return xadd_check_overflow(&v->counter, -i);
}
#define arch_atomic64_fetch_sub arch_atomic64_fetch_sub
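/**
 * arch_atomic64_cmpxchg - compare and exchange
 * @v: pointer to type atomic64_t
 * @old: expected value
 * @new: new value
 *
 * Atomically sets @v to @new if @v was equal to @old.
 * Returns the value of @v before the operation.
 */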
static __always_inline s64 arch_atomic64_cmpxchg(atomic64_t *v, s64 old, s64 new)
{
return arch_cmpxchg(&v->counter, old, new);
}
#define arch_atomic64_cmpxchg arch_atomic64_cmpxchg
static __always_inline s64 arch_atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, s64 old, s64 new)
{
return arch_cmpxchg(&v->counter, old, new);
}
#define arch_atomic64_cmpxchg_unchecked arch_atomic64_cmpxchg_unchecked
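/**
 * arch_atomic64_try_cmpxchg - compare and exchange, reporting success
 * @v: pointer to type atomic64_t
 * @old: pointer to the expected value
 * @new: new value
 *
 * Atomically sets @v to @new if @v was equal to *@old and returns true;
 * otherwise returns false and updates *@old to the value found in @v.
 */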
static __always_inline bool arch_atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new)
{
return arch_try_cmpxchg(&v->counter, old, new);
}
#define arch_atomic64_try_cmpxchg arch_atomic64_try_cmpxchg
static __always_inline bool arch_atomic64_try_cmpxchg_unchecked(atomic64_unchecked_t *v, s64 *old, s64 new)
{
return arch_try_cmpxchg(&v->counter, old, new);
}
#define arch_atomic64_try_cmpxchg_unchecked arch_atomic64_try_cmpxchg_unchecked
static __always_inline s64 arch_atomic64_xchg(atomic64_t *v, s64 new)
{
return arch_xchg(&v->counter, new);
}
#define arch_atomic64_xchg arch_atomic64_xchg
static __always_inline s64 arch_atomic64_xchg_unchecked(atomic64_unchecked_t *v, s64 new)
{
return arch_xchg(&v->counter, new);
}
#define arch_atomic64_xchg_unchecked arch_atomic64_xchg_unchecked
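/**
 * arch_atomic64_fetch_add_unless - add unless the number is already a given value
 * @v: pointer of type atomic64_t
 * @a: the amount to add to @v...
 * @u: ...unless @v is equal to @u
 *
 * Atomically adds @a to @v, as long as @v was not @u.
 * Returns the old value of @v.
 */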
static __always_inline s64 arch_atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
{
s64 new, c = arch_atomic64_read(v);
do {
if (unlikely(c == u))
break;
asm volatile("addq %2,%0\n\t"
PAX_REFCOUNT_OVERFLOW(64)
: "=r" (new)
: "0" (c), "ir" (a),
[counter] "m" (v->counter)
: "cc", "cx");
} while (!arch_atomic64_try_cmpxchg(v, &c, new));
return c;
}
#define arch_atomic64_fetch_add_unless arch_atomic64_fetch_add_unless
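/**
 * arch_atomic64_and - atomic bitwise AND
 * @i: mask to AND into @v
 * @v: pointer to type atomic64_t
 *
 * Atomically performs @v &= @i.  The _or/_xor and _fetch_* variants below
 * follow the same pattern.
 */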
static __always_inline void arch_atomic64_and(s64 i, atomic64_t *v)
{
asm volatile(LOCK_PREFIX "andq %1,%0"
: "+m" (v->counter)
: "er" (i)
: "memory");
}
static __always_inline s64 arch_atomic64_fetch_and(s64 i, atomic64_t *v)
{
s64 val = arch_atomic64_read(v);
do {
} while (!arch_atomic64_try_cmpxchg(v, &val, val & i));
return val;
}
#define arch_atomic64_fetch_and arch_atomic64_fetch_and
static __always_inline void arch_atomic64_or(s64 i, atomic64_t *v)
{
asm volatile(LOCK_PREFIX "orq %1,%0"
: "+m" (v->counter)
: "er" (i)
: "memory");
}
static __always_inline s64 arch_atomic64_fetch_or(s64 i, atomic64_t *v)
{
s64 val = arch_atomic64_read(v);
do {
} while (!arch_atomic64_try_cmpxchg(v, &val, val | i));
return val;
}
#define arch_atomic64_fetch_or arch_atomic64_fetch_or
static __always_inline void arch_atomic64_xor(s64 i, atomic64_t *v)
{
asm volatile(LOCK_PREFIX "xorq %1,%0"
: "+m" (v->counter)
: "er" (i)
: "memory");
}
static __always_inline s64 arch_atomic64_fetch_xor(s64 i, atomic64_t *v)
{
s64 val = arch_atomic64_read(v);
do {
} while (!arch_atomic64_try_cmpxchg(v, &val, val ^ i));
return val;
}
#define arch_atomic64_fetch_xor arch_atomic64_fetch_xor
#endif /* _ASM_X86_ATOMIC64_64_H */