/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (C) 2009 PaX Team <[email protected]> */
#ifndef _ASM_X86_ATOMIC_H
#define _ASM_X86_ATOMIC_H
#include <linux/compiler.h>
#include <linux/types.h>
#include <asm/alternative.h>
#include <asm/cmpxchg.h>
#include <asm/rmwcc.h>
#include <asm/barrier.h>
/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc.
 */
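/*
 * This header carries the PaX/grsecurity REFCOUNT and size_overflow
 * annotations: operations on atomic_t get overflow/underflow checks
 * (the PAX_REFCOUNT_* asm and the extra rmwcc arguments below), while
 * the atomic_unchecked_t variants deliberately skip them, for counters
 * that may legitimately wrap, such as statistics.  The
 * __turn_off_size_overflow attribute, as its name suggests, exempts a
 * function from the size_overflow gcc plugin's checking.
 *
 * A minimal usage sketch of the checked API (free_object() is a
 * hypothetical release function):
 *
 *	atomic_t refs = ATOMIC_INIT(1);
 *
 *	arch_atomic_inc(&refs);			// traps on wraparound
 *	if (arch_atomic_dec_and_test(&refs))	// checked decrement
 *		free_object();
 */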
static __always_inline int arch_atomic_read(const atomic_t *v)
{
	/*
	 * Note for KASAN: we deliberately don't use READ_ONCE_NOCHECK() here;
	 * it's a non-inlined function that increases binary size and stack
	 * usage.
	 */
return __READ_ONCE((v)->counter);
}
static __always_inline int __turn_off_size_overflow arch_atomic_read_unchecked(const atomic_unchecked_t *v)
{
	/*
	 * Note for KASAN: we deliberately don't use READ_ONCE_NOCHECK() here;
	 * it's a non-inlined function that increases binary size and stack
	 * usage.
	 */
	return __READ_ONCE((v)->counter);
}
static __always_inline void arch_atomic_set(atomic_t *v, int i)
{
__WRITE_ONCE(v->counter, i);
}
static __always_inline void arch_atomic_set_unchecked(atomic_unchecked_t *v, int i)
{
	__WRITE_ONCE(v->counter, i);
}
static __always_inline void arch_atomic_add(int i, atomic_t *v)
{
asm volatile(LOCK_PREFIX "addl %1,%0\n\t"
PAX_REFCOUNT_OVERFLOW(32)
: [counter] "+m" (v->counter)
: "ir" (i)
: "cc", "cx", "memory");
}
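/*
 * PAX_REFCOUNT_OVERFLOW(32) is supplied by the PaX patch elsewhere; as
 * a sketch of the idea only (not the literal macro body), older public
 * PaX patches open-coded the check after the add roughly like this,
 * undoing the operation and raising the overflow exception (#OF) for
 * the kernel to report:
 *
 *	"jno 0f\n"
 *	LOCK_PREFIX "subl %1,%0\n"
 *	"int $4\n0:\n"
 *	_ASM_EXTABLE(0b, 0b)
 *
 * The "cx" clobber and the named [counter] operand in the current form
 * presumably exist for the macro's recovery/reporting path.
 */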
/**
* arch_atomic_add_unchecked - add integer to atomic variable
* @i: integer value to add
* @v: pointer of type atomic_unchecked_t
*
* Atomically adds @i to @v.
*/
static __always_inline void arch_atomic_add_unchecked(int i, atomic_unchecked_t *v)
{
asm volatile(LOCK_PREFIX "addl %1,%0\n"
: [counter] "+m" (v->counter)
: "ir" (i) : "memory");
}
static __always_inline void arch_atomic_sub(int i, atomic_t *v)
{
asm volatile(LOCK_PREFIX "subl %1,%0\n\t"
PAX_REFCOUNT_UNDERFLOW(32)
: [counter] "+m" (v->counter)
: "ir" (i)
: "cc", "cx", "memory");
}
/**
* arch_atomic_sub_unchecked - subtract integer from atomic variable
* @i: integer value to subtract
* @v: pointer of type atomic_unchecked_t
*
* Atomically subtracts @i from @v.
*/
static __always_inline void arch_atomic_sub_unchecked(int i, atomic_unchecked_t *v)
{
asm volatile(LOCK_PREFIX "subl %1,%0\n"
: "+m" (v->counter)
: "ir" (i) : "memory");
}
static __always_inline bool arch_atomic_sub_and_test(int i, atomic_t *v)
{
return GEN_BINARY_RMWcc(LOCK_PREFIX "subl", v->counter, underflow, 32, e, "er", i);
}
#define arch_atomic_sub_and_test arch_atomic_sub_and_test
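/*
 * Relative to the upstream GEN_*_RMWcc() helpers, the PaX-patched
 * <asm/rmwcc.h> takes two extra arguments, the check kind
 * (overflow/underflow) and the operand width in bits, which select the
 * PAX_REFCOUNT_* sequence appended after the instruction; judging from
 * the call sites here, the GEN_*_RMWcc_unchecked() forms omit the
 * check entirely.
 */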
static __always_inline void arch_atomic_inc(atomic_t *v)
{
asm volatile(LOCK_PREFIX "incl %0\n\t"
PAX_REFCOUNT_OVERFLOW(32)
: [counter] "+m" (v->counter)
: : "cc", "cx", "memory");
}
#define arch_atomic_inc arch_atomic_inc
static __always_inline void arch_atomic_inc_unchecked(atomic_unchecked_t *v)
{
asm volatile(LOCK_PREFIX "incl %0\n"
: "+m" (v->counter) :: "memory");
}
#define arch_atomic_inc_unchecked arch_atomic_inc_unchecked
static __always_inline void arch_atomic_dec(atomic_t *v)
{
asm volatile(LOCK_PREFIX "decl %0\n\t"
PAX_REFCOUNT_UNDERFLOW(32)
: [counter] "+m" (v->counter)
: : "cc", "cx", "memory");
}
#define arch_atomic_dec arch_atomic_dec
static __always_inline void arch_atomic_dec_unchecked(atomic_unchecked_t *v)
{
asm volatile(LOCK_PREFIX "decl %0\n"
: "+m" (v->counter) :: "memory");
}
#define arch_atomic_dec_unchecked arch_atomic_dec_unchecked
static __always_inline bool arch_atomic_dec_and_test(atomic_t *v)
{
return GEN_UNARY_RMWcc(LOCK_PREFIX "decl", v->counter, underflow, 32, e);
}
#define arch_atomic_dec_and_test arch_atomic_dec_and_test
static __always_inline bool arch_atomic_inc_and_test(atomic_t *v)
{
return GEN_UNARY_RMWcc(LOCK_PREFIX "incl", v->counter, overflow, 32, e);
}
#define arch_atomic_inc_and_test arch_atomic_inc_and_test
static __always_inline bool arch_atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
{
return GEN_UNARY_RMWcc_unchecked(LOCK_PREFIX "incl", v->counter, e);
}
#define arch_atomic_inc_and_test_unchecked arch_atomic_inc_and_test_unchecked
static __always_inline bool arch_atomic_add_negative(int i, atomic_t *v)
{
return GEN_BINARY_RMWcc(LOCK_PREFIX "addl", v->counter, overflow, 32, s, "er", i);
}
#define arch_atomic_add_negative arch_atomic_add_negative
static __always_inline int __turn_off_size_overflow arch_atomic_add_return(int i, atomic_t *v)
{
return i + xadd_check_overflow(&v->counter, i);
}
#define arch_atomic_add_return arch_atomic_add_return
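/*
 * xadd_check_overflow() is the PaX-provided counterpart of xadd(): an
 * atomic exchange-and-add that returns the old value but additionally
 * applies the REFCOUNT overflow check, so arch_atomic_add_return()
 * traps on wraparound while arch_atomic_add_return_unchecked() below
 * does not.
 */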
static __always_inline int arch_atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
{
return i + xadd(&v->counter, i);
}
#define arch_atomic_add_return_unchecked arch_atomic_add_return_unchecked
static __always_inline int __turn_off_size_overflow arch_atomic_sub_return(int i, atomic_t *v)
{
return arch_atomic_add_return(-i, v);
}
#define arch_atomic_sub_return arch_atomic_sub_return
static __always_inline int __turn_off_size_overflow arch_atomic_sub_return_unchecked(int i, atomic_unchecked_t *v)
{
return arch_atomic_add_return_unchecked(-i, v);
}
#define arch_atomic_sub_return_unchecked arch_atomic_sub_return_unchecked
static __always_inline int arch_atomic_fetch_add(int i, atomic_t *v)
{
return xadd_check_overflow(&v->counter, i);
}
#define arch_atomic_fetch_add arch_atomic_fetch_add
static __always_inline int arch_atomic_fetch_add_unchecked(int i, atomic_unchecked_t *v)
{
return xadd(&v->counter, i);
}
#define arch_atomic_fetch_add_unchecked arch_atomic_fetch_add_unchecked
static __always_inline int arch_atomic_fetch_sub(int i, atomic_t *v)
{
return xadd_check_overflow(&v->counter, -i);
}
#define arch_atomic_fetch_sub arch_atomic_fetch_sub
static __always_inline int __turn_off_size_overflow arch_atomic_cmpxchg(atomic_t *v, int old, int new)
{
return arch_cmpxchg(&v->counter, old, new);
}
#define arch_atomic_cmpxchg arch_atomic_cmpxchg
static __always_inline int arch_atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
{
return arch_cmpxchg(&v->counter, old, new);
}
#define arch_atomic_cmpxchg_unchecked arch_atomic_cmpxchg_unchecked
static __always_inline bool arch_atomic_try_cmpxchg(atomic_t *v, int *old, int new)
{
return arch_try_cmpxchg(&v->counter, old, new);
}
#define arch_atomic_try_cmpxchg arch_atomic_try_cmpxchg
static __always_inline int arch_atomic_xchg(atomic_t *v, int new)
{
return arch_xchg(&v->counter, new);
}
#define arch_atomic_xchg arch_atomic_xchg
static __always_inline int arch_atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
{
return arch_xchg(&v->counter, new);
}
#define arch_atomic_xchg_unchecked arch_atomic_xchg_unchecked
static __always_inline void arch_atomic_and(int i, atomic_t *v)
{
asm volatile(LOCK_PREFIX "andl %1,%0"
: "+m" (v->counter)
: "ir" (i)
: "memory");
}
static __always_inline int arch_atomic_fetch_and(int i, atomic_t *v)
{
int val = arch_atomic_read(v);
do { } while (!arch_atomic_try_cmpxchg(v, &val, val & i));
return val;
}
#define arch_atomic_fetch_and arch_atomic_fetch_and
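/*
 * The loop above relies on the try_cmpxchg calling convention: when
 * the compare fails, arch_atomic_try_cmpxchg() updates "val" with the
 * value currently in memory, so every retry recomputes "val & i" from
 * a fresh snapshot.  The empty loop body is intentional;
 * fetch_or/fetch_xor below follow the same pattern.
 */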
static __always_inline void arch_atomic_or(int i, atomic_t *v)
{
asm volatile(LOCK_PREFIX "orl %1,%0"
: "+m" (v->counter)
: "ir" (i)
: "memory");
}
static __always_inline int arch_atomic_fetch_or(int i, atomic_t *v)
{
int val = arch_atomic_read(v);
do { } while (!arch_atomic_try_cmpxchg(v, &val, val | i));
return val;
}
#define arch_atomic_fetch_or arch_atomic_fetch_or
static __always_inline void arch_atomic_xor(int i, atomic_t *v)
{
asm volatile(LOCK_PREFIX "xorl %1,%0"
: "+m" (v->counter)
: "ir" (i)
: "memory");
}
static __always_inline int arch_atomic_fetch_xor(int i, atomic_t *v)
{
int val = arch_atomic_read(v);
do { } while (!arch_atomic_try_cmpxchg(v, &val, val ^ i));
return val;
}
#define arch_atomic_fetch_xor arch_atomic_fetch_xor
/**
* arch_atomic_fetch_add_unless - add unless the number is already a given value
* @v: pointer of type atomic_t
* @a: the amount to add to v...
* @u: ...unless v is equal to u.
*
* Atomically adds @a to @v, so long as @v was not already @u.
* Returns the old value of @v.
*/
#define arch_atomic_fetch_add_unless arch_atomic_fetch_add_unless
static __always_inline int arch_atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
int new, c = arch_atomic_read(v);
do {
if (unlikely(c == u))
break;
asm volatile("addl %2,%0\n\t"
PAX_REFCOUNT_OVERFLOW(32)
: "=r" (new)
: "0" (c), "ir" (a),
[counter] "m" (v->counter)
: "cc", "cx");
} while (!arch_atomic_try_cmpxchg(v, &c, new));
return c;
}
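/*
 * Note that the addl above is deliberately not LOCK-prefixed: it only
 * computes the candidate value "new" in a register, with the REFCOUNT
 * overflow check applied, and the atomic update itself is performed by
 * the try_cmpxchg loop.  The inc_unless_negative, dec_unless_positive
 * and dec_if_positive helpers below use the same structure.
 */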
#define arch_atomic_inc_unless_negative arch_atomic_inc_unless_negative
static __always_inline bool arch_atomic_inc_unless_negative(atomic_t *v)
{
int new, c = arch_atomic_read(v);
do {
if (unlikely(c < 0))
return false;
asm volatile("incl %0\n\t"
PAX_REFCOUNT_OVERFLOW(32)
: "=r" (new)
: "0" (c),
[counter] "m" (v->counter)
: "cc", "cx");
} while (!arch_atomic_try_cmpxchg(v, &c, new));
return true;
}
#define arch_atomic_dec_unless_positive arch_atomic_dec_unless_positive
static __always_inline bool arch_atomic_dec_unless_positive(atomic_t *v)
{
int new, c = arch_atomic_read(v);
do {
if (unlikely(c > 0))
return false;
asm volatile("decl %0\n\t"
PAX_REFCOUNT_UNDERFLOW(32)
: "=r" (new)
: "0" (c),
[counter] "m" (v->counter)
: "cc", "cx");
} while (!arch_atomic_try_cmpxchg(v, &c, new));
return true;
}
/**
 * arch_atomic_dec_if_positive - decrement by 1 if old value is positive
 * @v: pointer of type atomic_t
 *
 * Atomically decrements @v by 1, unless that would make it negative.
 * Returns the old value of @v minus 1, even if @v was not decremented.
 */
#define arch_atomic_dec_if_positive arch_atomic_dec_if_positive
static __always_inline int arch_atomic_dec_if_positive(atomic_t *v)
{
int dec, c = arch_atomic_read(v);
do {
asm volatile("decl %0\n\t"
PAX_REFCOUNT_UNDERFLOW(32)
: "=r" (dec)
: "0" (c),
[counter] "m" (v->counter)
: "cc", "cx");
if (unlikely(dec < 0))
break;
} while (!arch_atomic_try_cmpxchg(v, &c, dec));
return dec;
}
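/*
 * A minimal usage sketch for arch_atomic_dec_if_positive(), claiming
 * one unit of a limited resource (free_slots is a hypothetical counter
 * of available slots):
 *
 *	if (arch_atomic_dec_if_positive(&free_slots) < 0)
 *		return -EBUSY;	// none left; the counter was not touched
 */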
#ifdef CONFIG_X86_32
# include <asm/atomic64_32.h>
#else
# include <asm/atomic64_64.h>
#endif
#endif /* _ASM_X86_ATOMIC_H */