/* SPDX-License-Identifier: GPL-2.0 */
/* Atomic operations usable in machine independent code */
#ifndef _LINUX_ATOMIC_H
#define _LINUX_ATOMIC_H
#include <linux/types.h>
#include <asm/atomic.h>
#include <asm/barrier.h>
/*
* Relaxed variants of xchg, cmpxchg and some atomic operations.
*
* We support four variants:
*
* - Fully ordered: The default implementation, no suffix required.
* - Acquire: Provides ACQUIRE semantics, _acquire suffix.
* - Release: Provides RELEASE semantics, _release suffix.
* - Relaxed: No ordering guarantees, _relaxed suffix.
*
* For compound atomics performing both a load and a store, ACQUIRE
* semantics apply only to the load and RELEASE semantics only to the
* store portion of the operation. Note that a failed cmpxchg_acquire
* does -not- imply any memory ordering constraints.
*
* See Documentation/memory-barriers.txt for ACQUIRE/RELEASE definitions.
*/
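/*
 * For example (illustrative only), the same RMW operation at each
 * ordering level, and the cmpxchg caveat from above:
 *
 *	atomic_t v = ATOMIC_INIT(0);
 *	int old;
 *
 *	old = atomic_fetch_add(1, &v);		// fully ordered
 *	old = atomic_fetch_add_acquire(1, &v);	// ACQUIRE on the load
 *	old = atomic_fetch_add_release(1, &v);	// RELEASE on the store
 *	old = atomic_fetch_add_relaxed(1, &v);	// no ordering
 *
 *	if (atomic_cmpxchg_acquire(&v, 1, 2) == 1) {
 *		// success: the load half provides ACQUIRE ordering
 *	} else {
 *		// failure: no ordering may be assumed
 *	}
 */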
#define atomic_cond_read_acquire(v, c) smp_cond_load_acquire(&(v)->counter, (c))
#define atomic_cond_read_relaxed(v, c) smp_cond_load_relaxed(&(v)->counter, (c))
#define atomic64_cond_read_acquire(v, c) smp_cond_load_acquire(&(v)->counter, (c))
#define atomic64_cond_read_relaxed(v, c) smp_cond_load_relaxed(&(v)->counter, (c))
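/*
 * Example (illustrative; LOCKED_MASK is a stand-in): spin until another
 * CPU clears the locked bits, with ACQUIRE ordering on the final load.
 * VAL names the freshly loaded counter value inside the condition:
 *
 *	atomic_cond_read_acquire(&lock->val, !(VAL & LOCKED_MASK));
 */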
/*
* The idea here is to build acquire/release variants by adding explicit
* barriers on top of the relaxed variant. In the case where the relaxed
* variant is already fully ordered, no additional barriers are needed.
*
* If an architecture overrides __atomic_acquire_fence() it will probably
* want to define smp_mb__after_spinlock().
*/
#ifndef __atomic_acquire_fence
#define __atomic_acquire_fence smp_mb__after_atomic
#endif
#ifndef __atomic_release_fence
#define __atomic_release_fence smp_mb__before_atomic
#endif
#ifndef __atomic_pre_full_fence
#define __atomic_pre_full_fence smp_mb__before_atomic
#endif
#ifndef __atomic_post_full_fence
#define __atomic_post_full_fence smp_mb__after_atomic
#endif
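/*
 * Sketch of an architecture override (modeled loosely on RISC-V's
 * native acquire fence; the asm string is an assumption, not part of
 * this header's contract):
 *
 *	#define __atomic_acquire_fence()				\
 *		__asm__ __volatile__("fence r, rw" ::: "memory")
 *
 * An architecture doing this should also consider defining
 * smp_mb__after_spinlock(), per the note above.
 */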
#define __atomic_op_acquire(op, args...) \
({ \
typeof(op##_relaxed(args)) __ret = op##_relaxed(args); \
__atomic_acquire_fence(); \
__ret; \
})
#define __atomic_op_release(op, args...) \
({ \
__atomic_release_fence(); \
op##_relaxed(args); \
})
#define __atomic_op_fence(op, args...) \
({ \
typeof(op##_relaxed(args)) __ret; \
__atomic_pre_full_fence(); \
__ret = op##_relaxed(args); \
__atomic_post_full_fence(); \
__ret; \
})
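/*
 * For illustration, a typical generated fallback such as
 *
 *	#define atomic_add_return_acquire(...)				\
 *		__atomic_op_acquire(atomic_add_return, __VA_ARGS__)
 *
 * therefore expands to roughly:
 *
 *	({
 *		int __ret = atomic_add_return_relaxed(i, v);
 *		__atomic_acquire_fence();
 *		__ret;
 *	})
 */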
#if defined(CONFIG_PAX_REFCOUNT) && !defined(PAX_UNCHECKED_T_DISABLED)
# define __atomic_op_pick(fpfx,tpfx,op,order,var) (_Generic((var), \
	/* uncomment the default below to tolerate abusers, e.g. 'void *' */ \
/* default: fpfx##_##op##order,*/ \
const tpfx##_unchecked_t *: fpfx##tpfx##_##op##_unchecked##order,\
tpfx##_unchecked_t * : fpfx##tpfx##_##op##_unchecked##order,\
const tpfx##_t * : fpfx##tpfx##_##op##order, \
tpfx##_t * : fpfx##tpfx##_##op##order))
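/*
 * Illustrative expansion (the arch_ prefix is assumed):
 * __atomic_op_pick(arch_, atomic, add_return, _acquire, v) yields the
 * function name
 *
 *	arch_atomic_add_return_acquire		  for v of type atomic_t *
 *	arch_atomic_add_return_unchecked_acquire  for atomic_unchecked_t *
 *
 * so the checked and unchecked APIs can share one dispatch site.
 */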
# ifdef CONFIG_GENERIC_ATOMIC64
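/*
 * With the generic 64-bit fallbacks, atomic64_unchecked_t is assumed to
 * alias atomic64_t, so a _Generic dispatch would see duplicate type
 * associations; pick the checked name directly instead.
 */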
# define __atomic64_op_pick(fpfx,tpfx,op,order,var) fpfx##tpfx##_##op##order
# else
# define __atomic64_op_pick __atomic_op_pick
# endif
#else
# define __atomic_op_pick(fpfx,tpfx,op,order,var) fpfx##tpfx##_##op##order
# define __atomic64_op_pick __atomic_op_pick
#endif
#include <linux/atomic/atomic-arch-fallback.h>
#include <linux/atomic/atomic-long.h>
#include <linux/atomic/atomic-instrumented.h>
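/*
 * Never called: referencing every _unchecked operation here makes the
 * build fail fast if an architecture's PAX_REFCOUNT plumbing forgets to
 * provide one. Under CONFIG_PAX_REFCOUNT the plain atomic*_t operations
 * trap on overflow; the _unchecked variants opt out for counters that
 * may legitimately wrap.
 */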
static inline void pax_refcount_needs_these_functions(void)
{
	(void)atomic_read_unchecked((atomic_unchecked_t *)NULL);
	atomic_set_unchecked((atomic_unchecked_t *)NULL, 0);
	atomic_add_unchecked(0, (atomic_unchecked_t *)NULL);
	atomic_sub_unchecked(0, (atomic_unchecked_t *)NULL);
	atomic_inc_unchecked((atomic_unchecked_t *)NULL);
	(void)atomic_inc_and_test_unchecked((atomic_unchecked_t *)NULL);
	(void)atomic_inc_return_unchecked((atomic_unchecked_t *)NULL);
	(void)atomic_fetch_inc_unchecked((atomic_unchecked_t *)NULL);
	(void)atomic_add_return_unchecked(0, (atomic_unchecked_t *)NULL);
	(void)atomic_sub_return_unchecked(0, (atomic_unchecked_t *)NULL);
	atomic_dec_unchecked((atomic_unchecked_t *)NULL);
	(void)atomic_dec_return_unchecked((atomic_unchecked_t *)NULL);
	(void)atomic_cmpxchg_unchecked((atomic_unchecked_t *)NULL, 0, 0);
	(void)atomic_xchg_unchecked((atomic_unchecked_t *)NULL, 0);
	atomic_andnot_unchecked(0, (atomic_unchecked_t *)NULL);

	(void)atomic_long_read_unchecked((atomic_long_unchecked_t *)NULL);
	atomic_long_set_unchecked((atomic_long_unchecked_t *)NULL, 0);
	atomic_long_add_unchecked(0, (atomic_long_unchecked_t *)NULL);
	atomic_long_sub_unchecked(0, (atomic_long_unchecked_t *)NULL);
	atomic_long_inc_unchecked((atomic_long_unchecked_t *)NULL);
	(void)atomic_long_add_return_unchecked(0, (atomic_long_unchecked_t *)NULL);
	(void)atomic_long_sub_return_unchecked(0, (atomic_long_unchecked_t *)NULL);
	(void)atomic_long_inc_return_unchecked((atomic_long_unchecked_t *)NULL);
	(void)atomic_long_fetch_inc_unchecked((atomic_long_unchecked_t *)NULL);
	atomic_long_dec_unchecked((atomic_long_unchecked_t *)NULL);
	(void)atomic_long_dec_return_unchecked((atomic_long_unchecked_t *)NULL);
	(void)atomic_long_cmpxchg_unchecked((atomic_long_unchecked_t *)NULL, 0, 0);
}
#endif /* _LINUX_ATOMIC_H */