/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SLUB_DEF_H
#define _LINUX_SLUB_DEF_H
/*
* SLUB : A Slab allocator without object queues.
*
* (C) 2007 SGI, Christoph Lameter
*/
#include <linux/kfence.h>
#include <linux/kobject.h>
#include <linux/reciprocal_div.h>
#include <linux/local_lock.h>
enum stat_item {
ALLOC_FASTPATH, /* Allocation from cpu slab */
ALLOC_SLOWPATH, /* Allocation by getting a new cpu slab */
FREE_FASTPATH, /* Free to cpu slab */
FREE_SLOWPATH, /* Freeing not to cpu slab */
FREE_FROZEN, /* Freeing to frozen slab */
FREE_ADD_PARTIAL, /* Freeing moves slab to partial list */
FREE_REMOVE_PARTIAL, /* Freeing removes last object */
ALLOC_FROM_PARTIAL, /* Cpu slab acquired from node partial list */
ALLOC_SLAB, /* Cpu slab acquired from page allocator */
ALLOC_REFILL, /* Refill cpu slab from slab freelist */
ALLOC_NODE_MISMATCH, /* Switching cpu slab */
FREE_SLAB, /* Slab freed to the page allocator */
CPUSLAB_FLUSH, /* Abandoning of the cpu slab */
DEACTIVATE_FULL, /* Cpu slab was full when deactivated */
DEACTIVATE_EMPTY, /* Cpu slab was empty when deactivated */
DEACTIVATE_TO_HEAD, /* Cpu slab was moved to the head of the partial list */
DEACTIVATE_TO_TAIL, /* Cpu slab was moved to the tail of the partial list */
DEACTIVATE_REMOTE_FREES,/* Slab contained remotely freed objects */
DEACTIVATE_BYPASS, /* Implicit deactivation */
ORDER_FALLBACK, /* Number of times fallback was necessary */
CMPXCHG_DOUBLE_CPU_FAIL,/* Failure of this_cpu_cmpxchg_double */
CMPXCHG_DOUBLE_FAIL, /* Number of times that cmpxchg double did not match */
CPU_PARTIAL_ALLOC, /* Used cpu partial on alloc */
CPU_PARTIAL_FREE, /* Refill cpu partial on free */
CPU_PARTIAL_NODE, /* Refill cpu partial from node partial */
CPU_PARTIAL_DRAIN, /* Drain cpu partial to node partial */
NR_SLUB_STAT_ITEMS
};
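/*
 * Illustrative sketch (not part of this header): with CONFIG_SLUB_STATS
 * enabled, mm/slub.c typically bumps these counters from the hot paths with
 * a raw per-cpu increment, roughly:
 *
 *	static inline void stat(const struct kmem_cache *s, enum stat_item si)
 *	{
 *	#ifdef CONFIG_SLUB_STATS
 *		raw_cpu_inc(s->cpu_slab->stat[si]);
 *	#endif
 *	}
 *
 * The exact helper lives in mm/slub.c and may differ between trees; the
 * counters are exported per cache via sysfs when CONFIG_SLUB_SYSFS is set.
 */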
#ifndef CONFIG_SLUB_TINY
/*
* When changing the layout, make sure freelist and tid are still compatible
* with this_cpu_cmpxchg_double() alignment requirements.
*/
struct kmem_cache_cpu {
union {
struct {
void **freelist; /* Pointer to next available object */
unsigned long tid; /* Globally unique transaction id */
};
freelist_aba_t freelist_tid;
};
struct slab *slab; /* The slab from which we are allocating */
#ifdef CONFIG_SLUB_CPU_PARTIAL
struct slab *partial; /* Partially allocated slabs */
#endif
#ifdef CONFIG_SLUB_STATS
unsigned int stat[NR_SLUB_STAT_ITEMS];
#endif
} __aligned(2 * sizeof(void *));
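/*
 * Illustrative sketch (not part of this header): the lockless fast paths in
 * mm/slub.c update freelist and tid as a single double-word, which is why
 * the union above must keep its 2 * sizeof(void *) alignment. In older
 * trees this looks roughly like:
 *
 *	if (unlikely(!this_cpu_cmpxchg_double(s->cpu_slab->freelist,
 *					      s->cpu_slab->tid,
 *					      object, tid,
 *					      next_object, next_tid(tid))))
 *		goto redo;
 *
 * Newer trees wrap the same idea in a try_cmpxchg-style helper.
 */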
#endif /* CONFIG_SLUB_TINY */
#ifdef CONFIG_SLUB_CPU_PARTIAL
#define slub_percpu_partial(c) ((c)->partial)
#define slub_set_percpu_partial(c, p) \
({ \
slub_percpu_partial(c) = (p)->next; \
})
#define slub_percpu_partial_read_once(c) READ_ONCE(slub_percpu_partial(c))
#else
#define slub_percpu_partial(c) NULL
#define slub_set_percpu_partial(c, p)
#define slub_percpu_partial_read_once(c) NULL
#endif /* CONFIG_SLUB_CPU_PARTIAL */
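/*
 * Illustrative sketch (not part of this header): the allocation slow path
 * pops the head of the per-cpu partial list roughly like this (simplified
 * from mm/slub.c; details differ between trees):
 *
 *	struct slab *slab = slub_percpu_partial(c);
 *
 *	if (slab) {
 *		c->slab = slab;
 *		slub_set_percpu_partial(c, slab);
 *	}
 *
 * slub_set_percpu_partial() advances the list head to slab->next; with
 * !CONFIG_SLUB_CPU_PARTIAL the stubs above compile the whole branch away.
 */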
/*
* Word size structure that can be atomically updated or read and that
* contains both the order and the number of objects that a slab of the
* given order would contain.
*/
union kmem_cache_order_objects {
struct {
unsigned long objects : BITS_PER_LONG / 2;
unsigned long order : BITS_PER_LONG / 2;
};
unsigned long x;
};
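/*
 * Illustrative sketch (not part of this header): with the bitfield layout
 * above, order and object count can be packed and read back like this
 * (hypothetical values):
 *
 *	union kmem_cache_order_objects oo = { .order = 1, .objects = 32 };
 *	unsigned int order   = oo.order;
 *	unsigned int objects = oo.objects;
 *
 * On the usual ABIs, objects occupies the low half of oo.x and order the
 * high half; reading or writing oo.x moves both in one word-sized access,
 * which is what keeps per-cpu/dynamic order updates atomic.
 */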
/*
* Slab cache management.
*/
struct kmem_cache {
#ifndef CONFIG_SLUB_TINY
/* PaX: adjust PERCPU_AUTOSLAB_RESERVE for the __percpu fields */
struct kmem_cache_cpu __percpu *cpu_slab;
#ifdef CONFIG_SLUB_DYNORDER
union kmem_cache_order_objects __percpu *cpu_oo;
#endif
#endif
local_lock_t __percpu *cpu_slab_lock; /* Protects the fields above */
/* Used for retrieving partial slabs, etc. */
slab_flags_t flags;
unsigned int min_partial;
atomic_t min_partial_pages;
unsigned int size; /* Object size including metadata */
unsigned int object_size; /* Object size without metadata */
struct reciprocal_value reciprocal_size;
#ifdef CONFIG_NUMA
/*
* Defragmentation by allocating from a remote node.
*/
unsigned short remote_node_defrag_ratio;
#endif
unsigned int offset; /* Free pointer offset */
#ifdef CONFIG_SLUB_CPU_PARTIAL
/* Number of per cpu partial objects to keep around */
unsigned int cpu_partial;
/* Number of per cpu partial slab pages to keep around */
atomic_t cpu_partial_slab_pages;
#endif
union kmem_cache_order_objects def_oo; /* Default slab order and object count */
/* Allocation and freeing of slabs */
gfp_t allocflags; /* gfp flags to use on each alloc */
atomic_t refcount; /* Refcount for slab cache destroy */
void (*ctor)(void *object); /* Object constructor */
#ifdef CONFIG_KALLSYMS
ssize_t (*print)(char *buffer, size_t buflen, void *obj, const char *name);
#endif
unsigned int inuse; /* Offset to metadata */
unsigned int align; /* Alignment */
unsigned int red_left_pad; /* Left redzone padding size */
#ifdef CONFIG_GRKERNSEC_SLAB_OBJREUSE_HARDEN
unsigned short pad_space;
#endif
const char *name; /* Name (only for display!) */
struct list_head list; /* List of slab caches */
#ifdef CONFIG_SLUB_SYSFS
struct kobject kobj; /* For sysfs */
#endif
#ifdef CONFIG_SLAB_FREELIST_HARDENED
unsigned long random;
#endif
#ifdef CONFIG_KASAN_GENERIC
struct kasan_cache kasan_info;
#endif
#ifdef CONFIG_HARDENED_USERCOPY
unsigned int useroffset; /* USERCOPY region offset */
unsigned int usersize; /* USERCOPY region size */
#endif
#ifdef CONFIG_PAX_AUTOSLAB_PLUGIN_AUTOTYPENAME
unsigned int typesize;
const char *typename;
const struct btf_type *btftype;
#ifdef CONFIG_SLUB_DEBUG_MEMBERSHIP
size_t nmemberships;
struct membership {
const struct btf_type *btftype;
size_t offset;
} *memberships;
#endif
#endif
struct kmem_cache_node *node[MAX_NUMNODES];
};
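/*
 * Illustrative sketch (not part of this header): ->reciprocal_size caches
 * reciprocal_value(->size) so that obj_to_index() below can turn the
 * per-object division into a multiply and shift. Cache setup does roughly
 * the following (simplified; see calculate_sizes() in mm/slub.c):
 *
 *	s->size = ALIGN(s->object_size + <metadata>, s->align);
 *	s->reciprocal_size = reciprocal_value(s->size);
 *
 * where <metadata> stands for the free-pointer/debug overhead laid out
 * between object_size and size.
 */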
#ifdef CONFIG_PAX_AUTOSLAB_PLUGIN_AUTOTYPENAME
extern struct btf *btf_vmlinux;
#endif
#ifdef CONFIG_SLUB_DEBUG_MEMBERSHIP
bool check_membership(struct kmem_cache *s, void *x);
#else
static inline bool check_membership(struct kmem_cache *s, void *x) { return true; }
#endif
#ifdef CONFIG_SLUB_SYSFS
#define SLAB_SUPPORTS_SYSFS 1
void sysfs_slab_unlink(struct kmem_cache *s);
void sysfs_slab_release(struct kmem_cache *s);
#else
static inline void sysfs_slab_unlink(struct kmem_cache *s) { }
static inline void sysfs_slab_release(struct kmem_cache *s) { }
#endif
void *fixup_red_left(struct kmem_cache *s, void *p);
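/*
 * Return the start of the object that contains @x, clamped to the last
 * object in @slab and adjusted past any left red zone padding.
 */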
static inline const void *nearest_obj(struct kmem_cache *cache,
const struct slab *slab, const void *x)
{
const void *object = x - (x - slab_obj_address(slab)) % cache->size;
const void *last_object = slab_obj_address(slab) +
(slab->objects - 1) * cache->size;
const void *result = (unlikely(object > last_object)) ? last_object : object;
result = fixup_red_left(cache, (void *)result);
return result;
}
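/*
 * Like nearest_obj(), but without clamping @ptr to the last object in @slab.
 */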
static inline const void *ptr_to_obj(struct kmem_cache *s,
const struct slab *slab, const void *ptr)
{
const void *obj;
obj = ptr - (ptr - slab_obj_address(slab)) % s->size;
return fixup_red_left(s, (void *)obj);
}
/* Determine object index from a given position */
static inline unsigned int __obj_to_index(const struct kmem_cache *cache,
const void *addr, const void *obj)
{
return reciprocal_divide(kasan_reset_tag(obj) - addr,
cache->reciprocal_size);
}
static inline unsigned int obj_to_index(const struct kmem_cache *cache,
const struct slab *slab, const void *obj)
{
if (is_kfence_address(obj))
return 0;
return __obj_to_index(cache, slab_obj_address(slab), obj);
}
static inline int objs_per_slab(const struct kmem_cache *cache,
const struct slab *slab)
{
return slab->objects;
}
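/*
 * Illustrative sketch (not part of this header): obj_to_index() and
 * objs_per_slab() are typically used together to index per-object metadata
 * arrays, e.g. (simplified):
 *
 *	unsigned int nr  = objs_per_slab(s, slab);
 *	unsigned int idx = obj_to_index(s, slab, p);
 *
 *	if (idx < nr)
 *		metadata[idx] = ...;
 *
 * kfence-backed objects always map to index 0, as handled above.
 */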
#endif /* _LINUX_SLUB_DEF_H */