/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SCHED_TASK_STACK_H
#define _LINUX_SCHED_TASK_STACK_H
/*
* task->stack (kernel stack) handling interfaces:
*/
#include <linux/sched.h>
#include <linux/magic.h>
#include <linux/kasan.h>
#include <linux/err.h>
#ifdef CONFIG_THREAD_INFO_IN_TASK
/*
* When accessing the stack of a non-current task that might exit, use
 * try_get_task_stack() instead. task_stack_page() will return a pointer
* that could get freed out from under you.
*/
static __always_inline void *task_stack_page(const struct task_struct *task)
{
return task->stack;
}
#define setup_thread_stack(new, old)	do { } while (0)
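/*
 * Return the address of the last usable long on the stack.  When
 * CONFIG_PAX_RAP_XOR is enabled, the lowest long of the stack is
 * reserved, so the usable region starts one long higher.
 */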
static __always_inline unsigned long *end_of_stack(const struct task_struct *task)
{
#ifdef CONFIG_STACK_GROWSUP
return (unsigned long *)((unsigned long)task->stack + THREAD_SIZE) - 1;
#else
return (unsigned long *)task->stack + IS_ENABLED(CONFIG_PAX_RAP_XOR);
#endif
}
#elif !defined(__HAVE_THREAD_FUNCTIONS)
#define task_stack_page(task) ((void *)(task)->stack)
static inline void setup_thread_stack(struct task_struct *p, struct task_struct *org)
{
*task_thread_info(p) = *task_thread_info(org);
task_thread_info(p)->task = p;
}
/*
* Return the address of the last usable long on the stack.
*
* When the stack grows down, this is just above the thread
 * info struct. Going any lower will corrupt the thread_info.
*
* When the stack grows up, this is the highest address.
* Beyond that position, we corrupt data on the next page.
*/
static inline unsigned long *end_of_stack(struct task_struct *p)
{
#ifdef CONFIG_STACK_GROWSUP
return (unsigned long *)((unsigned long)task_thread_info(p) + THREAD_SIZE) - 1;
#else
return (unsigned long *)(task_thread_info(p) + 1);
#endif
}
#endif
#ifdef CONFIG_THREAD_INFO_IN_TASK
static inline void *try_get_task_stack(struct task_struct *tsk)
{
return refcount_inc_not_zero(&tsk->stack_refcount) ?
task_stack_page(tsk) : NULL;
}
extern void put_task_stack(struct task_struct *tsk);
#else
static inline void *try_get_task_stack(struct task_struct *tsk)
{
return task_stack_page(tsk);
}
static inline void put_task_stack(struct task_struct *tsk) {}
#endif
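/*
 * Typical calling pattern (an illustrative sketch, not mandated API):
 * pin the stack of a possibly-exiting task before reading it, and drop
 * the reference when done:
 *
 *	void *stack = try_get_task_stack(tsk);
 *
 *	if (stack) {
 *		... read or walk the remote task's stack ...
 *		put_task_stack(tsk);
 *	}
 */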
#ifdef CONFIG_HAVE_PT_REGS_IN_TASK
static inline struct pt_regs *get_task_pt_regs(struct task_struct *tsk)
{
return task_pt_regs(tsk);
}
static inline void put_task_pt_regs(struct task_struct *tsk)
{
}
#else
static inline struct pt_regs *get_task_pt_regs(struct task_struct *tsk)
{
return try_get_task_stack(tsk) ? task_pt_regs(tsk) : ERR_PTR(-EAGAIN);
}
static inline void put_task_pt_regs(struct task_struct *tsk)
{
put_task_stack(tsk);
}
#endif
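/*
 * Illustrative pairing for the helpers above (a sketch): the
 * !CONFIG_HAVE_PT_REGS_IN_TASK variant can fail with ERR_PTR(-EAGAIN)
 * once the task's stack is gone, so callers must check the result:
 *
 *	struct pt_regs *regs = get_task_pt_regs(tsk);
 *
 *	if (!IS_ERR(regs)) {
 *		... inspect regs ...
 *		put_task_pt_regs(tsk);
 *	}
 */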
void exit_task_stack_account(struct task_struct *tsk);
#ifdef CONFIG_HAVE_STACK_END_CANARY
#define task_stack_end_corrupted(task) \
(*(end_of_stack(task)) != STACK_END_MAGIC)
#else
#define task_stack_end_corrupted(task) (false)
#endif
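/*
 * kasan_reset_tag() strips any KASAN tag bits from the pointer so that
 * the range comparison below is done on the untagged address.
 */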
static inline int object_starts_on_stack(const void *obj)
{
const void *stack = task_stack_page(current);
obj = kasan_reset_tag(obj);
return (obj >= stack) && (obj < (stack + THREAD_SIZE));
}
#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
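/*
 * Touch one word per page of the stack so that any lazily-mapped pages
 * are faulted in before the stack is first used.
 */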
static inline void populate_stack(void *stack, unsigned int size)
{
int c;
int *ptr = stack;
int *end = stack + size;
while (ptr < end) {
c = *(volatile int *)ptr;
(void)c;
ptr += PAGE_SIZE/sizeof(int);
}
}
#else
static inline void populate_stack(void *stack, unsigned int size)
{
}
#endif
#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
const void *gr_convert_stack_address_to_lowmem(const void *buf);
#else
static inline const void *gr_convert_stack_address_to_lowmem(const void *buf)
{
return buf;
}
#endif
#if defined(CONFIG_GRKERNSEC_KSTACKOVERFLOW) && defined(CONFIG_X86_64)
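/*
 * As used here, the per-CPU hardirq_stack_ptr is taken as the
 * exclusive top of the IRQ stack, so the stack itself occupies
 * [hardirq_stack_ptr - IRQ_STACK_SIZE, hardirq_stack_ptr).
 */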
static inline int object_starts_on_irq_stack(const void *obj)
{
const void *stack = this_cpu_read(pcpu_hot.hardirq_stack_ptr);
return (obj >= (stack - IRQ_STACK_SIZE)) && (obj < stack);
}
#else
static inline int object_starts_on_irq_stack(const void *obj) { return 0; }
#endif
static inline bool is_stack_object_dma_safe(const void *obj)
{
#if defined(CONFIG_VMAP_STACK) || defined(ARCH_DMA_MINALIGN)
return !object_starts_on_stack(obj);
#else
return true;
#endif
}
#ifdef CONFIG_PAX_PRIVATE_KSTACKS
unsigned long alloc_stack_slot(struct task_struct *tsk);
struct task_struct *find_task_by_stack_slot(unsigned long stack_slot);
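/*
 * Private kernel stack layout, as implied by the arithmetic below:
 * each slot is 2 * <stack size> wide, with a guard region in the lower
 * half and the stack itself in the upper half, so the conversions
 * round-trip:
 *
 *	task_stack_slot(task_stack_addr(slot)) == slot
 *	irq_stack_slot(irq_stack_addr(cpu)) == cpu
 */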
static inline __pure unsigned long irq_stack_slot(unsigned long address)
{
return (address - KSTACK_IRQ_BASE_ADDR) / (IRQ_STACK_SIZE * 2);
}
static inline __pure unsigned long irq_stack_addr(unsigned int cpu)
{
return KSTACK_IRQ_BASE_ADDR + IRQ_STACK_SIZE * 2 * cpu + IRQ_STACK_SIZE;
}
static inline __pure unsigned long task_stack_slot(unsigned long address)
{
return (address - KSTACK_TASK_BASE_ADDR) / (THREAD_SIZE * 2);
}
static inline __pure unsigned long task_stack_addr(unsigned long stack_slot)
{
return KSTACK_TASK_BASE_ADDR + THREAD_SIZE * 2 * stack_slot + THREAD_SIZE;
}
#endif
extern void thread_stack_cache_init(void);
#ifdef CONFIG_DEBUG_STACK_USAGE
static inline unsigned long stack_not_used(struct task_struct *p)
{
	unsigned long *n = end_of_stack(p);
	long direction = IS_ENABLED(CONFIG_STACK_GROWSUP) ? -1 : 1;
	long i = 0;

# ifdef CONFIG_HAVE_STACK_END_CANARY
	/* Skip over the canary at end_of_stack() */
	i++;
# endif
	while (!n[i * direction])
		i++;

	/* Longs between end_of_stack() and the deepest non-zero word */
	return i;
}
#else
static inline unsigned long stack_not_used(struct task_struct *p) { return 0; }
#endif
static __always_inline void set_task_stack_end_magic(struct task_struct *tsk)
{
#ifdef CONFIG_HAVE_STACK_END_CANARY
unsigned long *stackend;
stackend = end_of_stack(tsk);
*stackend = STACK_END_MAGIC; /* for overflow detection */
#endif
}
#ifndef __HAVE_ARCH_KSTACK_END
static inline int kstack_end(void *addr)
{
	/*
	 * Reliable end of stack detection:
	 * Some APM BIOS versions misalign the stack, so tolerate an
	 * address up to sizeof(void *) - 1 bytes below the boundary.
	 */
return !(((unsigned long)addr+sizeof(void*)-1) & (THREAD_SIZE-sizeof(void*)));
}
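/*
 * Worked example (illustrative; assumes THREAD_SIZE = 16K and 8-byte
 * pointers): ((addr + 7) & (16384 - 8)) == 0 only when addr % 16384 is
 * in {16377, ..., 16383, 0}, i.e. kstack_end() reports true for an
 * address at, or within sizeof(void *) - 1 bytes below, a THREAD_SIZE
 * boundary.
 */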
#endif
#endif /* _LINUX_SCHED_TASK_STACK_H */