/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_HIGHMEM_INTERNAL_H
#define _LINUX_HIGHMEM_INTERNAL_H
/*
 * Outside of CONFIG_HIGHMEM to support X86 32bit iomap_atomic() cruft.
 */
#ifdef CONFIG_KMAP_LOCAL
void *____kmap_local_pfn_prot(unsigned long pfn, pgprot_t prot, enum kmap_slots slot);
void *__kmap_local_pfn_prot(unsigned long pfn, pgprot_t prot);
void *__kmap_local_page_prot(struct page *page, pgprot_t prot);
void kunmap_local_indexed(const void *vaddr);
void kmap_local_fork(struct task_struct *tsk);
void __kmap_local_sched_out(void);
void __kmap_local_sched_in(void);
static inline void kmap_assert_nomap(void)
{
	DEBUG_LOCKS_WARN_ON(current->kmap_ctrl.idx);
}
#else
static inline void kmap_local_fork(struct task_struct *tsk) { }
static inline void kmap_assert_nomap(void) { }
#endif
#ifdef CONFIG_KMAP_LOCAL_PER_CPU
void __kmap_local_per_cpu_sched_out(void);
void __kmap_local_per_cpu_sched_in(void);
#endif
#if defined(CONFIG_HIGHMEM) || defined(CONFIG_KMAP_LOCAL_PER_CPU)
struct page *__kmap_to_page(void *addr);
#endif
static inline struct page *kmap_to_page(void *addr)
{
#if defined(CONFIG_HIGHMEM) || defined(CONFIG_KMAP_LOCAL_PER_CPU)
	return __kmap_to_page(addr);
#else
	return virt_to_page(addr);
#endif
}
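
/*
 * Usage sketch (illustrative only, not part of this header; "page" is a
 * hypothetical struct page pointer). kmap_to_page() recovers the struct
 * page behind an address obtained from one of the kmap interfaces:
 *
 *	void *vaddr = kmap_local_page(page);
 *	struct page *p = kmap_to_page(vaddr);	(p == page)
 *	kunmap_local(vaddr);
 */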
#if defined(CONFIG_HIGHMEM) || defined(CONFIG_PAX_KERNSEAL)
#include <asm/highmem.h>
#endif
#ifdef CONFIG_HIGHMEM
#ifndef ARCH_HAS_KMAP_FLUSH_TLB
static inline void kmap_flush_tlb(unsigned long addr) { }
#endif
#ifndef kmap_prot
#define kmap_prot PAGE_KERNEL
#endif
void *kmap_high(struct page *page);
void kunmap_high(struct page *page);
void __kmap_flush_unused(void);
static inline void *kmap(struct page *page)
{
	void *addr;

	might_sleep();
	if (!PageHighMem(page))
		addr = page_address(page);
	else
		addr = kmap_high(page);
	kmap_flush_tlb((unsigned long)addr);
	return addr;
}
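
/*
 * Minimal usage sketch (illustrative; "dst", "src_page" and "len" are
 * hypothetical). kmap() may sleep and the mapping persists until the
 * matching kunmap(), so the pair is only valid in process context:
 *
 *	void *vaddr = kmap(src_page);
 *	memcpy(dst, vaddr, len);
 *	kunmap(src_page);
 */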
static inline void *kmap_noreport(struct page *page)
{
	return kmap(page);
}

static inline void kunmap(struct page *page)
{
	might_sleep();
	if (!PageHighMem(page))
		return;
	kunmap_high(page);
}

static inline void kmap_flush_unused(void)
{
	__kmap_flush_unused();
}

static inline void *kmap_local_page(struct page *page)
{
	return __kmap_local_page_prot(page, kmap_prot);
}

static inline void *kmap_local_page_user(struct page *page)
{
	return kmap_local_page(page);
}

static inline void *kmap_local_page_nested(struct page *page)
{
	return __kmap_local_page_prot(page, kmap_prot);
}

static inline void *kmap_local_page_nested_user(struct page *page)
{
	return kmap_local_page_nested(page);
}

static inline void *kmap_local_folio(struct folio *folio, size_t offset)
{
	struct page *page = folio_page(folio, offset / PAGE_SIZE);

	return __kmap_local_page_prot(page, kmap_prot) + offset % PAGE_SIZE;
}
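
/*
 * Usage sketch (illustrative; "folio" and "off" are hypothetical). The
 * offset is relative to the start of the folio, only the page containing
 * it gets mapped, and kunmap_local() takes the returned address:
 *
 *	void *vaddr = kmap_local_folio(folio, off);
 *	... access at most up to the end of that page ...
 *	kunmap_local(vaddr);
 */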
static inline void *kmap_local_page_prot(struct page *page, pgprot_t prot)
{
	return __kmap_local_page_prot(page, prot);
}

static inline void *kmap_local_pfn(unsigned long pfn)
{
	return __kmap_local_pfn_prot(pfn, kmap_prot);
}

static inline void __kunmap_local(const void *vaddr)
{
	kunmap_local_indexed(vaddr);
}

static inline void __kunmap_local_nested(const void *vaddr)
{
	kunmap_local_indexed(vaddr);
}

static inline void *kmap_atomic_prot(struct page *page, pgprot_t prot)
{
	if (IS_ENABLED(CONFIG_PREEMPT_RT))
		migrate_disable();
	else
		preempt_disable();
	pagefault_disable();
	return __kmap_local_page_prot(page, prot);
}

static inline void *kmap_atomic(struct page *page)
{
	return kmap_atomic_prot(page, kmap_prot);
}

static inline void *kmap_atomic_pfn(unsigned long pfn)
{
	if (IS_ENABLED(CONFIG_PREEMPT_RT))
		migrate_disable();
	else
		preempt_disable();
	pagefault_disable();
	return __kmap_local_pfn_prot(pfn, kmap_prot);
}

static inline void __kunmap_atomic(const void *addr)
{
	kunmap_local_indexed(addr);
	pagefault_enable();
	if (IS_ENABLED(CONFIG_PREEMPT_RT))
		migrate_enable();
	else
		preempt_enable();
}
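
/*
 * Sketch of the (deprecated) atomic pattern (illustrative; "page", "dst"
 * and "len" are hypothetical). Pagefaults and preemption (or migration on
 * PREEMPT_RT) stay disabled between the calls, so the section in between
 * must not sleep or fault:
 *
 *	void *vaddr = kmap_atomic(page);
 *	memcpy(dst, vaddr, len);
 *	kunmap_atomic(vaddr);
 */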
unsigned int __nr_free_highpages(void);
extern atomic_long_t _totalhigh_pages;

static inline unsigned int nr_free_highpages(void)
{
	return __nr_free_highpages();
}

static inline unsigned long totalhigh_pages(void)
{
	return (unsigned long)atomic_long_read(&_totalhigh_pages);
}

static inline void totalhigh_pages_add(long count)
{
	atomic_long_add(count, &_totalhigh_pages);
}

static inline bool is_kmap_addr(const void *x)
{
	unsigned long addr = (unsigned long)x;

	return (addr >= PKMAP_ADDR(0) && addr < PKMAP_ADDR(LAST_PKMAP)) ||
		(addr >= __fix_to_virt(FIX_KMAP_END) &&
		 addr < __fix_to_virt(FIX_KMAP_BEGIN));
}
#else /* CONFIG_HIGHMEM */
#ifdef CONFIG_PAX_KERNSEAL
void *__kmap(struct page *page);
void __kunmap(struct page *page);
#endif
static inline void *kmap_noreport(struct page *page)
{
	might_sleep();
#ifdef CONFIG_PAX_KERNSEAL
	VM_BUG_ON_PAGE_ALWAYS(PageSealed(page), page);
	if (PageHidden(page))
		return __kmap(page);
#endif
	return page_address(page);
}

static inline void *kmap(struct page *page)
{
#ifdef CONFIG_PAX_KERNSEAL
	WARN_ONCE(PageSealed(page) || PageHidden(page),
		  "PAX: unverified call to kmap for page %pS mapping %pS\n",
		  page, page->mapping);
#endif
	return kmap_noreport(page);
}

static inline void kunmap_high(struct page *page) { }
static inline void kmap_flush_unused(void) { }

static inline void kunmap(struct page *page)
{
#ifdef ARCH_HAS_FLUSH_ON_KUNMAP
	kunmap_flush_on_unmap(page_address(page));
#endif
#ifdef CONFIG_PAX_KERNSEAL
	VM_BUG_ON_PAGE_ALWAYS(PageSealed(page), page);
	if (PageHidden(page))
		__kunmap(page);
#endif
}

static __always_inline void *kmap_local_page(struct page *page)
{
#ifdef CONFIG_PAX_KERNSEAL
	if (PageSealed(page) || PageHidden(page))
		return pax_expose_page(page);
#endif
	return page_address(page);
}

static __always_inline void *kmap_local_page_user(struct page *page)
{
#ifdef CONFIG_PAX_KERNSEAL
	if (PageSealed(page) || PageHidden(page))
		return pax_expose_page_user(page);
#endif
	return page_address(page);
}

static __always_inline void *kmap_local_page_nested(struct page *page)
{
#ifdef CONFIG_PAX_KERNSEAL
	if (PageSealed(page) || PageHidden(page))
		return pax_expose_page_nested(page);
#endif
	return page_address(page);
}

static __always_inline void *kmap_local_page_nested_user(struct page *page)
{
#ifdef CONFIG_PAX_KERNSEAL
	if (PageSealed(page) || PageHidden(page))
		return pax_expose_page_nested_user(page);
#endif
	return page_address(page);
}

static inline void *kmap_local_folio(struct folio *folio, size_t offset)
{
#ifdef CONFIG_PAX_KERNSEAL
	if (folio_test_sealed(folio) || folio_test_hidden(folio)) {
		struct page *page = folio_page(folio, offset / PAGE_SIZE);

		return pax_expose_page(page) + offset % PAGE_SIZE;
	}
#endif
	return folio_address(folio) + offset;
}

static inline void *kmap_local_page_prot(struct page *page, pgprot_t prot)
{
#ifdef CONFIG_PAX_KERNSEAL
	VM_BUG_ON_PAGE_ALWAYS(PageSealed(page), page);
	if (PageHidden(page))
		return pax_expose_page_prot(page, prot);
#endif
	return kmap_local_page(page);
}

static inline void *kmap_local_pfn(unsigned long pfn)
{
#ifdef CONFIG_PAX_KERNSEAL
	struct page *page = pfn_to_page(pfn);

	VM_BUG_ON_PAGE_ALWAYS(PageSealed(page), page);
	if (PageHidden(page)) {
		BUG();
		return pax_expose_page(page);
	}
#endif
	return kmap_local_page(pfn_to_page(pfn));
}

static __always_inline void __kunmap_local(const void *addr)
{
#ifdef CONFIG_PAX_KERNSEAL
	struct page *page = kmap_to_page((void *)addr);

	if (PageSealed(page) || PageHidden(page))
		pax_hide_page(addr);
#endif
#ifdef ARCH_HAS_FLUSH_ON_KUNMAP
	kunmap_flush_on_unmap(PTR_ALIGN_DOWN(addr, PAGE_SIZE));
#endif
}

static __always_inline void __kunmap_local_nested(const void *addr)
{
#ifdef CONFIG_PAX_KERNSEAL
	struct page *page = kmap_to_page((void *)addr);

	if (PageSealed(page) || PageHidden(page))
		pax_hide_page_nested(addr);
#endif
#ifdef ARCH_HAS_FLUSH_ON_KUNMAP
	kunmap_flush_on_unmap(PTR_ALIGN_DOWN(addr, PAGE_SIZE));
#endif
}

static inline void *kmap_atomic(struct page *page)
{
	if (IS_ENABLED(CONFIG_PREEMPT_RT))
		migrate_disable();
	else
		preempt_disable();
	pagefault_disable();
#ifdef CONFIG_PAX_KERNSEAL
	if (PageSealed(page) || PageHidden(page))
		return pax_expose_page(page);
#endif
	return page_address(page);
}

static inline void *kmap_atomic_prot(struct page *page, pgprot_t prot)
{
	return kmap_atomic(page);
}

static inline void *kmap_atomic_pfn(unsigned long pfn)
{
	return kmap_atomic(pfn_to_page(pfn));
}

static inline void __kunmap_atomic(const void *addr)
{
#ifdef CONFIG_PAX_KERNSEAL
	struct page *page = kmap_to_page((void *)addr);

	if (PageSealed(page) || PageHidden(page))
		pax_hide_page(addr);
#endif
#ifdef ARCH_HAS_FLUSH_ON_KUNMAP
	kunmap_flush_on_unmap(PTR_ALIGN_DOWN(addr, PAGE_SIZE));
#endif
	pagefault_enable();
	if (IS_ENABLED(CONFIG_PREEMPT_RT))
		migrate_enable();
	else
		preempt_enable();
}
static inline unsigned int nr_free_highpages(void) { return 0; }
static inline unsigned long totalhigh_pages(void) { return 0UL; }
static inline bool is_kmap_addr(const void *x)
{
	return false;
}
#endif /* CONFIG_HIGHMEM */
/**
 * kunmap_atomic - Unmap the virtual address mapped by kmap_atomic() - deprecated!
 * @__addr:	Virtual address to be unmapped
 *
 * Unmaps an address previously mapped by kmap_atomic() and re-enables
 * pagefaults. Depending on PREEMPT_RT configuration, re-enables also
 * migration and preemption. Users should not count on these side effects.
 *
 * Mappings should be unmapped in the reverse order that they were mapped.
 * See kmap_local_page() for details on nesting.
 *
 * @__addr can be any address within the mapped page, so there is no need
 * to subtract any offset that has been added. In contrast to kunmap(),
 * this function takes the address returned from kmap_atomic(), not the
 * page passed to it. The compiler will warn you if you pass the page.
 */
#define kunmap_atomic(__addr)					\
do {								\
	BUILD_BUG_ON(__same_type((__addr), struct page *));	\
	__kunmap_atomic(__addr);				\
} while (0)
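
/*
 * Nesting sketch (illustrative; "p1" and "p2" are hypothetical). As noted
 * above, mappings must be released in the reverse order they were taken:
 *
 *	void *a1 = kmap_atomic(p1);
 *	void *a2 = kmap_atomic(p2);
 *	...
 *	kunmap_atomic(a2);
 *	kunmap_atomic(a1);
 */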
/**
 * kunmap_local - Unmap a page mapped via kmap_local_page().
 * @__addr:	An address within the page mapped
 *
 * @__addr can be any address within the mapped page. Commonly it is the
 * address returned from kmap_local_page(), but it can also include offsets.
 *
 * Unmapping should be done in the reverse order of the mapping. See
 * kmap_local_page() for details.
 */
#define kunmap_local(__addr)					\
do {								\
	BUILD_BUG_ON(__same_type((__addr), struct page *));	\
	__kunmap_local(__addr);					\
} while (0)
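
/*
 * Typical pairing (illustrative; "page" and "offset" are hypothetical).
 * kunmap_local() accepts any address inside the mapped page:
 *
 *	char *vaddr = kmap_local_page(page);
 *	vaddr[offset] = 0;
 *	kunmap_local(vaddr + offset);
 */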
#define kunmap_local_nested(__addr)				\
do {								\
	BUILD_BUG_ON(__same_type((__addr), struct page *));	\
	__kunmap_local_nested(__addr);				\
} while (0)
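
/*
 * Nested-mapping sketch (illustrative; "outer" and "inner" are
 * hypothetical). The _nested mapping variants in this header pair with
 * kunmap_local_nested() when a second local mapping is taken while the
 * first is still live:
 *
 *	void *o = kmap_local_page(outer);
 *	void *i = kmap_local_page_nested(inner);
 *	...
 *	kunmap_local_nested(i);
 *	kunmap_local(o);
 */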
#endif /* _LINUX_HIGHMEM_INTERNAL_H */