/* SPDX-License-Identifier: GPL-2.0 */

#ifndef _ASM_X86_NOSPEC_BRANCH_H_
#define _ASM_X86_NOSPEC_BRANCH_H_

#include <linux/static_key.h>
#include <linux/objtool.h>
#include <linux/linkage.h>

#include <asm/alternative.h>
#include <asm/cpufeatures.h>
#include <asm/msr-index.h>
#include <asm/nops.h>
#include <asm/unwind_hints.h>
#include <asm/percpu.h>
#include <asm/current.h>

/*
 * Call depth tracking for Intel SKL CPUs to address the RSB underflow
 * issue in software.
 *
 * The tracking does not use a counter. It uses arithmetic shift
 * right on call entry and logical shift left on return.
 *
 * The depth tracking variable is initialized to 0x8000.... when the call
 * depth is zero. The arithmetic shift right sign extends the MSB and
 * saturates after the 12th call. The shift count is 5 for both directions
 * so the tracking covers 12 nested calls.
 *
 *  Call
 *  0: 0x8000000000000000    0x0000000000000000
 *  1: 0xfc00000000000000    0xf000000000000000
 * ...
 * 11: 0xfffffffffffffff8    0xfffffffffffffc00
 * 12: 0xffffffffffffffff    0xffffffffffffffe0
 *
 * After a return buffer fill the depth is credited 12 calls before the
 * next stuffing has to take place.
 *
 * There is an inaccuracy for situations like this:
 *
 *  10 calls
 *   5 returns
 *   3 calls
 *   4 returns
 *   3 calls
 *   ....
 *
 * The shift count might cause this to be off by one in either direction,
 * but there is still a cushion vs. the RSB depth. The algorithm does not
 * claim to be perfect and it can be speculated around by the CPU, but it
 * is considered to obfuscate the problem enough to make exploitation
 * extremely difficult.
 */
#define RET_DEPTH_SHIFT            5
#define RSB_RET_STUFF_LOOPS        16
#define RET_DEPTH_INIT            0x8000000000000000ULL
#define RET_DEPTH_INIT_FROM_CALL    0xfc00000000000000ULL
#define RET_DEPTH_CREDIT        0xffffffffffffffffULL
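
/*
 * Illustrative only, not part of the kernel build: a minimal user-space
 * sketch of the shift arithmetic described above, assuming the compiler
 * implements >> on a negative int64_t as an arithmetic shift (the real
 * macros use sarq/shlq directly and need no such assumption).
 */
#if 0
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	int64_t depth = (int64_t)0x8000000000000000ULL;	/* RET_DEPTH_INIT */

	depth >>= 5;				/* call: 0xfc00000000000000 */
	depth >>= 5;				/* call: 0xffe0000000000000 */
	printf("after 2 calls: 0x%016llx\n", (unsigned long long)depth);

	depth = (int64_t)((uint64_t)depth << 5);	/* return: logical shl */
	depth = (int64_t)((uint64_t)depth << 5);	/* return: back to init */
	printf("after 2 rets:  0x%016llx\n", (unsigned long long)depth);

	/*
	 * Deep enough call chains saturate the value at all ones
	 * (RET_DEPTH_CREDIT), which is also what CREDIT_CALL_DEPTH
	 * writes after a return buffer fill.
	 */
	return 0;
}
#endif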

#ifdef CONFIG_CALL_THUNKS_DEBUG
# define CALL_THUNKS_DEBUG_INC_CALLS                \
    incq    %gs:__x86_call_count;
# define CALL_THUNKS_DEBUG_INC_RETS                \
    incq    %gs:__x86_ret_count;
# define CALL_THUNKS_DEBUG_INC_STUFFS                \
    incq    %gs:__x86_stuffs_count;
# define CALL_THUNKS_DEBUG_INC_CTXSW                \
    incq    %gs:__x86_ctxsw_count;
#else
# define CALL_THUNKS_DEBUG_INC_CALLS
# define CALL_THUNKS_DEBUG_INC_RETS
# define CALL_THUNKS_DEBUG_INC_STUFFS
# define CALL_THUNKS_DEBUG_INC_CTXSW
#endif

#if defined(CONFIG_CALL_DEPTH_TRACKING) && !defined(COMPILE_OFFSETS)

#include <asm/asm-offsets.h>

#define CREDIT_CALL_DEPTH                    \
    movq    $-1, PER_CPU_VAR(pcpu_hot + X86_call_depth);

#define ASM_CREDIT_CALL_DEPTH                    \
    movq    $-1, PER_CPU_VAR(pcpu_hot + X86_call_depth);

#define RESET_CALL_DEPTH                    \
    xor    %eax, %eax;                    \
    bts    $63, %rax;                    \
    movq    %rax, PER_CPU_VAR(pcpu_hot + X86_call_depth);

#define RESET_CALL_DEPTH_FROM_CALL                \
    movb    $0xfc, %al;                    \
    shl    $56, %rax;                    \
    movq    %rax, PER_CPU_VAR(pcpu_hot + X86_call_depth);    \
    CALL_THUNKS_DEBUG_INC_CALLS

#define INCREMENT_CALL_DEPTH                    \
    sarq    $5, %gs:pcpu_hot + X86_call_depth;        \
    CALL_THUNKS_DEBUG_INC_CALLS

#define ASM_INCREMENT_CALL_DEPTH                \
    sarq    $5, PER_CPU_VAR(pcpu_hot + X86_call_depth);    \
    CALL_THUNKS_DEBUG_INC_CALLS

#else
#define CREDIT_CALL_DEPTH
#define ASM_CREDIT_CALL_DEPTH
#define RESET_CALL_DEPTH
#define INCREMENT_CALL_DEPTH
#define ASM_INCREMENT_CALL_DEPTH
#define RESET_CALL_DEPTH_FROM_CALL
#endif

/*
 * Fill the CPU return stack buffer.
 *
 * Each entry in the RSB, if used for a speculative 'ret', contains an
 * infinite 'pause; lfence; jmp' loop to capture speculative execution.
 *
 * This is required in various cases for retpoline and IBRS-based
 * mitigations for the Spectre variant 2 vulnerability. Sometimes to
 * eliminate potentially bogus entries from the RSB, and sometimes
 * purely to ensure that it doesn't get empty, which on some CPUs would
 * allow predictions from other (unwanted!) sources to be used.
 *
 * We define a CPP macro such that it can be used from both .S files and
 * inline assembly. It's possible to do a .macro and then include that
 * from C via asm(".include <asm/nospec-branch.h>") but let's not go there.
 */

#define RETPOLINE_THUNK_SIZE    32
#define RSB_CLEAR_LOOPS        32    /* To forcibly overwrite all entries */

/*
 * Common helper for __FILL_RETURN_BUFFER and __FILL_ONE_RETURN.
 */
#define __FILL_RETURN_SLOT            \
    ANNOTATE_INTRA_FUNCTION_CALL;        \
    call    772f;                \
    int3;                    \
772:

/*
 * Stuff the entire RSB.
 *
 * Google experimented with loop-unrolling and this turned out to be
 * the optimal version - two calls, each with their own speculation
 * trap should their return address end up getting used, in a loop.
 */
#ifdef CONFIG_X86_64
#define __FILL_RETURN_BUFFER(reg, nr)            \
    mov    $(nr/2), reg;                \
771:                            \
    __FILL_RETURN_SLOT                \
    __FILL_RETURN_SLOT                \
    add    $(BITS_PER_LONG/8) * 2, %_ASM_SP;    \
    dec    reg;                    \
    jnz    771b;                    \
    /* barrier for jnz misprediction */        \
    lfence;                        \
    ASM_CREDIT_CALL_DEPTH                \
    CALL_THUNKS_DEBUG_INC_CTXSW
#else
/*
 * i386 doesn't unconditionally have LFENCE, as such it can't
 * do a loop.
 */
#define __FILL_RETURN_BUFFER(reg, nr)            \
    .rept nr;                    \
    __FILL_RETURN_SLOT;                \
    .endr;                        \
    add    $(BITS_PER_LONG/8) * nr, %_ASM_SP;
#endif
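
/*
 * For illustration, a hand expansion (not generated output, annotations
 * omitted) of the x86_64 variant with reg == %rax and nr == 16:
 *
 *	mov	$8, %rax
 * 771:	call	772f		# push a return address -> fills one RSB entry
 *	int3			# speculation trap if that entry is consumed
 * 772:	call	772f		# second slot; the numeric label resolves forward
 *	int3
 * 772:	add	$16, %rsp	# drop both return addresses from the real stack
 *	dec	%rax
 *	jnz	771b
 *	lfence			# barrier for jnz misprediction
 *
 * Eight iterations of two calls each stuff 16 RSB entries, while the add
 * rewinds the real stack so execution simply falls through.
 */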

/*
 * Stuff a single RSB slot.
 *
 * To mitigate Post-Barrier RSB speculation, one CALL instruction must be
 * forced to retire before letting a RET instruction execute.
 *
 * On PBRSB-vulnerable CPUs, it is not safe for a RET to be executed
 * before this point.
 */
#define __FILL_ONE_RETURN                \
    __FILL_RETURN_SLOT                \
    add    $(BITS_PER_LONG/8), %_ASM_SP;        \
    lfence;

#ifdef CONFIG_X86_32
#define ITS_THUNK_REGS        8
#else
#define ITS_THUNK_REGS        16
#endif
#define ITS_THUNK_SIZE        4
#define ITS_THUNK_ENTRIES    (32 / ITS_THUNK_SIZE)
#define ITS_THUNK_BLOCKS    512

#ifdef __ASSEMBLY__

#include <asm/cpufeature-macros.h>

/*
 * This should be used immediately before an indirect jump/call. It tells
 * objtool the subsequent indirect jump/call is vouched safe for retpoline
 * builds.
 */
.macro ANNOTATE_RETPOLINE_SAFE
#ifdef CONFIG_RETPOLINE
.Lhere_\@:
    .pushsection .discard.retpoline_safe
    .long .Lhere_\@
    .popsection
#endif
.endm
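
/*
 * Usage sketch (illustrative, not a call site from this file): the
 * annotation goes directly in front of the indirect transfer it vouches
 * for, e.g. in .S code that intentionally keeps a raw indirect jump:
 *
 *	ANNOTATE_RETPOLINE_SAFE
 *	jmp	*%rdi
 */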

/*
 * (ab)use RETPOLINE_SAFE on RET to annotate away 'bare' RET instructions
 * vs RETBleed validation.
 */
.macro ANNOTATE_UNRET_SAFE
    ANNOTATE_RETPOLINE_SAFE
.endm

/*
 * Abuse ANNOTATE_RETPOLINE_SAFE on a NOP to indicate UNRET_END, should
 * eventually turn into its own annotation.
 */
.macro VALIDATE_UNRET_END
#if defined(CONFIG_NOINSTR_VALIDATION) && \
    (defined(CONFIG_CPU_UNRET_ENTRY) || defined(CONFIG_CPU_SRSO))
    ANNOTATE_RETPOLINE_SAFE
    nop
#endif
.endm

/*
 * JMP_NOSPEC and CALL_NOSPEC macros can be used instead of a simple
 * indirect jmp/call which may be susceptible to the Spectre variant 2
 * attack.
 *
 * NOTE: these do not take kCFI into account and are thus not comparable to C
 * indirect calls, take care when using. The target of these should be an ENDBR
 * instruction irrespective of kCFI.
 */
.macro JMP_NOSPEC reg:req
    ALTERNATIVE_3 __stringify(ANNOTATE_RETPOLINE_SAFE; jmp *%\reg), \
              __stringify(jmp __x86_indirect_its_thunk_\reg), ALT_ENABLE(CONFIG_MITIGATION_ITS, X86_FEATURE_INDIRECT_THUNK_ITS), \
              __stringify(jmp __x86_indirect_thunk_\reg), ALT_ENABLE(CONFIG_RETPOLINE, X86_FEATURE_RETPOLINE), \
              __stringify(lfence; ANNOTATE_RETPOLINE_SAFE; jmp *%\reg), ALT_ENABLE(CONFIG_RETPOLINE, X86_FEATURE_RETPOLINE_LFENCE)
    int3
.endm

.macro CALL_NOSPEC_INLINE reg:req hashsym:req
    ALTERNATIVE_4 __stringify(pax_indirect_call %\reg, \hashsym), \
              __stringify(pax_direct_call __x86_indirect_its_thunk_\reg, \hashsym), ALT_ENABLE(CONFIG_MITIGATION_ITS, X86_FEATURE_INDIRECT_THUNK_ITS),\
              __stringify(pax_direct_call __x86_indirect_thunk_\reg, \hashsym), ALT_ENABLE(CONFIG_RETPOLINE, X86_FEATURE_RETPOLINE), \
              __stringify(pax_direct_call __x86_indirect_call_thunk_\reg, \hashsym), ALT_ENABLE(CONFIG_CALL_DEPTH_TRACKING, X86_FEATURE_CALL_DEPTH), \
              __stringify(pax_indirect_call %\reg, \hashsym, lfence=1), ALT_ENABLE(CONFIG_RETPOLINE, X86_FEATURE_RETPOLINE_LFENCE)
.endm

.macro CALL_NOSPEC reg:req
    ALTERNATIVE_4 __stringify(ANNOTATE_RETPOLINE_SAFE; call *%\reg), \
              __stringify(call __x86_indirect_its_thunk_\reg), ALT_ENABLE(CONFIG_MITIGATION_ITS, X86_FEATURE_INDIRECT_THUNK_ITS), \
              __stringify(call __x86_indirect_thunk_\reg), ALT_ENABLE(CONFIG_RETPOLINE, X86_FEATURE_RETPOLINE), \
              __stringify(call __x86_indirect_call_thunk_\reg), ALT_ENABLE(CONFIG_CALL_DEPTH_TRACKING, X86_FEATURE_CALL_DEPTH), \
              __stringify(lfence; ANNOTATE_RETPOLINE_SAFE; call *%\reg), ALT_ENABLE(CONFIG_RETPOLINE, X86_FEATURE_RETPOLINE_LFENCE)
.endm

.macro __CALL_NOSPEC reg:req
    /* Pre-pad for RAP retloc hash */
    ALTERNATIVE_4_PREPAD __stringify(ANNOTATE_RETPOLINE_SAFE; call *%\reg), \
        __stringify(call __x86_indirect_its_thunk_\reg), ALT_ENABLE(CONFIG_MITIGATION_ITS, X86_FEATURE_INDIRECT_THUNK_ITS), \
        __stringify(call __x86_indirect_thunk_\reg), ALT_ENABLE(CONFIG_RETPOLINE, X86_FEATURE_RETPOLINE), \
        __stringify(call __x86_indirect_call_thunk_\reg), ALT_ENABLE(CONFIG_CALL_DEPTH_TRACKING, X86_FEATURE_CALL_DEPTH), \
        __stringify(lfence; ANNOTATE_RETPOLINE_SAFE; call *%\reg), ALT_ENABLE(CONFIG_RETPOLINE, X86_FEATURE_RETPOLINE_LFENCE)
.endm
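
/*
 * Usage sketch (hedged, not a call site from this file): in .S code these
 * macros take a bare register name, e.g.
 *
 *	# %rdi holds the branch target
 *	CALL_NOSPEC rdi
 *
 * and are patched according to the enabled mitigation, falling back to an
 * annotated plain indirect call when no thunk is required.
 */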

 /*
  * A simpler FILL_RETURN_BUFFER macro. Don't make people use the CPP
  * monstrosity above manually.
  */
.macro FILL_RETURN_BUFFER reg:req nr:req ftr:req ftr2
  .ifnb \ftr2
    ALTERNATIVE_2 "pax_jmp .Lskip_rsb_\@", \
        __stringify(__FILL_RETURN_BUFFER(\reg,\nr)), \ftr, \
        __stringify(nop;nop;__FILL_ONE_RETURN), \ftr2
  .else
    ALTERNATIVE "pax_jmp .Lskip_rsb_\@", \
        __stringify(__FILL_RETURN_BUFFER(\reg,\nr)), \ftr
  .endif

.Lskip_rsb_\@:
.endm
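
/*
 * Usage sketch (hedged; exact call sites, registers and feature flags vary
 * by kernel version): entry code invokes the macro with a scratch register,
 * the loop count and the controlling feature bit(s), e.g.
 *
 *	FILL_RETURN_BUFFER %rax, RSB_CLEAR_LOOPS, X86_FEATURE_RSB_VMEXIT, X86_FEATURE_RSB_VMEXIT_LITE
 *
 * With only \ftr the whole fill is a single alternative gated on that
 * feature; when \ftr2 is also given, the second feature selects the lighter
 * single-slot __FILL_ONE_RETURN variant instead of the full stuff.
 */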

.macro INVLUTLB
#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
    ALTERNATIVE "", "or $-1, %eax ; invlpg (%eax)", X86_BUG_SPEC_SEGMENT_LIMIT_BYPASS
#endif
.endm

/*
 * The CALL to srso_alias_untrain_ret() must be patched in directly at
 * the spot where untraining must be done, ie., srso_alias_untrain_ret()
 * must be the target of a CALL instruction instead of indirectly
 * jumping to a wrapper which then calls it. Therefore, this macro is
 * called outside of __UNTRAIN_RET below, for the time being, before the
 * kernel can support nested alternatives with arbitrary nesting.
 */
.macro CALL_UNTRAIN_RET
#if defined(CONFIG_CPU_UNRET_ENTRY) || defined(CONFIG_CPU_SRSO)
    ALTERNATIVE_2 "", "call entry_untrain_ret", X86_FEATURE_UNRET, \
                  "call srso_alias_untrain_ret", X86_FEATURE_SRSO_ALIAS
#endif
.endm

/*
 * Mitigate RETBleed for AMD/Hygon Zen uarch. Requires KERNEL CR3 because the
 * return thunk isn't mapped into the userspace tables (then again, AMD
 * typically has NO_MELTDOWN).
 *
 * While retbleed_untrain_ret() doesn't clobber anything but does require a stack,
 * write_ibpb() will clobber AX, CX, DX.
 *
 * As such, this must be placed after every *SWITCH_TO_KERNEL_CR3 at a point
 * where we have a stack but before any RET instruction.
 */
.macro __UNTRAIN_RET ibpb_feature, call_depth_insns
#if defined(CONFIG_RETHUNK) || defined(CONFIG_CPU_IBPB_ENTRY)
    VALIDATE_UNRET_END
    CALL_UNTRAIN_RET
    ALTERNATIVE_2 "",                        \
              "call write_ibpb", \ibpb_feature,            \
             __stringify(\call_depth_insns), X86_FEATURE_CALL_DEPTH
#endif
.endm

#define UNTRAIN_RET \
    __UNTRAIN_RET X86_FEATURE_ENTRY_IBPB, __stringify(RESET_CALL_DEPTH)

#define UNTRAIN_RET_VM \
    __UNTRAIN_RET X86_FEATURE_IBPB_ON_VMEXIT, __stringify(RESET_CALL_DEPTH)

#define UNTRAIN_RET_FROM_CALL \
    __UNTRAIN_RET X86_FEATURE_ENTRY_IBPB, __stringify(RESET_CALL_DEPTH_FROM_CALL)
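
/*
 * Placement sketch (paraphrasing the entry-code flow described above, not a
 * verbatim excerpt): once the kernel CR3 is live and a kernel stack is
 * usable, but before the first RET on the path:
 *
 *	SWITCH_TO_KERNEL_CR3 scratch_reg=%rax
 *	# ... build pt_regs / establish the kernel stack ...
 *	UNTRAIN_RET
 *	# ... rest of the entry path, which may now safely RET ...
 */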


.macro CALL_DEPTH_ACCOUNT
#ifdef CONFIG_CALL_DEPTH_TRACKING
    ALTERNATIVE "",                            \
            __stringify(ASM_INCREMENT_CALL_DEPTH), X86_FEATURE_CALL_DEPTH
#endif
.endm

#ifdef CONFIG_RETHUNK
.macro RETHUNK
    pax_ret_nospec
.endm
#endif

/*
 * Macro to execute VERW insns that mitigate transient data sampling
 * attacks such as MDS or TSA. On affected systems a microcode update
 * overloaded VERW insns to also clear the CPU buffers. VERW clobbers
 * CFLAGS.ZF.
 * Note: Only the memory operand variant of VERW clears the CPU buffers.
 */
.macro __CLEAR_CPU_BUFFERS feature
#ifdef CONFIG_X86_64
    ALTERNATIVE "", "verw x86_verw_sel(%rip)", \feature
#else
    /*
     * In 32bit mode, the memory operand must be a %cs reference. The data
     * segments may not be usable (vm86 mode), and the stack segment may not
     * be flat (ESPFIX32).
     */
    ALTERNATIVE "", "verw %cs:x86_verw_sel", \feature
#endif
.endm

#define CLEAR_CPU_BUFFERS \
    __CLEAR_CPU_BUFFERS X86_FEATURE_CLEAR_CPU_BUF

#define VM_CLEAR_CPU_BUFFERS \
    __CLEAR_CPU_BUFFERS X86_FEATURE_CLEAR_CPU_BUF_VM

#ifdef CONFIG_X86_64
.macro CLEAR_BRANCH_HISTORY
    ALTERNATIVE "", "call clear_bhb_loop", X86_FEATURE_CLEAR_BHB_LOOP
.endm

.macro CLEAR_BRANCH_HISTORY_VMEXIT
    ALTERNATIVE "", "call clear_bhb_loop", X86_FEATURE_CLEAR_BHB_LOOP_ON_VMEXIT
.endm
#else
#define CLEAR_BRANCH_HISTORY
#define CLEAR_BRANCH_HISTORY_VMEXIT
#endif

#else /* __ASSEMBLY__ */

#define ANNOTATE_RETPOLINE_SAFE                    \
    "999:\n\t"                        \
    ".pushsection .discard.retpoline_safe\n\t"        \
    ".long 999b\n\t"                    \
    ".popsection\n\t"

typedef u8 retpoline_thunk_t[RETPOLINE_THUNK_SIZE];
extern retpoline_thunk_t __x86_indirect_thunk_array[];
extern retpoline_thunk_t __x86_indirect_call_thunk_array[];
extern retpoline_thunk_t __x86_indirect_jump_thunk_array[];

extern struct its_thunk {
    u8 padding[ITS_THUNK_ENTRIES][ITS_THUNK_SIZE];
    u8 thunks[ITS_THUNK_ENTRIES][ITS_THUNK_SIZE];
} __x86_indirect_its_thunk_array[ITS_THUNK_REGS][ITS_THUNK_BLOCKS];
static_assert(sizeof(struct its_thunk) == 64);

#ifdef CONFIG_RETHUNK
void __x86_return_thunk(void);
#else
static inline void __x86_return_thunk(void) {}
#endif

#ifdef CONFIG_CPU_UNRET_ENTRY
extern void retbleed_return_thunk(void) __rap_hash;
#else
static inline void retbleed_return_thunk(void) {}
#endif

extern void srso_alias_untrain_ret(void) __rap_hash;

#ifdef CONFIG_CPU_SRSO
extern void srso_return_thunk(void) __rap_hash;
extern void srso_alias_return_thunk(void) __rap_hash;
#else
static inline void srso_return_thunk(void) {}
static inline void srso_alias_return_thunk(void) {}
#endif

#ifdef CONFIG_MITIGATION_ITS
extern void its_return_thunk(void) __rap_hash;
#else
static inline void its_return_thunk(void) {}
#endif

extern void retbleed_untrain_ret(void) __rap_hash;
extern void srso_untrain_ret(void) __rap_hash;
extern void srso_alias_untrain_ret(void) __rap_hash;

extern void entry_untrain_ret(void) __rap_hash;
extern void write_ibpb(void) __rap_hash;

#ifdef CONFIG_X86_64
extern void clear_bhb_loop(void) __rap_hash;
#endif

extern void (*x86_return_thunk)(void);

extern void __warn_thunk(void);

#ifdef CONFIG_CALL_DEPTH_TRACKING
extern void call_depth_return_thunk(void) __rap_hash;

#define CALL_DEPTH_ACCOUNT                    \
    ALTERNATIVE("",                        \
            __stringify(INCREMENT_CALL_DEPTH),        \
            X86_FEATURE_CALL_DEPTH)

#ifdef CONFIG_CALL_THUNKS_DEBUG
DECLARE_PER_CPU(u64, __x86_call_count);
DECLARE_PER_CPU(u64, __x86_ret_count);
DECLARE_PER_CPU(u64, __x86_stuffs_count);
DECLARE_PER_CPU(u64, __x86_ctxsw_count);
#endif
#else /* !CONFIG_CALL_DEPTH_TRACKING */

static inline void call_depth_return_thunk(void) {}
#define CALL_DEPTH_ACCOUNT ""

#endif /* CONFIG_CALL_DEPTH_TRACKING */

#ifdef CONFIG_RETPOLINE

#define GEN(reg) \
    extern retpoline_thunk_t __x86_indirect_thunk_ ## reg;
#include <asm/GEN-for-each-reg.h>
#undef GEN

#define GEN(reg)                        \
    extern retpoline_thunk_t __x86_indirect_call_thunk_ ## reg;
#include <asm/GEN-for-each-reg.h>
#undef GEN

#define GEN(reg)                        \
    extern retpoline_thunk_t __x86_indirect_jump_thunk_ ## reg;
#include <asm/GEN-for-each-reg.h>
#undef GEN

#define CALL_NOSPEC_SPEC    "%V[thunk_target]"

/*
 * CALL_NOSPEC_INLINE is used in (inline) asm for call targets where we want
 * RAP retloc and other instrumentations because we control the call target
 * as well, e.g., in PAX_INDIRECT_CALL used in execute_on_irq_stack during
 * a temporary stack switch or in __fentry__.
 *
 * CALL_NOSPEC is needed in (inline) asm for call targets where we don't want
 * (KVM fastop emulation) or cannot have (EFI, MSHYPERV) RAP retloc and other
 * instrumentations.
 */
# define CALL_NOSPEC_INLINE(target, hashsym)            \
    ALTERNATIVE_4(                        \
    "pax_indirect_call " target ", " hashsym"\n",        \
    "pax_direct_call __x86_indirect_its_thunk_"CALL_NOSPEC_SPEC", " hashsym "\n",\
    IS_ENABLED(CONFIG_MITIGATION_ITS), X86_FEATURE_INDIRECT_THUNK_ITS,\
    "pax_direct_call __x86_indirect_thunk_"CALL_NOSPEC_SPEC", " hashsym "\n",\
    1, X86_FEATURE_RETPOLINE,                \
    "pax_direct_call __x86_indirect_call_thunk_"CALL_NOSPEC_SPEC", " hashsym "\n",\
    IS_ENABLED(CONFIG_CALL_DEPTH_TRACKING), X86_FEATURE_CALL_DEPTH,\
    "pax_indirect_call " target ", " hashsym", lfence=1\n",    \
    1, X86_FEATURE_RETPOLINE_LFENCE)

#ifdef CONFIG_X86_64

# define CALL_NOSPEC_REG    "r"

/*
 * Inline asm uses the %V modifier which is only in newer GCC
 * which is ensured when CONFIG_RETPOLINE is defined.
 */
# define CALL_NOSPEC                        \
    ALTERNATIVE_4(                        \
    ANNOTATE_RETPOLINE_SAFE                    \
    "call *%[thunk_target]\n",                \
    "call __x86_indirect_its_thunk_"CALL_NOSPEC_SPEC"\n",    \
    IS_ENABLED(CONFIG_MITIGATION_ITS), X86_FEATURE_INDIRECT_THUNK_ITS,\
    "call __x86_indirect_thunk_"CALL_NOSPEC_SPEC"\n",    \
    1, X86_FEATURE_RETPOLINE,                \
    "call __x86_indirect_call_thunk_"CALL_NOSPEC_SPEC"\n",    \
    IS_ENABLED(CONFIG_CALL_DEPTH_TRACKING), X86_FEATURE_CALL_DEPTH,\
    "lfence;\n"                        \
    ANNOTATE_RETPOLINE_SAFE                    \
    "call *%[thunk_target]\n",                \
    1, X86_FEATURE_RETPOLINE_LFENCE)

# define THUNK_TARGET(addr) [thunk_target] CALL_NOSPEC_REG (addr)

#else /* CONFIG_X86_32 */

/*
 * For i386 we use the original ret-equivalent retpoline, because
 * otherwise we'll run out of registers. We don't care about CET
 * here, anyway.
 */
# define CALL_NOSPEC                        \
    ALTERNATIVE_2(                        \
    "       pax_jmp    904f;\n"                \
    "       pax_jmp    905f;\n"                \
    "       .align 16\n"                    \
    "901:    call   903f;\n"                    \
    "902:    pause;\n"                    \
    "        lfence;\n"                    \
    "       pax_jmp    902b;\n"                \
    "       .align 16\n"                    \
    "903:    lea    4(%%esp), %%esp;\n"            \
    "       pushl  %[thunk_target];\n"            \
    "905:   pax_ret_nospec_alternative;\n"            \
    "       .align 16\n"                    \
    "904:    call   901b;\n",                \
    ANNOTATE_RETPOLINE_SAFE                    \
    "call *%[thunk_target]\n",                \
    ALT_NOT(X86_FEATURE_RETPOLINE),                \
    "lfence;\n"                        \
    ANNOTATE_RETPOLINE_SAFE                    \
    "call *%[thunk_target]\n",                \
    X86_FEATURE_RETPOLINE_LFENCE)

# define THUNK_TARGET(addr) [thunk_target] "m" (addr)
#endif
#else /* No retpoline for C / inline asm */
# define CALL_NOSPEC "call *%[thunk_target]\n"
# define CALL_NOSPEC_INLINE(target, hashsym) "pax_indirect_call " target ", " hashsym"\n"
# ifdef CONFIG_X86_64
#  define THUNK_TARGET(addr) [thunk_target] "rm" (addr)
# else
#  define THUNK_TARGET(addr) [thunk_target] "m" (addr)
# endif
#endif /* CONFIG_RETPOLINE */
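
/*
 * Usage sketch for the C-side macros (illustrative, assuming CONFIG_X86_64
 * and a kernel context where this header is included; nospec_indirect_call()
 * is a made-up wrapper, not a real kernel API):
 */
#if 0
static inline unsigned long nospec_indirect_call(unsigned long (*fn)(void))
{
	unsigned long ret;

	/*
	 * CALL_NOSPEC is patched at boot into a retpoline/ITS/call-depth
	 * thunk call or a plain (annotated) indirect call; THUNK_TARGET()
	 * binds fn to the %[thunk_target] operand the alternatives expect.
	 * Caller-saved registers not used as outputs must be clobbered.
	 */
	asm volatile(CALL_NOSPEC
		     : "=a" (ret), ASM_CALL_CONSTRAINT
		     : THUNK_TARGET(fn)
		     : "memory", "rcx", "rdx", "rsi", "rdi",
		       "r8", "r9", "r10", "r11");
	return ret;
}
#endif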

/* The Spectre V2 mitigation variants */
enum spectre_v2_mitigation {
    SPECTRE_V2_NONE,
    SPECTRE_V2_RETPOLINE,
    SPECTRE_V2_LFENCE,
    SPECTRE_V2_EIBRS,
    SPECTRE_V2_EIBRS_RETPOLINE,
    SPECTRE_V2_EIBRS_LFENCE,
    SPECTRE_V2_IBRS,
};

/* The indirect branch speculation control variants */
enum spectre_v2_user_mitigation {
    SPECTRE_V2_USER_NONE,
    SPECTRE_V2_USER_STRICT,
    SPECTRE_V2_USER_STRICT_PREFERRED,
    SPECTRE_V2_USER_PRCTL,
    SPECTRE_V2_USER_SECCOMP,
};

/* The Speculative Store Bypass disable variants */
enum ssb_mitigation {
    SPEC_STORE_BYPASS_NONE,
    SPEC_STORE_BYPASS_DISABLE,
    SPEC_STORE_BYPASS_PRCTL,
    SPEC_STORE_BYPASS_SECCOMP,
};

static __always_inline
void alternative_msr_write(unsigned int msr, u64 val, unsigned int feature)
{
    asm volatile(RAP_SAFE_ASM ALTERNATIVE("", "wrmsr", %c[feature])
        : : "c" (msr),
            "a" ((u32)val),
            "d" ((u32)(val >> 32)),
            [feature] "i" (feature)
        : "memory");
}

DECLARE_PER_CPU(bool, x86_ibpb_exit_to_user);

static inline void indirect_branch_prediction_barrier(void)
{
    asm_inline volatile(ALTERNATIVE("", "call write_ibpb", X86_FEATURE_IBPB)
                : ASM_CALL_CONSTRAINT
                :: "rax", "rcx", "rdx", "memory");
}

/* The Intel SPEC CTRL MSR base value cache */
extern u64 x86_spec_ctrl_base __mutable;
DECLARE_PER_CPU(u64, x86_spec_ctrl_current);
extern void update_spec_ctrl_cond(u64 val);
extern u64 spec_ctrl_current(void);

/*
 * With retpoline, we must use IBRS to restrict branch prediction
 * before calling into firmware.
 *
 * (Implemented as CPP macros due to header hell.)
 */
#define firmware_restrict_branch_speculation_start()            \
do {                                    \
    preempt_disable();                        \
    alternative_msr_write(MSR_IA32_SPEC_CTRL,            \
                  spec_ctrl_current() | SPEC_CTRL_IBRS,    \
                  X86_FEATURE_USE_IBRS_FW);            \
    alternative_msr_write(MSR_IA32_PRED_CMD, PRED_CMD_IBPB,        \
                  X86_FEATURE_USE_IBPB_FW);            \
} while (0)

#define firmware_restrict_branch_speculation_end()            \
do {                                    \
    alternative_msr_write(MSR_IA32_SPEC_CTRL,            \
                  spec_ctrl_current(),            \
                  X86_FEATURE_USE_IBRS_FW);            \
    preempt_enable();                        \
} while (0)
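
/*
 * Usage sketch: callers bracket firmware invocations with these helpers so
 * the IBRS/IBPB restrictions are raised only for the duration of the call
 * (the efi_call_virt() name below stands in for whatever firmware entry
 * point is being invoked):
 *
 *	firmware_restrict_branch_speculation_start();
 *	status = efi_call_virt(...);
 *	firmware_restrict_branch_speculation_end();
 *
 * The preempt_disable()/preempt_enable() pair in the macros keeps the MSR
 * writes and the firmware call on the same CPU.
 */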

DECLARE_STATIC_KEY_FALSE(switch_to_cond_stibp);
DECLARE_STATIC_KEY_FALSE(switch_mm_cond_ibpb);
DECLARE_STATIC_KEY_FALSE(switch_mm_always_ibpb);

DECLARE_STATIC_KEY_FALSE(switch_vcpu_ibpb);

DECLARE_STATIC_KEY_FALSE(cpu_buf_idle_clear);

DECLARE_STATIC_KEY_FALSE(switch_mm_cond_l1d_flush);

DECLARE_STATIC_KEY_FALSE(mmio_stale_data_clear);

extern u16 x86_verw_sel;

#include <asm/segment.h>

/**
 * x86_clear_cpu_buffers - Buffer clearing support for different x86 CPU vulns
 *
 * This uses the otherwise unused and obsolete VERW instruction in
 * combination with microcode which triggers a CPU buffer flush when the
 * instruction is executed.
 */
static __always_inline void x86_clear_cpu_buffers(void)
{
    static const u16 ds = __KERNEL_DS;

    /*
     * Has to be the memory-operand variant because only that
     * guarantees the CPU buffer flush functionality according to
     * documentation. The register-operand variant does not.
     * Works with any segment selector, but a valid writable
     * data segment is the fastest variant.
     *
     * "cc" clobber is required because VERW modifies ZF.
     */
    asm volatile("verw %[ds]" : : [ds] "m" (ds) : "cc");
}

/**
 * x86_idle_clear_cpu_buffers - Buffer clearing support in idle for the MDS
 * and TSA vulnerabilities.
 *
 * Clear CPU buffers if the corresponding static key is enabled
 */
static __always_inline void x86_idle_clear_cpu_buffers(void)
{
    if (static_branch_likely(&cpu_buf_idle_clear))
        x86_clear_cpu_buffers();
}

#endif /* __ASSEMBLY__ */

#endif /* _ASM_X86_NOSPEC_BRANCH_H_ */
