LCOV - code coverage report
Current view: top level - arch/x86/include/asm - processor.h (source / functions)
Test: fstests of 6.5.0-rc4-xfsx @ Mon Jul 31 20:08:34 PDT 2023
Date: 2023-07-31 20:08:34

                 Hit  Total  Coverage
Lines:             2      2   100.0 %
Functions:         0      0        -

          Line data    Source code
       1             : /* SPDX-License-Identifier: GPL-2.0 */
       2             : #ifndef _ASM_X86_PROCESSOR_H
       3             : #define _ASM_X86_PROCESSOR_H
       4             : 
       5             : #include <asm/processor-flags.h>
       6             : 
       7             : /* Forward declaration, a strange C thing */
       8             : struct task_struct;
       9             : struct mm_struct;
      10             : struct io_bitmap;
      11             : struct vm86;
      12             : 
      13             : #include <asm/math_emu.h>
      14             : #include <asm/segment.h>
      15             : #include <asm/types.h>
      16             : #include <uapi/asm/sigcontext.h>
      17             : #include <asm/current.h>
      18             : #include <asm/cpufeatures.h>
      19             : #include <asm/cpuid.h>
      20             : #include <asm/page.h>
      21             : #include <asm/pgtable_types.h>
      22             : #include <asm/percpu.h>
      23             : #include <asm/msr.h>
      24             : #include <asm/desc_defs.h>
      25             : #include <asm/nops.h>
      26             : #include <asm/special_insns.h>
      27             : #include <asm/fpu/types.h>
      28             : #include <asm/unwind_hints.h>
      29             : #include <asm/vmxfeatures.h>
      30             : #include <asm/vdso/processor.h>
      31             : 
      32             : #include <linux/personality.h>
      33             : #include <linux/cache.h>
      34             : #include <linux/threads.h>
      35             : #include <linux/math64.h>
      36             : #include <linux/err.h>
      37             : #include <linux/irqflags.h>
      38             : #include <linux/mem_encrypt.h>
      39             : 
      40             : /*
       41             :  * We handle most unaligned accesses in hardware.  On the other hand,
      42             :  * unaligned DMA can be quite expensive on some Nehalem processors.
      43             :  *
      44             :  * Based on this we disable the IP header alignment in network drivers.
      45             :  */
      46             : #define NET_IP_ALIGN    0
      47             : 
      48             : #define HBP_NUM 4
      49             : 
      50             : /*
      51             :  * These alignment constraints are for performance in the vSMP case,
      52             :  * but in the task_struct case we must also meet hardware imposed
      53             :  * alignment requirements of the FPU state:
      54             :  */
      55             : #ifdef CONFIG_X86_VSMP
      56             : # define ARCH_MIN_TASKALIGN             (1 << INTERNODE_CACHE_SHIFT)
      57             : # define ARCH_MIN_MMSTRUCT_ALIGN        (1 << INTERNODE_CACHE_SHIFT)
      58             : #else
      59             : # define ARCH_MIN_TASKALIGN             __alignof__(union fpregs_state)
      60             : # define ARCH_MIN_MMSTRUCT_ALIGN        0
      61             : #endif
      62             : 
      63             : enum tlb_infos {
      64             :         ENTRIES,
      65             :         NR_INFO
      66             : };
      67             : 
      68             : extern u16 __read_mostly tlb_lli_4k[NR_INFO];
      69             : extern u16 __read_mostly tlb_lli_2m[NR_INFO];
      70             : extern u16 __read_mostly tlb_lli_4m[NR_INFO];
      71             : extern u16 __read_mostly tlb_lld_4k[NR_INFO];
      72             : extern u16 __read_mostly tlb_lld_2m[NR_INFO];
      73             : extern u16 __read_mostly tlb_lld_4m[NR_INFO];
      74             : extern u16 __read_mostly tlb_lld_1g[NR_INFO];
      75             : 
      76             : /*
      77             :  *  CPU type and hardware bug flags. Kept separately for each CPU.
      78             :  *  Members of this structure are referenced in head_32.S, so think twice
      79             :  *  before touching them. [mj]
      80             :  */
      81             : 
      82             : struct cpuinfo_x86 {
      83             :         __u8                    x86;            /* CPU family */
      84             :         __u8                    x86_vendor;     /* CPU vendor */
      85             :         __u8                    x86_model;
      86             :         __u8                    x86_stepping;
      87             : #ifdef CONFIG_X86_64
       88             :         /* Combined number of 4K pages in the DTLB and ITLB: */
      89             :         int                     x86_tlbsize;
      90             : #endif
      91             : #ifdef CONFIG_X86_VMX_FEATURE_NAMES
      92             :         __u32                   vmx_capability[NVMXINTS];
      93             : #endif
      94             :         __u8                    x86_virt_bits;
      95             :         __u8                    x86_phys_bits;
      96             :         /* CPUID returned core id bits: */
      97             :         __u8                    x86_coreid_bits;
      98             :         __u8                    cu_id;
      99             :         /* Max extended CPUID function supported: */
     100             :         __u32                   extended_cpuid_level;
     101             :         /* Maximum supported CPUID level, -1=no CPUID: */
     102             :         int                     cpuid_level;
     103             :         /*
     104             :          * Align to size of unsigned long because the x86_capability array
     105             :          * is passed to bitops which require the alignment. Use unnamed
      106             :          * union to enforce that the array is aligned to the size of unsigned long.
     107             :          */
     108             :         union {
     109             :                 __u32           x86_capability[NCAPINTS + NBUGINTS];
     110             :                 unsigned long   x86_capability_alignment;
     111             :         };
     112             :         char                    x86_vendor_id[16];
     113             :         char                    x86_model_id[64];
      114             :         /* in KB - valid for CPUs which support this call: */
     115             :         unsigned int            x86_cache_size;
     116             :         int                     x86_cache_alignment;    /* In bytes */
     117             :         /* Cache QoS architectural values, valid only on the BSP: */
     118             :         int                     x86_cache_max_rmid;     /* max index */
     119             :         int                     x86_cache_occ_scale;    /* scale to bytes */
     120             :         int                     x86_cache_mbm_width_offset;
     121             :         int                     x86_power;
     122             :         unsigned long           loops_per_jiffy;
     123             :         /* protected processor identification number */
     124             :         u64                     ppin;
     125             :         /* cpuid returned max cores value: */
     126             :         u16                     x86_max_cores;
     127             :         u16                     apicid;
     128             :         u16                     initial_apicid;
     129             :         u16                     x86_clflush_size;
     130             :         /* number of cores as seen by the OS: */
     131             :         u16                     booted_cores;
     132             :         /* Physical processor id: */
     133             :         u16                     phys_proc_id;
     134             :         /* Logical processor id: */
     135             :         u16                     logical_proc_id;
     136             :         /* Core id: */
     137             :         u16                     cpu_core_id;
     138             :         u16                     cpu_die_id;
     139             :         u16                     logical_die_id;
     140             :         /* Index into per_cpu list: */
     141             :         u16                     cpu_index;
     142             :         /*  Is SMT active on this core? */
     143             :         bool                    smt_active;
     144             :         u32                     microcode;
     145             :         /* Address space bits used by the cache internally */
     146             :         u8                      x86_cache_bits;
     147             :         unsigned                initialized : 1;
     148             : } __randomize_layout;
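
The unnamed-union trick used for x86_capability above relies on a standard C rule: every member of a union shares the union's alignment, so the unsigned long member drags the __u32 array up to unsigned long alignment, which the bitops require. A minimal stand-alone sketch (struct example_caps is a hypothetical type, not part of the kernel):

        #include <stdint.h>

        struct example_caps {
                union {
                        uint32_t      caps[4];         /* what the code actually reads */
                        unsigned long caps_alignment;  /* present only for its alignment */
                };
        };

        _Static_assert(_Alignof(struct example_caps) >= _Alignof(unsigned long),
                       "union member forces unsigned long alignment");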
     149             : 
     150             : #define X86_VENDOR_INTEL        0
     151             : #define X86_VENDOR_CYRIX        1
     152             : #define X86_VENDOR_AMD          2
     153             : #define X86_VENDOR_UMC          3
     154             : #define X86_VENDOR_CENTAUR      5
     155             : #define X86_VENDOR_TRANSMETA    7
     156             : #define X86_VENDOR_NSC          8
     157             : #define X86_VENDOR_HYGON        9
     158             : #define X86_VENDOR_ZHAOXIN      10
     159             : #define X86_VENDOR_VORTEX       11
     160             : #define X86_VENDOR_NUM          12
     161             : 
     162             : #define X86_VENDOR_UNKNOWN      0xff
     163             : 
     164             : /*
     165             :  * capabilities of CPUs
     166             :  */
     167             : extern struct cpuinfo_x86       boot_cpu_data;
     168             : extern struct cpuinfo_x86       new_cpu_data;
     169             : 
     170             : extern __u32                    cpu_caps_cleared[NCAPINTS + NBUGINTS];
     171             : extern __u32                    cpu_caps_set[NCAPINTS + NBUGINTS];
     172             : 
     173             : #ifdef CONFIG_SMP
     174             : DECLARE_PER_CPU_READ_MOSTLY(struct cpuinfo_x86, cpu_info);
     175             : #define cpu_data(cpu)           per_cpu(cpu_info, cpu)
     176             : #else
     177             : #define cpu_info                boot_cpu_data
     178             : #define cpu_data(cpu)           boot_cpu_data
     179             : #endif
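
A hedged usage sketch for the cpu_data() accessor: walking the online CPUs and printing a few cpuinfo_x86 fields. for_each_online_cpu() and pr_info() are the usual kernel helpers; the function name itself is made up for illustration:

        static void example_dump_cpu_models(void)
        {
                int cpu;

                for_each_online_cpu(cpu) {
                        struct cpuinfo_x86 *c = &cpu_data(cpu);

                        pr_info("CPU%d: family %u model %u stepping %u\n",
                                cpu, c->x86, c->x86_model, c->x86_stepping);
                }
        }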
     180             : 
     181             : extern const struct seq_operations cpuinfo_op;
     182             : 
     183             : #define cache_line_size()       (boot_cpu_data.x86_cache_alignment)
     184             : 
     185             : extern void cpu_detect(struct cpuinfo_x86 *c);
     186             : 
     187             : static inline unsigned long long l1tf_pfn_limit(void)
     188             : {
     189             :         return BIT_ULL(boot_cpu_data.x86_cache_bits - 1 - PAGE_SHIFT);
     190             : }
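
l1tf_pfn_limit() yields the PFN limit the L1TF mitigation code works with: half of the physical space the L1D cache can address, expressed in pages. A worked example assuming 46 cache bits and 4 KiB pages (both example values, not read from hardware):

        #include <stdio.h>

        #define EXAMPLE_CACHE_BITS 46ULL   /* assumed x86_cache_bits */
        #define EXAMPLE_PAGE_SHIFT 12ULL   /* 4 KiB pages */

        int main(void)
        {
                /* Same arithmetic as BIT_ULL(x86_cache_bits - 1 - PAGE_SHIFT). */
                unsigned long long limit =
                        1ULL << (EXAMPLE_CACHE_BITS - 1 - EXAMPLE_PAGE_SHIFT);

                printf("pfn limit: %llu pages (2^33)\n", limit);
                return 0;
        }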
     191             : 
     192             : extern void early_cpu_init(void);
     193             : extern void identify_boot_cpu(void);
     194             : extern void identify_secondary_cpu(struct cpuinfo_x86 *);
     195             : extern void print_cpu_info(struct cpuinfo_x86 *);
     196             : void print_cpu_msr(struct cpuinfo_x86 *);
     197             : 
     198             : /*
     199             :  * Friendlier CR3 helpers.
     200             :  */
     201             : static inline unsigned long read_cr3_pa(void)
     202             : {
     203             :         return __read_cr3() & CR3_ADDR_MASK;
     204             : }
     205             : 
     206             : static inline unsigned long native_read_cr3_pa(void)
     207             : {
     208             :         return __native_read_cr3() & CR3_ADDR_MASK;
     209             : }
     210             : 
     211             : static inline void load_cr3(pgd_t *pgdir)
     212             : {
     213             :         write_cr3(__sme_pa(pgdir));
     214             : }
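
The helpers above mask the raw CR3 value down to the page-table base address, since on 64-bit CPUs the low bits of CR3 can carry a PCID and flag bits rather than address bits. A standalone sketch with illustrative masks (the kernel's real CR3_ADDR_MASK comes from <asm/processor-flags.h>):

        #include <stdint.h>

        #define EXAMPLE_CR3_ADDR_MASK 0x7ffffffffffff000ULL  /* physical address bits */
        #define EXAMPLE_CR3_PCID_MASK 0x0000000000000fffULL  /* PCID when CR4.PCIDE=1 */

        static inline uint64_t example_cr3_to_pa(uint64_t cr3)
        {
                return cr3 & EXAMPLE_CR3_ADDR_MASK;
        }

        static inline uint16_t example_cr3_to_pcid(uint64_t cr3)
        {
                return (uint16_t)(cr3 & EXAMPLE_CR3_PCID_MASK);
        }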
     215             : 
     216             : /*
     217             :  * Note that while the legacy 'TSS' name comes from 'Task State Segment',
     218             :  * on modern x86 CPUs the TSS also holds information important to 64-bit mode,
     219             :  * unrelated to the task-switch mechanism:
     220             :  */
     221             : #ifdef CONFIG_X86_32
     222             : /* This is the TSS defined by the hardware. */
     223             : struct x86_hw_tss {
     224             :         unsigned short          back_link, __blh;
     225             :         unsigned long           sp0;
     226             :         unsigned short          ss0, __ss0h;
     227             :         unsigned long           sp1;
     228             : 
     229             :         /*
     230             :          * We don't use ring 1, so ss1 is a convenient scratch space in
     231             :          * the same cacheline as sp0.  We use ss1 to cache the value in
     232             :          * MSR_IA32_SYSENTER_CS.  When we context switch
     233             :          * MSR_IA32_SYSENTER_CS, we first check if the new value being
     234             :          * written matches ss1, and, if it's not, then we wrmsr the new
     235             :          * value and update ss1.
     236             :          *
     237             :          * The only reason we context switch MSR_IA32_SYSENTER_CS is
     238             :          * that we set it to zero in vm86 tasks to avoid corrupting the
     239             :          * stack if we were to go through the sysenter path from vm86
     240             :          * mode.
     241             :          */
     242             :         unsigned short          ss1;    /* MSR_IA32_SYSENTER_CS */
     243             : 
     244             :         unsigned short          __ss1h;
     245             :         unsigned long           sp2;
     246             :         unsigned short          ss2, __ss2h;
     247             :         unsigned long           __cr3;
     248             :         unsigned long           ip;
     249             :         unsigned long           flags;
     250             :         unsigned long           ax;
     251             :         unsigned long           cx;
     252             :         unsigned long           dx;
     253             :         unsigned long           bx;
     254             :         unsigned long           sp;
     255             :         unsigned long           bp;
     256             :         unsigned long           si;
     257             :         unsigned long           di;
     258             :         unsigned short          es, __esh;
     259             :         unsigned short          cs, __csh;
     260             :         unsigned short          ss, __ssh;
     261             :         unsigned short          ds, __dsh;
     262             :         unsigned short          fs, __fsh;
     263             :         unsigned short          gs, __gsh;
     264             :         unsigned short          ldt, __ldth;
     265             :         unsigned short          trace;
     266             :         unsigned short          io_bitmap_base;
     267             : 
     268             : } __attribute__((packed));
     269             : #else
     270             : struct x86_hw_tss {
     271             :         u32                     reserved1;
     272             :         u64                     sp0;
     273             :         u64                     sp1;
     274             : 
     275             :         /*
     276             :          * Since Linux does not use ring 2, the 'sp2' slot is unused by
     277             :          * hardware.  entry_SYSCALL_64 uses it as scratch space to stash
     278             :          * the user RSP value.
     279             :          */
     280             :         u64                     sp2;
     281             : 
     282             :         u64                     reserved2;
     283             :         u64                     ist[7];
     284             :         u32                     reserved3;
     285             :         u32                     reserved4;
     286             :         u16                     reserved5;
     287             :         u16                     io_bitmap_base;
     288             : 
     289             : } __attribute__((packed));
     290             : #endif
     291             : 
     292             : /*
     293             :  * IO-bitmap sizes:
     294             :  */
     295             : #define IO_BITMAP_BITS                  65536
     296             : #define IO_BITMAP_BYTES                 (IO_BITMAP_BITS / BITS_PER_BYTE)
     297             : #define IO_BITMAP_LONGS                 (IO_BITMAP_BYTES / sizeof(long))
     298             : 
     299             : #define IO_BITMAP_OFFSET_VALID_MAP                              \
     300             :         (offsetof(struct tss_struct, io_bitmap.bitmap) -        \
     301             :          offsetof(struct tss_struct, x86_tss))
     302             : 
     303             : #define IO_BITMAP_OFFSET_VALID_ALL                              \
     304             :         (offsetof(struct tss_struct, io_bitmap.mapall) -        \
     305             :          offsetof(struct tss_struct, x86_tss))
     306             : 
     307             : #ifdef CONFIG_X86_IOPL_IOPERM
     308             : /*
      309             :  * The extra sizeof(unsigned long) comes from the additional "long" at the
      310             :  * end of the io bitmap. The limit is inclusive, i.e. it is the last valid byte.
     311             :  */
     312             : # define __KERNEL_TSS_LIMIT     \
     313             :         (IO_BITMAP_OFFSET_VALID_ALL + IO_BITMAP_BYTES + \
     314             :          sizeof(unsigned long) - 1)
     315             : #else
     316             : # define __KERNEL_TSS_LIMIT     \
     317             :         (offsetof(struct tss_struct, x86_tss) + sizeof(struct x86_hw_tss) - 1)
     318             : #endif
     319             : 
      320             : /* Base offset outside of TSS_LIMIT so unprivileged IO causes #GP */
     321             : #define IO_BITMAP_OFFSET_INVALID        (__KERNEL_TSS_LIMIT + 1)
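
The size arithmetic above works out to one bit per I/O port: 65536 bits, 8 KiB, or 1024 longs on a 64-bit build (plus the extra terminating long described further down). A quick check, assuming LP64:

        #include <stdio.h>

        int main(void)
        {
                unsigned int bits  = 65536;                /* one bit per I/O port */
                unsigned int bytes = bits / 8;             /* 8192 */
                unsigned int longs = bytes / sizeof(long); /* 1024 on LP64 */

                printf("%u bits -> %u bytes -> %u longs\n", bits, bytes, longs);
                return 0;
        }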
     322             : 
     323             : struct entry_stack {
     324             :         char    stack[PAGE_SIZE];
     325             : };
     326             : 
     327             : struct entry_stack_page {
     328             :         struct entry_stack stack;
     329             : } __aligned(PAGE_SIZE);
     330             : 
     331             : /*
     332             :  * All IO bitmap related data stored in the TSS:
     333             :  */
     334             : struct x86_io_bitmap {
     335             :         /* The sequence number of the last active bitmap. */
     336             :         u64                     prev_sequence;
     337             : 
     338             :         /*
     339             :          * Store the dirty size of the last io bitmap offender. The next
     340             :          * one will have to do the cleanup as the switch out to a non io
     341             :          * bitmap user will just set x86_tss.io_bitmap_base to a value
     342             :          * outside of the TSS limit. So for sane tasks there is no need to
     343             :          * actually touch the io_bitmap at all.
     344             :          */
     345             :         unsigned int            prev_max;
     346             : 
     347             :         /*
     348             :          * The extra 1 is there because the CPU will access an
     349             :          * additional byte beyond the end of the IO permission
     350             :          * bitmap. The extra byte must be all 1 bits, and must
     351             :          * be within the limit.
     352             :          */
     353             :         unsigned long           bitmap[IO_BITMAP_LONGS + 1];
     354             : 
     355             :         /*
     356             :          * Special I/O bitmap to emulate IOPL(3). All bytes zero,
     357             :          * except the additional byte at the end.
     358             :          */
     359             :         unsigned long           mapall[IO_BITMAP_LONGS + 1];
     360             : };
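
For reference, the hardware semantics the bitmaps above encode: a 0 bit permits access to that port, a 1 bit raises #GP, and the CPU reads one byte past the end of the map, which must be all ones. A hypothetical sketch of building such a map (example_build_io_bitmap is not a kernel function):

        #include <stdint.h>
        #include <string.h>

        #define EXAMPLE_IO_BITMAP_BYTES (65536 / 8)

        static void example_build_io_bitmap(uint8_t map[EXAMPLE_IO_BITMAP_BYTES + 1],
                                            unsigned int from, unsigned int num)
        {
                memset(map, 0xff, EXAMPLE_IO_BITMAP_BYTES + 1);       /* deny everything */
                for (unsigned int port = from; port < from + num; port++)
                        map[port / 8] &= (uint8_t)~(1u << (port % 8)); /* permit this port */
                map[EXAMPLE_IO_BITMAP_BYTES] = 0xff;                   /* required trailing byte */
        }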
     361             : 
     362             : struct tss_struct {
     363             :         /*
      364             :          * The fixed hardware portion.  This must not cross a page boundary,
      365             :          * or it risks violating the SDM's advice and potentially triggering
     366             :          * errata.
     367             :          */
     368             :         struct x86_hw_tss       x86_tss;
     369             : 
     370             :         struct x86_io_bitmap    io_bitmap;
     371             : } __aligned(PAGE_SIZE);
     372             : 
     373             : DECLARE_PER_CPU_PAGE_ALIGNED(struct tss_struct, cpu_tss_rw);
     374             : 
     375             : /* Per CPU interrupt stacks */
     376             : struct irq_stack {
     377             :         char            stack[IRQ_STACK_SIZE];
     378             : } __aligned(IRQ_STACK_SIZE);
     379             : 
     380             : #ifdef CONFIG_X86_64
     381             : struct fixed_percpu_data {
     382             :         /*
     383             :          * GCC hardcodes the stack canary as %gs:40.  Since the
     384             :          * irq_stack is the object at %gs:0, we reserve the bottom
     385             :          * 48 bytes of the irq stack for the canary.
     386             :          *
     387             :          * Once we are willing to require -mstack-protector-guard-symbol=
     388             :          * support for x86_64 stackprotector, we can get rid of this.
     389             :          */
     390             :         char            gs_base[40];
     391             :         unsigned long   stack_canary;
     392             : };
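
The 40-byte gs_base pad exists purely so that stack_canary lands at the %gs:40 slot GCC emits for the stack protector. A stand-alone check of that layout (example_fixed_percpu_data is a local mirror, not the kernel type):

        #include <stddef.h>

        struct example_fixed_percpu_data {
                char          gs_base[40];
                unsigned long stack_canary;
        };

        _Static_assert(offsetof(struct example_fixed_percpu_data, stack_canary) == 40,
                       "canary must sit at %gs:40");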
     393             : 
     394             : DECLARE_PER_CPU_FIRST(struct fixed_percpu_data, fixed_percpu_data) __visible;
     395             : DECLARE_INIT_PER_CPU(fixed_percpu_data);
     396             : 
     397             : static inline unsigned long cpu_kernelmode_gs_base(int cpu)
     398             : {
     399             :         return (unsigned long)per_cpu(fixed_percpu_data.gs_base, cpu);
     400             : }
     401             : 
     402             : extern asmlinkage void ignore_sysret(void);
     403             : 
     404             : /* Save actual FS/GS selectors and bases to current->thread */
     405             : void current_save_fsgs(void);
     406             : #else   /* X86_64 */
     407             : #ifdef CONFIG_STACKPROTECTOR
     408             : DECLARE_PER_CPU(unsigned long, __stack_chk_guard);
     409             : #endif
     410             : #endif  /* !X86_64 */
     411             : 
     412             : struct perf_event;
     413             : 
     414             : struct thread_struct {
     415             :         /* Cached TLS descriptors: */
     416             :         struct desc_struct      tls_array[GDT_ENTRY_TLS_ENTRIES];
     417             : #ifdef CONFIG_X86_32
     418             :         unsigned long           sp0;
     419             : #endif
     420             :         unsigned long           sp;
     421             : #ifdef CONFIG_X86_32
     422             :         unsigned long           sysenter_cs;
     423             : #else
     424             :         unsigned short          es;
     425             :         unsigned short          ds;
     426             :         unsigned short          fsindex;
     427             :         unsigned short          gsindex;
     428             : #endif
     429             : 
     430             : #ifdef CONFIG_X86_64
     431             :         unsigned long           fsbase;
     432             :         unsigned long           gsbase;
     433             : #else
     434             :         /*
     435             :          * XXX: this could presumably be unsigned short.  Alternatively,
     436             :          * 32-bit kernels could be taught to use fsindex instead.
     437             :          */
     438             :         unsigned long fs;
     439             :         unsigned long gs;
     440             : #endif
     441             : 
     442             :         /* Save middle states of ptrace breakpoints */
     443             :         struct perf_event       *ptrace_bps[HBP_NUM];
     444             :         /* Debug status used for traps, single steps, etc... */
     445             :         unsigned long           virtual_dr6;
     446             :         /* Keep track of the exact dr7 value set by the user */
     447             :         unsigned long           ptrace_dr7;
     448             :         /* Fault info: */
     449             :         unsigned long           cr2;
     450             :         unsigned long           trap_nr;
     451             :         unsigned long           error_code;
     452             : #ifdef CONFIG_VM86
     453             :         /* Virtual 86 mode info */
     454             :         struct vm86             *vm86;
     455             : #endif
     456             :         /* IO permissions: */
     457             :         struct io_bitmap        *io_bitmap;
     458             : 
     459             :         /*
     460             :          * IOPL. Privilege level dependent I/O permission which is
     461             :          * emulated via the I/O bitmap to prevent user space from disabling
     462             :          * interrupts.
     463             :          */
     464             :         unsigned long           iopl_emul;
     465             : 
     466             :         unsigned int            iopl_warn:1;
     467             :         unsigned int            sig_on_uaccess_err:1;
     468             : 
     469             :         /*
     470             :          * Protection Keys Register for Userspace.  Loaded immediately on
     471             :          * context switch. Store it in thread_struct to avoid a lookup in
      472             :          * the task's FPU xstate buffer. This value is only valid when a
     473             :          * task is scheduled out. For 'current' the authoritative source of
     474             :          * PKRU is the hardware itself.
     475             :          */
     476             :         u32                     pkru;
     477             : 
     478             :         /* Floating point and extended processor state */
     479             :         struct fpu              fpu;
     480             :         /*
     481             :          * WARNING: 'fpu' is dynamically-sized.  It *MUST* be at
     482             :          * the end.
     483             :          */
     484             : };
     485             : 
     486             : extern void fpu_thread_struct_whitelist(unsigned long *offset, unsigned long *size);
     487             : 
     488             : static inline void arch_thread_struct_whitelist(unsigned long *offset,
     489             :                                                 unsigned long *size)
     490             : {
     491             :         fpu_thread_struct_whitelist(offset, size);
     492             : }
     493             : 
     494             : static inline void
     495             : native_load_sp0(unsigned long sp0)
     496             : {
     497             :         this_cpu_write(cpu_tss_rw.x86_tss.sp0, sp0);
     498             : }
     499             : 
     500             : static __always_inline void native_swapgs(void)
     501             : {
     502             : #ifdef CONFIG_X86_64
     503             :         asm volatile("swapgs" ::: "memory");
     504             : #endif
     505             : }
     506             : 
     507             : static __always_inline unsigned long current_top_of_stack(void)
     508             : {
     509             :         /*
     510             :          *  We can't read directly from tss.sp0: sp0 on x86_32 is special in
     511             :          *  and around vm86 mode and sp0 on x86_64 is special because of the
     512             :          *  entry trampoline.
     513             :          */
     514             :         return this_cpu_read_stable(pcpu_hot.top_of_stack);
     515             : }
     516             : 
     517             : static __always_inline bool on_thread_stack(void)
     518             : {
     519             :         return (unsigned long)(current_top_of_stack() -
     520             :                                current_stack_pointer) < THREAD_SIZE;
     521             : }
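
on_thread_stack() leans on unsigned wrap-around: if the current stack pointer is above the recorded top, or more than THREAD_SIZE below it, the subtraction produces a huge value and the comparison fails. A sketch with an assumed 16 KiB THREAD_SIZE:

        #include <stdbool.h>

        #define EXAMPLE_THREAD_SIZE (16UL * 1024)   /* assumed kernel stack size */

        static bool example_on_thread_stack(unsigned long top, unsigned long sp)
        {
                /* sp above top, or more than THREAD_SIZE below it, wraps to a huge value. */
                return (top - sp) < EXAMPLE_THREAD_SIZE;
        }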
     522             : 
     523             : #ifdef CONFIG_PARAVIRT_XXL
     524             : #include <asm/paravirt.h>
     525             : #else
     526             : 
     527             : static inline void load_sp0(unsigned long sp0)
     528             : {
     529             :         native_load_sp0(sp0);
     530             : }
     531             : 
     532             : #endif /* CONFIG_PARAVIRT_XXL */
     533             : 
     534             : unsigned long __get_wchan(struct task_struct *p);
     535             : 
     536             : extern void select_idle_routine(const struct cpuinfo_x86 *c);
     537             : extern void amd_e400_c1e_apic_setup(void);
     538             : 
     539             : extern unsigned long            boot_option_idle_override;
     540             : 
     541             : enum idle_boot_override {IDLE_NO_OVERRIDE=0, IDLE_HALT, IDLE_NOMWAIT,
     542             :                          IDLE_POLL};
     543             : 
     544             : extern void enable_sep_cpu(void);
     545             : 
     546             : 
     547             : /* Defined in head.S */
     548             : extern struct desc_ptr          early_gdt_descr;
     549             : 
     550             : extern void switch_gdt_and_percpu_base(int);
     551             : extern void load_direct_gdt(int);
     552             : extern void load_fixmap_gdt(int);
     553             : extern void cpu_init(void);
     554             : extern void cpu_init_exception_handling(void);
     555             : extern void cr4_init(void);
     556             : 
     557             : static inline unsigned long get_debugctlmsr(void)
     558             : {
     559             :         unsigned long debugctlmsr = 0;
     560             : 
     561             : #ifndef CONFIG_X86_DEBUGCTLMSR
     562             :         if (boot_cpu_data.x86 < 6)
     563             :                 return 0;
     564             : #endif
     565             :         rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctlmsr);
     566             : 
     567             :         return debugctlmsr;
     568             : }
     569             : 
     570             : static inline void update_debugctlmsr(unsigned long debugctlmsr)
     571             : {
     572             : #ifndef CONFIG_X86_DEBUGCTLMSR
     573             :         if (boot_cpu_data.x86 < 6)
     574             :                 return;
     575             : #endif
     576             :         wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctlmsr);
     577             : }
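
A hedged sketch of how a caller might use the two accessors above to flip a DEBUGCTL bit, in the spirit of set_task_blockstep(); the BTF bit position is written as bit 1 here by assumption rather than taken from the MSR header:

        static inline void example_set_blockstep(bool on)
        {
                unsigned long ctl = get_debugctlmsr();

                if (on)
                        ctl |= (1UL << 1);      /* assumed DEBUGCTLMSR_BTF */
                else
                        ctl &= ~(1UL << 1);

                update_debugctlmsr(ctl);
        }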
     578             : 
     579             : extern void set_task_blockstep(struct task_struct *task, bool on);
     580             : 
     581             : /* Boot loader type from the setup header: */
     582             : extern int                      bootloader_type;
     583             : extern int                      bootloader_version;
     584             : 
     585             : extern char                     ignore_fpu_irq;
     586             : 
     587             : #define HAVE_ARCH_PICK_MMAP_LAYOUT 1
     588             : #define ARCH_HAS_PREFETCHW
     589             : #define ARCH_HAS_SPINLOCK_PREFETCH
     590             : 
     591             : #ifdef CONFIG_X86_32
     592             : # define BASE_PREFETCH          ""
     593             : # define ARCH_HAS_PREFETCH
     594             : #else
     595             : # define BASE_PREFETCH          "prefetcht0 %P1"
     596             : #endif
     597             : 
     598             : /*
     599             :  * Prefetch instructions for Pentium III (+) and AMD Athlon (+)
     600             :  *
      601             :  * It's not worth caring about 3dnow prefetches for the K6
     602             :  * because they are microcoded there and very slow.
     603             :  */
     604             : static inline void prefetch(const void *x)
     605             : {
     606             :         alternative_input(BASE_PREFETCH, "prefetchnta %P1",
     607             :                           X86_FEATURE_XMM,
     608             :                           "m" (*(const char *)x));
     609             : }
     610             : 
     611             : /*
     612             :  * 3dnow prefetch to get an exclusive cache line.
     613             :  * Useful for spinlocks to avoid one state transition in the
     614             :  * cache coherency protocol:
     615             :  */
     616             : static __always_inline void prefetchw(const void *x)
     617             : {
     618   578466643 :         alternative_input(BASE_PREFETCH, "prefetchw %P1",
     619             :                           X86_FEATURE_3DNOWPREFETCH,
     620             :                           "m" (*(const char *)x));
     621             : }
     622             : 
     623             : static inline void spin_lock_prefetch(const void *x)
     624             : {
     625   394472419 :         prefetchw(x);
     626             : }
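
Outside the kernel the same hint is normally emitted with a compiler builtin instead of runtime-patched alternatives; a minimal sketch, where the read/write intent argument mirrors prefetch() vs. prefetchw() above:

        static inline void example_prefetch(const void *p)
        {
                __builtin_prefetch(p, 0 /* read */, 3 /* high temporal locality */);
        }

        static inline void example_prefetchw(const void *p)
        {
                __builtin_prefetch(p, 1 /* write */, 3);
        }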
     627             : 
     628             : #define TOP_OF_INIT_STACK ((unsigned long)&init_stack + sizeof(init_stack) - \
     629             :                            TOP_OF_KERNEL_STACK_PADDING)
     630             : 
     631             : #define task_top_of_stack(task) ((unsigned long)(task_pt_regs(task) + 1))
     632             : 
     633             : #define task_pt_regs(task) \
     634             : ({                                                                      \
     635             :         unsigned long __ptr = (unsigned long)task_stack_page(task);     \
     636             :         __ptr += THREAD_SIZE - TOP_OF_KERNEL_STACK_PADDING;             \
     637             :         ((struct pt_regs *)__ptr) - 1;                                  \
     638             : })
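
task_pt_regs() exploits the fixed stack layout: the user-mode register frame sits at the very top of the task's kernel stack, just below the padding. A sketch of the same pointer arithmetic with assumed example values (16 KiB stacks, zero padding as on x86_64, and a stand-in struct for pt_regs):

        #include <stdio.h>

        #define EXAMPLE_THREAD_SIZE   (16UL * 1024)   /* assumed kernel stack size */
        #define EXAMPLE_STACK_PADDING 0UL             /* 0 on x86_64, nonzero on 32-bit */

        struct example_pt_regs { unsigned long r[21]; };   /* stand-in for struct pt_regs */

        int main(void)
        {
                unsigned long stack_page = 0x10000;   /* made-up task_stack_page() value */
                unsigned long top = stack_page + EXAMPLE_THREAD_SIZE - EXAMPLE_STACK_PADDING;
                struct example_pt_regs *regs = (struct example_pt_regs *)top - 1;

                printf("pt_regs start at %#lx, stack top at %#lx\n",
                       (unsigned long)regs, top);
                return 0;
        }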
     639             : 
     640             : #ifdef CONFIG_X86_32
     641             : #define INIT_THREAD  {                                                    \
     642             :         .sp0                    = TOP_OF_INIT_STACK,                      \
     643             :         .sysenter_cs            = __KERNEL_CS,                            \
     644             : }
     645             : 
     646             : #define KSTK_ESP(task)          (task_pt_regs(task)->sp)
     647             : 
     648             : #else
     649             : extern unsigned long __end_init_task[];
     650             : 
     651             : #define INIT_THREAD {                                                       \
     652             :         .sp     = (unsigned long)&__end_init_task - sizeof(struct pt_regs), \
     653             : }
     654             : 
     655             : extern unsigned long KSTK_ESP(struct task_struct *task);
     656             : 
     657             : #endif /* CONFIG_X86_64 */
     658             : 
     659             : extern void start_thread(struct pt_regs *regs, unsigned long new_ip,
     660             :                                                unsigned long new_sp);
     661             : 
     662             : /*
     663             :  * This decides where the kernel will search for a free chunk of vm
     664             :  * space during mmap's.
     665             :  */
     666             : #define __TASK_UNMAPPED_BASE(task_size) (PAGE_ALIGN(task_size / 3))
     667             : #define TASK_UNMAPPED_BASE              __TASK_UNMAPPED_BASE(TASK_SIZE_LOW)
     668             : 
     669             : #define KSTK_EIP(task)          (task_pt_regs(task)->ip)
     670             : 
     671             : /* Get/set a process' ability to use the timestamp counter instruction */
     672             : #define GET_TSC_CTL(adr)        get_tsc_mode((adr))
     673             : #define SET_TSC_CTL(val)        set_tsc_mode((val))
     674             : 
     675             : extern int get_tsc_mode(unsigned long adr);
     676             : extern int set_tsc_mode(unsigned int val);
     677             : 
     678             : DECLARE_PER_CPU(u64, msr_misc_features_shadow);
     679             : 
     680             : extern u16 get_llc_id(unsigned int cpu);
     681             : 
     682             : #ifdef CONFIG_CPU_SUP_AMD
     683             : extern u32 amd_get_nodes_per_socket(void);
     684             : extern u32 amd_get_highest_perf(void);
     685             : #else
     686             : static inline u32 amd_get_nodes_per_socket(void)        { return 0; }
     687             : static inline u32 amd_get_highest_perf(void)            { return 0; }
     688             : #endif
     689             : 
     690             : extern unsigned long arch_align_stack(unsigned long sp);
     691             : void free_init_pages(const char *what, unsigned long begin, unsigned long end);
     692             : extern void free_kernel_image_pages(const char *what, void *begin, void *end);
     693             : 
     694             : void default_idle(void);
     695             : #ifdef  CONFIG_XEN
     696             : bool xen_set_default_idle(void);
     697             : #else
     698             : #define xen_set_default_idle 0
     699             : #endif
     700             : 
     701             : void __noreturn stop_this_cpu(void *dummy);
     702             : void microcode_check(struct cpuinfo_x86 *prev_info);
     703             : void store_cpu_caps(struct cpuinfo_x86 *info);
     704             : 
     705             : enum l1tf_mitigations {
     706             :         L1TF_MITIGATION_OFF,
     707             :         L1TF_MITIGATION_FLUSH_NOWARN,
     708             :         L1TF_MITIGATION_FLUSH,
     709             :         L1TF_MITIGATION_FLUSH_NOSMT,
     710             :         L1TF_MITIGATION_FULL,
     711             :         L1TF_MITIGATION_FULL_FORCE
     712             : };
     713             : 
     714             : extern enum l1tf_mitigations l1tf_mitigation;
     715             : 
     716             : enum mds_mitigations {
     717             :         MDS_MITIGATION_OFF,
     718             :         MDS_MITIGATION_FULL,
     719             :         MDS_MITIGATION_VMWERV,
     720             : };
     721             : 
     722             : #ifdef CONFIG_X86_SGX
     723             : int arch_memory_failure(unsigned long pfn, int flags);
     724             : #define arch_memory_failure arch_memory_failure
     725             : 
     726             : bool arch_is_platform_page(u64 paddr);
     727             : #define arch_is_platform_page arch_is_platform_page
     728             : #endif
     729             : 
     730             : #endif /* _ASM_X86_PROCESSOR_H */

Generated by: LCOV version 1.14