LCOV - code coverage report
Current view: top level - arch/arm64/include/asm - cpufeature.h (source / functions)
Test: fstests of 6.5.0-rc3-acha @ Mon Jul 31 20:08:06 PDT 2023
Date: 2023-07-31 20:08:07
Lines: 0 of 11 hit (0.0 %)
Functions: 0 of 1 hit (0.0 %)

          Line data    Source code
       1             : /* SPDX-License-Identifier: GPL-2.0-only */
       2             : /*
       3             :  * Copyright (C) 2014 Linaro Ltd. <ard.biesheuvel@linaro.org>
       4             :  */
       5             : 
       6             : #ifndef __ASM_CPUFEATURE_H
       7             : #define __ASM_CPUFEATURE_H
       8             : 
       9             : #include <asm/alternative-macros.h>
      10             : #include <asm/cpucaps.h>
      11             : #include <asm/cputype.h>
      12             : #include <asm/hwcap.h>
      13             : #include <asm/sysreg.h>
      14             : 
      15             : #define MAX_CPU_FEATURES        128
      16             : #define cpu_feature(x)          KERNEL_HWCAP_ ## x
      17             : 
      18             : #define ARM64_SW_FEATURE_OVERRIDE_NOKASLR       0
      19             : #define ARM64_SW_FEATURE_OVERRIDE_HVHE          4
      20             : 
      21             : #ifndef __ASSEMBLY__
      22             : 
      23             : #include <linux/bug.h>
      24             : #include <linux/jump_label.h>
      25             : #include <linux/kernel.h>
      26             : 
      27             : /*
      28             :  * CPU feature register tracking
      29             :  *
      30             :  * The safe value of a CPUID feature field depends on the implications
      31             :  * of the values the architecture allows it to take. Based on the relationship
      32             :  * between the values, the features are classified into four types -
      33             :  * LOWER_SAFE, HIGHER_SAFE, HIGHER_OR_ZERO_SAFE and EXACT.
      34             :  *
      35             :  * The lowest value across all the CPUs is chosen for LOWER_SAFE and the
      36             :  * highest for HIGHER_SAFE. All CPUs are expected to have the same value
      37             :  * for a field when EXACT is specified; failing that, the safe value
      38             :  * specified in the table is chosen.
      39             :  */
      40             : 
      41             : enum ftr_type {
      42             :         FTR_EXACT,                      /* Use a predefined safe value */
      43             :         FTR_LOWER_SAFE,                 /* Smaller value is safe */
      44             :         FTR_HIGHER_SAFE,                /* Bigger value is safe */
      45             :         FTR_HIGHER_OR_ZERO_SAFE,        /* Bigger value is safe, but 0 is biggest */
      46             : };
      47             : 
      48             : #define FTR_STRICT      true    /* SANITY check strict matching required */
      49             : #define FTR_NONSTRICT   false   /* SANITY check ignored */
      50             : 
      51             : #define FTR_SIGNED      true    /* Value should be treated as signed */
      52             : #define FTR_UNSIGNED    false   /* Value should be treated as unsigned */
      53             : 
      54             : #define FTR_VISIBLE     true    /* Feature visible to the user space */
      55             : #define FTR_HIDDEN      false   /* Feature is hidden from the user */
      56             : 
      57             : #define FTR_VISIBLE_IF_IS_ENABLED(config)               \
      58             :         (IS_ENABLED(config) ? FTR_VISIBLE : FTR_HIDDEN)
      59             : 
      60             : struct arm64_ftr_bits {
      61             :         bool            sign;   /* Value is signed? */
      62             :         bool            visible;
      63             :         bool            strict; /* CPU sanity check: strict matching required? */
      64             :         enum ftr_type   type;
      65             :         u8              shift;
      66             :         u8              width;
      67             :         s64             safe_val; /* safe value for FTR_EXACT features */
      68             : };
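                     : 
                     : /*
                     :  * A minimal sketch (not an entry from a real feature table): a
                     :  * hypothetical descriptor for a hidden, strictly-matched, unsigned
                     :  * 4-bit field at bit 4 whose lower values are the safe ones:
                     :  *
                     :  *      static const struct arm64_ftr_bits example_ftr = {
                     :  *              .sign           = FTR_UNSIGNED,
                     :  *              .visible        = FTR_HIDDEN,
                     :  *              .strict         = FTR_STRICT,
                     :  *              .type           = FTR_LOWER_SAFE,
                     :  *              .shift          = 4,
                     :  *              .width          = 4,
                     :  *              .safe_val       = 0,
                     :  *      };
                     :  */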
      69             : 
      70             : /*
      71             :  * Describe the early feature override to the core override code:
      72             :  *
      73             :  * @val                 Values that are to be merged into the final
      74             :  *                      sanitised value of the register. Only the bitfields
      75             :  *                      set to 1 in @mask are valid
      76             :  * @mask                Mask of the features that are overridden by @val
      77             :  *
      78             :  * A @mask field set to full-1 indicates that the corresponding field
      79             :  * in @val is a valid override.
      80             :  *
      81             :  * A @mask field set to full-0 with the corresponding @val field set
      82             :  * to full-0 denotes that this field has no override.
      83             :  *
      84             :  * A @mask field set to full-0 with the corresponding @val field set
      85             :  * to full-1 denotes that this field has an invalid override.
      86             :  */
      87             : struct arm64_ftr_override {
      88             :         u64             val;
      89             :         u64             mask;
      90             : };
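                     : 
                     : /*
                     :  * For example (illustrative values, assuming a single 4-bit field at
                     :  * bits [3:0]):
                     :  *
                     :  *      { .val = 0x1, .mask = 0xf }     - override the field to 1 (valid)
                     :  *      { .val = 0x0, .mask = 0x0 }     - the field has no override
                     :  *      { .val = 0xf, .mask = 0x0 }     - invalid override for the field
                     :  */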
      91             : 
      92             : /*
      93             :  * struct arm64_ftr_reg - Feature register
      94             :  * @strict_mask         Bits which should match across all CPUs for sanity.
      95             :  * @sys_val             Safe value across the CPUs (system view)
      96             :  */
      97             : struct arm64_ftr_reg {
      98             :         const char                      *name;
      99             :         u64                             strict_mask;
     100             :         u64                             user_mask;
     101             :         u64                             sys_val;
     102             :         u64                             user_val;
     103             :         struct arm64_ftr_override       *override;
     104             :         const struct arm64_ftr_bits     *ftr_bits;
     105             : };
     106             : 
     107             : extern struct arm64_ftr_reg arm64_ftr_reg_ctrel0;
     108             : 
     109             : /*
     110             :  * CPU capabilities:
     111             :  *
     112             :  * We use arm64_cpu_capabilities to represent system features, errata
     113             :  * workarounds (both used internally by the kernel and tracked in
     114             :  * system_cpucaps) and ELF HWCAPs (which are exposed to user space).
     115             :  *
     116             :  * To support systems with heterogeneous CPUs, we need to make sure that we
     117             :  * detect the capabilities correctly on the system and take appropriate
     118             :  * measures to ensure there are no incompatibilities.
     119             :  *
     120             :  * This comment tries to explain how we treat the capabilities.
     121             :  * Each capability has the following list of attributes:
     122             :  *
     123             :  * 1) Scope of Detection: The system detects a given capability by
     124             :  *    performing some checks at runtime. This could be, e.g., checking the
     125             :  *    value of a field in a CPU ID feature register or checking the CPU
     126             :  *    model. The capability provides a callback (@matches()) to
     127             :  *    perform the check. Scope defines how the checks should be performed.
     128             :  *    There are three cases:
     129             :  *
     130             :  *     a) SCOPE_LOCAL_CPU: check all the CPUs and "detect" if at least one
     131             :  *        matches. This implies we have to run the check on all the
     132             :  *        booting CPUs, until the system decides that the state of the
     133             :  *        capability is finalised. (See section 2 below)
     134             :  *              Or
     135             :  *     b) SCOPE_SYSTEM: check all the CPUs and "detect" if all the CPUs
     136             :  *        match. This implies we run the check only once, when the
     137             :  *        system decides to finalise the state of the capability. If the
     138             :  *        capability relies on a field in one of the CPU ID feature
     139             :  *        registers, we use the sanitised value of the register from the
     140             :  *        CPU feature infrastructure to make the decision.
     141             :  *              Or
     142             :  *     c) SCOPE_BOOT_CPU: Check only on the primary boot CPU to detect the
     143             :  *        feature. This category is for features that are "finalised"
     144             :  *        (or used) by the kernel very early even before the SMP CPUs
     145             :  *        are brought up.
     146             :  *
     147             :  *    The process of detection is usually denoted by "updating" the
     148             :  *    capability state in the code.
     149             :  *
     150             :  * 2) Finalise the state: The kernel should finalise the state of a
     151             :  *    capability at some point during its execution and take necessary
     152             :  *    actions if any. Usually, this is done after all the boot-time
     153             :  *    enabled CPUs are brought up by the kernel, so that it can make a
     154             :  *    better decision based on the available set of CPUs. However, there
     155             :  *    are some special cases, where the action is taken during early
     156             :  *    boot by the primary boot CPU. (e.g., running the kernel at EL2 with
     157             :  *    Virtualisation Host Extensions). The kernel usually disallows any
     158             :  *    changes to the state of a capability once it finalises the capability
     159             :  *    and takes any action, as it may be impossible to execute the actions
     160             :  *    safely. A CPU brought up after a capability is "finalised" is
     161             :  *    referred to as a "late CPU" w.r.t. the capability, e.g., all secondary
     162             :  *    CPUs are treated as "late CPUs" for capabilities determined by the
     163             :  *    boot CPU.
     164             :  *
     165             :  *    At the moment there are two passes of finalising the capabilities.
     166             :  *      a) Boot CPU scope capabilities - Finalised by primary boot CPU via
     167             :  *         setup_boot_cpu_capabilities().
     168             :  *      b) Everything except (a) - Run via setup_system_capabilities().
     169             :  *
     170             :  * 3) Verification: When a CPU is brought online (e.g., by the user or by
     171             :  *    the kernel), the kernel should make sure that it is safe to use the
     172             :  *    CPU, by verifying that the CPU is compliant with the state of the
     173             :  *    capabilities finalised already. This happens via:
     174             :  *
     175             :  *      secondary_start_kernel() -> check_local_cpu_capabilities()
     176             :  *
     177             :  *    As explained in (2) above, capabilities could be finalised at
     178             :  *    different points in the execution. Each newly booted CPU is verified
     179             :  *    against the capabilities that have been finalised by the time it
     180             :  *    boots.
     181             :  *
     182             :  *      a) SCOPE_BOOT_CPU: All CPUs are verified against the capability
     183             :  *      except for the primary boot CPU.
     184             :  *
     185             :  *      b) SCOPE_LOCAL_CPU, SCOPE_SYSTEM: All CPUs hotplugged on by the
     186             :  *      user after the kernel boot are verified against the capability.
     187             :  *
     188             :  *    If there is a conflict, the kernel takes an action, based on the
     189             :  *    severity (e.g., a CPU could be prevented from booting or cause a
     190             :  *    kernel panic). The CPU is allowed to "affect" the state of the
     191             :  *    capability, if it has not been finalised already. See section 5
     192             :  *    for more details on conflicts.
     193             :  *
     194             :  * 4) Action: As mentioned in (2), the kernel can take an action for each
     195             :  *    detected capability, on all CPUs on the system. Appropriate actions
     196             :  *    include turning on an architectural feature, modifying the control
     197             :  *    registers (e.g., SCTLR, TCR etc.) or patching the kernel via
     198             :  *    alternatives. The kernel patching is batched and performed at a later
     199             :  *    point. The actions are always initiated only after the capability
     200             :  *    is finalised. This is usually denoted by "enabling" the capability.
     201             :  *    The actions are initiated as follows:
     202             :  *      a) The action is triggered on all online CPUs, after the capability
     203             :  *      is finalised, invoked within the stop_machine() context from
     204             :  *      enable_cpu_capabilities().
     205             :  *
     206             :  *      b) For any late CPU brought up after (1), the action is triggered via:
     207             :  *
     208             :  *        check_local_cpu_capabilities() -> verify_local_cpu_capabilities()
     209             :  *
     210             :  * 5) Conflicts: Based on the state of the capability on a late CPU vs.
     211             :  *    the system state, we could have the following combinations:
     212             :  *
     213             :  *              x-----------------------------x
     214             :  *              | Type  | System   | Late CPU |
     215             :  *              |-----------------------------|
     216             :  *              |  a    |   y      |    n     |
     217             :  *              |-----------------------------|
     218             :  *              |  b    |   n      |    y     |
     219             :  *              x-----------------------------x
     220             :  *
     221             :  *     Two separate flag bits are defined to indicate whether each kind of
     222             :  *     conflict can be allowed:
     223             :  *              ARM64_CPUCAP_OPTIONAL_FOR_LATE_CPU - Case(a) is allowed
     224             :  *              ARM64_CPUCAP_PERMITTED_FOR_LATE_CPU - Case(b) is allowed
     225             :  *
     226             :  *     Case (a) is not permitted for a capability that the system requires
     227             :  *     all CPUs to have in order for the capability to be enabled. This is
     228             :  *     typical for capabilities that represent enhanced functionality.
     229             :  *
     230             :  *     Case (b) is not permitted for a capability that must be enabled
     231             :  *     during boot if any CPU in the system requires it in order to run
     232             :  *     safely. This is typical for erratum work arounds that cannot be
     233             :  *     enabled after the corresponding capability is finalised.
     234             :  *
     235             :  *     In some non-typical cases either both (a) and (b), or neither,
     236             :  *     should be permitted. This can be described by including neither
     237             :  *     or both flags in the capability's type field.
     238             :  *
     239             :  *     In case of a conflict, the CPU is prevented from booting. If the
     240             :  *     ARM64_CPUCAP_PANIC_ON_CONFLICT flag is specified for the capability,
     241             :  *     then a kernel panic is triggered.
     242             :  */
     243             : 
     244             : 
     245             : /*
     246             :  * Decide how the capability is detected.
     247             :  * On any local CPU vs System wide vs the primary boot CPU
     248             :  */
     249             : #define ARM64_CPUCAP_SCOPE_LOCAL_CPU            ((u16)BIT(0))
     250             : #define ARM64_CPUCAP_SCOPE_SYSTEM               ((u16)BIT(1))
     251             : /*
     252             :  * The capability is detected on the boot CPU and is used by the kernel
     253             :  * during early boot, i.e., the capability should be "detected" and
     254             :  * "enabled" as early as possible on all booting CPUs.
     255             :  */
     256             : #define ARM64_CPUCAP_SCOPE_BOOT_CPU             ((u16)BIT(2))
     257             : #define ARM64_CPUCAP_SCOPE_MASK                 \
     258             :         (ARM64_CPUCAP_SCOPE_SYSTEM      |       \
     259             :          ARM64_CPUCAP_SCOPE_LOCAL_CPU   |       \
     260             :          ARM64_CPUCAP_SCOPE_BOOT_CPU)
     261             : 
     262             : #define SCOPE_SYSTEM                            ARM64_CPUCAP_SCOPE_SYSTEM
     263             : #define SCOPE_LOCAL_CPU                         ARM64_CPUCAP_SCOPE_LOCAL_CPU
     264             : #define SCOPE_BOOT_CPU                          ARM64_CPUCAP_SCOPE_BOOT_CPU
     265             : #define SCOPE_ALL                               ARM64_CPUCAP_SCOPE_MASK
     266             : 
     267             : /*
     268             :  * Is it permitted for a late CPU to have this capability when the
     269             :  * system hasn't already enabled it?
     270             :  */
     271             : #define ARM64_CPUCAP_PERMITTED_FOR_LATE_CPU     ((u16)BIT(4))
     272             : /* Is it safe for a late CPU to miss this capability when the system has it? */
     273             : #define ARM64_CPUCAP_OPTIONAL_FOR_LATE_CPU      ((u16)BIT(5))
     274             : /* Panic when a conflict is detected */
     275             : #define ARM64_CPUCAP_PANIC_ON_CONFLICT          ((u16)BIT(6))
     276             : 
     277             : /*
     278             :  * CPU errata workarounds that need to be enabled at boot time if one or
     279             :  * more CPUs in the system require them. When one of these capabilities
     280             :  * has been enabled, it is safe to allow any CPU to boot that doesn't
     281             :  * require the workaround. However, it is not safe if a "late" CPU
     282             :  * requires a workaround and the system hasn't enabled it already.
     283             :  */
     284             : #define ARM64_CPUCAP_LOCAL_CPU_ERRATUM          \
     285             :         (ARM64_CPUCAP_SCOPE_LOCAL_CPU | ARM64_CPUCAP_OPTIONAL_FOR_LATE_CPU)
     286             : /*
     287             :  * CPU feature detected at boot time based on the system-wide value of a
     288             :  * feature. It is safe for a late CPU to have this feature even though
     289             :  * the system hasn't enabled it, although the feature will not be used
     290             :  * by Linux in this case. If the system has enabled this feature already,
     291             :  * then every late CPU must have it.
     292             :  */
     293             : #define ARM64_CPUCAP_SYSTEM_FEATURE     \
     294             :         (ARM64_CPUCAP_SCOPE_SYSTEM | ARM64_CPUCAP_PERMITTED_FOR_LATE_CPU)
     295             : /*
     296             :  * CPU feature detected at boot time based on feature of one or more CPUs.
     297             :  * All possible conflicts for a late CPU are ignored.
     298             :  * NOTE: this means that a late CPU with the feature will *not* cause the
     299             :  * capability to be advertised by cpus_have_*cap()!
     300             :  */
     301             : #define ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE             \
     302             :         (ARM64_CPUCAP_SCOPE_LOCAL_CPU           |       \
     303             :          ARM64_CPUCAP_OPTIONAL_FOR_LATE_CPU     |       \
     304             :          ARM64_CPUCAP_PERMITTED_FOR_LATE_CPU)
     305             : 
     306             : /*
     307             :  * CPU feature detected at boot time, on one or more CPUs. A late CPU
     308             :  * is not allowed to have the capability when the system doesn't have it.
     309             :  * It is OK for a late CPU to miss the feature.
     310             :  */
     311             : #define ARM64_CPUCAP_BOOT_RESTRICTED_CPU_LOCAL_FEATURE  \
     312             :         (ARM64_CPUCAP_SCOPE_LOCAL_CPU           |       \
     313             :          ARM64_CPUCAP_OPTIONAL_FOR_LATE_CPU)
     314             : 
     315             : /*
     316             :  * CPU feature used early in the boot based on the boot CPU. All secondary
     317             :  * CPUs must match the state of the capability as detected by the boot CPU. In
     318             :  * case of a conflict, a kernel panic is triggered.
     319             :  */
     320             : #define ARM64_CPUCAP_STRICT_BOOT_CPU_FEATURE            \
     321             :         (ARM64_CPUCAP_SCOPE_BOOT_CPU | ARM64_CPUCAP_PANIC_ON_CONFLICT)
     322             : 
     323             : /*
     324             :  * CPU feature used early in the boot based on the boot CPU. It is safe for a
     325             :  * late CPU to have this feature even though the boot CPU hasn't enabled it,
     326             :  * although the feature will not be used by Linux in this case. If the boot CPU
     327             :  * has enabled this feature already, then every late CPU must have it.
     328             :  */
     329             : #define ARM64_CPUCAP_BOOT_CPU_FEATURE                  \
     330             :         (ARM64_CPUCAP_SCOPE_BOOT_CPU | ARM64_CPUCAP_PERMITTED_FOR_LATE_CPU)
     331             : 
     332             : struct arm64_cpu_capabilities {
     333             :         const char *desc;
     334             :         u16 capability;
     335             :         u16 type;
     336             :         bool (*matches)(const struct arm64_cpu_capabilities *caps, int scope);
     337             :         /*
     338             :          * Take the appropriate actions to configure this capability
     339             :          * for this CPU. If the capability is detected by the kernel,
     340             :          * this will be called on all the CPUs in the system,
     341             :          * including the hotplugged CPUs, regardless of whether the
     342             :          * capability is available on that specific CPU. This is
     343             :          * useful for some capabilities (e.g., working around CPU
     344             :          * errata), where all the CPUs must take some action (e.g.,
     345             :          * changing system control/configuration). Thus, if an action
     346             :          * is required only if the CPU has the capability, then the
     347             :          * routine must check it before taking any action.
     348             :          */
     349             :         void (*cpu_enable)(const struct arm64_cpu_capabilities *cap);
     350             :         union {
     351             :                 struct {        /* To be used for erratum handling only */
     352             :                         struct midr_range midr_range;
     353             :                         const struct arm64_midr_revidr {
     354             :                                 u32 midr_rv;            /* revision/variant */
     355             :                                 u32 revidr_mask;
     356             :                         } * const fixed_revs;
     357             :                 };
     358             : 
     359             :                 const struct midr_range *midr_range_list;
     360             :                 struct {        /* Feature register checking */
     361             :                         u32 sys_reg;
     362             :                         u8 field_pos;
     363             :                         u8 field_width;
     364             :                         u8 min_field_value;
     365             :                         u8 hwcap_type;
     366             :                         bool sign;
     367             :                         unsigned long hwcap;
     368             :                 };
     369             :         };
     370             : 
     371             :         /*
     372             :          * An optional list of "matches/cpu_enable" pairs for the same
     373             :          * "capability" of the same "type" as described by the parent.
     374             :          * Only matches(), cpu_enable() and the fields relevant to these
     375             :          * methods are significant in the list. cpu_enable() is
     376             :          * invoked only if the corresponding entry "matches()".
     377             :          * However, if a cpu_enable() method is associated
     378             :          * with multiple matches(), care should be taken that either
     379             :          * the match criteria are mutually exclusive, or that the
     380             :          * method is robust against being called multiple times.
     381             :          */
     382             :         const struct arm64_cpu_capabilities *match_list;
     383             : };
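                     : 
                     : /*
                     :  * A minimal sketch of a capability entry (hypothetical names, not a real
                     :  * table entry): a system-wide feature detected from a CPU ID register
                     :  * field, with the .capability value elided:
                     :  *
                     :  *      static bool
                     :  *      example_matches(const struct arm64_cpu_capabilities *cap, int scope)
                     :  *      {
                     :  *              u64 reg = read_sanitised_ftr_reg(cap->sys_reg);
                     :  *
                     :  *              return cpuid_feature_extract_field_width(reg, cap->field_pos,
                     :  *                                                       cap->field_width,
                     :  *                                                       cap->sign) >=
                     :  *                     cap->min_field_value;
                     :  *      }
                     :  *
                     :  *      static const struct arm64_cpu_capabilities example_cap = {
                     :  *              .desc                   = "Example feature",
                     :  *              .type                   = ARM64_CPUCAP_SYSTEM_FEATURE,
                     :  *              .matches                = example_matches,
                     :  *              .sys_reg                = SYS_ID_AA64ISAR2_EL1,
                     :  *              .field_pos              = ID_AA64ISAR2_EL1_BC_SHIFT,
                     :  *              .field_width            = 4,
                     :  *              .sign                   = FTR_UNSIGNED,
                     :  *              .min_field_value        = 1,
                     :  *      };
                     :  */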
     384             : 
     385             : static inline int cpucap_default_scope(const struct arm64_cpu_capabilities *cap)
     386             : {
     387             :         return cap->type & ARM64_CPUCAP_SCOPE_MASK;
     388             : }
     389             : 
     390             : /*
     391             :  * Generic helper for handling capabilities with multiple (match, enable)
     392             :  * pairs of callbacks, sharing the same capability bit.
     393             :  * Iterate over each entry to see if at least one matches.
     394             :  */
     395             : static inline bool
     396             : cpucap_multi_entry_cap_matches(const struct arm64_cpu_capabilities *entry,
     397             :                                int scope)
     398             : {
     399             :         const struct arm64_cpu_capabilities *caps;
     400             : 
     401             :         for (caps = entry->match_list; caps->matches; caps++)
     402             :                 if (caps->matches(caps, scope))
     403             :                         return true;
     404             : 
     405             :         return false;
     406             : }
     407             : 
     408             : static __always_inline bool is_vhe_hyp_code(void)
     409             : {
     410             :         /* Only defined for code run in VHE hyp context */
     411             :         return __is_defined(__KVM_VHE_HYPERVISOR__);
     412             : }
     413             : 
     414             : static __always_inline bool is_nvhe_hyp_code(void)
     415             : {
     416             :         /* Only defined for code run in NVHE hyp context */
     417             :         return __is_defined(__KVM_NVHE_HYPERVISOR__);
     418             : }
     419             : 
     420             : static __always_inline bool is_hyp_code(void)
     421             : {
     422             :         return is_vhe_hyp_code() || is_nvhe_hyp_code();
     423             : }
     424             : 
     425             : extern DECLARE_BITMAP(system_cpucaps, ARM64_NCAPS);
     426             : 
     427             : extern DECLARE_BITMAP(boot_cpucaps, ARM64_NCAPS);
     428             : 
     429             : #define for_each_available_cap(cap)             \
     430             :         for_each_set_bit(cap, system_cpucaps, ARM64_NCAPS)
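                     : 
                     : /*
                     :  * Illustrative use (hypothetical caller): walking every capability the
                     :  * system has detected:
                     :  *
                     :  *      int cap;
                     :  *
                     :  *      for_each_available_cap(cap)
                     :  *              pr_info("cpucap %d is set\n", cap);
                     :  */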
     431             : 
     432             : bool this_cpu_has_cap(unsigned int cap);
     433             : void cpu_set_feature(unsigned int num);
     434             : bool cpu_have_feature(unsigned int num);
     435             : unsigned long cpu_get_elf_hwcap(void);
     436             : unsigned long cpu_get_elf_hwcap2(void);
     437             : 
     438             : #define cpu_set_named_feature(name) cpu_set_feature(cpu_feature(name))
     439             : #define cpu_have_named_feature(name) cpu_have_feature(cpu_feature(name))
     440             : 
     441             : static __always_inline bool system_capabilities_finalized(void)
     442             : {
     443           0 :         return alternative_has_cap_likely(ARM64_ALWAYS_SYSTEM);
     444             : }
     445             : 
     446             : /*
     447             :  * Test for a capability with a runtime check.
     448             :  *
     449             :  * Before the capability is detected, this returns false.
     450             :  */
     451             : static __always_inline bool cpus_have_cap(unsigned int num)
     452             : {
     453           0 :         if (num >= ARM64_NCAPS)
     454             :                 return false;
     455           0 :         return arch_test_bit(num, system_cpucaps);
     456             : }
     457             : 
     458             : /*
     459             :  * Test for a capability without a runtime check.
     460             :  *
     461             :  * Before capabilities are finalized, this returns false.
     462             :  * After capabilities are finalized, this is patched to avoid a runtime check.
     463             :  *
     464             :  * @num must be a compile-time constant.
     465             :  */
     466             : static __always_inline bool __cpus_have_const_cap(int num)
     467             : {
     468           0 :         if (num >= ARM64_NCAPS)
     469             :                 return false;
     470           0 :         return alternative_has_cap_unlikely(num);
     471             : }
     472             : 
     473             : /*
     474             :  * Test for a capability without a runtime check.
     475             :  *
     476             :  * Before capabilities are finalized, this will BUG().
     477             :  * After capabilities are finalized, this is patched to avoid a runtime check.
     478             :  *
     479             :  * @num must be a compile-time constant.
     480             :  */
     481             : static __always_inline bool cpus_have_final_cap(int num)
     482             : {
     483             :         if (system_capabilities_finalized())
     484             :                 return __cpus_have_const_cap(num);
     485             :         else
     486             :                 BUG();
     487             : }
     488             : 
     489             : /*
     490             :  * Test for a capability, possibly with a runtime check for non-hyp code.
     491             :  *
     492             :  * For hyp code, this behaves the same as cpus_have_final_cap().
     493             :  *
     494             :  * For non-hyp code:
     495             :  * Before capabilities are finalized, this behaves as cpus_have_cap().
     496             :  * After capabilities are finalized, this is patched to avoid a runtime check.
     497             :  *
     498             :  * @num must be a compile-time constant.
     499             :  */
     500             : static __always_inline bool cpus_have_const_cap(int num)
     501             : {
     502           0 :         if (is_hyp_code())
     503             :                 return cpus_have_final_cap(num);
     504           0 :         else if (system_capabilities_finalized())
     505           0 :                 return __cpus_have_const_cap(num);
     506             :         else
     507           0 :                 return cpus_have_cap(num);
     508             : }
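                     : 
                     : /*
                     :  * A sketch of how a (hypothetical) caller would pick between the three
                     :  * helpers above, using ARM64_HAS_PAN as the example capability:
                     :  *
                     :  *      // Safe in any context, always performs a runtime bitmap test:
                     :  *      if (cpus_have_cap(ARM64_HAS_PAN))
                     :  *              ...
                     :  *
                     :  *      // Only legal once capabilities are finalized (BUG()s otherwise);
                     :  *      // patched via alternatives, so no runtime check:
                     :  *      if (cpus_have_final_cap(ARM64_HAS_PAN))
                     :  *              ...
                     :  *
                     :  *      // Works before and after finalization; falls back to a runtime
                     :  *      // check until the alternatives are patched:
                     :  *      if (cpus_have_const_cap(ARM64_HAS_PAN))
                     :  *              ...
                     :  */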
     509             : 
     510             : static inline int __attribute_const__
     511             : cpuid_feature_extract_signed_field_width(u64 features, int field, int width)
     512             : {
     513             :         return (s64)(features << (64 - width - field)) >> (64 - width);
     514             : }
     515             : 
     516             : static inline int __attribute_const__
     517             : cpuid_feature_extract_signed_field(u64 features, int field)
     518             : {
     519             :         return cpuid_feature_extract_signed_field_width(features, field, 4);
     520             : }
     521             : 
     522             : static __always_inline unsigned int __attribute_const__
     523             : cpuid_feature_extract_unsigned_field_width(u64 features, int field, int width)
     524             : {
     525             :         return (u64)(features << (64 - width - field)) >> (64 - width);
     526             : }
     527             : 
     528             : static __always_inline unsigned int __attribute_const__
     529             : cpuid_feature_extract_unsigned_field(u64 features, int field)
     530             : {
     531             :         return cpuid_feature_extract_unsigned_field_width(features, field, 4);
     532             : }
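                     : 
                     : /*
                     :  * Worked example (illustrative numbers): extracting the 4-bit field at
                     :  * shift 8 from features = 0x0000000000000a00:
                     :  *
                     :  *      features << (64 - 4 - 8) == 0xa000000000000000
                     :  *      ... >> (64 - 4)          == 0xa (unsigned) or -6 (signed)
                     :  *
                     :  * i.e. the left shift discards everything above the field and the right
                     :  * shift brings it down to bit 0, sign-extending in the signed variant.
                     :  */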
     533             : 
     534             : /*
     535             :  * Fields that identify the version of the Performance Monitors Extension do
     536             :  * not follow the standard ID scheme. See ARM DDI 0487E.a page D13-2825,
     537             :  * "Alternative ID scheme used for the Performance Monitors Extension version".
     538             :  */
     539             : static inline u64 __attribute_const__
     540             : cpuid_feature_cap_perfmon_field(u64 features, int field, u64 cap)
     541             : {
     542             :         u64 val = cpuid_feature_extract_unsigned_field(features, field);
     543             :         u64 mask = GENMASK_ULL(field + 3, field);
     544             : 
     545             :         /* Treat IMPLEMENTATION DEFINED functionality as unimplemented */
     546             :         if (val == ID_AA64DFR0_EL1_PMUVer_IMP_DEF)
     547             :                 val = 0;
     548             : 
     549             :         if (val > cap) {
     550             :                 features &= ~mask;
     551             :                 features |= (cap << field) & mask;
     552             :         }
     553             : 
     554             :         return features;
     555             : }
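                     : 
                     : /*
                     :  * E.g. (illustrative numbers): with cap = 4, a field value of 6 is
                     :  * clamped to 4, whereas an IMP_DEF value (0xf) is treated as 0 for the
                     :  * comparison and is therefore left in place rather than clamped.
                     :  */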
     556             : 
     557             : static inline u64 arm64_ftr_mask(const struct arm64_ftr_bits *ftrp)
     558             : {
     559             :         return (u64)GENMASK(ftrp->shift + ftrp->width - 1, ftrp->shift);
     560             : }
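                     : 
                     : /*
                     :  * E.g. for a field with .shift = 4 and .width = 4, arm64_ftr_mask()
                     :  * returns GENMASK(7, 4) == 0xf0.
                     :  */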
     561             : 
     562             : static inline u64 arm64_ftr_reg_user_value(const struct arm64_ftr_reg *reg)
     563             : {
     564             :         return (reg->user_val | (reg->sys_val & reg->user_mask));
     565             : }
     566             : 
     567             : static inline int __attribute_const__
     568             : cpuid_feature_extract_field_width(u64 features, int field, int width, bool sign)
     569             : {
     570             :         if (WARN_ON_ONCE(!width))
     571             :                 width = 4;
     572             :         return (sign) ?
     573             :                 cpuid_feature_extract_signed_field_width(features, field, width) :
     574             :                 cpuid_feature_extract_unsigned_field_width(features, field, width);
     575             : }
     576             : 
     577             : static inline int __attribute_const__
     578             : cpuid_feature_extract_field(u64 features, int field, bool sign)
     579             : {
     580             :         return cpuid_feature_extract_field_width(features, field, 4, sign);
     581             : }
     582             : 
     583             : static inline s64 arm64_ftr_value(const struct arm64_ftr_bits *ftrp, u64 val)
     584             : {
     585             :         return (s64)cpuid_feature_extract_field_width(val, ftrp->shift, ftrp->width, ftrp->sign);
     586             : }
     587             : 
     588             : static inline bool id_aa64mmfr0_mixed_endian_el0(u64 mmfr0)
     589             : {
     590             :         return cpuid_feature_extract_unsigned_field(mmfr0, ID_AA64MMFR0_EL1_BIGEND_SHIFT) == 0x1 ||
     591             :                 cpuid_feature_extract_unsigned_field(mmfr0, ID_AA64MMFR0_EL1_BIGENDEL0_SHIFT) == 0x1;
     592             : }
     593             : 
     594             : static inline bool id_aa64pfr0_32bit_el1(u64 pfr0)
     595             : {
     596             :         u32 val = cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_EL1_EL1_SHIFT);
     597             : 
     598             :         return val == ID_AA64PFR0_EL1_ELx_32BIT_64BIT;
     599             : }
     600             : 
     601             : static inline bool id_aa64pfr0_32bit_el0(u64 pfr0)
     602             : {
     603             :         u32 val = cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_EL1_EL0_SHIFT);
     604             : 
     605             :         return val == ID_AA64PFR0_EL1_ELx_32BIT_64BIT;
     606             : }
     607             : 
     608             : static inline bool id_aa64pfr0_sve(u64 pfr0)
     609             : {
     610             :         u32 val = cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_EL1_SVE_SHIFT);
     611             : 
     612             :         return val > 0;
     613             : }
     614             : 
     615             : static inline bool id_aa64pfr1_sme(u64 pfr1)
     616             : {
     617             :         u32 val = cpuid_feature_extract_unsigned_field(pfr1, ID_AA64PFR1_EL1_SME_SHIFT);
     618             : 
     619             :         return val > 0;
     620             : }
     621             : 
     622             : static inline bool id_aa64pfr1_mte(u64 pfr1)
     623             : {
     624             :         u32 val = cpuid_feature_extract_unsigned_field(pfr1, ID_AA64PFR1_EL1_MTE_SHIFT);
     625             : 
     626             :         return val >= ID_AA64PFR1_EL1_MTE_MTE2;
     627             : }
     628             : 
     629             : void __init setup_cpu_features(void);
     630             : void check_local_cpu_capabilities(void);
     631             : 
     632             : u64 read_sanitised_ftr_reg(u32 id);
     633             : u64 __read_sysreg_by_encoding(u32 sys_id);
     634             : 
     635             : static inline bool cpu_supports_mixed_endian_el0(void)
     636             : {
     637             :         return id_aa64mmfr0_mixed_endian_el0(read_cpuid(ID_AA64MMFR0_EL1));
     638             : }
     639             : 
     640             : 
     641             : static inline bool supports_csv2p3(int scope)
     642             : {
     643             :         u64 pfr0;
     644             :         u8 csv2_val;
     645             : 
     646             :         if (scope == SCOPE_LOCAL_CPU)
     647             :                 pfr0 = read_sysreg_s(SYS_ID_AA64PFR0_EL1);
     648             :         else
     649             :                 pfr0 = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);
     650             : 
     651             :         csv2_val = cpuid_feature_extract_unsigned_field(pfr0,
     652             :                                                         ID_AA64PFR0_EL1_CSV2_SHIFT);
     653             :         return csv2_val == 3;
     654             : }
     655             : 
     656             : static inline bool supports_clearbhb(int scope)
     657             : {
     658             :         u64 isar2;
     659             : 
     660             :         if (scope == SCOPE_LOCAL_CPU)
     661             :                 isar2 = read_sysreg_s(SYS_ID_AA64ISAR2_EL1);
     662             :         else
     663             :                 isar2 = read_sanitised_ftr_reg(SYS_ID_AA64ISAR2_EL1);
     664             : 
     665             :         return cpuid_feature_extract_unsigned_field(isar2,
     666             :                                                     ID_AA64ISAR2_EL1_BC_SHIFT);
     667             : }
     668             : 
     669             : const struct cpumask *system_32bit_el0_cpumask(void);
     670             : DECLARE_STATIC_KEY_FALSE(arm64_mismatched_32bit_el0);
     671             : 
     672             : static inline bool system_supports_32bit_el0(void)
     673             : {
     674             :         u64 pfr0 = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);
     675             : 
     676             :         return static_branch_unlikely(&arm64_mismatched_32bit_el0) ||
     677             :                id_aa64pfr0_32bit_el0(pfr0);
     678             : }
     679             : 
     680             : static inline bool system_supports_4kb_granule(void)
     681             : {
     682             :         u64 mmfr0;
     683             :         u32 val;
     684             : 
     685             :         mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
     686             :         val = cpuid_feature_extract_unsigned_field(mmfr0,
     687             :                                                 ID_AA64MMFR0_EL1_TGRAN4_SHIFT);
     688             : 
     689             :         return (val >= ID_AA64MMFR0_EL1_TGRAN4_SUPPORTED_MIN) &&
     690             :                (val <= ID_AA64MMFR0_EL1_TGRAN4_SUPPORTED_MAX);
     691             : }
     692             : 
     693             : static inline bool system_supports_64kb_granule(void)
     694             : {
     695             :         u64 mmfr0;
     696             :         u32 val;
     697             : 
     698             :         mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
     699             :         val = cpuid_feature_extract_unsigned_field(mmfr0,
     700             :                                                 ID_AA64MMFR0_EL1_TGRAN64_SHIFT);
     701             : 
     702             :         return (val >= ID_AA64MMFR0_EL1_TGRAN64_SUPPORTED_MIN) &&
     703             :                (val <= ID_AA64MMFR0_EL1_TGRAN64_SUPPORTED_MAX);
     704             : }
     705             : 
     706             : static inline bool system_supports_16kb_granule(void)
     707             : {
     708             :         u64 mmfr0;
     709             :         u32 val;
     710             : 
     711             :         mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
     712             :         val = cpuid_feature_extract_unsigned_field(mmfr0,
     713             :                                                 ID_AA64MMFR0_EL1_TGRAN16_SHIFT);
     714             : 
     715             :         return (val >= ID_AA64MMFR0_EL1_TGRAN16_SUPPORTED_MIN) &&
     716             :                (val <= ID_AA64MMFR0_EL1_TGRAN16_SUPPORTED_MAX);
     717             : }
     718             : 
     719             : static inline bool system_supports_mixed_endian_el0(void)
     720             : {
     721             :         return id_aa64mmfr0_mixed_endian_el0(read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1));
     722             : }
     723             : 
     724             : static inline bool system_supports_mixed_endian(void)
     725             : {
     726             :         u64 mmfr0;
     727             :         u32 val;
     728             : 
     729             :         mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
     730             :         val = cpuid_feature_extract_unsigned_field(mmfr0,
     731             :                                                 ID_AA64MMFR0_EL1_BIGEND_SHIFT);
     732             : 
     733             :         return val == 0x1;
     734             : }
     735             : 
     736             : static __always_inline bool system_supports_fpsimd(void)
     737             : {
     738             :         return !cpus_have_const_cap(ARM64_HAS_NO_FPSIMD);
     739             : }
     740             : 
     741             : static inline bool system_uses_hw_pan(void)
     742             : {
     743             :         return IS_ENABLED(CONFIG_ARM64_PAN) &&
     744             :                 cpus_have_const_cap(ARM64_HAS_PAN);
     745             : }
     746             : 
     747             : static inline bool system_uses_ttbr0_pan(void)
     748             : {
     749             :         return IS_ENABLED(CONFIG_ARM64_SW_TTBR0_PAN) &&
     750             :                 !system_uses_hw_pan();
     751             : }
     752             : 
     753             : static __always_inline bool system_supports_sve(void)
     754             : {
     755             :         return IS_ENABLED(CONFIG_ARM64_SVE) &&
     756             :                 cpus_have_const_cap(ARM64_SVE);
     757             : }
     758             : 
     759             : static __always_inline bool system_supports_sme(void)
     760             : {
     761             :         return IS_ENABLED(CONFIG_ARM64_SME) &&
     762             :                 cpus_have_const_cap(ARM64_SME);
     763             : }
     764             : 
     765             : static __always_inline bool system_supports_sme2(void)
     766             : {
     767             :         return IS_ENABLED(CONFIG_ARM64_SME) &&
     768             :                 cpus_have_const_cap(ARM64_SME2);
     769             : }
     770             : 
     771             : static __always_inline bool system_supports_fa64(void)
     772             : {
     773             :         return IS_ENABLED(CONFIG_ARM64_SME) &&
     774             :                 cpus_have_const_cap(ARM64_SME_FA64);
     775             : }
     776             : 
     777             : static __always_inline bool system_supports_tpidr2(void)
     778             : {
     779             :         return system_supports_sme();
     780             : }
     781             : 
     782             : static __always_inline bool system_supports_cnp(void)
     783             : {
     784             :         return IS_ENABLED(CONFIG_ARM64_CNP) &&
     785             :                 cpus_have_const_cap(ARM64_HAS_CNP);
     786             : }
     787             : 
     788             : static inline bool system_supports_address_auth(void)
     789             : {
     790             :         return IS_ENABLED(CONFIG_ARM64_PTR_AUTH) &&
     791             :                 cpus_have_const_cap(ARM64_HAS_ADDRESS_AUTH);
     792             : }
     793             : 
     794             : static inline bool system_supports_generic_auth(void)
     795             : {
     796             :         return IS_ENABLED(CONFIG_ARM64_PTR_AUTH) &&
     797             :                 cpus_have_const_cap(ARM64_HAS_GENERIC_AUTH);
     798             : }
     799             : 
     800             : static inline bool system_has_full_ptr_auth(void)
     801             : {
     802             :         return system_supports_address_auth() && system_supports_generic_auth();
     803             : }
     804             : 
     805             : static __always_inline bool system_uses_irq_prio_masking(void)
     806             : {
     807             :         return IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) &&
     808             :                cpus_have_const_cap(ARM64_HAS_GIC_PRIO_MASKING);
     809             : }
     810             : 
     811             : static inline bool system_supports_mte(void)
     812             : {
     813             :         return IS_ENABLED(CONFIG_ARM64_MTE) &&
     814             :                 cpus_have_const_cap(ARM64_MTE);
     815             : }
     816             : 
     817             : static inline bool system_has_prio_mask_debugging(void)
     818             : {
     819             :         return IS_ENABLED(CONFIG_ARM64_DEBUG_PRIORITY_MASKING) &&
     820             :                system_uses_irq_prio_masking();
     821             : }
     822             : 
     823           0 : static inline bool system_supports_bti(void)
     824             : {
     825           0 :         return IS_ENABLED(CONFIG_ARM64_BTI) && cpus_have_const_cap(ARM64_BTI);
     826             : }
     827             : 
     828             : static inline bool system_supports_tlb_range(void)
     829             : {
     830             :         return IS_ENABLED(CONFIG_ARM64_TLB_RANGE) &&
     831             :                 cpus_have_const_cap(ARM64_HAS_TLB_RANGE);
     832             : }
     833             : 
     834             : int do_emulate_mrs(struct pt_regs *regs, u32 sys_reg, u32 rt);
     835             : bool try_emulate_mrs(struct pt_regs *regs, u32 isn);
     836             : 
     837             : static inline u32 id_aa64mmfr0_parange_to_phys_shift(int parange)
     838             : {
     839             :         switch (parange) {
     840             :         case ID_AA64MMFR0_EL1_PARANGE_32: return 32;
     841             :         case ID_AA64MMFR0_EL1_PARANGE_36: return 36;
     842             :         case ID_AA64MMFR0_EL1_PARANGE_40: return 40;
     843             :         case ID_AA64MMFR0_EL1_PARANGE_42: return 42;
     844             :         case ID_AA64MMFR0_EL1_PARANGE_44: return 44;
     845             :         case ID_AA64MMFR0_EL1_PARANGE_48: return 48;
     846             :         case ID_AA64MMFR0_EL1_PARANGE_52: return 52;
     847             :         /*
     848             :          * A future PE could use a value unknown to the kernel.
     849             :          * However, by the "D10.1.4 Principles of the ID scheme
     850             :          * for fields in ID registers", ARM DDI 0487C.a, any new
     851             :          * value is guaranteed to be higher than what we know already.
     852             :          * As a safe limit, we return the limit supported by the kernel.
     853             :          */
     854             :         default: return CONFIG_ARM64_PA_BITS;
     855             :         }
     856             : }
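                     : 
                     : /*
                     :  * Minimal usage sketch (hypothetical caller), assuming the usual
                     :  * ID_AA64MMFR0_EL1_PARANGE_SHIFT definition from <asm/sysreg.h>:
                     :  *
                     :  *      u64 mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
                     :  *      int parange = cpuid_feature_extract_unsigned_field(mmfr0,
                     :  *                                      ID_AA64MMFR0_EL1_PARANGE_SHIFT);
                     :  *      u32 pa_bits = id_aa64mmfr0_parange_to_phys_shift(parange);
                     :  */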
     857             : 
     858             : /* Check whether hardware update of the Access flag is supported */
     859             : static inline bool cpu_has_hw_af(void)
     860             : {
     861             :         u64 mmfr1;
     862             : 
     863             :         if (!IS_ENABLED(CONFIG_ARM64_HW_AFDBM))
     864             :                 return false;
     865             : 
     866             :         /*
     867             :          * Use the cached version to avoid an emulated mrs operation on
     868             :          * KVM guests.
     869             :          */
     870             :         mmfr1 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1);
     871             :         return cpuid_feature_extract_unsigned_field(mmfr1,
     872             :                                                 ID_AA64MMFR1_EL1_HAFDBS_SHIFT);
     873             : }
     874             : 
     875             : static inline bool cpu_has_pan(void)
     876             : {
     877             :         u64 mmfr1 = read_cpuid(ID_AA64MMFR1_EL1);
     878             :         return cpuid_feature_extract_unsigned_field(mmfr1,
     879             :                                                     ID_AA64MMFR1_EL1_PAN_SHIFT);
     880             : }
     881             : 
     882             : #ifdef CONFIG_ARM64_AMU_EXTN
     883             : /* Check whether the cpu supports the Activity Monitors Unit (AMU) */
     884             : extern bool cpu_has_amu_feat(int cpu);
     885             : #else
     886             : static inline bool cpu_has_amu_feat(int cpu)
     887             : {
     888             :         return false;
     889             : }
     890             : #endif
     891             : 
     892             : /* Get a cpu that supports the Activity Monitors Unit (AMU) */
     893             : extern int get_cpu_with_amu_feat(void);
     894             : 
     895             : static inline unsigned int get_vmid_bits(u64 mmfr1)
     896             : {
     897             :         int vmid_bits;
     898             : 
     899             :         vmid_bits = cpuid_feature_extract_unsigned_field(mmfr1,
     900             :                                                 ID_AA64MMFR1_EL1_VMIDBits_SHIFT);
     901             :         if (vmid_bits == ID_AA64MMFR1_EL1_VMIDBits_16)
     902             :                 return 16;
     903             : 
     904             :         /*
     905             :          * Return the default here even if any reserved
     906             :          * value is fetched from the system register.
     907             :          */
     908             :         return 8;
     909             : }
     910             : 
     911             : s64 arm64_ftr_safe_value(const struct arm64_ftr_bits *ftrp, s64 new, s64 cur);
     912             : struct arm64_ftr_reg *get_arm64_ftr_reg(u32 sys_id);
     913             : 
     914             : extern struct arm64_ftr_override id_aa64mmfr1_override;
     915             : extern struct arm64_ftr_override id_aa64pfr0_override;
     916             : extern struct arm64_ftr_override id_aa64pfr1_override;
     917             : extern struct arm64_ftr_override id_aa64zfr0_override;
     918             : extern struct arm64_ftr_override id_aa64smfr0_override;
     919             : extern struct arm64_ftr_override id_aa64isar1_override;
     920             : extern struct arm64_ftr_override id_aa64isar2_override;
     921             : 
     922             : extern struct arm64_ftr_override arm64_sw_feature_override;
     923             : 
     924             : u32 get_kvm_ipa_limit(void);
     925             : void dump_cpu_features(void);
     926             : 
     927             : #endif /* __ASSEMBLY__ */
     928             : 
     929             : #endif

Generated by: LCOV version 1.14