LCOV - code coverage report
Current view: top level - arch/arm64/include/asm - pgtable.h (source / functions)
Test: fstests of 6.5.0-rc3-acha @ Mon Jul 31 20:08:06 PDT 2023
Date: 2023-07-31 20:08:07
Coverage: Lines: 0 / 3 (0.0 %), Functions: 0 / 1 (0.0 %)

          Line data    Source code
       1             : /* SPDX-License-Identifier: GPL-2.0-only */
       2             : /*
       3             :  * Copyright (C) 2012 ARM Ltd.
       4             :  */
       5             : #ifndef __ASM_PGTABLE_H
       6             : #define __ASM_PGTABLE_H
       7             : 
       8             : #include <asm/bug.h>
       9             : #include <asm/proc-fns.h>
      10             : 
      11             : #include <asm/memory.h>
      12             : #include <asm/mte.h>
      13             : #include <asm/pgtable-hwdef.h>
      14             : #include <asm/pgtable-prot.h>
      15             : #include <asm/tlbflush.h>
      16             : 
      17             : /*
      18             :  * VMALLOC range.
      19             :  *
      20             :  * VMALLOC_START: beginning of the kernel vmalloc space
      21             :  * VMALLOC_END: extends to the available space below vmemmap, PCI I/O space
      22             :  *      and fixed mappings
      23             :  */
      24             : #define VMALLOC_START           (MODULES_END)
      25             : #define VMALLOC_END             (VMEMMAP_START - SZ_256M)
      26             : 
      27             : #define vmemmap                 ((struct page *)VMEMMAP_START - (memstart_addr >> PAGE_SHIFT))
      28             : 
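
The vmemmap definition above biases the base of the struct page array backwards by the first pfn, so that indexing it with a raw pfn works even though RAM does not start at physical address 0. A rough stand-alone model of that arithmetic (all DEMO_* names and sizes are assumptions for illustration, not the kernel's values):

#include <assert.h>
#include <stdint.h>

struct demo_page { uint64_t flags; uint64_t pad[7]; };  /* stand-in for struct page */

#define DEMO_PAGE_SHIFT    12
#define DEMO_VMEMMAP_START 0x10000000UL   /* assumed virtual base of the page array */
#define DEMO_MEMSTART      0x80000000UL   /* assumed physical start of RAM */

int main(void)
{
        unsigned long first_pfn = DEMO_MEMSTART >> DEMO_PAGE_SHIFT;

        /* "vmemmap" = array base biased back by the first pfn, as in the macro above. */
        uintptr_t vmemmap = DEMO_VMEMMAP_START - first_pfn * sizeof(struct demo_page);

        /* Indexing with an absolute pfn then lands at the start of the real array. */
        assert(vmemmap + first_pfn * sizeof(struct demo_page) == DEMO_VMEMMAP_START);
        return 0;
}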
      29             : #ifndef __ASSEMBLY__
      30             : 
      31             : #include <asm/cmpxchg.h>
      32             : #include <asm/fixmap.h>
      33             : #include <linux/mmdebug.h>
      34             : #include <linux/mm_types.h>
      35             : #include <linux/sched.h>
      36             : #include <linux/page_table_check.h>
      37             : 
      38             : #ifdef CONFIG_TRANSPARENT_HUGEPAGE
      39             : #define __HAVE_ARCH_FLUSH_PMD_TLB_RANGE
      40             : 
      41             : /* Set stride and tlb_level in flush_*_tlb_range */
      42             : #define flush_pmd_tlb_range(vma, addr, end)     \
      43             :         __flush_tlb_range(vma, addr, end, PMD_SIZE, false, 2)
      44             : #define flush_pud_tlb_range(vma, addr, end)     \
      45             :         __flush_tlb_range(vma, addr, end, PUD_SIZE, false, 1)
      46             : #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
      47             : 
      48             : static inline bool arch_thp_swp_supported(void)
      49             : {
      50             :         return !system_supports_mte();
      51             : }
      52             : #define arch_thp_swp_supported arch_thp_swp_supported
      53             : 
      54             : /*
      55             :  * Outside of a few very special situations (e.g. hibernation), we always
      56             :  * use broadcast TLB invalidation instructions, therefore a spurious page
      57             :  * fault on one CPU which has been handled concurrently by another CPU
      58             :  * does not need to perform additional invalidation.
      59             :  */
      60             : #define flush_tlb_fix_spurious_fault(vma, address, ptep) do { } while (0)
      61             : 
      62             : /*
      63             :  * ZERO_PAGE is a global shared page that is always zero: used
      64             :  * for zero-mapped memory areas etc..
      65             :  */
      66             : extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
      67             : #define ZERO_PAGE(vaddr)        phys_to_page(__pa_symbol(empty_zero_page))
      68             : 
      69             : #define pte_ERROR(e)    \
      70             :         pr_err("%s:%d: bad pte %016llx.\n", __FILE__, __LINE__, pte_val(e))
      71             : 
      72             : /*
      73             :  * Macros to convert between a physical address and its placement in a
      74             :  * page table entry, taking care of 52-bit addresses.
      75             :  */
      76             : #ifdef CONFIG_ARM64_PA_BITS_52
      77             : static inline phys_addr_t __pte_to_phys(pte_t pte)
      78             : {
      79             :         return (pte_val(pte) & PTE_ADDR_LOW) |
      80             :                 ((pte_val(pte) & PTE_ADDR_HIGH) << PTE_ADDR_HIGH_SHIFT);
      81             : }
      82             : static inline pteval_t __phys_to_pte_val(phys_addr_t phys)
      83             : {
      84             :         return (phys | (phys >> PTE_ADDR_HIGH_SHIFT)) & PTE_ADDR_MASK;
      85             : }
      86             : #else
      87             : #define __pte_to_phys(pte)      (pte_val(pte) & PTE_ADDR_MASK)
      88             : #define __phys_to_pte_val(phys) (phys)
      89             : #endif
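
For the 52-bit case above, the physical-address bits that do not fit in the pte's normal address field are stashed, shifted down, in otherwise-unused low bits and shifted back up on the way out. A rough stand-alone round trip of that idea (the DEMO_* masks are illustrative assumptions, not the kernel's PTE_ADDR_LOW/PTE_ADDR_HIGH/PTE_ADDR_HIGH_SHIFT values, which depend on the page size):

#include <assert.h>
#include <stdint.h>

/* Illustrative stand-ins (assumptions, not the kernel's real constants). */
#define DEMO_ADDR_LOW        0x0000fffffffff000ULL  /* PA bits [47:12] */
#define DEMO_ADDR_HIGH       0x0000000000000f00ULL  /* bits [11:8] hold PA bits [51:48] */
#define DEMO_ADDR_HIGH_SHIFT 40                     /* distance the high bits are moved down */

static uint64_t demo_phys_to_pte(uint64_t phys)
{
        /* High PA bits move down into a spare low field of the pte. */
        return (phys | (phys >> DEMO_ADDR_HIGH_SHIFT)) & (DEMO_ADDR_LOW | DEMO_ADDR_HIGH);
}

static uint64_t demo_pte_to_phys(uint64_t pte)
{
        /* Recover: keep the low bits, shift the stashed high bits back up. */
        return (pte & DEMO_ADDR_LOW) | ((pte & DEMO_ADDR_HIGH) << DEMO_ADDR_HIGH_SHIFT);
}

int main(void)
{
        uint64_t pa = 0x000f123456789000ULL;    /* a page-aligned 52-bit physical address */

        assert(demo_pte_to_phys(demo_phys_to_pte(pa)) == pa);
        return 0;
}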
      90             : 
      91             : #define pte_pfn(pte)            (__pte_to_phys(pte) >> PAGE_SHIFT)
      92             : #define pfn_pte(pfn,prot)       \
      93             :         __pte(__phys_to_pte_val((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot))
      94             : 
      95             : #define pte_none(pte)           (!pte_val(pte))
      96             : #define pte_clear(mm,addr,ptep) set_pte(ptep, __pte(0))
      97             : #define pte_page(pte)           (pfn_to_page(pte_pfn(pte)))
      98             : 
      99             : /*
     100             :  * The following only work if pte_present(). Undefined behaviour otherwise.
     101             :  */
     102             : #define pte_present(pte)        (!!(pte_val(pte) & (PTE_VALID | PTE_PROT_NONE)))
     103             : #define pte_young(pte)          (!!(pte_val(pte) & PTE_AF))
     104             : #define pte_special(pte)        (!!(pte_val(pte) & PTE_SPECIAL))
     105             : #define pte_write(pte)          (!!(pte_val(pte) & PTE_WRITE))
     106             : #define pte_user(pte)           (!!(pte_val(pte) & PTE_USER))
     107             : #define pte_user_exec(pte)      (!(pte_val(pte) & PTE_UXN))
     108             : #define pte_cont(pte)           (!!(pte_val(pte) & PTE_CONT))
     109             : #define pte_devmap(pte)         (!!(pte_val(pte) & PTE_DEVMAP))
     110             : #define pte_tagged(pte)         ((pte_val(pte) & PTE_ATTRINDX_MASK) == \
     111             :                                  PTE_ATTRINDX(MT_NORMAL_TAGGED))
     112             : 
     113             : #define pte_cont_addr_end(addr, end)                                            \
     114             : ({      unsigned long __boundary = ((addr) + CONT_PTE_SIZE) & CONT_PTE_MASK;        \
     115             :         (__boundary - 1 < (end) - 1) ? __boundary : (end);                   \
     116             : })
     117             : 
     118             : #define pmd_cont_addr_end(addr, end)                                            \
     119             : ({      unsigned long __boundary = ((addr) + CONT_PMD_SIZE) & CONT_PMD_MASK;        \
     120             :         (__boundary - 1 < (end) - 1) ? __boundary : (end);                   \
     121             : })
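
pte_cont_addr_end()/pmd_cont_addr_end() above clamp a walk to the next contiguous-range boundary or to 'end', whichever comes first; the '- 1' in the comparison is the usual addr_end idiom so that a range ending at the very top of the address space (end wrapped to 0) is still handled. A small stand-alone version, assuming a 64 KiB contiguous size and a 64-bit unsigned long (the real CONT_PTE_SIZE depends on the configured page size):

#include <assert.h>

/* Assumed stand-ins for CONT_PTE_SIZE/CONT_PTE_MASK (64 KiB = 16 x 4 KiB ptes). */
#define DEMO_CONT_SIZE 0x10000UL
#define DEMO_CONT_MASK (~(DEMO_CONT_SIZE - 1))

/* Same shape as the macros above: next boundary, clamped to 'end'. */
static unsigned long demo_cont_addr_end(unsigned long addr, unsigned long end)
{
        unsigned long boundary = (addr + DEMO_CONT_SIZE) & DEMO_CONT_MASK;

        return (boundary - 1 < end - 1) ? boundary : end;
}

int main(void)
{
        /* 0x12345000 rounds up to the next 64 KiB boundary... */
        assert(demo_cont_addr_end(0x12345000UL, 0x20000000UL) == 0x12350000UL);
        /* ...unless 'end' comes first, in which case 'end' wins. */
        assert(demo_cont_addr_end(0x12345000UL, 0x12346000UL) == 0x12346000UL);
        /* With end == 0 (a range running to the top of a 64-bit address space),
         * the '- 1' keeps the intermediate boundary from being skipped. */
        assert(demo_cont_addr_end(0xFFFFFFFFFFF00000UL, 0UL) == 0xFFFFFFFFFFF10000UL);
        return 0;
}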
     122             : 
     123             : #define pte_hw_dirty(pte)       (pte_write(pte) && !(pte_val(pte) & PTE_RDONLY))
     124             : #define pte_sw_dirty(pte)       (!!(pte_val(pte) & PTE_DIRTY))
     125             : #define pte_dirty(pte)          (pte_sw_dirty(pte) || pte_hw_dirty(pte))
     126             : 
     127             : #define pte_valid(pte)          (!!(pte_val(pte) & PTE_VALID))
     128             : /*
     129             :  * Execute-only user mappings do not have the PTE_USER bit set. All valid
     130             :  * kernel mappings have the PTE_UXN bit set.
     131             :  */
     132             : #define pte_valid_not_user(pte) \
     133             :         ((pte_val(pte) & (PTE_VALID | PTE_USER | PTE_UXN)) == (PTE_VALID | PTE_UXN))
     134             : /*
     135             :  * Could the pte be present in the TLB? We must check mm_tlb_flush_pending
     136             :  * so that we don't erroneously return false for pages that have been
     137             :  * remapped as PROT_NONE but are yet to be flushed from the TLB.
     138             :  * Note that we can't make any assumptions based on the state of the access
     139             :  * flag, since ptep_clear_flush_young() elides a DSB when invalidating the
     140             :  * TLB.
     141             :  */
     142             : #define pte_accessible(mm, pte) \
     143             :         (mm_tlb_flush_pending(mm) ? pte_present(pte) : pte_valid(pte))
     144             : 
     145             : /*
     146             :  * p??_access_permitted() is true for valid user mappings (PTE_USER
      147             :  * bit set, subject to the write permission check). Execute-only
      148             :  * mappings, like PROT_EXEC with EPAN (both PTE_USER and PTE_UXN bits
      149             :  * not set), must return false. PROT_NONE mappings do not have the
placeholder
     150             :  * PTE_VALID bit set.
     151             :  */
     152             : #define pte_access_permitted(pte, write) \
     153             :         (((pte_val(pte) & (PTE_VALID | PTE_USER)) == (PTE_VALID | PTE_USER)) && (!(write) || pte_write(pte)))
     154             : #define pmd_access_permitted(pmd, write) \
     155             :         (pte_access_permitted(pmd_pte(pmd), (write)))
     156             : #define pud_access_permitted(pud, write) \
     157             :         (pte_access_permitted(pud_pte(pud), (write)))
     158             : 
     159             : static inline pte_t clear_pte_bit(pte_t pte, pgprot_t prot)
     160             : {
     161             :         pte_val(pte) &= ~pgprot_val(prot);
     162             :         return pte;
     163             : }
     164             : 
     165             : static inline pte_t set_pte_bit(pte_t pte, pgprot_t prot)
     166             : {
     167             :         pte_val(pte) |= pgprot_val(prot);
     168             :         return pte;
     169             : }
     170             : 
     171             : static inline pmd_t clear_pmd_bit(pmd_t pmd, pgprot_t prot)
     172             : {
     173             :         pmd_val(pmd) &= ~pgprot_val(prot);
     174             :         return pmd;
     175             : }
     176             : 
     177             : static inline pmd_t set_pmd_bit(pmd_t pmd, pgprot_t prot)
     178             : {
     179             :         pmd_val(pmd) |= pgprot_val(prot);
     180             :         return pmd;
     181             : }
     182             : 
     183             : static inline pte_t pte_mkwrite(pte_t pte)
     184             : {
     185             :         pte = set_pte_bit(pte, __pgprot(PTE_WRITE));
     186             :         pte = clear_pte_bit(pte, __pgprot(PTE_RDONLY));
     187             :         return pte;
     188             : }
     189             : 
     190             : static inline pte_t pte_mkclean(pte_t pte)
     191             : {
     192             :         pte = clear_pte_bit(pte, __pgprot(PTE_DIRTY));
     193             :         pte = set_pte_bit(pte, __pgprot(PTE_RDONLY));
     194             : 
     195             :         return pte;
     196             : }
     197             : 
     198             : static inline pte_t pte_mkdirty(pte_t pte)
     199             : {
     200             :         pte = set_pte_bit(pte, __pgprot(PTE_DIRTY));
     201             : 
     202             :         if (pte_write(pte))
     203             :                 pte = clear_pte_bit(pte, __pgprot(PTE_RDONLY));
     204             : 
     205             :         return pte;
     206             : }
     207             : 
     208             : static inline pte_t pte_wrprotect(pte_t pte)
     209             : {
     210             :         /*
     211             :          * If hardware-dirty (PTE_WRITE/DBM bit set and PTE_RDONLY
     212             :          * clear), set the PTE_DIRTY bit.
     213             :          */
     214             :         if (pte_hw_dirty(pte))
     215             :                 pte = pte_mkdirty(pte);
     216             : 
     217             :         pte = clear_pte_bit(pte, __pgprot(PTE_WRITE));
     218             :         pte = set_pte_bit(pte, __pgprot(PTE_RDONLY));
     219             :         return pte;
     220             : }
     221             : 
     222             : static inline pte_t pte_mkold(pte_t pte)
     223             : {
     224             :         return clear_pte_bit(pte, __pgprot(PTE_AF));
     225             : }
     226             : 
     227             : static inline pte_t pte_mkyoung(pte_t pte)
     228             : {
     229             :         return set_pte_bit(pte, __pgprot(PTE_AF));
     230             : }
     231             : 
     232             : static inline pte_t pte_mkspecial(pte_t pte)
     233             : {
     234             :         return set_pte_bit(pte, __pgprot(PTE_SPECIAL));
     235             : }
     236             : 
     237             : static inline pte_t pte_mkcont(pte_t pte)
     238             : {
     239             :         pte = set_pte_bit(pte, __pgprot(PTE_CONT));
     240             :         return set_pte_bit(pte, __pgprot(PTE_TYPE_PAGE));
     241             : }
     242             : 
     243             : static inline pte_t pte_mknoncont(pte_t pte)
     244             : {
     245             :         return clear_pte_bit(pte, __pgprot(PTE_CONT));
     246             : }
     247             : 
     248             : static inline pte_t pte_mkpresent(pte_t pte)
     249             : {
     250             :         return set_pte_bit(pte, __pgprot(PTE_VALID));
     251             : }
     252             : 
     253             : static inline pmd_t pmd_mkcont(pmd_t pmd)
     254             : {
     255             :         return __pmd(pmd_val(pmd) | PMD_SECT_CONT);
     256             : }
     257             : 
     258             : static inline pte_t pte_mkdevmap(pte_t pte)
     259             : {
     260             :         return set_pte_bit(pte, __pgprot(PTE_DEVMAP | PTE_SPECIAL));
     261             : }
     262             : 
     263             : static inline void set_pte(pte_t *ptep, pte_t pte)
     264             : {
     265             :         WRITE_ONCE(*ptep, pte);
     266             : 
     267             :         /*
     268             :          * Only if the new pte is valid and kernel, otherwise TLB maintenance
     269             :          * or update_mmu_cache() have the necessary barriers.
     270             :          */
     271             :         if (pte_valid_not_user(pte)) {
     272             :                 dsb(ishst);
     273             :                 isb();
     274             :         }
     275             : }
     276             : 
     277             : extern void __sync_icache_dcache(pte_t pteval);
     278             : bool pgattr_change_is_safe(u64 old, u64 new);
     279             : 
     280             : /*
     281             :  * PTE bits configuration in the presence of hardware Dirty Bit Management
     282             :  * (PTE_WRITE == PTE_DBM):
     283             :  *
     284             :  * Dirty  Writable | PTE_RDONLY  PTE_WRITE  PTE_DIRTY (sw)
     285             :  *   0      0      |   1           0          0
     286             :  *   0      1      |   1           1          0
     287             :  *   1      0      |   1           0          1
     288             :  *   1      1      |   0           1          x
     289             :  *
      290             :  * When hardware DBM is not present, the software PTE_DIRTY bit is updated via
     291             :  * the page fault mechanism. Checking the dirty status of a pte becomes:
     292             :  *
     293             :  *   PTE_DIRTY || (PTE_WRITE && !PTE_RDONLY)
     294             :  */
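
The dirty check stated at the end of the comment above, PTE_DIRTY || (PTE_WRITE && !PTE_RDONLY), can be exercised in isolation against the four rows of the table. The bit positions below are assumptions for illustration only, not the real arm64 pte layout:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

/* Illustrative bit positions (assumptions, not the real layout). */
#define DEMO_PTE_RDONLY (1u << 0)
#define DEMO_PTE_WRITE  (1u << 1)   /* doubles as the DBM bit */
#define DEMO_PTE_DIRTY  (1u << 2)   /* software dirty bit */

static bool demo_pte_hw_dirty(uint32_t pte)
{
        return (pte & DEMO_PTE_WRITE) && !(pte & DEMO_PTE_RDONLY);
}

static bool demo_pte_dirty(uint32_t pte)
{
        return (pte & DEMO_PTE_DIRTY) || demo_pte_hw_dirty(pte);
}

int main(void)
{
        /* The four rows of the table above, as (dirty, writable) cases. */
        assert(!demo_pte_dirty(DEMO_PTE_RDONLY));                    /* clean, read-only */
        assert(!demo_pte_dirty(DEMO_PTE_RDONLY | DEMO_PTE_WRITE));   /* clean, writable  */
        assert(demo_pte_dirty(DEMO_PTE_RDONLY | DEMO_PTE_DIRTY));    /* dirty, read-only */
        assert(demo_pte_dirty(DEMO_PTE_WRITE));                      /* dirty, writable (hw) */
        return 0;
}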
     295             : 
     296             : static inline void __check_safe_pte_update(struct mm_struct *mm, pte_t *ptep,
     297             :                                            pte_t pte)
     298             : {
     299             :         pte_t old_pte;
     300             : 
     301             :         if (!IS_ENABLED(CONFIG_DEBUG_VM))
     302             :                 return;
     303             : 
     304             :         old_pte = READ_ONCE(*ptep);
     305             : 
     306             :         if (!pte_valid(old_pte) || !pte_valid(pte))
     307             :                 return;
     308             :         if (mm != current->active_mm && atomic_read(&mm->mm_users) <= 1)
     309             :                 return;
     310             : 
     311             :         /*
     312             :          * Check for potential race with hardware updates of the pte
     313             :          * (ptep_set_access_flags safely changes valid ptes without going
     314             :          * through an invalid entry).
     315             :          */
     316             :         VM_WARN_ONCE(!pte_young(pte),
     317             :                      "%s: racy access flag clearing: 0x%016llx -> 0x%016llx",
     318             :                      __func__, pte_val(old_pte), pte_val(pte));
     319             :         VM_WARN_ONCE(pte_write(old_pte) && !pte_dirty(pte),
     320             :                      "%s: racy dirty state clearing: 0x%016llx -> 0x%016llx",
     321             :                      __func__, pte_val(old_pte), pte_val(pte));
     322             :         VM_WARN_ONCE(!pgattr_change_is_safe(pte_val(old_pte), pte_val(pte)),
     323             :                      "%s: unsafe attribute change: 0x%016llx -> 0x%016llx",
     324             :                      __func__, pte_val(old_pte), pte_val(pte));
     325             : }
     326             : 
     327             : static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
     328             :                                 pte_t *ptep, pte_t pte)
     329             : {
     330             :         if (pte_present(pte) && pte_user_exec(pte) && !pte_special(pte))
     331             :                 __sync_icache_dcache(pte);
     332             : 
     333             :         /*
     334             :          * If the PTE would provide user space access to the tags associated
     335             :          * with it then ensure that the MTE tags are synchronised.  Although
     336             :          * pte_access_permitted() returns false for exec only mappings, they
     337             :          * don't expose tags (instruction fetches don't check tags).
     338             :          */
     339             :         if (system_supports_mte() && pte_access_permitted(pte, false) &&
     340             :             !pte_special(pte)) {
     341             :                 pte_t old_pte = READ_ONCE(*ptep);
     342             :                 /*
     343             :                  * We only need to synchronise if the new PTE has tags enabled
     344             :                  * or if swapping in (in which case another mapping may have
     345             :                  * set tags in the past even if this PTE isn't tagged).
     346             :                  * (!pte_none() && !pte_present()) is an open coded version of
     347             :                  * is_swap_pte()
     348             :                  */
     349             :                 if (pte_tagged(pte) || (!pte_none(old_pte) && !pte_present(old_pte)))
     350             :                         mte_sync_tags(old_pte, pte);
     351             :         }
     352             : 
     353             :         __check_safe_pte_update(mm, ptep, pte);
     354             : 
     355             :         set_pte(ptep, pte);
     356             : }
     357             : 
     358             : static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
     359             :                               pte_t *ptep, pte_t pte)
     360             : {
     361             :         page_table_check_pte_set(mm, addr, ptep, pte);
     362             :         return __set_pte_at(mm, addr, ptep, pte);
     363             : }
     364             : 
     365             : /*
     366             :  * Huge pte definitions.
     367             :  */
     368             : #define pte_mkhuge(pte)         (__pte(pte_val(pte) & ~PTE_TABLE_BIT))
     369             : 
     370             : /*
     371             :  * Hugetlb definitions.
     372             :  */
     373             : #define HUGE_MAX_HSTATE         4
     374             : #define HPAGE_SHIFT             PMD_SHIFT
     375             : #define HPAGE_SIZE              (_AC(1, UL) << HPAGE_SHIFT)
     376             : #define HPAGE_MASK              (~(HPAGE_SIZE - 1))
     377             : #define HUGETLB_PAGE_ORDER      (HPAGE_SHIFT - PAGE_SHIFT)
     378             : 
     379             : static inline pte_t pgd_pte(pgd_t pgd)
     380             : {
     381             :         return __pte(pgd_val(pgd));
     382             : }
     383             : 
     384             : static inline pte_t p4d_pte(p4d_t p4d)
     385             : {
     386             :         return __pte(p4d_val(p4d));
     387             : }
     388             : 
     389             : static inline pte_t pud_pte(pud_t pud)
     390             : {
     391             :         return __pte(pud_val(pud));
     392             : }
     393             : 
     394             : static inline pud_t pte_pud(pte_t pte)
     395             : {
     396             :         return __pud(pte_val(pte));
     397             : }
     398             : 
     399             : static inline pmd_t pud_pmd(pud_t pud)
     400             : {
     401             :         return __pmd(pud_val(pud));
     402             : }
     403             : 
     404             : static inline pte_t pmd_pte(pmd_t pmd)
     405             : {
     406             :         return __pte(pmd_val(pmd));
     407             : }
     408             : 
     409             : static inline pmd_t pte_pmd(pte_t pte)
     410             : {
     411             :         return __pmd(pte_val(pte));
     412             : }
     413             : 
     414             : static inline pgprot_t mk_pud_sect_prot(pgprot_t prot)
     415             : {
     416             :         return __pgprot((pgprot_val(prot) & ~PUD_TABLE_BIT) | PUD_TYPE_SECT);
     417             : }
     418             : 
     419             : static inline pgprot_t mk_pmd_sect_prot(pgprot_t prot)
     420             : {
     421             :         return __pgprot((pgprot_val(prot) & ~PMD_TABLE_BIT) | PMD_TYPE_SECT);
     422             : }
     423             : 
     424             : static inline pte_t pte_swp_mkexclusive(pte_t pte)
     425             : {
     426             :         return set_pte_bit(pte, __pgprot(PTE_SWP_EXCLUSIVE));
     427             : }
     428             : 
     429             : static inline int pte_swp_exclusive(pte_t pte)
     430             : {
     431             :         return pte_val(pte) & PTE_SWP_EXCLUSIVE;
     432             : }
     433             : 
     434             : static inline pte_t pte_swp_clear_exclusive(pte_t pte)
     435             : {
     436             :         return clear_pte_bit(pte, __pgprot(PTE_SWP_EXCLUSIVE));
     437             : }
     438             : 
     439             : /*
     440             :  * Select all bits except the pfn
     441             :  */
     442             : static inline pgprot_t pte_pgprot(pte_t pte)
     443             : {
     444             :         unsigned long pfn = pte_pfn(pte);
     445             : 
     446             :         return __pgprot(pte_val(pfn_pte(pfn, __pgprot(0))) ^ pte_val(pte));
     447             : }
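
The XOR in pte_pgprot() above works because pfn_pte(pfn, __pgprot(0)) reproduces exactly the address bits of the original pte, so XOR-ing the two cancels the pfn and leaves only the protection bits. A toy version (the field layout below is an assumption):

#include <assert.h>
#include <stdint.h>

/* Assumption for illustration: low 12 bits are 'prot', the rest is the pfn field. */
#define DEMO_PAGE_SHIFT 12
#define DEMO_ADDR_MASK  0xfffffffffffff000ULL

static uint64_t demo_pfn_pte(uint64_t pfn, uint64_t prot)
{
        return (pfn << DEMO_PAGE_SHIFT) | prot;
}

static uint64_t demo_pte_pfn(uint64_t pte)
{
        return (pte & DEMO_ADDR_MASK) >> DEMO_PAGE_SHIFT;
}

/* Same shape as pte_pgprot(): rebuild a pte with empty prot from the pfn,
 * then XOR it with the original; the address bits cancel, the prot bits stay. */
static uint64_t demo_pte_pgprot(uint64_t pte)
{
        return demo_pfn_pte(demo_pte_pfn(pte), 0) ^ pte;
}

int main(void)
{
        uint64_t pte = demo_pfn_pte(0x12345, 0x7c3);   /* arbitrary pfn + prot bits */

        assert(demo_pte_pgprot(pte) == 0x7c3);
        return 0;
}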
     448             : 
     449             : #ifdef CONFIG_NUMA_BALANCING
     450             : /*
     451             :  * See the comment in include/linux/pgtable.h
     452             :  */
     453             : static inline int pte_protnone(pte_t pte)
     454             : {
     455             :         return (pte_val(pte) & (PTE_VALID | PTE_PROT_NONE)) == PTE_PROT_NONE;
     456             : }
     457             : 
     458             : static inline int pmd_protnone(pmd_t pmd)
     459             : {
     460             :         return pte_protnone(pmd_pte(pmd));
     461             : }
     462             : #endif
     463             : 
     464             : #define pmd_present_invalid(pmd)     (!!(pmd_val(pmd) & PMD_PRESENT_INVALID))
     465             : 
     466             : static inline int pmd_present(pmd_t pmd)
     467             : {
     468           0 :         return pte_present(pmd_pte(pmd)) || pmd_present_invalid(pmd);
     469             : }
     470             : 
     471             : /*
     472             :  * THP definitions.
     473             :  */
     474             : 
     475             : #ifdef CONFIG_TRANSPARENT_HUGEPAGE
     476           0 : static inline int pmd_trans_huge(pmd_t pmd)
     477             : {
     478           0 :         return pmd_val(pmd) && pmd_present(pmd) && !(pmd_val(pmd) & PMD_TABLE_BIT);
     479             : }
     480             : #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
     481             : 
     482             : #define pmd_dirty(pmd)          pte_dirty(pmd_pte(pmd))
     483             : #define pmd_young(pmd)          pte_young(pmd_pte(pmd))
     484             : #define pmd_valid(pmd)          pte_valid(pmd_pte(pmd))
     485             : #define pmd_user(pmd)           pte_user(pmd_pte(pmd))
     486             : #define pmd_user_exec(pmd)      pte_user_exec(pmd_pte(pmd))
     487             : #define pmd_cont(pmd)           pte_cont(pmd_pte(pmd))
     488             : #define pmd_wrprotect(pmd)      pte_pmd(pte_wrprotect(pmd_pte(pmd)))
     489             : #define pmd_mkold(pmd)          pte_pmd(pte_mkold(pmd_pte(pmd)))
     490             : #define pmd_mkwrite(pmd)        pte_pmd(pte_mkwrite(pmd_pte(pmd)))
     491             : #define pmd_mkclean(pmd)        pte_pmd(pte_mkclean(pmd_pte(pmd)))
     492             : #define pmd_mkdirty(pmd)        pte_pmd(pte_mkdirty(pmd_pte(pmd)))
     493             : #define pmd_mkyoung(pmd)        pte_pmd(pte_mkyoung(pmd_pte(pmd)))
     494             : 
     495             : static inline pmd_t pmd_mkinvalid(pmd_t pmd)
     496             : {
     497             :         pmd = set_pmd_bit(pmd, __pgprot(PMD_PRESENT_INVALID));
     498             :         pmd = clear_pmd_bit(pmd, __pgprot(PMD_SECT_VALID));
     499             : 
     500             :         return pmd;
     501             : }
     502             : 
     503             : #define pmd_thp_or_huge(pmd)    (pmd_huge(pmd) || pmd_trans_huge(pmd))
     504             : 
     505             : #define pmd_write(pmd)          pte_write(pmd_pte(pmd))
     506             : 
     507             : #define pmd_mkhuge(pmd)         (__pmd(pmd_val(pmd) & ~PMD_TABLE_BIT))
     508             : 
     509             : #ifdef CONFIG_TRANSPARENT_HUGEPAGE
     510             : #define pmd_devmap(pmd)         pte_devmap(pmd_pte(pmd))
     511             : #endif
     512             : static inline pmd_t pmd_mkdevmap(pmd_t pmd)
     513             : {
     514             :         return pte_pmd(set_pte_bit(pmd_pte(pmd), __pgprot(PTE_DEVMAP)));
     515             : }
     516             : 
     517             : #define __pmd_to_phys(pmd)      __pte_to_phys(pmd_pte(pmd))
     518             : #define __phys_to_pmd_val(phys) __phys_to_pte_val(phys)
     519             : #define pmd_pfn(pmd)            ((__pmd_to_phys(pmd) & PMD_MASK) >> PAGE_SHIFT)
     520             : #define pfn_pmd(pfn,prot)       __pmd(__phys_to_pmd_val((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot))
     521             : #define mk_pmd(page,prot)       pfn_pmd(page_to_pfn(page),prot)
     522             : 
     523             : #define pud_young(pud)          pte_young(pud_pte(pud))
     524             : #define pud_mkyoung(pud)        pte_pud(pte_mkyoung(pud_pte(pud)))
     525             : #define pud_write(pud)          pte_write(pud_pte(pud))
     526             : 
     527             : #define pud_mkhuge(pud)         (__pud(pud_val(pud) & ~PUD_TABLE_BIT))
     528             : 
     529             : #define __pud_to_phys(pud)      __pte_to_phys(pud_pte(pud))
     530             : #define __phys_to_pud_val(phys) __phys_to_pte_val(phys)
     531             : #define pud_pfn(pud)            ((__pud_to_phys(pud) & PUD_MASK) >> PAGE_SHIFT)
     532             : #define pfn_pud(pfn,prot)       __pud(__phys_to_pud_val((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot))
     533             : 
     534             : static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
     535             :                               pmd_t *pmdp, pmd_t pmd)
     536             : {
     537             :         page_table_check_pmd_set(mm, addr, pmdp, pmd);
     538             :         return __set_pte_at(mm, addr, (pte_t *)pmdp, pmd_pte(pmd));
     539             : }
     540             : 
     541             : static inline void set_pud_at(struct mm_struct *mm, unsigned long addr,
     542             :                               pud_t *pudp, pud_t pud)
     543             : {
     544             :         page_table_check_pud_set(mm, addr, pudp, pud);
     545             :         return __set_pte_at(mm, addr, (pte_t *)pudp, pud_pte(pud));
     546             : }
     547             : 
     548             : #define __p4d_to_phys(p4d)      __pte_to_phys(p4d_pte(p4d))
     549             : #define __phys_to_p4d_val(phys) __phys_to_pte_val(phys)
     550             : 
     551             : #define __pgd_to_phys(pgd)      __pte_to_phys(pgd_pte(pgd))
     552             : #define __phys_to_pgd_val(phys) __phys_to_pte_val(phys)
     553             : 
     554             : #define __pgprot_modify(prot,mask,bits) \
     555             :         __pgprot((pgprot_val(prot) & ~(mask)) | (bits))
     556             : 
     557             : #define pgprot_nx(prot) \
     558             :         __pgprot_modify(prot, PTE_MAYBE_GP, PTE_PXN)
     559             : 
     560             : /*
     561             :  * Mark the prot value as uncacheable and unbufferable.
     562             :  */
     563             : #define pgprot_noncached(prot) \
     564             :         __pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_DEVICE_nGnRnE) | PTE_PXN | PTE_UXN)
     565             : #define pgprot_writecombine(prot) \
     566             :         __pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_NORMAL_NC) | PTE_PXN | PTE_UXN)
     567             : #define pgprot_device(prot) \
     568             :         __pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_DEVICE_nGnRE) | PTE_PXN | PTE_UXN)
     569             : #define pgprot_tagged(prot) \
     570             :         __pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_NORMAL_TAGGED))
     571             : #define pgprot_mhp      pgprot_tagged
     572             : /*
     573             :  * DMA allocations for non-coherent devices use what the Arm architecture calls
     574             :  * "Normal non-cacheable" memory, which permits speculation, unaligned accesses
     575             :  * and merging of writes.  This is different from "Device-nGnR[nE]" memory which
     576             :  * is intended for MMIO and thus forbids speculation, preserves access size,
     577             :  * requires strict alignment and can also force write responses to come from the
     578             :  * endpoint.
     579             :  */
     580             : #define pgprot_dmacoherent(prot) \
     581             :         __pgprot_modify(prot, PTE_ATTRINDX_MASK, \
     582             :                         PTE_ATTRINDX(MT_NORMAL_NC) | PTE_PXN | PTE_UXN)
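
pgprot_noncached(), pgprot_writecombine(), pgprot_device(), pgprot_tagged() and pgprot_dmacoherent() above all follow the same __pgprot_modify() pattern: clear a field, then OR in the new attribute index plus any extra bits. A toy version with made-up attribute fields (the DEMO_* values are assumptions, not the real MAIR indices):

#include <assert.h>
#include <stdint.h>

/* Assumed layout: a 3-bit attribute-index field plus two execute-never bits. */
#define DEMO_ATTRIDX_MASK   (0x7u << 2)
#define DEMO_ATTRIDX(x)     ((uint32_t)(x) << 2)
#define DEMO_PXN            (1u << 5)
#define DEMO_UXN            (1u << 6)
#define DEMO_MT_NORMAL      0u
#define DEMO_MT_DEVICE      1u

/* Same shape as __pgprot_modify(): clear 'mask', then OR in 'bits'. */
static uint32_t demo_pgprot_modify(uint32_t prot, uint32_t mask, uint32_t bits)
{
        return (prot & ~mask) | bits;
}

int main(void)
{
        uint32_t normal = DEMO_ATTRIDX(DEMO_MT_NORMAL) | 0x3;   /* 0x3: unrelated low bits */

        /* Analogue of pgprot_device(): swap the memory type, forbid execution. */
        uint32_t dev = demo_pgprot_modify(normal, DEMO_ATTRIDX_MASK,
                                          DEMO_ATTRIDX(DEMO_MT_DEVICE) | DEMO_PXN | DEMO_UXN);

        assert((dev & DEMO_ATTRIDX_MASK) == DEMO_ATTRIDX(DEMO_MT_DEVICE));
        assert(dev & DEMO_PXN);
        assert((dev & 0x3) == 0x3);     /* bits outside the mask are preserved */
        return 0;
}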
     583             : 
     584             : #define __HAVE_PHYS_MEM_ACCESS_PROT
     585             : struct file;
     586             : extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
     587             :                                      unsigned long size, pgprot_t vma_prot);
     588             : 
     589             : #define pmd_none(pmd)           (!pmd_val(pmd))
     590             : 
     591             : #define pmd_table(pmd)          ((pmd_val(pmd) & PMD_TYPE_MASK) == \
     592             :                                  PMD_TYPE_TABLE)
     593             : #define pmd_sect(pmd)           ((pmd_val(pmd) & PMD_TYPE_MASK) == \
     594             :                                  PMD_TYPE_SECT)
     595             : #define pmd_leaf(pmd)           (pmd_present(pmd) && !pmd_table(pmd))
     596             : #define pmd_bad(pmd)            (!pmd_table(pmd))
     597             : 
     598             : #define pmd_leaf_size(pmd)      (pmd_cont(pmd) ? CONT_PMD_SIZE : PMD_SIZE)
     599             : #define pte_leaf_size(pte)      (pte_cont(pte) ? CONT_PTE_SIZE : PAGE_SIZE)
     600             : 
     601             : #if defined(CONFIG_ARM64_64K_PAGES) || CONFIG_PGTABLE_LEVELS < 3
     602             : static inline bool pud_sect(pud_t pud) { return false; }
     603             : static inline bool pud_table(pud_t pud) { return true; }
     604             : #else
     605             : #define pud_sect(pud)           ((pud_val(pud) & PUD_TYPE_MASK) == \
     606             :                                  PUD_TYPE_SECT)
     607             : #define pud_table(pud)          ((pud_val(pud) & PUD_TYPE_MASK) == \
     608             :                                  PUD_TYPE_TABLE)
     609             : #endif
     610             : 
     611             : extern pgd_t init_pg_dir[PTRS_PER_PGD];
     612             : extern pgd_t init_pg_end[];
     613             : extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
     614             : extern pgd_t idmap_pg_dir[PTRS_PER_PGD];
     615             : extern pgd_t tramp_pg_dir[PTRS_PER_PGD];
     616             : extern pgd_t reserved_pg_dir[PTRS_PER_PGD];
     617             : 
     618             : extern void set_swapper_pgd(pgd_t *pgdp, pgd_t pgd);
     619             : 
     620             : static inline bool in_swapper_pgdir(void *addr)
     621             : {
     622             :         return ((unsigned long)addr & PAGE_MASK) ==
     623             :                 ((unsigned long)swapper_pg_dir & PAGE_MASK);
     624             : }
     625             : 
     626             : static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
     627             : {
     628             : #ifdef __PAGETABLE_PMD_FOLDED
     629             :         if (in_swapper_pgdir(pmdp)) {
     630             :                 set_swapper_pgd((pgd_t *)pmdp, __pgd(pmd_val(pmd)));
     631             :                 return;
     632             :         }
     633             : #endif /* __PAGETABLE_PMD_FOLDED */
     634             : 
     635             :         WRITE_ONCE(*pmdp, pmd);
     636             : 
     637             :         if (pmd_valid(pmd)) {
     638             :                 dsb(ishst);
     639             :                 isb();
     640             :         }
     641             : }
     642             : 
     643             : static inline void pmd_clear(pmd_t *pmdp)
     644             : {
     645             :         set_pmd(pmdp, __pmd(0));
     646             : }
     647             : 
     648             : static inline phys_addr_t pmd_page_paddr(pmd_t pmd)
     649             : {
     650             :         return __pmd_to_phys(pmd);
     651             : }
     652             : 
     653             : static inline unsigned long pmd_page_vaddr(pmd_t pmd)
     654             : {
     655             :         return (unsigned long)__va(pmd_page_paddr(pmd));
     656             : }
     657             : 
     658             : /* Find an entry in the third-level page table. */
     659             : #define pte_offset_phys(dir,addr)       (pmd_page_paddr(READ_ONCE(*(dir))) + pte_index(addr) * sizeof(pte_t))
     660             : 
     661             : #define pte_set_fixmap(addr)            ((pte_t *)set_fixmap_offset(FIX_PTE, addr))
     662             : #define pte_set_fixmap_offset(pmd, addr)        pte_set_fixmap(pte_offset_phys(pmd, addr))
     663             : #define pte_clear_fixmap()              clear_fixmap(FIX_PTE)
     664             : 
     665             : #define pmd_page(pmd)                   phys_to_page(__pmd_to_phys(pmd))
     666             : 
     667             : /* use ONLY for statically allocated translation tables */
     668             : #define pte_offset_kimg(dir,addr)       ((pte_t *)__phys_to_kimg(pte_offset_phys((dir), (addr))))
     669             : 
     670             : /*
     671             :  * Conversion functions: convert a page and protection to a page entry,
     672             :  * and a page entry and page directory to the page they refer to.
     673             :  */
     674             : #define mk_pte(page,prot)       pfn_pte(page_to_pfn(page),prot)
     675             : 
     676             : #if CONFIG_PGTABLE_LEVELS > 2
     677             : 
     678             : #define pmd_ERROR(e)    \
     679             :         pr_err("%s:%d: bad pmd %016llx.\n", __FILE__, __LINE__, pmd_val(e))
     680             : 
     681             : #define pud_none(pud)           (!pud_val(pud))
     682             : #define pud_bad(pud)            (!pud_table(pud))
     683             : #define pud_present(pud)        pte_present(pud_pte(pud))
     684             : #define pud_leaf(pud)           (pud_present(pud) && !pud_table(pud))
     685             : #define pud_valid(pud)          pte_valid(pud_pte(pud))
     686             : #define pud_user(pud)           pte_user(pud_pte(pud))
     687             : #define pud_user_exec(pud)      pte_user_exec(pud_pte(pud))
     688             : 
     689             : static inline void set_pud(pud_t *pudp, pud_t pud)
     690             : {
     691             : #ifdef __PAGETABLE_PUD_FOLDED
     692             :         if (in_swapper_pgdir(pudp)) {
     693             :                 set_swapper_pgd((pgd_t *)pudp, __pgd(pud_val(pud)));
     694             :                 return;
     695             :         }
     696             : #endif /* __PAGETABLE_PUD_FOLDED */
     697             : 
     698             :         WRITE_ONCE(*pudp, pud);
     699             : 
     700             :         if (pud_valid(pud)) {
     701             :                 dsb(ishst);
     702             :                 isb();
     703             :         }
     704             : }
     705             : 
     706             : static inline void pud_clear(pud_t *pudp)
     707             : {
     708             :         set_pud(pudp, __pud(0));
     709             : }
     710             : 
     711             : static inline phys_addr_t pud_page_paddr(pud_t pud)
     712             : {
     713             :         return __pud_to_phys(pud);
     714             : }
     715             : 
     716             : static inline pmd_t *pud_pgtable(pud_t pud)
     717             : {
     718             :         return (pmd_t *)__va(pud_page_paddr(pud));
     719             : }
     720             : 
     721             : /* Find an entry in the second-level page table. */
     722             : #define pmd_offset_phys(dir, addr)      (pud_page_paddr(READ_ONCE(*(dir))) + pmd_index(addr) * sizeof(pmd_t))
     723             : 
     724             : #define pmd_set_fixmap(addr)            ((pmd_t *)set_fixmap_offset(FIX_PMD, addr))
     725             : #define pmd_set_fixmap_offset(pud, addr)        pmd_set_fixmap(pmd_offset_phys(pud, addr))
     726             : #define pmd_clear_fixmap()              clear_fixmap(FIX_PMD)
     727             : 
     728             : #define pud_page(pud)                   phys_to_page(__pud_to_phys(pud))
     729             : 
     730             : /* use ONLY for statically allocated translation tables */
     731             : #define pmd_offset_kimg(dir,addr)       ((pmd_t *)__phys_to_kimg(pmd_offset_phys((dir), (addr))))
     732             : 
     733             : #else
     734             : 
     735             : #define pud_page_paddr(pud)     ({ BUILD_BUG(); 0; })
     736             : #define pud_user_exec(pud)      pud_user(pud) /* Always 0 with folding */
     737             : 
      738             : /* Match pmd_offset folding in <asm-generic/pgtable-nopmd.h> */
     739             : #define pmd_set_fixmap(addr)            NULL
     740             : #define pmd_set_fixmap_offset(pudp, addr)       ((pmd_t *)pudp)
     741             : #define pmd_clear_fixmap()
     742             : 
     743             : #define pmd_offset_kimg(dir,addr)       ((pmd_t *)dir)
     744             : 
     745             : #endif  /* CONFIG_PGTABLE_LEVELS > 2 */
     746             : 
     747             : #if CONFIG_PGTABLE_LEVELS > 3
     748             : 
     749             : #define pud_ERROR(e)    \
     750             :         pr_err("%s:%d: bad pud %016llx.\n", __FILE__, __LINE__, pud_val(e))
     751             : 
     752             : #define p4d_none(p4d)           (!p4d_val(p4d))
     753             : #define p4d_bad(p4d)            (!(p4d_val(p4d) & 2))
     754             : #define p4d_present(p4d)        (p4d_val(p4d))
     755             : 
     756             : static inline void set_p4d(p4d_t *p4dp, p4d_t p4d)
     757             : {
     758             :         if (in_swapper_pgdir(p4dp)) {
     759             :                 set_swapper_pgd((pgd_t *)p4dp, __pgd(p4d_val(p4d)));
     760             :                 return;
     761             :         }
     762             : 
     763             :         WRITE_ONCE(*p4dp, p4d);
     764             :         dsb(ishst);
     765             :         isb();
     766             : }
     767             : 
     768             : static inline void p4d_clear(p4d_t *p4dp)
     769             : {
     770             :         set_p4d(p4dp, __p4d(0));
     771             : }
     772             : 
     773             : static inline phys_addr_t p4d_page_paddr(p4d_t p4d)
     774             : {
     775             :         return __p4d_to_phys(p4d);
     776             : }
     777             : 
     778             : static inline pud_t *p4d_pgtable(p4d_t p4d)
     779             : {
     780             :         return (pud_t *)__va(p4d_page_paddr(p4d));
     781             : }
     782             : 
     783             : /* Find an entry in the first-level page table. */
     784             : #define pud_offset_phys(dir, addr)      (p4d_page_paddr(READ_ONCE(*(dir))) + pud_index(addr) * sizeof(pud_t))
     785             : 
     786             : #define pud_set_fixmap(addr)            ((pud_t *)set_fixmap_offset(FIX_PUD, addr))
     787             : #define pud_set_fixmap_offset(p4d, addr)        pud_set_fixmap(pud_offset_phys(p4d, addr))
     788             : #define pud_clear_fixmap()              clear_fixmap(FIX_PUD)
     789             : 
     790             : #define p4d_page(p4d)           pfn_to_page(__phys_to_pfn(__p4d_to_phys(p4d)))
     791             : 
     792             : /* use ONLY for statically allocated translation tables */
     793             : #define pud_offset_kimg(dir,addr)       ((pud_t *)__phys_to_kimg(pud_offset_phys((dir), (addr))))
     794             : 
     795             : #else
     796             : 
     797             : #define p4d_page_paddr(p4d)     ({ BUILD_BUG(); 0;})
     798             : #define pgd_page_paddr(pgd)     ({ BUILD_BUG(); 0;})
     799             : 
      800             : /* Match pud_offset folding in <asm-generic/pgtable-nopud.h> */
     801             : #define pud_set_fixmap(addr)            NULL
     802             : #define pud_set_fixmap_offset(pgdp, addr)       ((pud_t *)pgdp)
     803             : #define pud_clear_fixmap()
     804             : 
     805             : #define pud_offset_kimg(dir,addr)       ((pud_t *)dir)
     806             : 
     807             : #endif  /* CONFIG_PGTABLE_LEVELS > 3 */
     808             : 
     809             : #define pgd_ERROR(e)    \
     810             :         pr_err("%s:%d: bad pgd %016llx.\n", __FILE__, __LINE__, pgd_val(e))
     811             : 
     812             : #define pgd_set_fixmap(addr)    ((pgd_t *)set_fixmap_offset(FIX_PGD, addr))
     813             : #define pgd_clear_fixmap()      clear_fixmap(FIX_PGD)
     814             : 
     815             : static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
     816             : {
     817             :         /*
     818             :          * Normal and Normal-Tagged are two different memory types and indices
     819             :          * in MAIR_EL1. The mask below has to include PTE_ATTRINDX_MASK.
     820             :          */
     821             :         const pteval_t mask = PTE_USER | PTE_PXN | PTE_UXN | PTE_RDONLY |
     822             :                               PTE_PROT_NONE | PTE_VALID | PTE_WRITE | PTE_GP |
     823             :                               PTE_ATTRINDX_MASK;
     824             :         /* preserve the hardware dirty information */
     825             :         if (pte_hw_dirty(pte))
     826             :                 pte = pte_mkdirty(pte);
     827             :         pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
     828             :         return pte;
     829             : }
     830             : 
     831             : static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
     832             : {
     833             :         return pte_pmd(pte_modify(pmd_pte(pmd), newprot));
     834             : }
     835             : 
     836             : #define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
     837             : extern int ptep_set_access_flags(struct vm_area_struct *vma,
     838             :                                  unsigned long address, pte_t *ptep,
     839             :                                  pte_t entry, int dirty);
     840             : 
     841             : #ifdef CONFIG_TRANSPARENT_HUGEPAGE
     842             : #define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
     843             : static inline int pmdp_set_access_flags(struct vm_area_struct *vma,
     844             :                                         unsigned long address, pmd_t *pmdp,
     845             :                                         pmd_t entry, int dirty)
     846             : {
     847             :         return ptep_set_access_flags(vma, address, (pte_t *)pmdp, pmd_pte(entry), dirty);
     848             : }
     849             : 
     850             : static inline int pud_devmap(pud_t pud)
     851             : {
     852             :         return 0;
     853             : }
     854             : 
     855             : static inline int pgd_devmap(pgd_t pgd)
     856             : {
     857             :         return 0;
     858             : }
     859             : #endif
     860             : 
     861             : #ifdef CONFIG_PAGE_TABLE_CHECK
     862             : static inline bool pte_user_accessible_page(pte_t pte)
     863             : {
     864             :         return pte_present(pte) && (pte_user(pte) || pte_user_exec(pte));
     865             : }
     866             : 
     867             : static inline bool pmd_user_accessible_page(pmd_t pmd)
     868             : {
     869             :         return pmd_leaf(pmd) && !pmd_present_invalid(pmd) && (pmd_user(pmd) || pmd_user_exec(pmd));
     870             : }
     871             : 
     872             : static inline bool pud_user_accessible_page(pud_t pud)
     873             : {
     874             :         return pud_leaf(pud) && (pud_user(pud) || pud_user_exec(pud));
     875             : }
     876             : #endif
     877             : 
     878             : /*
     879             :  * Atomic pte/pmd modifications.
     880             :  */
     881             : #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
     882             : static inline int __ptep_test_and_clear_young(pte_t *ptep)
     883             : {
     884             :         pte_t old_pte, pte;
     885             : 
     886             :         pte = READ_ONCE(*ptep);
     887             :         do {
     888             :                 old_pte = pte;
     889             :                 pte = pte_mkold(pte);
     890             :                 pte_val(pte) = cmpxchg_relaxed(&pte_val(*ptep),
     891             :                                                pte_val(old_pte), pte_val(pte));
     892             :         } while (pte_val(pte) != pte_val(old_pte));
     893             : 
     894             :         return pte_young(pte);
     895             : }
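
__ptep_test_and_clear_young() above is a compare-and-swap retry loop: recompute the new value from the latest snapshot until the exchange succeeds, then report what the old value said. A stand-alone analogue using C11 atomics, with cmpxchg_relaxed modelled as a relaxed compare_exchange (the bit position is an assumption):

#include <assert.h>
#include <stdatomic.h>
#include <stdint.h>

#define DEMO_PTE_AF (1u << 10)      /* assumed position of the access flag */

/* Clear the AF bit atomically and report whether it had been set,
 * retrying if another writer changed the pte under us. */
static int demo_test_and_clear_young(_Atomic uint64_t *ptep)
{
        uint64_t old = atomic_load_explicit(ptep, memory_order_relaxed);

        while (!atomic_compare_exchange_weak_explicit(ptep, &old,
                                                      old & ~(uint64_t)DEMO_PTE_AF,
                                                      memory_order_relaxed,
                                                      memory_order_relaxed))
                ;       /* 'old' was refreshed by the failed exchange; retry */

        return !!(old & DEMO_PTE_AF);
}

int main(void)
{
        _Atomic uint64_t pte = DEMO_PTE_AF | 0x3;

        assert(demo_test_and_clear_young(&pte) == 1);   /* was young */
        assert(demo_test_and_clear_young(&pte) == 0);   /* now old */
        assert(atomic_load(&pte) == 0x3);               /* other bits untouched */
        return 0;
}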
     896             : 
     897             : static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
     898             :                                             unsigned long address,
     899             :                                             pte_t *ptep)
     900             : {
     901             :         return __ptep_test_and_clear_young(ptep);
     902             : }
     903             : 
     904             : #define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
     905             : static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
     906             :                                          unsigned long address, pte_t *ptep)
     907             : {
     908             :         int young = ptep_test_and_clear_young(vma, address, ptep);
     909             : 
     910             :         if (young) {
     911             :                 /*
     912             :                  * We can elide the trailing DSB here since the worst that can
     913             :                  * happen is that a CPU continues to use the young entry in its
     914             :                  * TLB and we mistakenly reclaim the associated page. The
     915             :                  * window for such an event is bounded by the next
     916             :                  * context-switch, which provides a DSB to complete the TLB
     917             :                  * invalidation.
     918             :                  */
     919             :                 flush_tlb_page_nosync(vma, address);
     920             :         }
     921             : 
     922             :         return young;
     923             : }
     924             : 
     925             : #ifdef CONFIG_TRANSPARENT_HUGEPAGE
     926             : #define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
     927             : static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
     928             :                                             unsigned long address,
     929             :                                             pmd_t *pmdp)
     930             : {
     931             :         return ptep_test_and_clear_young(vma, address, (pte_t *)pmdp);
     932             : }
     933             : #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
     934             : 
     935             : #define __HAVE_ARCH_PTEP_GET_AND_CLEAR
     936             : static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
     937             :                                        unsigned long address, pte_t *ptep)
     938             : {
     939             :         pte_t pte = __pte(xchg_relaxed(&pte_val(*ptep), 0));
     940             : 
     941             :         page_table_check_pte_clear(mm, address, pte);
     942             : 
     943             :         return pte;
     944             : }
     945             : 
     946             : #ifdef CONFIG_TRANSPARENT_HUGEPAGE
     947             : #define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
     948             : static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
     949             :                                             unsigned long address, pmd_t *pmdp)
     950             : {
     951             :         pmd_t pmd = __pmd(xchg_relaxed(&pmd_val(*pmdp), 0));
     952             : 
     953             :         page_table_check_pmd_clear(mm, address, pmd);
     954             : 
     955             :         return pmd;
     956             : }
     957             : #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
     958             : 
     959             : /*
      960             :  * ptep_set_wrprotect - mark read-only while transferring potential hardware
     961             :  * dirty status (PTE_DBM && !PTE_RDONLY) to the software PTE_DIRTY bit.
     962             :  */
     963             : #define __HAVE_ARCH_PTEP_SET_WRPROTECT
     964             : static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long address, pte_t *ptep)
     965             : {
     966             :         pte_t old_pte, pte;
     967             : 
     968             :         pte = READ_ONCE(*ptep);
     969             :         do {
     970             :                 old_pte = pte;
     971             :                 pte = pte_wrprotect(pte);
     972             :                 pte_val(pte) = cmpxchg_relaxed(&pte_val(*ptep),
     973             :                                                pte_val(old_pte), pte_val(pte));
     974             :         } while (pte_val(pte) != pte_val(old_pte));
     975             : }
     976             : 
     977             : #ifdef CONFIG_TRANSPARENT_HUGEPAGE
     978             : #define __HAVE_ARCH_PMDP_SET_WRPROTECT
     979             : static inline void pmdp_set_wrprotect(struct mm_struct *mm,
     980             :                                       unsigned long address, pmd_t *pmdp)
     981             : {
     982             :         ptep_set_wrprotect(mm, address, (pte_t *)pmdp);
     983             : }
     984             : 
     985             : #define pmdp_establish pmdp_establish
     986             : static inline pmd_t pmdp_establish(struct vm_area_struct *vma,
     987             :                 unsigned long address, pmd_t *pmdp, pmd_t pmd)
     988             : {
     989             :         page_table_check_pmd_set(vma->vm_mm, address, pmdp, pmd);
     990             :         return __pmd(xchg_relaxed(&pmd_val(*pmdp), pmd_val(pmd)));
     991             : }
     992             : #endif
     993             : 
     994             : /*
     995             :  * Encode and decode a swap entry:
     996             :  *      bits 0-1:       present (must be zero)
     997             :  *      bits 2:         remember PG_anon_exclusive
     998             :  *      bits 3-7:       swap type
     999             :  *      bits 8-57:      swap offset
    1000             :  *      bit  58:        PTE_PROT_NONE (must be zero)
    1001             :  */
    1002             : #define __SWP_TYPE_SHIFT        3
    1003             : #define __SWP_TYPE_BITS         5
    1004             : #define __SWP_OFFSET_BITS       50
    1005             : #define __SWP_TYPE_MASK         ((1 << __SWP_TYPE_BITS) - 1)
    1006             : #define __SWP_OFFSET_SHIFT      (__SWP_TYPE_BITS + __SWP_TYPE_SHIFT)
    1007             : #define __SWP_OFFSET_MASK       ((1UL << __SWP_OFFSET_BITS) - 1)
    1008             : 
    1009             : #define __swp_type(x)           (((x).val >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK)
    1010             : #define __swp_offset(x)         (((x).val >> __SWP_OFFSET_SHIFT) & __SWP_OFFSET_MASK)
    1011             : #define __swp_entry(type,offset) ((swp_entry_t) { ((type) << __SWP_TYPE_SHIFT) | ((offset) << __SWP_OFFSET_SHIFT) })
    1012             : 
    1013             : #define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
    1014             : #define __swp_entry_to_pte(swp) ((pte_t) { (swp).val })
    1015             : 
    1016             : #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
    1017             : #define __pmd_to_swp_entry(pmd)         ((swp_entry_t) { pmd_val(pmd) })
    1018             : #define __swp_entry_to_pmd(swp)         __pmd((swp).val)
    1019             : #endif /* CONFIG_ARCH_ENABLE_THP_MIGRATION */
    1020             : 
    1021             : /*
    1022             :  * Ensure that there are not more swap files than can be encoded in the kernel
    1023             :  * PTEs.
    1024             :  */
    1025             : #define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > __SWP_TYPE_BITS)
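
The layout documented above (type in bits 3-7, offset in bits 8-57, bits 0-1 and bit 58 forced to zero) can be round-tripped in isolation. The shift and mask values below are copied from the macros; only the demo_* helper names are made up:

#include <assert.h>
#include <stdint.h>

/* Values copied from the layout above. */
#define DEMO_SWP_TYPE_SHIFT     3
#define DEMO_SWP_TYPE_BITS      5
#define DEMO_SWP_OFFSET_BITS    50
#define DEMO_SWP_TYPE_MASK      ((1ULL << DEMO_SWP_TYPE_BITS) - 1)
#define DEMO_SWP_OFFSET_SHIFT   (DEMO_SWP_TYPE_BITS + DEMO_SWP_TYPE_SHIFT)
#define DEMO_SWP_OFFSET_MASK    ((1ULL << DEMO_SWP_OFFSET_BITS) - 1)

static uint64_t demo_swp_entry(uint64_t type, uint64_t offset)
{
        return (type << DEMO_SWP_TYPE_SHIFT) | (offset << DEMO_SWP_OFFSET_SHIFT);
}

static uint64_t demo_swp_type(uint64_t val)
{
        return (val >> DEMO_SWP_TYPE_SHIFT) & DEMO_SWP_TYPE_MASK;
}

static uint64_t demo_swp_offset(uint64_t val)
{
        return (val >> DEMO_SWP_OFFSET_SHIFT) & DEMO_SWP_OFFSET_MASK;
}

int main(void)
{
        uint64_t val = demo_swp_entry(17, 0x123456789abULL);

        /* Bits 0-1 (present) and bit 58 (PTE_PROT_NONE) stay zero, so the
         * entry can never be mistaken for a present or PROT_NONE pte. */
        assert((val & 0x3) == 0);
        assert(!(val & (1ULL << 58)));
        assert(demo_swp_type(val) == 17);
        assert(demo_swp_offset(val) == 0x123456789abULL);
        return 0;
}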
    1026             : 
    1027             : #ifdef CONFIG_ARM64_MTE
    1028             : 
    1029             : #define __HAVE_ARCH_PREPARE_TO_SWAP
    1030             : static inline int arch_prepare_to_swap(struct page *page)
    1031             : {
    1032             :         if (system_supports_mte())
    1033             :                 return mte_save_tags(page);
    1034             :         return 0;
    1035             : }
    1036             : 
    1037             : #define __HAVE_ARCH_SWAP_INVALIDATE
    1038             : static inline void arch_swap_invalidate_page(int type, pgoff_t offset)
    1039             : {
    1040             :         if (system_supports_mte())
    1041             :                 mte_invalidate_tags(type, offset);
    1042             : }
    1043             : 
    1044             : static inline void arch_swap_invalidate_area(int type)
    1045             : {
    1046             :         if (system_supports_mte())
    1047             :                 mte_invalidate_tags_area(type);
    1048             : }
    1049             : 
    1050             : #define __HAVE_ARCH_SWAP_RESTORE
    1051             : static inline void arch_swap_restore(swp_entry_t entry, struct folio *folio)
    1052             : {
    1053             :         if (system_supports_mte())
    1054             :                 mte_restore_tags(entry, &folio->page);
    1055             : }
    1056             : 
    1057             : #endif /* CONFIG_ARM64_MTE */
    1058             : 
    1059             : /*
    1060             :  * On AArch64, the cache coherency is handled via the set_pte_at() function.
    1061             :  */
    1062             : static inline void update_mmu_cache(struct vm_area_struct *vma,
    1063             :                                     unsigned long addr, pte_t *ptep)
    1064             : {
    1065             :         /*
    1066             :          * We don't do anything here, so there's a very small chance of
    1067             :          * us retaking a user fault which we just fixed up. The alternative
    1068             :          * is doing a dsb(ishst), but that penalises the fastpath.
    1069             :          */
    1070             : }
    1071             : 
    1072             : #define update_mmu_cache_pmd(vma, address, pmd) do { } while (0)
    1073             : 
    1074             : #ifdef CONFIG_ARM64_PA_BITS_52
    1075             : #define phys_to_ttbr(addr)      (((addr) | ((addr) >> 46)) & TTBR_BADDR_MASK_52)
    1076             : #else
    1077             : #define phys_to_ttbr(addr)      (addr)
    1078             : #endif
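
The 52-bit phys_to_ttbr() above reuses the fold-the-high-bits idea from __phys_to_pte_val(): shifting right by 46 drops physical-address bits [51:48] into TTBR bits [5:2], which is why the table base must be at least 64-byte aligned in that configuration. A rough round trip (the mask is an assumption standing in for TTBR_BADDR_MASK_52):

#include <assert.h>
#include <stdint.h>

/* Assumption for illustration: keep bits [47:2] of the folded value. */
#define DEMO_TTBR_BADDR_MASK_52 (((1ULL << 46) - 1) << 2)

static uint64_t demo_phys_to_ttbr(uint64_t addr)
{
        /* addr >> 46 drops PA bits [51:48] into bits [5:2]. */
        return (addr | (addr >> 46)) & DEMO_TTBR_BADDR_MASK_52;
}

static uint64_t demo_ttbr_to_phys(uint64_t ttbr)
{
        /* Undo the fold: bits [5:2] go back up to [51:48]. */
        return (ttbr & ~0x3cULL) | ((ttbr & 0x3cULL) << 46);
}

int main(void)
{
        uint64_t pa = 0x000fabcde1234000ULL;    /* 52-bit, 64-byte-aligned table address */

        assert(demo_ttbr_to_phys(demo_phys_to_ttbr(pa)) == pa);
        return 0;
}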
    1079             : 
    1080             : /*
    1081             :  * On arm64 without hardware Access Flag, copying from user will fail because
     1082             :  * the pte is old and cannot be marked young. So we always end up with a
     1083             :  * zeroed page after fork() + CoW for pfn mappings. We don't always have a
    1084             :  * hardware-managed access flag on arm64.
    1085             :  */
    1086             : #define arch_has_hw_pte_young           cpu_has_hw_af
    1087             : 
    1088             : /*
    1089             :  * Experimentally, it's cheap to set the access flag in hardware and we
    1090             :  * benefit from prefaulting mappings as 'old' to start with.
    1091             :  */
    1092             : #define arch_wants_old_prefaulted_pte   cpu_has_hw_af
    1093             : 
    1094             : static inline bool pud_sect_supported(void)
    1095             : {
    1096             :         return PAGE_SIZE == SZ_4K;
    1097             : }
    1098             : 
    1099             : 
    1100             : #define __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
    1101             : #define ptep_modify_prot_start ptep_modify_prot_start
    1102             : extern pte_t ptep_modify_prot_start(struct vm_area_struct *vma,
    1103             :                                     unsigned long addr, pte_t *ptep);
    1104             : 
    1105             : #define ptep_modify_prot_commit ptep_modify_prot_commit
    1106             : extern void ptep_modify_prot_commit(struct vm_area_struct *vma,
    1107             :                                     unsigned long addr, pte_t *ptep,
    1108             :                                     pte_t old_pte, pte_t new_pte);
    1109             : #endif /* !__ASSEMBLY__ */
    1110             : 
    1111             : #endif /* __ASM_PGTABLE_H */

Generated by: LCOV version 1.14