LCOV - code coverage report
Current view: top level - arch/x86/include/asm - page.h (source / functions)
Test: fstests of 6.5.0-rc3-achx @ Mon Jul 31 20:08:12 PDT 2023
Date: 2023-07-31 20:08:12
Coverage: Lines: 0 hit / 2 total (0.0 %) | Functions: 0 hit / 0 total (-)
Uncovered lines: the clear_page() call in clear_user_page() and the copy_page() call in copy_user_page() (0 hits each)

Source code:
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_PAGE_H
#define _ASM_X86_PAGE_H

#include <linux/types.h>

#ifdef __KERNEL__

#include <asm/page_types.h>

#ifdef CONFIG_X86_64
#include <asm/page_64.h>
#else
#include <asm/page_32.h>
#endif  /* CONFIG_X86_64 */

#ifndef __ASSEMBLY__

struct page;

#include <linux/range.h>
extern struct range pfn_mapped[];
extern int nr_pfn_mapped;

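/*
 * x86 data caches are physically indexed, so there is no cache aliasing
 * to manage here: the helpers below can safely ignore the user-space
 * virtual address and the backing struct page, and simply operate on
 * the kernel mapping of the page.
 */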
static inline void clear_user_page(void *page, unsigned long vaddr,
                                   struct page *pg)
{
        clear_page(page);
}

static inline void copy_user_page(void *to, void *from, unsigned long vaddr,
                                  struct page *topage)
{
        copy_page(to, from);
}

#define vma_alloc_zeroed_movable_folio(vma, vaddr) \
        vma_alloc_folio(GFP_HIGHUSER_MOVABLE | __GFP_ZERO, 0, vma, vaddr, false)

#ifndef __pa
#define __pa(x)         __phys_addr((unsigned long)(x))
#endif

#define __pa_nodebug(x) __phys_addr_nodebug((unsigned long)(x))
/*
 * __pa_symbol should be used for C visible symbols.
 * This seems to be the official gcc blessed way to do such arithmetic.
 */
/*
 * We need __phys_reloc_hide() here because gcc may assume that there is no
 * overflow during __pa() calculation and can optimize it unexpectedly.
 * Newer versions of gcc provide -fno-strict-overflow switch to handle this
 * case properly. Once all supported versions of gcc understand it, we can
 * remove this Voodoo magic stuff. (i.e. once gcc3.x is deprecated)
 */
#define __pa_symbol(x) \
        __phys_addr_symbol(__phys_reloc_hide((unsigned long)(x)))
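/*
 * Example (illustrative, assuming the usual section markers from
 * <asm/sections.h>): __pa_symbol() is for link-time kernel symbols,
 * whereas __pa() is for ordinary direct-map virtual addresses:
 *
 *        phys_addr_t text_phys = __pa_symbol(_text);
 */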

#ifndef __va
#define __va(x)                 ((void *)((unsigned long)(x)+PAGE_OFFSET))
#endif

#define __boot_va(x)            __va(x)
#define __boot_pa(x)            __pa(x)
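/*
 * Example (illustrative): for memory in the kernel direct map, __pa()
 * and __va() are inverses of each other:
 *
 *        void *p = kmalloc(64, GFP_KERNEL);    // direct-mapped memory
 *        phys_addr_t phys = __pa(p);           // virtual -> physical
 *        void *q = __va(phys);                 // q == p
 *
 * This round trip does not hold for vmalloc()/ioremap() addresses,
 * which live outside the direct map.
 */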

/*
 * virt_to_page(kaddr) returns a valid pointer if and only if
 * virt_addr_valid(kaddr) returns true.
 */
#define virt_to_page(kaddr)     pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
#define pfn_to_kaddr(pfn)       __va((pfn) << PAGE_SHIFT)
extern bool __virt_addr_valid(unsigned long kaddr);
#define virt_addr_valid(kaddr)  __virt_addr_valid((unsigned long) (kaddr))
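/*
 * Usage sketch (illustrative): when a pointer may not come from the
 * direct map, guard virt_to_page() with virt_addr_valid():
 *
 *        if (virt_addr_valid(kaddr)) {
 *                struct page *page = virt_to_page(kaddr);
 *                ...
 *        }
 */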

static __always_inline u64 __canonical_address(u64 vaddr, u8 vaddr_bits)
{
        return ((s64)vaddr << (64 - vaddr_bits)) >> (64 - vaddr_bits);
}

static __always_inline u64 __is_canonical_address(u64 vaddr, u8 vaddr_bits)
{
        return __canonical_address(vaddr, vaddr_bits) == vaddr;
}
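/*
 * Worked example (illustrative), with vaddr_bits == 48 (4-level paging):
 * the shift pair sign-extends bit 47 into bits 63:48.
 *
 *        __canonical_address(0x0000800000000000, 48) == 0xffff800000000000
 *        __is_canonical_address(0x0000800000000000, 48) == 0    // non-canonical
 *        __is_canonical_address(0xffff800000000000, 48) == 1
 *        __is_canonical_address(0x00007fffffffffff, 48) == 1
 */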

#endif  /* __ASSEMBLY__ */

#include <asm-generic/memory_model.h>
#include <asm-generic/getorder.h>

#define HAVE_ARCH_HUGETLB_UNMAPPED_AREA

#endif  /* __KERNEL__ */
#endif /* _ASM_X86_PAGE_H */

Generated by: LCOV version 1.14