LCOV - code coverage report
Current view: top level - mm - internal.h (source / functions)
Test:     fstests of 6.5.0-rc3-achx @ Mon Jul 31 20:08:12 PDT 2023
Date:     2023-07-31 20:08:12
Coverage: Lines: 12 of 13 (92.3 %)    Functions: 2 of 2 (100.0 %)

          Line data    Source code
       1             : /* SPDX-License-Identifier: GPL-2.0-or-later */
       2             : /* internal.h: mm/ internal definitions
       3             :  *
       4             :  * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
       5             :  * Written by David Howells (dhowells@redhat.com)
       6             :  */
       7             : #ifndef __MM_INTERNAL_H
       8             : #define __MM_INTERNAL_H
       9             : 
      10             : #include <linux/fs.h>
      11             : #include <linux/mm.h>
      12             : #include <linux/pagemap.h>
      13             : #include <linux/rmap.h>
      14             : #include <linux/tracepoint-defs.h>
      15             : 
      16             : struct folio_batch;
      17             : 
      18             : /*
      19             :  * The set of flags that only affect watermark checking and reclaim
      20             :  * behaviour. This is used by the MM to obey the caller constraints
      21             :  * about IO, FS and watermark checking while ignoring placement
      22             :  * hints such as HIGHMEM usage.
      23             :  */
      24             : #define GFP_RECLAIM_MASK (__GFP_RECLAIM|__GFP_HIGH|__GFP_IO|__GFP_FS|\
      25             :                         __GFP_NOWARN|__GFP_RETRY_MAYFAIL|__GFP_NOFAIL|\
      26             :                         __GFP_NORETRY|__GFP_MEMALLOC|__GFP_NOMEMALLOC|\
      27             :                         __GFP_NOLOCKDEP)
      28             : 
      29             : /* The GFP flags allowed during early boot */
      30             : #define GFP_BOOT_MASK (__GFP_BITS_MASK & ~(__GFP_RECLAIM|__GFP_IO|__GFP_FS))
      31             : 
      32             : /* Control allocation cpuset and node placement constraints */
      33             : #define GFP_CONSTRAINT_MASK (__GFP_HARDWALL|__GFP_THISNODE)
      34             : 
      35             : /* Do not use these with a slab allocator */
      36             : #define GFP_SLAB_BUG_MASK (__GFP_DMA32|__GFP_HIGHMEM|~__GFP_BITS_MASK)
      37             : 
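A hedged sketch of how such a mask is typically applied (hypothetical caller; "gfp" is an assumed caller-side variable, not defined here): keep only the reclaim/watermark behaviour bits of the incoming gfp mask and drop placement hints.

        /* Constrain the caller's mask to reclaim behaviour only;
         * placement hints such as __GFP_HIGHMEM are dropped. */
        gfp_t reclaim_gfp = gfp & GFP_RECLAIM_MASK;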
      38             : /*
      39             :  * Different from WARN_ON_ONCE(), no warning will be issued
      40             :  * when we specify __GFP_NOWARN.
      41             :  */
      42             : #define WARN_ON_ONCE_GFP(cond, gfp)     ({                              \
      43             :         static bool __section(".data.once") __warned;                 \
      44             :         int __ret_warn_once = !!(cond);                                 \
      45             :                                                                         \
      46             :         if (unlikely(!(gfp & __GFP_NOWARN) && __ret_warn_once && !__warned)) { \
      47             :                 __warned = true;                                        \
      48             :                 WARN_ON(1);                                             \
      49             :         }                                                               \
      50             :         unlikely(__ret_warn_once);                                      \
      51             : })
      52             : 
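A hedged usage sketch (hypothetical caller; "order" and "gfp" are assumed caller-side variables): the macro typically wraps a sanity check so the one-time warning stays silent whenever the caller passed __GFP_NOWARN.

        /* Warn at most once on an impossible order, unless the caller
         * asked for silence with __GFP_NOWARN. */
        if (WARN_ON_ONCE_GFP(order > MAX_ORDER, gfp))
                return NULL;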
      53             : void page_writeback_init(void);
      54             : 
      55             : /*
      56             :  * If a 16GB hugetlb folio were mapped by PTEs of all of its 4kB pages,
      57             :  * its nr_pages_mapped would be 0x400000: choose the COMPOUND_MAPPED bit
      58             :  * above that range, instead of 2*(PMD_SIZE/PAGE_SIZE).  Hugetlb currently
      59             :  * leaves nr_pages_mapped at 0, but avoid surprise if it participates later.
      60             :  */
      61             : #define COMPOUND_MAPPED         0x800000
      62             : #define FOLIO_PAGES_MAPPED      (COMPOUND_MAPPED - 1)
      63             : 
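The bit choice above can be verified with plain arithmetic; a minimal standalone sketch (userspace C assuming 4 kB pages, not kernel code):

        #include <assert.h>

        int main(void)
        {
                /* 16 GB mapped by 4 kB PTEs: 2^34 / 2^12 = 2^22 = 0x400000. */
                unsigned long long ptes_in_16g = (16ULL << 30) >> 12;

                assert(ptes_in_16g == 0x400000);
                assert(0x800000 > ptes_in_16g);     /* COMPOUND_MAPPED sits above it */
                assert((0x800000 - 1) == 0x7fffff); /* FOLIO_PAGES_MAPPED mask */
                return 0;
        }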
      64             : /*
      65             :  * How many individual pages have an elevated _mapcount.  Excludes
      66             :  * the folio's entire_mapcount.
      67             :  */
      68             : static inline int folio_nr_pages_mapped(struct folio *folio)
      69             : {
      70             :         return atomic_read(&folio->_nr_pages_mapped) & FOLIO_PAGES_MAPPED;
      71             : }
      72             : 
      73             : static inline void *folio_raw_mapping(struct folio *folio)
      74             : {
      75             :         unsigned long mapping = (unsigned long)folio->mapping;
      76             : 
      77             :         return (void *)(mapping & ~PAGE_MAPPING_FLAGS);
      78             : }
      79             : 
      80             : void __acct_reclaim_writeback(pg_data_t *pgdat, struct folio *folio,
      81             :                                                 int nr_throttled);
      82   806475055 : static inline void acct_reclaim_writeback(struct folio *folio)
      83             : {
      84   806475055 :         pg_data_t *pgdat = folio_pgdat(folio);
      85   806475055 :         int nr_throttled = atomic_read(&pgdat->nr_writeback_throttled);
      86             : 
      87   806475055 :         if (nr_throttled)
      88           0 :                 __acct_reclaim_writeback(pgdat, folio, nr_throttled);
      89   806475055 : }
      90             : 
      91             : static inline void wake_throttle_isolated(pg_data_t *pgdat)
      92             : {
      93             :         wait_queue_head_t *wqh;
      94             : 
      95             :         wqh = &pgdat->reclaim_wait[VMSCAN_THROTTLE_ISOLATED];
      96             :         if (waitqueue_active(wqh))
      97             :                 wake_up(wqh);
      98             : }
      99             : 
     100             : vm_fault_t do_swap_page(struct vm_fault *vmf);
     101             : void folio_rotate_reclaimable(struct folio *folio);
     102             : bool __folio_end_writeback(struct folio *folio);
     103             : void deactivate_file_folio(struct folio *folio);
     104             : void folio_activate(struct folio *folio);
     105             : 
     106             : void free_pgtables(struct mmu_gather *tlb, struct maple_tree *mt,
     107             :                    struct vm_area_struct *start_vma, unsigned long floor,
     108             :                    unsigned long ceiling, bool mm_wr_locked);
     109             : void pmd_install(struct mm_struct *mm, pmd_t *pmd, pgtable_t *pte);
     110             : 
     111             : struct zap_details;
     112             : void unmap_page_range(struct mmu_gather *tlb,
     113             :                              struct vm_area_struct *vma,
     114             :                              unsigned long addr, unsigned long end,
     115             :                              struct zap_details *details);
     116             : 
     117             : void page_cache_ra_order(struct readahead_control *, struct file_ra_state *,
     118             :                 unsigned int order);
     119             : void force_page_cache_ra(struct readahead_control *, unsigned long nr);
     120             : static inline void force_page_cache_readahead(struct address_space *mapping,
     121             :                 struct file *file, pgoff_t index, unsigned long nr_to_read)
     122             : {
     123             :         DEFINE_READAHEAD(ractl, file, &file->f_ra, mapping, index);
     124             :         force_page_cache_ra(&ractl, nr_to_read);
     125             : }
     126             : 
     127             : unsigned find_lock_entries(struct address_space *mapping, pgoff_t *start,
     128             :                 pgoff_t end, struct folio_batch *fbatch, pgoff_t *indices);
     129             : unsigned find_get_entries(struct address_space *mapping, pgoff_t *start,
     130             :                 pgoff_t end, struct folio_batch *fbatch, pgoff_t *indices);
     131             : void filemap_free_folio(struct address_space *mapping, struct folio *folio);
     132             : int truncate_inode_folio(struct address_space *mapping, struct folio *folio);
     133             : bool truncate_inode_partial_folio(struct folio *folio, loff_t start,
     134             :                 loff_t end);
     135             : long invalidate_inode_page(struct page *page);
     136             : unsigned long mapping_try_invalidate(struct address_space *mapping,
     137             :                 pgoff_t start, pgoff_t end, unsigned long *nr_failed);
     138             : 
     139             : /**
     140             :  * folio_evictable - Test whether a folio is evictable.
     141             :  * @folio: The folio to test.
     142             :  *
     143             :  * Test whether @folio is evictable -- i.e., should be placed on
     144             :  * active/inactive lists vs unevictable list.
     145             :  *
     146             :  * Reasons folio might not be evictable:
     147             :  * 1. folio's mapping marked unevictable
     148             :  * 2. One of the pages in the folio is part of an mlocked VMA
     149             :  */
     150             : static inline bool folio_evictable(struct folio *folio)
     151             : {
     152             :         bool ret;
     153             : 
     154             :         /* Prevent address_space of inode and swap cache from being freed */
     155             :         rcu_read_lock();
     156             :         ret = !mapping_unevictable(folio_mapping(folio)) &&
     157             :                         !folio_test_mlocked(folio);
     158             :         rcu_read_unlock();
     159             :         return ret;
     160             : }
     161             : 
     162             : /*
     163             :  * Turn a non-refcounted page (->_refcount == 0) into refcounted with
     164             :  * a count of one.
     165             :  */
     166             : static inline void set_page_refcounted(struct page *page)
     167             : {
     168             :         VM_BUG_ON_PAGE(PageTail(page), page);
     169             :         VM_BUG_ON_PAGE(page_ref_count(page), page);
     170             :         set_page_count(page, 1);
     171             : }
     172             : 
     173             : extern unsigned long highest_memmap_pfn;
     174             : 
     175             : /*
     176             :  * Maximum number of reclaim retries without progress before the OOM
     177             :  * killer is consider the only way forward.
     178             :  */
     179             : #define MAX_RECLAIM_RETRIES 16
     180             : 
     181             : /*
     182             :  * in mm/vmscan.c:
     183             :  */
     184             : bool isolate_lru_page(struct page *page);
     185             : bool folio_isolate_lru(struct folio *folio);
     186             : void putback_lru_page(struct page *page);
     187             : void folio_putback_lru(struct folio *folio);
     188             : extern void reclaim_throttle(pg_data_t *pgdat, enum vmscan_throttle_state reason);
     189             : 
     190             : /*
     191             :  * in mm/rmap.c:
     192             :  */
     193             : pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address);
     194             : 
     195             : /*
     196             :  * in mm/page_alloc.c
     197             :  */
     198             : #define K(x) ((x) << (PAGE_SHIFT-10))
     199             : 
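For reference, with 4 kB pages (PAGE_SHIFT == 12) the macro reduces to a shift by two, converting a page count into KiB; a small worked example (the page size is an assumption):

        /* K(x) == (x) << (12 - 10) == x * 4 with 4 kB pages,
         * e.g. K(262144) == 1048576 kB == 1 GiB worth of pages. */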
     200             : extern char * const zone_names[MAX_NR_ZONES];
     201             : 
     202             : /* perform sanity checks on struct pages being allocated or freed */
     203             : DECLARE_STATIC_KEY_MAYBE(CONFIG_DEBUG_VM, check_pages_enabled);
     204             : 
     205             : extern int min_free_kbytes;
     206             : 
     207             : void setup_per_zone_wmarks(void);
     208             : void calculate_min_free_kbytes(void);
     209             : int __meminit init_per_zone_wmark_min(void);
     210             : void page_alloc_sysctl_init(void);
     211             : 
     212             : /*
     213             :  * Structure for holding the mostly immutable allocation parameters passed
     214             :  * between functions involved in allocations, including the alloc_pages*
     215             :  * family of functions.
     216             :  *
     217             :  * nodemask, migratetype and highest_zoneidx are initialized only once in
     218             :  * __alloc_pages() and then never change.
     219             :  *
     220             :  * zonelist, preferred_zone and highest_zoneidx are set first in
     221             :  * __alloc_pages() for the fast path, and might be later changed
     222             :  * in __alloc_pages_slowpath(). All other functions pass the whole structure
     223             :  * by a const pointer.
     224             :  */
     225             : struct alloc_context {
     226             :         struct zonelist *zonelist;
     227             :         nodemask_t *nodemask;
     228             :         struct zoneref *preferred_zoneref;
     229             :         int migratetype;
     230             : 
     231             :         /*
     232             :          * highest_zoneidx represents highest usable zone index of
     233             :          * the allocation request. Due to the nature of the zone,
     234             :          * memory on lower zone than the highest_zoneidx will be
     235             :          * protected by lowmem_reserve[highest_zoneidx].
     236             :          *
     237             :          * highest_zoneidx is also used by reclaim/compaction to limit
     238             :          * the target zone since higher zone than this index cannot be
     239             :          * usable for this allocation request.
     240             :          */
     241             :         enum zone_type highest_zoneidx;
     242             :         bool spread_dirty_pages;
     243             : };
     244             : 
     245             : /*
     246             :  * This function returns the order of a free page in the buddy system. In
     247             :  * general, page_zone(page)->lock must be held by the caller to prevent the
     248             :  * page from being allocated in parallel and returning garbage as the order.
     249             :  * If a caller does not hold page_zone(page)->lock, it must guarantee that the
     250             :  * page cannot be allocated or merged in parallel. Alternatively, it must
     251             :  * handle invalid values gracefully, and use buddy_order_unsafe() below.
     252             :  */
     253             : static inline unsigned int buddy_order(struct page *page)
     254             : {
     255             :         /* PageBuddy() must be checked by the caller */
     256             :         return page_private(page);
     257             : }
     258             : 
     259             : /*
     260             :  * Like buddy_order(), but for callers who cannot afford to hold the zone lock.
     261             :  * PageBuddy() should be checked first by the caller to minimize race window,
     262             :  * and invalid values must be handled gracefully.
     263             :  *
     264             :  * READ_ONCE is used so that if the caller assigns the result into a local
     265             :  * variable and e.g. tests it for valid range before using, the compiler cannot
     266             :  * decide to remove the variable and inline the page_private(page) multiple
     267             :  * times, potentially observing different values in the tests and the actual
     268             :  * use of the result.
     269             :  */
     270             : #define buddy_order_unsafe(page)        READ_ONCE(page_private(page))
     271             : 
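A hedged sketch of the intended pattern (hypothetical lockless caller; use_order() is a made-up placeholder): read the racy order once into a local variable, range-check that local, and only then act on it.

        unsigned int order = buddy_order_unsafe(page);

        /* page_private() is not re-read: the check and the use are
         * guaranteed to see the same (possibly stale) value. */
        if (order <= MAX_ORDER)
                use_order(order);       /* hypothetical helper */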
     272             : /*
      273             :  * This function checks whether a page is free && is the buddy.
      274             :  * We can coalesce a page and its buddy if
     275             :  * (a) the buddy is not in a hole (check before calling!) &&
     276             :  * (b) the buddy is in the buddy system &&
     277             :  * (c) a page and its buddy have the same order &&
     278             :  * (d) a page and its buddy are in the same zone.
     279             :  *
     280             :  * For recording whether a page is in the buddy system, we set PageBuddy.
     281             :  * Setting, clearing, and testing PageBuddy is serialized by zone->lock.
     282             :  *
     283             :  * For recording page's order, we use page_private(page).
     284             :  */
     285             : static inline bool page_is_buddy(struct page *page, struct page *buddy,
     286             :                                  unsigned int order)
     287             : {
     288             :         if (!page_is_guard(buddy) && !PageBuddy(buddy))
     289             :                 return false;
     290             : 
     291             :         if (buddy_order(buddy) != order)
     292             :                 return false;
     293             : 
     294             :         /*
     295             :          * zone check is done late to avoid uselessly calculating
     296             :          * zone/node ids for pages that could never merge.
     297             :          */
     298             :         if (page_zone_id(page) != page_zone_id(buddy))
     299             :                 return false;
     300             : 
     301             :         VM_BUG_ON_PAGE(page_count(buddy) != 0, buddy);
     302             : 
     303             :         return true;
     304             : }
     305             : 
     306             : /*
     307             :  * Locate the struct page for both the matching buddy in our
      308             :  * pair (buddy1) and the combined order-(O+1) page they form (page).
     309             :  *
     310             :  * 1) Any buddy B1 will have an order O twin B2 which satisfies
     311             :  * the following equation:
     312             :  *     B2 = B1 ^ (1 << O)
     313             :  * For example, if the starting buddy (buddy2) is #8 its order
     314             :  * 1 buddy is #10:
     315             :  *     B2 = 8 ^ (1 << 1) = 8 ^ 2 = 10
     316             :  *
     317             :  * 2) Any buddy B will have an order O+1 parent P which
     318             :  * satisfies the following equation:
     319             :  *     P = B & ~(1 << O)
     320             :  *
     321             :  * Assumption: *_mem_map is contiguous at least up to MAX_ORDER
     322             :  */
     323             : static inline unsigned long
     324             : __find_buddy_pfn(unsigned long page_pfn, unsigned int order)
     325             : {
     326             :         return page_pfn ^ (1 << order);
     327             : }
     328             : 
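The XOR/AND identities in the comment above can be exercised directly; a standalone sketch (userspace C mirroring the helper rather than calling it):

        #include <assert.h>

        static unsigned long find_buddy_pfn(unsigned long pfn, unsigned int order)
        {
                return pfn ^ (1UL << order);
        }

        int main(void)
        {
                assert(find_buddy_pfn(8, 1) == 10);  /* order-1 buddy of #8 is #10 */
                assert(find_buddy_pfn(10, 1) == 8);  /* the relation is symmetric */
                assert((10UL & ~(1UL << 1)) == 8);   /* their order-2 parent is #8 */
                return 0;
        }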
     329             : /*
     330             :  * Find the buddy of @page and validate it.
     331             :  * @page: The input page
     332             :  * @pfn: The pfn of the page, it saves a call to page_to_pfn() when the
     333             :  *       function is used in the performance-critical __free_one_page().
     334             :  * @order: The order of the page
     335             :  * @buddy_pfn: The output pointer to the buddy pfn, it also saves a call to
     336             :  *             page_to_pfn().
     337             :  *
     338             :  * The found buddy can be a non PageBuddy, out of @page's zone, or its order is
      339             :  * not the same as @page. The validation is necessary before using it.
     340             :  *
     341             :  * Return: the found buddy page or NULL if not found.
     342             :  */
     343             : static inline struct page *find_buddy_page_pfn(struct page *page,
     344             :                         unsigned long pfn, unsigned int order, unsigned long *buddy_pfn)
     345             : {
     346             :         unsigned long __buddy_pfn = __find_buddy_pfn(pfn, order);
     347             :         struct page *buddy;
     348             : 
     349             :         buddy = page + (__buddy_pfn - pfn);
     350             :         if (buddy_pfn)
     351             :                 *buddy_pfn = __buddy_pfn;
     352             : 
     353             :         if (page_is_buddy(page, buddy, order))
     354             :                 return buddy;
     355             :         return NULL;
     356             : }
     357             : 
     358             : extern struct page *__pageblock_pfn_to_page(unsigned long start_pfn,
     359             :                                 unsigned long end_pfn, struct zone *zone);
     360             : 
     361             : static inline struct page *pageblock_pfn_to_page(unsigned long start_pfn,
     362             :                                 unsigned long end_pfn, struct zone *zone)
     363             : {
     364             :         if (zone->contiguous)
     365             :                 return pfn_to_page(start_pfn);
     366             : 
     367             :         return __pageblock_pfn_to_page(start_pfn, end_pfn, zone);
     368             : }
     369             : 
     370             : void set_zone_contiguous(struct zone *zone);
     371             : 
     372             : static inline void clear_zone_contiguous(struct zone *zone)
     373             : {
     374             :         zone->contiguous = false;
     375             : }
     376             : 
     377             : extern int __isolate_free_page(struct page *page, unsigned int order);
     378             : extern void __putback_isolated_page(struct page *page, unsigned int order,
     379             :                                     int mt);
     380             : extern void memblock_free_pages(struct page *page, unsigned long pfn,
     381             :                                         unsigned int order);
     382             : extern void __free_pages_core(struct page *page, unsigned int order);
     383             : 
     384             : /*
     385             :  * This will have no effect, other than possibly generating a warning, if the
     386             :  * caller passes in a non-large folio.
     387             :  */
     388             : static inline void folio_set_order(struct folio *folio, unsigned int order)
     389             : {
     390             :         if (WARN_ON_ONCE(!order || !folio_test_large(folio)))
     391             :                 return;
     392             : 
     393             :         folio->_folio_order = order;
     394             : #ifdef CONFIG_64BIT
     395             :         folio->_folio_nr_pages = 1U << order;
     396             : #endif
     397             : }
     398             : 
     399             : static inline void prep_compound_head(struct page *page, unsigned int order)
     400             : {
     401             :         struct folio *folio = (struct folio *)page;
     402             : 
     403             :         folio_set_compound_dtor(folio, COMPOUND_PAGE_DTOR);
     404             :         folio_set_order(folio, order);
     405             :         atomic_set(&folio->_entire_mapcount, -1);
     406             :         atomic_set(&folio->_nr_pages_mapped, 0);
     407             :         atomic_set(&folio->_pincount, 0);
     408             : }
     409             : 
     410             : static inline void prep_compound_tail(struct page *head, int tail_idx)
     411             : {
     412             :         struct page *p = head + tail_idx;
     413             : 
     414             :         p->mapping = TAIL_MAPPING;
     415             :         set_compound_head(p, head);
     416             :         set_page_private(p, 0);
     417             : }
     418             : 
     419             : extern void prep_compound_page(struct page *page, unsigned int order);
     420             : 
     421             : extern void post_alloc_hook(struct page *page, unsigned int order,
     422             :                                         gfp_t gfp_flags);
     423             : extern int user_min_free_kbytes;
     424             : 
     425             : extern void free_unref_page(struct page *page, unsigned int order);
     426             : extern void free_unref_page_list(struct list_head *list);
     427             : 
     428             : extern void zone_pcp_reset(struct zone *zone);
     429             : extern void zone_pcp_disable(struct zone *zone);
     430             : extern void zone_pcp_enable(struct zone *zone);
     431             : extern void zone_pcp_init(struct zone *zone);
     432             : 
     433             : extern void *memmap_alloc(phys_addr_t size, phys_addr_t align,
     434             :                           phys_addr_t min_addr,
     435             :                           int nid, bool exact_nid);
     436             : 
     437             : void memmap_init_range(unsigned long, int, unsigned long, unsigned long,
     438             :                 unsigned long, enum meminit_context, struct vmem_altmap *, int);
     439             : 
     440             : 
     441             : int split_free_page(struct page *free_page,
     442             :                         unsigned int order, unsigned long split_pfn_offset);
     443             : 
     444             : #if defined CONFIG_COMPACTION || defined CONFIG_CMA
     445             : 
     446             : /*
     447             :  * in mm/compaction.c
     448             :  */
     449             : /*
     450             :  * compact_control is used to track pages being migrated and the free pages
     451             :  * they are being migrated to during memory compaction. The free_pfn starts
     452             :  * at the end of a zone and migrate_pfn begins at the start. Movable pages
     453             :  * are moved to the end of a zone during a compaction run and the run
     454             :  * completes when free_pfn <= migrate_pfn
     455             :  */
     456             : struct compact_control {
     457             :         struct list_head freepages;     /* List of free pages to migrate to */
     458             :         struct list_head migratepages;  /* List of pages being migrated */
     459             :         unsigned int nr_freepages;      /* Number of isolated free pages */
     460             :         unsigned int nr_migratepages;   /* Number of pages to migrate */
     461             :         unsigned long free_pfn;         /* isolate_freepages search base */
     462             :         /*
     463             :          * Acts as an in/out parameter to page isolation for migration.
     464             :          * isolate_migratepages uses it as a search base.
     465             :          * isolate_migratepages_block will update the value to the next pfn
     466             :          * after the last isolated one.
     467             :          */
     468             :         unsigned long migrate_pfn;
     469             :         unsigned long fast_start_pfn;   /* a pfn to start linear scan from */
     470             :         struct zone *zone;
     471             :         unsigned long total_migrate_scanned;
     472             :         unsigned long total_free_scanned;
     473             :         unsigned short fast_search_fail;/* failures to use free list searches */
     474             :         short search_order;             /* order to start a fast search at */
     475             :         const gfp_t gfp_mask;           /* gfp mask of a direct compactor */
     476             :         int order;                      /* order a direct compactor needs */
     477             :         int migratetype;                /* migratetype of direct compactor */
     478             :         const unsigned int alloc_flags; /* alloc flags of a direct compactor */
     479             :         const int highest_zoneidx;      /* zone index of a direct compactor */
     480             :         enum migrate_mode mode;         /* Async or sync migration mode */
     481             :         bool ignore_skip_hint;          /* Scan blocks even if marked skip */
     482             :         bool no_set_skip_hint;          /* Don't mark blocks for skipping */
     483             :         bool ignore_block_suitable;     /* Scan blocks considered unsuitable */
     484             :         bool direct_compaction;         /* False from kcompactd or /proc/... */
     485             :         bool proactive_compaction;      /* kcompactd proactive compaction */
     486             :         bool whole_zone;                /* Whole zone should/has been scanned */
     487             :         bool contended;                 /* Signal lock contention */
     488             :         bool finish_pageblock;          /* Scan the remainder of a pageblock. Used
     489             :                                          * when there are potentially transient
     490             :                                          * isolation or migration failures to
     491             :                                          * ensure forward progress.
     492             :                                          */
     493             :         bool alloc_contig;              /* alloc_contig_range allocation */
     494             : };
     495             : 
     496             : /*
     497             :  * Used in direct compaction when a page should be taken from the freelists
     498             :  * immediately when one is created during the free path.
     499             :  */
     500             : struct capture_control {
     501             :         struct compact_control *cc;
     502             :         struct page *page;
     503             : };
     504             : 
     505             : unsigned long
     506             : isolate_freepages_range(struct compact_control *cc,
     507             :                         unsigned long start_pfn, unsigned long end_pfn);
     508             : int
     509             : isolate_migratepages_range(struct compact_control *cc,
     510             :                            unsigned long low_pfn, unsigned long end_pfn);
     511             : 
     512             : int __alloc_contig_migrate_range(struct compact_control *cc,
     513             :                                         unsigned long start, unsigned long end);
     514             : 
     515             : /* Free whole pageblock and set its migration type to MIGRATE_CMA. */
     516             : void init_cma_reserved_pageblock(struct page *page);
     517             : 
     518             : #endif /* CONFIG_COMPACTION || CONFIG_CMA */
     519             : 
     520             : int find_suitable_fallback(struct free_area *area, unsigned int order,
     521             :                         int migratetype, bool only_stealable, bool *can_steal);
     522             : 
     523             : static inline bool free_area_empty(struct free_area *area, int migratetype)
     524             : {
     525             :         return list_empty(&area->free_list[migratetype]);
     526             : }
     527             : 
     528             : /*
      529             :  * These three helpers classify VMAs for virtual memory accounting.
     530             :  */
     531             : 
     532             : /*
     533             :  * Executable code area - executable, not writable, not stack
     534             :  */
     535             : static inline bool is_exec_mapping(vm_flags_t flags)
     536             : {
     537             :         return (flags & (VM_EXEC | VM_WRITE | VM_STACK)) == VM_EXEC;
     538             : }
     539             : 
     540             : /*
     541             :  * Stack area - automatically grows in one direction
     542             :  *
     543             :  * VM_GROWSUP / VM_GROWSDOWN VMAs are always private anonymous:
     544             :  * do_mmap() forbids all other combinations.
     545             :  */
     546             : static inline bool is_stack_mapping(vm_flags_t flags)
     547             : {
     548             :         return (flags & VM_STACK) == VM_STACK;
     549             : }
     550             : 
     551             : /*
     552             :  * Data area - private, writable, not stack
     553             :  */
     554             : static inline bool is_data_mapping(vm_flags_t flags)
     555             : {
     556             :         return (flags & (VM_WRITE | VM_SHARED | VM_STACK)) == VM_WRITE;
     557             : }
     558             : 
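Hedged examples of how the three predicates partition some common flag combinations (illustrative values only, shown as a comment):

        /*
         *   VM_EXEC | VM_READ              -> is_exec_mapping()
         *   VM_STACK | VM_READ | VM_WRITE  -> is_stack_mapping()
         *   VM_READ | VM_WRITE             -> is_data_mapping()
         *   VM_READ | VM_WRITE | VM_SHARED -> none of the three
         *                                     (shared writable is not "data" here)
         */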
     559             : /* mm/util.c */
     560             : struct anon_vma *folio_anon_vma(struct folio *folio);
     561             : 
     562             : #ifdef CONFIG_MMU
     563             : void unmap_mapping_folio(struct folio *folio);
     564             : extern long populate_vma_page_range(struct vm_area_struct *vma,
     565             :                 unsigned long start, unsigned long end, int *locked);
     566             : extern long faultin_vma_page_range(struct vm_area_struct *vma,
     567             :                                    unsigned long start, unsigned long end,
     568             :                                    bool write, int *locked);
     569             : extern bool mlock_future_ok(struct mm_struct *mm, unsigned long flags,
     570             :                                unsigned long bytes);
     571             : /*
     572             :  * mlock_vma_folio() and munlock_vma_folio():
     573             :  * should be called with vma's mmap_lock held for read or write,
     574             :  * under page table lock for the pte/pmd being added or removed.
     575             :  *
     576             :  * mlock is usually called at the end of page_add_*_rmap(), munlock at
     577             :  * the end of page_remove_rmap(); but new anon folios are managed by
     578             :  * folio_add_lru_vma() calling mlock_new_folio().
     579             :  *
     580             :  * @compound is used to include pmd mappings of THPs, but filter out
     581             :  * pte mappings of THPs, which cannot be consistently counted: a pte
     582             :  * mapping of the THP head cannot be distinguished by the page alone.
     583             :  */
     584             : void mlock_folio(struct folio *folio);
     585             : static inline void mlock_vma_folio(struct folio *folio,
     586             :                         struct vm_area_struct *vma, bool compound)
     587             : {
     588             :         /*
     589             :          * The VM_SPECIAL check here serves two purposes.
     590             :          * 1) VM_IO check prevents migration from double-counting during mlock.
     591             :          * 2) Although mmap_region() and mlock_fixup() take care that VM_LOCKED
     592             :          *    is never left set on a VM_SPECIAL vma, there is an interval while
     593             :          *    file->f_op->mmap() is using vm_insert_page(s), when VM_LOCKED may
     594             :          *    still be set while VM_SPECIAL bits are added: so ignore it then.
     595             :          */
     596             :         if (unlikely((vma->vm_flags & (VM_LOCKED|VM_SPECIAL)) == VM_LOCKED) &&
     597             :             (compound || !folio_test_large(folio)))
     598             :                 mlock_folio(folio);
     599             : }
     600             : 
     601             : void munlock_folio(struct folio *folio);
     602             : static inline void munlock_vma_folio(struct folio *folio,
     603             :                         struct vm_area_struct *vma, bool compound)
     604             : {
     605             :         if (unlikely(vma->vm_flags & VM_LOCKED) &&
     606             :             (compound || !folio_test_large(folio)))
     607             :                 munlock_folio(folio);
     608             : }
     609             : 
     610             : void mlock_new_folio(struct folio *folio);
     611             : bool need_mlock_drain(int cpu);
     612             : void mlock_drain_local(void);
     613             : void mlock_drain_remote(int cpu);
     614             : 
     615             : extern pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma);
     616             : 
     617             : /*
      618             :  * Return the start of the user virtual address at the specified offset
      619             :  * within a vma.
     620             :  */
     621             : static inline unsigned long
     622             : vma_pgoff_address(pgoff_t pgoff, unsigned long nr_pages,
     623             :                   struct vm_area_struct *vma)
     624             : {
     625             :         unsigned long address;
     626             : 
     627             :         if (pgoff >= vma->vm_pgoff) {
     628             :                 address = vma->vm_start +
     629             :                         ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
     630             :                 /* Check for address beyond vma (or wrapped through 0?) */
     631             :                 if (address < vma->vm_start || address >= vma->vm_end)
     632             :                         address = -EFAULT;
     633             :         } else if (pgoff + nr_pages - 1 >= vma->vm_pgoff) {
     634             :                 /* Test above avoids possibility of wrap to 0 on 32-bit */
     635             :                 address = vma->vm_start;
     636             :         } else {
     637             :                 address = -EFAULT;
     638             :         }
     639             :         return address;
     640             : }
     641             : 
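A hedged worked example with made-up numbers, assuming 4 kB pages:

        /* vma with vm_start = 0x7f0000000000 and vm_pgoff = 0x10:
         * pgoff 0x13 maps at vm_start + ((0x13 - 0x10) << 12)
         *                  = 0x7f0000003000,
         * provided that address is still below vm_end; otherwise
         * -EFAULT is returned. */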
     642             : /*
     643             :  * Return the start of user virtual address of a page within a vma.
     644             :  * Returns -EFAULT if all of the page is outside the range of vma.
     645             :  * If page is a compound head, the entire compound page is considered.
     646             :  */
     647             : static inline unsigned long
     648             : vma_address(struct page *page, struct vm_area_struct *vma)
     649             : {
     650             :         VM_BUG_ON_PAGE(PageKsm(page), page);    /* KSM page->index unusable */
     651             :         return vma_pgoff_address(page_to_pgoff(page), compound_nr(page), vma);
     652             : }
     653             : 
     654             : /*
     655             :  * Then at what user virtual address will none of the range be found in vma?
     656             :  * Assumes that vma_address() already returned a good starting address.
     657             :  */
     658             : static inline unsigned long vma_address_end(struct page_vma_mapped_walk *pvmw)
     659             : {
     660             :         struct vm_area_struct *vma = pvmw->vma;
     661             :         pgoff_t pgoff;
     662             :         unsigned long address;
     663             : 
     664             :         /* Common case, plus ->pgoff is invalid for KSM */
     665             :         if (pvmw->nr_pages == 1)
     666             :                 return pvmw->address + PAGE_SIZE;
     667             : 
     668             :         pgoff = pvmw->pgoff + pvmw->nr_pages;
     669             :         address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
     670             :         /* Check for address beyond vma (or wrapped through 0?) */
     671             :         if (address < vma->vm_start || address > vma->vm_end)
     672             :                 address = vma->vm_end;
     673             :         return address;
     674             : }
     675             : 
     676     8045238 : static inline struct file *maybe_unlock_mmap_for_io(struct vm_fault *vmf,
     677             :                                                     struct file *fpin)
     678             : {
     679     8045238 :         int flags = vmf->flags;
     680             : 
     681     8045238 :         if (fpin)
     682             :                 return fpin;
     683             : 
     684             :         /*
     685             :          * FAULT_FLAG_RETRY_NOWAIT means we don't want to wait on page locks or
      686             :          * anything, so we only pin the file and drop the mmap_lock if
      687             :          * FAULT_FLAG_ALLOW_RETRY alone is set and this is the first attempt.
     688             :          */
     689     7359083 :         if (fault_flag_allow_retry_first(flags) &&
     690     7221780 :             !(flags & FAULT_FLAG_RETRY_NOWAIT)) {
     691     7221780 :                 fpin = get_file(vmf->vma->vm_file);
     692     7222334 :                 mmap_read_unlock(vmf->vma->vm_mm);
     693             :         }
     694             :         return fpin;
     695             : }
     696             : #else /* !CONFIG_MMU */
     697             : static inline void unmap_mapping_folio(struct folio *folio) { }
     698             : static inline void mlock_new_folio(struct folio *folio) { }
     699             : static inline bool need_mlock_drain(int cpu) { return false; }
     700             : static inline void mlock_drain_local(void) { }
     701             : static inline void mlock_drain_remote(int cpu) { }
     702             : static inline void vunmap_range_noflush(unsigned long start, unsigned long end)
     703             : {
     704             : }
     705             : #endif /* !CONFIG_MMU */
     706             : 
     707             : /* Memory initialisation debug and verification */
     708             : #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
     709             : DECLARE_STATIC_KEY_TRUE(deferred_pages);
     710             : 
     711             : bool __init deferred_grow_zone(struct zone *zone, unsigned int order);
     712             : #endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */
     713             : 
     714             : enum mminit_level {
     715             :         MMINIT_WARNING,
     716             :         MMINIT_VERIFY,
     717             :         MMINIT_TRACE
     718             : };
     719             : 
     720             : #ifdef CONFIG_DEBUG_MEMORY_INIT
     721             : 
     722             : extern int mminit_loglevel;
     723             : 
     724             : #define mminit_dprintk(level, prefix, fmt, arg...) \
     725             : do { \
     726             :         if (level < mminit_loglevel) { \
     727             :                 if (level <= MMINIT_WARNING) \
     728             :                         pr_warn("mminit::" prefix " " fmt, ##arg);  \
     729             :                 else \
     730             :                         printk(KERN_DEBUG "mminit::" prefix " " fmt, ##arg); \
     731             :         } \
     732             : } while (0)
     733             : 
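A hedged usage sketch (the prefix, format string, "nid" and "nr_pages" are illustrative assumptions): the message is emitted only while mminit_loglevel is raised above the message's level.

        mminit_dprintk(MMINIT_VERIFY, "memmap",
                       "node %d has %lu pages\n", nid, nr_pages);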
     734             : extern void mminit_verify_pageflags_layout(void);
     735             : extern void mminit_verify_zonelist(void);
     736             : #else
     737             : 
     738             : static inline void mminit_dprintk(enum mminit_level level,
     739             :                                 const char *prefix, const char *fmt, ...)
     740             : {
     741             : }
     742             : 
     743             : static inline void mminit_verify_pageflags_layout(void)
     744             : {
     745             : }
     746             : 
     747             : static inline void mminit_verify_zonelist(void)
     748             : {
     749             : }
     750             : #endif /* CONFIG_DEBUG_MEMORY_INIT */
     751             : 
     752             : #define NODE_RECLAIM_NOSCAN     -2
     753             : #define NODE_RECLAIM_FULL       -1
     754             : #define NODE_RECLAIM_SOME       0
     755             : #define NODE_RECLAIM_SUCCESS    1
     756             : 
     757             : #ifdef CONFIG_NUMA
     758             : extern int node_reclaim(struct pglist_data *, gfp_t, unsigned int);
     759             : extern int find_next_best_node(int node, nodemask_t *used_node_mask);
     760             : #else
     761             : static inline int node_reclaim(struct pglist_data *pgdat, gfp_t mask,
     762             :                                 unsigned int order)
     763             : {
     764             :         return NODE_RECLAIM_NOSCAN;
     765             : }
     766             : static inline int find_next_best_node(int node, nodemask_t *used_node_mask)
     767             : {
     768             :         return NUMA_NO_NODE;
     769             : }
     770             : #endif
     771             : 
     772             : /*
     773             :  * mm/memory-failure.c
     774             :  */
     775             : extern int hwpoison_filter(struct page *p);
     776             : 
     777             : extern u32 hwpoison_filter_dev_major;
     778             : extern u32 hwpoison_filter_dev_minor;
     779             : extern u64 hwpoison_filter_flags_mask;
     780             : extern u64 hwpoison_filter_flags_value;
     781             : extern u64 hwpoison_filter_memcg;
     782             : extern u32 hwpoison_filter_enable;
     783             : 
     784             : extern unsigned long  __must_check vm_mmap_pgoff(struct file *, unsigned long,
     785             :         unsigned long, unsigned long,
     786             :         unsigned long, unsigned long);
     787             : 
     788             : extern void set_pageblock_order(void);
     789             : unsigned long reclaim_pages(struct list_head *folio_list);
     790             : unsigned int reclaim_clean_pages_from_list(struct zone *zone,
     791             :                                             struct list_head *folio_list);
     792             : /* The ALLOC_WMARK bits are used as an index to zone->watermark */
     793             : #define ALLOC_WMARK_MIN         WMARK_MIN
     794             : #define ALLOC_WMARK_LOW         WMARK_LOW
     795             : #define ALLOC_WMARK_HIGH        WMARK_HIGH
     796             : #define ALLOC_NO_WATERMARKS     0x04 /* don't check watermarks at all */
     797             : 
     798             : /* Mask to get the watermark bits */
     799             : #define ALLOC_WMARK_MASK        (ALLOC_NO_WATERMARKS-1)
     800             : 
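A hedged sketch of the intended indexing (hypothetical caller; "zone" and "alloc_flags" are assumed variables, and wmark_pages() is assumed to be the usual zone-watermark accessor):

        /* ALLOC_WMARK_MASK == 0x3, so the AND yields WMARK_MIN, WMARK_LOW
         * or WMARK_HIGH and selects the watermark to check against. */
        unsigned long mark = wmark_pages(zone, alloc_flags & ALLOC_WMARK_MASK);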
     801             : /*
      802             :  * Only MMU archs have async oom victim reclaim - aka the oom_reaper - so
      803             :  * we cannot assume that reduced access to memory reserves is sufficient
      804             :  * for !MMU.
     805             :  */
     806             : #ifdef CONFIG_MMU
     807             : #define ALLOC_OOM               0x08
     808             : #else
     809             : #define ALLOC_OOM               ALLOC_NO_WATERMARKS
     810             : #endif
     811             : 
     812             : #define ALLOC_NON_BLOCK          0x10 /* Caller cannot block. Allow access
     813             :                                        * to 25% of the min watermark or
     814             :                                        * 62.5% if __GFP_HIGH is set.
     815             :                                        */
     816             : #define ALLOC_MIN_RESERVE        0x20 /* __GFP_HIGH set. Allow access to 50%
     817             :                                        * of the min watermark.
     818             :                                        */
     819             : #define ALLOC_CPUSET             0x40 /* check for correct cpuset */
     820             : #define ALLOC_CMA                0x80 /* allow allocations from CMA areas */
     821             : #ifdef CONFIG_ZONE_DMA32
     822             : #define ALLOC_NOFRAGMENT        0x100 /* avoid mixing pageblock types */
     823             : #else
     824             : #define ALLOC_NOFRAGMENT          0x0
     825             : #endif
     826             : #define ALLOC_HIGHATOMIC        0x200 /* Allows access to MIGRATE_HIGHATOMIC */
     827             : #define ALLOC_KSWAPD            0x800 /* allow waking of kswapd, __GFP_KSWAPD_RECLAIM set */
     828             : 
     829             : /* Flags that allow allocations below the min watermark. */
     830             : #define ALLOC_RESERVES (ALLOC_NON_BLOCK|ALLOC_MIN_RESERVE|ALLOC_HIGHATOMIC|ALLOC_OOM)
     831             : 
     832             : enum ttu_flags;
     833             : struct tlbflush_unmap_batch;
     834             : 
     835             : 
     836             : /*
     837             :  * only for MM internal work items which do not depend on
     838             :  * any allocations or locks which might depend on allocations
     839             :  */
     840             : extern struct workqueue_struct *mm_percpu_wq;
     841             : 
     842             : #ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
     843             : void try_to_unmap_flush(void);
     844             : void try_to_unmap_flush_dirty(void);
     845             : void flush_tlb_batched_pending(struct mm_struct *mm);
     846             : #else
     847             : static inline void try_to_unmap_flush(void)
     848             : {
     849             : }
     850             : static inline void try_to_unmap_flush_dirty(void)
     851             : {
     852             : }
     853             : static inline void flush_tlb_batched_pending(struct mm_struct *mm)
     854             : {
     855             : }
     856             : #endif /* CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH */
     857             : 
     858             : extern const struct trace_print_flags pageflag_names[];
     859             : extern const struct trace_print_flags pagetype_names[];
     860             : extern const struct trace_print_flags vmaflag_names[];
     861             : extern const struct trace_print_flags gfpflag_names[];
     862             : 
     863             : static inline bool is_migrate_highatomic(enum migratetype migratetype)
     864             : {
     865             :         return migratetype == MIGRATE_HIGHATOMIC;
     866             : }
     867             : 
     868             : static inline bool is_migrate_highatomic_page(struct page *page)
     869             : {
     870             :         return get_pageblock_migratetype(page) == MIGRATE_HIGHATOMIC;
     871             : }
     872             : 
     873             : void setup_zone_pageset(struct zone *zone);
     874             : 
     875             : struct migration_target_control {
     876             :         int nid;                /* preferred node id */
     877             :         nodemask_t *nmask;
     878             :         gfp_t gfp_mask;
     879             : };
     880             : 
     881             : /*
     882             :  * mm/filemap.c
     883             :  */
     884             : size_t splice_folio_into_pipe(struct pipe_inode_info *pipe,
     885             :                               struct folio *folio, loff_t fpos, size_t size);
     886             : 
     887             : /*
     888             :  * mm/vmalloc.c
     889             :  */
     890             : #ifdef CONFIG_MMU
     891             : void __init vmalloc_init(void);
     892             : int __must_check vmap_pages_range_noflush(unsigned long addr, unsigned long end,
     893             :                 pgprot_t prot, struct page **pages, unsigned int page_shift);
     894             : #else
     895             : static inline void vmalloc_init(void)
     896             : {
     897             : }
     898             : 
     899             : static inline
     900             : int __must_check vmap_pages_range_noflush(unsigned long addr, unsigned long end,
     901             :                 pgprot_t prot, struct page **pages, unsigned int page_shift)
     902             : {
     903             :         return -EINVAL;
     904             : }
     905             : #endif
     906             : 
     907             : int __must_check __vmap_pages_range_noflush(unsigned long addr,
     908             :                                unsigned long end, pgprot_t prot,
     909             :                                struct page **pages, unsigned int page_shift);
     910             : 
     911             : void vunmap_range_noflush(unsigned long start, unsigned long end);
     912             : 
     913             : void __vunmap_range_noflush(unsigned long start, unsigned long end);
     914             : 
     915             : int numa_migrate_prep(struct page *page, struct vm_area_struct *vma,
     916             :                       unsigned long addr, int page_nid, int *flags);
     917             : 
     918             : void free_zone_device_page(struct page *page);
     919             : int migrate_device_coherent_page(struct page *page);
     920             : 
     921             : /*
     922             :  * mm/gup.c
     923             :  */
     924             : struct folio *try_grab_folio(struct page *page, int refs, unsigned int flags);
     925             : int __must_check try_grab_page(struct page *page, unsigned int flags);
     926             : 
     927             : enum {
     928             :         /* mark page accessed */
     929             :         FOLL_TOUCH = 1 << 16,
     930             :         /* a retry, previous pass started an IO */
     931             :         FOLL_TRIED = 1 << 17,
     932             :         /* we are working on non-current tsk/mm */
     933             :         FOLL_REMOTE = 1 << 18,
     934             :         /* pages must be released via unpin_user_page */
     935             :         FOLL_PIN = 1 << 19,
     936             :         /* gup_fast: prevent fall-back to slow gup */
     937             :         FOLL_FAST_ONLY = 1 << 20,
     938             :         /* allow unlocking the mmap lock */
     939             :         FOLL_UNLOCKABLE = 1 << 21,
     940             : };
     941             : 
     942             : /*
      943             :  * Indicates whether, for pages that are write-protected in the page table,
      944             :  * GUP has to trigger unsharing via FAULT_FLAG_UNSHARE such that the
     945             :  * GUP pin will remain consistent with the pages mapped into the page tables
     946             :  * of the MM.
     947             :  *
     948             :  * Temporary unmapping of PageAnonExclusive() pages or clearing of
     949             :  * PageAnonExclusive() has to protect against concurrent GUP:
     950             :  * * Ordinary GUP: Using the PT lock
     951             :  * * GUP-fast and fork(): mm->write_protect_seq
     952             :  * * GUP-fast and KSM or temporary unmapping (swap, migration): see
     953             :  *    page_try_share_anon_rmap()
     954             :  *
     955             :  * Must be called with the (sub)page that's actually referenced via the
     956             :  * page table entry, which might not necessarily be the head page for a
     957             :  * PTE-mapped THP.
     958             :  *
     959             :  * If the vma is NULL, we're coming from the GUP-fast path and might have
     960             :  * to fallback to the slow path just to lookup the vma.
     961             :  */
     962             : static inline bool gup_must_unshare(struct vm_area_struct *vma,
     963             :                                     unsigned int flags, struct page *page)
     964             : {
     965             :         /*
     966             :          * FOLL_WRITE is implicitly handled correctly as the page table entry
     967             :          * has to be writable -- and if it references (part of) an anonymous
     968             :          * folio, that part is required to be marked exclusive.
     969             :          */
     970             :         if ((flags & (FOLL_WRITE | FOLL_PIN)) != FOLL_PIN)
     971             :                 return false;
     972             :         /*
     973             :          * Note: PageAnon(page) is stable until the page is actually getting
     974             :          * freed.
     975             :          */
     976             :         if (!PageAnon(page)) {
     977             :                 /*
      978             :                  * We only care about R/O long-term pinning: R/O short-term
     979             :                  * pinning does not have the semantics to observe successive
     980             :                  * changes through the process page tables.
     981             :                  */
     982             :                 if (!(flags & FOLL_LONGTERM))
     983             :                         return false;
     984             : 
     985             :                 /* We really need the vma ... */
     986             :                 if (!vma)
     987             :                         return true;
     988             : 
     989             :                 /*
     990             :                  * ... because we only care about writable private ("COW")
     991             :                  * mappings where we have to break COW early.
     992             :                  */
     993             :                 return is_cow_mapping(vma->vm_flags);
     994             :         }
     995             : 
     996             :         /* Paired with a memory barrier in page_try_share_anon_rmap(). */
     997             :         if (IS_ENABLED(CONFIG_HAVE_FAST_GUP))
     998             :                 smp_rmb();
     999             : 
    1000             :         /*
     1001             :          * Note that PageKsm() pages cannot be exclusive and, consequently,
    1002             :          * cannot get pinned.
    1003             :          */
    1004             :         return !PageAnonExclusive(page);
    1005             : }
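
/*
 * Editorial sketch, not the actual mm/gup.c code: a PTE-level GUP walk
 * would consult gup_must_unshare() for write-protected entries and bail
 * out so its caller can retry the fault with FAULT_FLAG_UNSHARE. The
 * helper name is hypothetical and -EMLINK is only an assumed "retry via
 * unshare fault" convention.
 */
static inline struct page *gup_follow_pte_sketch(struct vm_area_struct *vma,
						 pte_t pte, unsigned int flags)
{
	struct page *page = pte_page(pte);

	if (!pte_write(pte) && gup_must_unshare(vma, flags, page))
		return ERR_PTR(-EMLINK);	/* let the caller trigger unsharing */

	return page;
}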
    1006             : 
    1007             : extern bool mirrored_kernelcore;
    1008             : 
    1009             : static inline bool vma_soft_dirty_enabled(struct vm_area_struct *vma)
    1010             : {
    1011             :         /*
     1012             :          * NOTE: we must check this before testing VM_SOFTDIRTY, because
     1013             :          * when soft-dirty tracking is not compiled in, VM_SOFTDIRTY is
     1014             :          * defined as 0x0, and !(vm_flags & VM_SOFTDIRTY) would then
     1015             :          * always be true.
    1016             :          */
    1017             :         if (!IS_ENABLED(CONFIG_MEM_SOFT_DIRTY))
    1018             :                 return false;
    1019             : 
    1020             :         /*
     1021             :          * Soft-dirty is kind of special: its tracking is enabled when the
     1022             :          * VM_SOFTDIRTY vma flag is *not* set.
    1023             :          */
    1024             :         return !(vma->vm_flags & VM_SOFTDIRTY);
    1025             : }
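
/*
 * Editorial sketch, hypothetical helper: a pagemap-style reporter would
 * consult vma_soft_dirty_enabled() first, and only then look at the
 * per-PTE soft-dirty bit.
 */
static inline bool pte_is_reported_soft_dirty(struct vm_area_struct *vma,
					      pte_t pte)
{
	if (!vma_soft_dirty_enabled(vma))
		return false;		/* tracking disabled for this vma */

	return pte_soft_dirty(pte);
}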
    1026             : 
    1027             : /*
    1028             :  * VMA Iterator functions shared between nommu and mmap
    1029             :  */
    1030             : static inline int vma_iter_prealloc(struct vma_iterator *vmi)
    1031             : {
    1032             :         return mas_preallocate(&vmi->mas, GFP_KERNEL);
    1033             : }
    1034             : 
    1035             : static inline void vma_iter_clear(struct vma_iterator *vmi,
    1036             :                                   unsigned long start, unsigned long end)
    1037             : {
    1038             :         mas_set_range(&vmi->mas, start, end - 1);
    1039             :         mas_store_prealloc(&vmi->mas, NULL);
    1040             : }
    1041             : 
    1042             : static inline struct vm_area_struct *vma_iter_load(struct vma_iterator *vmi)
    1043             : {
    1044             :         return mas_walk(&vmi->mas);
    1045             : }
    1046             : 
    1047             : /* Store a VMA with preallocated memory */
    1048             : static inline void vma_iter_store(struct vma_iterator *vmi,
    1049             :                                   struct vm_area_struct *vma)
    1050             : {
    1051             : 
    1052             : #if defined(CONFIG_DEBUG_VM_MAPLE_TREE)
    1053             :         if (MAS_WARN_ON(&vmi->mas, vmi->mas.node != MAS_START &&
    1054             :                         vmi->mas.index > vma->vm_start)) {
     1055             :                 pr_warn("%lx > %lx\nstore vma %lx-%lx\ninto slot %lx-%lx\n",
    1056             :                         vmi->mas.index, vma->vm_start, vma->vm_start,
    1057             :                         vma->vm_end, vmi->mas.index, vmi->mas.last);
    1058             :         }
    1059             :         if (MAS_WARN_ON(&vmi->mas, vmi->mas.node != MAS_START &&
    1060             :                         vmi->mas.last <  vma->vm_start)) {
    1061             :                 pr_warn("%lx < %lx\nstore vma %lx-%lx\ninto slot %lx-%lx\n",
    1062             :                        vmi->mas.last, vma->vm_start, vma->vm_start, vma->vm_end,
    1063             :                        vmi->mas.index, vmi->mas.last);
    1064             :         }
    1065             : #endif
    1066             : 
    1067             :         if (vmi->mas.node != MAS_START &&
    1068             :             ((vmi->mas.index > vma->vm_start) || (vmi->mas.last < vma->vm_start)))
    1069             :                 vma_iter_invalidate(vmi);
    1070             : 
    1071             :         vmi->mas.index = vma->vm_start;
    1072             :         vmi->mas.last = vma->vm_end - 1;
    1073             :         mas_store_prealloc(&vmi->mas, vma);
    1074             : }
    1075             : 
    1076             : static inline int vma_iter_store_gfp(struct vma_iterator *vmi,
    1077             :                         struct vm_area_struct *vma, gfp_t gfp)
    1078             : {
    1079             :         if (vmi->mas.node != MAS_START &&
    1080             :             ((vmi->mas.index > vma->vm_start) || (vmi->mas.last < vma->vm_start)))
    1081             :                 vma_iter_invalidate(vmi);
    1082             : 
    1083             :         vmi->mas.index = vma->vm_start;
    1084             :         vmi->mas.last = vma->vm_end - 1;
    1085             :         mas_store_gfp(&vmi->mas, vma, gfp);
    1086             :         if (unlikely(mas_is_err(&vmi->mas)))
    1087             :                 return -ENOMEM;
    1088             : 
    1089             :         return 0;
    1090             : }
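
/*
 * Editorial sketch, hypothetical helper (the real callers of these VMA
 * iterator functions are in mm/mmap.c and mm/nommu.c): the usual pattern
 * is to preallocate maple tree nodes up front so that the store itself,
 * done under locks, cannot fail.
 */
static inline int vma_iter_insert_sketch(struct vma_iterator *vmi,
					 struct vm_area_struct *vma)
{
	if (vma_iter_prealloc(vmi))
		return -ENOMEM;		/* node preallocation failed */

	/* Consumes the preallocated nodes; range is taken from the VMA. */
	vma_iter_store(vmi, vma);
	return 0;
}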
    1091             : 
    1092             : /*
    1093             :  * VMA lock generalization
    1094             :  */
    1095             : struct vma_prepare {
    1096             :         struct vm_area_struct *vma;
    1097             :         struct vm_area_struct *adj_next;
    1098             :         struct file *file;
    1099             :         struct address_space *mapping;
    1100             :         struct anon_vma *anon_vma;
    1101             :         struct vm_area_struct *insert;
    1102             :         struct vm_area_struct *remove;
    1103             :         struct vm_area_struct *remove2;
    1104             : };
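
/*
 * Editorial sketch, hypothetical helper (the kernel's own initialisation
 * of struct vma_prepare is done in mm/mmap.c): populating the structure
 * for a simple change to a single VMA, with no adjacent, inserted or
 * removed VMAs involved.
 */
static inline void vma_prepare_single_sketch(struct vma_prepare *vp,
					     struct vm_area_struct *vma)
{
	memset(vp, 0, sizeof(*vp));	/* adj_next/insert/remove/remove2 stay NULL */
	vp->vma = vma;
	vp->anon_vma = vma->anon_vma;
	vp->file = vma->vm_file;
	if (vp->file)
		vp->mapping = vma->vm_file->f_mapping;
}
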
    1105             : #endif  /* __MM_INTERNAL_H */

Generated by: LCOV version 1.14