LCOV - code coverage report
Current view: top level - fs - userfaultfd.c (source / functions)
Test:         fstests of 6.5.0-rc3-djwx @ Mon Jul 31 20:08:22 PDT 2023
Date:         2023-07-31 20:08:22

                  Hit    Total    Coverage
Lines:             41      996       4.1 %
Functions:          8       44      18.2 %

          Line data    Source code
       1             : // SPDX-License-Identifier: GPL-2.0-only
       2             : /*
       3             :  *  fs/userfaultfd.c
       4             :  *
       5             :  *  Copyright (C) 2007  Davide Libenzi <davidel@xmailserver.org>
       6             :  *  Copyright (C) 2008-2009 Red Hat, Inc.
       7             :  *  Copyright (C) 2015  Red Hat, Inc.
       8             :  *
       9             :  *  Some part derived from fs/eventfd.c (anon inode setup) and
      10             :  *  mm/ksm.c (mm hashing).
      11             :  */
      12             : 
      13             : #include <linux/list.h>
      14             : #include <linux/hashtable.h>
      15             : #include <linux/sched/signal.h>
      16             : #include <linux/sched/mm.h>
      17             : #include <linux/mm.h>
      18             : #include <linux/mm_inline.h>
      19             : #include <linux/mmu_notifier.h>
      20             : #include <linux/poll.h>
      21             : #include <linux/slab.h>
      22             : #include <linux/seq_file.h>
      23             : #include <linux/file.h>
      24             : #include <linux/bug.h>
      25             : #include <linux/anon_inodes.h>
      26             : #include <linux/syscalls.h>
      27             : #include <linux/userfaultfd_k.h>
      28             : #include <linux/mempolicy.h>
      29             : #include <linux/ioctl.h>
      30             : #include <linux/security.h>
      31             : #include <linux/hugetlb.h>
      32             : #include <linux/swapops.h>
      33             : #include <linux/miscdevice.h>
      34             : 
      35             : static int sysctl_unprivileged_userfaultfd __read_mostly;
      36             : 
      37             : #ifdef CONFIG_SYSCTL
      38             : static struct ctl_table vm_userfaultfd_table[] = {
      39             :         {
      40             :                 .procname       = "unprivileged_userfaultfd",
      41             :                 .data           = &sysctl_unprivileged_userfaultfd,
      42             :                 .maxlen         = sizeof(sysctl_unprivileged_userfaultfd),
      43             :                 .mode           = 0644,
      44             :                 .proc_handler   = proc_dointvec_minmax,
      45             :                 .extra1         = SYSCTL_ZERO,
      46             :                 .extra2         = SYSCTL_ONE,
      47             :         },
      48             :         { }
      49             : };
      50             : #endif
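
As context for the table above: the knob controls whether unprivileged processes may call userfaultfd(2) (with the default of 0, CAP_SYS_PTRACE is required unless UFFD_USER_MODE_ONLY is used). A minimal userspace sketch, assuming the table is registered under the "vm" sysctl directory so the knob appears as /proc/sys/vm/unprivileged_userfaultfd, that reads the current value:

#include <stdio.h>

int main(void)
{
        /* Assumed path; the table above is registered under "vm" later in this file. */
        FILE *f = fopen("/proc/sys/vm/unprivileged_userfaultfd", "r");
        int val;

        if (!f) {
                perror("fopen");
                return 1;
        }
        if (fscanf(f, "%d", &val) == 1)
                printf("unprivileged_userfaultfd = %d\n", val);
        fclose(f);
        return 0;
}
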
      51             : 
      52             : static struct kmem_cache *userfaultfd_ctx_cachep __read_mostly;
      53             : 
      54             : /*
      55             :  * Start with fault_pending_wqh and fault_wqh so they're more likely
      56             :  * to be in the same cacheline.
      57             :  *
      58             :  * Locking order:
      59             :  *      fd_wqh.lock
      60             :  *              fault_pending_wqh.lock
      61             :  *                      fault_wqh.lock
      62             :  *              event_wqh.lock
      63             :  *
      64             :  * To avoid deadlocks, IRQs must be disabled when taking any of the above locks,
      65             :  * since fd_wqh.lock is taken by aio_poll() while it's holding a lock that's
      66             :  * also taken in IRQ context.
      67             :  */
      68             : struct userfaultfd_ctx {
      69             :         /* waitqueue head for the pending (i.e. not read) userfaults */
      70             :         wait_queue_head_t fault_pending_wqh;
      71             :         /* waitqueue head for the userfaults */
      72             :         wait_queue_head_t fault_wqh;
      73             :         /* waitqueue head for the pseudo fd to wakeup poll/read */
      74             :         wait_queue_head_t fd_wqh;
      75             :         /* waitqueue head for events */
      76             :         wait_queue_head_t event_wqh;
      77             :         /* a refile sequence protected by fault_pending_wqh lock */
      78             :         seqcount_spinlock_t refile_seq;
      79             :         /* pseudo fd refcounting */
      80             :         refcount_t refcount;
      81             :         /* userfaultfd syscall flags */
      82             :         unsigned int flags;
      83             :         /* features requested from the userspace */
      84             :         unsigned int features;
      85             :         /* released */
      86             :         bool released;
      87             :         /* memory mappings are changing because of non-cooperative event */
      88             :         atomic_t mmap_changing;
       89             :         /* mm with one or more vmas attached to this userfaultfd_ctx */
      90             :         struct mm_struct *mm;
      91             : };
      92             : 
      93             : struct userfaultfd_fork_ctx {
      94             :         struct userfaultfd_ctx *orig;
      95             :         struct userfaultfd_ctx *new;
      96             :         struct list_head list;
      97             : };
      98             : 
      99             : struct userfaultfd_unmap_ctx {
     100             :         struct userfaultfd_ctx *ctx;
     101             :         unsigned long start;
     102             :         unsigned long end;
     103             :         struct list_head list;
     104             : };
     105             : 
     106             : struct userfaultfd_wait_queue {
     107             :         struct uffd_msg msg;
     108             :         wait_queue_entry_t wq;
     109             :         struct userfaultfd_ctx *ctx;
     110             :         bool waken;
     111             : };
     112             : 
     113             : struct userfaultfd_wake_range {
     114             :         unsigned long start;
     115             :         unsigned long len;
     116             : };
     117             : 
     118             : /* internal indication that UFFD_API ioctl was successfully executed */
     119             : #define UFFD_FEATURE_INITIALIZED                (1u << 31)
     120             : 
     121             : static bool userfaultfd_is_initialized(struct userfaultfd_ctx *ctx)
     122             : {
     123           0 :         return ctx->features & UFFD_FEATURE_INITIALIZED;
     124             : }
     125             : 
     126             : /*
     127             :  * Whether WP_UNPOPULATED is enabled on the uffd context.  It is only
     128             :  * meaningful when userfaultfd_wp()==true on the vma and when it's
     129             :  * anonymous.
     130             :  */
     131           0 : bool userfaultfd_wp_unpopulated(struct vm_area_struct *vma)
     132             : {
     133           0 :         struct userfaultfd_ctx *ctx = vma->vm_userfaultfd_ctx.ctx;
     134             : 
     135           0 :         if (!ctx)
     136             :                 return false;
     137             : 
     138           0 :         return ctx->features & UFFD_FEATURE_WP_UNPOPULATED;
     139             : }
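
userfaultfd_wp_unpopulated() only returns true when userspace asked for UFFD_FEATURE_WP_UNPOPULATED during the UFFDIO_API handshake. A minimal sketch of that handshake, assuming a uapi header recent enough (v6.4+) to define the feature bit:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <linux/userfaultfd.h>

int main(void)
{
        struct uffdio_api api = {
                .api = UFFD_API,
                .features = UFFD_FEATURE_WP_UNPOPULATED,
        };
        int fd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK);

        if (fd < 0) {
                perror("userfaultfd");
                return 1;
        }
        /* Fails with EINVAL if the running kernel does not support the requested feature. */
        if (ioctl(fd, UFFDIO_API, &api)) {
                perror("UFFDIO_API");
                return 1;
        }
        printf("kernel-supported features: 0x%llx\n",
               (unsigned long long)api.features);
        close(fd);
        return 0;
}
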
     140             : 
     141  1161104890 : static void userfaultfd_set_vm_flags(struct vm_area_struct *vma,
     142             :                                      vm_flags_t flags)
     143             : {
     144  1161104890 :         const bool uffd_wp_changed = (vma->vm_flags ^ flags) & VM_UFFD_WP;
     145             : 
     146  1161104890 :         vm_flags_reset(vma, flags);
     147             :         /*
     148             :          * For shared mappings, we want to enable writenotify while
     149             :          * userfaultfd-wp is enabled (see vma_wants_writenotify()). We'll simply
     150             :          * recalculate vma->vm_page_prot whenever userfaultfd-wp changes.
     151             :          */
     152  1161076610 :         if ((vma->vm_flags & VM_SHARED) && uffd_wp_changed)
     153           0 :                 vma_set_page_prot(vma);
     154  1161076610 : }
     155             : 
     156           0 : static int userfaultfd_wake_function(wait_queue_entry_t *wq, unsigned mode,
     157             :                                      int wake_flags, void *key)
     158             : {
     159           0 :         struct userfaultfd_wake_range *range = key;
     160           0 :         int ret;
     161           0 :         struct userfaultfd_wait_queue *uwq;
     162           0 :         unsigned long start, len;
     163             : 
     164           0 :         uwq = container_of(wq, struct userfaultfd_wait_queue, wq);
     165           0 :         ret = 0;
     166             :         /* len == 0 means wake all */
     167           0 :         start = range->start;
     168           0 :         len = range->len;
     169           0 :         if (len && (start > uwq->msg.arg.pagefault.address ||
     170           0 :                     start + len <= uwq->msg.arg.pagefault.address))
     171           0 :                 goto out;
     172           0 :         WRITE_ONCE(uwq->waken, true);
     173             :         /*
     174             :          * The Program-Order guarantees provided by the scheduler
     175             :          * ensure uwq->waken is visible before the task is woken.
     176             :          */
     177           0 :         ret = wake_up_state(wq->private, mode);
     178           0 :         if (ret) {
     179             :                 /*
     180             :                  * Wake only once, autoremove behavior.
     181             :                  *
     182             :                  * After the effect of list_del_init is visible to the other
     183             :                  * CPUs, the waitqueue may disappear from under us, see the
     184             :                  * !list_empty_careful() in handle_userfault().
     185             :                  *
     186             :                  * try_to_wake_up() has an implicit smp_mb(), and the
     187             :                  * wq->private is read before calling the extern function
      188             :                  * "wake_up_state" (which in turn calls try_to_wake_up).
     189             :                  */
     190           0 :                 list_del_init(&wq->entry);
     191             :         }
     192           0 : out:
     193           0 :         return ret;
     194             : }
     195             : 
     196             : /**
     197             :  * userfaultfd_ctx_get - Acquires a reference to the internal userfaultfd
     198             :  * context.
     199             :  * @ctx: [in] Pointer to the userfaultfd context.
     200             :  */
     201             : static void userfaultfd_ctx_get(struct userfaultfd_ctx *ctx)
     202             : {
     203           0 :         refcount_inc(&ctx->refcount);
     204             : }
     205             : 
     206             : /**
     207             :  * userfaultfd_ctx_put - Releases a reference to the internal userfaultfd
     208             :  * context.
     209             :  * @ctx: [in] Pointer to userfaultfd context.
     210             :  *
     211             :  * The userfaultfd context reference must have been previously acquired either
     212             :  * with userfaultfd_ctx_get() or userfaultfd_ctx_fdget().
     213             :  */
     214           0 : static void userfaultfd_ctx_put(struct userfaultfd_ctx *ctx)
     215             : {
     216           0 :         if (refcount_dec_and_test(&ctx->refcount)) {
     217           0 :                 VM_BUG_ON(spin_is_locked(&ctx->fault_pending_wqh.lock));
     218           0 :                 VM_BUG_ON(waitqueue_active(&ctx->fault_pending_wqh));
     219           0 :                 VM_BUG_ON(spin_is_locked(&ctx->fault_wqh.lock));
     220           0 :                 VM_BUG_ON(waitqueue_active(&ctx->fault_wqh));
     221           0 :                 VM_BUG_ON(spin_is_locked(&ctx->event_wqh.lock));
     222           0 :                 VM_BUG_ON(waitqueue_active(&ctx->event_wqh));
     223           0 :                 VM_BUG_ON(spin_is_locked(&ctx->fd_wqh.lock));
     224           0 :                 VM_BUG_ON(waitqueue_active(&ctx->fd_wqh));
     225           0 :                 mmdrop(ctx->mm);
     226           0 :                 kmem_cache_free(userfaultfd_ctx_cachep, ctx);
     227             :         }
     228           0 : }
     229             : 
     230           0 : static inline void msg_init(struct uffd_msg *msg)
     231             : {
     232           0 :         BUILD_BUG_ON(sizeof(struct uffd_msg) != 32);
     233             :         /*
      234             :          * Must use memset to zero out the padding or kernel data is
     235             :          * leaked to userland.
     236             :          */
     237           0 :         memset(msg, 0, sizeof(struct uffd_msg));
     238           0 : }
     239             : 
     240           0 : static inline struct uffd_msg userfault_msg(unsigned long address,
     241             :                                             unsigned long real_address,
     242             :                                             unsigned int flags,
     243             :                                             unsigned long reason,
     244             :                                             unsigned int features)
     245             : {
     246           0 :         struct uffd_msg msg;
     247             : 
     248           0 :         msg_init(&msg);
     249           0 :         msg.event = UFFD_EVENT_PAGEFAULT;
     250             : 
     251           0 :         msg.arg.pagefault.address = (features & UFFD_FEATURE_EXACT_ADDRESS) ?
     252           0 :                                     real_address : address;
     253             : 
     254             :         /*
     255             :          * These flags indicate why the userfault occurred:
     256             :          * - UFFD_PAGEFAULT_FLAG_WP indicates a write protect fault.
     257             :          * - UFFD_PAGEFAULT_FLAG_MINOR indicates a minor fault.
     258             :          * - Neither of these flags being set indicates a MISSING fault.
     259             :          *
     260             :          * Separately, UFFD_PAGEFAULT_FLAG_WRITE indicates it was a write
     261             :          * fault. Otherwise, it was a read fault.
     262             :          */
     263           0 :         if (flags & FAULT_FLAG_WRITE)
     264           0 :                 msg.arg.pagefault.flags |= UFFD_PAGEFAULT_FLAG_WRITE;
     265           0 :         if (reason & VM_UFFD_WP)
     266           0 :                 msg.arg.pagefault.flags |= UFFD_PAGEFAULT_FLAG_WP;
     267           0 :         if (reason & VM_UFFD_MINOR)
     268           0 :                 msg.arg.pagefault.flags |= UFFD_PAGEFAULT_FLAG_MINOR;
     269           0 :         if (features & UFFD_FEATURE_THREAD_ID)
     270           0 :                 msg.arg.pagefault.feat.ptid = task_pid_vnr(current);
     271           0 :         return msg;
     272             : }
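
The flag semantics documented in userfault_msg() are what a monitor sees on the other side of the fd. A small helper sketch (hypothetical classify_fault(), assuming the message was already read(2) from a userfaultfd) that decodes them:

#include <stdio.h>
#include <linux/userfaultfd.h>

static void classify_fault(const struct uffd_msg *msg)
{
        unsigned long long flags = msg->arg.pagefault.flags;

        if (msg->event != UFFD_EVENT_PAGEFAULT)
                return;
        if (flags & UFFD_PAGEFAULT_FLAG_WP)
                printf("write-protect fault");
        else if (flags & UFFD_PAGEFAULT_FLAG_MINOR)
                printf("minor fault");
        else
                printf("missing fault");           /* neither WP nor MINOR set */
        printf(" (%s) at 0x%llx\n",
               (flags & UFFD_PAGEFAULT_FLAG_WRITE) ? "write" : "read",
               (unsigned long long)msg->arg.pagefault.address);
}
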
     273             : 
     274             : #ifdef CONFIG_HUGETLB_PAGE
     275             : /*
     276             :  * Same functionality as userfaultfd_must_wait below with modifications for
     277             :  * hugepmd ranges.
     278             :  */
     279           0 : static inline bool userfaultfd_huge_must_wait(struct userfaultfd_ctx *ctx,
     280             :                                          struct vm_area_struct *vma,
     281             :                                          unsigned long address,
     282             :                                          unsigned long flags,
     283             :                                          unsigned long reason)
     284             : {
     285           0 :         pte_t *ptep, pte;
     286           0 :         bool ret = true;
     287             : 
     288           0 :         mmap_assert_locked(ctx->mm);
     289             : 
     290           0 :         ptep = hugetlb_walk(vma, address, vma_mmu_pagesize(vma));
     291           0 :         if (!ptep)
     292           0 :                 goto out;
     293             : 
     294           0 :         ret = false;
     295           0 :         pte = huge_ptep_get(ptep);
     296             : 
     297             :         /*
     298             :          * Lockless access: we're in a wait_event so it's ok if it
     299             :          * changes under us.  PTE markers should be handled the same as none
     300             :          * ptes here.
     301             :          */
     302           0 :         if (huge_pte_none_mostly(pte))
     303             :                 ret = true;
     304           0 :         if (!huge_pte_write(pte) && (reason & VM_UFFD_WP))
     305           0 :                 ret = true;
     306           0 : out:
     307           0 :         return ret;
     308             : }
     309             : #else
     310             : static inline bool userfaultfd_huge_must_wait(struct userfaultfd_ctx *ctx,
     311             :                                          struct vm_area_struct *vma,
     312             :                                          unsigned long address,
     313             :                                          unsigned long flags,
     314             :                                          unsigned long reason)
     315             : {
     316             :         return false;   /* should never get here */
     317             : }
     318             : #endif /* CONFIG_HUGETLB_PAGE */
     319             : 
     320             : /*
      321             :  * Verify the pagetables are still not ok after having registered into
     322             :  * the fault_pending_wqh to avoid userland having to UFFDIO_WAKE any
     323             :  * userfault that has already been resolved, if userfaultfd_read and
     324             :  * UFFDIO_COPY|ZEROPAGE are being run simultaneously on two different
     325             :  * threads.
     326             :  */
     327           0 : static inline bool userfaultfd_must_wait(struct userfaultfd_ctx *ctx,
     328             :                                          unsigned long address,
     329             :                                          unsigned long flags,
     330             :                                          unsigned long reason)
     331             : {
     332           0 :         struct mm_struct *mm = ctx->mm;
     333           0 :         pgd_t *pgd;
     334           0 :         p4d_t *p4d;
     335           0 :         pud_t *pud;
     336           0 :         pmd_t *pmd, _pmd;
     337           0 :         pte_t *pte;
     338           0 :         pte_t ptent;
     339           0 :         bool ret = true;
     340             : 
     341           0 :         mmap_assert_locked(mm);
     342             : 
     343           0 :         pgd = pgd_offset(mm, address);
     344           0 :         if (!pgd_present(*pgd))
     345             :                 goto out;
     346           0 :         p4d = p4d_offset(pgd, address);
     347           0 :         if (!p4d_present(*p4d))
     348           0 :                 goto out;
     349           0 :         pud = pud_offset(p4d, address);
     350           0 :         if (!pud_present(*pud))
     351           0 :                 goto out;
     352           0 :         pmd = pmd_offset(pud, address);
     353           0 : again:
     354           0 :         _pmd = pmdp_get_lockless(pmd);
     355           0 :         if (pmd_none(_pmd))
     356           0 :                 goto out;
     357             : 
     358           0 :         ret = false;
     359           0 :         if (!pmd_present(_pmd) || pmd_devmap(_pmd))
     360           0 :                 goto out;
     361             : 
     362           0 :         if (pmd_trans_huge(_pmd)) {
     363           0 :                 if (!pmd_write(_pmd) && (reason & VM_UFFD_WP))
     364           0 :                         ret = true;
     365           0 :                 goto out;
     366             :         }
     367             : 
     368           0 :         pte = pte_offset_map(pmd, address);
     369           0 :         if (!pte) {
     370           0 :                 ret = true;
     371           0 :                 goto again;
     372             :         }
     373             :         /*
     374             :          * Lockless access: we're in a wait_event so it's ok if it
     375             :          * changes under us.  PTE markers should be handled the same as none
     376             :          * ptes here.
     377             :          */
     378           0 :         ptent = ptep_get(pte);
     379           0 :         if (pte_none_mostly(ptent))
     380             :                 ret = true;
     381           0 :         if (!pte_write(ptent) && (reason & VM_UFFD_WP))
     382           0 :                 ret = true;
     383             :         pte_unmap(pte);
     384             : 
     385           0 : out:
     386           0 :         return ret;
     387             : }
     388             : 
     389             : static inline unsigned int userfaultfd_get_blocking_state(unsigned int flags)
     390             : {
     391           0 :         if (flags & FAULT_FLAG_INTERRUPTIBLE)
     392             :                 return TASK_INTERRUPTIBLE;
     393             : 
     394           0 :         if (flags & FAULT_FLAG_KILLABLE)
     395           0 :                 return TASK_KILLABLE;
     396             : 
     397             :         return TASK_UNINTERRUPTIBLE;
     398             : }
     399             : 
     400             : /*
     401             :  * The locking rules involved in returning VM_FAULT_RETRY depending on
     402             :  * FAULT_FLAG_ALLOW_RETRY, FAULT_FLAG_RETRY_NOWAIT and
     403             :  * FAULT_FLAG_KILLABLE are not straightforward. The "Caution"
     404             :  * recommendation in __lock_page_or_retry is not an understatement.
     405             :  *
     406             :  * If FAULT_FLAG_ALLOW_RETRY is set, the mmap_lock must be released
     407             :  * before returning VM_FAULT_RETRY only if FAULT_FLAG_RETRY_NOWAIT is
     408             :  * not set.
     409             :  *
     410             :  * If FAULT_FLAG_ALLOW_RETRY is set but FAULT_FLAG_KILLABLE is not
     411             :  * set, VM_FAULT_RETRY can still be returned if and only if there are
     412             :  * fatal_signal_pending()s, and the mmap_lock must be released before
     413             :  * returning it.
     414             :  */
     415           0 : vm_fault_t handle_userfault(struct vm_fault *vmf, unsigned long reason)
     416             : {
     417           0 :         struct vm_area_struct *vma = vmf->vma;
     418           0 :         struct mm_struct *mm = vma->vm_mm;
     419           0 :         struct userfaultfd_ctx *ctx;
     420           0 :         struct userfaultfd_wait_queue uwq;
     421           0 :         vm_fault_t ret = VM_FAULT_SIGBUS;
     422           0 :         bool must_wait;
     423           0 :         unsigned int blocking_state;
     424             : 
     425             :         /*
     426             :          * We don't do userfault handling for the final child pid update.
     427             :          *
     428             :          * We also don't do userfault handling during
     429             :          * coredumping. hugetlbfs has the special
     430             :          * follow_hugetlb_page() to skip missing pages in the
     431             :          * FOLL_DUMP case, anon memory also checks for FOLL_DUMP with
     432             :          * the no_page_table() helper in follow_page_mask(), but the
     433             :          * shmem_vm_ops->fault method is invoked even during
     434             :          * coredumping without mmap_lock and it ends up here.
     435             :          */
     436           0 :         if (current->flags & (PF_EXITING|PF_DUMPCORE))
     437           0 :                 goto out;
     438             : 
     439             :         /*
     440             :          * Coredumping runs without mmap_lock so we can only check that
     441             :          * the mmap_lock is held, if PF_DUMPCORE was not set.
     442             :          */
     443           0 :         mmap_assert_locked(mm);
     444             : 
     445           0 :         ctx = vma->vm_userfaultfd_ctx.ctx;
     446           0 :         if (!ctx)
     447           0 :                 goto out;
     448             : 
     449           0 :         BUG_ON(ctx->mm != mm);
     450             : 
     451             :         /* Any unrecognized flag is a bug. */
     452           0 :         VM_BUG_ON(reason & ~__VM_UFFD_FLAGS);
     453             :         /* 0 or > 1 flags set is a bug; we expect exactly 1. */
     454           0 :         VM_BUG_ON(!reason || (reason & (reason - 1)));
     455             : 
     456           0 :         if (ctx->features & UFFD_FEATURE_SIGBUS)
     457           0 :                 goto out;
     458           0 :         if (!(vmf->flags & FAULT_FLAG_USER) && (ctx->flags & UFFD_USER_MODE_ONLY))
     459           0 :                 goto out;
     460             : 
     461             :         /*
     462             :          * If it's already released don't get it. This avoids to loop
     463             :          * in __get_user_pages if userfaultfd_release waits on the
     464             :          * caller of handle_userfault to release the mmap_lock.
     465             :          */
     466           0 :         if (unlikely(READ_ONCE(ctx->released))) {
     467             :                 /*
     468             :                  * Don't return VM_FAULT_SIGBUS in this case, so a non
     469             :                  * cooperative manager can close the uffd after the
     470             :                  * last UFFDIO_COPY, without risking to trigger an
     471             :                  * involuntary SIGBUS if the process was starting the
     472             :                  * userfaultfd while the userfaultfd was still armed
     473             :                  * (but after the last UFFDIO_COPY). If the uffd
     474             :                  * wasn't already closed when the userfault reached
     475             :                  * this point, that would normally be solved by
     476             :                  * userfaultfd_must_wait returning 'false'.
     477             :                  *
     478             :                  * If we were to return VM_FAULT_SIGBUS here, the non
     479             :                  * cooperative manager would be instead forced to
     480             :                  * always call UFFDIO_UNREGISTER before it can safely
     481             :                  * close the uffd.
     482             :                  */
     483           0 :                 ret = VM_FAULT_NOPAGE;
     484           0 :                 goto out;
     485             :         }
     486             : 
     487             :         /*
     488             :          * Check that we can return VM_FAULT_RETRY.
     489             :          *
     490             :          * NOTE: it should become possible to return VM_FAULT_RETRY
     491             :          * even if FAULT_FLAG_TRIED is set without leading to gup()
     492             :          * -EBUSY failures, if the userfaultfd is to be extended for
     493             :          * VM_UFFD_WP tracking and we intend to arm the userfault
     494             :          * without first stopping userland access to the memory. For
     495             :          * VM_UFFD_MISSING userfaults this is enough for now.
     496             :          */
     497           0 :         if (unlikely(!(vmf->flags & FAULT_FLAG_ALLOW_RETRY))) {
     498             :                 /*
     499             :                  * Validate the invariant that nowait must allow retry
     500             :                  * to be sure not to return SIGBUS erroneously on
     501             :                  * nowait invocations.
     502             :                  */
     503           0 :                 BUG_ON(vmf->flags & FAULT_FLAG_RETRY_NOWAIT);
     504             : #ifdef CONFIG_DEBUG_VM
     505           0 :                 if (printk_ratelimit()) {
     506           0 :                         printk(KERN_WARNING
     507             :                                "FAULT_FLAG_ALLOW_RETRY missing %x\n",
     508             :                                vmf->flags);
     509           0 :                         dump_stack();
     510             :                 }
     511             : #endif
     512           0 :                 goto out;
     513             :         }
     514             : 
     515             :         /*
     516             :          * Handle nowait, not much to do other than tell it to retry
     517             :          * and wait.
     518             :          */
     519           0 :         ret = VM_FAULT_RETRY;
     520           0 :         if (vmf->flags & FAULT_FLAG_RETRY_NOWAIT)
     521           0 :                 goto out;
     522             : 
     523             :         /* take the reference before dropping the mmap_lock */
     524           0 :         userfaultfd_ctx_get(ctx);
     525             : 
     526           0 :         init_waitqueue_func_entry(&uwq.wq, userfaultfd_wake_function);
     527           0 :         uwq.wq.private = current;
     528           0 :         uwq.msg = userfault_msg(vmf->address, vmf->real_address, vmf->flags,
     529             :                                 reason, ctx->features);
     530           0 :         uwq.ctx = ctx;
     531           0 :         uwq.waken = false;
     532             : 
     533           0 :         blocking_state = userfaultfd_get_blocking_state(vmf->flags);
     534             : 
     535             :         /*
     536             :          * Take the vma lock now, in order to safely call
     537             :          * userfaultfd_huge_must_wait() later. Since acquiring the
     538             :          * (sleepable) vma lock can modify the current task state, that
     539             :          * must be before explicitly calling set_current_state().
     540             :          */
     541           0 :         if (is_vm_hugetlb_page(vma))
     542           0 :                 hugetlb_vma_lock_read(vma);
     543             : 
     544           0 :         spin_lock_irq(&ctx->fault_pending_wqh.lock);
     545             :         /*
     546             :          * After the __add_wait_queue the uwq is visible to userland
     547             :          * through poll/read().
     548             :          */
     549           0 :         __add_wait_queue(&ctx->fault_pending_wqh, &uwq.wq);
     550             :         /*
     551             :          * The smp_mb() after __set_current_state prevents the reads
     552             :          * following the spin_unlock to happen before the list_add in
     553             :          * __add_wait_queue.
     554             :          */
     555           0 :         set_current_state(blocking_state);
     556           0 :         spin_unlock_irq(&ctx->fault_pending_wqh.lock);
     557             : 
     558           0 :         if (!is_vm_hugetlb_page(vma))
     559           0 :                 must_wait = userfaultfd_must_wait(ctx, vmf->address, vmf->flags,
     560             :                                                   reason);
     561             :         else
     562           0 :                 must_wait = userfaultfd_huge_must_wait(ctx, vma,
     563           0 :                                                        vmf->address,
     564           0 :                                                        vmf->flags, reason);
     565           0 :         if (is_vm_hugetlb_page(vma))
     566           0 :                 hugetlb_vma_unlock_read(vma);
     567           0 :         mmap_read_unlock(mm);
     568             : 
     569           0 :         if (likely(must_wait && !READ_ONCE(ctx->released))) {
     570           0 :                 wake_up_poll(&ctx->fd_wqh, EPOLLIN);
     571           0 :                 schedule();
     572             :         }
     573             : 
     574           0 :         __set_current_state(TASK_RUNNING);
     575             : 
     576             :         /*
     577             :          * Here we race with the list_del; list_add in
      578             :          * userfaultfd_ctx_read(); however, because we don't ever run
      579             :          * list_del_init() to refile across the two lists, the prev
      580             :          * and next pointers will never point to self. list_add also
      581             :          * would never let either of the two pointers point to
      582             :          * self. So list_empty_careful won't risk seeing both pointers
     583             :          * pointing to self at any time during the list refile. The
     584             :          * only case where list_del_init() is called is the full
     585             :          * removal in the wake function and there we don't re-list_add
     586             :          * and it's fine not to block on the spinlock. The uwq on this
     587             :          * kernel stack can be released after the list_del_init.
     588             :          */
     589           0 :         if (!list_empty_careful(&uwq.wq.entry)) {
     590           0 :                 spin_lock_irq(&ctx->fault_pending_wqh.lock);
     591             :                 /*
     592             :                  * No need of list_del_init(), the uwq on the stack
     593             :                  * will be freed shortly anyway.
     594             :                  */
     595           0 :                 list_del(&uwq.wq.entry);
     596           0 :                 spin_unlock_irq(&ctx->fault_pending_wqh.lock);
     597             :         }
     598             : 
     599             :         /*
     600             :          * ctx may go away after this if the userfault pseudo fd is
     601             :          * already released.
     602             :          */
     603           0 :         userfaultfd_ctx_put(ctx);
     604             : 
     605           0 : out:
     606           0 :         return ret;
     607             : }
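
For orientation, the userspace counterpart of handle_userfault() is the usual register/poll/read/UFFDIO_COPY loop. A minimal sketch, with error handling abbreviated; it assumes permission to create a userfaultfd (see the unprivileged_userfaultfd sysctl above) and is built with -pthread:

#include <fcntl.h>
#include <poll.h>
#include <pthread.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <linux/userfaultfd.h>

static long page;

static void *handler(void *arg)
{
        int uffd = (int)(long)arg;
        struct pollfd pfd = { .fd = uffd, .events = POLLIN };
        struct uffd_msg msg;
        char *src;

        src = mmap(NULL, page, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        memset(src, 0x5a, page);

        poll(&pfd, 1, -1);                      /* wait for a pending fault */
        if (read(uffd, &msg, sizeof(msg)) == sizeof(msg) &&
            msg.event == UFFD_EVENT_PAGEFAULT) {
                struct uffdio_copy copy = {
                        .dst = msg.arg.pagefault.address & ~(page - 1),
                        .src = (unsigned long)src,
                        .len = page,
                };
                ioctl(uffd, UFFDIO_COPY, &copy); /* resolves and wakes the faulting thread */
        }
        return NULL;
}

int main(void)
{
        int uffd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK);
        struct uffdio_api api = { .api = UFFD_API };
        struct uffdio_register reg;
        pthread_t thr;
        char *area;

        page = sysconf(_SC_PAGESIZE);
        if (uffd < 0 || ioctl(uffd, UFFDIO_API, &api))
                return 1;

        area = mmap(NULL, page, PROT_READ | PROT_WRITE,
                    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        reg.range.start = (unsigned long)area;
        reg.range.len = page;
        reg.mode = UFFDIO_REGISTER_MODE_MISSING;
        if (ioctl(uffd, UFFDIO_REGISTER, &reg))
                return 1;

        pthread_create(&thr, NULL, handler, (void *)(long)uffd);

        printf("first byte: 0x%x\n", area[0]);  /* faults; blocks until UFFDIO_COPY */
        pthread_join(thr, NULL);
        return 0;
}
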
     608             : 
     609           0 : static void userfaultfd_event_wait_completion(struct userfaultfd_ctx *ctx,
     610             :                                               struct userfaultfd_wait_queue *ewq)
     611             : {
     612           0 :         struct userfaultfd_ctx *release_new_ctx;
     613             : 
     614           0 :         if (WARN_ON_ONCE(current->flags & PF_EXITING))
     615           0 :                 goto out;
     616             : 
     617           0 :         ewq->ctx = ctx;
     618           0 :         init_waitqueue_entry(&ewq->wq, current);
     619           0 :         release_new_ctx = NULL;
     620             : 
     621           0 :         spin_lock_irq(&ctx->event_wqh.lock);
     622             :         /*
     623             :          * After the __add_wait_queue the uwq is visible to userland
     624             :          * through poll/read().
     625             :          */
     626           0 :         __add_wait_queue(&ctx->event_wqh, &ewq->wq);
     627           0 :         for (;;) {
     628           0 :                 set_current_state(TASK_KILLABLE);
     629           0 :                 if (ewq->msg.event == 0)
     630             :                         break;
     631           0 :                 if (READ_ONCE(ctx->released) ||
     632           0 :                     fatal_signal_pending(current)) {
     633             :                         /*
     634             :                          * &ewq->wq may be queued in fork_event, but
     635             :                          * __remove_wait_queue ignores the head
     636             :                          * parameter. It would be a problem if it
     637             :                          * didn't.
     638             :                          */
     639           0 :                         __remove_wait_queue(&ctx->event_wqh, &ewq->wq);
     640           0 :                         if (ewq->msg.event == UFFD_EVENT_FORK) {
     641           0 :                                 struct userfaultfd_ctx *new;
     642             : 
     643           0 :                                 new = (struct userfaultfd_ctx *)
     644             :                                         (unsigned long)
     645           0 :                                         ewq->msg.arg.reserved.reserved1;
     646           0 :                                 release_new_ctx = new;
     647             :                         }
     648             :                         break;
     649             :                 }
     650             : 
     651           0 :                 spin_unlock_irq(&ctx->event_wqh.lock);
     652             : 
     653           0 :                 wake_up_poll(&ctx->fd_wqh, EPOLLIN);
     654           0 :                 schedule();
     655             : 
     656           0 :                 spin_lock_irq(&ctx->event_wqh.lock);
     657             :         }
     658           0 :         __set_current_state(TASK_RUNNING);
     659           0 :         spin_unlock_irq(&ctx->event_wqh.lock);
     660             : 
     661           0 :         if (release_new_ctx) {
     662           0 :                 struct vm_area_struct *vma;
     663           0 :                 struct mm_struct *mm = release_new_ctx->mm;
     664           0 :                 VMA_ITERATOR(vmi, mm, 0);
     665             : 
     666             :                 /* the various vma->vm_userfaultfd_ctx still points to it */
     667           0 :                 mmap_write_lock(mm);
     668           0 :                 for_each_vma(vmi, vma) {
     669           0 :                         if (vma->vm_userfaultfd_ctx.ctx == release_new_ctx) {
     670           0 :                                 vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
     671           0 :                                 userfaultfd_set_vm_flags(vma,
     672           0 :                                                          vma->vm_flags & ~__VM_UFFD_FLAGS);
     673             :                         }
     674             :                 }
     675           0 :                 mmap_write_unlock(mm);
     676             : 
     677           0 :                 userfaultfd_ctx_put(release_new_ctx);
     678             :         }
     679             : 
     680             :         /*
     681             :          * ctx may go away after this if the userfault pseudo fd is
     682             :          * already released.
     683             :          */
     684           0 : out:
     685           0 :         atomic_dec(&ctx->mmap_changing);
     686           0 :         VM_BUG_ON(atomic_read(&ctx->mmap_changing) < 0);
     687           0 :         userfaultfd_ctx_put(ctx);
     688           0 : }
     689             : 
     690           0 : static void userfaultfd_event_complete(struct userfaultfd_ctx *ctx,
     691             :                                        struct userfaultfd_wait_queue *ewq)
     692             : {
     693           0 :         ewq->msg.event = 0;
     694           0 :         wake_up_locked(&ctx->event_wqh);
     695           0 :         __remove_wait_queue(&ctx->event_wqh, &ewq->wq);
     696           0 : }
     697             : 
     698  1161122422 : int dup_userfaultfd(struct vm_area_struct *vma, struct list_head *fcs)
     699             : {
     700  1161122422 :         struct userfaultfd_ctx *ctx = NULL, *octx;
     701  1161122422 :         struct userfaultfd_fork_ctx *fctx;
     702             : 
     703  1161122422 :         octx = vma->vm_userfaultfd_ctx.ctx;
     704  1161122422 :         if (!octx || !(octx->features & UFFD_FEATURE_EVENT_FORK)) {
     705  1161122422 :                 vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
     706  1161122422 :                 userfaultfd_set_vm_flags(vma, vma->vm_flags & ~__VM_UFFD_FLAGS);
     707  1161122422 :                 return 0;
     708             :         }
     709             : 
     710           0 :         list_for_each_entry(fctx, fcs, list)
     711           0 :                 if (fctx->orig == octx) {
     712           0 :                         ctx = fctx->new;
     713           0 :                         break;
     714             :                 }
     715             : 
     716           0 :         if (!ctx) {
     717           0 :                 fctx = kmalloc(sizeof(*fctx), GFP_KERNEL);
     718           0 :                 if (!fctx)
     719             :                         return -ENOMEM;
     720             : 
     721           0 :                 ctx = kmem_cache_alloc(userfaultfd_ctx_cachep, GFP_KERNEL);
     722           0 :                 if (!ctx) {
     723           0 :                         kfree(fctx);
     724           0 :                         return -ENOMEM;
     725             :                 }
     726             : 
     727           0 :                 refcount_set(&ctx->refcount, 1);
     728           0 :                 ctx->flags = octx->flags;
     729           0 :                 ctx->features = octx->features;
     730           0 :                 ctx->released = false;
     731           0 :                 atomic_set(&ctx->mmap_changing, 0);
     732           0 :                 ctx->mm = vma->vm_mm;
     733           0 :                 mmgrab(ctx->mm);
     734             : 
     735           0 :                 userfaultfd_ctx_get(octx);
     736           0 :                 atomic_inc(&octx->mmap_changing);
     737           0 :                 fctx->orig = octx;
     738           0 :                 fctx->new = ctx;
     739           0 :                 list_add_tail(&fctx->list, fcs);
     740             :         }
     741             : 
     742           0 :         vma->vm_userfaultfd_ctx.ctx = ctx;
     743           0 :         return 0;
     744             : }
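
dup_userfaultfd() above keeps the registration across fork() only when UFFD_FEATURE_EVENT_FORK was negotiated; the monitor then receives a UFFD_EVENT_FORK message whose arg.fork.ufd is a new descriptor for the child's mm. A small helper sketch (hypothetical handle_fork_event()) consuming such a message:

#include <unistd.h>
#include <linux/userfaultfd.h>

static int handle_fork_event(int uffd)
{
        struct uffd_msg msg;

        if (read(uffd, &msg, sizeof(msg)) != sizeof(msg))
                return -1;
        if (msg.event == UFFD_EVENT_FORK)
                return (int)msg.arg.fork.ufd;   /* new uffd servicing the child's mm */
        return -1;
}
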
     745             : 
     746           0 : static void dup_fctx(struct userfaultfd_fork_ctx *fctx)
     747             : {
     748           0 :         struct userfaultfd_ctx *ctx = fctx->orig;
     749           0 :         struct userfaultfd_wait_queue ewq;
     750             : 
     751           0 :         msg_init(&ewq.msg);
     752             : 
     753           0 :         ewq.msg.event = UFFD_EVENT_FORK;
     754           0 :         ewq.msg.arg.reserved.reserved1 = (unsigned long)fctx->new;
     755             : 
     756           0 :         userfaultfd_event_wait_completion(ctx, &ewq);
     757           0 : }
     758             : 
     759    40017755 : void dup_userfaultfd_complete(struct list_head *fcs)
     760             : {
     761    40017755 :         struct userfaultfd_fork_ctx *fctx, *n;
     762             : 
     763    40013349 :         list_for_each_entry_safe(fctx, n, fcs, list) {
     764           0 :                 dup_fctx(fctx);
     765           0 :                 list_del(&fctx->list);
     766           0 :                 kfree(fctx);
     767             :         }
     768    40013349 : }
     769             : 
     770        4848 : void mremap_userfaultfd_prep(struct vm_area_struct *vma,
     771             :                              struct vm_userfaultfd_ctx *vm_ctx)
     772             : {
     773        4848 :         struct userfaultfd_ctx *ctx;
     774             : 
     775        4848 :         ctx = vma->vm_userfaultfd_ctx.ctx;
     776             : 
     777        4848 :         if (!ctx)
     778             :                 return;
     779             : 
     780           0 :         if (ctx->features & UFFD_FEATURE_EVENT_REMAP) {
     781           0 :                 vm_ctx->ctx = ctx;
     782           0 :                 userfaultfd_ctx_get(ctx);
     783           0 :                 atomic_inc(&ctx->mmap_changing);
     784             :         } else {
     785             :                 /* Drop uffd context if remap feature not enabled */
     786           0 :                 vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
     787           0 :                 userfaultfd_set_vm_flags(vma, vma->vm_flags & ~__VM_UFFD_FLAGS);
     788             :         }
     789             : }
     790             : 
     791       25086 : void mremap_userfaultfd_complete(struct vm_userfaultfd_ctx *vm_ctx,
     792             :                                  unsigned long from, unsigned long to,
     793             :                                  unsigned long len)
     794             : {
     795       25086 :         struct userfaultfd_ctx *ctx = vm_ctx->ctx;
     796       25086 :         struct userfaultfd_wait_queue ewq;
     797             : 
     798       25086 :         if (!ctx)
     799       25086 :                 return;
     800             : 
     801           0 :         if (to & ~PAGE_MASK) {
     802           0 :                 userfaultfd_ctx_put(ctx);
     803           0 :                 return;
     804             :         }
     805             : 
     806           0 :         msg_init(&ewq.msg);
     807             : 
     808           0 :         ewq.msg.event = UFFD_EVENT_REMAP;
     809           0 :         ewq.msg.arg.remap.from = from;
     810           0 :         ewq.msg.arg.remap.to = to;
     811           0 :         ewq.msg.arg.remap.len = len;
     812             : 
     813           0 :         userfaultfd_event_wait_completion(ctx, &ewq);
     814             : }
     815             : 
     816    62109621 : bool userfaultfd_remove(struct vm_area_struct *vma,
     817             :                         unsigned long start, unsigned long end)
     818             : {
     819    62109621 :         struct mm_struct *mm = vma->vm_mm;
     820    62109621 :         struct userfaultfd_ctx *ctx;
     821    62109621 :         struct userfaultfd_wait_queue ewq;
     822             : 
     823    62109621 :         ctx = vma->vm_userfaultfd_ctx.ctx;
     824    62109621 :         if (!ctx || !(ctx->features & UFFD_FEATURE_EVENT_REMOVE))
     825             :                 return true;
     826             : 
     827           0 :         userfaultfd_ctx_get(ctx);
     828           0 :         atomic_inc(&ctx->mmap_changing);
     829           0 :         mmap_read_unlock(mm);
     830             : 
     831           0 :         msg_init(&ewq.msg);
     832             : 
     833           0 :         ewq.msg.event = UFFD_EVENT_REMOVE;
     834           0 :         ewq.msg.arg.remove.start = start;
     835           0 :         ewq.msg.arg.remove.end = end;
     836             : 
     837           0 :         userfaultfd_event_wait_completion(ctx, &ewq);
     838             : 
     839           0 :         return false;
     840             : }
     841             : 
     842           0 : static bool has_unmap_ctx(struct userfaultfd_ctx *ctx, struct list_head *unmaps,
     843             :                           unsigned long start, unsigned long end)
     844             : {
     845           0 :         struct userfaultfd_unmap_ctx *unmap_ctx;
     846             : 
     847           0 :         list_for_each_entry(unmap_ctx, unmaps, list)
     848           0 :                 if (unmap_ctx->ctx == ctx && unmap_ctx->start == start &&
     849           0 :                     unmap_ctx->end == end)
     850             :                         return true;
     851             : 
     852             :         return false;
     853             : }
     854             : 
     855   600810074 : int userfaultfd_unmap_prep(struct vm_area_struct *vma, unsigned long start,
     856             :                            unsigned long end, struct list_head *unmaps)
     857             : {
     858   600810074 :         struct userfaultfd_unmap_ctx *unmap_ctx;
     859   600810074 :         struct userfaultfd_ctx *ctx = vma->vm_userfaultfd_ctx.ctx;
     860             : 
     861   600810074 :         if (!ctx || !(ctx->features & UFFD_FEATURE_EVENT_UNMAP) ||
     862           0 :             has_unmap_ctx(ctx, unmaps, start, end))
     863             :                 return 0;
     864             : 
     865           0 :         unmap_ctx = kzalloc(sizeof(*unmap_ctx), GFP_KERNEL);
     866           0 :         if (!unmap_ctx)
     867             :                 return -ENOMEM;
     868             : 
     869           0 :         userfaultfd_ctx_get(ctx);
     870           0 :         atomic_inc(&ctx->mmap_changing);
     871           0 :         unmap_ctx->ctx = ctx;
     872           0 :         unmap_ctx->start = start;
     873           0 :         unmap_ctx->end = end;
     874           0 :         list_add_tail(&unmap_ctx->list, unmaps);
     875             : 
     876           0 :         return 0;
     877             : }
     878             : 
     879  1125187333 : void userfaultfd_unmap_complete(struct mm_struct *mm, struct list_head *uf)
     880             : {
     881  1125187333 :         struct userfaultfd_unmap_ctx *ctx, *n;
     882  1125187333 :         struct userfaultfd_wait_queue ewq;
     883             : 
     884  1125036289 :         list_for_each_entry_safe(ctx, n, uf, list) {
     885           0 :                 msg_init(&ewq.msg);
     886             : 
     887           0 :                 ewq.msg.event = UFFD_EVENT_UNMAP;
     888           0 :                 ewq.msg.arg.remove.start = ctx->start;
     889           0 :                 ewq.msg.arg.remove.end = ctx->end;
     890             : 
     891           0 :                 userfaultfd_event_wait_completion(ctx->ctx, &ewq);
     892             : 
     893           0 :                 list_del(&ctx->list);
     894           0 :                 kfree(ctx);
     895             :         }
     896  1125036289 : }
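
The three paths above (mremap, remove, unmap) all queue non-cooperative events for the monitor. A small helper sketch (hypothetical log_nc_event(), assuming the matching UFFD_FEATURE_EVENT_* bits were negotiated via UFFDIO_API) that decodes them:

#include <stdio.h>
#include <linux/userfaultfd.h>

static void log_nc_event(const struct uffd_msg *msg)
{
        switch (msg->event) {
        case UFFD_EVENT_REMAP:
                printf("remap: 0x%llx -> 0x%llx (len 0x%llx)\n",
                       (unsigned long long)msg->arg.remap.from,
                       (unsigned long long)msg->arg.remap.to,
                       (unsigned long long)msg->arg.remap.len);
                break;
        case UFFD_EVENT_REMOVE:                 /* e.g. madvise(MADV_DONTNEED/MADV_REMOVE) */
        case UFFD_EVENT_UNMAP:                  /* e.g. munmap() */
                printf("%s: [0x%llx, 0x%llx)\n",
                       msg->event == UFFD_EVENT_REMOVE ? "remove" : "unmap",
                       (unsigned long long)msg->arg.remove.start,
                       (unsigned long long)msg->arg.remove.end);
                break;
        }
}
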
     897             : 
     898           0 : static int userfaultfd_release(struct inode *inode, struct file *file)
     899             : {
     900           0 :         struct userfaultfd_ctx *ctx = file->private_data;
     901           0 :         struct mm_struct *mm = ctx->mm;
     902           0 :         struct vm_area_struct *vma, *prev;
     903             :         /* len == 0 means wake all */
     904           0 :         struct userfaultfd_wake_range range = { .len = 0, };
     905           0 :         unsigned long new_flags;
     906           0 :         VMA_ITERATOR(vmi, mm, 0);
     907             : 
     908           0 :         WRITE_ONCE(ctx->released, true);
     909             : 
     910           0 :         if (!mmget_not_zero(mm))
     911           0 :                 goto wakeup;
     912             : 
     913             :         /*
     914             :          * Flush page faults out of all CPUs. NOTE: all page faults
     915             :          * must be retried without returning VM_FAULT_SIGBUS if
     916             :          * userfaultfd_ctx_get() succeeds but vma->vma_userfault_ctx
     917             :          * changes while handle_userfault released the mmap_lock. So
     918             :          * it's critical that released is set to true (above), before
     919             :          * taking the mmap_lock for writing.
     920             :          */
     921           0 :         mmap_write_lock(mm);
     922           0 :         prev = NULL;
     923           0 :         for_each_vma(vmi, vma) {
     924           0 :                 cond_resched();
     925           0 :                 BUG_ON(!!vma->vm_userfaultfd_ctx.ctx ^
     926             :                        !!(vma->vm_flags & __VM_UFFD_FLAGS));
     927           0 :                 if (vma->vm_userfaultfd_ctx.ctx != ctx) {
     928           0 :                         prev = vma;
     929           0 :                         continue;
     930             :                 }
     931           0 :                 new_flags = vma->vm_flags & ~__VM_UFFD_FLAGS;
     932           0 :                 prev = vma_merge(&vmi, mm, prev, vma->vm_start, vma->vm_end,
     933             :                                  new_flags, vma->anon_vma,
     934             :                                  vma->vm_file, vma->vm_pgoff,
     935             :                                  vma_policy(vma),
     936           0 :                                  NULL_VM_UFFD_CTX, anon_vma_name(vma));
     937           0 :                 if (prev) {
     938             :                         vma = prev;
     939             :                 } else {
     940           0 :                         prev = vma;
     941             :                 }
     942             : 
     943           0 :                 userfaultfd_set_vm_flags(vma, new_flags);
     944           0 :                 vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
     945             :         }
     946           0 :         mmap_write_unlock(mm);
     947           0 :         mmput(mm);
     948           0 : wakeup:
     949             :         /*
     950             :          * After no new page faults can wait on this fault_*wqh, flush
     951             :          * the last page faults that may have been already waiting on
     952             :          * the fault_*wqh.
     953             :          */
     954           0 :         spin_lock_irq(&ctx->fault_pending_wqh.lock);
     955           0 :         __wake_up_locked_key(&ctx->fault_pending_wqh, TASK_NORMAL, &range);
     956           0 :         __wake_up(&ctx->fault_wqh, TASK_NORMAL, 1, &range);
     957           0 :         spin_unlock_irq(&ctx->fault_pending_wqh.lock);
     958             : 
     959             :         /* Flush pending events that may still wait on event_wqh */
     960           0 :         wake_up_all(&ctx->event_wqh);
     961             : 
     962           0 :         wake_up_poll(&ctx->fd_wqh, EPOLLHUP);
     963           0 :         userfaultfd_ctx_put(ctx);
     964           0 :         return 0;
     965             : }
     966             : 
      967             : /* fault_pending_wqh.lock must be held by the caller */
     968             : static inline struct userfaultfd_wait_queue *find_userfault_in(
     969             :                 wait_queue_head_t *wqh)
     970             : {
     971           0 :         wait_queue_entry_t *wq;
     972           0 :         struct userfaultfd_wait_queue *uwq;
     973             : 
     974           0 :         lockdep_assert_held(&wqh->lock);
     975             : 
     976           0 :         uwq = NULL;
     977           0 :         if (!waitqueue_active(wqh))
     978           0 :                 goto out;
     979             :         /* walk in reverse to provide FIFO behavior to read userfaults */
     980           0 :         wq = list_last_entry(&wqh->head, typeof(*wq), entry);
     981           0 :         uwq = container_of(wq, struct userfaultfd_wait_queue, wq);
     982             : out:
     983           0 :         return uwq;
     984             : }
     985             : 
     986             : static inline struct userfaultfd_wait_queue *find_userfault(
     987             :                 struct userfaultfd_ctx *ctx)
     988             : {
     989           0 :         return find_userfault_in(&ctx->fault_pending_wqh);
     990             : }
     991             : 
     992             : static inline struct userfaultfd_wait_queue *find_userfault_evt(
     993             :                 struct userfaultfd_ctx *ctx)
     994             : {
     995           0 :         return find_userfault_in(&ctx->event_wqh);
     996             : }
     997             : 
     998           0 : static __poll_t userfaultfd_poll(struct file *file, poll_table *wait)
     999             : {
    1000           0 :         struct userfaultfd_ctx *ctx = file->private_data;
    1001           0 :         __poll_t ret;
    1002             : 
    1003           0 :         poll_wait(file, &ctx->fd_wqh, wait);
    1004             : 
    1005           0 :         if (!userfaultfd_is_initialized(ctx))
    1006             :                 return EPOLLERR;
    1007             : 
    1008             :         /*
    1009             :          * poll() never guarantees that read won't block.
     1010             :          * userfaults can be woken before they're read().
    1011             :          */
    1012           0 :         if (unlikely(!(file->f_flags & O_NONBLOCK)))
    1013             :                 return EPOLLERR;
    1014             :         /*
     1015             :          * Lockless access to see if there are pending faults.
     1016             :          * __pollwait()'s last action is add_wait_queue(), but
     1017             :          * its spin_unlock() would allow the waitqueue_active()
     1018             :          * to pass above the actual list_add() inside the
     1019             :          * add_wait_queue() critical section. So use a full
    1020             :          * memory barrier to serialize the list_add write of
    1021             :          * add_wait_queue() with the waitqueue_active read
    1022             :          * below.
    1023             :          */
    1024           0 :         ret = 0;
    1025           0 :         smp_mb();
    1026           0 :         if (waitqueue_active(&ctx->fault_pending_wqh))
    1027             :                 ret = EPOLLIN;
    1028           0 :         else if (waitqueue_active(&ctx->event_wqh))
    1029           0 :                 ret = EPOLLIN;
    1030             : 
    1031             :         return ret;
    1032             : }
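
/*
 * Illustrative userspace sketch (not part of this file): how the poll entry
 * point above is typically driven.  The fd must be created with O_NONBLOCK
 * and initialized with UFFDIO_API, otherwise userfaultfd_poll() returns
 * EPOLLERR.  The helper name wait_for_uffd_event is hypothetical.
 */
#include <poll.h>

static int wait_for_uffd_event(int uffd)
{
        struct pollfd pfd = { .fd = uffd, .events = POLLIN };

        if (poll(&pfd, 1, -1) < 0)              /* wait for a fault or event */
                return -1;
        if (pfd.revents & (POLLERR | POLLHUP))  /* not initialized, or ctx released */
                return -1;
        return pfd.revents & POLLIN;            /* nonzero: a message is queued for read() */
}
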
    1033             : 
    1034             : static const struct file_operations userfaultfd_fops;
    1035             : 
    1036           0 : static int resolve_userfault_fork(struct userfaultfd_ctx *new,
    1037             :                                   struct inode *inode,
    1038             :                                   struct uffd_msg *msg)
    1039             : {
    1040           0 :         int fd;
    1041             : 
    1042           0 :         fd = anon_inode_getfd_secure("[userfaultfd]", &userfaultfd_fops, new,
    1043           0 :                         O_RDONLY | (new->flags & UFFD_SHARED_FCNTL_FLAGS), inode);
    1044           0 :         if (fd < 0)
    1045             :                 return fd;
    1046             : 
    1047           0 :         msg->arg.reserved.reserved1 = 0;
    1048           0 :         msg->arg.fork.ufd = fd;
    1049           0 :         return 0;
    1050             : }
    1051             : 
    1052           0 : static ssize_t userfaultfd_ctx_read(struct userfaultfd_ctx *ctx, int no_wait,
    1053             :                                     struct uffd_msg *msg, struct inode *inode)
    1054             : {
    1055           0 :         ssize_t ret;
    1056           0 :         DECLARE_WAITQUEUE(wait, current);
    1057           0 :         struct userfaultfd_wait_queue *uwq;
    1058             :         /*
     1059             :          * Handling a fork event requires sleeping operations, so
    1060             :          * we drop the event_wqh lock, then do these ops, then
    1061             :          * lock it back and wake up the waiter. While the lock is
    1062             :          * dropped the ewq may go away so we keep track of it
    1063             :          * carefully.
    1064             :          */
    1065           0 :         LIST_HEAD(fork_event);
    1066           0 :         struct userfaultfd_ctx *fork_nctx = NULL;
    1067             : 
    1068             :         /* always take the fd_wqh lock before the fault_pending_wqh lock */
    1069           0 :         spin_lock_irq(&ctx->fd_wqh.lock);
    1070           0 :         __add_wait_queue(&ctx->fd_wqh, &wait);
    1071           0 :         for (;;) {
    1072           0 :                 set_current_state(TASK_INTERRUPTIBLE);
    1073           0 :                 spin_lock(&ctx->fault_pending_wqh.lock);
    1074           0 :                 uwq = find_userfault(ctx);
    1075           0 :                 if (uwq) {
    1076             :                         /*
    1077             :                          * Use a seqcount to repeat the lockless check
    1078             :                          * in wake_userfault() to avoid missing
    1079             :                          * wakeups because during the refile both
     1080             :                          * waitqueues could become empty if this is the
    1081             :                          * only userfault.
    1082             :                          */
    1083           0 :                         write_seqcount_begin(&ctx->refile_seq);
    1084             : 
    1085             :                         /*
    1086             :                          * The fault_pending_wqh.lock prevents the uwq
    1087             :                          * to disappear from under us.
    1088             :                          *
    1089             :                          * Refile this userfault from
    1090             :                          * fault_pending_wqh to fault_wqh, it's not
    1091             :                          * pending anymore after we read it.
    1092             :                          *
    1093             :                          * Use list_del() by hand (as
    1094             :                          * userfaultfd_wake_function also uses
    1095             :                          * list_del_init() by hand) to be sure nobody
    1096             :                          * changes __remove_wait_queue() to use
    1097             :                          * list_del_init() in turn breaking the
    1098             :                          * !list_empty_careful() check in
    1099             :                          * handle_userfault(). The uwq->wq.head list
    1100             :                          * must never be empty at any time during the
    1101             :                          * refile, or the waitqueue could disappear
    1102             :                          * from under us. The "wait_queue_head_t"
    1103             :                          * parameter of __remove_wait_queue() is unused
    1104             :                          * anyway.
    1105             :                          */
    1106           0 :                         list_del(&uwq->wq.entry);
    1107           0 :                         add_wait_queue(&ctx->fault_wqh, &uwq->wq);
    1108             : 
    1109           0 :                         write_seqcount_end(&ctx->refile_seq);
    1110             : 
    1111             :                         /* careful to always initialize msg if ret == 0 */
    1112           0 :                         *msg = uwq->msg;
    1113           0 :                         spin_unlock(&ctx->fault_pending_wqh.lock);
    1114           0 :                         ret = 0;
    1115           0 :                         break;
    1116             :                 }
    1117           0 :                 spin_unlock(&ctx->fault_pending_wqh.lock);
    1118             : 
    1119           0 :                 spin_lock(&ctx->event_wqh.lock);
    1120           0 :                 uwq = find_userfault_evt(ctx);
    1121           0 :                 if (uwq) {
    1122           0 :                         *msg = uwq->msg;
    1123             : 
    1124           0 :                         if (uwq->msg.event == UFFD_EVENT_FORK) {
    1125           0 :                                 fork_nctx = (struct userfaultfd_ctx *)
    1126             :                                         (unsigned long)
    1127           0 :                                         uwq->msg.arg.reserved.reserved1;
    1128           0 :                                 list_move(&uwq->wq.entry, &fork_event);
    1129             :                                 /*
    1130             :                                  * fork_nctx can be freed as soon as
    1131             :                                  * we drop the lock, unless we take a
    1132             :                                  * reference on it.
    1133             :                                  */
    1134           0 :                                 userfaultfd_ctx_get(fork_nctx);
    1135           0 :                                 spin_unlock(&ctx->event_wqh.lock);
    1136           0 :                                 ret = 0;
    1137           0 :                                 break;
    1138             :                         }
    1139             : 
    1140           0 :                         userfaultfd_event_complete(ctx, uwq);
    1141           0 :                         spin_unlock(&ctx->event_wqh.lock);
    1142           0 :                         ret = 0;
    1143           0 :                         break;
    1144             :                 }
    1145           0 :                 spin_unlock(&ctx->event_wqh.lock);
    1146             : 
    1147           0 :                 if (signal_pending(current)) {
    1148             :                         ret = -ERESTARTSYS;
    1149             :                         break;
    1150             :                 }
    1151           0 :                 if (no_wait) {
    1152             :                         ret = -EAGAIN;
    1153             :                         break;
    1154             :                 }
    1155           0 :                 spin_unlock_irq(&ctx->fd_wqh.lock);
    1156           0 :                 schedule();
    1157           0 :                 spin_lock_irq(&ctx->fd_wqh.lock);
    1158             :         }
    1159           0 :         __remove_wait_queue(&ctx->fd_wqh, &wait);
    1160           0 :         __set_current_state(TASK_RUNNING);
    1161           0 :         spin_unlock_irq(&ctx->fd_wqh.lock);
    1162             : 
    1163           0 :         if (!ret && msg->event == UFFD_EVENT_FORK) {
    1164           0 :                 ret = resolve_userfault_fork(fork_nctx, inode, msg);
    1165           0 :                 spin_lock_irq(&ctx->event_wqh.lock);
    1166           0 :                 if (!list_empty(&fork_event)) {
    1167             :                         /*
    1168             :                          * The fork thread didn't abort, so we can
    1169             :                          * drop the temporary refcount.
    1170             :                          */
    1171           0 :                         userfaultfd_ctx_put(fork_nctx);
    1172             : 
    1173           0 :                         uwq = list_first_entry(&fork_event,
    1174             :                                                typeof(*uwq),
    1175             :                                                wq.entry);
    1176             :                         /*
    1177             :                          * If fork_event list wasn't empty and in turn
    1178             :                          * the event wasn't already released by fork
     1179             :                          * (the event is allocated on the fork kernel
     1180             :                          * stack), put the event back in its place in
     1181             :                          * the event_wqh. The fork_event head will be freed
    1182             :                          * as soon as we return so the event cannot
    1183             :                          * stay queued there no matter the current
    1184             :                          * "ret" value.
    1185             :                          */
    1186           0 :                         list_del(&uwq->wq.entry);
    1187           0 :                         __add_wait_queue(&ctx->event_wqh, &uwq->wq);
    1188             : 
    1189             :                         /*
    1190             :                          * Leave the event in the waitqueue and report
    1191             :                          * error to userland if we failed to resolve
    1192             :                          * the userfault fork.
    1193             :                          */
    1194           0 :                         if (likely(!ret))
    1195           0 :                                 userfaultfd_event_complete(ctx, uwq);
    1196             :                 } else {
    1197             :                         /*
    1198             :                          * Here the fork thread aborted and the
    1199             :                          * refcount from the fork thread on fork_nctx
    1200             :                          * has already been released. We still hold
    1201             :                          * the reference we took before releasing the
    1202             :                          * lock above. If resolve_userfault_fork
     1203             :                          * failed we have to drop it because the
     1204             :                          * fork_nctx has to be freed in that case. If
    1205             :                          * it succeeded we'll hold it because the new
    1206             :                          * uffd references it.
    1207             :                          */
    1208           0 :                         if (ret)
    1209           0 :                                 userfaultfd_ctx_put(fork_nctx);
    1210             :                 }
    1211           0 :                 spin_unlock_irq(&ctx->event_wqh.lock);
    1212             :         }
    1213             : 
    1214           0 :         return ret;
    1215             : }
    1216             : 
    1217           0 : static ssize_t userfaultfd_read(struct file *file, char __user *buf,
    1218             :                                 size_t count, loff_t *ppos)
    1219             : {
    1220           0 :         struct userfaultfd_ctx *ctx = file->private_data;
    1221           0 :         ssize_t _ret, ret = 0;
    1222           0 :         struct uffd_msg msg;
    1223           0 :         int no_wait = file->f_flags & O_NONBLOCK;
    1224           0 :         struct inode *inode = file_inode(file);
    1225             : 
    1226           0 :         if (!userfaultfd_is_initialized(ctx))
    1227             :                 return -EINVAL;
    1228             : 
    1229           0 :         for (;;) {
    1230           0 :                 if (count < sizeof(msg))
    1231           0 :                         return ret ? ret : -EINVAL;
    1232           0 :                 _ret = userfaultfd_ctx_read(ctx, no_wait, &msg, inode);
    1233           0 :                 if (_ret < 0)
    1234           0 :                         return ret ? ret : _ret;
    1235           0 :                 if (copy_to_user((__u64 __user *) buf, &msg, sizeof(msg)))
    1236           0 :                         return ret ? ret : -EFAULT;
    1237           0 :                 ret += sizeof(msg);
    1238           0 :                 buf += sizeof(msg);
    1239           0 :                 count -= sizeof(msg);
    1240             :                 /*
     1241             :                  * Allow reading more than one fault at a time, but only
    1242             :                  * block if waiting for the very first one.
    1243             :                  */
    1244           0 :                 no_wait = O_NONBLOCK;
    1245             :         }
    1246             : }
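
/*
 * Illustrative userspace sketch (not part of this file): draining
 * struct uffd_msg records from the fd.  userfaultfd_read() above only
 * returns whole messages, so a short read never splits one.  The helper
 * name drain_uffd_msgs and the printf reporting are just for illustration.
 */
#include <stdio.h>
#include <unistd.h>
#include <linux/userfaultfd.h>

static void drain_uffd_msgs(int uffd)
{
        struct uffd_msg msg;

        while (read(uffd, &msg, sizeof(msg)) == sizeof(msg)) {
                if (msg.event == UFFD_EVENT_PAGEFAULT)
                        printf("fault at %llx flags %llx\n",
                               (unsigned long long)msg.arg.pagefault.address,
                               (unsigned long long)msg.arg.pagefault.flags);
                else if (msg.event == UFFD_EVENT_FORK)
                        printf("child uffd %u\n", msg.arg.fork.ufd);
        }
        /* on an O_NONBLOCK fd, read() fails with EAGAIN once the queue is empty */
}
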
    1247             : 
    1248           0 : static void __wake_userfault(struct userfaultfd_ctx *ctx,
    1249             :                              struct userfaultfd_wake_range *range)
    1250             : {
    1251           0 :         spin_lock_irq(&ctx->fault_pending_wqh.lock);
    1252             :         /* wake all in the range and autoremove */
    1253           0 :         if (waitqueue_active(&ctx->fault_pending_wqh))
    1254           0 :                 __wake_up_locked_key(&ctx->fault_pending_wqh, TASK_NORMAL,
    1255             :                                      range);
    1256           0 :         if (waitqueue_active(&ctx->fault_wqh))
    1257           0 :                 __wake_up(&ctx->fault_wqh, TASK_NORMAL, 1, range);
    1258           0 :         spin_unlock_irq(&ctx->fault_pending_wqh.lock);
    1259           0 : }
    1260             : 
    1261             : static __always_inline void wake_userfault(struct userfaultfd_ctx *ctx,
    1262             :                                            struct userfaultfd_wake_range *range)
    1263             : {
    1264           0 :         unsigned seq;
    1265           0 :         bool need_wakeup;
    1266             : 
    1267             :         /*
    1268             :          * To be sure waitqueue_active() is not reordered by the CPU
    1269             :          * before the pagetable update, use an explicit SMP memory
    1270             :          * barrier here. PT lock release or mmap_read_unlock(mm) still
    1271             :          * have release semantics that can allow the
    1272             :          * waitqueue_active() to be reordered before the pte update.
    1273             :          */
    1274           0 :         smp_mb();
    1275             : 
    1276             :         /*
     1277             :          * Use waitqueue_active() because it is very common to
     1278             :          * change the address space atomically even if there are no
     1279             :          * userfaults yet. So we take the spinlock only when we're
     1280             :          * sure there are userfaults to wake.
    1281             :          */
    1282             :         do {
    1283           0 :                 seq = read_seqcount_begin(&ctx->refile_seq);
    1284           0 :                 need_wakeup = waitqueue_active(&ctx->fault_pending_wqh) ||
    1285             :                         waitqueue_active(&ctx->fault_wqh);
    1286           0 :                 cond_resched();
    1287           0 :         } while (read_seqcount_retry(&ctx->refile_seq, seq));
    1288           0 :         if (need_wakeup)
    1289           0 :                 __wake_userfault(ctx, range);
    1290             : }
    1291             : 
    1292             : static __always_inline int validate_range(struct mm_struct *mm,
    1293             :                                           __u64 start, __u64 len)
    1294             : {
    1295           0 :         __u64 task_size = mm->task_size;
    1296             : 
    1297           0 :         if (start & ~PAGE_MASK)
    1298             :                 return -EINVAL;
    1299           0 :         if (len & ~PAGE_MASK)
    1300             :                 return -EINVAL;
    1301           0 :         if (!len)
    1302             :                 return -EINVAL;
    1303           0 :         if (start < mmap_min_addr)
    1304             :                 return -EINVAL;
    1305           0 :         if (start >= task_size)
    1306             :                 return -EINVAL;
    1307           0 :         if (len > task_size - start)
    1308           0 :                 return -EINVAL;
    1309             :         return 0;
    1310             : }
    1311             : 
    1312           0 : static int userfaultfd_register(struct userfaultfd_ctx *ctx,
    1313             :                                 unsigned long arg)
    1314             : {
    1315           0 :         struct mm_struct *mm = ctx->mm;
    1316           0 :         struct vm_area_struct *vma, *prev, *cur;
    1317           0 :         int ret;
    1318           0 :         struct uffdio_register uffdio_register;
    1319           0 :         struct uffdio_register __user *user_uffdio_register;
    1320           0 :         unsigned long vm_flags, new_flags;
    1321           0 :         bool found;
    1322           0 :         bool basic_ioctls;
    1323           0 :         unsigned long start, end, vma_end;
    1324           0 :         struct vma_iterator vmi;
    1325           0 :         pgoff_t pgoff;
    1326             : 
    1327           0 :         user_uffdio_register = (struct uffdio_register __user *) arg;
    1328             : 
    1329           0 :         ret = -EFAULT;
    1330           0 :         if (copy_from_user(&uffdio_register, user_uffdio_register,
    1331             :                            sizeof(uffdio_register)-sizeof(__u64)))
    1332           0 :                 goto out;
    1333             : 
    1334           0 :         ret = -EINVAL;
    1335           0 :         if (!uffdio_register.mode)
    1336           0 :                 goto out;
    1337           0 :         if (uffdio_register.mode & ~UFFD_API_REGISTER_MODES)
    1338           0 :                 goto out;
    1339           0 :         vm_flags = 0;
    1340           0 :         if (uffdio_register.mode & UFFDIO_REGISTER_MODE_MISSING)
    1341           0 :                 vm_flags |= VM_UFFD_MISSING;
    1342           0 :         if (uffdio_register.mode & UFFDIO_REGISTER_MODE_WP) {
    1343             : #ifndef CONFIG_HAVE_ARCH_USERFAULTFD_WP
    1344             :                 goto out;
    1345             : #endif
    1346           0 :                 vm_flags |= VM_UFFD_WP;
    1347             :         }
    1348           0 :         if (uffdio_register.mode & UFFDIO_REGISTER_MODE_MINOR) {
    1349             : #ifndef CONFIG_HAVE_ARCH_USERFAULTFD_MINOR
    1350             :                 goto out;
    1351             : #endif
    1352           0 :                 vm_flags |= VM_UFFD_MINOR;
    1353             :         }
    1354             : 
    1355           0 :         ret = validate_range(mm, uffdio_register.range.start,
    1356             :                              uffdio_register.range.len);
    1357           0 :         if (ret)
    1358           0 :                 goto out;
    1359             : 
    1360           0 :         start = uffdio_register.range.start;
    1361           0 :         end = start + uffdio_register.range.len;
    1362             : 
    1363           0 :         ret = -ENOMEM;
    1364           0 :         if (!mmget_not_zero(mm))
    1365           0 :                 goto out;
    1366             : 
    1367           0 :         ret = -EINVAL;
    1368           0 :         mmap_write_lock(mm);
    1369           0 :         vma_iter_init(&vmi, mm, start);
    1370           0 :         vma = vma_find(&vmi, end);
    1371           0 :         if (!vma)
    1372           0 :                 goto out_unlock;
    1373             : 
    1374             :         /*
    1375             :          * If the first vma contains huge pages, make sure start address
    1376             :          * is aligned to huge page size.
    1377             :          */
    1378           0 :         if (is_vm_hugetlb_page(vma)) {
    1379           0 :                 unsigned long vma_hpagesize = vma_kernel_pagesize(vma);
    1380             : 
    1381           0 :                 if (start & (vma_hpagesize - 1))
    1382           0 :                         goto out_unlock;
    1383             :         }
    1384             : 
    1385             :         /*
     1386             :          * Search for incompatible vmas.
    1387             :          */
    1388             :         found = false;
    1389             :         basic_ioctls = false;
    1390             :         cur = vma;
    1391           0 :         do {
    1392           0 :                 cond_resched();
    1393             : 
    1394           0 :                 BUG_ON(!!cur->vm_userfaultfd_ctx.ctx ^
    1395             :                        !!(cur->vm_flags & __VM_UFFD_FLAGS));
    1396             : 
     1397             :                 /* check for incompatible vmas */
    1398           0 :                 ret = -EINVAL;
    1399           0 :                 if (!vma_can_userfault(cur, vm_flags))
    1400           0 :                         goto out_unlock;
    1401             : 
    1402             :                 /*
    1403             :                  * UFFDIO_COPY will fill file holes even without
    1404             :                  * PROT_WRITE. This check enforces that if this is a
     1405             :                  * MAP_SHARED mapping, the process has write permission to the backing
    1406             :                  * file. If VM_MAYWRITE is set it also enforces that on a
    1407             :                  * MAP_SHARED vma: there is no F_WRITE_SEAL and no further
    1408             :                  * F_WRITE_SEAL can be taken until the vma is destroyed.
    1409             :                  */
    1410           0 :                 ret = -EPERM;
    1411           0 :                 if (unlikely(!(cur->vm_flags & VM_MAYWRITE)))
    1412           0 :                         goto out_unlock;
    1413             : 
    1414             :                 /*
     1415             :                  * If this vma contains the ending address and is backed
     1416             :                  * by huge pages, check alignment.
    1417             :                  */
    1418           0 :                 if (is_vm_hugetlb_page(cur) && end <= cur->vm_end &&
    1419           0 :                     end > cur->vm_start) {
    1420           0 :                         unsigned long vma_hpagesize = vma_kernel_pagesize(cur);
    1421             : 
    1422           0 :                         ret = -EINVAL;
    1423             : 
    1424           0 :                         if (end & (vma_hpagesize - 1))
    1425           0 :                                 goto out_unlock;
    1426             :                 }
    1427           0 :                 if ((vm_flags & VM_UFFD_WP) && !(cur->vm_flags & VM_MAYWRITE))
    1428           0 :                         goto out_unlock;
    1429             : 
    1430             :                 /*
    1431             :                  * Check that this vma isn't already owned by a
    1432             :                  * different userfaultfd. We can't allow more than one
    1433             :                  * userfaultfd to own a single vma simultaneously or we
    1434             :                  * wouldn't know which one to deliver the userfaults to.
    1435             :                  */
    1436           0 :                 ret = -EBUSY;
    1437           0 :                 if (cur->vm_userfaultfd_ctx.ctx &&
    1438             :                     cur->vm_userfaultfd_ctx.ctx != ctx)
    1439           0 :                         goto out_unlock;
    1440             : 
    1441             :                 /*
    1442             :                  * Note vmas containing huge pages
    1443             :                  */
    1444           0 :                 if (is_vm_hugetlb_page(cur))
    1445           0 :                         basic_ioctls = true;
    1446             : 
    1447           0 :                 found = true;
    1448           0 :         } for_each_vma_range(vmi, cur, end);
    1449           0 :         BUG_ON(!found);
    1450             : 
    1451           0 :         vma_iter_set(&vmi, start);
    1452           0 :         prev = vma_prev(&vmi);
    1453           0 :         if (vma->vm_start < start)
    1454           0 :                 prev = vma;
    1455             : 
    1456             :         ret = 0;
    1457           0 :         for_each_vma_range(vmi, vma, end) {
    1458           0 :                 cond_resched();
    1459             : 
    1460           0 :                 BUG_ON(!vma_can_userfault(vma, vm_flags));
    1461           0 :                 BUG_ON(vma->vm_userfaultfd_ctx.ctx &&
    1462             :                        vma->vm_userfaultfd_ctx.ctx != ctx);
    1463           0 :                 WARN_ON(!(vma->vm_flags & VM_MAYWRITE));
    1464             : 
    1465             :                 /*
    1466             :                  * Nothing to do: this vma is already registered into this
    1467             :                  * userfaultfd and with the right tracking mode too.
    1468             :                  */
    1469           0 :                 if (vma->vm_userfaultfd_ctx.ctx == ctx &&
    1470           0 :                     (vma->vm_flags & vm_flags) == vm_flags)
    1471           0 :                         goto skip;
    1472             : 
    1473           0 :                 if (vma->vm_start > start)
    1474             :                         start = vma->vm_start;
    1475           0 :                 vma_end = min(end, vma->vm_end);
    1476             : 
    1477           0 :                 new_flags = (vma->vm_flags & ~__VM_UFFD_FLAGS) | vm_flags;
    1478           0 :                 pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
    1479           0 :                 prev = vma_merge(&vmi, mm, prev, start, vma_end, new_flags,
    1480             :                                  vma->anon_vma, vma->vm_file, pgoff,
    1481             :                                  vma_policy(vma),
    1482           0 :                                  ((struct vm_userfaultfd_ctx){ ctx }),
    1483             :                                  anon_vma_name(vma));
    1484           0 :                 if (prev) {
    1485             :                         /* vma_merge() invalidated the mas */
    1486           0 :                         vma = prev;
    1487           0 :                         goto next;
    1488             :                 }
    1489           0 :                 if (vma->vm_start < start) {
    1490           0 :                         ret = split_vma(&vmi, vma, start, 1);
    1491           0 :                         if (ret)
    1492             :                                 break;
    1493             :                 }
    1494           0 :                 if (vma->vm_end > end) {
    1495           0 :                         ret = split_vma(&vmi, vma, end, 0);
    1496           0 :                         if (ret)
    1497             :                                 break;
    1498             :                 }
    1499           0 :         next:
    1500             :                 /*
    1501             :                  * In the vma_merge() successful mprotect-like case 8:
    1502             :                  * the next vma was merged into the current one and
    1503             :                  * the current one has not been updated yet.
    1504             :                  */
    1505           0 :                 userfaultfd_set_vm_flags(vma, new_flags);
    1506           0 :                 vma->vm_userfaultfd_ctx.ctx = ctx;
    1507             : 
    1508           0 :                 if (is_vm_hugetlb_page(vma) && uffd_disable_huge_pmd_share(vma))
    1509           0 :                         hugetlb_unshare_all_pmds(vma);
    1510             : 
    1511           0 :         skip:
    1512           0 :                 prev = vma;
    1513           0 :                 start = vma->vm_end;
    1514             :         }
    1515             : 
    1516           0 : out_unlock:
    1517           0 :         mmap_write_unlock(mm);
    1518           0 :         mmput(mm);
    1519           0 :         if (!ret) {
    1520           0 :                 __u64 ioctls_out;
    1521             : 
    1522           0 :                 ioctls_out = basic_ioctls ? UFFD_API_RANGE_IOCTLS_BASIC :
    1523             :                     UFFD_API_RANGE_IOCTLS;
    1524             : 
    1525             :                 /*
    1526             :                  * Declare the WP ioctl only if the WP mode is
    1527             :                  * specified and all checks passed with the range
    1528             :                  */
    1529           0 :                 if (!(uffdio_register.mode & UFFDIO_REGISTER_MODE_WP))
    1530           0 :                         ioctls_out &= ~((__u64)1 << _UFFDIO_WRITEPROTECT);
    1531             : 
    1532             :                 /* CONTINUE ioctl is only supported for MINOR ranges. */
    1533           0 :                 if (!(uffdio_register.mode & UFFDIO_REGISTER_MODE_MINOR))
    1534           0 :                         ioctls_out &= ~((__u64)1 << _UFFDIO_CONTINUE);
    1535             : 
    1536             :                 /*
    1537             :                  * Now that we scanned all vmas we can already tell
     1538             :                  * userland which ioctl methods are guaranteed to
    1539             :                  * succeed on this range.
    1540             :                  */
    1541           0 :                 if (put_user(ioctls_out, &user_uffdio_register->ioctls))
    1542           0 :                         ret = -EFAULT;
    1543             :         }
    1544           0 : out:
    1545           0 :         return ret;
    1546             : }
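
/*
 * Illustrative userspace sketch (not part of this file) of the two ioctls
 * handled above: UFFDIO_API to initialize the context, then UFFDIO_REGISTER
 * on a page-aligned range (e.g. obtained from mmap()).  register_missing is
 * a hypothetical helper name; error handling is trimmed.
 */
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <linux/userfaultfd.h>

static int register_missing(void *area, size_t len)
{
        int uffd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK);
        struct uffdio_api api = { .api = UFFD_API };
        struct uffdio_register reg = {
                .range = { .start = (unsigned long)area, .len = len },
                .mode  = UFFDIO_REGISTER_MODE_MISSING,
        };

        if (uffd < 0 || ioctl(uffd, UFFDIO_API, &api) < 0)
                return -1;
        if (ioctl(uffd, UFFDIO_REGISTER, &reg) < 0)
                return -1;
        /* reg.ioctls now reports which range ioctls (e.g. UFFDIO_COPY) will work */
        return uffd;
}
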
    1547             : 
    1548           0 : static int userfaultfd_unregister(struct userfaultfd_ctx *ctx,
    1549             :                                   unsigned long arg)
    1550             : {
    1551           0 :         struct mm_struct *mm = ctx->mm;
    1552           0 :         struct vm_area_struct *vma, *prev, *cur;
    1553           0 :         int ret;
    1554           0 :         struct uffdio_range uffdio_unregister;
    1555           0 :         unsigned long new_flags;
    1556           0 :         bool found;
    1557           0 :         unsigned long start, end, vma_end;
    1558           0 :         const void __user *buf = (void __user *)arg;
    1559           0 :         struct vma_iterator vmi;
    1560           0 :         pgoff_t pgoff;
    1561             : 
    1562           0 :         ret = -EFAULT;
    1563           0 :         if (copy_from_user(&uffdio_unregister, buf, sizeof(uffdio_unregister)))
    1564           0 :                 goto out;
    1565             : 
    1566           0 :         ret = validate_range(mm, uffdio_unregister.start,
    1567             :                              uffdio_unregister.len);
    1568           0 :         if (ret)
    1569           0 :                 goto out;
    1570             : 
    1571           0 :         start = uffdio_unregister.start;
    1572           0 :         end = start + uffdio_unregister.len;
    1573             : 
    1574           0 :         ret = -ENOMEM;
    1575           0 :         if (!mmget_not_zero(mm))
    1576           0 :                 goto out;
    1577             : 
    1578           0 :         mmap_write_lock(mm);
    1579           0 :         ret = -EINVAL;
    1580           0 :         vma_iter_init(&vmi, mm, start);
    1581           0 :         vma = vma_find(&vmi, end);
    1582           0 :         if (!vma)
    1583           0 :                 goto out_unlock;
    1584             : 
    1585             :         /*
    1586             :          * If the first vma contains huge pages, make sure start address
    1587             :          * is aligned to huge page size.
    1588             :          */
    1589           0 :         if (is_vm_hugetlb_page(vma)) {
    1590           0 :                 unsigned long vma_hpagesize = vma_kernel_pagesize(vma);
    1591             : 
    1592           0 :                 if (start & (vma_hpagesize - 1))
    1593           0 :                         goto out_unlock;
    1594             :         }
    1595             : 
    1596             :         /*
     1597             :          * Search for incompatible vmas.
    1598             :          */
    1599             :         found = false;
    1600             :         cur = vma;
    1601           0 :         do {
    1602           0 :                 cond_resched();
    1603             : 
    1604           0 :                 BUG_ON(!!cur->vm_userfaultfd_ctx.ctx ^
    1605             :                        !!(cur->vm_flags & __VM_UFFD_FLAGS));
    1606             : 
    1607             :                 /*
     1608             :                  * Check for incompatible vmas. Not strictly required
     1609             :                  * here, as incompatible vmas cannot have a
     1610             :                  * userfaultfd_ctx registered on them, but this
     1611             :                  * provides stricter behavior to notice
     1612             :                  * unregistration errors.
    1613             :                  */
    1614           0 :                 if (!vma_can_userfault(cur, cur->vm_flags))
    1615           0 :                         goto out_unlock;
    1616             : 
    1617           0 :                 found = true;
    1618           0 :         } for_each_vma_range(vmi, cur, end);
    1619           0 :         BUG_ON(!found);
    1620             : 
    1621           0 :         vma_iter_set(&vmi, start);
    1622           0 :         prev = vma_prev(&vmi);
    1623           0 :         if (vma->vm_start < start)
    1624           0 :                 prev = vma;
    1625             : 
    1626             :         ret = 0;
    1627           0 :         for_each_vma_range(vmi, vma, end) {
    1628           0 :                 cond_resched();
    1629             : 
    1630           0 :                 BUG_ON(!vma_can_userfault(vma, vma->vm_flags));
    1631             : 
    1632             :                 /*
     1633             :                  * Nothing to do: this vma is not registered with any
     1634             :                  * userfaultfd, so there is nothing to unregister.
    1635             :                  */
    1636           0 :                 if (!vma->vm_userfaultfd_ctx.ctx)
    1637           0 :                         goto skip;
    1638             : 
    1639           0 :                 WARN_ON(!(vma->vm_flags & VM_MAYWRITE));
    1640             : 
    1641           0 :                 if (vma->vm_start > start)
    1642             :                         start = vma->vm_start;
    1643           0 :                 vma_end = min(end, vma->vm_end);
    1644             : 
    1645           0 :                 if (userfaultfd_missing(vma)) {
    1646             :                         /*
     1647             :                          * Wake any concurrently pending userfaults while
     1648             :                          * we unregister, so they will not hang
     1649             :                          * permanently and userland does not need to call
     1650             :                          * UFFDIO_WAKE explicitly.
    1651             :                          */
    1652           0 :                         struct userfaultfd_wake_range range;
    1653           0 :                         range.start = start;
    1654           0 :                         range.len = vma_end - start;
    1655           0 :                         wake_userfault(vma->vm_userfaultfd_ctx.ctx, &range);
    1656             :                 }
    1657             : 
    1658             :                 /* Reset ptes for the whole vma range if wr-protected */
    1659           0 :                 if (userfaultfd_wp(vma))
    1660           0 :                         uffd_wp_range(vma, start, vma_end - start, false);
    1661             : 
    1662           0 :                 new_flags = vma->vm_flags & ~__VM_UFFD_FLAGS;
    1663           0 :                 pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
    1664           0 :                 prev = vma_merge(&vmi, mm, prev, start, vma_end, new_flags,
    1665             :                                  vma->anon_vma, vma->vm_file, pgoff,
    1666             :                                  vma_policy(vma),
    1667           0 :                                  NULL_VM_UFFD_CTX, anon_vma_name(vma));
    1668           0 :                 if (prev) {
    1669           0 :                         vma = prev;
    1670           0 :                         goto next;
    1671             :                 }
    1672           0 :                 if (vma->vm_start < start) {
    1673           0 :                         ret = split_vma(&vmi, vma, start, 1);
    1674           0 :                         if (ret)
    1675             :                                 break;
    1676             :                 }
    1677           0 :                 if (vma->vm_end > end) {
    1678           0 :                         ret = split_vma(&vmi, vma, end, 0);
    1679           0 :                         if (ret)
    1680             :                                 break;
    1681             :                 }
    1682           0 :         next:
    1683             :                 /*
    1684             :                  * In the vma_merge() successful mprotect-like case 8:
    1685             :                  * the next vma was merged into the current one and
    1686             :                  * the current one has not been updated yet.
    1687             :                  */
    1688           0 :                 userfaultfd_set_vm_flags(vma, new_flags);
    1689           0 :                 vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
    1690             : 
    1691           0 :         skip:
    1692           0 :                 prev = vma;
    1693           0 :                 start = vma->vm_end;
    1694             :         }
    1695             : 
    1696           0 : out_unlock:
    1697           0 :         mmap_write_unlock(mm);
    1698           0 :         mmput(mm);
    1699           0 : out:
    1700           0 :         return ret;
    1701             : }
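
/*
 * Illustrative userspace sketch (not part of this file): UFFDIO_UNREGISTER
 * takes a bare struct uffdio_range, unlike UFFDIO_REGISTER, and the range
 * must satisfy the same validate_range() rules as above.  Hypothetical
 * helper name.
 */
#include <stddef.h>
#include <sys/ioctl.h>
#include <linux/userfaultfd.h>

static int unregister_range(int uffd, void *area, size_t len)
{
        struct uffdio_range range = {
                .start = (unsigned long)area,
                .len   = len,
        };

        return ioctl(uffd, UFFDIO_UNREGISTER, &range);
}
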
    1702             : 
    1703             : /*
    1704             :  * userfaultfd_wake may be used in combination with the
    1705             :  * UFFDIO_*_MODE_DONTWAKE to wakeup userfaults in batches.
    1706             :  */
    1707           0 : static int userfaultfd_wake(struct userfaultfd_ctx *ctx,
    1708             :                             unsigned long arg)
    1709             : {
    1710           0 :         int ret;
    1711           0 :         struct uffdio_range uffdio_wake;
    1712           0 :         struct userfaultfd_wake_range range;
    1713           0 :         const void __user *buf = (void __user *)arg;
    1714             : 
    1715           0 :         ret = -EFAULT;
    1716           0 :         if (copy_from_user(&uffdio_wake, buf, sizeof(uffdio_wake)))
    1717           0 :                 goto out;
    1718             : 
    1719           0 :         ret = validate_range(ctx->mm, uffdio_wake.start, uffdio_wake.len);
    1720           0 :         if (ret)
    1721           0 :                 goto out;
    1722             : 
    1723           0 :         range.start = uffdio_wake.start;
    1724           0 :         range.len = uffdio_wake.len;
    1725             : 
    1726             :         /*
    1727             :          * len == 0 means wake all and we don't want to wake all here,
    1728             :          * so check it again to be sure.
    1729             :          */
    1730           0 :         VM_BUG_ON(!range.len);
    1731             : 
    1732           0 :         wake_userfault(ctx, &range);
    1733             :         ret = 0;
    1734             : 
    1735           0 : out:
    1736           0 :         return ret;
    1737             : }
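
/*
 * Illustrative userspace sketch (not part of this file): after resolving
 * several faults with UFFDIO_COPY/UFFDIO_ZEROPAGE in *_MODE_DONTWAKE mode,
 * a single UFFDIO_WAKE over the whole range wakes the blocked threads in
 * one batch.  Hypothetical helper name.
 */
#include <stddef.h>
#include <sys/ioctl.h>
#include <linux/userfaultfd.h>

static int wake_range(int uffd, void *addr, size_t len)
{
        struct uffdio_range range = {
                .start = (unsigned long)addr,
                .len   = len,
        };

        return ioctl(uffd, UFFDIO_WAKE, &range);
}
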
    1738             : 
    1739           0 : static int userfaultfd_copy(struct userfaultfd_ctx *ctx,
    1740             :                             unsigned long arg)
    1741             : {
    1742           0 :         __s64 ret;
    1743           0 :         struct uffdio_copy uffdio_copy;
    1744           0 :         struct uffdio_copy __user *user_uffdio_copy;
    1745           0 :         struct userfaultfd_wake_range range;
    1746           0 :         uffd_flags_t flags = 0;
    1747             : 
    1748           0 :         user_uffdio_copy = (struct uffdio_copy __user *) arg;
    1749             : 
    1750           0 :         ret = -EAGAIN;
    1751           0 :         if (atomic_read(&ctx->mmap_changing))
    1752           0 :                 goto out;
    1753             : 
    1754           0 :         ret = -EFAULT;
    1755           0 :         if (copy_from_user(&uffdio_copy, user_uffdio_copy,
    1756             :                            /* don't copy "copy" last field */
    1757             :                            sizeof(uffdio_copy)-sizeof(__s64)))
    1758           0 :                 goto out;
    1759             : 
    1760           0 :         ret = validate_range(ctx->mm, uffdio_copy.dst, uffdio_copy.len);
    1761           0 :         if (ret)
    1762           0 :                 goto out;
    1763             :         /*
    1764             :          * double check for wraparound just in case. copy_from_user()
     1765             :          * will later check that uffdio_copy.src + uffdio_copy.len fits
    1766             :          * in the userland range.
    1767             :          */
    1768           0 :         ret = -EINVAL;
    1769           0 :         if (uffdio_copy.src + uffdio_copy.len <= uffdio_copy.src)
    1770           0 :                 goto out;
    1771           0 :         if (uffdio_copy.mode & ~(UFFDIO_COPY_MODE_DONTWAKE|UFFDIO_COPY_MODE_WP))
    1772           0 :                 goto out;
    1773           0 :         if (uffdio_copy.mode & UFFDIO_COPY_MODE_WP)
    1774           0 :                 flags |= MFILL_ATOMIC_WP;
    1775           0 :         if (mmget_not_zero(ctx->mm)) {
    1776           0 :                 ret = mfill_atomic_copy(ctx->mm, uffdio_copy.dst, uffdio_copy.src,
    1777           0 :                                         uffdio_copy.len, &ctx->mmap_changing,
    1778             :                                         flags);
    1779           0 :                 mmput(ctx->mm);
    1780             :         } else {
    1781             :                 return -ESRCH;
    1782             :         }
    1783           0 :         if (unlikely(put_user(ret, &user_uffdio_copy->copy)))
    1784             :                 return -EFAULT;
    1785           0 :         if (ret < 0)
    1786           0 :                 goto out;
    1787           0 :         BUG_ON(!ret);
    1788             :         /* len == 0 would wake all */
    1789           0 :         range.len = ret;
    1790           0 :         if (!(uffdio_copy.mode & UFFDIO_COPY_MODE_DONTWAKE)) {
    1791           0 :                 range.start = uffdio_copy.dst;
    1792           0 :                 wake_userfault(ctx, &range);
    1793             :         }
    1794           0 :         ret = range.len == uffdio_copy.len ? 0 : -EAGAIN;
    1795           0 : out:
    1796           0 :         return ret;
    1797             : }
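
/*
 * Illustrative userspace sketch (not part of this file): resolving a
 * missing-page fault by copying a prepared page into place.  fault_addr
 * comes from msg.arg.pagefault.address; src and page_size are assumptions
 * of the caller.  Hypothetical helper name; error handling is trimmed.
 */
#include <stddef.h>
#include <sys/ioctl.h>
#include <linux/userfaultfd.h>

static int resolve_missing_fault(int uffd, void *src,
                                 unsigned long long fault_addr, size_t page_size)
{
        struct uffdio_copy copy = {
                .dst  = fault_addr & ~(unsigned long long)(page_size - 1), /* page-align */
                .src  = (unsigned long)src,
                .len  = page_size,
                .mode = 0,              /* wake the faulting thread right away */
        };

        if (ioctl(uffd, UFFDIO_COPY, &copy) < 0)
                return -1;              /* on -EAGAIN, copy.copy holds the bytes done */
        return 0;
}
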
    1798             : 
    1799           0 : static int userfaultfd_zeropage(struct userfaultfd_ctx *ctx,
    1800             :                                 unsigned long arg)
    1801             : {
    1802           0 :         __s64 ret;
    1803           0 :         struct uffdio_zeropage uffdio_zeropage;
    1804           0 :         struct uffdio_zeropage __user *user_uffdio_zeropage;
    1805           0 :         struct userfaultfd_wake_range range;
    1806             : 
    1807           0 :         user_uffdio_zeropage = (struct uffdio_zeropage __user *) arg;
    1808             : 
    1809           0 :         ret = -EAGAIN;
    1810           0 :         if (atomic_read(&ctx->mmap_changing))
    1811           0 :                 goto out;
    1812             : 
    1813           0 :         ret = -EFAULT;
    1814           0 :         if (copy_from_user(&uffdio_zeropage, user_uffdio_zeropage,
    1815             :                            /* don't copy "zeropage" last field */
    1816             :                            sizeof(uffdio_zeropage)-sizeof(__s64)))
    1817           0 :                 goto out;
    1818             : 
    1819           0 :         ret = validate_range(ctx->mm, uffdio_zeropage.range.start,
    1820             :                              uffdio_zeropage.range.len);
    1821           0 :         if (ret)
    1822           0 :                 goto out;
    1823           0 :         ret = -EINVAL;
    1824           0 :         if (uffdio_zeropage.mode & ~UFFDIO_ZEROPAGE_MODE_DONTWAKE)
    1825           0 :                 goto out;
    1826             : 
    1827           0 :         if (mmget_not_zero(ctx->mm)) {
    1828           0 :                 ret = mfill_atomic_zeropage(ctx->mm, uffdio_zeropage.range.start,
    1829           0 :                                            uffdio_zeropage.range.len,
    1830             :                                            &ctx->mmap_changing);
    1831           0 :                 mmput(ctx->mm);
    1832             :         } else {
    1833             :                 return -ESRCH;
    1834             :         }
    1835           0 :         if (unlikely(put_user(ret, &user_uffdio_zeropage->zeropage)))
    1836             :                 return -EFAULT;
    1837           0 :         if (ret < 0)
    1838           0 :                 goto out;
    1839             :         /* len == 0 would wake all */
    1840           0 :         BUG_ON(!ret);
    1841           0 :         range.len = ret;
    1842           0 :         if (!(uffdio_zeropage.mode & UFFDIO_ZEROPAGE_MODE_DONTWAKE)) {
    1843           0 :                 range.start = uffdio_zeropage.range.start;
    1844           0 :                 wake_userfault(ctx, &range);
    1845             :         }
    1846           0 :         ret = range.len == uffdio_zeropage.range.len ? 0 : -EAGAIN;
    1847           0 : out:
    1848           0 :         return ret;
    1849             : }
    1850             : 
    1851           0 : static int userfaultfd_writeprotect(struct userfaultfd_ctx *ctx,
    1852             :                                     unsigned long arg)
    1853             : {
    1854           0 :         int ret;
    1855           0 :         struct uffdio_writeprotect uffdio_wp;
    1856           0 :         struct uffdio_writeprotect __user *user_uffdio_wp;
    1857           0 :         struct userfaultfd_wake_range range;
    1858           0 :         bool mode_wp, mode_dontwake;
    1859             : 
    1860           0 :         if (atomic_read(&ctx->mmap_changing))
    1861             :                 return -EAGAIN;
    1862             : 
    1863           0 :         user_uffdio_wp = (struct uffdio_writeprotect __user *) arg;
    1864             : 
    1865           0 :         if (copy_from_user(&uffdio_wp, user_uffdio_wp,
    1866             :                            sizeof(struct uffdio_writeprotect)))
    1867             :                 return -EFAULT;
    1868             : 
    1869           0 :         ret = validate_range(ctx->mm, uffdio_wp.range.start,
    1870             :                              uffdio_wp.range.len);
    1871           0 :         if (ret)
    1872             :                 return ret;
    1873             : 
    1874           0 :         if (uffdio_wp.mode & ~(UFFDIO_WRITEPROTECT_MODE_DONTWAKE |
    1875             :                                UFFDIO_WRITEPROTECT_MODE_WP))
    1876             :                 return -EINVAL;
    1877             : 
    1878           0 :         mode_wp = uffdio_wp.mode & UFFDIO_WRITEPROTECT_MODE_WP;
    1879           0 :         mode_dontwake = uffdio_wp.mode & UFFDIO_WRITEPROTECT_MODE_DONTWAKE;
    1880             : 
    1881           0 :         if (mode_wp && mode_dontwake)
    1882             :                 return -EINVAL;
    1883             : 
    1884           0 :         if (mmget_not_zero(ctx->mm)) {
    1885           0 :                 ret = mwriteprotect_range(ctx->mm, uffdio_wp.range.start,
    1886           0 :                                           uffdio_wp.range.len, mode_wp,
    1887             :                                           &ctx->mmap_changing);
    1888           0 :                 mmput(ctx->mm);
    1889             :         } else {
    1890             :                 return -ESRCH;
    1891             :         }
    1892             : 
    1893           0 :         if (ret)
    1894             :                 return ret;
    1895             : 
    1896           0 :         if (!mode_wp && !mode_dontwake) {
    1897           0 :                 range.start = uffdio_wp.range.start;
    1898           0 :                 range.len = uffdio_wp.range.len;
    1899           0 :                 wake_userfault(ctx, &range);
    1900             :         }
    1901             :         return ret;
    1902             : }
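
/*
 * Illustrative userspace sketch (not part of this file): toggling write
 * protection on a range registered with UFFDIO_REGISTER_MODE_WP.  As in the
 * handler above, clearing the WP bit also wakes waiters unless
 * UFFDIO_WRITEPROTECT_MODE_DONTWAKE is passed.  Hypothetical helper name.
 */
#include <stddef.h>
#include <stdbool.h>
#include <sys/ioctl.h>
#include <linux/userfaultfd.h>

static int set_write_protect(int uffd, void *addr, size_t len, bool protect)
{
        struct uffdio_writeprotect wp = {
                .range = { .start = (unsigned long)addr, .len = len },
                .mode  = protect ? UFFDIO_WRITEPROTECT_MODE_WP : 0,
        };

        return ioctl(uffd, UFFDIO_WRITEPROTECT, &wp);
}
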
    1903             : 
    1904           0 : static int userfaultfd_continue(struct userfaultfd_ctx *ctx, unsigned long arg)
    1905             : {
    1906           0 :         __s64 ret;
    1907           0 :         struct uffdio_continue uffdio_continue;
    1908           0 :         struct uffdio_continue __user *user_uffdio_continue;
    1909           0 :         struct userfaultfd_wake_range range;
    1910           0 :         uffd_flags_t flags = 0;
    1911             : 
    1912           0 :         user_uffdio_continue = (struct uffdio_continue __user *)arg;
    1913             : 
    1914           0 :         ret = -EAGAIN;
    1915           0 :         if (atomic_read(&ctx->mmap_changing))
    1916           0 :                 goto out;
    1917             : 
    1918           0 :         ret = -EFAULT;
    1919           0 :         if (copy_from_user(&uffdio_continue, user_uffdio_continue,
    1920             :                            /* don't copy the output fields */
    1921             :                            sizeof(uffdio_continue) - (sizeof(__s64))))
    1922           0 :                 goto out;
    1923             : 
    1924           0 :         ret = validate_range(ctx->mm, uffdio_continue.range.start,
    1925             :                              uffdio_continue.range.len);
    1926           0 :         if (ret)
    1927           0 :                 goto out;
    1928             : 
    1929           0 :         ret = -EINVAL;
    1930             :         /* double check for wraparound just in case. */
    1931           0 :         if (uffdio_continue.range.start + uffdio_continue.range.len <=
    1932             :             uffdio_continue.range.start) {
    1933           0 :                 goto out;
    1934             :         }
    1935           0 :         if (uffdio_continue.mode & ~(UFFDIO_CONTINUE_MODE_DONTWAKE |
    1936             :                                      UFFDIO_CONTINUE_MODE_WP))
    1937           0 :                 goto out;
    1938           0 :         if (uffdio_continue.mode & UFFDIO_CONTINUE_MODE_WP)
    1939           0 :                 flags |= MFILL_ATOMIC_WP;
    1940             : 
    1941           0 :         if (mmget_not_zero(ctx->mm)) {
    1942           0 :                 ret = mfill_atomic_continue(ctx->mm, uffdio_continue.range.start,
    1943           0 :                                             uffdio_continue.range.len,
    1944             :                                             &ctx->mmap_changing, flags);
    1945           0 :                 mmput(ctx->mm);
    1946             :         } else {
    1947             :                 return -ESRCH;
    1948             :         }
    1949             : 
    1950           0 :         if (unlikely(put_user(ret, &user_uffdio_continue->mapped)))
    1951             :                 return -EFAULT;
    1952           0 :         if (ret < 0)
    1953           0 :                 goto out;
    1954             : 
    1955             :         /* len == 0 would wake all */
    1956           0 :         BUG_ON(!ret);
    1957           0 :         range.len = ret;
    1958           0 :         if (!(uffdio_continue.mode & UFFDIO_CONTINUE_MODE_DONTWAKE)) {
    1959           0 :                 range.start = uffdio_continue.range.start;
    1960           0 :                 wake_userfault(ctx, &range);
    1961             :         }
    1962           0 :         ret = range.len == uffdio_continue.range.len ? 0 : -EAGAIN;
    1963             : 
    1964           0 : out:
    1965           0 :         return ret;
    1966             : }
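UFFDIO_CONTINUE resolves a minor fault by asking the kernel to install page table entries for pages that already exist in the page cache, rather than copying new data in. A minimal userspace sketch, assuming 'uffd' was registered with UFFDIO_REGISTER_MODE_MINOR over a shmem or hugetlbfs mapping and that a minor-fault message was just read from the descriptor; uffd_resolve_minor and page_size are illustrative names.

    #include <linux/userfaultfd.h>
    #include <sys/ioctl.h>
    #include <errno.h>

    static long uffd_resolve_minor(int uffd, unsigned long fault_addr,
                                   unsigned long page_size)
    {
            struct uffdio_continue cont = {
                    .range = {
                            .start = fault_addr & ~(page_size - 1), /* page-align */
                            .len   = page_size,
                    },
                    .mode  = 0, /* no DONTWAKE: wake the faulting thread when done */
            };

            if (ioctl(uffd, UFFDIO_CONTINUE, &cont) == -1) {
                    /*
                     * On partial progress the ioctl fails with EAGAIN and
                     * 'mapped' holds the number of bytes already resolved.
                     */
                    if (errno == EAGAIN)
                            return cont.mapped;
                    return -1;
            }
            return cont.mapped;
    }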
    1967             : 
    1968             : static inline unsigned int uffd_ctx_features(__u64 user_features)
    1969             : {
    1970             :         /*
    1971             :          * For the current set of features the bits just coincide. Set
    1972             :          * UFFD_FEATURE_INITIALIZED to mark the features as enabled.
    1973             :          */
    1974           0 :         return (unsigned int)user_features | UFFD_FEATURE_INITIALIZED;
    1975             : }
    1976             : 
    1977             : /*
    1978             :  * Userland asks for a certain API version, and we return which feature
    1979             :  * bits and ioctl commands are implemented in this kernel for that API
    1980             :  * version, or -EINVAL if the version is unknown.
    1981             :  */
    1982           0 : static int userfaultfd_api(struct userfaultfd_ctx *ctx,
    1983             :                            unsigned long arg)
    1984             : {
    1985           0 :         struct uffdio_api uffdio_api;
    1986           0 :         void __user *buf = (void __user *)arg;
    1987           0 :         unsigned int ctx_features;
    1988           0 :         int ret;
    1989           0 :         __u64 features;
    1990             : 
    1991           0 :         ret = -EFAULT;
    1992           0 :         if (copy_from_user(&uffdio_api, buf, sizeof(uffdio_api)))
    1993           0 :                 goto out;
    1994           0 :         features = uffdio_api.features;
    1995           0 :         ret = -EINVAL;
    1996           0 :         if (uffdio_api.api != UFFD_API || (features & ~UFFD_API_FEATURES))
    1997           0 :                 goto err_out;
    1998           0 :         ret = -EPERM;
    1999           0 :         if ((features & UFFD_FEATURE_EVENT_FORK) && !capable(CAP_SYS_PTRACE))
    2000           0 :                 goto err_out;
    2001             :         /* report all available features and ioctls to userland */
    2002           0 :         uffdio_api.features = UFFD_API_FEATURES;
    2003             : #ifndef CONFIG_HAVE_ARCH_USERFAULTFD_MINOR
    2004             :         uffdio_api.features &=
    2005             :                 ~(UFFD_FEATURE_MINOR_HUGETLBFS | UFFD_FEATURE_MINOR_SHMEM);
    2006             : #endif
    2007             : #ifndef CONFIG_HAVE_ARCH_USERFAULTFD_WP
    2008             :         uffdio_api.features &= ~UFFD_FEATURE_PAGEFAULT_FLAG_WP;
    2009             : #endif
    2010             : #ifndef CONFIG_PTE_MARKER_UFFD_WP
    2011             :         uffdio_api.features &= ~UFFD_FEATURE_WP_HUGETLBFS_SHMEM;
    2012             :         uffdio_api.features &= ~UFFD_FEATURE_WP_UNPOPULATED;
    2013             : #endif
    2014           0 :         uffdio_api.ioctls = UFFD_API_IOCTLS;
    2015           0 :         ret = -EFAULT;
    2016           0 :         if (copy_to_user(buf, &uffdio_api, sizeof(uffdio_api)))
    2017           0 :                 goto out;
    2018             : 
    2019             :         /* only enable the requested features for this uffd context */
    2020           0 :         ctx_features = uffd_ctx_features(features);
    2021           0 :         ret = -EINVAL;
    2022           0 :         if (cmpxchg(&ctx->features, 0, ctx_features) != 0)
    2023           0 :                 goto err_out;
    2024             : 
    2025             :         ret = 0;
    2026           0 : out:
    2027           0 :         return ret;
    2028           0 : err_out:
    2029           0 :         memset(&uffdio_api, 0, sizeof(uffdio_api));
    2030           0 :         if (copy_to_user(buf, &uffdio_api, sizeof(uffdio_api)))
    2031           0 :                 ret = -EFAULT;
    2032           0 :         goto out;
    2033             : }
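From userspace the handshake looks roughly like this: create the descriptor, then negotiate with UFFDIO_API before anything else, since userfaultfd_ioctl() below rejects every other command until the context is initialized. A minimal sketch; uffd_open is an invented helper and error handling is reduced to returning -1.

    #include <linux/userfaultfd.h>
    #include <sys/syscall.h>
    #include <sys/ioctl.h>
    #include <unistd.h>
    #include <fcntl.h>

    static int uffd_open(__u64 wanted_features)
    {
            int uffd = syscall(SYS_userfaultfd, O_CLOEXEC | O_NONBLOCK);
            if (uffd == -1)
                    return -1;

            struct uffdio_api api = {
                    .api      = UFFD_API,
                    .features = wanted_features, /* e.g. UFFD_FEATURE_PAGEFAULT_FLAG_WP */
            };

            /*
             * On success api.features reports every feature this kernel
             * supports and api.ioctls the available ioctls.  Requesting an
             * unknown feature fails with EINVAL, and a second UFFDIO_API on
             * the same fd fails too (see the cmpxchg above).
             */
            if (ioctl(uffd, UFFDIO_API, &api) == -1) {
                    close(uffd);
                    return -1;
            }
            return uffd;
    }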
    2034             : 
    2035           0 : static long userfaultfd_ioctl(struct file *file, unsigned cmd,
    2036             :                               unsigned long arg)
    2037             : {
    2038           0 :         int ret = -EINVAL;
    2039           0 :         struct userfaultfd_ctx *ctx = file->private_data;
    2040             : 
    2041           0 :         if (cmd != UFFDIO_API && !userfaultfd_is_initialized(ctx))
    2042             :                 return -EINVAL;
    2043             : 
    2044           0 :         switch(cmd) {
    2045           0 :         case UFFDIO_API:
    2046           0 :                 ret = userfaultfd_api(ctx, arg);
    2047           0 :                 break;
    2048           0 :         case UFFDIO_REGISTER:
    2049           0 :                 ret = userfaultfd_register(ctx, arg);
    2050           0 :                 break;
    2051           0 :         case UFFDIO_UNREGISTER:
    2052           0 :                 ret = userfaultfd_unregister(ctx, arg);
    2053           0 :                 break;
    2054           0 :         case UFFDIO_WAKE:
    2055           0 :                 ret = userfaultfd_wake(ctx, arg);
    2056           0 :                 break;
    2057           0 :         case UFFDIO_COPY:
    2058           0 :                 ret = userfaultfd_copy(ctx, arg);
    2059           0 :                 break;
    2060           0 :         case UFFDIO_ZEROPAGE:
    2061           0 :                 ret = userfaultfd_zeropage(ctx, arg);
    2062           0 :                 break;
    2063           0 :         case UFFDIO_WRITEPROTECT:
    2064           0 :                 ret = userfaultfd_writeprotect(ctx, arg);
    2065           0 :                 break;
    2066           0 :         case UFFDIO_CONTINUE:
    2067           0 :                 ret = userfaultfd_continue(ctx, arg);
    2068           0 :                 break;
    2069             :         }
    2070           0 :         return ret;
    2071             : }
    2072             : 
    2073             : #ifdef CONFIG_PROC_FS
    2074           0 : static void userfaultfd_show_fdinfo(struct seq_file *m, struct file *f)
    2075             : {
    2076           0 :         struct userfaultfd_ctx *ctx = f->private_data;
    2077           0 :         wait_queue_entry_t *wq;
    2078           0 :         unsigned long pending = 0, total = 0;
    2079             : 
    2080           0 :         spin_lock_irq(&ctx->fault_pending_wqh.lock);
    2081           0 :         list_for_each_entry(wq, &ctx->fault_pending_wqh.head, entry) {
    2082           0 :                 pending++;
    2083           0 :                 total++;
    2084             :         }
    2085           0 :         list_for_each_entry(wq, &ctx->fault_wqh.head, entry) {
    2086           0 :                 total++;
    2087             :         }
    2088           0 :         spin_unlock_irq(&ctx->fault_pending_wqh.lock);
    2089             : 
    2090             :         /*
    2091             :          * If more protocols are added in the future, they will all be
    2092             :          * shown separated by a space, like this:
    2093             :          *      protocols: aa:... bb:...
    2094             :          */
    2095           0 :         seq_printf(m, "pending:\t%lu\ntotal:\t%lu\nAPI:\t%Lx:%x:%Lx\n",
    2096             :                    pending, total, UFFD_API, ctx->features,
    2097             :                    UFFD_API_IOCTLS|UFFD_API_RANGE_IOCTLS);
    2098           0 : }
    2099             : #endif
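The fdinfo text emitted above is visible through procfs. A small sketch, assuming an already-created userfaultfd descriptor; uffd_dump_fdinfo is an illustrative name and the surrounding generic fdinfo fields vary by kernel version.

    #include <stdio.h>

    static void uffd_dump_fdinfo(int uffd)
    {
            char path[64];
            char line[256];
            FILE *f;

            snprintf(path, sizeof(path), "/proc/self/fdinfo/%d", uffd);
            f = fopen(path, "r");
            if (!f)
                    return;

            /*
             * Expect "pending:", "total:" and "API:<api>:<features>:<ioctls>"
             * lines among the generic fdinfo fields.
             */
            while (fgets(line, sizeof(line), f))
                    fputs(line, stdout);

            fclose(f);
    }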
    2100             : 
    2101             : static const struct file_operations userfaultfd_fops = {
    2102             : #ifdef CONFIG_PROC_FS
    2103             :         .show_fdinfo    = userfaultfd_show_fdinfo,
    2104             : #endif
    2105             :         .release        = userfaultfd_release,
    2106             :         .poll           = userfaultfd_poll,
    2107             :         .read           = userfaultfd_read,
    2108             :         .unlocked_ioctl = userfaultfd_ioctl,
    2109             :         .compat_ioctl   = compat_ptr_ioctl,
    2110             :         .llseek         = noop_llseek,
    2111             : };
    2112             : 
    2113           0 : static void init_once_userfaultfd_ctx(void *mem)
    2114             : {
    2115           0 :         struct userfaultfd_ctx *ctx = (struct userfaultfd_ctx *) mem;
    2116             : 
    2117           0 :         init_waitqueue_head(&ctx->fault_pending_wqh);
    2118           0 :         init_waitqueue_head(&ctx->fault_wqh);
    2119           0 :         init_waitqueue_head(&ctx->event_wqh);
    2120           0 :         init_waitqueue_head(&ctx->fd_wqh);
    2121           0 :         seqcount_spinlock_init(&ctx->refile_seq, &ctx->fault_pending_wqh.lock);
    2122           0 : }
    2123             : 
    2124           0 : static int new_userfaultfd(int flags)
    2125             : {
    2126           0 :         struct userfaultfd_ctx *ctx;
    2127           0 :         int fd;
    2128             : 
    2129           0 :         BUG_ON(!current->mm);
    2130             : 
    2131             :         /* Check the UFFD_* constants for consistency.  */
    2132           0 :         BUILD_BUG_ON(UFFD_USER_MODE_ONLY & UFFD_SHARED_FCNTL_FLAGS);
    2133           0 :         BUILD_BUG_ON(UFFD_CLOEXEC != O_CLOEXEC);
    2134           0 :         BUILD_BUG_ON(UFFD_NONBLOCK != O_NONBLOCK);
    2135             : 
    2136           0 :         if (flags & ~(UFFD_SHARED_FCNTL_FLAGS | UFFD_USER_MODE_ONLY))
    2137             :                 return -EINVAL;
    2138             : 
    2139           0 :         ctx = kmem_cache_alloc(userfaultfd_ctx_cachep, GFP_KERNEL);
    2140           0 :         if (!ctx)
    2141             :                 return -ENOMEM;
    2142             : 
    2143           0 :         refcount_set(&ctx->refcount, 1);
    2144           0 :         ctx->flags = flags;
    2145           0 :         ctx->features = 0;
    2146           0 :         ctx->released = false;
    2147           0 :         atomic_set(&ctx->mmap_changing, 0);
    2148           0 :         ctx->mm = current->mm;
    2149             :         /* prevent the mm struct from being freed */
    2150           0 :         mmgrab(ctx->mm);
    2151             : 
    2152           0 :         fd = anon_inode_getfd_secure("[userfaultfd]", &userfaultfd_fops, ctx,
    2153             :                         O_RDONLY | (flags & UFFD_SHARED_FCNTL_FLAGS), NULL);
    2154           0 :         if (fd < 0) {
    2155           0 :                 mmdrop(ctx->mm);
    2156           0 :                 kmem_cache_free(userfaultfd_ctx_cachep, ctx);
    2157             :         }
    2158             :         return fd;
    2159             : }
    2160             : 
    2161           0 : static inline bool userfaultfd_syscall_allowed(int flags)
    2162             : {
    2163             :         /* Userspace-only page faults are always allowed */
    2164           0 :         if (flags & UFFD_USER_MODE_ONLY)
    2165             :                 return true;
    2166             : 
    2167             :         /*
    2168             :          * The user is requesting a userfaultfd which can handle kernel faults.
    2169             :          * Privileged users are always allowed to do this.
    2170             :          */
    2171           0 :         if (capable(CAP_SYS_PTRACE))
    2172             :                 return true;
    2173             : 
    2174             :         /* Otherwise, access to kernel fault handling is sysctl controlled. */
    2175           0 :         return sysctl_unprivileged_userfaultfd;
    2176             : }
    2177             : 
    2178           0 : SYSCALL_DEFINE1(userfaultfd, int, flags)
    2179             : {
    2180           0 :         if (!userfaultfd_syscall_allowed(flags))
    2181             :                 return -EPERM;
    2182             : 
    2183           0 :         return new_userfaultfd(flags);
    2184             : }
    2185             : 
    2186           0 : static long userfaultfd_dev_ioctl(struct file *file, unsigned int cmd, unsigned long flags)
    2187             : {
    2188           0 :         if (cmd != USERFAULTFD_IOC_NEW)
    2189             :                 return -EINVAL;
    2190             : 
    2191           0 :         return new_userfaultfd(flags);
    2192             : }
    2193             : 
    2194             : static const struct file_operations userfaultfd_dev_fops = {
    2195             :         .unlocked_ioctl = userfaultfd_dev_ioctl,
    2196             :         .compat_ioctl = userfaultfd_dev_ioctl,
    2197             :         .owner = THIS_MODULE,
    2198             :         .llseek = noop_llseek,
    2199             : };
    2200             : 
    2201             : static struct miscdevice userfaultfd_misc = {
    2202             :         .minor = MISC_DYNAMIC_MINOR,
    2203             :         .name = "userfaultfd",
    2204             :         .fops = &userfaultfd_dev_fops
    2205             : };
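Taken together, there are two ways to obtain a userfaultfd: the userfaultfd() syscall, gated by userfaultfd_syscall_allowed() above (UFFD_USER_MODE_ONLY, CAP_SYS_PTRACE, or the vm.unprivileged_userfaultfd sysctl), and the /dev/userfaultfd misc device registered here, gated only by permissions on the device node. A minimal sketch of a caller that tries both; uffd_create is an invented helper and it assumes a uapi header new enough to define USERFAULTFD_IOC_NEW.

    #include <linux/userfaultfd.h>
    #include <sys/syscall.h>
    #include <sys/ioctl.h>
    #include <unistd.h>
    #include <fcntl.h>

    static int uffd_create(int flags)
    {
            /* Direct syscall path, e.g. flags = O_CLOEXEC | UFFD_USER_MODE_ONLY. */
            int uffd = syscall(SYS_userfaultfd, flags);
            if (uffd >= 0)
                    return uffd;

            /*
             * Fall back to the misc device; USERFAULTFD_IOC_NEW takes the same
             * flags and hands back a brand new userfaultfd file descriptor.
             */
            int dev = open("/dev/userfaultfd", O_RDWR | O_CLOEXEC);
            if (dev == -1)
                    return -1;

            uffd = ioctl(dev, USERFAULTFD_IOC_NEW, flags);
            close(dev);
            return uffd;
    }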
    2206             : 
    2207           0 : static int __init userfaultfd_init(void)
    2208             : {
    2209           0 :         int ret;
    2210             : 
    2211           0 :         ret = misc_register(&userfaultfd_misc);
    2212           0 :         if (ret)
    2213             :                 return ret;
    2214             : 
    2215           0 :         userfaultfd_ctx_cachep = kmem_cache_create("userfaultfd_ctx_cache",
    2216             :                                                 sizeof(struct userfaultfd_ctx),
    2217             :                                                 0,
    2218             :                                                 SLAB_HWCACHE_ALIGN|SLAB_PANIC,
    2219             :                                                 init_once_userfaultfd_ctx);
    2220             : #ifdef CONFIG_SYSCTL
    2221           0 :         register_sysctl_init("vm", vm_userfaultfd_table);
    2222             : #endif
    2223           0 :         return 0;
    2224             : }
    2225             : __initcall(userfaultfd_init);

Generated by: LCOV version 1.14