LCOV - code coverage report
Current view: top level - fs - locks.c (source / functions)
Test: fstests of 6.5.0-rc3-djwa @ Mon Jul 31 20:08:17 PDT 2023
Date: 2023-07-31 20:08:17
                    Hit     Total    Coverage
Lines:              924      1220      75.7 %
Functions:           70        87      80.5 %

          Line data    Source code
       1             : // SPDX-License-Identifier: GPL-2.0-only
       2             : /*
       3             :  *  linux/fs/locks.c
       4             :  *
       5             :  * We implement four types of file locks: BSD locks, posix locks, open
       6             :  * file description locks, and leases.  For details about BSD locks,
       7             :  * see the flock(2) man page; for details about the other three, see
       8             :  * fcntl(2).
       9             :  *
      10             :  *
      11             :  * Locking conflicts and dependencies:
      12             :  * If multiple threads attempt to lock the same byte (or flock the same file)
      13             :  * only one can be granted the lock, and the others must wait their turn.
      14             :  * The first lock has been "applied" or "granted", the others are "waiting"
      15             :  * and are "blocked" by the "applied" lock.
      16             :  *
      17             :  * Waiting and applied locks are all kept in trees whose properties are:
      18             :  *
      19             :  *      - the root of a tree may be an applied or waiting lock.
      20             :  *      - every other node in the tree is a waiting lock that
      21             :  *        conflicts with every ancestor of that node.
      22             :  *
      23             :  * Every such tree begins life as a waiting singleton which obviously
      24             :  * satisfies the above properties.
      25             :  *
      26             :  * The only ways we modify trees preserve these properties:
      27             :  *
      28             :  *      1. We may add a new leaf node, but only after first verifying that it
      29             :  *         conflicts with all of its ancestors.
      30             :  *      2. We may remove the root of a tree, creating a new singleton
      31             :  *         tree from the root and N new trees rooted in the immediate
      32             :  *         children.
      33             :  *      3. If the root of a tree is not currently an applied lock, we may
      34             :  *         apply it (if possible).
      35             :  *      4. We may upgrade the root of the tree (either extend its range,
      36             :  *         or upgrade its entire range from read to write).
      37             :  *
      38             :  * When an applied lock is modified in a way that reduces or downgrades any
      39             :  * part of its range, we remove all its children (2 above).  This particularly
      40             :  * happens when a lock is unlocked.
      41             :  *
      42             :  * For each of those child trees we "wake up" the thread which is
      43             :  * waiting for the lock so it can continue handling as follows: if the
      44             :  * root of the tree can be applied, we apply it (3).  If it cannot, it must
      45             :  * conflict with some applied lock.  We remove (wake up) all of its children
      46             :  * (2), and add it as a new leaf to the tree rooted in the applied
      47             :  * lock (1).  We then repeat the process recursively with those
      48             :  * children.
      49             :  *
      50             :  */
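As a userspace illustration of the four lock types described above, the hedged sketch below takes and releases each kind on an ordinary file. The path and the error handling are placeholders; treat it as an illustrative sketch, not part of locks.c.

        #define _GNU_SOURCE
        #include <fcntl.h>
        #include <sys/file.h>
        #include <unistd.h>

        int main(void)
        {
                int fd = open("/tmp/locks-demo", O_RDWR | O_CREAT, 0600); /* hypothetical path */

                if (fd < 0)
                        return 1;

                /* 1. BSD lock: whole file, owned by the open file description. */
                flock(fd, LOCK_EX);
                flock(fd, LOCK_UN);

                /* 2. Classic POSIX byte-range lock, owned by the process. */
                struct flock fl = { .l_type = F_WRLCK, .l_whence = SEEK_SET,
                                    .l_start = 0, .l_len = 0 /* to EOF */ };
                fcntl(fd, F_SETLK, &fl);
                fl.l_type = F_UNLCK;
                fcntl(fd, F_SETLK, &fl);

                /* 3. Open file description (OFD) lock: byte-range like POSIX, but
                 * owned by the open file description like flock(); l_pid must be 0. */
                struct flock ofd = { .l_type = F_RDLCK, .l_whence = SEEK_SET,
                                     .l_start = 0, .l_len = 0, .l_pid = 0 };
                fcntl(fd, F_OFD_SETLK, &ofd);
                ofd.l_type = F_UNLCK;
                fcntl(fd, F_OFD_SETLK, &ofd);

                /* 4. Lease: ask to be notified (SIGIO by default) before another
                 * open can conflict; requires being the file's only opener. */
                fcntl(fd, F_SETLEASE, F_WRLCK);
                fcntl(fd, F_SETLEASE, F_UNLCK);

                close(fd);
                return 0;
        }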
      51             : 
      52             : #include <linux/capability.h>
      53             : #include <linux/file.h>
      54             : #include <linux/fdtable.h>
      55             : #include <linux/filelock.h>
      56             : #include <linux/fs.h>
      57             : #include <linux/init.h>
      58             : #include <linux/security.h>
      59             : #include <linux/slab.h>
      60             : #include <linux/syscalls.h>
      61             : #include <linux/time.h>
      62             : #include <linux/rcupdate.h>
      63             : #include <linux/pid_namespace.h>
      64             : #include <linux/hashtable.h>
      65             : #include <linux/percpu.h>
      66             : #include <linux/sysctl.h>
      67             : 
      68             : #define CREATE_TRACE_POINTS
      69             : #include <trace/events/filelock.h>
      70             : 
      71             : #include <linux/uaccess.h>
      72             : 
      73             : #define IS_POSIX(fl)    (fl->fl_flags & FL_POSIX)
      74             : #define IS_FLOCK(fl)    (fl->fl_flags & FL_FLOCK)
      75             : #define IS_LEASE(fl)    (fl->fl_flags & (FL_LEASE|FL_DELEG|FL_LAYOUT))
      76             : #define IS_OFDLCK(fl)   (fl->fl_flags & FL_OFDLCK)
      77             : #define IS_REMOTELCK(fl)        (fl->fl_pid <= 0)
      78             : 
      79             : static bool lease_breaking(struct file_lock *fl)
      80             : {
      81           2 :         return fl->fl_flags & (FL_UNLOCK_PENDING | FL_DOWNGRADE_PENDING);
      82             : }
      83             : 
      84             : static int target_leasetype(struct file_lock *fl)
      85             : {
      86           0 :         if (fl->fl_flags & FL_UNLOCK_PENDING)
      87             :                 return F_UNLCK;
      88          16 :         if (fl->fl_flags & FL_DOWNGRADE_PENDING)
      89             :                 return F_RDLCK;
      90          16 :         return fl->fl_type;
      91             : }
      92             : 
      93             : static int leases_enable = 1;
      94             : static int lease_break_time = 45;
      95             : 
      96             : #ifdef CONFIG_SYSCTL
      97             : static struct ctl_table locks_sysctls[] = {
      98             :         {
      99             :                 .procname       = "leases-enable",
     100             :                 .data           = &leases_enable,
     101             :                 .maxlen         = sizeof(int),
     102             :                 .mode           = 0644,
     103             :                 .proc_handler   = proc_dointvec,
     104             :         },
     105             : #ifdef CONFIG_MMU
     106             :         {
     107             :                 .procname       = "lease-break-time",
     108             :                 .data           = &lease_break_time,
     109             :                 .maxlen         = sizeof(int),
     110             :                 .mode           = 0644,
     111             :                 .proc_handler   = proc_dointvec,
     112             :         },
     113             : #endif /* CONFIG_MMU */
     114             :         {}
     115             : };
     116             : 
     117           0 : static int __init init_fs_locks_sysctls(void)
     118             : {
     119           0 :         register_sysctl_init("fs", locks_sysctls);
     120           0 :         return 0;
     121             : }
     122             : early_initcall(init_fs_locks_sysctls);
     123             : #endif /* CONFIG_SYSCTL */
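Because the table is registered under "fs", these knobs surface as /proc/sys/fs/leases-enable and /proc/sys/fs/lease-break-time. A minimal userspace sketch of reading one of them (error handling kept short):

        #include <stdio.h>

        int main(void)
        {
                int secs = 0;
                FILE *f = fopen("/proc/sys/fs/lease-break-time", "r");

                if (f && fscanf(f, "%d", &secs) == 1)
                        printf("lease-break-time: %d seconds\n", secs); /* defaults to 45 */
                if (f)
                        fclose(f);
                return 0;
        }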
     124             : 
     125             : /*
     126             :  * The global file_lock_list is only used for displaying /proc/locks, so we
     127             :  * keep a list on each CPU, with each list protected by its own spinlock.
     128             :  * Global serialization is done using file_rwsem.
     129             :  *
     130             :  * Note that alterations to the list also require that the relevant flc_lock is
     131             :  * held.
     132             :  */
     133             : struct file_lock_list_struct {
     134             :         spinlock_t              lock;
     135             :         struct hlist_head       hlist;
     136             : };
     137             : static DEFINE_PER_CPU(struct file_lock_list_struct, file_lock_list);
     138             : DEFINE_STATIC_PERCPU_RWSEM(file_rwsem);
     139             : 
     140             : 
     141             : /*
     142             :  * The blocked_hash is used to find POSIX lock loops for deadlock detection.
     143             :  * It is protected by blocked_lock_lock.
     144             :  *
     145             :  * We hash locks by lockowner in order to optimize searching for the lock a
     146             :  * particular lockowner is waiting on.
     147             :  *
     148             :  * FIXME: make this value scale via some heuristic? We generally will want more
     149             :  * buckets when we have more lockowners holding locks, but that's a little
     150             :  * difficult to determine without knowing what the workload will look like.
     151             :  */
     152             : #define BLOCKED_HASH_BITS       7
     153             : static DEFINE_HASHTABLE(blocked_hash, BLOCKED_HASH_BITS);
     154             : 
     155             : /*
     156             :  * This lock protects the blocked_hash. Generally, if you're accessing it, you
     157             :  * want to be holding this lock.
     158             :  *
     159             :  * In addition, it also protects the fl->fl_blocked_requests list, and the
     160             :  * fl->fl_blocker pointer for file_lock structures that are acting as lock
     161             :  * requests (in contrast to those that are acting as records of acquired locks).
     162             :  *
     163             :  * Note that when we acquire this lock in order to change the above fields,
     164             :  * we often hold the flc_lock as well. In certain cases, when reading the fields
     165             :  * protected by this lock, we can skip acquiring it iff we already hold the
     166             :  * flc_lock.
     167             :  */
     168             : static DEFINE_SPINLOCK(blocked_lock_lock);
     169             : 
     170             : static struct kmem_cache *flctx_cache __read_mostly;
     171             : static struct kmem_cache *filelock_cache __read_mostly;
     172             : 
     173             : static struct file_lock_context *
     174      295225 : locks_get_lock_context(struct inode *inode, int type)
     175             : {
     176      295225 :         struct file_lock_context *ctx;
     177             : 
     178             :         /* paired with cmpxchg() below */
     179      295225 :         ctx = locks_inode_context(inode);
     180      295225 :         if (likely(ctx) || type == F_UNLCK)
     181      230146 :                 goto out;
     182             : 
     183       65079 :         ctx = kmem_cache_alloc(flctx_cache, GFP_KERNEL);
     184       65079 :         if (!ctx)
     185           0 :                 goto out;
     186             : 
     187       65079 :         spin_lock_init(&ctx->flc_lock);
     188       65079 :         INIT_LIST_HEAD(&ctx->flc_flock);
     189       65079 :         INIT_LIST_HEAD(&ctx->flc_posix);
     190       65079 :         INIT_LIST_HEAD(&ctx->flc_lease);
     191             : 
     192             :         /*
     193             :          * Assign the pointer if it's not already assigned. If it is, then
     194             :          * free the context we just allocated.
     195             :          */
     196       65079 :         if (cmpxchg(&inode->i_flctx, NULL, ctx)) {
     197           0 :                 kmem_cache_free(flctx_cache, ctx);
     198           0 :                 ctx = locks_inode_context(inode);
     199             :         }
     200       65079 : out:
     201      295225 :         trace_locks_get_lock_context(inode, type, ctx);
     202      295224 :         return ctx;
     203             : }
     204             : 
     205             : static void
     206           0 : locks_dump_ctx_list(struct list_head *list, char *list_type)
     207             : {
     208           0 :         struct file_lock *fl;
     209             : 
     210           0 :         list_for_each_entry(fl, list, fl_list) {
     211           0 :                 pr_warn("%s: fl_owner=%p fl_flags=0x%x fl_type=0x%x fl_pid=%u\n", list_type, fl->fl_owner, fl->fl_flags, fl->fl_type, fl->fl_pid);
     212             :         }
     213           0 : }
     214             : 
     215             : static void
     216       61564 : locks_check_ctx_lists(struct inode *inode)
     217             : {
     218       61564 :         struct file_lock_context *ctx = inode->i_flctx;
     219             : 
     220       61564 :         if (unlikely(!list_empty(&ctx->flc_flock) ||
     221             :                      !list_empty(&ctx->flc_posix) ||
     222             :                      !list_empty(&ctx->flc_lease))) {
     223           0 :                 pr_warn("Leaked locks on dev=0x%x:0x%x ino=0x%lx:\n",
     224             :                         MAJOR(inode->i_sb->s_dev), MINOR(inode->i_sb->s_dev),
     225             :                         inode->i_ino);
     226           0 :                 locks_dump_ctx_list(&ctx->flc_flock, "FLOCK");
     227           0 :                 locks_dump_ctx_list(&ctx->flc_posix, "POSIX");
     228           0 :                 locks_dump_ctx_list(&ctx->flc_lease, "LEASE");
     229             :         }
     230       61564 : }
     231             : 
     232             : static void
     233      506967 : locks_check_ctx_file_list(struct file *filp, struct list_head *list,
     234             :                                 char *list_type)
     235             : {
     236      506967 :         struct file_lock *fl;
     237      506967 :         struct inode *inode = file_inode(filp);
     238             : 
     239      546335 :         list_for_each_entry(fl, list, fl_list)
     240       39368 :                 if (fl->fl_file == filp)
     241           0 :                         pr_warn("Leaked %s lock on dev=0x%x:0x%x ino=0x%lx "
     242             :                                 " fl_owner=%p fl_flags=0x%x fl_type=0x%x fl_pid=%u\n",
     243             :                                 list_type, MAJOR(inode->i_sb->s_dev),
     244             :                                 MINOR(inode->i_sb->s_dev), inode->i_ino,
     245             :                                 fl->fl_owner, fl->fl_flags, fl->fl_type, fl->fl_pid);
     246      506967 : }
     247             : 
     248             : void
     249  1012186289 : locks_free_lock_context(struct inode *inode)
     250             : {
     251  1012186289 :         struct file_lock_context *ctx = locks_inode_context(inode);
     252             : 
     253  1012276146 :         if (unlikely(ctx)) {
     254       61564 :                 locks_check_ctx_lists(inode);
     255       61564 :                 kmem_cache_free(flctx_cache, ctx);
     256             :         }
     257  1012276146 : }
     258             : 
     259      488753 : static void locks_init_lock_heads(struct file_lock *fl)
     260             : {
     261      488753 :         INIT_HLIST_NODE(&fl->fl_link);
     262      488753 :         INIT_LIST_HEAD(&fl->fl_list);
     263      488753 :         INIT_LIST_HEAD(&fl->fl_blocked_requests);
     264      488753 :         INIT_LIST_HEAD(&fl->fl_blocked_member);
     265      488753 :         init_waitqueue_head(&fl->fl_wait);
     266      488755 : }
     267             : 
     268             : /* Allocate an empty lock structure. */
     269      263510 : struct file_lock *locks_alloc_lock(void)
     270             : {
     271      263510 :         struct file_lock *fl = kmem_cache_zalloc(filelock_cache, GFP_KERNEL);
     272             : 
     273      263512 :         if (fl)
     274      263512 :                 locks_init_lock_heads(fl);
     275             : 
     276      263512 :         return fl;
     277             : }
     278             : EXPORT_SYMBOL_GPL(locks_alloc_lock);
     279             : 
     280      325869 : void locks_release_private(struct file_lock *fl)
     281             : {
     282      325869 :         BUG_ON(waitqueue_active(&fl->fl_wait));
     283      325869 :         BUG_ON(!list_empty(&fl->fl_list));
     284      325869 :         BUG_ON(!list_empty(&fl->fl_blocked_requests));
     285      325869 :         BUG_ON(!list_empty(&fl->fl_blocked_member));
     286      325869 :         BUG_ON(!hlist_unhashed(&fl->fl_link));
     287             : 
     288      325869 :         if (fl->fl_ops) {
     289        2952 :                 if (fl->fl_ops->fl_release_private)
     290        2952 :                         fl->fl_ops->fl_release_private(fl);
     291        2952 :                 fl->fl_ops = NULL;
     292             :         }
     293             : 
     294      325869 :         if (fl->fl_lmops) {
     295          32 :                 if (fl->fl_lmops->lm_put_owner) {
     296           0 :                         fl->fl_lmops->lm_put_owner(fl->fl_owner);
     297           0 :                         fl->fl_owner = NULL;
     298             :                 }
     299          32 :                 fl->fl_lmops = NULL;
     300             :         }
     301      325869 : }
     302             : EXPORT_SYMBOL_GPL(locks_release_private);
     303             : 
     304             : /**
     305             :  * locks_owner_has_blockers - Check for blocking lock requests
     306             :  * @flctx: file lock context
     307             :  * @owner: lock owner
     308             :  *
     309             :  * Return values:
     310             :  *   %true: @owner has at least one blocker
     311             :  *   %false: @owner has no blockers
     312             :  */
     313           0 : bool locks_owner_has_blockers(struct file_lock_context *flctx,
     314             :                 fl_owner_t owner)
     315             : {
     316           0 :         struct file_lock *fl;
     317             : 
     318           0 :         spin_lock(&flctx->flc_lock);
     319           0 :         list_for_each_entry(fl, &flctx->flc_posix, fl_list) {
     320           0 :                 if (fl->fl_owner != owner)
     321           0 :                         continue;
     322           0 :                 if (!list_empty(&fl->fl_blocked_requests)) {
     323           0 :                         spin_unlock(&flctx->flc_lock);
     324           0 :                         return true;
     325             :                 }
     326             :         }
     327           0 :         spin_unlock(&flctx->flc_lock);
     328           0 :         return false;
     329             : }
     330             : EXPORT_SYMBOL_GPL(locks_owner_has_blockers);
     331             : 
     332             : /* Free a lock which is not in use. */
     333      263512 : void locks_free_lock(struct file_lock *fl)
     334             : {
     335      263512 :         locks_release_private(fl);
     336      263512 :         kmem_cache_free(filelock_cache, fl);
     337      263512 : }
     338             : EXPORT_SYMBOL(locks_free_lock);
     339             : 
     340             : static void
     341      295281 : locks_dispose_list(struct list_head *dispose)
     342             : {
     343      295281 :         struct file_lock *fl;
     344             : 
     345      422287 :         while (!list_empty(dispose)) {
     346      127006 :                 fl = list_first_entry(dispose, struct file_lock, fl_list);
     347      127006 :                 list_del_init(&fl->fl_list);
     348      127006 :                 locks_free_lock(fl);
     349             :         }
     350      295281 : }
     351             : 
     352      225242 : void locks_init_lock(struct file_lock *fl)
     353             : {
     354      225242 :         memset(fl, 0, sizeof(struct file_lock));
     355      225242 :         locks_init_lock_heads(fl);
     356      225240 : }
     357             : EXPORT_SYMBOL(locks_init_lock);
     358             : 
     359             : /*
     360             :  * Initialize a new lock from an existing file_lock structure.
     361             :  */
     362      127032 : void locks_copy_conflock(struct file_lock *new, struct file_lock *fl)
     363             : {
     364      127032 :         new->fl_owner = fl->fl_owner;
     365      127032 :         new->fl_pid = fl->fl_pid;
     366      127032 :         new->fl_file = NULL;
     367      127032 :         new->fl_flags = fl->fl_flags;
     368      127032 :         new->fl_type = fl->fl_type;
     369      127032 :         new->fl_start = fl->fl_start;
     370      127032 :         new->fl_end = fl->fl_end;
     371      127032 :         new->fl_lmops = fl->fl_lmops;
     372      127032 :         new->fl_ops = NULL;
     373             : 
     374      127032 :         if (fl->fl_lmops) {
     375           0 :                 if (fl->fl_lmops->lm_get_owner)
     376           0 :                         fl->fl_lmops->lm_get_owner(fl->fl_owner);
     377             :         }
     378      127032 : }
     379             : EXPORT_SYMBOL(locks_copy_conflock);
     380             : 
     381      126990 : void locks_copy_lock(struct file_lock *new, struct file_lock *fl)
     382             : {
     383             :         /* "new" must be a freshly-initialized lock */
     384      126990 :         WARN_ON_ONCE(new->fl_ops);
     385             : 
     386      126990 :         locks_copy_conflock(new, fl);
     387             : 
     388      126988 :         new->fl_file = fl->fl_file;
     389      126988 :         new->fl_ops = fl->fl_ops;
     390             : 
     391      126988 :         if (fl->fl_ops) {
     392        1472 :                 if (fl->fl_ops->fl_copy_lock)
     393        1472 :                         fl->fl_ops->fl_copy_lock(new, fl);
     394             :         }
     395      126988 : }
     396             : EXPORT_SYMBOL(locks_copy_lock);
     397             : 
     398      126986 : static void locks_move_blocks(struct file_lock *new, struct file_lock *fl)
     399             : {
     400      126986 :         struct file_lock *f;
     401             : 
     402             :         /*
     403             :          * As ctx->flc_lock is held, new requests cannot be added to
     404             :          * ->fl_blocked_requests, so we don't need a lock to check if it
     405             :          * is empty.
     406             :          */
     407      126986 :         if (list_empty(&fl->fl_blocked_requests))
     408             :                 return;
     409           0 :         spin_lock(&blocked_lock_lock);
     410           0 :         list_splice_init(&fl->fl_blocked_requests, &new->fl_blocked_requests);
     411           0 :         list_for_each_entry(f, &new->fl_blocked_requests, fl_blocked_member)
     412           0 :                 f->fl_blocker = new;
     413           0 :         spin_unlock(&blocked_lock_lock);
     414             : }
     415             : 
     416             : static inline int flock_translate_cmd(int cmd) {
     417       62359 :         switch (cmd) {
     418             :         case LOCK_SH:
     419             :                 return F_RDLCK;
     420             :         case LOCK_EX:
     421             :                 return F_WRLCK;
     422             :         case LOCK_UN:
     423             :                 return F_UNLCK;
     424             :         }
     425             :         return -EINVAL;
     426             : }
     427             : 
     428             : /* Fill in a file_lock structure with an appropriate FLOCK lock. */
     429             : static void flock_make_lock(struct file *filp, struct file_lock *fl, int type)
     430             : {
     431      159216 :         locks_init_lock(fl);
     432             : 
     433      159215 :         fl->fl_file = filp;
     434      159215 :         fl->fl_owner = filp;
     435      159215 :         fl->fl_pid = current->tgid;
     436      159215 :         fl->fl_flags = FL_FLOCK;
     437      159215 :         fl->fl_type = type;
     438      159215 :         fl->fl_end = OFFSET_MAX;
     439             : }
     440             : 
     441             : static int assign_type(struct file_lock *fl, long type)
     442             : {
     443       68722 :         switch (type) {
     444       68722 :         case F_RDLCK:
     445             :         case F_WRLCK:
     446             :         case F_UNLCK:
     447       68722 :                 fl->fl_type = type;
     448       68722 :                 break;
     449             :         default:
     450             :                 return -EINVAL;
     451             :         }
     452       68674 :         return 0;
     453             : }
     454             : 
     455       68674 : static int flock64_to_posix_lock(struct file *filp, struct file_lock *fl,
     456             :                                  struct flock64 *l)
     457             : {
     458       68674 :         switch (l->l_whence) {
     459       68421 :         case SEEK_SET:
     460       68421 :                 fl->fl_start = 0;
     461       68421 :                 break;
     462         253 :         case SEEK_CUR:
     463         253 :                 fl->fl_start = filp->f_pos;
     464         253 :                 break;
     465             :         case SEEK_END:
     466           0 :                 fl->fl_start = i_size_read(file_inode(filp));
     467           0 :                 break;
     468             :         default:
     469             :                 return -EINVAL;
     470             :         }
     471       68674 :         if (l->l_start > OFFSET_MAX - fl->fl_start)
     472             :                 return -EOVERFLOW;
     473       68674 :         fl->fl_start += l->l_start;
     474       68674 :         if (fl->fl_start < 0)
     475             :                 return -EINVAL;
     476             : 
     477             :         /* POSIX-1996 leaves the case l->l_len < 0 undefined;
     478             :            POSIX-2001 defines it. */
     479       68674 :         if (l->l_len > 0) {
     480        1021 :                 if (l->l_len - 1 > OFFSET_MAX - fl->fl_start)
     481             :                         return -EOVERFLOW;
     482        1021 :                 fl->fl_end = fl->fl_start + (l->l_len - 1);
     483             : 
     484       67653 :         } else if (l->l_len < 0) {
     485           0 :                 if (fl->fl_start + l->l_len < 0)
     486             :                         return -EINVAL;
     487           0 :                 fl->fl_end = fl->fl_start - 1;
     488           0 :                 fl->fl_start += l->l_len;
     489             :         } else
     490       67653 :                 fl->fl_end = OFFSET_MAX;
     491             : 
     492       68674 :         fl->fl_owner = current->files;
     493       68674 :         fl->fl_pid = current->tgid;
     494       68674 :         fl->fl_file = filp;
     495       68674 :         fl->fl_flags = FL_POSIX;
     496       68674 :         fl->fl_ops = NULL;
     497       68674 :         fl->fl_lmops = NULL;
     498             : 
     499       68674 :         return assign_type(fl, l->l_type);
     500             : }
     501             : 
     502             : /* Verify a "struct flock" and copy it to a "struct file_lock" as a POSIX
     503             :  * style lock.
     504             :  */
     505       68674 : static int flock_to_posix_lock(struct file *filp, struct file_lock *fl,
     506             :                                struct flock *l)
     507             : {
     508       68674 :         struct flock64 ll = {
     509       68674 :                 .l_type = l->l_type,
     510       68674 :                 .l_whence = l->l_whence,
     511       68674 :                 .l_start = l->l_start,
     512       68674 :                 .l_len = l->l_len,
     513             :         };
     514             : 
     515       68674 :         return flock64_to_posix_lock(filp, fl, &ll);
     516             : }
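In struct flock terms, the conversion above means l_len == 0 locks from l_start to end of file (fl_end becomes OFFSET_MAX), while a negative l_len (POSIX-2001) covers the bytes just before l_start. A hedged userspace sketch, assuming fd is already open for writing:

        #include <fcntl.h>

        /* Sketch only: take two write locks on an already-open, writable fd. */
        static int lock_ranges(int fd)
        {
                struct flock fl = {
                        .l_type   = F_WRLCK,
                        .l_whence = SEEK_SET,
                        .l_start  = 100,
                        .l_len    = 0,          /* l_len == 0: lock bytes 100..EOF */
                };

                if (fcntl(fd, F_SETLK, &fl) == -1)
                        return -1;

                fl.l_start = 100;
                fl.l_len   = -50;               /* POSIX-2001: lock bytes 50..99 */
                return fcntl(fd, F_SETLK, &fl);
        }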
     517             : 
     518             : /* default lease lock manager operations */
     519             : static bool
     520          10 : lease_break_callback(struct file_lock *fl)
     521             : {
     522          10 :         kill_fasync(&fl->fl_fasync, SIGIO, POLL_MSG);
     523          10 :         return false;
     524             : }
     525             : 
     526             : static void
     527          16 : lease_setup(struct file_lock *fl, void **priv)
     528             : {
     529          16 :         struct file *filp = fl->fl_file;
     530          16 :         struct fasync_struct *fa = *priv;
     531             : 
     532             :         /*
     533             :          * fasync_insert_entry() returns the old entry if any. If there was no
     534             :          * old entry, then it used "priv" and inserted it into the fasync list.
     535             :          * Clear the pointer to indicate that it shouldn't be freed.
     536             :          */
     537          16 :         if (!fasync_insert_entry(fa->fa_fd, filp, &fl->fl_fasync, fa))
     538          16 :                 *priv = NULL;
     539             : 
     540          16 :         __f_setown(filp, task_pid(current), PIDTYPE_TGID, 0);
     541          16 : }
     542             : 
     543             : static const struct lock_manager_operations lease_manager_ops = {
     544             :         .lm_break = lease_break_callback,
     545             :         .lm_change = lease_modify,
     546             :         .lm_setup = lease_setup,
     547             : };
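These operations back fcntl(F_SETLEASE): lease_setup() registers the fasync entry that lease_break_callback() later uses to signal the lease holder with SIGIO. A hedged userspace sketch of taking and dropping a write lease (assumes the caller owns the file and is its only opener; names are illustrative):

        #define _GNU_SOURCE
        #include <fcntl.h>
        #include <signal.h>

        static void lease_broken(int sig)
        {
                /* Default notification is SIGIO, delivered via kill_fasync(). */
                (void)sig;
        }

        static int hold_write_lease(int fd)
        {
                signal(SIGIO, lease_broken);

                if (fcntl(fd, F_SETLEASE, F_WRLCK) == -1)
                        return -1;

                /*
                 * ... work under the lease; after a conflicting open() the
                 * kernel allows up to lease-break-time seconds before forcing
                 * the break ...
                 */

                return fcntl(fd, F_SETLEASE, F_UNLCK);
        }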
     548             : 
     549             : /*
     550             :  * Initialize a lease, use the default lock manager operations
     551             :  */
     552             : static int lease_init(struct file *filp, long type, struct file_lock *fl)
     553             : {
     554          32 :         if (assign_type(fl, type) != 0)
     555             :                 return -EINVAL;
     556             : 
     557          32 :         fl->fl_owner = filp;
     558          32 :         fl->fl_pid = current->tgid;
     559             : 
     560          32 :         fl->fl_file = filp;
     561          32 :         fl->fl_flags = FL_LEASE;
     562          32 :         fl->fl_start = 0;
     563          32 :         fl->fl_end = OFFSET_MAX;
     564          32 :         fl->fl_ops = NULL;
     565          32 :         fl->fl_lmops = &lease_manager_ops;
     566          32 :         return 0;
     567             : }
     568             : 
     569             : /* Allocate a file_lock initialised to this type of lease */
     570          32 : static struct file_lock *lease_alloc(struct file *filp, long type)
     571             : {
     572          32 :         struct file_lock *fl = locks_alloc_lock();
     573          32 :         int error = -ENOMEM;
     574             : 
     575          32 :         if (fl == NULL)
     576             :                 return ERR_PTR(error);
     577             : 
     578          32 :         error = lease_init(filp, type, fl);
     579          32 :         if (error) {
     580           0 :                 locks_free_lock(fl);
     581           0 :                 return ERR_PTR(error);
     582             :         }
     583             :         return fl;
     584             : }
     585             : 
     586             : /* Check if two locks overlap each other.
     587             :  */
     588             : static inline int locks_overlap(struct file_lock *fl1, struct file_lock *fl2)
     589             : {
     590         676 :         return ((fl1->fl_end >= fl2->fl_start) &&
     591         326 :                 (fl2->fl_end >= fl1->fl_start));
     592             : }
     593             : 
     594             : /*
     595             :  * Check whether two locks have the same owner.
     596             :  */
     597             : static int posix_same_owner(struct file_lock *fl1, struct file_lock *fl2)
     598             : {
     599      135700 :         return fl1->fl_owner == fl2->fl_owner;
     600             : }
     601             : 
     602             : /* Must be called with the flc_lock held! */
     603      127006 : static void locks_insert_global_locks(struct file_lock *fl)
     604             : {
     605      127006 :         struct file_lock_list_struct *fll = this_cpu_ptr(&file_lock_list);
     606             : 
     607      127006 :         percpu_rwsem_assert_held(&file_rwsem);
     608             : 
     609      127006 :         spin_lock(&fll->lock);
     610      127006 :         fl->fl_link_cpu = smp_processor_id();
     611      127006 :         hlist_add_head(&fl->fl_link, &fll->hlist);
     612      127006 :         spin_unlock(&fll->lock);
     613      127006 : }
     614             : 
     615             : /* Must be called with the flc_lock held! */
     616      127006 : static void locks_delete_global_locks(struct file_lock *fl)
     617             : {
     618      127006 :         struct file_lock_list_struct *fll;
     619             : 
     620      127006 :         percpu_rwsem_assert_held(&file_rwsem);
     621             : 
     622             :         /*
     623             :          * Avoid taking lock if already unhashed. This is safe since this check
     624             :          * is done while holding the flc_lock, and new insertions into the list
     625             :          * also require that it be held.
     626             :          */
     627      127006 :         if (hlist_unhashed(&fl->fl_link))
     628             :                 return;
     629             : 
     630      127006 :         fll = per_cpu_ptr(&file_lock_list, fl->fl_link_cpu);
     631      127006 :         spin_lock(&fll->lock);
     632      127006 :         hlist_del_init(&fl->fl_link);
     633      127006 :         spin_unlock(&fll->lock);
     634             : }
     635             : 
     636             : static unsigned long
     637             : posix_owner_key(struct file_lock *fl)
     638             : {
     639           0 :         return (unsigned long)fl->fl_owner;
     640             : }
     641             : 
     642           0 : static void locks_insert_global_blocked(struct file_lock *waiter)
     643             : {
     644           0 :         lockdep_assert_held(&blocked_lock_lock);
     645             : 
     646           0 :         hash_add(blocked_hash, &waiter->fl_link, posix_owner_key(waiter));
     647           0 : }
     648             : 
     649             : static void locks_delete_global_blocked(struct file_lock *waiter)
     650             : {
     651          11 :         lockdep_assert_held(&blocked_lock_lock);
     652             : 
     653          11 :         hash_del(&waiter->fl_link);
     654             : }
     655             : 
     656             : /* Remove waiter from blocker's block list.
     657             :  * When blocker ends up pointing to itself then the list is empty.
     658             :  *
     659             :  * Must be called with blocked_lock_lock held.
     660             :  */
     661          11 : static void __locks_delete_block(struct file_lock *waiter)
     662             : {
     663          11 :         locks_delete_global_blocked(waiter);
     664          11 :         list_del_init(&waiter->fl_blocked_member);
     665          11 : }
     666             : 
     667          22 : static void __locks_wake_up_blocks(struct file_lock *blocker)
     668             : {
     669          22 :         while (!list_empty(&blocker->fl_blocked_requests)) {
     670           9 :                 struct file_lock *waiter;
     671             : 
     672           9 :                 waiter = list_first_entry(&blocker->fl_blocked_requests,
     673             :                                           struct file_lock, fl_blocked_member);
     674           9 :                 __locks_delete_block(waiter);
     675           9 :                 if (waiter->fl_lmops && waiter->fl_lmops->lm_notify)
     676           0 :                         waiter->fl_lmops->lm_notify(waiter);
     677             :                 else
     678           9 :                         wake_up(&waiter->fl_wait);
     679             : 
     680             :                 /*
     681             :                  * The setting of fl_blocker to NULL marks the "done"
     682             :                  * point in deleting a block. Paired with acquire at the top
     683             :                  * of locks_delete_block().
     684             :                  */
     685          31 :                 smp_store_release(&waiter->fl_blocker, NULL);
     686             :         }
     687          22 : }
     688             : 
     689             : /**
     690             :  *      locks_delete_block - stop waiting for a file lock
     691             :  *      @waiter: the lock which was waiting
     692             :  *
     693             :  *      lockd/nfsd need to disconnect the lock while working on it.
     694             :  */
     695      135259 : int locks_delete_block(struct file_lock *waiter)
     696             : {
     697      135259 :         int status = -ENOENT;
     698             : 
     699             :         /*
     700             :          * If fl_blocker is NULL, it won't be set again as this thread "owns"
     701             :          * the lock and is the only one that might try to claim the lock.
     702             :          *
     703             :          * We use acquire/release to manage fl_blocker so that we can
     704             :          * optimize away taking the blocked_lock_lock in many cases.
     705             :          *
     706             :          * The smp_load_acquire guarantees two things:
     707             :          *
     708             :          * 1/ that fl_blocked_requests can be tested locklessly. If something
     709             :          * was recently added to that list it must have been in a locked region
     710             :          * *before* the locked region when fl_blocker was set to NULL.
     711             :          *
     712             :          * 2/ that no other thread is accessing 'waiter', so it is safe to free
     713             :          * it.  __locks_wake_up_blocks is careful not to touch waiter after
     714             :          * fl_blocker is released.
     715             :          *
     716             :          * If a lockless check of fl_blocker shows it to be NULL, we know that
     717             :          * no new locks can be inserted into its fl_blocked_requests list, and
     718             :          * can avoid doing anything further if the list is empty.
     719             :          */
     720      135259 :         if (!smp_load_acquire(&waiter->fl_blocker) &&
     721      135257 :             list_empty(&waiter->fl_blocked_requests))
     722             :                 return status;
     723             : 
     724           2 :         spin_lock(&blocked_lock_lock);
     725           2 :         if (waiter->fl_blocker)
     726           2 :                 status = 0;
     727           2 :         __locks_wake_up_blocks(waiter);
     728           2 :         __locks_delete_block(waiter);
     729             : 
     730             :         /*
     731             :          * The setting of fl_blocker to NULL marks the "done" point in deleting
     732             :          * a block. Paired with acquire at the top of this function.
     733             :          */
     734           2 :         smp_store_release(&waiter->fl_blocker, NULL);
     735           2 :         spin_unlock(&blocked_lock_lock);
     736           2 :         return status;
     737             : }
     738             : EXPORT_SYMBOL(locks_delete_block);
     739             : 
     740             : /* Insert waiter into blocker's block list.
     741             :  * We use a circular list so that processes can be easily woken up in
     742             :  * the order they blocked. The documentation doesn't require this but
     743             :  * it seems like the reasonable thing to do.
     744             :  *
     745             :  * Must be called with both the flc_lock and blocked_lock_lock held. The
     746             :  * fl_blocked_requests list itself is protected by the blocked_lock_lock,
     747             :  * but by ensuring that the flc_lock is also held on insertions we can avoid
     748             :  * taking the blocked_lock_lock in some cases when we see that the
     749             :  * fl_blocked_requests list is empty.
     750             :  *
     751             :  * Rather than just adding to the list, we check for conflicts with any existing
     752             :  * waiters, and add beneath any waiter that blocks the new waiter.
     753             :  * Thus wakeups don't happen until needed.
     754             :  */
     755          11 : static void __locks_insert_block(struct file_lock *blocker,
     756             :                                  struct file_lock *waiter,
     757             :                                  bool conflict(struct file_lock *,
     758             :                                                struct file_lock *))
     759             : {
     760          11 :         struct file_lock *fl;
     761          11 :         BUG_ON(!list_empty(&waiter->fl_blocked_member));
     762             : 
     763          11 : new_blocker:
     764          11 :         list_for_each_entry(fl, &blocker->fl_blocked_requests, fl_blocked_member)
     765           0 :                 if (conflict(fl, waiter)) {
     766           0 :                         blocker =  fl;
     767           0 :                         goto new_blocker;
     768             :                 }
     769          11 :         waiter->fl_blocker = blocker;
     770          11 :         list_add_tail(&waiter->fl_blocked_member, &blocker->fl_blocked_requests);
     771          11 :         if (IS_POSIX(blocker) && !IS_OFDLCK(blocker))
     772           0 :                 locks_insert_global_blocked(waiter);
     773             : 
     774             :         /* The requests in waiter->fl_blocked_requests are known to conflict with
     775             :          * waiter, but might not conflict with blocker, or the requests
     776             :          * and lock which block it.  So they all need to be woken.
     777             :          */
     778          11 :         __locks_wake_up_blocks(waiter);
     779          11 : }
     780             : 
     781             : /* Must be called with flc_lock held. */
     782          11 : static void locks_insert_block(struct file_lock *blocker,
     783             :                                struct file_lock *waiter,
     784             :                                bool conflict(struct file_lock *,
     785             :                                              struct file_lock *))
     786             : {
     787          11 :         spin_lock(&blocked_lock_lock);
     788          11 :         __locks_insert_block(blocker, waiter, conflict);
     789          11 :         spin_unlock(&blocked_lock_lock);
     790          11 : }
     791             : 
     792             : /*
     793             :  * Wake up processes blocked waiting for blocker.
     794             :  *
     795             :  * Must be called with the inode->flc_lock held!
     796             :  */
     797      127038 : static void locks_wake_up_blocks(struct file_lock *blocker)
     798             : {
     799             :         /*
     800             :          * Avoid taking global lock if list is empty. This is safe since new
     801             :          * blocked requests are only added to the list under the flc_lock, and
     802             :          * the flc_lock is always held here. Note that removal from the
     803             :          * fl_blocked_requests list does not require the flc_lock, so we must
     804             :          * recheck list_empty() after acquiring the blocked_lock_lock.
     805             :          */
     806      127038 :         if (list_empty(&blocker->fl_blocked_requests))
     807             :                 return;
     808             : 
     809           9 :         spin_lock(&blocked_lock_lock);
     810           9 :         __locks_wake_up_blocks(blocker);
     811           9 :         spin_unlock(&blocked_lock_lock);
     812             : }
     813             : 
     814             : static void
     815      127006 : locks_insert_lock_ctx(struct file_lock *fl, struct list_head *before)
     816             : {
     817      127006 :         list_add_tail(&fl->fl_list, before);
     818      127006 :         locks_insert_global_locks(fl);
     819      127006 : }
     820             : 
     821             : static void
     822      127006 : locks_unlink_lock_ctx(struct file_lock *fl)
     823             : {
     824      127006 :         locks_delete_global_locks(fl);
     825      127006 :         list_del_init(&fl->fl_list);
     826      127006 :         locks_wake_up_blocks(fl);
     827      127006 : }
     828             : 
     829             : static void
     830      127006 : locks_delete_lock_ctx(struct file_lock *fl, struct list_head *dispose)
     831             : {
     832      127006 :         locks_unlink_lock_ctx(fl);
     833      127006 :         if (dispose)
     834      127006 :                 list_add(&fl->fl_list, dispose);
     835             :         else
     836           0 :                 locks_free_lock(fl);
     837      127006 : }
     838             : 
     839             : /* Determine if lock sys_fl blocks lock caller_fl. Common functionality
     840             :  * checks for shared/exclusive status of overlapping locks.
     841             :  */
     842             : static bool locks_conflict(struct file_lock *caller_fl,
     843             :                            struct file_lock *sys_fl)
     844             : {
     845         191 :         if (sys_fl->fl_type == F_WRLCK)
     846             :                 return true;
     847         108 :         if (caller_fl->fl_type == F_WRLCK)
     848          56 :                 return true;
     849             :         return false;
     850             : }
     851             : 
     852             : /* Determine if lock sys_fl blocks lock caller_fl. POSIX specific
     853             :  * checking before calling the locks_conflict().
     854             :  */
     855         696 : static bool posix_locks_conflict(struct file_lock *caller_fl,
     856             :                                  struct file_lock *sys_fl)
     857             : {
     858             :         /* POSIX locks owned by the same process do not conflict with
     859             :          * each other.
     860             :          */
     861         696 :         if (posix_same_owner(caller_fl, sys_fl))
     862             :                 return false;
     863             : 
     864             :         /* Check whether they overlap */
     865         350 :         if (!locks_overlap(caller_fl, sys_fl))
     866             :                 return false;
     867             : 
     868         162 :         return locks_conflict(caller_fl, sys_fl);
     869             : }
     870             : 
     871             : /* Determine if lock sys_fl blocks lock caller_fl. FLOCK specific
     872             :  * checking before calling the locks_conflict().
     873             :  */
     874           0 : static bool flock_locks_conflict(struct file_lock *caller_fl,
     875             :                                  struct file_lock *sys_fl)
     876             : {
     877             :         /* FLOCK locks referring to the same filp do not conflict with
     878             :          * each other.
     879             :          */
     880           0 :         if (caller_fl->fl_file == sys_fl->fl_file)
     881             :                 return false;
     882             : 
     883           7 :         return locks_conflict(caller_fl, sys_fl);
     884             : }
     885             : 
     886             : void
     887         184 : posix_test_lock(struct file *filp, struct file_lock *fl)
     888             : {
     889         184 :         struct file_lock *cfl;
     890         184 :         struct file_lock_context *ctx;
     891         184 :         struct inode *inode = file_inode(filp);
     892         184 :         void *owner;
     893         184 :         void (*func)(void);
     894             : 
     895         184 :         ctx = locks_inode_context(inode);
     896         184 :         if (!ctx || list_empty_careful(&ctx->flc_posix)) {
     897          86 :                 fl->fl_type = F_UNLCK;
     898          86 :                 return;
     899             :         }
     900             : 
     901             : retry:
     902          98 :         spin_lock(&ctx->flc_lock);
     903         154 :         list_for_each_entry(cfl, &ctx->flc_posix, fl_list) {
     904          98 :                 if (!posix_locks_conflict(fl, cfl))
     905          56 :                         continue;
     906          42 :                 if (cfl->fl_lmops && cfl->fl_lmops->lm_lock_expirable
     907           0 :                         && (*cfl->fl_lmops->lm_lock_expirable)(cfl)) {
     908           0 :                         owner = cfl->fl_lmops->lm_mod_owner;
     909           0 :                         func = cfl->fl_lmops->lm_expire_lock;
     910           0 :                         __module_get(owner);
     911           0 :                         spin_unlock(&ctx->flc_lock);
     912           0 :                         (*func)();
     913           0 :                         module_put(owner);
     914           0 :                         goto retry;
     915             :                 }
     916          42 :                 locks_copy_conflock(fl, cfl);
     917          42 :                 goto out;
     918             :         }
     919          56 :         fl->fl_type = F_UNLCK;
     920          98 : out:
     921          98 :         spin_unlock(&ctx->flc_lock);
     922             :         return;
     923             : }
     924             : EXPORT_SYMBOL(posix_test_lock);
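posix_test_lock() is the common backend for fcntl(F_GETLK): the caller fills in the lock it would like to take, and either gets F_UNLCK back or a copy of one conflicting lock. A hedged userspace sketch:

        #include <fcntl.h>
        #include <stdio.h>

        /* Sketch only: report whether anything would block a whole-file write lock. */
        static void probe_write_lock(int fd)
        {
                struct flock fl = {
                        .l_type   = F_WRLCK,
                        .l_whence = SEEK_SET,
                        .l_start  = 0,
                        .l_len    = 0,          /* whole file */
                };

                if (fcntl(fd, F_GETLK, &fl) == -1) {
                        perror("F_GETLK");
                        return;
                }

                if (fl.l_type == F_UNLCK)
                        printf("no conflicting lock\n");
                else
                        printf("pid %d holds a conflicting lock at %lld (len %lld)\n",
                               (int)fl.l_pid, (long long)fl.l_start, (long long)fl.l_len);
        }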
     925             : 
     926             : /*
     927             :  * Deadlock detection:
     928             :  *
     929             :  * We attempt to detect deadlocks that are due purely to posix file
     930             :  * locks.
     931             :  *
     932             :  * We assume that a task can be waiting for at most one lock at a time.
     933             :  * So for any acquired lock, the process holding that lock may be
     934             :  * waiting on at most one other lock.  That lock in turns may be held by
     935             :  * someone waiting for at most one other lock.  Given a requested lock
     936             :  * caller_fl which is about to wait for a conflicting lock block_fl, we
     937             :  * follow this chain of waiters to ensure we are not about to create a
     938             :  * cycle.
     939             :  *
     940             :  * Since we do this before we ever put a process to sleep on a lock, we
     941             :  * are ensured that there is never a cycle; that is what guarantees that
     942             :  * the while() loop in posix_locks_deadlock() eventually completes.
     943             :  *
     944             :  * Note: the above assumption may not be true when handling lock
     945             :  * requests from a broken NFS client. It may also fail in the presence
     946             :  * of tasks (such as posix threads) sharing the same open file table.
     947             :  * To handle those cases, we just bail out after a few iterations.
     948             :  *
     949             :  * For FL_OFDLCK locks, the owner is the filp, not the files_struct.
     950             :  * Because the owner is not even nominally tied to a thread of
     951             :  * execution, the deadlock detection below can't reasonably work well. Just
     952             :  * skip it for those.
     953             :  *
     954             :  * In principle, we could do a more limited deadlock detection on FL_OFDLCK
     955             :  * locks that just checks for the case where two tasks are attempting to
     956             :  * upgrade from read to write locks on the same inode.
     957             :  */
     958             : 
     959             : #define MAX_DEADLK_ITERATIONS 10
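When the walk below finds a cycle, the blocking F_SETLKW request fails with EDEADLK instead of sleeping forever. A hedged two-process sketch that provokes this (hypothetical path; the sleep()-based synchronization is deliberately crude):

        #include <errno.h>
        #include <fcntl.h>
        #include <stdio.h>
        #include <sys/wait.h>
        #include <unistd.h>

        static int lock_byte(int fd, off_t start, int wait_for_it)
        {
                struct flock fl = { .l_type = F_WRLCK, .l_whence = SEEK_SET,
                                    .l_start = start, .l_len = 1 };
                return fcntl(fd, wait_for_it ? F_SETLKW : F_SETLK, &fl);
        }

        int main(void)
        {
                int fd = open("/tmp/deadlock-demo", O_RDWR | O_CREAT, 0600); /* hypothetical */

                lock_byte(fd, 0, 0);                    /* parent holds byte 0 */

                if (fork() == 0) {                      /* child */
                        lock_byte(fd, 1, 0);            /* child holds byte 1 */
                        sleep(2);                       /* let the parent block first */
                        if (lock_byte(fd, 0, 1) == -1 && errno == EDEADLK)
                                printf("EDEADLK: cycle detected\n");
                        _exit(0);
                }

                sleep(1);
                lock_byte(fd, 1, 1);                    /* blocks on the child's byte 1 */
                wait(NULL);
                return 0;
        }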
     960             : 
     961             : /* Find a lock that the owner of the given block_fl is blocking on. */
     962           0 : static struct file_lock *what_owner_is_waiting_for(struct file_lock *block_fl)
     963             : {
     964           0 :         struct file_lock *fl;
     965             : 
     966           0 :         hash_for_each_possible(blocked_hash, fl, fl_link, posix_owner_key(block_fl)) {
     967           0 :                 if (posix_same_owner(fl, block_fl)) {
     968           0 :                         while (fl->fl_blocker)
     969             :                                 fl = fl->fl_blocker;
     970           0 :                         return fl;
     971             :                 }
     972             :         }
     973             :         return NULL;
     974             : }
     975             : 
     976             : /* Must be called with the blocked_lock_lock held! */
     977           0 : static int posix_locks_deadlock(struct file_lock *caller_fl,
     978             :                                 struct file_lock *block_fl)
     979             : {
     980           0 :         int i = 0;
     981             : 
     982           0 :         lockdep_assert_held(&blocked_lock_lock);
     983             : 
     984             :         /*
     985             :          * This deadlock detector can't reasonably detect deadlocks with
     986             :          * FL_OFDLCK locks, since they aren't owned by a process, per-se.
     987             :          */
     988           0 :         if (IS_OFDLCK(caller_fl))
     989             :                 return 0;
     990             : 
     991           0 :         while ((block_fl = what_owner_is_waiting_for(block_fl))) {
     992           0 :                 if (i++ > MAX_DEADLK_ITERATIONS)
     993             :                         return 0;
     994           0 :                 if (posix_same_owner(caller_fl, block_fl))
     995             :                         return 1;
     996             :         }
     997             :         return 0;
     998             : }
     999             : 
    1000             : /* Try to create a FLOCK lock on filp. We always insert new FLOCK locks
    1001             :  * after any leases, but before any posix locks.
    1002             :  *
    1003             :  * Note that if called with an FL_EXISTS argument, the caller may determine
    1004             :  * whether or not a lock was successfully freed by testing the return
    1005             :  * value for -ENOENT.
    1006             :  */
    1007      159222 : static int flock_lock_inode(struct inode *inode, struct file_lock *request)
    1008             : {
    1009      159222 :         struct file_lock *new_fl = NULL;
    1010      159222 :         struct file_lock *fl;
    1011      159222 :         struct file_lock_context *ctx;
    1012      159222 :         int error = 0;
    1013      159222 :         bool found = false;
    1014      159222 :         LIST_HEAD(dispose);
    1015             : 
    1016      159222 :         ctx = locks_get_lock_context(inode, request->fl_type);
    1017      159222 :         if (!ctx) {
    1018           0 :                 if (request->fl_type != F_UNLCK)
    1019             :                         return -ENOMEM;
    1020           0 :                 return (request->fl_flags & FL_EXISTS) ? -ENOENT : 0;
    1021             :         }
    1022             : 
    1023      159222 :         if (!(request->fl_flags & FL_ACCESS) && (request->fl_type != F_UNLCK)) {
    1024       59978 :                 new_fl = locks_alloc_lock();
    1025       59978 :                 if (!new_fl)
    1026             :                         return -ENOMEM;
    1027             :         }
    1028             : 
    1029      159222 :         percpu_down_read(&file_rwsem);
    1030      159220 :         spin_lock(&ctx->flc_lock);
    1031      159224 :         if (request->fl_flags & FL_ACCESS)
    1032           8 :                 goto find_conflict;
    1033             : 
    1034      198556 :         list_for_each_entry(fl, &ctx->flc_flock, fl_list) {
    1035       99314 :                 if (request->fl_file != fl->fl_file)
    1036       39340 :                         continue;
    1037       59974 :                 if (request->fl_type == fl->fl_type)
    1038          65 :                         goto out;
    1039       59909 :                 found = true;
    1040       59909 :                 locks_delete_lock_ctx(fl, &dispose);
    1041       59909 :                 break;
    1042             :         }
    1043             : 
    1044      159150 :         if (request->fl_type == F_UNLCK) {
    1045       99239 :                 if ((request->fl_flags & FL_EXISTS) && !found)
    1046           0 :                         error = -ENOENT;
    1047       99239 :                 goto out;
    1048             :         }
    1049             : 
    1050       59911 : find_conflict:
    1051       59922 :         list_for_each_entry(fl, &ctx->flc_flock, fl_list) {
    1052           7 :                 if (!flock_locks_conflict(request, fl))
    1053           3 :                         continue;
    1054           4 :                 error = -EAGAIN;
    1055           4 :                 if (!(request->fl_flags & FL_SLEEP))
    1056           3 :                         goto out;
    1057           1 :                 error = FILE_LOCK_DEFERRED;
    1058           1 :                 locks_insert_block(fl, request, flock_locks_conflict);
    1059           1 :                 goto out;
    1060             :         }
    1061       59916 :         if (request->fl_flags & FL_ACCESS)
    1062           8 :                 goto out;
    1063       59908 :         locks_copy_lock(new_fl, request);
    1064       59907 :         locks_move_blocks(new_fl, request);
    1065       59908 :         locks_insert_lock_ctx(new_fl, &ctx->flc_flock);
    1066       59908 :         new_fl = NULL;
    1067       59908 :         error = 0;
    1068             : 
    1069      159224 : out:
    1070      159224 :         spin_unlock(&ctx->flc_lock);
    1071      159221 :         percpu_up_read(&file_rwsem);
    1072      159223 :         if (new_fl)
    1073          69 :                 locks_free_lock(new_fl);
    1074      159223 :         locks_dispose_list(&dispose);
    1075      159224 :         trace_flock_lock_inode(inode, request, error);
    1076      159224 :         return error;
    1077             : }
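
For reference, a small sketch of how this function is typically reached from userspace (illustrative only, not taken from the sources): a second flock() call on the same open file descriptor replaces the existing lock, which is the found = true path above. Note that the upgrade is not atomic: the old lock is deleted before the new request is checked for conflicts, so a failed non-blocking upgrade leaves the file unlocked.

/* Illustrative only: upgrade a shared flock to exclusive on the same fd. */
#include <fcntl.h>
#include <stdio.h>
#include <sys/file.h>

int main(void)
{
        int fd = open("/tmp/flock-demo", O_RDWR | O_CREAT, 0644);
        if (fd < 0) { perror("open"); return 1; }

        flock(fd, LOCK_SH);                     /* shared lock on this struct file */

        /* The existing LOCK_SH entry is removed and a LOCK_EX entry inserted;
         * with LOCK_NB a conflict returns EWOULDBLOCK instead of sleeping.   */
        if (flock(fd, LOCK_EX | LOCK_NB) < 0)
                perror("upgrade to LOCK_EX");

        flock(fd, LOCK_UN);                     /* the F_UNLCK path above          */
        return 0;
}
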
    1078             : 
    1079      135981 : static int posix_lock_inode(struct inode *inode, struct file_lock *request,
    1080             :                             struct file_lock *conflock)
    1081             : {
    1082      135981 :         struct file_lock *fl, *tmp;
    1083      135981 :         struct file_lock *new_fl = NULL;
    1084      135981 :         struct file_lock *new_fl2 = NULL;
    1085      135981 :         struct file_lock *left = NULL;
    1086      135981 :         struct file_lock *right = NULL;
    1087      135981 :         struct file_lock_context *ctx;
    1088      135981 :         int error;
    1089      135981 :         bool added = false;
    1090      135981 :         LIST_HEAD(dispose);
    1091      135981 :         void *owner;
    1092      135981 :         void (*func)(void);
    1093             : 
    1094      135981 :         ctx = locks_get_lock_context(inode, request->fl_type);
    1095      135981 :         if (!ctx)
    1096           0 :                 return (request->fl_type == F_UNLCK) ? 0 : -ENOMEM;
    1097             : 
    1098             :         /*
    1099             :          * We may need two file_lock structures for this operation,
    1100             :          * so we get them in advance to avoid races.
    1101             :          *
    1102             :          * In some cases we can be sure that no new locks will be needed.
    1103             :          */
    1104      135981 :         if (!(request->fl_flags & FL_ACCESS) &&
    1105      134517 :             (request->fl_type != F_UNLCK ||
    1106       67324 :              request->fl_start != 0 || request->fl_end != OFFSET_MAX)) {
    1107       67414 :                 new_fl = locks_alloc_lock();
    1108       67414 :                 new_fl2 = locks_alloc_lock();
    1109             :         }
    1110             : 
    1111      135981 : retry:
    1112      135981 :         percpu_down_read(&file_rwsem);
    1113      135981 :         spin_lock(&ctx->flc_lock);
    1114             :         /*
    1115             :          * New lock request. Walk all POSIX locks and look for conflicts. If
    1116             :          * there are any, either return error or put the request on the
    1117             :          * blocker's list of waiters and the global blocked_hash.
    1118             :          */
    1119      135981 :         if (request->fl_type != F_UNLCK) {
    1120       69179 :                 list_for_each_entry(fl, &ctx->flc_posix, fl_list) {
    1121         598 :                         if (!posix_locks_conflict(request, fl))
    1122         522 :                                 continue;
    1123          76 :                         if (fl->fl_lmops && fl->fl_lmops->lm_lock_expirable
    1124           0 :                                 && (*fl->fl_lmops->lm_lock_expirable)(fl)) {
    1125           0 :                                 owner = fl->fl_lmops->lm_mod_owner;
    1126           0 :                                 func = fl->fl_lmops->lm_expire_lock;
    1127           0 :                                 __module_get(owner);
    1128           0 :                                 spin_unlock(&ctx->flc_lock);
    1129           0 :                                 percpu_up_read(&file_rwsem);
    1130           0 :                                 (*func)();
    1131           0 :                                 module_put(owner);
    1132           0 :                                 goto retry;
    1133             :                         }
    1134          76 :                         if (conflock)
    1135           0 :                                 locks_copy_conflock(conflock, fl);
    1136          76 :                         error = -EAGAIN;
    1137          76 :                         if (!(request->fl_flags & FL_SLEEP))
    1138          76 :                                 goto out;
    1139             :                         /*
    1140             :                          * Deadlock detection and insertion into the blocked
    1141             :                          * locks list must be done while holding the same lock!
    1142             :                          */
    1143           0 :                         error = -EDEADLK;
    1144           0 :                         spin_lock(&blocked_lock_lock);
    1145             :                         /*
    1146             :                          * Ensure that we don't find any locks blocked on this
    1147             :                          * request during deadlock detection.
    1148             :                          */
    1149           0 :                         __locks_wake_up_blocks(request);
    1150           0 :                         if (likely(!posix_locks_deadlock(request, fl))) {
    1151           0 :                                 error = FILE_LOCK_DEFERRED;
    1152           0 :                                 __locks_insert_block(fl, request,
    1153             :                                                      posix_locks_conflict);
    1154             :                         }
    1155           0 :                         spin_unlock(&blocked_lock_lock);
    1156           0 :                         goto out;
    1157             :                 }
    1158             :         }
    1159             : 
    1160             :         /* If we're just looking for a conflict, we're done. */
    1161      135905 :         error = 0;
    1162      135905 :         if (request->fl_flags & FL_ACCESS)
    1163        1464 :                 goto out;
    1164             : 
    1165             :         /* Find the first old lock with the same owner as the new lock */
    1166      134779 :         list_for_each_entry(fl, &ctx->flc_posix, fl_list) {
    1167       67537 :                 if (posix_same_owner(request, fl))
    1168             :                         break;
    1169             :         }
    1170             : 
    1171             :         /* Process locks with this owner. */
    1172      201742 :         list_for_each_entry_safe_from(fl, tmp, &ctx->flc_posix, fl_list) {
    1173       67467 :                 if (!posix_same_owner(request, fl))
    1174             :                         break;
    1175             : 
    1176             :                 /* Detect adjacent or overlapping regions (if same lock type) */
    1177       67439 :                 if (request->fl_type == fl->fl_type) {
    1178             :                         /* In all comparisons of start vs end, use
    1179             :                          * "start - 1" rather than "end + 1". If end
    1180             :                          * is OFFSET_MAX, end + 1 will become negative.
    1181             :                          */
    1182         200 :                         if (fl->fl_end < request->fl_start - 1)
    1183         124 :                                 continue;
    1184             :                         /* If the next lock in the list covers an entirely
    1185             :                          * higher range than the new one, insert the new lock here.
    1186             :                          */
    1187          76 :                         if (fl->fl_start - 1 > request->fl_end)
    1188             :                                 break;
    1189             : 
    1190             :                         /* If we come here, the new and old lock are of the
    1191             :                          * same type and adjacent or overlapping. Merge them
    1192             :                          * into a single lock spanning from the lower start
    1193             :                          * address of the two to the higher end address.
    1194             :                          */
    1195          38 :                         if (fl->fl_start > request->fl_start)
    1196          12 :                                 fl->fl_start = request->fl_start;
    1197             :                         else
    1198          26 :                                 request->fl_start = fl->fl_start;
    1199          38 :                         if (fl->fl_end < request->fl_end)
    1200          22 :                                 fl->fl_end = request->fl_end;
    1201             :                         else
    1202          16 :                                 request->fl_end = fl->fl_end;
    1203          38 :                         if (added) {
    1204           0 :                                 locks_delete_lock_ctx(fl, &dispose);
    1205           0 :                                 continue;
    1206             :                         }
    1207             :                         request = fl;
    1208             :                         added = true;
    1209             :                 } else {
    1210             :                         /* Processing for different lock types is a bit
    1211             :                          * more complex.
    1212             :                          */
    1213       67239 :                         if (fl->fl_end < request->fl_start)
    1214          52 :                                 continue;
    1215       67187 :                         if (fl->fl_start > request->fl_end)
    1216             :                                 break;
    1217       67095 :                         if (request->fl_type == F_UNLCK)
    1218       67067 :                                 added = true;
    1219       67095 :                         if (fl->fl_start < request->fl_start)
    1220           8 :                                 left = fl;
    1221             :                         /* If the next lock in the list has a higher end
    1222             :                          * address than the new one, insert the new one here.
    1223             :                          */
    1224       67095 :                         if (fl->fl_end > request->fl_end) {
    1225             :                                 right = fl;
    1226             :                                 break;
    1227             :                         }
    1228       67087 :                         if (fl->fl_start >= request->fl_start) {
    1229             :                                 /* The new lock completely replaces an old
    1230             :                                  * one (This may happen several times).
    1231             :                                  */
    1232       67081 :                                 if (added) {
    1233       67067 :                                         locks_delete_lock_ctx(fl, &dispose);
    1234       67067 :                                         continue;
    1235             :                                 }
    1236             :                                 /*
    1237             :                                  * Replace the old lock with new_fl, and
    1238             :                                  * remove the old one. It's safe to do the
    1239             :                                  * insert here since we know that we won't be
    1240             :                                  * using new_fl later, and that the lock is
    1241             :                                  * just replacing an existing lock.
    1242             :                                  */
    1243          14 :                                 error = -ENOLCK;
    1244          14 :                                 if (!new_fl)
    1245           0 :                                         goto out;
    1246          14 :                                 locks_copy_lock(new_fl, request);
    1247          14 :                                 locks_move_blocks(new_fl, request);
    1248          14 :                                 request = new_fl;
    1249          14 :                                 new_fl = NULL;
    1250          14 :                                 locks_insert_lock_ctx(request, &fl->fl_list);
    1251          14 :                                 locks_delete_lock_ctx(fl, &dispose);
    1252          14 :                                 added = true;
    1253             :                         }
    1254             :                 }
    1255             :         }
    1256             : 
    1257             :         /*
    1258             :          * The above code only modifies existing locks in case of merging or
    1259             :          * replacing. If new lock(s) need to be inserted, all modifications are
    1260             :          * done below this point, so it's still safe to bail out here.
    1261             :          */
    1262      134441 :         error = -ENOLCK; /* "no luck" */
    1263      134441 :         if (right && left == right && !new_fl2)
    1264           0 :                 goto out;
    1265             : 
    1266      134441 :         error = 0;
    1267      134441 :         if (!added) {
    1268       67338 :                 if (request->fl_type == F_UNLCK) {
    1269         273 :                         if (request->fl_flags & FL_EXISTS)
    1270           0 :                                 error = -ENOENT;
    1271         273 :                         goto out;
    1272             :                 }
    1273             : 
    1274       67065 :                 if (!new_fl) {
    1275           0 :                         error = -ENOLCK;
    1276           0 :                         goto out;
    1277             :                 }
    1278       67065 :                 locks_copy_lock(new_fl, request);
    1279       67065 :                 locks_move_blocks(new_fl, request);
    1280       67065 :                 locks_insert_lock_ctx(new_fl, &fl->fl_list);
    1281       67065 :                 fl = new_fl;
    1282       67065 :                 new_fl = NULL;
    1283             :         }
    1284      134168 :         if (right) {
    1285           8 :                 if (left == right) {
    1286             :                         /* The new lock breaks the old one in two pieces,
    1287             :                          * so we have to use the second new lock.
    1288             :                          */
    1289           2 :                         left = new_fl2;
    1290           2 :                         new_fl2 = NULL;
    1291           2 :                         locks_copy_lock(left, right);
    1292           2 :                         locks_insert_lock_ctx(left, &fl->fl_list);
    1293             :                 }
    1294           8 :                 right->fl_start = request->fl_end + 1;
    1295           8 :                 locks_wake_up_blocks(right);
    1296             :         }
    1297      134168 :         if (left) {
    1298           8 :                 left->fl_end = request->fl_start - 1;
    1299           8 :                 locks_wake_up_blocks(left);
    1300             :         }
    1301      134160 :  out:
    1302      135981 :         spin_unlock(&ctx->flc_lock);
    1303      135981 :         percpu_up_read(&file_rwsem);
    1304             :         /*
    1305             :          * Free any unused locks.
    1306             :          */
    1307      135981 :         if (new_fl)
    1308         335 :                 locks_free_lock(new_fl);
    1309      135981 :         if (new_fl2)
    1310       67412 :                 locks_free_lock(new_fl2);
    1311      135981 :         locks_dispose_list(&dispose);
    1312      135981 :         trace_posix_lock_inode(inode, request, error);
    1313             : 
    1314      135981 :         return error;
    1315             : }
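
A short sketch of the splitting case that motivates allocating new_fl2 up front (illustrative only; the path is arbitrary): unlocking a sub-range in the middle of an existing lock leaves a left piece and a right piece, and the right piece needs a second file_lock structure.

/* Illustrative only: split one POSIX write lock by unlocking its middle. */
#include <fcntl.h>
#include <stdio.h>

static int setlk(int fd, short type, off_t start, off_t len)
{
        struct flock fl = { .l_type = type, .l_whence = SEEK_SET,
                            .l_start = start, .l_len = len };
        return fcntl(fd, F_SETLK, &fl);
}

int main(void)
{
        int fd = open("/tmp/posix-split-demo", O_RDWR | O_CREAT, 0644);
        if (fd < 0) { perror("open"); return 1; }

        setlk(fd, F_WRLCK, 0, 10);      /* one lock covering bytes 0..9        */
        setlk(fd, F_UNLCK, 3, 3);       /* unlock bytes 3..5: the lock becomes */
                                        /* 0..2 (left) plus 6..9 (right)       */
        return 0;
}
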
    1316             : 
    1317             : /**
    1318             :  * posix_lock_file - Apply a POSIX-style lock to a file
    1319             :  * @filp: The file to apply the lock to
    1320             :  * @fl: The lock to be applied
    1321             :  * @conflock: Place to return a copy of the conflicting lock, if found.
    1322             :  *
    1323             :  * Add a POSIX style lock to a file.
    1324             :  * We merge adjacent & overlapping locks whenever possible.
    1325             :  * POSIX locks are sorted by owner task, then by starting address
    1326             :  *
    1327             :  * Note that if called with an FL_EXISTS argument, the caller may determine
    1328             :  * whether or not a lock was successfully freed by testing the return
    1329             :  * value for -ENOENT.
    1330             :  */
    1331           0 : int posix_lock_file(struct file *filp, struct file_lock *fl,
    1332             :                         struct file_lock *conflock)
    1333             : {
    1334           0 :         return posix_lock_inode(file_inode(filp), fl, conflock);
    1335             : }
    1336             : EXPORT_SYMBOL(posix_lock_file);
    1337             : 
    1338             : /**
    1339             :  * posix_lock_inode_wait - Apply a POSIX-style lock to a file
    1340             :  * @inode: inode of file to which lock request should be applied
    1341             :  * @fl: The lock to be applied
    1342             :  *
    1343             :  * Apply a POSIX style lock request to an inode.
    1344             :  */
    1345        4392 : static int posix_lock_inode_wait(struct inode *inode, struct file_lock *fl)
    1346             : {
    1347        4392 :         int error;
    1348        4392 :         might_sleep();
    1349        4392 :         for (;;) {
    1350        4392 :                 error = posix_lock_inode(inode, fl, NULL);
    1351        4392 :                 if (error != FILE_LOCK_DEFERRED)
    1352             :                         break;
    1353           0 :                 error = wait_event_interruptible(fl->fl_wait,
    1354             :                                         list_empty(&fl->fl_blocked_member));
    1355           0 :                 if (error)
    1356             :                         break;
    1357             :         }
    1358        4392 :         locks_delete_block(fl);
    1359        4392 :         return error;
    1360             : }
    1361             : 
    1362          16 : static void lease_clear_pending(struct file_lock *fl, int arg)
    1363             : {
    1364          16 :         switch (arg) {
    1365          16 :         case F_UNLCK:
    1366          16 :                 fl->fl_flags &= ~FL_UNLOCK_PENDING;
    1367          16 :                 fallthrough;
    1368          16 :         case F_RDLCK:
    1369          16 :                 fl->fl_flags &= ~FL_DOWNGRADE_PENDING;
    1370             :         }
    1371          16 : }
    1372             : 
    1373             : /* We already had a lease on this file; just change its type */
    1374          16 : int lease_modify(struct file_lock *fl, int arg, struct list_head *dispose)
    1375             : {
    1376          16 :         int error = assign_type(fl, arg);
    1377             : 
    1378          16 :         if (error)
    1379             :                 return error;
    1380          16 :         lease_clear_pending(fl, arg);
    1381          16 :         locks_wake_up_blocks(fl);
    1382          16 :         if (arg == F_UNLCK) {
    1383          16 :                 struct file *filp = fl->fl_file;
    1384             : 
    1385          16 :                 f_delown(filp);
    1386          16 :                 filp->f_owner.signum = 0;
    1387          16 :                 fasync_helper(0, fl->fl_file, 0, &fl->fl_fasync);
    1388          16 :                 if (fl->fl_fasync != NULL) {
    1389           0 :                         printk(KERN_ERR "locks_delete_lock: fasync == %p\n", fl->fl_fasync);
    1390           0 :                         fl->fl_fasync = NULL;
    1391             :                 }
    1392          16 :                 locks_delete_lock_ctx(fl, dispose);
    1393             :         }
    1394             :         return 0;
    1395             : }
    1396             : EXPORT_SYMBOL(lease_modify);
    1397             : 
    1398             : static bool past_time(unsigned long then)
    1399             : {
    1400          56 :         if (!then)
    1401             :                 /* 0 is a special value meaning "this never expires": */
    1402             :                 return false;
    1403           0 :         return time_after(jiffies, then);
    1404             : }
    1405             : 
    1406          48 : static void time_out_leases(struct inode *inode, struct list_head *dispose)
    1407             : {
    1408          48 :         struct file_lock_context *ctx = inode->i_flctx;
    1409          48 :         struct file_lock *fl, *tmp;
    1410             : 
    1411          48 :         lockdep_assert_held(&ctx->flc_lock);
    1412             : 
    1413          76 :         list_for_each_entry_safe(fl, tmp, &ctx->flc_lease, fl_list) {
    1414          28 :                 trace_time_out_leases(inode, fl);
    1415          28 :                 if (past_time(fl->fl_downgrade_time))
    1416           0 :                         lease_modify(fl, F_RDLCK, dispose);
    1417          28 :                 if (past_time(fl->fl_break_time))
    1418           0 :                         lease_modify(fl, F_UNLCK, dispose);
    1419             :         }
    1420          48 : }
    1421             : 
    1422          22 : static bool leases_conflict(struct file_lock *lease, struct file_lock *breaker)
    1423             : {
    1424          22 :         bool rc;
    1425             : 
    1426          22 :         if (lease->fl_lmops->lm_breaker_owns_lease
    1427           0 :                         && lease->fl_lmops->lm_breaker_owns_lease(lease))
    1428             :                 return false;
    1429          22 :         if ((breaker->fl_flags & FL_LAYOUT) != (lease->fl_flags & FL_LAYOUT)) {
    1430           0 :                 rc = false;
    1431           0 :                 goto trace;
    1432             :         }
    1433          22 :         if ((breaker->fl_flags & FL_DELEG) && (lease->fl_flags & FL_LEASE)) {
    1434           0 :                 rc = false;
    1435           0 :                 goto trace;
    1436             :         }
    1437             : 
    1438          22 :         rc = locks_conflict(breaker, lease);
    1439          22 : trace:
    1440          22 :         trace_leases_conflict(rc, lease, breaker);
    1441          22 :         return rc;
    1442             : }
    1443             : 
    1444             : static bool
    1445          20 : any_leases_conflict(struct inode *inode, struct file_lock *breaker)
    1446             : {
    1447          20 :         struct file_lock_context *ctx = inode->i_flctx;
    1448          20 :         struct file_lock *fl;
    1449             : 
    1450          20 :         lockdep_assert_held(&ctx->flc_lock);
    1451             : 
    1452          22 :         list_for_each_entry(fl, &ctx->flc_lease, fl_list) {
    1453          12 :                 if (leases_conflict(fl, breaker))
    1454             :                         return true;
    1455             :         }
    1456             :         return false;
    1457             : }
    1458             : 
    1459             : /**
    1460             :  *      __break_lease   -       revoke all outstanding leases on file
    1461             :  *      @inode: the inode of the file whose leases are to be broken
    1462             :  *      @mode: O_RDONLY: break only write leases; O_WRONLY or O_RDWR:
    1463             :  *          break all leases
    1464             :  *      @type: FL_LEASE: break leases and delegations; FL_DELEG: break
    1465             :  *          only delegations
    1466             :  *
    1467             :  *      break_lease (inlined for speed) has checked there already is at least
    1468             :  *      some kind of lock (maybe a lease) on this file.  Leases are broken on
    1469             :  *      a call to open() or truncate().  This function can sleep unless you
    1470             :  *      specified %O_NONBLOCK to your open().
    1471             :  */
    1472          12 : int __break_lease(struct inode *inode, unsigned int mode, unsigned int type)
    1473             : {
    1474          12 :         int error = 0;
    1475          12 :         struct file_lock_context *ctx;
    1476          12 :         struct file_lock *new_fl, *fl, *tmp;
    1477          12 :         unsigned long break_time;
    1478          12 :         int want_write = (mode & O_ACCMODE) != O_RDONLY;
    1479          12 :         LIST_HEAD(dispose);
    1480             : 
    1481          12 :         new_fl = lease_alloc(NULL, want_write ? F_WRLCK : F_RDLCK);
    1482          12 :         if (IS_ERR(new_fl))
    1483           0 :                 return PTR_ERR(new_fl);
    1484          12 :         new_fl->fl_flags = type;
    1485             : 
    1486             :         /* typically we will check that ctx is non-NULL before calling */
    1487          12 :         ctx = locks_inode_context(inode);
    1488          12 :         if (!ctx) {
    1489           0 :                 WARN_ON_ONCE(1);
    1490           0 :                 goto free_lock;
    1491             :         }
    1492             : 
    1493          12 :         percpu_down_read(&file_rwsem);
    1494          12 :         spin_lock(&ctx->flc_lock);
    1495             : 
    1496          12 :         time_out_leases(inode, &dispose);
    1497             : 
    1498          12 :         if (!any_leases_conflict(inode, new_fl))
    1499           2 :                 goto out;
    1500             : 
    1501          10 :         break_time = 0;
    1502          10 :         if (lease_break_time > 0) {
    1503          10 :                 break_time = jiffies + lease_break_time * HZ;
    1504          10 :                 if (break_time == 0)
    1505             :                         break_time++;   /* so that 0 means no break time */
    1506             :         }
    1507             : 
    1508          20 :         list_for_each_entry_safe(fl, tmp, &ctx->flc_lease, fl_list) {
    1509          10 :                 if (!leases_conflict(fl, new_fl))
    1510           0 :                         continue;
    1511          10 :                 if (want_write) {
    1512           8 :                         if (fl->fl_flags & FL_UNLOCK_PENDING)
    1513           0 :                                 continue;
    1514           8 :                         fl->fl_flags |= FL_UNLOCK_PENDING;
    1515           8 :                         fl->fl_break_time = break_time;
    1516             :                 } else {
    1517           2 :                         if (lease_breaking(fl))
    1518           0 :                                 continue;
    1519           2 :                         fl->fl_flags |= FL_DOWNGRADE_PENDING;
    1520           2 :                         fl->fl_downgrade_time = break_time;
    1521             :                 }
    1522          10 :                 if (fl->fl_lmops->lm_break(fl))
    1523           0 :                         locks_delete_lock_ctx(fl, &dispose);
    1524             :         }
    1525             : 
    1526          10 :         if (list_empty(&ctx->flc_lease))
    1527           0 :                 goto out;
    1528             : 
    1529          10 :         if (mode & O_NONBLOCK) {
    1530           0 :                 trace_break_lease_noblock(inode, new_fl);
    1531           0 :                 error = -EWOULDBLOCK;
    1532           0 :                 goto out;
    1533             :         }
    1534             : 
    1535          10 : restart:
    1536          10 :         fl = list_first_entry(&ctx->flc_lease, struct file_lock, fl_list);
    1537          10 :         break_time = fl->fl_break_time;
    1538          10 :         if (break_time != 0)
    1539           8 :                 break_time -= jiffies;
    1540          10 :         if (break_time == 0)
    1541             :                 break_time++;
    1542          10 :         locks_insert_block(fl, new_fl, leases_conflict);
    1543          10 :         trace_break_lease_block(inode, new_fl);
    1544          10 :         spin_unlock(&ctx->flc_lock);
    1545          10 :         percpu_up_read(&file_rwsem);
    1546             : 
    1547          10 :         locks_dispose_list(&dispose);
    1548          18 :         error = wait_event_interruptible_timeout(new_fl->fl_wait,
    1549             :                                         list_empty(&new_fl->fl_blocked_member),
    1550             :                                         break_time);
    1551             : 
    1552          10 :         percpu_down_read(&file_rwsem);
    1553          10 :         spin_lock(&ctx->flc_lock);
    1554          10 :         trace_break_lease_unblock(inode, new_fl);
    1555          10 :         locks_delete_block(new_fl);
    1556          10 :         if (error >= 0) {
    1557             :                 /*
    1558             :                  * Wait for the next conflicting lease that has not been
    1559             :                  * broken yet
    1560             :                  */
    1561           8 :                 if (error == 0)
    1562           0 :                         time_out_leases(inode, &dispose);
    1563           8 :                 if (any_leases_conflict(inode, new_fl))
    1564           0 :                         goto restart;
    1565             :                 error = 0;
    1566             :         }
    1567           2 : out:
    1568          12 :         spin_unlock(&ctx->flc_lock);
    1569          12 :         percpu_up_read(&file_rwsem);
    1570          12 :         locks_dispose_list(&dispose);
    1571          12 : free_lock:
    1572          12 :         locks_free_lock(new_fl);
    1573          12 :         return error;
    1574             : }
    1575             : EXPORT_SYMBOL(__break_lease);
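
To put the above in userspace terms (an illustrative sketch assuming a Linux/glibc environment; the path is arbitrary): a process holding a lease receives SIGIO when another open() conflicts with it, and the opener sleeps in the wait above until the lease is released or downgraded, or until lease_break_time seconds have passed.

/* Illustrative only: a read lease is broken by another process's open() for write. */
#define _GNU_SOURCE                             /* F_SETLEASE is Linux-specific      */
#include <fcntl.h>
#include <signal.h>
#include <stdio.h>
#include <sys/wait.h>
#include <unistd.h>

static int leased_fd = -1;

static void on_sigio(int sig)
{
        (void)sig;
        fcntl(leased_fd, F_SETLEASE, F_UNLCK);  /* give the lease back when asked    */
}

int main(void)
{
        const char *path = "/tmp/lease-demo";
        int fd = open(path, O_RDONLY | O_CREAT, 0644);
        if (fd < 0) { perror("open"); return 1; }

        leased_fd = fd;
        signal(SIGIO, on_sigio);                /* SIGIO is the default break signal */
        if (fcntl(fd, F_SETLEASE, F_RDLCK) < 0)
                perror("F_SETLEASE");

        if (fork() == 0) {                      /* child: the lease breaker          */
                int bfd = open(path, O_WRONLY); /* sleeps in __break_lease() until   */
                                                /* the lease is dropped              */
                printf("write open done, lease broken\n");
                close(bfd);
                _exit(0);
        }
        wait(NULL);                             /* handler runs while we wait        */
        return 0;
}
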
    1576             : 
    1577             : /**
    1578             :  *      lease_get_mtime - update modified time of an inode with exclusive lease
    1579             :  *      @inode: the inode
    1580             :  *      @time:  pointer to a timespec which contains the last modified time
    1581             :  *
    1582             :  * This is to force NFS clients to flush their caches for files with
    1583             :  * exclusive leases.  The justification is that if someone has an
    1584             :  * exclusive lease, then they could be modifying it.
    1585             :  */
    1586           0 : void lease_get_mtime(struct inode *inode, struct timespec64 *time)
    1587             : {
    1588           0 :         bool has_lease = false;
    1589           0 :         struct file_lock_context *ctx;
    1590           0 :         struct file_lock *fl;
    1591             : 
    1592           0 :         ctx = locks_inode_context(inode);
    1593           0 :         if (ctx && !list_empty_careful(&ctx->flc_lease)) {
    1594           0 :                 spin_lock(&ctx->flc_lock);
    1595           0 :                 fl = list_first_entry_or_null(&ctx->flc_lease,
    1596             :                                               struct file_lock, fl_list);
    1597           0 :                 if (fl && (fl->fl_type == F_WRLCK))
    1598           0 :                         has_lease = true;
    1599           0 :                 spin_unlock(&ctx->flc_lock);
    1600             :         }
    1601             : 
    1602           0 :         if (has_lease)
    1603           0 :                 *time = current_time(inode);
    1604           0 : }
    1605             : EXPORT_SYMBOL(lease_get_mtime);
    1606             : 
    1607             : /**
    1608             :  *      fcntl_getlease - Enquire what lease is currently active
    1609             :  *      @filp: the file
    1610             :  *
    1611             :  *      The value returned by this function will be one of
    1612             :  *      (if no lease break is pending):
    1613             :  *
    1614             :  *      %F_RDLCK to indicate a shared lease is held.
    1615             :  *
    1616             :  *      %F_WRLCK to indicate an exclusive lease is held.
    1617             :  *
    1618             :  *      %F_UNLCK to indicate no lease is held.
    1619             :  *
    1620             :  *      (if a lease break is pending):
    1621             :  *
    1622             :  *      %F_RDLCK to indicate an exclusive lease needs to be
    1623             :  *              changed to a shared lease (or removed).
    1624             :  *
    1625             :  *      %F_UNLCK to indicate the lease needs to be removed.
    1626             :  *
    1627             :  *      XXX: sfr & willy disagree over whether F_INPROGRESS
    1628             :  *      should be returned to userspace.
    1629             :  */
    1630          20 : int fcntl_getlease(struct file *filp)
    1631             : {
    1632          20 :         struct file_lock *fl;
    1633          20 :         struct inode *inode = file_inode(filp);
    1634          20 :         struct file_lock_context *ctx;
    1635          20 :         int type = F_UNLCK;
    1636          20 :         LIST_HEAD(dispose);
    1637             : 
    1638          20 :         ctx = locks_inode_context(inode);
    1639          20 :         if (ctx && !list_empty_careful(&ctx->flc_lease)) {
    1640          16 :                 percpu_down_read(&file_rwsem);
    1641          16 :                 spin_lock(&ctx->flc_lock);
    1642          16 :                 time_out_leases(inode, &dispose);
    1643          16 :                 list_for_each_entry(fl, &ctx->flc_lease, fl_list) {
    1644          16 :                         if (fl->fl_file != filp)
    1645           0 :                                 continue;
    1646          16 :                         type = target_leasetype(fl);
    1647             :                         break;
    1648             :                 }
    1649          16 :                 spin_unlock(&ctx->flc_lock);
    1650          16 :                 percpu_up_read(&file_rwsem);
    1651             : 
    1652          16 :                 locks_dispose_list(&dispose);
    1653             :         }
    1654          20 :         return type;
    1655             : }
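
From userspace the mapping documented above is visible through F_GETLEASE (illustrative sketch; the path is arbitrary):

/* Illustrative only: query the current lease type on an fd. */
#define _GNU_SOURCE                             /* F_GETLEASE is Linux-specific */
#include <fcntl.h>
#include <stdio.h>

int main(void)
{
        int fd = open("/tmp/lease-demo", O_RDONLY | O_CREAT, 0644);
        if (fd < 0) { perror("open"); return 1; }

        fcntl(fd, F_SETLEASE, F_RDLCK);         /* try to take a shared lease   */

        switch (fcntl(fd, F_GETLEASE)) {        /* reports target_leasetype()   */
        case F_RDLCK: puts("shared lease (or exclusive being downgraded)"); break;
        case F_WRLCK: puts("exclusive lease");                              break;
        case F_UNLCK: puts("no lease (or one that must be removed)");       break;
        default:      perror("F_GETLEASE");
        }
        return 0;
}
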
    1656             : 
    1657             : /**
    1658             :  * check_conflicting_open - see if the given file points to an inode that has
    1659             :  *                          an existing open that would conflict with the
    1660             :  *                          desired lease.
    1661             :  * @filp:       file to check
    1662             :  * @arg:        type of lease that we're trying to acquire
    1663             :  * @flags:      current lock flags
    1664             :  *
    1665             :  * Check to see if there's an existing open fd on this file that would
    1666             :  * conflict with the lease we're trying to set.
    1667             :  */
    1668             : static int
    1669          36 : check_conflicting_open(struct file *filp, const long arg, int flags)
    1670             : {
    1671          36 :         struct inode *inode = file_inode(filp);
    1672          36 :         int self_wcount = 0, self_rcount = 0;
    1673             : 
    1674          36 :         if (flags & FL_LAYOUT)
    1675             :                 return 0;
    1676          36 :         if (flags & FL_DELEG)
    1677             :                 /* We leave these checks to the caller */
    1678             :                 return 0;
    1679             : 
    1680          36 :         if (arg == F_RDLCK)
    1681          22 :                 return inode_is_open_for_write(inode) ? -EAGAIN : 0;
    1682          14 :         else if (arg != F_WRLCK)
    1683             :                 return 0;
    1684             : 
    1685             :         /*
    1686             :          * Make sure that the only read/write counts come from the lease requestor.
    1687             :          * Note that this will result in denying write leases when i_writecount
    1688             :          * is negative, which is what we want.  (We shouldn't grant write leases
    1689             :          * on files open for execution.)
    1690             :          */
    1691          14 :         if (filp->f_mode & FMODE_WRITE)
    1692             :                 self_wcount = 1;
    1693           0 :         else if (filp->f_mode & FMODE_READ)
    1694           0 :                 self_rcount = 1;
    1695             : 
    1696          14 :         if (atomic_read(&inode->i_writecount) != self_wcount ||
    1697             :             atomic_read(&inode->i_readcount) != self_rcount)
    1698           2 :                 return -EAGAIN;
    1699             : 
    1700             :         return 0;
    1701             : }
    1702             : 
    1703             : static int
    1704          20 : generic_add_lease(struct file *filp, long arg, struct file_lock **flp, void **priv)
    1705             : {
    1706          20 :         struct file_lock *fl, *my_fl = NULL, *lease;
    1707          20 :         struct inode *inode = file_inode(filp);
    1708          20 :         struct file_lock_context *ctx;
    1709          20 :         bool is_deleg = (*flp)->fl_flags & FL_DELEG;
    1710          20 :         int error;
    1711          20 :         LIST_HEAD(dispose);
    1712             : 
    1713          20 :         lease = *flp;
    1714          20 :         trace_generic_add_lease(inode, lease);
    1715             : 
    1716             :         /* Note that arg is never F_UNLCK here */
    1717          20 :         ctx = locks_get_lock_context(inode, arg);
    1718          20 :         if (!ctx)
    1719             :                 return -ENOMEM;
    1720             : 
    1721             :         /*
    1722             :          * In the delegation case we need mutual exclusion with
    1723             :          * a number of operations that take the i_mutex.  We trylock
    1724             :          * because delegations are an optional optimization, and if
    1725             :          * there's some chance of a conflict we'd rather not
    1726             :          * bother; that may be a sign this just isn't a good file to
    1727             :          * hand out a delegation on.
    1728             :          */
    1729          20 :         if (is_deleg && !inode_trylock(inode))
    1730             :                 return -EAGAIN;
    1731             : 
    1732          20 :         if (is_deleg && arg == F_WRLCK) {
    1733             :                 /* Write delegations are not currently supported: */
    1734           0 :                 inode_unlock(inode);
    1735           0 :                 WARN_ON_ONCE(1);
    1736           0 :                 return -EINVAL;
    1737             :         }
    1738             : 
    1739          20 :         percpu_down_read(&file_rwsem);
    1740          20 :         spin_lock(&ctx->flc_lock);
    1741          20 :         time_out_leases(inode, &dispose);
    1742          20 :         error = check_conflicting_open(filp, arg, lease->fl_flags);
    1743          20 :         if (error)
    1744           4 :                 goto out;
    1745             : 
    1746             :         /*
    1747             :          * At this point, we know that if there is an exclusive
    1748             :          * lease on this file, then we hold it on this filp
    1749             :          * (otherwise our open of this file would have blocked).
    1750             :          * And if we are trying to acquire an exclusive lease,
    1751             :          * then the file is not open by anyone (including us)
    1752             :          * except for this filp.
    1753             :          */
    1754          16 :         error = -EAGAIN;
    1755          16 :         list_for_each_entry(fl, &ctx->flc_lease, fl_list) {
    1756           0 :                 if (fl->fl_file == filp &&
    1757           0 :                     fl->fl_owner == lease->fl_owner) {
    1758           0 :                         my_fl = fl;
    1759           0 :                         continue;
    1760             :                 }
    1761             : 
    1762             :                 /*
    1763             :                  * No exclusive leases if someone else has a lease on
    1764             :                  * this file:
    1765             :                  */
    1766           0 :                 if (arg == F_WRLCK)
    1767           0 :                         goto out;
    1768             :                 /*
    1769             :                  * Modifying our existing lease is OK, but we may not get a
    1770             :                  * new lease if someone else is opening for write:
    1771             :                  */
    1772           0 :                 if (fl->fl_flags & FL_UNLOCK_PENDING)
    1773           0 :                         goto out;
    1774             :         }
    1775             : 
    1776          16 :         if (my_fl != NULL) {
    1777           0 :                 lease = my_fl;
    1778           0 :                 error = lease->fl_lmops->lm_change(lease, arg, &dispose);
    1779           0 :                 if (error)
    1780           0 :                         goto out;
    1781           0 :                 goto out_setup;
    1782             :         }
    1783             : 
    1784          16 :         error = -EINVAL;
    1785          16 :         if (!leases_enable)
    1786           0 :                 goto out;
    1787             : 
    1788          16 :         locks_insert_lock_ctx(lease, &ctx->flc_lease);
    1789             :         /*
    1790             :          * The check in break_lease() is lockless. It's possible for another
    1791             :          * open to race in after we did the earlier check for a conflicting
    1792             :          * open but before the lease was inserted. Check again for a
    1793             :          * conflicting open and cancel the lease if there is one.
    1794             :          *
    1795             :          * We also add a barrier here to ensure that the insertion of the lock
    1796             :          * precedes these checks.
    1797             :          */
    1798          16 :         smp_mb();
    1799          16 :         error = check_conflicting_open(filp, arg, lease->fl_flags);
    1800          16 :         if (error) {
    1801           0 :                 locks_unlink_lock_ctx(lease);
    1802           0 :                 goto out;
    1803             :         }
    1804             : 
    1805          16 : out_setup:
    1806          16 :         if (lease->fl_lmops->lm_setup)
    1807          16 :                 lease->fl_lmops->lm_setup(lease, priv);
    1808           0 : out:
    1809          20 :         spin_unlock(&ctx->flc_lock);
    1810          20 :         percpu_up_read(&file_rwsem);
    1811          20 :         locks_dispose_list(&dispose);
    1812          20 :         if (is_deleg)
    1813           0 :                 inode_unlock(inode);
    1814          20 :         if (!error && !my_fl)
    1815          16 :                 *flp = NULL;
    1816             :         return error;
    1817             : }
    1818             : 
    1819          16 : static int generic_delete_lease(struct file *filp, void *owner)
    1820             : {
    1821          16 :         int error = -EAGAIN;
    1822          16 :         struct file_lock *fl, *victim = NULL;
    1823          16 :         struct inode *inode = file_inode(filp);
    1824          16 :         struct file_lock_context *ctx;
    1825          16 :         LIST_HEAD(dispose);
    1826             : 
    1827          16 :         ctx = locks_inode_context(inode);
    1828          16 :         if (!ctx) {
    1829           2 :                 trace_generic_delete_lease(inode, NULL);
    1830           2 :                 return error;
    1831             :         }
    1832             : 
    1833          14 :         percpu_down_read(&file_rwsem);
    1834          14 :         spin_lock(&ctx->flc_lock);
    1835          14 :         list_for_each_entry(fl, &ctx->flc_lease, fl_list) {
    1836          14 :                 if (fl->fl_file == filp &&
    1837          14 :                     fl->fl_owner == owner) {
    1838             :                         victim = fl;
    1839             :                         break;
    1840             :                 }
    1841             :         }
    1842          14 :         trace_generic_delete_lease(inode, victim);
    1843          14 :         if (victim)
    1844          14 :                 error = fl->fl_lmops->lm_change(victim, F_UNLCK, &dispose);
    1845          14 :         spin_unlock(&ctx->flc_lock);
    1846          14 :         percpu_up_read(&file_rwsem);
    1847          14 :         locks_dispose_list(&dispose);
    1848          14 :         return error;
    1849             : }
    1850             : 
    1851             : /**
    1852             :  *      generic_setlease        -       sets a lease on an open file
    1853             :  *      @filp:  file pointer
    1854             :  *      @arg:   type of lease to obtain
    1855             :  *      @flp:   input - file_lock to use, output - file_lock inserted
    1856             :  *      @priv:  private data for lm_setup (may be NULL if lm_setup
    1857             :  *              doesn't require it)
    1858             :  *
    1859             :  *      The (input) flp->fl_lmops->lm_break function is required
    1860             :  *      by break_lease().
    1861             :  */
    1862          36 : int generic_setlease(struct file *filp, long arg, struct file_lock **flp,
    1863             :                         void **priv)
    1864             : {
    1865          36 :         struct inode *inode = file_inode(filp);
    1866          36 :         vfsuid_t vfsuid = i_uid_into_vfsuid(file_mnt_idmap(filp), inode);
    1867          36 :         int error;
    1868             : 
    1869          72 :         if ((!vfsuid_eq_kuid(vfsuid, current_fsuid())) && !capable(CAP_LEASE))
    1870             :                 return -EACCES;
    1871          36 :         if (!S_ISREG(inode->i_mode))
    1872             :                 return -EINVAL;
    1873          36 :         error = security_file_lock(filp, arg);
    1874          36 :         if (error)
    1875             :                 return error;
    1876             : 
    1877          36 :         switch (arg) {
    1878          16 :         case F_UNLCK:
    1879          16 :                 return generic_delete_lease(filp, *priv);
    1880          20 :         case F_RDLCK:
    1881             :         case F_WRLCK:
    1882          20 :                 if (!(*flp)->fl_lmops->lm_break) {
    1883           0 :                         WARN_ON_ONCE(1);
    1884           0 :                         return -ENOLCK;
    1885             :                 }
    1886             : 
    1887          20 :                 return generic_add_lease(filp, arg, flp, priv);
    1888             :         default:
    1889             :                 return -EINVAL;
    1890             :         }
    1891             : }
    1892             : EXPORT_SYMBOL(generic_setlease);
    1893             : 
    1894             : /*
    1895             :  * Kernel subsystems can register to be notified on any attempt to set
    1896             :  * a new lease with the lease_notifier_chain. This is used by (e.g.) nfsd
    1897             :  * to close files that it may have cached when there is an attempt to set a
    1898             :  * conflicting lease.
    1899             :  */
    1900             : static struct srcu_notifier_head lease_notifier_chain;
    1901             : 
    1902             : static inline void
    1903             : lease_notifier_chain_init(void)
    1904             : {
    1905           0 :         srcu_init_notifier_head(&lease_notifier_chain);
    1906             : }
    1907             : 
    1908             : static inline void
    1909             : setlease_notifier(long arg, struct file_lock *lease)
    1910             : {
    1911          20 :         if (arg != F_UNLCK)
    1912          20 :                 srcu_notifier_call_chain(&lease_notifier_chain, arg, lease);
    1913             : }
    1914             : 
    1915           0 : int lease_register_notifier(struct notifier_block *nb)
    1916             : {
    1917           0 :         return srcu_notifier_chain_register(&lease_notifier_chain, nb);
    1918             : }
    1919             : EXPORT_SYMBOL_GPL(lease_register_notifier);
    1920             : 
    1921           0 : void lease_unregister_notifier(struct notifier_block *nb)
    1922             : {
    1923           0 :         srcu_notifier_chain_unregister(&lease_notifier_chain, nb);
    1924           0 : }
    1925             : EXPORT_SYMBOL_GPL(lease_unregister_notifier);
    1926             : 
    1927             : /**
    1928             :  * vfs_setlease        -       sets a lease on an open file
    1929             :  * @filp:       file pointer
    1930             :  * @arg:        type of lease to obtain
    1931             :  * @lease:      file_lock to use when adding a lease
    1932             :  * @priv:       private info for lm_setup when adding a lease (may be
    1933             :  *              NULL if lm_setup doesn't require it)
    1934             :  *
    1935             :  * Call this to establish a lease on the file. The "lease" argument is not
    1936             :  * used for F_UNLCK requests and may be NULL. For commands that set or alter
    1937             :  * an existing lease, the ``(*lease)->fl_lmops->lm_break`` operation must be
    1938             :  * set; if not, this function will return -ENOLCK (and generate a scary-looking
    1939             :  * stack trace).
    1940             :  *
    1941             :  * The "priv" pointer is passed directly to the lm_setup function as-is. It
    1942             :  * may be NULL if the lm_setup operation doesn't require it.
    1943             :  */
    1944             : int
    1945          36 : vfs_setlease(struct file *filp, long arg, struct file_lock **lease, void **priv)
    1946             : {
    1947          36 :         if (lease)
    1948          20 :                 setlease_notifier(arg, *lease);
    1949          36 :         if (filp->f_op->setlease)
    1950           0 :                 return filp->f_op->setlease(filp, arg, lease, priv);
    1951             :         else
    1952          36 :                 return generic_setlease(filp, arg, lease, priv);
    1953             : }
    1954             : EXPORT_SYMBOL_GPL(vfs_setlease);
    1955             : 
    1956          20 : static int do_fcntl_add_lease(unsigned int fd, struct file *filp, long arg)
    1957             : {
    1958          20 :         struct file_lock *fl;
    1959          20 :         struct fasync_struct *new;
    1960          20 :         int error;
    1961             : 
    1962          20 :         fl = lease_alloc(filp, arg);
    1963          20 :         if (IS_ERR(fl))
    1964           0 :                 return PTR_ERR(fl);
    1965             : 
    1966          20 :         new = fasync_alloc();
    1967          20 :         if (!new) {
    1968           0 :                 locks_free_lock(fl);
    1969           0 :                 return -ENOMEM;
    1970             :         }
    1971          20 :         new->fa_fd = fd;
    1972             : 
    1973          20 :         error = vfs_setlease(filp, arg, &fl, (void **)&new);
    1974          20 :         if (fl)
    1975           4 :                 locks_free_lock(fl);
    1976          20 :         if (new)
    1977           4 :                 fasync_free(new);
    1978             :         return error;
    1979             : }
    1980             : 
    1981             : /**
    1982             :  *      fcntl_setlease  -       sets a lease on an open file
    1983             :  *      @fd: open file descriptor
    1984             :  *      @filp: file pointer
    1985             :  *      @arg: type of lease to obtain
    1986             :  *
    1987             :  *      Call this fcntl to establish a lease on the file.
    1988             :  *      Note that you also need to call %F_SETSIG to
    1989             :  *      receive a signal when the lease is broken.
    1990             :  */
    1991          36 : int fcntl_setlease(unsigned int fd, struct file *filp, long arg)
    1992             : {
    1993          36 :         if (arg == F_UNLCK)
    1994          16 :                 return vfs_setlease(filp, F_UNLCK, NULL, (void **)&filp);
    1995          20 :         return do_fcntl_add_lease(fd, filp, arg);
    1996             : }
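
For reference, a minimal userspace sketch of the call described above (illustrative only, not part of this file): the lease is requested with F_SETLEASE on an already-open read-only descriptor, and F_SETSIG routes the lease-break notification to a real-time signal as the comment suggests. Error handling is reduced to the bare minimum.

#define _GNU_SOURCE
#include <fcntl.h>
#include <signal.h>
#include <unistd.h>

/* Take a read lease on fd; drop it later with fcntl(fd, F_SETLEASE, F_UNLCK). */
static int take_read_lease(int fd)
{
        /* Deliver lease-break notification as SIGRTMIN instead of SIGIO. */
        if (fcntl(fd, F_SETSIG, SIGRTMIN) < 0)
                return -1;
        if (fcntl(fd, F_SETLEASE, F_RDLCK) < 0)
                return -1;
        return 0;
}
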
    1997             : 
    1998             : /**
    1999             :  * flock_lock_inode_wait - Apply a FLOCK-style lock to a file
    2000             :  * @inode: inode of the file to apply to
    2001             :  * @fl: The lock to be applied
    2002             :  *
    2003             :  * Apply a FLOCK style lock request to an inode.
    2004             :  */
    2005       62367 : static int flock_lock_inode_wait(struct inode *inode, struct file_lock *fl)
    2006             : {
    2007       62367 :         int error;
    2008       62367 :         might_sleep();
    2009       62367 :         for (;;) {
    2010       62367 :                 error = flock_lock_inode(inode, fl);
    2011       62367 :                 if (error != FILE_LOCK_DEFERRED)
    2012             :                         break;
    2013           2 :                 error = wait_event_interruptible(fl->fl_wait,
    2014             :                                 list_empty(&fl->fl_blocked_member));
    2015           1 :                 if (error)
    2016             :                         break;
    2017             :         }
    2018       62366 :         locks_delete_block(fl);
    2019       62367 :         return error;
    2020             : }
    2021             : 
    2022             : /**
    2023             :  * locks_lock_inode_wait - Apply a lock to an inode
    2024             :  * @inode: inode of the file to apply to
    2025             :  * @fl: The lock to be applied
    2026             :  *
    2027             :  * Apply a POSIX or FLOCK style lock request to an inode.
    2028             :  */
    2029       66759 : int locks_lock_inode_wait(struct inode *inode, struct file_lock *fl)
    2030             : {
    2031       66759 :         int res = 0;
    2032       66759 :         switch (fl->fl_flags & (FL_POSIX|FL_FLOCK)) {
    2033        4392 :                 case FL_POSIX:
    2034        4392 :                         res = posix_lock_inode_wait(inode, fl);
    2035        4392 :                         break;
    2036       62367 :                 case FL_FLOCK:
    2037       62367 :                         res = flock_lock_inode_wait(inode, fl);
    2038       62367 :                         break;
    2039           0 :                 default:
    2040           0 :                         BUG();
    2041             :         }
    2042       66757 :         return res;
    2043             : }
    2044             : EXPORT_SYMBOL(locks_lock_inode_wait);
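
As an illustration of how these helpers are typically reached from a filesystem, here is a minimal, hypothetical ->flock method (the myfs_ name is an assumption, not code from this file) that does nothing special locally and defers to the generic machinery via locks_lock_file_wait(), the wrapper around locks_lock_inode_wait() that sys_flock() below also uses.

#include <linux/fs.h>
#include <linux/filelock.h>

/* Hypothetical example; a real handler might first talk to a lock server. */
static int myfs_flock(struct file *filp, int cmd, struct file_lock *fl)
{
        if (!(fl->fl_flags & FL_FLOCK))
                return -ENOLCK;

        return locks_lock_file_wait(filp, fl);
}
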
    2045             : 
    2046             : /**
    2047             :  *      sys_flock - flock() system call.
    2048             :  *      @fd: the file descriptor to lock.
    2049             :  *      @cmd: the type of lock to apply.
    2050             :  *
    2051             :  *      Apply a %FL_FLOCK style lock to an open file descriptor.
    2052             :  *      The @cmd can be one of:
    2053             :  *
    2054             :  *      - %LOCK_SH -- a shared lock.
    2055             :  *      - %LOCK_EX -- an exclusive lock.
    2056             :  *      - %LOCK_UN -- remove an existing lock.
    2057             :  *      - %LOCK_MAND -- a 'mandatory' flock. (DEPRECATED)
    2058             :  *
    2059             :  *      %LOCK_MAND support has been removed from the kernel.
    2060             :  */
    2061      124718 : SYSCALL_DEFINE2(flock, unsigned int, fd, unsigned int, cmd)
    2062             : {
    2063       62359 :         int can_sleep, error, type;
    2064       62359 :         struct file_lock fl;
    2065       62359 :         struct fd f;
    2066             : 
    2067             :         /*
    2068             :          * LOCK_MAND locks were broken for a long time in that they never
    2069             :          * conflicted with one another and didn't prevent any sort of open,
    2070             :          * read or write activity.
    2071             :          *
    2072             :          * Just ignore these requests now, to preserve legacy behavior, but
    2073             :          * throw a warning to let people know that they don't actually work.
    2074             :          */
    2075       62359 :         if (cmd & LOCK_MAND) {
    2076           0 :                 pr_warn_once("%s(%d): Attempt to set a LOCK_MAND lock via flock(2). This support has been removed and the request ignored.\n", current->comm, current->pid);
    2077           0 :                 return 0;
    2078             :         }
    2079             : 
    2080       62359 :         type = flock_translate_cmd(cmd & ~LOCK_NB);
    2081       62359 :         if (type < 0)
    2082           0 :                 return type;
    2083             : 
    2084       62359 :         error = -EBADF;
    2085       62359 :         f = fdget(fd);
    2086       62359 :         if (!f.file)
    2087             :                 return error;
    2088             : 
    2089       62359 :         if (type != F_UNLCK && !(f.file->f_mode & (FMODE_READ | FMODE_WRITE)))
    2090           0 :                 goto out_putf;
    2091             : 
    2092       62359 :         flock_make_lock(f.file, &fl, type);
    2093             : 
    2094       62358 :         error = security_file_lock(f.file, fl.fl_type);
    2095       62358 :         if (error)
    2096             :                 goto out_putf;
    2097             : 
    2098       62358 :         can_sleep = !(cmd & LOCK_NB);
    2099       62358 :         if (can_sleep)
    2100       25631 :                 fl.fl_flags |= FL_SLEEP;
    2101             : 
    2102       62358 :         if (f.file->f_op->flock)
    2103          24 :                 error = f.file->f_op->flock(f.file,
    2104             :                                             (can_sleep) ? F_SETLKW : F_SETLK,
    2105             :                                             &fl);
    2106             :         else
    2107       62342 :                 error = locks_lock_file_wait(f.file, &fl);
    2108             : 
    2109       62359 :         locks_release_private(&fl);
    2110       62359 :  out_putf:
    2111       62359 :         fdput(f);
    2112             : 
    2113       62359 :         return error;
    2114             : }
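
From userspace the syscall above is reached through flock(2); a small illustrative sketch (not part of this file) of taking a non-blocking exclusive lock:

#include <sys/file.h>
#include <errno.h>

/* Returns 1 if the lock was taken, 0 if someone else holds it, -1 on error.
 * A successful caller releases the lock with flock(fd, LOCK_UN) or close(). */
static int try_exclusive(int fd)
{
        if (flock(fd, LOCK_EX | LOCK_NB) == 0)
                return 1;
        return (errno == EWOULDBLOCK) ? 0 : -1;
}
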
    2115             : 
    2116             : /**
    2117             :  * vfs_test_lock - test file byte range lock
    2118             :  * @filp: The file to test lock for
    2119             :  * @fl: The lock to test; also used to hold result
    2120             :  *
    2121             :  * Returns -ERRNO on failure.  Indicates presence of conflicting lock by
    2122             :  * setting fl->fl_type to something other than F_UNLCK.
    2123             :  */
    2124         184 : int vfs_test_lock(struct file *filp, struct file_lock *fl)
    2125             : {
    2126         184 :         WARN_ON_ONCE(filp != fl->fl_file);
    2127         184 :         if (filp->f_op->lock)
    2128           0 :                 return filp->f_op->lock(filp, F_GETLK, fl);
    2129         184 :         posix_test_lock(filp, fl);
    2130         184 :         return 0;
    2131             : }
    2132             : EXPORT_SYMBOL_GPL(vfs_test_lock);
    2133             : 
    2134             : /**
    2135             :  * locks_translate_pid - translate a file_lock's fl_pid number into a namespace
    2136             :  * @fl: The file_lock whose fl_pid should be translated
    2137             :  * @ns: The namespace into which the pid should be translated
    2138             :  *
    2139             :  * Used to translate an fl_pid into a namespace virtual pid number
    2140             :  */
    2141          66 : static pid_t locks_translate_pid(struct file_lock *fl, struct pid_namespace *ns)
    2142             : {
    2143          66 :         pid_t vnr;
    2144          66 :         struct pid *pid;
    2145             : 
    2146          66 :         if (IS_OFDLCK(fl))
    2147             :                 return -1;
    2148          34 :         if (IS_REMOTELCK(fl))
    2149             :                 return fl->fl_pid;
    2150             :         /*
    2151             :          * If the flock owner process is dead and its pid has already been
    2152             :          * freed, the translation below won't work, but we still want to show
    2153             :          * the flock owner's pid number in the init pidns.
    2154             :          */
    2155          34 :         if (ns == &init_pid_ns)
    2156          34 :                 return (pid_t)fl->fl_pid;
    2157             : 
    2158           0 :         rcu_read_lock();
    2159           0 :         pid = find_pid_ns(fl->fl_pid, &init_pid_ns);
    2160           0 :         vnr = pid_nr_ns(pid, ns);
    2161           0 :         rcu_read_unlock();
    2162           0 :         return vnr;
    2163             : }
    2164             : 
    2165          42 : static int posix_lock_to_flock(struct flock *flock, struct file_lock *fl)
    2166             : {
    2167          42 :         flock->l_pid = locks_translate_pid(fl, task_active_pid_ns(current));
    2168             : #if BITS_PER_LONG == 32
    2169             :         /*
    2170             :          * Make sure we can represent the posix lock via
    2171             :          * legacy 32bit flock.
    2172             :          */
    2173             :         if (fl->fl_start > OFFT_OFFSET_MAX)
    2174             :                 return -EOVERFLOW;
    2175             :         if (fl->fl_end != OFFSET_MAX && fl->fl_end > OFFT_OFFSET_MAX)
    2176             :                 return -EOVERFLOW;
    2177             : #endif
    2178          42 :         flock->l_start = fl->fl_start;
    2179          42 :         flock->l_len = fl->fl_end == OFFSET_MAX ? 0 :
    2180          42 :                 fl->fl_end - fl->fl_start + 1;
    2181          42 :         flock->l_whence = 0;
    2182          42 :         flock->l_type = fl->fl_type;
    2183          42 :         return 0;
    2184             : }
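
To make the conversion above concrete: an internal lock covering the whole file (fl_start == 0, fl_end == OFFSET_MAX) is reported back to userspace as l_start == 0, l_len == 0, which in struct flock terms means "from l_start to end of file".
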
    2185             : 
    2186             : #if BITS_PER_LONG == 32
    2187             : static void posix_lock_to_flock64(struct flock64 *flock, struct file_lock *fl)
    2188             : {
    2189             :         flock->l_pid = locks_translate_pid(fl, task_active_pid_ns(current));
    2190             :         flock->l_start = fl->fl_start;
    2191             :         flock->l_len = fl->fl_end == OFFSET_MAX ? 0 :
    2192             :                 fl->fl_end - fl->fl_start + 1;
    2193             :         flock->l_whence = 0;
    2194             :         flock->l_type = fl->fl_type;
    2195             : }
    2196             : #endif
    2197             : 
    2198             : /* Report the first existing lock that would conflict with @flock.
    2199             :  * This implements the F_GETLK command of fcntl().
    2200             :  */
    2201         184 : int fcntl_getlk(struct file *filp, unsigned int cmd, struct flock *flock)
    2202             : {
    2203         184 :         struct file_lock *fl;
    2204         184 :         int error;
    2205             : 
    2206         184 :         fl = locks_alloc_lock();
    2207         184 :         if (fl == NULL)
    2208             :                 return -ENOMEM;
    2209         184 :         error = -EINVAL;
    2210         184 :         if (flock->l_type != F_RDLCK && flock->l_type != F_WRLCK)
    2211           0 :                 goto out;
    2212             : 
    2213         184 :         error = flock_to_posix_lock(filp, fl, flock);
    2214         184 :         if (error)
    2215           0 :                 goto out;
    2216             : 
    2217         184 :         if (cmd == F_OFD_GETLK) {
    2218         146 :                 error = -EINVAL;
    2219         146 :                 if (flock->l_pid != 0)
    2220           0 :                         goto out;
    2221             : 
    2222         146 :                 fl->fl_flags |= FL_OFDLCK;
    2223         146 :                 fl->fl_owner = filp;
    2224             :         }
    2225             : 
    2226         184 :         error = vfs_test_lock(filp, fl);
    2227         184 :         if (error)
    2228           0 :                 goto out;
    2229             : 
    2230         184 :         flock->l_type = fl->fl_type;
    2231         184 :         if (fl->fl_type != F_UNLCK) {
    2232          42 :                 error = posix_lock_to_flock(flock, fl);
    2233          42 :                 if (error)
    2234           0 :                         goto out;
    2235             :         }
    2236         184 : out:
    2237         184 :         locks_free_lock(fl);
    2238         184 :         return error;
    2239             : }
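
An illustrative userspace counterpart (not part of this file): probing with F_GETLK for a lock that would block a whole-file write lock. On return, l_type is F_UNLCK if nothing conflicts; otherwise the structure describes the first conflicting lock, with l_pid reported as -1 when that lock is an OFD lock (see locks_translate_pid() above).

#include <fcntl.h>
#include <string.h>
#include <unistd.h>

static int whole_file_is_lockable(int fd)
{
        struct flock fl;

        memset(&fl, 0, sizeof(fl));
        fl.l_type   = F_WRLCK;
        fl.l_whence = SEEK_SET;
        fl.l_start  = 0;
        fl.l_len    = 0;                /* 0 means "to end of file" */

        if (fcntl(fd, F_GETLK, &fl) < 0)
                return -1;
        return fl.l_type == F_UNLCK;
}
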
    2240             : 
    2241             : /**
    2242             :  * vfs_lock_file - file byte range lock
    2243             :  * @filp: The file to apply the lock to
    2244             :  * @cmd: type of locking operation (F_SETLK, F_GETLK, etc.)
    2245             :  * @fl: The lock to be applied
    2246             :  * @conf: Place to return a copy of the conflicting lock, if found.
    2247             :  *
    2248             :  * A caller that doesn't care about the conflicting lock may pass NULL
    2249             :  * as the final argument.
    2250             :  *
    2251             :  * If the filesystem defines a private ->lock() method, then @conf will
    2252             :  * be left unchanged; so a caller that cares should initialize it to
    2253             :  * some acceptable default.
    2254             :  *
    2255             :  * To avoid blocking kernel daemons, such as lockd, that need to acquire POSIX
    2256             :  * locks, the ->lock() interface may return asynchronously, before the lock has
    2257             :  * been granted or denied by the underlying filesystem, if (and only if)
    2258             :  * lm_grant is set. Callers expecting ->lock() to return asynchronously
    2259             :  * will only use F_SETLK, not F_SETLKW; they will set FL_SLEEP if (and only if)
    2260             :  * the request is for a blocking lock. When ->lock() does return asynchronously,
    2261             :  * it must return FILE_LOCK_DEFERRED, and call ->lm_grant() when the lock
    2262             :  * request completes.
    2263             :  * If the request is for a non-blocking lock, the file system should return
    2264             :  * FILE_LOCK_DEFERRED, then try to get the lock and call the callback routine
    2265             :  * with the result. If the request timed out, the callback routine will return
    2266             :  * a nonzero return code and the file system should release the lock. The file
    2267             :  * system is also responsible for keeping a corresponding posix lock when it
    2268             :  * grants a lock, so the VFS can find out which locks are locally held and do
    2269             :  * the correct lock cleanup when required.
    2270             :  * The underlying filesystem must not drop the kernel lock or call
    2271             :  * ->lm_grant() before returning to the caller with a FILE_LOCK_DEFERRED
    2272             :  * return code.
    2273             :  */
    2274      134517 : int vfs_lock_file(struct file *filp, unsigned int cmd, struct file_lock *fl, struct file_lock *conf)
    2275             : {
    2276      134517 :         WARN_ON_ONCE(filp != fl->fl_file);
    2277      134517 :         if (filp->f_op->lock)
    2278        2928 :                 return filp->f_op->lock(filp, cmd, fl);
    2279             :         else
    2280      131589 :                 return posix_lock_file(filp, fl, conf);
    2281             : }
    2282             : EXPORT_SYMBOL_GPL(vfs_lock_file);
    2283             : 
    2284       68490 : static int do_lock_file_wait(struct file *filp, unsigned int cmd,
    2285             :                              struct file_lock *fl)
    2286             : {
    2287       68490 :         int error;
    2288             : 
    2289       68490 :         error = security_file_lock(filp, fl->fl_type);
    2290       68490 :         if (error)
    2291             :                 return error;
    2292             : 
    2293       68490 :         for (;;) {
    2294       68490 :                 error = vfs_lock_file(filp, cmd, fl, NULL);
    2295       68490 :                 if (error != FILE_LOCK_DEFERRED)
    2296             :                         break;
    2297           0 :                 error = wait_event_interruptible(fl->fl_wait,
    2298             :                                         list_empty(&fl->fl_blocked_member));
    2299           0 :                 if (error)
    2300             :                         break;
    2301             :         }
    2302       68490 :         locks_delete_block(fl);
    2303             : 
    2304       68490 :         return error;
    2305             : }
    2306             : 
    2307             : /* Ensure that fl->fl_file has compatible f_mode for F_SETLK calls */
    2308             : static int
    2309       68490 : check_fmode_for_setlk(struct file_lock *fl)
    2310             : {
    2311       68490 :         switch (fl->fl_type) {
    2312        4207 :         case F_RDLCK:
    2313        4207 :                 if (!(fl->fl_file->f_mode & FMODE_READ))
    2314           0 :                         return -EBADF;
    2315             :                 break;
    2316       62986 :         case F_WRLCK:
    2317       62986 :                 if (!(fl->fl_file->f_mode & FMODE_WRITE))
    2318           0 :                         return -EBADF;
    2319             :         }
    2320             :         return 0;
    2321             : }
    2322             : 
    2323             : /* Apply the lock described by @flock to an open file descriptor.
    2324             :  * This implements both the F_SETLK and F_SETLKW commands of fcntl().
    2325             :  */
    2326       68490 : int fcntl_setlk(unsigned int fd, struct file *filp, unsigned int cmd,
    2327             :                 struct flock *flock)
    2328             : {
    2329       68490 :         struct file_lock *file_lock = locks_alloc_lock();
    2330       68490 :         struct inode *inode = file_inode(filp);
    2331       68490 :         struct file *f;
    2332       68490 :         int error;
    2333             : 
    2334       68490 :         if (file_lock == NULL)
    2335             :                 return -ENOLCK;
    2336             : 
    2337       68490 :         error = flock_to_posix_lock(filp, file_lock, flock);
    2338       68490 :         if (error)
    2339           0 :                 goto out;
    2340             : 
    2341       68490 :         error = check_fmode_for_setlk(file_lock);
    2342       68490 :         if (error)
    2343           0 :                 goto out;
    2344             : 
    2345             :         /*
    2346             :          * If the cmd is requesting file-private locks, then set the
    2347             :          * FL_OFDLCK flag and override the owner.
    2348             :          */
    2349       68490 :         switch (cmd) {
    2350           0 :         case F_OFD_SETLK:
    2351           0 :                 error = -EINVAL;
    2352           0 :                 if (flock->l_pid != 0)
    2353           0 :                         goto out;
    2354             : 
    2355           0 :                 cmd = F_SETLK;
    2356           0 :                 file_lock->fl_flags |= FL_OFDLCK;
    2357           0 :                 file_lock->fl_owner = filp;
    2358           0 :                 break;
    2359         108 :         case F_OFD_SETLKW:
    2360         108 :                 error = -EINVAL;
    2361         108 :                 if (flock->l_pid != 0)
    2362           0 :                         goto out;
    2363             : 
    2364         108 :                 cmd = F_SETLKW;
    2365         108 :                 file_lock->fl_flags |= FL_OFDLCK;
    2366         108 :                 file_lock->fl_owner = filp;
    2367        6993 :                 fallthrough;
    2368        6993 :         case F_SETLKW:
    2369        6993 :                 file_lock->fl_flags |= FL_SLEEP;
    2370             :         }
    2371             : 
    2372       68490 :         error = do_lock_file_wait(filp, cmd, file_lock);
    2373             : 
    2374             :         /*
    2375             :          * Attempt to detect a close/fcntl race and recover by releasing the
    2376             :          * lock that was just acquired. There is no need to do that when we're
    2377             :          * unlocking though, or for OFD locks.
    2378             :          */
    2379       68490 :         if (!error && file_lock->fl_type != F_UNLCK &&
    2380       67117 :             !(file_lock->fl_flags & FL_OFDLCK)) {
    2381       67009 :                 struct files_struct *files = current->files;
    2382             :                 /*
    2383             :                  * We need that spin_lock here - it prevents reordering between
    2384             :                  * update of i_flctx->flc_posix and check for it done in
    2385             :                  * close(). rcu_read_lock() wouldn't do.
    2386             :                  */
    2387       67009 :                 spin_lock(&files->file_lock);
    2388       67009 :                 f = files_lookup_fd_locked(files, fd);
    2389       67009 :                 spin_unlock(&files->file_lock);
    2390       67009 :                 if (f != filp) {
    2391           0 :                         file_lock->fl_type = F_UNLCK;
    2392           0 :                         error = do_lock_file_wait(filp, cmd, file_lock);
    2393           0 :                         WARN_ON_ONCE(error);
    2394             :                         error = -EBADF;
    2395             :                 }
    2396             :         }
    2397       68490 : out:
    2398       68490 :         trace_fcntl_setlk(inode, file_lock, error);
    2399       68490 :         locks_free_lock(file_lock);
    2400       68490 :         return error;
    2401             : }
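
A corresponding userspace sketch (illustrative only, not part of this file): taking an open-file-description (OFD) byte-range lock. l_pid must be zero for the F_OFD_* commands, matching the -EINVAL check above, and OFD locks are exempt from the close/fcntl race recovery because they belong to the open file description rather than the process.

#define _GNU_SOURCE             /* F_OFD_SETLKW with glibc */
#include <fcntl.h>
#include <string.h>
#include <unistd.h>

static int lock_first_page(int fd)
{
        struct flock fl;

        memset(&fl, 0, sizeof(fl));
        fl.l_type   = F_WRLCK;
        fl.l_whence = SEEK_SET;
        fl.l_start  = 0;
        fl.l_len    = 4096;
        fl.l_pid    = 0;                /* required for F_OFD_* commands */

        return fcntl(fd, F_OFD_SETLKW, &fl);    /* blocks until granted */
}
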
    2402             : 
    2403             : #if BITS_PER_LONG == 32
    2404             : /* Report the first existing lock that would conflict with @flock.
    2405             :  * This implements the F_GETLK command of fcntl().
    2406             :  */
    2407             : int fcntl_getlk64(struct file *filp, unsigned int cmd, struct flock64 *flock)
    2408             : {
    2409             :         struct file_lock *fl;
    2410             :         int error;
    2411             : 
    2412             :         fl = locks_alloc_lock();
    2413             :         if (fl == NULL)
    2414             :                 return -ENOMEM;
    2415             : 
    2416             :         error = -EINVAL;
    2417             :         if (flock->l_type != F_RDLCK && flock->l_type != F_WRLCK)
    2418             :                 goto out;
    2419             : 
    2420             :         error = flock64_to_posix_lock(filp, fl, flock);
    2421             :         if (error)
    2422             :                 goto out;
    2423             : 
    2424             :         if (cmd == F_OFD_GETLK) {
    2425             :                 error = -EINVAL;
    2426             :                 if (flock->l_pid != 0)
    2427             :                         goto out;
    2428             : 
    2429             :                 fl->fl_flags |= FL_OFDLCK;
    2430             :                 fl->fl_owner = filp;
    2431             :         }
    2432             : 
    2433             :         error = vfs_test_lock(filp, fl);
    2434             :         if (error)
    2435             :                 goto out;
    2436             : 
    2437             :         flock->l_type = fl->fl_type;
    2438             :         if (fl->fl_type != F_UNLCK)
    2439             :                 posix_lock_to_flock64(flock, fl);
    2440             : 
    2441             : out:
    2442             :         locks_free_lock(fl);
    2443             :         return error;
    2444             : }
    2445             : 
    2446             : /* Apply the lock described by @flock to an open file descriptor.
    2447             :  * This implements both the F_SETLK and F_SETLKW commands of fcntl().
    2448             :  */
    2449             : int fcntl_setlk64(unsigned int fd, struct file *filp, unsigned int cmd,
    2450             :                 struct flock64 *flock)
    2451             : {
    2452             :         struct file_lock *file_lock = locks_alloc_lock();
    2453             :         struct file *f;
    2454             :         int error;
    2455             : 
    2456             :         if (file_lock == NULL)
    2457             :                 return -ENOLCK;
    2458             : 
    2459             :         error = flock64_to_posix_lock(filp, file_lock, flock);
    2460             :         if (error)
    2461             :                 goto out;
    2462             : 
    2463             :         error = check_fmode_for_setlk(file_lock);
    2464             :         if (error)
    2465             :                 goto out;
    2466             : 
    2467             :         /*
    2468             :          * If the cmd is requesting file-private locks, then set the
    2469             :          * FL_OFDLCK flag and override the owner.
    2470             :          */
    2471             :         switch (cmd) {
    2472             :         case F_OFD_SETLK:
    2473             :                 error = -EINVAL;
    2474             :                 if (flock->l_pid != 0)
    2475             :                         goto out;
    2476             : 
    2477             :                 cmd = F_SETLK64;
    2478             :                 file_lock->fl_flags |= FL_OFDLCK;
    2479             :                 file_lock->fl_owner = filp;
    2480             :                 break;
    2481             :         case F_OFD_SETLKW:
    2482             :                 error = -EINVAL;
    2483             :                 if (flock->l_pid != 0)
    2484             :                         goto out;
    2485             : 
    2486             :                 cmd = F_SETLKW64;
    2487             :                 file_lock->fl_flags |= FL_OFDLCK;
    2488             :                 file_lock->fl_owner = filp;
    2489             :                 fallthrough;
    2490             :         case F_SETLKW64:
    2491             :                 file_lock->fl_flags |= FL_SLEEP;
    2492             :         }
    2493             : 
    2494             :         error = do_lock_file_wait(filp, cmd, file_lock);
    2495             : 
    2496             :         /*
    2497             :          * Attempt to detect a close/fcntl race and recover by releasing the
    2498             :          * lock that was just acquired. There is no need to do that when we're
    2499             :          * unlocking though, or for OFD locks.
    2500             :          */
    2501             :         if (!error && file_lock->fl_type != F_UNLCK &&
    2502             :             !(file_lock->fl_flags & FL_OFDLCK)) {
    2503             :                 struct files_struct *files = current->files;
    2504             :                 /*
    2505             :                  * We need that spin_lock here - it prevents reordering between
    2506             :                  * update of i_flctx->flc_posix and check for it done in
    2507             :                  * close(). rcu_read_lock() wouldn't do.
    2508             :                  */
    2509             :                 spin_lock(&files->file_lock);
    2510             :                 f = files_lookup_fd_locked(files, fd);
    2511             :                 spin_unlock(&files->file_lock);
    2512             :                 if (f != filp) {
    2513             :                         file_lock->fl_type = F_UNLCK;
    2514             :                         error = do_lock_file_wait(filp, cmd, file_lock);
    2515             :                         WARN_ON_ONCE(error);
    2516             :                         error = -EBADF;
    2517             :                 }
    2518             :         }
    2519             : out:
    2520             :         locks_free_lock(file_lock);
    2521             :         return error;
    2522             : }
    2523             : #endif /* BITS_PER_LONG == 32 */
    2524             : 
    2525             : /*
    2526             :  * This function is called when the file is being removed
    2527             :  * from the task's fd array.  POSIX locks belonging to this task
    2528             :  * are deleted at this time.
    2529             :  */
    2530   627380957 : void locks_remove_posix(struct file *filp, fl_owner_t owner)
    2531             : {
    2532   627380957 :         int error;
    2533   627380957 :         struct inode *inode = file_inode(filp);
    2534   627380957 :         struct file_lock lock;
    2535   627380957 :         struct file_lock_context *ctx;
    2536             : 
    2537             :         /*
    2538             :          * If there are no locks held on this file, we don't need to call
    2539             :          * posix_lock_file().  Another process could be setting a lock on this
    2540             :          * file at the same time, but we wouldn't remove that lock anyway.
    2541             :          */
    2542   627380957 :         ctx = locks_inode_context(inode);
    2543   627461844 :         if (!ctx || list_empty(&ctx->flc_posix))
    2544   627395817 :                 return;
    2545             : 
    2546       66027 :         locks_init_lock(&lock);
    2547       66027 :         lock.fl_type = F_UNLCK;
    2548       66027 :         lock.fl_flags = FL_POSIX | FL_CLOSE;
    2549       66027 :         lock.fl_start = 0;
    2550       66027 :         lock.fl_end = OFFSET_MAX;
    2551       66027 :         lock.fl_owner = owner;
    2552       66027 :         lock.fl_pid = current->tgid;
    2553       66027 :         lock.fl_file = filp;
    2554       66027 :         lock.fl_ops = NULL;
    2555       66027 :         lock.fl_lmops = NULL;
    2556             : 
    2557       66027 :         error = vfs_lock_file(filp, F_SETLK, &lock, NULL);
    2558             : 
    2559       66027 :         if (lock.fl_ops && lock.fl_ops->fl_release_private)
    2560        1464 :                 lock.fl_ops->fl_release_private(&lock);
    2561       66027 :         trace_locks_remove_posix(inode, &lock, error);
    2562             : }
    2563             : EXPORT_SYMBOL(locks_remove_posix);
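
The semantics implemented here have a well-known userspace consequence, sketched below (illustrative only, error handling omitted): a process's traditional POSIX locks on a file are released as soon as any of its descriptors for that file is closed, even a descriptor that was never used for locking. OFD locks, being owned by the open file description, do not behave this way.

#include <fcntl.h>
#include <unistd.h>

static void posix_lock_close_gotcha(const char *path)
{
        int fd1 = open(path, O_RDWR);
        int fd2 = open(path, O_RDWR);   /* second descriptor, same file */
        struct flock fl = { .l_type = F_WRLCK, .l_whence = SEEK_SET };

        fcntl(fd1, F_SETLK, &fl);       /* lock the whole file via fd1 */
        close(fd2);                     /* ...and this drops the lock taken via fd1 */
        close(fd1);
}
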
    2564             : 
    2565             : /* The i_flctx must be valid when calling into here */
    2566             : static void
    2567      168988 : locks_remove_flock(struct file *filp, struct file_lock_context *flctx)
    2568             : {
    2569      168988 :         struct file_lock fl;
    2570      168988 :         struct inode *inode = file_inode(filp);
    2571             : 
    2572      168988 :         if (list_empty(&flctx->flc_flock))
    2573       72131 :                 return;
    2574             : 
    2575       96857 :         flock_make_lock(filp, &fl, F_UNLCK);
    2576       96857 :         fl.fl_flags |= FL_CLOSE;
    2577             : 
    2578       96857 :         if (filp->f_op->flock)
    2579           0 :                 filp->f_op->flock(filp, F_SETLKW, &fl);
    2580             :         else
    2581       96857 :                 flock_lock_inode(inode, &fl);
    2582             : 
    2583       96856 :         if (fl.fl_ops && fl.fl_ops->fl_release_private)
    2584           0 :                 fl.fl_ops->fl_release_private(&fl);
    2585             : }
    2586             : 
    2587             : /* The i_flctx must be valid when calling into here */
    2588             : static void
    2589      168987 : locks_remove_lease(struct file *filp, struct file_lock_context *ctx)
    2590             : {
    2591      168987 :         struct file_lock *fl, *tmp;
    2592      168987 :         LIST_HEAD(dispose);
    2593             : 
    2594      168987 :         if (list_empty(&ctx->flc_lease))
    2595      168983 :                 return;
    2596             : 
    2597           4 :         percpu_down_read(&file_rwsem);
    2598           4 :         spin_lock(&ctx->flc_lock);
    2599           8 :         list_for_each_entry_safe(fl, tmp, &ctx->flc_lease, fl_list)
    2600           4 :                 if (filp == fl->fl_file)
    2601           2 :                         lease_modify(fl, F_UNLCK, &dispose);
    2602           4 :         spin_unlock(&ctx->flc_lock);
    2603           4 :         percpu_up_read(&file_rwsem);
    2604             : 
    2605           4 :         locks_dispose_list(&dispose);
    2606             : }
    2607             : 
    2608             : /*
    2609             :  * This function is called on the last close of an open file.
    2610             :  */
    2611   576346287 : void locks_remove_file(struct file *filp)
    2612             : {
    2613   576346287 :         struct file_lock_context *ctx;
    2614             : 
    2615   576346287 :         ctx = locks_inode_context(file_inode(filp));
    2616   576383575 :         if (!ctx)
    2617             :                 return;
    2618             : 
    2619             :         /* remove any OFD locks */
    2620      168989 :         locks_remove_posix(filp, filp);
    2621             : 
    2622             :         /* remove flock locks */
    2623      168988 :         locks_remove_flock(filp, ctx);
    2624             : 
    2625             :         /* remove any leases */
    2626      168987 :         locks_remove_lease(filp, ctx);
    2627             : 
    2628      168987 :         spin_lock(&ctx->flc_lock);
    2629      168989 :         locks_check_ctx_file_list(filp, &ctx->flc_posix, "POSIX");
    2630      168989 :         locks_check_ctx_file_list(filp, &ctx->flc_flock, "FLOCK");
    2631      168989 :         locks_check_ctx_file_list(filp, &ctx->flc_lease, "LEASE");
    2632      168989 :         spin_unlock(&ctx->flc_lock);
    2633             : }
    2634             : 
    2635             : /**
    2636             :  * vfs_cancel_lock - file byte range unblock lock
    2637             :  * @filp: The file to apply the unblock to
    2638             :  * @fl: The lock to be unblocked
    2639             :  *
    2640             :  * Used by lock managers to cancel blocked requests
    2641             :  */
    2642           0 : int vfs_cancel_lock(struct file *filp, struct file_lock *fl)
    2643             : {
    2644           0 :         WARN_ON_ONCE(filp != fl->fl_file);
    2645           0 :         if (filp->f_op->lock)
    2646           0 :                 return filp->f_op->lock(filp, F_CANCELLK, fl);
    2647             :         return 0;
    2648             : }
    2649             : EXPORT_SYMBOL_GPL(vfs_cancel_lock);
    2650             : 
    2651             : /**
    2652             :  * vfs_inode_has_locks - are any file locks held on @inode?
    2653             :  * @inode: inode to check for locks
    2654             :  *
    2655             :  * Return true if there are any FL_POSIX or FL_FLOCK locks currently
    2656             :  * set on @inode.
    2657             :  */
    2658           0 : bool vfs_inode_has_locks(struct inode *inode)
    2659             : {
    2660           0 :         struct file_lock_context *ctx;
    2661           0 :         bool ret;
    2662             : 
    2663           0 :         ctx = locks_inode_context(inode);
    2664           0 :         if (!ctx)
    2665             :                 return false;
    2666             : 
    2667           0 :         spin_lock(&ctx->flc_lock);
    2668           0 :         ret = !list_empty(&ctx->flc_posix) || !list_empty(&ctx->flc_flock);
    2669           0 :         spin_unlock(&ctx->flc_lock);
    2670           0 :         return ret;
    2671             : }
    2672             : EXPORT_SYMBOL_GPL(vfs_inode_has_locks);
    2673             : 
    2674             : #ifdef CONFIG_PROC_FS
    2675             : #include <linux/proc_fs.h>
    2676             : #include <linux/seq_file.h>
    2677             : 
    2678             : struct locks_iterator {
    2679             :         int     li_cpu;
    2680             :         loff_t  li_pos;
    2681             : };
    2682             : 
    2683          12 : static void lock_get_status(struct seq_file *f, struct file_lock *fl,
    2684             :                             loff_t id, char *pfx, int repeat)
    2685             : {
    2686          12 :         struct inode *inode = NULL;
    2687          12 :         unsigned int fl_pid;
    2688          12 :         struct pid_namespace *proc_pidns = proc_pid_ns(file_inode(f->file)->i_sb);
    2689          12 :         int type;
    2690             : 
    2691          12 :         fl_pid = locks_translate_pid(fl, proc_pidns);
    2692             :         /*
    2693             :          * If the lock owner is dead (and its pid has been freed) or not visible
    2694             :          * in the current pidns, zero is shown as the pid value. Check lock info
    2695             :          * from init_pid_ns to get the saved lock pid value.
    2696             :          */
    2697             : 
    2698          12 :         if (fl->fl_file != NULL)
    2699          12 :                 inode = file_inode(fl->fl_file);
    2700             : 
    2701          12 :         seq_printf(f, "%lld: ", id);
    2702             : 
    2703          12 :         if (repeat)
    2704           0 :                 seq_printf(f, "%*s", repeat - 1 + (int)strlen(pfx), pfx);
    2705             : 
    2706          12 :         if (IS_POSIX(fl)) {
    2707           4 :                 if (fl->fl_flags & FL_ACCESS)
    2708           0 :                         seq_puts(f, "ACCESS");
    2709           4 :                 else if (IS_OFDLCK(fl))
    2710           0 :                         seq_puts(f, "OFDLCK");
    2711             :                 else
    2712           4 :                         seq_puts(f, "POSIX ");
    2713             : 
    2714           8 :                 seq_printf(f, " %s ",
    2715             :                              (inode == NULL) ? "*NOINODE*" : "ADVISORY ");
    2716           8 :         } else if (IS_FLOCK(fl)) {
    2717           8 :                 seq_puts(f, "FLOCK  ADVISORY  ");
    2718           0 :         } else if (IS_LEASE(fl)) {
    2719           0 :                 if (fl->fl_flags & FL_DELEG)
    2720           0 :                         seq_puts(f, "DELEG  ");
    2721             :                 else
    2722           0 :                         seq_puts(f, "LEASE  ");
    2723             : 
    2724           0 :                 if (lease_breaking(fl))
    2725           0 :                         seq_puts(f, "BREAKING  ");
    2726           0 :                 else if (fl->fl_file)
    2727           0 :                         seq_puts(f, "ACTIVE    ");
    2728             :                 else
    2729           0 :                         seq_puts(f, "BREAKER   ");
    2730             :         } else {
    2731           0 :                 seq_puts(f, "UNKNOWN UNKNOWN  ");
    2732             :         }
    2733          12 :         type = IS_LEASE(fl) ? target_leasetype(fl) : fl->fl_type;
    2734             : 
    2735          12 :         seq_printf(f, "%s ", (type == F_WRLCK) ? "WRITE" :
    2736           0 :                              (type == F_RDLCK) ? "READ" : "UNLCK");
    2737          12 :         if (inode) {
    2738             :                 /* userspace relies on this representation of dev_t */
    2739          12 :                 seq_printf(f, "%d %02x:%02x:%lu ", fl_pid,
    2740          12 :                                 MAJOR(inode->i_sb->s_dev),
    2741             :                                 MINOR(inode->i_sb->s_dev), inode->i_ino);
    2742             :         } else {
    2743           0 :                 seq_printf(f, "%d <none>:0 ", fl_pid);
    2744             :         }
    2745          12 :         if (IS_POSIX(fl)) {
    2746           4 :                 if (fl->fl_end == OFFSET_MAX)
    2747           4 :                         seq_printf(f, "%Ld EOF\n", fl->fl_start);
    2748             :                 else
    2749           0 :                         seq_printf(f, "%Ld %Ld\n", fl->fl_start, fl->fl_end);
    2750             :         } else {
    2751           8 :                 seq_puts(f, "0 EOF\n");
    2752             :         }
    2753          12 : }
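
For reference, the lines this function produces in /proc/locks look roughly as follows (pid, device and inode numbers are example values); the "-> " entries emitted by locks_show() below are requests blocked on the lock listed above them:

        1: POSIX  ADVISORY  WRITE 1193 08:02:131090 0 EOF
        2: FLOCK  ADVISORY  WRITE 1194 08:02:131091 0 EOF
        2: -> FLOCK  ADVISORY  WRITE 1195 08:02:131091 0 EOF
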
    2754             : 
    2755          12 : static struct file_lock *get_next_blocked_member(struct file_lock *node)
    2756             : {
    2757          12 :         struct file_lock *tmp;
    2758             : 
    2759             :         /* NULL node or root node */
    2760          12 :         if (node == NULL || node->fl_blocker == NULL)
    2761             :                 return NULL;
    2762             : 
    2763             :         /* Next member in the linked list could be itself */
    2764           0 :         tmp = list_next_entry(node, fl_blocked_member);
    2765           0 :         if (list_entry_is_head(tmp, &node->fl_blocker->fl_blocked_requests, fl_blocked_member)
    2766           0 :                 || tmp == node) {
    2767           0 :                 return NULL;
    2768             :         }
    2769             : 
    2770             :         return tmp;
    2771             : }
    2772             : 
    2773          12 : static int locks_show(struct seq_file *f, void *v)
    2774             : {
    2775          12 :         struct locks_iterator *iter = f->private;
    2776          12 :         struct file_lock *cur, *tmp;
    2777          12 :         struct pid_namespace *proc_pidns = proc_pid_ns(file_inode(f->file)->i_sb);
    2778          12 :         int level = 0;
    2779             : 
    2780          12 :         cur = hlist_entry(v, struct file_lock, fl_link);
    2781             : 
    2782          12 :         if (locks_translate_pid(cur, proc_pidns) == 0)
    2783             :                 return 0;
    2784             : 
    2785             :         /* View this cross-linked list as a binary tree: the first member of fl_blocked_requests
    2786             :          * is the left child of the current node, the next sibling in fl_blocked_member is the
    2787             :          * right child, and we can also get the parent of the current node from fl_blocker, so the
    2788             :          * problem becomes a traversal of a binary tree
    2789             :          */
    2790          24 :         while (cur != NULL) {
    2791          12 :                 if (level)
    2792           0 :                         lock_get_status(f, cur, iter->li_pos, "-> ", level);
    2793             :                 else
    2794          12 :                         lock_get_status(f, cur, iter->li_pos, "", level);
    2795             : 
    2796          12 :                 if (!list_empty(&cur->fl_blocked_requests)) {
    2797             :                         /* Turn left */
    2798           0 :                         cur = list_first_entry_or_null(&cur->fl_blocked_requests,
    2799             :                                 struct file_lock, fl_blocked_member);
    2800           0 :                         level++;
    2801             :                 } else {
    2802             :                         /* Turn right */
    2803          12 :                         tmp = get_next_blocked_member(cur);
    2804             :                         /* Fall back to parent node */
    2805          12 :                         while (tmp == NULL && cur->fl_blocker != NULL) {
    2806           0 :                                 cur = cur->fl_blocker;
    2807           0 :                                 level--;
    2808           0 :                                 tmp = get_next_blocked_member(cur);
    2809             :                         }
    2810             :                         cur = tmp;
    2811             :                 }
    2812             :         }
    2813             : 
    2814             :         return 0;
    2815             : }
    2816             : 
    2817           0 : static void __show_fd_locks(struct seq_file *f,
    2818             :                         struct list_head *head, int *id,
    2819             :                         struct file *filp, struct files_struct *files)
    2820             : {
    2821           0 :         struct file_lock *fl;
    2822             : 
    2823           0 :         list_for_each_entry(fl, head, fl_list) {
    2824             : 
    2825           0 :                 if (filp != fl->fl_file)
    2826           0 :                         continue;
    2827           0 :                 if (fl->fl_owner != files &&
    2828             :                     fl->fl_owner != filp)
    2829           0 :                         continue;
    2830             : 
    2831           0 :                 (*id)++;
    2832           0 :                 seq_puts(f, "lock:\t");
    2833           0 :                 lock_get_status(f, fl, *id, "", 0);
    2834             :         }
    2835           0 : }
    2836             : 
    2837           0 : void show_fd_locks(struct seq_file *f,
    2838             :                   struct file *filp, struct files_struct *files)
    2839             : {
    2840           0 :         struct inode *inode = file_inode(filp);
    2841           0 :         struct file_lock_context *ctx;
    2842           0 :         int id = 0;
    2843             : 
    2844           0 :         ctx = locks_inode_context(inode);
    2845           0 :         if (!ctx)
    2846           0 :                 return;
    2847             : 
    2848           0 :         spin_lock(&ctx->flc_lock);
    2849           0 :         __show_fd_locks(f, &ctx->flc_flock, &id, filp, files);
    2850           0 :         __show_fd_locks(f, &ctx->flc_posix, &id, filp, files);
    2851           0 :         __show_fd_locks(f, &ctx->flc_lease, &id, filp, files);
    2852           0 :         spin_unlock(&ctx->flc_lock);
    2853             : }
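
These entries surface in /proc/<pid>/fdinfo/<fd>; an example line (values illustrative) looks like:

        lock:   1: POSIX  ADVISORY  WRITE 1193 08:02:131090 0 EOF
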
    2854             : 
    2855           6 : static void *locks_start(struct seq_file *f, loff_t *pos)
    2856             :         __acquires(&blocked_lock_lock)
    2857             : {
    2858           6 :         struct locks_iterator *iter = f->private;
    2859             : 
    2860           6 :         iter->li_pos = *pos + 1;
    2861           6 :         percpu_down_write(&file_rwsem);
    2862           6 :         spin_lock(&blocked_lock_lock);
    2863           6 :         return seq_hlist_start_percpu(&file_lock_list.hlist, &iter->li_cpu, *pos);
    2864             : }
    2865             : 
    2866          12 : static void *locks_next(struct seq_file *f, void *v, loff_t *pos)
    2867             : {
    2868          12 :         struct locks_iterator *iter = f->private;
    2869             : 
    2870          12 :         ++iter->li_pos;
    2871          12 :         return seq_hlist_next_percpu(v, &file_lock_list.hlist, &iter->li_cpu, pos);
    2872             : }
    2873             : 
    2874           6 : static void locks_stop(struct seq_file *f, void *v)
    2875             :         __releases(&blocked_lock_lock)
    2876             : {
    2877           6 :         spin_unlock(&blocked_lock_lock);
    2878           6 :         percpu_up_write(&file_rwsem);
    2879           6 : }
    2880             : 
    2881             : static const struct seq_operations locks_seq_operations = {
    2882             :         .start  = locks_start,
    2883             :         .next   = locks_next,
    2884             :         .stop   = locks_stop,
    2885             :         .show   = locks_show,
    2886             : };
    2887             : 
    2888           0 : static int __init proc_locks_init(void)
    2889             : {
    2890           0 :         proc_create_seq_private("locks", 0, NULL, &locks_seq_operations,
    2891             :                         sizeof(struct locks_iterator), NULL);
    2892           0 :         return 0;
    2893             : }
    2894             : fs_initcall(proc_locks_init);
    2895             : #endif
    2896             : 
    2897           0 : static int __init filelock_init(void)
    2898             : {
    2899           0 :         int i;
    2900             : 
    2901           0 :         flctx_cache = kmem_cache_create("file_lock_ctx",
    2902             :                         sizeof(struct file_lock_context), 0, SLAB_PANIC, NULL);
    2903             : 
    2904           0 :         filelock_cache = kmem_cache_create("file_lock_cache",
    2905             :                         sizeof(struct file_lock), 0, SLAB_PANIC, NULL);
    2906             : 
    2907           0 :         for_each_possible_cpu(i) {
    2908           0 :                 struct file_lock_list_struct *fll = per_cpu_ptr(&file_lock_list, i);
    2909             : 
    2910           0 :                 spin_lock_init(&fll->lock);
    2911           0 :                 INIT_HLIST_HEAD(&fll->hlist);
    2912             :         }
    2913             : 
    2914           0 :         lease_notifier_chain_init();
    2915           0 :         return 0;
    2916             : }
    2917             : core_initcall(filelock_init);

Generated by: LCOV version 1.14