LCOV - code coverage report
Current view:  top level - fs/xfs - xfs_pwork.c (source / functions)
Test:          fstests of 6.5.0-rc3-acha @ Mon Jul 31 20:08:06 PDT 2023
Date:          2023-07-31 20:08:07

                 Hit   Total  Coverage
Lines:            38      39    97.4 %
Functions:         5       5   100.0 %

          Line data    Source code
       1             : // SPDX-License-Identifier: GPL-2.0-or-later
       2             : /*
       3             :  * Copyright (C) 2019 Oracle.  All Rights Reserved.
       4             :  * Author: Darrick J. Wong <darrick.wong@oracle.com>
       5             :  */
       6             : #include "xfs.h"
       7             : #include "xfs_fs.h"
       8             : #include "xfs_shared.h"
       9             : #include "xfs_format.h"
      10             : #include "xfs_log_format.h"
      11             : #include "xfs_trans_resv.h"
      12             : #include "xfs_mount.h"
      13             : #include "xfs_trace.h"
      14             : #include "xfs_sysctl.h"
      15             : #include "xfs_pwork.h"
      16             : #include <linux/nmi.h>
      17             : 
      18             : /*
      19             :  * Parallel Work Queue
      20             :  * ===================
      21             :  *
      22             :  * Abstract away the details of running a large and "obviously" parallelizable
      23             :  * task across multiple CPUs.  Callers initialize the pwork control object with
      24             :  * a desired level of parallelization and a work function.  Next, they embed
      25             :  * struct xfs_pwork in whatever structure they use to pass work context to a
      26             :  * worker thread and queue that pwork.  The work function will be passed the
      27             :  * pwork item when it is run (from process context) and any returned error will
      28             :  * be recorded in xfs_pwork_ctl.error.  Work functions should check for errors
      29             :  * and abort if necessary; the non-zeroness of xfs_pwork_ctl.error does not
      30             :  * stop workqueue item processing.
      31             :  *
      32             :  * This is the rough equivalent of the xfsprogs workqueue code, though we can't
      33             :  * reuse that name here.
      34             :  */
      35             : 
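The overview above describes the caller-side contract, but this file only contains the dispatch side. Below is a minimal sketch of what a caller would embed and pass in; struct xfs_example_work, xfs_example_work_fn, and the xfs_agnumber_t payload are illustrative names and are not part of xfs_pwork.c (an in-tree user of this pattern is the inode walk code). The sketch assumes <linux/slab.h> for kfree() and gives each work item ownership of its own context.

    /* Hypothetical per-item context embedding struct xfs_pwork. */
    struct xfs_example_work {
            struct xfs_pwork        pwork;  /* embedded so container_of() can recover us */
            xfs_agnumber_t          agno;   /* per-item payload (illustrative) */
    };

    /* Hypothetical work function; matches how pctl->work_fn is called above. */
    static int
    xfs_example_work_fn(
            struct xfs_mount        *mp,
            struct xfs_pwork        *pwork)
    {
            struct xfs_example_work *ew;
            int                     error = 0;

            ew = container_of(pwork, struct xfs_example_work, pwork);

            /*
             * A nonzero xfs_pwork_ctl.error does not stop item processing,
             * so check it here and bail out early, as the overview advises.
             */
            if (pwork->pctl->error)
                    goto out_free;

            /* ... do the actual work for ew->agno, setting error on failure ... */

    out_free:
            kfree(ew);      /* in this sketch each item owns its context */
            return error;
    }
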
      36             : /* Invoke our caller's function. */
      37             : static void
      38       16393 : xfs_pwork_work(
      39             :         struct work_struct      *work)
      40             : {
      41       16393 :         struct xfs_pwork        *pwork;
      42       16393 :         struct xfs_pwork_ctl    *pctl;
      43       16393 :         int                     error;
      44             : 
      45       16393 :         pwork = container_of(work, struct xfs_pwork, work);
      46       16393 :         pctl = pwork->pctl;
      47       16393 :         error = pctl->work_fn(pctl->mp, pwork);
      48       16387 :         if (error && !pctl->error)
      49           0 :                 pctl->error = error;
      50       32760 :         if (atomic_dec_and_test(&pctl->nr_work))
      51        2967 :                 wake_up(&pctl->poll_wait);
      52       16373 : }
      53             : 
      54             : /*
      55             :  * Set up control data for parallel work.  @work_fn is the function that will
      56             :  * be called.  @tag will be written into the kernel threads.  @nr_threads is
      57             :  * the level of parallelism desired, or 0 for no limit.
      58             :  */
      59             : int
      60        2965 : xfs_pwork_init(
      61             :         struct xfs_mount        *mp,
      62             :         struct xfs_pwork_ctl    *pctl,
      63             :         xfs_pwork_work_fn       work_fn,
      64             :         const char              *tag)
      65             : {
      66        2965 :         unsigned int            nr_threads = 0;
      67             : 
      68             : #ifdef DEBUG
      69        2965 :         if (xfs_globals.pwork_threads >= 0)
      70             :                 nr_threads = xfs_globals.pwork_threads;
      71             : #endif
      72        2965 :         trace_xfs_pwork_init(mp, nr_threads, current->pid);
      73             : 
      74        2965 :         pctl->wq = alloc_workqueue("%s-%d",
      75             :                         WQ_UNBOUND | WQ_SYSFS | WQ_FREEZABLE, nr_threads, tag,
      76             :                         current->pid);
      77        2965 :         if (!pctl->wq)
      78             :                 return -ENOMEM;
      79        2965 :         pctl->work_fn = work_fn;
      80        2965 :         pctl->error = 0;
      81        2965 :         pctl->mp = mp;
      82        2965 :         atomic_set(&pctl->nr_work, 0);
      83        2965 :         init_waitqueue_head(&pctl->poll_wait);
      84             : 
      85        2965 :         return 0;
      86             : }
      87             : 
      88             : /* Queue some parallel work. */
      89             : void
      90       16393 : xfs_pwork_queue(
      91             :         struct xfs_pwork_ctl    *pctl,
      92             :         struct xfs_pwork        *pwork)
      93             : {
      94       16393 :         INIT_WORK(&pwork->work, xfs_pwork_work);
      95       16393 :         pwork->pctl = pctl;
      96       16393 :         atomic_inc(&pctl->nr_work);
      97       16393 :         queue_work(pctl->wq, &pwork->work);
      98       16393 : }
      99             : 
     100             : /* Wait for the work to finish and tear down the control structure. */
     101             : int
     102        2965 : xfs_pwork_destroy(
     103             :         struct xfs_pwork_ctl    *pctl)
     104             : {
     105        2965 :         destroy_workqueue(pctl->wq);
     106        2965 :         pctl->wq = NULL;
     107        2965 :         return pctl->error;
     108             : }
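
Putting xfs_pwork_init(), xfs_pwork_queue(), and xfs_pwork_destroy() together, here is a hedged sketch of the full lifecycle for a caller that can simply wait inside xfs_pwork_destroy() (destroy_workqueue() finishes any pending items before the first recorded error is returned). xfs_example_run(), the per-AG loop, and the kzalloc()/GFP_KERNEL allocation are illustrative assumptions, not code from this file.

    static int
    xfs_example_run(
            struct xfs_mount        *mp)
    {
            struct xfs_pwork_ctl    pctl;
            xfs_agnumber_t          agno;
            int                     error;

            /* The "%s-%d" format above names the workqueue "example-<pid>". */
            error = xfs_pwork_init(mp, &pctl, xfs_example_work_fn, "example");
            if (error)
                    return error;

            for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) {
                    struct xfs_example_work *ew;

                    ew = kzalloc(sizeof(*ew), GFP_KERNEL);
                    if (!ew) {
                            error = -ENOMEM;
                            break;
                    }
                    ew->agno = agno;
                    xfs_pwork_queue(&pctl, &ew->pwork);
            }

            /*
             * destroy_workqueue() finishes anything still queued; report a
             * local failure ahead of any error the workers recorded.
             */
            if (error) {
                    xfs_pwork_destroy(&pctl);
                    return error;
            }
            return xfs_pwork_destroy(&pctl);
    }
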
     109             : 
     110             : /*
     111             :  * Wait for the work to finish by polling completion status and touch the soft
     112             :  * lockup watchdog.  This is for callers such as mount which hold locks.
     113             :  */
     114             : void
     115        2965 : xfs_pwork_poll(
     116             :         struct xfs_pwork_ctl    *pctl)
     117             : {
     118        8901 :         while (wait_event_timeout(pctl->poll_wait,
     119        5936 :                                 atomic_read(&pctl->nr_work) == 0, HZ) == 0)
     120           3 :                 touch_softlockup_watchdog();
     121        2965 : }
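
For the lock-holding callers that the comment above mentions (e.g. during mount), the only change from the previous sketch is a poll step before teardown: wait_event_timeout() returns 0 each time the HZ-jiffy timeout expires with nr_work still nonzero, so the soft lockup watchdog is touched roughly once per second until the final worker wakes poll_wait. A hypothetical variant:

    static int
    xfs_example_run_polled(
            struct xfs_mount        *mp,
            struct xfs_pwork_ctl    *pctl)
    {
            /* ... queue work items as in the previous sketch ... */

            /*
             * Wait here first so the watchdog is touched every second,
             * then tear down and collect the first recorded error.
             */
            xfs_pwork_poll(pctl);
            return xfs_pwork_destroy(pctl);
    }
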

Generated by: LCOV version 1.14