LCOV - code coverage report
Current view: top level - fs/xfs - xfs_pwork.c (source / functions)
Test: fstests of 6.5.0-rc4-xfsx @ Mon Jul 31 20:08:34 PDT 2023
Date: 2023-07-31 20:08:34

                   Hit   Total   Coverage
Lines:              38      39     97.4 %
Functions:           5       5    100.0 %

          Line data    Source code
       1             : // SPDX-License-Identifier: GPL-2.0-or-later
       2             : /*
       3             :  * Copyright (C) 2019 Oracle.  All Rights Reserved.
       4             :  * Author: Darrick J. Wong <darrick.wong@oracle.com>
       5             :  */
       6             : #include "xfs.h"
       7             : #include "xfs_fs.h"
       8             : #include "xfs_shared.h"
       9             : #include "xfs_format.h"
      10             : #include "xfs_log_format.h"
      11             : #include "xfs_trans_resv.h"
      12             : #include "xfs_mount.h"
      13             : #include "xfs_trace.h"
      14             : #include "xfs_sysctl.h"
      15             : #include "xfs_pwork.h"
      16             : #include <linux/nmi.h>
      17             : 
      18             : /*
      19             :  * Parallel Work Queue
      20             :  * ===================
      21             :  *
      22             :  * Abstract away the details of running a large and "obviously" parallelizable
      23             :  * task across multiple CPUs.  Callers initialize the pwork control object with
      24             :  * a desired level of parallelization and a work function.  Next, they embed
      25             :  * struct xfs_pwork in whatever structure they use to pass work context to a
      26             :  * worker thread and queue that pwork.  The work function will be passed the
      27             :  * pwork item when it is run (from process context) and any returned error will
      28             :  * be recorded in xfs_pwork_ctl.error.  Work functions should check for errors
      29             :  * and abort if necessary; the non-zeroness of xfs_pwork_ctl.error does not
      30             :  * stop workqueue item processing.
      31             :  *
      32             :  * This is the rough equivalent of the xfsprogs workqueue code, though we can't
      33             :  * reuse that name here.
      34             :  */
      35             : 
      36             : /* Invoke our caller's function. */
      37             : static void
      38       95570 : xfs_pwork_work(
      39             :         struct work_struct      *work)
      40             : {
      41       95570 :         struct xfs_pwork        *pwork;
      42       95570 :         struct xfs_pwork_ctl    *pctl;
      43       95570 :         int                     error;
      44             : 
      45       95570 :         pwork = container_of(work, struct xfs_pwork, work);
      46       95570 :         pctl = pwork->pctl;
      47       95570 :         error = pctl->work_fn(pctl->mp, pwork);
      48       95471 :         if (error && !pctl->error)
      49           0 :                 pctl->error = error;
      50       95471 :         if (atomic_dec_and_test(&pctl->nr_work))
      51       14201 :                 wake_up(&pctl->poll_wait);
      52       95491 : }
      53             : 
      54             : /*
      55             :  * Set up control data for parallel work.  @work_fn is the function that will
      56             :  * be called.  @tag will be written into the kernel threads.  @nr_threads is
      57             :  * the level of parallelism desired, or 0 for no limit.
      58             :  */
      59             : int
      60       14200 : xfs_pwork_init(
      61             :         struct xfs_mount        *mp,
      62             :         struct xfs_pwork_ctl    *pctl,
      63             :         xfs_pwork_work_fn       work_fn,
      64             :         const char              *tag)
      65             : {
      66       14200 :         unsigned int            nr_threads = 0;
      67             : 
      68             : #ifdef DEBUG
      69       14200 :         if (xfs_globals.pwork_threads >= 0)
      70             :                 nr_threads = xfs_globals.pwork_threads;
      71             : #endif
      72       14200 :         trace_xfs_pwork_init(mp, nr_threads, current->pid);
      73             : 
      74       14200 :         pctl->wq = alloc_workqueue("%s-%d",
      75             :                         WQ_UNBOUND | WQ_SYSFS | WQ_FREEZABLE, nr_threads, tag,
      76             :                         current->pid);
      77       14200 :         if (!pctl->wq)
      78             :                 return -ENOMEM;
      79       14200 :         pctl->work_fn = work_fn;
      80       14200 :         pctl->error = 0;
      81       14200 :         pctl->mp = mp;
      82       14200 :         atomic_set(&pctl->nr_work, 0);
      83       14200 :         init_waitqueue_head(&pctl->poll_wait);
      84             : 
      85       14200 :         return 0;
      86             : }
      87             : 
      88             : /* Queue some parallel work. */
      89             : void
      90       95570 : xfs_pwork_queue(
      91             :         struct xfs_pwork_ctl    *pctl,
      92             :         struct xfs_pwork        *pwork)
      93             : {
      94       95570 :         INIT_WORK(&pwork->work, xfs_pwork_work);
      95       95570 :         pwork->pctl = pctl;
      96       95570 :         atomic_inc(&pctl->nr_work);
      97       95570 :         queue_work(pctl->wq, &pwork->work);
      98       95570 : }
      99             : 
     100             : /* Wait for the work to finish and tear down the control structure. */
     101             : int
     102       14200 : xfs_pwork_destroy(
     103             :         struct xfs_pwork_ctl    *pctl)
     104             : {
     105       14200 :         destroy_workqueue(pctl->wq);
     106       14200 :         pctl->wq = NULL;
     107       14200 :         return pctl->error;
     108             : }
     109             : 
     110             : /*
     111             :  * Wait for the work to finish by polling completion status and touch the soft
     112             :  * lockup watchdog.  This is for callers such as mount which hold locks.
     113             :  */
     114             : void
     115       14200 : xfs_pwork_poll(
     116             :         struct xfs_pwork_ctl    *pctl)
     117             : {
     118       42602 :         while (wait_event_timeout(pctl->poll_wait,
     119       28407 :                                 atomic_read(&pctl->nr_work) == 0, HZ) == 0)
     120           4 :                 touch_softlockup_watchdog();
     121       14200 : }
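
As a rough illustration of the calling pattern described in the header comment of xfs_pwork.c, the sketch below shows a hypothetical caller. Everything named xfs_example_* is invented for illustration, and the per-AG loop and the kzalloc()/GFP_KERNEL allocation are assumptions; only xfs_pwork_init(), xfs_pwork_queue(), xfs_pwork_poll(), xfs_pwork_destroy(), struct xfs_pwork, and struct xfs_pwork_ctl come from the covered code. This is a minimal sketch of the usage pattern, not the kernel's actual caller.

/* Mirrors the include order of the covered file. */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_pwork.h"

/* Hypothetical per-item context; struct xfs_pwork must be embedded. */
struct xfs_example_work {
	struct xfs_pwork	pwork;	/* handed back to the work function */
	xfs_agnumber_t		agno;	/* whatever payload the caller needs */
};

/*
 * Hypothetical work function, run from process context.  A nonzero return
 * value is recorded in xfs_pwork_ctl.error; per the header comment, the
 * work function itself checks for a prior error and bails out early.
 */
static int
xfs_example_work_fn(
	struct xfs_mount	*mp,
	struct xfs_pwork	*pwork)
{
	struct xfs_example_work	*ew;
	int			error = 0;

	ew = container_of(pwork, struct xfs_example_work, pwork);
	if (pwork->pctl->error)
		goto out;	/* another worker already failed; abort */

	/* ... do the per-AG work for ew->agno, setting error on failure ... */

out:
	kfree(ew);
	return error;
}

/* Hypothetical driver: fan one pwork item out per AG, then wait. */
static int
xfs_example_run(
	struct xfs_mount	*mp)
{
	struct xfs_pwork_ctl	pctl;
	struct xfs_example_work	*ew;
	xfs_agnumber_t		agno;
	int			error = 0;
	int			error2;

	error2 = xfs_pwork_init(mp, &pctl, xfs_example_work_fn, "xfs_example");
	if (error2)
		return error2;

	for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) {
		ew = kzalloc(sizeof(*ew), GFP_KERNEL);
		if (!ew) {
			error = -ENOMEM;
			break;
		}
		ew->agno = agno;
		xfs_pwork_queue(&pctl, &ew->pwork);
	}

	/* Wait without tripping the soft lockup watchdog, then tear down. */
	xfs_pwork_poll(&pctl);
	error2 = xfs_pwork_destroy(&pctl);	/* first recorded worker error */
	return error ? error : error2;
}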

Generated by: LCOV version 1.14