LCOV - code coverage report
Current view: top level - fs/xfs - xfs_log.c (source / functions)
Test: fstests of 6.5.0-rc3-djwa @ Mon Jul 31 20:08:17 PDT 2023
Date: 2023-07-31 20:08:17
             Hit   Total  Coverage
Lines:      1301    1437    90.5 %
Functions:    87      89    97.8 %

          Line data    Source code
       1             : // SPDX-License-Identifier: GPL-2.0
       2             : /*
       3             :  * Copyright (c) 2000-2005 Silicon Graphics, Inc.
       4             :  * All Rights Reserved.
       5             :  */
       6             : #include "xfs.h"
       7             : #include "xfs_fs.h"
       8             : #include "xfs_shared.h"
       9             : #include "xfs_format.h"
      10             : #include "xfs_log_format.h"
      11             : #include "xfs_trans_resv.h"
      12             : #include "xfs_mount.h"
      13             : #include "xfs_errortag.h"
      14             : #include "xfs_error.h"
      15             : #include "xfs_trans.h"
      16             : #include "xfs_trans_priv.h"
      17             : #include "xfs_log.h"
      18             : #include "xfs_log_priv.h"
      19             : #include "xfs_trace.h"
      20             : #include "xfs_sysfs.h"
      21             : #include "xfs_sb.h"
      22             : #include "xfs_health.h"
      23             : 
      24             : struct kmem_cache       *xfs_log_ticket_cache;
      25             : 
      26             : /* Local miscellaneous function prototypes */
      27             : STATIC struct xlog *
      28             : xlog_alloc_log(
      29             :         struct xfs_mount        *mp,
      30             :         struct xfs_buftarg      *log_target,
      31             :         xfs_daddr_t             blk_offset,
      32             :         int                     num_bblks);
      33             : STATIC int
      34             : xlog_space_left(
      35             :         struct xlog             *log,
      36             :         atomic64_t              *head);
      37             : STATIC void
      38             : xlog_dealloc_log(
      39             :         struct xlog             *log);
      40             : 
      41             : /* local state machine functions */
      42             : STATIC void xlog_state_done_syncing(
      43             :         struct xlog_in_core     *iclog);
      44             : STATIC void xlog_state_do_callback(
      45             :         struct xlog             *log);
      46             : STATIC int
      47             : xlog_state_get_iclog_space(
      48             :         struct xlog             *log,
      49             :         int                     len,
      50             :         struct xlog_in_core     **iclog,
      51             :         struct xlog_ticket      *ticket,
      52             :         int                     *logoffsetp);
      53             : STATIC void
      54             : xlog_grant_push_ail(
      55             :         struct xlog             *log,
      56             :         int                     need_bytes);
      57             : STATIC void
      58             : xlog_sync(
      59             :         struct xlog             *log,
      60             :         struct xlog_in_core     *iclog,
      61             :         struct xlog_ticket      *ticket);
      62             : #if defined(DEBUG)
      63             : STATIC void
      64             : xlog_verify_grant_tail(
      65             :         struct xlog *log);
      66             : STATIC void
      67             : xlog_verify_iclog(
      68             :         struct xlog             *log,
      69             :         struct xlog_in_core     *iclog,
      70             :         int                     count);
      71             : STATIC void
      72             : xlog_verify_tail_lsn(
      73             :         struct xlog             *log,
      74             :         struct xlog_in_core     *iclog);
      75             : #else
      76             : #define xlog_verify_grant_tail(a)
      77             : #define xlog_verify_iclog(a,b,c)
      78             : #define xlog_verify_tail_lsn(a,b)
      79             : #endif
      80             : 
      81             : STATIC int
      82             : xlog_iclogs_empty(
      83             :         struct xlog             *log);
      84             : 
      85             : static int
      86             : xfs_log_cover(struct xfs_mount *);
      87             : 
      88             : /*
      89             :  * We need to make sure the buffer pointer returned is naturally aligned for the
      90             :  * biggest basic data type we put into it. We have already accounted for this
      91             :  * padding when sizing the buffer.
      92             :  *
      93             :  * However, this padding does not get written into the log, and hence we have to
      94             :  * track the space used by the log vectors separately to prevent log space hangs
      95             :  * due to inaccurate accounting (i.e. a leak) of the used log space through the
      96             :  * CIL context ticket.
      97             :  *
      98             :  * We also add space for the xlog_op_header that describes this region in the
      99             :  * log. This prepends the data region we return to the caller to copy their data
     100             :  * into, so do all the static initialisation of the ophdr now. Because the ophdr
     101             :  * is not 8 byte aligned, we have to be careful to ensure that we align the
      102             :  * start of the buffer such that the region we return to the caller is 8 byte
     103             :  * aligned and packed against the tail of the ophdr.
     104             :  */
     105             : void *
     106 10030896343 : xlog_prepare_iovec(
     107             :         struct xfs_log_vec      *lv,
     108             :         struct xfs_log_iovec    **vecp,
     109             :         uint                    type)
     110             : {
     111 10030896343 :         struct xfs_log_iovec    *vec = *vecp;
     112 10030896343 :         struct xlog_op_header   *oph;
     113 10030896343 :         uint32_t                len;
     114 10030896343 :         void                    *buf;
     115             : 
     116 10030896343 :         if (vec) {
     117  5441753128 :                 ASSERT(vec - lv->lv_iovecp < lv->lv_niovecs);
     118  5441753128 :                 vec++;
     119             :         } else {
     120  4589143215 :                 vec = &lv->lv_iovecp[0];
     121             :         }
     122             : 
     123 10030896343 :         len = lv->lv_buf_len + sizeof(struct xlog_op_header);
     124 10030896343 :         if (!IS_ALIGNED(len, sizeof(uint64_t))) {
     125  9885889959 :                 lv->lv_buf_len = round_up(len, sizeof(uint64_t)) -
     126             :                                         sizeof(struct xlog_op_header);
     127             :         }
     128             : 
     129 10030896343 :         vec->i_type = type;
     130 10030896343 :         vec->i_addr = lv->lv_buf + lv->lv_buf_len;
     131             : 
     132 10030896343 :         oph = vec->i_addr;
     133 10030896343 :         oph->oh_clientid = XFS_TRANSACTION;
     134 10030896343 :         oph->oh_res2 = 0;
     135 10030896343 :         oph->oh_flags = 0;
     136             : 
     137 10030896343 :         buf = vec->i_addr + sizeof(struct xlog_op_header);
     138 10030896343 :         ASSERT(IS_ALIGNED((unsigned long)buf, sizeof(uint64_t)));
     139             : 
     140 10030896343 :         *vecp = vec;
     141 10030896343 :         return buf;
     142             : }
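
/*
 * Editorial worked example (not from the kernel tree): with the 12-byte
 * on-disk struct xlog_op_header, suppose lv->lv_buf_len is 50 on entry:
 *
 *	len        = 50 + 12 = 62		(not 8-byte aligned)
 *	lv_buf_len = round_up(62, 8) - 12 = 52
 *
 * The ophdr is then written at lv_buf + 52 (deliberately unaligned), and
 * the buffer returned to the caller starts at lv_buf + 64, which is 8-byte
 * aligned and packed directly against the tail of the ophdr.
 */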
     143             : 
     144             : static void
     145  2602923761 : xlog_grant_sub_space(
     146             :         struct xlog             *log,
     147             :         atomic64_t              *head,
     148             :         int                     bytes)
     149             : {
     150  2602923761 :         int64_t head_val = atomic64_read(head);
     151  2604058910 :         int64_t new, old;
     152             : 
     153  2604058910 :         do {
     154  2604058910 :                 int     cycle, space;
     155             : 
     156  2604058910 :                 xlog_crack_grant_head_val(head_val, &cycle, &space);
     157             : 
     158  2604058910 :                 space -= bytes;
     159  2604058910 :                 if (space < 0) {
     160    30062546 :                         space += log->l_logsize;
     161    30062546 :                         cycle--;
     162             :                 }
     163             : 
     164  2604058910 :                 old = head_val;
     165  2604058910 :                 new = xlog_assign_grant_head_val(cycle, space);
     166  2604058910 :                 head_val = atomic64_cmpxchg(head, old, new);
     167  2604280819 :         } while (head_val != old);
     168  2603145670 : }
     169             : 
     170             : static void
     171  2008842371 : xlog_grant_add_space(
     172             :         struct xlog             *log,
     173             :         atomic64_t              *head,
     174             :         int                     bytes)
     175             : {
     176  2008842371 :         int64_t head_val = atomic64_read(head);
     177  2010186487 :         int64_t new, old;
     178             : 
     179  2010186487 :         do {
     180  2010186487 :                 int             tmp;
     181  2010186487 :                 int             cycle, space;
     182             : 
     183  2010186487 :                 xlog_crack_grant_head_val(head_val, &cycle, &space);
     184             : 
     185  2010186487 :                 tmp = log->l_logsize - space;
     186  2010186487 :                 if (tmp > bytes)
     187  1980110824 :                         space += bytes;
     188             :                 else {
     189    30075663 :                         space = bytes - tmp;
     190    30075663 :                         cycle++;
     191             :                 }
     192             : 
     193  2010186487 :                 old = head_val;
     194  2010186487 :                 new = xlog_assign_grant_head_val(cycle, space);
     195  2010186487 :                 head_val = atomic64_cmpxchg(head, old, new);
     196  2010466934 :         } while (head_val != old);
     197  2009122818 : }
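
/*
 * Editorial sketch (not from the kernel tree; the example_* names are
 * hypothetical). Both cmpxchg loops above work because cycle and space are
 * packed into one 64-bit value, so they can be read, modified, and swapped
 * atomically as a unit. The encoding mirrors the crack/assign helpers in
 * xfs_log_priv.h:
 */
static inline void
example_crack_grant_head_val(int64_t val, int *cycle, int *space)
{
	*cycle = val >> 32;		/* high 32 bits: log cycle count */
	*space = val & 0xffffffff;	/* low 32 bits: byte offset into the log */
}

static inline int64_t
example_assign_grant_head_val(int cycle, int space)
{
	return ((int64_t)cycle << 32) | space;
}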
     198             : 
     199             : STATIC void
     200       44982 : xlog_grant_head_init(
     201             :         struct xlog_grant_head  *head)
     202             : {
     203       44982 :         xlog_assign_grant_head(&head->grant, 1, 0);
     204       44982 :         INIT_LIST_HEAD(&head->waiters);
     205       44982 :         spin_lock_init(&head->lock);
     206       44982 : }
     207             : 
     208             : STATIC void
     209       21474 : xlog_grant_head_wake_all(
     210             :         struct xlog_grant_head  *head)
     211             : {
     212       21474 :         struct xlog_ticket      *tic;
     213             : 
     214       21474 :         spin_lock(&head->lock);
     215       21474 :         list_for_each_entry(tic, &head->waiters, t_queue)
     216           0 :                 wake_up_process(tic->t_task);
     217       21474 :         spin_unlock(&head->lock);
     218       21474 : }
     219             : 
     220             : static inline int
     221  1104527026 : xlog_ticket_reservation(
     222             :         struct xlog             *log,
     223             :         struct xlog_grant_head  *head,
     224             :         struct xlog_ticket      *tic)
     225             : {
     226  1104527026 :         if (head == &log->l_write_head) {
     227   157743915 :                 ASSERT(tic->t_flags & XLOG_TIC_PERM_RESERV);
     228   157743915 :                 return tic->t_unit_res;
     229             :         }
     230             : 
     231   946783111 :         if (tic->t_flags & XLOG_TIC_PERM_RESERV)
     232   784777450 :                 return tic->t_unit_res * tic->t_cnt;
     233             : 
     234   162005661 :         return tic->t_unit_res;
     235             : }
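
/*
 * Editorial worked example (not from the kernel tree): a permanent ticket
 * allocated with t_cnt = 3 charges t_unit_res * 3 to the reserve head up
 * front, while a later regrant through the write head (see
 * xfs_log_regrant() below) only ever needs a single t_unit_res, because
 * the remaining counts were paid for when the ticket was first reserved.
 */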
     236             : 
     237             : STATIC bool
     238     4911813 : xlog_grant_head_wake(
     239             :         struct xlog             *log,
     240             :         struct xlog_grant_head  *head,
     241             :         int                     *free_bytes)
     242             : {
     243     4911813 :         struct xlog_ticket      *tic;
     244     4911813 :         int                     need_bytes;
     245     4911813 :         bool                    woken_task = false;
     246             : 
     247   104064215 :         list_for_each_entry(tic, &head->waiters, t_queue) {
     248             : 
     249             :                 /*
     250             :                  * There is a chance that the size of the CIL checkpoints in
     251             :                  * progress at the last AIL push target calculation resulted in
     252             :                  * limiting the target to the log head (l_last_sync_lsn) at the
     253             :                  * time. This may not reflect where the log head is now as the
     254             :                  * CIL checkpoints may have completed.
     255             :                  *
      256             :                  * Hence when we are woken here, it may be the head of the
     257             :                  * log that has moved rather than the tail. As the tail didn't
     258             :                  * move, there still won't be space available for the
     259             :                  * reservation we require.  However, if the AIL has already
     260             :                  * pushed to the target defined by the old log head location, we
     261             :                  * will hang here waiting for something else to update the AIL
     262             :                  * push target.
     263             :                  *
     264             :                  * Therefore, if there isn't space to wake the first waiter on
     265             :                  * the grant head, we need to push the AIL again to ensure the
     266             :                  * target reflects both the current log tail and log head
     267             :                  * position before we wait for the tail to move again.
     268             :                  */
     269             : 
     270   101993657 :                 need_bytes = xlog_ticket_reservation(log, head, tic);
     271   101993657 :                 if (*free_bytes < need_bytes) {
     272     2841255 :                         if (!woken_task)
     273      447598 :                                 xlog_grant_push_ail(log, need_bytes);
     274     2841255 :                         return false;
     275             :                 }
     276             : 
     277    99152402 :                 *free_bytes -= need_bytes;
     278    99152402 :                 trace_xfs_log_grant_wake_up(log, tic);
     279    99152402 :                 wake_up_process(tic->t_task);
     280    99152402 :                 woken_task = true;
     281             :         }
     282             : 
     283             :         return true;
     284             : }
     285             : 
     286             : STATIC int
     287     1613705 : xlog_grant_head_wait(
     288             :         struct xlog             *log,
     289             :         struct xlog_grant_head  *head,
     290             :         struct xlog_ticket      *tic,
     291             :         int                     need_bytes) __releases(&head->lock)
     292             :                                             __acquires(&head->lock)
     293             : {
     294     1613705 :         list_add_tail(&tic->t_queue, &head->waiters);
     295             : 
     296     1613905 :         do {
     297     3227810 :                 if (xlog_is_shutdown(log))
     298           0 :                         goto shutdown;
     299     1613905 :                 xlog_grant_push_ail(log, need_bytes);
     300             : 
     301     1613905 :                 __set_current_state(TASK_UNINTERRUPTIBLE);
     302     1613905 :                 spin_unlock(&head->lock);
     303             : 
     304     1613884 :                 XFS_STATS_INC(log->l_mp, xs_sleep_logspace);
     305             : 
     306     1613902 :                 trace_xfs_log_grant_sleep(log, tic);
     307     1613891 :                 schedule();
     308     1613643 :                 trace_xfs_log_grant_wake(log, tic);
     309             : 
     310     1613684 :                 spin_lock(&head->lock);
     311     3227810 :                 if (xlog_is_shutdown(log))
     312           0 :                         goto shutdown;
     313     1613905 :         } while (xlog_space_left(log, &head->grant) < need_bytes);
     314             : 
     315     1613705 :         list_del_init(&tic->t_queue);
     316     1613705 :         return 0;
     317           0 : shutdown:
     318           0 :         list_del_init(&tic->t_queue);
     319           0 :         return -EIO;
     320             : }
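
/*
 * Editorial aside: the ordering above is the standard sleep/wakeup
 * protocol. The task state is set to TASK_UNINTERRUPTIBLE *before*
 * head->lock is dropped, so a waker that takes the lock and calls
 * wake_up_process() after the unlock cannot lose the wakeup; schedule()
 * then either sleeps or returns immediately, and the space check is
 * always repeated under the lock before deciding to sleep again.
 */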
     321             : 
     322             : /*
     323             :  * Atomically get the log space required for a log ticket.
     324             :  *
     325             :  * Once a ticket gets put onto head->waiters, it will only return after the
     326             :  * needed reservation is satisfied.
     327             :  *
     328             :  * This function is structured so that it has a lock free fast path. This is
     329             :  * necessary because every new transaction reservation will come through this
     330             :  * path. Hence any lock will be globally hot if we take it unconditionally on
     331             :  * every pass.
     332             :  *
     333             :  * As tickets are only ever moved on and off head->waiters under head->lock, we
     334             :  * only need to take that lock if we are going to add the ticket to the queue
     335             :  * and sleep. We can avoid taking the lock if the ticket was never added to
     336             :  * head->waiters because the t_queue list head will be empty and we hold the
     337             :  * only reference to it so it can safely be checked unlocked.
     338             :  */
     339             : STATIC int
     340  1002484730 : xlog_grant_head_check(
     341             :         struct xlog             *log,
     342             :         struct xlog_grant_head  *head,
     343             :         struct xlog_ticket      *tic,
     344             :         int                     *need_bytes)
     345             : {
     346  1002484730 :         int                     free_bytes;
     347  1002484730 :         int                     error = 0;
     348             : 
     349  2004969460 :         ASSERT(!xlog_in_recovery(log));
     350             : 
     351             :         /*
     352             :          * If there are other waiters on the queue then give them a chance at
      353             :          * logspace before us.  Wake up the first waiters; if we do not wake
      354             :          * up all the waiters, then go to sleep waiting for more free space;
      355             :          * otherwise, try to get some space for this transaction.
     356             :          */
     357  1002484730 :         *need_bytes = xlog_ticket_reservation(log, head, tic);
     358  1002518534 :         free_bytes = xlog_space_left(log, &head->grant);
     359  1002555415 :         if (!list_empty_careful(&head->waiters)) {
     360     2366100 :                 spin_lock(&head->lock);
     361     2323576 :                 if (!xlog_grant_head_wake(log, head, &free_bytes) ||
     362      746924 :                     free_bytes < *need_bytes) {
     363     1601104 :                         error = xlog_grant_head_wait(log, head, tic,
     364             :                                                      *need_bytes);
     365             :                 }
     366     2323576 :                 spin_unlock(&head->lock);
     367  1000241589 :         } else if (free_bytes < *need_bytes) {
     368       12601 :                 spin_lock(&head->lock);
     369       12601 :                 error = xlog_grant_head_wait(log, head, tic, *need_bytes);
     370       12601 :                 spin_unlock(&head->lock);
     371             :         }
     372             : 
     373  1002564625 :         return error;
     374             : }
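
/*
 * Editorial aside: the unlocked list_empty_careful() check above is what
 * keeps the fast path lock free; head->lock is taken only once we know
 * there are waiters to race with, or that we must queue and sleep. The
 * shape of the pattern, reduced to a sketch:
 *
 *	if (!list_empty_careful(&head->waiters)) {
 *		spin_lock(&head->lock);		// slow path only
 *		// wake earlier waiters and/or wait our turn
 *		spin_unlock(&head->lock);
 *	}
 *
 * This is safe only because tickets move on and off head->waiters
 * exclusively under head->lock, as the comment above the function notes.
 */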
     375             : 
     376             : bool
     377       95963 : xfs_log_writable(
     378             :         struct xfs_mount        *mp)
     379             : {
     380             :         /*
     381             :          * Do not write to the log on norecovery mounts, if the data or log
     382             :          * devices are read-only, or if the filesystem is shutdown. Read-only
     383             :          * mounts allow internal writes for log recovery and unmount purposes,
     384             :          * so don't restrict that case.
     385             :          */
     386       95963 :         if (xfs_has_norecovery(mp))
     387             :                 return false;
     388       95943 :         if (xfs_readonly_buftarg(mp->m_ddev_targp))
     389             :                 return false;
     390       95939 :         if (xfs_readonly_buftarg(mp->m_log->l_targ))
     391             :                 return false;
     392      191878 :         if (xlog_is_shutdown(mp->m_log))
     393       21466 :                 return false;
     394             :         return true;
     395             : }
     396             : 
     397             : /*
     398             :  * Replenish the byte reservation required by moving the grant write head.
     399             :  */
     400             : int
     401   454210513 : xfs_log_regrant(
     402             :         struct xfs_mount        *mp,
     403             :         struct xlog_ticket      *tic)
     404             : {
     405   454210513 :         struct xlog             *log = mp->m_log;
     406   454210513 :         int                     need_bytes;
     407   454210513 :         int                     error = 0;
     408             : 
     409   908421026 :         if (xlog_is_shutdown(log))
     410             :                 return -EIO;
     411             : 
     412   454210486 :         XFS_STATS_INC(mp, xs_try_logspace);
     413             : 
     414             :         /*
     415             :          * This is a new transaction on the ticket, so we need to change the
     416             :          * transaction ID so that the next transaction has a different TID in
     417             :          * the log. Just add one to the existing tid so that we can see chains
     418             :          * of rolling transactions in the log easily.
     419             :          */
     420   454214727 :         tic->t_tid++;
     421             : 
     422   454214727 :         xlog_grant_push_ail(log, tic->t_unit_res);
     423             : 
     424   454214140 :         tic->t_curr_res = tic->t_unit_res;
     425   454214140 :         if (tic->t_cnt > 0)
     426             :                 return 0;
     427             : 
     428   157743882 :         trace_xfs_log_regrant(log, tic);
     429             : 
     430   157743969 :         error = xlog_grant_head_check(log, &log->l_write_head, tic,
     431             :                                       &need_bytes);
     432   157743988 :         if (error)
     433           0 :                 goto out_error;
     434             : 
     435   157743988 :         xlog_grant_add_space(log, &log->l_write_head.grant, need_bytes);
     436   157744084 :         trace_xfs_log_regrant_exit(log, tic);
     437   157744109 :         xlog_verify_grant_tail(log);
     438   157744109 :         return 0;
     439             : 
     440             : out_error:
     441             :         /*
     442             :          * If we are failing, make sure the ticket doesn't have any current
     443             :          * reservations.  We don't want to add this back when the ticket/
     444             :          * transaction gets cancelled.
     445             :          */
     446           0 :         tic->t_curr_res = 0;
     447           0 :         tic->t_cnt = 0;      /* ungrant will give back unit_res * t_cnt. */
     448           0 :         return error;
     449             : }
     450             : 
     451             : /*
     452             :  * Reserve log space and return a ticket corresponding to the reservation.
     453             :  *
     454             :  * Each reservation is going to reserve extra space for a log record header.
     455             :  * When writes happen to the on-disk log, we don't subtract the length of the
     456             :  * log record header from any reservation.  By wasting space in each
      457             :  * reservation, we prevent over-allocation problems.
     458             :  */
     459             : int
     460   844679702 : xfs_log_reserve(
     461             :         struct xfs_mount        *mp,
     462             :         int                     unit_bytes,
     463             :         int                     cnt,
     464             :         struct xlog_ticket      **ticp,
     465             :         bool                    permanent)
     466             : {
     467   844679702 :         struct xlog             *log = mp->m_log;
     468   844679702 :         struct xlog_ticket      *tic;
     469   844679702 :         int                     need_bytes;
     470   844679702 :         int                     error = 0;
     471             : 
     472  1689359404 :         if (xlog_is_shutdown(log))
     473             :                 return -EIO;
     474             : 
     475   844677881 :         XFS_STATS_INC(mp, xs_try_logspace);
     476             : 
     477   844794131 :         ASSERT(*ticp == NULL);
     478   844794131 :         tic = xlog_ticket_alloc(log, unit_bytes, cnt, permanent);
     479   844824594 :         *ticp = tic;
     480             : 
     481   844824594 :         xlog_grant_push_ail(log, tic->t_cnt ? tic->t_unit_res * tic->t_cnt
     482             :                                             : tic->t_unit_res);
     483             : 
     484   844663263 :         trace_xfs_log_reserve(log, tic);
     485             : 
     486   844772529 :         error = xlog_grant_head_check(log, &log->l_reserve_head, tic,
     487             :                                       &need_bytes);
     488   844808363 :         if (error)
     489           0 :                 goto out_error;
     490             : 
     491   844808363 :         xlog_grant_add_space(log, &log->l_reserve_head.grant, need_bytes);
     492   844822802 :         xlog_grant_add_space(log, &log->l_write_head.grant, need_bytes);
     493   844878301 :         trace_xfs_log_reserve_exit(log, tic);
     494   844869561 :         xlog_verify_grant_tail(log);
     495   844869561 :         return 0;
     496             : 
     497             : out_error:
     498             :         /*
     499             :          * If we are failing, make sure the ticket doesn't have any current
     500             :          * reservations.  We don't want to add this back when the ticket/
     501             :          * transaction gets cancelled.
     502             :          */
     503           0 :         tic->t_curr_res = 0;
     504           0 :         tic->t_cnt = 0;      /* ungrant will give back unit_res * t_cnt. */
     505           0 :         return error;
     506             : }
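
/*
 * Editorial usage sketch (not from the kernel tree; the helper name is
 * hypothetical and error handling is elided). A permanent reservation
 * taken with cnt = 2 charges two transaction units to the reserve head up
 * front, per xlog_ticket_reservation() above:
 */
static int
example_take_permanent_reservation(
	struct xfs_mount	*mp,
	int			unit_bytes,
	struct xlog_ticket	**ticp)
{
	/* cnt = 2, permanent = true; *ticp must be NULL on entry */
	return xfs_log_reserve(mp, unit_bytes, 2, ticp, true);
}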
     507             : 
     508             : /*
     509             :  * Run all the pending iclog callbacks and wake log force waiters and iclog
     510             :  * space waiters so they can process the newly set shutdown state. We really
     511             :  * don't care what order we process callbacks here because the log is shut down
     512             :  * and so state cannot change on disk anymore. However, we cannot wake waiters
     513             :  * until the callbacks have been processed because we may be in unmount and
     514             :  * we must ensure that all AIL operations the callbacks perform have completed
     515             :  * before we tear down the AIL.
     516             :  *
     517             :  * We avoid processing actively referenced iclogs so that we don't run callbacks
      518             :  * while the iclog owner might still be preparing the iclog for IO submission.
     519             :  * These will be caught by xlog_state_iclog_release() and call this function
     520             :  * again to process any callbacks that may have been added to that iclog.
     521             :  */
     522             : static void
     523       12276 : xlog_state_shutdown_callbacks(
     524             :         struct xlog             *log)
     525             : {
     526       12276 :         struct xlog_in_core     *iclog;
     527       12276 :         LIST_HEAD(cb_list);
     528             : 
     529       12276 :         iclog = log->l_iclog;
     530       98208 :         do {
     531       98208 :                 if (atomic_read(&iclog->ic_refcnt)) {
     532             :                         /* Reference holder will re-run iclog callbacks. */
     533        1531 :                         continue;
     534             :                 }
     535       96677 :                 list_splice_init(&iclog->ic_callbacks, &cb_list);
     536       96677 :                 spin_unlock(&log->l_icloglock);
     537             : 
     538       96677 :                 xlog_cil_process_committed(&cb_list);
     539             : 
     540       96677 :                 spin_lock(&log->l_icloglock);
     541       96677 :                 wake_up_all(&iclog->ic_write_wait);
     542       96677 :                 wake_up_all(&iclog->ic_force_wait);
     543       98208 :         } while ((iclog = iclog->ic_next) != log->l_iclog);
     544             : 
     545       12276 :         wake_up_all(&log->l_flush_wait);
     546       12276 : }
     547             : 
     548             : /*
      549             :  * Flush iclog to disk if this is the last reference to the given iclog and
     550             :  * it is in the WANT_SYNC state.
     551             :  *
     552             :  * If XLOG_ICL_NEED_FUA is already set on the iclog, we need to ensure that the
     553             :  * log tail is updated correctly. NEED_FUA indicates that the iclog will be
     554             :  * written to stable storage, and implies that a commit record is contained
     555             :  * within the iclog. We need to ensure that the log tail does not move beyond
     556             :  * the tail that the first commit record in the iclog ordered against, otherwise
     557             :  * correct recovery of that checkpoint becomes dependent on future operations
     558             :  * performed on this iclog.
     559             :  *
     560             :  * Hence if NEED_FUA is set and the current iclog tail lsn is empty, write the
      561             :  * current tail into the iclog. Once the iclog tail is set, future operations must
     562             :  * not modify it, otherwise they potentially violate ordering constraints for
     563             :  * the checkpoint commit that wrote the initial tail lsn value. The tail lsn in
     564             :  * the iclog will get zeroed on activation of the iclog after sync, so we
     565             :  * always capture the tail lsn on the iclog on the first NEED_FUA release
     566             :  * regardless of the number of active reference counts on this iclog.
     567             :  */
     568             : int
     569    19841338 : xlog_state_release_iclog(
     570             :         struct xlog             *log,
     571             :         struct xlog_in_core     *iclog,
     572             :         struct xlog_ticket      *ticket)
     573             : {
     574    19841338 :         xfs_lsn_t               tail_lsn;
     575    19841338 :         bool                    last_ref;
     576             : 
     577    19841338 :         lockdep_assert_held(&log->l_icloglock);
     578             : 
     579    19841338 :         trace_xlog_iclog_release(iclog, _RET_IP_);
     580             :         /*
     581             :          * Grabbing the current log tail needs to be atomic w.r.t. the writing
     582             :          * of the tail LSN into the iclog so we guarantee that the log tail does
     583             :          * not move between the first time we know that the iclog needs to be
     584             :          * made stable and when we eventually submit it.
     585             :          */
     586    19841356 :         if ((iclog->ic_state == XLOG_STATE_WANT_SYNC ||
     587     7237768 :              (iclog->ic_flags & XLOG_ICL_NEED_FUA)) &&
     588    14757084 :             !iclog->ic_header.h_tail_lsn) {
     589    12572372 :                 tail_lsn = xlog_assign_tail_lsn(log->l_mp);
     590    12572371 :                 iclog->ic_header.h_tail_lsn = cpu_to_be64(tail_lsn);
     591             :         }
     592             : 
     593    19841355 :         last_ref = atomic_dec_and_test(&iclog->ic_refcnt);
     594             : 
     595    39682726 :         if (xlog_is_shutdown(log)) {
     596             :                 /*
     597             :                  * If there are no more references to this iclog, process the
     598             :                  * pending iclog callbacks that were waiting on the release of
     599             :                  * this iclog.
     600             :                  */
     601        1540 :                 if (last_ref)
     602        1539 :                         xlog_state_shutdown_callbacks(log);
     603        1540 :                 return -EIO;
     604             :         }
     605             : 
     606    19839823 :         if (!last_ref)
     607             :                 return 0;
     608             : 
     609    17260010 :         if (iclog->ic_state != XLOG_STATE_WANT_SYNC) {
     610     4689214 :                 ASSERT(iclog->ic_state == XLOG_STATE_ACTIVE);
     611     4689214 :                 return 0;
     612             :         }
     613             : 
     614    12570796 :         iclog->ic_state = XLOG_STATE_SYNCING;
     615    12570796 :         xlog_verify_tail_lsn(log, iclog);
     616    12570796 :         trace_xlog_iclog_syncing(iclog, _RET_IP_);
     617             : 
     618    12570795 :         spin_unlock(&log->l_icloglock);
     619    12570793 :         xlog_sync(log, iclog, ticket);
     620    12570756 :         spin_lock(&log->l_icloglock);
     621    12570756 :         return 0;
     622             : }
     623             : 
     624             : /*
      625             :  * Mount the log portion of a filesystem
     626             :  *
     627             :  * mp           - ubiquitous xfs mount point structure
     628             :  * log_target   - buftarg of on-disk log device
     629             :  * blk_offset   - Start block # where block size is 512 bytes (BBSIZE)
     630             :  * num_bblocks  - Number of BBSIZE blocks in on-disk log
     631             :  *
     632             :  * Return error or zero.
     633             :  */
     634             : int
     635       22491 : xfs_log_mount(
     636             :         xfs_mount_t     *mp,
     637             :         xfs_buftarg_t   *log_target,
     638             :         xfs_daddr_t     blk_offset,
     639             :         int             num_bblks)
     640             : {
     641       22491 :         struct xlog     *log;
     642       22491 :         int             error = 0;
     643       22491 :         int             min_logfsbs;
     644             : 
     645       22491 :         if (!xfs_has_norecovery(mp)) {
     646       22481 :                 xfs_notice(mp, "Mounting V%d Filesystem %pU",
     647             :                            XFS_SB_VERSION_NUM(&mp->m_sb),
     648             :                            &mp->m_sb.sb_uuid);
     649             :         } else {
     650          10 :                 xfs_notice(mp,
     651             : "Mounting V%d filesystem %pU in no-recovery mode. Filesystem will be inconsistent.",
     652             :                            XFS_SB_VERSION_NUM(&mp->m_sb),
     653             :                            &mp->m_sb.sb_uuid);
     654          20 :                 ASSERT(xfs_is_readonly(mp));
     655             :         }
     656             : 
     657       22491 :         log = xlog_alloc_log(mp, log_target, blk_offset, num_bblks);
     658       22491 :         if (IS_ERR(log)) {
     659           0 :                 error = PTR_ERR(log);
     660           0 :                 goto out;
     661             :         }
     662       22491 :         mp->m_log = log;
     663             : 
     664             :         /*
      665             :          * Now that we have set up the log and its internal geometry
     666             :          * parameters, we can validate the given log space and drop a critical
     667             :          * message via syslog if the log size is too small. A log that is too
     668             :          * small can lead to unexpected situations in transaction log space
     669             :          * reservation stage. The superblock verifier has already validated all
     670             :          * the other log geometry constraints, so we don't have to check those
     671             :          * here.
     672             :          *
     673             :          * Note: For v4 filesystems, we can't just reject the mount if the
     674             :          * validation fails.  This would mean that people would have to
     675             :          * downgrade their kernel just to remedy the situation as there is no
     676             :          * way to grow the log (short of black magic surgery with xfs_db).
     677             :          *
     678             :          * We can, however, reject mounts for V5 format filesystems, as the
     679             :          * mkfs binary being used to make the filesystem should never create a
     680             :          * filesystem with a log that is too small.
     681             :          */
     682       22491 :         min_logfsbs = xfs_log_calc_minimum_size(mp);
     683       22491 :         if (mp->m_sb.sb_logblocks < min_logfsbs) {
     684           0 :                 xfs_warn(mp,
     685             :                 "Log size %d blocks too small, minimum size is %d blocks",
     686             :                          mp->m_sb.sb_logblocks, min_logfsbs);
     687             : 
     688             :                 /*
     689             :                  * Log check errors are always fatal on v5; or whenever bad
     690             :                  * metadata leads to a crash.
     691             :                  */
     692           0 :                 if (xfs_has_crc(mp)) {
     693           0 :                         xfs_crit(mp, "AAIEEE! Log failed size checks. Abort!");
     694           0 :                         ASSERT(0);
     695           0 :                         error = -EINVAL;
     696           0 :                         goto out_free_log;
     697             :                 }
     698           0 :                 xfs_crit(mp, "Log size out of supported range.");
     699           0 :                 xfs_crit(mp,
     700             : "Continuing onwards, but if log hangs are experienced then please report this message in the bug report.");
     701             :         }
     702             : 
     703             :         /*
     704             :          * Initialize the AIL now we have a log.
     705             :          */
     706       22491 :         error = xfs_trans_ail_init(mp);
     707       22491 :         if (error) {
     708           0 :                 xfs_warn(mp, "AIL initialisation failed: error %d", error);
     709           0 :                 goto out_free_log;
     710             :         }
     711       22491 :         log->l_ailp = mp->m_ail;
     712             : 
     713             :         /*
     714             :          * skip log recovery on a norecovery mount.  pretend it all
     715             :          * just worked.
     716             :          */
     717       22491 :         if (!xfs_has_norecovery(mp)) {
     718             :                 /*
     719             :                  * log recovery ignores readonly state and so we need to clear
     720             :                  * mount-based read only state so it can write to disk.
     721             :                  */
     722       22481 :                 bool    readonly = test_and_clear_bit(XFS_OPSTATE_READONLY,
     723       22481 :                                                 &mp->m_opstate);
     724       22481 :                 error = xlog_recover(log);
     725       22481 :                 if (readonly)
     726        2038 :                         set_bit(XFS_OPSTATE_READONLY, &mp->m_opstate);
     727       22481 :                 if (error) {
     728           8 :                         xfs_warn(mp, "log mount/recovery failed: error %d",
     729             :                                 error);
     730           8 :                         xlog_recover_cancel(log);
     731           8 :                         goto out_destroy_ail;
     732             :                 }
     733             :         }
     734             : 
     735       22483 :         error = xfs_sysfs_init(&log->l_kobj, &xfs_log_ktype, &mp->m_kobj,
     736             :                                "log");
     737       22483 :         if (error)
     738           0 :                 goto out_destroy_ail;
     739             : 
     740             :         /* Normal transactions can now occur */
     741       22483 :         clear_bit(XLOG_ACTIVE_RECOVERY, &log->l_opstate);
     742             : 
     743             :         /*
      744             :          * Now that the log has been fully initialised and we know where our
     745             :          * space grant counters are, we can initialise the permanent ticket
     746             :          * needed for delayed logging to work.
     747             :          */
     748       22483 :         xlog_cil_init_post_recovery(log);
     749             : 
     750       22483 :         return 0;
     751             : 
     752           8 : out_destroy_ail:
     753           8 :         xfs_trans_ail_destroy(mp);
     754           8 : out_free_log:
     755           8 :         xlog_dealloc_log(log);
     756             : out:
     757             :         return error;
     758             : }
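
/*
 * Editorial sketch: for context, the mount path calls this function along
 * these lines, deriving the daddr/bblock arguments from the superblock
 * geometry (based on xfs_mountfs() in xfs_mount.c, not part of this file):
 *
 *	error = xfs_log_mount(mp, mp->m_logdev_targp,
 *			XFS_FSB_TO_DADDR(mp, mp->m_sb.sb_logstart),
 *			XFS_FSB_TO_BB(mp, mp->m_sb.sb_logblocks));
 */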
     759             : 
     760             : /*
     761             :  * Finish the recovery of the file system.  This is separate from the
     762             :  * xfs_log_mount() call, because it depends on the code in xfs_mountfs() to read
     763             :  * in the root and real-time bitmap inodes between calling xfs_log_mount() and
     764             :  * here.
     765             :  *
     766             :  * If we finish recovery successfully, start the background log work. If we are
     767             :  * not doing recovery, then we have a RO filesystem and we don't need to start
     768             :  * it.
     769             :  */
     770             : int
     771       22473 : xfs_log_mount_finish(
     772             :         struct xfs_mount        *mp)
     773             : {
     774       22473 :         struct xlog             *log = mp->m_log;
     775       22473 :         bool                    readonly;
     776       22473 :         int                     error = 0;
     777             : 
     778       22473 :         if (xfs_has_norecovery(mp)) {
     779          16 :                 ASSERT(xfs_is_readonly(mp));
     780           8 :                 return 0;
     781             :         }
     782             : 
     783             :         /*
     784             :          * log recovery ignores readonly state and so we need to clear
     785             :          * mount-based read only state so it can write to disk.
     786             :          */
     787       22465 :         readonly = test_and_clear_bit(XFS_OPSTATE_READONLY, &mp->m_opstate);
     788             : 
     789             :         /*
     790             :          * During the second phase of log recovery, we need iget and
     791             :          * iput to behave like they do for an active filesystem.
     792             :          * xfs_fs_drop_inode needs to be able to prevent the deletion
     793             :          * of inodes before we're done replaying log items on those
     794             :          * inodes.  Turn it off immediately after recovery finishes
     795             :          * so that we don't leak the quota inodes if subsequent mount
     796             :          * activities fail.
     797             :          *
     798             :          * We let all inodes involved in redo item processing end up on
     799             :          * the LRU instead of being evicted immediately so that if we do
     800             :          * something to an unlinked inode, the irele won't cause
     801             :          * premature truncation and freeing of the inode, which results
     802             :          * in log recovery failure.  We have to evict the unreferenced
     803             :          * lru inodes after clearing SB_ACTIVE because we don't
     804             :          * otherwise clean up the lru if there's a subsequent failure in
     805             :          * xfs_mountfs, which leads to us leaking the inodes if nothing
     806             :          * else (e.g. quotacheck) references the inodes before the
     807             :          * mount failure occurs.
     808             :          */
     809       22465 :         mp->m_super->s_flags |= SB_ACTIVE;
     810       22465 :         xfs_log_work_queue(mp);
     811       44930 :         if (xlog_recovery_needed(log))
     812       10294 :                 error = xlog_recover_finish(log);
     813       22465 :         mp->m_super->s_flags &= ~SB_ACTIVE;
     814       22465 :         evict_inodes(mp->m_super);
     815             : 
     816             :         /*
     817             :          * Drain the buffer LRU after log recovery. This is required for v4
     818             :          * filesystems to avoid leaving around buffers with NULL verifier ops,
     819             :          * but we do it unconditionally to make sure we're always in a clean
     820             :          * cache state after mount.
     821             :          *
     822             :          * Don't push in the error case because the AIL may have pending intents
     823             :          * that aren't removed until recovery is cancelled.
     824             :          */
     825       44930 :         if (xlog_recovery_needed(log)) {
     826       10294 :                 if (!error) {
     827       10292 :                         xfs_log_force(mp, XFS_LOG_SYNC);
     828       10292 :                         xfs_ail_push_all_sync(mp->m_ail);
     829             :                 }
     830       10294 :                 xfs_notice(mp, "Ending recovery (logdev: %s)",
     831             :                                 mp->m_logname ? mp->m_logname : "internal");
     832             :         } else {
     833       12171 :                 xfs_info(mp, "Ending clean mount");
     834             :         }
     835       22465 :         xfs_buftarg_drain(mp->m_ddev_targp);
     836             : 
     837       22465 :         clear_bit(XLOG_RECOVERY_NEEDED, &log->l_opstate);
     838       22465 :         if (readonly)
     839        2034 :                 set_bit(XFS_OPSTATE_READONLY, &mp->m_opstate);
     840             : 
     841             :         /* Make sure the log is dead if we're returning failure. */
     842       22467 :         ASSERT(!error || xlog_is_shutdown(log));
     843             : 
     844             :         return error;
     845             : }
     846             : 
     847             : /*
     848             :  * The mount has failed. Cancel the recovery if it hasn't completed and destroy
     849             :  * the log.
     850             :  */
     851             : void
     852          34 : xfs_log_mount_cancel(
     853             :         struct xfs_mount        *mp)
     854             : {
     855          34 :         xlog_recover_cancel(mp->m_log);
     856          34 :         xfs_log_unmount(mp);
     857          34 : }
     858             : 
     859             : /*
     860             :  * Flush out the iclog to disk ensuring that device caches are flushed and
     861             :  * the iclog hits stable storage before any completion waiters are woken.
     862             :  */
     863             : static inline int
     864     2095645 : xlog_force_iclog(
     865             :         struct xlog_in_core     *iclog)
     866             : {
     867     2095645 :         atomic_inc(&iclog->ic_refcnt);
     868     2095645 :         iclog->ic_flags |= XLOG_ICL_NEED_FLUSH | XLOG_ICL_NEED_FUA;
     869     2095645 :         if (iclog->ic_state == XLOG_STATE_ACTIVE)
     870     2095644 :                 xlog_state_switch_iclogs(iclog->ic_log, iclog, 0);
     871     2095645 :         return xlog_state_release_iclog(iclog->ic_log, iclog, NULL);
     872             : }
     873             : 
     874             : /*
     875             :  * Cycle all the iclogbuf locks to make sure all log IO completion
     876             :  * is done before we tear down these buffers.
     877             :  */
     878             : static void
     879       22486 : xlog_wait_iclog_completion(struct xlog *log)
     880             : {
     881       22486 :         int             i;
     882       22486 :         struct xlog_in_core     *iclog = log->l_iclog;
     883             : 
     884      202362 :         for (i = 0; i < log->l_iclog_bufs; i++) {
     885      179876 :                 down(&iclog->ic_sema);
     886      179876 :                 up(&iclog->ic_sema);
     887      179876 :                 iclog = iclog->ic_next;
     888             :         }
     889       22486 : }
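
/*
 * Editorial aside: the down()/up() pair above is the "cycle the lock"
 * idiom. Acquiring ic_sema cannot succeed until any in-flight log IO
 * holding it has completed and released it, so one pass around the iclog
 * ring guarantees all outstanding iclog IO completion work has finished
 * before the buffers are torn down.
 */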
     890             : 
     891             : /*
      892             :  * Wait for the iclog and all prior iclogs to be written to disk as required by the
     893             :  * log force state machine. Waiting on ic_force_wait ensures iclog completions
     894             :  * have been ordered and callbacks run before we are woken here, hence
     895             :  * guaranteeing that all the iclogs up to this one are on stable storage.
     896             :  */
     897             : int
     898     4918958 : xlog_wait_on_iclog(
     899             :         struct xlog_in_core     *iclog)
     900             :                 __releases(iclog->ic_log->l_icloglock)
     901             : {
     902     4918958 :         struct xlog             *log = iclog->ic_log;
     903             : 
     904     4918958 :         trace_xlog_iclog_wait_on(iclog, _RET_IP_);
     905     9837916 :         if (!xlog_is_shutdown(log) &&
     906     4917754 :             iclog->ic_state != XLOG_STATE_ACTIVE &&
     907             :             iclog->ic_state != XLOG_STATE_DIRTY) {
     908     3966420 :                 XFS_STATS_INC(log->l_mp, xs_log_force_sleep);
     909     3966418 :                 xlog_wait(&iclog->ic_force_wait, &log->l_icloglock);
     910             :         } else {
     911      952538 :                 spin_unlock(&log->l_icloglock);
     912             :         }
     913             : 
     914     9837298 :         if (xlog_is_shutdown(log))
     915        4864 :                 return -EIO;
     916             :         return 0;
     917             : }
     918             : 
     919             : /*
     920             :  * Write out an unmount record using the ticket provided. We have to account for
     921             :  * the data space used in the unmount ticket as this write is not done from a
     922             :  * transaction context that has already done the accounting for us.
     923             :  */
     924             : static int
     925       14117 : xlog_write_unmount_record(
     926             :         struct xlog             *log,
     927             :         struct xlog_ticket      *ticket)
     928             : {
     929       14117 :         struct  {
     930             :                 struct xlog_op_header ophdr;
     931             :                 struct xfs_unmount_log_format ulf;
     932       28234 :         } unmount_rec = {
     933             :                 .ophdr = {
     934             :                         .oh_clientid = XFS_LOG,
     935       14117 :                         .oh_tid = cpu_to_be32(ticket->t_tid),
     936             :                         .oh_flags = XLOG_UNMOUNT_TRANS,
     937             :                 },
     938             :                 .ulf = {
     939             :                         .magic = XLOG_UNMOUNT_TYPE,
     940             :                 },
     941             :         };
     942       14117 :         struct xfs_log_iovec reg = {
     943             :                 .i_addr = &unmount_rec,
     944             :                 .i_len = sizeof(unmount_rec),
     945             :                 .i_type = XLOG_REG_TYPE_UNMOUNT,
     946             :         };
     947       14117 :         struct xfs_log_vec vec = {
     948             :                 .lv_niovecs = 1,
     949             :                 .lv_iovecp = &reg,
     950             :         };
     951       14117 :         LIST_HEAD(lv_chain);
     952       14117 :         list_add(&vec.lv_list, &lv_chain);
     953             : 
     954       14117 :         BUILD_BUG_ON((sizeof(struct xlog_op_header) +
     955             :                       sizeof(struct xfs_unmount_log_format)) !=
     956             :                                                         sizeof(unmount_rec));
     957             : 
     958             :         /* account for space used by record data */
     959       14117 :         ticket->t_curr_res -= sizeof(unmount_rec);
     960             : 
     961       14117 :         return xlog_write(log, NULL, &lv_chain, ticket, reg.i_len);
     962             : }
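
/*
 * Editorial aside: the BUILD_BUG_ON() above is a compile-time guard that
 * the anonymous on-stack struct picked up no padding between the ophdr
 * and the unmount log format; if it had, the single iovec would no longer
 * match the on-disk record layout that log recovery expects.
 */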
     963             : 
     964             : /*
     965             :  * Mark the filesystem clean by writing an unmount record to the head of the
     966             :  * log.
     967             :  */
     968             : static void
     969       14117 : xlog_unmount_write(
     970             :         struct xlog             *log)
     971             : {
     972       14117 :         struct xfs_mount        *mp = log->l_mp;
     973       14117 :         struct xlog_in_core     *iclog;
     974       14117 :         struct xlog_ticket      *tic = NULL;
     975       14117 :         int                     error;
     976             : 
     977       14117 :         error = xfs_log_reserve(mp, 600, 1, &tic, 0);
     978       14117 :         if (error)
     979           0 :                 goto out_err;
     980             : 
     981       14117 :         error = xlog_write_unmount_record(log, tic);
     982             :         /*
     983             :          * At this point, we're umounting anyway, so there's no point in
     984             :          * transitioning log state to shutdown. Just continue...
     985             :          */
     986       14117 : out_err:
     987       14117 :         if (error)
     988           0 :                 xfs_alert(mp, "%s: unmount record failed", __func__);
     989             : 
     990       14117 :         spin_lock(&log->l_icloglock);
     991       14117 :         iclog = log->l_iclog;
     992       14117 :         error = xlog_force_iclog(iclog);
     993       14117 :         xlog_wait_on_iclog(iclog);
     994             : 
     995       14117 :         if (tic) {
     996       14117 :                 trace_xfs_log_umount_write(log, tic);
     997       14117 :                 xfs_log_ticket_ungrant(log, tic);
     998             :         }
     999       14117 : }
    1000             : 
    1001             : static void
    1002       14117 : xfs_log_unmount_verify_iclog(
    1003             :         struct xlog             *log)
    1004             : {
    1005       14117 :         struct xlog_in_core     *iclog = log->l_iclog;
    1006             : 
    1007      112924 :         do {
    1008      112924 :                 ASSERT(iclog->ic_state == XLOG_STATE_ACTIVE);
    1009      112924 :                 ASSERT(iclog->ic_offset == 0);
    1010      112924 :         } while ((iclog = iclog->ic_next) != log->l_iclog);
    1011       14117 : }
    1012             : 
    1013             : /*
     1014             :  * The unmount record used to have a string "Unmount filesystem--" in the
     1015             :  * data section where the "Un" was really a magic number (XLOG_UNMOUNT_TYPE).
     1016             :  * We just write the magic number now since that particular field isn't
     1017             :  * currently architecture converted and "Unmount" is a bit of a misnomer.
    1018             :  * As far as I know, there weren't any dependencies on the old behaviour.
    1019             :  */
    1020             : static void
    1021       24959 : xfs_log_unmount_write(
    1022             :         struct xfs_mount        *mp)
    1023             : {
    1024       24959 :         struct xlog             *log = mp->m_log;
    1025             : 
    1026       24959 :         if (!xfs_log_writable(mp))
    1027             :                 return;
    1028             : 
    1029       14208 :         xfs_log_force(mp, XFS_LOG_SYNC);
    1030             : 
    1031       28416 :         if (xlog_is_shutdown(log))
    1032             :                 return;
    1033             : 
    1034             :         /*
    1035             :          * If we think the summary counters are bad, avoid writing the unmount
    1036             :          * record to force log recovery at next mount, after which the summary
    1037             :          * counters will be recalculated.  Refer to xlog_check_unmount_rec for
    1038             :          * more details.
    1039             :          */
    1040       14208 :         if (XFS_TEST_ERROR(xfs_fs_has_sickness(mp, XFS_SICK_FS_COUNTERS), mp,
    1041             :                         XFS_ERRTAG_FORCE_SUMMARY_RECALC)) {
    1042          91 :                 xfs_alert(mp, "%s: will fix summary counters at next mount",
    1043             :                                 __func__);
    1044          91 :                 return;
    1045             :         }
    1046             : 
    1047       14117 :         xfs_log_unmount_verify_iclog(log);
    1048       14117 :         xlog_unmount_write(log);
    1049             : }
    1050             : 
    1051             : /*
    1052             :  * Empty the log for unmount/freeze.
    1053             :  *
    1054             :  * To do this, we first need to shut down the background log work so it is not
    1055             :  * trying to cover the log as we clean up. We then need to unpin all objects in
    1056             :  * the log so we can then flush them out. Once they have completed their IO and
    1057             :  * run the callbacks removing themselves from the AIL, we can cover the log.
    1058             :  */
    1059             : int
    1060       71004 : xfs_log_quiesce(
    1061             :         struct xfs_mount        *mp)
    1062             : {
    1063             :         /*
    1064             :          * Clear log incompat features since we're quiescing the log.  Report
    1065             :          * failures, though it's not fatal to have a higher log feature
    1066             :          * protection level than the log contents actually require.
    1067             :          */
    1068       71004 :         if (xfs_clear_incompat_log_features(mp)) {
    1069           0 :                 int error;
    1070             : 
    1071           0 :                 error = xfs_sync_sb(mp, false);
    1072           0 :                 if (error)
    1073           0 :                         xfs_warn(mp,
    1074             :         "Failed to clear log incompat features on quiesce");
    1075             :         }
    1076             : 
    1077       71004 :         cancel_delayed_work_sync(&mp->m_log->l_work);
    1078       71004 :         xfs_log_force(mp, XFS_LOG_SYNC);
    1079             : 
    1080             :         /*
    1081             :          * The superblock buffer is uncached and while xfs_ail_push_all_sync()
    1082             :          * will push it, xfs_buftarg_wait() will not wait for it. Further,
    1083             :          * xfs_buf_iowait() cannot be used because it was pushed with the
    1084             :          * XBF_ASYNC flag set, so we need to use a lock/unlock pair to wait for
    1085             :          * the IO to complete.
    1086             :          */
    1087       71004 :         xfs_ail_push_all_sync(mp->m_ail);
    1088       71004 :         xfs_buftarg_wait(mp->m_ddev_targp);
    1089       71004 :         xfs_buf_lock(mp->m_sb_bp);
    1090       71004 :         xfs_buf_unlock(mp->m_sb_bp);
    1091             : 
    1092       71004 :         return xfs_log_cover(mp);
    1093             : }
    1094             : 
    1095             : void
    1096        2473 : xfs_log_clean(
    1097             :         struct xfs_mount        *mp)
    1098             : {
    1099        2473 :         xfs_log_quiesce(mp);
    1100       24959 :         xfs_log_unmount_write(mp);
    1101        2473 : }
    1102             : 
    1103             : /*
     1104             :  * Shut down and release the AIL and log.
    1105             :  *
    1106             :  * During unmount, we need to ensure we flush all the dirty metadata objects
    1107             :  * from the AIL so that the log is empty before we write the unmount record to
    1108             :  * the log. Once this is done, we can tear down the AIL and the log.
    1109             :  */
    1110             : void
    1111       22486 : xfs_log_unmount(
    1112             :         struct xfs_mount        *mp)
    1113             : {
    1114       22486 :         xfs_log_clean(mp);
    1115             : 
    1116             :         /*
    1117             :          * If shutdown has come from iclog IO context, the log
    1118             :          * cleaning will have been skipped and so we need to wait
    1119             :          * for the iclog to complete shutdown processing before we
    1120             :          * tear anything down.
    1121             :          */
    1122       22486 :         xlog_wait_iclog_completion(mp->m_log);
    1123             : 
    1124       22486 :         xfs_buftarg_drain(mp->m_ddev_targp);
    1125             : 
    1126       22486 :         xfs_trans_ail_destroy(mp);
    1127             : 
    1128       22486 :         xfs_sysfs_del(&mp->m_log->l_kobj);
    1129             : 
    1130       22486 :         xlog_dealloc_log(mp->m_log);
    1131       22486 : }
    1132             : 
    1133             : void
    1134  4705048811 : xfs_log_item_init(
    1135             :         struct xfs_mount        *mp,
    1136             :         struct xfs_log_item     *item,
    1137             :         int                     type,
    1138             :         const struct xfs_item_ops *ops)
    1139             : {
    1140  4705048811 :         item->li_log = mp->m_log;
    1141  4705048811 :         item->li_ailp = mp->m_ail;
    1142  4705048811 :         item->li_type = type;
    1143  4705048811 :         item->li_ops = ops;
    1144  4705048811 :         item->li_lv = NULL;
    1145             : 
    1146  4705048811 :         INIT_LIST_HEAD(&item->li_ail);
    1147  4705048811 :         INIT_LIST_HEAD(&item->li_cil);
    1148  4705048811 :         INIT_LIST_HEAD(&item->li_bio_list);
    1149  4705048811 :         INIT_LIST_HEAD(&item->li_trans);
    1150  4705048811 : }
    1151             : 
    1152             : /*
    1153             :  * Wake up processes waiting for log space after we have moved the log tail.
    1154             :  */
    1155             : void
    1156   848038367 : xfs_log_space_wake(
    1157             :         struct xfs_mount        *mp)
    1158             : {
    1159   848038367 :         struct xlog             *log = mp->m_log;
    1160   848038367 :         int                     free_bytes;
    1161             : 
    1162  1696076734 :         if (xlog_is_shutdown(log))
    1163       48588 :                 return;
    1164             : 
    1165   847989779 :         if (!list_empty_careful(&log->l_write_head.waiters)) {
    1166          52 :                 ASSERT(!xlog_in_recovery(log));
    1167             : 
    1168          26 :                 spin_lock(&log->l_write_head.lock);
    1169          26 :                 free_bytes = xlog_space_left(log, &log->l_write_head.grant);
    1170          26 :                 xlog_grant_head_wake(log, &log->l_write_head, &free_bytes);
    1171          26 :                 spin_unlock(&log->l_write_head.lock);
    1172             :         }
    1173             : 
    1174   848005880 :         if (!list_empty_careful(&log->l_reserve_head.waiters)) {
    1175     5176368 :                 ASSERT(!xlog_in_recovery(log));
    1176             : 
    1177     2588184 :                 spin_lock(&log->l_reserve_head.lock);
    1178     2588211 :                 free_bytes = xlog_space_left(log, &log->l_reserve_head.grant);
    1179     2588211 :                 xlog_grant_head_wake(log, &log->l_reserve_head, &free_bytes);
    1180     2588211 :                 spin_unlock(&log->l_reserve_head.lock);
    1181             :         }
    1182             : }
    1183             : 
    1184             : /*
    1185             :  * Determine if we have a transaction that has gone to disk that needs to be
     1186             :  * covered. To begin the transition to the idle state, the log first needs to
     1187             :  * be idle. That means the CIL, the AIL and the iclogs all need to be empty
     1188             :  * before we start attempting to cover the log.
    1189             :  *
     1190             :  * Only if we are then in a state where covering is needed is the caller
    1191             :  * informed that dummy transactions are required to move the log into the idle
    1192             :  * state.
    1193             :  *
     1194             :  * If there are any items in the AIL or CIL, then we do not want to attempt to
    1195             :  * cover the log as we may be in a situation where there isn't log space
    1196             :  * available to run a dummy transaction and this can lead to deadlocks when the
    1197             :  * tail of the log is pinned by an item that is modified in the CIL.  Hence
    1198             :  * there's no point in running a dummy transaction at this point because we
    1199             :  * can't start trying to idle the log until both the CIL and AIL are empty.
    1200             :  */
    1201             : static bool
    1202      182930 : xfs_log_need_covered(
    1203             :         struct xfs_mount        *mp)
    1204             : {
    1205      182930 :         struct xlog             *log = mp->m_log;
    1206      182930 :         bool                    needed = false;
    1207             : 
    1208      182930 :         if (!xlog_cil_empty(log))
    1209             :                 return false;
    1210             : 
    1211      179968 :         spin_lock(&log->l_icloglock);
    1212      179968 :         switch (log->l_covered_state) {
    1213             :         case XLOG_STATE_COVER_DONE:
    1214             :         case XLOG_STATE_COVER_DONE2:
    1215             :         case XLOG_STATE_COVER_IDLE:
    1216             :                 break;
    1217      109394 :         case XLOG_STATE_COVER_NEED:
    1218             :         case XLOG_STATE_COVER_NEED2:
    1219      109394 :                 if (xfs_ail_min_lsn(log->l_ailp))
    1220             :                         break;
    1221      109164 :                 if (!xlog_iclogs_empty(log))
    1222             :                         break;
    1223             : 
    1224      109164 :                 needed = true;
    1225      109164 :                 if (log->l_covered_state == XLOG_STATE_COVER_NEED)
    1226       54587 :                         log->l_covered_state = XLOG_STATE_COVER_DONE;
    1227             :                 else
    1228       54577 :                         log->l_covered_state = XLOG_STATE_COVER_DONE2;
    1229             :                 break;
    1230           0 :         default:
    1231           0 :                 needed = true;
    1232           0 :                 break;
    1233             :         }
    1234      179968 :         spin_unlock(&log->l_icloglock);
    1235      179968 :         return needed;
    1236             : }
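                      : 
                      : /*
                      :  * Editor's sketch (not part of xfs_log.c): covering needs two idle passes
                      :  * (two dummy superblock commits) to walk NEED -> DONE -> NEED2 -> DONE2,
                      :  * which is why xfs_log_cover() below loops.  A minimal userspace model
                      :  * of that progression, assuming the DONE -> NEED2 hand-off that the
                      :  * iclog cleaning code performs in the kernel:
                      :  */
                      : #include <stdbool.h>
                      : #include <stdio.h>
                      : 
                      : enum cover_state { COVER_IDLE, COVER_NEED, COVER_DONE, COVER_NEED2, COVER_DONE2 };
                      : 
                      : /* Returns true while another dummy commit is still required. */
                      : static bool cover_step(enum cover_state *s)
                      : {
                      :         switch (*s) {
                      :         case COVER_NEED:  *s = COVER_DONE;  return true;
                      :         case COVER_NEED2: *s = COVER_DONE2; return true;
                      :         default:          return false;
                      :         }
                      : }
                      : 
                      : int main(void)
                      : {
                      :         enum cover_state s = COVER_NEED;
                      :         int passes = 0;
                      : 
                      :         while (cover_step(&s)) {
                      :                 passes++;
                      :                 if (s == COVER_DONE)    /* iclog clean: DONE -> NEED2 */
                      :                         s = COVER_NEED2;
                      :         }
                      :         printf("log covered after %d dummy commits\n", passes);   /* 2 */
                      :         return 0;
                      : }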
    1237             : 
    1238             : /*
    1239             :  * Explicitly cover the log. This is similar to background log covering but
     1240             :  * intended for use in quiesce codepaths. The caller is responsible for ensuring
    1241             :  * the log is idle and suitable for covering. The CIL, iclog buffers and AIL
    1242             :  * must all be empty.
    1243             :  */
    1244             : static int
    1245       71004 : xfs_log_cover(
    1246             :         struct xfs_mount        *mp)
    1247             : {
    1248       71004 :         int                     error = 0;
    1249       71004 :         bool                    need_covered;
    1250             : 
    1251       77471 :         ASSERT((xlog_cil_empty(mp->m_log) && xlog_iclogs_empty(mp->m_log) &&
    1252             :                 !xfs_ail_min_lsn(mp->m_log->l_ailp)) ||
    1253             :                 xlog_is_shutdown(mp->m_log));
    1254             : 
    1255       71004 :         if (!xfs_log_writable(mp))
    1256             :                 return 0;
    1257             : 
    1258             :         /*
    1259             :          * xfs_log_need_covered() is not idempotent because it progresses the
    1260             :          * state machine if the log requires covering. Therefore, we must call
    1261             :          * this function once and use the result until we've issued an sb sync.
    1262             :          * Do so first to make that abundantly clear.
    1263             :          *
    1264             :          * Fall into the covering sequence if the log needs covering or the
    1265             :          * mount has lazy superblock accounting to sync to disk. The sb sync
    1266             :          * used for covering accumulates the in-core counters, so covering
    1267             :          * handles this for us.
    1268             :          */
    1269       60265 :         need_covered = xfs_log_need_covered(mp);
    1270       60265 :         if (!need_covered && !xfs_has_lazysbcount(mp))
    1271             :                 return 0;
    1272             : 
    1273             :         /*
    1274             :          * To cover the log, commit the superblock twice (at most) in
    1275             :          * independent checkpoints. The first serves as a reference for the
    1276             :          * tail pointer. The sync transaction and AIL push empties the AIL and
    1277             :          * updates the in-core tail to the LSN of the first checkpoint. The
    1278             :          * second commit updates the on-disk tail with the in-core LSN,
    1279             :          * covering the log. Push the AIL one more time to leave it empty, as
    1280             :          * we found it.
    1281             :          */
    1282      114796 :         do {
    1283      114796 :                 error = xfs_sync_sb(mp, true);
    1284      114796 :                 if (error)
    1285             :                         break;
    1286      114788 :                 xfs_ail_push_all_sync(mp->m_ail);
    1287      114788 :         } while (xfs_log_need_covered(mp));
    1288             : 
    1289             :         return error;
    1290             : }
    1291             : 
    1292             : /*
    1293             :  * We may be holding the log iclog lock upon entering this routine.
    1294             :  */
    1295             : xfs_lsn_t
    1296    13136197 : xlog_assign_tail_lsn_locked(
    1297             :         struct xfs_mount        *mp)
    1298             : {
    1299    13136197 :         struct xlog             *log = mp->m_log;
    1300    13136197 :         struct xfs_log_item     *lip;
    1301    13136197 :         xfs_lsn_t               tail_lsn;
    1302             : 
    1303    13136197 :         assert_spin_locked(&mp->m_ail->ail_lock);
    1304             : 
    1305             :         /*
    1306             :          * To make sure we always have a valid LSN for the log tail we keep
    1307             :          * track of the last LSN which was committed in log->l_last_sync_lsn,
     1308             :          * and use that when the AIL is empty.
    1309             :          */
    1310    13136197 :         lip = xfs_ail_min(mp->m_ail);
    1311    12497652 :         if (lip)
    1312    12497652 :                 tail_lsn = lip->li_lsn;
    1313             :         else
    1314      638545 :                 tail_lsn = atomic64_read(&log->l_last_sync_lsn);
    1315    13136197 :         trace_xfs_log_assign_tail_lsn(log, tail_lsn);
    1316    13136197 :         atomic64_set(&log->l_tail_lsn, tail_lsn);
    1317    13136197 :         return tail_lsn;
    1318             : }
    1319             : 
    1320             : xfs_lsn_t
    1321    12582668 : xlog_assign_tail_lsn(
    1322             :         struct xfs_mount        *mp)
    1323             : {
    1324    12582668 :         xfs_lsn_t               tail_lsn;
    1325             : 
    1326    12582668 :         spin_lock(&mp->m_ail->ail_lock);
    1327    12582668 :         tail_lsn = xlog_assign_tail_lsn_locked(mp);
    1328    12582665 :         spin_unlock(&mp->m_ail->ail_lock);
    1329             : 
    1330    12582668 :         return tail_lsn;
    1331             : }
    1332             : 
    1333             : /*
    1334             :  * Return the space in the log between the tail and the head.  The head
     1335             :  * is passed in as a packed cycle/bytes grant head.  In the special case where
     1336             :  * the reserve head has wrapped past the tail, this calculation is no
    1337             :  * longer valid.  In this case, just return 0 which means there is no space
    1338             :  * in the log.  This works for all places where this function is called
    1339             :  * with the reserve head.  Of course, if the write head were to ever
    1340             :  * wrap the tail, we should blow up.  Rather than catch this case here,
    1341             :  * we depend on other ASSERTions in other parts of the code.   XXXmiken
    1342             :  *
     1343             :  * If the reservation head is behind the tail, we have a problem. Warn about it,
    1344             :  * but then treat it as if the log is empty.
    1345             :  *
    1346             :  * If the log is shut down, the head and tail may be invalid or out of whack, so
    1347             :  * shortcut invalidity asserts in this case so that we don't trigger them
    1348             :  * falsely.
    1349             :  */
    1350             : STATIC int
    1351  2310208078 : xlog_space_left(
    1352             :         struct xlog     *log,
    1353             :         atomic64_t      *head)
    1354             : {
    1355  2310208078 :         int             tail_bytes;
    1356  2310208078 :         int             tail_cycle;
    1357  2310208078 :         int             head_cycle;
    1358  2310208078 :         int             head_bytes;
    1359             : 
    1360  2310208078 :         xlog_crack_grant_head(head, &head_cycle, &head_bytes);
    1361  2310208078 :         xlog_crack_atomic_lsn(&log->l_tail_lsn, &tail_cycle, &tail_bytes);
    1362  2310208078 :         tail_bytes = BBTOB(tail_bytes);
    1363  2310208078 :         if (tail_cycle == head_cycle && head_bytes >= tail_bytes)
    1364  1876755766 :                 return log->l_logsize - (head_bytes - tail_bytes);
    1365   433452312 :         if (tail_cycle + 1 < head_cycle)
    1366             :                 return 0;
    1367             : 
    1368             :         /* Ignore potential inconsistency when shutdown. */
    1369   866904620 :         if (xlog_is_shutdown(log))
    1370           0 :                 return log->l_logsize;
    1371             : 
    1372   433452310 :         if (tail_cycle < head_cycle) {
    1373   433452310 :                 ASSERT(tail_cycle == (head_cycle - 1));
    1374   433452310 :                 return tail_bytes - head_bytes;
    1375             :         }
    1376             : 
    1377             :         /*
    1378             :          * The reservation head is behind the tail. In this case we just want to
    1379             :          * return the size of the log as the amount of space left.
    1380             :          */
    1381           0 :         xfs_alert(log->l_mp, "xlog_space_left: head behind tail");
    1382           0 :         xfs_alert(log->l_mp, "  tail_cycle = %d, tail_bytes = %d",
    1383             :                   tail_cycle, tail_bytes);
    1384           0 :         xfs_alert(log->l_mp, "  GH   cycle = %d, GH   bytes = %d",
    1385             :                   head_cycle, head_bytes);
    1386           0 :         ASSERT(0);
    1387           0 :         return log->l_logsize;
    1388             : }
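                      : 
                      : /*
                      :  * Editor's sketch (not part of xfs_log.c): the head/tail arithmetic of
                      :  * xlog_space_left() in userspace, with BBTOB() expanded to << 9 and the
                      :  * shutdown/error paths omitted.  A head exactly one cycle ahead of the
                      :  * tail has wrapped around the circular log once.  Values are made up.
                      :  */
                      : #include <stdio.h>
                      : 
                      : static int space_left(int logsize, int tail_cycle, int tail_bb,
                      :                       int head_cycle, int head_bytes)
                      : {
                      :         int tail_bytes = tail_bb << 9;          /* BBTOB() */
                      : 
                      :         if (tail_cycle == head_cycle && head_bytes >= tail_bytes)
                      :                 return logsize - (head_bytes - tail_bytes);
                      :         if (tail_cycle + 1 < head_cycle)        /* invalid: report no space */
                      :                 return 0;
                      :         return tail_bytes - head_bytes;         /* head wrapped once */
                      : }
                      : 
                      : int main(void)
                      : {
                      :         int logsize = 10 * 1024 * 1024;         /* 10 MiB log */
                      : 
                      :         /* same cycle: head 1 MiB ahead of the tail leaves 9 MiB free */
                      :         printf("%d\n", space_left(logsize, 5, 2048, 5, 2 * 1024 * 1024));
                      :         /* head wrapped once: free space is tail minus head */
                      :         printf("%d\n", space_left(logsize, 5, 16384, 6, 1024 * 1024));
                      :         return 0;
                      : }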
    1389             : 
    1390             : 
    1391             : static void
    1392    12570792 : xlog_ioend_work(
    1393             :         struct work_struct      *work)
    1394             : {
    1395    12570792 :         struct xlog_in_core     *iclog =
    1396    12570792 :                 container_of(work, struct xlog_in_core, ic_end_io_work);
    1397    12570792 :         struct xlog             *log = iclog->ic_log;
    1398    12570792 :         int                     error;
    1399             : 
    1400    12570792 :         error = blk_status_to_errno(iclog->ic_bio.bi_status);
    1401             : #ifdef DEBUG
    1402             :         /* treat writes with injected CRC errors as failed */
    1403    12570792 :         if (iclog->ic_fail_crc)
    1404             :                 error = -EIO;
    1405             : #endif
    1406             : 
    1407             :         /*
    1408             :          * Race to shutdown the filesystem if we see an error.
    1409             :          */
    1410    12570781 :         if (XFS_TEST_ERROR(error, log->l_mp, XFS_ERRTAG_IODONE_IOERR)) {
    1411        9724 :                 xfs_alert(log->l_mp, "log I/O error %d", error);
    1412        9724 :                 xlog_force_shutdown(log, SHUTDOWN_LOG_IO_ERROR);
    1413             :         }
    1414             : 
    1415    12570792 :         xlog_state_done_syncing(iclog);
    1416    12570792 :         bio_uninit(&iclog->ic_bio);
    1417             : 
    1418             :         /*
    1419             :          * Drop the lock to signal that we are done. Nothing references the
    1420             :          * iclog after this, so an unmount waiting on this lock can now tear it
    1421             :          * down safely. As such, it is unsafe to reference the iclog after the
    1422             :          * unlock as we could race with it being freed.
    1423             :          */
    1424    12570792 :         up(&iclog->ic_sema);
    1425    12570792 : }
    1426             : 
    1427             : /*
    1428             :  * Return size of each in-core log record buffer.
    1429             :  *
    1430             :  * All machines get 8 x 32kB buffers by default, unless tuned otherwise.
    1431             :  *
    1432             :  * If the filesystem blocksize is too large, we may need to choose a
    1433             :  * larger size since the directory code currently logs entire blocks.
    1434             :  */
    1435             : STATIC void
    1436       22491 : xlog_get_iclog_buffer_size(
    1437             :         struct xfs_mount        *mp,
    1438             :         struct xlog             *log)
    1439             : {
    1440       22491 :         if (mp->m_logbufs <= 0)
    1441       22487 :                 mp->m_logbufs = XLOG_MAX_ICLOGS;
    1442       22491 :         if (mp->m_logbsize <= 0)
    1443       22285 :                 mp->m_logbsize = XLOG_BIG_RECORD_BSIZE;
    1444             : 
    1445       22491 :         log->l_iclog_bufs = mp->m_logbufs;
    1446       22491 :         log->l_iclog_size = mp->m_logbsize;
    1447             : 
    1448             :         /*
     1449             :          * Number of headers = size / 32k; one header holds the cycle data for 32k of data.
    1450             :          */
    1451       22491 :         log->l_iclog_heads =
    1452       22491 :                 DIV_ROUND_UP(mp->m_logbsize, XLOG_HEADER_CYCLE_SIZE);
    1453       22491 :         log->l_iclog_hsize = log->l_iclog_heads << BBSHIFT;
    1454       22491 : }
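                      : 
                      : /*
                      :  * Editor's sketch (not part of xfs_log.c): the header sizing above in
                      :  * plain numbers.  One record header carries the cycle data for 32k of
                      :  * log data (XLOG_HEADER_CYCLE_SIZE) and occupies one 512-byte basic
                      :  * block (BBSHIFT == 9).
                      :  */
                      : #include <stdio.h>
                      : 
                      : int main(void)
                      : {
                      :         int cycle_size = 32 * 1024;     /* XLOG_HEADER_CYCLE_SIZE */
                      :         int sizes[] = { 32 * 1024, 64 * 1024, 256 * 1024 };
                      : 
                      :         for (int i = 0; i < 3; i++) {
                      :                 int heads = (sizes[i] + cycle_size - 1) / cycle_size;
                      : 
                      :                 /* 32k -> 1 head/512B, 64k -> 2/1024B, 256k -> 8/4096B */
                      :                 printf("logbsize %6d -> %d header(s), hsize %d bytes\n",
                      :                        sizes[i], heads, heads << 9);
                      :         }
                      :         return 0;
                      : }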
    1455             : 
    1456             : void
    1457       76820 : xfs_log_work_queue(
    1458             :         struct xfs_mount        *mp)
    1459             : {
    1460       76820 :         queue_delayed_work(mp->m_sync_workqueue, &mp->m_log->l_work,
    1461       76820 :                                 msecs_to_jiffies(xfs_syncd_centisecs * 10));
    1462       76820 : }
    1463             : 
    1464             : /*
    1465             :  * Clear the log incompat flags if we have the opportunity.
    1466             :  *
    1467             :  * This only happens if we're about to log the second dummy transaction as part
    1468             :  * of covering the log and we can get the log incompat feature usage lock.
    1469             :  */
    1470             : static inline void
    1471          93 : xlog_clear_incompat(
    1472             :         struct xlog             *log)
    1473             : {
    1474          93 :         struct xfs_mount        *mp = log->l_mp;
    1475             : 
    1476          93 :         if (!xfs_sb_has_incompat_log_feature(&mp->m_sb,
    1477             :                                 XFS_SB_FEAT_INCOMPAT_LOG_ALL))
    1478             :                 return;
    1479             : 
    1480           0 :         if (log->l_covered_state != XLOG_STATE_COVER_DONE2)
    1481             :                 return;
    1482             : 
    1483           0 :         if (!down_write_trylock(&log->l_incompat_users))
    1484             :                 return;
    1485             : 
    1486           0 :         xfs_clear_incompat_log_features(mp);
    1487           0 :         up_write(&log->l_incompat_users);
    1488             : }
    1489             : 
    1490             : /*
    1491             :  * Every sync period we need to unpin all items in the AIL and push them to
    1492             :  * disk. If there is nothing dirty, then we might need to cover the log to
    1493             :  * indicate that the filesystem is idle.
    1494             :  */
    1495             : static void
    1496        7875 : xfs_log_worker(
    1497             :         struct work_struct      *work)
    1498             : {
    1499        7875 :         struct xlog             *log = container_of(to_delayed_work(work),
    1500             :                                                 struct xlog, l_work);
    1501        7875 :         struct xfs_mount        *mp = log->l_mp;
    1502             : 
    1503             :         /* dgc: errors ignored - not fatal and nowhere to report them */
    1504        7875 :         if (xfs_fs_writable(mp, SB_FREEZE_WRITE) && xfs_log_need_covered(mp)) {
    1505             :                 /*
    1506             :                  * Dump a transaction into the log that contains no real change.
    1507             :                  * This is needed to stamp the current tail LSN into the log
    1508             :                  * during the covering operation.
    1509             :                  *
    1510             :                  * We cannot use an inode here for this - that will push dirty
    1511             :                  * state back up into the VFS and then periodic inode flushing
    1512             :                  * will prevent log covering from making progress. Hence we
    1513             :                  * synchronously log the superblock instead to ensure the
    1514             :                  * superblock is immediately unpinned and can be written back.
    1515             :                  */
    1516          93 :                 xlog_clear_incompat(log);
    1517          93 :                 xfs_sync_sb(mp, true);
    1518             :         } else
    1519        7783 :                 xfs_log_force(mp, 0);
    1520             : 
    1521             :         /* start pushing all the metadata that is currently dirty */
    1522        7881 :         xfs_ail_push_all(mp->m_ail);
    1523             : 
    1524             :         /* queue us up again */
    1525        7881 :         xfs_log_work_queue(mp);
    1526        7881 : }
    1527             : 
    1528             : /*
    1529             :  * This routine initializes some of the log structure for a given mount point.
     1530             :  * Its primary purpose is to fill in enough so that recovery can occur.  However,
    1531             :  * some other stuff may be filled in too.
    1532             :  */
    1533             : STATIC struct xlog *
    1534       22491 : xlog_alloc_log(
    1535             :         struct xfs_mount        *mp,
    1536             :         struct xfs_buftarg      *log_target,
    1537             :         xfs_daddr_t             blk_offset,
    1538             :         int                     num_bblks)
    1539             : {
    1540       22491 :         struct xlog             *log;
    1541       22491 :         xlog_rec_header_t       *head;
    1542       22491 :         xlog_in_core_t          **iclogp;
    1543       22491 :         xlog_in_core_t          *iclog, *prev_iclog=NULL;
    1544       22491 :         int                     i;
    1545       22491 :         int                     error = -ENOMEM;
    1546       22491 :         uint                    log2_size = 0;
    1547             : 
    1548       22491 :         log = kmem_zalloc(sizeof(struct xlog), KM_MAYFAIL);
    1549       22491 :         if (!log) {
    1550           0 :                 xfs_warn(mp, "Log allocation failed: No memory!");
    1551           0 :                 goto out;
    1552             :         }
    1553             : 
    1554       22491 :         log->l_mp       = mp;
    1555       22491 :         log->l_targ     = log_target;
    1556       22491 :         log->l_logsize     = BBTOB(num_bblks);
    1557       22491 :         log->l_logBBstart  = blk_offset;
    1558       22491 :         log->l_logBBsize   = num_bblks;
    1559       22491 :         log->l_covered_state = XLOG_STATE_COVER_IDLE;
    1560       22491 :         set_bit(XLOG_ACTIVE_RECOVERY, &log->l_opstate);
    1561       22491 :         INIT_DELAYED_WORK(&log->l_work, xfs_log_worker);
    1562             : 
    1563       22491 :         log->l_prev_block  = -1;
    1564             :         /* log->l_tail_lsn = 0x100000000LL; cycle = 1; current block = 0 */
    1565       22491 :         xlog_assign_atomic_lsn(&log->l_tail_lsn, 1, 0);
    1566       22491 :         xlog_assign_atomic_lsn(&log->l_last_sync_lsn, 1, 0);
    1567       22491 :         log->l_curr_cycle  = 1;          /* 0 is bad since this is initial value */
    1568             : 
    1569       22491 :         if (xfs_has_logv2(mp) && mp->m_sb.sb_logsunit > 1)
    1570       22214 :                 log->l_iclog_roundoff = mp->m_sb.sb_logsunit;
    1571             :         else
    1572         277 :                 log->l_iclog_roundoff = BBSIZE;
    1573             : 
    1574       22491 :         xlog_grant_head_init(&log->l_reserve_head);
    1575       22491 :         xlog_grant_head_init(&log->l_write_head);
    1576             : 
    1577       22491 :         error = -EFSCORRUPTED;
    1578       22491 :         if (xfs_has_sector(mp)) {
    1579       22186 :                 log2_size = mp->m_sb.sb_logsectlog;
    1580       22186 :                 if (log2_size < BBSHIFT) {
    1581           0 :                         xfs_warn(mp, "Log sector size too small (0x%x < 0x%x)",
    1582             :                                 log2_size, BBSHIFT);
    1583           0 :                         goto out_free_log;
    1584             :                 }
    1585             : 
    1586       22186 :                 log2_size -= BBSHIFT;
    1587       22186 :                 if (log2_size > mp->m_sectbb_log) {
    1588           0 :                         xfs_warn(mp, "Log sector size too large (0x%x > 0x%x)",
    1589             :                                 log2_size, mp->m_sectbb_log);
    1590           0 :                         goto out_free_log;
    1591             :                 }
    1592             : 
    1593             :                 /* for larger sector sizes, must have v2 or external log */
    1594       22186 :                 if (log2_size && log->l_logBBstart > 0 &&
    1595             :                             !xfs_has_logv2(mp)) {
    1596           0 :                         xfs_warn(mp,
    1597             :                 "log sector size (0x%x) invalid for configuration.",
    1598             :                                 log2_size);
    1599           0 :                         goto out_free_log;
    1600             :                 }
    1601             :         }
    1602       22186 :         log->l_sectBBsize = 1 << log2_size;
    1603             : 
    1604       22491 :         init_rwsem(&log->l_incompat_users);
    1605             : 
    1606       22491 :         xlog_get_iclog_buffer_size(mp, log);
    1607             : 
    1608       22491 :         spin_lock_init(&log->l_icloglock);
    1609       22491 :         init_waitqueue_head(&log->l_flush_wait);
    1610             : 
    1611       22491 :         iclogp = &log->l_iclog;
    1612             :         /*
    1613             :          * The amount of memory to allocate for the iclog structure is
    1614             :          * rather funky due to the way the structure is defined.  It is
    1615             :          * done this way so that we can use different sizes for machines
    1616             :          * with different amounts of memory.  See the definition of
    1617             :          * xlog_in_core_t in xfs_log_priv.h for details.
    1618             :          */
    1619       22491 :         ASSERT(log->l_iclog_size >= 4096);
    1620      202407 :         for (i = 0; i < log->l_iclog_bufs; i++) {
    1621      179916 :                 size_t bvec_size = howmany(log->l_iclog_size, PAGE_SIZE) *
    1622             :                                 sizeof(struct bio_vec);
    1623             : 
    1624      179916 :                 iclog = kmem_zalloc(sizeof(*iclog) + bvec_size, KM_MAYFAIL);
    1625      179916 :                 if (!iclog)
    1626           0 :                         goto out_free_iclog;
    1627             : 
    1628      179916 :                 *iclogp = iclog;
    1629      179916 :                 iclog->ic_prev = prev_iclog;
    1630      179916 :                 prev_iclog = iclog;
    1631             : 
    1632      179916 :                 iclog->ic_data = kvzalloc(log->l_iclog_size,
    1633             :                                 GFP_KERNEL | __GFP_RETRY_MAYFAIL);
    1634      179916 :                 if (!iclog->ic_data)
    1635           0 :                         goto out_free_iclog;
    1636      179916 :                 head = &iclog->ic_header;
    1637      179916 :                 memset(head, 0, sizeof(xlog_rec_header_t));
    1638      179916 :                 head->h_magicno = cpu_to_be32(XLOG_HEADER_MAGIC_NUM);
    1639      179916 :                 head->h_version = cpu_to_be32(
    1640             :                         xfs_has_logv2(log->l_mp) ? 2 : 1);
    1641      179916 :                 head->h_size = cpu_to_be32(log->l_iclog_size);
    1642             :                 /* new fields */
    1643      179916 :                 head->h_fmt = cpu_to_be32(XLOG_FMT);
    1644      359832 :                 memcpy(&head->h_fs_uuid, &mp->m_sb.sb_uuid, sizeof(uuid_t));
    1645             : 
    1646      179916 :                 iclog->ic_size = log->l_iclog_size - log->l_iclog_hsize;
    1647      179916 :                 iclog->ic_state = XLOG_STATE_ACTIVE;
    1648      179916 :                 iclog->ic_log = log;
    1649      179916 :                 atomic_set(&iclog->ic_refcnt, 0);
    1650      179916 :                 INIT_LIST_HEAD(&iclog->ic_callbacks);
    1651      179916 :                 iclog->ic_datap = (void *)iclog->ic_data + log->l_iclog_hsize;
    1652             : 
    1653      179916 :                 init_waitqueue_head(&iclog->ic_force_wait);
    1654      179916 :                 init_waitqueue_head(&iclog->ic_write_wait);
    1655      179916 :                 INIT_WORK(&iclog->ic_end_io_work, xlog_ioend_work);
    1656      179916 :                 sema_init(&iclog->ic_sema, 1);
    1657             : 
    1658      179916 :                 iclogp = &iclog->ic_next;
    1659             :         }
    1660       22491 :         *iclogp = log->l_iclog;                      /* complete ring */
    1661       22491 :         log->l_iclog->ic_prev = prev_iclog;       /* re-write 1st prev ptr */
    1662             : 
    1663       44982 :         log->l_ioend_workqueue = alloc_workqueue("xfs-log/%s",
    1664             :                         XFS_WQFLAGS(WQ_FREEZABLE | WQ_MEM_RECLAIM |
    1665             :                                     WQ_HIGHPRI),
    1666       22491 :                         0, mp->m_super->s_id);
    1667       22491 :         if (!log->l_ioend_workqueue)
    1668           0 :                 goto out_free_iclog;
    1669             : 
    1670       22491 :         error = xlog_cil_init(log);
    1671       22491 :         if (error)
    1672           0 :                 goto out_destroy_workqueue;
    1673             :         return log;
    1674             : 
    1675             : out_destroy_workqueue:
    1676           0 :         destroy_workqueue(log->l_ioend_workqueue);
    1677           0 : out_free_iclog:
    1678           0 :         for (iclog = log->l_iclog; iclog; iclog = prev_iclog) {
    1679           0 :                 prev_iclog = iclog->ic_next;
    1680           0 :                 kmem_free(iclog->ic_data);
    1681           0 :                 kmem_free(iclog);
    1682           0 :                 if (prev_iclog == log->l_iclog)
    1683             :                         break;
    1684             :         }
    1685           0 : out_free_log:
    1686           0 :         kmem_free(log);
    1687           0 : out:
    1688           0 :         return ERR_PTR(error);
    1689             : }       /* xlog_alloc_log */
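                      : 
                      : /*
                      :  * Editor's sketch (not part of xfs_log.c): the allocation loop above
                      :  * links the iclogs into a circular doubly linked ring, then closes it by
                      :  * pointing the last ->ic_next at the first and rewriting the first
                      :  * ->ic_prev.  A minimal model of that pointer-to-pointer construction
                      :  * (allocation failures ignored for brevity):
                      :  */
                      : #include <stdio.h>
                      : #include <stdlib.h>
                      : 
                      : struct node { struct node *next, *prev; int id; };
                      : 
                      : static struct node *build_ring(int n)
                      : {
                      :         struct node *first = NULL, *prev = NULL, **linkp = &first;
                      : 
                      :         for (int i = 0; i < n; i++) {
                      :                 struct node *nd = calloc(1, sizeof(*nd));
                      : 
                      :                 nd->id = i;
                      :                 nd->prev = prev;
                      :                 *linkp = nd;
                      :                 prev = nd;
                      :                 linkp = &nd->next;
                      :         }
                      :         *linkp = first;         /* complete ring */
                      :         first->prev = prev;     /* re-write 1st prev ptr */
                      :         return first;
                      : }
                      : 
                      : int main(void)
                      : {
                      :         struct node *head = build_ring(8), *it = head;
                      : 
                      :         do {
                      :                 printf("%d ", it->id);  /* 0 1 2 3 4 5 6 7 */
                      :         } while ((it = it->next) != head);
                      :         printf("\n");
                      :         return 0;
                      : }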
    1690             : 
    1691             : /*
    1692             :  * Compute the LSN that we'd need to push the log tail towards in order to have
    1693             :  * (a) enough on-disk log space to log the number of bytes specified, (b) at
    1694             :  * least 25% of the log space free, and (c) at least 256 blocks free.  If the
    1695             :  * log free space already meets all three thresholds, this function returns
    1696             :  * NULLCOMMITLSN.
    1697             :  */
    1698             : xfs_lsn_t
    1699  1303591355 : xlog_grant_push_threshold(
    1700             :         struct xlog     *log,
    1701             :         int             need_bytes)
    1702             : {
    1703  1303591355 :         xfs_lsn_t       threshold_lsn = 0;
    1704  1303591355 :         xfs_lsn_t       last_sync_lsn;
    1705  1303591355 :         int             free_blocks;
    1706  1303591355 :         int             free_bytes;
    1707  1303591355 :         int             threshold_block;
    1708  1303591355 :         int             threshold_cycle;
    1709  1303591355 :         int             free_threshold;
    1710             : 
    1711  1303591355 :         ASSERT(BTOBB(need_bytes) < log->l_logBBsize);
    1712             : 
    1713  1303591355 :         free_bytes = xlog_space_left(log, &log->l_reserve_head.grant);
    1714  1303650190 :         free_blocks = BTOBBT(free_bytes);
    1715             : 
    1716             :         /*
    1717             :          * Set the threshold for the minimum number of free blocks in the
    1718             :          * log to the maximum of what the caller needs, one quarter of the
    1719             :          * log, and 256 blocks.
    1720             :          */
    1721  1303650190 :         free_threshold = BTOBB(need_bytes);
    1722  1303650190 :         free_threshold = max(free_threshold, (log->l_logBBsize >> 2));
    1723  1303650190 :         free_threshold = max(free_threshold, 256);
    1724  1303650190 :         if (free_blocks >= free_threshold)
    1725             :                 return NULLCOMMITLSN;
    1726             : 
    1727     8112045 :         xlog_crack_atomic_lsn(&log->l_tail_lsn, &threshold_cycle,
    1728             :                                                 &threshold_block);
    1729     8112045 :         threshold_block += free_threshold;
    1730     8112045 :         if (threshold_block >= log->l_logBBsize) {
    1731     1727654 :                 threshold_block -= log->l_logBBsize;
    1732     1727654 :                 threshold_cycle += 1;
    1733             :         }
    1734     8112045 :         threshold_lsn = xlog_assign_lsn(threshold_cycle,
    1735             :                                         threshold_block);
    1736             :         /*
    1737             :          * Don't pass in an lsn greater than the lsn of the last
    1738             :          * log record known to be on disk. Use a snapshot of the last sync lsn
    1739             :          * so that it doesn't change between the compare and the set.
    1740             :          */
    1741     8112045 :         last_sync_lsn = atomic64_read(&log->l_last_sync_lsn);
    1742     8112045 :         if (XFS_LSN_CMP(threshold_lsn, last_sync_lsn) > 0)
    1743             :                 threshold_lsn = last_sync_lsn;
    1744             : 
    1745             :         return threshold_lsn;
    1746             : }
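                      : 
                      : /*
                      :  * Editor's sketch (not part of xfs_log.c): the three thresholds above
                      :  * worked through in 512-byte basic blocks, including the cycle wrap of
                      :  * the push target.  All values here are made up for illustration.
                      :  */
                      : #include <stdio.h>
                      : 
                      : int main(void)
                      : {
                      :         int log_bb = 131072;                    /* 64 MiB log in basic blocks */
                      :         int need_bb = (8192 + 511) >> 9;        /* BTOBB(need_bytes) */
                      :         int threshold = need_bb;
                      : 
                      :         if (threshold < log_bb >> 2)            /* (b) keep 25% of the log free */
                      :                 threshold = log_bb >> 2;
                      :         if (threshold < 256)                    /* (c) keep 256 blocks free */
                      :                 threshold = 256;
                      : 
                      :         /* push target: tail + threshold, wrapping into the next cycle */
                      :         int tail_cycle = 7, tail_block = 120000;
                      :         int tgt_cycle = tail_cycle;
                      :         int tgt_block = tail_block + threshold;
                      : 
                      :         if (tgt_block >= log_bb) {
                      :                 tgt_block -= log_bb;
                      :                 tgt_cycle++;
                      :         }
                      :         printf("threshold %d BBs, push to cycle %d block %d\n",
                      :                threshold, tgt_cycle, tgt_block);      /* 32768, 8, 21696 */
                      :         return 0;
                      : }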
    1747             : 
    1748             : /*
    1749             :  * Push the tail of the log if we need to do so to maintain the free log space
    1750             :  * thresholds set out by xlog_grant_push_threshold.  We may need to adopt a
     1751             :  * policy that pushes on an lsn further along in the log once we
    1752             :  * reach the high water mark.  In this manner, we would be creating a low water
    1753             :  * mark.
    1754             :  */
    1755             : STATIC void
    1756  1303287035 : xlog_grant_push_ail(
    1757             :         struct xlog     *log,
    1758             :         int             need_bytes)
    1759             : {
    1760  1303287035 :         xfs_lsn_t       threshold_lsn;
    1761             : 
    1762  1303287035 :         threshold_lsn = xlog_grant_push_threshold(log, need_bytes);
    1763  1311578647 :         if (threshold_lsn == NULLCOMMITLSN || xlog_is_shutdown(log))
    1764             :                 return;
    1765             : 
    1766             :         /*
    1767             :          * Get the transaction layer to kick the dirty buffers out to
    1768             :          * disk asynchronously. No point in trying to do this if
    1769             :          * the filesystem is shutting down.
    1770             :          */
    1771     8111325 :         xfs_ail_push(log->l_ailp, threshold_lsn);
    1772             : }
    1773             : 
    1774             : /*
    1775             :  * Stamp cycle number in every block
    1776             :  */
    1777             : STATIC void
    1778    12570752 : xlog_pack_data(
    1779             :         struct xlog             *log,
    1780             :         struct xlog_in_core     *iclog,
    1781             :         int                     roundoff)
    1782             : {
    1783    12570752 :         int                     i, j, k;
    1784    12570752 :         int                     size = iclog->ic_offset + roundoff;
    1785    12570752 :         __be32                  cycle_lsn;
    1786    12570752 :         char                    *dp;
    1787             : 
    1788    12570752 :         cycle_lsn = CYCLE_LSN_DISK(iclog->ic_header.h_lsn);
    1789             : 
    1790    12570752 :         dp = iclog->ic_datap;
    1791   710520904 :         for (i = 0; i < BTOBB(size); i++) {
    1792   697952241 :                 if (i >= (XLOG_HEADER_CYCLE_SIZE / BBSIZE))
    1793             :                         break;
    1794   697950114 :                 iclog->ic_header.h_cycle_data[i] = *(__be32 *)dp;
    1795   697950152 :                 *(__be32 *)dp = cycle_lsn;
    1796   697950152 :                 dp += BBSIZE;
    1797             :         }
    1798             : 
    1799    12570790 :         if (xfs_has_logv2(log->l_mp)) {
    1800    12570778 :                 xlog_in_core_2_t *xhdr = iclog->ic_data;
    1801             : 
    1802    13186742 :                 for ( ; i < BTOBB(size); i++) {
    1803      616000 :                         j = i / (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
    1804      616000 :                         k = i % (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
    1805      616000 :                         xhdr[j].hic_xheader.xh_cycle_data[k] = *(__be32 *)dp;
    1806      615964 :                         *(__be32 *)dp = cycle_lsn;
    1807      615964 :                         dp += BBSIZE;
    1808             :                 }
    1809             : 
    1810    12583252 :                 for (i = 1; i < log->l_iclog_heads; i++)
    1811       12510 :                         xhdr[i].hic_xheader.xh_cycle = cycle_lsn;
    1812             :         }
    1813    12570754 : }
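                      : 
                      : /*
                      :  * Editor's sketch (not part of xfs_log.c): the stamping scheme above,
                      :  * round-tripped in userspace.  The first 4 bytes of every 512-byte basic
                      :  * block are saved into the record header and overwritten with the cycle
                      :  * number, so recovery can spot a torn write (a block whose cycle does
                      :  * not match) and restore the saved bytes afterwards.
                      :  */
                      : #include <stdint.h>
                      : #include <string.h>
                      : 
                      : #define BBSIZE 512
                      : 
                      : static void pack(char *data, int nblocks, uint32_t cycle, uint32_t *saved)
                      : {
                      :         for (int i = 0; i < nblocks; i++) {
                      :                 memcpy(&saved[i], data + i * BBSIZE, 4);        /* save */
                      :                 memcpy(data + i * BBSIZE, &cycle, 4);           /* stamp */
                      :         }
                      : }
                      : 
                      : static void unpack(char *data, int nblocks, const uint32_t *saved)
                      : {
                      :         for (int i = 0; i < nblocks; i++)                       /* restore */
                      :                 memcpy(data + i * BBSIZE, &saved[i], 4);
                      : }
                      : 
                      : int main(void)
                      : {
                      :         char buf[2 * BBSIZE] = "payload";
                      :         uint32_t saved[2];
                      : 
                      :         pack(buf, 2, 42, saved);
                      :         unpack(buf, 2, saved);
                      :         return memcmp(buf, "payload", 8) != 0;  /* 0: round trip intact */
                      : }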
    1814             : 
    1815             : /*
    1816             :  * Calculate the checksum for a log buffer.
    1817             :  *
    1818             :  * This is a little more complicated than it should be because the various
    1819             :  * headers and the actual data are non-contiguous.
    1820             :  */
    1821             : __le32
    1822    16084454 : xlog_cksum(
    1823             :         struct xlog             *log,
    1824             :         struct xlog_rec_header  *rhead,
    1825             :         char                    *dp,
    1826             :         int                     size)
    1827             : {
    1828    16084454 :         uint32_t                crc;
    1829             : 
    1830             :         /* first generate the crc for the record header ... */
    1831    16084454 :         crc = xfs_start_cksum_update((char *)rhead,
    1832             :                               sizeof(struct xlog_rec_header),
    1833             :                               offsetof(struct xlog_rec_header, h_crc));
    1834             : 
    1835             :         /* ... then for additional cycle data for v2 logs ... */
    1836    16084521 :         if (xfs_has_logv2(log->l_mp)) {
    1837    16084531 :                 union xlog_in_core2 *xhdr = (union xlog_in_core2 *)rhead;
    1838    16084531 :                 int             i;
    1839    16084531 :                 int             xheads;
    1840             : 
    1841    16084531 :                 xheads = DIV_ROUND_UP(size, XLOG_HEADER_CYCLE_SIZE);
    1842             : 
    1843    16102061 :                 for (i = 1; i < xheads; i++) {
    1844       17533 :                         crc = crc32c(crc, &xhdr[i].hic_xheader,
    1845             :                                      sizeof(struct xlog_rec_ext_header));
    1846             :                 }
    1847             :         }
    1848             : 
    1849             :         /* ... and finally for the payload */
    1850    16084518 :         crc = crc32c(crc, dp, size);
    1851             : 
    1852    16084516 :         return xfs_end_cksum(crc);
    1853             : }
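                      : 
                      : /*
                      :  * Editor's sketch (not part of xfs_log.c): the chaining pattern above -
                      :  * one running CRC fed the record header, then any v2 extended headers,
                      :  * then the payload.  The update function below is a stand-in stub, not
                      :  * the kernel's crc32c(), and the sketch glosses over the detail that
                      :  * the kernel skips the h_crc field itself inside the header.
                      :  */
                      : #include <stddef.h>
                      : #include <stdint.h>
                      : 
                      : /* stand-in: any incremental checksum has this (crc, buf, len) shape */
                      : static uint32_t crc_update(uint32_t crc, const void *p, size_t len)
                      : {
                      :         const uint8_t *b = p;
                      : 
                      :         while (len--)
                      :                 crc = (crc << 1 | crc >> 31) ^ *b++;
                      :         return crc;
                      : }
                      : 
                      : /* chain one CRC across the discontiguous pieces of a log record */
                      : uint32_t record_cksum(const void *hdr, size_t hdr_len,
                      :                       const void *ext, size_t ext_len,
                      :                       const void *payload, size_t payload_len)
                      : {
                      :         uint32_t crc = 0;
                      : 
                      :         crc = crc_update(crc, hdr, hdr_len);            /* record header */
                      :         if (ext_len)
                      :                 crc = crc_update(crc, ext, ext_len);    /* v2 ext headers */
                      :         return crc_update(crc, payload, payload_len);   /* log data */
                      : }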
    1854             : 
    1855             : static void
    1856    12570792 : xlog_bio_end_io(
    1857             :         struct bio              *bio)
    1858             : {
    1859    12570792 :         struct xlog_in_core     *iclog = bio->bi_private;
    1860             : 
    1861    12570792 :         queue_work(iclog->ic_log->l_ioend_workqueue,
    1862             :                    &iclog->ic_end_io_work);
    1863    12570792 : }
    1864             : 
    1865             : static int
    1866    12570702 : xlog_map_iclog_data(
    1867             :         struct bio              *bio,
    1868             :         void                    *data,
    1869             :         size_t                  count)
    1870             : {
    1871    12574542 :         do {
    1872    12574542 :                 struct page     *page = kmem_to_page(data);
    1873    12574556 :                 unsigned int    off = offset_in_page(data);
    1874    12574556 :                 size_t          len = min_t(size_t, count, PAGE_SIZE - off);
    1875             : 
    1876    12574556 :                 if (bio_add_page(bio, page, len, off) != len)
    1877             :                         return -EIO;
    1878             : 
    1879    12574536 :                 data += len;
    1880    12574536 :                 count -= len;
    1881    12574536 :         } while (count);
    1882             : 
    1883             :         return 0;
    1884             : }
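                      : 
                      : /*
                      :  * Editor's sketch (not part of xfs_log.c): the chunking loop above in
                      :  * isolation - walk a virtually contiguous buffer and emit one
                      :  * (page, offset, length) segment per page it touches, which is the unit
                      :  * bio_add_page() works in.  The address here is a bare integer for
                      :  * illustration only.
                      :  */
                      : #include <stdio.h>
                      : 
                      : #define PAGE_SIZE 4096UL
                      : 
                      : int main(void)
                      : {
                      :         unsigned long addr = 0x1750, count = 10000;     /* made-up values */
                      : 
                      :         while (count) {
                      :                 unsigned long off = addr & (PAGE_SIZE - 1);
                      :                 unsigned long len = count < PAGE_SIZE - off ? count
                      :                                                             : PAGE_SIZE - off;
                      : 
                      :                 printf("page %lu: off %lu len %lu\n",
                      :                        addr / PAGE_SIZE, off, len);
                      :                 addr += len;
                      :                 count -= len;
                      :         }
                      :         return 0;
                      : }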
    1885             : 
    1886             : STATIC void
    1887    12570689 : xlog_write_iclog(
    1888             :         struct xlog             *log,
    1889             :         struct xlog_in_core     *iclog,
    1890             :         uint64_t                bno,
    1891             :         unsigned int            count)
    1892             : {
    1893    12570689 :         ASSERT(bno < log->l_logBBsize);
    1894    12570689 :         trace_xlog_iclog_write(iclog, _RET_IP_);
    1895             : 
    1896             :         /*
    1897             :          * We lock the iclogbufs here so that we can serialise against I/O
    1898             :          * completion during unmount.  We might be processing a shutdown
    1899             :          * triggered during unmount, and that can occur asynchronously to the
     1900             :          * unmount thread, and hence we need to ensure that it completes before
     1901             :          * tearing down the iclogbufs.  Hence we need to hold the buffer lock
     1902             :          * across the log IO to achieve that.
    1903             :          */
    1904    12570777 :         down(&iclog->ic_sema);
    1905    25141438 :         if (xlog_is_shutdown(log)) {
    1906             :                 /*
    1907             :                  * It would seem logical to return EIO here, but we rely on
    1908             :                  * the log state machine to propagate I/O errors instead of
     1909             :                  * doing it here.  We kick off the state machine and unlock
     1910             :                  * the buffer manually; the code needs to be kept in sync
    1911             :                  * with the I/O completion path.
    1912             :                  */
    1913           6 :                 xlog_state_done_syncing(iclog);
    1914           6 :                 up(&iclog->ic_sema);
    1915           6 :                 return;
    1916             :         }
    1917             : 
    1918             :         /*
     1919             :          * We use REQ_SYNC | REQ_IDLE here to tell the block layer there are more
    1920             :          * IOs coming immediately after this one. This prevents the block layer
    1921             :          * writeback throttle from throttling log writes behind background
    1922             :          * metadata writeback and causing priority inversions.
    1923             :          */
    1924    12570713 :         bio_init(&iclog->ic_bio, log->l_targ->bt_bdev, iclog->ic_bvec,
    1925    12570713 :                  howmany(count, PAGE_SIZE),
    1926             :                  REQ_OP_WRITE | REQ_META | REQ_SYNC | REQ_IDLE);
    1927    12570742 :         iclog->ic_bio.bi_iter.bi_sector = log->l_logBBstart + bno;
    1928    12570742 :         iclog->ic_bio.bi_end_io = xlog_bio_end_io;
    1929    12570742 :         iclog->ic_bio.bi_private = iclog;
    1930             : 
    1931    12570742 :         if (iclog->ic_flags & XLOG_ICL_NEED_FLUSH) {
    1932     3623663 :                 iclog->ic_bio.bi_opf |= REQ_PREFLUSH;
    1933             :                 /*
    1934             :                  * For external log devices, we also need to flush the data
    1935             :                  * device cache first to ensure all metadata writeback covered
    1936             :                  * by the LSN in this iclog is on stable storage. This is slow,
    1937             :                  * but it *must* complete before we issue the external log IO.
    1938             :                  *
    1939             :                  * If the flush fails, we cannot conclude that past metadata
    1940             :                  * writeback from the log succeeded.  Repeating the flush is
    1941             :                  * not possible, hence we must shut down with log IO error to
    1942             :                  * avoid shutdown re-entering this path and erroring out again.
    1943             :                  */
    1944     3623667 :                 if (log->l_targ != log->l_mp->m_ddev_targp &&
    1945           4 :                     blkdev_issue_flush(log->l_mp->m_ddev_targp->bt_bdev)) {
    1946           0 :                         xlog_force_shutdown(log, SHUTDOWN_LOG_IO_ERROR);
    1947           0 :                         return;
    1948             :                 }
    1949             :         }
    1950    12570742 :         if (iclog->ic_flags & XLOG_ICL_NEED_FUA)
    1951     2542729 :                 iclog->ic_bio.bi_opf |= REQ_FUA;
    1952             : 
    1953    12570742 :         iclog->ic_flags &= ~(XLOG_ICL_NEED_FLUSH | XLOG_ICL_NEED_FUA);
    1954             : 
    1955    12570742 :         if (xlog_map_iclog_data(&iclog->ic_bio, iclog->ic_data, count)) {
    1956           0 :                 xlog_force_shutdown(log, SHUTDOWN_LOG_IO_ERROR);
    1957           0 :                 return;
    1958             :         }
    1959    12570697 :         if (is_vmalloc_addr(iclog->ic_data))
    1960             :                 flush_kernel_vmap_range(iclog->ic_data, count);
    1961             : 
    1962             :         /*
    1963             :          * If this log buffer would straddle the end of the log we will have
    1964             :          * to split it up into two bios, so that we can continue at the start.
    1965             :          */
    1966    12570679 :         if (bno + BTOBB(count) > log->l_logBBsize) {
    1967        4531 :                 struct bio *split;
    1968             : 
    1969        4531 :                 split = bio_split(&iclog->ic_bio, log->l_logBBsize - bno,
    1970             :                                   GFP_NOIO, &fs_bio_set);
    1971        4531 :                 bio_chain(split, &iclog->ic_bio);
    1972        4531 :                 submit_bio(split);
    1973             : 
    1974             :                 /* restart at logical offset zero for the remainder */
    1975        4531 :                 iclog->ic_bio.bi_iter.bi_sector = log->l_logBBstart;
    1976             :         }
    1977             : 
    1978    12570679 :         submit_bio(&iclog->ic_bio);
    1979             : }
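
The wraparound handling above is plain circular-buffer arithmetic: whatever
would land past the physical end of the log is resubmitted starting at block
zero. A minimal userspace sketch of the split-point computation (the helper
name and the standalone BTOBB macro are illustrative, not the kernel's build
environment):

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    #define BBSHIFT 9                      /* 512-byte basic blocks, as in XFS */
    #define BTOBB(bytes) (((bytes) + (1 << BBSHIFT) - 1) >> BBSHIFT)

    /*
     * For a write of 'count' bytes starting at basic block 'bno' in a
     * circular log of 'logBBsize' blocks, return how many blocks belong
     * to the first bio; the remainder restarts at block zero.
     */
    static uint64_t first_chunk_bblocks(uint64_t bno, unsigned int count,
                                        uint64_t logBBsize)
    {
            if (bno + BTOBB(count) <= logBBsize)
                    return BTOBB(count);   /* no wrap, a single bio suffices */
            return logBBsize - bno;        /* blocks up to the end of the log */
    }

    int main(void)
    {
            /* a 1 MiB write starting 256 blocks before the end of a
             * 16384-block log: 256 blocks go out first, 1792 wrap to 0 */
            uint64_t first = first_chunk_bblocks(16384 - 256, 1 << 20, 16384);

            assert(first == 256);
            printf("first chunk: %llu blocks\n", (unsigned long long)first);
            return 0;
    }
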
    1980             : 
    1981             : /*
    1982             :  * We need to bump the cycle number for the part of the iclog that is
    1983             :  * written to the start of the log. Watch out for the header magic
    1984             :  * number case, though.
    1985             :  */
    1986             : static void
    1987        4531 : xlog_split_iclog(
    1988             :         struct xlog             *log,
    1989             :         void                    *data,
    1990             :         uint64_t                bno,
    1991             :         unsigned int            count)
    1992             : {
    1993        4531 :         unsigned int            split_offset = BBTOB(log->l_logBBsize - bno);
    1994        4531 :         unsigned int            i;
    1995             : 
    1996      145547 :         for (i = split_offset; i < count; i += BBSIZE) {
    1997      141016 :                 uint32_t cycle = get_unaligned_be32(data + i);
    1998             : 
    1999      141016 :                 if (++cycle == XLOG_HEADER_MAGIC_NUM)
    2000           0 :                         cycle++;
    2001      141016 :                 put_unaligned_be32(cycle, data + i);
    2002             :         }
    2003        4531 : }
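
Log recovery tells old data from new purely by these per-block cycle stamps,
so the portion that wraps to the start of the log must carry the incremented
cycle. A hedged userspace model of the loop above, using htonl/ntohl in place
of the kernel's unaligned big-endian helpers (the magic value is quoted from
xfs_log_format.h):

    #include <arpa/inet.h>   /* htonl/ntohl stand in for *_unaligned_be32 */
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define BBSIZE 512
    #define XLOG_HEADER_MAGIC_NUM 0xFEEDbabe

    /* Bump the cycle stamp of every basic block in [split_offset, count). */
    static void bump_wrapped_cycles(void *data, unsigned int split_offset,
                                    unsigned int count)
    {
            for (unsigned int i = split_offset; i < count; i += BBSIZE) {
                    uint32_t be, cycle;

                    memcpy(&be, (char *)data + i, sizeof(be));
                    cycle = ntohl(be);
                    if (++cycle == XLOG_HEADER_MAGIC_NUM)
                            cycle++;        /* never collide with the magic */
                    be = htonl(cycle);
                    memcpy((char *)data + i, &be, sizeof(be));
            }
    }

    int main(void)
    {
            unsigned char buf[2 * BBSIZE] = { 0 };
            uint32_t be = htonl(41);

            memcpy(buf, &be, sizeof(be));
            memcpy(buf + BBSIZE, &be, sizeof(be));
            bump_wrapped_cycles(buf, 0, sizeof(buf));
            memcpy(&be, buf, sizeof(be));
            printf("cycle after wrap: %u\n", ntohl(be));   /* prints 42 */
            return 0;
    }
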
    2004             : 
    2005             : static int
    2006    12570789 : xlog_calc_iclog_size(
    2007             :         struct xlog             *log,
    2008             :         struct xlog_in_core     *iclog,
    2009             :         uint32_t                *roundoff)
    2010             : {
    2011    12570789 :         uint32_t                count_init, count;
    2012             : 
    2013             :         /* Add for LR header */
    2014    12570789 :         count_init = log->l_iclog_hsize + iclog->ic_offset;
    2015    12570789 :         count = roundup(count_init, log->l_iclog_roundoff);
    2016             : 
    2017    12570789 :         *roundoff = count - count_init;
    2018             : 
    2019    12570789 :         ASSERT(count >= count_init);
    2020    12570789 :         ASSERT(*roundoff < log->l_iclog_roundoff);
    2021    12570789 :         return count;
    2022             : }
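
The roundoff is simply the padding required to reach the next roundoff
boundary (the stripe unit on v2 logs). A worked example under assumed sizes,
with roundup() re-derived the way the kernel macro behaves for positive
integers:

    #include <assert.h>
    #include <stdint.h>

    #define roundup(x, y) ((((x) + (y) - 1) / (y)) * (y))

    int main(void)
    {
            /* illustrative values: 512-byte LR header, 32 KiB stripe unit */
            uint32_t iclog_hsize = 512, ic_offset = 20000;
            uint32_t iclog_roundoff = 32768;

            uint32_t count_init = iclog_hsize + ic_offset;        /* 20512 */
            uint32_t count = roundup(count_init, iclog_roundoff); /* 32768 */
            uint32_t roundoff = count - count_init;               /* 12256 */

            assert(count >= count_init);
            assert(roundoff < iclog_roundoff);
            return 0;
    }
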
    2023             : 
    2024             : /*
    2025             :  * Flush out the in-core log (iclog) to the on-disk log in an asynchronous
    2026             :  * fashion.  The caller should already have moved the current iclog
    2027             :  * ptr in the log to point to the next available iclog.  This allows further
    2028             :  * writes to continue while this code syncs out an iclog ready to go.
    2029             :  * Before an in-core log can be written out, the data section must be scanned
    2030             :  * to save away the 1st word of each BBSIZE block into the header.  We replace
    2031             :  * it with the current cycle count.  Each BBSIZE block is tagged with the
    2032             :  * cycle count because there is an implicit assumption that drives will
    2033             :  * guarantee that entire 512 byte blocks get written at once.  In other words,
    2034             :  * we can't have part of a 512 byte block written and part not written.  By
    2035             :  * tagging each block, we will know which blocks are valid when recovering
    2036             :  * after an unclean shutdown.
    2037             :  *
    2038             :  * This routine is single threaded on the iclog.  No other thread can be in
    2039             :  * this routine with the same iclog.  Changing contents of iclog can there-
    2040             :  * fore be done without grabbing the state machine lock.  Updating the global
    2041             :  * log will require grabbing the lock though.
    2042             :  *
    2043             :  * The entire log manager uses a logical block numbering scheme.  Only
    2044             :  * xlog_write_iclog knows about the fact that the log may not start with
    2045             :  * block zero on a given device.
    2046             :  */
    2047             : STATIC void
    2048    12570782 : xlog_sync(
    2049             :         struct xlog             *log,
    2050             :         struct xlog_in_core     *iclog,
    2051             :         struct xlog_ticket      *ticket)
    2052             : {
    2053    12570782 :         unsigned int            count;          /* byte count of bwrite */
    2054    12570782 :         unsigned int            roundoff;       /* roundoff to BB or stripe */
    2055    12570782 :         uint64_t                bno;
    2056    12570782 :         unsigned int            size;
    2057             : 
    2058    12570782 :         ASSERT(atomic_read(&iclog->ic_refcnt) == 0);
    2059    12570782 :         trace_xlog_iclog_sync(iclog, _RET_IP_);
    2060             : 
    2061    12570789 :         count = xlog_calc_iclog_size(log, iclog, &roundoff);
    2062             : 
    2063             :         /*
    2064             :          * If we have a ticket, account for the roundoff via the ticket
    2065             :          * reservation to avoid touching the hot grant heads needlessly.
    2066             :          * Otherwise, we have to move grant heads directly.
    2067             :          */
    2068    12570796 :         if (ticket) {
    2069    10495089 :                 ticket->t_curr_res -= roundoff;
    2070             :         } else {
    2071     2075707 :                 xlog_grant_add_space(log, &log->l_reserve_head.grant, roundoff);
    2072     2075707 :                 xlog_grant_add_space(log, &log->l_write_head.grant, roundoff);
    2073             :         }
    2074             : 
    2075             :         /* put cycle number in every block */
    2076    12570796 :         xlog_pack_data(log, iclog, roundoff);
    2077             : 
    2078             :         /* real byte length */
    2079    12570763 :         size = iclog->ic_offset;
    2080    12570763 :         if (xfs_has_logv2(log->l_mp))
    2081    12570780 :                 size += roundoff;
    2082    12570763 :         iclog->ic_header.h_len = cpu_to_be32(size);
    2083             : 
    2084    12570763 :         XFS_STATS_INC(log->l_mp, xs_log_writes);
    2085    12570772 :         XFS_STATS_ADD(log->l_mp, xs_log_blocks, BTOBB(count));
    2086             : 
    2087    12570785 :         bno = BLOCK_LSN(be64_to_cpu(iclog->ic_header.h_lsn));
    2088             : 
    2089             :         /* Do we need to split this write into 2 parts? */
    2090    12570785 :         if (bno + BTOBB(count) > log->l_logBBsize)
    2091        4531 :                 xlog_split_iclog(log, &iclog->ic_header, bno, count);
    2092             : 
    2093             :         /* calculate the checksum */
    2094    25141480 :         iclog->ic_header.h_crc = xlog_cksum(log, &iclog->ic_header,
    2095    12570785 :                                             iclog->ic_datap, size);
    2096             :         /*
    2097             :          * Intentionally corrupt the log record CRC based on the error injection
    2098             :          * frequency, if defined. This facilitates testing log recovery in the
    2099             :          * event of torn writes. Hence, set the IOABORT state to abort the log
    2100             :          * write on I/O completion and shutdown the fs. The subsequent mount
    2101             :          * detects the bad CRC and attempts to recover.
    2102             :          */
    2103             : #ifdef DEBUG
    2104    12570695 :         if (XFS_TEST_ERROR(false, log->l_mp, XFS_ERRTAG_LOG_BAD_CRC)) {
    2105          12 :                 iclog->ic_header.h_crc &= cpu_to_le32(0xAAAAAAAA);
    2106          12 :                 iclog->ic_fail_crc = true;
    2107          12 :                 xfs_warn(log->l_mp,
    2108             :         "Intentionally corrupted log record at LSN 0x%llx. Shutdown imminent.",
    2109             :                          be64_to_cpu(iclog->ic_header.h_lsn));
    2110             :         }
    2111             : #endif
    2112    12570740 :         xlog_verify_iclog(log, iclog, count);
    2113    12570789 :         xlog_write_iclog(log, iclog, bno, count);
    2114    12570766 : }
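
The roundoff accounting at the top of xlog_sync() is worth modelling on its
own: a ticketed write pays the padding out of its reservation so the hot
grant heads stay untouched, while an unticketed write must grow both heads.
A sketch under simplified assumptions (plain integers stand in for the
atomic grant heads and for xlog_grant_add_space()):

    #include <assert.h>
    #include <stdint.h>

    struct ticket { int t_curr_res; };     /* minimal stand-in */

    static void account_roundoff(struct ticket *tic, uint32_t roundoff,
                                 uint64_t *reserve_head, uint64_t *write_head)
    {
            if (tic) {
                    tic->t_curr_res -= roundoff;   /* debit the reservation */
            } else {
                    *reserve_head += roundoff;     /* move both grant heads */
                    *write_head += roundoff;
            }
    }

    int main(void)
    {
            struct ticket tic = { .t_curr_res = 4096 };
            uint64_t rh = 0, wh = 0;

            account_roundoff(&tic, 480, &rh, &wh);
            assert(tic.t_curr_res == 3616 && rh == 0 && wh == 0);
            account_roundoff(NULL, 480, &rh, &wh);
            assert(rh == 480 && wh == 480);
            return 0;
    }
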
    2115             : 
    2116             : /*
    2117             :  * Deallocate a log structure
    2118             :  */
    2119             : STATIC void
    2120       22494 : xlog_dealloc_log(
    2121             :         struct xlog     *log)
    2122             : {
    2123       22494 :         xlog_in_core_t  *iclog, *next_iclog;
    2124       22494 :         int             i;
    2125             : 
    2126             :         /*
    2127             :          * Destroy the CIL after waiting for iclog IO completion because an
    2128             :          * iclog EIO error will try to shut down the log, which accesses the
    2129             :          * CIL to wake up the waiters.
    2130             :          */
    2131       22494 :         xlog_cil_destroy(log);
    2132             : 
    2133       22494 :         iclog = log->l_iclog;
    2134      202434 :         for (i = 0; i < log->l_iclog_bufs; i++) {
    2135      179940 :                 next_iclog = iclog->ic_next;
    2136      179940 :                 kmem_free(iclog->ic_data);
    2137      179940 :                 kmem_free(iclog);
    2138      179940 :                 iclog = next_iclog;
    2139             :         }
    2140             : 
    2141       22494 :         log->l_mp->m_log = NULL;
    2142       22494 :         destroy_workqueue(log->l_ioend_workqueue);
    2143       22494 :         kmem_free(log);
    2144       22494 : }
    2145             : 
    2146             : /*
    2147             :  * Update counters atomically now that memcpy is done.
    2148             :  */
    2149             : static inline void
    2150             : xlog_state_finish_copy(
    2151             :         struct xlog             *log,
    2152             :         struct xlog_in_core     *iclog,
    2153             :         int                     record_cnt,
    2154             :         int                     copy_bytes)
    2155             : {
    2156    15200111 :         lockdep_assert_held(&log->l_icloglock);
    2157             : 
    2158    15200111 :         be32_add_cpu(&iclog->ic_header.h_num_logops, record_cnt);
    2159    15200111 :         iclog->ic_offset += copy_bytes;
    2160             : }
    2161             : 
    2162             : /*
    2163             :  * print out info relating to regions written which consume
    2164             :  * the reservation
    2165             :  */
    2166             : void
    2167           0 : xlog_print_tic_res(
    2168             :         struct xfs_mount        *mp,
    2169             :         struct xlog_ticket      *ticket)
    2170             : {
    2171           0 :         xfs_warn(mp, "ticket reservation summary:");
    2172           0 :         xfs_warn(mp, "  unit res    = %d bytes", ticket->t_unit_res);
    2173           0 :         xfs_warn(mp, "  current res = %d bytes", ticket->t_curr_res);
    2174           0 :         xfs_warn(mp, "  original count  = %d", ticket->t_ocnt);
    2175           0 :         xfs_warn(mp, "  remaining count = %d", ticket->t_cnt);
    2176           0 : }
    2177             : 
    2178             : /*
    2179             :  * Print a summary of the transaction.
    2180             :  */
    2181             : void
    2182           0 : xlog_print_trans(
    2183             :         struct xfs_trans        *tp)
    2184             : {
    2185           0 :         struct xfs_mount        *mp = tp->t_mountp;
    2186           0 :         struct xfs_log_item     *lip;
    2187             : 
    2188             :         /* dump core transaction and ticket info */
    2189           0 :         xfs_warn(mp, "transaction summary:");
    2190           0 :         xfs_warn(mp, "  log res   = %d", tp->t_log_res);
    2191           0 :         xfs_warn(mp, "  log count = %d", tp->t_log_count);
    2192           0 :         xfs_warn(mp, "  flags     = 0x%x", tp->t_flags);
    2193             : 
    2194           0 :         xlog_print_tic_res(mp, tp->t_ticket);
    2195             : 
    2196             :         /* dump each log item */
    2197           0 :         list_for_each_entry(lip, &tp->t_items, li_trans) {
    2198           0 :                 struct xfs_log_vec      *lv = lip->li_lv;
    2199           0 :                 struct xfs_log_iovec    *vec;
    2200           0 :                 int                     i;
    2201             : 
    2202           0 :                 xfs_warn(mp, "log item: ");
    2203           0 :                 xfs_warn(mp, "  type       = 0x%x", lip->li_type);
    2204           0 :                 xfs_warn(mp, "  flags      = 0x%lx", lip->li_flags);
    2205           0 :                 if (!lv)
    2206           0 :                         continue;
    2207           0 :                 xfs_warn(mp, "  niovecs    = %d", lv->lv_niovecs);
    2208           0 :                 xfs_warn(mp, "  size       = %d", lv->lv_size);
    2209           0 :                 xfs_warn(mp, "  bytes      = %d", lv->lv_bytes);
    2210           0 :                 xfs_warn(mp, "  buf len    = %d", lv->lv_buf_len);
    2211             : 
    2212             :                 /* dump each iovec for the log item */
    2213           0 :                 vec = lv->lv_iovecp;
    2214           0 :                 for (i = 0; i < lv->lv_niovecs; i++) {
    2215           0 :                         int dumplen = min(vec->i_len, 32);
    2216             : 
    2217           0 :                         xfs_warn(mp, "  iovec[%d]", i);
    2218           0 :                         xfs_warn(mp, "    type     = 0x%x", vec->i_type);
    2219           0 :                         xfs_warn(mp, "    len      = %d", vec->i_len);
    2220           0 :                         xfs_warn(mp, "    first %d bytes of iovec[%d]:", dumplen, i);
    2221           0 :                         xfs_hex_dump(vec->i_addr, dumplen);
    2222             : 
    2223           0 :                         vec++;
    2224             :                 }
    2225             :         }
    2226           0 : }
    2227             : 
    2228             : static inline void
    2229  1084263277 : xlog_write_iovec(
    2230             :         struct xlog_in_core     *iclog,
    2231             :         uint32_t                *log_offset,
    2232             :         void                    *data,
    2233             :         uint32_t                write_len,
    2234             :         int                     *bytes_left,
    2235             :         uint32_t                *record_cnt,
    2236             :         uint32_t                *data_cnt)
    2237             : {
    2238  1084263277 :         ASSERT(*log_offset < iclog->ic_log->l_iclog_size);
    2239  1084263277 :         ASSERT(*log_offset % sizeof(int32_t) == 0);
    2240  1084263277 :         ASSERT(write_len % sizeof(int32_t) == 0);
    2241             : 
    2242  2168526554 :         memcpy(iclog->ic_datap + *log_offset, data, write_len);
    2243  1084263277 :         *log_offset += write_len;
    2244  1084263277 :         *bytes_left -= write_len;
    2245  1084263277 :         (*record_cnt)++;
    2246  1084263277 :         *data_cnt += write_len;
    2247  1084263277 : }
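
The four counters updated here drive the rest of the write path: the offset
within the iclog, the remaining request length, the op count that ends up in
the record header, and the bytes accounted to this iclog. A self-contained
model of that bookkeeping (the struct and buffer size are illustrative):

    #include <assert.h>
    #include <stdint.h>
    #include <string.h>

    struct copy_state {
            char            buf[1024];   /* stands in for iclog->ic_datap */
            uint32_t        log_offset;
            int             bytes_left;
            uint32_t        record_cnt;
            uint32_t        data_cnt;
    };

    static void write_iovec(struct copy_state *s, const void *data,
                            uint32_t len)
    {
            assert(len % sizeof(int32_t) == 0);   /* regions are 4-byte aligned */
            memcpy(s->buf + s->log_offset, data, len);
            s->log_offset += len;                 /* advance within the iclog */
            s->bytes_left -= len;                 /* shrink the request */
            s->record_cnt++;                      /* one more log operation */
            s->data_cnt += len;                   /* bytes in this iclog */
    }

    int main(void)
    {
            struct copy_state s = { .bytes_left = 24 };

            write_iovec(&s, "region-1-data...", 16);
            write_iovec(&s, "tail....", 8);
            assert(s.bytes_left == 0 && s.record_cnt == 2 && s.data_cnt == 24);
            return 0;
    }
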
    2248             : 
    2249             : /*
    2250             :  * Write log vectors into a single iclog which is guaranteed by the caller
    2251             :  * to have enough space to write the entire log vector into.
    2252             :  */
    2253             : static void
    2254   409758110 : xlog_write_full(
    2255             :         struct xfs_log_vec      *lv,
    2256             :         struct xlog_ticket      *ticket,
    2257             :         struct xlog_in_core     *iclog,
    2258             :         uint32_t                *log_offset,
    2259             :         uint32_t                *len,
    2260             :         uint32_t                *record_cnt,
    2261             :         uint32_t                *data_cnt)
    2262             : {
    2263   409758110 :         int                     index;
    2264             : 
    2265   409758110 :         ASSERT(*log_offset + *len <= iclog->ic_size ||
    2266             :                 iclog->ic_state == XLOG_STATE_WANT_SYNC);
    2267             : 
    2268             :         /*
    2269             :          * Ordered log vectors have no regions to write so this
    2270             :          * loop will naturally skip them.
    2271             :          */
    2272  1457148418 :         for (index = 0; index < lv->lv_niovecs; index++) {
    2273  1047373374 :                 struct xfs_log_iovec    *reg = &lv->lv_iovecp[index];
    2274  1047373374 :                 struct xlog_op_header   *ophdr = reg->i_addr;
    2275             : 
    2276  1047373374 :                 ophdr->oh_tid = cpu_to_be32(ticket->t_tid);
    2277  1047373374 :                 xlog_write_iovec(iclog, log_offset, reg->i_addr,
    2278  1047373374 :                                 reg->i_len, len, record_cnt, data_cnt);
    2279             :         }
    2280   409775044 : }
    2281             : 
    2282             : static int
    2283    10097395 : xlog_write_get_more_iclog_space(
    2284             :         struct xlog_ticket      *ticket,
    2285             :         struct xlog_in_core     **iclogp,
    2286             :         uint32_t                *log_offset,
    2287             :         uint32_t                len,
    2288             :         uint32_t                *record_cnt,
    2289             :         uint32_t                *data_cnt)
    2290             : {
    2291    10097395 :         struct xlog_in_core     *iclog = *iclogp;
    2292    10097395 :         struct xlog             *log = iclog->ic_log;
    2293    10097395 :         int                     error;
    2294             : 
    2295    10097395 :         spin_lock(&log->l_icloglock);
    2296    10097432 :         ASSERT(iclog->ic_state == XLOG_STATE_WANT_SYNC);
    2297    10097432 :         xlog_state_finish_copy(log, iclog, *record_cnt, *data_cnt);
    2298    10097432 :         error = xlog_state_release_iclog(log, iclog, ticket);
    2299    10097431 :         spin_unlock(&log->l_icloglock);
    2300    10097428 :         if (error)
    2301             :                 return error;
    2302             : 
    2303    10097420 :         error = xlog_state_get_iclog_space(log, len, &iclog, ticket,
    2304             :                                         log_offset);
    2305    10097409 :         if (error)
    2306             :                 return error;
    2307    10094790 :         *record_cnt = 0;
    2308    10094790 :         *data_cnt = 0;
    2309    10094790 :         *iclogp = iclog;
    2310    10094790 :         return 0;
    2311             : }
    2312             : 
    2313             : /*
    2314             :  * Write log vectors into a single iclog which is smaller than the current chain
    2315             :  * length. We write until we cannot fit a full record into the remaining space
    2316             :  * and then stop, grabbing new iclog space to finish writing the log vector
    2317             :  * that cannot wholly fit in the current iclog.
    2318             :  */
    2319             : static int
    2320    10097036 : xlog_write_partial(
    2321             :         struct xfs_log_vec      *lv,
    2322             :         struct xlog_ticket      *ticket,
    2323             :         struct xlog_in_core     **iclogp,
    2324             :         uint32_t                *log_offset,
    2325             :         uint32_t                *len,
    2326             :         uint32_t                *record_cnt,
    2327             :         uint32_t                *data_cnt)
    2328             : {
    2329    10097036 :         struct xlog_in_core     *iclog = *iclogp;
    2330    10097036 :         struct xlog_op_header   *ophdr;
    2331    10097036 :         int                     index = 0;
    2332    10097036 :         uint32_t                rlen;
    2333    10097036 :         int                     error;
    2334             : 
    2335             :         /* walk the logvec, copying until we run out of space in the iclog */
    2336    37392737 :         for (index = 0; index < lv->lv_niovecs; index++) {
    2337    27298357 :                 struct xfs_log_iovec    *reg = &lv->lv_iovecp[index];
    2338    27298357 :                 uint32_t                reg_offset = 0;
    2339             : 
    2340             :                 /*
    2341             :                  * The first region of a continuation must have a non-zero
    2342             :                  * length otherwise log recovery will just skip over it and
    2343             :                  * start recovering from the next opheader it finds. Because we
    2344             :                  * mark the next opheader as a continuation, recovery will then
    2345             :                  * incorrectly add the continuation to the previous region and
    2346             :                  * that breaks stuff.
    2347             :                  *
    2348             :                  * Hence if there isn't space for region data after the
    2349             :                  * opheader, then we need to start afresh with a new iclog.
    2350             :                  */
    2351    27298357 :                 if (iclog->ic_size - *log_offset <=
    2352             :                                         sizeof(struct xlog_op_header)) {
    2353      493016 :                         error = xlog_write_get_more_iclog_space(ticket,
    2354             :                                         &iclog, log_offset, *len, record_cnt,
    2355             :                                         data_cnt);
    2356      493016 :                         if (error)
    2357          81 :                                 return error;
    2358             :                 }
    2359             : 
    2360    27298276 :                 ophdr = reg->i_addr;
    2361    27298276 :                 rlen = min_t(uint32_t, reg->i_len, iclog->ic_size - *log_offset);
    2362             : 
    2363    27298276 :                 ophdr->oh_tid = cpu_to_be32(ticket->t_tid);
    2364    27298276 :                 ophdr->oh_len = cpu_to_be32(rlen - sizeof(struct xlog_op_header));
    2365    27298276 :                 if (rlen != reg->i_len)
    2366     9604070 :                         ophdr->oh_flags |= XLOG_CONTINUE_TRANS;
    2367             : 
    2368    27298276 :                 xlog_write_iovec(iclog, log_offset, reg->i_addr,
    2369             :                                 rlen, len, record_cnt, data_cnt);
    2370             : 
    2371             :                 /* If we wrote the whole region, move to the next. */
    2372    27298250 :                 if (rlen == reg->i_len)
    2373    17694273 :                         continue;
    2374             : 
    2375             :                 /*
    2376             :                  * We now have a partially written iovec, but it can span
    2377             :                  * multiple iclogs so we loop here. First we release the iclog
    2378             :                  * we currently have, then we get a new iclog and add a new
    2379             :                  * opheader. Then we continue copying from where we were until
    2380             :                  * we either complete the iovec or fill the iclog. If we
    2381             :                  * complete the iovec, then we increment the index and go right
    2382             :                  * back to the top of the outer loop. if we fill the iclog, we
    2383             :                  * run the inner loop again.
    2384             :                  *
    2385             :                  * This is complicated by the tail of a region using all the
    2386             :                  * space in an iclog and hence requiring us to release the iclog
    2387             :                  * and get a new one before returning to the outer loop. We must
    2388             :                  * always guarantee that we exit this inner loop with at least
    2389             :                  * space for log transaction opheaders left in the current
    2390             :                  * iclog, hence we cannot just terminate the loop at the end
    2391             :                  * of the continuation.  So we loop while there is no
    2392             :                  * space left in the current iclog, and check for the end of the
    2393             :                  * continuation after getting a new iclog.
    2394             :                  */
    2395     9604373 :                 do {
    2396             :                         /*
    2397             :                          * Ensure we include the continuation opheader in the
    2398             :                          * space we need in the new iclog by adding that size
    2399             :                          * to the length we require. This continuation opheader
    2400             :                          * needs to be accounted to the ticket as the space it
    2401             :                          * consumes hasn't been accounted to the lv we are
    2402             :                          * writing.
    2403             :                          */
    2404     9604373 :                         error = xlog_write_get_more_iclog_space(ticket,
    2405             :                                         &iclog, log_offset,
    2406     9604373 :                                         *len + sizeof(struct xlog_op_header),
    2407             :                                         record_cnt, data_cnt);
    2408     9604408 :                         if (error)
    2409        2547 :                                 return error;
    2410             : 
    2411     9601861 :                         ophdr = iclog->ic_datap + *log_offset;
    2412     9601861 :                         ophdr->oh_tid = cpu_to_be32(ticket->t_tid);
    2413     9601861 :                         ophdr->oh_clientid = XFS_TRANSACTION;
    2414     9601861 :                         ophdr->oh_res2 = 0;
    2415     9601861 :                         ophdr->oh_flags = XLOG_WAS_CONT_TRANS;
    2416             : 
    2417     9601861 :                         ticket->t_curr_res -= sizeof(struct xlog_op_header);
    2418     9601861 :                         *log_offset += sizeof(struct xlog_op_header);
    2419     9601861 :                         *data_cnt += sizeof(struct xlog_op_header);
    2420             : 
    2421             :                         /*
    2422             :                          * If rlen fits in the iclog, then end the region
    2423             :                          * continuation. Otherwise we're going around again.
    2424             :                          */
    2425     9601861 :                         reg_offset += rlen;
    2426     9601861 :                         rlen = reg->i_len - reg_offset;
    2427     9601861 :                         if (rlen <= iclog->ic_size - *log_offset)
    2428     9601531 :                                 ophdr->oh_flags |= XLOG_END_TRANS;
    2429             :                         else
    2430         330 :                                 ophdr->oh_flags |= XLOG_CONTINUE_TRANS;
    2431             : 
    2432     9601861 :                         rlen = min_t(uint32_t, rlen, iclog->ic_size - *log_offset);
    2433     9601861 :                         ophdr->oh_len = cpu_to_be32(rlen);
    2434             : 
    2435     9601861 :                         xlog_write_iovec(iclog, log_offset,
    2436     9601861 :                                         reg->i_addr + reg_offset,
    2437             :                                         rlen, len, record_cnt, data_cnt);
    2438             : 
    2439     9601824 :                 } while (ophdr->oh_flags & XLOG_CONTINUE_TRANS);
    2440             :         }
    2441             : 
    2442             :         /*
    2443             :          * No more iovecs remain in this logvec so return the next log vec to
    2444             :          * the caller so it can go back to fast path copying.
    2445             :          */
    2446    10094380 :         *iclogp = iclog;
    2447    10094380 :         return 0;
    2448             : }
    2449             : 
    2450             : /*
    2451             :  * Write some region out to in-core log
    2452             :  *
    2453             :  * This will be called when writing externally provided regions or when
    2454             :  * writing out a commit record for a given transaction.
    2455             :  *
    2456             :  * General algorithm:
    2457             :  *      1. Find total length of this write.  This may include adding to the
    2458             :  *              lengths passed in.
    2459             :  *      2. Check whether we violate the ticket's reservation.
    2460             :  *      3. While writing to this iclog
    2461             :  *          A. Reserve as much space in this iclog as we can get
    2462             :  *          B. If this is first write, save away start lsn
    2463             :  *          C. While writing this region:
    2464             :  *              1. If first write of transaction, write start record
    2465             :  *              2. Write log operation header (header per region)
    2466             :  *              3. Find out if we can fit entire region into this iclog
    2467             :  *              4. Potentially, verify destination memcpy ptr
    2468             :  *              5. Memcpy (partial) region
    2469             :  *              6. If partial copy, release iclog; otherwise, continue
    2470             :  *                      copying more regions into current iclog
    2471             :  *      4. Mark want sync bit (in simulation mode)
    2472             :  *      5. Release iclog for potential flush to on-disk log.
    2473             :  *
    2474             :  * ERRORS:
    2475             :  * 1.   Panic if reservation is overrun.  This should never happen since
    2476             :  *      reservation amounts are generated internal to the filesystem.
    2477             :  * NOTES:
    2478             :  * 1. Tickets are single threaded data structures.
    2479             :  * 2. The XLOG_END_TRANS & XLOG_CONTINUE_TRANS flags are passed down to the
    2480             :  *      syncing routine.  When a single log_write region needs to span
    2481             :  *      multiple in-core logs, the XLOG_CONTINUE_TRANS bit should be set
    2482             :  *      on all log operation writes which don't contain the end of the
    2483             :  *      region.  The XLOG_END_TRANS bit is used for the in-core log
    2484             :  *      operation which contains the end of the continued log_write region.
    2485             :  * 3. When xlog_state_get_iclog_space() grabs the rest of the current iclog,
    2486             :  *      we don't really know exactly how much space will be used.  As a result,
    2487             :  *      we don't update ic_offset until the end when we know exactly how many
    2488             :  *      bytes have been written out.
    2489             :  */
    2490             : int
    2491     5105305 : xlog_write(
    2492             :         struct xlog             *log,
    2493             :         struct xfs_cil_ctx      *ctx,
    2494             :         struct list_head        *lv_chain,
    2495             :         struct xlog_ticket      *ticket,
    2496             :         uint32_t                len)
    2497             : 
    2498             : {
    2499     5105305 :         struct xlog_in_core     *iclog = NULL;
    2500     5105305 :         struct xfs_log_vec      *lv;
    2501     5105305 :         uint32_t                record_cnt = 0;
    2502     5105305 :         uint32_t                data_cnt = 0;
    2503     5105305 :         int                     error = 0;
    2504     5105305 :         int                     log_offset;
    2505             : 
    2506     5105305 :         if (ticket->t_curr_res < 0) {
    2507           0 :                 xfs_alert_tag(log->l_mp, XFS_PTAG_LOGRES,
    2508             :                      "ctx ticket reservation ran out. Need to up reservation");
    2509           0 :                 xlog_print_tic_res(log->l_mp, ticket);
    2510           0 :                 xlog_force_shutdown(log, SHUTDOWN_LOG_IO_ERROR);
    2511             :         }
    2512             : 
    2513     5105305 :         error = xlog_state_get_iclog_space(log, len, &iclog, ticket,
    2514             :                                            &log_offset);
    2515     5105305 :         if (error)
    2516             :                 return error;
    2517             : 
    2518     5105305 :         ASSERT(log_offset <= iclog->ic_size - 1);
    2519             : 
    2520             :         /*
    2521             :          * If we have a context pointer, pass it the first iclog we are
    2522             :          * writing to so it can record state needed for iclog write
    2523             :          * ordering.
    2524             :          */
    2525     5105305 :         if (ctx)
    2526     5091188 :                 xlog_cil_set_ctx_write_state(ctx, iclog);
    2527             : 
    2528   424955415 :         list_for_each_entry(lv, lv_chain, lv_list) {
    2529             :                 /*
    2530             :                  * If the entire log vec does not fit in the iclog, punt it to
    2531             :                  * the partial copy loop which can handle this case.
    2532             :                  */
    2533   419852739 :                 if (lv->lv_niovecs &&
    2534   419367432 :                     lv->lv_bytes > iclog->ic_size - log_offset) {
    2535    10097047 :                         error = xlog_write_partial(lv, ticket, &iclog,
    2536             :                                         &log_offset, &len, &record_cnt,
    2537             :                                         &data_cnt);
    2538    10097012 :                         if (error) {
    2539             :                                 /*
    2540             :                                  * We have no iclog to release, so just return
    2541             :                                  * the error immediately.
    2542             :                                  */
    2543        2628 :                                 return error;
    2544             :                         }
    2545             :                 } else {
    2546   409755692 :                         xlog_write_full(lv, ticket, iclog, &log_offset,
    2547             :                                          &len, &record_cnt, &data_cnt);
    2548             :                 }
    2549             :         }
    2550     5102676 :         ASSERT(len == 0);
    2551             : 
    2552             :         /*
    2553             :          * We've already been guaranteed that the last writes will fit inside
    2554             :          * the current iclog, and hence it will already have the space used by
    2555             :          * those writes accounted to it. Hence we do not need to update the
    2556             :          * iclog with the number of bytes written here.
    2557             :          */
    2558     5102676 :         spin_lock(&log->l_icloglock);
    2559     5102679 :         xlog_state_finish_copy(log, iclog, record_cnt, 0);
    2560     5102679 :         error = xlog_state_release_iclog(log, iclog, ticket);
    2561     5102683 :         spin_unlock(&log->l_icloglock);
    2562             : 
    2563     5102683 :         return error;
    2564             : }
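
The dispatch between xlog_write_full() and xlog_write_partial() reduces to a
single comparison against the iclog's remaining space. A tiny sketch of that
predicate with assumed sizes:

    #include <assert.h>
    #include <stdint.h>

    /* A log vector takes the fast full-copy path only if all of it fits in
     * the space left in the current iclog. */
    static int needs_partial_copy(uint32_t lv_bytes, uint32_t ic_size,
                                  uint32_t log_offset)
    {
            return lv_bytes > ic_size - log_offset;
    }

    int main(void)
    {
            assert(!needs_partial_copy(4096, 32768, 16384)); /* fits: fast */
            assert(needs_partial_copy(65536, 32768, 0));     /* spans: slow */
            return 0;
    }
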
    2565             : 
    2566             : static void
    2567    12560676 : xlog_state_activate_iclog(
    2568             :         struct xlog_in_core     *iclog,
    2569             :         int                     *iclogs_changed)
    2570             : {
    2571    25121352 :         ASSERT(list_empty_careful(&iclog->ic_callbacks));
    2572    12560676 :         trace_xlog_iclog_activate(iclog, _RET_IP_);
    2573             : 
    2574             :         /*
    2575             :          * If the number of ops in this iclog indicates it just contains the
    2576             :          * dummy transaction, we can change state into IDLE (the second time
    2577             :          * around). Otherwise we should change the state to show we NEED a
    2578             :          * dummy.  We don't need to cover the dummy itself.
    2579             :          */
    2580    12560676 :         if (*iclogs_changed == 0 &&
    2581    12560676 :             iclog->ic_header.h_num_logops == cpu_to_be32(XLOG_COVER_OPS)) {
    2582      141676 :                 *iclogs_changed = 1;
    2583             :         } else {
    2584             :                 /*
    2585             :                  * We have two dirty iclogs so start over.  The op count may
    2586             :                  * also indicate that this is not the dummy going out.
    2587             :                  */
    2588    12419000 :                 *iclogs_changed = 2;
    2589             :         }
    2590             : 
    2591    12560676 :         iclog->ic_state      = XLOG_STATE_ACTIVE;
    2592    12560676 :         iclog->ic_offset = 0;
    2593    12560676 :         iclog->ic_header.h_num_logops = 0;
    2594    12560676 :         memset(iclog->ic_header.h_cycle_data, 0,
    2595             :                 sizeof(iclog->ic_header.h_cycle_data));
    2596    12560676 :         iclog->ic_header.h_lsn = 0;
    2597    12560676 :         iclog->ic_header.h_tail_lsn = 0;
    2598    12560676 : }
    2599             : 
    2600             : /*
    2601             :  * Loop through all iclogs and mark all iclogs currently marked DIRTY as
    2602             :  * ACTIVE after iclog I/O has completed.
    2603             :  */
    2604             : static void
    2605    12560676 : xlog_state_activate_iclogs(
    2606             :         struct xlog             *log,
    2607             :         int                     *iclogs_changed)
    2608             : {
    2609    12560676 :         struct xlog_in_core     *iclog = log->l_iclog;
    2610             : 
    2611    70373145 :         do {
    2612    70373145 :                 if (iclog->ic_state == XLOG_STATE_DIRTY)
    2613    12560676 :                         xlog_state_activate_iclog(iclog, iclogs_changed);
    2614             :                 /*
    2615             :                  * The ordering of marking iclogs ACTIVE must be maintained, so
    2616             :                  * an iclog doesn't become ACTIVE beyond one that is SYNCING.
    2617             :                  */
    2618    57812469 :                 else if (iclog->ic_state != XLOG_STATE_ACTIVE)
    2619             :                         break;
    2620    60948562 :         } while ((iclog = iclog->ic_next) != log->l_iclog);
    2621    12560676 : }
    2622             : 
    2623             : static int
    2624    12560676 : xlog_covered_state(
    2625             :         int                     prev_state,
    2626             :         int                     iclogs_changed)
    2627             : {
    2628             :         /*
    2629             :          * We go to NEED for any non-covering writes. We go to NEED2 if we just
    2630             :          * wrote the first covering record (DONE). We go to IDLE if we just
    2631             :          * wrote the second covering record (DONE2) and remain in IDLE until a
    2632             :          * non-covering write occurs.
    2633             :          */
    2634    12560676 :         switch (prev_state) {
    2635       83132 :         case XLOG_STATE_COVER_IDLE:
    2636       83132 :                 if (iclogs_changed == 1)
    2637        8121 :                         return XLOG_STATE_COVER_IDLE;
    2638             :                 fallthrough;
    2639             :         case XLOG_STATE_COVER_NEED:
    2640             :         case XLOG_STATE_COVER_NEED2:
    2641             :                 break;
    2642       54587 :         case XLOG_STATE_COVER_DONE:
    2643       54587 :                 if (iclogs_changed == 1)
    2644       54587 :                         return XLOG_STATE_COVER_NEED2;
    2645             :                 break;
    2646       54577 :         case XLOG_STATE_COVER_DONE2:
    2647       54577 :                 if (iclogs_changed == 1)
    2648       54577 :                         return XLOG_STATE_COVER_IDLE;
    2649             :                 break;
    2650           0 :         default:
    2651           0 :                 ASSERT(0);
    2652             :         }
    2653             : 
    2654             :         return XLOG_STATE_COVER_NEED;
    2655             : }
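
Read as a state machine, the covering sequence is NEED -> DONE -> NEED2 ->
DONE2 -> IDLE, with any non-covering write dropping back to NEED. A
standalone mirror of the transition function above (the enum values here are
illustrative; the kernel's definitions live in xfs_log_priv.h):

    #include <assert.h>
    #include <stdio.h>

    enum cover_state {
            COVER_IDLE, COVER_NEED, COVER_DONE, COVER_NEED2, COVER_DONE2,
    };

    /* iclogs_changed == 1 means the write contained only the dummy record. */
    static enum cover_state next_covered_state(enum cover_state prev,
                                               int iclogs_changed)
    {
            switch (prev) {
            case COVER_IDLE:
                    if (iclogs_changed == 1)
                            return COVER_IDLE;
                    /* fall through: a real write restarts the covering dance */
            case COVER_NEED:
            case COVER_NEED2:
                    break;
            case COVER_DONE:
                    if (iclogs_changed == 1)
                            return COVER_NEED2;   /* first dummy is on disk */
                    break;
            case COVER_DONE2:
                    if (iclogs_changed == 1)
                            return COVER_IDLE;    /* second dummy is on disk */
                    break;
            }
            return COVER_NEED;
    }

    int main(void)
    {
            assert(next_covered_state(COVER_DONE, 1) == COVER_NEED2);
            assert(next_covered_state(COVER_DONE2, 1) == COVER_IDLE);
            assert(next_covered_state(COVER_IDLE, 2) == COVER_NEED);
            puts("covering transitions ok");
            return 0;
    }
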
    2656             : 
    2657             : STATIC void
    2658    12560676 : xlog_state_clean_iclog(
    2659             :         struct xlog             *log,
    2660             :         struct xlog_in_core     *dirty_iclog)
    2661             : {
    2662    12560676 :         int                     iclogs_changed = 0;
    2663             : 
    2664    12560676 :         trace_xlog_iclog_clean(dirty_iclog, _RET_IP_);
    2665             : 
    2666    12560676 :         dirty_iclog->ic_state = XLOG_STATE_DIRTY;
    2667             : 
    2668    12560676 :         xlog_state_activate_iclogs(log, &iclogs_changed);
    2669    12560676 :         wake_up_all(&dirty_iclog->ic_force_wait);
    2670             : 
    2671    12560676 :         if (iclogs_changed) {
    2672    12560676 :                 log->l_covered_state = xlog_covered_state(log->l_covered_state,
    2673             :                                 iclogs_changed);
    2674             :         }
    2675    12560676 : }
    2676             : 
    2677             : STATIC xfs_lsn_t
    2678    12560676 : xlog_get_lowest_lsn(
    2679             :         struct xlog             *log)
    2680             : {
    2681    12560676 :         struct xlog_in_core     *iclog = log->l_iclog;
    2682    12560676 :         xfs_lsn_t               lowest_lsn = 0, lsn;
    2683             : 
    2684   100485384 :         do {
    2685   100485384 :                 if (iclog->ic_state == XLOG_STATE_ACTIVE ||
    2686             :                     iclog->ic_state == XLOG_STATE_DIRTY)
    2687    48449011 :                         continue;
    2688             : 
    2689    52036373 :                 lsn = be64_to_cpu(iclog->ic_header.h_lsn);
    2690    52036373 :                 if ((lsn && !lowest_lsn) || XFS_LSN_CMP(lsn, lowest_lsn) < 0)
    2691             :                         lowest_lsn = lsn;
    2692   100485384 :         } while ((iclog = iclog->ic_next) != log->l_iclog);
    2693             : 
    2694    12560676 :         return lowest_lsn;
    2695             : }
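
The scan depends on LSN ordering: an LSN packs the log cycle into the high
32 bits and the basic-block number into the low 32 bits, so comparison is
cycle-major. A hedged sketch of that comparison (lsn_cmp is an illustrative
stand-in for XFS_LSN_CMP):

    #include <assert.h>
    #include <stdint.h>

    typedef uint64_t xfs_lsn_t;

    static uint32_t cycle_lsn(xfs_lsn_t lsn) { return lsn >> 32; }
    static uint32_t block_lsn(xfs_lsn_t lsn) { return (uint32_t)lsn; }

    /* Compare two LSNs: cycle first, block number as the tie-breaker. */
    static int lsn_cmp(xfs_lsn_t a, xfs_lsn_t b)
    {
            if (cycle_lsn(a) != cycle_lsn(b))
                    return cycle_lsn(a) < cycle_lsn(b) ? -1 : 1;
            if (block_lsn(a) != block_lsn(b))
                    return block_lsn(a) < block_lsn(b) ? -1 : 1;
            return 0;
    }

    int main(void)
    {
            xfs_lsn_t early = ((xfs_lsn_t)3 << 32) | 9000; /* cycle 3, blk 9000 */
            xfs_lsn_t late  = ((xfs_lsn_t)4 << 32) | 100;  /* cycle 4, blk 100 */

            assert(lsn_cmp(early, late) < 0);  /* the higher cycle wins */
            assert(lsn_cmp(late, late) == 0);
            return 0;
    }
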
    2696             : 
    2697             : /*
    2698             :  * Completion of an iclog IO does not imply that a transaction has completed, as
    2699             :  * transactions can be large enough to span many iclogs. We cannot change the
    2700             :  * tail of the log half way through a transaction as this may be the only
    2701             :  * transaction in the log and moving the tail to point to the middle of it
    2702             :  * will prevent recovery from finding the start of the transaction. Hence we
    2703             :  * should only update the last_sync_lsn if this iclog contains transaction
    2704             :  * completion callbacks on it.
    2705             :  *
    2706             :  * We have to do this before we drop the icloglock to ensure we are the only one
    2707             :  * that can update it.
    2708             :  *
    2709             :  * If we are moving the last_sync_lsn forwards, we also need to ensure we kick
    2710             :  * the reservation grant head pushing. This is due to the fact that the push
    2711             :  * target is bound by the current last_sync_lsn value. Hence if we have a large
    2712             :  * amount of log space bound up in this committing transaction then the
    2713             :  * last_sync_lsn value may be the limiting factor preventing tail pushing from
    2714             :  * freeing space in the log. Hence once we've updated the last_sync_lsn we
    2715             :  * should push the AIL to ensure the push target (and hence the grant head) is
    2716             :  * no longer bound by the old log head location and can move forwards and make
    2717             :  * progress again.
    2718             :  */
    2719             : static void
    2720    12560676 : xlog_state_set_callback(
    2721             :         struct xlog             *log,
    2722             :         struct xlog_in_core     *iclog,
    2723             :         xfs_lsn_t               header_lsn)
    2724             : {
    2725    12560676 :         trace_xlog_iclog_callback(iclog, _RET_IP_);
    2726    12560676 :         iclog->ic_state = XLOG_STATE_CALLBACK;
    2727             : 
    2728    25121352 :         ASSERT(XFS_LSN_CMP(atomic64_read(&log->l_last_sync_lsn),
    2729             :                            header_lsn) <= 0);
    2730             : 
    2731    12560676 :         if (list_empty_careful(&iclog->ic_callbacks))
    2732             :                 return;
    2733             : 
    2734     2526039 :         atomic64_set(&log->l_last_sync_lsn, header_lsn);
    2735     2526039 :         xlog_grant_push_ail(log, 0);
    2736             : }
    2737             : 
    2738             : /*
    2739             :  * Return true if we need to stop processing, false to continue to the next
    2740             :  * iclog. The caller will need to run callbacks if the iclog is returned in the
    2741             :  * XLOG_STATE_CALLBACK state.
    2742             :  */
    2743             : static bool
    2744   117223146 : xlog_state_iodone_process_iclog(
    2745             :         struct xlog             *log,
    2746             :         struct xlog_in_core     *iclog)
    2747             : {
    2748   117223146 :         xfs_lsn_t               lowest_lsn;
    2749   117223146 :         xfs_lsn_t               header_lsn;
    2750             : 
    2751   117223146 :         switch (iclog->ic_state) {
    2752             :         case XLOG_STATE_ACTIVE:
    2753             :         case XLOG_STATE_DIRTY:
    2754             :                 /*
    2755             :                  * Skip all iclogs in the ACTIVE & DIRTY states:
    2756             :                  */
    2757             :                 return false;
    2758    12560676 :         case XLOG_STATE_DONE_SYNC:
    2759             :                 /*
    2760             :                  * Now that we have an iclog that is in the DONE_SYNC state, do
    2761             :                  * one more check here to see if we have chased our tail around.
    2762             :                  * If this is not the lowest lsn iclog, then we will leave it
    2763             :                  * for another completion to process.
    2764             :                  */
    2765    12560676 :                 header_lsn = be64_to_cpu(iclog->ic_header.h_lsn);
    2766    12560676 :                 lowest_lsn = xlog_get_lowest_lsn(log);
    2767    12560676 :                 if (lowest_lsn && XFS_LSN_CMP(lowest_lsn, header_lsn) < 0)
    2768             :                         return false;
    2769    12560676 :                 xlog_state_set_callback(log, iclog, header_lsn);
    2770    12560676 :                 return false;
    2771    15362448 :         default:
    2772             :                 /*
    2773             :                  * Can only perform callbacks in order.  Since this iclog is not
    2774             :                  * in the DONE_SYNC state, we skip the rest and just try to
    2775             :                  * clean up.
    2776             :                  */
    2777    15362448 :                 return true;
    2778             :         }
    2779             : }
    2780             : 
    2781             : /*
    2782             :  * Loop over all the iclogs, running attached callbacks on them. Return true if
    2783             :  * we ran any callbacks, indicating that we dropped the icloglock. We don't need
    2784             :  * to handle transient shutdown state here at all because
    2785             :  * xlog_state_shutdown_callbacks() will be run to do the necessary shutdown
    2786             :  * cleanup of the callbacks.
    2787             :  */
    2788             : static bool
    2789    21649369 : xlog_state_do_iclog_callbacks(
    2790             :         struct xlog             *log)
    2791             :                 __releases(&log->l_icloglock)
    2792             :                 __acquires(&log->l_icloglock)
    2793             : {
    2794    21649369 :         struct xlog_in_core     *first_iclog = log->l_iclog;
    2795    21649369 :         struct xlog_in_core     *iclog = first_iclog;
    2796    21649369 :         bool                    ran_callback = false;
    2797             : 
    2798   117223146 :         do {
    2799   117223146 :                 LIST_HEAD(cb_list);
    2800             : 
    2801   117223146 :                 if (xlog_state_iodone_process_iclog(log, iclog))
    2802             :                         break;
    2803   101860698 :                 if (iclog->ic_state != XLOG_STATE_CALLBACK) {
    2804    89300022 :                         iclog = iclog->ic_next;
    2805    89300022 :                         continue;
    2806             :                 }
    2807    12560676 :                 list_splice_init(&iclog->ic_callbacks, &cb_list);
    2808    12560676 :                 spin_unlock(&log->l_icloglock);
    2809             : 
    2810    12560676 :                 trace_xlog_iclog_callbacks_start(iclog, _RET_IP_);
    2811    12560676 :                 xlog_cil_process_committed(&cb_list);
    2812    12560676 :                 trace_xlog_iclog_callbacks_done(iclog, _RET_IP_);
    2813    12560676 :                 ran_callback = true;
    2814             : 
    2815    12560676 :                 spin_lock(&log->l_icloglock);
    2816    12560676 :                 xlog_state_clean_iclog(log, iclog);
    2817    12560676 :                 iclog = iclog->ic_next;
    2818   101860698 :         } while (iclog != first_iclog);
    2819             : 
    2820    21649369 :         return ran_callback;
    2821             : }
    2822             : 
    2823             : 
    2824             : /*
    2825             :  * Loop running iclog completion callbacks until there are no more iclogs in a
    2826             :  * state that can run callbacks.
    2827             :  */
    2828             : STATIC void
    2829    12570798 : xlog_state_do_callback(
    2830             :         struct xlog             *log)
    2831             : {
    2832    12570798 :         int                     flushcnt = 0;
    2833    12570798 :         int                     repeats = 0;
    2834             : 
    2835    12570798 :         spin_lock(&log->l_icloglock);
    2836    21649369 :         while (xlog_state_do_iclog_callbacks(log)) {
    2837    18157144 :                 if (xlog_is_shutdown(log))
    2838             :                         break;
    2839             : 
    2840     9078571 :                 if (++repeats > 5000) {
    2841           0 :                         flushcnt += repeats;
    2842           0 :                         repeats = 0;
    2843           0 :                         xfs_warn(log->l_mp,
    2844             :                                 "%s: possible infinite loop (%d iterations)",
    2845             :                                 __func__, flushcnt);
    2846             :                 }
    2847             :         }
    2848             : 
    2849    12570798 :         if (log->l_iclog->ic_state == XLOG_STATE_ACTIVE)
    2850    10672876 :                 wake_up_all(&log->l_flush_wait);
    2851             : 
    2852    12570798 :         spin_unlock(&log->l_icloglock);
    2853    12570798 : }
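
The repeats counter above is a livelock tripwire rather than a hard limit:
it warns every 5000 passes but lets the loop keep running. The generic shape
of that pattern, with a trivial stand-in workload:

    #include <stdio.h>

    static int do_one_pass(int *work)     /* illustrative work function */
    {
            return (*work)-- > 0;         /* false once the work is drained */
    }

    int main(void)
    {
            int work = 12000, repeats = 0, flushcnt = 0;

            while (do_one_pass(&work)) {
                    if (++repeats > 5000) {
                            flushcnt += repeats;
                            repeats = 0;
                            fprintf(stderr,
                                    "possible infinite loop (%d iterations)\n",
                                    flushcnt);
                    }
            }
            return 0;
    }
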
    2854             : 
    2855             : 
    2856             : /*
    2857             :  * Finish transitioning this iclog to the dirty state.
    2858             :  *
    2859             :  * Callbacks could take time, so they are done outside the scope of the
    2860             :  * global state machine log lock.
    2861             :  */
    2862             : STATIC void
    2863    12570798 : xlog_state_done_syncing(
    2864             :         struct xlog_in_core     *iclog)
    2865             : {
    2866    12570798 :         struct xlog             *log = iclog->ic_log;
    2867             : 
    2868    12570798 :         spin_lock(&log->l_icloglock);
    2869    12570798 :         ASSERT(atomic_read(&iclog->ic_refcnt) == 0);
    2870    12570798 :         trace_xlog_iclog_sync_done(iclog, _RET_IP_);
    2871             : 
    2872             :         /*
    2873             :          * If we got an error, either on the first buffer, or in the case of
    2874             :          * split log writes, on the second, we shut down the file system and
    2875             :          * no iclogs should ever be attempted to be written to disk again.
    2876             :          */
    2877    25141596 :         if (!xlog_is_shutdown(log)) {
    2878    12560708 :                 ASSERT(iclog->ic_state == XLOG_STATE_SYNCING);
    2879    12560708 :                 iclog->ic_state = XLOG_STATE_DONE_SYNC;
    2880             :         }
    2881             : 
    2882             :         /*
    2883             :          * Someone could be sleeping prior to writing out the next
    2884             :          * iclog buffer; we wake them all.  One will get to do the
    2885             :          * I/O, the others get to wait for the result.
    2886             :          */
    2887    12570798 :         wake_up_all(&iclog->ic_write_wait);
    2888    12570798 :         spin_unlock(&log->l_icloglock);
    2889    12570798 :         xlog_state_do_callback(log);
    2890    12570798 : }
    2891             : 
    2892             : /*
    2893             :  * If the head of the in-core log ring is not (ACTIVE or DIRTY), then we must
    2894             :  * sleep.  We wait on the flush queue on the head iclog as that should be
    2895             :  * the first iclog to complete flushing. Hence if all iclogs are syncing,
    2896             :  * we will wait here and all new writes will sleep until a sync completes.
    2897             :  *
    2898             :  * The in-core logs are used in a circular fashion. They are not used
    2899             :  * out-of-order even when an iclog past the head is free.
    2900             :  *
    2901             :  * return:
    2902             :  *      * log_offset where xlog_write() can start writing into the in-core
    2903             :  *              log's data space.
    2904             :  *      * in-core log pointer to which xlog_write() should write.
    2905             :  *      * boolean indicating this is a continued write to an in-core log.
    2906             :  *              If this is the last write, then the in-core log's offset field
    2907             :  *              needs to be incremented, depending on the amount of data which
    2908             :  *              is copied.
    2909             :  */
    2910             : STATIC int
    2911    15202710 : xlog_state_get_iclog_space(
    2912             :         struct xlog             *log,
    2913             :         int                     len,
    2914             :         struct xlog_in_core     **iclogp,
    2915             :         struct xlog_ticket      *ticket,
    2916             :         int                     *logoffsetp)
    2917             : {
    2918    16973962 :         int               log_offset;
    2919    16973962 :         xlog_rec_header_t *head;
    2920    16973962 :         xlog_in_core_t    *iclog;
    2921             : 
    2922             : restart:
    2923    16973962 :         spin_lock(&log->l_icloglock);
    2924    33947948 :         if (xlog_is_shutdown(log)) {
    2925        2619 :                 spin_unlock(&log->l_icloglock);
    2926        2619 :                 return -EIO;
    2927             :         }
    2928             : 
    2929    16971355 :         iclog = log->l_iclog;
    2930    16971355 :         if (iclog->ic_state != XLOG_STATE_ACTIVE) {
    2931     1769926 :                 XFS_STATS_INC(log->l_mp, xs_log_noiclogs);
    2932             : 
    2933             :                 /* Wait for log writes to have flushed */
    2934     1769926 :                 xlog_wait(&log->l_flush_wait, &log->l_icloglock);
    2935     1769923 :                 goto restart;
    2936             :         }
    2937             : 
    2938    15201429 :         head = &iclog->ic_header;
    2939             : 
    2940    15201429 :         atomic_inc(&iclog->ic_refcnt);   /* prevents sync */
    2941    15201424 :         log_offset = iclog->ic_offset;
    2942             : 
    2943    15201424 :         trace_xlog_iclog_get_space(iclog, _RET_IP_);
    2944             : 
    2945             :         /* On the first write to an iclog, figure out the lsn.  This works
    2946             :          * if iclogs marked XLOG_STATE_WANT_SYNC always write out what they are
    2947             :          * committing to.  If the offset is set, that's how many blocks
    2948             :          * must be written.
    2949             :          */
    2950    15201438 :         if (log_offset == 0) {
    2951    12572366 :                 ticket->t_curr_res -= log->l_iclog_hsize;
    2952    12572366 :                 head->h_cycle = cpu_to_be32(log->l_curr_cycle);
    2953    12572366 :                 head->h_lsn = cpu_to_be64(
    2954             :                         xlog_assign_lsn(log->l_curr_cycle, log->l_curr_block));
    2955    12572366 :                 ASSERT(log->l_curr_block >= 0);
    2956             :         }
    2957             : 
    2958             :         /* If there is enough room to write everything, then do it.  Otherwise,
    2959             :          * claim the rest of the region and make sure the XLOG_STATE_WANT_SYNC
    2960             :          * bit is on, so this will get flushed out.  Don't update ic_offset
    2961             :          * until we know exactly how many bytes get copied; defer the
    2962             :          * update until then.
    2963             :          *
    2964             :          * xlog_write() algorithm assumes that at least 2 xlog_op_header_t's
    2965             :          * can fit into remaining data section.
    2966             :          */
    2967    15201438 :         if (iclog->ic_size - iclog->ic_offset < 2*sizeof(xlog_op_header_t)) {
    2968        1329 :                 int             error = 0;
    2969             : 
    2970        1329 :                 xlog_state_switch_iclogs(log, iclog, iclog->ic_size);
    2971             : 
    2972             :                 /*
    2973             :                  * If we are the only one writing to this iclog, sync it to
    2974             :                  * disk.  We need to do an atomic compare and decrement here to
    2975             :                  * avoid racing with concurrent atomic_dec_and_lock() calls in
    2976             :                  * xlog_state_release_iclog() when there is more than one
    2977             :                  * reference to the iclog.
    2978             :                  */
    2979        2658 :                 if (!atomic_add_unless(&iclog->ic_refcnt, -1, 1))
    2980        1322 :                         error = xlog_state_release_iclog(log, iclog, ticket);
    2981        1329 :                 spin_unlock(&log->l_icloglock);
    2982        1329 :                 if (error)
    2983           0 :                         return error;
    2984        1329 :                 goto restart;
    2985             :         }
    2986             : 
    2987             :         /* Do we have enough room to write the full amount in the remainder
    2988             :          * of this iclog?  Or must we continue a write on the next iclog and
    2989             :          * mark this iclog as completely taken?  In the case where we switch
    2990             :          * iclogs (to mark it taken), this particular iclog will release/sync
    2991             :          * to disk in xlog_write().
    2992             :          */
    2993    15200109 :         if (len <= iclog->ic_size - iclog->ic_offset)
    2994     5102679 :                 iclog->ic_offset += len;
    2995             :         else
    2996    10097430 :                 xlog_state_switch_iclogs(log, iclog, iclog->ic_size);
    2997    15200109 :         *iclogp = iclog;
    2998             : 
    2999    15200109 :         ASSERT(iclog->ic_offset <= iclog->ic_size);
    3000    15200109 :         spin_unlock(&log->l_icloglock);
    3001             : 
    3002    15200091 :         *logoffsetp = log_offset;
    3003    15200091 :         return 0;
    3004             : }
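                     : 
                     : /*
                     :  * Illustrative note (assuming the usual 12-byte xlog_op_header_t):
                     :  * the early switch above fires once fewer than 2 * 12 = 24 bytes
                     :  * remain in the iclog, because a continued write must have room for
                     :  * at least two more op headers in the data section.
                     :  */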
    3005             : 
    3006             : /*
    3007             :  * The first cnt-1 times a ticket goes through here we don't need to move the
    3008             :  * grant write head because the permanent reservation has reserved cnt times the
    3009             :  * unit amount.  Release part of current permanent unit reservation and reset
    3010             :  * current reservation to be one unit's worth.  Also move grant reservation head
    3011             :  * forward.
    3012             :  */
    3013             : void
    3014   454213476 : xfs_log_ticket_regrant(
    3015             :         struct xlog             *log,
    3016             :         struct xlog_ticket      *ticket)
    3017             : {
    3018   454213476 :         trace_xfs_log_ticket_regrant(log, ticket);
    3019             : 
    3020   454214434 :         if (ticket->t_cnt > 0)
    3021   301828783 :                 ticket->t_cnt--;
    3022             : 
    3023   454214434 :         xlog_grant_sub_space(log, &log->l_reserve_head.grant,
    3024             :                                         ticket->t_curr_res);
    3025   454215901 :         xlog_grant_sub_space(log, &log->l_write_head.grant,
    3026             :                                         ticket->t_curr_res);
    3027   454215485 :         ticket->t_curr_res = ticket->t_unit_res;
    3028             : 
    3029   454215485 :         trace_xfs_log_ticket_regrant_sub(log, ticket);
    3030             : 
    3031             :         /* regrant a full reserve unit if the pre-reserved count is exhausted */
    3032   454215637 :         if (!ticket->t_cnt) {
    3033   157744071 :                 xlog_grant_add_space(log, &log->l_reserve_head.grant,
    3034             :                                      ticket->t_unit_res);
    3035   157744056 :                 trace_xfs_log_ticket_regrant_exit(log, ticket);
    3036             : 
    3037   157744104 :                 ticket->t_curr_res = ticket->t_unit_res;
    3038             :         }
    3039             : 
    3040   454215670 :         xfs_log_ticket_put(ticket);
    3041   454215957 : }
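                     : 
                     : /*
                     :  * Worked example (illustrative numbers, not from the source): a
                     :  * permanent ticket with t_unit_res = 4096 and t_cnt = 3 arrives here
                     :  * with t_curr_res = 1000 bytes unused.  Regranting drops t_cnt to 2,
                     :  * gives the unused 1000 bytes back to both grant heads, and resets
                     :  * t_curr_res to the full 4096, which is covered by one of the
                     :  * remaining pre-reserved units.  Only when t_cnt has reached zero is
                     :  * a full unit re-added to the reserve grant head.
                     :  */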
    3042             : 
    3043             : /*
    3044             :  * Give back the space left from a reservation.
    3045             :  *
    3046             :  * All the information we need to make a correct determination of space left
    3047             :  * is present.  For non-permanent reservations, things are quite easy.  The
    3048             :  * count should have been decremented to zero.  We only need to deal with the
    3049             :  * space remaining in the current reservation part of the ticket.  If the
    3050             :  * ticket contains a permanent reservation, there may be left over space which
    3051             :  * needs to be released.  A count of N means that N-1 refills of the current
    3052             :  * reservation can be done before we need to ask for more space.  The first
    3053             :  * one goes to fill up the first current reservation.  Once we run out of
    3054             :  * space, the count will stay at zero and the only space remaining will be
    3055             :  * in the current reservation field.
    3056             :  */
    3057             : void
    3058   847215867 : xfs_log_ticket_ungrant(
    3059             :         struct xlog             *log,
    3060             :         struct xlog_ticket      *ticket)
    3061             : {
    3062   847215867 :         int                     bytes;
    3063             : 
    3064   847215867 :         trace_xfs_log_ticket_ungrant(log, ticket);
    3065             : 
    3066   847305540 :         if (ticket->t_cnt > 0)
    3067   759043134 :                 ticket->t_cnt--;
    3068             : 
    3069   847305540 :         trace_xfs_log_ticket_ungrant_sub(log, ticket);
    3070             : 
    3071             :         /*
    3072             :          * If this is a permanent reservation ticket, we may be able to free
    3073             :          * up more space based on the remaining count.
    3074             :          */
    3075   847335323 :         bytes = ticket->t_curr_res;
    3076   847335323 :         if (ticket->t_cnt > 0) {
    3077   732061552 :                 ASSERT(ticket->t_flags & XLOG_TIC_PERM_RESERV);
    3078   732061552 :                 bytes += ticket->t_unit_res*ticket->t_cnt;
    3079             :         }
    3080             : 
    3081   847335323 :         xlog_grant_sub_space(log, &log->l_reserve_head.grant, bytes);
    3082   847462889 :         xlog_grant_sub_space(log, &log->l_write_head.grant, bytes);
    3083             : 
    3084   847450061 :         trace_xfs_log_ticket_ungrant_exit(log, ticket);
    3085             : 
    3086   847443833 :         xfs_log_space_wake(log->l_mp);
    3087   847392554 :         xfs_log_ticket_put(ticket);
    3088   847449449 : }
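                     : 
                     : /*
                     :  * Worked example (illustrative numbers): a permanent ticket entering
                     :  * with t_cnt = 3, t_unit_res = 4096 and t_curr_res = 1000 first has
                     :  * t_cnt decremented to 2, then returns bytes = 1000 + 2 * 4096 = 9192
                     :  * to both grant heads before waking any space waiters.
                     :  */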
    3089             : 
    3090             : /*
    3091             :  * This routine will mark the current iclog in the ring as WANT_SYNC and move
    3092             :  * the current iclog pointer to the next iclog in the ring.
    3093             :  */
    3094             : void
    3095    12571243 : xlog_state_switch_iclogs(
    3096             :         struct xlog             *log,
    3097             :         struct xlog_in_core     *iclog,
    3098             :         int                     eventual_size)
    3099             : {
    3100    12571243 :         ASSERT(iclog->ic_state == XLOG_STATE_ACTIVE);
    3101    12571243 :         assert_spin_locked(&log->l_icloglock);
    3102    12571243 :         trace_xlog_iclog_switch(iclog, _RET_IP_);
    3103             : 
    3104    12571243 :         if (!eventual_size)
    3105     2472487 :                 eventual_size = iclog->ic_offset;
    3106    12571243 :         iclog->ic_state = XLOG_STATE_WANT_SYNC;
    3107    12571243 :         iclog->ic_header.h_prev_block = cpu_to_be32(log->l_prev_block);
    3108    12571243 :         log->l_prev_block = log->l_curr_block;
    3109    12571243 :         log->l_prev_cycle = log->l_curr_cycle;
    3110             : 
    3111             :         /* roll the log forward; ic_offset is updated later */
    3112    12571243 :         log->l_curr_block += BTOBB(eventual_size)+BTOBB(log->l_iclog_hsize);
    3113             : 
    3114             :         /* Round up to next log-sunit */
    3115    12571243 :         if (log->l_iclog_roundoff > BBSIZE) {
    3116    12569032 :                 uint32_t sunit_bb = BTOBB(log->l_iclog_roundoff);
    3117    12569032 :                 log->l_curr_block = roundup(log->l_curr_block, sunit_bb);
    3118             :         }
    3119             : 
    3120    12571243 :         if (log->l_curr_block >= log->l_logBBsize) {
    3121             :                 /*
    3122             :                  * Rewind the current block before the cycle is bumped to make
    3123             :                  * sure that the combined LSN never transiently moves forward
    3124             :                  * when the log wraps to the next cycle. This is to support the
    3125             :                  * unlocked sample of these fields from xlog_valid_lsn(). Most
    3126             :                  * other cases should acquire l_icloglock.
    3127             :                  */
    3128        5327 :                 log->l_curr_block -= log->l_logBBsize;
    3129        5327 :                 ASSERT(log->l_curr_block >= 0);
    3130        5327 :                 smp_wmb();
    3131        5327 :                 log->l_curr_cycle++;
    3132        5327 :                 if (log->l_curr_cycle == XLOG_HEADER_MAGIC_NUM)
    3133           0 :                         log->l_curr_cycle++;
    3134             :         }
    3135    12571243 :         ASSERT(iclog == log->l_iclog);
    3136    12571243 :         log->l_iclog = iclog->ic_next;
    3137    12571243 : }
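                     : 
                     : /*
                     :  * Worked example (illustrative numbers): with eventual_size = 32768
                     :  * bytes, a 512-byte iclog header and l_curr_block = 100, the head
                     :  * advances by BTOBB(32768) + BTOBB(512) = 64 + 1 basic blocks to 165.
                     :  * With a 4096-byte l_iclog_roundoff, sunit_bb = BTOBB(4096) = 8 and
                     :  * roundup(165, 8) = 168.  If that reaches l_logBBsize, the block
                     :  * count wraps back around and l_curr_cycle is bumped.
                     :  */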
    3138             : 
    3139             : /*
    3140             :  * Force the iclog to disk and check if the iclog has been completed before
    3141             :  * xlog_force_iclog() returns. This can happen on synchronous (e.g.
    3142             :  * pmem) or fast async storage because we drop the icloglock to issue the IO.
    3143             :  * If completion has already occurred, tell the caller so that it can avoid an
    3144             :  * unnecessary wait on the iclog.
    3145             :  */
    3146             : static int
    3147     2081528 : xlog_force_and_check_iclog(
    3148             :         struct xlog_in_core     *iclog,
    3149             :         bool                    *completed)
    3150             : {
    3151     2081528 :         xfs_lsn_t               lsn = be64_to_cpu(iclog->ic_header.h_lsn);
    3152     2081528 :         int                     error;
    3153             : 
    3154     2081528 :         *completed = false;
    3155     2081528 :         error = xlog_force_iclog(iclog);
    3156     2081529 :         if (error)
    3157             :                 return error;
    3158             : 
    3159             :         /*
    3160             :          * If the iclog has already been completed and reused the header LSN
    3161             :          * will have been rewritten by completion
    3162             :          */
    3163     2081529 :         if (be64_to_cpu(iclog->ic_header.h_lsn) != lsn)
    3164         989 :                 *completed = true;
    3165             :         return 0;
    3166             : }
    3167             : 
    3168             : /*
    3169             :  * Write out all data in the in-core log as of this exact moment in time.
    3170             :  *
    3171             :  * Data may be written to the in-core log during this call.  However,
    3172             :  * we don't guarantee this data will be written out.  A change from the past
    3173             :  * implementation means this routine will *not* write out zero length LRs.
    3174             :  *
    3175             :  * Basically, we try to perform an intelligent scan of the in-core logs.
    3176             :  * If we determine there is no flushable data, we just return.  There is no
    3177             :  * flushable data if:
    3178             :  *
    3179             :  *      1. the current iclog is active and has no data; the previous iclog
    3180             :  *              is in the active or dirty state.
    3181             :  *      2. the current iclog is dirty, and the previous iclog is in the
    3182             :  *              active or dirty state.
    3183             :  *
    3184             :  * We may sleep if:
    3185             :  *
    3186             :  *      1. the current iclog is not in the active nor dirty state.
    3187             :  *      2. the current iclog is dirty, and the previous iclog is not in the
    3188             :  *              active nor dirty state.
    3189             :  *      3. the current iclog is active, and there is another thread writing
    3190             :  *              to this particular iclog.
    3191             :  *      4. a) the current iclog is active and has no other writers
    3192             :  *         b) when we return from flushing out this iclog, it is still
    3193             :  *              not in the active nor dirty state.
    3194             :  */
    3195             : int
    3196     2223461 : xfs_log_force(
    3197             :         struct xfs_mount        *mp,
    3198             :         uint                    flags)
    3199             : {
    3200     2223461 :         struct xlog             *log = mp->m_log;
    3201     2223461 :         struct xlog_in_core     *iclog;
    3202             : 
    3203     2223461 :         XFS_STATS_INC(mp, xs_log_force);
    3204     2223346 :         trace_xfs_log_force(mp, 0, _RET_IP_);
    3205             : 
    3206     2223277 :         xlog_cil_force(log);
    3207             : 
    3208     2223502 :         spin_lock(&log->l_icloglock);
    3209     4447028 :         if (xlog_is_shutdown(log))
    3210       39086 :                 goto out_error;
    3211             : 
    3212     2184428 :         iclog = log->l_iclog;
    3213     2184428 :         trace_xlog_iclog_force(iclog, _RET_IP_);
    3214             : 
    3215     2184434 :         if (iclog->ic_state == XLOG_STATE_DIRTY ||
    3216     2182067 :             (iclog->ic_state == XLOG_STATE_ACTIVE &&
    3217     1845693 :              atomic_read(&iclog->ic_refcnt) == 0 && iclog->ic_offset == 0)) {
    3218             :                 /*
    3219             :                  * If the head is dirty or (active and empty), then we need to
    3220             :                  * look at the previous iclog.
    3221             :                  *
    3222             :                  * If the previous iclog is active or dirty we are done.  There
    3223             :                  * is nothing to sync out. Otherwise, we attach ourselves to the
    3224             :                  * previous iclog and go to sleep.
    3225             :                  */
    3226     1093254 :                 iclog = iclog->ic_prev;
    3227     1091180 :         } else if (iclog->ic_state == XLOG_STATE_ACTIVE) {
    3228     1088817 :                 if (atomic_read(&iclog->ic_refcnt) == 0) {
    3229             :                         /* We have exclusive access to this iclog. */
    3230      752443 :                         bool    completed;
    3231             : 
    3232      752443 :                         if (xlog_force_and_check_iclog(iclog, &completed))
    3233           0 :                                 goto out_error;
    3234             : 
    3235      752443 :                         if (completed)
    3236         191 :                                 goto out_unlock;
    3237             :                 } else {
    3238             :                         /*
    3239             :                          * Someone else is still writing to this iclog, so we
    3240             :                          * need to ensure that when they release the iclog it
    3241             :                          * gets synced immediately as we may be waiting on it.
    3242             :                          */
    3243      336374 :                         xlog_state_switch_iclogs(log, iclog, 0);
    3244             :                 }
    3245             :         }
    3246             : 
    3247             :         /*
    3248             :          * The iclog we are about to wait on may contain the checkpoint pushed
    3249             :          * by the above xlog_cil_force() call, but it may not have been pushed
    3250             :          * to disk yet. Like the ACTIVE case above, we need to make sure caches
    3251             :          * are flushed when this iclog is written.
    3252             :          */
    3253     2184243 :         if (iclog->ic_state == XLOG_STATE_WANT_SYNC)
    3254      351963 :                 iclog->ic_flags |= XLOG_ICL_NEED_FLUSH | XLOG_ICL_NEED_FUA;
    3255             : 
    3256     2184243 :         if (flags & XFS_LOG_SYNC)
    3257     2136883 :                 return xlog_wait_on_iclog(iclog);
    3258       47360 : out_unlock:
    3259       47551 :         spin_unlock(&log->l_icloglock);
    3260       47551 :         return 0;
    3261       39086 : out_error:
    3262       39086 :         spin_unlock(&log->l_icloglock);
    3263       39086 :         return -EIO;
    3264             : }
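                     : 
                     : /*
                     :  * Minimal usage sketch (hypothetical caller, not part of this file):
                     :  * flush everything committed so far to stable storage and wait.
                     :  */
                     : static inline int example_flush_log(struct xfs_mount *mp)
                     : {
                     :         /* push the CIL and wait for the covering iclog write to complete */
                     :         return xfs_log_force(mp, XFS_LOG_SYNC);
                     : }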
    3265             : 
    3266             : /*
    3267             :  * Force the log to a specific LSN.
    3268             :  *
    3269             :  * If an iclog with that lsn can be found:
    3270             :  *      If it is in the DIRTY state, just return.
    3271             :  *      If it is in the ACTIVE state, move the in-core log into the WANT_SYNC
    3272             :  *              state and go to sleep or return.
    3273             :  *      If it is in any other state, go to sleep or return.
    3274             :  *
    3275             :  * Synchronous forces are implemented with a wait queue.  All callers trying
    3276             :  * to force a given lsn to disk must wait on the queue attached to the
    3277             :  * specific in-core log.  When the given in-core log finally completes its write
    3278             :  * to disk, that thread will wake up all threads waiting on the queue.
    3279             :  */
    3280             : static int
    3281     1799297 : xlog_force_lsn(
    3282             :         struct xlog             *log,
    3283             :         xfs_lsn_t               lsn,
    3284             :         uint                    flags,
    3285             :         int                     *log_flushed,
    3286             :         bool                    already_slept)
    3287             : {
    3288     1799297 :         struct xlog_in_core     *iclog;
    3289     1799297 :         bool                    completed;
    3290             : 
    3291     1799297 :         spin_lock(&log->l_icloglock);
    3292     3598644 :         if (xlog_is_shutdown(log))
    3293         333 :                 goto out_error;
    3294             : 
    3295     1798989 :         iclog = log->l_iclog;
    3296     3147386 :         while (be64_to_cpu(iclog->ic_header.h_lsn) != lsn) {
    3297     1350143 :                 trace_xlog_iclog_force_lsn(iclog, _RET_IP_);
    3298     1350144 :                 iclog = iclog->ic_next;
    3299     1350144 :                 if (iclog == log->l_iclog)
    3300        1747 :                         goto out_unlock;
    3301             :         }
    3302             : 
    3303     1797243 :         switch (iclog->ic_state) {
    3304     1491106 :         case XLOG_STATE_ACTIVE:
    3305             :                 /*
    3306             :                  * We sleep here if we haven't already slept (e.g. this is the
    3307             :                  * first time we've looked at the correct iclog buf) and the
    3308             :                  * buffer before us is going to be sync'ed.  The reason for this
    3309             :                  * is that if we are doing sync transactions here, by waiting
    3310             :                  * for the previous I/O to complete, we can allow a few more
    3311             :                  * transactions into this iclog before we close it down.
    3312             :                  *
    3313             :                  * Otherwise, we mark the buffer WANT_SYNC, and bump up the
    3314             :                  * refcnt so we can release the log (which drops the ref count).
    3315             :                  * The state switch keeps new transaction commits from using
    3316             :                  * this buffer.  When the current commits finish writing into
    3317             :                  * the buffer, the refcount will drop to zero and the buffer
    3318             :                  * will go out then.
    3319             :                  */
    3320     1491106 :                 if (!already_slept &&
    3321     1358545 :                     (iclog->ic_prev->ic_state == XLOG_STATE_WANT_SYNC ||
    3322             :                      iclog->ic_prev->ic_state == XLOG_STATE_SYNCING)) {
    3323      162021 :                         xlog_wait(&iclog->ic_prev->ic_write_wait,
    3324      162021 :                                         &log->l_icloglock);
    3325      162021 :                         return -EAGAIN;
    3326             :                 }
    3327     1329085 :                 if (xlog_force_and_check_iclog(iclog, &completed))
    3328           0 :                         goto out_error;
    3329     1329086 :                 if (log_flushed)
    3330     1194377 :                         *log_flushed = 1;
    3331     1329086 :                 if (completed)
    3332         798 :                         goto out_unlock;
    3333             :                 break;
    3334        4114 :         case XLOG_STATE_WANT_SYNC:
    3335             :                 /*
    3336             :                  * This iclog may contain the checkpoint pushed by the
    3337             :                  * xlog_cil_force_seq() call, but there are other writers still
    3338             :                  * accessing it so it hasn't been pushed to disk yet. Like the
    3339             :                  * ACTIVE case above, we need to make sure caches are flushed
    3340             :                  * when this iclog is written.
    3341             :                  */
    3342        4114 :                 iclog->ic_flags |= XLOG_ICL_NEED_FLUSH | XLOG_ICL_NEED_FUA;
    3343        4114 :                 break;
    3344             :         default:
    3345             :                 /*
    3346             :                  * The entire checkpoint was written by the CIL force and is on
    3347             :                  * its way to disk already. It will be stable when it
    3348             :                  * completes, so we don't need to manipulate caches here at all.
    3349             :                  * We just need to wait for completion if necessary.
    3350             :                  */
    3351             :                 break;
    3352             :         }
    3353             : 
    3354     1634425 :         if (flags & XFS_LOG_SYNC)
    3355     1634425 :                 return xlog_wait_on_iclog(iclog);
    3356           0 : out_unlock:
    3357        2545 :         spin_unlock(&log->l_icloglock);
    3358        2545 :         return 0;
    3359         333 : out_error:
    3360         333 :         spin_unlock(&log->l_icloglock);
    3361         333 :         return -EIO;
    3362             : }
    3363             : 
    3364             : /*
    3365             :  * Force the log to a specific checkpoint sequence.
    3366             :  *
    3367             :  * First force the CIL so that all the required changes have been flushed to the
    3368             :  * iclogs. If the CIL force completed it will return a commit LSN that indicates
    3369             :  * the iclog that needs to be flushed to stable storage. If the caller needs
    3370             :  * a synchronous log force, we will wait on the iclog with the LSN returned by
    3371             :  * xlog_cil_force_seq() to be completed.
    3372             :  */
    3373             : int
    3374     1665652 : xfs_log_force_seq(
    3375             :         struct xfs_mount        *mp,
    3376             :         xfs_csn_t               seq,
    3377             :         uint                    flags,
    3378             :         int                     *log_flushed)
    3379             : {
    3380     1665652 :         struct xlog             *log = mp->m_log;
    3381     1665652 :         xfs_lsn_t               lsn;
    3382     1665652 :         int                     ret;
    3383     1665652 :         ASSERT(seq != 0);
    3384             : 
    3385     1665652 :         XFS_STATS_INC(mp, xs_log_force);
    3386     1665650 :         trace_xfs_log_force(mp, seq, _RET_IP_);
    3387             : 
    3388     1665651 :         lsn = xlog_cil_force_seq(log, seq);
    3389     1665569 :         if (lsn == NULLCOMMITLSN)
    3390             :                 return 0;
    3391             : 
    3392     1637217 :         ret = xlog_force_lsn(log, lsn, flags, log_flushed, false);
    3393     1637192 :         if (ret == -EAGAIN) {
    3394      162021 :                 XFS_STATS_INC(mp, xs_log_force_sleep);
    3395      162015 :                 ret = xlog_force_lsn(log, lsn, flags, log_flushed, true);
    3396             :         }
    3397             :         return ret;
    3398             : }
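                     : 
                     : /*
                     :  * Usage sketch (hypothetical, modelled on fsync-style callers): force
                     :  * a checkpoint sequence saved at transaction commit time.
                     :  */
                     : static inline int example_force_commit_seq(struct xfs_mount *mp, xfs_csn_t seq)
                     : {
                     :         int     log_flushed = 0;
                     : 
                     :         /* 0 on success once the covering iclog is on stable storage */
                     :         return xfs_log_force_seq(mp, seq, XFS_LOG_SYNC, &log_flushed);
                     : }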
    3399             : 
    3400             : /*
    3401             :  * Free a used ticket when its refcount falls to zero.
    3402             :  */
    3403             : void
    3404  1301459955 : xfs_log_ticket_put(
    3405             :         xlog_ticket_t   *ticket)
    3406             : {
    3407  1301459955 :         ASSERT(atomic_read(&ticket->t_ref) > 0);
    3408  2603167487 :         if (atomic_dec_and_test(&ticket->t_ref))
    3409   847490854 :                 kmem_cache_free(xfs_log_ticket_cache, ticket);
    3410  1301681283 : }
    3411             : 
    3412             : xlog_ticket_t *
    3413   454157150 : xfs_log_ticket_get(
    3414             :         xlog_ticket_t   *ticket)
    3415             : {
    3416   454157150 :         ASSERT(atomic_read(&ticket->t_ref) > 0);
    3417   454157150 :         atomic_inc(&ticket->t_ref);
    3418   454160750 :         return ticket;
    3419             : }
    3420             : 
    3421             : /*
    3422             :  * Figure out the total log space unit (in bytes) that would be
    3423             :  * required for a log ticket.
    3424             :  */
    3425             : static int
    3426   847389387 : xlog_calc_unit_res(
    3427             :         struct xlog             *log,
    3428             :         int                     unit_bytes,
    3429             :         int                     *niclogs)
    3430             : {
    3431   847389387 :         int                     iclog_space;
    3432   847389387 :         uint                    num_headers;
    3433             : 
    3434             :         /*
    3435             :          * Permanent reservations have up to 'cnt'-1 active log operations
    3436             :          * in the log.  A unit in this case is the amount of space for one
    3437             :          * of these log operations.  Normal reservations have a cnt of 1
    3438             :          * and their unit amount is the total amount of space required.
    3439             :          *
    3440             :          * The following lines of code account for non-transaction data
    3441             :          * which occupy space in the on-disk log.
    3442             :          *
    3443             :          * Normal form of a transaction is:
    3444             :          * <oph><trans-hdr><start-oph><reg1-oph><reg1><reg2-oph>...<commit-oph>
    3445             :          * and then there are LR hdrs, split-recs and roundoff at end of syncs.
    3446             :          *
    3447             :          * We need to account for all the leadup data and trailer data
    3448             :          * around the transaction data.
    3449             :          * And then we need to account for the worst case in terms of using
    3450             :          * more space.
    3451             :          * The worst case will happen if:
    3452             :          * - the placement of the transaction happens to be such that the
    3453             :          *   roundoff is at its maximum
    3454             :          * - the transaction data is synced before the commit record is synced
    3455             :          *   i.e. <transaction-data><roundoff> | <commit-rec><roundoff>
    3456             :          *   Therefore the commit record is in its own Log Record.
    3457             :          *   This can happen as the commit record is called with its
    3458             :          *   own region to xlog_write().
    3459             :          *   This then means that in the worst case, roundoff can happen for
    3460             :          *   the commit-rec as well.
    3461             :          *   The commit-rec is smaller than padding in this scenario and so it is
    3462             :          *   not added separately.
    3463             :          */
    3464             : 
    3465             :         /* for trans header */
    3466   847389387 :         unit_bytes += sizeof(xlog_op_header_t);
    3467   847389387 :         unit_bytes += sizeof(xfs_trans_header_t);
    3468             : 
    3469             :         /* for start-rec */
    3470   847389387 :         unit_bytes += sizeof(xlog_op_header_t);
    3471             : 
    3472             :         /*
    3473             :          * for LR headers - the space for data in an iclog is the size minus
    3474             :          * the space used for the headers. If we use the iclog size, then we
    3475             :          * undercalculate the number of headers required.
    3476             :          *
    3477             :          * Furthermore - the addition of op headers for split-recs might
    3478             :          * increase the space required enough to require more log and op
    3479             :          * headers, so take that into account too.
    3480             :          *
    3481             :          * IMPORTANT: This reservation makes the assumption that if this
    3482             :          * transaction is the first in an iclog and hence has the LR headers
    3483             :          * accounted to it, then the remaining space in the iclog is
    3484             :          * exclusively for this transaction.  i.e. if the transaction is larger
    3485             :          * than the iclog, it will be the only thing in that iclog.
    3486             :          * Fundamentally, this means we must pass the entire log vector to
    3487             :          * xlog_write to guarantee this.
    3488             :          */
    3489   847389387 :         iclog_space = log->l_iclog_size - log->l_iclog_hsize;
    3490   847389387 :         num_headers = howmany(unit_bytes, iclog_space);
    3491             : 
    3492             :         /* for split-recs - ophdrs added when data split over LRs */
    3493   847389387 :         unit_bytes += sizeof(xlog_op_header_t) * num_headers;
    3494             : 
    3495             :         /* add extra header reservations if we overrun */
    3496   847312109 :         while (!num_headers ||
    3497   847312109 :                howmany(unit_bytes, iclog_space) > num_headers) {
    3498           0 :                 unit_bytes += sizeof(xlog_op_header_t);
    3499           0 :                 num_headers++;
    3500             :         }
    3501   847389387 :         unit_bytes += log->l_iclog_hsize * num_headers;
    3502             : 
    3503             :         /* for commit-rec LR header - note: padding will subsume the ophdr */
    3504   847389387 :         unit_bytes += log->l_iclog_hsize;
    3505             : 
    3506             :         /* roundoff padding for transaction data and one for commit record */
    3507   847389387 :         unit_bytes += 2 * log->l_iclog_roundoff;
    3508             : 
    3509   847389387 :         if (niclogs)
    3510   847366896 :                 *niclogs = num_headers;
    3511   847389387 :         return unit_bytes;
    3512             : }
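                     : 
                     : /*
                     :  * Worked example (illustrative, assuming 12-byte op headers and a
                     :  * 16-byte transaction header): for unit_bytes = 100000 with a
                     :  * 32768-byte iclog and a 512-byte LR header, iclog_space = 32256 and
                     :  * the fixed headers bring unit_bytes to 100040, so num_headers =
                     :  * howmany(100040, 32256) = 4.  Split-rec ophdrs add 48 bytes, LR
                     :  * headers add 4 * 512, the commit-rec LR header adds 512 more, and
                     :  * two roundoffs of a 4096-byte stripe unit add 8192, giving a final
                     :  * unit reservation of 110840 bytes.
                     :  */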
    3513             : 
    3514             : int
    3515       22491 : xfs_log_calc_unit_res(
    3516             :         struct xfs_mount        *mp,
    3517             :         int                     unit_bytes)
    3518             : {
    3519       22491 :         return xlog_calc_unit_res(mp->m_log, unit_bytes, NULL);
    3520             : }
    3521             : 
    3522             : /*
    3523             :  * Allocate and initialise a new log ticket.
    3524             :  */
    3525             : struct xlog_ticket *
    3526   847172563 : xlog_ticket_alloc(
    3527             :         struct xlog             *log,
    3528             :         int                     unit_bytes,
    3529             :         int                     cnt,
    3530             :         bool                    permanent)
    3531             : {
    3532   847172563 :         struct xlog_ticket      *tic;
    3533   847172563 :         int                     unit_res;
    3534             : 
    3535   847172563 :         tic = kmem_cache_zalloc(xfs_log_ticket_cache, GFP_NOFS | __GFP_NOFAIL);
    3536             : 
    3537   847331657 :         unit_res = xlog_calc_unit_res(log, unit_bytes, &tic->t_iclog_hdrs);
    3538             : 
    3539   847405421 :         atomic_set(&tic->t_ref, 1);
    3540   847405421 :         tic->t_task          = current;
    3541   847405421 :         INIT_LIST_HEAD(&tic->t_queue);
    3542   847405421 :         tic->t_unit_res              = unit_res;
    3543   847405421 :         tic->t_curr_res              = unit_res;
    3544   847405421 :         tic->t_cnt           = cnt;
    3545   847405421 :         tic->t_ocnt          = cnt;
    3546   847405421 :         tic->t_tid           = get_random_u32();
    3547   847416430 :         if (permanent)
    3548   761763040 :                 tic->t_flags |= XLOG_TIC_PERM_RESERV;
    3549             : 
    3550   847416430 :         return tic;
    3551             : }
    3552             : 
    3553             : #if defined(DEBUG)
    3554             : /*
    3555             :  * Check to make sure the grant write head didn't just overlap the tail.  If
    3556             :  * the cycles are the same, we can't be overlapping.  Otherwise, make sure that
    3557             :  * the cycles differ by exactly one and check the byte count.
    3558             :  *
    3559             :  * This check is run unlocked, so it can give false positives. Rather than assert
    3560             :  * on failures, use a warn-once flag and a panic tag to allow the admin to
    3561             :  * determine if they want to panic the machine when such an error occurs. For
    3562             :  * debug kernels this will have the same effect as using an assert but, unlike
    3563             :  * an assert, it can be turned off at runtime.
    3564             :  */
    3565             : STATIC void
    3566  1002621621 : xlog_verify_grant_tail(
    3567             :         struct xlog     *log)
    3568             : {
    3569  1002621621 :         int             tail_cycle, tail_blocks;
    3570  1002621621 :         int             cycle, space;
    3571             : 
    3572  1002621621 :         xlog_crack_grant_head(&log->l_write_head.grant, &cycle, &space);
    3573  1002621621 :         xlog_crack_atomic_lsn(&log->l_tail_lsn, &tail_cycle, &tail_blocks);
    3574  1002621621 :         if (tail_cycle != cycle) {
    3575   202948887 :                 if (cycle - 1 != tail_cycle &&
    3576           2 :                     !test_and_set_bit(XLOG_TAIL_WARN, &log->l_opstate)) {
    3577           0 :                         xfs_alert_tag(log->l_mp, XFS_PTAG_LOGRES,
    3578             :                                 "%s: cycle - 1 != tail_cycle", __func__);
    3579             :                 }
    3580             : 
    3581   202948960 :                 if (space > BBTOB(tail_blocks) &&
    3582          75 :                     !test_and_set_bit(XLOG_TAIL_WARN, &log->l_opstate)) {
    3583          15 :                         xfs_alert_tag(log->l_mp, XFS_PTAG_LOGRES,
    3584             :                                 "%s: space > BBTOB(tail_blocks)", __func__);
    3585             :                 }
    3586             :         }
    3587  1002621621 : }
    3588             : 
    3589             : /* check if it will fit */
    3590             : STATIC void
    3591    12570795 : xlog_verify_tail_lsn(
    3592             :         struct xlog             *log,
    3593             :         struct xlog_in_core     *iclog)
    3594             : {
    3595    12570795 :         xfs_lsn_t       tail_lsn = be64_to_cpu(iclog->ic_header.h_tail_lsn);
    3596    12570795 :         int             blocks;
    3597             : 
    3598    12570795 :         if (CYCLE_LSN(tail_lsn) == log->l_prev_cycle) {
    3599     8708612 :                 blocks = log->l_logBBsize -
    3600     8708612 :                                 (log->l_prev_block - BLOCK_LSN(tail_lsn));
    3601     8708612 :                 if (blocks < BTOBB(iclog->ic_offset) + BTOBB(log->l_iclog_hsize))
    3602           0 :                         xfs_emerg(log->l_mp, "%s: ran out of log space", __func__);
    3603             :         } else {
    3604     3862183 :                 ASSERT(CYCLE_LSN(tail_lsn) + 1 == log->l_prev_cycle);
    3605             : 
    3606     3862183 :                 if (BLOCK_LSN(tail_lsn) == log->l_prev_block)
    3607           0 :                         xfs_emerg(log->l_mp, "%s: tail wrapped", __func__);
    3608             : 
    3609     3862183 :                 blocks = BLOCK_LSN(tail_lsn) - log->l_prev_block;
    3610     3862183 :                 if (blocks < BTOBB(iclog->ic_offset) + 1)
    3611           0 :                         xfs_emerg(log->l_mp, "%s: ran out of log space", __func__);
    3612             :         }
    3613    12570795 : }
    3614             : 
    3615             : /*
    3616             :  * Perform a number of checks on the iclog before writing to disk.
    3617             :  *
    3618             :  * 1. Make sure the iclogs are still circular
    3619             :  * 2. Make sure we have a good magic number
    3620             :  * 3. Make sure we don't have magic numbers in the data
    3621             :  * 4. Check fields of each log operation header for:
    3622             :  *      A. Valid client identifier
    3623             :  *      B. tid ptr value falls in valid ptr space (user space code)
    3624             :  *      C. Length in log record header is correct according to the
    3625             :  *              individual operation headers within record.
    3626             :  * 5. When a bwrite will occur within 5 blocks of the front of the physical
    3627             :  *      log, check the preceding blocks of the physical log to make sure all
    3628             :  *      the cycle numbers agree with the current cycle number.
    3629             :  */
    3630             : STATIC void
    3631    12570724 : xlog_verify_iclog(
    3632             :         struct xlog             *log,
    3633             :         struct xlog_in_core     *iclog,
    3634             :         int                     count)
    3635             : {
    3636    12570724 :         xlog_op_header_t        *ophead;
    3637    12570724 :         xlog_in_core_t          *icptr;
    3638    12570724 :         xlog_in_core_2_t        *xhdr;
    3639    12570724 :         void                    *base_ptr, *ptr, *p;
    3640    12570724 :         ptrdiff_t               field_offset;
    3641    12570724 :         uint8_t                 clientid;
    3642    12570724 :         int                     len, i, j, k, op_len;
    3643    12570724 :         int                     idx;
    3644             : 
    3645             :         /* check validity of iclog pointers */
    3646    12570724 :         spin_lock(&log->l_icloglock);
    3647    12570797 :         icptr = log->l_iclog;
    3648   113137100 :         for (i = 0; i < log->l_iclog_bufs; i++, icptr = icptr->ic_next)
    3649   100566306 :                 ASSERT(icptr);
    3650             : 
    3651    12570794 :         if (icptr != log->l_iclog)
    3652           0 :                 xfs_emerg(log->l_mp, "%s: corrupt iclog ring", __func__);
    3653    12570794 :         spin_unlock(&log->l_icloglock);
    3654             : 
    3655             :         /* check log magic numbers */
    3656    12570737 :         if (iclog->ic_header.h_magicno != cpu_to_be32(XLOG_HEADER_MAGIC_NUM))
    3657           0 :                 xfs_emerg(log->l_mp, "%s: invalid magic num", __func__);
    3658             : 
    3659    12570737 :         base_ptr = ptr = &iclog->ic_header;
    3660    12570737 :         p = &iclog->ic_header;
    3661   711158112 :         for (ptr += BBSIZE; ptr < base_ptr + count; ptr += BBSIZE) {
    3662   698587315 :                 if (*(__be32 *)ptr == cpu_to_be32(XLOG_HEADER_MAGIC_NUM))
    3663           0 :                         xfs_emerg(log->l_mp, "%s: unexpected magic num",
    3664             :                                 __func__);
    3665             :         }
    3666             : 
    3667             :         /* check fields */
    3668    12570797 :         len = be32_to_cpu(iclog->ic_header.h_num_logops);
    3669    12570797 :         base_ptr = ptr = iclog->ic_datap;
    3670    12570797 :         ophead = ptr;
    3671    12570797 :         xhdr = iclog->ic_data;
    3672  1097075446 :         for (i = 0; i < len; i++) {
    3673  1084504681 :                 ophead = ptr;
    3674             : 
    3675             :                 /* clientid is only 1 byte */
    3676  1084504681 :                 p = &ophead->oh_clientid;
    3677  1084504681 :                 field_offset = p - base_ptr;
    3678  1084504681 :                 if (field_offset & 0x1ff) {
    3679  1076424198 :                         clientid = ophead->oh_clientid;
    3680             :                 } else {
    3681     8080483 :                         idx = BTOBBT((void *)&ophead->oh_clientid - iclog->ic_datap);
    3682     8080483 :                         if (idx >= (XLOG_HEADER_CYCLE_SIZE / BBSIZE)) {
    3683        3030 :                                 j = idx / (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
    3684        3030 :                                 k = idx % (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
    3685        3030 :                                 clientid = xlog_get_client_id(
    3686        3030 :                                         xhdr[j].hic_xheader.xh_cycle_data[k]);
    3687             :                         } else {
    3688     8077447 :                                 clientid = xlog_get_client_id(
    3689     8077453 :                                         iclog->ic_header.h_cycle_data[idx]);
    3690             :                         }
    3691             :                 }
    3692  1084504675 :                 if (clientid != XFS_TRANSACTION && clientid != XFS_LOG) {
    3693           0 :                         xfs_warn(log->l_mp,
    3694             :                                 "%s: op %d invalid clientid %d op "PTR_FMT" offset 0x%lx",
    3695             :                                 __func__, i, clientid, ophead,
    3696             :                                 (unsigned long)field_offset);
    3697             :                 }
    3698             : 
    3699             :                 /* check length */
    3700  1084504644 :                 p = &ophead->oh_len;
    3701  1084504644 :                 field_offset = p - base_ptr;
    3702  1084504644 :                 if (field_offset & 0x1ff) {
    3703  1076378149 :                         op_len = be32_to_cpu(ophead->oh_len);
    3704             :                 } else {
    3705     8126495 :                         idx = BTOBBT((void *)&ophead->oh_len - iclog->ic_datap);
    3706     8126495 :                         if (idx >= (XLOG_HEADER_CYCLE_SIZE / BBSIZE)) {
    3707        2931 :                                 j = idx / (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
    3708        2931 :                                 k = idx % (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
    3709        2931 :                                 op_len = be32_to_cpu(xhdr[j].hic_xheader.xh_cycle_data[k]);
    3710             :                         } else {
    3711     8123564 :                                 op_len = be32_to_cpu(iclog->ic_header.h_cycle_data[idx]);
    3712             :                         }
    3713             :                 }
    3714  1084504649 :                 ptr += sizeof(xlog_op_header_t) + op_len;
    3715             :         }
    3716    12570765 : }
    3717             : #endif
    3718             : 
    3719             : /*
    3720             :  * Perform a forced shutdown on the log.
    3721             :  *
    3722             :  * This can be called from low level log code to trigger a shutdown, or from the
    3723             :  * high level mount shutdown code when the mount shuts down.
    3724             :  *
    3725             :  * Our main objectives here are to make sure that:
    3726             :  *      a. if the shutdown was not due to a log IO error, flush the logs to
    3727             :  *         disk. Anything modified after this is ignored.
    3728             :  *      b. the log gets atomically marked 'XLOG_IO_ERROR' for all interested
    3729             :  *         parties to find out. Nothing new gets queued after this is done.
    3730             :  *      c. Tasks sleeping on log reservations, pinned objects and
    3731             :  *         other resources get woken up.
    3732             :  *      d. The mount is also marked as shut down so that log triggered shutdowns
    3733             :  *         still behave the same as if they called xfs_forced_shutdown().
    3734             :  *
    3735             :  * Return true if the shutdown cause was a log IO error and we actually shut the
    3736             :  * log down.
    3737             :  */
    3738             : bool
    3739       16264 : xlog_force_shutdown(
    3740             :         struct xlog     *log,
    3741             :         uint32_t        shutdown_flags)
    3742             : {
    3743       16264 :         bool            log_error = (shutdown_flags & SHUTDOWN_LOG_IO_ERROR);
    3744             : 
    3745       16264 :         if (!log)
    3746             :                 return false;
    3747             : 
    3748             :         /*
    3749             :          * Flush all the completed transactions to disk before marking the log
    3750             :          * being shut down. We need to do this first as shutting down the log
    3751             :          * before the force will prevent the log force from flushing the iclogs
    3752             :          * to disk.
    3753             :          *
    3754             :          * When we are in recovery, there are no transactions to flush, and
    3755             :          * we don't want to touch the log because we don't want to perturb the
    3756             :          * current head/tail for future recovery attempts. Hence we need to
    3757             :          * avoid a log force in this case.
    3758             :          *
    3759             :          * If we are shutting down due to a log IO error, then we must avoid
    3760             :          * trying to write the log as that may just result in more IO errors and
    3761             :          * an endless shutdown/force loop.
    3762             :          */
    3763       18734 :         if (!log_error && !xlog_in_recovery(log))
    3764        2470 :                 xfs_log_force(log->l_mp, XFS_LOG_SYNC);
    3765             : 
    3766             :         /*
    3767             :          * Atomically set the shutdown state. If the shutdown state is already
    3768             :          * set, then someone else is performing the shutdown and so we are done
    3769             :          * here. This should never happen because we should only ever get called
    3770             :          * once by the first shutdown caller.
    3771             :          *
    3772             :          * Much of the log state machine transitions assume that shutdown state
    3773             :          * cannot change once they hold the log->l_icloglock. Hence we need to
    3774             :          * hold that lock here, even though we use the atomic test_and_set_bit()
    3775             :          * operation to set the shutdown state.
    3776             :          */
    3777       16264 :         spin_lock(&log->l_icloglock);
    3778       32528 :         if (test_and_set_bit(XLOG_IO_ERROR, &log->l_opstate)) {
    3779        5527 :                 spin_unlock(&log->l_icloglock);
    3780        5527 :                 return false;
    3781             :         }
    3782       10737 :         spin_unlock(&log->l_icloglock);
    3783             : 
    3784             :         /*
    3785             :          * If this log shutdown also sets the mount shutdown state, issue a
    3786             :          * shutdown warning message.
    3787             :          */
    3788       21474 :         if (!test_and_set_bit(XFS_OPSTATE_SHUTDOWN, &log->l_mp->m_opstate)) {
    3789        4208 :                 xfs_alert_tag(log->l_mp, XFS_PTAG_SHUTDOWN_LOGERROR,
    3790             : "Filesystem has been shut down due to log error (0x%x).",
    3791             :                                 shutdown_flags);
    3792        4208 :                 xfs_alert(log->l_mp,
    3793             : "Please unmount the filesystem and rectify the problem(s).");
    3794        4208 :                 if (xfs_error_level >= XFS_ERRLEVEL_HIGH)
    3795           0 :                         xfs_stack_trace();
    3796             :         }
    3797             : 
    3798             :         /*
    3799             :          * We don't want anybody waiting for log reservations after this. That
    3800             :          * means we have to wake up everybody queued up on reserveq as well as
    3801             :          * writeq.  In addition, we make sure in xlog_{re}grant_log_space that
    3802             :          * we don't enqueue anything once the SHUTDOWN flag is set, and this
    3803             :          * action is protected by the grant locks.
    3804             :          */
    3805       10737 :         xlog_grant_head_wake_all(&log->l_reserve_head);
    3806       10737 :         xlog_grant_head_wake_all(&log->l_write_head);
    3807             : 
    3808             :         /*
    3809             :          * Wake up everybody waiting on xfs_log_force. Wake the CIL push first
    3810             :          * as if the log writes were completed. The abort handling in the log
    3811             :          * item committed callback functions will do this again under lock to
    3812             :          * avoid races.
    3813             :          */
    3814       10737 :         spin_lock(&log->l_cilp->xc_push_lock);
    3815       10737 :         wake_up_all(&log->l_cilp->xc_start_wait);
    3816       10737 :         wake_up_all(&log->l_cilp->xc_commit_wait);
    3817       10737 :         spin_unlock(&log->l_cilp->xc_push_lock);
    3818             : 
    3819       10737 :         spin_lock(&log->l_icloglock);
    3820       10737 :         xlog_state_shutdown_callbacks(log);
    3821       10737 :         spin_unlock(&log->l_icloglock);
    3822             : 
    3823       10737 :         wake_up_var(&log->l_opstate);
    3824       10737 :         return log_error;
    3825             : }
    3826             : 
    3827             : STATIC int
    3828      180168 : xlog_iclogs_empty(
    3829             :         struct xlog     *log)
    3830             : {
    3831      180168 :         xlog_in_core_t  *iclog;
    3832             : 
    3833      180168 :         iclog = log->l_iclog;
    3834     1430630 :         do {
    3835             :                 /* Endianness does not matter here; zero is zero in
    3836             :                  * any language.
    3837             :                  */
    3838     1430630 :                 if (iclog->ic_header.h_num_logops)
    3839             :                         return 0;
    3840     1424163 :                 iclog = iclog->ic_next;
    3841     1424163 :         } while (iclog != log->l_iclog);
    3842             :         return 1;
    3843             : }
    3844             : 
    3845             : /*
    3846             :  * Verify that an LSN stamped into a piece of metadata is valid. This is
    3847             :  * intended for use in read verifiers on v5 superblocks.
    3848             :  */
    3849             : bool
    3850   147960423 : xfs_log_check_lsn(
    3851             :         struct xfs_mount        *mp,
    3852             :         xfs_lsn_t               lsn)
    3853             : {
    3854   147960423 :         struct xlog             *log = mp->m_log;
    3855   147960423 :         bool                    valid;
    3856             : 
    3857             :         /*
    3858             :          * norecovery mode skips mount-time log processing and unconditionally
    3859             :          * resets the in-core LSN. We can't validate in this mode, but
    3860             :          * modifications are not allowed anyway, so just return true.
    3861             :          */
    3862   147960423 :         if (xfs_has_norecovery(mp))
    3863             :                 return true;
    3864             : 
    3865             :         /*
    3866             :          * Some metadata LSNs are initialized to NULL (e.g., the agfl). This is
    3867             :          * handled by recovery and thus safe to ignore here.
    3868             :          */
    3869   147960311 :         if (lsn == NULLCOMMITLSN)
    3870             :                 return true;
    3871             : 
    3872   147491284 :         valid = xlog_valid_lsn(mp->m_log, lsn);
    3873             : 
    3874             :         /* warn the user about what's gone wrong before verifier failure */
    3875   147491216 :         if (!valid) {
    3876           2 :                 spin_lock(&log->l_icloglock);
    3877           2 :                 xfs_warn(mp,
    3878             : "Corruption warning: Metadata has LSN (%d:%d) ahead of current LSN (%d:%d). "
    3879             : "Please unmount and run xfs_repair (>= v4.3) to resolve.",
    3880             :                          CYCLE_LSN(lsn), BLOCK_LSN(lsn),
    3881             :                          log->l_curr_cycle, log->l_curr_block);
    3882           2 :                 spin_unlock(&log->l_icloglock);
    3883             :         }
    3884             : 
    3885             :         return valid;
    3886             : }
    3887             : 
    3888             : /*
    3889             :  * Notify the log that we're about to start using a feature that is protected
    3890             :  * by a log incompat feature flag.  This will prevent log covering from
    3891             :  * clearing those flags.
    3892             :  */
    3893             : void
    3894          98 : xlog_use_incompat_feat(
    3895             :         struct xlog             *log)
    3896             : {
    3897          98 :         down_read(&log->l_incompat_users);
    3898          98 : }
    3899             : 
    3900             : /* Notify the log that we've finished using log incompat features. */
    3901             : void
    3902          98 : xlog_drop_incompat_feat(
    3903             :         struct xlog             *log)
    3904             : {
    3905          98 :         up_read(&log->l_incompat_users);
    3906          98 : }

Generated by: LCOV version 1.14