Line data Source code
1 : // SPDX-License-Identifier: GPL-2.0
2 : /*
3 : * Copyright (c) 2000-2005 Silicon Graphics, Inc.
4 : * All Rights Reserved.
5 : */
6 : #include "xfs.h"
7 : #include "xfs_fs.h"
8 : #include "xfs_shared.h"
9 : #include "xfs_format.h"
10 : #include "xfs_log_format.h"
11 : #include "xfs_trans_resv.h"
12 : #include "xfs_mount.h"
13 : #include "xfs_errortag.h"
14 : #include "xfs_error.h"
15 : #include "xfs_trans.h"
16 : #include "xfs_trans_priv.h"
17 : #include "xfs_log.h"
18 : #include "xfs_log_priv.h"
19 : #include "xfs_trace.h"
20 : #include "xfs_sysfs.h"
21 : #include "xfs_sb.h"
22 : #include "xfs_health.h"
23 :
24 : struct kmem_cache *xfs_log_ticket_cache;
25 :
26 : /* Local miscellaneous function prototypes */
27 : STATIC struct xlog *
28 : xlog_alloc_log(
29 : struct xfs_mount *mp,
30 : struct xfs_buftarg *log_target,
31 : xfs_daddr_t blk_offset,
32 : int num_bblks);
33 : STATIC int
34 : xlog_space_left(
35 : struct xlog *log,
36 : atomic64_t *head);
37 : STATIC void
38 : xlog_dealloc_log(
39 : struct xlog *log);
40 :
41 : /* local state machine functions */
42 : STATIC void xlog_state_done_syncing(
43 : struct xlog_in_core *iclog);
44 : STATIC void xlog_state_do_callback(
45 : struct xlog *log);
46 : STATIC int
47 : xlog_state_get_iclog_space(
48 : struct xlog *log,
49 : int len,
50 : struct xlog_in_core **iclog,
51 : struct xlog_ticket *ticket,
52 : int *logoffsetp);
53 : STATIC void
54 : xlog_grant_push_ail(
55 : struct xlog *log,
56 : int need_bytes);
57 : STATIC void
58 : xlog_sync(
59 : struct xlog *log,
60 : struct xlog_in_core *iclog,
61 : struct xlog_ticket *ticket);
62 : #if defined(DEBUG)
63 : STATIC void
64 : xlog_verify_grant_tail(
65 : struct xlog *log);
66 : STATIC void
67 : xlog_verify_iclog(
68 : struct xlog *log,
69 : struct xlog_in_core *iclog,
70 : int count);
71 : STATIC void
72 : xlog_verify_tail_lsn(
73 : struct xlog *log,
74 : struct xlog_in_core *iclog);
75 : #else
76 : #define xlog_verify_grant_tail(a)
77 : #define xlog_verify_iclog(a,b,c)
78 : #define xlog_verify_tail_lsn(a,b)
79 : #endif
80 :
81 : STATIC int
82 : xlog_iclogs_empty(
83 : struct xlog *log);
84 :
85 : static int
86 : xfs_log_cover(struct xfs_mount *);
87 :
88 : /*
89 : * We need to make sure the buffer pointer returned is naturally aligned for the
90 : * biggest basic data type we put into it. We have already accounted for this
91 : * padding when sizing the buffer.
92 : *
93 : * However, this padding does not get written into the log, and hence we have to
94 : * track the space used by the log vectors separately to prevent log space hangs
95 : * due to inaccurate accounting (i.e. a leak) of the used log space through the
96 : * CIL context ticket.
97 : *
98 : * We also add space for the xlog_op_header that describes this region in the
99 : * log. This prepends the data region we return to the caller to copy their data
100 : * into, so do all the static initialisation of the ophdr now. Because the ophdr
101 : * is not 8 byte aligned, we have to be careful to ensure that we align the
102 : * start of the buffer such that the region we return to the call is 8 byte
103 : * aligned and packed against the tail of the ophdr.
104 : */
105 : void *
106 39773147133 : xlog_prepare_iovec(
107 : struct xfs_log_vec *lv,
108 : struct xfs_log_iovec **vecp,
109 : uint type)
110 : {
111 39773147133 : struct xfs_log_iovec *vec = *vecp;
112 39773147133 : struct xlog_op_header *oph;
113 39773147133 : uint32_t len;
114 39773147133 : void *buf;
115 :
116 39773147133 : if (vec) {
117 22649797471 : ASSERT(vec - lv->lv_iovecp < lv->lv_niovecs);
118 22649797471 : vec++;
119 : } else {
120 17123349662 : vec = &lv->lv_iovecp[0];
121 : }
122 :
123 39773147133 : len = lv->lv_buf_len + sizeof(struct xlog_op_header);
124 39773147133 : if (!IS_ALIGNED(len, sizeof(uint64_t))) {
125 39571925547 : lv->lv_buf_len = round_up(len, sizeof(uint64_t)) -
126 : sizeof(struct xlog_op_header);
127 : }
128 :
129 39773147133 : vec->i_type = type;
130 39773147133 : vec->i_addr = lv->lv_buf + lv->lv_buf_len;
131 :
132 39773147133 : oph = vec->i_addr;
133 39773147133 : oph->oh_clientid = XFS_TRANSACTION;
134 39773147133 : oph->oh_res2 = 0;
135 39773147133 : oph->oh_flags = 0;
136 :
137 39773147133 : buf = vec->i_addr + sizeof(struct xlog_op_header);
138 39773147133 : ASSERT(IS_ALIGNED((unsigned long)buf, sizeof(uint64_t)));
139 :
140 39773147133 : *vecp = vec;
141 39773147133 : return buf;
142 : }
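For reference, a minimal sketch of the caller-side pattern this buffer is handed back into, loosely modelled on the log item ->iop_format routines; example_item_format() and struct example_format are illustrative names, while xlog_prepare_iovec()/xlog_finish_iovec() and XLOG_REG_TYPE_IFORMAT are the interfaces assumed to exist as in xfs_log.h:

/*
 * Sketch only (not part of xfs_log.c): a log item format routine pairs
 * xlog_prepare_iovec() with xlog_finish_iovec() around its copy-in.
 */
static void
example_item_format(
	struct xfs_log_vec	*lv)
{
	struct xfs_log_iovec	*vecp = NULL;
	struct example_format	*fmt;	/* hypothetical on-disk format */

	/* returns an 8-byte aligned buffer packed behind the op header */
	fmt = xlog_prepare_iovec(lv, &vecp, XLOG_REG_TYPE_IFORMAT);

	/* fill in the item's format structure in the log vector buffer */
	memset(fmt, 0, sizeof(*fmt));

	/* record the region length and advance lv->lv_buf_len */
	xlog_finish_iovec(lv, vecp, sizeof(*fmt));
}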
143 :
144 : static void
145 7911946777 : xlog_grant_sub_space(
146 : struct xlog *log,
147 : atomic64_t *head,
148 : int bytes)
149 : {
150 7911946777 : int64_t head_val = atomic64_read(head);
151 7915619910 : int64_t new, old;
152 :
153 7915619910 : do {
154 7915619910 : int cycle, space;
155 :
156 7915619910 : xlog_crack_grant_head_val(head_val, &cycle, &space);
157 :
158 7915619910 : space -= bytes;
159 7915619910 : if (space < 0) {
160 66823547 : space += log->l_logsize;
161 66823547 : cycle--;
162 : }
163 :
164 7915619910 : old = head_val;
165 7915619910 : new = xlog_assign_grant_head_val(cycle, space);
166 7915619910 : head_val = atomic64_cmpxchg(head, old, new);
167 7916699628 : } while (head_val != old);
168 7913026495 : }
169 :
170 : static void
171 5037547159 : xlog_grant_add_space(
172 : struct xlog *log,
173 : atomic64_t *head,
174 : int bytes)
175 : {
176 5037547159 : int64_t head_val = atomic64_read(head);
177 5047485076 : int64_t new, old;
178 :
179 5047485076 : do {
180 5047485076 : int tmp;
181 5047485076 : int cycle, space;
182 :
183 5047485076 : xlog_crack_grant_head_val(head_val, &cycle, &space);
184 :
185 5047485076 : tmp = log->l_logsize - space;
186 5047485076 : if (tmp > bytes)
187 4980578044 : space += bytes;
188 : else {
189 66907032 : space = bytes - tmp;
190 66907032 : cycle++;
191 : }
192 :
193 5047485076 : old = head_val;
194 5047485076 : new = xlog_assign_grant_head_val(cycle, space);
195 5047485076 : head_val = atomic64_cmpxchg(head, old, new);
196 5050807756 : } while (head_val != old);
197 5040869839 : }
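Both helpers above manipulate the packed grant head through xlog_crack_grant_head_val() and xlog_assign_grant_head_val(), which live in xfs_log_priv.h. A minimal sketch of that packing, assuming the conventional layout of the cycle number in the upper 32 bits and the byte count in the lower 32 bits (the demo_* names are illustrative):

/* Sketch only: how a grant head value packs {cycle, space} into an int64_t. */
static inline void
demo_crack_grant_head_val(int64_t val, int *cycle, int *space)
{
	*cycle = val >> 32;		/* upper 32 bits: cycle number */
	*space = val & 0xffffffff;	/* lower 32 bits: byte offset */
}

static inline int64_t
demo_assign_grant_head_val(int cycle, int space)
{
	return ((int64_t)cycle << 32) | space;
}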
198 :
199 : STATIC void
200 133712 : xlog_grant_head_init(
201 : struct xlog_grant_head *head)
202 : {
203 133712 : xlog_assign_grant_head(&head->grant, 1, 0);
204 133712 : INIT_LIST_HEAD(&head->waiters);
205 133712 : spin_lock_init(&head->lock);
206 133712 : }
207 :
208 : STATIC void
209 25860 : xlog_grant_head_wake_all(
210 : struct xlog_grant_head *head)
211 : {
212 25860 : struct xlog_ticket *tic;
213 :
214 25860 : spin_lock(&head->lock);
215 25860 : list_for_each_entry(tic, &head->waiters, t_queue)
216 0 : wake_up_process(tic->t_task);
217 25860 : spin_unlock(&head->lock);
218 25860 : }
219 :
220 : static inline int
221 2986104642 : xlog_ticket_reservation(
222 : struct xlog *log,
223 : struct xlog_grant_head *head,
224 : struct xlog_ticket *tic)
225 : {
226 2986104642 : if (head == &log->l_write_head) {
227 529701649 : ASSERT(tic->t_flags & XLOG_TIC_PERM_RESERV);
228 529701649 : return tic->t_unit_res;
229 : }
230 :
231 2456402993 : if (tic->t_flags & XLOG_TIC_PERM_RESERV)
232 1937143850 : return tic->t_unit_res * tic->t_cnt;
233 :
234 519259143 : return tic->t_unit_res;
235 : }
236 :
237 : STATIC bool
238 22532190 : xlog_grant_head_wake(
239 : struct xlog *log,
240 : struct xlog_grant_head *head,
241 : int *free_bytes)
242 : {
243 22532190 : struct xlog_ticket *tic;
244 22532190 : int need_bytes;
245 22532190 : bool woken_task = false;
246 :
247 477181035 : list_for_each_entry(tic, &head->waiters, t_queue) {
248 :
249 : /*
250 : * There is a chance that the size of the CIL checkpoints in
251 : * progress at the last AIL push target calculation resulted in
252 : * limiting the target to the log head (l_last_sync_lsn) at the
253 : * time. This may not reflect where the log head is now as the
254 : * CIL checkpoints may have completed.
255 : *
256 : * Hence when we are woken here, it may be the head of the
257 : * log that has moved rather than the tail. As the tail didn't
258 : * move, there still won't be space available for the
259 : * reservation we require. However, if the AIL has already
260 : * pushed to the target defined by the old log head location, we
261 : * will hang here waiting for something else to update the AIL
262 : * push target.
263 : *
264 : * Therefore, if there isn't space to wake the first waiter on
265 : * the grant head, we need to push the AIL again to ensure the
266 : * target reflects both the current log tail and log head
267 : * position before we wait for the tail to move again.
268 : */
269 :
270 473734156 : need_bytes = xlog_ticket_reservation(log, head, tic);
271 473734156 : if (*free_bytes < need_bytes) {
272 19085311 : if (!woken_task)
273 1098331 : xlog_grant_push_ail(log, need_bytes);
274 19085311 : return false;
275 : }
276 :
277 454648845 : *free_bytes -= need_bytes;
278 454648845 : trace_xfs_log_grant_wake_up(log, tic);
279 454648845 : wake_up_process(tic->t_task);
280 454648845 : woken_task = true;
281 : }
282 :
283 : return true;
284 : }
285 :
286 : STATIC int
287 9942511 : xlog_grant_head_wait(
288 : struct xlog *log,
289 : struct xlog_grant_head *head,
290 : struct xlog_ticket *tic,
291 : int need_bytes) __releases(&head->lock)
292 : __acquires(&head->lock)
293 : {
294 9942511 : list_add_tail(&tic->t_queue, &head->waiters);
295 :
296 9949322 : do {
297 19898644 : if (xlog_is_shutdown(log))
298 0 : goto shutdown;
299 9949322 : xlog_grant_push_ail(log, need_bytes);
300 :
301 9949322 : __set_current_state(TASK_UNINTERRUPTIBLE);
302 9949322 : spin_unlock(&head->lock);
303 :
304 9949311 : XFS_STATS_INC(log->l_mp, xs_sleep_logspace);
305 :
306 9949289 : trace_xfs_log_grant_sleep(log, tic);
307 9949026 : schedule();
308 9890163 : trace_xfs_log_grant_wake(log, tic);
309 :
310 9815520 : spin_lock(&head->lock);
311 19898644 : if (xlog_is_shutdown(log))
312 0 : goto shutdown;
313 9949322 : } while (xlog_space_left(log, &head->grant) < need_bytes);
314 :
315 9942511 : list_del_init(&tic->t_queue);
316 9942511 : return 0;
317 0 : shutdown:
318 0 : list_del_init(&tic->t_queue);
319 0 : return -EIO;
320 : }
321 :
322 : /*
323 : * Atomically get the log space required for a log ticket.
324 : *
325 : * Once a ticket gets put onto head->waiters, it will only return after the
326 : * needed reservation is satisfied.
327 : *
328 : * This function is structured so that it has a lock free fast path. This is
329 : * necessary because every new transaction reservation will come through this
330 : * path. Hence any lock will be globally hot if we take it unconditionally on
331 : * every pass.
332 : *
333 : * As tickets are only ever moved on and off head->waiters under head->lock, we
334 : * only need to take that lock if we are going to add the ticket to the queue
335 : * and sleep. We can avoid taking the lock if the ticket was never added to
336 : * head->waiters because the t_queue list head will be empty and we hold the
337 : * only reference to it so it can safely be checked unlocked.
338 : */
339 : STATIC int
340 2512415558 : xlog_grant_head_check(
341 : struct xlog *log,
342 : struct xlog_grant_head *head,
343 : struct xlog_ticket *tic,
344 : int *need_bytes)
345 : {
346 2512415558 : int free_bytes;
347 2512415558 : int error = 0;
348 :
349 5024831116 : ASSERT(!xlog_in_recovery(log));
350 :
351 : /*
352 : * If there are other waiters on the queue then give them a chance at
353 : * logspace before us. Wake up the first waiters, if we do not wake
354 : * up all the waiters then go to sleep waiting for more free space,
355 : * otherwise try to get some space for this transaction.
356 : */
357 2512415558 : *need_bytes = xlog_ticket_reservation(log, head, tic);
358 2511805043 : free_bytes = xlog_space_left(log, &head->grant);
359 2511804928 : if (!list_empty_careful(&head->waiters)) {
360 10636577 : spin_lock(&head->lock);
361 11130293 : if (!xlog_grant_head_wake(log, head, &free_bytes) ||
362 1480587 : free_bytes < *need_bytes) {
363 9867923 : error = xlog_grant_head_wait(log, head, tic,
364 : *need_bytes);
365 : }
366 11130293 : spin_unlock(&head->lock);
367 2501386175 : } else if (free_bytes < *need_bytes) {
368 74308 : spin_lock(&head->lock);
369 74588 : error = xlog_grant_head_wait(log, head, tic, *need_bytes);
370 74588 : spin_unlock(&head->lock);
371 : }
372 :
373 2512516209 : return error;
374 : }
375 :
376 : bool
377 208227 : xfs_log_writable(
378 : struct xfs_mount *mp)
379 : {
380 : /*
381 : * Do not write to the log on norecovery mounts, if the data or log
382 : * devices are read-only, or if the filesystem is shutdown. Read-only
383 : * mounts allow internal writes for log recovery and unmount purposes,
384 : * so don't restrict that case.
385 : */
386 208227 : if (xfs_has_norecovery(mp))
387 : return false;
388 208119 : if (xfs_readonly_buftarg(mp->m_ddev_targp))
389 : return false;
390 208095 : if (xfs_readonly_buftarg(mp->m_log->l_targ))
391 : return false;
392 416190 : if (xlog_is_shutdown(mp->m_log))
393 25825 : return false;
394 : return true;
395 : }
396 :
397 : /*
398 : * Replenish the byte reservation required by moving the grant write head.
399 : */
400 : int
401 1962714607 : xfs_log_regrant(
402 : struct xfs_mount *mp,
403 : struct xlog_ticket *tic)
404 : {
405 1962714607 : struct xlog *log = mp->m_log;
406 1962714607 : int need_bytes;
407 1962714607 : int error = 0;
408 :
409 3925429214 : if (xlog_is_shutdown(log))
410 : return -EIO;
411 :
412 1962714578 : XFS_STATS_INC(mp, xs_try_logspace);
413 :
414 : /*
415 : * This is a new transaction on the ticket, so we need to change the
416 : * transaction ID so that the next transaction has a different TID in
417 : * the log. Just add one to the existing tid so that we can see chains
418 : * of rolling transactions in the log easily.
419 : */
420 1962693122 : tic->t_tid++;
421 :
422 1962693122 : xlog_grant_push_ail(log, tic->t_unit_res);
423 :
424 1962683724 : tic->t_curr_res = tic->t_unit_res;
425 1962683724 : if (tic->t_cnt > 0)
426 : return 0;
427 :
428 529712628 : trace_xfs_log_regrant(log, tic);
429 :
430 529699338 : error = xlog_grant_head_check(log, &log->l_write_head, tic,
431 : &need_bytes);
432 529710341 : if (error)
433 0 : goto out_error;
434 :
435 529710341 : xlog_grant_add_space(log, &log->l_write_head.grant, need_bytes);
436 529726988 : trace_xfs_log_regrant_exit(log, tic);
437 529719982 : xlog_verify_grant_tail(log);
438 529719982 : return 0;
439 :
440 : out_error:
441 : /*
442 : * If we are failing, make sure the ticket doesn't have any current
443 : * reservations. We don't want to add this back when the ticket/
444 : * transaction gets cancelled.
445 : */
446 0 : tic->t_curr_res = 0;
447 0 : tic->t_cnt = 0; /* ungrant will give back unit_res * t_cnt. */
448 0 : return error;
449 : }
450 :
451 : /*
452 : * Reserve log space and return a ticket corresponding to the reservation.
453 : *
454 : * Each reservation is going to reserve extra space for a log record header.
455 : * When writes happen to the on-disk log, we don't subtract the length of the
456 : * log record header from any reservation. By wasting space in each
457 : * reservation, we prevent over allocation problems.
458 : */
459 : int
460 1983714877 : xfs_log_reserve(
461 : struct xfs_mount *mp,
462 : int unit_bytes,
463 : int cnt,
464 : struct xlog_ticket **ticp,
465 : bool permanent)
466 : {
467 1983714877 : struct xlog *log = mp->m_log;
468 1983714877 : struct xlog_ticket *tic;
469 1983714877 : int need_bytes;
470 1983714877 : int error = 0;
471 :
472 3967429754 : if (xlog_is_shutdown(log))
473 : return -EIO;
474 :
475 1983712424 : XFS_STATS_INC(mp, xs_try_logspace);
476 :
477 1982641630 : ASSERT(*ticp == NULL);
478 1982641630 : tic = xlog_ticket_alloc(log, unit_bytes, cnt, permanent);
479 1983269383 : *ticp = tic;
480 :
481 1983269383 : xlog_grant_push_ail(log, tic->t_cnt ? tic->t_unit_res * tic->t_cnt
482 : : tic->t_unit_res);
483 :
484 1982958847 : trace_xfs_log_reserve(log, tic);
485 :
486 1982834904 : error = xlog_grant_head_check(log, &log->l_reserve_head, tic,
487 : &need_bytes);
488 1983196734 : if (error)
489 0 : goto out_error;
490 :
491 1983196734 : xlog_grant_add_space(log, &log->l_reserve_head.grant, need_bytes);
492 1985660192 : xlog_grant_add_space(log, &log->l_write_head.grant, need_bytes);
493 1986419749 : trace_xfs_log_reserve_exit(log, tic);
494 1985998602 : xlog_verify_grant_tail(log);
495 1985998602 : return 0;
496 :
497 : out_error:
498 : /*
499 : * If we are failing, make sure the ticket doesn't have any current
500 : * reservations. We don't want to add this back when the ticket/
501 : * transaction gets cancelled.
502 : */
503 0 : tic->t_curr_res = 0;
504 0 : tic->t_cnt = 0; /* ungrant will give back unit_res * t_cnt. */
505 0 : return error;
506 : }
507 :
508 : /*
509 : * Run all the pending iclog callbacks and wake log force waiters and iclog
510 : * space waiters so they can process the newly set shutdown state. We really
511 : * don't care what order we process callbacks here because the log is shut down
512 : * and so state cannot change on disk anymore. However, we cannot wake waiters
513 : * until the callbacks have been processed because we may be in unmount and
514 : * we must ensure that all AIL operations the callbacks perform have completed
515 : * before we tear down the AIL.
516 : *
517 : * We avoid processing actively referenced iclogs so that we don't run callbacks
518 : * while the iclog owner might still be preparing the iclog for IO submission.
519 : * These will be caught by xlog_state_iclog_release() and call this function
520 : * again to process any callbacks that may have been added to that iclog.
521 : */
522 : static void
523 14979 : xlog_state_shutdown_callbacks(
524 : struct xlog *log)
525 : {
526 14979 : struct xlog_in_core *iclog;
527 14979 : LIST_HEAD(cb_list);
528 :
529 14979 : iclog = log->l_iclog;
530 119832 : do {
531 119832 : if (atomic_read(&iclog->ic_refcnt)) {
532 : /* Reference holder will re-run iclog callbacks. */
533 2046 : continue;
534 : }
535 117786 : list_splice_init(&iclog->ic_callbacks, &cb_list);
536 117786 : spin_unlock(&log->l_icloglock);
537 :
538 117786 : xlog_cil_process_committed(&cb_list);
539 :
540 117786 : spin_lock(&log->l_icloglock);
541 117786 : wake_up_all(&iclog->ic_write_wait);
542 117786 : wake_up_all(&iclog->ic_force_wait);
543 119832 : } while ((iclog = iclog->ic_next) != log->l_iclog);
544 :
545 14979 : wake_up_all(&log->l_flush_wait);
546 14979 : }
547 :
548 : /*
549 : * Flush iclog to disk if this is the last reference to the given iclog and
550 : * it is in the WANT_SYNC state.
551 : *
552 : * If XLOG_ICL_NEED_FUA is already set on the iclog, we need to ensure that the
553 : * log tail is updated correctly. NEED_FUA indicates that the iclog will be
554 : * written to stable storage, and implies that a commit record is contained
555 : * within the iclog. We need to ensure that the log tail does not move beyond
556 : * the tail that the first commit record in the iclog ordered against, otherwise
557 : * correct recovery of that checkpoint becomes dependent on future operations
558 : * performed on this iclog.
559 : *
560 : * Hence if NEED_FUA is set and the current iclog tail lsn is empty, write the
561 : * current tail into iclog. Once the iclog tail is set, future operations must
562 : * not modify it, otherwise they potentially violate ordering constraints for
563 : * the checkpoint commit that wrote the initial tail lsn value. The tail lsn in
564 : * the iclog will get zeroed on activation of the iclog after sync, so we
565 : * always capture the tail lsn on the iclog on the first NEED_FUA release
566 : * regardless of the number of active reference counts on this iclog.
567 : */
568 : int
569 65550628 : xlog_state_release_iclog(
570 : struct xlog *log,
571 : struct xlog_in_core *iclog,
572 : struct xlog_ticket *ticket)
573 : {
574 65550628 : xfs_lsn_t tail_lsn;
575 65550628 : bool last_ref;
576 :
577 65550628 : lockdep_assert_held(&log->l_icloglock);
578 :
579 65550628 : trace_xlog_iclog_release(iclog, _RET_IP_);
580 : /*
581 : * Grabbing the current log tail needs to be atomic w.r.t. the writing
582 : * of the tail LSN into the iclog so we guarantee that the log tail does
583 : * not move between the first time we know that the iclog needs to be
584 : * made stable and when we eventually submit it.
585 : */
586 65550627 : if ((iclog->ic_state == XLOG_STATE_WANT_SYNC ||
587 19802518 : (iclog->ic_flags & XLOG_ICL_NEED_FUA)) &&
588 51875512 : !iclog->ic_header.h_tail_lsn) {
589 45665930 : tail_lsn = xlog_assign_tail_lsn(log->l_mp);
590 45665933 : iclog->ic_header.h_tail_lsn = cpu_to_be64(tail_lsn);
591 : }
592 :
593 65550630 : last_ref = atomic_dec_and_test(&iclog->ic_refcnt);
594 :
595 131101266 : if (xlog_is_shutdown(log)) {
596 : /*
597 : * If there are no more references to this iclog, process the
598 : * pending iclog callbacks that were waiting on the release of
599 : * this iclog.
600 : */
601 2049 : if (last_ref)
602 2049 : xlog_state_shutdown_callbacks(log);
603 2049 : return -EIO;
604 : }
605 :
606 65548584 : if (!last_ref)
607 : return 0;
608 :
609 58614099 : if (iclog->ic_state != XLOG_STATE_WANT_SYNC) {
610 12950267 : ASSERT(iclog->ic_state == XLOG_STATE_ACTIVE);
611 12950267 : return 0;
612 : }
613 :
614 45663832 : iclog->ic_state = XLOG_STATE_SYNCING;
615 45663832 : xlog_verify_tail_lsn(log, iclog);
616 45663831 : trace_xlog_iclog_syncing(iclog, _RET_IP_);
617 :
618 45663829 : spin_unlock(&log->l_icloglock);
619 45663829 : xlog_sync(log, iclog, ticket);
620 45663570 : spin_lock(&log->l_icloglock);
621 45663570 : return 0;
622 : }
623 :
624 : /*
625 : * Mount a log filesystem
626 : *
627 : * mp - ubiquitous xfs mount point structure
628 : * log_target - buftarg of on-disk log device
629 : * blk_offset - Start block # where block size is 512 bytes (BBSIZE)
630 : * num_bblks - Number of BBSIZE blocks in on-disk log
631 : *
632 : * Return error or zero.
633 : */
634 : int
635 66856 : xfs_log_mount(
636 : xfs_mount_t *mp,
637 : xfs_buftarg_t *log_target,
638 : xfs_daddr_t blk_offset,
639 : int num_bblks)
640 : {
641 66856 : struct xlog *log;
642 66856 : int error = 0;
643 66856 : int min_logfsbs;
644 :
645 66856 : if (!xfs_has_norecovery(mp)) {
646 66802 : xfs_notice(mp, "Mounting V%d Filesystem %pU",
647 : XFS_SB_VERSION_NUM(&mp->m_sb),
648 : &mp->m_sb.sb_uuid);
649 : } else {
650 54 : xfs_notice(mp,
651 : "Mounting V%d filesystem %pU in no-recovery mode. Filesystem will be inconsistent.",
652 : XFS_SB_VERSION_NUM(&mp->m_sb),
653 : &mp->m_sb.sb_uuid);
654 108 : ASSERT(xfs_is_readonly(mp));
655 : }
656 :
657 66856 : log = xlog_alloc_log(mp, log_target, blk_offset, num_bblks);
658 66856 : if (IS_ERR(log)) {
659 0 : error = PTR_ERR(log);
660 0 : goto out;
661 : }
662 66856 : mp->m_log = log;
663 :
664 : /*
665 : * Now that we have set up the log and its internal geometry
666 : * parameters, we can validate the given log space and drop a critical
667 : * message via syslog if the log size is too small. A log that is too
668 : * small can lead to unexpected situations in transaction log space
669 : * reservation stage. The superblock verifier has already validated all
670 : * the other log geometry constraints, so we don't have to check those
671 : * here.
672 : *
673 : * Note: For v4 filesystems, we can't just reject the mount if the
674 : * validation fails. This would mean that people would have to
675 : * downgrade their kernel just to remedy the situation as there is no
676 : * way to grow the log (short of black magic surgery with xfs_db).
677 : *
678 : * We can, however, reject mounts for V5 format filesystems, as the
679 : * mkfs binary being used to make the filesystem should never create a
680 : * filesystem with a log that is too small.
681 : */
682 66856 : min_logfsbs = xfs_log_calc_minimum_size(mp);
683 66856 : if (mp->m_sb.sb_logblocks < min_logfsbs) {
684 0 : xfs_warn(mp,
685 : "Log size %d blocks too small, minimum size is %d blocks",
686 : mp->m_sb.sb_logblocks, min_logfsbs);
687 :
688 : /*
689 : * Log check errors are always fatal on v5; or whenever bad
690 : * metadata leads to a crash.
691 : */
692 0 : if (xfs_has_crc(mp)) {
693 0 : xfs_crit(mp, "AAIEEE! Log failed size checks. Abort!");
694 0 : ASSERT(0);
695 0 : error = -EINVAL;
696 0 : goto out_free_log;
697 : }
698 0 : xfs_crit(mp, "Log size out of supported range.");
699 0 : xfs_crit(mp,
700 : "Continuing onwards, but if log hangs are experienced then please report this message in the bug report.");
701 : }
702 :
703 : /*
704 : * Initialize the AIL now we have a log.
705 : */
706 66856 : error = xfs_trans_ail_init(mp);
707 66856 : if (error) {
708 0 : xfs_warn(mp, "AIL initialisation failed: error %d", error);
709 0 : goto out_free_log;
710 : }
711 66856 : log->l_ailp = mp->m_ail;
712 :
713 : /*
714 : * skip log recovery on a norecovery mount. pretend it all
715 : * just worked.
716 : */
717 66856 : if (!xfs_has_norecovery(mp)) {
718 : /*
719 : * log recovery ignores readonly state and so we need to clear
720 : * mount-based read only state so it can write to disk.
721 : */
722 66802 : bool readonly = test_and_clear_bit(XFS_OPSTATE_READONLY,
723 66802 : &mp->m_opstate);
724 66802 : error = xlog_recover(log);
725 66802 : if (readonly)
726 2066 : set_bit(XFS_OPSTATE_READONLY, &mp->m_opstate);
727 66802 : if (error) {
728 42 : xfs_warn(mp, "log mount/recovery failed: error %d",
729 : error);
730 42 : xlog_recover_cancel(log);
731 42 : goto out_destroy_ail;
732 : }
733 : }
734 :
735 66814 : error = xfs_sysfs_init(&log->l_kobj, &xfs_log_ktype, &mp->m_kobj,
736 : "log");
737 66814 : if (error)
738 0 : goto out_destroy_ail;
739 :
740 : /* Normal transactions can now occur */
741 66814 : clear_bit(XLOG_ACTIVE_RECOVERY, &log->l_opstate);
742 :
743 : /*
744 : * Now the log has been fully initialised and we know where our
745 : * space grant counters are, we can initialise the permanent ticket
746 : * needed for delayed logging to work.
747 : */
748 66814 : xlog_cil_init_post_recovery(log);
749 :
750 66814 : return 0;
751 :
752 42 : out_destroy_ail:
753 42 : xfs_trans_ail_destroy(mp);
754 42 : out_free_log:
755 42 : xlog_dealloc_log(log);
756 : out:
757 : return error;
758 : }
759 :
760 : /*
761 : * Finish the recovery of the file system. This is separate from the
762 : * xfs_log_mount() call, because it depends on the code in xfs_mountfs() to read
763 : * in the root and real-time bitmap inodes between calling xfs_log_mount() and
764 : * here.
765 : *
766 : * If we finish recovery successfully, start the background log work. If we are
767 : * not doing recovery, then we have a RO filesystem and we don't need to start
768 : * it.
769 : */
770 : int
771 66752 : xfs_log_mount_finish(
772 : struct xfs_mount *mp)
773 : {
774 66752 : struct xlog *log = mp->m_log;
775 66752 : bool readonly;
776 66752 : int error = 0;
777 :
778 66752 : if (xfs_has_norecovery(mp)) {
779 88 : ASSERT(xfs_is_readonly(mp));
780 44 : return 0;
781 : }
782 :
783 : /*
784 : * log recovery ignores readonly state and so we need to clear
785 : * mount-based read only state so it can write to disk.
786 : */
787 66708 : readonly = test_and_clear_bit(XFS_OPSTATE_READONLY, &mp->m_opstate);
788 :
789 : /*
790 : * During the second phase of log recovery, we need iget and
791 : * iput to behave like they do for an active filesystem.
792 : * xfs_fs_drop_inode needs to be able to prevent the deletion
793 : * of inodes before we're done replaying log items on those
794 : * inodes. Turn it off immediately after recovery finishes
795 : * so that we don't leak the quota inodes if subsequent mount
796 : * activities fail.
797 : *
798 : * We let all inodes involved in redo item processing end up on
799 : * the LRU instead of being evicted immediately so that if we do
800 : * something to an unlinked inode, the irele won't cause
801 : * premature truncation and freeing of the inode, which results
802 : * in log recovery failure. We have to evict the unreferenced
803 : * lru inodes after clearing SB_ACTIVE because we don't
804 : * otherwise clean up the lru if there's a subsequent failure in
805 : * xfs_mountfs, which leads to us leaking the inodes if nothing
806 : * else (e.g. quotacheck) references the inodes before the
807 : * mount failure occurs.
808 : */
809 66708 : mp->m_super->s_flags |= SB_ACTIVE;
810 66708 : xfs_log_work_queue(mp);
811 133416 : if (xlog_recovery_needed(log))
812 13581 : error = xlog_recover_finish(log);
813 66708 : mp->m_super->s_flags &= ~SB_ACTIVE;
814 66708 : evict_inodes(mp->m_super);
815 :
816 : /*
817 : * Drain the buffer LRU after log recovery. This is required for v4
818 : * filesystems to avoid leaving around buffers with NULL verifier ops,
819 : * but we do it unconditionally to make sure we're always in a clean
820 : * cache state after mount.
821 : *
822 : * Don't push in the error case because the AIL may have pending intents
823 : * that aren't removed until recovery is cancelled.
824 : */
825 133416 : if (xlog_recovery_needed(log)) {
826 13581 : if (!error) {
827 13570 : xfs_log_force(mp, XFS_LOG_SYNC);
828 13570 : xfs_ail_push_all_sync(mp->m_ail);
829 : }
830 15456 : xfs_notice(mp, "Ending recovery (logdev: %s)",
831 : mp->m_logname ? mp->m_logname : "internal");
832 : } else {
833 53127 : xfs_info(mp, "Ending clean mount");
834 : }
835 66708 : xfs_buftarg_drain(mp->m_ddev_targp);
836 :
837 66708 : clear_bit(XLOG_RECOVERY_NEEDED, &log->l_opstate);
838 66708 : if (readonly)
839 2045 : set_bit(XFS_OPSTATE_READONLY, &mp->m_opstate);
840 :
841 : /* Make sure the log is dead if we're returning failure. */
842 66719 : ASSERT(!error || xlog_is_shutdown(log));
843 :
844 : return error;
845 : }
846 :
847 : /*
848 : * The mount has failed. Cancel the recovery if it hasn't completed and destroy
849 : * the log.
850 : */
851 : void
852 178 : xfs_log_mount_cancel(
853 : struct xfs_mount *mp)
854 : {
855 178 : xlog_recover_cancel(mp->m_log);
856 178 : xfs_log_unmount(mp);
857 178 : }
858 :
859 : /*
860 : * Flush out the iclog to disk ensuring that device caches are flushed and
861 : * the iclog hits stable storage before any completion waiters are woken.
862 : */
863 : static inline int
864 6008950 : xlog_force_iclog(
865 : struct xlog_in_core *iclog)
866 : {
867 6008950 : atomic_inc(&iclog->ic_refcnt);
868 6008950 : iclog->ic_flags |= XLOG_ICL_NEED_FLUSH | XLOG_ICL_NEED_FUA;
869 6008950 : if (iclog->ic_state == XLOG_STATE_ACTIVE)
870 6008950 : xlog_state_switch_iclogs(iclog->ic_log, iclog, 0);
871 6008948 : return xlog_state_release_iclog(iclog->ic_log, iclog, NULL);
872 : }
873 :
874 : /*
875 : * Cycle all the iclogbuf locks to make sure all log IO completion
876 : * is done before we tear down these buffers.
877 : */
878 : static void
879 66825 : xlog_wait_iclog_completion(struct xlog *log)
880 : {
881 66825 : int i;
882 66825 : struct xlog_in_core *iclog = log->l_iclog;
883 :
884 601365 : for (i = 0; i < log->l_iclog_bufs; i++) {
885 534540 : down(&iclog->ic_sema);
886 534540 : up(&iclog->ic_sema);
887 534540 : iclog = iclog->ic_next;
888 : }
889 66825 : }
890 :
891 : /*
892 : * Wait for the iclog and all prior iclogs to be written to disk as required by the
893 : * log force state machine. Waiting on ic_force_wait ensures iclog completions
894 : * have been ordered and callbacks run before we are woken here, hence
895 : * guaranteeing that all the iclogs up to this one are on stable storage.
896 : */
897 : int
898 13443415 : xlog_wait_on_iclog(
899 : struct xlog_in_core *iclog)
900 : __releases(iclog->ic_log->l_icloglock)
901 : {
902 13443415 : struct xlog *log = iclog->ic_log;
903 :
904 13443415 : trace_xlog_iclog_wait_on(iclog, _RET_IP_);
905 26886832 : if (!xlog_is_shutdown(log) &&
906 13443370 : iclog->ic_state != XLOG_STATE_ACTIVE &&
907 : iclog->ic_state != XLOG_STATE_DIRTY) {
908 10849014 : XFS_STATS_INC(log->l_mp, xs_log_force_sleep);
909 10849016 : xlog_wait(&iclog->ic_force_wait, &log->l_icloglock);
910 : } else {
911 2594402 : spin_unlock(&log->l_icloglock);
912 : }
913 :
914 26882880 : if (xlog_is_shutdown(log))
915 4913 : return -EIO;
916 : return 0;
917 : }
918 :
919 : /*
920 : * Write out an unmount record using the ticket provided. We have to account for
921 : * the data space used in the unmount ticket as this write is not done from a
922 : * transaction context that has already done the accounting for us.
923 : */
924 : static int
925 56252 : xlog_write_unmount_record(
926 : struct xlog *log,
927 : struct xlog_ticket *ticket)
928 : {
929 56252 : struct {
930 : struct xlog_op_header ophdr;
931 : struct xfs_unmount_log_format ulf;
932 56252 : } unmount_rec = {
933 : .ophdr = {
934 : .oh_clientid = XFS_LOG,
935 56252 : .oh_tid = cpu_to_be32(ticket->t_tid),
936 : .oh_flags = XLOG_UNMOUNT_TRANS,
937 : },
938 : .ulf = {
939 : .magic = XLOG_UNMOUNT_TYPE,
940 : },
941 : };
942 56252 : struct xfs_log_iovec reg = {
943 : .i_addr = &unmount_rec,
944 : .i_len = sizeof(unmount_rec),
945 : .i_type = XLOG_REG_TYPE_UNMOUNT,
946 : };
947 56252 : struct xfs_log_vec vec = {
948 : .lv_niovecs = 1,
949 : .lv_iovecp = ®,
950 : };
951 56252 : LIST_HEAD(lv_chain);
952 56252 : list_add(&vec.lv_list, &lv_chain);
953 :
954 56252 : BUILD_BUG_ON((sizeof(struct xlog_op_header) +
955 : sizeof(struct xfs_unmount_log_format)) !=
956 : sizeof(unmount_rec));
957 :
958 : /* account for space used by record data */
959 56252 : ticket->t_curr_res -= sizeof(unmount_rec);
960 :
961 56252 : return xlog_write(log, NULL, &lv_chain, ticket, reg.i_len);
962 : }
963 :
964 : /*
965 : * Mark the filesystem clean by writing an unmount record to the head of the
966 : * log.
967 : */
968 : static void
969 56252 : xlog_unmount_write(
970 : struct xlog *log)
971 : {
972 56252 : struct xfs_mount *mp = log->l_mp;
973 56252 : struct xlog_in_core *iclog;
974 56252 : struct xlog_ticket *tic = NULL;
975 56252 : int error;
976 :
977 56252 : error = xfs_log_reserve(mp, 600, 1, &tic, 0);
978 56252 : if (error)
979 0 : goto out_err;
980 :
981 56252 : error = xlog_write_unmount_record(log, tic);
982 : /*
983 : * At this point, we're umounting anyway, so there's no point in
984 : * transitioning log state to shutdown. Just continue...
985 : */
986 56252 : out_err:
987 56252 : if (error)
988 0 : xfs_alert(mp, "%s: unmount record failed", __func__);
989 :
990 56252 : spin_lock(&log->l_icloglock);
991 56252 : iclog = log->l_iclog;
992 56252 : error = xlog_force_iclog(iclog);
993 56252 : xlog_wait_on_iclog(iclog);
994 :
995 56252 : if (tic) {
996 56252 : trace_xfs_log_umount_write(log, tic);
997 56252 : xfs_log_ticket_ungrant(log, tic);
998 : }
999 56252 : }
1000 :
1001 : static void
1002 56252 : xfs_log_unmount_verify_iclog(
1003 : struct xlog *log)
1004 : {
1005 56252 : struct xlog_in_core *iclog = log->l_iclog;
1006 :
1007 449956 : do {
1008 449956 : ASSERT(iclog->ic_state == XLOG_STATE_ACTIVE);
1009 449956 : ASSERT(iclog->ic_offset == 0);
1010 449956 : } while ((iclog = iclog->ic_next) != log->l_iclog);
1011 56252 : }
1012 :
1013 : /*
1014 : * Unmount record used to have a string "Unmount filesystem--" in the
1015 : * data section where the "Un" was really a magic number (XLOG_UNMOUNT_TYPE).
1016 : * We just write the magic number now since that particular field isn't
1017 : * currently architecture converted and "Unmount" is a bit foo.
1018 : * As far as I know, there weren't any dependencies on the old behaviour.
1019 : */
1020 : static void
1021 69280 : xfs_log_unmount_write(
1022 : struct xfs_mount *mp)
1023 : {
1024 69280 : struct xlog *log = mp->m_log;
1025 :
1026 69280 : if (!xfs_log_writable(mp))
1027 : return;
1028 :
1029 56273 : xfs_log_force(mp, XFS_LOG_SYNC);
1030 :
1031 112546 : if (xlog_is_shutdown(log))
1032 : return;
1033 :
1034 : /*
1035 : * If we think the summary counters are bad, avoid writing the unmount
1036 : * record to force log recovery at next mount, after which the summary
1037 : * counters will be recalculated. Refer to xlog_check_unmount_rec for
1038 : * more details.
1039 : */
1040 56273 : if (XFS_TEST_ERROR(xfs_fs_has_sickness(mp, XFS_SICK_FS_COUNTERS), mp,
1041 : XFS_ERRTAG_FORCE_SUMMARY_RECALC)) {
1042 21 : xfs_alert(mp, "%s: will fix summary counters at next mount",
1043 : __func__);
1044 21 : return;
1045 : }
1046 :
1047 56252 : xfs_log_unmount_verify_iclog(log);
1048 56252 : xlog_unmount_write(log);
1049 : }
1050 :
1051 : /*
1052 : * Empty the log for unmount/freeze.
1053 : *
1054 : * To do this, we first need to shut down the background log work so it is not
1055 : * trying to cover the log as we clean up. We then need to unpin all objects in
1056 : * the log so we can then flush them out. Once they have completed their IO and
1057 : * run the callbacks removing themselves from the AIL, we can cover the log.
1058 : */
1059 : int
1060 138947 : xfs_log_quiesce(
1061 : struct xfs_mount *mp)
1062 : {
1063 : /*
1064 : * Clear log incompat features since we're quiescing the log. Report
1065 : * failures, though it's not fatal to have a higher log feature
1066 : * protection level than the log contents actually require.
1067 : */
1068 138947 : if (xfs_clear_incompat_log_features(mp, XFS_SB_FEAT_INCOMPAT_LOG_ALL)) {
1069 85632 : int error;
1070 :
1071 85632 : error = xfs_sync_sb(mp, false);
1072 85632 : if (error)
1073 0 : xfs_warn(mp,
1074 : "Failed to clear log incompat features on quiesce");
1075 : }
1076 :
1077 138947 : cancel_delayed_work_sync(&mp->m_log->l_work);
1078 138947 : xfs_log_force(mp, XFS_LOG_SYNC);
1079 :
1080 : /*
1081 : * The superblock buffer is uncached and while xfs_ail_push_all_sync()
1082 : * will push it, xfs_buftarg_wait() will not wait for it. Further,
1083 : * xfs_buf_iowait() cannot be used because it was pushed with the
1084 : * XBF_ASYNC flag set, so we need to use a lock/unlock pair to wait for
1085 : * the IO to complete.
1086 : */
1087 138947 : xfs_ail_push_all_sync(mp->m_ail);
1088 138947 : xfs_buftarg_wait(mp->m_ddev_targp);
1089 138947 : xfs_buf_lock(mp->m_sb_bp);
1090 138947 : xfs_buf_unlock(mp->m_sb_bp);
1091 :
1092 138947 : return xfs_log_cover(mp);
1093 : }
1094 :
1095 : void
1096 2455 : xfs_log_clean(
1097 : struct xfs_mount *mp)
1098 : {
1099 2455 : xfs_log_quiesce(mp);
1100 69280 : xfs_log_unmount_write(mp);
1101 2455 : }
1102 :
1103 : /*
1104 : * Shut down and release the AIL and Log.
1105 : *
1106 : * During unmount, we need to ensure we flush all the dirty metadata objects
1107 : * from the AIL so that the log is empty before we write the unmount record to
1108 : * the log. Once this is done, we can tear down the AIL and the log.
1109 : */
1110 : void
1111 66825 : xfs_log_unmount(
1112 : struct xfs_mount *mp)
1113 : {
1114 66825 : xfs_log_clean(mp);
1115 :
1116 : /*
1117 : * If shutdown has come from iclog IO context, the log
1118 : * cleaning will have been skipped and so we need to wait
1119 : * for the iclog to complete shutdown processing before we
1120 : * tear anything down.
1121 : */
1122 66825 : xlog_wait_iclog_completion(mp->m_log);
1123 :
1124 66825 : xfs_buftarg_drain(mp->m_ddev_targp);
1125 :
1126 66825 : xfs_trans_ail_destroy(mp);
1127 :
1128 66825 : xfs_sysfs_del(&mp->m_log->l_kobj);
1129 :
1130 66825 : xlog_dealloc_log(mp->m_log);
1131 66825 : }
1132 :
1133 : void
1134 18125556722 : xfs_log_item_init(
1135 : struct xfs_mount *mp,
1136 : struct xfs_log_item *item,
1137 : int type,
1138 : const struct xfs_item_ops *ops)
1139 : {
1140 18125556722 : item->li_log = mp->m_log;
1141 18125556722 : item->li_ailp = mp->m_ail;
1142 18125556722 : item->li_type = type;
1143 18125556722 : item->li_ops = ops;
1144 18125556722 : item->li_lv = NULL;
1145 :
1146 18125556722 : INIT_LIST_HEAD(&item->li_ail);
1147 18125556722 : INIT_LIST_HEAD(&item->li_cil);
1148 18125556722 : INIT_LIST_HEAD(&item->li_bio_list);
1149 18125556722 : INIT_LIST_HEAD(&item->li_trans);
1150 18125556722 : }
1151 :
1152 : /*
1153 : * Wake up processes waiting for log space after we have moved the log tail.
1154 : */
1155 : void
1156 1995288599 : xfs_log_space_wake(
1157 : struct xfs_mount *mp)
1158 : {
1159 1995288599 : struct xlog *log = mp->m_log;
1160 1995288599 : int free_bytes;
1161 :
1162 3990577198 : if (xlog_is_shutdown(log))
1163 66622 : return;
1164 :
1165 1995221977 : if (!list_empty_careful(&log->l_write_head.waiters)) {
1166 692 : ASSERT(!xlog_in_recovery(log));
1167 :
1168 346 : spin_lock(&log->l_write_head.lock);
1169 346 : free_bytes = xlog_space_left(log, &log->l_write_head.grant);
1170 346 : xlog_grant_head_wake(log, &log->l_write_head, &free_bytes);
1171 346 : spin_unlock(&log->l_write_head.lock);
1172 : }
1173 :
1174 1995194484 : if (!list_empty_careful(&log->l_reserve_head.waiters)) {
1175 22786254 : ASSERT(!xlog_in_recovery(log));
1176 :
1177 11393127 : spin_lock(&log->l_reserve_head.lock);
1178 11401551 : free_bytes = xlog_space_left(log, &log->l_reserve_head.grant);
1179 11401551 : xlog_grant_head_wake(log, &log->l_reserve_head, &free_bytes);
1180 11401551 : spin_unlock(&log->l_reserve_head.lock);
1181 : }
1182 : }
1183 :
1184 : /*
1185 : * Determine if we have a transaction that has gone to disk that needs to be
1186 : * covered. To begin the transition to the idle state firstly the log needs to
1187 : * be idle. That means the CIL, the AIL and the iclogs needs to be empty before
1188 : * we start attempting to cover the log.
1189 : *
1190 : * Only if we are then in a state where covering is needed, the caller is
1191 : * informed that dummy transactions are required to move the log into the idle
1192 : * state.
1193 : *
1194 : * If there are any items in the AIL or CIL, then we do not want to attempt to
1195 : * cover the log as we may be in a situation where there isn't log space
1196 : * available to run a dummy transaction and this can lead to deadlocks when the
1197 : * tail of the log is pinned by an item that is modified in the CIL. Hence
1198 : * there's no point in running a dummy transaction at this point because we
1199 : * can't start trying to idle the log until both the CIL and AIL are empty.
1200 : */
1201 : static bool
1202 376132 : xfs_log_need_covered(
1203 : struct xfs_mount *mp)
1204 : {
1205 376132 : struct xlog *log = mp->m_log;
1206 376132 : bool needed = false;
1207 :
1208 376132 : if (!xlog_cil_empty(log))
1209 : return false;
1210 :
1211 365457 : spin_lock(&log->l_icloglock);
1212 365457 : switch (log->l_covered_state) {
1213 : case XLOG_STATE_COVER_DONE:
1214 : case XLOG_STATE_COVER_DONE2:
1215 : case XLOG_STATE_COVER_IDLE:
1216 : break;
1217 206310 : case XLOG_STATE_COVER_NEED:
1218 : case XLOG_STATE_COVER_NEED2:
1219 206310 : if (xfs_ail_min_lsn(log->l_ailp))
1220 : break;
1221 204985 : if (!xlog_iclogs_empty(log))
1222 : break;
1223 :
1224 204985 : needed = true;
1225 204985 : if (log->l_covered_state == XLOG_STATE_COVER_NEED)
1226 102540 : log->l_covered_state = XLOG_STATE_COVER_DONE;
1227 : else
1228 102445 : log->l_covered_state = XLOG_STATE_COVER_DONE2;
1229 : break;
1230 0 : default:
1231 0 : needed = true;
1232 0 : break;
1233 : }
1234 365457 : spin_unlock(&log->l_icloglock);
1235 365457 : return needed;
1236 : }
1237 :
1238 : /*
1239 : * Explicitly cover the log. This is similar to background log covering but
1240 : * intended for usage in quiesce codepaths. The caller is responsible to ensure
1241 : * the log is idle and suitable for covering. The CIL, iclog buffers and AIL
1242 : * must all be empty.
1243 : */
1244 : static int
1245 138947 : xfs_log_cover(
1246 : struct xfs_mount *mp)
1247 : {
1248 138947 : int error = 0;
1249 138947 : bool need_covered;
1250 :
1251 145629 : ASSERT((xlog_cil_empty(mp->m_log) && xlog_iclogs_empty(mp->m_log) &&
1252 : !xfs_ail_min_lsn(mp->m_log->l_ailp)) ||
1253 : xlog_is_shutdown(mp->m_log));
1254 :
1255 138947 : if (!xfs_log_writable(mp))
1256 : return 0;
1257 :
1258 : /*
1259 : * xfs_log_need_covered() is not idempotent because it progresses the
1260 : * state machine if the log requires covering. Therefore, we must call
1261 : * this function once and use the result until we've issued an sb sync.
1262 : * Do so first to make that abundantly clear.
1263 : *
1264 : * Fall into the covering sequence if the log needs covering or the
1265 : * mount has lazy superblock accounting to sync to disk. The sb sync
1266 : * used for covering accumulates the in-core counters, so covering
1267 : * handles this for us.
1268 : */
1269 125997 : need_covered = xfs_log_need_covered(mp);
1270 125997 : if (!need_covered && !xfs_has_lazysbcount(mp))
1271 : return 0;
1272 :
1273 : /*
1274 : * To cover the log, commit the superblock twice (at most) in
1275 : * independent checkpoints. The first serves as a reference for the
1276 : * tail pointer. The sync transaction and AIL push empties the AIL and
1277 : * updates the in-core tail to the LSN of the first checkpoint. The
1278 : * second commit updates the on-disk tail with the in-core LSN,
1279 : * covering the log. Push the AIL one more time to leave it empty, as
1280 : * we found it.
1281 : */
1282 228196 : do {
1283 228196 : error = xfs_sync_sb(mp, true);
1284 228196 : if (error)
1285 : break;
1286 228161 : xfs_ail_push_all_sync(mp->m_ail);
1287 228161 : } while (xfs_log_need_covered(mp));
1288 :
1289 : return error;
1290 : }
1291 :
1292 : /*
1293 : * We may be holding the log iclog lock upon entering this routine.
1294 : */
1295 : xfs_lsn_t
1296 46980819 : xlog_assign_tail_lsn_locked(
1297 : struct xfs_mount *mp)
1298 : {
1299 46980819 : struct xlog *log = mp->m_log;
1300 46980819 : struct xfs_log_item *lip;
1301 46980819 : xfs_lsn_t tail_lsn;
1302 :
1303 46980819 : assert_spin_locked(&mp->m_ail->ail_lock);
1304 :
1305 : /*
1306 : * To make sure we always have a valid LSN for the log tail we keep
1307 : * track of the last LSN which was committed in log->l_last_sync_lsn,
1308 : * and use that when the AIL was empty.
1309 : */
1310 46980819 : lip = xfs_ail_min(mp->m_ail);
1311 45725366 : if (lip)
1312 45725366 : tail_lsn = lip->li_lsn;
1313 : else
1314 1255453 : tail_lsn = atomic64_read(&log->l_last_sync_lsn);
1315 46980819 : trace_xfs_log_assign_tail_lsn(log, tail_lsn);
1316 46980819 : atomic64_set(&log->l_tail_lsn, tail_lsn);
1317 46980819 : return tail_lsn;
1318 : }
1319 :
1320 : xfs_lsn_t
1321 45679510 : xlog_assign_tail_lsn(
1322 : struct xfs_mount *mp)
1323 : {
1324 45679510 : xfs_lsn_t tail_lsn;
1325 :
1326 45679510 : spin_lock(&mp->m_ail->ail_lock);
1327 45679515 : tail_lsn = xlog_assign_tail_lsn_locked(mp);
1328 45679513 : spin_unlock(&mp->m_ail->ail_lock);
1329 :
1330 45679515 : return tail_lsn;
1331 : }
1332 :
1333 : /*
1334 : * Return the space in the log between the tail and the head. The head
1335 : * is passed in the cycle/bytes formal parms. In the special case where
1336 : * the reserve head has wrapped past the tail, this calculation is no
1337 : * longer valid. In this case, just return 0 which means there is no space
1338 : * in the log. This works for all places where this function is called
1339 : * with the reserve head. Of course, if the write head were to ever
1340 : * wrap the tail, we should blow up. Rather than catch this case here,
1341 : * we depend on other ASSERTions in other parts of the code. XXXmiken
1342 : *
1343 : * If reservation head is behind the tail, we have a problem. Warn about it,
1344 : * but then treat it as if the log is empty.
1345 : *
1346 : * If the log is shut down, the head and tail may be invalid or out of whack, so
1347 : * shortcut invalidity asserts in this case so that we don't trigger them
1348 : * falsely.
1349 : */
1350 : STATIC int
1351 6494824671 : xlog_space_left(
1352 : struct xlog *log,
1353 : atomic64_t *head)
1354 : {
1355 6494824671 : int tail_bytes;
1356 6494824671 : int tail_cycle;
1357 6494824671 : int head_cycle;
1358 6494824671 : int head_bytes;
1359 :
1360 6494824671 : xlog_crack_grant_head(head, &head_cycle, &head_bytes);
1361 6494824671 : xlog_crack_atomic_lsn(&log->l_tail_lsn, &tail_cycle, &tail_bytes);
1362 6494824671 : tail_bytes = BBTOB(tail_bytes);
1363 6494824671 : if (tail_cycle == head_cycle && head_bytes >= tail_bytes)
1364 4634254394 : return log->l_logsize - (head_bytes - tail_bytes);
1365 1860570277 : if (tail_cycle + 1 < head_cycle)
1366 : return 0;
1367 :
1368 : /* Ignore potential inconsistency when shutdown. */
1369 3721140128 : if (xlog_is_shutdown(log))
1370 0 : return log->l_logsize;
1371 :
1372 1860570064 : if (tail_cycle < head_cycle) {
1373 1860570064 : ASSERT(tail_cycle == (head_cycle - 1));
1374 1860570064 : return tail_bytes - head_bytes;
1375 : }
1376 :
1377 : /*
1378 : * The reservation head is behind the tail. In this case we just want to
1379 : * return the size of the log as the amount of space left.
1380 : */
1381 0 : xfs_alert(log->l_mp, "xlog_space_left: head behind tail");
1382 0 : xfs_alert(log->l_mp, " tail_cycle = %d, tail_bytes = %d",
1383 : tail_cycle, tail_bytes);
1384 0 : xfs_alert(log->l_mp, " GH cycle = %d, GH bytes = %d",
1385 : head_cycle, head_bytes);
1386 0 : ASSERT(0);
1387 0 : return log->l_logsize;
1388 : }
1389 :
1390 :
1391 : static void
1392 45663825 : xlog_ioend_work(
1393 : struct work_struct *work)
1394 : {
1395 45663825 : struct xlog_in_core *iclog =
1396 45663825 : container_of(work, struct xlog_in_core, ic_end_io_work);
1397 45663825 : struct xlog *log = iclog->ic_log;
1398 45663825 : int error;
1399 :
1400 45663825 : error = blk_status_to_errno(iclog->ic_bio.bi_status);
1401 : #ifdef DEBUG
1402 : /* treat writes with injected CRC errors as failed */
1403 45663825 : if (iclog->ic_fail_crc)
1404 : error = -EIO;
1405 : #endif
1406 :
1407 : /*
1408 : * Race to shutdown the filesystem if we see an error.
1409 : */
1410 45663766 : if (XFS_TEST_ERROR(error, log->l_mp, XFS_ERRTAG_IODONE_IOERR)) {
1411 11092 : xfs_alert(log->l_mp, "log I/O error %d", error);
1412 11092 : xlog_force_shutdown(log, SHUTDOWN_LOG_IO_ERROR);
1413 : }
1414 :
1415 45663825 : xlog_state_done_syncing(iclog);
1416 45663825 : bio_uninit(&iclog->ic_bio);
1417 :
1418 : /*
1419 : * Drop the lock to signal that we are done. Nothing references the
1420 : * iclog after this, so an unmount waiting on this lock can now tear it
1421 : * down safely. As such, it is unsafe to reference the iclog after the
1422 : * unlock as we could race with it being freed.
1423 : */
1424 45663825 : up(&iclog->ic_sema);
1425 45663825 : }
1426 :
1427 : /*
1428 : * Return size of each in-core log record buffer.
1429 : *
1430 : * All machines get 8 x 32kB buffers by default, unless tuned otherwise.
1431 : *
1432 : * If the filesystem blocksize is too large, we may need to choose a
1433 : * larger size since the directory code currently logs entire blocks.
1434 : */
1435 : STATIC void
1436 66856 : xlog_get_iclog_buffer_size(
1437 : struct xfs_mount *mp,
1438 : struct xlog *log)
1439 : {
1440 66856 : if (mp->m_logbufs <= 0)
1441 66836 : mp->m_logbufs = XLOG_MAX_ICLOGS;
1442 66856 : if (mp->m_logbsize <= 0)
1443 65689 : mp->m_logbsize = XLOG_BIG_RECORD_BSIZE;
1444 :
1445 66856 : log->l_iclog_bufs = mp->m_logbufs;
1446 66856 : log->l_iclog_size = mp->m_logbsize;
1447 :
1448 : /*
1449 : * # headers = size / 32k - one header holds cycles from 32k of data.
1450 : */
1451 66856 : log->l_iclog_heads =
1452 66856 : DIV_ROUND_UP(mp->m_logbsize, XLOG_HEADER_CYCLE_SIZE);
1453 66856 : log->l_iclog_hsize = log->l_iclog_heads << BBSHIFT;
1454 66856 : }
1455 :
1456 : void
1457 158735 : xfs_log_work_queue(
1458 : struct xfs_mount *mp)
1459 : {
1460 158735 : queue_delayed_work(mp->m_sync_workqueue, &mp->m_log->l_work,
1461 158735 : msecs_to_jiffies(xfs_syncd_centisecs * 10));
1462 158735 : }
1463 :
1464 : /*
1465 : * Clear the log incompat flags if we have the opportunity.
1466 : *
1467 : * This only happens if we're about to log the second dummy transaction as part
1468 : * of covering the log and we can get the log incompat feature usage lock.
1469 : */
1470 : static inline void
1471 556 : xlog_clear_incompat(
1472 : struct xlog *log)
1473 : {
1474 556 : struct xfs_mount *mp = log->l_mp;
1475 556 : uint32_t incompat_mask = 0;
1476 :
1477 556 : if (!xfs_sb_has_incompat_log_feature(&mp->m_sb,
1478 : XFS_SB_FEAT_INCOMPAT_LOG_ALL))
1479 : return;
1480 :
1481 486 : if (log->l_covered_state != XLOG_STATE_COVER_DONE2)
1482 : return;
1483 :
1484 198 : if (down_write_trylock(&log->l_incompat_xattrs))
1485 198 : incompat_mask |= XFS_SB_FEAT_INCOMPAT_LOG_XATTRS;
1486 :
1487 198 : if (down_write_trylock(&log->l_incompat_swapext))
1488 198 : incompat_mask |= XFS_SB_FEAT_INCOMPAT_LOG_SWAPEXT;
1489 :
1490 198 : if (!incompat_mask)
1491 : return;
1492 :
1493 198 : xfs_clear_incompat_log_features(mp, incompat_mask);
1494 :
1495 198 : if (incompat_mask & XFS_SB_FEAT_INCOMPAT_LOG_SWAPEXT)
1496 198 : up_write(&log->l_incompat_swapext);
1497 :
1498 198 : if (incompat_mask & XFS_SB_FEAT_INCOMPAT_LOG_XATTRS)
1499 198 : up_write(&log->l_incompat_xattrs);
1500 : }
1501 :
1502 : /*
1503 : * Every sync period we need to unpin all items in the AIL and push them to
1504 : * disk. If there is nothing dirty, then we might need to cover the log to
1505 : * indicate that the filesystem is idle.
1506 : */
1507 : static void
1508 21995 : xfs_log_worker(
1509 : struct work_struct *work)
1510 : {
1511 21995 : struct xlog *log = container_of(to_delayed_work(work),
1512 : struct xlog, l_work);
1513 21995 : struct xfs_mount *mp = log->l_mp;
1514 :
1515 : /* dgc: errors ignored - not fatal and nowhere to report them */
1516 21995 : if (xfs_fs_writable(mp, SB_FREEZE_WRITE) && xfs_log_need_covered(mp)) {
1517 : /*
1518 : * Dump a transaction into the log that contains no real change.
1519 : * This is needed to stamp the current tail LSN into the log
1520 : * during the covering operation.
1521 : *
1522 : * We cannot use an inode here for this - that will push dirty
1523 : * state back up into the VFS and then periodic inode flushing
1524 : * will prevent log covering from making progress. Hence we
1525 : * synchronously log the superblock instead to ensure the
1526 : * superblock is immediately unpinned and can be written back.
1527 : */
1528 556 : xlog_clear_incompat(log);
1529 556 : xfs_sync_sb(mp, true);
1530 : } else
1531 21442 : xfs_log_force(mp, 0);
1532 :
1533 : /* start pushing all the metadata that is currently dirty */
1534 21999 : xfs_ail_push_all(mp->m_ail);
1535 :
1536 : /* queue us up again */
1537 21999 : xfs_log_work_queue(mp);
1538 21999 : }
1539 :
1540 : /*
1541 : * This routine initializes some of the log structure for a given mount point.
1542 : * Its primary purpose is to fill in enough, so recovery can occur. However,
1543 : * some other stuff may be filled in too.
1544 : */
1545 : STATIC struct xlog *
1546 66856 : xlog_alloc_log(
1547 : struct xfs_mount *mp,
1548 : struct xfs_buftarg *log_target,
1549 : xfs_daddr_t blk_offset,
1550 : int num_bblks)
1551 : {
1552 66856 : struct xlog *log;
1553 66856 : xlog_rec_header_t *head;
1554 66856 : xlog_in_core_t **iclogp;
1555 66856 : xlog_in_core_t *iclog, *prev_iclog=NULL;
1556 66856 : int i;
1557 66856 : int error = -ENOMEM;
1558 66856 : uint log2_size = 0;
1559 :
1560 66856 : log = kmem_zalloc(sizeof(struct xlog), KM_MAYFAIL);
1561 66856 : if (!log) {
1562 0 : xfs_warn(mp, "Log allocation failed: No memory!");
1563 0 : goto out;
1564 : }
1565 :
1566 66856 : log->l_mp = mp;
1567 66856 : log->l_targ = log_target;
1568 66856 : log->l_logsize = BBTOB(num_bblks);
1569 66856 : log->l_logBBstart = blk_offset;
1570 66856 : log->l_logBBsize = num_bblks;
1571 66856 : log->l_covered_state = XLOG_STATE_COVER_IDLE;
1572 66856 : set_bit(XLOG_ACTIVE_RECOVERY, &log->l_opstate);
1573 66856 : INIT_DELAYED_WORK(&log->l_work, xfs_log_worker);
1574 :
1575 66856 : log->l_prev_block = -1;
1576 : /* log->l_tail_lsn = 0x100000000LL; cycle = 1; current block = 0 */
1577 66856 : xlog_assign_atomic_lsn(&log->l_tail_lsn, 1, 0);
1578 66856 : xlog_assign_atomic_lsn(&log->l_last_sync_lsn, 1, 0);
1579 66856 : log->l_curr_cycle = 1; /* 0 is bad since this is initial value */
1580 :
1581 66856 : if (xfs_has_logv2(mp) && mp->m_sb.sb_logsunit > 1)
1582 40870 : log->l_iclog_roundoff = mp->m_sb.sb_logsunit;
1583 : else
1584 25986 : log->l_iclog_roundoff = BBSIZE;
1585 :
1586 66856 : xlog_grant_head_init(&log->l_reserve_head);
1587 66856 : xlog_grant_head_init(&log->l_write_head);
1588 :
1589 66856 : error = -EFSCORRUPTED;
1590 66856 : if (xfs_has_sector(mp)) {
1591 64462 : log2_size = mp->m_sb.sb_logsectlog;
1592 64462 : if (log2_size < BBSHIFT) {
1593 0 : xfs_warn(mp, "Log sector size too small (0x%x < 0x%x)",
1594 : log2_size, BBSHIFT);
1595 0 : goto out_free_log;
1596 : }
1597 :
1598 64462 : log2_size -= BBSHIFT;
1599 64462 : if (log2_size > mp->m_sectbb_log) {
1600 0 : xfs_warn(mp, "Log sector size too large (0x%x > 0x%x)",
1601 : log2_size, mp->m_sectbb_log);
1602 0 : goto out_free_log;
1603 : }
1604 :
1605 : /* for larger sector sizes, must have v2 or external log */
1606 64462 : if (log2_size && log->l_logBBstart > 0 &&
1607 : !xfs_has_logv2(mp)) {
1608 0 : xfs_warn(mp,
1609 : "log sector size (0x%x) invalid for configuration.",
1610 : log2_size);
1611 0 : goto out_free_log;
1612 : }
1613 : }
1614 64462 : log->l_sectBBsize = 1 << log2_size;
1615 :
1616 66856 : init_rwsem(&log->l_incompat_xattrs);
1617 66856 : init_rwsem(&log->l_incompat_swapext);
1618 :
1619 66856 : xlog_get_iclog_buffer_size(mp, log);
1620 :
1621 66856 : spin_lock_init(&log->l_icloglock);
1622 66856 : init_waitqueue_head(&log->l_flush_wait);
1623 :
1624 66856 : iclogp = &log->l_iclog;
1625 : /*
1626 : * The amount of memory to allocate for the iclog structure is
1627 : * rather funky due to the way the structure is defined. It is
1628 : * done this way so that we can use different sizes for machines
1629 : * with different amounts of memory. See the definition of
1630 : * xlog_in_core_t in xfs_log_priv.h for details.
1631 : */
1632 66856 : ASSERT(log->l_iclog_size >= 4096);
1633 601644 : for (i = 0; i < log->l_iclog_bufs; i++) {
1634 534788 : size_t bvec_size = howmany(log->l_iclog_size, PAGE_SIZE) *
1635 : sizeof(struct bio_vec);
1636 :
1637 534788 : iclog = kmem_zalloc(sizeof(*iclog) + bvec_size, KM_MAYFAIL);
1638 534788 : if (!iclog)
1639 0 : goto out_free_iclog;
1640 :
1641 534788 : *iclogp = iclog;
1642 534788 : iclog->ic_prev = prev_iclog;
1643 534788 : prev_iclog = iclog;
1644 :
1645 534788 : iclog->ic_data = kvzalloc(log->l_iclog_size,
1646 : GFP_KERNEL | __GFP_RETRY_MAYFAIL);
1647 534788 : if (!iclog->ic_data)
1648 0 : goto out_free_iclog;
1649 534788 : head = &iclog->ic_header;
1650 534788 : memset(head, 0, sizeof(xlog_rec_header_t));
1651 534788 : head->h_magicno = cpu_to_be32(XLOG_HEADER_MAGIC_NUM);
1652 534788 : head->h_version = cpu_to_be32(
1653 : xfs_has_logv2(log->l_mp) ? 2 : 1);
1654 534788 : head->h_size = cpu_to_be32(log->l_iclog_size);
1655 : /* new fields */
1656 534788 : head->h_fmt = cpu_to_be32(XLOG_FMT);
1657 1069576 : memcpy(&head->h_fs_uuid, &mp->m_sb.sb_uuid, sizeof(uuid_t));
1658 :
1659 534788 : iclog->ic_size = log->l_iclog_size - log->l_iclog_hsize;
1660 534788 : iclog->ic_state = XLOG_STATE_ACTIVE;
1661 534788 : iclog->ic_log = log;
1662 534788 : atomic_set(&iclog->ic_refcnt, 0);
1663 534788 : INIT_LIST_HEAD(&iclog->ic_callbacks);
1664 534788 : iclog->ic_datap = (void *)iclog->ic_data + log->l_iclog_hsize;
1665 :
1666 534788 : init_waitqueue_head(&iclog->ic_force_wait);
1667 534788 : init_waitqueue_head(&iclog->ic_write_wait);
1668 534788 : INIT_WORK(&iclog->ic_end_io_work, xlog_ioend_work);
1669 534788 : sema_init(&iclog->ic_sema, 1);
1670 :
1671 534788 : iclogp = &iclog->ic_next;
1672 : }
1673 66856 : *iclogp = log->l_iclog; /* complete ring */
1674 66856 : log->l_iclog->ic_prev = prev_iclog; /* re-write 1st prev ptr */
1675 :
1676 133712 : log->l_ioend_workqueue = alloc_workqueue("xfs-log/%s",
1677 : XFS_WQFLAGS(WQ_FREEZABLE | WQ_MEM_RECLAIM |
1678 : WQ_HIGHPRI),
1679 66856 : 0, mp->m_super->s_id);
1680 66856 : if (!log->l_ioend_workqueue)
1681 0 : goto out_free_iclog;
1682 :
1683 66856 : error = xlog_cil_init(log);
1684 66856 : if (error)
1685 0 : goto out_destroy_workqueue;
1686 : return log;
1687 :
1688 : out_destroy_workqueue:
1689 0 : destroy_workqueue(log->l_ioend_workqueue);
1690 0 : out_free_iclog:
1691 0 : for (iclog = log->l_iclog; iclog; iclog = prev_iclog) {
1692 0 : prev_iclog = iclog->ic_next;
1693 0 : kmem_free(iclog->ic_data);
1694 0 : kmem_free(iclog);
1695 0 : if (prev_iclog == log->l_iclog)
1696 : break;
1697 : }
1698 0 : out_free_log:
1699 0 : kmem_free(log);
1700 0 : out:
1701 0 : return ERR_PTR(error);
1702 : } /* xlog_alloc_log */
1703 :
1704 : /*
1705 : * Compute the LSN that we'd need to push the log tail towards in order to have
1706 : * (a) enough on-disk log space to log the number of bytes specified, (b) at
1707 : * least 25% of the log space free, and (c) at least 256 blocks free. If the
1708 : * log free space already meets all three thresholds, this function returns
1709 : * NULLCOMMITLSN.
1710 : */
1711 : xfs_lsn_t
1712 3963397089 : xlog_grant_push_threshold(
1713 : struct xlog *log,
1714 : int need_bytes)
1715 : {
1716 3963397089 : xfs_lsn_t threshold_lsn = 0;
1717 3963397089 : xfs_lsn_t last_sync_lsn;
1718 3963397089 : int free_blocks;
1719 3963397089 : int free_bytes;
1720 3963397089 : int threshold_block;
1721 3963397089 : int threshold_cycle;
1722 3963397089 : int free_threshold;
1723 :
1724 3963397089 : ASSERT(BTOBB(need_bytes) < log->l_logBBsize);
1725 :
1726 3963397089 : free_bytes = xlog_space_left(log, &log->l_reserve_head.grant);
1727 3963890873 : free_blocks = BTOBBT(free_bytes);
1728 :
1729 : /*
1730 : * Set the threshold for the minimum number of free blocks in the
1731 : * log to the maximum of what the caller needs, one quarter of the
1732 : * log, and 256 blocks.
1733 : */
1734 3963890873 : free_threshold = BTOBB(need_bytes);
1735 3963890873 : free_threshold = max(free_threshold, (log->l_logBBsize >> 2));
1736 3963890873 : free_threshold = max(free_threshold, 256);
1737 3963890873 : if (free_blocks >= free_threshold)
1738 : return NULLCOMMITLSN;
1739 :
1740 53923201 : xlog_crack_atomic_lsn(&log->l_tail_lsn, &threshold_cycle,
1741 : &threshold_block);
1742 53923201 : threshold_block += free_threshold;
1743 53923201 : if (threshold_block >= log->l_logBBsize) {
1744 11078725 : threshold_block -= log->l_logBBsize;
1745 11078725 : threshold_cycle += 1;
1746 : }
1747 53923201 : threshold_lsn = xlog_assign_lsn(threshold_cycle,
1748 : threshold_block);
1749 : /*
1750 : * Don't pass in an lsn greater than the lsn of the last
1751 : * log record known to be on disk. Use a snapshot of the last sync lsn
1752 : * so that it doesn't change between the compare and the set.
1753 : */
1754 53923201 : last_sync_lsn = atomic64_read(&log->l_last_sync_lsn);
1755 53923201 : if (XFS_LSN_CMP(threshold_lsn, last_sync_lsn) > 0)
1756 : threshold_lsn = last_sync_lsn;
1757 :
1758 : return threshold_lsn;
1759 : }
1760 :
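/*
 * Illustrative user-space sketch (not part of xfs_log.c) of the push
 * threshold arithmetic in xlog_grant_push_threshold() above: the free-space
 * floor is the largest of the caller's requirement, one quarter of the log
 * and 256 basic blocks, and the resulting target block wraps into the next
 * cycle if it runs past the physical end of the log.  The clamp against
 * l_last_sync_lsn done by the real function is omitted, and all identifiers
 * prefixed "sketch_" are local to this sketch.
 */
#define SKETCH_BBSHIFT	9	/* 512-byte basic blocks */
#define SKETCH_BTOBB(bytes) \
	(((bytes) + (1 << SKETCH_BBSHIFT) - 1) >> SKETCH_BBSHIFT)

struct sketch_lsn {
	int	cycle;
	int	block;
};

struct sketch_lsn
sketch_push_threshold(
	int	need_bytes,	/* space the caller must be able to log */
	int	log_bb,		/* log size in basic blocks */
	int	tail_cycle,	/* cycle of the current log tail */
	int	tail_block)	/* block of the current log tail */
{
	struct sketch_lsn	target = { .cycle = tail_cycle };
	int			free_threshold;

	free_threshold = SKETCH_BTOBB(need_bytes);
	if (free_threshold < (log_bb >> 2))
		free_threshold = log_bb >> 2;
	if (free_threshold < 256)
		free_threshold = 256;

	target.block = tail_block + free_threshold;
	if (target.block >= log_bb) {
		/* the target wrapped; it lies in the next cycle */
		target.block -= log_bb;
		target.cycle++;
	}
	return target;
}
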
1761 : /*
1762 : * Push the tail of the log if we need to do so to maintain the free log space
1763 : * thresholds set out by xlog_grant_push_threshold. We may need to adopt a
1764 : * policy which pushes on an lsn which is further along in the log once we
1765 : * reach the high water mark. In this manner, we would be creating a low water
1766 : * mark.
1767 : */
1768 : STATIC void
1769 3963866671 : xlog_grant_push_ail(
1770 : struct xlog *log,
1771 : int need_bytes)
1772 : {
1773 3963866671 : xfs_lsn_t threshold_lsn;
1774 :
1775 3963866671 : threshold_lsn = xlog_grant_push_threshold(log, need_bytes);
1776 4016683402 : if (threshold_lsn == NULLCOMMITLSN || xlog_is_shutdown(log))
1777 : return;
1778 :
1779 : /*
1780 : * Get the transaction layer to kick the dirty buffers out to
1781 : * disk asynchronously. No point in trying to do this if
1782 : * the filesystem is shutting down.
1783 : */
1784 53855696 : xfs_ail_push(log->l_ailp, threshold_lsn);
1785 : }
1786 :
1787 : /*
1788 : * Stamp cycle number in every block
1789 : */
1790 : STATIC void
1791 45663624 : xlog_pack_data(
1792 : struct xlog *log,
1793 : struct xlog_in_core *iclog,
1794 : int roundoff)
1795 : {
1796 45663624 : int i, j, k;
1797 45663624 : int size = iclog->ic_offset + roundoff;
1798 45663624 : __be32 cycle_lsn;
1799 45663624 : char *dp;
1800 :
1801 45663624 : cycle_lsn = CYCLE_LSN_DISK(iclog->ic_header.h_lsn);
1802 :
1803 45663624 : dp = iclog->ic_datap;
1804 2672145980 : for (i = 0; i < BTOBB(size); i++) {
1805 2626497804 : if (i >= (XLOG_HEADER_CYCLE_SIZE / BBSIZE))
1806 : break;
1807 2626482203 : iclog->ic_header.h_cycle_data[i] = *(__be32 *)dp;
1808 2626482356 : *(__be32 *)dp = cycle_lsn;
1809 2626482356 : dp += BBSIZE;
1810 : }
1811 :
1812 45663777 : if (xfs_has_logv2(log->l_mp)) {
1813 45663717 : xlog_in_core_2_t *xhdr = iclog->ic_data;
1814 :
1815 50792101 : for ( ; i < BTOBB(size); i++) {
1816 5128385 : j = i / (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
1817 5128385 : k = i % (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
1818 5128385 : xhdr[j].hic_xheader.xh_cycle_data[k] = *(__be32 *)dp;
1819 5128384 : *(__be32 *)dp = cycle_lsn;
1820 5128384 : dp += BBSIZE;
1821 : }
1822 :
1823 45760252 : for (i = 1; i < log->l_iclog_heads; i++)
1824 96535 : xhdr[i].hic_xheader.xh_cycle = cycle_lsn;
1825 : }
1826 45663777 : }
1827 :
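/*
 * Minimal user-space sketch (not kernel code) of the cycle stamping done by
 * xlog_pack_data() above: the first 32 bits of every 512-byte basic block
 * are saved away and overwritten with the record's cycle number, so that
 * recovery can tell which blocks of a torn record reached stable storage.
 * The sketch uses a single flat save area; the kernel spreads the saved
 * words across the record header and the v2 extended headers, and handles
 * endianness, both of which are omitted here.
 */
#include <stddef.h>
#include <stdint.h>
#include <string.h>

void
sketch_pack_cycle(
	char		*data,		/* iclog payload */
	size_t		len,		/* payload length, multiple of 512 */
	uint32_t	cycle,		/* cycle number to stamp */
	uint32_t	*saved)		/* one slot per 512-byte block */
{
	size_t		i;

	for (i = 0; i < len / 512; i++) {
		memcpy(&saved[i], data + i * 512, sizeof(uint32_t));
		memcpy(data + i * 512, &cycle, sizeof(uint32_t));
	}
}
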
1828 : /*
1829 : * Calculate the checksum for a log buffer.
1830 : *
1831 : * This is a little more complicated than it should be because the various
1832 : * headers and the actual data are non-contiguous.
1833 : */
1834 : __le32
1835 50463865 : xlog_cksum(
1836 : struct xlog *log,
1837 : struct xlog_rec_header *rhead,
1838 : char *dp,
1839 : int size)
1840 : {
1841 50463865 : uint32_t crc;
1842 :
1843 : /* first generate the crc for the record header ... */
1844 50463865 : crc = xfs_start_cksum_update((char *)rhead,
1845 : sizeof(struct xlog_rec_header),
1846 : offsetof(struct xlog_rec_header, h_crc));
1847 :
1848 : /* ... then for additional cycle data for v2 logs ... */
1849 50462848 : if (xfs_has_logv2(log->l_mp)) {
1850 50462716 : union xlog_in_core2 *xhdr = (union xlog_in_core2 *)rhead;
1851 50462716 : int i;
1852 50462716 : int xheads;
1853 :
1854 50462716 : xheads = DIV_ROUND_UP(size, XLOG_HEADER_CYCLE_SIZE);
1855 :
1856 50587004 : for (i = 1; i < xheads; i++) {
1857 124766 : crc = crc32c(crc, &xhdr[i].hic_xheader,
1858 : sizeof(struct xlog_rec_ext_header));
1859 : }
1860 : }
1861 :
1862 : /* ... and finally for the payload */
1863 50462370 : crc = crc32c(crc, dp, size);
1864 :
1865 50462241 : return xfs_end_cksum(crc);
1866 : }
1867 :
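/*
 * User-space sketch (not kernel code) of the ordering used by xlog_cksum()
 * above, with the real CRC32c primitive abstracted into a caller-supplied
 * update callback.  The regions are summed in the same order as the kernel:
 * record header (whose h_crc field is assumed to have been zeroed by the
 * caller), then any v2 extended cycle-data headers, then the payload.  The
 * seed value is an assumption of the sketch.
 */
#include <stddef.h>
#include <stdint.h>

typedef uint32_t (*sketch_crc_fn)(uint32_t crc, const void *p, size_t len);

uint32_t
sketch_log_cksum(
	sketch_crc_fn	update,
	const void	*rhead,		/* record header, h_crc zeroed */
	size_t		rhead_len,
	const void	*xheads,	/* v2 extended headers, may be empty */
	size_t		xheads_len,
	const void	*payload,
	size_t		payload_len)
{
	uint32_t	crc = ~0U;	/* assumed seed */

	crc = update(crc, rhead, rhead_len);
	crc = update(crc, xheads, xheads_len);
	return update(crc, payload, payload_len);
}
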
1868 : static void
1869 45663825 : xlog_bio_end_io(
1870 : struct bio *bio)
1871 : {
1872 45663825 : struct xlog_in_core *iclog = bio->bi_private;
1873 :
1874 45663825 : queue_work(iclog->ic_log->l_ioend_workqueue,
1875 : &iclog->ic_end_io_work);
1876 45663825 : }
1877 :
1878 : static int
1879 45662103 : xlog_map_iclog_data(
1880 : struct bio *bio,
1881 : void *data,
1882 : size_t count)
1883 : {
1884 335475239 : do {
1885 335475239 : struct page *page = kmem_to_page(data);
1886 335475256 : unsigned int off = offset_in_page(data);
1887 335475256 : size_t len = min_t(size_t, count, PAGE_SIZE - off);
1888 :
1889 335475256 : if (bio_add_page(bio, page, len, off) != len)
1890 : return -EIO;
1891 :
1892 335476467 : data += len;
1893 335476467 : count -= len;
1894 335476467 : } while (count);
1895 :
1896 : return 0;
1897 : }
1898 :
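/*
 * User-space sketch (not kernel code) of the walk in xlog_map_iclog_data()
 * above: a virtually contiguous buffer is carved into chunks that never
 * cross a page boundary, which is the granularity at which each segment is
 * added to the bio.  The page size is an assumption of the sketch.
 */
#include <stddef.h>

#define SKETCH_PAGE_SIZE	4096u

size_t
sketch_count_bio_segments(
	size_t	offset_in_page,	/* offset of the buffer in its first page */
	size_t	count)		/* bytes to map */
{
	size_t	segments = 0;

	while (count) {
		size_t	len = SKETCH_PAGE_SIZE - offset_in_page;

		if (len > count)
			len = count;
		count -= len;
		offset_in_page = 0;	/* later chunks start page aligned */
		segments++;
	}
	return segments;
}
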
1899 : STATIC void
1900 45663137 : xlog_write_iclog(
1901 : struct xlog *log,
1902 : struct xlog_in_core *iclog,
1903 : uint64_t bno,
1904 : unsigned int count)
1905 : {
1906 45663137 : ASSERT(bno < log->l_logBBsize);
1907 45663137 : trace_xlog_iclog_write(iclog, _RET_IP_);
1908 :
1909 : /*
1910 : * We lock the iclogbufs here so that we can serialise against I/O
1911 : * completion during unmount. We might be processing a shutdown
1912 : * triggered during unmount, and that can occur asynchronously to the
1913 : * unmount thread, and hence we need to ensure that completes before
1914 : * tearing down the iclogbufs. Hence we need to hold the buffer lock
1915 : * across the log IO to archieve that.
1916 : */
1917 45662361 : down(&iclog->ic_sema);
1918 91324380 : if (xlog_is_shutdown(log)) {
1919 : /*
1920 : * It would seem logical to return EIO here, but we rely on
1921 : * the log state machine to propagate I/O errors instead of
1922 : * doing it here. We kick off the state machine and unlock
1923 : * the buffer manually; the code needs to be kept in sync
1924 : * with the I/O completion path.
1925 : */
1926 8 : xlog_state_done_syncing(iclog);
1927 8 : up(&iclog->ic_sema);
1928 8 : return;
1929 : }
1930 :
1931 : /*
1932 : * We use REQ_SYNC | REQ_IDLE here to tell the block layer that there are more
1933 : * IOs coming immediately after this one. This prevents the block layer
1934 : * writeback throttle from throttling log writes behind background
1935 : * metadata writeback and causing priority inversions.
1936 : */
1937 45662182 : bio_init(&iclog->ic_bio, xfs_buftarg_bdev(log->l_targ), iclog->ic_bvec,
1938 45662182 : howmany(count, PAGE_SIZE),
1939 : REQ_OP_WRITE | REQ_META | REQ_SYNC | REQ_IDLE);
1940 45662467 : iclog->ic_bio.bi_iter.bi_sector = log->l_logBBstart + bno;
1941 45662467 : iclog->ic_bio.bi_end_io = xlog_bio_end_io;
1942 45662467 : iclog->ic_bio.bi_private = iclog;
1943 :
1944 45662467 : if (iclog->ic_flags & XLOG_ICL_NEED_FLUSH) {
1945 9863609 : iclog->ic_bio.bi_opf |= REQ_PREFLUSH;
1946 : /*
1947 : * For external log devices, we also need to flush the data
1948 : * device cache first to ensure all metadata writeback covered
1949 : * by the LSN in this iclog is on stable storage. This is slow,
1950 : * but it *must* complete before we issue the external log IO.
1951 : *
1952 : * If the flush fails, we cannot conclude that past metadata
1953 : * writeback from the log succeeded. Repeating the flush is
1954 : * not possible, hence we must shut down with log IO error to
1955 : * avoid shutdown re-entering this path and erroring out again.
1956 : */
1957 12103383 : if (log->l_targ != log->l_mp->m_ddev_targp &&
1958 2239790 : xfs_buftarg_flush(log->l_mp->m_ddev_targp)) {
1959 0 : xlog_force_shutdown(log, SHUTDOWN_LOG_IO_ERROR);
1960 0 : return;
1961 : }
1962 : }
1963 45662451 : if (iclog->ic_flags & XLOG_ICL_NEED_FUA)
1964 6847813 : iclog->ic_bio.bi_opf |= REQ_FUA;
1965 :
1966 45662451 : iclog->ic_flags &= ~(XLOG_ICL_NEED_FLUSH | XLOG_ICL_NEED_FUA);
1967 :
1968 45662451 : if (xlog_map_iclog_data(&iclog->ic_bio, iclog->ic_data, count)) {
1969 0 : xlog_force_shutdown(log, SHUTDOWN_LOG_IO_ERROR);
1970 0 : return;
1971 : }
1972 45663278 : if (is_vmalloc_addr(iclog->ic_data))
1973 : flush_kernel_vmap_range(iclog->ic_data, count);
1974 :
1975 : /*
1976 : * If this log buffer would straddle the end of the log we will have
1977 : * to split it up into two bios, so that we can continue at the start.
1978 : */
1979 45663276 : if (bno + BTOBB(count) > log->l_logBBsize) {
1980 16420 : struct bio *split;
1981 :
1982 16420 : split = bio_split(&iclog->ic_bio, log->l_logBBsize - bno,
1983 : GFP_NOIO, &fs_bio_set);
1984 16420 : bio_chain(split, &iclog->ic_bio);
1985 16420 : submit_bio(split);
1986 :
1987 : /* restart at logical offset zero for the remainder */
1988 16420 : iclog->ic_bio.bi_iter.bi_sector = log->l_logBBstart;
1989 : }
1990 :
1991 45663276 : submit_bio(&iclog->ic_bio);
1992 : }
1993 :
1994 : /*
1995 : * We need to bump cycle number for the part of the iclog that is
1996 : * written to the start of the log. Watch out for the header magic
1997 : * number case, though.
1998 : */
1999 : static void
2000 16420 : xlog_split_iclog(
2001 : struct xlog *log,
2002 : void *data,
2003 : uint64_t bno,
2004 : unsigned int count)
2005 : {
2006 16420 : unsigned int split_offset = BBTOB(log->l_logBBsize - bno);
2007 16420 : unsigned int i;
2008 :
2009 528801 : for (i = split_offset; i < count; i += BBSIZE) {
2010 512381 : uint32_t cycle = get_unaligned_be32(data + i);
2011 :
2012 512381 : if (++cycle == XLOG_HEADER_MAGIC_NUM)
2013 0 : cycle++;
2014 512381 : put_unaligned_be32(cycle, data + i);
2015 : }
2016 16420 : }
2017 :
2018 : static int
2019 45663739 : xlog_calc_iclog_size(
2020 : struct xlog *log,
2021 : struct xlog_in_core *iclog,
2022 : uint32_t *roundoff)
2023 : {
2024 45663739 : uint32_t count_init, count;
2025 :
2026 : /* Add for LR header */
2027 45663739 : count_init = log->l_iclog_hsize + iclog->ic_offset;
2028 45663739 : count = roundup(count_init, log->l_iclog_roundoff);
2029 :
2030 45663739 : *roundoff = count - count_init;
2031 :
2032 45663739 : ASSERT(count >= count_init);
2033 45663739 : ASSERT(*roundoff < log->l_iclog_roundoff);
2034 45663739 : return count;
2035 : }
2036 :
2037 : /*
2038 : * Flush out the in-core log (iclog) to the on-disk log in an asynchronous
2039 : * fashion. Previously, we should have moved the current iclog
2040 : * ptr in the log to point to the next available iclog. This allows further
2041 : * write to continue while this code syncs out an iclog ready to go.
2042 : * Before an in-core log can be written out, the data section must be scanned
2043 : * to save away the 1st word of each BBSIZE block into the header. We replace
2044 : * it with the current cycle count. Each BBSIZE block is tagged with the
2045 : * cycle count because there is an implicit assumption that drives will
2046 : * guarantee that entire 512 byte blocks get written at once. In other words,
2047 : * we can't have part of a 512 byte block written and part not written. By
2048 : * tagging each block, we will know which blocks are valid when recovering
2049 : * after an unclean shutdown.
2050 : *
2051 : * This routine is single threaded on the iclog. No other thread can be in
2052 : * this routine with the same iclog. Changing contents of iclog can there-
2053 : * fore be done without grabbing the state machine lock. Updating the global
2054 : * log will require grabbing the lock though.
2055 : *
2056 : * The entire log manager uses a logical block numbering scheme. Only
2057 : * xlog_write_iclog knows about the fact that the log may not start with
2058 : * block zero on a given device.
2059 : */
2060 : STATIC void
2061 45663790 : xlog_sync(
2062 : struct xlog *log,
2063 : struct xlog_in_core *iclog,
2064 : struct xlog_ticket *ticket)
2065 : {
2066 45663790 : unsigned int count; /* byte count of bwrite */
2067 45663790 : unsigned int roundoff; /* roundoff to BB or stripe */
2068 45663790 : uint64_t bno;
2069 45663790 : unsigned int size;
2070 :
2071 45663790 : ASSERT(atomic_read(&iclog->ic_refcnt) == 0);
2072 45663790 : trace_xlog_iclog_sync(iclog, _RET_IP_);
2073 :
2074 45663746 : count = xlog_calc_iclog_size(log, iclog, &roundoff);
2075 :
2076 : /*
2077 : * If we have a ticket, account for the roundoff via the ticket
2078 : * reservation to avoid touching the hot grant heads needlessly.
2079 : * Otherwise, we have to move grant heads directly.
2080 : */
2081 45663733 : if (ticket) {
2082 39688331 : ticket->t_curr_res -= roundoff;
2083 : } else {
2084 5975402 : xlog_grant_add_space(log, &log->l_reserve_head.grant, roundoff);
2085 5975402 : xlog_grant_add_space(log, &log->l_write_head.grant, roundoff);
2086 : }
2087 :
2088 : /* put cycle number in every block */
2089 45663733 : xlog_pack_data(log, iclog, roundoff);
2090 :
2091 : /* real byte length */
2092 45663778 : size = iclog->ic_offset;
2093 45663778 : if (xfs_has_logv2(log->l_mp))
2094 45663724 : size += roundoff;
2095 45663778 : iclog->ic_header.h_len = cpu_to_be32(size);
2096 :
2097 45663778 : XFS_STATS_INC(log->l_mp, xs_log_writes);
2098 45663712 : XFS_STATS_ADD(log->l_mp, xs_log_blocks, BTOBB(count));
2099 :
2100 45663711 : bno = BLOCK_LSN(be64_to_cpu(iclog->ic_header.h_lsn));
2101 :
2102 : /* Do we need to split this write into 2 parts? */
2103 45663711 : if (bno + BTOBB(count) > log->l_logBBsize)
2104 16420 : xlog_split_iclog(log, &iclog->ic_header, bno, count);
2105 :
2106 : /* calculate the checksum */
2107 91325466 : iclog->ic_header.h_crc = xlog_cksum(log, &iclog->ic_header,
2108 45663711 : iclog->ic_datap, size);
2109 : /*
2110 : * Intentionally corrupt the log record CRC based on the error injection
2111 : * frequency, if defined. This facilitates testing log recovery in the
2112 : * event of torn writes. Hence, set the IOABORT state to abort the log
2113 : * write on I/O completion and shutdown the fs. The subsequent mount
2114 : * detects the bad CRC and attempts to recover.
2115 : */
2116 : #ifdef DEBUG
2117 45661755 : if (XFS_TEST_ERROR(false, log->l_mp, XFS_ERRTAG_LOG_BAD_CRC)) {
2118 60 : iclog->ic_header.h_crc &= cpu_to_le32(0xAAAAAAAA);
2119 60 : iclog->ic_fail_crc = true;
2120 60 : xfs_warn(log->l_mp,
2121 : "Intentionally corrupted log record at LSN 0x%llx. Shutdown imminent.",
2122 : be64_to_cpu(iclog->ic_header.h_lsn));
2123 : }
2124 : #endif
2125 45662039 : xlog_verify_iclog(log, iclog, count);
2126 45663683 : xlog_write_iclog(log, iclog, bno, count);
2127 45663529 : }
2128 :
2129 : /*
2130 : * Deallocate a log structure
2131 : */
2132 : STATIC void
2133 66867 : xlog_dealloc_log(
2134 : struct xlog *log)
2135 : {
2136 66867 : xlog_in_core_t *iclog, *next_iclog;
2137 66867 : int i;
2138 :
2139 : /*
2140 : * Destroy the CIL after waiting for iclog IO completion because an
2141 : * iclog EIO error will try to shut down the log, which accesses the
2142 : * CIL to wake up the waiters.
2143 : */
2144 66867 : xlog_cil_destroy(log);
2145 :
2146 66867 : iclog = log->l_iclog;
2147 601743 : for (i = 0; i < log->l_iclog_bufs; i++) {
2148 534876 : next_iclog = iclog->ic_next;
2149 534876 : kmem_free(iclog->ic_data);
2150 534876 : kmem_free(iclog);
2151 534876 : iclog = next_iclog;
2152 : }
2153 :
2154 66867 : log->l_mp->m_log = NULL;
2155 66867 : destroy_workqueue(log->l_ioend_workqueue);
2156 66867 : kmem_free(log);
2157 66867 : }
2158 :
2159 : /*
2160 : * Update counters atomically now that memcpy is done.
2161 : */
2162 : static inline void
2163 : xlog_state_finish_copy(
2164 : struct xlog *log,
2165 : struct xlog_in_core *iclog,
2166 : int record_cnt,
2167 : int copy_bytes)
2168 : {
2169 52698364 : lockdep_assert_held(&log->l_icloglock);
2170 :
2171 52698364 : be32_add_cpu(&iclog->ic_header.h_num_logops, record_cnt);
2172 52698364 : iclog->ic_offset += copy_bytes;
2173 : }
2174 :
2175 : /*
2176 : * print out info relating to regions written which consume
2177 : * the reservation
2178 : */
2179 : void
2180 0 : xlog_print_tic_res(
2181 : struct xfs_mount *mp,
2182 : struct xlog_ticket *ticket)
2183 : {
2184 0 : xfs_warn(mp, "ticket reservation summary:");
2185 0 : xfs_warn(mp, " unit res = %d bytes", ticket->t_unit_res);
2186 0 : xfs_warn(mp, " current res = %d bytes", ticket->t_curr_res);
2187 0 : xfs_warn(mp, " original count = %d", ticket->t_ocnt);
2188 0 : xfs_warn(mp, " remaining count = %d", ticket->t_cnt);
2189 0 : }
2190 :
2191 : /*
2192 : * Print a summary of the transaction.
2193 : */
2194 : void
2195 0 : xlog_print_trans(
2196 : struct xfs_trans *tp)
2197 : {
2198 0 : struct xfs_mount *mp = tp->t_mountp;
2199 0 : struct xfs_log_item *lip;
2200 :
2201 : /* dump core transaction and ticket info */
2202 0 : xfs_warn(mp, "transaction summary:");
2203 0 : xfs_warn(mp, " log res = %d", tp->t_log_res);
2204 0 : xfs_warn(mp, " log count = %d", tp->t_log_count);
2205 0 : xfs_warn(mp, " flags = 0x%x", tp->t_flags);
2206 :
2207 0 : xlog_print_tic_res(mp, tp->t_ticket);
2208 :
2209 : /* dump each log item */
2210 0 : list_for_each_entry(lip, &tp->t_items, li_trans) {
2211 0 : struct xfs_log_vec *lv = lip->li_lv;
2212 0 : struct xfs_log_iovec *vec;
2213 0 : int i;
2214 :
2215 0 : xfs_warn(mp, "log item: ");
2216 0 : xfs_warn(mp, " type = 0x%x", lip->li_type);
2217 0 : xfs_warn(mp, " flags = 0x%lx", lip->li_flags);
2218 0 : if (!lv)
2219 0 : continue;
2220 0 : xfs_warn(mp, " niovecs = %d", lv->lv_niovecs);
2221 0 : xfs_warn(mp, " size = %d", lv->lv_size);
2222 0 : xfs_warn(mp, " bytes = %d", lv->lv_bytes);
2223 0 : xfs_warn(mp, " buf len = %d", lv->lv_buf_len);
2224 :
2225 : /* dump each iovec for the log item */
2226 0 : vec = lv->lv_iovecp;
2227 0 : for (i = 0; i < lv->lv_niovecs; i++) {
2228 0 : int dumplen = min(vec->i_len, 32);
2229 :
2230 0 : xfs_warn(mp, " iovec[%d]", i);
2231 0 : xfs_warn(mp, " type = 0x%x", vec->i_type);
2232 0 : xfs_warn(mp, " len = %d", vec->i_len);
2233 0 : xfs_warn(mp, " first %d bytes of iovec[%d]:", dumplen, i);
2234 0 : xfs_hex_dump(vec->i_addr, dumplen);
2235 :
2236 0 : vec++;
2237 : }
2238 : }
2239 0 : }
2240 :
2241 : static inline void
2242 3843260983 : xlog_write_iovec(
2243 : struct xlog_in_core *iclog,
2244 : uint32_t *log_offset,
2245 : void *data,
2246 : uint32_t write_len,
2247 : int *bytes_left,
2248 : uint32_t *record_cnt,
2249 : uint32_t *data_cnt)
2250 : {
2251 3843260983 : ASSERT(*log_offset < iclog->ic_log->l_iclog_size);
2252 3843260983 : ASSERT(*log_offset % sizeof(int32_t) == 0);
2253 3843260983 : ASSERT(write_len % sizeof(int32_t) == 0);
2254 :
2255 7686521966 : memcpy(iclog->ic_datap + *log_offset, data, write_len);
2256 3843260983 : *log_offset += write_len;
2257 3843260983 : *bytes_left -= write_len;
2258 3843260983 : (*record_cnt)++;
2259 3843260983 : *data_cnt += write_len;
2260 3843260983 : }
2261 :
2262 : /*
2263 : * Write log vectors into a single iclog which is guaranteed by the caller
2264 : * to have enough space to write the entire log vector into.
2265 : */
2266 : static void
2267 1374057833 : xlog_write_full(
2268 : struct xfs_log_vec *lv,
2269 : struct xlog_ticket *ticket,
2270 : struct xlog_in_core *iclog,
2271 : uint32_t *log_offset,
2272 : uint32_t *len,
2273 : uint32_t *record_cnt,
2274 : uint32_t *data_cnt)
2275 : {
2276 1374057833 : int index;
2277 :
2278 1374057833 : ASSERT(*log_offset + *len <= iclog->ic_size ||
2279 : iclog->ic_state == XLOG_STATE_WANT_SYNC);
2280 :
2281 : /*
2282 : * Ordered log vectors have no regions to write so this
2283 : * loop will naturally skip them.
2284 : */
2285 5074007566 : for (index = 0; index < lv->lv_niovecs; index++) {
2286 3699898071 : struct xfs_log_iovec *reg = &lv->lv_iovecp[index];
2287 3699898071 : struct xlog_op_header *ophdr = reg->i_addr;
2288 :
2289 3699898071 : ophdr->oh_tid = cpu_to_be32(ticket->t_tid);
2290 3699898071 : xlog_write_iovec(iclog, log_offset, reg->i_addr,
2291 3699898071 : reg->i_len, len, record_cnt, data_cnt);
2292 : }
2293 1374109495 : }
2294 :
2295 : static int
2296 38960411 : xlog_write_get_more_iclog_space(
2297 : struct xlog_ticket *ticket,
2298 : struct xlog_in_core **iclogp,
2299 : uint32_t *log_offset,
2300 : uint32_t len,
2301 : uint32_t *record_cnt,
2302 : uint32_t *data_cnt)
2303 : {
2304 38960411 : struct xlog_in_core *iclog = *iclogp;
2305 38960411 : struct xlog *log = iclog->ic_log;
2306 38960411 : int error;
2307 :
2308 38960411 : spin_lock(&log->l_icloglock);
2309 38960539 : ASSERT(iclog->ic_state == XLOG_STATE_WANT_SYNC);
2310 38960539 : xlog_state_finish_copy(log, iclog, *record_cnt, *data_cnt);
2311 38960539 : error = xlog_state_release_iclog(log, iclog, ticket);
2312 38960538 : spin_unlock(&log->l_icloglock);
2313 38960522 : if (error)
2314 : return error;
2315 :
2316 38960511 : error = xlog_state_get_iclog_space(log, len, &iclog, ticket,
2317 : log_offset);
2318 38960523 : if (error)
2319 : return error;
2320 38957810 : *record_cnt = 0;
2321 38957810 : *data_cnt = 0;
2322 38957810 : *iclogp = iclog;
2323 38957810 : return 0;
2324 : }
2325 :
2326 : /*
2327 : * Write log vectors into a single iclog which is smaller than the current chain
2328 : * length. We write until we cannot fit a full record into the remaining space
2329 : * and then stop. We return the log vector that is to be written that cannot
2330 : * wholly fit in the iclog.
2331 : */
2332 : static int
2333 38958782 : xlog_write_partial(
2334 : struct xfs_log_vec *lv,
2335 : struct xlog_ticket *ticket,
2336 : struct xlog_in_core **iclogp,
2337 : uint32_t *log_offset,
2338 : uint32_t *len,
2339 : uint32_t *record_cnt,
2340 : uint32_t *data_cnt)
2341 : {
2342 38958782 : struct xlog_in_core *iclog = *iclogp;
2343 38958782 : struct xlog_op_header *ophdr;
2344 38958782 : int index = 0;
2345 38958782 : uint32_t rlen;
2346 38958782 : int error;
2347 :
2348 : /* walk the logvec, copying until we run out of space in the iclog */
2349 145459464 : for (index = 0; index < lv->lv_niovecs; index++) {
2350 106503361 : struct xfs_log_iovec *reg = &lv->lv_iovecp[index];
2351 106503361 : uint32_t reg_offset = 0;
2352 :
2353 : /*
2354 : * The first region of a continuation must have a non-zero
2355 : * length otherwise log recovery will just skip over it and
2356 : * start recovering from the next opheader it finds. Because we
2357 : * mark the next opheader as a continuation, recovery will then
2358 : * incorrectly add the continuation to the previous region and
2359 : * that breaks stuff.
2360 : *
2361 : * Hence if there isn't space for region data after the
2362 : * opheader, then we need to start afresh with a new iclog.
2363 : */
2364 106503361 : if (iclog->ic_size - *log_offset <=
2365 : sizeof(struct xlog_op_header)) {
2366 1773440 : error = xlog_write_get_more_iclog_space(ticket,
2367 : &iclog, log_offset, *len, record_cnt,
2368 : data_cnt);
2369 1773439 : if (error)
2370 97 : return error;
2371 : }
2372 :
2373 106503263 : ophdr = reg->i_addr;
2374 106503263 : rlen = min_t(uint32_t, reg->i_len, iclog->ic_size - *log_offset);
2375 :
2376 106503263 : ophdr->oh_tid = cpu_to_be32(ticket->t_tid);
2377 106503263 : ophdr->oh_len = cpu_to_be32(rlen - sizeof(struct xlog_op_header));
2378 106503263 : if (rlen != reg->i_len)
2379 37185576 : ophdr->oh_flags |= XLOG_CONTINUE_TRANS;
2380 :
2381 106503263 : xlog_write_iovec(iclog, log_offset, reg->i_addr,
2382 : rlen, len, record_cnt, data_cnt);
2383 :
2384 : /* If we wrote the whole region, move to the next. */
2385 106502945 : if (rlen == reg->i_len)
2386 69317707 : continue;
2387 :
2388 : /*
2389 : * We now have a partially written iovec, but it can span
2390 : * multiple iclogs so we loop here. First we release the iclog
2391 : * we currently have, then we get a new iclog and add a new
2392 : * opheader. Then we continue copying from where we were until
2393 : * we either complete the iovec or fill the iclog. If we
2394 : * complete the iovec, then we increment the index and go right
2395 : * back to the top of the outer loop. If we fill the iclog, we
2396 : * run the inner loop again.
2397 : *
2398 : * This is complicated by the tail of a region using all the
2399 : * space in an iclog and hence requiring us to release the iclog
2400 : * and get a new one before returning to the outer loop. We must
2401 : * always guarantee that we exit this inner loop with at least
2402 : * space for log transaction opheaders left in the current
2403 : * iclog, hence we cannot just terminate the loop at the end
2404 : * of the of the continuation. So we loop while there is no
2405 : * space left in the current iclog, and check for the end of the
2406 : * continuation after getting a new iclog.
2407 : */
2408 37186576 : do {
2409 : /*
2410 : * Ensure we include the continuation opheader in the
2411 : * space we need in the new iclog by adding that size
2412 : * to the length we require. This continuation opheader
2413 : * needs to be accounted to the ticket as the space it
2414 : * consumes hasn't been accounted to the lv we are
2415 : * writing.
2416 : */
2417 37186576 : error = xlog_write_get_more_iclog_space(ticket,
2418 : &iclog, log_offset,
2419 37186576 : *len + sizeof(struct xlog_op_header),
2420 : record_cnt, data_cnt);
2421 37186884 : if (error)
2422 2619 : return error;
2423 :
2424 37184265 : ophdr = iclog->ic_datap + *log_offset;
2425 37184265 : ophdr->oh_tid = cpu_to_be32(ticket->t_tid);
2426 37184265 : ophdr->oh_clientid = XFS_TRANSACTION;
2427 37184265 : ophdr->oh_res2 = 0;
2428 37184265 : ophdr->oh_flags = XLOG_WAS_CONT_TRANS;
2429 :
2430 37184265 : ticket->t_curr_res -= sizeof(struct xlog_op_header);
2431 37184265 : *log_offset += sizeof(struct xlog_op_header);
2432 37184265 : *data_cnt += sizeof(struct xlog_op_header);
2433 :
2434 : /*
2435 : * If rlen fits in the iclog, then end the region
2436 : * continuation. Otherwise we're going around again.
2437 : */
2438 37184265 : reg_offset += rlen;
2439 37184265 : rlen = reg->i_len - reg_offset;
2440 37184265 : if (rlen <= iclog->ic_size - *log_offset)
2441 37182938 : ophdr->oh_flags |= XLOG_END_TRANS;
2442 : else
2443 1327 : ophdr->oh_flags |= XLOG_CONTINUE_TRANS;
2444 :
2445 37184265 : rlen = min_t(uint32_t, rlen, iclog->ic_size - *log_offset);
2446 37184265 : ophdr->oh_len = cpu_to_be32(rlen);
2447 :
2448 37184265 : xlog_write_iovec(iclog, log_offset,
2449 37184265 : reg->i_addr + reg_offset,
2450 : rlen, len, record_cnt, data_cnt);
2451 :
2452 37184313 : } while (ophdr->oh_flags & XLOG_CONTINUE_TRANS);
2453 : }
2454 :
2455 : /*
2456 : * No more iovecs remain in this logvec so return the next log vec to
2457 : * the caller so it can go back to fast path copying.
2458 : */
2459 38956103 : *iclogp = iclog;
2460 38956103 : return 0;
2461 : }
2462 :
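/*
 * User-space sketch (not kernel code) of how xlog_write_partial() above
 * carves one oversized region into continuation pieces: the first piece
 * takes whatever space is left in the current iclog, and every later piece
 * is preceded by a continuation opheader in a fresh iclog.  Treating every
 * fresh iclog as having the same usable space (which must exceed the
 * opheader size) is a simplification of this sketch; in the kernel that
 * space comes back from xlog_state_get_iclog_space() each time around.
 */
#include <stddef.h>

size_t
sketch_continuation_pieces(
	size_t	region_len,	/* bytes in the region being written */
	size_t	space_left,	/* space left in the current iclog */
	size_t	fresh_space,	/* usable space in each new iclog */
	size_t	ophdr_size)	/* size of one continuation opheader */
{
	size_t	pieces = 1;
	size_t	copied = region_len < space_left ? region_len : space_left;

	while (copied < region_len) {
		size_t	room = fresh_space - ophdr_size;
		size_t	len = region_len - copied;

		if (len > room)
			len = room;
		copied += len;
		pieces++;
	}
	return pieces;
}
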
2463 : /*
2464 : * Write some region out to in-core log
2465 : *
2466 : * This will be called when writing externally provided regions or when
2467 : * writing out a commit record for a given transaction.
2468 : *
2469 : * General algorithm:
2470 : * 1. Find total length of this write. This may include adding to the
2471 : * lengths passed in.
2472 : * 2. Check whether we violate the ticket's reservation.
2473 : * 3. While writing to this iclog
2474 : * A. Reserve as much space in this iclog as we can get
2475 : * B. If this is first write, save away start lsn
2476 : * C. While writing this region:
2477 : * 1. If first write of transaction, write start record
2478 : * 2. Write log operation header (header per region)
2479 : * 3. Find out if we can fit entire region into this iclog
2480 : * 4. Potentially, verify destination memcpy ptr
2481 : * 5. Memcpy (partial) region
2482 : * 6. If partial copy, release iclog; otherwise, continue
2483 : * copying more regions into current iclog
2484 : * 4. Mark want sync bit (in simulation mode)
2485 : * 5. Release iclog for potential flush to on-disk log.
2486 : *
2487 : * ERRORS:
2488 : * 1. Panic if reservation is overrun. This should never happen since
2489 : * reservation amounts are generated internal to the filesystem.
2490 : * NOTES:
2491 : * 1. Tickets are single threaded data structures.
2492 : * 2. The XLOG_END_TRANS & XLOG_CONTINUE_TRANS flags are passed down to the
2493 : * syncing routine. When a single log_write region needs to span
2494 : * multiple in-core logs, the XLOG_CONTINUE_TRANS bit should be set
2495 : * on all log operation writes which don't contain the end of the
2496 : * region. The XLOG_END_TRANS bit is used for the in-core log
2497 : * operation which contains the end of the continued log_write region.
2498 : * 3. When xlog_state_get_iclog_space() grabs the rest of the current iclog,
2499 : * we don't really know exactly how much space will be used. As a result,
2500 : * we don't update ic_offset until the end when we know exactly how many
2501 : * bytes have been written out.
2502 : */
2503 : int
2504 13740542 : xlog_write(
2505 : struct xlog *log,
2506 : struct xfs_cil_ctx *ctx,
2507 : struct list_head *lv_chain,
2508 : struct xlog_ticket *ticket,
2509 : uint32_t len)
2510 :
2511 : {
2512 13740542 : struct xlog_in_core *iclog = NULL;
2513 13740542 : struct xfs_log_vec *lv;
2514 13740542 : uint32_t record_cnt = 0;
2515 13740542 : uint32_t data_cnt = 0;
2516 13740542 : int error = 0;
2517 13740542 : int log_offset;
2518 :
2519 13740542 : if (ticket->t_curr_res < 0) {
2520 0 : xfs_alert_tag(log->l_mp, XFS_PTAG_LOGRES,
2521 : "ctx ticket reservation ran out. Need to up reservation");
2522 0 : xlog_print_tic_res(log->l_mp, ticket);
2523 0 : xlog_force_shutdown(log, SHUTDOWN_LOG_IO_ERROR);
2524 : }
2525 :
2526 13740542 : error = xlog_state_get_iclog_space(log, len, &iclog, ticket,
2527 : &log_offset);
2528 13740538 : if (error)
2529 : return error;
2530 :
2531 13740538 : ASSERT(log_offset <= iclog->ic_size - 1);
2532 :
2533 : /*
2534 : * If we have a context pointer, pass it the first iclog we are
2535 : * writing to so it can record state needed for iclog write
2536 : * ordering.
2537 : */
2538 13740538 : if (ctx)
2539 13684286 : xlog_cil_set_ctx_write_state(ctx, iclog);
2540 :
2541 1426749975 : list_for_each_entry(lv, lv_chain, lv_list) {
2542 : /*
2543 : * If the entire log vec does not fit in the iclog, punt it to
2544 : * the partial copy loop which can handle this case.
2545 : */
2546 1413012152 : if (lv->lv_niovecs &&
2547 1410887629 : lv->lv_bytes > iclog->ic_size - log_offset) {
2548 38958835 : error = xlog_write_partial(lv, ticket, &iclog,
2549 : &log_offset, &len, &record_cnt,
2550 : &data_cnt);
2551 38958577 : if (error) {
2552 : /*
2553 : * We have no iclog to release, so just return
2554 : * the error immediately.
2555 : */
2556 2716 : return error;
2557 : }
2558 : } else {
2559 1374053317 : xlog_write_full(lv, ticket, iclog, &log_offset,
2560 : &len, &record_cnt, &data_cnt);
2561 : }
2562 : }
2563 13737823 : ASSERT(len == 0);
2564 :
2565 : /*
2566 : * We've already been guaranteed that the last writes will fit inside
2567 : * the current iclog, and hence it will already have the space used by
2568 : * those writes accounted to it. Hence we do not need to update the
2569 : * iclog with the number of bytes written here.
2570 : */
2571 13737823 : spin_lock(&log->l_icloglock);
2572 13737825 : xlog_state_finish_copy(log, iclog, record_cnt, 0);
2573 13737825 : error = xlog_state_release_iclog(log, iclog, ticket);
2574 13737827 : spin_unlock(&log->l_icloglock);
2575 :
2576 13737827 : return error;
2577 : }
2578 :
2579 : static void
2580 45651785 : xlog_state_activate_iclog(
2581 : struct xlog_in_core *iclog,
2582 : int *iclogs_changed)
2583 : {
2584 91303570 : ASSERT(list_empty_careful(&iclog->ic_callbacks));
2585 45651785 : trace_xlog_iclog_activate(iclog, _RET_IP_);
2586 :
2587 : /*
2588 : * If the number of ops in this iclog indicate it just contains the
2589 : * dummy transaction, we can change state into IDLE (the second time
2590 : * around). Otherwise we should change the state into NEED a dummy.
2591 : * We don't need to cover the dummy.
2592 : */
2593 45651785 : if (*iclogs_changed == 0 &&
2594 45651785 : iclog->ic_header.h_num_logops == cpu_to_be32(XLOG_COVER_OPS)) {
2595 360325 : *iclogs_changed = 1;
2596 : } else {
2597 : /*
2598 : * We have two dirty iclogs so start over. This could also be
2599 : * num of ops indicating this is not the dummy going out.
2600 : */
2601 45291460 : *iclogs_changed = 2;
2602 : }
2603 :
2604 45651785 : iclog->ic_state = XLOG_STATE_ACTIVE;
2605 45651785 : iclog->ic_offset = 0;
2606 45651785 : iclog->ic_header.h_num_logops = 0;
2607 45651785 : memset(iclog->ic_header.h_cycle_data, 0,
2608 : sizeof(iclog->ic_header.h_cycle_data));
2609 45651785 : iclog->ic_header.h_lsn = 0;
2610 45651785 : iclog->ic_header.h_tail_lsn = 0;
2611 45651785 : }
2612 :
2613 : /*
2614 : * Loop through all iclogs and mark all iclogs currently marked DIRTY as
2615 : * ACTIVE after iclog I/O has completed.
2616 : */
2617 : static void
2618 45651785 : xlog_state_activate_iclogs(
2619 : struct xlog *log,
2620 : int *iclogs_changed)
2621 : {
2622 45651785 : struct xlog_in_core *iclog = log->l_iclog;
2623 :
2624 230980118 : do {
2625 230980118 : if (iclog->ic_state == XLOG_STATE_DIRTY)
2626 45651785 : xlog_state_activate_iclog(iclog, iclogs_changed);
2627 : /*
2628 : * The ordering of marking iclogs ACTIVE must be maintained, so
2629 : * an iclog doesn't become ACTIVE beyond one that is SYNCING.
2630 : */
2631 185328333 : else if (iclog->ic_state != XLOG_STATE_ACTIVE)
2632 : break;
2633 194025156 : } while ((iclog = iclog->ic_next) != log->l_iclog);
2634 45651785 : }
2635 :
2636 : static int
2637 45651785 : xlog_covered_state(
2638 : int prev_state,
2639 : int iclogs_changed)
2640 : {
2641 : /*
2642 : * We go to NEED for any non-covering writes. We go to NEED2 if we just
2643 : * wrote the first covering record (DONE). We go to IDLE if we just
2644 : * wrote the second covering record (DONE2) and remain in IDLE until a
2645 : * non-covering write occurs.
2646 : */
2647 45651785 : switch (prev_state) {
2648 207805 : case XLOG_STATE_COVER_IDLE:
2649 207805 : if (iclogs_changed == 1)
2650 40478 : return XLOG_STATE_COVER_IDLE;
2651 : fallthrough;
2652 : case XLOG_STATE_COVER_NEED:
2653 : case XLOG_STATE_COVER_NEED2:
2654 : break;
2655 102538 : case XLOG_STATE_COVER_DONE:
2656 102538 : if (iclogs_changed == 1)
2657 102538 : return XLOG_STATE_COVER_NEED2;
2658 : break;
2659 102445 : case XLOG_STATE_COVER_DONE2:
2660 102445 : if (iclogs_changed == 1)
2661 102445 : return XLOG_STATE_COVER_IDLE;
2662 : break;
2663 0 : default:
2664 0 : ASSERT(0);
2665 : }
2666 :
2667 : return XLOG_STATE_COVER_NEED;
2668 : }
2669 :
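/*
 * Compact restatement (not kernel code) of the covering state machine
 * implemented by xlog_covered_state() above. "dummy" corresponds to
 * iclogs_changed == 1, i.e. the iclogs that just went dirty contained only
 * the covering dummy transaction.
 */
enum sketch_cover_state {
	SKETCH_COVER_IDLE,
	SKETCH_COVER_NEED,
	SKETCH_COVER_DONE,
	SKETCH_COVER_NEED2,
	SKETCH_COVER_DONE2,
};

enum sketch_cover_state
sketch_covered_next(
	enum sketch_cover_state	prev,
	int			dummy)
{
	switch (prev) {
	case SKETCH_COVER_IDLE:
		return dummy ? SKETCH_COVER_IDLE : SKETCH_COVER_NEED;
	case SKETCH_COVER_DONE:
		return dummy ? SKETCH_COVER_NEED2 : SKETCH_COVER_NEED;
	case SKETCH_COVER_DONE2:
		return dummy ? SKETCH_COVER_IDLE : SKETCH_COVER_NEED;
	default:
		return SKETCH_COVER_NEED;
	}
}
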
2670 : STATIC void
2671 45651785 : xlog_state_clean_iclog(
2672 : struct xlog *log,
2673 : struct xlog_in_core *dirty_iclog)
2674 : {
2675 45651785 : int iclogs_changed = 0;
2676 :
2677 45651785 : trace_xlog_iclog_clean(dirty_iclog, _RET_IP_);
2678 :
2679 45651785 : dirty_iclog->ic_state = XLOG_STATE_DIRTY;
2680 :
2681 45651785 : xlog_state_activate_iclogs(log, &iclogs_changed);
2682 45651785 : wake_up_all(&dirty_iclog->ic_force_wait);
2683 :
2684 45651785 : if (iclogs_changed) {
2685 45651785 : log->l_covered_state = xlog_covered_state(log->l_covered_state,
2686 : iclogs_changed);
2687 : }
2688 45651785 : }
2689 :
2690 : STATIC xfs_lsn_t
2691 45651785 : xlog_get_lowest_lsn(
2692 : struct xlog *log)
2693 : {
2694 45651785 : struct xlog_in_core *iclog = log->l_iclog;
2695 45651785 : xfs_lsn_t lowest_lsn = 0, lsn;
2696 :
2697 365214160 : do {
2698 365214160 : if (iclog->ic_state == XLOG_STATE_ACTIVE ||
2699 : iclog->ic_state == XLOG_STATE_DIRTY)
2700 148633461 : continue;
2701 :
2702 216580699 : lsn = be64_to_cpu(iclog->ic_header.h_lsn);
2703 216580699 : if ((lsn && !lowest_lsn) || XFS_LSN_CMP(lsn, lowest_lsn) < 0)
2704 : lowest_lsn = lsn;
2705 365214160 : } while ((iclog = iclog->ic_next) != log->l_iclog);
2706 :
2707 45651785 : return lowest_lsn;
2708 : }
2709 :
2710 : /*
2711 : * Completion of an iclog IO does not imply that a transaction has completed, as
2712 : * transactions can be large enough to span many iclogs. We cannot change the
2713 : * tail of the log half way through a transaction as this may be the only
2714 : * transaction in the log and moving the tail to point to the middle of it
2715 : * will prevent recovery from finding the start of the transaction. Hence we
2716 : * should only update the last_sync_lsn if this iclog contains transaction
2717 : * completion callbacks on it.
2718 : *
2719 : * We have to do this before we drop the icloglock to ensure we are the only one
2720 : * that can update it.
2721 : *
2722 : * If we are moving the last_sync_lsn forwards, we also need to ensure we kick
2723 : * the reservation grant head pushing. This is due to the fact that the push
2724 : * target is bound by the current last_sync_lsn value. Hence if we have a large
2725 : * amount of log space bound up in this committing transaction then the
2726 : * last_sync_lsn value may be the limiting factor preventing tail pushing from
2727 : * freeing space in the log. Hence once we've updated the last_sync_lsn we
2728 : * should push the AIL to ensure the push target (and hence the grant head) is
2729 : * no longer bound by the old log head location and can move forwards and make
2730 : * progress again.
2731 : */
2732 : static void
2733 45651785 : xlog_state_set_callback(
2734 : struct xlog *log,
2735 : struct xlog_in_core *iclog,
2736 : xfs_lsn_t header_lsn)
2737 : {
2738 45651785 : trace_xlog_iclog_callback(iclog, _RET_IP_);
2739 45651785 : iclog->ic_state = XLOG_STATE_CALLBACK;
2740 :
2741 91303570 : ASSERT(XFS_LSN_CMP(atomic64_read(&log->l_last_sync_lsn),
2742 : header_lsn) <= 0);
2743 :
2744 45651785 : if (list_empty_careful(&iclog->ic_callbacks))
2745 : return;
2746 :
2747 6787716 : atomic64_set(&log->l_last_sync_lsn, header_lsn);
2748 6787716 : xlog_grant_push_ail(log, 0);
2749 : }
2750 :
2751 : /*
2752 : * Return true if we need to stop processing, false to continue to the next
2753 : * iclog. The caller will need to run callbacks if the iclog is returned in the
2754 : * XLOG_STATE_CALLBACK state.
2755 : */
2756 : static bool
2757 384213709 : xlog_state_iodone_process_iclog(
2758 : struct xlog *log,
2759 : struct xlog_in_core *iclog)
2760 : {
2761 384213709 : xfs_lsn_t lowest_lsn;
2762 384213709 : xfs_lsn_t header_lsn;
2763 :
2764 384213709 : switch (iclog->ic_state) {
2765 : case XLOG_STATE_ACTIVE:
2766 : case XLOG_STATE_DIRTY:
2767 : /*
2768 : * Skip all iclogs in the ACTIVE & DIRTY states:
2769 : */
2770 : return false;
2771 45651785 : case XLOG_STATE_DONE_SYNC:
2772 : /*
2773 : * Now that we have an iclog that is in the DONE_SYNC state, do
2774 : * one more check here to see if we have chased our tail around.
2775 : * If this is not the lowest lsn iclog, then we will leave it
2776 : * for another completion to process.
2777 : */
2778 45651785 : header_lsn = be64_to_cpu(iclog->ic_header.h_lsn);
2779 45651785 : lowest_lsn = xlog_get_lowest_lsn(log);
2780 45651785 : if (lowest_lsn && XFS_LSN_CMP(lowest_lsn, header_lsn) < 0)
2781 : return false;
2782 45651785 : xlog_state_set_callback(log, iclog, header_lsn);
2783 45651785 : return false;
2784 61084133 : default:
2785 : /*
2786 : * Can only perform callbacks in order. Since this iclog is not
2787 : * in the DONE_SYNC state, we skip the rest and just try to
2788 : * clean up.
2789 : */
2790 61084133 : return true;
2791 : }
2792 : }
2793 :
2794 : /*
2795 : * Loop over all the iclogs, running attached callbacks on them. Return true if
2796 : * we ran any callbacks, indicating that we dropped the icloglock. We don't need
2797 : * to handle transient shutdown state here at all because
2798 : * xlog_state_shutdown_callbacks() will be run to do the necessary shutdown
2799 : * cleanup of the callbacks.
2800 : */
2801 : static bool
2802 78548176 : xlog_state_do_iclog_callbacks(
2803 : struct xlog *log)
2804 : __releases(&log->l_icloglock)
2805 : __acquires(&log->l_icloglock)
2806 : {
2807 78548176 : struct xlog_in_core *first_iclog = log->l_iclog;
2808 78548176 : struct xlog_in_core *iclog = first_iclog;
2809 78548176 : bool ran_callback = false;
2810 :
2811 384213709 : do {
2812 384213709 : LIST_HEAD(cb_list);
2813 :
2814 384213709 : if (xlog_state_iodone_process_iclog(log, iclog))
2815 : break;
2816 323129576 : if (iclog->ic_state != XLOG_STATE_CALLBACK) {
2817 277477791 : iclog = iclog->ic_next;
2818 277477791 : continue;
2819 : }
2820 45651785 : list_splice_init(&iclog->ic_callbacks, &cb_list);
2821 45651785 : spin_unlock(&log->l_icloglock);
2822 :
2823 45651785 : trace_xlog_iclog_callbacks_start(iclog, _RET_IP_);
2824 45651785 : xlog_cil_process_committed(&cb_list);
2825 45651785 : trace_xlog_iclog_callbacks_done(iclog, _RET_IP_);
2826 45651785 : ran_callback = true;
2827 :
2828 45651785 : spin_lock(&log->l_icloglock);
2829 45651785 : xlog_state_clean_iclog(log, iclog);
2830 45651785 : iclog = iclog->ic_next;
2831 323129576 : } while (iclog != first_iclog);
2832 :
2833 78548176 : return ran_callback;
2834 : }
2835 :
2836 :
2837 : /*
2838 : * Loop running iclog completion callbacks until there are no more iclogs in a
2839 : * state that can run callbacks.
2840 : */
2841 : STATIC void
2842 45663833 : xlog_state_do_callback(
2843 : struct xlog *log)
2844 : {
2845 45663833 : int flushcnt = 0;
2846 45663833 : int repeats = 0;
2847 :
2848 45663833 : spin_lock(&log->l_icloglock);
2849 78548176 : while (xlog_state_do_iclog_callbacks(log)) {
2850 65768704 : if (xlog_is_shutdown(log))
2851 : break;
2852 :
2853 32884343 : if (++repeats > 5000) {
2854 0 : flushcnt += repeats;
2855 0 : repeats = 0;
2856 0 : xfs_warn(log->l_mp,
2857 : "%s: possible infinite loop (%d iterations)",
2858 : __func__, flushcnt);
2859 : }
2860 : }
2861 :
2862 45663833 : if (log->l_iclog->ic_state == XLOG_STATE_ACTIVE)
2863 38003900 : wake_up_all(&log->l_flush_wait);
2864 :
2865 45663833 : spin_unlock(&log->l_icloglock);
2866 45663833 : }
2867 :
2868 :
2869 : /*
2870 : * Finish transitioning this iclog to the dirty state.
2871 : *
2872 : * Callbacks could take time, so they are done outside the scope of the
2873 : * global state machine log lock.
2874 : */
2875 : STATIC void
2876 45663833 : xlog_state_done_syncing(
2877 : struct xlog_in_core *iclog)
2878 : {
2879 45663833 : struct xlog *log = iclog->ic_log;
2880 :
2881 45663833 : spin_lock(&log->l_icloglock);
2882 45663833 : ASSERT(atomic_read(&iclog->ic_refcnt) == 0);
2883 45663833 : trace_xlog_iclog_sync_done(iclog, _RET_IP_);
2884 :
2885 : /*
2886 : * If we got an error, either on the first buffer, or in the case of
2887 : * split log writes, on the second, we shut down the file system and
2888 : * no iclogs should ever be attempted to be written to disk again.
2889 : */
2890 91327666 : if (!xlog_is_shutdown(log)) {
2891 45651824 : ASSERT(iclog->ic_state == XLOG_STATE_SYNCING);
2892 45651824 : iclog->ic_state = XLOG_STATE_DONE_SYNC;
2893 : }
2894 :
2895 : /*
2896 : * Someone could be sleeping prior to writing out the next
2897 : * iclog buffer; we wake them all. One will get to do the
2898 : * I/O, the others get to wait for the result.
2899 : */
2900 45663833 : wake_up_all(&iclog->ic_write_wait);
2901 45663833 : spin_unlock(&log->l_icloglock);
2902 45663833 : xlog_state_do_callback(log);
2903 45663833 : }
2904 :
2905 : /*
2906 : * If the head of the in-core log ring is not (ACTIVE or DIRTY), then we must
2907 : * sleep. We wait on the flush queue on the head iclog as that should be
2908 : * the first iclog to complete flushing. Hence if all iclogs are syncing,
2909 : * we will wait here and all new writes will sleep until a sync completes.
2910 : *
2911 : * The in-core logs are used in a circular fashion. They are not used
2912 : * out-of-order even when an iclog past the head is free.
2913 : *
2914 : * return:
2915 : * * log_offset where xlog_write() can start writing into the in-core
2916 : * log's data space.
2917 : * * in-core log pointer to which xlog_write() should write.
2918 : * * boolean indicating this is a continued write to an in-core log.
2919 : * If this is the last write, then the in-core log's offset field
2920 : * needs to be incremented, depending on the amount of data which
2921 : * is copied.
2922 : */
2923 : STATIC int
2924 52701010 : xlog_state_get_iclog_space(
2925 : struct xlog *log,
2926 : int len,
2927 : struct xlog_in_core **iclogp,
2928 : struct xlog_ticket *ticket,
2929 : int *logoffsetp)
2930 : {
2931 62996121 : int log_offset;
2932 62996121 : xlog_rec_header_t *head;
2933 62996121 : xlog_in_core_t *iclog;
2934 :
2935 : restart:
2936 62996121 : spin_lock(&log->l_icloglock);
2937 125992728 : if (xlog_is_shutdown(log)) {
2938 2713 : spin_unlock(&log->l_icloglock);
2939 2713 : return -EIO;
2940 : }
2941 :
2942 62993651 : iclog = log->l_iclog;
2943 62993651 : if (iclog->ic_state != XLOG_STATE_ACTIVE) {
2944 10292739 : XFS_STATS_INC(log->l_mp, xs_log_noiclogs);
2945 :
2946 : /* Wait for log writes to have flushed */
2947 10292739 : xlog_wait(&log->l_flush_wait, &log->l_icloglock);
2948 10292551 : goto restart;
2949 : }
2950 :
2951 52700912 : head = &iclog->ic_header;
2952 :
2953 52700912 : atomic_inc(&iclog->ic_refcnt); /* prevents sync */
2954 52700916 : log_offset = iclog->ic_offset;
2955 :
2956 52700916 : trace_xlog_iclog_get_space(iclog, _RET_IP_);
2957 :
2958 : /* On the 1st write to an iclog, figure out lsn. This works
2959 : * if iclogs marked XLOG_STATE_WANT_SYNC always write out what they are
2960 : * committing to. If the offset is set, that's how many blocks
2961 : * must be written.
2962 : */
2963 52700899 : if (log_offset == 0) {
2964 45665927 : ticket->t_curr_res -= log->l_iclog_hsize;
2965 45665927 : head->h_cycle = cpu_to_be32(log->l_curr_cycle);
2966 45665927 : head->h_lsn = cpu_to_be64(
2967 : xlog_assign_lsn(log->l_curr_cycle, log->l_curr_block));
2968 45665927 : ASSERT(log->l_curr_block >= 0);
2969 : }
2970 :
2971 : /* If there is enough room to write everything, then do it. Otherwise,
2972 : * claim the rest of the region and make sure the XLOG_STATE_WANT_SYNC
2973 : * bit is on, so this will get flushed out. Don't update ic_offset
2974 : * until you know exactly how many bytes get copied. Therefore, wait
2975 : * until later to update ic_offset.
2976 : *
2977 : * xlog_write() algorithm assumes that at least 2 xlog_op_header_t's
2978 : * can fit into remaining data section.
2979 : */
2980 52700899 : if (iclog->ic_size - iclog->ic_offset < 2*sizeof(xlog_op_header_t)) {
2981 2560 : int error = 0;
2982 :
2983 2560 : xlog_state_switch_iclogs(log, iclog, iclog->ic_size);
2984 :
2985 : /*
2986 : * If we are the only one writing to this iclog, sync it to
2987 : * disk. We need to do an atomic compare and decrement here to
2988 : * avoid racing with concurrent atomic_dec_and_lock() calls in
2989 : * xlog_state_release_iclog() when there is more than one
2990 : * reference to the iclog.
2991 : */
2992 5120 : if (!atomic_add_unless(&iclog->ic_refcnt, -1, 1))
2993 2531 : error = xlog_state_release_iclog(log, iclog, ticket);
2994 2560 : spin_unlock(&log->l_icloglock);
2995 2560 : if (error)
2996 0 : return error;
2997 2560 : goto restart;
2998 : }
2999 :
3000 : /* Do we have enough room to write the full amount in the remainder
3001 : * of this iclog? Or must we continue a write on the next iclog and
3002 : * mark this iclog as completely taken? In the case where we switch
3003 : * iclogs (to mark it taken), this particular iclog will release/sync
3004 : * to disk in xlog_write().
3005 : */
3006 52698339 : if (len <= iclog->ic_size - iclog->ic_offset)
3007 13737813 : iclog->ic_offset += len;
3008 : else
3009 38960526 : xlog_state_switch_iclogs(log, iclog, iclog->ic_size);
3010 52698351 : *iclogp = iclog;
3011 :
3012 52698351 : ASSERT(iclog->ic_offset <= iclog->ic_size);
3013 52698351 : spin_unlock(&log->l_icloglock);
3014 :
3015 52698355 : *logoffsetp = log_offset;
3016 52698355 : return 0;
3017 : }
3018 :
3019 : /*
3020 : * The first cnt-1 times a ticket goes through here we don't need to move the
3021 : * grant write head because the permanent reservation has reserved cnt times the
3022 : * unit amount. Release part of current permanent unit reservation and reset
3023 : * current reservation to be one unit's worth. Also move grant reservation head
3024 : * forward.
3025 : */
3026 : void
3027 1962703983 : xfs_log_ticket_regrant(
3028 : struct xlog *log,
3029 : struct xlog_ticket *ticket)
3030 : {
3031 1962703983 : trace_xfs_log_ticket_regrant(log, ticket);
3032 :
3033 1962662150 : if (ticket->t_cnt > 0)
3034 1463496714 : ticket->t_cnt--;
3035 :
3036 1962662150 : xlog_grant_sub_space(log, &log->l_reserve_head.grant,
3037 : ticket->t_curr_res);
3038 1962787651 : xlog_grant_sub_space(log, &log->l_write_head.grant,
3039 : ticket->t_curr_res);
3040 1962783386 : ticket->t_curr_res = ticket->t_unit_res;
3041 :
3042 1962783386 : trace_xfs_log_ticket_regrant_sub(log, ticket);
3043 :
3044 : /* just return if we still have some of the pre-reserved space */
3045 1962773971 : if (!ticket->t_cnt) {
3046 529730278 : xlog_grant_add_space(log, &log->l_reserve_head.grant,
3047 : ticket->t_unit_res);
3048 529725721 : trace_xfs_log_ticket_regrant_exit(log, ticket);
3049 :
3050 529722356 : ticket->t_curr_res = ticket->t_unit_res;
3051 : }
3052 :
3053 1962766049 : xfs_log_ticket_put(ticket);
3054 1962790896 : }
3055 :
3056 : /*
3057 : * Give back the space left from a reservation.
3058 : *
3059 : * All the information we need to make a correct determination of space left
3060 : * is present. For non-permanent reservations, things are quite easy. The
3061 : * count should have been decremented to zero. We only need to deal with the
3062 : * space remaining in the current reservation part of the ticket. If the
3063 : * ticket contains a permanent reservation, there may be left over space which
3064 : * needs to be released. A count of N means that N-1 refills of the current
3065 : * reservation can be done before we need to ask for more space. The first
3066 : * one goes to fill up the first current reservation. Once we run out of
3067 : * space, the count will stay at zero and the only space remaining will be
3068 : * in the current reservation field.
3069 : */
3070 : void
3071 1993530725 : xfs_log_ticket_ungrant(
3072 : struct xlog *log,
3073 : struct xlog_ticket *ticket)
3074 : {
3075 1993530725 : int bytes;
3076 :
3077 1993530725 : trace_xfs_log_ticket_ungrant(log, ticket);
3078 :
3079 1993378625 : if (ticket->t_cnt > 0)
3080 1741464317 : ticket->t_cnt--;
3081 :
3082 1993378625 : trace_xfs_log_ticket_ungrant_sub(log, ticket);
3083 :
3084 : /*
3085 : * If this is a permanent reservation ticket, we may be able to free
3086 : * up more space based on the remaining count.
3087 : */
3088 1993375371 : bytes = ticket->t_curr_res;
3089 1993375371 : if (ticket->t_cnt > 0) {
3090 1578069980 : ASSERT(ticket->t_flags & XLOG_TIC_PERM_RESERV);
3091 1578069980 : bytes += ticket->t_unit_res*ticket->t_cnt;
3092 : }
3093 :
3094 1993375371 : xlog_grant_sub_space(log, &log->l_reserve_head.grant, bytes);
3095 1994146914 : xlog_grant_sub_space(log, &log->l_write_head.grant, bytes);
3096 :
3097 1994078546 : trace_xfs_log_ticket_ungrant_exit(log, ticket);
3098 :
3099 1994037413 : xfs_log_space_wake(log->l_mp);
3100 1993805994 : xfs_log_ticket_put(ticket);
3101 1993731710 : }
3102 :
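/*
 * Sketch (not kernel code) of the space handed back by
 * xfs_log_ticket_ungrant() above. "cnt" is the number of whole, unused
 * refills a permanent ticket still holds after the one being released here
 * has been dropped; a non-permanent ticket ends up with cnt == 0 and only
 * returns its remaining current reservation.
 */
static inline int
sketch_ungrant_bytes(
	int	curr_res,	/* unused bytes of the current reservation */
	int	unit_res,	/* bytes per reservation unit */
	int	cnt)		/* whole unused refills remaining */
{
	int	bytes = curr_res;

	if (cnt > 0)
		bytes += unit_res * cnt;
	return bytes;
}
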
3103 : /*
3104 : * This routine will mark the current iclog in the ring as WANT_SYNC and move
3105 : * the current iclog pointer to the next iclog in the ring.
3106 : */
3107 : void
3108 45664263 : xlog_state_switch_iclogs(
3109 : struct xlog *log,
3110 : struct xlog_in_core *iclog,
3111 : int eventual_size)
3112 : {
3113 45664263 : ASSERT(iclog->ic_state == XLOG_STATE_ACTIVE);
3114 45664263 : assert_spin_locked(&log->l_icloglock);
3115 45664263 : trace_xlog_iclog_switch(iclog, _RET_IP_);
3116 :
3117 45664264 : if (!eventual_size)
3118 6701178 : eventual_size = iclog->ic_offset;
3119 45664264 : iclog->ic_state = XLOG_STATE_WANT_SYNC;
3120 45664264 : iclog->ic_header.h_prev_block = cpu_to_be32(log->l_prev_block);
3121 45664264 : log->l_prev_block = log->l_curr_block;
3122 45664264 : log->l_prev_cycle = log->l_curr_cycle;
3123 :
3124 : /* roll log?: ic_offset changed later */
3125 45664264 : log->l_curr_block += BTOBB(eventual_size)+BTOBB(log->l_iclog_hsize);
3126 :
3127 : /* Round up to next log-sunit */
3128 45664264 : if (log->l_iclog_roundoff > BBSIZE) {
3129 38930830 : uint32_t sunit_bb = BTOBB(log->l_iclog_roundoff);
3130 38930830 : log->l_curr_block = roundup(log->l_curr_block, sunit_bb);
3131 : }
3132 :
3133 45664264 : if (log->l_curr_block >= log->l_logBBsize) {
3134 : /*
3135 : * Rewind the current block before the cycle is bumped to make
3136 : * sure that the combined LSN never transiently moves forward
3137 : * when the log wraps to the next cycle. This is to support the
3138 : * unlocked sample of these fields from xlog_valid_lsn(). Most
3139 : * other cases should acquire l_icloglock.
3140 : */
3141 18917 : log->l_curr_block -= log->l_logBBsize;
3142 18917 : ASSERT(log->l_curr_block >= 0);
3143 18917 : smp_wmb();
3144 18917 : log->l_curr_cycle++;
3145 18917 : if (log->l_curr_cycle == XLOG_HEADER_MAGIC_NUM)
3146 0 : log->l_curr_cycle++;
3147 : }
3148 45664264 : ASSERT(iclog == log->l_iclog);
3149 45664264 : log->l_iclog = iclog->ic_next;
3150 45664264 : }
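/*
 * Standalone sketch (not kernel code) of how the head block advances in
 * xlog_state_switch_iclogs(): add the data and header blocks, round up to
 * the stripe unit, then wrap and bump the cycle at the end of the log. The
 * magic-number cycle skip is omitted; all values are illustrative.
 */
#include <stdio.h>

#define SK_BBSIZE               512
#define SK_BTOBB(bytes)         (((bytes) + SK_BBSIZE - 1) / SK_BBSIZE)
#define SK_ROUNDUP(x, y)        ((((x) + (y) - 1) / (y)) * (y))

struct sketch_log {
        int     curr_block;     /* head block within the current cycle */
        int     curr_cycle;     /* current cycle number */
        int     size_bblocks;   /* log size in basic blocks */
        int     roundoff;       /* stripe unit roundoff in bytes */
};

static void sketch_switch(struct sketch_log *log, int data_bytes, int hdr_bytes)
{
        log->curr_block += SK_BTOBB(data_bytes) + SK_BTOBB(hdr_bytes);

        /* Round the head up to the next stripe unit boundary. */
        if (log->roundoff > SK_BBSIZE)
                log->curr_block = SK_ROUNDUP(log->curr_block,
                                             SK_BTOBB(log->roundoff));

        /* Wrap back to the start of the log and bump the cycle. */
        if (log->curr_block >= log->size_bblocks) {
                log->curr_block -= log->size_bblocks;
                log->curr_cycle++;
        }
}

int main(void)
{
        struct sketch_log log = {
                .curr_block = 20400, .curr_cycle = 7,
                .size_bblocks = 20480, .roundoff = 65536,
        };

        sketch_switch(&log, 48 * 1024, 512);
        printf("cycle %d, block %d\n", log.curr_cycle, log.curr_block);
        return 0;
}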
3151 :
3152 : /*
3153 : * Force the iclog to disk and check if the iclog has been completed before
3154 : * xlog_force_iclog() returns. This can happen on synchronous (e.g.
3155 : * pmem) or fast async storage because we drop the icloglock to issue the IO.
3156 : * If completion has already occurred, tell the caller so that it can avoid an
3157 : * unnecessary wait on the iclog.
3158 : */
3159 : static int
3160 5952698 : xlog_force_and_check_iclog(
3161 : struct xlog_in_core *iclog,
3162 : bool *completed)
3163 : {
3164 5952698 : xfs_lsn_t lsn = be64_to_cpu(iclog->ic_header.h_lsn);
3165 5952698 : int error;
3166 :
3167 5952698 : *completed = false;
3168 5952698 : error = xlog_force_iclog(iclog);
3169 5952698 : if (error)
3170 : return error;
3171 :
3172 : /*
3173 : * If the iclog has already been completed and reused the header LSN
3174 : * will have been rewritten by completion
3175 : */
3176 5952698 : if (be64_to_cpu(iclog->ic_header.h_lsn) != lsn)
3177 237 : *completed = true;
3178 : return 0;
3179 : }
3180 :
3181 : /*
3182 : * Write out all data in the in-core log as of this exact moment in time.
3183 : *
3184 : * Data may be written to the in-core log during this call. However,
3185 : * we don't guarantee this data will be written out. Unlike past
3186 : * implementations, this routine will *not* write out zero length LRs.
3187 : *
3188 : * Basically, we try and perform an intelligent scan of the in-core logs.
3189 : * If we determine there is no flushable data, we just return. There is no
3190 : * flushable data if:
3191 : *
3192 : * 1. the current iclog is active and has no data; the previous iclog
3193 : * is in the active or dirty state.
3194 : * 2. the current iclog is dirty, and the previous iclog is in the
3195 : * active or dirty state.
3196 : *
3197 : * We may sleep if:
3198 : *
3199 : * 1. the current iclog is not in the active nor dirty state.
3200 : * 2. the current iclog is dirty, and the previous iclog is not in the
3201 : * active nor dirty state.
3202 : * 3. the current iclog is active, and there is another thread writing
3203 : * to this particular iclog.
3204 : * 4. a) the current iclog is active and has no other writers
3205 : * b) when we return from flushing out this iclog, it is still
3206 : * not in the active nor dirty state.
3207 : */
3208 : int
3209 5413905 : xfs_log_force(
3210 : struct xfs_mount *mp,
3211 : uint flags)
3212 : {
3213 5413905 : struct xlog *log = mp->m_log;
3214 5413905 : struct xlog_in_core *iclog;
3215 :
3216 5413905 : XFS_STATS_INC(mp, xs_log_force);
3217 5414867 : trace_xfs_log_force(mp, 0, _RET_IP_);
3218 :
3219 5414742 : xlog_cil_force(log);
3220 :
3221 5417424 : spin_lock(&log->l_icloglock);
3222 10834948 : if (xlog_is_shutdown(log))
3223 46100 : goto out_error;
3224 :
3225 5371374 : iclog = log->l_iclog;
3226 5371374 : trace_xlog_iclog_force(iclog, _RET_IP_);
3227 :
3228 5371374 : if (iclog->ic_state == XLOG_STATE_DIRTY ||
3229 5357688 : (iclog->ic_state == XLOG_STATE_ACTIVE &&
3230 4865398 : atomic_read(&iclog->ic_refcnt) == 0 && iclog->ic_offset == 0)) {
3231 : /*
3232 : * If the head is dirty or (active and empty), then we need to
3233 : * look at the previous iclog.
3234 : *
3235 : * If the previous iclog is active or dirty we are done. There
3236 : * is nothing to sync out. Otherwise, we attach ourselves to the
3237 : * previous iclog and go to sleep.
3238 : */
3239 3018057 : iclog = iclog->ic_prev;
3240 2353317 : } else if (iclog->ic_state == XLOG_STATE_ACTIVE) {
3241 2339636 : if (atomic_read(&iclog->ic_refcnt) == 0) {
3242 : /* We have exclusive access to this iclog. */
3243 1847345 : bool completed;
3244 :
3245 1847345 : if (xlog_force_and_check_iclog(iclog, &completed))
3246 0 : goto out_error;
3247 :
3248 1847345 : if (completed)
3249 81 : goto out_unlock;
3250 : } else {
3251 : /*
3252 : * Someone else is still writing to this iclog, so we
3253 : * need to ensure that when they release the iclog it
3254 : * gets synced immediately as we may be waiting on it.
3255 : */
3256 492291 : xlog_state_switch_iclogs(log, iclog, 0);
3257 : }
3258 : }
3259 :
3260 : /*
3261 : * The iclog we are about to wait on may contain the checkpoint pushed
3262 : * by the above xlog_cil_force() call, but it may not have been pushed
3263 : * to disk yet. Like the ACTIVE case above, we need to make sure caches
3264 : * are flushed when this iclog is written.
3265 : */
3266 5371293 : if (iclog->ic_state == XLOG_STATE_WANT_SYNC)
3267 540578 : iclog->ic_flags |= XLOG_ICL_NEED_FLUSH | XLOG_ICL_NEED_FUA;
3268 :
3269 5371293 : if (flags & XFS_LOG_SYNC)
3270 5213502 : return xlog_wait_on_iclog(iclog);
3271 157791 : out_unlock:
3272 157872 : spin_unlock(&log->l_icloglock);
3273 157872 : return 0;
3274 46100 : out_error:
3275 46100 : spin_unlock(&log->l_icloglock);
3276 46100 : return -EIO;
3277 : }
3278 :
3279 : /*
3280 : * Force the log to a specific LSN.
3281 : *
3282 : * If an iclog with that lsn can be found:
3283 : * If it is in the DIRTY state, just return.
3284 : * If it is in the ACTIVE state, move the in-core log into the WANT_SYNC
3285 : * state and go to sleep or return.
3286 : * If it is in any other state, go to sleep or return.
3287 : *
3288 : * Synchronous forces are implemented with a wait queue. All callers trying
3289 : * to force a given lsn to disk must wait on the queue attached to the
3290 : * specific in-core log. When the given in-core log finally completes its write
3291 : * to disk, that thread will wake up all threads waiting on the queue.
3292 : */
3293 : static int
3294 5358869 : xlog_force_lsn(
3295 : struct xlog *log,
3296 : xfs_lsn_t lsn,
3297 : uint flags,
3298 : int *log_flushed,
3299 : bool already_slept)
3300 : {
3301 5358869 : struct xlog_in_core *iclog;
3302 5358869 : bool completed;
3303 :
3304 5358869 : spin_lock(&log->l_icloglock);
3305 10719690 : if (xlog_is_shutdown(log))
3306 305 : goto out_error;
3307 :
3308 5359540 : iclog = log->l_iclog;
3309 10949353 : while (be64_to_cpu(iclog->ic_header.h_lsn) != lsn) {
3310 5593709 : trace_xlog_iclog_force_lsn(iclog, _RET_IP_);
3311 5593708 : iclog = iclog->ic_next;
3312 5593708 : if (iclog == log->l_iclog)
3313 3895 : goto out_unlock;
3314 : }
3315 :
3316 5355644 : switch (iclog->ic_state) {
3317 4403657 : case XLOG_STATE_ACTIVE:
3318 : /*
3319 : * We sleep here if we haven't already slept (e.g. this is the
3320 : * first time we've looked at the correct iclog buf) and the
3321 : * buffer before us is going to be sync'ed. The reason for this
3322 : * is that if we are doing sync transactions here, by waiting
3323 : * for the previous I/O to complete, we can allow a few more
3324 : * transactions into this iclog before we close it down.
3325 : *
3326 : * Otherwise, we mark the buffer WANT_SYNC, and bump up the
3327 : * refcnt so we can release the log (which drops the ref count).
3328 : * The state switch keeps new transaction commits from using
3329 : * this buffer. When the current commits finish writing into
3330 : * the buffer, the refcount will drop to zero and the buffer
3331 : * will go out then.
3332 : */
3333 4403657 : if (!already_slept &&
3334 4180341 : (iclog->ic_prev->ic_state == XLOG_STATE_WANT_SYNC ||
3335 : iclog->ic_prev->ic_state == XLOG_STATE_SYNCING)) {
3336 298305 : xlog_wait(&iclog->ic_prev->ic_write_wait,
3337 298305 : &log->l_icloglock);
3338 298305 : return -EAGAIN;
3339 : }
3340 4105352 : if (xlog_force_and_check_iclog(iclog, &completed))
3341 0 : goto out_error;
3342 4105353 : if (log_flushed)
3343 3742346 : *log_flushed = 1;
3344 4105353 : if (completed)
3345 156 : goto out_unlock;
3346 : break;
3347 9220 : case XLOG_STATE_WANT_SYNC:
3348 : /*
3349 : * This iclog may contain the checkpoint pushed by the
3350 : * xlog_cil_force_seq() call, but there are other writers still
3351 : * accessing it so it hasn't been pushed to disk yet. Like the
3352 : * ACTIVE case above, we need to make sure caches are flushed
3353 : * when this iclog is written.
3354 : */
3355 9220 : iclog->ic_flags |= XLOG_ICL_NEED_FLUSH | XLOG_ICL_NEED_FUA;
3356 9220 : break;
3357 : default:
3358 : /*
3359 : * The entire checkpoint was written by the CIL force and is on
3360 : * its way to disk already. It will be stable when it
3361 : * completes, so we don't need to manipulate caches here at all.
3362 : * We just need to wait for completion if necessary.
3363 : */
3364 : break;
3365 : }
3366 :
3367 5057184 : if (flags & XFS_LOG_SYNC)
3368 5057184 : return xlog_wait_on_iclog(iclog);
3369 0 : out_unlock:
3370 4051 : spin_unlock(&log->l_icloglock);
3371 4051 : return 0;
3372 305 : out_error:
3373 305 : spin_unlock(&log->l_icloglock);
3374 305 : return -EIO;
3375 : }
3376 :
3377 : /*
3378 : * Force the log to a specific checkpoint sequence.
3379 : *
3380 : * First force the CIL so that all the required changes have been flushed to the
3381 : * iclogs. If the CIL force completed it will return a commit LSN that indicates
3382 : * the iclog that needs to be flushed to stable storage. If the caller needs
3383 : * a synchronous log force, we will wait on the iclog with the LSN returned by
3384 : * xlog_cil_force_seq() to be completed.
3385 : */
3386 : int
3387 5133172 : xfs_log_force_seq(
3388 : struct xfs_mount *mp,
3389 : xfs_csn_t seq,
3390 : uint flags,
3391 : int *log_flushed)
3392 : {
3393 5133172 : struct xlog *log = mp->m_log;
3394 5133172 : xfs_lsn_t lsn;
3395 5133172 : int ret;
3396 5133172 : ASSERT(seq != 0);
3397 :
3398 5133172 : XFS_STATS_INC(mp, xs_log_force);
3399 5133230 : trace_xfs_log_force(mp, seq, _RET_IP_);
3400 :
3401 5133082 : lsn = xlog_cil_force_seq(log, seq);
3402 5133081 : if (lsn == NULLCOMMITLSN)
3403 : return 0;
3404 :
3405 5061211 : ret = xlog_force_lsn(log, lsn, flags, log_flushed, false);
3406 5060279 : if (ret == -EAGAIN) {
3407 298291 : XFS_STATS_INC(mp, xs_log_force_sleep);
3408 298299 : ret = xlog_force_lsn(log, lsn, flags, log_flushed, true);
3409 : }
3410 : return ret;
3411 : }
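/*
 * Standalone sketch (not kernel code) of the single-retry pattern above: the
 * first force attempt may return -EAGAIN after sleeping on the previous
 * iclog, and is then retried exactly once with already_slept set. The force
 * function here is a hypothetical stand-in for xlog_force_lsn().
 */
#include <errno.h>
#include <stdio.h>

static int sketch_force(int already_slept)
{
        /* Pretend we slept on the previous iclog on the first attempt. */
        return already_slept ? 0 : -EAGAIN;
}

int main(void)
{
        int     ret = sketch_force(0);

        if (ret == -EAGAIN)
                ret = sketch_force(1); /* only one sleep is allowed */
        printf("ret = %d\n", ret);
        return 0;
}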
3412 :
3413 : /*
3414 : * Free a used ticket when its refcount falls to zero.
3415 : */
3416 : void
3417 3956464298 : xfs_log_ticket_put(
3418 : xlog_ticket_t *ticket)
3419 : {
3420 3956464298 : ASSERT(atomic_read(&ticket->t_ref) > 0);
3421 3956464298 : if (atomic_dec_and_test(&ticket->t_ref))
3422 1994192537 : kmem_cache_free(xfs_log_ticket_cache, ticket);
3423 3956527749 : }
3424 :
3425 : xlog_ticket_t *
3426 1962376346 : xfs_log_ticket_get(
3427 : xlog_ticket_t *ticket)
3428 : {
3429 1962376346 : ASSERT(atomic_read(&ticket->t_ref) > 0);
3430 1962376346 : atomic_inc(&ticket->t_ref);
3431 1962746159 : return ticket;
3432 : }
3433 :
3434 : /*
3435 : * Figure out the total log space unit (in bytes) that would be
3436 : * required for a log ticket.
3437 : */
3438 : static int
3439 1989380315 : xlog_calc_unit_res(
3440 : struct xlog *log,
3441 : int unit_bytes,
3442 : int *niclogs)
3443 : {
3444 1989380315 : int iclog_space;
3445 1989380315 : uint num_headers;
3446 :
3447 : /*
3448 : * Permanent reservations have up to 'cnt'-1 active log operations
3449 : * in the log. A unit in this case is the amount of space for one
3450 : * of these log operations. Normal reservations have a cnt of 1
3451 : * and their unit amount is the total amount of space required.
3452 : *
3453 : * The following lines of code account for non-transaction data
3454 : * which occupy space in the on-disk log.
3455 : *
3456 : * Normal form of a transaction is:
3457 : * <oph><trans-hdr><start-oph><reg1-oph><reg1><reg2-oph>...<commit-oph>
3458 : * and then there are LR hdrs, split-recs and roundoff at end of syncs.
3459 : *
3460 : * We need to account for all the leadup data and trailer data
3461 : * around the transaction data.
3462 : * And then we need to account for the worst case in terms of using
3463 : * more space.
3464 : * The worst case will happen if:
3465 : * - the placement of the transaction happens to be such that the
3466 : * roundoff is at its maximum
3467 : * - the transaction data is synced before the commit record is synced
3468 : * i.e. <transaction-data><roundoff> | <commit-rec><roundoff>
3469 : * Therefore the commit record is in its own Log Record.
3470 : * This can happen as the commit record is called with its
3471 : * own region to xlog_write().
3472 : * This then means that in the worst case, roundoff can happen for
3473 : * the commit-rec as well.
3474 : * The commit-rec is smaller than padding in this scenario and so it is
3475 : * not added separately.
3476 : */
3477 :
3478 : /* for trans header */
3479 1989380315 : unit_bytes += sizeof(xlog_op_header_t);
3480 1989380315 : unit_bytes += sizeof(xfs_trans_header_t);
3481 :
3482 : /* for start-rec */
3483 1989380315 : unit_bytes += sizeof(xlog_op_header_t);
3484 :
3485 : /*
3486 : * for LR headers - the space for data in an iclog is the size minus
3487 : * the space used for the headers. If we use the iclog size, then we
3488 : * undercalculate the number of headers required.
3489 : *
3490 : * Furthermore - the addition of op headers for split-recs might
3491 : * increase the space required enough to require more log and op
3492 : * headers, so take that into account too.
3493 : *
3494 : * IMPORTANT: This reservation makes the assumption that if this
3495 : * transaction is the first in an iclog and hence has the LR headers
3496 : * accounted to it, then the remaining space in the iclog is
3497 : * exclusively for this transaction. i.e. if the transaction is larger
3498 : * than the iclog, it will be the only thing in that iclog.
3499 : * Fundamentally, this means we must pass the entire log vector to
3500 : * xlog_write to guarantee this.
3501 : */
3502 1989380315 : iclog_space = log->l_iclog_size - log->l_iclog_hsize;
3503 1989380315 : num_headers = howmany(unit_bytes, iclog_space);
3504 :
3505 : /* for split-recs - ophdrs added when data split over LRs */
3506 1989380315 : unit_bytes += sizeof(xlog_op_header_t) * num_headers;
3507 :
3508 : /* add extra header reservations if we overrun */
3509 1990538910 : while (!num_headers ||
3510 1990538910 : howmany(unit_bytes, iclog_space) > num_headers) {
3511 1158595 : unit_bytes += sizeof(xlog_op_header_t);
3512 1158595 : num_headers++;
3513 : }
3514 1989380315 : unit_bytes += log->l_iclog_hsize * num_headers;
3515 :
3516 : /* for commit-rec LR header - note: padding will subsume the ophdr */
3517 1989380315 : unit_bytes += log->l_iclog_hsize;
3518 :
3519 : /* roundoff padding for transaction data and one for commit record */
3520 1989380315 : unit_bytes += 2 * log->l_iclog_roundoff;
3521 :
3522 1989380315 : if (niclogs)
3523 1989311589 : *niclogs = num_headers;
3524 1989380315 : return unit_bytes;
3525 : }
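/*
 * Standalone sketch (not kernel code) of the worst-case accounting above.
 * Header sizes and the sample numbers are illustrative; the structure of the
 * calculation mirrors xlog_calc_unit_res().
 */
#include <stdio.h>

#define SK_OPHDR                12      /* illustrative op header size */
#define SK_TRANSHDR             16      /* illustrative trans header size */
#define SK_HOWMANY(x, y)        (((x) + (y) - 1) / (y))

static int sketch_unit_res(int unit_bytes, int iclog_size, int iclog_hsize,
                           int roundoff)
{
        int     iclog_space = iclog_size - iclog_hsize;
        int     num_headers;

        unit_bytes += SK_OPHDR + SK_TRANSHDR;   /* transaction header */
        unit_bytes += SK_OPHDR;                 /* start record */

        num_headers = SK_HOWMANY(unit_bytes, iclog_space);
        unit_bytes += SK_OPHDR * num_headers;   /* split-record op headers */

        /* Add headers if the op headers themselves spilled us over. */
        while (!num_headers ||
               SK_HOWMANY(unit_bytes, iclog_space) > num_headers) {
                unit_bytes += SK_OPHDR;
                num_headers++;
        }
        unit_bytes += iclog_hsize * num_headers;        /* LR headers */
        unit_bytes += iclog_hsize;              /* commit record LR header */
        unit_bytes += 2 * roundoff;             /* roundoff padding */
        return unit_bytes;
}

int main(void)
{
        /* e.g. a 200KiB reservation, 32KiB iclogs, 64KiB stripe unit */
        printf("%d\n", sketch_unit_res(200 * 1024, 32 * 1024, 512, 64 * 1024));
        return 0;
}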
3526 :
3527 : int
3528 68726 : xfs_log_calc_unit_res(
3529 : struct xfs_mount *mp,
3530 : int unit_bytes)
3531 : {
3532 68726 : return xlog_calc_unit_res(mp->m_log, unit_bytes, NULL);
3533 : }
3534 :
3535 : /*
3536 : * Allocate and initialise a new log ticket.
3537 : */
3538 : struct xlog_ticket *
3539 1989734392 : xlog_ticket_alloc(
3540 : struct xlog *log,
3541 : int unit_bytes,
3542 : int cnt,
3543 : bool permanent)
3544 : {
3545 1989734392 : struct xlog_ticket *tic;
3546 1989734392 : int unit_res;
3547 :
3548 1989734392 : tic = kmem_cache_zalloc(xfs_log_ticket_cache, GFP_NOFS | __GFP_NOFAIL);
3549 :
3550 1990235623 : unit_res = xlog_calc_unit_res(log, unit_bytes, &tic->t_iclog_hdrs);
3551 :
3552 1989742728 : atomic_set(&tic->t_ref, 1);
3553 1989742728 : tic->t_task = current;
3554 1989742728 : INIT_LIST_HEAD(&tic->t_queue);
3555 1989742728 : tic->t_unit_res = unit_res;
3556 1989742728 : tic->t_curr_res = unit_res;
3557 1989742728 : tic->t_cnt = cnt;
3558 1989742728 : tic->t_ocnt = cnt;
3559 1989742728 : tic->t_tid = get_random_u32();
3560 1990460321 : if (permanent)
3561 1761660791 : tic->t_flags |= XLOG_TIC_PERM_RESERV;
3562 :
3563 1990460321 : return tic;
3564 : }
3565 :
3566 : #if defined(DEBUG)
3567 : /*
3568 : * Check to make sure the grant write head didn't just overlap the tail. If
3569 : * the cycles are the same, we can't be overlapping. Otherwise, make sure that
3570 : * the cycles differ by exactly one and check the byte count.
3571 : *
3572 : * This check is run unlocked, so can give false positives. Rather than assert
3573 : * on failures, use a warn-once flag and a panic tag to allow the admin to
3574 : * determine if they want to panic the machine when such an error occurs. For
3575 : * debug kernels this will have the same effect as using an assert but, unlike
3576 : * an assert, it can be turned off at runtime.
3577 : */
3578 : STATIC void
3579 2515686899 : xlog_verify_grant_tail(
3580 : struct xlog *log)
3581 : {
3582 2515686899 : int tail_cycle, tail_blocks;
3583 2515686899 : int cycle, space;
3584 :
3585 2515686899 : xlog_crack_grant_head(&log->l_write_head.grant, &cycle, &space);
3586 2515686899 : xlog_crack_atomic_lsn(&log->l_tail_lsn, &tail_cycle, &tail_blocks);
3587 2515686899 : if (tail_cycle != cycle) {
3588 747548429 : if (cycle - 1 != tail_cycle &&
3589 175 : !test_and_set_bit(XLOG_TAIL_WARN, &log->l_opstate)) {
3590 3 : xfs_alert_tag(log->l_mp, XFS_PTAG_LOGRES,
3591 : "%s: cycle - 1 != tail_cycle", __func__);
3592 : }
3593 :
3594 747552556 : if (space > BBTOB(tail_blocks) &&
3595 4117 : !test_and_set_bit(XLOG_TAIL_WARN, &log->l_opstate)) {
3596 141 : xfs_alert_tag(log->l_mp, XFS_PTAG_LOGRES,
3597 : "%s: space > BBTOB(tail_blocks)", __func__);
3598 : }
3599 : }
3600 2515687087 : }
3601 :
3602 : /* check if it will fit */
3603 : STATIC void
3604 45663832 : xlog_verify_tail_lsn(
3605 : struct xlog *log,
3606 : struct xlog_in_core *iclog)
3607 : {
3608 45663832 : xfs_lsn_t tail_lsn = be64_to_cpu(iclog->ic_header.h_tail_lsn);
3609 45663832 : int blocks;
3610 :
3611 45663832 : if (CYCLE_LSN(tail_lsn) == log->l_prev_cycle) {
3612 27893071 : blocks =
3613 27893071 : log->l_logBBsize - (log->l_prev_block - BLOCK_LSN(tail_lsn));
3614 27893071 : if (blocks < BTOBB(iclog->ic_offset)+BTOBB(log->l_iclog_hsize))
3615 0 : xfs_emerg(log->l_mp, "%s: ran out of log space", __func__);
3616 : } else {
3617 17770761 : ASSERT(CYCLE_LSN(tail_lsn)+1 == log->l_prev_cycle);
3618 :
3619 17770761 : if (BLOCK_LSN(tail_lsn) == log->l_prev_block)
3620 0 : xfs_emerg(log->l_mp, "%s: tail wrapped", __func__);
3621 :
3622 17770761 : blocks = BLOCK_LSN(tail_lsn) - log->l_prev_block;
3623 17770761 : if (blocks < BTOBB(iclog->ic_offset) + 1)
3624 0 : xfs_emerg(log->l_mp, "%s: ran out of log space", __func__);
3625 : }
3626 45663832 : }
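/*
 * Standalone sketch (not kernel code) of the LSN cracking used by the
 * verification helpers above, assuming the usual XFS packing of the cycle
 * in the upper 32 bits and the basic block number in the lower 32 bits.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t sketch_cycle_lsn(uint64_t lsn) { return (uint32_t)(lsn >> 32); }
static uint32_t sketch_block_lsn(uint64_t lsn) { return (uint32_t)lsn; }

int main(void)
{
        uint64_t lsn = ((uint64_t)7 << 32) | 20400;     /* cycle 7, block 20400 */

        printf("cycle %u, block %u\n",
               sketch_cycle_lsn(lsn), sketch_block_lsn(lsn));
        return 0;
}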
3627 :
3628 : /*
3629 : * Perform a number of checks on the iclog before writing to disk.
3630 : *
3631 : * 1. Make sure the iclogs are still circular
3632 : * 2. Make sure we have a good magic number
3633 : * 3. Make sure we don't have magic numbers in the data
3634 : * 4. Check fields of each log operation header for:
3635 : * A. Valid client identifier
3636 : * B. tid ptr value falls in valid ptr space (user space code)
3637 : * C. Length in log record header is correct according to the
3638 : * individual operation headers within record.
3639 : * 5. When a bwrite will occur within 5 blocks of the front of the physical
3640 : * log, check the preceding blocks of the physical log to make sure all
3641 : * the cycle numbers agree with the current cycle number.
3642 : */
3643 : STATIC void
3644 45661665 : xlog_verify_iclog(
3645 : struct xlog *log,
3646 : struct xlog_in_core *iclog,
3647 : int count)
3648 : {
3649 45661665 : xlog_op_header_t *ophead;
3650 45661665 : xlog_in_core_t *icptr;
3651 45661665 : xlog_in_core_2_t *xhdr;
3652 45661665 : void *base_ptr, *ptr, *p;
3653 45661665 : ptrdiff_t field_offset;
3654 45661665 : uint8_t clientid;
3655 45661665 : int len, i, j, k, op_len;
3656 45661665 : int idx;
3657 :
3658 : /* check validity of iclog pointers */
3659 45661665 : spin_lock(&log->l_icloglock);
3660 45663830 : icptr = log->l_iclog;
3661 410974319 : for (i = 0; i < log->l_iclog_bufs; i++, icptr = icptr->ic_next)
3662 365310489 : ASSERT(icptr);
3663 :
3664 45663830 : if (icptr != log->l_iclog)
3665 0 : xfs_emerg(log->l_mp, "%s: corrupt iclog ring", __func__);
3666 45663830 : spin_unlock(&log->l_icloglock);
3667 :
3668 : /* check log magic numbers */
3669 45663833 : if (iclog->ic_header.h_magicno != cpu_to_be32(XLOG_HEADER_MAGIC_NUM))
3670 0 : xfs_emerg(log->l_mp, "%s: invalid magic num", __func__);
3671 :
3672 45663833 : base_ptr = ptr = &iclog->ic_header;
3673 45663833 : p = &iclog->ic_header;
3674 2677340077 : for (ptr += BBSIZE; ptr < base_ptr + count; ptr += BBSIZE) {
3675 2631676268 : if (*(__be32 *)ptr == cpu_to_be32(XLOG_HEADER_MAGIC_NUM))
3676 0 : xfs_emerg(log->l_mp, "%s: unexpected magic num",
3677 : __func__);
3678 : }
3679 :
3680 : /* check fields */
3681 45663809 : len = be32_to_cpu(iclog->ic_header.h_num_logops);
3682 45663809 : base_ptr = ptr = iclog->ic_datap;
3683 45663809 : ophead = ptr;
3684 45663809 : xhdr = iclog->ic_data;
3685 3889490233 : for (i = 0; i < len; i++) {
3686 3843826542 : ophead = ptr;
3687 :
3688 : /* clientid is only 1 byte */
3689 3843826542 : p = &ophead->oh_clientid;
3690 3843826542 : field_offset = p - base_ptr;
3691 3843826542 : if (field_offset & 0x1ff) {
3692 3815109813 : clientid = ophead->oh_clientid;
3693 : } else {
3694 28716729 : idx = BTOBBT((void *)&ophead->oh_clientid - iclog->ic_datap);
3695 28716729 : if (idx >= (XLOG_HEADER_CYCLE_SIZE / BBSIZE)) {
3696 26033 : j = idx / (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
3697 26033 : k = idx % (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
3698 26033 : clientid = xlog_get_client_id(
3699 26033 : xhdr[j].hic_xheader.xh_cycle_data[k]);
3700 : } else {
3701 28690707 : clientid = xlog_get_client_id(
3702 28690696 : iclog->ic_header.h_cycle_data[idx]);
3703 : }
3704 : }
3705 3843826553 : if (clientid != XFS_TRANSACTION && clientid != XFS_LOG) {
3706 0 : xfs_warn(log->l_mp,
3707 : "%s: op %d invalid clientid %d op "PTR_FMT" offset 0x%lx",
3708 : __func__, i, clientid, ophead,
3709 : (unsigned long)field_offset);
3710 : }
3711 :
3712 : /* check length */
3713 3843826420 : p = &ophead->oh_len;
3714 3843826420 : field_offset = p - base_ptr;
3715 3843826420 : if (field_offset & 0x1ff) {
3716 3814866714 : op_len = be32_to_cpu(ophead->oh_len);
3717 : } else {
3718 28959706 : idx = BTOBBT((void *)&ophead->oh_len - iclog->ic_datap);
3719 28959706 : if (idx >= (XLOG_HEADER_CYCLE_SIZE / BBSIZE)) {
3720 26206 : j = idx / (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
3721 26206 : k = idx % (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
3722 26206 : op_len = be32_to_cpu(xhdr[j].hic_xheader.xh_cycle_data[k]);
3723 : } else {
3724 28933500 : op_len = be32_to_cpu(iclog->ic_header.h_cycle_data[idx]);
3725 : }
3726 : }
3727 3843826424 : ptr += sizeof(xlog_op_header_t) + op_len;
3728 : }
3729 45663691 : }
3730 : #endif
3731 :
3732 : /*
3733 : * Perform a forced shutdown on the log.
3734 : *
3735 : * This can be called from low level log code to trigger a shutdown, or from the
3736 : * high level mount shutdown code when the mount shuts down.
3737 : *
3738 : * Our main objectives here are to make sure that:
3739 : * a. if the shutdown was not due to a log IO error, flush the logs to
3740 : * disk. Anything modified after this is ignored.
3741 : * b. the log gets atomically marked 'XLOG_IO_ERROR' for all interested
3742 : * parties to find out. Nothing new gets queued after this is done.
3743 : * c. Tasks sleeping on log reservations, pinned objects and
3744 : * other resources get woken up.
3745 : * d. The mount is also marked as shut down so that log triggered shutdowns
3746 : * still behave the same as if they called xfs_forced_shutdown().
3747 : *
3748 : * Return true if the shutdown cause was a log IO error and we actually shut the
3749 : * log down.
3750 : */
3751 : bool
3752 20154 : xlog_force_shutdown(
3753 : struct xlog *log,
3754 : uint32_t shutdown_flags)
3755 : {
3756 20154 : bool log_error = (shutdown_flags & SHUTDOWN_LOG_IO_ERROR);
3757 :
3758 20154 : if (!log)
3759 : return false;
3760 :
3761 : /*
3762 : * Flush all the completed transactions to disk before marking the log
3763 : * as shut down. We need to do this first as shutting down the log
3764 : * before the force will prevent the log force from flushing the iclogs
3765 : * to disk.
3766 : *
3767 : * When we are in recovery, there are no transactions to flush, and
3768 : * we don't want to touch the log because we don't want to perturb the
3769 : * current head/tail for future recovery attempts. Hence we need to
3770 : * avoid a log force in this case.
3771 : *
3772 : * If we are shutting down due to a log IO error, then we must avoid
3773 : * trying to write the log as that may just result in more IO errors and
3774 : * an endless shutdown/force loop.
3775 : */
3776 24576 : if (!log_error && !xlog_in_recovery(log))
3777 4422 : xfs_log_force(log->l_mp, XFS_LOG_SYNC);
3778 :
3779 : /*
3780 : * Atomically set the shutdown state. If the shutdown state is already
3781 : * set, then someone else is performing the shutdown and so we are done
3782 : * here. This should never happen because we should only ever get called
3783 : * once by the first shutdown caller.
3784 : *
3785 : * Much of the log state machine transitions assume that shutdown state
3786 : * cannot change once they hold the log->l_icloglock. Hence we need to
3787 : * hold that lock here, even though we use the atomic test_and_set_bit()
3788 : * operation to set the shutdown state.
3789 : */
3790 20154 : spin_lock(&log->l_icloglock);
3791 20154 : if (test_and_set_bit(XLOG_IO_ERROR, &log->l_opstate)) {
3792 7224 : spin_unlock(&log->l_icloglock);
3793 7224 : return false;
3794 : }
3795 12930 : spin_unlock(&log->l_icloglock);
3796 :
3797 : /*
3798 : * If this log shutdown also sets the mount shutdown state, issue a
3799 : * shutdown warning message.
3800 : */
3801 12930 : if (!test_and_set_bit(XFS_OPSTATE_SHUTDOWN, &log->l_mp->m_opstate)) {
3802 3909 : xfs_alert_tag(log->l_mp, XFS_PTAG_SHUTDOWN_LOGERROR,
3803 : "Filesystem has been shut down due to log error (0x%x).",
3804 : shutdown_flags);
3805 3909 : xfs_alert(log->l_mp,
3806 : "Please unmount the filesystem and rectify the problem(s).");
3807 3909 : if (xfs_error_level >= XFS_ERRLEVEL_HIGH)
3808 0 : xfs_stack_trace();
3809 : }
3810 :
3811 : /*
3812 : * We don't want anybody waiting for log reservations after this. That
3813 : * means we have to wake up everybody queued up on reserveq as well as
3814 : * writeq. In addition, we make sure in xlog_{re}grant_log_space that
3815 : * we don't enqueue anything once the SHUTDOWN flag is set, and this
3816 : * action is protected by the grant locks.
3817 : */
3818 12930 : xlog_grant_head_wake_all(&log->l_reserve_head);
3819 12930 : xlog_grant_head_wake_all(&log->l_write_head);
3820 :
3821 : /*
3822 : * Wake up everybody waiting on xfs_log_force. Wake the CIL push first
3823 : * as if the log writes were completed. The abort handling in the log
3824 : * item committed callback functions will do this again under lock to
3825 : * avoid races.
3826 : */
3827 12930 : spin_lock(&log->l_cilp->xc_push_lock);
3828 12930 : wake_up_all(&log->l_cilp->xc_start_wait);
3829 12930 : wake_up_all(&log->l_cilp->xc_commit_wait);
3830 12930 : spin_unlock(&log->l_cilp->xc_push_lock);
3831 :
3832 12930 : spin_lock(&log->l_icloglock);
3833 12930 : xlog_state_shutdown_callbacks(log);
3834 12930 : spin_unlock(&log->l_icloglock);
3835 :
3836 12930 : wake_up_var(&log->l_opstate);
3837 12930 : return log_error;
3838 : }
3839 :
3840 : STATIC int
3841 343932 : xlog_iclogs_empty(
3842 : struct xlog *log)
3843 : {
3844 343932 : xlog_in_core_t *iclog;
3845 :
3846 343932 : iclog = log->l_iclog;
3847 2736522 : do {
3848 : /* endianness does not matter here, zero is zero in
3849 : * any language.
3850 : */
3851 2736522 : if (iclog->ic_header.h_num_logops)
3852 : return 0;
3853 2729840 : iclog = iclog->ic_next;
3854 2729840 : } while (iclog != log->l_iclog);
3855 : return 1;
3856 : }
3857 :
3858 : /*
3859 : * Verify that an LSN stamped into a piece of metadata is valid. This is
3860 : * intended for use in read verifiers on v5 superblocks.
3861 : */
3862 : bool
3863 143895885 : xfs_log_check_lsn(
3864 : struct xfs_mount *mp,
3865 : xfs_lsn_t lsn)
3866 : {
3867 143895885 : struct xlog *log = mp->m_log;
3868 143895885 : bool valid;
3869 :
3870 : /*
3871 : * norecovery mode skips mount-time log processing and unconditionally
3872 : * resets the in-core LSN. We can't validate in this mode, but
3873 : * modifications are not allowed anyway, so just return true.
3874 : */
3875 143895885 : if (xfs_has_norecovery(mp))
3876 : return true;
3877 :
3878 : /*
3879 : * Some metadata LSNs are initialized to NULL (e.g., the agfl). This is
3880 : * handled by recovery and thus safe to ignore here.
3881 : */
3882 143893361 : if (lsn == NULLCOMMITLSN)
3883 : return true;
3884 :
3885 143338822 : valid = xlog_valid_lsn(mp->m_log, lsn);
3886 :
3887 : /* warn the user about what's gone wrong before verifier failure */
3888 143324655 : if (!valid) {
3889 11 : spin_lock(&log->l_icloglock);
3890 11 : xfs_warn(mp,
3891 : "Corruption warning: Metadata has LSN (%d:%d) ahead of current LSN (%d:%d). "
3892 : "Please unmount and run xfs_repair (>= v4.3) to resolve.",
3893 : CYCLE_LSN(lsn), BLOCK_LSN(lsn),
3894 : log->l_curr_cycle, log->l_curr_block);
3895 11 : spin_unlock(&log->l_icloglock);
3896 : }
3897 :
3898 : return valid;
3899 : }
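/*
 * Standalone sketch (not kernel code) of the "metadata LSN ahead of the
 * current head" comparison that makes xfs_log_check_lsn() warn. This is a
 * simplified illustration, not the kernel's xlog_valid_lsn().
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool sketch_lsn_ahead(uint32_t lsn_cycle, uint32_t lsn_block,
                             uint32_t cur_cycle, uint32_t cur_block)
{
        if (lsn_cycle != cur_cycle)
                return lsn_cycle > cur_cycle;
        return lsn_block > cur_block;
}

int main(void)
{
        /* A stamped LSN of (8, 100) is ahead of a current head of (7, 20400). */
        printf("%d\n", sketch_lsn_ahead(8, 100, 7, 20400));
        return 0;
}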
3900 :
3901 : /*
3902 : * Notify the log that we're about to start using a feature that is protected
3903 : * by a log incompat feature flag. This will prevent log covering from
3904 : * clearing those flags.
3905 : */
3906 : void
3907 243764987 : xlog_use_incompat_feat(
3908 : struct xlog *log,
3909 : enum xlog_incompat_feat what)
3910 : {
3911 243764987 : switch (what) {
3912 236941868 : case XLOG_INCOMPAT_FEAT_XATTRS:
3913 236941868 : down_read(&log->l_incompat_xattrs);
3914 236941868 : break;
3915 6823119 : case XLOG_INCOMPAT_FEAT_SWAPEXT:
3916 6823119 : down_read(&log->l_incompat_swapext);
3917 6823119 : break;
3918 : }
3919 244045234 : }
3920 :
3921 : /* Notify the log that we've finished using log incompat features. */
3922 : void
3923 243825890 : xlog_drop_incompat_feat(
3924 : struct xlog *log,
3925 : enum xlog_incompat_feat what)
3926 : {
3927 243825890 : switch (what) {
3928 237010016 : case XLOG_INCOMPAT_FEAT_XATTRS:
3929 237010016 : up_read(&log->l_incompat_xattrs);
3930 237010016 : break;
3931 6815874 : case XLOG_INCOMPAT_FEAT_SWAPEXT:
3932 6815874 : up_read(&log->l_incompat_swapext);
3933 6815874 : break;
3934 : }
3935 244022352 : }
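/*
 * Standalone sketch (not kernel code) of the use/drop pairing above, with a
 * pthread rwlock standing in for the kernel rw_semaphore: readers hold the
 * lock while a log-incompat protected operation is in flight, so a writer
 * (log covering, which would clear the feature flag) must wait for them.
 */
#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t sketch_incompat_xattrs = PTHREAD_RWLOCK_INITIALIZER;

static void sketch_protected_op(void)
{
        pthread_rwlock_rdlock(&sketch_incompat_xattrs);         /* "use" */
        puts("feature in use; the flag cannot be cleared right now");
        pthread_rwlock_unlock(&sketch_incompat_xattrs);         /* "drop" */
}

int main(void)
{
        sketch_protected_op();
        return 0;
}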