Line data Source code
1 : // SPDX-License-Identifier: GPL-2.0
2 : /*
3 : * Copyright (c) 2000-2005 Silicon Graphics, Inc.
4 : * All Rights Reserved.
5 : */
6 : #include "xfs.h"
7 : #include "xfs_fs.h"
8 : #include "xfs_shared.h"
9 : #include "xfs_format.h"
10 : #include "xfs_log_format.h"
11 : #include "xfs_trans_resv.h"
12 : #include "xfs_mount.h"
13 : #include "xfs_errortag.h"
14 : #include "xfs_error.h"
15 : #include "xfs_trans.h"
16 : #include "xfs_trans_priv.h"
17 : #include "xfs_log.h"
18 : #include "xfs_log_priv.h"
19 : #include "xfs_trace.h"
20 : #include "xfs_sysfs.h"
21 : #include "xfs_sb.h"
22 : #include "xfs_health.h"
23 :
24 : struct kmem_cache *xfs_log_ticket_cache;
25 :
26 : /* Local miscellaneous function prototypes */
27 : STATIC struct xlog *
28 : xlog_alloc_log(
29 : struct xfs_mount *mp,
30 : struct xfs_buftarg *log_target,
31 : xfs_daddr_t blk_offset,
32 : int num_bblks);
33 : STATIC int
34 : xlog_space_left(
35 : struct xlog *log,
36 : atomic64_t *head);
37 : STATIC void
38 : xlog_dealloc_log(
39 : struct xlog *log);
40 :
41 : /* local state machine functions */
42 : STATIC void xlog_state_done_syncing(
43 : struct xlog_in_core *iclog);
44 : STATIC void xlog_state_do_callback(
45 : struct xlog *log);
46 : STATIC int
47 : xlog_state_get_iclog_space(
48 : struct xlog *log,
49 : int len,
50 : struct xlog_in_core **iclog,
51 : struct xlog_ticket *ticket,
52 : int *logoffsetp);
53 : STATIC void
54 : xlog_grant_push_ail(
55 : struct xlog *log,
56 : int need_bytes);
57 : STATIC void
58 : xlog_sync(
59 : struct xlog *log,
60 : struct xlog_in_core *iclog,
61 : struct xlog_ticket *ticket);
62 : #if defined(DEBUG)
63 : STATIC void
64 : xlog_verify_grant_tail(
65 : struct xlog *log);
66 : STATIC void
67 : xlog_verify_iclog(
68 : struct xlog *log,
69 : struct xlog_in_core *iclog,
70 : int count);
71 : STATIC void
72 : xlog_verify_tail_lsn(
73 : struct xlog *log,
74 : struct xlog_in_core *iclog);
75 : #else
76 : #define xlog_verify_grant_tail(a)
77 : #define xlog_verify_iclog(a,b,c)
78 : #define xlog_verify_tail_lsn(a,b)
79 : #endif
80 :
81 : STATIC int
82 : xlog_iclogs_empty(
83 : struct xlog *log);
84 :
85 : static int
86 : xfs_log_cover(struct xfs_mount *);
87 :
88 : /*
89 : * We need to make sure the buffer pointer returned is naturally aligned for the
90 : * biggest basic data type we put into it. We have already accounted for this
91 : * padding when sizing the buffer.
92 : *
93 : * However, this padding does not get written into the log, and hence we have to
94 : * track the space used by the log vectors separately to prevent log space hangs
95 : * due to inaccurate accounting (i.e. a leak) of the used log space through the
96 : * CIL context ticket.
97 : *
98 : * We also add space for the xlog_op_header that describes this region in the
99 : * log. This prepends the data region we return to the caller to copy their data
100 : * into, so do all the static initialisation of the ophdr now. Because the ophdr
101 : * is not 8 byte aligned, we have to be careful to ensure that we align the
102 : * start of the buffer such that the region we return to the caller is 8 byte
103 : * aligned and packed against the tail of the ophdr.
104 : */
105 : void *
106 27181984458 : xlog_prepare_iovec(
107 : struct xfs_log_vec *lv,
108 : struct xfs_log_iovec **vecp,
109 : uint type)
110 : {
111 27181984458 : struct xfs_log_iovec *vec = *vecp;
112 27181984458 : struct xlog_op_header *oph;
113 27181984458 : uint32_t len;
114 27181984458 : void *buf;
115 :
116 27181984458 : if (vec) {
117 16037284571 : ASSERT(vec - lv->lv_iovecp < lv->lv_niovecs);
118 16037284571 : vec++;
119 : } else {
120 11144699887 : vec = &lv->lv_iovecp[0];
121 : }
122 :
123 27181984458 : len = lv->lv_buf_len + sizeof(struct xlog_op_header);
124 27181984458 : if (!IS_ALIGNED(len, sizeof(uint64_t))) {
125 26965886016 : lv->lv_buf_len = round_up(len, sizeof(uint64_t)) -
126 : sizeof(struct xlog_op_header);
127 : }
128 :
129 27181984458 : vec->i_type = type;
130 27181984458 : vec->i_addr = lv->lv_buf + lv->lv_buf_len;
131 :
132 27181984458 : oph = vec->i_addr;
133 27181984458 : oph->oh_clientid = XFS_TRANSACTION;
134 27181984458 : oph->oh_res2 = 0;
135 27181984458 : oph->oh_flags = 0;
136 :
137 27181984458 : buf = vec->i_addr + sizeof(struct xlog_op_header);
138 27181984458 : ASSERT(IS_ALIGNED((unsigned long)buf, sizeof(uint64_t)));
139 :
140 27181984458 : *vecp = vec;
141 27181984458 : return buf;
142 : }
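/*
 * Worked example of the round-up above (illustrative only, assuming the
 * usual 12-byte struct xlog_op_header): if lv->lv_buf_len is currently 99,
 * then len = 99 + 12 = 111, which is not 8-byte aligned, so lv_buf_len is
 * bumped to round_up(111, 8) - 12 = 100.  The buffer handed back to the
 * caller then starts at lv_buf + 100 + 12 = lv_buf + 112, satisfying the
 * 8-byte alignment ASSERT above (lv_buf itself being 8-byte aligned).
 */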
143 :
144 : static void
145 5750440111 : xlog_grant_sub_space(
146 : struct xlog *log,
147 : atomic64_t *head,
148 : int bytes)
149 : {
150 5750440111 : int64_t head_val = atomic64_read(head);
151 5754427645 : int64_t new, old;
152 :
153 5754427645 : do {
154 5754427645 : int cycle, space;
155 :
156 5754427645 : xlog_crack_grant_head_val(head_val, &cycle, &space);
157 :
158 5754427645 : space -= bytes;
159 5754427645 : if (space < 0) {
160 43937264 : space += log->l_logsize;
161 43937264 : cycle--;
162 : }
163 :
164 5754427645 : old = head_val;
165 5754427645 : new = xlog_assign_grant_head_val(cycle, space);
166 5754427645 : head_val = atomic64_cmpxchg(head, old, new);
167 5755690692 : } while (head_val != old);
168 5751703158 : }
169 :
170 : static void
171 3896812686 : xlog_grant_add_space(
172 : struct xlog *log,
173 : atomic64_t *head,
174 : int bytes)
175 : {
176 3896812686 : int64_t head_val = atomic64_read(head);
177 3904997474 : int64_t new, old;
178 :
179 3904997474 : do {
180 3904997474 : int tmp;
181 3904997474 : int cycle, space;
182 :
183 3904997474 : xlog_crack_grant_head_val(head_val, &cycle, &space);
184 :
185 3904997474 : tmp = log->l_logsize - space;
186 3904997474 : if (tmp > bytes)
187 3861010763 : space += bytes;
188 : else {
189 43986711 : space = bytes - tmp;
190 43986711 : cycle++;
191 : }
192 :
193 3904997474 : old = head_val;
194 3904997474 : new = xlog_assign_grant_head_val(cycle, space);
195 3904997474 : head_val = atomic64_cmpxchg(head, old, new);
196 3908467667 : } while (head_val != old);
197 3900282879 : }
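/*
 * The grant heads updated above pack a (cycle, space) pair into a single
 * atomic64_t so reservation accounting can proceed without a lock.  A
 * minimal sketch of the crack/assign helpers this relies on (they are
 * defined in xfs_log_priv.h; reproduced here purely for illustration):
 *
 *	static inline void
 *	xlog_crack_grant_head_val(int64_t val, int *cycle, int *space)
 *	{
 *		*cycle = val >> 32;
 *		*space = val & 0xffffffff;
 *	}
 *
 *	static inline int64_t
 *	xlog_assign_grant_head_val(int cycle, int space)
 *	{
 *		return ((int64_t)cycle << 32) | space;
 *	}
 *
 * The cmpxchg loops above simply retry until no other CPU has modified the
 * packed value between the read and the update.
 */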
198 :
199 : STATIC void
200 121528 : xlog_grant_head_init(
201 : struct xlog_grant_head *head)
202 : {
203 121528 : xlog_assign_grant_head(&head->grant, 1, 0);
204 121528 : INIT_LIST_HEAD(&head->waiters);
205 121528 : spin_lock_init(&head->lock);
206 121528 : }
207 :
208 : STATIC void
209 26436 : xlog_grant_head_wake_all(
210 : struct xlog_grant_head *head)
211 : {
212 26436 : struct xlog_ticket *tic;
213 :
214 26436 : spin_lock(&head->lock);
215 26436 : list_for_each_entry(tic, &head->waiters, t_queue)
216 0 : wake_up_process(tic->t_task);
217 26436 : spin_unlock(&head->lock);
218 26436 : }
219 :
220 : static inline int
221 2497730599 : xlog_ticket_reservation(
222 : struct xlog *log,
223 : struct xlog_grant_head *head,
224 : struct xlog_ticket *tic)
225 : {
226 2497730599 : if (head == &log->l_write_head) {
227 407182674 : ASSERT(tic->t_flags & XLOG_TIC_PERM_RESERV);
228 407182674 : return tic->t_unit_res;
229 : }
230 :
231 2090547925 : if (tic->t_flags & XLOG_TIC_PERM_RESERV)
232 1581277982 : return tic->t_unit_res * tic->t_cnt;
233 :
234 509269943 : return tic->t_unit_res;
235 : }
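/*
 * Note on the cases above: the write grant head is only ever replenished one
 * unit at a time as a permanent transaction rolls (see xfs_log_regrant()),
 * whereas a new permanent reservation on the reserve grant head must cover
 * all t_cnt future rolls up front, hence unit_res * t_cnt.
 */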
236 :
237 : STATIC bool
238 19313726 : xlog_grant_head_wake(
239 : struct xlog *log,
240 : struct xlog_grant_head *head,
241 : int *free_bytes)
242 : {
243 19313726 : struct xlog_ticket *tic;
244 19313726 : int need_bytes;
245 19313726 : bool woken_task = false;
246 :
247 558148832 : list_for_each_entry(tic, &head->waiters, t_queue) {
248 :
249 : /*
250 : * There is a chance that the size of the CIL checkpoints in
251 : * progress at the last AIL push target calculation resulted in
252 : * limiting the target to the log head (l_last_sync_lsn) at the
253 : * time. This may not reflect where the log head is now as the
254 : * CIL checkpoints may have completed.
255 : *
256 : * Hence when we are woken here, it may be that the head of the
257 : * log that has moved rather than the tail. As the tail didn't
258 : * move, there still won't be space available for the
259 : * reservation we require. However, if the AIL has already
260 : * pushed to the target defined by the old log head location, we
261 : * will hang here waiting for something else to update the AIL
262 : * push target.
263 : *
264 : * Therefore, if there isn't space to wake the first waiter on
265 : * the grant head, we need to push the AIL again to ensure the
266 : * target reflects both the current log tail and log head
267 : * position before we wait for the tail to move again.
268 : */
269 :
270 555528324 : need_bytes = xlog_ticket_reservation(log, head, tic);
271 555528326 : if (*free_bytes < need_bytes) {
272 16693219 : if (!woken_task)
273 781743 : xlog_grant_push_ail(log, need_bytes);
274 16693219 : return false;
275 : }
276 :
277 538835107 : *free_bytes -= need_bytes;
278 538835107 : trace_xfs_log_grant_wake_up(log, tic);
279 538835107 : wake_up_process(tic->t_task);
280 538835106 : woken_task = true;
281 : }
282 :
283 : return true;
284 : }
285 :
286 : STATIC int
287 8687290 : xlog_grant_head_wait(
288 : struct xlog *log,
289 : struct xlog_grant_head *head,
290 : struct xlog_ticket *tic,
291 : int need_bytes) __releases(&head->lock)
292 : __acquires(&head->lock)
293 : {
294 8687290 : list_add_tail(&tic->t_queue, &head->waiters);
295 :
296 8692625 : do {
297 17385250 : if (xlog_is_shutdown(log))
298 0 : goto shutdown;
299 8692625 : xlog_grant_push_ail(log, need_bytes);
300 :
301 8692625 : __set_current_state(TASK_UNINTERRUPTIBLE);
302 8692625 : spin_unlock(&head->lock);
303 :
304 8692585 : XFS_STATS_INC(log->l_mp, xs_sleep_logspace);
305 :
306 8692593 : trace_xfs_log_grant_sleep(log, tic);
307 8692504 : schedule();
308 8608563 : trace_xfs_log_grant_wake(log, tic);
309 :
310 8539492 : spin_lock(&head->lock);
311 17385250 : if (xlog_is_shutdown(log))
312 0 : goto shutdown;
313 8692625 : } while (xlog_space_left(log, &head->grant) < need_bytes);
314 :
315 8687290 : list_del_init(&tic->t_queue);
316 8687290 : return 0;
317 0 : shutdown:
318 0 : list_del_init(&tic->t_queue);
319 0 : return -EIO;
320 : }
321 :
322 : /*
323 : * Atomically get the log space required for a log ticket.
324 : *
325 : * Once a ticket gets put onto head->waiters, it will only return after the
326 : * needed reservation is satisfied.
327 : *
328 : * This function is structured so that it has a lock free fast path. This is
329 : * necessary because every new transaction reservation will come through this
330 : * path. Hence any lock will be globally hot if we take it unconditionally on
331 : * every pass.
332 : *
333 : * As tickets are only ever moved on and off head->waiters under head->lock, we
334 : * only need to take that lock if we are going to add the ticket to the queue
335 : * and sleep. We can avoid taking the lock if the ticket was never added to
336 : * head->waiters because the t_queue list head will be empty and we hold the
337 : * only reference to it so it can safely be checked unlocked.
338 : */
339 : STATIC int
340 1943013954 : xlog_grant_head_check(
341 : struct xlog *log,
342 : struct xlog_grant_head *head,
343 : struct xlog_ticket *tic,
344 : int *need_bytes)
345 : {
346 1943013954 : int free_bytes;
347 1943013954 : int error = 0;
348 :
349 3886027908 : ASSERT(!xlog_in_recovery(log));
350 :
351 : /*
352 : * If there are other waiters on the queue then give them a chance at
353 : * logspace before us. Wake up the first waiters; if we do not wake
354 : * up all the waiters then go to sleep waiting for more free space,
355 : * otherwise try to get some space for this transaction.
356 : */
357 1943013954 : *need_bytes = xlog_ticket_reservation(log, head, tic);
358 1942077583 : free_bytes = xlog_space_left(log, &head->grant);
359 1942155102 : if (!list_empty_careful(&head->waiters)) {
360 9076607 : spin_lock(&head->lock);
361 9555782 : if (!xlog_grant_head_wake(log, head, &free_bytes) ||
362 1104569 : free_bytes < *need_bytes) {
363 8633311 : error = xlog_grant_head_wait(log, head, tic,
364 : *need_bytes);
365 : }
366 9555782 : spin_unlock(&head->lock);
367 1933632490 : } else if (free_bytes < *need_bytes) {
368 53764 : spin_lock(&head->lock);
369 53979 : error = xlog_grant_head_wait(log, head, tic, *need_bytes);
370 53979 : spin_unlock(&head->lock);
371 : }
372 :
373 1943187474 : return error;
374 : }
375 :
376 : bool
377 183399 : xfs_log_writable(
378 : struct xfs_mount *mp)
379 : {
380 : /*
381 : * Do not write to the log on norecovery mounts, if the data or log
382 : * devices are read-only, or if the filesystem is shutdown. Read-only
383 : * mounts allow internal writes for log recovery and unmount purposes,
384 : * so don't restrict that case.
385 : */
386 183399 : if (xfs_has_norecovery(mp))
387 : return false;
388 183291 : if (xfs_readonly_buftarg(mp->m_ddev_targp))
389 : return false;
390 183267 : if (xfs_readonly_buftarg(mp->m_log->l_targ))
391 : return false;
392 366534 : if (xlog_is_shutdown(mp->m_log))
393 26403 : return false;
394 : return true;
395 : }
396 :
397 : /*
398 : * Replenish the byte reservation required by moving the grant write head.
399 : */
400 : int
401 1330912937 : xfs_log_regrant(
402 : struct xfs_mount *mp,
403 : struct xlog_ticket *tic)
404 : {
405 1330912937 : struct xlog *log = mp->m_log;
406 1330912937 : int need_bytes;
407 1330912937 : int error = 0;
408 :
409 2661825874 : if (xlog_is_shutdown(log))
410 : return -EIO;
411 :
412 1330912905 : XFS_STATS_INC(mp, xs_try_logspace);
413 :
414 : /*
415 : * This is a new transaction on the ticket, so we need to change the
416 : * transaction ID so that the next transaction has a different TID in
417 : * the log. Just add one to the existing tid so that we can see chains
418 : * of rolling transactions in the log easily.
419 : */
420 1330861139 : tic->t_tid++;
421 :
422 1330861139 : xlog_grant_push_ail(log, tic->t_unit_res);
423 :
424 1330857195 : tic->t_curr_res = tic->t_unit_res;
425 1330857195 : if (tic->t_cnt > 0)
426 : return 0;
427 :
428 407196731 : trace_xfs_log_regrant(log, tic);
429 :
430 407183078 : error = xlog_grant_head_check(log, &log->l_write_head, tic,
431 : &need_bytes);
432 407194779 : if (error)
433 0 : goto out_error;
434 :
435 407194779 : xlog_grant_add_space(log, &log->l_write_head.grant, need_bytes);
436 407211917 : trace_xfs_log_regrant_exit(log, tic);
437 407209007 : xlog_verify_grant_tail(log);
438 407209007 : return 0;
439 :
440 : out_error:
441 : /*
442 : * If we are failing, make sure the ticket doesn't have any current
443 : * reservations. We don't want to add this back when the ticket/
444 : * transaction gets cancelled.
445 : */
446 0 : tic->t_curr_res = 0;
447 0 : tic->t_cnt = 0; /* ungrant will give back unit_res * t_cnt. */
448 0 : return error;
449 : }
450 :
451 : /*
452 : * Reserve log space and return a ticket corresponding to the reservation.
453 : *
454 : * Each reservation is going to reserve extra space for a log record header.
455 : * When writes happen to the on-disk log, we don't subtract the length of the
456 : * log record header from any reservation. By wasting space in each
457 : * reservation, we prevent over allocation problems.
458 : */
459 : int
460 1536237642 : xfs_log_reserve(
461 : struct xfs_mount *mp,
462 : int unit_bytes,
463 : int cnt,
464 : struct xlog_ticket **ticp,
465 : bool permanent)
466 : {
467 1536237642 : struct xlog *log = mp->m_log;
468 1536237642 : struct xlog_ticket *tic;
469 1536237642 : int need_bytes;
470 1536237642 : int error = 0;
471 :
472 3072475284 : if (xlog_is_shutdown(log))
473 : return -EIO;
474 :
475 1536235019 : XFS_STATS_INC(mp, xs_try_logspace);
476 :
477 1536112807 : ASSERT(*ticp == NULL);
478 1536112807 : tic = xlog_ticket_alloc(log, unit_bytes, cnt, permanent);
479 1536255134 : *ticp = tic;
480 :
481 1536255134 : xlog_grant_push_ail(log, tic->t_cnt ? tic->t_unit_res * tic->t_cnt
482 : : tic->t_unit_res);
483 :
484 1535848543 : trace_xfs_log_reserve(log, tic);
485 :
486 1535743973 : error = xlog_grant_head_check(log, &log->l_reserve_head, tic,
487 : &need_bytes);
488 1535648897 : if (error)
489 0 : goto out_error;
490 :
491 1535648897 : xlog_grant_add_space(log, &log->l_reserve_head.grant, need_bytes);
492 1538340519 : xlog_grant_add_space(log, &log->l_write_head.grant, need_bytes);
493 1538959530 : trace_xfs_log_reserve_exit(log, tic);
494 1538691026 : xlog_verify_grant_tail(log);
495 1538691026 : return 0;
496 :
497 : out_error:
498 : /*
499 : * If we are failing, make sure the ticket doesn't have any current
500 : * reservations. We don't want to add this back when the ticket/
501 : * transaction gets cancelled.
502 : */
503 0 : tic->t_curr_res = 0;
504 0 : tic->t_cnt = 0; /* ungrant will give back unit_res * t_cnt. */
505 0 : return error;
506 : }
507 :
508 : /*
509 : * Run all the pending iclog callbacks and wake log force waiters and iclog
510 : * space waiters so they can process the newly set shutdown state. We really
511 : * don't care what order we process callbacks here because the log is shut down
512 : * and so state cannot change on disk anymore. However, we cannot wake waiters
513 : * until the callbacks have been processed because we may be in unmount and
514 : * we must ensure that all AIL operations the callbacks perform have completed
515 : * before we tear down the AIL.
516 : *
517 : * We avoid processing actively referenced iclogs so that we don't run callbacks
518 : * while the iclog owner might still be preparing the iclog for IO submission.
519 : * These will be caught by xlog_state_iclog_release() and call this function
520 : * again to process any callbacks that may have been added to that iclog.
521 : */
522 : static void
523 15342 : xlog_state_shutdown_callbacks(
524 : struct xlog *log)
525 : {
526 15342 : struct xlog_in_core *iclog;
527 15342 : LIST_HEAD(cb_list);
528 :
529 15342 : iclog = log->l_iclog;
530 122736 : do {
531 122736 : if (atomic_read(&iclog->ic_refcnt)) {
532 : /* Reference holder will re-run iclog callbacks. */
533 2118 : continue;
534 : }
535 120618 : list_splice_init(&iclog->ic_callbacks, &cb_list);
536 120618 : spin_unlock(&log->l_icloglock);
537 :
538 120618 : xlog_cil_process_committed(&cb_list);
539 :
540 120618 : spin_lock(&log->l_icloglock);
541 120618 : wake_up_all(&iclog->ic_write_wait);
542 120618 : wake_up_all(&iclog->ic_force_wait);
543 122736 : } while ((iclog = iclog->ic_next) != log->l_iclog);
544 :
545 15342 : wake_up_all(&log->l_flush_wait);
546 15342 : }
547 :
548 : /*
549 : * Flush iclog to disk if this is the last reference to the given iclog and
550 : * it is in the WANT_SYNC state.
551 : *
552 : * If XLOG_ICL_NEED_FUA is already set on the iclog, we need to ensure that the
553 : * log tail is updated correctly. NEED_FUA indicates that the iclog will be
554 : * written to stable storage, and implies that a commit record is contained
555 : * within the iclog. We need to ensure that the log tail does not move beyond
556 : * the tail that the first commit record in the iclog ordered against, otherwise
557 : * correct recovery of that checkpoint becomes dependent on future operations
558 : * performed on this iclog.
559 : *
560 : * Hence if NEED_FUA is set and the current iclog tail lsn is empty, write the
561 : * current tail into iclog. Once the iclog tail is set, future operations must
562 : * not modify it, otherwise they potentially violate ordering constraints for
563 : * the checkpoint commit that wrote the initial tail lsn value. The tail lsn in
564 : * the iclog will get zeroed on activation of the iclog after sync, so we
565 : * always capture the tail lsn on the iclog on the first NEED_FUA release
566 : * regardless of the number of active reference counts on this iclog.
567 : */
568 : int
569 45694474 : xlog_state_release_iclog(
570 : struct xlog *log,
571 : struct xlog_in_core *iclog,
572 : struct xlog_ticket *ticket)
573 : {
574 45694474 : xfs_lsn_t tail_lsn;
575 45694474 : bool last_ref;
576 :
577 45694474 : lockdep_assert_held(&log->l_icloglock);
578 :
579 45694474 : trace_xlog_iclog_release(iclog, _RET_IP_);
580 : /*
581 : * Grabbing the current log tail needs to be atomic w.r.t. the writing
582 : * of the tail LSN into the iclog so we guarantee that the log tail does
583 : * not move between the first time we know that the iclog needs to be
584 : * made stable and when we eventually submit it.
585 : */
586 45694473 : if ((iclog->ic_state == XLOG_STATE_WANT_SYNC ||
587 16828661 : (iclog->ic_flags & XLOG_ICL_NEED_FUA)) &&
588 34113135 : !iclog->ic_header.h_tail_lsn) {
589 28807701 : tail_lsn = xlog_assign_tail_lsn(log->l_mp);
590 28807702 : iclog->ic_header.h_tail_lsn = cpu_to_be64(tail_lsn);
591 : }
592 :
593 45694474 : last_ref = atomic_dec_and_test(&iclog->ic_refcnt);
594 :
595 91388960 : if (xlog_is_shutdown(log)) {
596 : /*
597 : * If there are no more references to this iclog, process the
598 : * pending iclog callbacks that were waiting on the release of
599 : * this iclog.
600 : */
601 2124 : if (last_ref)
602 2124 : xlog_state_shutdown_callbacks(log);
603 2124 : return -EIO;
604 : }
605 :
606 45692356 : if (!last_ref)
607 : return 0;
608 :
609 39832134 : if (iclog->ic_state != XLOG_STATE_WANT_SYNC) {
610 11026590 : ASSERT(iclog->ic_state == XLOG_STATE_ACTIVE);
611 11026590 : return 0;
612 : }
613 :
614 28805544 : iclog->ic_state = XLOG_STATE_SYNCING;
615 28805544 : xlog_verify_tail_lsn(log, iclog);
616 28805542 : trace_xlog_iclog_syncing(iclog, _RET_IP_);
617 :
618 28805542 : spin_unlock(&log->l_icloglock);
619 28805547 : xlog_sync(log, iclog, ticket);
620 28805382 : spin_lock(&log->l_icloglock);
621 28805382 : return 0;
622 : }
623 :
624 : /*
625 : * Mount the log of a filesystem
626 : *
627 : * mp - ubiquitous xfs mount point structure
628 : * log_target - buftarg of on-disk log device
629 : * blk_offset - Start block # where block size is 512 bytes (BBSIZE)
630 : * num_bblks - Number of BBSIZE blocks in on-disk log
631 : *
632 : * Return error or zero.
633 : */
634 : int
635 60764 : xfs_log_mount(
636 : xfs_mount_t *mp,
637 : xfs_buftarg_t *log_target,
638 : xfs_daddr_t blk_offset,
639 : int num_bblks)
640 : {
641 60764 : struct xlog *log;
642 60764 : int error = 0;
643 60764 : int min_logfsbs;
644 :
645 60764 : if (!xfs_has_norecovery(mp)) {
646 60710 : xfs_notice(mp, "Mounting V%d Filesystem %pU",
647 : XFS_SB_VERSION_NUM(&mp->m_sb),
648 : &mp->m_sb.sb_uuid);
649 : } else {
650 54 : xfs_notice(mp,
651 : "Mounting V%d filesystem %pU in no-recovery mode. Filesystem will be inconsistent.",
652 : XFS_SB_VERSION_NUM(&mp->m_sb),
653 : &mp->m_sb.sb_uuid);
654 108 : ASSERT(xfs_is_readonly(mp));
655 : }
656 :
657 60764 : log = xlog_alloc_log(mp, log_target, blk_offset, num_bblks);
658 60764 : if (IS_ERR(log)) {
659 0 : error = PTR_ERR(log);
660 0 : goto out;
661 : }
662 60764 : mp->m_log = log;
663 :
664 : /*
665 : * Now that we have set up the log and its internal geometry
666 : * parameters, we can validate the given log space and drop a critical
667 : * message via syslog if the log size is too small. A log that is too
668 : * small can lead to unexpected situations in transaction log space
669 : * reservation stage. The superblock verifier has already validated all
670 : * the other log geometry constraints, so we don't have to check those
671 : * here.
672 : *
673 : * Note: For v4 filesystems, we can't just reject the mount if the
674 : * validation fails. This would mean that people would have to
675 : * downgrade their kernel just to remedy the situation as there is no
676 : * way to grow the log (short of black magic surgery with xfs_db).
677 : *
678 : * We can, however, reject mounts for V5 format filesystems, as the
679 : * mkfs binary being used to make the filesystem should never create a
680 : * filesystem with a log that is too small.
681 : */
682 60764 : min_logfsbs = xfs_log_calc_minimum_size(mp);
683 60764 : if (mp->m_sb.sb_logblocks < min_logfsbs) {
684 0 : xfs_warn(mp,
685 : "Log size %d blocks too small, minimum size is %d blocks",
686 : mp->m_sb.sb_logblocks, min_logfsbs);
687 :
688 : /*
689 : * Log check errors are always fatal on v5; or whenever bad
690 : * metadata leads to a crash.
691 : */
692 0 : if (xfs_has_crc(mp)) {
693 0 : xfs_crit(mp, "AAIEEE! Log failed size checks. Abort!");
694 0 : ASSERT(0);
695 0 : error = -EINVAL;
696 0 : goto out_free_log;
697 : }
698 0 : xfs_crit(mp, "Log size out of supported range.");
699 0 : xfs_crit(mp,
700 : "Continuing onwards, but if log hangs are experienced then please report this message in the bug report.");
701 : }
702 :
703 : /*
704 : * Initialize the AIL now we have a log.
705 : */
706 60764 : error = xfs_trans_ail_init(mp);
707 60764 : if (error) {
708 0 : xfs_warn(mp, "AIL initialisation failed: error %d", error);
709 0 : goto out_free_log;
710 : }
711 60764 : log->l_ailp = mp->m_ail;
712 :
713 : /*
714 : * skip log recovery on a norecovery mount. pretend it all
715 : * just worked.
716 : */
717 60764 : if (!xfs_has_norecovery(mp)) {
718 : /*
719 : * log recovery ignores readonly state and so we need to clear
720 : * mount-based read only state so it can write to disk.
721 : */
722 60710 : bool readonly = test_and_clear_bit(XFS_OPSTATE_READONLY,
723 60710 : &mp->m_opstate);
724 60710 : error = xlog_recover(log);
725 60710 : if (readonly)
726 2225 : set_bit(XFS_OPSTATE_READONLY, &mp->m_opstate);
727 60710 : if (error) {
728 42 : xfs_warn(mp, "log mount/recovery failed: error %d",
729 : error);
730 42 : xlog_recover_cancel(log);
731 42 : goto out_destroy_ail;
732 : }
733 : }
734 :
735 60722 : error = xfs_sysfs_init(&log->l_kobj, &xfs_log_ktype, &mp->m_kobj,
736 : "log");
737 60722 : if (error)
738 0 : goto out_destroy_ail;
739 :
740 : /* Normal transactions can now occur */
741 60722 : clear_bit(XLOG_ACTIVE_RECOVERY, &log->l_opstate);
742 :
743 : /*
744 : * Now the log has been fully initialised and we know where our
745 : * space grant counters are, we can initialise the permanent ticket
746 : * needed for delayed logging to work.
747 : */
748 60722 : xlog_cil_init_post_recovery(log);
749 :
750 60722 : return 0;
751 :
752 42 : out_destroy_ail:
753 42 : xfs_trans_ail_destroy(mp);
754 42 : out_free_log:
755 42 : xlog_dealloc_log(log);
756 : out:
757 : return error;
758 : }
759 :
760 : /*
761 : * Finish the recovery of the file system. This is separate from the
762 : * xfs_log_mount() call, because it depends on the code in xfs_mountfs() to read
763 : * in the root and real-time bitmap inodes between calling xfs_log_mount() and
764 : * here.
765 : *
766 : * If we finish recovery successfully, start the background log work. If we are
767 : * not doing recovery, then we have a RO filesystem and we don't need to start
768 : * it.
769 : */
770 : int
771 60676 : xfs_log_mount_finish(
772 : struct xfs_mount *mp)
773 : {
774 60676 : struct xlog *log = mp->m_log;
775 60676 : bool readonly;
776 60676 : int error = 0;
777 :
778 60676 : if (xfs_has_norecovery(mp)) {
779 98 : ASSERT(xfs_is_readonly(mp));
780 49 : return 0;
781 : }
782 :
783 : /*
784 : * log recovery ignores readonly state and so we need to clear
785 : * mount-based read only state so it can write to disk.
786 : */
787 60627 : readonly = test_and_clear_bit(XFS_OPSTATE_READONLY, &mp->m_opstate);
788 :
789 : /*
790 : * During the second phase of log recovery, we need iget and
791 : * iput to behave like they do for an active filesystem.
792 : * xfs_fs_drop_inode needs to be able to prevent the deletion
793 : * of inodes before we're done replaying log items on those
794 : * inodes. Turn it off immediately after recovery finishes
795 : * so that we don't leak the quota inodes if subsequent mount
796 : * activities fail.
797 : *
798 : * We let all inodes involved in redo item processing end up on
799 : * the LRU instead of being evicted immediately so that if we do
800 : * something to an unlinked inode, the irele won't cause
801 : * premature truncation and freeing of the inode, which results
802 : * in log recovery failure. We have to evict the unreferenced
803 : * lru inodes after clearing SB_ACTIVE because we don't
804 : * otherwise clean up the lru if there's a subsequent failure in
805 : * xfs_mountfs, which leads to us leaking the inodes if nothing
806 : * else (e.g. quotacheck) references the inodes before the
807 : * mount failure occurs.
808 : */
809 60627 : mp->m_super->s_flags |= SB_ACTIVE;
810 60627 : xfs_log_work_queue(mp);
811 121254 : if (xlog_recovery_needed(log))
812 13813 : error = xlog_recover_finish(log);
813 60627 : mp->m_super->s_flags &= ~SB_ACTIVE;
814 60627 : evict_inodes(mp->m_super);
815 :
816 : /*
817 : * Drain the buffer LRU after log recovery. This is required for v4
818 : * filesystems to avoid leaving around buffers with NULL verifier ops,
819 : * but we do it unconditionally to make sure we're always in a clean
820 : * cache state after mount.
821 : *
822 : * Don't push in the error case because the AIL may have pending intents
823 : * that aren't removed until recovery is cancelled.
824 : */
825 121254 : if (xlog_recovery_needed(log)) {
826 13813 : if (!error) {
827 13807 : xfs_log_force(mp, XFS_LOG_SYNC);
828 13807 : xfs_ail_push_all_sync(mp->m_ail);
829 : }
830 15472 : xfs_notice(mp, "Ending recovery (logdev: %s)",
831 : mp->m_logname ? mp->m_logname : "internal");
832 : } else {
833 46814 : xfs_info(mp, "Ending clean mount");
834 : }
835 60627 : xfs_buftarg_drain(mp->m_ddev_targp);
836 :
837 60627 : clear_bit(XLOG_RECOVERY_NEEDED, &log->l_opstate);
838 60627 : if (readonly)
839 2204 : set_bit(XFS_OPSTATE_READONLY, &mp->m_opstate);
840 :
841 : /* Make sure the log is dead if we're returning failure. */
842 60633 : ASSERT(!error || xlog_is_shutdown(log));
843 :
844 : return error;
845 : }
846 :
847 : /*
848 : * The mount has failed. Cancel the recovery if it hasn't completed and destroy
849 : * the log.
850 : */
851 : void
852 149 : xfs_log_mount_cancel(
853 : struct xfs_mount *mp)
854 : {
855 149 : xlog_recover_cancel(mp->m_log);
856 149 : xfs_log_unmount(mp);
857 149 : }
858 :
859 : /*
860 : * Flush out the iclog to disk ensuring that device caches are flushed and
861 : * the iclog hits stable storage before any completion waiters are woken.
862 : */
863 : static inline int
864 5165981 : xlog_force_iclog(
865 : struct xlog_in_core *iclog)
866 : {
867 5165981 : atomic_inc(&iclog->ic_refcnt);
868 5165984 : iclog->ic_flags |= XLOG_ICL_NEED_FLUSH | XLOG_ICL_NEED_FUA;
869 5165984 : if (iclog->ic_state == XLOG_STATE_ACTIVE)
870 5165984 : xlog_state_switch_iclogs(iclog->ic_log, iclog, 0);
871 5165982 : return xlog_state_release_iclog(iclog->ic_log, iclog, NULL);
872 : }
873 :
874 : /*
875 : * Cycle all the iclogbuf locks to make sure all log IO completion
876 : * is done before we tear down these buffers.
877 : */
878 : static void
879 60732 : xlog_wait_iclog_completion(struct xlog *log)
880 : {
881 60732 : int i;
882 60732 : struct xlog_in_core *iclog = log->l_iclog;
883 :
884 546528 : for (i = 0; i < log->l_iclog_bufs; i++) {
885 485796 : down(&iclog->ic_sema);
886 485796 : up(&iclog->ic_sema);
887 485796 : iclog = iclog->ic_next;
888 : }
889 60732 : }
890 :
891 : /*
892 : * Wait for the iclog and all prior iclogs to be written to disk as required by the
893 : * log force state machine. Waiting on ic_force_wait ensures iclog completions
894 : * have been ordered and callbacks run before we are woken here, hence
895 : * guaranteeing that all the iclogs up to this one are on stable storage.
896 : */
897 : int
898 10667592 : xlog_wait_on_iclog(
899 : struct xlog_in_core *iclog)
900 : __releases(iclog->ic_log->l_icloglock)
901 : {
902 10667592 : struct xlog *log = iclog->ic_log;
903 :
904 10667592 : trace_xlog_iclog_wait_on(iclog, _RET_IP_);
905 21335186 : if (!xlog_is_shutdown(log) &&
906 10667544 : iclog->ic_state != XLOG_STATE_ACTIVE &&
907 : iclog->ic_state != XLOG_STATE_DIRTY) {
908 8598585 : XFS_STATS_INC(log->l_mp, xs_log_force_sleep);
909 8598587 : xlog_wait(&iclog->ic_force_wait, &log->l_icloglock);
910 : } else {
911 2069008 : spin_unlock(&log->l_icloglock);
912 : }
913 :
914 21332626 : if (xlog_is_shutdown(log))
915 4963 : return -EIO;
916 : return 0;
917 : }
918 :
919 : /*
920 : * Write out an unmount record using the ticket provided. We have to account for
921 : * the data space used in the unmount ticket as this write is not done from a
922 : * transaction context that has already done the accounting for us.
923 : */
924 : static int
925 50095 : xlog_write_unmount_record(
926 : struct xlog *log,
927 : struct xlog_ticket *ticket)
928 : {
929 50095 : struct {
930 : struct xlog_op_header ophdr;
931 : struct xfs_unmount_log_format ulf;
932 50095 : } unmount_rec = {
933 : .ophdr = {
934 : .oh_clientid = XFS_LOG,
935 50095 : .oh_tid = cpu_to_be32(ticket->t_tid),
936 : .oh_flags = XLOG_UNMOUNT_TRANS,
937 : },
938 : .ulf = {
939 : .magic = XLOG_UNMOUNT_TYPE,
940 : },
941 : };
942 50095 : struct xfs_log_iovec reg = {
943 : .i_addr = &unmount_rec,
944 : .i_len = sizeof(unmount_rec),
945 : .i_type = XLOG_REG_TYPE_UNMOUNT,
946 : };
947 50095 : struct xfs_log_vec vec = {
948 : .lv_niovecs = 1,
949 : .lv_iovecp = &reg,
950 : };
951 50095 : LIST_HEAD(lv_chain);
952 50095 : list_add(&vec.lv_list, &lv_chain);
953 :
954 50095 : BUILD_BUG_ON((sizeof(struct xlog_op_header) +
955 : sizeof(struct xfs_unmount_log_format)) !=
956 : sizeof(unmount_rec));
957 :
958 : /* account for space used by record data */
959 50095 : ticket->t_curr_res -= sizeof(unmount_rec);
960 :
961 50095 : return xlog_write(log, NULL, &lv_chain, ticket, reg.i_len);
962 : }
963 :
964 : /*
965 : * Mark the filesystem clean by writing an unmount record to the head of the
966 : * log.
967 : */
968 : static void
969 50095 : xlog_unmount_write(
970 : struct xlog *log)
971 : {
972 50095 : struct xfs_mount *mp = log->l_mp;
973 50095 : struct xlog_in_core *iclog;
974 50095 : struct xlog_ticket *tic = NULL;
975 50095 : int error;
976 :
977 50095 : error = xfs_log_reserve(mp, 600, 1, &tic, 0);
978 50095 : if (error)
979 0 : goto out_err;
980 :
981 50095 : error = xlog_write_unmount_record(log, tic);
982 : /*
983 : * At this point, we're umounting anyway, so there's no point in
984 : * transitioning log state to shutdown. Just continue...
985 : */
986 50095 : out_err:
987 50095 : if (error)
988 0 : xfs_alert(mp, "%s: unmount record failed", __func__);
989 :
990 50095 : spin_lock(&log->l_icloglock);
991 50095 : iclog = log->l_iclog;
992 50095 : error = xlog_force_iclog(iclog);
993 50095 : xlog_wait_on_iclog(iclog);
994 :
995 50095 : if (tic) {
996 50095 : trace_xfs_log_umount_write(log, tic);
997 50095 : xfs_log_ticket_ungrant(log, tic);
998 : }
999 50095 : }
1000 :
1001 : static void
1002 50095 : xfs_log_unmount_verify_iclog(
1003 : struct xlog *log)
1004 : {
1005 50095 : struct xlog_in_core *iclog = log->l_iclog;
1006 :
1007 400700 : do {
1008 400700 : ASSERT(iclog->ic_state == XLOG_STATE_ACTIVE);
1009 400700 : ASSERT(iclog->ic_offset == 0);
1010 400700 : } while ((iclog = iclog->ic_next) != log->l_iclog);
1011 50095 : }
1012 :
1013 : /*
1014 : * Unmount record used to have a string "Unmount filesystem--" in the
1015 : * data section where the "Un" was really a magic number (XLOG_UNMOUNT_TYPE).
1016 : * We just write the magic number now since that particular field isn't
1017 : * currently architecture converted and "Unmount" is a bit foo.
1018 : * As far as I know, there weren't any dependencies on the old behaviour.
1019 : */
1020 : static void
1021 63401 : xfs_log_unmount_write(
1022 : struct xfs_mount *mp)
1023 : {
1024 63401 : struct xlog *log = mp->m_log;
1025 :
1026 63401 : if (!xfs_log_writable(mp))
1027 : return;
1028 :
1029 50106 : xfs_log_force(mp, XFS_LOG_SYNC);
1030 :
1031 100212 : if (xlog_is_shutdown(log))
1032 : return;
1033 :
1034 : /*
1035 : * If we think the summary counters are bad, avoid writing the unmount
1036 : * record to force log recovery at next mount, after which the summary
1037 : * counters will be recalculated. Refer to xlog_check_unmount_rec for
1038 : * more details.
1039 : */
1040 50106 : if (XFS_TEST_ERROR(xfs_fs_has_sickness(mp, XFS_SICK_FS_COUNTERS), mp,
1041 : XFS_ERRTAG_FORCE_SUMMARY_RECALC)) {
1042 11 : xfs_alert(mp, "%s: will fix summary counters at next mount",
1043 : __func__);
1044 11 : return;
1045 : }
1046 :
1047 50095 : xfs_log_unmount_verify_iclog(log);
1048 50095 : xlog_unmount_write(log);
1049 : }
1050 :
1051 : /*
1052 : * Empty the log for unmount/freeze.
1053 : *
1054 : * To do this, we first need to shut down the background log work so it is not
1055 : * trying to cover the log as we clean up. We then need to unpin all objects in
1056 : * the log so we can then flush them out. Once they have completed their IO and
1057 : * run the callbacks removing themselves from the AIL, we can cover the log.
1058 : */
1059 : int
1060 119998 : xfs_log_quiesce(
1061 : struct xfs_mount *mp)
1062 : {
1063 : /*
1064 : * Clear log incompat features since we're quiescing the log. Report
1065 : * failures, though it's not fatal to have a higher log feature
1066 : * protection level than the log contents actually require.
1067 : */
1068 119998 : if (xfs_clear_incompat_log_features(mp, XFS_SB_FEAT_INCOMPAT_LOG_ALL)) {
1069 67177 : int error;
1070 :
1071 67177 : error = xfs_sync_sb(mp, false);
1072 67177 : if (error)
1073 0 : xfs_warn(mp,
1074 : "Failed to clear log incompat features on quiesce");
1075 : }
1076 :
1077 119998 : cancel_delayed_work_sync(&mp->m_log->l_work);
1078 119998 : xfs_log_force(mp, XFS_LOG_SYNC);
1079 :
1080 : /*
1081 : * The superblock buffer is uncached and while xfs_ail_push_all_sync()
1082 : * will push it, xfs_buftarg_wait() will not wait for it. Further,
1083 : * xfs_buf_iowait() cannot be used because it was pushed with the
1084 : * XBF_ASYNC flag set, so we need to use a lock/unlock pair to wait for
1085 : * the IO to complete.
1086 : */
1087 119998 : xfs_ail_push_all_sync(mp->m_ail);
1088 119998 : xfs_buftarg_wait(mp->m_ddev_targp);
1089 119998 : xfs_buf_lock(mp->m_sb_bp);
1090 119998 : xfs_buf_unlock(mp->m_sb_bp);
1091 :
1092 119998 : return xfs_log_cover(mp);
1093 : }
1094 :
1095 : void
1096 2669 : xfs_log_clean(
1097 : struct xfs_mount *mp)
1098 : {
1099 2669 : xfs_log_quiesce(mp);
1100 63401 : xfs_log_unmount_write(mp);
1101 2669 : }
1102 :
1103 : /*
1104 : * Shut down and release the AIL and Log.
1105 : *
1106 : * During unmount, we need to ensure we flush all the dirty metadata objects
1107 : * from the AIL so that the log is empty before we write the unmount record to
1108 : * the log. Once this is done, we can tear down the AIL and the log.
1109 : */
1110 : void
1111 60732 : xfs_log_unmount(
1112 : struct xfs_mount *mp)
1113 : {
1114 60732 : xfs_log_clean(mp);
1115 :
1116 : /*
1117 : * If shutdown has come from iclog IO context, the log
1118 : * cleaning will have been skipped and so we need to wait
1119 : * for the iclog to complete shutdown processing before we
1120 : * tear anything down.
1121 : */
1122 60732 : xlog_wait_iclog_completion(mp->m_log);
1123 :
1124 60732 : xfs_buftarg_drain(mp->m_ddev_targp);
1125 :
1126 60732 : xfs_trans_ail_destroy(mp);
1127 :
1128 60732 : xfs_sysfs_del(&mp->m_log->l_kobj);
1129 :
1130 60732 : xlog_dealloc_log(mp->m_log);
1131 60732 : }
1132 :
1133 : void
1134 11072496511 : xfs_log_item_init(
1135 : struct xfs_mount *mp,
1136 : struct xfs_log_item *item,
1137 : int type,
1138 : const struct xfs_item_ops *ops)
1139 : {
1140 11072496511 : item->li_log = mp->m_log;
1141 11072496511 : item->li_ailp = mp->m_ail;
1142 11072496511 : item->li_type = type;
1143 11072496511 : item->li_ops = ops;
1144 11072496511 : item->li_lv = NULL;
1145 :
1146 11072496511 : INIT_LIST_HEAD(&item->li_ail);
1147 11072496511 : INIT_LIST_HEAD(&item->li_cil);
1148 11072496511 : INIT_LIST_HEAD(&item->li_bio_list);
1149 11072496511 : INIT_LIST_HEAD(&item->li_trans);
1150 11072496511 : }
1151 :
1152 : /*
1153 : * Wake up processes waiting for log space after we have moved the log tail.
1154 : */
1155 : void
1156 1546077251 : xfs_log_space_wake(
1157 : struct xfs_mount *mp)
1158 : {
1159 1546077251 : struct xlog *log = mp->m_log;
1160 1546077251 : int free_bytes;
1161 :
1162 3092154502 : if (xlog_is_shutdown(log))
1163 66368 : return;
1164 :
1165 1546010883 : if (!list_empty_careful(&log->l_write_head.waiters)) {
1166 444 : ASSERT(!xlog_in_recovery(log));
1167 :
1168 222 : spin_lock(&log->l_write_head.lock);
1169 222 : free_bytes = xlog_space_left(log, &log->l_write_head.grant);
1170 222 : xlog_grant_head_wake(log, &log->l_write_head, &free_bytes);
1171 222 : spin_unlock(&log->l_write_head.lock);
1172 : }
1173 :
1174 1546014347 : if (!list_empty_careful(&log->l_reserve_head.waiters)) {
1175 19475062 : ASSERT(!xlog_in_recovery(log));
1176 :
1177 9737531 : spin_lock(&log->l_reserve_head.lock);
1178 9757723 : free_bytes = xlog_space_left(log, &log->l_reserve_head.grant);
1179 9757723 : xlog_grant_head_wake(log, &log->l_reserve_head, &free_bytes);
1180 9757723 : spin_unlock(&log->l_reserve_head.lock);
1181 : }
1182 : }
1183 :
1184 : /*
1185 : * Determine if we have a transaction that has gone to disk that needs to be
1186 : * covered. To begin the transition to the idle state, the log first needs to
1187 : * be idle. That means the CIL, the AIL and the iclogs need to be empty before
1188 : * we start attempting to cover the log.
1189 : *
1190 : * Only if we are then in a state where covering is needed, the caller is
1191 : * informed that dummy transactions are required to move the log into the idle
1192 : * state.
1193 : *
1194 : * If there are any items in the AIL or CIL, then we do not want to attempt to
1195 : * cover the log as we may be in a situation where there isn't log space
1196 : * available to run a dummy transaction and this can lead to deadlocks when the
1197 : * tail of the log is pinned by an item that is modified in the CIL. Hence
1198 : * there's no point in running a dummy transaction at this point because we
1199 : * can't start trying to idle the log until both the CIL and AIL are empty.
1200 : */
1201 : static bool
1202 314975 : xfs_log_need_covered(
1203 : struct xfs_mount *mp)
1204 : {
1205 314975 : struct xlog *log = mp->m_log;
1206 314975 : bool needed = false;
1207 :
1208 314975 : if (!xlog_cil_empty(log))
1209 : return false;
1210 :
1211 306827 : spin_lock(&log->l_icloglock);
1212 306827 : switch (log->l_covered_state) {
1213 : case XLOG_STATE_COVER_DONE:
1214 : case XLOG_STATE_COVER_DONE2:
1215 : case XLOG_STATE_COVER_IDLE:
1216 : break;
1217 169829 : case XLOG_STATE_COVER_NEED:
1218 : case XLOG_STATE_COVER_NEED2:
1219 169829 : if (xfs_ail_min_lsn(log->l_ailp))
1220 : break;
1221 168741 : if (!xlog_iclogs_empty(log))
1222 : break;
1223 :
1224 168741 : needed = true;
1225 168741 : if (log->l_covered_state == XLOG_STATE_COVER_NEED)
1226 84411 : log->l_covered_state = XLOG_STATE_COVER_DONE;
1227 : else
1228 84330 : log->l_covered_state = XLOG_STATE_COVER_DONE2;
1229 : break;
1230 0 : default:
1231 0 : needed = true;
1232 0 : break;
1233 : }
1234 306827 : spin_unlock(&log->l_icloglock);
1235 306827 : return needed;
1236 : }
1237 :
1238 : /*
1239 : * Explicitly cover the log. This is similar to background log covering but
1240 : * intended for usage in quiesce codepaths. The caller is responsible to ensure
1241 : * the log is idle and suitable for covering. The CIL, iclog buffers and AIL
1242 : * must all be empty.
1243 : */
1244 : static int
1245 119998 : xfs_log_cover(
1246 : struct xfs_mount *mp)
1247 : {
1248 119998 : int error = 0;
1249 119998 : bool need_covered;
1250 :
1251 126948 : ASSERT((xlog_cil_empty(mp->m_log) && xlog_iclogs_empty(mp->m_log) &&
1252 : !xfs_ail_min_lsn(mp->m_log->l_ailp)) ||
1253 : xlog_is_shutdown(mp->m_log));
1254 :
1255 119998 : if (!xfs_log_writable(mp))
1256 : return 0;
1257 :
1258 : /*
1259 : * xfs_log_need_covered() is not idempotent because it progresses the
1260 : * state machine if the log requires covering. Therefore, we must call
1261 : * this function once and use the result until we've issued an sb sync.
1262 : * Do so first to make that abundantly clear.
1263 : *
1264 : * Fall into the covering sequence if the log needs covering or the
1265 : * mount has lazy superblock accounting to sync to disk. The sb sync
1266 : * used for covering accumulates the in-core counters, so covering
1267 : * handles this for us.
1268 : */
1269 106758 : need_covered = xfs_log_need_covered(mp);
1270 106758 : if (!need_covered && !xfs_has_lazysbcount(mp))
1271 : return 0;
1272 :
1273 : /*
1274 : * To cover the log, commit the superblock twice (at most) in
1275 : * independent checkpoints. The first serves as a reference for the
1276 : * tail pointer. The sync transaction and AIL push empties the AIL and
1277 : * updates the in-core tail to the LSN of the first checkpoint. The
1278 : * second commit updates the on-disk tail with the in-core LSN,
1279 : * covering the log. Push the AIL one more time to leave it empty, as
1280 : * we found it.
1281 : */
1282 190925 : do {
1283 190925 : error = xfs_sync_sb(mp, true);
1284 190925 : if (error)
1285 : break;
1286 190892 : xfs_ail_push_all_sync(mp->m_ail);
1287 190892 : } while (xfs_log_need_covered(mp));
1288 :
1289 : return error;
1290 : }
1291 :
1292 : /*
1293 : * We may be holding the log iclog lock upon entering this routine.
1294 : */
1295 : xfs_lsn_t
1296 29820261 : xlog_assign_tail_lsn_locked(
1297 : struct xfs_mount *mp)
1298 : {
1299 29820261 : struct xlog *log = mp->m_log;
1300 29820261 : struct xfs_log_item *lip;
1301 29820261 : xfs_lsn_t tail_lsn;
1302 :
1303 29820261 : assert_spin_locked(&mp->m_ail->ail_lock);
1304 :
1305 : /*
1306 : * To make sure we always have a valid LSN for the log tail we keep
1307 : * track of the last LSN which was committed in log->l_last_sync_lsn,
1308 : * and use that when the AIL was empty.
1309 : */
1310 29820261 : lip = xfs_ail_min(mp->m_ail);
1311 28649558 : if (lip)
1312 28649558 : tail_lsn = lip->li_lsn;
1313 : else
1314 1170703 : tail_lsn = atomic64_read(&log->l_last_sync_lsn);
1315 29820261 : trace_xfs_log_assign_tail_lsn(log, tail_lsn);
1316 29820258 : atomic64_set(&log->l_tail_lsn, tail_lsn);
1317 29820258 : return tail_lsn;
1318 : }
1319 :
1320 : xfs_lsn_t
1321 28821514 : xlog_assign_tail_lsn(
1322 : struct xfs_mount *mp)
1323 : {
1324 28821514 : xfs_lsn_t tail_lsn;
1325 :
1326 28821514 : spin_lock(&mp->m_ail->ail_lock);
1327 28821518 : tail_lsn = xlog_assign_tail_lsn_locked(mp);
1328 28821515 : spin_unlock(&mp->m_ail->ail_lock);
1329 :
1330 28821518 : return tail_lsn;
1331 : }
1332 :
1333 : /*
1334 : * Return the space in the log between the tail and the head. The head
1335 : * is passed in the cycle/bytes formal parameters. In the special case where
1336 : * the reserve head has wrapped past the tail, this calculation is no
1337 : * longer valid. In this case, just return 0 which means there is no space
1338 : * in the log. This works for all places where this function is called
1339 : * with the reserve head. Of course, if the write head were to ever
1340 : * wrap the tail, we should blow up. Rather than catch this case here,
1341 : * we depend on other ASSERTions in other parts of the code. XXXmiken
1342 : *
1343 : * If reservation head is behind the tail, we have a problem. Warn about it,
1344 : * but then treat it as if the log is empty.
1345 : *
1346 : * If the log is shut down, the head and tail may be invalid or out of whack, so
1347 : * shortcut invalidity asserts in this case so that we don't trigger them
1348 : * falsely.
1349 : */
1350 : STATIC int
1351 4841379377 : xlog_space_left(
1352 : struct xlog *log,
1353 : atomic64_t *head)
1354 : {
1355 4841379377 : int tail_bytes;
1356 4841379377 : int tail_cycle;
1357 4841379377 : int head_cycle;
1358 4841379377 : int head_bytes;
1359 :
1360 4841379377 : xlog_crack_grant_head(head, &head_cycle, &head_bytes);
1361 4841379377 : xlog_crack_atomic_lsn(&log->l_tail_lsn, &tail_cycle, &tail_bytes);
1362 4841379377 : tail_bytes = BBTOB(tail_bytes);
1363 4841379377 : if (tail_cycle == head_cycle && head_bytes >= tail_bytes)
1364 3689103122 : return log->l_logsize - (head_bytes - tail_bytes);
1365 1152276255 : if (tail_cycle + 1 < head_cycle)
1366 : return 0;
1367 :
1368 : /* Ignore potential inconsistency when shutdown. */
1369 2304552458 : if (xlog_is_shutdown(log))
1370 0 : return log->l_logsize;
1371 :
1372 1152276229 : if (tail_cycle < head_cycle) {
1373 1152276229 : ASSERT(tail_cycle == (head_cycle - 1));
1374 1152276229 : return tail_bytes - head_bytes;
1375 : }
1376 :
1377 : /*
1378 : * The reservation head is behind the tail. In this case we just want to
1379 : * return the size of the log as the amount of space left.
1380 : */
1381 0 : xfs_alert(log->l_mp, "xlog_space_left: head behind tail");
1382 0 : xfs_alert(log->l_mp, " tail_cycle = %d, tail_bytes = %d",
1383 : tail_cycle, tail_bytes);
1384 0 : xfs_alert(log->l_mp, " GH cycle = %d, GH bytes = %d",
1385 : head_cycle, head_bytes);
1386 0 : ASSERT(0);
1387 0 : return log->l_logsize;
1388 : }
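/*
 * Illustrative example of the calculation above (numbers invented): with a
 * 16MB log, a tail at cycle 5 / byte 6MB and a head at cycle 5 / byte 10MB,
 * the same-cycle case applies and 16MB - (10MB - 6MB) = 12MB is free.  If
 * the head has instead wrapped to cycle 6 / byte 2MB, the tail_cycle <
 * head_cycle case applies and 6MB - 2MB = 4MB is free.
 */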
1389 :
1390 :
1391 : static void
1392 28805542 : xlog_ioend_work(
1393 : struct work_struct *work)
1394 : {
1395 28805542 : struct xlog_in_core *iclog =
1396 28805542 : container_of(work, struct xlog_in_core, ic_end_io_work);
1397 28805542 : struct xlog *log = iclog->ic_log;
1398 28805542 : int error;
1399 :
1400 28805542 : error = blk_status_to_errno(iclog->ic_bio.bi_status);
1401 : #ifdef DEBUG
1402 : /* treat writes with injected CRC errors as failed */
1403 28805542 : if (iclog->ic_fail_crc)
1404 : error = -EIO;
1405 : #endif
1406 :
1407 : /*
1408 : * Race to shutdown the filesystem if we see an error.
1409 : */
1410 28805484 : if (XFS_TEST_ERROR(error, log->l_mp, XFS_ERRTAG_IODONE_IOERR)) {
1411 11353 : xfs_alert(log->l_mp, "log I/O error %d", error);
1412 11353 : xlog_force_shutdown(log, SHUTDOWN_LOG_IO_ERROR);
1413 : }
1414 :
1415 28805542 : xlog_state_done_syncing(iclog);
1416 28805542 : bio_uninit(&iclog->ic_bio);
1417 :
1418 : /*
1419 : * Drop the lock to signal that we are done. Nothing references the
1420 : * iclog after this, so an unmount waiting on this lock can now tear it
1421 : * down safely. As such, it is unsafe to reference the iclog after the
1422 : * unlock as we could race with it being freed.
1423 : */
1424 28805542 : up(&iclog->ic_sema);
1425 28805542 : }
1426 :
1427 : /*
1428 : * Return size of each in-core log record buffer.
1429 : *
1430 : * All machines get 8 x 32kB buffers by default, unless tuned otherwise.
1431 : *
1432 : * If the filesystem blocksize is too large, we may need to choose a
1433 : * larger size since the directory code currently logs entire blocks.
1434 : */
1435 : STATIC void
1436 60764 : xlog_get_iclog_buffer_size(
1437 : struct xfs_mount *mp,
1438 : struct xlog *log)
1439 : {
1440 60764 : if (mp->m_logbufs <= 0)
1441 60744 : mp->m_logbufs = XLOG_MAX_ICLOGS;
1442 60764 : if (mp->m_logbsize <= 0)
1443 59697 : mp->m_logbsize = XLOG_BIG_RECORD_BSIZE;
1444 :
1445 60764 : log->l_iclog_bufs = mp->m_logbufs;
1446 60764 : log->l_iclog_size = mp->m_logbsize;
1447 :
1448 : /*
1449 : * # headers = size / 32k - one header holds cycles from 32k of data.
1450 : */
1451 60764 : log->l_iclog_heads =
1452 60764 : DIV_ROUND_UP(mp->m_logbsize, XLOG_HEADER_CYCLE_SIZE);
1453 60764 : log->l_iclog_hsize = log->l_iclog_heads << BBSHIFT;
1454 60764 : }
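/*
 * Example of the header sizing above (illustrative, assuming the usual 32k
 * XLOG_HEADER_CYCLE_SIZE and 512-byte basic blocks): the default 32k
 * logbsize needs one header, so l_iclog_hsize is 512 bytes, while a 256k
 * logbsize needs 256k / 32k = 8 headers, i.e. a 4096-byte l_iclog_hsize.
 */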
1455 :
1456 : void
1457 135057 : xfs_log_work_queue(
1458 : struct xfs_mount *mp)
1459 : {
1460 135057 : queue_delayed_work(mp->m_sync_workqueue, &mp->m_log->l_work,
1461 135057 : msecs_to_jiffies(xfs_syncd_centisecs * 10));
1462 135057 : }
1463 :
1464 : /*
1465 : * Clear the log incompat flags if we have the opportunity.
1466 : *
1467 : * This only happens if we're about to log the second dummy transaction as part
1468 : * of covering the log and we can get the log incompat feature usage lock.
1469 : */
1470 : static inline void
1471 364 : xlog_clear_incompat(
1472 : struct xlog *log)
1473 : {
1474 364 : struct xfs_mount *mp = log->l_mp;
1475 364 : uint32_t incompat_mask = 0;
1476 :
1477 364 : if (!xfs_sb_has_incompat_log_feature(&mp->m_sb,
1478 : XFS_SB_FEAT_INCOMPAT_LOG_ALL))
1479 : return;
1480 :
1481 332 : if (log->l_covered_state != XLOG_STATE_COVER_DONE2)
1482 : return;
1483 :
1484 129 : if (down_write_trylock(&log->l_incompat_xattrs))
1485 129 : incompat_mask |= XFS_SB_FEAT_INCOMPAT_LOG_XATTRS;
1486 :
1487 129 : if (down_write_trylock(&log->l_incompat_swapext))
1488 129 : incompat_mask |= XFS_SB_FEAT_INCOMPAT_LOG_SWAPEXT;
1489 :
1490 129 : if (!incompat_mask)
1491 : return;
1492 :
1493 129 : xfs_clear_incompat_log_features(mp, incompat_mask);
1494 :
1495 129 : if (incompat_mask & XFS_SB_FEAT_INCOMPAT_LOG_SWAPEXT)
1496 129 : up_write(&log->l_incompat_swapext);
1497 :
1498 129 : if (incompat_mask & XFS_SB_FEAT_INCOMPAT_LOG_XATTRS)
1499 129 : up_write(&log->l_incompat_xattrs);
1500 : }
1501 :
1502 : /*
1503 : * Every sync period we need to unpin all items in the AIL and push them to
1504 : * disk. If there is nothing dirty, then we might need to cover the log to
1505 : * indicate that the filesystem is idle.
1506 : */
1507 : static void
1508 17403 : xfs_log_worker(
1509 : struct work_struct *work)
1510 : {
1511 17403 : struct xlog *log = container_of(to_delayed_work(work),
1512 : struct xlog, l_work);
1513 17403 : struct xfs_mount *mp = log->l_mp;
1514 :
1515 : /* dgc: errors ignored - not fatal and nowhere to report them */
1516 17403 : if (xfs_fs_writable(mp, SB_FREEZE_WRITE) && xfs_log_need_covered(mp)) {
1517 : /*
1518 : * Dump a transaction into the log that contains no real change.
1519 : * This is needed to stamp the current tail LSN into the log
1520 : * during the covering operation.
1521 : *
1522 : * We cannot use an inode here for this - that will push dirty
1523 : * state back up into the VFS and then periodic inode flushing
1524 : * will prevent log covering from making progress. Hence we
1525 : * synchronously log the superblock instead to ensure the
1526 : * superblock is immediately unpinned and can be written back.
1527 : */
1528 364 : xlog_clear_incompat(log);
1529 364 : xfs_sync_sb(mp, true);
1530 : } else
1531 17052 : xfs_log_force(mp, 0);
1532 :
1533 : /* start pushing all the metadata that is currently dirty */
1534 17417 : xfs_ail_push_all(mp->m_ail);
1535 :
1536 : /* queue us up again */
1537 17417 : xfs_log_work_queue(mp);
1538 17417 : }
1539 :
1540 : /*
1541 : * This routine initializes some of the log structure for a given mount point.
1542 : * Its primary purpose is to fill in enough so that recovery can occur. However,
1543 : * some other stuff may be filled in too.
1544 : */
1545 : STATIC struct xlog *
1546 60764 : xlog_alloc_log(
1547 : struct xfs_mount *mp,
1548 : struct xfs_buftarg *log_target,
1549 : xfs_daddr_t blk_offset,
1550 : int num_bblks)
1551 : {
1552 60764 : struct xlog *log;
1553 60764 : xlog_rec_header_t *head;
1554 60764 : xlog_in_core_t **iclogp;
1555 60764 : xlog_in_core_t *iclog, *prev_iclog=NULL;
1556 60764 : int i;
1557 60764 : int error = -ENOMEM;
1558 60764 : uint log2_size = 0;
1559 :
1560 60764 : log = kmem_zalloc(sizeof(struct xlog), KM_MAYFAIL);
1561 60764 : if (!log) {
1562 0 : xfs_warn(mp, "Log allocation failed: No memory!");
1563 0 : goto out;
1564 : }
1565 :
1566 60764 : log->l_mp = mp;
1567 60764 : log->l_targ = log_target;
1568 60764 : log->l_logsize = BBTOB(num_bblks);
1569 60764 : log->l_logBBstart = blk_offset;
1570 60764 : log->l_logBBsize = num_bblks;
1571 60764 : log->l_covered_state = XLOG_STATE_COVER_IDLE;
1572 60764 : set_bit(XLOG_ACTIVE_RECOVERY, &log->l_opstate);
1573 60764 : INIT_DELAYED_WORK(&log->l_work, xfs_log_worker);
1574 :
1575 60764 : log->l_prev_block = -1;
1576 : /* log->l_tail_lsn = 0x100000000LL; cycle = 1; current block = 0 */
1577 60764 : xlog_assign_atomic_lsn(&log->l_tail_lsn, 1, 0);
1578 60764 : xlog_assign_atomic_lsn(&log->l_last_sync_lsn, 1, 0);
1579 60764 : log->l_curr_cycle = 1; /* 0 is bad since this is initial value */
1580 :
1581 60764 : if (xfs_has_logv2(mp) && mp->m_sb.sb_logsunit > 1)
1582 40984 : log->l_iclog_roundoff = mp->m_sb.sb_logsunit;
1583 : else
1584 19780 : log->l_iclog_roundoff = BBSIZE;
1585 :
1586 60764 : xlog_grant_head_init(&log->l_reserve_head);
1587 60764 : xlog_grant_head_init(&log->l_write_head);
1588 :
1589 60764 : error = -EFSCORRUPTED;
1590 60764 : if (xfs_has_sector(mp)) {
1591 58442 : log2_size = mp->m_sb.sb_logsectlog;
1592 58442 : if (log2_size < BBSHIFT) {
1593 0 : xfs_warn(mp, "Log sector size too small (0x%x < 0x%x)",
1594 : log2_size, BBSHIFT);
1595 0 : goto out_free_log;
1596 : }
1597 :
1598 58442 : log2_size -= BBSHIFT;
1599 58442 : if (log2_size > mp->m_sectbb_log) {
1600 0 : xfs_warn(mp, "Log sector size too large (0x%x > 0x%x)",
1601 : log2_size, mp->m_sectbb_log);
1602 0 : goto out_free_log;
1603 : }
1604 :
1605 : /* for larger sector sizes, must have v2 or external log */
1606 58442 : if (log2_size && log->l_logBBstart > 0 &&
1607 : !xfs_has_logv2(mp)) {
1608 0 : xfs_warn(mp,
1609 : "log sector size (0x%x) invalid for configuration.",
1610 : log2_size);
1611 0 : goto out_free_log;
1612 : }
1613 : }
1614 58442 : log->l_sectBBsize = 1 << log2_size;
1615 :
1616 60764 : init_rwsem(&log->l_incompat_xattrs);
1617 60764 : init_rwsem(&log->l_incompat_swapext);
1618 :
1619 60764 : xlog_get_iclog_buffer_size(mp, log);
1620 :
1621 60764 : spin_lock_init(&log->l_icloglock);
1622 60764 : init_waitqueue_head(&log->l_flush_wait);
1623 :
1624 60764 : iclogp = &log->l_iclog;
1625 : /*
1626 : * The amount of memory to allocate for the iclog structure is
1627 : * rather funky due to the way the structure is defined. It is
1628 : * done this way so that we can use different sizes for machines
1629 : * with different amounts of memory. See the definition of
1630 : * xlog_in_core_t in xfs_log_priv.h for details.
1631 : */
1632 60764 : ASSERT(log->l_iclog_size >= 4096);
1633 546816 : for (i = 0; i < log->l_iclog_bufs; i++) {
1634 486052 : size_t bvec_size = howmany(log->l_iclog_size, PAGE_SIZE) *
1635 : sizeof(struct bio_vec);
1636 :
1637 486052 : iclog = kmem_zalloc(sizeof(*iclog) + bvec_size, KM_MAYFAIL);
1638 486052 : if (!iclog)
1639 0 : goto out_free_iclog;
1640 :
1641 486052 : *iclogp = iclog;
1642 486052 : iclog->ic_prev = prev_iclog;
1643 486052 : prev_iclog = iclog;
1644 :
1645 486052 : iclog->ic_data = kvzalloc(log->l_iclog_size,
1646 : GFP_KERNEL | __GFP_RETRY_MAYFAIL);
1647 486052 : if (!iclog->ic_data)
1648 0 : goto out_free_iclog;
1649 486052 : head = &iclog->ic_header;
1650 486052 : memset(head, 0, sizeof(xlog_rec_header_t));
1651 486052 : head->h_magicno = cpu_to_be32(XLOG_HEADER_MAGIC_NUM);
1652 486052 : head->h_version = cpu_to_be32(
1653 : xfs_has_logv2(log->l_mp) ? 2 : 1);
1654 486052 : head->h_size = cpu_to_be32(log->l_iclog_size);
1655 : /* new fields */
1656 486052 : head->h_fmt = cpu_to_be32(XLOG_FMT);
1657 972104 : memcpy(&head->h_fs_uuid, &mp->m_sb.sb_uuid, sizeof(uuid_t));
1658 :
1659 486052 : iclog->ic_size = log->l_iclog_size - log->l_iclog_hsize;
1660 486052 : iclog->ic_state = XLOG_STATE_ACTIVE;
1661 486052 : iclog->ic_log = log;
1662 486052 : atomic_set(&iclog->ic_refcnt, 0);
1663 486052 : INIT_LIST_HEAD(&iclog->ic_callbacks);
1664 486052 : iclog->ic_datap = (void *)iclog->ic_data + log->l_iclog_hsize;
1665 :
1666 486052 : init_waitqueue_head(&iclog->ic_force_wait);
1667 486052 : init_waitqueue_head(&iclog->ic_write_wait);
1668 486052 : INIT_WORK(&iclog->ic_end_io_work, xlog_ioend_work);
1669 486052 : sema_init(&iclog->ic_sema, 1);
1670 :
1671 486052 : iclogp = &iclog->ic_next;
1672 : }
1673 60764 : *iclogp = log->l_iclog; /* complete ring */
1674 60764 : log->l_iclog->ic_prev = prev_iclog; /* re-write 1st prev ptr */
1675 :
1676 121528 : log->l_ioend_workqueue = alloc_workqueue("xfs-log/%s",
1677 : XFS_WQFLAGS(WQ_FREEZABLE | WQ_MEM_RECLAIM |
1678 : WQ_HIGHPRI),
1679 60764 : 0, mp->m_super->s_id);
1680 60764 : if (!log->l_ioend_workqueue)
1681 0 : goto out_free_iclog;
1682 :
1683 60764 : error = xlog_cil_init(log);
1684 60764 : if (error)
1685 0 : goto out_destroy_workqueue;
1686 : return log;
1687 :
1688 : out_destroy_workqueue:
1689 0 : destroy_workqueue(log->l_ioend_workqueue);
1690 0 : out_free_iclog:
1691 0 : for (iclog = log->l_iclog; iclog; iclog = prev_iclog) {
1692 0 : prev_iclog = iclog->ic_next;
1693 0 : kmem_free(iclog->ic_data);
1694 0 : kmem_free(iclog);
1695 0 : if (prev_iclog == log->l_iclog)
1696 : break;
1697 : }
1698 0 : out_free_log:
1699 0 : kmem_free(log);
1700 0 : out:
1701 0 : return ERR_PTR(error);
1702 : } /* xlog_alloc_log */
1703 :
1704 : /*
1705 : * Compute the LSN that we'd need to push the log tail towards in order to have
1706 : * (a) enough on-disk log space to log the number of bytes specified, (b) at
1707 : * least 25% of the log space free, and (c) at least 256 blocks free. If the
1708 : * log free space already meets all three thresholds, this function returns
1709 : * NULLCOMMITLSN.
1710 : */
1711 : xfs_lsn_t
1712 2882291916 : xlog_grant_push_threshold(
1713 : struct xlog *log,
1714 : int need_bytes)
1715 : {
1716 2882291916 : xfs_lsn_t threshold_lsn = 0;
1717 2882291916 : xfs_lsn_t last_sync_lsn;
1718 2882291916 : int free_blocks;
1719 2882291916 : int free_bytes;
1720 2882291916 : int threshold_block;
1721 2882291916 : int threshold_cycle;
1722 2882291916 : int free_threshold;
1723 :
1724 2882291916 : ASSERT(BTOBB(need_bytes) < log->l_logBBsize);
1725 :
1726 2882291916 : free_bytes = xlog_space_left(log, &log->l_reserve_head.grant);
1727 2882184319 : free_blocks = BTOBBT(free_bytes);
1728 :
1729 : /*
1730 : * Set the threshold for the minimum number of free blocks in the
1731 : * log to the maximum of what the caller needs, one quarter of the
1732 : * log, and 256 blocks.
     : */
1733 : */
1734 2882184319 : free_threshold = BTOBB(need_bytes);
1735 2882184319 : free_threshold = max(free_threshold, (log->l_logBBsize >> 2));
1736 2882184319 : free_threshold = max(free_threshold, 256);
1737 2882184319 : if (free_blocks >= free_threshold)
1738 : return NULLCOMMITLSN;
1739 :
1740 33919149 : xlog_crack_atomic_lsn(&log->l_tail_lsn, &threshold_cycle,
1741 : &threshold_block);
1742 33919149 : threshold_block += free_threshold;
1743 33919149 : if (threshold_block >= log->l_logBBsize) {
1744 5007619 : threshold_block -= log->l_logBBsize;
1745 5007619 : threshold_cycle += 1;
1746 : }
1747 33919149 : threshold_lsn = xlog_assign_lsn(threshold_cycle,
1748 : threshold_block);
1749 : /*
1750 : * Don't pass in an lsn greater than the lsn of the last
1751 : * log record known to be on disk. Use a snapshot of the last sync lsn
1752 : * so that it doesn't change between the compare and the set.
1753 : */
1754 33919149 : last_sync_lsn = atomic64_read(&log->l_last_sync_lsn);
1755 33919149 : if (XFS_LSN_CMP(threshold_lsn, last_sync_lsn) > 0)
1756 : threshold_lsn = last_sync_lsn;
1757 :
1758 : return threshold_lsn;
1759 : }
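     : /*
     :  * Illustrative sketch of the threshold arithmetic above; the numbers are
     :  * hypothetical, not taken from any particular filesystem. With a 32 MiB
     :  * log (l_logBBsize = 65536 basic blocks) and a small need_bytes, the
     :  * quarter-of-the-log term dominates:
     :  *
     :  *	free_threshold = max(BTOBB(need_bytes), 65536 >> 2, 256) = 16384
     :  *
     :  * If the tail currently sits at cycle 5, block 60000, then
     :  * 60000 + 16384 = 76384 wraps past the end of the log, so the push target
     :  * becomes cycle 6, block 76384 - 65536 = 10848, and is then capped at
     :  * l_last_sync_lsn so we never push past the last record known to be on
     :  * disk.
     :  */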
1760 :
1761 : /*
1762 : * Push the tail of the log if we need to do so to maintain the free log space
1763 : * thresholds set out by xlog_grant_push_threshold. We may need to adopt a
1764 : * policy which pushes on an lsn which is further along in the log once we
1765 : * reach the high water mark. In this manner, we would be creating a low water
1766 : * mark.
1767 : */
1768 : STATIC void
1769 2882818620 : xlog_grant_push_ail(
1770 : struct xlog *log,
1771 : int need_bytes)
1772 : {
1773 2882818620 : xfs_lsn_t threshold_lsn;
1774 :
1775 2882818620 : threshold_lsn = xlog_grant_push_threshold(log, need_bytes);
1776 2915322947 : if (threshold_lsn == NULLCOMMITLSN || xlog_is_shutdown(log))
1777 : return;
1778 :
1779 : /*
1780 : * Get the transaction layer to kick the dirty buffers out to
1781 : * disk asynchronously. No point in trying to do this if
1782 : * the filesystem is shutting down.
1783 : */
1784 33901041 : xfs_ail_push(log->l_ailp, threshold_lsn);
1785 : }
1786 :
1787 : /*
1788 : * Stamp cycle number in every block
1789 : */
1790 : STATIC void
1791 28805440 : xlog_pack_data(
1792 : struct xlog *log,
1793 : struct xlog_in_core *iclog,
1794 : int roundoff)
1795 : {
1796 28805440 : int i, j, k;
1797 28805440 : int size = iclog->ic_offset + roundoff;
1798 28805440 : __be32 cycle_lsn;
1799 28805440 : char *dp;
1800 :
1801 28805440 : cycle_lsn = CYCLE_LSN_DISK(iclog->ic_header.h_lsn);
1802 :
1803 28805440 : dp = iclog->ic_datap;
1804 1612093277 : for (i = 0; i < BTOBB(size); i++) {
1805 1583301884 : if (i >= (XLOG_HEADER_CYCLE_SIZE / BBSIZE))
1806 : break;
1807 1583287783 : iclog->ic_header.h_cycle_data[i] = *(__be32 *)dp;
1808 1583287837 : *(__be32 *)dp = cycle_lsn;
1809 1583287837 : dp += BBSIZE;
1810 : }
1811 :
1812 28805494 : if (xfs_has_logv2(log->l_mp)) {
1813 28805434 : xlog_in_core_2_t *xhdr = iclog->ic_data;
1814 :
1815 33352882 : for ( ; i < BTOBB(size); i++) {
1816 4547511 : j = i / (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
1817 4547511 : k = i % (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
1818 4547511 : xhdr[j].hic_xheader.xh_cycle_data[k] = *(__be32 *)dp;
1819 4547448 : *(__be32 *)dp = cycle_lsn;
1820 4547448 : dp += BBSIZE;
1821 : }
1822 :
1823 28892400 : for (i = 1; i < log->l_iclog_heads; i++)
1824 87029 : xhdr[i].hic_xheader.xh_cycle = cycle_lsn;
1825 : }
1826 28805431 : }
1827 :
1828 : /*
1829 : * Calculate the checksum for a log buffer.
1830 : *
1831 : * This is a little more complicated than it should be because the various
1832 : * headers and the actual data are non-contiguous.
1833 : */
1834 : __le32
1835 33708491 : xlog_cksum(
1836 : struct xlog *log,
1837 : struct xlog_rec_header *rhead,
1838 : char *dp,
1839 : int size)
1840 : {
1841 33708491 : uint32_t crc;
1842 :
1843 : /* first generate the crc for the record header ... */
1844 33708491 : crc = xfs_start_cksum_update((char *)rhead,
1845 : sizeof(struct xlog_rec_header),
1846 : offsetof(struct xlog_rec_header, h_crc));
1847 :
1848 : /* ... then for additional cycle data for v2 logs ... */
1849 33707792 : if (xfs_has_logv2(log->l_mp)) {
1850 33707704 : union xlog_in_core2 *xhdr = (union xlog_in_core2 *)rhead;
1851 33707704 : int i;
1852 33707704 : int xheads;
1853 :
1854 33707704 : xheads = DIV_ROUND_UP(size, XLOG_HEADER_CYCLE_SIZE);
1855 :
1856 33820731 : for (i = 1; i < xheads; i++) {
1857 113181 : crc = crc32c(crc, &xhdr[i].hic_xheader,
1858 : sizeof(struct xlog_rec_ext_header));
1859 : }
1860 : }
1861 :
1862 : /* ... and finally for the payload */
1863 33707639 : crc = crc32c(crc, dp, size);
1864 :
1865 33707356 : return xfs_end_cksum(crc);
1866 : }
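     : /*
     :  * Rough sketch of the coverage order implied above, e.g. for a userspace
     :  * checker re-deriving the record CRC; this only restates what the code
     :  * does, it does not define a new on-disk format:
     :  *
     :  *	crc = crc32c(seed, record header with h_crc treated as zero);
     :  *	for each extended header (v2 records larger than 32k):
     :  *		crc = crc32c(crc, extended header);
     :  *	crc = crc32c(crc, payload[0..size));
     :  *	h_crc = xfs_end_cksum(crc);
     :  */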
1867 :
1868 : static void
1869 28805542 : xlog_bio_end_io(
1870 : struct bio *bio)
1871 : {
1872 28805542 : struct xlog_in_core *iclog = bio->bi_private;
1873 :
1874 28805542 : queue_work(iclog->ic_log->l_ioend_workqueue,
1875 : &iclog->ic_end_io_work);
1876 28805542 : }
1877 :
1878 : static int
1879 28804819 : xlog_map_iclog_data(
1880 : struct bio *bio,
1881 : void *data,
1882 : size_t count)
1883 : {
1884 202793156 : do {
1885 202793156 : struct page *page = kmem_to_page(data);
1886 202792115 : unsigned int off = offset_in_page(data);
1887 202792115 : size_t len = min_t(size_t, count, PAGE_SIZE - off);
1888 :
1889 202792115 : if (bio_add_page(bio, page, len, off) != len)
1890 : return -EIO;
1891 :
1892 202793623 : data += len;
1893 202793623 : count -= len;
1894 202793623 : } while (count);
1895 :
1896 : return 0;
1897 : }
1898 :
1899 : STATIC void
1900 28804885 : xlog_write_iclog(
1901 : struct xlog *log,
1902 : struct xlog_in_core *iclog,
1903 : uint64_t bno,
1904 : unsigned int count)
1905 : {
1906 28804885 : ASSERT(bno < log->l_logBBsize);
1907 28804885 : trace_xlog_iclog_write(iclog, _RET_IP_);
1908 :
1909 : /*
1910 : * We lock the iclogbufs here so that we can serialise against I/O
1911 : * completion during unmount. We might be processing a shutdown
1912 : * triggered during unmount, and that can occur asynchronously to the
1913 : * unmount thread, and hence we need to ensure that completes before
1914 : * tearing down the iclogbufs. Hence we need to hold the buffer lock
1915 : * across the log IO to achieve that.
1916 : */
1917 28804362 : down(&iclog->ic_sema);
1918 57608714 : if (xlog_is_shutdown(log)) {
1919 : /*
1920 : * It would seem logical to return EIO here, but we rely on
1921 : * the log state machine to propagate I/O errors instead of
1922 : * doing it here. We kick off the state machine and unlock
1923 : * the buffer manually; the code needs to be kept in sync
1924 : * with the I/O completion path.
1925 : */
1926 7 : xlog_state_done_syncing(iclog);
1927 7 : up(&iclog->ic_sema);
1928 7 : return;
1929 : }
1930 :
1931 : /*
1932 : * We use REQ_SYNC | REQ_IDLE here to tell the block layer that there are more
1933 : * IOs coming immediately after this one. This prevents the block layer
1934 : * writeback throttle from throttling log writes behind background
1935 : * metadata writeback and causing priority inversions.
1936 : */
1937 28804350 : bio_init(&iclog->ic_bio, xfs_buftarg_bdev(log->l_targ), iclog->ic_bvec,
1938 28804350 : howmany(count, PAGE_SIZE),
1939 : REQ_OP_WRITE | REQ_META | REQ_SYNC | REQ_IDLE);
1940 28804863 : iclog->ic_bio.bi_iter.bi_sector = log->l_logBBstart + bno;
1941 28804863 : iclog->ic_bio.bi_end_io = xlog_bio_end_io;
1942 28804863 : iclog->ic_bio.bi_private = iclog;
1943 :
1944 28804863 : if (iclog->ic_flags & XLOG_ICL_NEED_FLUSH) {
1945 7876938 : iclog->ic_bio.bi_opf |= REQ_PREFLUSH;
1946 : /*
1947 : * For external log devices, we also need to flush the data
1948 : * device cache first to ensure all metadata writeback covered
1949 : * by the LSN in this iclog is on stable storage. This is slow,
1950 : * but it *must* complete before we issue the external log IO.
1951 : *
1952 : * If the flush fails, we cannot conclude that past metadata
1953 : * writeback from the log succeeded. Repeating the flush is
1954 : * not possible, hence we must shut down with log IO error to
1955 : * avoid shutdown re-entering this path and erroring out again.
1956 : */
1957 9599526 : if (log->l_targ != log->l_mp->m_ddev_targp &&
1958 1722603 : xfs_buftarg_flush(log->l_mp->m_ddev_targp)) {
1959 0 : xlog_force_shutdown(log, SHUTDOWN_LOG_IO_ERROR);
1960 0 : return;
1961 : }
1962 : }
1963 28804848 : if (iclog->ic_flags & XLOG_ICL_NEED_FUA)
1964 5802916 : iclog->ic_bio.bi_opf |= REQ_FUA;
1965 :
1966 28804848 : iclog->ic_flags &= ~(XLOG_ICL_NEED_FLUSH | XLOG_ICL_NEED_FUA);
1967 :
1968 28804848 : if (xlog_map_iclog_data(&iclog->ic_bio, iclog->ic_data, count)) {
1969 0 : xlog_force_shutdown(log, SHUTDOWN_LOG_IO_ERROR);
1970 0 : return;
1971 : }
1972 28805169 : if (is_vmalloc_addr(iclog->ic_data))
1973 : flush_kernel_vmap_range(iclog->ic_data, count);
1974 :
1975 : /*
1976 : * If this log buffer would straddle the end of the log we will have
1977 : * to split it up into two bios, so that we can continue at the start.
1978 : */
1979 28805146 : if (bno + BTOBB(count) > log->l_logBBsize) {
1980 10047 : struct bio *split;
1981 :
1982 10047 : split = bio_split(&iclog->ic_bio, log->l_logBBsize - bno,
1983 : GFP_NOIO, &fs_bio_set);
1984 10047 : bio_chain(split, &iclog->ic_bio);
1985 10047 : submit_bio(split);
1986 :
1987 : /* restart at logical offset zero for the remainder */
1988 10047 : iclog->ic_bio.bi_iter.bi_sector = log->l_logBBstart;
1989 : }
1990 :
1991 28805146 : submit_bio(&iclog->ic_bio);
1992 : }
1993 :
1994 : /*
1995 : * We need to bump cycle number for the part of the iclog that is
1996 : * written to the start of the log. Watch out for the header magic
1997 : * number case, though.
1998 : */
1999 : static void
2000 10047 : xlog_split_iclog(
2001 : struct xlog *log,
2002 : void *data,
2003 : uint64_t bno,
2004 : unsigned int count)
2005 : {
2006 10047 : unsigned int split_offset = BBTOB(log->l_logBBsize - bno);
2007 10047 : unsigned int i;
2008 :
2009 323263 : for (i = split_offset; i < count; i += BBSIZE) {
2010 313216 : uint32_t cycle = get_unaligned_be32(data + i);
2011 :
2012 313216 : if (++cycle == XLOG_HEADER_MAGIC_NUM)
2013 0 : cycle++;
2014 313216 : put_unaligned_be32(cycle, data + i);
2015 : }
2016 10047 : }
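     : /*
     :  * Hypothetical example of the wrap handling above: with l_logBBsize = 65536
     :  * and an iclog that starts at bno = 65530 with count = 8192 bytes (16 basic
     :  * blocks), split_offset = BBTOB(65536 - 65530) = 3072. The first 6 blocks
     :  * are written at the physical end of the log with the current cycle stamp;
     :  * the remaining 10 blocks land at the start of the log, so their stamped
     :  * cycle is bumped by one (and bumped again if it would collide with
     :  * XLOG_HEADER_MAGIC_NUM).
     :  */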
2017 :
2018 : static int
2019 28805495 : xlog_calc_iclog_size(
2020 : struct xlog *log,
2021 : struct xlog_in_core *iclog,
2022 : uint32_t *roundoff)
2023 : {
2024 28805495 : uint32_t count_init, count;
2025 :
2026 : /* Add for LR header */
2027 28805495 : count_init = log->l_iclog_hsize + iclog->ic_offset;
2028 28805495 : count = roundup(count_init, log->l_iclog_roundoff);
2029 :
2030 28805495 : *roundoff = count - count_init;
2031 :
2032 28805495 : ASSERT(count >= count_init);
2033 28805495 : ASSERT(*roundoff < log->l_iclog_roundoff);
2034 28805495 : return count;
2035 : }
2036 :
2037 : /*
2038 : * Flush out the in-core log (iclog) to the on-disk log in an asynchronous
2039 : * fashion. Previously, we should have moved the current iclog
2040 : * ptr in the log to point to the next available iclog. This allows further
2041 : * writes to continue while this code syncs out an iclog ready to go.
2042 : * Before an in-core log can be written out, the data section must be scanned
2043 : * to save away the 1st word of each BBSIZE block into the header. We replace
2044 : * it with the current cycle count. Each BBSIZE block is tagged with the
2045 : * cycle count because there in an implicit assumption that drives will
2046 : * guarantee that entire 512 byte blocks get written at once. In other words,
2047 : * we can't have part of a 512 byte block written and part not written. By
2048 : * tagging each block, we will know which blocks are valid when recovering
2049 : * after an unclean shutdown.
2050 : *
2051 : * This routine is single threaded on the iclog. No other thread can be in
2052 : * this routine with the same iclog. Changing contents of iclog can there-
2053 : * fore be done without grabbing the state machine lock. Updating the global
2054 : * log will require grabbing the lock though.
2055 : *
2056 : * The entire log manager uses a logical block numbering scheme. Only
2057 : * xlog_write_iclog knows about the fact that the log may not start with
2058 : * block zero on a given device.
2059 : */
2060 : STATIC void
2061 28805529 : xlog_sync(
2062 : struct xlog *log,
2063 : struct xlog_in_core *iclog,
2064 : struct xlog_ticket *ticket)
2065 : {
2066 28805529 : unsigned int count; /* byte count of bwrite */
2067 28805529 : unsigned int roundoff; /* roundoff to BB or stripe */
2068 28805529 : uint64_t bno;
2069 28805529 : unsigned int size;
2070 :
2071 28805529 : ASSERT(atomic_read(&iclog->ic_refcnt) == 0);
2072 28805529 : trace_xlog_iclog_sync(iclog, _RET_IP_);
2073 :
2074 28805492 : count = xlog_calc_iclog_size(log, iclog, &roundoff);
2075 :
2076 : /*
2077 : * If we have a ticket, account for the roundoff via the ticket
2078 : * reservation to avoid touching the hot grant heads needlessly.
2079 : * Otherwise, we have to move grant heads directly.
2080 : */
2081 28805489 : if (ticket) {
2082 23663727 : ticket->t_curr_res -= roundoff;
2083 : } else {
2084 5141762 : xlog_grant_add_space(log, &log->l_reserve_head.grant, roundoff);
2085 5141763 : xlog_grant_add_space(log, &log->l_write_head.grant, roundoff);
2086 : }
2087 :
2088 : /* put cycle number in every block */
2089 28805491 : xlog_pack_data(log, iclog, roundoff);
2090 :
2091 : /* real byte length */
2092 28805439 : size = iclog->ic_offset;
2093 28805439 : if (xfs_has_logv2(log->l_mp))
2094 28805382 : size += roundoff;
2095 28805439 : iclog->ic_header.h_len = cpu_to_be32(size);
2096 :
2097 28805439 : XFS_STATS_INC(log->l_mp, xs_log_writes);
2098 28805477 : XFS_STATS_ADD(log->l_mp, xs_log_blocks, BTOBB(count));
2099 :
2100 28805473 : bno = BLOCK_LSN(be64_to_cpu(iclog->ic_header.h_lsn));
2101 :
2102 : /* Do we need to split this write into 2 parts? */
2103 28805473 : if (bno + BTOBB(count) > log->l_logBBsize)
2104 10047 : xlog_split_iclog(log, &iclog->ic_header, bno, count);
2105 :
2106 : /* calculate the checksum */
2107 57610040 : iclog->ic_header.h_crc = xlog_cksum(log, &iclog->ic_header,
2108 28805473 : iclog->ic_datap, size);
2109 : /*
2110 : * Intentionally corrupt the log record CRC based on the error injection
2111 : * frequency, if defined. This facilitates testing log recovery in the
2112 : * event of torn writes. Hence, set the IOABORT state to abort the log
2113 : * write on I/O completion and shutdown the fs. The subsequent mount
2114 : * detects the bad CRC and attempts to recover.
2115 : */
2116 : #ifdef DEBUG
2117 28804567 : if (XFS_TEST_ERROR(false, log->l_mp, XFS_ERRTAG_LOG_BAD_CRC)) {
2118 59 : iclog->ic_header.h_crc &= cpu_to_le32(0xAAAAAAAA);
2119 59 : iclog->ic_fail_crc = true;
2120 59 : xfs_warn(log->l_mp,
2121 : "Intentionally corrupted log record at LSN 0x%llx. Shutdown imminent.",
2122 : be64_to_cpu(iclog->ic_header.h_lsn));
2123 : }
2124 : #endif
2125 28804569 : xlog_verify_iclog(log, iclog, count);
2126 28805393 : xlog_write_iclog(log, iclog, bno, count);
2127 28805346 : }
2128 :
2129 : /*
2130 : * Deallocate a log structure
2131 : */
2132 : STATIC void
2133 60774 : xlog_dealloc_log(
2134 : struct xlog *log)
2135 : {
2136 60774 : xlog_in_core_t *iclog, *next_iclog;
2137 60774 : int i;
2138 :
2139 : /*
2140 : * Destroy the CIL after waiting for iclog IO completion because an
2141 : * iclog EIO error will try to shut down the log, which accesses the
2142 : * CIL to wake up the waiters.
2143 : */
2144 60774 : xlog_cil_destroy(log);
2145 :
2146 60774 : iclog = log->l_iclog;
2147 546906 : for (i = 0; i < log->l_iclog_bufs; i++) {
2148 486132 : next_iclog = iclog->ic_next;
2149 486132 : kmem_free(iclog->ic_data);
2150 486132 : kmem_free(iclog);
2151 486132 : iclog = next_iclog;
2152 : }
2153 :
2154 60774 : log->l_mp->m_log = NULL;
2155 60774 : destroy_workqueue(log->l_ioend_workqueue);
2156 60774 : kmem_free(log);
2157 60774 : }
2158 :
2159 : /*
2160 : * Update counters atomically now that memcpy is done.
2161 : */
2162 : static inline void
2163 : xlog_state_finish_copy(
2164 : struct xlog *log,
2165 : struct xlog_in_core *iclog,
2166 : int record_cnt,
2167 : int copy_bytes)
2168 : {
2169 34733355 : lockdep_assert_held(&log->l_icloglock);
2170 :
2171 34733355 : be32_add_cpu(&iclog->ic_header.h_num_logops, record_cnt);
2172 34733355 : iclog->ic_offset += copy_bytes;
2173 : }
2174 :
2175 : /*
2176 : * print out info relating to regions written which consume
2177 : * the reservation
2178 : */
2179 : void
2180 0 : xlog_print_tic_res(
2181 : struct xfs_mount *mp,
2182 : struct xlog_ticket *ticket)
2183 : {
2184 0 : xfs_warn(mp, "ticket reservation summary:");
2185 0 : xfs_warn(mp, " unit res = %d bytes", ticket->t_unit_res);
2186 0 : xfs_warn(mp, " current res = %d bytes", ticket->t_curr_res);
2187 0 : xfs_warn(mp, " original count = %d", ticket->t_ocnt);
2188 0 : xfs_warn(mp, " remaining count = %d", ticket->t_cnt);
2189 0 : }
2190 :
2191 : /*
2192 : * Print a summary of the transaction.
2193 : */
2194 : void
2195 0 : xlog_print_trans(
2196 : struct xfs_trans *tp)
2197 : {
2198 0 : struct xfs_mount *mp = tp->t_mountp;
2199 0 : struct xfs_log_item *lip;
2200 :
2201 : /* dump core transaction and ticket info */
2202 0 : xfs_warn(mp, "transaction summary:");
2203 0 : xfs_warn(mp, " log res = %d", tp->t_log_res);
2204 0 : xfs_warn(mp, " log count = %d", tp->t_log_count);
2205 0 : xfs_warn(mp, " flags = 0x%x", tp->t_flags);
2206 :
2207 0 : xlog_print_tic_res(mp, tp->t_ticket);
2208 :
2209 : /* dump each log item */
2210 0 : list_for_each_entry(lip, &tp->t_items, li_trans) {
2211 0 : struct xfs_log_vec *lv = lip->li_lv;
2212 0 : struct xfs_log_iovec *vec;
2213 0 : int i;
2214 :
2215 0 : xfs_warn(mp, "log item: ");
2216 0 : xfs_warn(mp, " type = 0x%x", lip->li_type);
2217 0 : xfs_warn(mp, " flags = 0x%lx", lip->li_flags);
2218 0 : if (!lv)
2219 0 : continue;
2220 0 : xfs_warn(mp, " niovecs = %d", lv->lv_niovecs);
2221 0 : xfs_warn(mp, " size = %d", lv->lv_size);
2222 0 : xfs_warn(mp, " bytes = %d", lv->lv_bytes);
2223 0 : xfs_warn(mp, " buf len = %d", lv->lv_buf_len);
2224 :
2225 : /* dump each iovec for the log item */
2226 0 : vec = lv->lv_iovecp;
2227 0 : for (i = 0; i < lv->lv_niovecs; i++) {
2228 0 : int dumplen = min(vec->i_len, 32);
2229 :
2230 0 : xfs_warn(mp, " iovec[%d]", i);
2231 0 : xfs_warn(mp, " type = 0x%x", vec->i_type);
2232 0 : xfs_warn(mp, " len = %d", vec->i_len);
2233 0 : xfs_warn(mp, " first %d bytes of iovec[%d]:", dumplen, i);
2234 0 : xfs_hex_dump(vec->i_addr, dumplen);
2235 :
2236 0 : vec++;
2237 : }
2238 : }
2239 0 : }
2240 :
2241 : static inline void
2242 2571915367 : xlog_write_iovec(
2243 : struct xlog_in_core *iclog,
2244 : uint32_t *log_offset,
2245 : void *data,
2246 : uint32_t write_len,
2247 : int *bytes_left,
2248 : uint32_t *record_cnt,
2249 : uint32_t *data_cnt)
2250 : {
2251 2571915367 : ASSERT(*log_offset < iclog->ic_log->l_iclog_size);
2252 2571915367 : ASSERT(*log_offset % sizeof(int32_t) == 0);
2253 2571915367 : ASSERT(write_len % sizeof(int32_t) == 0);
2254 :
2255 5143830734 : memcpy(iclog->ic_datap + *log_offset, data, write_len);
2256 2571915367 : *log_offset += write_len;
2257 2571915367 : *bytes_left -= write_len;
2258 2571915367 : (*record_cnt)++;
2259 2571915367 : *data_cnt += write_len;
2260 2571915367 : }
2261 :
2262 : /*
2263 : * Write log vectors into a single iclog which is guaranteed by the caller
2264 : * to have enough space to write the entire log vector into.
2265 : */
2266 : static void
2267 913089628 : xlog_write_full(
2268 : struct xfs_log_vec *lv,
2269 : struct xlog_ticket *ticket,
2270 : struct xlog_in_core *iclog,
2271 : uint32_t *log_offset,
2272 : uint32_t *len,
2273 : uint32_t *record_cnt,
2274 : uint32_t *data_cnt)
2275 : {
2276 913089628 : int index;
2277 :
2278 913089628 : ASSERT(*log_offset + *len <= iclog->ic_size ||
2279 : iclog->ic_state == XLOG_STATE_WANT_SYNC);
2280 :
2281 : /*
2282 : * Ordered log vectors have no regions to write so this
2283 : * loop will naturally skip them.
2284 : */
2285 3399296073 : for (index = 0; index < lv->lv_niovecs; index++) {
2286 2486149941 : struct xfs_log_iovec *reg = &lv->lv_iovecp[index];
2287 2486149941 : struct xlog_op_header *ophdr = reg->i_addr;
2288 :
2289 2486149941 : ophdr->oh_tid = cpu_to_be32(ticket->t_tid);
2290 2486149941 : xlog_write_iovec(iclog, log_offset, reg->i_addr,
2291 2486149941 : reg->i_len, len, record_cnt, data_cnt);
2292 : }
2293 913146132 : }
2294 :
2295 : static int
2296 23096508 : xlog_write_get_more_iclog_space(
2297 : struct xlog_ticket *ticket,
2298 : struct xlog_in_core **iclogp,
2299 : uint32_t *log_offset,
2300 : uint32_t len,
2301 : uint32_t *record_cnt,
2302 : uint32_t *data_cnt)
2303 : {
2304 23096508 : struct xlog_in_core *iclog = *iclogp;
2305 23096508 : struct xlog *log = iclog->ic_log;
2306 23096508 : int error;
2307 :
2308 23096508 : spin_lock(&log->l_icloglock);
2309 23096667 : ASSERT(iclog->ic_state == XLOG_STATE_WANT_SYNC);
2310 23096667 : xlog_state_finish_copy(log, iclog, *record_cnt, *data_cnt);
2311 23096667 : error = xlog_state_release_iclog(log, iclog, ticket);
2312 23096664 : spin_unlock(&log->l_icloglock);
2313 23096660 : if (error)
2314 : return error;
2315 :
2316 23096649 : error = xlog_state_get_iclog_space(log, len, &iclog, ticket,
2317 : log_offset);
2318 23096644 : if (error)
2319 : return error;
2320 23093628 : *record_cnt = 0;
2321 23093628 : *data_cnt = 0;
2322 23093628 : *iclogp = iclog;
2323 23093628 : return 0;
2324 : }
2325 :
2326 : /*
2327 : * Write log vectors into a single iclog which is smaller than the current chain
2328 : * length. We write until we cannot fit a full record into the remaining space
2329 : * and then stop. We return the log vector that is to be written that cannot
2330 : * wholly fit in the iclog.
2331 : */
2332 : static int
2333 23095513 : xlog_write_partial(
2334 : struct xfs_log_vec *lv,
2335 : struct xlog_ticket *ticket,
2336 : struct xlog_in_core **iclogp,
2337 : uint32_t *log_offset,
2338 : uint32_t *len,
2339 : uint32_t *record_cnt,
2340 : uint32_t *data_cnt)
2341 : {
2342 23095513 : struct xlog_in_core *iclog = *iclogp;
2343 23095513 : struct xlog_op_header *ophdr;
2344 23095513 : int index = 0;
2345 23095513 : uint32_t rlen;
2346 23095513 : int error;
2347 :
2348 : /* walk the logvec, copying until we run out of space in the iclog */
2349 87066457 : for (index = 0; index < lv->lv_niovecs; index++) {
2350 63974081 : struct xfs_log_iovec *reg = &lv->lv_iovecp[index];
2351 63974081 : uint32_t reg_offset = 0;
2352 :
2353 : /*
2354 : * The first region of a continuation must have a non-zero
2355 : * length otherwise log recovery will just skip over it and
2356 : * start recovering from the next opheader it finds. Because we
2357 : * mark the next opheader as a continuation, recovery will then
2358 : * incorrectly add the continuation to the previous region and
2359 : * that breaks stuff.
2360 : *
2361 : * Hence if there isn't space for region data after the
2362 : * opheader, then we need to start afresh with a new iclog.
2363 : */
2364 63974081 : if (iclog->ic_size - *log_offset <=
2365 : sizeof(struct xlog_op_header)) {
2366 1165283 : error = xlog_write_get_more_iclog_space(ticket,
2367 : &iclog, log_offset, *len, record_cnt,
2368 : data_cnt);
2369 1165284 : if (error)
2370 107 : return error;
2371 : }
2372 :
2373 63973975 : ophdr = reg->i_addr;
2374 63973975 : rlen = min_t(uint32_t, reg->i_len, iclog->ic_size - *log_offset);
2375 :
2376 63973975 : ophdr->oh_tid = cpu_to_be32(ticket->t_tid);
2377 63973975 : ophdr->oh_len = cpu_to_be32(rlen - sizeof(struct xlog_op_header));
2378 63973975 : if (rlen != reg->i_len)
2379 21930349 : ophdr->oh_flags |= XLOG_CONTINUE_TRANS;
2380 :
2381 63973975 : xlog_write_iovec(iclog, log_offset, reg->i_addr,
2382 : rlen, len, record_cnt, data_cnt);
2383 :
2384 : /* If we wrote the whole region, move to the next. */
2385 63973685 : if (rlen == reg->i_len)
2386 42043521 : continue;
2387 :
2388 : /*
2389 : * We now have a partially written iovec, but it can span
2390 : * multiple iclogs so we loop here. First we release the iclog
2391 : * we currently have, then we get a new iclog and add a new
2392 : * opheader. Then we continue copying from where we were until
2393 : * we either complete the iovec or fill the iclog. If we
2394 : * complete the iovec, then we increment the index and go right
2395 : * back to the top of the outer loop. if we fill the iclog, we
2396 : * run the inner loop again.
2397 : *
2398 : * This is complicated by the tail of a region using all the
2399 : * space in an iclog and hence requiring us to release the iclog
2400 : * and get a new one before returning to the outer loop. We must
2401 : * always guarantee that we exit this inner loop with at least
2402 : * space for log transaction opheaders left in the current
2403 : * iclog, hence we cannot just terminate the loop at the end
2404 : * of the continuation. So we loop while there is no
2405 : * space left in the current iclog, and check for the end of the
2406 : * continuation after getting a new iclog.
2407 : */
2408 21931093 : do {
2409 : /*
2410 : * Ensure we include the continuation opheader in the
2411 : * space we need in the new iclog by adding that size
2412 : * to the length we require. This continuation opheader
2413 : * needs to be accounted to the ticket as the space it
2414 : * consumes hasn't been accounted to the lv we are
2415 : * writing.
2416 : */
2417 21931093 : error = xlog_write_get_more_iclog_space(ticket,
2418 : &iclog, log_offset,
2419 21931093 : *len + sizeof(struct xlog_op_header),
2420 : record_cnt, data_cnt);
2421 21931287 : if (error)
2422 2918 : return error;
2423 :
2424 21928369 : ophdr = iclog->ic_datap + *log_offset;
2425 21928369 : ophdr->oh_tid = cpu_to_be32(ticket->t_tid);
2426 21928369 : ophdr->oh_clientid = XFS_TRANSACTION;
2427 21928369 : ophdr->oh_res2 = 0;
2428 21928369 : ophdr->oh_flags = XLOG_WAS_CONT_TRANS;
2429 :
2430 21928369 : ticket->t_curr_res -= sizeof(struct xlog_op_header);
2431 21928369 : *log_offset += sizeof(struct xlog_op_header);
2432 21928369 : *data_cnt += sizeof(struct xlog_op_header);
2433 :
2434 : /*
2435 : * If rlen fits in the iclog, then end the region
2436 : * continuation. Otherwise we're going around again.
2437 : */
2438 21928369 : reg_offset += rlen;
2439 21928369 : rlen = reg->i_len - reg_offset;
2440 21928369 : if (rlen <= iclog->ic_size - *log_offset)
2441 21927450 : ophdr->oh_flags |= XLOG_END_TRANS;
2442 : else
2443 919 : ophdr->oh_flags |= XLOG_CONTINUE_TRANS;
2444 :
2445 21928369 : rlen = min_t(uint32_t, rlen, iclog->ic_size - *log_offset);
2446 21928369 : ophdr->oh_len = cpu_to_be32(rlen);
2447 :
2448 21928369 : xlog_write_iovec(iclog, log_offset,
2449 21928369 : reg->i_addr + reg_offset,
2450 : rlen, len, record_cnt, data_cnt);
2451 :
2452 21928352 : } while (ophdr->oh_flags & XLOG_CONTINUE_TRANS);
2453 : }
2454 :
2455 : /*
2456 : * No more iovecs remain in this logvec so return the next log vec to
2457 : * the caller so it can go back to fast path copying.
2458 : */
2459 23092376 : *iclogp = iclog;
2460 23092376 : return 0;
2461 : }
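     : /*
     :  * Rough walk-through of the continuation handling above (sizes are
     :  * hypothetical): a 70000 byte region whose copy starts with only 2000
     :  * bytes left in the current iclog. The opheader embedded at the front of
     :  * the region describes 2000 - sizeof(struct xlog_op_header) bytes and is
     :  * flagged XLOG_CONTINUE_TRANS. Each following iclog then gets a fresh
     :  * XLOG_WAS_CONT_TRANS opheader written straight into it (and charged to
     :  * the ticket) covering as much of the remainder as fits, and the opheader
     :  * for the final chunk also carries XLOG_END_TRANS.
     :  */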
2462 :
2463 : /*
2464 : * Write some region out to in-core log
2465 : *
2466 : * This will be called when writing externally provided regions or when
2467 : * writing out a commit record for a given transaction.
2468 : *
2469 : * General algorithm:
2470 : * 1. Find total length of this write. This may include adding to the
2471 : * lengths passed in.
2472 : * 2. Check whether we violate the ticket's reservation.
2473 : * 3. While writing to this iclog
2474 : * A. Reserve as much space in this iclog as we can get
2475 : * B. If this is first write, save away start lsn
2476 : * C. While writing this region:
2477 : * 1. If first write of transaction, write start record
2478 : * 2. Write log operation header (header per region)
2479 : * 3. Find out if we can fit entire region into this iclog
2480 : * 4. Potentially, verify destination memcpy ptr
2481 : * 5. Memcpy (partial) region
2482 : * 6. If partial copy, release iclog; otherwise, continue
2483 : * copying more regions into current iclog
2484 : * 4. Mark want sync bit (in simulation mode)
2485 : * 5. Release iclog for potential flush to on-disk log.
2486 : *
2487 : * ERRORS:
2488 : * 1. Panic if reservation is overrun. This should never happen since
2489 : * reservation amounts are generated internal to the filesystem.
2490 : * NOTES:
2491 : * 1. Tickets are single threaded data structures.
2492 : * 2. The XLOG_END_TRANS & XLOG_CONTINUE_TRANS flags are passed down to the
2493 : * syncing routine. When a single log_write region needs to span
2494 : * multiple in-core logs, the XLOG_CONTINUE_TRANS bit should be set
2495 : * on all log operation writes which don't contain the end of the
2496 : * region. The XLOG_END_TRANS bit is used for the in-core log
2497 : * operation which contains the end of the continued log_write region.
2498 : * 3. When xlog_state_get_iclog_space() grabs the rest of the current iclog,
2499 : * we don't really know exactly how much space will be used. As a result,
2500 : * we don't update ic_offset until the end when we know exactly how many
2501 : * bytes have been written out.
2502 : */
2503 : int
2504 11639711 : xlog_write(
2505 : struct xlog *log,
2506 : struct xfs_cil_ctx *ctx,
2507 : struct list_head *lv_chain,
2508 : struct xlog_ticket *ticket,
2509 : uint32_t len)
2510 :
2511 : {
2512 11639711 : struct xlog_in_core *iclog = NULL;
2513 11639711 : struct xfs_log_vec *lv;
2514 11639711 : uint32_t record_cnt = 0;
2515 11639711 : uint32_t data_cnt = 0;
2516 11639711 : int error = 0;
2517 11639711 : int log_offset;
2518 :
2519 11639711 : if (ticket->t_curr_res < 0) {
2520 0 : xfs_alert_tag(log->l_mp, XFS_PTAG_LOGRES,
2521 : "ctx ticket reservation ran out. Need to up reservation");
2522 0 : xlog_print_tic_res(log->l_mp, ticket);
2523 0 : xlog_force_shutdown(log, SHUTDOWN_LOG_IO_ERROR);
2524 : }
2525 :
2526 11639711 : error = xlog_state_get_iclog_space(log, len, &iclog, ticket,
2527 : &log_offset);
2528 11639716 : if (error)
2529 : return error;
2530 :
2531 11639715 : ASSERT(log_offset <= iclog->ic_size - 1);
2532 :
2533 : /*
2534 : * If we have a context pointer, pass it the first iclog we are
2535 : * writing to so it can record state needed for iclog write
2536 : * ordering.
2537 : */
2538 11639715 : if (ctx)
2539 11589620 : xlog_cil_set_ctx_write_state(ctx, iclog);
2540 :
2541 947815641 : list_for_each_entry(lv, lv_chain, lv_list) {
2542 : /*
2543 : * If the entire log vec does not fit in the iclog, punt it to
2544 : * the partial copy loop which can handle this case.
2545 : */
2546 936178956 : if (lv->lv_niovecs &&
2547 934487181 : lv->lv_bytes > iclog->ic_size - log_offset) {
2548 23095518 : error = xlog_write_partial(lv, ticket, &iclog,
2549 : &log_offset, &len, &record_cnt,
2550 : &data_cnt);
2551 23095362 : if (error) {
2552 : /*
2553 : * We have no iclog to release, so just return
2554 : * the error immediately.
2555 : */
2556 3025 : return error;
2557 : }
2558 : } else {
2559 913083438 : xlog_write_full(lv, ticket, iclog, &log_offset,
2560 : &len, &record_cnt, &data_cnt);
2561 : }
2562 : }
2563 11636685 : ASSERT(len == 0);
2564 :
2565 : /*
2566 : * We've already been guaranteed that the last writes will fit inside
2567 : * the current iclog, and hence it will already have the space used by
2568 : * those writes accounted to it. Hence we do not need to update the
2569 : * iclog with the number of bytes written here.
2570 : */
2571 11636685 : spin_lock(&log->l_icloglock);
2572 11636688 : xlog_state_finish_copy(log, iclog, record_cnt, 0);
2573 11636688 : error = xlog_state_release_iclog(log, iclog, ticket);
2574 11636689 : spin_unlock(&log->l_icloglock);
2575 :
2576 11636689 : return error;
2577 : }
2578 :
2579 : static void
2580 28793059 : xlog_state_activate_iclog(
2581 : struct xlog_in_core *iclog,
2582 : int *iclogs_changed)
2583 : {
2584 57586118 : ASSERT(list_empty_careful(&iclog->ic_callbacks));
2585 28793059 : trace_xlog_iclog_activate(iclog, _RET_IP_);
2586 :
2587 : /*
2588 : * If the number of ops in this iclog indicate it just contains the
2589 : * dummy transaction, we can change state into IDLE (the second time
2590 : * around). Otherwise we should change the state into NEED a dummy.
2591 : * We don't need to cover the dummy.
2592 : */
2593 28793059 : if (*iclogs_changed == 0 &&
2594 28793059 : iclog->ic_header.h_num_logops == cpu_to_be32(XLOG_COVER_OPS)) {
2595 305474 : *iclogs_changed = 1;
2596 : } else {
2597 : /*
2598 : * We have two dirty iclogs so start over. This could also be
2599 : * the number of ops indicating this is not the dummy going out.
2600 : */
2601 28487585 : *iclogs_changed = 2;
2602 : }
2603 :
2604 28793059 : iclog->ic_state = XLOG_STATE_ACTIVE;
2605 28793059 : iclog->ic_offset = 0;
2606 28793059 : iclog->ic_header.h_num_logops = 0;
2607 28793059 : memset(iclog->ic_header.h_cycle_data, 0,
2608 : sizeof(iclog->ic_header.h_cycle_data));
2609 28793059 : iclog->ic_header.h_lsn = 0;
2610 28793059 : iclog->ic_header.h_tail_lsn = 0;
2611 28793059 : }
2612 :
2613 : /*
2614 : * Loop through all iclogs and mark all iclogs currently marked DIRTY as
2615 : * ACTIVE after iclog I/O has completed.
2616 : */
2617 : static void
2618 28793059 : xlog_state_activate_iclogs(
2619 : struct xlog *log,
2620 : int *iclogs_changed)
2621 : {
2622 28793059 : struct xlog_in_core *iclog = log->l_iclog;
2623 :
2624 149385866 : do {
2625 149385866 : if (iclog->ic_state == XLOG_STATE_DIRTY)
2626 28793059 : xlog_state_activate_iclog(iclog, iclogs_changed);
2627 : /*
2628 : * The ordering of marking iclogs ACTIVE must be maintained, so
2629 : * an iclog doesn't become ACTIVE beyond one that is SYNCING.
2630 : */
2631 120592807 : else if (iclog->ic_state != XLOG_STATE_ACTIVE)
2632 : break;
2633 127443024 : } while ((iclog = iclog->ic_next) != log->l_iclog);
2634 28793059 : }
2635 :
2636 : static int
2637 28793059 : xlog_covered_state(
2638 : int prev_state,
2639 : int iclogs_changed)
2640 : {
2641 : /*
2642 : * We go to NEED for any non-covering writes. We go to NEED2 if we just
2643 : * wrote the first covering record (DONE). We go to IDLE if we just
2644 : * wrote the second covering record (DONE2) and remain in IDLE until a
2645 : * non-covering write occurs.
2646 : */
2647 28793059 : switch (prev_state) {
2648 181937 : case XLOG_STATE_COVER_IDLE:
2649 181937 : if (iclogs_changed == 1)
2650 39011 : return XLOG_STATE_COVER_IDLE;
2651 : fallthrough;
2652 : case XLOG_STATE_COVER_NEED:
2653 : case XLOG_STATE_COVER_NEED2:
2654 : break;
2655 84411 : case XLOG_STATE_COVER_DONE:
2656 84411 : if (iclogs_changed == 1)
2657 84411 : return XLOG_STATE_COVER_NEED2;
2658 : break;
2659 84330 : case XLOG_STATE_COVER_DONE2:
2660 84330 : if (iclogs_changed == 1)
2661 84330 : return XLOG_STATE_COVER_IDLE;
2662 : break;
2663 0 : default:
2664 0 : ASSERT(0);
2665 : }
2666 :
2667 : return XLOG_STATE_COVER_NEED;
2668 : }
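     : /*
     :  * Putting the transitions above together with the ones made when the
     :  * covering records are issued elsewhere, the idle-log sequence is roughly:
     :  *
     :  *	NEED -> DONE -> NEED2 -> DONE2 -> IDLE
     :  *
     :  * where this function performs the DONE -> NEED2 and DONE2 -> IDLE steps as
     :  * each covering iclog is cleaned, and any non-covering write (iclogs_changed
     :  * == 2) drops the state back to NEED.
     :  */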
2669 :
2670 : STATIC void
2671 28793059 : xlog_state_clean_iclog(
2672 : struct xlog *log,
2673 : struct xlog_in_core *dirty_iclog)
2674 : {
2675 28793059 : int iclogs_changed = 0;
2676 :
2677 28793059 : trace_xlog_iclog_clean(dirty_iclog, _RET_IP_);
2678 :
2679 28793059 : dirty_iclog->ic_state = XLOG_STATE_DIRTY;
2680 :
2681 28793059 : xlog_state_activate_iclogs(log, &iclogs_changed);
2682 28793059 : wake_up_all(&dirty_iclog->ic_force_wait);
2683 :
2684 28793059 : if (iclogs_changed) {
2685 28793059 : log->l_covered_state = xlog_covered_state(log->l_covered_state,
2686 : iclogs_changed);
2687 : }
2688 28793059 : }
2689 :
2690 : STATIC xfs_lsn_t
2691 28793059 : xlog_get_lowest_lsn(
2692 : struct xlog *log)
2693 : {
2694 28793059 : struct xlog_in_core *iclog = log->l_iclog;
2695 28793059 : xfs_lsn_t lowest_lsn = 0, lsn;
2696 :
2697 230344352 : do {
2698 230344352 : if (iclog->ic_state == XLOG_STATE_ACTIVE ||
2699 : iclog->ic_state == XLOG_STATE_DIRTY)
2700 98757574 : continue;
2701 :
2702 131586778 : lsn = be64_to_cpu(iclog->ic_header.h_lsn);
2703 131586778 : if ((lsn && !lowest_lsn) || XFS_LSN_CMP(lsn, lowest_lsn) < 0)
2704 : lowest_lsn = lsn;
2705 230344352 : } while ((iclog = iclog->ic_next) != log->l_iclog);
2706 :
2707 28793059 : return lowest_lsn;
2708 : }
2709 :
2710 : /*
2711 : * Completion of an iclog IO does not imply that a transaction has completed, as
2712 : * transactions can be large enough to span many iclogs. We cannot change the
2713 : * tail of the log half way through a transaction as this may be the only
2714 : * transaction in the log and moving the tail to point to the middle of it
2715 : * will prevent recovery from finding the start of the transaction. Hence we
2716 : * should only update the last_sync_lsn if this iclog contains transaction
2717 : * completion callbacks on it.
2718 : *
2719 : * We have to do this before we drop the icloglock to ensure we are the only one
2720 : * that can update it.
2721 : *
2722 : * If we are moving the last_sync_lsn forwards, we also need to ensure we kick
2723 : * the reservation grant head pushing. This is due to the fact that the push
2724 : * target is bound by the current last_sync_lsn value. Hence if we have a large
2725 : * amount of log space bound up in this committing transaction then the
2726 : * last_sync_lsn value may be the limiting factor preventing tail pushing from
2727 : * freeing space in the log. Hence once we've updated the last_sync_lsn we
2728 : * should push the AIL to ensure the push target (and hence the grant head) is
2729 : * no longer bound by the old log head location and can move forwards and make
2730 : * progress again.
2731 : */
2732 : static void
2733 28793059 : xlog_state_set_callback(
2734 : struct xlog *log,
2735 : struct xlog_in_core *iclog,
2736 : xfs_lsn_t header_lsn)
2737 : {
2738 28793059 : trace_xlog_iclog_callback(iclog, _RET_IP_);
2739 28793059 : iclog->ic_state = XLOG_STATE_CALLBACK;
2740 :
2741 57586118 : ASSERT(XFS_LSN_CMP(atomic64_read(&log->l_last_sync_lsn),
2742 : header_lsn) <= 0);
2743 :
2744 28793059 : if (list_empty_careful(&iclog->ic_callbacks))
2745 : return;
2746 :
2747 5749821 : atomic64_set(&log->l_last_sync_lsn, header_lsn);
2748 5749821 : xlog_grant_push_ail(log, 0);
2749 : }
2750 :
2751 : /*
2752 : * Return true if we need to stop processing, false to continue to the next
2753 : * iclog. The caller will need to run callbacks if the iclog is returned in the
2754 : * XLOG_STATE_CALLBACK state.
2755 : */
2756 : static bool
2757 256964432 : xlog_state_iodone_process_iclog(
2758 : struct xlog *log,
2759 : struct xlog_in_core *iclog)
2760 : {
2761 256964432 : xfs_lsn_t lowest_lsn;
2762 256964432 : xfs_lsn_t header_lsn;
2763 :
2764 256964432 : switch (iclog->ic_state) {
2765 : case XLOG_STATE_ACTIVE:
2766 : case XLOG_STATE_DIRTY:
2767 : /*
2768 : * Skip all iclogs in the ACTIVE & DIRTY states:
2769 : */
2770 : return false;
2771 28793059 : case XLOG_STATE_DONE_SYNC:
2772 : /*
2773 : * Now that we have an iclog that is in the DONE_SYNC state, do
2774 : * one more check here to see if we have chased our tail around.
2775 : * If this is not the lowest lsn iclog, then we will leave it
2776 : * for another completion to process.
2777 : */
2778 28793059 : header_lsn = be64_to_cpu(iclog->ic_header.h_lsn);
2779 28793059 : lowest_lsn = xlog_get_lowest_lsn(log);
2780 28793059 : if (lowest_lsn && XFS_LSN_CMP(lowest_lsn, header_lsn) < 0)
2781 : return false;
2782 28793059 : xlog_state_set_callback(log, iclog, header_lsn);
2783 28793059 : return false;
2784 36697404 : default:
2785 : /*
2786 : * Can only perform callbacks in order. Since this iclog is not
2787 : * in the DONE_SYNC state, we skip the rest and just try to
2788 : * clean up.
2789 : */
2790 36697404 : return true;
2791 : }
2792 : }
2793 :
2794 : /*
2795 : * Loop over all the iclogs, running attached callbacks on them. Return true if
2796 : * we ran any callbacks, indicating that we dropped the icloglock. We don't need
2797 : * to handle transient shutdown state here at all because
2798 : * xlog_state_shutdown_callbacks() will be run to do the necessary shutdown
2799 : * cleanup of the callbacks.
2800 : */
2801 : static bool
2802 50434807 : xlog_state_do_iclog_callbacks(
2803 : struct xlog *log)
2804 : __releases(&log->l_icloglock)
2805 : __acquires(&log->l_icloglock)
2806 : {
2807 50434807 : struct xlog_in_core *first_iclog = log->l_iclog;
2808 50434807 : struct xlog_in_core *iclog = first_iclog;
2809 50434807 : bool ran_callback = false;
2810 :
2811 256964432 : do {
2812 256964432 : LIST_HEAD(cb_list);
2813 :
2814 256964432 : if (xlog_state_iodone_process_iclog(log, iclog))
2815 : break;
2816 220267028 : if (iclog->ic_state != XLOG_STATE_CALLBACK) {
2817 191473969 : iclog = iclog->ic_next;
2818 191473969 : continue;
2819 : }
2820 28793059 : list_splice_init(&iclog->ic_callbacks, &cb_list);
2821 28793059 : spin_unlock(&log->l_icloglock);
2822 :
2823 28793059 : trace_xlog_iclog_callbacks_start(iclog, _RET_IP_);
2824 28793059 : xlog_cil_process_committed(&cb_list);
2825 28793059 : trace_xlog_iclog_callbacks_done(iclog, _RET_IP_);
2826 28793059 : ran_callback = true;
2827 :
2828 28793059 : spin_lock(&log->l_icloglock);
2829 28793059 : xlog_state_clean_iclog(log, iclog);
2830 28793059 : iclog = iclog->ic_next;
2831 220267028 : } while (iclog != first_iclog);
2832 :
2833 50434807 : return ran_callback;
2834 : }
2835 :
2836 :
2837 : /*
2838 : * Loop running iclog completion callbacks until there are no more iclogs in a
2839 : * state that can run callbacks.
2840 : */
2841 : STATIC void
2842 28805549 : xlog_state_do_callback(
2843 : struct xlog *log)
2844 : {
2845 28805549 : int flushcnt = 0;
2846 28805549 : int repeats = 0;
2847 :
2848 28805549 : spin_lock(&log->l_icloglock);
2849 50434807 : while (xlog_state_do_iclog_callbacks(log)) {
2850 43258538 : if (xlog_is_shutdown(log))
2851 : break;
2852 :
2853 21629258 : if (++repeats > 5000) {
2854 0 : flushcnt += repeats;
2855 0 : repeats = 0;
2856 0 : xfs_warn(log->l_mp,
2857 : "%s: possible infinite loop (%d iterations)",
2858 : __func__, flushcnt);
2859 : }
2860 : }
2861 :
2862 28805549 : if (log->l_iclog->ic_state == XLOG_STATE_ACTIVE)
2863 24434737 : wake_up_all(&log->l_flush_wait);
2864 :
2865 28805549 : spin_unlock(&log->l_icloglock);
2866 28805549 : }
2867 :
2868 :
2869 : /*
2870 : * Finish transitioning this iclog to the dirty state.
2871 : *
2872 : * Callbacks could take time, so they are done outside the scope of the
2873 : * global state machine log lock.
2874 : */
2875 : STATIC void
2876 28805549 : xlog_state_done_syncing(
2877 : struct xlog_in_core *iclog)
2878 : {
2879 28805549 : struct xlog *log = iclog->ic_log;
2880 :
2881 28805549 : spin_lock(&log->l_icloglock);
2882 28805549 : ASSERT(atomic_read(&iclog->ic_refcnt) == 0);
2883 28805549 : trace_xlog_iclog_sync_done(iclog, _RET_IP_);
2884 :
2885 : /*
2886 : * If we got an error, either on the first buffer, or in the case of
2887 : * split log writes, on the second, we shut down the file system and
2888 : * no iclogs should ever be attempted to be written to disk again.
2889 : */
2890 57611098 : if (!xlog_is_shutdown(log)) {
2891 28793129 : ASSERT(iclog->ic_state == XLOG_STATE_SYNCING);
2892 28793129 : iclog->ic_state = XLOG_STATE_DONE_SYNC;
2893 : }
2894 :
2895 : /*
2896 : * Someone could be sleeping prior to writing out the next
2897 : * iclog buffer; we wake them all. One will get to do the
2898 : * I/O, the others get to wait for the result.
2899 : */
2900 28805549 : wake_up_all(&iclog->ic_write_wait);
2901 28805549 : spin_unlock(&log->l_icloglock);
2902 28805549 : xlog_state_do_callback(log);
2903 28805549 : }
2904 :
2905 : /*
2906 : * If the head of the in-core log ring is not (ACTIVE or DIRTY), then we must
2907 : * sleep. We wait on the flush queue on the head iclog as that should be
2908 : * the first iclog to complete flushing. Hence if all iclogs are syncing,
2909 : * we will wait here and all new writes will sleep until a sync completes.
2910 : *
2911 : * The in-core logs are used in a circular fashion. They are not used
2912 : * out-of-order even when an iclog past the head is free.
2913 : *
2914 : * return:
2915 : * * log_offset where xlog_write() can start writing into the in-core
2916 : * log's data space.
2917 : * * in-core log pointer to which xlog_write() should write.
2918 : * * boolean indicating this is a continued write to an in-core log.
2919 : * If this is the last write, then the in-core log's offset field
2920 : * needs to be incremented, depending on the amount of data which
2921 : * is copied.
2922 : */
2923 : STATIC int
2924 34736354 : xlog_state_get_iclog_space(
2925 : struct xlog *log,
2926 : int len,
2927 : struct xlog_in_core **iclogp,
2928 : struct xlog_ticket *ticket,
2929 : int *logoffsetp)
2930 : {
2931 40974442 : int log_offset;
2932 40974442 : xlog_rec_header_t *head;
2933 40974442 : xlog_in_core_t *iclog;
2934 :
2935 : restart:
2936 40974442 : spin_lock(&log->l_icloglock);
2937 81949100 : if (xlog_is_shutdown(log)) {
2938 3017 : spin_unlock(&log->l_icloglock);
2939 3017 : return -EIO;
2940 : }
2941 :
2942 40971533 : iclog = log->l_iclog;
2943 40971533 : if (iclog->ic_state != XLOG_STATE_ACTIVE) {
2944 6236318 : XFS_STATS_INC(log->l_mp, xs_log_noiclogs);
2945 :
2946 : /* Wait for log writes to have flushed */
2947 6236318 : xlog_wait(&log->l_flush_wait, &log->l_icloglock);
2948 6236223 : goto restart;
2949 : }
2950 :
2951 34735215 : head = &iclog->ic_header;
2952 :
2953 34735215 : atomic_inc(&iclog->ic_refcnt); /* prevents sync */
2954 34735215 : log_offset = iclog->ic_offset;
2955 :
2956 34735215 : trace_xlog_iclog_get_space(iclog, _RET_IP_);
2957 :
2958 : /* On the 1st write to an iclog, figure out lsn. This works
2959 : * if iclogs marked XLOG_STATE_WANT_SYNC always write out what they are
2960 : * committing to. If the offset is set, that's how many blocks
2961 : * must be written.
2962 : */
2963 34735203 : if (log_offset == 0) {
2964 28807702 : ticket->t_curr_res -= log->l_iclog_hsize;
2965 28807702 : head->h_cycle = cpu_to_be32(log->l_curr_cycle);
2966 28807702 : head->h_lsn = cpu_to_be64(
2967 : xlog_assign_lsn(log->l_curr_cycle, log->l_curr_block));
2968 28807702 : ASSERT(log->l_curr_block >= 0);
2969 : }
2970 :
2971 : /* If there is enough room to write everything, then do it. Otherwise,
2972 : * claim the rest of the region and make sure the XLOG_STATE_WANT_SYNC
2973 : * bit is on, so this will get flushed out. Don't update ic_offset
2974 : * until you know exactly how many bytes get copied. Therefore, wait
2975 : * until later to update ic_offset.
2976 : *
2977 : * xlog_write() algorithm assumes that at least 2 xlog_op_header_t's
2978 : * can fit into remaining data section.
2979 : */
2980 34735203 : if (iclog->ic_size - iclog->ic_offset < 2*sizeof(xlog_op_header_t)) {
2981 1865 : int error = 0;
2982 :
2983 1865 : xlog_state_switch_iclogs(log, iclog, iclog->ic_size);
2984 :
2985 : /*
2986 : * If we are the only one writing to this iclog, sync it to
2987 : * disk. We need to do an atomic compare and decrement here to
2988 : * avoid racing with concurrent atomic_dec_and_lock() calls in
2989 : * xlog_state_release_iclog() when there is more than one
2990 : * reference to the iclog.
2991 : */
2992 3730 : if (!atomic_add_unless(&iclog->ic_refcnt, -1, 1))
2993 1845 : error = xlog_state_release_iclog(log, iclog, ticket);
2994 1865 : spin_unlock(&log->l_icloglock);
2995 1865 : if (error)
2996 0 : return error;
2997 1865 : goto restart;
2998 : }
2999 :
3000 : /* Do we have enough room to write the full amount in the remainder
3001 : * of this iclog? Or must we continue a write on the next iclog and
3002 : * mark this iclog as completely taken? In the case where we switch
3003 : * iclogs (to mark it taken), this particular iclog will release/sync
3004 : * to disk in xlog_write().
3005 : */
3006 34733338 : if (len <= iclog->ic_size - iclog->ic_offset)
3007 11636681 : iclog->ic_offset += len;
3008 : else
3009 23096657 : xlog_state_switch_iclogs(log, iclog, iclog->ic_size);
3010 34733340 : *iclogp = iclog;
3011 :
3012 34733340 : ASSERT(iclog->ic_offset <= iclog->ic_size);
3013 34733340 : spin_unlock(&log->l_icloglock);
3014 :
3015 34733350 : *logoffsetp = log_offset;
3016 34733350 : return 0;
3017 : }
3018 :
3019 : /*
3020 : * The first cnt-1 times a ticket goes through here we don't need to move the
3021 : * grant write head because the permanent reservation has reserved cnt times the
3022 : * unit amount. Release part of the current permanent unit reservation and reset
3023 : * the current reservation to be one unit's worth. Also move the grant reservation head
3024 : * forward.
3025 : */
3026 : void
3027 1330935681 : xfs_log_ticket_regrant(
3028 : struct xlog *log,
3029 : struct xlog_ticket *ticket)
3030 : {
3031 1330935681 : trace_xfs_log_ticket_regrant(log, ticket);
3032 :
3033 1330845832 : if (ticket->t_cnt > 0)
3034 944683233 : ticket->t_cnt--;
3035 :
3036 1330845832 : xlog_grant_sub_space(log, &log->l_reserve_head.grant,
3037 : ticket->t_curr_res);
3038 1331138838 : xlog_grant_sub_space(log, &log->l_write_head.grant,
3039 : ticket->t_curr_res);
3040 1331114954 : ticket->t_curr_res = ticket->t_unit_res;
3041 :
3042 1331114954 : trace_xfs_log_ticket_regrant_sub(log, ticket);
3043 :
3044 : /* just return if we still have some of the pre-reserved space */
3045 1331105371 : if (!ticket->t_cnt) {
3046 407211876 : xlog_grant_add_space(log, &log->l_reserve_head.grant,
3047 : ticket->t_unit_res);
3048 407213638 : trace_xfs_log_ticket_regrant_exit(log, ticket);
3049 :
3050 407211632 : ticket->t_curr_res = ticket->t_unit_res;
3051 : }
3052 :
3053 1331105127 : xfs_log_ticket_put(ticket);
3054 1331137324 : }
3055 :
3056 : /*
3057 : * Give back the space left from a reservation.
3058 : *
3059 : * All the information we need to make a correct determination of space left
3060 : * is present. For non-permanent reservations, things are quite easy. The
3061 : * count should have been decremented to zero. We only need to deal with the
3062 : * space remaining in the current reservation part of the ticket. If the
3063 : * ticket contains a permanent reservation, there may be left over space which
3064 : * needs to be released. A count of N means that N-1 refills of the current
3065 : * reservation can be done before we need to ask for more space. The first
3066 : * one goes to fill up the first current reservation. Once we run out of
3067 : * space, the count will stay at zero and the only space remaining will be
3068 : * in the current reservation field.
3069 : */
3070 : void
3071 1544684875 : xfs_log_ticket_ungrant(
3072 : struct xlog *log,
3073 : struct xlog_ticket *ticket)
3074 : {
3075 1544684875 : int bytes;
3076 :
3077 1544684875 : trace_xfs_log_ticket_ungrant(log, ticket);
3078 :
3079 1544327590 : if (ticket->t_cnt > 0)
3080 1329643558 : ticket->t_cnt--;
3081 :
3082 1544327590 : trace_xfs_log_ticket_ungrant_sub(log, ticket);
3083 :
3084 : /*
3085 : * If this is a permanent reservation ticket, we may be able to free
3086 : * up more space based on the remaining count.
3087 : */
3088 1544250067 : bytes = ticket->t_curr_res;
3089 1544250067 : if (ticket->t_cnt > 0) {
3090 1189574906 : ASSERT(ticket->t_flags & XLOG_TIC_PERM_RESERV);
3091 1189574906 : bytes += ticket->t_unit_res*ticket->t_cnt;
3092 : }
3093 :
3094 1544250067 : xlog_grant_sub_space(log, &log->l_reserve_head.grant, bytes);
3095 1545291469 : xlog_grant_sub_space(log, &log->l_write_head.grant, bytes);
3096 :
3097 1545196926 : trace_xfs_log_ticket_ungrant_exit(log, ticket);
3098 :
3099 1545138839 : xfs_log_space_wake(log->l_mp);
3100 1544968673 : xfs_log_ticket_put(ticket);
3101 1544687632 : }
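     : /*
     :  * Hypothetical example of the accounting above: a permanent ticket with
     :  * t_unit_res = 100000 bytes that still has t_cnt = 2 refills left and
     :  * t_curr_res = 40000 bytes unused. Ungranting decrements t_cnt to 1 and
     :  * returns 40000 + 1 * 100000 = 140000 bytes to both grant heads before
     :  * waking any space waiters and dropping the ticket reference.
     :  */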
3102 :
3103 : /*
3104 : * This routine will mark the current iclog in the ring as WANT_SYNC and move
3105 : * the current iclog pointer to the next iclog in the ring.
3106 : */
3107 : void
3108 28806023 : xlog_state_switch_iclogs(
3109 : struct xlog *log,
3110 : struct xlog_in_core *iclog,
3111 : int eventual_size)
3112 : {
3113 28806023 : ASSERT(iclog->ic_state == XLOG_STATE_ACTIVE);
3114 28806023 : assert_spin_locked(&log->l_icloglock);
3115 28806023 : trace_xlog_iclog_switch(iclog, _RET_IP_);
3116 :
3117 28806020 : if (!eventual_size)
3118 5707505 : eventual_size = iclog->ic_offset;
3119 28806020 : iclog->ic_state = XLOG_STATE_WANT_SYNC;
3120 28806020 : iclog->ic_header.h_prev_block = cpu_to_be32(log->l_prev_block);
3121 28806020 : log->l_prev_block = log->l_curr_block;
3122 28806020 : log->l_prev_cycle = log->l_curr_cycle;
3123 :
3124 : /* roll log?: ic_offset changed later */
3125 28806020 : log->l_curr_block += BTOBB(eventual_size)+BTOBB(log->l_iclog_hsize);
3126 :
3127 : /* Round up to next log-sunit */
3128 28806020 : if (log->l_iclog_roundoff > BBSIZE) {
3129 25337458 : uint32_t sunit_bb = BTOBB(log->l_iclog_roundoff);
3130 25337458 : log->l_curr_block = roundup(log->l_curr_block, sunit_bb);
3131 : }
3132 :
3133 28806020 : if (log->l_curr_block >= log->l_logBBsize) {
3134 : /*
3135 : * Rewind the current block before the cycle is bumped to make
3136 : * sure that the combined LSN never transiently moves forward
3137 : * when the log wraps to the next cycle. This is to support the
3138 : * unlocked sample of these fields from xlog_valid_lsn(). Most
3139 : * other cases should acquire l_icloglock.
3140 : */
3141 11761 : log->l_curr_block -= log->l_logBBsize;
3142 11761 : ASSERT(log->l_curr_block >= 0);
3143 11761 : smp_wmb();
3144 11761 : log->l_curr_cycle++;
3145 11761 : if (log->l_curr_cycle == XLOG_HEADER_MAGIC_NUM)
3146 0 : log->l_curr_cycle++;
3147 : }
3148 28806020 : ASSERT(iclog == log->l_iclog);
3149 28806020 : log->l_iclog = iclog->ic_next;
3150 28806020 : }
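/*
 * Editor's note: a simplified, standalone sketch of the head advance done by
 * xlog_state_switch_iclogs() above, with BTOBB() and roundup() spelled out as
 * plain integer math (BBSIZE = 512).  All names are local to the example; the
 * real code also issues a write barrier before bumping the cycle and skips
 * the header magic value as a cycle number.
 */
static void example_advance_head(int *curr_cycle, int *curr_block,
				 int eventual_size, int iclog_hsize,
				 int roundoff, int log_bbsize)
{
	/* data plus iclog header, in 512-byte basic blocks, rounded up */
	*curr_block += (eventual_size + 511) / 512 + (iclog_hsize + 511) / 512;

	/* round up to the next log stripe unit, if larger than a block */
	if (roundoff > 512) {
		int sunit_bb = (roundoff + 511) / 512;

		*curr_block = ((*curr_block + sunit_bb - 1) / sunit_bb) * sunit_bb;
	}

	/* wrap: rewind the block before bumping the cycle number */
	if (*curr_block >= log_bbsize) {
		*curr_block -= log_bbsize;
		(*curr_cycle)++;
	}
}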
3151 :
3152 : /*
3153 : * Force the iclog to disk and check if the iclog has been completed before
3154 : * xlog_force_iclog() returns. This can happen on synchronous (e.g.
3155 : * pmem) or fast async storage because we drop the icloglock to issue the IO.
3156 : * If completion has already occurred, tell the caller so that it can avoid an
3157 : * unnecessary wait on the iclog.
3158 : */
3159 : static int
3160 5115887 : xlog_force_and_check_iclog(
3161 : struct xlog_in_core *iclog,
3162 : bool *completed)
3163 : {
3164 5115887 : xfs_lsn_t lsn = be64_to_cpu(iclog->ic_header.h_lsn);
3165 5115887 : int error;
3166 :
3167 5115887 : *completed = false;
3168 5115887 : error = xlog_force_iclog(iclog);
3169 5115889 : if (error)
3170 : return error;
3171 :
3172 : /*
3173 : * If the iclog has already been completed and reused the header LSN
3174 : * will have been rewritten by completion
3175 : */
3176 5115889 : if (be64_to_cpu(iclog->ic_header.h_lsn) != lsn)
3177 129 : *completed = true;
3178 : return 0;
3179 : }
3180 :
3181 : /*
3182 : * Write out all data in the in-core log as of this exact moment in time.
3183 : *
3184 : * Data may be written to the in-core log during this call. However,
3185 : * we don't guarantee this data will be written out. A change from past
3186 : * we don't guarantee this data will be written out. Unlike past
3187 : * implementations, this routine will *not* write out zero length LRs.
3188 : * Basically, we try and perform an intelligent scan of the in-core logs.
3189 : * Basically, we try to perform an intelligent scan of the in-core logs.
3190 : * flushable data if:
3191 : *
3192 : * 1. the current iclog is active and has no data; the previous iclog
3193 : * is in the active or dirty state.
3194 : * 2. the current iclog is dirty, and the previous iclog is in the
3195 : * active or dirty state.
3196 : *
3197 : * We may sleep if:
3198 : *
3199 : * 1. the current iclog is not in the active nor dirty state.
3200 : * 2. the current iclog is dirty, and the previous iclog is not in the
3201 : * active nor dirty state.
3202 : * 3. the current iclog is active, and there is another thread writing
3203 : * to this particular iclog.
3204 : * 4. a) the current iclog is active and has no other writers
3205 : * b) when we return from flushing out this iclog, it is still
3206 : * not in the active nor dirty state.
3207 : */
3208 : int
3209 4319603 : xfs_log_force(
3210 : struct xfs_mount *mp,
3211 : uint flags)
3212 : {
3213 4319603 : struct xlog *log = mp->m_log;
3214 4319603 : struct xlog_in_core *iclog;
3215 :
3216 4319603 : XFS_STATS_INC(mp, xs_log_force);
3217 4320760 : trace_xfs_log_force(mp, 0, _RET_IP_);
3218 :
3219 4320565 : xlog_cil_force(log);
3220 :
3221 4322548 : spin_lock(&log->l_icloglock);
3222 8645232 : if (xlog_is_shutdown(log))
3223 47505 : goto out_error;
3224 :
3225 4275111 : iclog = log->l_iclog;
3226 4275111 : trace_xlog_iclog_force(iclog, _RET_IP_);
3227 :
3228 4275111 : if (iclog->ic_state == XLOG_STATE_DIRTY ||
3229 4268583 : (iclog->ic_state == XLOG_STATE_ACTIVE &&
3230 3853734 : atomic_read(&iclog->ic_refcnt) == 0 && iclog->ic_offset == 0)) {
3231 : /*
3232 : * If the head is dirty or (active and empty), then we need to
3233 : * look at the previous iclog.
3234 : *
3235 : * If the previous iclog is active or dirty we are done. There
3236 : * is nothing to sync out. Otherwise, we attach ourselves to the
3237 : * previous iclog and go to sleep.
3238 : */
3239 2387598 : iclog = iclog->ic_prev;
3240 1887513 : } else if (iclog->ic_state == XLOG_STATE_ACTIVE) {
3241 1880993 : if (atomic_read(&iclog->ic_refcnt) == 0) {
3242 : /* We have exclusive access to this iclog. */
3243 1466144 : bool completed;
3244 :
3245 1466144 : if (xlog_force_and_check_iclog(iclog, &completed))
3246 0 : goto out_error;
3247 :
3248 1466144 : if (completed)
3249 63 : goto out_unlock;
3250 : } else {
3251 : /*
3252 : * Someone else is still writing to this iclog, so we
3253 : * need to ensure that when they release the iclog it
3254 : * gets synced immediately as we may be waiting on it.
3255 : */
3256 414849 : xlog_state_switch_iclogs(log, iclog, 0);
3257 : }
3258 : }
3259 :
3260 : /*
3261 : * The iclog we are about to wait on may contain the checkpoint pushed
3262 : * by the above xlog_cil_force() call, but it may not have been pushed
3263 : * to disk yet. Like the ACTIVE case above, we need to make sure caches
3264 : * are flushed when this iclog is written.
3265 : */
3266 4275048 : if (iclog->ic_state == XLOG_STATE_WANT_SYNC)
3267 443787 : iclog->ic_flags |= XLOG_ICL_NEED_FLUSH | XLOG_ICL_NEED_FUA;
3268 :
3269 4275048 : if (flags & XFS_LOG_SYNC)
3270 4169981 : return xlog_wait_on_iclog(iclog);
3271 105067 : out_unlock:
3272 105130 : spin_unlock(&log->l_icloglock);
3273 105130 : return 0;
3274 47505 : out_error:
3275 47505 : spin_unlock(&log->l_icloglock);
3276 47505 : return -EIO;
3277 : }
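/*
 * Editor's note: hedged usage sketch.  A caller that needs everything already
 * committed to the in-core log to reach stable storage issues a synchronous
 * force; a zero flags value starts the writes without waiting for completion.
 * The wrapper is hypothetical, only xfs_log_force() and XFS_LOG_SYNC are from
 * this file.
 */
static int example_flush_whole_log(struct xfs_mount *mp)
{
	/* returns 0 on success, -EIO if the log has been shut down */
	return xfs_log_force(mp, XFS_LOG_SYNC);
}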
3278 :
3279 : /*
3280 : * Force the log to a specific LSN.
3281 : *
3282 : * If an iclog with that lsn can be found:
3283 : * If it is in the DIRTY state, just return.
3284 : * If it is in the ACTIVE state, move the in-core log into the WANT_SYNC
3285 : * state and go to sleep or return.
3286 : * If it is in any other state, go to sleep or return.
3287 : *
3288 : * Synchronous forces are implemented with a wait queue. All callers trying
3289 : * to force a given lsn to disk must wait on the queue attached to the
3290 : * specific in-core log. When the given in-core log finally completes its write
3291 : * to disk, that thread will wake up all threads waiting on the queue.
3292 : */
3293 : static int
3294 4548252 : xlog_force_lsn(
3295 : struct xlog *log,
3296 : xfs_lsn_t lsn,
3297 : uint flags,
3298 : int *log_flushed,
3299 : bool already_slept)
3300 : {
3301 4548252 : struct xlog_in_core *iclog;
3302 4548252 : bool completed;
3303 :
3304 4548252 : spin_lock(&log->l_icloglock);
3305 9098236 : if (xlog_is_shutdown(log))
3306 287 : goto out_error;
3307 :
3308 4548831 : iclog = log->l_iclog;
3309 8221340 : while (be64_to_cpu(iclog->ic_header.h_lsn) != lsn) {
3310 3674813 : trace_xlog_iclog_force_lsn(iclog, _RET_IP_);
3311 3674812 : iclog = iclog->ic_next;
3312 3674812 : if (iclog == log->l_iclog)
3313 2303 : goto out_unlock;
3314 : }
3315 :
3316 4546527 : switch (iclog->ic_state) {
3317 3894854 : case XLOG_STATE_ACTIVE:
3318 : /*
3319 : * We sleep here if we haven't already slept (e.g. this is the
3320 : * first time we've looked at the correct iclog buf) and the
3321 : * buffer before us is going to be sync'ed. The reason for this
3322 : * is that if we are doing sync transactions here, by waiting
3323 : * for the previous I/O to complete, we can allow a few more
3324 : * transactions into this iclog before we close it down.
3325 : *
3326 : * Otherwise, we mark the buffer WANT_SYNC, and bump up the
3327 : * refcnt so we can release the log (which drops the ref count).
3328 : * The state switch keeps new transaction commits from using
3329 : * this buffer. When the current commits finish writing into
3330 : * the buffer, the refcount will drop to zero and the buffer
3331 : * will go out then.
3332 : */
3333 3894854 : if (!already_slept &&
3334 3712916 : (iclog->ic_prev->ic_state == XLOG_STATE_WANT_SYNC ||
3335 : iclog->ic_prev->ic_state == XLOG_STATE_SYNCING)) {
3336 245112 : xlog_wait(&iclog->ic_prev->ic_write_wait,
3337 245112 : &log->l_icloglock);
3338 245112 : return -EAGAIN;
3339 : }
3340 3649742 : if (xlog_force_and_check_iclog(iclog, &completed))
3341 0 : goto out_error;
3342 3649745 : if (log_flushed)
3343 3339044 : *log_flushed = 1;
3344 3649745 : if (completed)
3345 66 : goto out_unlock;
3346 : break;
3347 6296 : case XLOG_STATE_WANT_SYNC:
3348 : /*
3349 : * This iclog may contain the checkpoint pushed by the
3350 : * xlog_cil_force_seq() call, but there are other writers still
3351 : * accessing it so it hasn't been pushed to disk yet. Like the
3352 : * ACTIVE case above, we need to make sure caches are flushed
3353 : * when this iclog is written.
3354 : */
3355 6296 : iclog->ic_flags |= XLOG_ICL_NEED_FLUSH | XLOG_ICL_NEED_FUA;
3356 6296 : break;
3357 : default:
3358 : /*
3359 : * The entire checkpoint was written by the CIL force and is on
3360 : * its way to disk already. It will be stable when it
3361 : * completes, so we don't need to manipulate caches here at all.
3362 : * We just need to wait for completion if necessary.
3363 : */
3364 : break;
3365 : }
3366 :
3367 4301352 : if (flags & XFS_LOG_SYNC)
3368 4301352 : return xlog_wait_on_iclog(iclog);
3369 0 : out_unlock:
3370 2369 : spin_unlock(&log->l_icloglock);
3371 2369 : return 0;
3372 287 : out_error:
3373 287 : spin_unlock(&log->l_icloglock);
3374 287 : return -EIO;
3375 : }
3376 :
3377 : /*
3378 : * Force the log to a specific checkpoint sequence.
3379 : *
3380 : * First force the CIL so that all the required changes have been flushed to the
3381 : * iclogs. If the CIL force completed it will return a commit LSN that indicates
3382 : * the iclog that needs to be flushed to stable storage. If the caller needs
3383 : * a synchronous log force, we will wait on the iclog with the LSN returned by
3384 : * xlog_cil_force_seq() to be completed.
3385 : */
3386 : int
3387 4333188 : xfs_log_force_seq(
3388 : struct xfs_mount *mp,
3389 : xfs_csn_t seq,
3390 : uint flags,
3391 : int *log_flushed)
3392 : {
3393 4333188 : struct xlog *log = mp->m_log;
3394 4333188 : xfs_lsn_t lsn;
3395 4333188 : int ret;
3396 4333188 : ASSERT(seq != 0);
3397 :
3398 4333188 : XFS_STATS_INC(mp, xs_log_force);
3399 4333236 : trace_xfs_log_force(mp, seq, _RET_IP_);
3400 :
3401 4333147 : lsn = xlog_cil_force_seq(log, seq);
3402 4333135 : if (lsn == NULLCOMMITLSN)
3403 : return 0;
3404 :
3405 4303720 : ret = xlog_force_lsn(log, lsn, flags, log_flushed, false);
3406 4303171 : if (ret == -EAGAIN) {
3407 245107 : XFS_STATS_INC(mp, xs_log_force_sleep);
3408 245102 : ret = xlog_force_lsn(log, lsn, flags, log_flushed, true);
3409 : }
3410 : return ret;
3411 : }
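/*
 * Editor's note: hedged usage sketch for the sequence-based force.  The
 * checkpoint sequence is assumed to have been recorded by an earlier
 * transaction commit; the wrapper itself is hypothetical.
 */
static int example_force_checkpoint(struct xfs_mount *mp, xfs_csn_t seq)
{
	int	log_flushed = 0;
	int	error;

	error = xfs_log_force_seq(mp, seq, XFS_LOG_SYNC, &log_flushed);
	if (!error && log_flushed) {
		/* an iclog covering this sequence was pushed to disk */
	}
	return error;
}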
3412 :
3413 : /*
3414 : * Free a used ticket when its refcount falls to zero.
3415 : */
3416 : void
3417 2875955570 : xfs_log_ticket_put(
3418 : xlog_ticket_t *ticket)
3419 : {
3420 2875955570 : ASSERT(atomic_read(&ticket->t_ref) > 0);
3421 2875955570 : if (atomic_dec_and_test(&ticket->t_ref))
3422 1545291962 : kmem_cache_free(xfs_log_ticket_cache, ticket);
3423 2875986573 : }
3424 :
3425 : xlog_ticket_t *
3426 1330784330 : xfs_log_ticket_get(
3427 : xlog_ticket_t *ticket)
3428 : {
3429 1330784330 : ASSERT(atomic_read(&ticket->t_ref) > 0);
3430 1330784330 : atomic_inc(&ticket->t_ref);
3431 1331110674 : return ticket;
3432 : }
3433 :
3434 : /*
3435 : * Figure out the total log space unit (in bytes) that would be
3436 : * required for a log ticket.
3437 : */
3438 : static int
3439 1543066953 : xlog_calc_unit_res(
3440 : struct xlog *log,
3441 : int unit_bytes,
3442 : int *niclogs)
3443 : {
3444 1543066953 : int iclog_space;
3445 1543066953 : uint num_headers;
3446 :
3447 : /*
3448 : * Permanent reservations have up to 'cnt'-1 active log operations
3449 : * in the log. A unit in this case is the amount of space for one
3450 : * of these log operations. Normal reservations have a cnt of 1
3451 : * and their unit amount is the total amount of space required.
3452 : *
3453 : * The following lines of code account for non-transaction data
3454 : * which occupy space in the on-disk log.
3455 : *
3456 : * Normal form of a transaction is:
3457 : * <oph><trans-hdr><start-oph><reg1-oph><reg1><reg2-oph>...<commit-oph>
3458 : * and then there are LR hdrs, split-recs and roundoff at end of syncs.
3459 : *
3460 : * We need to account for all the leadup data and trailer data
3461 : * around the transaction data.
3462 : * And then we need to account for the worst case in terms of using
3463 : * more space.
3464 : * The worst case will happen if:
3465 : * - the placement of the transaction happens to be such that the
3466 : * roundoff is at its maximum
3467 : * - the transaction data is synced before the commit record is synced
3468 : * i.e. <transaction-data><roundoff> | <commit-rec><roundoff>
3469 : * Therefore the commit record is in its own Log Record.
3470 : * This can happen as the commit record is called with its
3471 : * own region to xlog_write().
3472 : * This then means that in the worst case, roundoff can happen for
3473 : * the commit-rec as well.
3474 : * The commit-rec is smaller than padding in this scenario and so it is
3475 : * not added separately.
3476 : */
3477 :
3478 : /* for trans header */
3479 1543066953 : unit_bytes += sizeof(xlog_op_header_t);
3480 1543066953 : unit_bytes += sizeof(xfs_trans_header_t);
3481 :
3482 : /* for start-rec */
3483 1543066953 : unit_bytes += sizeof(xlog_op_header_t);
3484 :
3485 : /*
3486 : * for LR headers - the space for data in an iclog is the size minus
3487 : * the space used for the headers. If we use the iclog size, then we
3488 : * undercalculate the number of headers required.
3489 : *
3490 : * Furthermore - the addition of op headers for split-recs might
3491 : * increase the space required enough to require more log and op
3492 : * headers, so take that into account too.
3493 : *
3494 : * IMPORTANT: This reservation makes the assumption that if this
3495 : * transaction is the first in an iclog and hence has the LR headers
3496 : * accounted to it, then the remaining space in the iclog is
3497 : * exclusively for this transaction. i.e. if the transaction is larger
3498 : * than the iclog, it will be the only thing in that iclog.
3499 : * Fundamentally, this means we must pass the entire log vector to
3500 : * xlog_write to guarantee this.
3501 : */
3502 1543066953 : iclog_space = log->l_iclog_size - log->l_iclog_hsize;
3503 1543066953 : num_headers = howmany(unit_bytes, iclog_space);
3504 :
3505 : /* for split-recs - ophdrs added when data split over LRs */
3506 1543066953 : unit_bytes += sizeof(xlog_op_header_t) * num_headers;
3507 :
3508 : /* add extra header reservations if we overrun */
3509 1542452937 : while (!num_headers ||
3510 1542452937 : howmany(unit_bytes, iclog_space) > num_headers) {
3511 69845 : unit_bytes += sizeof(xlog_op_header_t);
3512 69845 : num_headers++;
3513 : }
3514 1543066953 : unit_bytes += log->l_iclog_hsize * num_headers;
3515 :
3516 : /* for commit-rec LR header - note: padding will subsume the ophdr */
3517 1543066953 : unit_bytes += log->l_iclog_hsize;
3518 :
3519 : /* roundoff padding for transaction data and one for commit record */
3520 1543066953 : unit_bytes += 2 * log->l_iclog_roundoff;
3521 :
3522 1543066953 : if (niclogs)
3523 1543006189 : *niclogs = num_headers;
3524 1543066953 : return unit_bytes;
3525 : }
3526 :
3527 : int
3528 60764 : xfs_log_calc_unit_res(
3529 : struct xfs_mount *mp,
3530 : int unit_bytes)
3531 : {
3532 60764 : return xlog_calc_unit_res(mp->m_log, unit_bytes, NULL);
3533 : }
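/*
 * Editor's note: a standalone sketch of the log record header accounting in
 * xlog_calc_unit_res() above, with howmany() written as explicit round-up
 * division.  The numbers in the comment are illustrative only: a 256k
 * reservation against 32k iclogs with a 512-byte header works out to 9 LR
 * headers, even after the per-split op headers are added back in.
 */
static int example_count_lr_headers(int unit_bytes, int iclog_size,
				    int iclog_hsize, int op_hdr_size)
{
	int iclog_space = iclog_size - iclog_hsize;
	int num_headers = (unit_bytes + iclog_space - 1) / iclog_space;

	/* op headers added for split records may push us into another iclog */
	unit_bytes += op_hdr_size * num_headers;
	while (!num_headers ||
	       (unit_bytes + iclog_space - 1) / iclog_space > num_headers) {
		unit_bytes += op_hdr_size;
		num_headers++;
	}
	return num_headers;
}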
3534 :
3535 : /*
3536 : * Allocate and initialise a new log ticket.
3537 : */
3538 : struct xlog_ticket *
3539 1542112449 : xlog_ticket_alloc(
3540 : struct xlog *log,
3541 : int unit_bytes,
3542 : int cnt,
3543 : bool permanent)
3544 : {
3545 1542112449 : struct xlog_ticket *tic;
3546 1542112449 : int unit_res;
3547 :
3548 1542112449 : tic = kmem_cache_zalloc(xfs_log_ticket_cache, GFP_NOFS | __GFP_NOFAIL);
3549 :
3550 1542860062 : unit_res = xlog_calc_unit_res(log, unit_bytes, &tic->t_iclog_hdrs);
3551 :
3552 1542202049 : atomic_set(&tic->t_ref, 1);
3553 1542202049 : tic->t_task = current;
3554 1542202049 : INIT_LIST_HEAD(&tic->t_queue);
3555 1542202049 : tic->t_unit_res = unit_res;
3556 1542202049 : tic->t_curr_res = unit_res;
3557 1542202049 : tic->t_cnt = cnt;
3558 1542202049 : tic->t_ocnt = cnt;
3559 1542202049 : tic->t_tid = get_random_u32();
3560 1541787587 : if (permanent)
3561 1342077288 : tic->t_flags |= XLOG_TIC_PERM_RESERV;
3562 :
3563 1541787587 : return tic;
3564 : }
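/*
 * Editor's note: hedged lifecycle sketch.  xlog_ticket_alloc() and
 * xfs_log_ticket_ungrant() are from this file; the sizes, the count of 8 and
 * the wrapper are illustrative only.  In real use the grant-head space is
 * reserved through the log reservation path rather than touched directly, so
 * this only shows the call ordering, not a complete reservation cycle.
 */
static void example_ticket_lifecycle(struct xlog *log)
{
	struct xlog_ticket *tic;

	/* a permanent ticket good for repeated use of a 64k reservation */
	tic = xlog_ticket_alloc(log, 65536, 8, true);

	/* ... reserve grant space, run transactions, write to the log ... */

	/* give back whatever space is left and drop our reference */
	xfs_log_ticket_ungrant(log, tic);
}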
3565 :
3566 : #if defined(DEBUG)
3567 : /*
3568 : * Check to make sure the grant write head didn't just overlap the tail. If
3569 : * the cycles are the same, we can't be overlapping. Otherwise, make sure that
3570 : * the cycles differ by exactly one and check the byte count.
3571 : *
3572 : * This check is run unlocked, so can give false positives. Rather than assert
3573 : * on failures, use a warn-once flag and a panic tag to allow the admin to
3574 : * determine if they want to panic the machine when such an error occurs. For
3575 : * debug kernels this will have the same effect as using an assert but, unlike
3576 : * an assert, it can be turned off at runtime.
3577 : */
3578 : STATIC void
3579 1945855810 : xlog_verify_grant_tail(
3580 : struct xlog *log)
3581 : {
3582 1945855810 : int tail_cycle, tail_blocks;
3583 1945855810 : int cycle, space;
3584 :
3585 1945855810 : xlog_crack_grant_head(&log->l_write_head.grant, &cycle, &space);
3586 1945855810 : xlog_crack_atomic_lsn(&log->l_tail_lsn, &tail_cycle, &tail_blocks);
3587 1945855810 : if (tail_cycle != cycle) {
3588 476809509 : if (cycle - 1 != tail_cycle &&
3589 37 : !test_and_set_bit(XLOG_TAIL_WARN, &log->l_opstate)) {
3590 4 : xfs_alert_tag(log->l_mp, XFS_PTAG_LOGRES,
3591 : "%s: cycle - 1 != tail_cycle", __func__);
3592 : }
3593 :
3594 476812990 : if (space > BBTOB(tail_blocks) &&
3595 3485 : !test_and_set_bit(XLOG_TAIL_WARN, &log->l_opstate)) {
3596 112 : xfs_alert_tag(log->l_mp, XFS_PTAG_LOGRES,
3597 : "%s: space > BBTOB(tail_blocks)", __func__);
3598 : }
3599 : }
3600 1945855843 : }
3601 :
3602 : /* check if it will fit */
3603 : STATIC void
3604 28805544 : xlog_verify_tail_lsn(
3605 : struct xlog *log,
3606 : struct xlog_in_core *iclog)
3607 : {
3608 28805544 : xfs_lsn_t tail_lsn = be64_to_cpu(iclog->ic_header.h_tail_lsn);
3609 28805544 : int blocks;
3610 :
3611 28805544 : if (CYCLE_LSN(tail_lsn) == log->l_prev_cycle) {
3612 18558327 : blocks =
3613 18558327 : log->l_logBBsize - (log->l_prev_block - BLOCK_LSN(tail_lsn));
3614 18558327 : if (blocks < BTOBB(iclog->ic_offset)+BTOBB(log->l_iclog_hsize))
3615 0 : xfs_emerg(log->l_mp, "%s: ran out of log space", __func__);
3616 : } else {
3617 10247217 : ASSERT(CYCLE_LSN(tail_lsn)+1 == log->l_prev_cycle);
3618 :
3619 10247217 : if (BLOCK_LSN(tail_lsn) == log->l_prev_block)
3620 0 : xfs_emerg(log->l_mp, "%s: tail wrapped", __func__);
3621 :
3622 10247217 : blocks = BLOCK_LSN(tail_lsn) - log->l_prev_block;
3623 10247217 : if (blocks < BTOBB(iclog->ic_offset) + 1)
3624 0 : xfs_emerg(log->l_mp, "%s: ran out of log space", __func__);
3625 : }
3626 28805544 : }
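/*
 * Editor's note: the tail checks above depend on the LSN layout - the log
 * cycle number in the high 32 bits and the basic block number in the low 32
 * bits.  A minimal sketch of what CYCLE_LSN()/BLOCK_LSN() are assumed to
 * extract:
 */
static inline uint32_t example_lsn_cycle(xfs_lsn_t lsn)
{
	return (uint32_t)(lsn >> 32);	/* cycle number */
}

static inline uint32_t example_lsn_block(xfs_lsn_t lsn)
{
	return (uint32_t)lsn;		/* basic block within that cycle */
}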
3627 :
3628 : /*
3629 : * Perform a number of checks on the iclog before writing to disk.
3630 : *
3631 : * 1. Make sure the iclogs are still circular
3632 : * 2. Make sure we have a good magic number
3633 : * 3. Make sure we don't have magic numbers in the data
3634 : * 4. Check fields of each log operation header for:
3635 : * A. Valid client identifier
3636 : * B. tid ptr value falls in valid ptr space (user space code)
3637 : * C. Length in log record header is correct according to the
3638 : * individual operation headers within record.
3639 : * 5. When a bwrite will occur within 5 blocks of the front of the physical
3640 : * log, check the preceding blocks of the physical log to make sure all
3641 : * the cycle numbers agree with the current cycle number.
3642 : */
3643 : STATIC void
3644 28804401 : xlog_verify_iclog(
3645 : struct xlog *log,
3646 : struct xlog_in_core *iclog,
3647 : int count)
3648 : {
3649 28804401 : xlog_op_header_t *ophead;
3650 28804401 : xlog_in_core_t *icptr;
3651 28804401 : xlog_in_core_2_t *xhdr;
3652 28804401 : void *base_ptr, *ptr, *p;
3653 28804401 : ptrdiff_t field_offset;
3654 28804401 : uint8_t clientid;
3655 28804401 : int len, i, j, k, op_len;
3656 28804401 : int idx;
3657 :
3658 : /* check validity of iclog pointers */
3659 28804401 : spin_lock(&log->l_icloglock);
3660 28805548 : icptr = log->l_iclog;
3661 259249760 : for (i = 0; i < log->l_iclog_bufs; i++, icptr = icptr->ic_next)
3662 230444213 : ASSERT(icptr);
3663 :
3664 28805547 : if (icptr != log->l_iclog)
3665 0 : xfs_emerg(log->l_mp, "%s: corrupt iclog ring", __func__);
3666 28805547 : spin_unlock(&log->l_icloglock);
3667 :
3668 : /* check log magic numbers */
3669 28805547 : if (iclog->ic_header.h_magicno != cpu_to_be32(XLOG_HEADER_MAGIC_NUM))
3670 0 : xfs_emerg(log->l_mp, "%s: invalid magic num", __func__);
3671 :
3672 28805547 : base_ptr = ptr = &iclog->ic_header;
3673 28805547 : p = &iclog->ic_header;
3674 1616692921 : for (ptr += BBSIZE; ptr < base_ptr + count; ptr += BBSIZE) {
3675 1587887430 : if (*(__be32 *)ptr == cpu_to_be32(XLOG_HEADER_MAGIC_NUM))
3676 0 : xfs_emerg(log->l_mp, "%s: unexpected magic num",
3677 : __func__);
3678 : }
3679 :
3680 : /* check fields */
3681 28805491 : len = be32_to_cpu(iclog->ic_header.h_num_logops);
3682 28805491 : base_ptr = ptr = iclog->ic_datap;
3683 28805491 : ophead = ptr;
3684 28805491 : xhdr = iclog->ic_data;
3685 2601155027 : for (i = 0; i < len; i++) {
3686 2572349589 : ophead = ptr;
3687 :
3688 : /* clientid is only 1 byte */
3689 2572349589 : p = &ophead->oh_clientid;
3690 2572349589 : field_offset = p - base_ptr;
3691 2572349589 : if (field_offset & 0x1ff) {
3692 2553235452 : clientid = ophead->oh_clientid;
3693 : } else {
3694 19114137 : idx = BTOBBT((void *)&ophead->oh_clientid - iclog->ic_datap);
3695 19114137 : if (idx >= (XLOG_HEADER_CYCLE_SIZE / BBSIZE)) {
3696 18982 : j = idx / (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
3697 18982 : k = idx % (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
3698 18982 : clientid = xlog_get_client_id(
3699 18982 : xhdr[j].hic_xheader.xh_cycle_data[k]);
3700 : } else {
3701 19095155 : clientid = xlog_get_client_id(
3702 19095155 : iclog->ic_header.h_cycle_data[idx]);
3703 : }
3704 : }
3705 2572349589 : if (clientid != XFS_TRANSACTION && clientid != XFS_LOG) {
3706 0 : xfs_warn(log->l_mp,
3707 : "%s: op %d invalid clientid %d op "PTR_FMT" offset 0x%lx",
3708 : __func__, i, clientid, ophead,
3709 : (unsigned long)field_offset);
3710 : }
3711 :
3712 : /* check length */
3713 2572349528 : p = &ophead->oh_len;
3714 2572349528 : field_offset = p - base_ptr;
3715 2572349528 : if (field_offset & 0x1ff) {
3716 2552975909 : op_len = be32_to_cpu(ophead->oh_len);
3717 : } else {
3718 19373619 : idx = BTOBBT((void *)&ophead->oh_len - iclog->ic_datap);
3719 19373619 : if (idx >= (XLOG_HEADER_CYCLE_SIZE / BBSIZE)) {
3720 18614 : j = idx / (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
3721 18614 : k = idx % (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
3722 18614 : op_len = be32_to_cpu(xhdr[j].hic_xheader.xh_cycle_data[k]);
3723 : } else {
3724 19355005 : op_len = be32_to_cpu(iclog->ic_header.h_cycle_data[idx]);
3725 : }
3726 : }
3727 2572349536 : ptr += sizeof(xlog_op_header_t) + op_len;
3728 : }
3729 28805438 : }
3730 : #endif
3731 :
3732 : /*
3733 : * Perform a forced shutdown on the log.
3734 : *
3735 : * This can be called from low level log code to trigger a shutdown, or from the
3736 : * high level mount shutdown code when the mount shuts down.
3737 : *
3738 : * Our main objectives here are to make sure that:
3739 : * a. if the shutdown was not due to a log IO error, flush the logs to
3740 : * disk. Anything modified after this is ignored.
3741 : * b. the log gets atomically marked 'XLOG_IO_ERROR' for all interested
3742 : * parties to find out. Nothing new gets queued after this is done.
3743 : * c. Tasks sleeping on log reservations, pinned objects and
3744 : * other resources get woken up.
3745 : * d. The mount is also marked as shut down so that log triggered shutdowns
3746 : * still behave the same as if they called xfs_forced_shutdown().
3747 : *
3748 : * Return true if the shutdown cause was a log IO error and we actually shut the
3749 : * log down.
3750 : */
3751 : bool
3752 20632 : xlog_force_shutdown(
3753 : struct xlog *log,
3754 : uint32_t shutdown_flags)
3755 : {
3756 20632 : bool log_error = (shutdown_flags & SHUTDOWN_LOG_IO_ERROR);
3757 :
3758 20632 : if (!log)
3759 : return false;
3760 :
3761 : /*
3762 : * Flush all the completed transactions to disk before marking the log
3763 : * being shut down. We need to do this first as shutting down the log
3764 : * before the force will prevent the log force from flushing the iclogs
3765 : * to disk.
3766 : *
3767 : * When we are in recovery, there are no transactions to flush, and
3768 : * we don't want to touch the log because we don't want to perturb the
3769 : * current head/tail for future recovery attempts. Hence we need to
3770 : * avoid a log force in this case.
3771 : *
3772 : * If we are shutting down due to a log IO error, then we must avoid
3773 : * trying to write the log as that may just result in more IO errors and
3774 : * an endless shutdown/force loop.
3775 : */
3776 25042 : if (!log_error && !xlog_in_recovery(log))
3777 4410 : xfs_log_force(log->l_mp, XFS_LOG_SYNC);
3778 :
3779 : /*
3780 : * Atomically set the shutdown state. If the shutdown state is already
3781 : * set, then someone else is performing the shutdown and so we are done
3782 : * here. This should never happen because we should only ever get called
3783 : * once by the first shutdown caller.
3784 : *
3785 : * Many of the log state machine transitions assume that shutdown state
3786 : * cannot change once they hold the log->l_icloglock. Hence we need to
3787 : * hold that lock here, even though we use the atomic test_and_set_bit()
3788 : * operation to set the shutdown state.
3789 : */
3790 20632 : spin_lock(&log->l_icloglock);
3791 20632 : if (test_and_set_bit(XLOG_IO_ERROR, &log->l_opstate)) {
3792 7414 : spin_unlock(&log->l_icloglock);
3793 7414 : return false;
3794 : }
3795 13218 : spin_unlock(&log->l_icloglock);
3796 :
3797 : /*
3798 : * If this log shutdown also sets the mount shutdown state, issue a
3799 : * shutdown warning message.
3800 : */
3801 13218 : if (!test_and_set_bit(XFS_OPSTATE_SHUTDOWN, &log->l_mp->m_opstate)) {
3802 3967 : xfs_alert_tag(log->l_mp, XFS_PTAG_SHUTDOWN_LOGERROR,
3803 : "Filesystem has been shut down due to log error (0x%x).",
3804 : shutdown_flags);
3805 3967 : xfs_alert(log->l_mp,
3806 : "Please unmount the filesystem and rectify the problem(s).");
3807 3967 : if (xfs_error_level >= XFS_ERRLEVEL_HIGH)
3808 0 : xfs_stack_trace();
3809 : }
3810 :
3811 : /*
3812 : * We don't want anybody waiting for log reservations after this. That
3813 : * means we have to wake up everybody queued up on reserveq as well as
3814 : * writeq. In addition, we make sure in xlog_{re}grant_log_space that
3815 : * we don't enqueue anything once the SHUTDOWN flag is set, and this
3816 : * action is protected by the grant locks.
3817 : */
3818 13218 : xlog_grant_head_wake_all(&log->l_reserve_head);
3819 13218 : xlog_grant_head_wake_all(&log->l_write_head);
3820 :
3821 : /*
3822 : * Wake up everybody waiting on xfs_log_force. Wake the CIL push first
3823 : * as if the log writes were completed. The abort handling in the log
3824 : * item committed callback functions will do this again under lock to
3825 : * avoid races.
3826 : */
3827 13218 : spin_lock(&log->l_cilp->xc_push_lock);
3828 13218 : wake_up_all(&log->l_cilp->xc_start_wait);
3829 13218 : wake_up_all(&log->l_cilp->xc_commit_wait);
3830 13218 : spin_unlock(&log->l_cilp->xc_push_lock);
3831 :
3832 13218 : spin_lock(&log->l_icloglock);
3833 13218 : xlog_state_shutdown_callbacks(log);
3834 13218 : spin_unlock(&log->l_icloglock);
3835 :
3836 13218 : wake_up_var(&log->l_opstate);
3837 13218 : return log_error;
3838 : }
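/*
 * Editor's note: hedged usage sketch.  A log I/O completion path that hits an
 * unrecoverable write error might shut the log down like this; the wrapper is
 * hypothetical, xlog_force_shutdown() and SHUTDOWN_LOG_IO_ERROR are from this
 * file.
 */
static void example_handle_log_io_error(struct xlog *log)
{
	/*
	 * Returns true only if this call shut the log down and the cause was
	 * a log I/O error; false means someone else already performed the
	 * shutdown or the cause was not log I/O.
	 */
	if (xlog_force_shutdown(log, SHUTDOWN_LOG_IO_ERROR)) {
		/* this caller performed the shutdown */
	}
}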
3839 :
3840 : STATIC int
3841 288739 : xlog_iclogs_empty(
3842 : struct xlog *log)
3843 : {
3844 288739 : xlog_in_core_t *iclog;
3845 :
3846 288739 : iclog = log->l_iclog;
3847 2294662 : do {
3848 : /* endianness does not matter here, zero is zero in
3849 : * any language.
3850 : */
3851 2294662 : if (iclog->ic_header.h_num_logops)
3852 : return 0;
3853 2287712 : iclog = iclog->ic_next;
3854 2287712 : } while (iclog != log->l_iclog);
3855 : return 1;
3856 : }
3857 :
3858 : /*
3859 : * Verify that an LSN stamped into a piece of metadata is valid. This is
3860 : * intended for use in read verifiers on v5 superblocks.
3861 : */
3862 : bool
3863 135622596 : xfs_log_check_lsn(
3864 : struct xfs_mount *mp,
3865 : xfs_lsn_t lsn)
3866 : {
3867 135622596 : struct xlog *log = mp->m_log;
3868 135622596 : bool valid;
3869 :
3870 : /*
3871 : * norecovery mode skips mount-time log processing and unconditionally
3872 : * resets the in-core LSN. We can't validate in this mode, but
3873 : * modifications are not allowed anyways so just return true.
3874 : * modifications are not allowed anyway so just return true.
3875 135622596 : if (xfs_has_norecovery(mp))
3876 : return true;
3877 :
3878 : /*
3879 : * Some metadata LSNs are initialized to NULL (e.g., the agfl). This is
3880 : * handled by recovery and thus safe to ignore here.
3881 : */
3882 135621953 : if (lsn == NULLCOMMITLSN)
3883 : return true;
3884 :
3885 135354152 : valid = xlog_valid_lsn(mp->m_log, lsn);
3886 :
3887 : /* warn the user about what's gone wrong before verifier failure */
3888 135375953 : if (!valid) {
3889 11 : spin_lock(&log->l_icloglock);
3890 11 : xfs_warn(mp,
3891 : "Corruption warning: Metadata has LSN (%d:%d) ahead of current LSN (%d:%d). "
3892 : "Please unmount and run xfs_repair (>= v4.3) to resolve.",
3893 : CYCLE_LSN(lsn), BLOCK_LSN(lsn),
3894 : log->l_curr_cycle, log->l_curr_block);
3895 11 : spin_unlock(&log->l_icloglock);
3896 : }
3897 :
3898 : return valid;
3899 : }
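/*
 * Editor's note: hedged sketch of the read-verifier usage mentioned above.
 * How the on-disk LSN is obtained and the helper itself are hypothetical;
 * only xfs_log_check_lsn() is from this file.
 */
static bool example_verify_metadata_lsn(struct xfs_mount *mp,
					xfs_lsn_t ondisk_lsn)
{
	/*
	 * Reject the metadata if its stamped LSN is ahead of the current
	 * head of the log - that would mean it was written by a log the
	 * filesystem no longer has, i.e. corruption or a mismatched log.
	 */
	return xfs_log_check_lsn(mp, ondisk_lsn);
}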
3900 :
3901 : /*
3902 : * Notify the log that we're about to start using a feature that is protected
3903 : * by a log incompat feature flag. This will prevent log covering from
3904 : * clearing those flags.
3905 : */
3906 : void
3907 233580642 : xlog_use_incompat_feat(
3908 : struct xlog *log,
3909 : enum xlog_incompat_feat what)
3910 : {
3911 233580642 : switch (what) {
3912 227761523 : case XLOG_INCOMPAT_FEAT_XATTRS:
3913 227761523 : down_read(&log->l_incompat_xattrs);
3914 227761523 : break;
3915 5819119 : case XLOG_INCOMPAT_FEAT_SWAPEXT:
3916 5819119 : down_read(&log->l_incompat_swapext);
3917 5819119 : break;
3918 : }
3919 233891419 : }
3920 :
3921 : /* Notify the log that we've finished using log incompat features. */
3922 : void
3923 233527176 : xlog_drop_incompat_feat(
3924 : struct xlog *log,
3925 : enum xlog_incompat_feat what)
3926 : {
3927 233527176 : switch (what) {
3928 227709774 : case XLOG_INCOMPAT_FEAT_XATTRS:
3929 227709774 : up_read(&log->l_incompat_xattrs);
3930 227709774 : break;
3931 5817402 : case XLOG_INCOMPAT_FEAT_SWAPEXT:
3932 5817402 : up_read(&log->l_incompat_swapext);
3933 5817402 : break;
3934 : }
3935 233840461 : }
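/*
 * Editor's note: hedged usage sketch.  Callers bracket work that emits
 * log-incompat protected items with a use/drop pair so that log covering
 * cannot clear the feature flag underneath them; the wrapper and the work
 * inside it are hypothetical.
 */
static void example_do_logged_xattr_work(struct xlog *log)
{
	xlog_use_incompat_feat(log, XLOG_INCOMPAT_FEAT_XATTRS);

	/* ... commit transactions containing log-incompat xattr items ... */

	xlog_drop_incompat_feat(log, XLOG_INCOMPAT_FEAT_XATTRS);
}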