Line data Source code
1 : // SPDX-License-Identifier: GPL-2.0
2 : /*
3 : * Copyright (c) 2000-2005 Silicon Graphics, Inc.
4 : * All Rights Reserved.
5 : */
6 : #include "xfs.h"
7 : #include "xfs_fs.h"
8 : #include "xfs_shared.h"
9 : #include "xfs_format.h"
10 : #include "xfs_log_format.h"
11 : #include "xfs_trans_resv.h"
12 : #include "xfs_mount.h"
13 : #include "xfs_errortag.h"
14 : #include "xfs_error.h"
15 : #include "xfs_trans.h"
16 : #include "xfs_trans_priv.h"
17 : #include "xfs_log.h"
18 : #include "xfs_log_priv.h"
19 : #include "xfs_trace.h"
20 : #include "xfs_sysfs.h"
21 : #include "xfs_sb.h"
22 : #include "xfs_health.h"
23 :
24 : struct kmem_cache *xfs_log_ticket_cache;
25 :
26 : /* Local miscellaneous function prototypes */
27 : STATIC struct xlog *
28 : xlog_alloc_log(
29 : struct xfs_mount *mp,
30 : struct xfs_buftarg *log_target,
31 : xfs_daddr_t blk_offset,
32 : int num_bblks);
33 : STATIC int
34 : xlog_space_left(
35 : struct xlog *log,
36 : atomic64_t *head);
37 : STATIC void
38 : xlog_dealloc_log(
39 : struct xlog *log);
40 :
41 : /* local state machine functions */
42 : STATIC void xlog_state_done_syncing(
43 : struct xlog_in_core *iclog);
44 : STATIC void xlog_state_do_callback(
45 : struct xlog *log);
46 : STATIC int
47 : xlog_state_get_iclog_space(
48 : struct xlog *log,
49 : int len,
50 : struct xlog_in_core **iclog,
51 : struct xlog_ticket *ticket,
52 : int *logoffsetp);
53 : STATIC void
54 : xlog_grant_push_ail(
55 : struct xlog *log,
56 : int need_bytes);
57 : STATIC void
58 : xlog_sync(
59 : struct xlog *log,
60 : struct xlog_in_core *iclog,
61 : struct xlog_ticket *ticket);
62 : #if defined(DEBUG)
63 : STATIC void
64 : xlog_verify_grant_tail(
65 : struct xlog *log);
66 : STATIC void
67 : xlog_verify_iclog(
68 : struct xlog *log,
69 : struct xlog_in_core *iclog,
70 : int count);
71 : STATIC void
72 : xlog_verify_tail_lsn(
73 : struct xlog *log,
74 : struct xlog_in_core *iclog);
75 : #else
76 : #define xlog_verify_grant_tail(a)
77 : #define xlog_verify_iclog(a,b,c)
78 : #define xlog_verify_tail_lsn(a,b)
79 : #endif
80 :
81 : STATIC int
82 : xlog_iclogs_empty(
83 : struct xlog *log);
84 :
85 : static int
86 : xfs_log_cover(struct xfs_mount *);
87 :
88 : /*
89 : * We need to make sure the buffer pointer returned is naturally aligned for the
90 : * biggest basic data type we put into it. We have already accounted for this
91 : * padding when sizing the buffer.
92 : *
93 : * However, this padding does not get written into the log, and hence we have to
94 : * track the space used by the log vectors separately to prevent log space hangs
95 : * due to inaccurate accounting (i.e. a leak) of the used log space through the
96 : * CIL context ticket.
97 : *
98 : * We also add space for the xlog_op_header that describes this region in the
99 : * log. This prepends the data region we return to the caller to copy their data
100 : * into, so do all the static initialisation of the ophdr now. Because the ophdr
101 : * is not 8 byte aligned, we have to be careful to ensure that we align the
102 : * start of the buffer such that the region we return to the caller is 8 byte
103 : * aligned and packed against the tail of the ophdr.
104 : */
105 : void *
106 21764824032 : xlog_prepare_iovec(
107 : struct xfs_log_vec *lv,
108 : struct xfs_log_iovec **vecp,
109 : uint type)
110 : {
111 21764824032 : struct xfs_log_iovec *vec = *vecp;
112 21764824032 : struct xlog_op_header *oph;
113 21764824032 : uint32_t len;
114 21764824032 : void *buf;
115 :
116 21764824032 : if (vec) {
117 12328831963 : ASSERT(vec - lv->lv_iovecp < lv->lv_niovecs);
118 12328831963 : vec++;
119 : } else {
120 9435992069 : vec = &lv->lv_iovecp[0];
121 : }
122 :
123 21764824032 : len = lv->lv_buf_len + sizeof(struct xlog_op_header);
124 21764824032 : if (!IS_ALIGNED(len, sizeof(uint64_t))) {
125 21535306722 : lv->lv_buf_len = round_up(len, sizeof(uint64_t)) -
126 : sizeof(struct xlog_op_header);
127 : }
128 :
129 21764824032 : vec->i_type = type;
130 21764824032 : vec->i_addr = lv->lv_buf + lv->lv_buf_len;
131 :
132 21764824032 : oph = vec->i_addr;
133 21764824032 : oph->oh_clientid = XFS_TRANSACTION;
134 21764824032 : oph->oh_res2 = 0;
135 21764824032 : oph->oh_flags = 0;
136 :
137 21764824032 : buf = vec->i_addr + sizeof(struct xlog_op_header);
138 21764824032 : ASSERT(IS_ALIGNED((unsigned long)buf, sizeof(uint64_t)));
139 :
140 21764824032 : *vecp = vec;
141 21764824032 : return buf;
142 : }
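/*
 * Illustrative sketch (not part of the original source): the padding
 * arithmetic performed by xlog_prepare_iovec() above, in isolation. Given
 * the bytes already consumed in the log vector buffer, the next region is
 * placed so that the data following the 12-byte xlog_op_header lands on an
 * 8-byte boundary. The helper name is hypothetical.
 */
static inline uint32_t
example_padded_buf_len(uint32_t used)
{
	uint32_t len = used + sizeof(struct xlog_op_header);

	if (!IS_ALIGNED(len, sizeof(uint64_t)))
		len = round_up(len, sizeof(uint64_t));

	/*
	 * e.g. used == 0: len = 12, rounded up to 16, so 4 is returned;
	 * the ophdr then starts at offset 4 and the caller's data at
	 * offset 16, which is 8-byte aligned.
	 */
	return len - sizeof(struct xlog_op_header);
}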
143 :
144 : static void
145 4336315889 : xlog_grant_sub_space(
146 : struct xlog *log,
147 : atomic64_t *head,
148 : int bytes)
149 : {
150 4336315889 : int64_t head_val = atomic64_read(head);
151 4338332308 : int64_t new, old;
152 :
153 4338332308 : do {
154 4338332308 : int cycle, space;
155 :
156 4338332308 : xlog_crack_grant_head_val(head_val, &cycle, &space);
157 :
158 4338332308 : space -= bytes;
159 4338332308 : if (space < 0) {
160 51863336 : space += log->l_logsize;
161 51863336 : cycle--;
162 : }
163 :
164 4338332308 : old = head_val;
165 4338332308 : new = xlog_assign_grant_head_val(cycle, space);
166 4338332308 : head_val = atomic64_cmpxchg(head, old, new);
167 4338424161 : } while (head_val != old);
168 4336407742 : }
169 :
170 : static void
171 2853749208 : xlog_grant_add_space(
172 : struct xlog *log,
173 : atomic64_t *head,
174 : int bytes)
175 : {
176 2853749208 : int64_t head_val = atomic64_read(head);
177 2856072546 : int64_t new, old;
178 :
179 2856072546 : do {
180 2856072546 : int tmp;
181 2856072546 : int cycle, space;
182 :
183 2856072546 : xlog_crack_grant_head_val(head_val, &cycle, &space);
184 :
185 2856072546 : tmp = log->l_logsize - space;
186 2856072546 : if (tmp > bytes)
187 2804169312 : space += bytes;
188 : else {
189 51903234 : space = bytes - tmp;
190 51903234 : cycle++;
191 : }
192 :
193 2856072546 : old = head_val;
194 2856072546 : new = xlog_assign_grant_head_val(cycle, space);
195 2856072546 : head_val = atomic64_cmpxchg(head, old, new);
196 2856576981 : } while (head_val != old);
197 2854253643 : }
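/*
 * Illustrative sketch (not part of the original source): both routines
 * above treat the grant head as a (cycle, byte offset) pair packed into a
 * single atomic64_t so the pair can be updated together without a lock;
 * the cmpxchg loops simply retry if another CPU changed the head in the
 * meantime. A minimal model of that packing, assuming the cycle lives in
 * the upper 32 bits and the byte offset in the lower 32 bits:
 */
static inline int64_t
example_pack_grant_head(int cycle, int space)
{
	return ((int64_t)cycle << 32) | space;
}

static inline void
example_crack_grant_head(int64_t val, int *cycle, int *space)
{
	*cycle = val >> 32;
	*space = val & 0xffffffff;
}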
198 :
199 : STATIC void
200 48666 : xlog_grant_head_init(
201 : struct xlog_grant_head *head)
202 : {
203 48666 : xlog_assign_grant_head(&head->grant, 1, 0);
204 48666 : INIT_LIST_HEAD(&head->waiters);
205 48666 : spin_lock_init(&head->lock);
206 48666 : }
207 :
208 : STATIC void
209 23758 : xlog_grant_head_wake_all(
210 : struct xlog_grant_head *head)
211 : {
212 23758 : struct xlog_ticket *tic;
213 :
214 23758 : spin_lock(&head->lock);
215 23758 : list_for_each_entry(tic, &head->waiters, t_queue)
216 0 : wake_up_process(tic->t_task);
217 23758 : spin_unlock(&head->lock);
218 23758 : }
219 :
220 : static inline int
221 1504659406 : xlog_ticket_reservation(
222 : struct xlog *log,
223 : struct xlog_grant_head *head,
224 : struct xlog_ticket *tic)
225 : {
226 1504659406 : if (head == &log->l_write_head) {
227 222231332 : ASSERT(tic->t_flags & XLOG_TIC_PERM_RESERV);
228 222231332 : return tic->t_unit_res;
229 : }
230 :
231 1282428074 : if (tic->t_flags & XLOG_TIC_PERM_RESERV)
232 1124472284 : return tic->t_unit_res * tic->t_cnt;
233 :
234 157955790 : return tic->t_unit_res;
235 : }
236 :
237 : STATIC bool
238 6578506 : xlog_grant_head_wake(
239 : struct xlog *log,
240 : struct xlog_grant_head *head,
241 : int *free_bytes)
242 : {
243 6578506 : struct xlog_ticket *tic;
244 6578506 : int need_bytes;
245 6578506 : bool woken_task = false;
246 :
247 82879430 : list_for_each_entry(tic, &head->waiters, t_queue) {
248 :
249 : /*
250 : * There is a chance that the size of the CIL checkpoints in
251 : * progress at the last AIL push target calculation resulted in
252 : * limiting the target to the log head (l_last_sync_lsn) at the
253 : * time. This may not reflect where the log head is now as the
254 : * CIL checkpoints may have completed.
255 : *
256 : * Hence when we are woken here, it may be the head of the
257 : * log that has moved rather than the tail. As the tail didn't
258 : * move, there still won't be space available for the
259 : * reservation we require. However, if the AIL has already
260 : * pushed to the target defined by the old log head location, we
261 : * will hang here waiting for something else to update the AIL
262 : * push target.
263 : *
264 : * Therefore, if there isn't space to wake the first waiter on
265 : * the grant head, we need to push the AIL again to ensure the
266 : * target reflects both the current log tail and log head
267 : * position before we wait for the tail to move again.
268 : */
269 :
270 80414738 : need_bytes = xlog_ticket_reservation(log, head, tic);
271 80414739 : if (*free_bytes < need_bytes) {
272 4113814 : if (!woken_task)
273 375064 : xlog_grant_push_ail(log, need_bytes);
274 4113814 : return false;
275 : }
276 :
277 76300925 : *free_bytes -= need_bytes;
278 76300925 : trace_xfs_log_grant_wake_up(log, tic);
279 76300925 : wake_up_process(tic->t_task);
280 76300924 : woken_task = true;
281 : }
282 :
283 : return true;
284 : }
285 :
286 : STATIC int
287 2212917 : xlog_grant_head_wait(
288 : struct xlog *log,
289 : struct xlog_grant_head *head,
290 : struct xlog_ticket *tic,
291 : int need_bytes) __releases(&head->lock)
292 : __acquires(&head->lock)
293 : {
294 2212917 : list_add_tail(&tic->t_queue, &head->waiters);
295 :
296 2213308 : do {
297 4426616 : if (xlog_is_shutdown(log))
298 0 : goto shutdown;
299 2213308 : xlog_grant_push_ail(log, need_bytes);
300 :
301 2213308 : __set_current_state(TASK_UNINTERRUPTIBLE);
302 2213308 : spin_unlock(&head->lock);
303 :
304 2213278 : XFS_STATS_INC(log->l_mp, xs_sleep_logspace);
305 :
306 2213278 : trace_xfs_log_grant_sleep(log, tic);
307 2213300 : schedule();
308 2212624 : trace_xfs_log_grant_wake(log, tic);
309 :
310 2212694 : spin_lock(&head->lock);
311 4426616 : if (xlog_is_shutdown(log))
312 0 : goto shutdown;
313 2213308 : } while (xlog_space_left(log, &head->grant) < need_bytes);
314 :
315 2212917 : list_del_init(&tic->t_queue);
316 2212917 : return 0;
317 0 : shutdown:
318 0 : list_del_init(&tic->t_queue);
319 0 : return -EIO;
320 : }
321 :
322 : /*
323 : * Atomically get the log space required for a log ticket.
324 : *
325 : * Once a ticket gets put onto head->waiters, it will only return after the
326 : * needed reservation is satisfied.
327 : *
328 : * This function is structured so that it has a lock free fast path. This is
329 : * necessary because every new transaction reservation will come through this
330 : * path. Hence any lock will be globally hot if we take it unconditionally on
331 : * every pass.
332 : *
333 : * As tickets are only ever moved on and off head->waiters under head->lock, we
334 : * only need to take that lock if we are going to add the ticket to the queue
335 : * and sleep. We can avoid taking the lock if the ticket was never added to
336 : * head->waiters because the t_queue list head will be empty and we hold the
337 : * only reference to it so it can safely be checked unlocked.
338 : */
339 : STATIC int
340 1424143357 : xlog_grant_head_check(
341 : struct xlog *log,
342 : struct xlog_grant_head *head,
343 : struct xlog_ticket *tic,
344 : int *need_bytes)
345 : {
346 1424143357 : int free_bytes;
347 1424143357 : int error = 0;
348 :
349 2848286714 : ASSERT(!xlog_in_recovery(log));
350 :
351 : /*
352 : * If there are other waiters on the queue then give them a chance at
353 : * logspace before us. Wake up the first waiters, if we do not wake
354 : * up all the waiters then go to sleep waiting for more free space,
355 : * otherwise try to get some space for this transaction.
356 : */
357 1424143357 : *need_bytes = xlog_ticket_reservation(log, head, tic);
358 1424280247 : free_bytes = xlog_space_left(log, &head->grant);
359 1424266655 : if (!list_empty_careful(&head->waiters)) {
360 3187486 : spin_lock(&head->lock);
361 3186527 : if (!xlog_grant_head_wake(log, head, &free_bytes) ||
362 1035013 : free_bytes < *need_bytes) {
363 2194665 : error = xlog_grant_head_wait(log, head, tic,
364 : *need_bytes);
365 : }
366 3186527 : spin_unlock(&head->lock);
367 1421103367 : } else if (free_bytes < *need_bytes) {
368 18251 : spin_lock(&head->lock);
369 18252 : error = xlog_grant_head_wait(log, head, tic, *need_bytes);
370 18252 : spin_unlock(&head->lock);
371 : }
372 :
373 1424289806 : return error;
374 : }
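/*
 * Illustrative sketch (not part of the original source): the lock free
 * fast path described above, reduced to its skeleton. The unlocked
 * list_empty_careful() check is safe because tickets only move on and off
 * head->waiters under head->lock. The wake-up of earlier waiters that the
 * real function performs before deciding to sleep is omitted here.
 */
static int
example_grant_check_skeleton(
	struct xlog		*log,
	struct xlog_grant_head	*head,
	struct xlog_ticket	*tic,
	int			need_bytes)
{
	int			error = 0;

	/* fast path: no waiters queued and enough space is available */
	if (list_empty_careful(&head->waiters) &&
	    xlog_space_left(log, &head->grant) >= need_bytes)
		return 0;

	/* slow path: queue the ticket and sleep until space is available */
	spin_lock(&head->lock);
	error = xlog_grant_head_wait(log, head, tic, need_bytes);
	spin_unlock(&head->lock);
	return error;
}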
375 :
376 : bool
377 102014 : xfs_log_writable(
378 : struct xfs_mount *mp)
379 : {
380 : /*
381 : * Do not write to the log on norecovery mounts, if the data or log
382 : * devices are read-only, or if the filesystem is shutdown. Read-only
383 : * mounts allow internal writes for log recovery and unmount purposes,
384 : * so don't restrict that case.
385 : */
386 102014 : if (xfs_has_norecovery(mp))
387 : return false;
388 101994 : if (xfs_readonly_buftarg(mp->m_ddev_targp))
389 : return false;
390 101990 : if (xfs_readonly_buftarg(mp->m_log->l_targ))
391 : return false;
392 203980 : if (xlog_is_shutdown(mp->m_log))
393 23751 : return false;
394 : return true;
395 : }
396 :
397 : /*
398 : * Replenish the byte reservation required by moving the grant write head.
399 : */
400 : int
401 962420292 : xfs_log_regrant(
402 : struct xfs_mount *mp,
403 : struct xlog_ticket *tic)
404 : {
405 962420292 : struct xlog *log = mp->m_log;
406 962420292 : int need_bytes;
407 962420292 : int error = 0;
408 :
409 1924840584 : if (xlog_is_shutdown(log))
410 : return -EIO;
411 :
412 962420255 : XFS_STATS_INC(mp, xs_try_logspace);
413 :
414 : /*
415 : * This is a new transaction on the ticket, so we need to change the
416 : * transaction ID so that the next transaction has a different TID in
417 : * the log. Just add one to the existing tid so that we can see chains
418 : * of rolling transactions in the log easily.
419 : */
420 962420255 : tic->t_tid++;
421 :
422 962420255 : xlog_grant_push_ail(log, tic->t_unit_res);
423 :
424 962447351 : tic->t_curr_res = tic->t_unit_res;
425 962447351 : if (tic->t_cnt > 0)
426 : return 0;
427 :
428 222230420 : trace_xfs_log_regrant(log, tic);
429 :
430 222231415 : error = xlog_grant_head_check(log, &log->l_write_head, tic,
431 : &need_bytes);
432 222231687 : if (error)
433 0 : goto out_error;
434 :
435 222231687 : xlog_grant_add_space(log, &log->l_write_head.grant, need_bytes);
436 222231894 : trace_xfs_log_regrant_exit(log, tic);
437 222231966 : xlog_verify_grant_tail(log);
438 222231966 : return 0;
439 :
440 : out_error:
441 : /*
442 : * If we are failing, make sure the ticket doesn't have any current
443 : * reservations. We don't want to add this back when the ticket/
444 : * transaction gets cancelled.
445 : */
446 0 : tic->t_curr_res = 0;
447 0 : tic->t_cnt = 0; /* ungrant will give back unit_res * t_cnt. */
448 0 : return error;
449 : }
450 :
451 : /*
452 : * Reserve log space and return a ticket corresponding to the reservation.
453 : *
454 : * Each reservation is going to reserve extra space for a log record header.
455 : * When writes happen to the on-disk log, we don't subtract the length of the
456 : * log record header from any reservation. By wasting space in each
457 : * reservation, we prevent over allocation problems.
458 : */
459 : int
460 1201942697 : xfs_log_reserve(
461 : struct xfs_mount *mp,
462 : int unit_bytes,
463 : int cnt,
464 : struct xlog_ticket **ticp,
465 : bool permanent)
466 : {
467 1201942697 : struct xlog *log = mp->m_log;
468 1201942697 : struct xlog_ticket *tic;
469 1201942697 : int need_bytes;
470 1201942697 : int error = 0;
471 :
472 2403885394 : if (xlog_is_shutdown(log))
473 : return -EIO;
474 :
475 1201940631 : XFS_STATS_INC(mp, xs_try_logspace);
476 :
477 1201940631 : ASSERT(*ticp == NULL);
478 1201940631 : tic = xlog_ticket_alloc(log, unit_bytes, cnt, permanent);
479 1202076389 : *ticp = tic;
480 :
481 1202076389 : xlog_grant_push_ail(log, tic->t_cnt ? tic->t_unit_res * tic->t_cnt
482 : : tic->t_unit_res);
483 :
484 1201935546 : trace_xfs_log_reserve(log, tic);
485 :
486 1202028578 : error = xlog_grant_head_check(log, &log->l_reserve_head, tic,
487 : &need_bytes);
488 1202082211 : if (error)
489 0 : goto out_error;
490 :
491 1202082211 : xlog_grant_add_space(log, &log->l_reserve_head.grant, need_bytes);
492 1202121036 : xlog_grant_add_space(log, &log->l_write_head.grant, need_bytes);
493 1202194795 : trace_xfs_log_reserve_exit(log, tic);
494 1202198136 : xlog_verify_grant_tail(log);
495 1202198136 : return 0;
496 :
497 : out_error:
498 : /*
499 : * If we are failing, make sure the ticket doesn't have any current
500 : * reservations. We don't want to add this back when the ticket/
501 : * transaction gets cancelled.
502 : */
503 0 : tic->t_curr_res = 0;
504 0 : tic->t_cnt = 0; /* ungrant will give back unit_res * t_cnt. */
505 0 : return error;
506 : }
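/*
 * Illustrative sketch (not part of the original source): the expected
 * calling pattern for the two entry points above. A permanent reservation
 * is taken once with xfs_log_reserve(); each time the transaction rolls,
 * xfs_log_regrant() refills t_curr_res and bumps t_tid so the parts of the
 * chain remain distinguishable in the log. The byte and count values are
 * arbitrary and error handling is minimal.
 */
static int
example_rolling_reservation(
	struct xfs_mount	*mp)
{
	struct xlog_ticket	*tic = NULL;
	int			error;

	/* grant unit_bytes * cnt from the reserve head, once */
	error = xfs_log_reserve(mp, 4096, 2, &tic, true);
	if (error)
		return error;

	/* ... log and commit the first transaction in the chain ... */

	/* refill the write head grant before the next transaction */
	return xfs_log_regrant(mp, tic);
}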
507 :
508 : /*
509 : * Run all the pending iclog callbacks and wake log force waiters and iclog
510 : * space waiters so they can process the newly set shutdown state. We really
511 : * don't care what order we process callbacks here because the log is shut down
512 : * and so state cannot change on disk anymore. However, we cannot wake waiters
513 : * until the callbacks have been processed because we may be in unmount and
514 : * we must ensure that all AIL operations the callbacks perform have completed
515 : * before we tear down the AIL.
516 : *
517 : * We avoid processing actively referenced iclogs so that we don't run callbacks
518 : * while the iclog owner might still be preparing the iclog for IO submission.
519 : * These will be caught by xlog_state_iclog_release() and call this function
520 : * again to process any callbacks that may have been added to that iclog.
521 : */
522 : static void
523 13456 : xlog_state_shutdown_callbacks(
524 : struct xlog *log)
525 : {
526 13456 : struct xlog_in_core *iclog;
527 13456 : LIST_HEAD(cb_list);
528 :
529 13456 : iclog = log->l_iclog;
530 107648 : do {
531 107648 : if (atomic_read(&iclog->ic_refcnt)) {
532 : /* Reference holder will re-run iclog callbacks. */
533 1565 : continue;
534 : }
535 106083 : list_splice_init(&iclog->ic_callbacks, &cb_list);
536 106083 : spin_unlock(&log->l_icloglock);
537 :
538 106083 : xlog_cil_process_committed(&cb_list);
539 :
540 106083 : spin_lock(&log->l_icloglock);
541 106083 : wake_up_all(&iclog->ic_write_wait);
542 106083 : wake_up_all(&iclog->ic_force_wait);
543 107648 : } while ((iclog = iclog->ic_next) != log->l_iclog);
544 :
545 13456 : wake_up_all(&log->l_flush_wait);
546 13456 : }
547 :
548 : /*
549 : * Flush iclog to disk if this is the last reference to the given iclog and
550 : * it is in the WANT_SYNC state.
551 : *
552 : * If XLOG_ICL_NEED_FUA is already set on the iclog, we need to ensure that the
553 : * log tail is updated correctly. NEED_FUA indicates that the iclog will be
554 : * written to stable storage, and implies that a commit record is contained
555 : * within the iclog. We need to ensure that the log tail does not move beyond
556 : * the tail that the first commit record in the iclog ordered against, otherwise
557 : * correct recovery of that checkpoint becomes dependent on future operations
558 : * performed on this iclog.
559 : *
560 : * Hence if NEED_FUA is set and the current iclog tail lsn is empty, write the
561 : * current tail into iclog. Once the iclog tail is set, future operations must
562 : * not modify it, otherwise they potentially violate ordering constraints for
563 : * the checkpoint commit that wrote the initial tail lsn value. The tail lsn in
564 : * the iclog will get zeroed on activation of the iclog after sync, so we
565 : * always capture the tail lsn on the iclog on the first NEED_FUA release
566 : * regardless of the number of active reference counts on this iclog.
567 : */
568 : int
569 42981863 : xlog_state_release_iclog(
570 : struct xlog *log,
571 : struct xlog_in_core *iclog,
572 : struct xlog_ticket *ticket)
573 : {
574 42981863 : xfs_lsn_t tail_lsn;
575 42981863 : bool last_ref;
576 :
577 42981863 : lockdep_assert_held(&log->l_icloglock);
578 :
579 42981863 : trace_xlog_iclog_release(iclog, _RET_IP_);
580 : /*
581 : * Grabbing the current log tail needs to be atomic w.r.t. the writing
582 : * of the tail LSN into the iclog so we guarantee that the log tail does
583 : * not move between the first time we know that the iclog needs to be
584 : * made stable and when we eventually submit it.
585 : */
586 42981873 : if ((iclog->ic_state == XLOG_STATE_WANT_SYNC ||
587 10613788 : (iclog->ic_flags & XLOG_ICL_NEED_FUA)) &&
588 35491295 : !iclog->ic_header.h_tail_lsn) {
589 32325853 : tail_lsn = xlog_assign_tail_lsn(log->l_mp);
590 32325853 : iclog->ic_header.h_tail_lsn = cpu_to_be64(tail_lsn);
591 : }
592 :
593 42981873 : last_ref = atomic_dec_and_test(&iclog->ic_refcnt);
594 :
595 85963780 : if (xlog_is_shutdown(log)) {
596 : /*
597 : * If there are no more references to this iclog, process the
598 : * pending iclog callbacks that were waiting on the release of
599 : * this iclog.
600 : */
601 1577 : if (last_ref)
602 1577 : xlog_state_shutdown_callbacks(log);
603 1577 : return -EIO;
604 : }
605 :
606 42980313 : if (!last_ref)
607 : return 0;
608 :
609 39174423 : if (iclog->ic_state != XLOG_STATE_WANT_SYNC) {
610 6850188 : ASSERT(iclog->ic_state == XLOG_STATE_ACTIVE);
611 6850188 : return 0;
612 : }
613 :
614 32324235 : iclog->ic_state = XLOG_STATE_SYNCING;
615 32324235 : xlog_verify_tail_lsn(log, iclog);
616 32324235 : trace_xlog_iclog_syncing(iclog, _RET_IP_);
617 :
618 32324235 : spin_unlock(&log->l_icloglock);
619 32324234 : xlog_sync(log, iclog, ticket);
620 32324190 : spin_lock(&log->l_icloglock);
621 32324190 : return 0;
622 : }
623 :
624 : /*
625 : * Mount a log filesystem
626 : *
627 : * mp - ubiquitous xfs mount point structure
628 : * log_target - buftarg of on-disk log device
629 : * blk_offset - Start block # where block size is 512 bytes (BBSIZE)
630 : * num_bblocks - Number of BBSIZE blocks in on-disk log
631 : *
632 : * Return error or zero.
633 : */
634 : int
635 24333 : xfs_log_mount(
636 : xfs_mount_t *mp,
637 : xfs_buftarg_t *log_target,
638 : xfs_daddr_t blk_offset,
639 : int num_bblks)
640 : {
641 24333 : struct xlog *log;
642 24333 : int error = 0;
643 24333 : int min_logfsbs;
644 :
645 24333 : if (!xfs_has_norecovery(mp)) {
646 24323 : xfs_notice(mp, "Mounting V%d Filesystem %pU",
647 : XFS_SB_VERSION_NUM(&mp->m_sb),
648 : &mp->m_sb.sb_uuid);
649 : } else {
650 10 : xfs_notice(mp,
651 : "Mounting V%d filesystem %pU in no-recovery mode. Filesystem will be inconsistent.",
652 : XFS_SB_VERSION_NUM(&mp->m_sb),
653 : &mp->m_sb.sb_uuid);
654 20 : ASSERT(xfs_is_readonly(mp));
655 : }
656 :
657 24333 : log = xlog_alloc_log(mp, log_target, blk_offset, num_bblks);
658 24333 : if (IS_ERR(log)) {
659 0 : error = PTR_ERR(log);
660 0 : goto out;
661 : }
662 24333 : mp->m_log = log;
663 :
664 : /*
665 : * Now that we have set up the log and its internal geometry
666 : * parameters, we can validate the given log space and drop a critical
667 : * message via syslog if the log size is too small. A log that is too
668 : * small can lead to unexpected situations in transaction log space
669 : * reservation stage. The superblock verifier has already validated all
670 : * the other log geometry constraints, so we don't have to check those
671 : * here.
672 : *
673 : * Note: For v4 filesystems, we can't just reject the mount if the
674 : * validation fails. This would mean that people would have to
675 : * downgrade their kernel just to remedy the situation as there is no
676 : * way to grow the log (short of black magic surgery with xfs_db).
677 : *
678 : * We can, however, reject mounts for V5 format filesystems, as the
679 : * mkfs binary being used to make the filesystem should never create a
680 : * filesystem with a log that is too small.
681 : */
682 24333 : min_logfsbs = xfs_log_calc_minimum_size(mp);
683 24333 : if (mp->m_sb.sb_logblocks < min_logfsbs) {
684 0 : xfs_warn(mp,
685 : "Log size %d blocks too small, minimum size is %d blocks",
686 : mp->m_sb.sb_logblocks, min_logfsbs);
687 :
688 : /*
689 : * Log check errors are always fatal on v5; or whenever bad
690 : * metadata leads to a crash.
691 : */
692 0 : if (xfs_has_crc(mp)) {
693 0 : xfs_crit(mp, "AAIEEE! Log failed size checks. Abort!");
694 0 : ASSERT(0);
695 0 : error = -EINVAL;
696 0 : goto out_free_log;
697 : }
698 0 : xfs_crit(mp, "Log size out of supported range.");
699 0 : xfs_crit(mp,
700 : "Continuing onwards, but if log hangs are experienced then please report this message in the bug report.");
701 : }
702 :
703 : /*
704 : * Initialize the AIL now we have a log.
705 : */
706 24333 : error = xfs_trans_ail_init(mp);
707 24333 : if (error) {
708 0 : xfs_warn(mp, "AIL initialisation failed: error %d", error);
709 0 : goto out_free_log;
710 : }
711 24333 : log->l_ailp = mp->m_ail;
712 :
713 : /*
714 : * skip log recovery on a norecovery mount. pretend it all
715 : * just worked.
716 : */
717 24333 : if (!xfs_has_norecovery(mp)) {
718 : /*
719 : * log recovery ignores readonly state and so we need to clear
720 : * mount-based read only state so it can write to disk.
721 : */
722 24323 : bool readonly = test_and_clear_bit(XFS_OPSTATE_READONLY,
723 24323 : &mp->m_opstate);
724 24323 : error = xlog_recover(log);
725 24323 : if (readonly)
726 2211 : set_bit(XFS_OPSTATE_READONLY, &mp->m_opstate);
727 24323 : if (error) {
728 8 : xfs_warn(mp, "log mount/recovery failed: error %d",
729 : error);
730 8 : xlog_recover_cancel(log);
731 8 : goto out_destroy_ail;
732 : }
733 : }
734 :
735 24325 : error = xfs_sysfs_init(&log->l_kobj, &xfs_log_ktype, &mp->m_kobj,
736 : "log");
737 24325 : if (error)
738 0 : goto out_destroy_ail;
739 :
740 : /* Normal transactions can now occur */
741 24325 : clear_bit(XLOG_ACTIVE_RECOVERY, &log->l_opstate);
742 :
743 : /*
744 : * Now the log has been fully initialised and we know where our
745 : * space grant counters are, we can initialise the permanent ticket
746 : * needed for delayed logging to work.
747 : */
748 24325 : xlog_cil_init_post_recovery(log);
749 :
750 24325 : return 0;
751 :
752 8 : out_destroy_ail:
753 8 : xfs_trans_ail_destroy(mp);
754 8 : out_free_log:
755 8 : xlog_dealloc_log(log);
756 : out:
757 : return error;
758 : }
759 :
760 : /*
761 : * Finish the recovery of the file system. This is separate from the
762 : * xfs_log_mount() call, because it depends on the code in xfs_mountfs() to read
763 : * in the root and real-time bitmap inodes between calling xfs_log_mount() and
764 : * here.
765 : *
766 : * If we finish recovery successfully, start the background log work. If we are
767 : * not doing recovery, then we have a RO filesystem and we don't need to start
768 : * it.
769 : */
770 : int
771 24315 : xfs_log_mount_finish(
772 : struct xfs_mount *mp)
773 : {
774 24315 : struct xlog *log = mp->m_log;
775 24315 : bool readonly;
776 24315 : int error = 0;
777 :
778 24315 : if (xfs_has_norecovery(mp)) {
779 16 : ASSERT(xfs_is_readonly(mp));
780 8 : return 0;
781 : }
782 :
783 : /*
784 : * log recovery ignores readonly state and so we need to clear
785 : * mount-based read only state so it can write to disk.
786 : */
787 24307 : readonly = test_and_clear_bit(XFS_OPSTATE_READONLY, &mp->m_opstate);
788 :
789 : /*
790 : * During the second phase of log recovery, we need iget and
791 : * iput to behave like they do for an active filesystem.
792 : * xfs_fs_drop_inode needs to be able to prevent the deletion
793 : * of inodes before we're done replaying log items on those
794 : * inodes. Turn it off immediately after recovery finishes
795 : * so that we don't leak the quota inodes if subsequent mount
796 : * activities fail.
797 : *
798 : * We let all inodes involved in redo item processing end up on
799 : * the LRU instead of being evicted immediately so that if we do
800 : * something to an unlinked inode, the irele won't cause
801 : * premature truncation and freeing of the inode, which results
802 : * in log recovery failure. We have to evict the unreferenced
803 : * lru inodes after clearing SB_ACTIVE because we don't
804 : * otherwise clean up the lru if there's a subsequent failure in
805 : * xfs_mountfs, which leads to us leaking the inodes if nothing
806 : * else (e.g. quotacheck) references the inodes before the
807 : * mount failure occurs.
808 : */
809 24307 : mp->m_super->s_flags |= SB_ACTIVE;
810 24307 : xfs_log_work_queue(mp);
811 48614 : if (xlog_recovery_needed(log))
812 11369 : error = xlog_recover_finish(log);
813 24307 : mp->m_super->s_flags &= ~SB_ACTIVE;
814 24307 : evict_inodes(mp->m_super);
815 :
816 : /*
817 : * Drain the buffer LRU after log recovery. This is required for v4
818 : * filesystems to avoid leaving around buffers with NULL verifier ops,
819 : * but we do it unconditionally to make sure we're always in a clean
820 : * cache state after mount.
821 : *
822 : * Don't push in the error case because the AIL may have pending intents
823 : * that aren't removed until recovery is cancelled.
824 : */
825 48614 : if (xlog_recovery_needed(log)) {
826 11369 : if (!error) {
827 11367 : xfs_log_force(mp, XFS_LOG_SYNC);
828 11367 : xfs_ail_push_all_sync(mp->m_ail);
829 : }
830 11369 : xfs_notice(mp, "Ending recovery (logdev: %s)",
831 : mp->m_logname ? mp->m_logname : "internal");
832 : } else {
833 12938 : xfs_info(mp, "Ending clean mount");
834 : }
835 24307 : xfs_buftarg_drain(mp->m_ddev_targp);
836 :
837 24307 : clear_bit(XLOG_RECOVERY_NEEDED, &log->l_opstate);
838 24307 : if (readonly)
839 2207 : set_bit(XFS_OPSTATE_READONLY, &mp->m_opstate);
840 :
841 : /* Make sure the log is dead if we're returning failure. */
842 24309 : ASSERT(!error || xlog_is_shutdown(log));
843 :
844 : return error;
845 : }
846 :
847 : /*
848 : * The mount has failed. Cancel the recovery if it hasn't completed and destroy
849 : * the log.
850 : */
851 : void
852 34 : xfs_log_mount_cancel(
853 : struct xfs_mount *mp)
854 : {
855 34 : xlog_recover_cancel(mp->m_log);
856 34 : xfs_log_unmount(mp);
857 34 : }
858 :
859 : /*
860 : * Flush out the iclog to disk ensuring that device caches are flushed and
861 : * the iclog hits stable storage before any completion waiters are woken.
862 : */
863 : static inline int
864 2934682 : xlog_force_iclog(
865 : struct xlog_in_core *iclog)
866 : {
867 2934682 : atomic_inc(&iclog->ic_refcnt);
868 2934682 : iclog->ic_flags |= XLOG_ICL_NEED_FLUSH | XLOG_ICL_NEED_FUA;
869 2934682 : if (iclog->ic_state == XLOG_STATE_ACTIVE)
870 2934683 : xlog_state_switch_iclogs(iclog->ic_log, iclog, 0);
871 2934681 : return xlog_state_release_iclog(iclog->ic_log, iclog, NULL);
872 : }
873 :
874 : /*
875 : * Cycle all the iclogbuf locks to make sure all log IO completion
876 : * is done before we tear down these buffers.
877 : */
878 : static void
879 24331 : xlog_wait_iclog_completion(struct xlog *log)
880 : {
881 24331 : int i;
882 24331 : struct xlog_in_core *iclog = log->l_iclog;
883 :
884 218967 : for (i = 0; i < log->l_iclog_bufs; i++) {
885 194636 : down(&iclog->ic_sema);
886 194636 : up(&iclog->ic_sema);
887 194636 : iclog = iclog->ic_next;
888 : }
889 24331 : }
890 :
891 : /*
892 : * Wait for the iclog and all prior iclogs to be written to disk as required by the
893 : * log force state machine. Waiting on ic_force_wait ensures iclog completions
894 : * have been ordered and callbacks run before we are woken here, hence
895 : * guaranteeing that all the iclogs up to this one are on stable storage.
896 : */
897 : int
898 7958257 : xlog_wait_on_iclog(
899 : struct xlog_in_core *iclog)
900 : __releases(iclog->ic_log->l_icloglock)
901 : {
902 7958257 : struct xlog *log = iclog->ic_log;
903 :
904 7958257 : trace_xlog_iclog_wait_on(iclog, _RET_IP_);
905 15916516 : if (!xlog_is_shutdown(log) &&
906 7956929 : iclog->ic_state != XLOG_STATE_ACTIVE &&
907 : iclog->ic_state != XLOG_STATE_DIRTY) {
908 6491865 : XFS_STATS_INC(log->l_mp, xs_log_force_sleep);
909 6491865 : xlog_wait(&iclog->ic_force_wait, &log->l_icloglock);
910 : } else {
911 1466393 : spin_unlock(&log->l_icloglock);
912 : }
913 :
914 15914186 : if (xlog_is_shutdown(log))
915 4798 : return -EIO;
916 : return 0;
917 : }
918 :
919 : /*
920 : * Write out an unmount record using the ticket provided. We have to account for
921 : * the data space used in the unmount ticket as this write is not done from a
922 : * transaction context that has already done the accounting for us.
923 : */
924 : static int
925 14877 : xlog_write_unmount_record(
926 : struct xlog *log,
927 : struct xlog_ticket *ticket)
928 : {
929 14877 : struct {
930 : struct xlog_op_header ophdr;
931 : struct xfs_unmount_log_format ulf;
932 29754 : } unmount_rec = {
933 : .ophdr = {
934 : .oh_clientid = XFS_LOG,
935 14877 : .oh_tid = cpu_to_be32(ticket->t_tid),
936 : .oh_flags = XLOG_UNMOUNT_TRANS,
937 : },
938 : .ulf = {
939 : .magic = XLOG_UNMOUNT_TYPE,
940 : },
941 : };
942 14877 : struct xfs_log_iovec reg = {
943 : .i_addr = &unmount_rec,
944 : .i_len = sizeof(unmount_rec),
945 : .i_type = XLOG_REG_TYPE_UNMOUNT,
946 : };
947 14877 : struct xfs_log_vec vec = {
948 : .lv_niovecs = 1,
949 : .lv_iovecp = &reg,
950 : };
951 14877 : LIST_HEAD(lv_chain);
952 14877 : list_add(&vec.lv_list, &lv_chain);
953 :
954 14877 : BUILD_BUG_ON((sizeof(struct xlog_op_header) +
955 : sizeof(struct xfs_unmount_log_format)) !=
956 : sizeof(unmount_rec));
957 :
958 : /* account for space used by record data */
959 14877 : ticket->t_curr_res -= sizeof(unmount_rec);
960 :
961 14877 : return xlog_write(log, NULL, &lv_chain, ticket, reg.i_len);
962 : }
963 :
964 : /*
965 : * Mark the filesystem clean by writing an unmount record to the head of the
966 : * log.
967 : */
968 : static void
969 14877 : xlog_unmount_write(
970 : struct xlog *log)
971 : {
972 14877 : struct xfs_mount *mp = log->l_mp;
973 14877 : struct xlog_in_core *iclog;
974 14877 : struct xlog_ticket *tic = NULL;
975 14877 : int error;
976 :
977 14877 : error = xfs_log_reserve(mp, 600, 1, &tic, 0);
978 14877 : if (error)
979 0 : goto out_err;
980 :
981 14877 : error = xlog_write_unmount_record(log, tic);
982 : /*
983 : * At this point, we're umounting anyway, so there's no point in
984 : * transitioning log state to shutdown. Just continue...
985 : */
986 14877 : out_err:
987 14877 : if (error)
988 0 : xfs_alert(mp, "%s: unmount record failed", __func__);
989 :
990 14877 : spin_lock(&log->l_icloglock);
991 14877 : iclog = log->l_iclog;
992 14877 : error = xlog_force_iclog(iclog);
993 14877 : xlog_wait_on_iclog(iclog);
994 :
995 14877 : if (tic) {
996 14877 : trace_xfs_log_umount_write(log, tic);
997 14877 : xfs_log_ticket_ungrant(log, tic);
998 : }
999 14877 : }
1000 :
1001 : static void
1002 14877 : xfs_log_unmount_verify_iclog(
1003 : struct xlog *log)
1004 : {
1005 14877 : struct xlog_in_core *iclog = log->l_iclog;
1006 :
1007 119004 : do {
1008 119004 : ASSERT(iclog->ic_state == XLOG_STATE_ACTIVE);
1009 119004 : ASSERT(iclog->ic_offset == 0);
1010 119004 : } while ((iclog = iclog->ic_next) != log->l_iclog);
1011 14877 : }
1012 :
1013 : /*
1014 : * Unmount record used to have a string "Unmount filesystem--" in the
1015 : * data section where the "Un" was really a magic number (XLOG_UNMOUNT_TYPE).
1016 : * We just write the magic number now since that particular field isn't
1017 : * currently architecture converted and "Unmount" is a bit foo.
1018 : * As far as I know, there weren't any dependencies on the old behaviour.
1019 : */
1020 : static void
1021 26778 : xfs_log_unmount_write(
1022 : struct xfs_mount *mp)
1023 : {
1024 26778 : struct xlog *log = mp->m_log;
1025 :
1026 26778 : if (!xfs_log_writable(mp))
1027 : return;
1028 :
1029 14885 : xfs_log_force(mp, XFS_LOG_SYNC);
1030 :
1031 29770 : if (xlog_is_shutdown(log))
1032 : return;
1033 :
1034 : /*
1035 : * If we think the summary counters are bad, avoid writing the unmount
1036 : * record to force log recovery at next mount, after which the summary
1037 : * counters will be recalculated. Refer to xlog_check_unmount_rec for
1038 : * more details.
1039 : */
1040 14885 : if (XFS_TEST_ERROR(xfs_fs_has_sickness(mp, XFS_SICK_FS_COUNTERS), mp,
1041 : XFS_ERRTAG_FORCE_SUMMARY_RECALC)) {
1042 8 : xfs_alert(mp, "%s: will fix summary counters at next mount",
1043 : __func__);
1044 8 : return;
1045 : }
1046 :
1047 14877 : xfs_log_unmount_verify_iclog(log);
1048 14877 : xlog_unmount_write(log);
1049 : }
1050 :
1051 : /*
1052 : * Empty the log for unmount/freeze.
1053 : *
1054 : * To do this, we first need to shut down the background log work so it is not
1055 : * trying to cover the log as we clean up. We then need to unpin all objects in
1056 : * the log so we can then flush them out. Once they have completed their IO and
1057 : * run the callbacks removing themselves from the AIL, we can cover the log.
1058 : */
1059 : int
1060 75236 : xfs_log_quiesce(
1061 : struct xfs_mount *mp)
1062 : {
1063 : /*
1064 : * Clear log incompat features since we're quiescing the log. Report
1065 : * failures, though it's not fatal to have a higher log feature
1066 : * protection level than the log contents actually require.
1067 : */
1068 75236 : if (xfs_clear_incompat_log_features(mp, XFS_SB_FEAT_INCOMPAT_LOG_ALL)) {
1069 50727 : int error;
1070 :
1071 50727 : error = xfs_sync_sb(mp, false);
1072 50727 : if (error)
1073 0 : xfs_warn(mp,
1074 : "Failed to clear log incompat features on quiesce");
1075 : }
1076 :
1077 75236 : cancel_delayed_work_sync(&mp->m_log->l_work);
1078 75236 : xfs_log_force(mp, XFS_LOG_SYNC);
1079 :
1080 : /*
1081 : * The superblock buffer is uncached and while xfs_ail_push_all_sync()
1082 : * will push it, xfs_buftarg_wait() will not wait for it. Further,
1083 : * xfs_buf_iowait() cannot be used because it was pushed with the
1084 : * XBF_ASYNC flag set, so we need to use a lock/unlock pair to wait for
1085 : * the IO to complete.
1086 : */
1087 75236 : xfs_ail_push_all_sync(mp->m_ail);
1088 75236 : xfs_buftarg_wait(mp->m_ddev_targp);
1089 75236 : xfs_buf_lock(mp->m_sb_bp);
1090 75236 : xfs_buf_unlock(mp->m_sb_bp);
1091 :
1092 75236 : return xfs_log_cover(mp);
1093 : }
1094 :
1095 : void
1096 2447 : xfs_log_clean(
1097 : struct xfs_mount *mp)
1098 : {
1099 2447 : xfs_log_quiesce(mp);
1100 26778 : xfs_log_unmount_write(mp);
1101 2447 : }
1102 :
1103 : /*
1104 : * Shut down and release the AIL and Log.
1105 : *
1106 : * During unmount, we need to ensure we flush all the dirty metadata objects
1107 : * from the AIL so that the log is empty before we write the unmount record to
1108 : * the log. Once this is done, we can tear down the AIL and the log.
1109 : */
1110 : void
1111 24331 : xfs_log_unmount(
1112 : struct xfs_mount *mp)
1113 : {
1114 24331 : xfs_log_clean(mp);
1115 :
1116 : /*
1117 : * If shutdown has come from iclog IO context, the log
1118 : * cleaning will have been skipped and so we need to wait
1119 : * for the iclog to complete shutdown processing before we
1120 : * tear anything down.
1121 : */
1122 24331 : xlog_wait_iclog_completion(mp->m_log);
1123 :
1124 24331 : xfs_buftarg_drain(mp->m_ddev_targp);
1125 :
1126 24331 : xfs_trans_ail_destroy(mp);
1127 :
1128 24331 : xfs_sysfs_del(&mp->m_log->l_kobj);
1129 :
1130 24331 : xlog_dealloc_log(mp->m_log);
1131 24331 : }
1132 :
1133 : void
1134 10217382914 : xfs_log_item_init(
1135 : struct xfs_mount *mp,
1136 : struct xfs_log_item *item,
1137 : int type,
1138 : const struct xfs_item_ops *ops)
1139 : {
1140 10217382914 : item->li_log = mp->m_log;
1141 10217382914 : item->li_ailp = mp->m_ail;
1142 10217382914 : item->li_type = type;
1143 10217382914 : item->li_ops = ops;
1144 10217382914 : item->li_lv = NULL;
1145 :
1146 10217382914 : INIT_LIST_HEAD(&item->li_ail);
1147 10217382914 : INIT_LIST_HEAD(&item->li_cil);
1148 10217382914 : INIT_LIST_HEAD(&item->li_bio_list);
1149 10217382914 : INIT_LIST_HEAD(&item->li_trans);
1150 10217382914 : }
1151 :
1152 : /*
1153 : * Wake up processes waiting for log space after we have moved the log tail.
1154 : */
1155 : void
1156 1206807821 : xfs_log_space_wake(
1157 : struct xfs_mount *mp)
1158 : {
1159 1206807821 : struct xlog *log = mp->m_log;
1160 1206807821 : int free_bytes;
1161 :
1162 2413615642 : if (xlog_is_shutdown(log))
1163 57538 : return;
1164 :
1165 1206750283 : if (!list_empty_careful(&log->l_write_head.waiters)) {
1166 76 : ASSERT(!xlog_in_recovery(log));
1167 :
1168 38 : spin_lock(&log->l_write_head.lock);
1169 38 : free_bytes = xlog_space_left(log, &log->l_write_head.grant);
1170 38 : xlog_grant_head_wake(log, &log->l_write_head, &free_bytes);
1171 38 : spin_unlock(&log->l_write_head.lock);
1172 : }
1173 :
1174 1206753716 : if (!list_empty_careful(&log->l_reserve_head.waiters)) {
1175 6783822 : ASSERT(!xlog_in_recovery(log));
1176 :
1177 3391911 : spin_lock(&log->l_reserve_head.lock);
1178 3391941 : free_bytes = xlog_space_left(log, &log->l_reserve_head.grant);
1179 3391941 : xlog_grant_head_wake(log, &log->l_reserve_head, &free_bytes);
1180 3391941 : spin_unlock(&log->l_reserve_head.lock);
1181 : }
1182 : }
1183 :
1184 : /*
1185 : * Determine if we have a transaction that has gone to disk that needs to be
1186 : * covered. To begin the transition to the idle state firstly the log needs to
1187 : * be idle. That means the CIL, the AIL and the iclogs need to be empty before
1188 : * we start attempting to cover the log.
1189 : *
1190 : * Only if we are then in a state where covering is needed, the caller is
1191 : * informed that dummy transactions are required to move the log into the idle
1192 : * state.
1193 : *
1194 : * If there are any items in the AIL or CIL, then we do not want to attempt to
1195 : * cover the log as we may be in a situation where there isn't log space
1196 : * available to run a dummy transaction and this can lead to deadlocks when the
1197 : * tail of the log is pinned by an item that is modified in the CIL. Hence
1198 : * there's no point in running a dummy transaction at this point because we
1199 : * can't start trying to idle the log until both the CIL and AIL are empty.
1200 : */
1201 : static bool
1202 195837 : xfs_log_need_covered(
1203 : struct xfs_mount *mp)
1204 : {
1205 195837 : struct xlog *log = mp->m_log;
1206 195837 : bool needed = false;
1207 :
1208 195837 : if (!xlog_cil_empty(log))
1209 : return false;
1210 :
1211 191021 : spin_lock(&log->l_icloglock);
1212 191021 : switch (log->l_covered_state) {
1213 : case XLOG_STATE_COVER_DONE:
1214 : case XLOG_STATE_COVER_DONE2:
1215 : case XLOG_STATE_COVER_IDLE:
1216 : break;
1217 114859 : case XLOG_STATE_COVER_NEED:
1218 : case XLOG_STATE_COVER_NEED2:
1219 114859 : if (xfs_ail_min_lsn(log->l_ailp))
1220 : break;
1221 114430 : if (!xlog_iclogs_empty(log))
1222 : break;
1223 :
1224 114430 : needed = true;
1225 114430 : if (log->l_covered_state == XLOG_STATE_COVER_NEED)
1226 57226 : log->l_covered_state = XLOG_STATE_COVER_DONE;
1227 : else
1228 57204 : log->l_covered_state = XLOG_STATE_COVER_DONE2;
1229 : break;
1230 0 : default:
1231 0 : needed = true;
1232 0 : break;
1233 : }
1234 191021 : spin_unlock(&log->l_icloglock);
1235 191021 : return needed;
1236 : }
1237 :
1238 : /*
1239 : * Explicitly cover the log. This is similar to background log covering but
1240 : * intended for usage in quiesce codepaths. The caller is responsible for ensuring
1241 : * the log is idle and suitable for covering. The CIL, iclog buffers and AIL
1242 : * must all be empty.
1243 : */
1244 : static int
1245 75236 : xfs_log_cover(
1246 : struct xfs_mount *mp)
1247 : {
1248 75236 : int error = 0;
1249 75236 : bool need_covered;
1250 :
1251 82174 : ASSERT((xlog_cil_empty(mp->m_log) && xlog_iclogs_empty(mp->m_log) &&
1252 : !xfs_ail_min_lsn(mp->m_log->l_ailp)) ||
1253 : xlog_is_shutdown(mp->m_log));
1254 :
1255 75236 : if (!xfs_log_writable(mp))
1256 : return 0;
1257 :
1258 : /*
1259 : * xfs_log_need_covered() is not idempotent because it progresses the
1260 : * state machine if the log requires covering. Therefore, we must call
1261 : * this function once and use the result until we've issued an sb sync.
1262 : * Do so first to make that abundantly clear.
1263 : *
1264 : * Fall into the covering sequence if the log needs covering or the
1265 : * mount has lazy superblock accounting to sync to disk. The sb sync
1266 : * used for covering accumulates the in-core counters, so covering
1267 : * handles this for us.
1268 : */
1269 63354 : need_covered = xfs_log_need_covered(mp);
1270 63354 : if (!need_covered && !xfs_has_lazysbcount(mp))
1271 : return 0;
1272 :
1273 : /*
1274 : * To cover the log, commit the superblock twice (at most) in
1275 : * independent checkpoints. The first serves as a reference for the
1276 : * tail pointer. The sync transaction and AIL push empties the AIL and
1277 : * updates the in-core tail to the LSN of the first checkpoint. The
1278 : * second commit updates the on-disk tail with the in-core LSN,
1279 : * covering the log. Push the AIL one more time to leave it empty, as
1280 : * we found it.
1281 : */
1282 120457 : do {
1283 120457 : error = xfs_sync_sb(mp, true);
1284 120457 : if (error)
1285 : break;
1286 120450 : xfs_ail_push_all_sync(mp->m_ail);
1287 120450 : } while (xfs_log_need_covered(mp));
1288 :
1289 : return error;
1290 : }
1291 :
1292 : /*
1293 : * We may be holding the log iclog lock upon entering this routine.
1294 : */
1295 : xfs_lsn_t
1296 33103599 : xlog_assign_tail_lsn_locked(
1297 : struct xfs_mount *mp)
1298 : {
1299 33103599 : struct xlog *log = mp->m_log;
1300 33103599 : struct xfs_log_item *lip;
1301 33103599 : xfs_lsn_t tail_lsn;
1302 :
1303 33103599 : assert_spin_locked(&mp->m_ail->ail_lock);
1304 :
1305 : /*
1306 : * To make sure we always have a valid LSN for the log tail we keep
1307 : * track of the last LSN which was committed in log->l_last_sync_lsn,
1308 : * and use that when the AIL was empty.
1309 : */
1310 33103599 : lip = xfs_ail_min(mp->m_ail);
1311 32367256 : if (lip)
1312 32367256 : tail_lsn = lip->li_lsn;
1313 : else
1314 736343 : tail_lsn = atomic64_read(&log->l_last_sync_lsn);
1315 33103599 : trace_xfs_log_assign_tail_lsn(log, tail_lsn);
1316 33103599 : atomic64_set(&log->l_tail_lsn, tail_lsn);
1317 33103599 : return tail_lsn;
1318 : }
1319 :
1320 : xfs_lsn_t
1321 32337219 : xlog_assign_tail_lsn(
1322 : struct xfs_mount *mp)
1323 : {
1324 32337219 : xfs_lsn_t tail_lsn;
1325 :
1326 32337219 : spin_lock(&mp->m_ail->ail_lock);
1327 32337219 : tail_lsn = xlog_assign_tail_lsn_locked(mp);
1328 32337219 : spin_unlock(&mp->m_ail->ail_lock);
1329 :
1330 32337222 : return tail_lsn;
1331 : }
1332 :
1333 : /*
1334 : * Return the space in the log between the tail and the head. The head
1335 : * is passed in the cycle/bytes formal parms. In the special case where
1336 : * the reserve head has wrapped past the tail, this calculation is no
1337 : * longer valid. In this case, just return 0 which means there is no space
1338 : * in the log. This works for all places where this function is called
1339 : * with the reserve head. Of course, if the write head were to ever
1340 : * wrap the tail, we should blow up. Rather than catch this case here,
1341 : * we depend on other ASSERTions in other parts of the code. XXXmiken
1342 : *
1343 : * If reservation head is behind the tail, we have a problem. Warn about it,
1344 : * but then treat it as if the log is empty.
1345 : *
1346 : * If the log is shut down, the head and tail may be invalid or out of whack, so
1347 : * shortcut invalidity asserts in this case so that we don't trigger them
1348 : * falsely.
1349 : */
1350 : STATIC int
1351 3600287193 : xlog_space_left(
1352 : struct xlog *log,
1353 : atomic64_t *head)
1354 : {
1355 3600287193 : int tail_bytes;
1356 3600287193 : int tail_cycle;
1357 3600287193 : int head_cycle;
1358 3600287193 : int head_bytes;
1359 :
1360 3600287193 : xlog_crack_grant_head(head, &head_cycle, &head_bytes);
1361 3600287193 : xlog_crack_atomic_lsn(&log->l_tail_lsn, &tail_cycle, &tail_bytes);
1362 3600287193 : tail_bytes = BBTOB(tail_bytes);
1363 3600287193 : if (tail_cycle == head_cycle && head_bytes >= tail_bytes)
1364 2427734549 : return log->l_logsize - (head_bytes - tail_bytes);
1365 1172552644 : if (tail_cycle + 1 < head_cycle)
1366 : return 0;
1367 :
1368 : /* Ignore potential inconsistency when shutdown. */
1369 2345105288 : if (xlog_is_shutdown(log))
1370 0 : return log->l_logsize;
1371 :
1372 1172552644 : if (tail_cycle < head_cycle) {
1373 1172552644 : ASSERT(tail_cycle == (head_cycle - 1));
1374 1172552644 : return tail_bytes - head_bytes;
1375 : }
1376 :
1377 : /*
1378 : * The reservation head is behind the tail. In this case we just want to
1379 : * return the size of the log as the amount of space left.
1380 : */
1381 0 : xfs_alert(log->l_mp, "xlog_space_left: head behind tail");
1382 0 : xfs_alert(log->l_mp, " tail_cycle = %d, tail_bytes = %d",
1383 : tail_cycle, tail_bytes);
1384 0 : xfs_alert(log->l_mp, " GH cycle = %d, GH bytes = %d",
1385 : head_cycle, head_bytes);
1386 0 : ASSERT(0);
1387 0 : return log->l_logsize;
1388 : }
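/*
 * Illustrative worked example (not part of the original source) of the
 * calculation above, assuming a 64 MiB log (l_logsize = 67108864):
 *
 *  - same cycle, tail at 1 MiB, head at 5 MiB:
 *	space left = l_logsize - (head_bytes - tail_bytes) = 60 MiB
 *
 *  - head has wrapped once (head_cycle == tail_cycle + 1), tail at 5 MiB,
 *    head at 1 MiB:
 *	space left = tail_bytes - head_bytes = 4 MiB
 *
 *  - head_cycle > tail_cycle + 1: the reserve head appears to have lapped
 *	the tail, so 0 is returned, i.e. no space in the log.
 */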
1389 :
1390 :
1391 : static void
1392 32324231 : xlog_ioend_work(
1393 : struct work_struct *work)
1394 : {
1395 32324231 : struct xlog_in_core *iclog =
1396 32324231 : container_of(work, struct xlog_in_core, ic_end_io_work);
1397 32324231 : struct xlog *log = iclog->ic_log;
1398 32324231 : int error;
1399 :
1400 32324231 : error = blk_status_to_errno(iclog->ic_bio.bi_status);
1401 : #ifdef DEBUG
1402 : /* treat writes with injected CRC errors as failed */
1403 32324231 : if (iclog->ic_fail_crc)
1404 : error = -EIO;
1405 : #endif
1406 :
1407 : /*
1408 : * Race to shutdown the filesystem if we see an error.
1409 : */
1410 32324220 : if (XFS_TEST_ERROR(error, log->l_mp, XFS_ERRTAG_IODONE_IOERR)) {
1411 10162 : xfs_alert(log->l_mp, "log I/O error %d", error);
1412 10162 : xlog_force_shutdown(log, SHUTDOWN_LOG_IO_ERROR);
1413 : }
1414 :
1415 32324231 : xlog_state_done_syncing(iclog);
1416 32324231 : bio_uninit(&iclog->ic_bio);
1417 :
1418 : /*
1419 : * Drop the lock to signal that we are done. Nothing references the
1420 : * iclog after this, so an unmount waiting on this lock can now tear it
1421 : * down safely. As such, it is unsafe to reference the iclog after the
1422 : * unlock as we could race with it being freed.
1423 : */
1424 32324231 : up(&iclog->ic_sema);
1425 32324231 : }
1426 :
1427 : /*
1428 : * Return size of each in-core log record buffer.
1429 : *
1430 : * All machines get 8 x 32kB buffers by default, unless tuned otherwise.
1431 : *
1432 : * If the filesystem blocksize is too large, we may need to choose a
1433 : * larger size since the directory code currently logs entire blocks.
1434 : */
1435 : STATIC void
1436 24333 : xlog_get_iclog_buffer_size(
1437 : struct xfs_mount *mp,
1438 : struct xlog *log)
1439 : {
1440 24333 : if (mp->m_logbufs <= 0)
1441 24329 : mp->m_logbufs = XLOG_MAX_ICLOGS;
1442 24333 : if (mp->m_logbsize <= 0)
1443 24127 : mp->m_logbsize = XLOG_BIG_RECORD_BSIZE;
1444 :
1445 24333 : log->l_iclog_bufs = mp->m_logbufs;
1446 24333 : log->l_iclog_size = mp->m_logbsize;
1447 :
1448 : /*
1449 : * # headers = size / 32k - one header holds cycles from 32k of data.
1450 : */
1451 24333 : log->l_iclog_heads =
1452 24333 : DIV_ROUND_UP(mp->m_logbsize, XLOG_HEADER_CYCLE_SIZE);
1453 24333 : log->l_iclog_hsize = log->l_iclog_heads << BBSHIFT;
1454 24333 : }
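/*
 * Illustrative worked example (not part of the original source): with the
 * defaults of 8 buffers of XLOG_BIG_RECORD_BSIZE (32k) each,
 * DIV_ROUND_UP(32768, XLOG_HEADER_CYCLE_SIZE) gives one header per iclog,
 * so l_iclog_hsize = 1 << BBSHIFT = 512 bytes and each iclog carries
 * 32768 - 512 bytes of log data. A 256k logbsize would instead need
 * 8 headers (4096 bytes of header space) per iclog.
 */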
1455 :
1456 : void
1457 85060 : xfs_log_work_queue(
1458 : struct xfs_mount *mp)
1459 : {
1460 85060 : queue_delayed_work(mp->m_sync_workqueue, &mp->m_log->l_work,
1461 85060 : msecs_to_jiffies(xfs_syncd_centisecs * 10));
1462 85060 : }
1463 :
1464 : /*
1465 : * Clear the log incompat flags if we have the opportunity.
1466 : *
1467 : * This only happens if we're about to log the second dummy transaction as part
1468 : * of covering the log and we can get the log incompat feature usage lock.
1469 : */
1470 : static inline void
1471 220 : xlog_clear_incompat(
1472 : struct xlog *log)
1473 : {
1474 220 : struct xfs_mount *mp = log->l_mp;
1475 220 : uint32_t incompat_mask = 0;
1476 :
1477 220 : if (!xfs_sb_has_incompat_log_feature(&mp->m_sb,
1478 : XFS_SB_FEAT_INCOMPAT_LOG_ALL))
1479 : return;
1480 :
1481 217 : if (log->l_covered_state != XLOG_STATE_COVER_DONE2)
1482 : return;
1483 :
1484 98 : if (down_write_trylock(&log->l_incompat_xattrs))
1485 98 : incompat_mask |= XFS_SB_FEAT_INCOMPAT_LOG_XATTRS;
1486 :
1487 98 : if (down_write_trylock(&log->l_incompat_swapext))
1488 98 : incompat_mask |= XFS_SB_FEAT_INCOMPAT_LOG_SWAPEXT;
1489 :
1490 98 : if (!incompat_mask)
1491 : return;
1492 :
1493 98 : xfs_clear_incompat_log_features(mp, incompat_mask);
1494 :
1495 98 : if (incompat_mask & XFS_SB_FEAT_INCOMPAT_LOG_SWAPEXT)
1496 98 : up_write(&log->l_incompat_swapext);
1497 :
1498 98 : if (incompat_mask & XFS_SB_FEAT_INCOMPAT_LOG_XATTRS)
1499 98 : up_write(&log->l_incompat_xattrs);
1500 : }
1501 :
1502 : /*
1503 : * Every sync period we need to unpin all items in the AIL and push them to
1504 : * disk. If there is nothing dirty, then we might need to cover the log to
1505 : * indicate that the filesystem is idle.
1506 : */
1507 : static void
1508 12055 : xfs_log_worker(
1509 : struct work_struct *work)
1510 : {
1511 12055 : struct xlog *log = container_of(to_delayed_work(work),
1512 : struct xlog, l_work);
1513 12055 : struct xfs_mount *mp = log->l_mp;
1514 :
1515 : /* dgc: errors ignored - not fatal and nowhere to report them */
1516 12055 : if (xfs_fs_writable(mp, SB_FREEZE_WRITE) && xfs_log_need_covered(mp)) {
1517 : /*
1518 : * Dump a transaction into the log that contains no real change.
1519 : * This is needed to stamp the current tail LSN into the log
1520 : * during the covering operation.
1521 : *
1522 : * We cannot use an inode here for this - that will push dirty
1523 : * state back up into the VFS and then periodic inode flushing
1524 : * will prevent log covering from making progress. Hence we
1525 : * synchronously log the superblock instead to ensure the
1526 : * superblock is immediately unpinned and can be written back.
1527 : */
1528 220 : xlog_clear_incompat(log);
1529 220 : xfs_sync_sb(mp, true);
1530 : } else
1531 11840 : xfs_log_force(mp, 0);
1532 :
1533 : /* start pushing all the metadata that is currently dirty */
1534 12065 : xfs_ail_push_all(mp->m_ail);
1535 :
1536 : /* queue us up again */
1537 12065 : xfs_log_work_queue(mp);
1538 12065 : }
1539 :
1540 : /*
1541 : * This routine initializes some of the log structure for a given mount point.
1542 : * Its primary purpose is to fill in enough, so recovery can occur. However,
1543 : * some other stuff may be filled in too.
1544 : */
1545 : STATIC struct xlog *
1546 24333 : xlog_alloc_log(
1547 : struct xfs_mount *mp,
1548 : struct xfs_buftarg *log_target,
1549 : xfs_daddr_t blk_offset,
1550 : int num_bblks)
1551 : {
1552 24333 : struct xlog *log;
1553 24333 : xlog_rec_header_t *head;
1554 24333 : xlog_in_core_t **iclogp;
1555 24333 : xlog_in_core_t *iclog, *prev_iclog=NULL;
1556 24333 : int i;
1557 24333 : int error = -ENOMEM;
1558 24333 : uint log2_size = 0;
1559 :
1560 24333 : log = kmem_zalloc(sizeof(struct xlog), KM_MAYFAIL);
1561 24333 : if (!log) {
1562 0 : xfs_warn(mp, "Log allocation failed: No memory!");
1563 0 : goto out;
1564 : }
1565 :
1566 24333 : log->l_mp = mp;
1567 24333 : log->l_targ = log_target;
1568 24333 : log->l_logsize = BBTOB(num_bblks);
1569 24333 : log->l_logBBstart = blk_offset;
1570 24333 : log->l_logBBsize = num_bblks;
1571 24333 : log->l_covered_state = XLOG_STATE_COVER_IDLE;
1572 24333 : set_bit(XLOG_ACTIVE_RECOVERY, &log->l_opstate);
1573 24333 : INIT_DELAYED_WORK(&log->l_work, xfs_log_worker);
1574 :
1575 24333 : log->l_prev_block = -1;
1576 : /* log->l_tail_lsn = 0x100000000LL; cycle = 1; current block = 0 */
1577 24333 : xlog_assign_atomic_lsn(&log->l_tail_lsn, 1, 0);
1578 24333 : xlog_assign_atomic_lsn(&log->l_last_sync_lsn, 1, 0);
1579 24333 : log->l_curr_cycle = 1; /* 0 is bad since this is initial value */
1580 :
1581 24333 : if (xfs_has_logv2(mp) && mp->m_sb.sb_logsunit > 1)
1582 24055 : log->l_iclog_roundoff = mp->m_sb.sb_logsunit;
1583 : else
1584 278 : log->l_iclog_roundoff = BBSIZE;
1585 :
1586 24333 : xlog_grant_head_init(&log->l_reserve_head);
1587 24333 : xlog_grant_head_init(&log->l_write_head);
1588 :
1589 24333 : error = -EFSCORRUPTED;
1590 24333 : if (xfs_has_sector(mp)) {
1591 24027 : log2_size = mp->m_sb.sb_logsectlog;
1592 24027 : if (log2_size < BBSHIFT) {
1593 0 : xfs_warn(mp, "Log sector size too small (0x%x < 0x%x)",
1594 : log2_size, BBSHIFT);
1595 0 : goto out_free_log;
1596 : }
1597 :
1598 24027 : log2_size -= BBSHIFT;
1599 24027 : if (log2_size > mp->m_sectbb_log) {
1600 0 : xfs_warn(mp, "Log sector size too large (0x%x > 0x%x)",
1601 : log2_size, mp->m_sectbb_log);
1602 0 : goto out_free_log;
1603 : }
1604 :
1605 : /* for larger sector sizes, must have v2 or external log */
1606 24027 : if (log2_size && log->l_logBBstart > 0 &&
1607 : !xfs_has_logv2(mp)) {
1608 0 : xfs_warn(mp,
1609 : "log sector size (0x%x) invalid for configuration.",
1610 : log2_size);
1611 0 : goto out_free_log;
1612 : }
1613 : }
1614 24333 : log->l_sectBBsize = 1 << log2_size;
1615 :
1616 24333 : init_rwsem(&log->l_incompat_xattrs);
1617 24333 : init_rwsem(&log->l_incompat_swapext);
1618 :
1619 24333 : xlog_get_iclog_buffer_size(mp, log);
1620 :
1621 24333 : spin_lock_init(&log->l_icloglock);
1622 24333 : init_waitqueue_head(&log->l_flush_wait);
1623 :
1624 24333 : iclogp = &log->l_iclog;
1625 : /*
1626 : * The amount of memory to allocate for the iclog structure is
1627 : * rather funky due to the way the structure is defined. It is
1628 : * done this way so that we can use different sizes for machines
1629 : * with different amounts of memory. See the definition of
1630 : * xlog_in_core_t in xfs_log_priv.h for details.
1631 : */
1632 24333 : ASSERT(log->l_iclog_size >= 4096);
1633 218985 : for (i = 0; i < log->l_iclog_bufs; i++) {
1634 194652 : size_t bvec_size = howmany(log->l_iclog_size, PAGE_SIZE) *
1635 : sizeof(struct bio_vec);
1636 :
1637 194652 : iclog = kmem_zalloc(sizeof(*iclog) + bvec_size, KM_MAYFAIL);
1638 194652 : if (!iclog)
1639 0 : goto out_free_iclog;
1640 :
1641 194652 : *iclogp = iclog;
1642 194652 : iclog->ic_prev = prev_iclog;
1643 194652 : prev_iclog = iclog;
1644 :
1645 194652 : iclog->ic_data = kvzalloc(log->l_iclog_size,
1646 : GFP_KERNEL | __GFP_RETRY_MAYFAIL);
1647 194652 : if (!iclog->ic_data)
1648 0 : goto out_free_iclog;
1649 194652 : head = &iclog->ic_header;
1650 194652 : memset(head, 0, sizeof(xlog_rec_header_t));
1651 194652 : head->h_magicno = cpu_to_be32(XLOG_HEADER_MAGIC_NUM);
1652 194652 : head->h_version = cpu_to_be32(
1653 : xfs_has_logv2(log->l_mp) ? 2 : 1);
1654 194652 : head->h_size = cpu_to_be32(log->l_iclog_size);
1655 : /* new fields */
1656 194652 : head->h_fmt = cpu_to_be32(XLOG_FMT);
1657 389304 : memcpy(&head->h_fs_uuid, &mp->m_sb.sb_uuid, sizeof(uuid_t));
1658 :
1659 194652 : iclog->ic_size = log->l_iclog_size - log->l_iclog_hsize;
1660 194652 : iclog->ic_state = XLOG_STATE_ACTIVE;
1661 194652 : iclog->ic_log = log;
1662 194652 : atomic_set(&iclog->ic_refcnt, 0);
1663 194652 : INIT_LIST_HEAD(&iclog->ic_callbacks);
1664 194652 : iclog->ic_datap = (void *)iclog->ic_data + log->l_iclog_hsize;
1665 :
1666 194652 : init_waitqueue_head(&iclog->ic_force_wait);
1667 194652 : init_waitqueue_head(&iclog->ic_write_wait);
1668 194652 : INIT_WORK(&iclog->ic_end_io_work, xlog_ioend_work);
1669 194652 : sema_init(&iclog->ic_sema, 1);
1670 :
1671 194652 : iclogp = &iclog->ic_next;
1672 : }
1673 24333 : *iclogp = log->l_iclog; /* complete ring */
1674 24333 : log->l_iclog->ic_prev = prev_iclog; /* re-write 1st prev ptr */
1675 :
1676 48666 : log->l_ioend_workqueue = alloc_workqueue("xfs-log/%s",
1677 : XFS_WQFLAGS(WQ_FREEZABLE | WQ_MEM_RECLAIM |
1678 : WQ_HIGHPRI),
1679 24333 : 0, mp->m_super->s_id);
1680 24333 : if (!log->l_ioend_workqueue)
1681 0 : goto out_free_iclog;
1682 :
1683 24333 : error = xlog_cil_init(log);
1684 24333 : if (error)
1685 0 : goto out_destroy_workqueue;
1686 : return log;
1687 :
1688 : out_destroy_workqueue:
1689 0 : destroy_workqueue(log->l_ioend_workqueue);
1690 0 : out_free_iclog:
1691 0 : for (iclog = log->l_iclog; iclog; iclog = prev_iclog) {
1692 0 : prev_iclog = iclog->ic_next;
1693 0 : kmem_free(iclog->ic_data);
1694 0 : kmem_free(iclog);
1695 0 : if (prev_iclog == log->l_iclog)
1696 : break;
1697 : }
1698 0 : out_free_log:
1699 0 : kmem_free(log);
1700 0 : out:
1701 0 : return ERR_PTR(error);
1702 : } /* xlog_alloc_log */
1703 :
1704 : /*
1705 : * Compute the LSN that we'd need to push the log tail towards in order to have
1706 : * (a) enough on-disk log space to log the number of bytes specified, (b) at
1707 : * least 25% of the log space free, and (c) at least 256 blocks free. If the
1708 : * log free space already meets all three thresholds, this function returns
1709 : * NULLCOMMITLSN.
1710 : */
1711 : xfs_lsn_t
1712 2170794363 : xlog_grant_push_threshold(
1713 : struct xlog *log,
1714 : int need_bytes)
1715 : {
1716 2170794363 : xfs_lsn_t threshold_lsn = 0;
1717 2170794363 : xfs_lsn_t last_sync_lsn;
1718 2170794363 : int free_blocks;
1719 2170794363 : int free_bytes;
1720 2170794363 : int threshold_block;
1721 2170794363 : int threshold_cycle;
1722 2170794363 : int free_threshold;
1723 :
1724 2170794363 : ASSERT(BTOBB(need_bytes) < log->l_logBBsize);
1725 :
1726 2170794363 : free_bytes = xlog_space_left(log, &log->l_reserve_head.grant);
1727 2170873579 : free_blocks = BTOBBT(free_bytes);
1728 :
1729 : /*
1730 : * Set the threshold for the minimum number of free blocks in the
1731 : * log to the maximum of what the caller needs, one quarter of the
1732 : * log, and 256 blocks.
1733 : */
1734 2170873579 : free_threshold = BTOBB(need_bytes);
1735 2170873579 : free_threshold = max(free_threshold, (log->l_logBBsize >> 2));
1736 2170873579 : free_threshold = max(free_threshold, 256);
1737 2170873579 : if (free_blocks >= free_threshold)
1738 : return NULLCOMMITLSN;
1739 :
1740 25938587 : xlog_crack_atomic_lsn(&log->l_tail_lsn, &threshold_cycle,
1741 : &threshold_block);
1742 25938587 : threshold_block += free_threshold;
1743 25938587 : if (threshold_block >= log->l_logBBsize) {
1744 6211081 : threshold_block -= log->l_logBBsize;
1745 6211081 : threshold_cycle += 1;
1746 : }
1747 25938587 : threshold_lsn = xlog_assign_lsn(threshold_cycle,
1748 : threshold_block);
1749 : /*
1750 : * Don't pass in an lsn greater than the lsn of the last
1751 : * log record known to be on disk. Use a snapshot of the last sync lsn
1752 : * so that it doesn't change between the compare and the set.
1753 : */
1754 25938587 : last_sync_lsn = atomic64_read(&log->l_last_sync_lsn);
1755 25938587 : if (XFS_LSN_CMP(threshold_lsn, last_sync_lsn) > 0)
1756 : threshold_lsn = last_sync_lsn;
1757 :
1758 : return threshold_lsn;
1759 : }
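
/*
 * A minimal, userspace-style sketch of the threshold arithmetic above: the
 * free-space target is the largest of the caller's requirement, a quarter of
 * the log, and 256 basic blocks, and the target block wraps around the end of
 * the circular log by bumping the cycle. All names and types here are
 * illustrative stand-ins, not the kernel definitions.
 */
#include <stdint.h>

struct push_target { uint32_t cycle; uint32_t block; };

static struct push_target
push_threshold(uint32_t need_blocks, uint32_t log_size_blocks,
               uint32_t tail_cycle, uint32_t tail_block)
{
        uint32_t want = need_blocks;
        struct push_target t;

        if (want < log_size_blocks / 4)
                want = log_size_blocks / 4;     /* keep >= 25% of the log free */
        if (want < 256)
                want = 256;                     /* and at least 256 blocks */

        t.cycle = tail_cycle;
        t.block = tail_block + want;
        if (t.block >= log_size_blocks) {       /* wrap past the physical end */
                t.block -= log_size_blocks;
                t.cycle++;
        }
        return t;
}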
1760 :
1761 : /*
1762 : * Push the tail of the log if we need to do so to maintain the free log space
1763 : * thresholds set out by xlog_grant_push_threshold. We may need to adopt a
1764 : * policy which pushes on an lsn which is further along in the log once we
1765 : * reach the high water mark. In this manner, we would be creating a low water
1766 : * mark.
1767 : */
1768 : STATIC void
1769 2170085810 : xlog_grant_push_ail(
1770 : struct xlog *log,
1771 : int need_bytes)
1772 : {
1773 2170085810 : xfs_lsn_t threshold_lsn;
1774 :
1775 2170085810 : threshold_lsn = xlog_grant_push_threshold(log, need_bytes);
1776 2196344525 : if (threshold_lsn == NULLCOMMITLSN || xlog_is_shutdown(log))
1777 : return;
1778 :
1779 : /*
1780 : * Get the transaction layer to kick the dirty buffers out to
1781 : * disk asynchronously. No point in trying to do this if
1782 : * the filesystem is shutting down.
1783 : */
1784 25933269 : xfs_ail_push(log->l_ailp, threshold_lsn);
1785 : }
1786 :
1787 : /*
1788 : * Stamp cycle number in every block
1789 : */
1790 : STATIC void
1791 32324229 : xlog_pack_data(
1792 : struct xlog *log,
1793 : struct xlog_in_core *iclog,
1794 : int roundoff)
1795 : {
1796 32324229 : int i, j, k;
1797 32324229 : int size = iclog->ic_offset + roundoff;
1798 32324229 : __be32 cycle_lsn;
1799 32324229 : char *dp;
1800 :
1801 32324229 : cycle_lsn = CYCLE_LSN_DISK(iclog->ic_header.h_lsn);
1802 :
1803 32324229 : dp = iclog->ic_datap;
1804 1948601809 : for (i = 0; i < BTOBB(size); i++) {
1805 1916280863 : if (i >= (XLOG_HEADER_CYCLE_SIZE / BBSIZE))
1806 : break;
1807 1916277580 : iclog->ic_header.h_cycle_data[i] = *(__be32 *)dp;
1808 1916277580 : *(__be32 *)dp = cycle_lsn;
1809 1916277580 : dp += BBSIZE;
1810 : }
1811 :
1812 32324229 : if (xfs_has_logv2(log->l_mp)) {
1813 32324211 : xlog_in_core_2_t *xhdr = iclog->ic_data;
1814 :
1815 33448277 : for ( ; i < BTOBB(size); i++) {
1816 1124066 : j = i / (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
1817 1124066 : k = i % (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
1818 1124066 : xhdr[j].hic_xheader.xh_cycle_data[k] = *(__be32 *)dp;
1819 1124066 : *(__be32 *)dp = cycle_lsn;
1820 1124066 : dp += BBSIZE;
1821 : }
1822 :
1823 32344808 : for (i = 1; i < log->l_iclog_heads; i++)
1824 20597 : xhdr[i].hic_xheader.xh_cycle = cycle_lsn;
1825 : }
1826 32324229 : }
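
/*
 * Illustrative sketch of the stamping done by xlog_pack_data(): the first
 * 32 bits of every 512-byte basic block are saved into a header-side array
 * and replaced with the record's cycle number, so recovery can tell which
 * blocks of a record actually reached the disk. Only the 512-byte block size
 * mirrors the code above; the buffer and array names are assumptions.
 */
#include <stdint.h>
#include <string.h>

#define SKETCH_BBSIZE   512

static void
stamp_cycle(char *data, size_t len, uint32_t cycle_word,
            uint32_t *saved, size_t nsaved)
{
        size_t i, blocks = len / SKETCH_BBSIZE;

        for (i = 0; i < blocks && i < nsaved; i++) {
                char *bp = data + i * SKETCH_BBSIZE;

                memcpy(&saved[i], bp, sizeof(uint32_t));    /* remember old word */
                memcpy(bp, &cycle_word, sizeof(uint32_t));  /* stamp the cycle */
        }
}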
1827 :
1828 : /*
1829 : * Calculate the checksum for a log buffer.
1830 : *
1831 : * This is a little more complicated than it should be because the various
1832 : * headers and the actual data are non-contiguous.
1833 : */
1834 : __le32
1835 36377395 : xlog_cksum(
1836 : struct xlog *log,
1837 : struct xlog_rec_header *rhead,
1838 : char *dp,
1839 : int size)
1840 : {
1841 36377395 : uint32_t crc;
1842 :
1843 : /* first generate the crc for the record header ... */
1844 36377395 : crc = xfs_start_cksum_update((char *)rhead,
1845 : sizeof(struct xlog_rec_header),
1846 : offsetof(struct xlog_rec_header, h_crc));
1847 :
1848 : /* ... then for additional cycle data for v2 logs ... */
1849 36377374 : if (xfs_has_logv2(log->l_mp)) {
1850 36377382 : union xlog_in_core2 *xhdr = (union xlog_in_core2 *)rhead;
1851 36377382 : int i;
1852 36377382 : int xheads;
1853 :
1854 36377382 : xheads = DIV_ROUND_UP(size, XLOG_HEADER_CYCLE_SIZE);
1855 :
1856 36402962 : for (i = 1; i < xheads; i++) {
1857 25577 : crc = crc32c(crc, &xhdr[i].hic_xheader,
1858 : sizeof(struct xlog_rec_ext_header));
1859 : }
1860 : }
1861 :
1862 : /* ... and finally for the payload */
1863 36377390 : crc = crc32c(crc, dp, size);
1864 :
1865 36377372 : return xfs_end_cksum(crc);
1866 : }
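
/*
 * Sketch of checksumming a record whose header and payload are not
 * contiguous: checksum the header with its own CRC field excluded, then
 * fold in the payload. The crc32c() prototype is assumed to match the
 * kernel helper; the record layout, seed and finalisation below are purely
 * illustrative and do not reproduce the exact on-disk CRC convention.
 */
#include <stdint.h>
#include <stddef.h>

uint32_t crc32c(uint32_t crc, const void *buf, size_t len);     /* assumed */

struct sketch_rec_header {
        uint32_t magic;
        uint32_t crc;           /* must not contribute to its own checksum */
        uint64_t lsn;
};

static uint32_t
record_cksum(const struct sketch_rec_header *hdr, const char *payload,
             size_t payload_len)
{
        struct sketch_rec_header tmp = *hdr;
        uint32_t crc;

        tmp.crc = 0;                                    /* exclude the CRC field */
        crc = crc32c(~0U, &tmp, sizeof(tmp));           /* header first */
        return ~crc32c(crc, payload, payload_len);      /* then the payload */
}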
1867 :
1868 : static void
1869 32324231 : xlog_bio_end_io(
1870 : struct bio *bio)
1871 : {
1872 32324231 : struct xlog_in_core *iclog = bio->bi_private;
1873 :
1874 32324231 : queue_work(iclog->ic_log->l_ioend_workqueue,
1875 : &iclog->ic_end_io_work);
1876 32324231 : }
1877 :
1878 : static int
1879 32324118 : xlog_map_iclog_data(
1880 : struct bio *bio,
1881 : void *data,
1882 : size_t count)
1883 : {
1884 32331432 : do {
1885 32331432 : struct page *page = kmem_to_page(data);
1886 32331389 : unsigned int off = offset_in_page(data);
1887 32331389 : size_t len = min_t(size_t, count, PAGE_SIZE - off);
1888 :
1889 32331389 : if (bio_add_page(bio, page, len, off) != len)
1890 : return -EIO;
1891 :
1892 32331398 : data += len;
1893 32331398 : count -= len;
1894 32331398 : } while (count);
1895 :
1896 : return 0;
1897 : }
1898 :
1899 : STATIC void
1900 32324180 : xlog_write_iclog(
1901 : struct xlog *log,
1902 : struct xlog_in_core *iclog,
1903 : uint64_t bno,
1904 : unsigned int count)
1905 : {
1906 32324180 : ASSERT(bno < log->l_logBBsize);
1907 32324180 : trace_xlog_iclog_write(iclog, _RET_IP_);
1908 :
1909 : /*
1910 : * We lock the iclogbufs here so that we can serialise against I/O
1911 : * completion during unmount. We might be processing a shutdown
1912 : * triggered during unmount, and that can occur asynchronously to the
1913 :           * unmount thread, and hence we need to ensure that it completes before
1914 :           * tearing down the iclogbufs. Hence we need to hold the buffer lock
1915 :           * across the log IO to achieve that.
1916 : */
1917 32324217 : down(&iclog->ic_sema);
1918 64648366 : if (xlog_is_shutdown(log)) {
1919 : /*
1920 : * It would seem logical to return EIO here, but we rely on
1921 : * the log state machine to propagate I/O errors instead of
1922 :                   * doing it here. We kick off the state machine and unlock
1923 : * the buffer manually, the code needs to be kept in sync
1924 : * with the I/O completion path.
1925 : */
1926 7 : xlog_state_done_syncing(iclog);
1927 7 : up(&iclog->ic_sema);
1928 7 : return;
1929 : }
1930 :
1931 : /*
1932 :           * We use REQ_SYNC | REQ_IDLE here to tell the block layer that there are
1933 : * IOs coming immediately after this one. This prevents the block layer
1934 : * writeback throttle from throttling log writes behind background
1935 : * metadata writeback and causing priority inversions.
1936 : */
1937 32324176 : bio_init(&iclog->ic_bio, xfs_buftarg_bdev(log->l_targ), iclog->ic_bvec,
1938 32324176 : howmany(count, PAGE_SIZE),
1939 : REQ_OP_WRITE | REQ_META | REQ_SYNC | REQ_IDLE);
1940 32324151 : iclog->ic_bio.bi_iter.bi_sector = log->l_logBBstart + bno;
1941 32324151 : iclog->ic_bio.bi_end_io = xlog_bio_end_io;
1942 32324151 : iclog->ic_bio.bi_private = iclog;
1943 :
1944 32324151 : if (iclog->ic_flags & XLOG_ICL_NEED_FLUSH) {
1945 5975564 : iclog->ic_bio.bi_opf |= REQ_PREFLUSH;
1946 : /*
1947 : * For external log devices, we also need to flush the data
1948 : * device cache first to ensure all metadata writeback covered
1949 : * by the LSN in this iclog is on stable storage. This is slow,
1950 : * but it *must* complete before we issue the external log IO.
1951 : *
1952 : * If the flush fails, we cannot conclude that past metadata
1953 : * writeback from the log succeeded. Repeating the flush is
1954 : * not possible, hence we must shut down with log IO error to
1955 : * avoid shutdown re-entering this path and erroring out again.
1956 : */
1957 5975568 : if (log->l_targ != log->l_mp->m_ddev_targp &&
1958 4 : xfs_buftarg_flush(log->l_mp->m_ddev_targp)) {
1959 0 : xlog_force_shutdown(log, SHUTDOWN_LOG_IO_ERROR);
1960 0 : return;
1961 : }
1962 : }
1963 32324151 : if (iclog->ic_flags & XLOG_ICL_NEED_FUA)
1964 3740829 : iclog->ic_bio.bi_opf |= REQ_FUA;
1965 :
1966 32324151 : iclog->ic_flags &= ~(XLOG_ICL_NEED_FLUSH | XLOG_ICL_NEED_FUA);
1967 :
1968 32324151 : if (xlog_map_iclog_data(&iclog->ic_bio, iclog->ic_data, count)) {
1969 0 : xlog_force_shutdown(log, SHUTDOWN_LOG_IO_ERROR);
1970 0 : return;
1971 : }
1972 32324068 : if (is_vmalloc_addr(iclog->ic_data))
1973 : flush_kernel_vmap_range(iclog->ic_data, count);
1974 :
1975 : /*
1976 : * If this log buffer would straddle the end of the log we will have
1977 : * to split it up into two bios, so that we can continue at the start.
1978 : */
1979 32324063 : if (bno + BTOBB(count) > log->l_logBBsize) {
1980 12636 : struct bio *split;
1981 :
1982 12636 : split = bio_split(&iclog->ic_bio, log->l_logBBsize - bno,
1983 : GFP_NOIO, &fs_bio_set);
1984 12636 : bio_chain(split, &iclog->ic_bio);
1985 12636 : submit_bio(split);
1986 :
1987 : /* restart at logical offset zero for the remainder */
1988 12636 : iclog->ic_bio.bi_iter.bi_sector = log->l_logBBstart;
1989 : }
1990 :
1991 32324063 : submit_bio(&iclog->ic_bio);
1992 : }
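
/*
 * Toy model of the split performed above when a log write straddles the
 * physical end of the circular log: the first piece is issued at 'bno' and
 * the remainder restarts at block zero. submit() is a hypothetical stand-in
 * for whatever actually performs the I/O; counts are in 512-byte blocks.
 */
#include <stdint.h>

static void
write_wrapped(uint32_t bno, uint32_t nblocks, uint32_t log_size,
              void (*submit)(uint32_t bno, uint32_t nblocks))
{
        if (bno + nblocks > log_size) {
                uint32_t first = log_size - bno;

                submit(bno, first);             /* tail of the physical log */
                submit(0, nblocks - first);     /* wrap to the start */
        } else {
                submit(bno, nblocks);
        }
}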
1993 :
1994 : /*
1995 : * We need to bump cycle number for the part of the iclog that is
1996 : * written to the start of the log. Watch out for the header magic
1997 : * number case, though.
1998 : */
1999 : static void
2000 12636 : xlog_split_iclog(
2001 : struct xlog *log,
2002 : void *data,
2003 : uint64_t bno,
2004 : unsigned int count)
2005 : {
2006 12636 : unsigned int split_offset = BBTOB(log->l_logBBsize - bno);
2007 12636 : unsigned int i;
2008 :
2009 411012 : for (i = split_offset; i < count; i += BBSIZE) {
2010 398376 : uint32_t cycle = get_unaligned_be32(data + i);
2011 :
2012 398376 : if (++cycle == XLOG_HEADER_MAGIC_NUM)
2013 0 : cycle++;
2014 398376 : put_unaligned_be32(cycle, data + i);
2015 : }
2016 12636 : }
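
/*
 * Minimal sketch of the cycle bump applied to the wrapped part of a record:
 * each basic block written at the physical start of the log carries the next
 * cycle number, and a value that would collide with the record-header magic
 * (XLOG_HEADER_MAGIC_NUM, 0xFEEDbabe) is skipped so recovery never mistakes
 * data for a header.
 */
#include <stdint.h>

static uint32_t
bump_wrapped_cycle(uint32_t cycle)
{
        if (++cycle == 0xFEEDbabe)      /* XLOG_HEADER_MAGIC_NUM */
                cycle++;
        return cycle;
}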
2017 :
2018 : static int
2019 32324225 : xlog_calc_iclog_size(
2020 : struct xlog *log,
2021 : struct xlog_in_core *iclog,
2022 : uint32_t *roundoff)
2023 : {
2024 32324225 : uint32_t count_init, count;
2025 :
2026 : /* Add for LR header */
2027 32324225 : count_init = log->l_iclog_hsize + iclog->ic_offset;
2028 32324225 : count = roundup(count_init, log->l_iclog_roundoff);
2029 :
2030 32324225 : *roundoff = count - count_init;
2031 :
2032 32324225 : ASSERT(count >= count_init);
2033 32324225 : ASSERT(*roundoff < log->l_iclog_roundoff);
2034 32324225 : return count;
2035 : }
2036 :
2037 : /*
2038 : * Flush out the in-core log (iclog) to the on-disk log in an asynchronous
2039 :  * fashion. By this point, the caller should already have moved the current
2040 :  * iclog pointer in the log to the next available iclog. This allows further
2041 :  * writes to continue while this code syncs out an iclog that is ready to go.
2042 : * Before an in-core log can be written out, the data section must be scanned
2043 : * to save away the 1st word of each BBSIZE block into the header. We replace
2044 : * it with the current cycle count. Each BBSIZE block is tagged with the
2045 :  * cycle count because there is an implicit assumption that drives will
2046 : * guarantee that entire 512 byte blocks get written at once. In other words,
2047 : * we can't have part of a 512 byte block written and part not written. By
2048 : * tagging each block, we will know which blocks are valid when recovering
2049 : * after an unclean shutdown.
2050 : *
2051 : * This routine is single threaded on the iclog. No other thread can be in
2052 : * this routine with the same iclog. Changing contents of iclog can there-
2053 : * fore be done without grabbing the state machine lock. Updating the global
2054 : * log will require grabbing the lock though.
2055 : *
2056 : * The entire log manager uses a logical block numbering scheme. Only
2057 : * xlog_write_iclog knows about the fact that the log may not start with
2058 : * block zero on a given device.
2059 : */
2060 : STATIC void
2061 32324219 : xlog_sync(
2062 : struct xlog *log,
2063 : struct xlog_in_core *iclog,
2064 : struct xlog_ticket *ticket)
2065 : {
2066 32324219 : unsigned int count; /* byte count of bwrite */
2067 32324219 : unsigned int roundoff; /* roundoff to BB or stripe */
2068 32324219 : uint64_t bno;
2069 32324219 : unsigned int size;
2070 :
2071 32324219 : ASSERT(atomic_read(&iclog->ic_refcnt) == 0);
2072 32324219 : trace_xlog_iclog_sync(iclog, _RET_IP_);
2073 :
2074 32324231 : count = xlog_calc_iclog_size(log, iclog, &roundoff);
2075 :
2076 : /*
2077 : * If we have a ticket, account for the roundoff via the ticket
2078 : * reservation to avoid touching the hot grant heads needlessly.
2079 : * Otherwise, we have to move grant heads directly.
2080 : */
2081 32324232 : if (ticket) {
2082 29417562 : ticket->t_curr_res -= roundoff;
2083 : } else {
2084 2906670 : xlog_grant_add_space(log, &log->l_reserve_head.grant, roundoff);
2085 2906671 : xlog_grant_add_space(log, &log->l_write_head.grant, roundoff);
2086 : }
2087 :
2088 : /* put cycle number in every block */
2089 32324232 : xlog_pack_data(log, iclog, roundoff);
2090 :
2091 : /* real byte length */
2092 32324215 : size = iclog->ic_offset;
2093 32324215 : if (xfs_has_logv2(log->l_mp))
2094 32324211 : size += roundoff;
2095 32324215 : iclog->ic_header.h_len = cpu_to_be32(size);
2096 :
2097 32324215 : XFS_STATS_INC(log->l_mp, xs_log_writes);
2098 32324215 : XFS_STATS_ADD(log->l_mp, xs_log_blocks, BTOBB(count));
2099 :
2100 32324215 : bno = BLOCK_LSN(be64_to_cpu(iclog->ic_header.h_lsn));
2101 :
2102 : /* Do we need to split this write into 2 parts? */
2103 32324215 : if (bno + BTOBB(count) > log->l_logBBsize)
2104 12636 : xlog_split_iclog(log, &iclog->ic_header, bno, count);
2105 :
2106 :          /* calculate the checksum */
2107 64648381 : iclog->ic_header.h_crc = xlog_cksum(log, &iclog->ic_header,
2108 32324215 : iclog->ic_datap, size);
2109 : /*
2110 : * Intentionally corrupt the log record CRC based on the error injection
2111 : * frequency, if defined. This facilitates testing log recovery in the
2112 : * event of torn writes. Hence, set the IOABORT state to abort the log
2113 : * write on I/O completion and shutdown the fs. The subsequent mount
2114 : * detects the bad CRC and attempts to recover.
2115 : */
2116 : #ifdef DEBUG
2117 32324166 : if (XFS_TEST_ERROR(false, log->l_mp, XFS_ERRTAG_LOG_BAD_CRC)) {
2118 12 : iclog->ic_header.h_crc &= cpu_to_le32(0xAAAAAAAA);
2119 12 : iclog->ic_fail_crc = true;
2120 12 : xfs_warn(log->l_mp,
2121 : "Intentionally corrupted log record at LSN 0x%llx. Shutdown imminent.",
2122 : be64_to_cpu(iclog->ic_header.h_lsn));
2123 : }
2124 : #endif
2125 32324157 : xlog_verify_iclog(log, iclog, count);
2126 32324220 : xlog_write_iclog(log, iclog, bno, count);
2127 32324183 : }
2128 :
2129 : /*
2130 : * Deallocate a log structure
2131 : */
2132 : STATIC void
2133 24339 : xlog_dealloc_log(
2134 : struct xlog *log)
2135 : {
2136 24339 : xlog_in_core_t *iclog, *next_iclog;
2137 24339 : int i;
2138 :
2139 : /*
2140 : * Destroy the CIL after waiting for iclog IO completion because an
2141 : * iclog EIO error will try to shut down the log, which accesses the
2142 : * CIL to wake up the waiters.
2143 : */
2144 24339 : xlog_cil_destroy(log);
2145 :
2146 24339 : iclog = log->l_iclog;
2147 219039 : for (i = 0; i < log->l_iclog_bufs; i++) {
2148 194700 : next_iclog = iclog->ic_next;
2149 194700 : kmem_free(iclog->ic_data);
2150 194700 : kmem_free(iclog);
2151 194700 : iclog = next_iclog;
2152 : }
2153 :
2154 24339 : log->l_mp->m_log = NULL;
2155 24339 : destroy_workqueue(log->l_ioend_workqueue);
2156 24339 : kmem_free(log);
2157 24339 : }
2158 :
2159 : /*
2160 : * Update counters atomically now that memcpy is done.
2161 : */
2162 : static inline void
2163 : xlog_state_finish_copy(
2164 : struct xlog *log,
2165 : struct xlog_in_core *iclog,
2166 : int record_cnt,
2167 : int copy_bytes)
2168 : {
2169 36289973 : lockdep_assert_held(&log->l_icloglock);
2170 :
2171 36289973 : be32_add_cpu(&iclog->ic_header.h_num_logops, record_cnt);
2172 36289972 : iclog->ic_offset += copy_bytes;
2173 : }
2174 :
2175 : /*
2176 : * print out info relating to regions written which consume
2177 : * the reservation
2178 : */
2179 : void
2180 0 : xlog_print_tic_res(
2181 : struct xfs_mount *mp,
2182 : struct xlog_ticket *ticket)
2183 : {
2184 0 : xfs_warn(mp, "ticket reservation summary:");
2185 0 : xfs_warn(mp, " unit res = %d bytes", ticket->t_unit_res);
2186 0 : xfs_warn(mp, " current res = %d bytes", ticket->t_curr_res);
2187 0 : xfs_warn(mp, " original count = %d", ticket->t_ocnt);
2188 0 : xfs_warn(mp, " remaining count = %d", ticket->t_cnt);
2189 0 : }
2190 :
2191 : /*
2192 : * Print a summary of the transaction.
2193 : */
2194 : void
2195 0 : xlog_print_trans(
2196 : struct xfs_trans *tp)
2197 : {
2198 0 : struct xfs_mount *mp = tp->t_mountp;
2199 0 : struct xfs_log_item *lip;
2200 :
2201 : /* dump core transaction and ticket info */
2202 0 : xfs_warn(mp, "transaction summary:");
2203 0 : xfs_warn(mp, " log res = %d", tp->t_log_res);
2204 0 : xfs_warn(mp, " log count = %d", tp->t_log_count);
2205 0 : xfs_warn(mp, " flags = 0x%x", tp->t_flags);
2206 :
2207 0 : xlog_print_tic_res(mp, tp->t_ticket);
2208 :
2209 : /* dump each log item */
2210 0 : list_for_each_entry(lip, &tp->t_items, li_trans) {
2211 0 : struct xfs_log_vec *lv = lip->li_lv;
2212 0 : struct xfs_log_iovec *vec;
2213 0 : int i;
2214 :
2215 0 : xfs_warn(mp, "log item: ");
2216 0 : xfs_warn(mp, " type = 0x%x", lip->li_type);
2217 0 : xfs_warn(mp, " flags = 0x%lx", lip->li_flags);
2218 0 : if (!lv)
2219 0 : continue;
2220 0 : xfs_warn(mp, " niovecs = %d", lv->lv_niovecs);
2221 0 : xfs_warn(mp, " size = %d", lv->lv_size);
2222 0 : xfs_warn(mp, " bytes = %d", lv->lv_bytes);
2223 0 : xfs_warn(mp, " buf len = %d", lv->lv_buf_len);
2224 :
2225 : /* dump each iovec for the log item */
2226 0 : vec = lv->lv_iovecp;
2227 0 : for (i = 0; i < lv->lv_niovecs; i++) {
2228 0 : int dumplen = min(vec->i_len, 32);
2229 :
2230 0 : xfs_warn(mp, " iovec[%d]", i);
2231 0 : xfs_warn(mp, " type = 0x%x", vec->i_type);
2232 0 : xfs_warn(mp, " len = %d", vec->i_len);
2233 0 : xfs_warn(mp, " first %d bytes of iovec[%d]:", dumplen, i);
2234 0 : xfs_hex_dump(vec->i_addr, dumplen);
2235 :
2236 0 : vec++;
2237 : }
2238 : }
2239 0 : }
2240 :
2241 : static inline void
2242 2861079169 : xlog_write_iovec(
2243 : struct xlog_in_core *iclog,
2244 : uint32_t *log_offset,
2245 : void *data,
2246 : uint32_t write_len,
2247 : int *bytes_left,
2248 : uint32_t *record_cnt,
2249 : uint32_t *data_cnt)
2250 : {
2251 2861079169 : ASSERT(*log_offset < iclog->ic_log->l_iclog_size);
2252 2861079169 : ASSERT(*log_offset % sizeof(int32_t) == 0);
2253 2861079169 : ASSERT(write_len % sizeof(int32_t) == 0);
2254 :
2255 5722158338 : memcpy(iclog->ic_datap + *log_offset, data, write_len);
2256 2861079169 : *log_offset += write_len;
2257 2861079169 : *bytes_left -= write_len;
2258 2861079169 : (*record_cnt)++;
2259 2861079169 : *data_cnt += write_len;
2260 2861079169 : }
2261 :
2262 : /*
2263 : * Write log vectors into a single iclog which is guaranteed by the caller
2264 : * to have enough space to write the entire log vector into.
2265 : */
2266 : static void
2267 1016291919 : xlog_write_full(
2268 : struct xfs_log_vec *lv,
2269 : struct xlog_ticket *ticket,
2270 : struct xlog_in_core *iclog,
2271 : uint32_t *log_offset,
2272 : uint32_t *len,
2273 : uint32_t *record_cnt,
2274 : uint32_t *data_cnt)
2275 : {
2276 1016291919 : int index;
2277 :
2278 1016291919 : ASSERT(*log_offset + *len <= iclog->ic_size ||
2279 : iclog->ic_state == XLOG_STATE_WANT_SYNC);
2280 :
2281 : /*
2282 : * Ordered log vectors have no regions to write so this
2283 : * loop will naturally skip them.
2284 : */
2285 3770669258 : for (index = 0; index < lv->lv_niovecs; index++) {
2286 2754362087 : struct xfs_log_iovec *reg = &lv->lv_iovecp[index];
2287 2754362087 : struct xlog_op_header *ophdr = reg->i_addr;
2288 :
2289 2754362087 : ophdr->oh_tid = cpu_to_be32(ticket->t_tid);
2290 2754362087 : xlog_write_iovec(iclog, log_offset, reg->i_addr,
2291 2754362087 : reg->i_len, len, record_cnt, data_cnt);
2292 : }
2293 1016307171 : }
2294 :
2295 : static int
2296 28764573 : xlog_write_get_more_iclog_space(
2297 : struct xlog_ticket *ticket,
2298 : struct xlog_in_core **iclogp,
2299 : uint32_t *log_offset,
2300 : uint32_t len,
2301 : uint32_t *record_cnt,
2302 : uint32_t *data_cnt)
2303 : {
2304 28764573 : struct xlog_in_core *iclog = *iclogp;
2305 28764573 : struct xlog *log = iclog->ic_log;
2306 28764573 : int error;
2307 :
2308 28764573 : spin_lock(&log->l_icloglock);
2309 28764617 : ASSERT(iclog->ic_state == XLOG_STATE_WANT_SYNC);
2310 28764617 : xlog_state_finish_copy(log, iclog, *record_cnt, *data_cnt);
2311 28764616 : error = xlog_state_release_iclog(log, iclog, ticket);
2312 28764616 : spin_unlock(&log->l_icloglock);
2313 28764609 : if (error)
2314 : return error;
2315 :
2316 28764591 : error = xlog_state_get_iclog_space(log, len, &iclog, ticket,
2317 : log_offset);
2318 28764565 : if (error)
2319 : return error;
2320 28761571 : *record_cnt = 0;
2321 28761571 : *data_cnt = 0;
2322 28761571 : *iclogp = iclog;
2323 28761571 : return 0;
2324 : }
2325 :
2326 : /*
2327 : * Write log vectors into a single iclog which is smaller than the current chain
2328 : * length. We write until we cannot fit a full record into the remaining space
2329 : * and then stop. We return the log vector that is to be written that cannot
2330 : * wholly fit in the iclog.
2331 : */
2332 : static int
2333 28764021 : xlog_write_partial(
2334 : struct xfs_log_vec *lv,
2335 : struct xlog_ticket *ticket,
2336 : struct xlog_in_core **iclogp,
2337 : uint32_t *log_offset,
2338 : uint32_t *len,
2339 : uint32_t *record_cnt,
2340 : uint32_t *data_cnt)
2341 : {
2342 28764021 : struct xlog_in_core *iclog = *iclogp;
2343 28764021 : struct xlog_op_header *ophdr;
2344 28764021 : int index = 0;
2345 28764021 : uint32_t rlen;
2346 28764021 : int error;
2347 :
2348 : /* walk the logvec, copying until we run out of space in the iclog */
2349 108054923 : for (index = 0; index < lv->lv_niovecs; index++) {
2350 79293952 : struct xfs_log_iovec *reg = &lv->lv_iovecp[index];
2351 79293952 : uint32_t reg_offset = 0;
2352 :
2353 : /*
2354 : * The first region of a continuation must have a non-zero
2355 : * length otherwise log recovery will just skip over it and
2356 : * start recovering from the next opheader it finds. Because we
2357 : * mark the next opheader as a continuation, recovery will then
2358 : * incorrectly add the continuation to the previous region and
2359 : * that breaks stuff.
2360 : *
2361 : * Hence if there isn't space for region data after the
2362 : * opheader, then we need to start afresh with a new iclog.
2363 : */
2364 79293952 : if (iclog->ic_size - *log_offset <=
2365 : sizeof(struct xlog_op_header)) {
2366 1332175 : error = xlog_write_get_more_iclog_space(ticket,
2367 : &iclog, log_offset, *len, record_cnt,
2368 : data_cnt);
2369 1332175 : if (error)
2370 109 : return error;
2371 : }
2372 :
2373 79293843 : ophdr = reg->i_addr;
2374 79293843 : rlen = min_t(uint32_t, reg->i_len, iclog->ic_size - *log_offset);
2375 :
2376 79293843 : ophdr->oh_tid = cpu_to_be32(ticket->t_tid);
2377 79293843 : ophdr->oh_len = cpu_to_be32(rlen - sizeof(struct xlog_op_header));
2378 79293843 : if (rlen != reg->i_len)
2379 27431922 : ophdr->oh_flags |= XLOG_CONTINUE_TRANS;
2380 :
2381 79293843 : xlog_write_iovec(iclog, log_offset, reg->i_addr,
2382 : rlen, len, record_cnt, data_cnt);
2383 :
2384 : /* If we wrote the whole region, move to the next. */
2385 79293794 : if (rlen == reg->i_len)
2386 51862007 : continue;
2387 :
2388 : /*
2389 : * We now have a partially written iovec, but it can span
2390 : * multiple iclogs so we loop here. First we release the iclog
2391 : * we currently have, then we get a new iclog and add a new
2392 : * opheader. Then we continue copying from where we were until
2393 : * we either complete the iovec or fill the iclog. If we
2394 : * complete the iovec, then we increment the index and go right
2395 : * back to the top of the outer loop. if we fill the iclog, we
2396 : * run the inner loop again.
2397 : *
2398 : * This is complicated by the tail of a region using all the
2399 : * space in an iclog and hence requiring us to release the iclog
2400 : * and get a new one before returning to the outer loop. We must
2401 : * always guarantee that we exit this inner loop with at least
2402 : * space for log transaction opheaders left in the current
2403 : * iclog, hence we cannot just terminate the loop at the end
2404 :           * of the continuation. So we loop while there is no
2405 : * space left in the current iclog, and check for the end of the
2406 : * continuation after getting a new iclog.
2407 : */
2408 27432387 : do {
2409 : /*
2410 : * Ensure we include the continuation opheader in the
2411 : * space we need in the new iclog by adding that size
2412 : * to the length we require. This continuation opheader
2413 : * needs to be accounted to the ticket as the space it
2414 : * consumes hasn't been accounted to the lv we are
2415 : * writing.
2416 : */
2417 27432387 : error = xlog_write_get_more_iclog_space(ticket,
2418 : &iclog, log_offset,
2419 27432387 : *len + sizeof(struct xlog_op_header),
2420 : record_cnt, data_cnt);
2421 27432412 : if (error)
2422 2900 : return error;
2423 :
2424 27429512 : ophdr = iclog->ic_datap + *log_offset;
2425 27429512 : ophdr->oh_tid = cpu_to_be32(ticket->t_tid);
2426 27429512 : ophdr->oh_clientid = XFS_TRANSACTION;
2427 27429512 : ophdr->oh_res2 = 0;
2428 27429512 : ophdr->oh_flags = XLOG_WAS_CONT_TRANS;
2429 :
2430 27429512 : ticket->t_curr_res -= sizeof(struct xlog_op_header);
2431 27429512 : *log_offset += sizeof(struct xlog_op_header);
2432 27429512 : *data_cnt += sizeof(struct xlog_op_header);
2433 :
2434 : /*
2435 : * If rlen fits in the iclog, then end the region
2436 : * continuation. Otherwise we're going around again.
2437 : */
2438 27429512 : reg_offset += rlen;
2439 27429512 : rlen = reg->i_len - reg_offset;
2440 27429512 : if (rlen <= iclog->ic_size - *log_offset)
2441 27429010 : ophdr->oh_flags |= XLOG_END_TRANS;
2442 : else
2443 502 : ophdr->oh_flags |= XLOG_CONTINUE_TRANS;
2444 :
2445 27429512 : rlen = min_t(uint32_t, rlen, iclog->ic_size - *log_offset);
2446 27429512 : ophdr->oh_len = cpu_to_be32(rlen);
2447 :
2448 27429512 : xlog_write_iovec(iclog, log_offset,
2449 27429512 : reg->i_addr + reg_offset,
2450 : rlen, len, record_cnt, data_cnt);
2451 :
2452 27429495 : } while (ophdr->oh_flags & XLOG_CONTINUE_TRANS);
2453 : }
2454 :
2455 : /*
2456 : * No more iovecs remain in this logvec so return the next log vec to
2457 : * the caller so it can go back to fast path copying.
2458 : */
2459 28760971 : *iclogp = iclog;
2460 28760971 : return 0;
2461 : }
2462 :
2463 : /*
2464 : * Write some region out to in-core log
2465 : *
2466 : * This will be called when writing externally provided regions or when
2467 : * writing out a commit record for a given transaction.
2468 : *
2469 : * General algorithm:
2470 : * 1. Find total length of this write. This may include adding to the
2471 : * lengths passed in.
2472 :  *      2. Check whether we violate the ticket's reservation.
2473 : * 3. While writing to this iclog
2474 :  *    A. Reserve as much space in this iclog as we can get
2475 : * B. If this is first write, save away start lsn
2476 : * C. While writing this region:
2477 : * 1. If first write of transaction, write start record
2478 : * 2. Write log operation header (header per region)
2479 : * 3. Find out if we can fit entire region into this iclog
2480 : * 4. Potentially, verify destination memcpy ptr
2481 : * 5. Memcpy (partial) region
2482 : * 6. If partial copy, release iclog; otherwise, continue
2483 : * copying more regions into current iclog
2484 : * 4. Mark want sync bit (in simulation mode)
2485 : * 5. Release iclog for potential flush to on-disk log.
2486 : *
2487 : * ERRORS:
2488 : * 1. Panic if reservation is overrun. This should never happen since
2489 : * reservation amounts are generated internal to the filesystem.
2490 : * NOTES:
2491 : * 1. Tickets are single threaded data structures.
2492 : * 2. The XLOG_END_TRANS & XLOG_CONTINUE_TRANS flags are passed down to the
2493 : * syncing routine. When a single log_write region needs to span
2494 : * multiple in-core logs, the XLOG_CONTINUE_TRANS bit should be set
2495 : * on all log operation writes which don't contain the end of the
2496 : * region. The XLOG_END_TRANS bit is used for the in-core log
2497 : * operation which contains the end of the continued log_write region.
2498 : * 3. When xlog_state_get_iclog_space() grabs the rest of the current iclog,
2499 : * we don't really know exactly how much space will be used. As a result,
2500 : * we don't update ic_offset until the end when we know exactly how many
2501 : * bytes have been written out.
2502 : */
2503 : int
2504 7528366 : xlog_write(
2505 : struct xlog *log,
2506 : struct xfs_cil_ctx *ctx,
2507 : struct list_head *lv_chain,
2508 : struct xlog_ticket *ticket,
2509 : uint32_t len)
2510 :
2511 : {
2512 7528366 : struct xlog_in_core *iclog = NULL;
2513 7528366 : struct xfs_log_vec *lv;
2514 7528366 : uint32_t record_cnt = 0;
2515 7528366 : uint32_t data_cnt = 0;
2516 7528366 : int error = 0;
2517 7528366 : int log_offset;
2518 :
2519 7528366 : if (ticket->t_curr_res < 0) {
2520 0 : xfs_alert_tag(log->l_mp, XFS_PTAG_LOGRES,
2521 : "ctx ticket reservation ran out. Need to up reservation");
2522 0 : xlog_print_tic_res(log->l_mp, ticket);
2523 0 : xlog_force_shutdown(log, SHUTDOWN_LOG_IO_ERROR);
2524 : }
2525 :
2526 7528366 : error = xlog_state_get_iclog_space(log, len, &iclog, ticket,
2527 : &log_offset);
2528 7528354 : if (error)
2529 : return error;
2530 :
2531 7528354 : ASSERT(log_offset <= iclog->ic_size - 1);
2532 :
2533 : /*
2534 : * If we have a context pointer, pass it the first iclog we are
2535 : * writing to so it can record state needed for iclog write
2536 : * ordering.
2537 : */
2538 7528354 : if (ctx)
2539 7513477 : xlog_cil_set_ctx_write_state(ctx, iclog);
2540 :
2541 1052576146 : list_for_each_entry(lv, lv_chain, lv_list) {
2542 : /*
2543 : * If the entire log vec does not fit in the iclog, punt it to
2544 : * the partial copy loop which can handle this case.
2545 : */
2546 1045050796 : if (lv->lv_niovecs &&
2547 1044022302 : lv->lv_bytes > iclog->ic_size - log_offset) {
2548 28764014 : error = xlog_write_partial(lv, ticket, &iclog,
2549 : &log_offset, &len, &record_cnt,
2550 : &data_cnt);
2551 28763982 : if (error) {
2552 : /*
2553 : * We have no iclog to release, so just return
2554 : * the error immediately.
2555 : */
2556 3009 : return error;
2557 : }
2558 : } else {
2559 1016286782 : xlog_write_full(lv, ticket, iclog, &log_offset,
2560 : &len, &record_cnt, &data_cnt);
2561 : }
2562 : }
2563 7525350 : ASSERT(len == 0);
2564 :
2565 : /*
2566 : * We've already been guaranteed that the last writes will fit inside
2567 : * the current iclog, and hence it will already have the space used by
2568 : * those writes accounted to it. Hence we do not need to update the
2569 : * iclog with the number of bytes written here.
2570 : */
2571 7525350 : spin_lock(&log->l_icloglock);
2572 7525356 : xlog_state_finish_copy(log, iclog, record_cnt, 0);
2573 7525356 : error = xlog_state_release_iclog(log, iclog, ticket);
2574 7525354 : spin_unlock(&log->l_icloglock);
2575 :
2576 7525354 : return error;
2577 : }
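
/*
 * Simplified model of the copy loop above: a single region is copied into
 * fixed-size buffers, as much as fits goes into the current buffer, the
 * remainder continues in the next one, and each piece is preceded by a small
 * op header noting whether the region continues. Everything here (names,
 * header layout, the flat destination buffer of pieces * bufsize bytes) is
 * an illustrative assumption, not the on-disk xlog_op_header format.
 */
#include <stdint.h>
#include <string.h>

struct sketch_ophdr {
        uint32_t len;           /* bytes of region data in this piece */
        uint8_t continued;      /* non-zero if more pieces follow */
};

static size_t
copy_region(char *dst, size_t bufsize, const char *src, size_t len)
{
        size_t copied = 0, pieces = 0;

        while (copied < len) {
                struct sketch_ophdr oh;
                size_t room = bufsize - sizeof(oh);
                size_t chunk = len - copied < room ? len - copied : room;

                oh.len = (uint32_t)chunk;
                oh.continued = (copied + chunk < len);
                memcpy(dst, &oh, sizeof(oh));
                memcpy(dst + sizeof(oh), src + copied, chunk);

                copied += chunk;
                pieces++;
                dst += bufsize;         /* "next iclog" in this toy model */
        }
        return pieces;
}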
2578 :
2579 : static void
2580 32313715 : xlog_state_activate_iclog(
2581 : struct xlog_in_core *iclog,
2582 : int *iclogs_changed)
2583 : {
2584 64627430 : ASSERT(list_empty_careful(&iclog->ic_callbacks));
2585 32313715 : trace_xlog_iclog_activate(iclog, _RET_IP_);
2586 :
2587 : /*
2588 : * If the number of ops in this iclog indicate it just contains the
2589 :          * If the number of ops in this iclog indicates it just contains the
2590 : * around). Otherwise we should change the state into NEED a dummy.
2591 : * We don't need to cover the dummy.
2592 : */
2593 32313715 : if (*iclogs_changed == 0 &&
2594 32313715 : iclog->ic_header.h_num_logops == cpu_to_be32(XLOG_COVER_OPS)) {
2595 200426 : *iclogs_changed = 1;
2596 : } else {
2597 : /*
2598 : * We have two dirty iclogs so start over. This could also be
2599 : * num of ops indicating this is not the dummy going out.
2600 : */
2601 32113289 : *iclogs_changed = 2;
2602 : }
2603 :
2604 32313715 : iclog->ic_state = XLOG_STATE_ACTIVE;
2605 32313715 : iclog->ic_offset = 0;
2606 32313715 : iclog->ic_header.h_num_logops = 0;
2607 32313715 : memset(iclog->ic_header.h_cycle_data, 0,
2608 : sizeof(iclog->ic_header.h_cycle_data));
2609 32313715 : iclog->ic_header.h_lsn = 0;
2610 32313715 : iclog->ic_header.h_tail_lsn = 0;
2611 32313715 : }
2612 :
2613 : /*
2614 : * Loop through all iclogs and mark all iclogs currently marked DIRTY as
2615 : * ACTIVE after iclog I/O has completed.
2616 : */
2617 : static void
2618 32313715 : xlog_state_activate_iclogs(
2619 : struct xlog *log,
2620 : int *iclogs_changed)
2621 : {
2622 32313715 : struct xlog_in_core *iclog = log->l_iclog;
2623 :
2624 172628553 : do {
2625 172628553 : if (iclog->ic_state == XLOG_STATE_DIRTY)
2626 32313715 : xlog_state_activate_iclog(iclog, iclogs_changed);
2627 : /*
2628 : * The ordering of marking iclogs ACTIVE must be maintained, so
2629 : * an iclog doesn't become ACTIVE beyond one that is SYNCING.
2630 : */
2631 140314838 : else if (iclog->ic_state != XLOG_STATE_ACTIVE)
2632 : break;
2633 145810233 : } while ((iclog = iclog->ic_next) != log->l_iclog);
2634 32313715 : }
2635 :
2636 : static int
2637 32313715 : xlog_covered_state(
2638 : int prev_state,
2639 : int iclogs_changed)
2640 : {
2641 : /*
2642 : * We go to NEED for any non-covering writes. We go to NEED2 if we just
2643 : * wrote the first covering record (DONE). We go to IDLE if we just
2644 : * wrote the second covering record (DONE2) and remain in IDLE until a
2645 : * non-covering write occurs.
2646 : */
2647 32313715 : switch (prev_state) {
2648 96479 : case XLOG_STATE_COVER_IDLE:
2649 96479 : if (iclogs_changed == 1)
2650 17117 : return XLOG_STATE_COVER_IDLE;
2651 : fallthrough;
2652 : case XLOG_STATE_COVER_NEED:
2653 : case XLOG_STATE_COVER_NEED2:
2654 : break;
2655 57226 : case XLOG_STATE_COVER_DONE:
2656 57226 : if (iclogs_changed == 1)
2657 57226 : return XLOG_STATE_COVER_NEED2;
2658 : break;
2659 57204 : case XLOG_STATE_COVER_DONE2:
2660 57204 : if (iclogs_changed == 1)
2661 57204 : return XLOG_STATE_COVER_IDLE;
2662 : break;
2663 0 : default:
2664 0 : ASSERT(0);
2665 : }
2666 :
2667 : return XLOG_STATE_COVER_NEED;
2668 : }
2669 :
2670 : STATIC void
2671 32313715 : xlog_state_clean_iclog(
2672 : struct xlog *log,
2673 : struct xlog_in_core *dirty_iclog)
2674 : {
2675 32313715 : int iclogs_changed = 0;
2676 :
2677 32313715 : trace_xlog_iclog_clean(dirty_iclog, _RET_IP_);
2678 :
2679 32313715 : dirty_iclog->ic_state = XLOG_STATE_DIRTY;
2680 :
2681 32313715 : xlog_state_activate_iclogs(log, &iclogs_changed);
2682 32313715 : wake_up_all(&dirty_iclog->ic_force_wait);
2683 :
2684 32313715 : if (iclogs_changed) {
2685 32313715 : log->l_covered_state = xlog_covered_state(log->l_covered_state,
2686 : iclogs_changed);
2687 : }
2688 32313715 : }
2689 :
2690 : STATIC xfs_lsn_t
2691 32313715 : xlog_get_lowest_lsn(
2692 : struct xlog *log)
2693 : {
2694 32313715 : struct xlog_in_core *iclog = log->l_iclog;
2695 32313715 : xfs_lsn_t lowest_lsn = 0, lsn;
2696 :
2697 258509696 : do {
2698 258509696 : if (iclog->ic_state == XLOG_STATE_ACTIVE ||
2699 : iclog->ic_state == XLOG_STATE_DIRTY)
2700 113711873 : continue;
2701 :
2702 144797823 : lsn = be64_to_cpu(iclog->ic_header.h_lsn);
2703 144797823 : if ((lsn && !lowest_lsn) || XFS_LSN_CMP(lsn, lowest_lsn) < 0)
2704 : lowest_lsn = lsn;
2705 258509696 : } while ((iclog = iclog->ic_next) != log->l_iclog);
2706 :
2707 32313715 : return lowest_lsn;
2708 : }
2709 :
2710 : /*
2711 :  * Completion of an iclog IO does not imply that a transaction has completed, as
2712 : * transactions can be large enough to span many iclogs. We cannot change the
2713 : * tail of the log half way through a transaction as this may be the only
2714 : * transaction in the log and moving the tail to point to the middle of it
2715 : * will prevent recovery from finding the start of the transaction. Hence we
2716 : * should only update the last_sync_lsn if this iclog contains transaction
2717 : * completion callbacks on it.
2718 : *
2719 : * We have to do this before we drop the icloglock to ensure we are the only one
2720 : * that can update it.
2721 : *
2722 : * If we are moving the last_sync_lsn forwards, we also need to ensure we kick
2723 : * the reservation grant head pushing. This is due to the fact that the push
2724 : * target is bound by the current last_sync_lsn value. Hence if we have a large
2725 : * amount of log space bound up in this committing transaction then the
2726 : * last_sync_lsn value may be the limiting factor preventing tail pushing from
2727 : * freeing space in the log. Hence once we've updated the last_sync_lsn we
2728 : * should push the AIL to ensure the push target (and hence the grant head) is
2729 : * no longer bound by the old log head location and can move forwards and make
2730 : * progress again.
2731 : */
2732 : static void
2733 32313715 : xlog_state_set_callback(
2734 : struct xlog *log,
2735 : struct xlog_in_core *iclog,
2736 : xfs_lsn_t header_lsn)
2737 : {
2738 32313715 : trace_xlog_iclog_callback(iclog, _RET_IP_);
2739 32313715 : iclog->ic_state = XLOG_STATE_CALLBACK;
2740 :
2741 64627430 : ASSERT(XFS_LSN_CMP(atomic64_read(&log->l_last_sync_lsn),
2742 : header_lsn) <= 0);
2743 :
2744 32313715 : if (list_empty_careful(&iclog->ic_callbacks))
2745 : return;
2746 :
2747 3723330 : atomic64_set(&log->l_last_sync_lsn, header_lsn);
2748 3723330 : xlog_grant_push_ail(log, 0);
2749 : }
2750 :
2751 : /*
2752 : * Return true if we need to stop processing, false to continue to the next
2753 : * iclog. The caller will need to run callbacks if the iclog is returned in the
2754 : * XLOG_STATE_CALLBACK state.
2755 : */
2756 : static bool
2757 267694359 : xlog_state_iodone_process_iclog(
2758 : struct xlog *log,
2759 : struct xlog_in_core *iclog)
2760 : {
2761 267694359 : xfs_lsn_t lowest_lsn;
2762 267694359 : xfs_lsn_t header_lsn;
2763 :
2764 267694359 : switch (iclog->ic_state) {
2765 : case XLOG_STATE_ACTIVE:
2766 : case XLOG_STATE_DIRTY:
2767 : /*
2768 : * Skip all iclogs in the ACTIVE & DIRTY states:
2769 : */
2770 : return false;
2771 32313715 : case XLOG_STATE_DONE_SYNC:
2772 : /*
2773 : * Now that we have an iclog that is in the DONE_SYNC state, do
2774 : * one more check here to see if we have chased our tail around.
2775 : * If this is not the lowest lsn iclog, then we will leave it
2776 : * for another completion to process.
2777 : */
2778 32313715 : header_lsn = be64_to_cpu(iclog->ic_header.h_lsn);
2779 32313715 : lowest_lsn = xlog_get_lowest_lsn(log);
2780 32313715 : if (lowest_lsn && XFS_LSN_CMP(lowest_lsn, header_lsn) < 0)
2781 : return false;
2782 32313715 : xlog_state_set_callback(log, iclog, header_lsn);
2783 32313715 : return false;
2784 42630406 : default:
2785 : /*
2786 : * Can only perform callbacks in order. Since this iclog is not
2787 : * in the DONE_SYNC state, we skip the rest and just try to
2788 : * clean up.
2789 : */
2790 42630406 : return true;
2791 : }
2792 : }
2793 :
2794 : /*
2795 : * Loop over all the iclogs, running attached callbacks on them. Return true if
2796 : * we ran any callbacks, indicating that we dropped the icloglock. We don't need
2797 : * to handle transient shutdown state here at all because
2798 : * xlog_state_shutdown_callbacks() will be run to do the necessary shutdown
2799 : * cleanup of the callbacks.
2800 : */
2801 : static bool
2802 53672321 : xlog_state_do_iclog_callbacks(
2803 : struct xlog *log)
2804 : __releases(&log->l_icloglock)
2805 : __acquires(&log->l_icloglock)
2806 : {
2807 53672321 : struct xlog_in_core *first_iclog = log->l_iclog;
2808 53672321 : struct xlog_in_core *iclog = first_iclog;
2809 53672321 : bool ran_callback = false;
2810 :
2811 267694359 : do {
2812 267694359 : LIST_HEAD(cb_list);
2813 :
2814 267694359 : if (xlog_state_iodone_process_iclog(log, iclog))
2815 : break;
2816 225063953 : if (iclog->ic_state != XLOG_STATE_CALLBACK) {
2817 192750238 : iclog = iclog->ic_next;
2818 192750238 : continue;
2819 : }
2820 32313715 : list_splice_init(&iclog->ic_callbacks, &cb_list);
2821 32313715 : spin_unlock(&log->l_icloglock);
2822 :
2823 32313715 : trace_xlog_iclog_callbacks_start(iclog, _RET_IP_);
2824 32313715 : xlog_cil_process_committed(&cb_list);
2825 32313715 : trace_xlog_iclog_callbacks_done(iclog, _RET_IP_);
2826 32313715 : ran_callback = true;
2827 :
2828 32313715 : spin_lock(&log->l_icloglock);
2829 32313715 : xlog_state_clean_iclog(log, iclog);
2830 32313715 : iclog = iclog->ic_next;
2831 225063953 : } while (iclog != first_iclog);
2832 :
2833 53672321 : return ran_callback;
2834 : }
2835 :
2836 :
2837 : /*
2838 : * Loop running iclog completion callbacks until there are no more iclogs in a
2839 : * state that can run callbacks.
2840 : */
2841 : STATIC void
2842 32324238 : xlog_state_do_callback(
2843 : struct xlog *log)
2844 : {
2845 32324238 : int flushcnt = 0;
2846 32324238 : int repeats = 0;
2847 :
2848 32324238 : spin_lock(&log->l_icloglock);
2849 53672321 : while (xlog_state_do_iclog_callbacks(log)) {
2850 42696174 : if (xlog_is_shutdown(log))
2851 : break;
2852 :
2853 21348083 : if (++repeats > 5000) {
2854 0 : flushcnt += repeats;
2855 0 : repeats = 0;
2856 0 : xfs_warn(log->l_mp,
2857 : "%s: possible infinite loop (%d iterations)",
2858 : __func__, flushcnt);
2859 : }
2860 : }
2861 :
2862 32324238 : if (log->l_iclog->ic_state == XLOG_STATE_ACTIVE)
2863 26422422 : wake_up_all(&log->l_flush_wait);
2864 :
2865 32324238 : spin_unlock(&log->l_icloglock);
2866 32324238 : }
2867 :
2868 :
2869 : /*
2870 : * Finish transitioning this iclog to the dirty state.
2871 : *
2872 : * Callbacks could take time, so they are done outside the scope of the
2873 : * global state machine log lock.
2874 : */
2875 : STATIC void
2876 32324238 : xlog_state_done_syncing(
2877 : struct xlog_in_core *iclog)
2878 : {
2879 32324238 : struct xlog *log = iclog->ic_log;
2880 :
2881 32324238 : spin_lock(&log->l_icloglock);
2882 32324238 : ASSERT(atomic_read(&iclog->ic_refcnt) == 0);
2883 32324238 : trace_xlog_iclog_sync_done(iclog, _RET_IP_);
2884 :
2885 : /*
2886 : * If we got an error, either on the first buffer, or in the case of
2887 : * split log writes, on the second, we shut down the file system and
2888 : * no iclogs should ever be attempted to be written to disk again.
2889 : */
2890 64648476 : if (!xlog_is_shutdown(log)) {
2891 32313753 : ASSERT(iclog->ic_state == XLOG_STATE_SYNCING);
2892 32313753 : iclog->ic_state = XLOG_STATE_DONE_SYNC;
2893 : }
2894 :
2895 : /*
2896 : * Someone could be sleeping prior to writing out the next
2897 : * iclog buffer, we wake them all, one will get to do the
2898 : * I/O, the others get to wait for the result.
2899 : */
2900 32324238 : wake_up_all(&iclog->ic_write_wait);
2901 32324238 : spin_unlock(&log->l_icloglock);
2902 32324238 : xlog_state_do_callback(log);
2903 32324238 : }
2904 :
2905 : /*
2906 : * If the head of the in-core log ring is not (ACTIVE or DIRTY), then we must
2907 : * sleep. We wait on the flush queue on the head iclog as that should be
2908 : * the first iclog to complete flushing. Hence if all iclogs are syncing,
2909 : * we will wait here and all new writes will sleep until a sync completes.
2910 : *
2911 : * The in-core logs are used in a circular fashion. They are not used
2912 : * out-of-order even when an iclog past the head is free.
2913 : *
2914 : * return:
2915 : * * log_offset where xlog_write() can start writing into the in-core
2916 : * log's data space.
2917 : * * in-core log pointer to which xlog_write() should write.
2918 : * * boolean indicating this is a continued write to an in-core log.
2919 : * If this is the last write, then the in-core log's offset field
2920 : * needs to be incremented, depending on the amount of data which
2921 : * is copied.
2922 : */
2923 : STATIC int
2924 36292949 : xlog_state_get_iclog_space(
2925 : struct xlog *log,
2926 : int len,
2927 : struct xlog_in_core **iclogp,
2928 : struct xlog_ticket *ticket,
2929 : int *logoffsetp)
2930 : {
2931 40717348 : int log_offset;
2932 40717348 : xlog_rec_header_t *head;
2933 40717348 : xlog_in_core_t *iclog;
2934 :
2935 : restart:
2936 40717348 : spin_lock(&log->l_icloglock);
2937 81434732 : if (xlog_is_shutdown(log)) {
2938 2994 : spin_unlock(&log->l_icloglock);
2939 2994 : return -EIO;
2940 : }
2941 :
2942 40714372 : iclog = log->l_iclog;
2943 40714372 : if (iclog->ic_state != XLOG_STATE_ACTIVE) {
2944 4422403 : XFS_STATS_INC(log->l_mp, xs_log_noiclogs);
2945 :
2946 : /* Wait for log writes to have flushed */
2947 4422403 : xlog_wait(&log->l_flush_wait, &log->l_icloglock);
2948 4422401 : goto restart;
2949 : }
2950 :
2951 36291969 : head = &iclog->ic_header;
2952 :
2953 36291969 : atomic_inc(&iclog->ic_refcnt); /* prevents sync */
2954 36291968 : log_offset = iclog->ic_offset;
2955 :
2956 36291968 : trace_xlog_iclog_get_space(iclog, _RET_IP_);
2957 :
2958 : /* On the 1st write to an iclog, figure out lsn. This works
2959 : * if iclogs marked XLOG_STATE_WANT_SYNC always write out what they are
2960 : * committing to. If the offset is set, that's how many blocks
2961 : * must be written.
2962 : */
2963 36291965 : if (log_offset == 0) {
2964 32325851 : ticket->t_curr_res -= log->l_iclog_hsize;
2965 32325851 : head->h_cycle = cpu_to_be32(log->l_curr_cycle);
2966 32325851 : head->h_lsn = cpu_to_be64(
2967 : xlog_assign_lsn(log->l_curr_cycle, log->l_curr_block));
2968 32325851 : ASSERT(log->l_curr_block >= 0);
2969 : }
2970 :
2971 : /* If there is enough room to write everything, then do it. Otherwise,
2972 : * claim the rest of the region and make sure the XLOG_STATE_WANT_SYNC
2973 : * bit is on, so this will get flushed out. Don't update ic_offset
2974 : * until you know exactly how many bytes get copied. Therefore, wait
2975 : * until later to update ic_offset.
2976 : *
2977 : * xlog_write() algorithm assumes that at least 2 xlog_op_header_t's
2978 : * can fit into remaining data section.
2979 : */
2980 36291965 : if (iclog->ic_size - iclog->ic_offset < 2*sizeof(xlog_op_header_t)) {
2981 1998 : int error = 0;
2982 :
2983 1998 : xlog_state_switch_iclogs(log, iclog, iclog->ic_size);
2984 :
2985 : /*
2986 : * If we are the only one writing to this iclog, sync it to
2987 : * disk. We need to do an atomic compare and decrement here to
2988 : * avoid racing with concurrent atomic_dec_and_lock() calls in
2989 : * xlog_state_release_iclog() when there is more than one
2990 : * reference to the iclog.
2991 : */
2992 3996 : if (!atomic_add_unless(&iclog->ic_refcnt, -1, 1))
2993 1992 : error = xlog_state_release_iclog(log, iclog, ticket);
2994 1998 : spin_unlock(&log->l_icloglock);
2995 1998 : if (error)
2996 0 : return error;
2997 1998 : goto restart;
2998 : }
2999 :
3000 : /* Do we have enough room to write the full amount in the remainder
3001 : * of this iclog? Or must we continue a write on the next iclog and
3002 : * mark this iclog as completely taken? In the case where we switch
3003 : * iclogs (to mark it taken), this particular iclog will release/sync
3004 : * to disk in xlog_write().
3005 : */
3006 36289967 : if (len <= iclog->ic_size - iclog->ic_offset)
3007 7525355 : iclog->ic_offset += len;
3008 : else
3009 28764612 : xlog_state_switch_iclogs(log, iclog, iclog->ic_size);
3010 36289969 : *iclogp = iclog;
3011 :
3012 36289969 : ASSERT(iclog->ic_offset <= iclog->ic_size);
3013 36289969 : spin_unlock(&log->l_icloglock);
3014 :
3015 36289955 : *logoffsetp = log_offset;
3016 36289955 : return 0;
3017 : }
3018 :
3019 : /*
3020 : * The first cnt-1 times a ticket goes through here we don't need to move the
3021 : * grant write head because the permanent reservation has reserved cnt times the
3022 : * unit amount. Release part of current permanent unit reservation and reset
3023 :  * current reservation to be one unit's worth. Also move grant reservation head
3024 : * forward.
3025 : */
3026 : void
3027 962449974 : xfs_log_ticket_regrant(
3028 : struct xlog *log,
3029 : struct xlog_ticket *ticket)
3030 : {
3031 962449974 : trace_xfs_log_ticket_regrant(log, ticket);
3032 :
3033 962451897 : if (ticket->t_cnt > 0)
3034 750902986 : ticket->t_cnt--;
3035 :
3036 962451897 : xlog_grant_sub_space(log, &log->l_reserve_head.grant,
3037 : ticket->t_curr_res);
3038 962457653 : xlog_grant_sub_space(log, &log->l_write_head.grant,
3039 : ticket->t_curr_res);
3040 962456372 : ticket->t_curr_res = ticket->t_unit_res;
3041 :
3042 962456372 : trace_xfs_log_ticket_regrant_sub(log, ticket);
3043 :
3044 : /* just return if we still have some of the pre-reserved space */
3045 962456454 : if (!ticket->t_cnt) {
3046 222232040 : xlog_grant_add_space(log, &log->l_reserve_head.grant,
3047 : ticket->t_unit_res);
3048 222231931 : trace_xfs_log_ticket_regrant_exit(log, ticket);
3049 :
3050 222231942 : ticket->t_curr_res = ticket->t_unit_res;
3051 : }
3052 :
3053 962456356 : xfs_log_ticket_put(ticket);
3054 962457461 : }
3055 :
3056 : /*
3057 : * Give back the space left from a reservation.
3058 : *
3059 : * All the information we need to make a correct determination of space left
3060 : * is present. For non-permanent reservations, things are quite easy. The
3061 : * count should have been decremented to zero. We only need to deal with the
3062 : * space remaining in the current reservation part of the ticket. If the
3063 : * ticket contains a permanent reservation, there may be left over space which
3064 : * needs to be released. A count of N means that N-1 refills of the current
3065 : * reservation can be done before we need to ask for more space. The first
3066 : * one goes to fill up the first current reservation. Once we run out of
3067 : * space, the count will stay at zero and the only space remaining will be
3068 : * in the current reservation field.
3069 : */
3070 : void
3071 1205882440 : xfs_log_ticket_ungrant(
3072 : struct xlog *log,
3073 : struct xlog_ticket *ticket)
3074 : {
3075 1205882440 : int bytes;
3076 :
3077 1205882440 : trace_xfs_log_ticket_ungrant(log, ticket);
3078 :
3079 1205924273 : if (ticket->t_cnt > 0)
3080 1081218208 : ticket->t_cnt--;
3081 :
3082 1205924273 : trace_xfs_log_ticket_ungrant_sub(log, ticket);
3083 :
3084 : /*
3085 : * If this is a permanent reservation ticket, we may be able to free
3086 : * up more space based on the remaining count.
3087 : */
3088 1205936505 : bytes = ticket->t_curr_res;
3089 1205936505 : if (ticket->t_cnt > 0) {
3090 972219009 : ASSERT(ticket->t_flags & XLOG_TIC_PERM_RESERV);
3091 972219009 : bytes += ticket->t_unit_res*ticket->t_cnt;
3092 : }
3093 :
3094 1205936505 : xlog_grant_sub_space(log, &log->l_reserve_head.grant, bytes);
3095 1206003138 : xlog_grant_sub_space(log, &log->l_write_head.grant, bytes);
3096 :
3097 1205968120 : trace_xfs_log_ticket_ungrant_exit(log, ticket);
3098 :
3099 1205968569 : xfs_log_space_wake(log->l_mp);
3100 1205976650 : xfs_log_ticket_put(ticket);
3101 1205993987 : }
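
/*
 * Worked example of the ungrant arithmetic above: a permanent ticket with a
 * 4 KiB unit reservation, two unused counts remaining after the release, and
 * 1 KiB left in the current reservation hands 1024 + 2 * 4096 = 9216 bytes
 * back to the grant heads. The struct below is a stand-in for the real ticket.
 */
#include <assert.h>

struct sketch_ticket { int curr_res; int unit_res; int cnt; };

static int
ungrant_bytes(const struct sketch_ticket *tic)
{
        int bytes = tic->curr_res;

        if (tic->cnt > 0)                       /* unused permanent refills */
                bytes += tic->unit_res * tic->cnt;
        return bytes;
}

static void
ungrant_example(void)
{
        struct sketch_ticket tic = { .curr_res = 1024, .unit_res = 4096, .cnt = 2 };

        assert(ungrant_bytes(&tic) == 9216);
}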
3102 :
3103 : /*
3104 : * This routine will mark the current iclog in the ring as WANT_SYNC and move
3105 : * the current iclog pointer to the next iclog in the ring.
3106 : */
3107 : void
3108 32324679 : xlog_state_switch_iclogs(
3109 : struct xlog *log,
3110 : struct xlog_in_core *iclog,
3111 : int eventual_size)
3112 : {
3113 32324679 : ASSERT(iclog->ic_state == XLOG_STATE_ACTIVE);
3114 32324679 : assert_spin_locked(&log->l_icloglock);
3115 32324679 : trace_xlog_iclog_switch(iclog, _RET_IP_);
3116 :
3117 32324679 : if (!eventual_size)
3118 3558070 : eventual_size = iclog->ic_offset;
3119 32324679 : iclog->ic_state = XLOG_STATE_WANT_SYNC;
3120 32324679 : iclog->ic_header.h_prev_block = cpu_to_be32(log->l_prev_block);
3121 32324679 : log->l_prev_block = log->l_curr_block;
3122 32324679 : log->l_prev_cycle = log->l_curr_cycle;
3123 :
3124 : /* roll log?: ic_offset changed later */
3125 32324679 : log->l_curr_block += BTOBB(eventual_size)+BTOBB(log->l_iclog_hsize);
3126 :
3127 : /* Round up to next log-sunit */
3128 32324679 : if (log->l_iclog_roundoff > BBSIZE) {
3129 32322309 : uint32_t sunit_bb = BTOBB(log->l_iclog_roundoff);
3130 32322309 : log->l_curr_block = roundup(log->l_curr_block, sunit_bb);
3131 : }
3132 :
3133 32324679 : if (log->l_curr_block >= log->l_logBBsize) {
3134 : /*
3135 : * Rewind the current block before the cycle is bumped to make
3136 : * sure that the combined LSN never transiently moves forward
3137 : * when the log wraps to the next cycle. This is to support the
3138 : * unlocked sample of these fields from xlog_valid_lsn(). Most
3139 : * other cases should acquire l_icloglock.
3140 : */
3141 14823 : log->l_curr_block -= log->l_logBBsize;
3142 14823 : ASSERT(log->l_curr_block >= 0);
3143 14823 : smp_wmb();
3144 14823 : log->l_curr_cycle++;
3145 14823 : if (log->l_curr_cycle == XLOG_HEADER_MAGIC_NUM)
3146 0 : log->l_curr_cycle++;
3147 : }
3148 32324679 : ASSERT(iclog == log->l_iclog);
3149 32324679 : log->l_iclog = iclog->ic_next;
3150 32324679 : }
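/*
 * Illustrative sketch, not part of xfs_log.c: how the head block advances
 * when an iclog is switched out.  Example values only: with a 32 KiB iclog
 * payload, a 512 byte header and a 4 KiB stripe unit, the head moves by
 * 64 + 1 = 65 basic blocks and is rounded up to 72; if it reaches the end
 * of the log it is rewound by the log size and the cycle is bumped.
 */
static inline int example_next_head_block(int curr_block, int eventual_size,
					  int iclog_hsize, int roundoff,
					  int log_bb_size)
{
	int next = curr_block + BTOBB(eventual_size) + BTOBB(iclog_hsize);

	if (roundoff > BBSIZE)
		next = roundup(next, BTOBB(roundoff));
	if (next >= log_bb_size)
		next -= log_bb_size;	/* l_curr_cycle would be bumped here */
	return next;
}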
3151 :
3152 : /*
3153 : * Force the iclog to disk and check if the iclog has been completed before
3154 : * xlog_force_iclog() returns. This can happen on synchronous (e.g.
3155 : * pmem) or fast async storage because we drop the icloglock to issue the IO.
3156 : * If completion has already occurred, tell the caller so that it can avoid an
3157 : * unnecessary wait on the iclog.
3158 : */
3159 : static int
3160 2919806 : xlog_force_and_check_iclog(
3161 : struct xlog_in_core *iclog,
3162 : bool *completed)
3163 : {
3164 2919806 : xfs_lsn_t lsn = be64_to_cpu(iclog->ic_header.h_lsn);
3165 2919806 : int error;
3166 :
3167 2919806 : *completed = false;
3168 2919806 : error = xlog_force_iclog(iclog);
3169 2919808 : if (error)
3170 : return error;
3171 :
3172 : /*
3173 : * If the iclog has already been completed and reused the header LSN
3174 : * will have been rewritten by completion
3175 : */
3176 2919808 : if (be64_to_cpu(iclog->ic_header.h_lsn) != lsn)
3177 1533 : *completed = true;
3178 : return 0;
3179 : }
3180 :
3181 : /*
3182 : * Write out all data in the in-core log as of this exact moment in time.
3183 : *
3184 : * Data may be written to the in-core log during this call. However,
3185 : * we don't guarantee this data will be written out. A change from past
3186 : * implementation means this routine will *not* write out zero length LRs.
3187 : *
3188 : * Basically, we try and perform an intelligent scan of the in-core logs.
3189 : * If we determine there is no flushable data, we just return. There is no
3190 : * flushable data if:
3191 : *
3192 : * 1. the current iclog is active and has no data; the previous iclog
3193 : * is in the active or dirty state.
3194 : * 2. the current iclog is dirty, and the previous iclog is in the
3195 : * active or dirty state.
3196 : *
3197 : * We may sleep if:
3198 : *
3199 : * 1. the current iclog is not in the active nor dirty state.
3200 : * 2. the current iclog is dirty, and the previous iclog is not in the
3201 : * active nor dirty state.
3202 : * 3. the current iclog is active, and there is another thread writing
3203 : * to this particular iclog.
3204 : * 4. a) the current iclog is active and has no other writers
3205 : * b) when we return from flushing out this iclog, it is still
3206 : * not in the active nor dirty state.
3207 : */
3208 : int
3209 3211210 : xfs_log_force(
3210 : struct xfs_mount *mp,
3211 : uint flags)
3212 : {
3213 3211210 : struct xlog *log = mp->m_log;
3214 3211210 : struct xlog_in_core *iclog;
3215 :
3216 3211210 : XFS_STATS_INC(mp, xs_log_force);
3217 3211210 : trace_xfs_log_force(mp, 0, _RET_IP_);
3218 :
3219 3211149 : xlog_cil_force(log);
3220 :
3221 3211414 : spin_lock(&log->l_icloglock);
3222 6422844 : if (xlog_is_shutdown(log))
3223 42578 : goto out_error;
3224 :
3225 3168844 : iclog = log->l_iclog;
3226 3168844 : trace_xlog_iclog_force(iclog, _RET_IP_);
3227 :
3228 3168846 : if (iclog->ic_state == XLOG_STATE_DIRTY ||
3229 3166855 : (iclog->ic_state == XLOG_STATE_ACTIVE &&
3230 2672200 : atomic_read(&iclog->ic_refcnt) == 0 && iclog->ic_offset == 0)) {
3231 : /*
3232 : * If the head is dirty or (active and empty), then we need to
3233 : * look at the previous iclog.
3234 : *
3235 : * If the previous iclog is active or dirty we are done. There
3236 : * is nothing to sync out. Otherwise, we attach ourselves to the
3237 : * previous iclog and go to sleep.
3238 : */
3239 1630757 : iclog = iclog->ic_prev;
3240 1538089 : } else if (iclog->ic_state == XLOG_STATE_ACTIVE) {
3241 1536102 : if (atomic_read(&iclog->ic_refcnt) == 0) {
3242 : /* We have exclusive access to this iclog. */
3243 1041445 : bool completed;
3244 :
3245 1041445 : if (xlog_force_and_check_iclog(iclog, &completed))
3246 0 : goto out_error;
3247 :
3248 1041445 : if (completed)
3249 469 : goto out_unlock;
3250 : } else {
3251 : /*
3252 : * Someone else is still writing to this iclog, so we
3253 : * need to ensure that when they release the iclog it
3254 : * gets synced immediately as we may be waiting on it.
3255 : */
3256 494657 : xlog_state_switch_iclogs(log, iclog, 0);
3257 : }
3258 : }
3259 :
3260 : /*
3261 : * The iclog we are about to wait on may contain the checkpoint pushed
3262 : * by the above xlog_cil_force() call, but it may not have been pushed
3263 : * to disk yet. Like the ACTIVE case above, we need to make sure caches
3264 : * are flushed when this iclog is written.
3265 : */
3266 3168377 : if (iclog->ic_state == XLOG_STATE_WANT_SYNC)
3267 520728 : iclog->ic_flags |= XLOG_ICL_NEED_FLUSH | XLOG_ICL_NEED_FUA;
3268 :
3269 3168377 : if (flags & XFS_LOG_SYNC)
3270 3091608 : return xlog_wait_on_iclog(iclog);
3271 76769 : out_unlock:
3272 77238 : spin_unlock(&log->l_icloglock);
3273 77238 : return 0;
3274 42578 : out_error:
3275 42578 : spin_unlock(&log->l_icloglock);
3276 42578 : return -EIO;
3277 : }
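/*
 * Hedged usage sketch (hypothetical caller, not part of xfs_log.c): force
 * everything currently in the in-core log to disk and wait for completion,
 * as a sync or unmount style path would.
 */
static int example_sync_log(struct xfs_mount *mp)
{
	/* pushes the CIL, then flushes and waits on the covering iclog */
	return xfs_log_force(mp, XFS_LOG_SYNC);
}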
3278 :
3279 : /*
3280 : * Force the log to a specific LSN.
3281 : *
3282 : * If an iclog with that lsn can be found:
3283 : * If it is in the DIRTY state, just return.
3284 : * If it is in the ACTIVE state, move the in-core log into the WANT_SYNC
3285 : * state and go to sleep or return.
3286 : * If it is in any other state, go to sleep or return.
3287 : *
3288 : * Synchronous forces are implemented with a wait queue. All callers trying
3289 : * to force a given lsn to disk must wait on the queue attached to the
3290 : * specific in-core log. When the given in-core log finally completes its write
3291 : * to disk, that thread will wake up all threads waiting on the queue.
3292 : */
3293 : static int
3294 2796602 : xlog_force_lsn(
3295 : struct xlog *log,
3296 : xfs_lsn_t lsn,
3297 : uint flags,
3298 : int *log_flushed,
3299 : bool already_slept)
3300 : {
3301 2796602 : struct xlog_in_core *iclog;
3302 2796602 : bool completed;
3303 :
3304 2796602 : spin_lock(&log->l_icloglock);
3305 5593314 : if (xlog_is_shutdown(log))
3306 320 : goto out_error;
3307 :
3308 2796337 : iclog = log->l_iclog;
3309 5929772 : while (be64_to_cpu(iclog->ic_header.h_lsn) != lsn) {
3310 3141501 : trace_xlog_iclog_force_lsn(iclog, _RET_IP_);
3311 3141503 : iclog = iclog->ic_next;
3312 3141503 : if (iclog == log->l_iclog)
3313 8068 : goto out_unlock;
3314 : }
3315 :
3316 2788271 : switch (iclog->ic_state) {
3317 2169797 : case XLOG_STATE_ACTIVE:
3318 : /*
3319 : * We sleep here if we haven't already slept (e.g. this is the
3320 : * first time we've looked at the correct iclog buf) and the
3321 : * buffer before us is going to be sync'ed. The reason for this
3322 : * is that if we are doing sync transactions here, by waiting
3323 : * for the previous I/O to complete, we can allow a few more
3324 : * transactions into this iclog before we close it down.
3325 : *
3326 : * Otherwise, we mark the buffer WANT_SYNC, and bump up the
3327 : * refcnt so we can release the log (which drops the ref count).
3328 : * The state switch keeps new transaction commits from using
3329 : * this buffer. When the current commits finish writing into
3330 : * the buffer, the refcount will drop to zero and the buffer
3331 : * will go out then.
3332 : */
3333 2169797 : if (!already_slept &&
3334 1950838 : (iclog->ic_prev->ic_state == XLOG_STATE_WANT_SYNC ||
3335 : iclog->ic_prev->ic_state == XLOG_STATE_SYNCING)) {
3336 291434 : xlog_wait(&iclog->ic_prev->ic_write_wait,
3337 291434 : &log->l_icloglock);
3338 291434 : return -EAGAIN;
3339 : }
3340 1878363 : if (xlog_force_and_check_iclog(iclog, &completed))
3341 0 : goto out_error;
3342 1878364 : if (log_flushed)
3343 1717543 : *log_flushed = 1;
3344 1878364 : if (completed)
3345 1064 : goto out_unlock;
3346 : break;
3347 4682 : case XLOG_STATE_WANT_SYNC:
3348 : /*
3349 : * This iclog may contain the checkpoint pushed by the
3350 : * xlog_cil_force_seq() call, but there are other writers still
3351 : * accessing it so it hasn't been pushed to disk yet. Like the
3352 : * ACTIVE case above, we need to make sure caches are flushed
3353 : * when this iclog is written.
3354 : */
3355 4682 : iclog->ic_flags |= XLOG_ICL_NEED_FLUSH | XLOG_ICL_NEED_FUA;
3356 4682 : break;
3357 : default:
3358 : /*
3359 : * The entire checkpoint was written by the CIL force and is on
3360 : * its way to disk already. It will be stable when it
3361 : * completes, so we don't need to manipulate caches here at all.
3362 : * We just need to wait for completion if necessary.
3363 : */
3364 : break;
3365 : }
3366 :
3367 2495774 : if (flags & XFS_LOG_SYNC)
3368 2495772 : return xlog_wait_on_iclog(iclog);
3369 2 : out_unlock:
3370 9134 : spin_unlock(&log->l_icloglock);
3371 9134 : return 0;
3372 320 : out_error:
3373 320 : spin_unlock(&log->l_icloglock);
3374 320 : return -EIO;
3375 : }
3376 :
3377 : /*
3378 : * Force the log to a specific checkpoint sequence.
3379 : *
3380 : * First force the CIL so that all the required changes have been flushed to the
3381 : * iclogs. If the CIL force completed it will return a commit LSN that indicates
3382 : * the iclog that needs to be flushed to stable storage. If the caller needs
3383 : * a synchronous log force, we will wait on the iclog with the LSN returned by
3384 : * xlog_cil_force_seq() to be completed.
3385 : */
3386 : int
3387 2635219 : xfs_log_force_seq(
3388 : struct xfs_mount *mp,
3389 : xfs_csn_t seq,
3390 : uint flags,
3391 : int *log_flushed)
3392 : {
3393 2635219 : struct xlog *log = mp->m_log;
3394 2635219 : xfs_lsn_t lsn;
3395 2635219 : int ret;
3396 2635219 : ASSERT(seq != 0);
3397 :
3398 2635219 : XFS_STATS_INC(mp, xs_log_force);
3399 2635219 : trace_xfs_log_force(mp, seq, _RET_IP_);
3400 :
3401 2635208 : lsn = xlog_cil_force_seq(log, seq);
3402 2635123 : if (lsn == NULLCOMMITLSN)
3403 : return 0;
3404 :
3405 2505102 : ret = xlog_force_lsn(log, lsn, flags, log_flushed, false);
3406 2504885 : if (ret == -EAGAIN) {
3407 291427 : XFS_STATS_INC(mp, xs_log_force_sleep);
3408 291427 : ret = xlog_force_lsn(log, lsn, flags, log_flushed, true);
3409 : }
3410 : return ret;
3411 : }
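/*
 * Hedged usage sketch (hypothetical helper, not part of xfs_log.c): force
 * the log up to a checkpoint sequence captured at transaction commit time,
 * as an fsync style caller would, and report whether a flush was issued.
 */
static int example_force_commit_seq(struct xfs_mount *mp, xfs_csn_t seq)
{
	int	log_flushed = 0;

	if (!seq)
		return 0;	/* nothing was committed, nothing to force */
	return xfs_log_force_seq(mp, seq, XFS_LOG_SYNC, &log_flushed);
}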
3412 :
3413 : /*
3414 : * Free a used ticket when its refcount falls to zero.
3415 : */
3416 : void
3417 2168445186 : xfs_log_ticket_put(
3418 : xlog_ticket_t *ticket)
3419 : {
3420 2168445186 : ASSERT(atomic_read(&ticket->t_ref) > 0);
3421 4336929953 : if (atomic_dec_and_test(&ticket->t_ref))
3422 1206025407 : kmem_cache_free(xfs_log_ticket_cache, ticket);
3423 2168476790 : }
3424 :
3425 : xlog_ticket_t *
3426 962326056 : xfs_log_ticket_get(
3427 : xlog_ticket_t *ticket)
3428 : {
3429 962326056 : ASSERT(atomic_read(&ticket->t_ref) > 0);
3430 962326056 : atomic_inc(&ticket->t_ref);
3431 962361425 : return ticket;
3432 : }
3433 :
3434 : /*
3435 : * Figure out the total log space unit (in bytes) that would be
3436 : * required for a log ticket.
3437 : */
3438 : static int
3439 1205889950 : xlog_calc_unit_res(
3440 : struct xlog *log,
3441 : int unit_bytes,
3442 : int *niclogs)
3443 : {
3444 1205889950 : int iclog_space;
3445 1205889950 : uint num_headers;
3446 :
3447 : /*
3448 : * Permanent reservations have up to 'cnt'-1 active log operations
3449 : * in the log. A unit in this case is the amount of space for one
3450 : * of these log operations. Normal reservations have a cnt of 1
3451 : * and their unit amount is the total amount of space required.
3452 : *
3453 : * The following lines of code account for non-transaction data
3454 : * which occupy space in the on-disk log.
3455 : *
3456 : * Normal form of a transaction is:
3457 : * <oph><trans-hdr><start-oph><reg1-oph><reg1><reg2-oph>...<commit-oph>
3458 : * and then there are LR hdrs, split-recs and roundoff at end of syncs.
3459 : *
3460 : * We need to account for all the leadup data and trailer data
3461 : * around the transaction data.
3462 : * And then we need to account for the worst case in terms of using
3463 : * more space.
3464 : * The worst case will happen if:
3465 : * - the placement of the transaction happens to be such that the
3466 : * roundoff is at its maximum
3467 : * - the transaction data is synced before the commit record is synced
3468 : * i.e. <transaction-data><roundoff> | <commit-rec><roundoff>
3469 : * Therefore the commit record is in its own Log Record.
3470 : * This can happen as the commit record is called with its
3471 : * own region to xlog_write().
3472 : * This then means that in the worst case, roundoff can happen for
3473 : * the commit-rec as well.
3474 : * The commit-rec is smaller than padding in this scenario and so it is
3475 : * not added separately.
3476 : */
3477 :
3478 : /* for trans header */
3479 1205889950 : unit_bytes += sizeof(xlog_op_header_t);
3480 1205889950 : unit_bytes += sizeof(xfs_trans_header_t);
3481 :
3482 : /* for start-rec */
3483 1205889950 : unit_bytes += sizeof(xlog_op_header_t);
3484 :
3485 : /*
3486 : * for LR headers - the space for data in an iclog is the size minus
3487 : * the space used for the headers. If we use the iclog size, then we
3488 : * undercalculate the number of headers required.
3489 : *
3490 : * Furthermore - the addition of op headers for split-recs might
3491 : * increase the space required enough to require more log and op
3492 : * headers, so take that into account too.
3493 : *
3494 : * IMPORTANT: This reservation makes the assumption that if this
3495 : * transaction is the first in an iclog and hence has the LR headers
3496 : * accounted to it, then the remaining space in the iclog is
3497 : * exclusively for this transaction. i.e. if the transaction is larger
3498 : * than the iclog, it will be the only thing in that iclog.
3499 : * Fundamentally, this means we must pass the entire log vector to
3500 : * xlog_write to guarantee this.
3501 : */
3502 1205889950 : iclog_space = log->l_iclog_size - log->l_iclog_hsize;
3503 1205889950 : num_headers = howmany(unit_bytes, iclog_space);
3504 :
3505 : /* for split-recs - ophdrs added when data split over LRs */
3506 1205889950 : unit_bytes += sizeof(xlog_op_header_t) * num_headers;
3507 :
3508 : /* add extra header reservations if we overrun */
3509 1205779764 : while (!num_headers ||
3510 1205779764 : howmany(unit_bytes, iclog_space) > num_headers) {
3511 0 : unit_bytes += sizeof(xlog_op_header_t);
3512 0 : num_headers++;
3513 : }
3514 1205889950 : unit_bytes += log->l_iclog_hsize * num_headers;
3515 :
3516 : /* for commit-rec LR header - note: padding will subsume the ophdr */
3517 1205889950 : unit_bytes += log->l_iclog_hsize;
3518 :
3519 : /* roundoff padding for transaction data and one for commit record */
3520 1205889950 : unit_bytes += 2 * log->l_iclog_roundoff;
3521 :
3522 1205889950 : if (niclogs)
3523 1205865278 : *niclogs = num_headers;
3524 1205889950 : return unit_bytes;
3525 : }
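/*
 * Worked example for the reservation arithmetic above (illustrative only;
 * assumes a 12 byte xlog_op_header_t and a 16 byte xfs_trans_header_t):
 * unit_bytes = 100000 on a log with 32 KiB iclogs (512 byte header, so
 * iclog_space = 32256) and a 4 KiB stripe unit roundoff:
 *
 *	100000 + 12 + 16 + 12                 = 100040  (trans + start ophdrs)
 *	num_headers = howmany(100040, 32256)  = 4
 *	100040 + 12 * 4                       = 100088  (split-rec ophdrs)
 *	100088 + 512 * 4 + 512                = 102648  (LR + commit-rec hdrs)
 *	102648 + 2 * 4096                     = 110840  (roundoff padding)
 */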
3526 :
3527 : int
3528 24672 : xfs_log_calc_unit_res(
3529 : struct xfs_mount *mp,
3530 : int unit_bytes)
3531 : {
3532 24672 : return xlog_calc_unit_res(mp->m_log, unit_bytes, NULL);
3533 : }
3534 :
3535 : /*
3536 : * Allocate and initialise a new log ticket.
3537 : */
3538 : struct xlog_ticket *
3539 1205679668 : xlog_ticket_alloc(
3540 : struct xlog *log,
3541 : int unit_bytes,
3542 : int cnt,
3543 : bool permanent)
3544 : {
3545 1205679668 : struct xlog_ticket *tic;
3546 1205679668 : int unit_res;
3547 :
3548 1205679668 : tic = kmem_cache_zalloc(xfs_log_ticket_cache, GFP_NOFS | __GFP_NOFAIL);
3549 :
3550 1205768348 : unit_res = xlog_calc_unit_res(log, unit_bytes, &tic->t_iclog_hdrs);
3551 :
3552 1205881212 : atomic_set(&tic->t_ref, 1);
3553 1205881212 : tic->t_task = current;
3554 1205881212 : INIT_LIST_HEAD(&tic->t_queue);
3555 1205881212 : tic->t_unit_res = unit_res;
3556 1205881212 : tic->t_curr_res = unit_res;
3557 1205881212 : tic->t_cnt = cnt;
3558 1205881212 : tic->t_ocnt = cnt;
3559 1205881212 : tic->t_tid = get_random_u32();
3560 1205867787 : if (permanent)
3561 1087784010 : tic->t_flags |= XLOG_TIC_PERM_RESERV;
3562 :
3563 1205867787 : return tic;
3564 : }
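/*
 * Hedged lifecycle sketch (hypothetical and simplified, not part of
 * xfs_log.c): a permanent reservation ticket is allocated with 'cnt'
 * rollover units; each transaction roll consumes a count through the
 * regrant path above, and the final commit returns the leftovers and drops
 * the allocation reference via xfs_log_ticket_ungrant().
 */
static void example_ticket_lifecycle(struct xlog *log, int unit_bytes, int cnt)
{
	struct xlog_ticket *tic;

	tic = xlog_ticket_alloc(log, unit_bytes, cnt, true);
	/* ... reserve grant space, commit and roll transactions ... */
	xfs_log_ticket_ungrant(log, tic);
}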
3565 :
3566 : #if defined(DEBUG)
3567 : /*
3568 : * Check to make sure the grant write head didn't just overlap the tail. If
3569 : * the cycles are the same, we can't be overlapping. Otherwise, make sure that
3570 : * the cycles differ by exactly one and check the byte count.
3571 : *
3572 : * This check is run unlocked, so can give false positives. Rather than assert
3573 : * on failures, use a warn-once flag and a panic tag to allow the admin to
3574 : * determine if they want to panic the machine when such an error occurs. For
3575 : * debug kernels this will have the same effect as using an assert but, unlike
3576 : * an assert, it can be turned off at runtime.
3577 : */
3578 : STATIC void
3579 1424447275 : xlog_verify_grant_tail(
3580 : struct xlog *log)
3581 : {
3582 1424447275 : int tail_cycle, tail_blocks;
3583 1424447275 : int cycle, space;
3584 :
3585 1424447275 : xlog_crack_grant_head(&log->l_write_head.grant, &cycle, &space);
3586 1424447275 : xlog_crack_atomic_lsn(&log->l_tail_lsn, &tail_cycle, &tail_blocks);
3587 1424447275 : if (tail_cycle != cycle) {
3588 493971973 : if (cycle - 1 != tail_cycle &&
3589 2 : !test_and_set_bit(XLOG_TAIL_WARN, &log->l_opstate)) {
3590 0 : xfs_alert_tag(log->l_mp, XFS_PTAG_LOGRES,
3591 : "%s: cycle - 1 != tail_cycle", __func__);
3592 : }
3593 :
3594 493972263 : if (space > BBTOB(tail_blocks) &&
3595 292 : !test_and_set_bit(XLOG_TAIL_WARN, &log->l_opstate)) {
3596 32 : xfs_alert_tag(log->l_mp, XFS_PTAG_LOGRES,
3597 : "%s: space > BBTOB(tail_blocks)", __func__);
3598 : }
3599 : }
3600 1424447275 : }
3601 :
3602 : /* check if it will fit */
3603 : STATIC void
3604 32324235 : xlog_verify_tail_lsn(
3605 : struct xlog *log,
3606 : struct xlog_in_core *iclog)
3607 : {
3608 32324235 : xfs_lsn_t tail_lsn = be64_to_cpu(iclog->ic_header.h_tail_lsn);
3609 32324235 : int blocks;
3610 :
3611 32324235 : if (CYCLE_LSN(tail_lsn) == log->l_prev_cycle) {
3612 19451615 : blocks =
3613 19451615 : log->l_logBBsize - (log->l_prev_block - BLOCK_LSN(tail_lsn));
3614 19451615 : if (blocks < BTOBB(iclog->ic_offset)+BTOBB(log->l_iclog_hsize))
3615 0 : xfs_emerg(log->l_mp, "%s: ran out of log space", __func__);
3616 : } else {
3617 12872620 : ASSERT(CYCLE_LSN(tail_lsn)+1 == log->l_prev_cycle);
3618 :
3619 12872620 : if (BLOCK_LSN(tail_lsn) == log->l_prev_block)
3620 0 : xfs_emerg(log->l_mp, "%s: tail wrapped", __func__);
3621 :
3622 12872620 : blocks = BLOCK_LSN(tail_lsn) - log->l_prev_block;
3623 12872620 : if (blocks < BTOBB(iclog->ic_offset) + 1)
3624 0 : xfs_emerg(log->l_mp, "%s: ran out of log space", __func__);
3625 : }
3626 32324235 : }
3627 :
3628 : /*
3629 : * Perform a number of checks on the iclog before writing to disk.
3630 : *
3631 : * 1. Make sure the iclogs are still circular
3632 : * 2. Make sure we have a good magic number
3633 : * 3. Make sure we don't have magic numbers in the data
3634 : * 4. Check fields of each log operation header for:
3635 : * A. Valid client identifier
3636 : * B. tid ptr value falls in valid ptr space (user space code)
3637 : * C. Length in log record header is correct according to the
3638 : * individual operation headers within record.
3639 : * 5. When a bwrite will occur within 5 blocks of the front of the physical
3640 : * log, check the preceding blocks of the physical log to make sure all
3641 : * the cycle numbers agree with the current cycle number.
3642 : */
3643 : STATIC void
3644 32324136 : xlog_verify_iclog(
3645 : struct xlog *log,
3646 : struct xlog_in_core *iclog,
3647 : int count)
3648 : {
3649 32324136 : xlog_op_header_t *ophead;
3650 32324136 : xlog_in_core_t *icptr;
3651 32324136 : xlog_in_core_2_t *xhdr;
3652 32324136 : void *base_ptr, *ptr, *p;
3653 32324136 : ptrdiff_t field_offset;
3654 32324136 : uint8_t clientid;
3655 32324136 : int len, i, j, k, op_len;
3656 32324136 : int idx;
3657 :
3658 : /* check validity of iclog pointers */
3659 32324136 : spin_lock(&log->l_icloglock);
3660 32324233 : icptr = log->l_iclog;
3661 290918008 : for (i = 0; i < log->l_iclog_bufs; i++, icptr = icptr->ic_next)
3662 258593778 : ASSERT(icptr);
3663 :
3664 32324230 : if (icptr != log->l_iclog)
3665 0 : xfs_emerg(log->l_mp, "%s: corrupt iclog ring", __func__);
3666 32324230 : spin_unlock(&log->l_icloglock);
3667 :
3668 : /* check log magic numbers */
3669 32324200 : if (iclog->ic_header.h_magicno != cpu_to_be32(XLOG_HEADER_MAGIC_NUM))
3670 0 : xfs_emerg(log->l_mp, "%s: invalid magic num", __func__);
3671 :
3672 32324200 : base_ptr = ptr = &iclog->ic_header;
3673 32324200 : p = &iclog->ic_header;
3674 1949747937 : for (ptr += BBSIZE; ptr < base_ptr + count; ptr += BBSIZE) {
3675 1917423811 : if (*(__be32 *)ptr == cpu_to_be32(XLOG_HEADER_MAGIC_NUM))
3676 0 : xfs_emerg(log->l_mp, "%s: unexpected magic num",
3677 : __func__);
3678 : }
3679 :
3680 : /* check fields */
3681 32324126 : len = be32_to_cpu(iclog->ic_header.h_num_logops);
3682 32324126 : base_ptr = ptr = iclog->ic_datap;
3683 32324126 : ophead = ptr;
3684 32324126 : xhdr = iclog->ic_data;
3685 2893613940 : for (i = 0; i < len; i++) {
3686 2861289715 : ophead = ptr;
3687 :
3688 : /* clientid is only 1 byte */
3689 2861289715 : p = &ophead->oh_clientid;
3690 2861289715 : field_offset = p - base_ptr;
3691 2861289715 : if (field_offset & 0x1ff) {
3692 2839719360 : clientid = ophead->oh_clientid;
3693 : } else {
3694 21570355 : idx = BTOBBT((void *)&ophead->oh_clientid - iclog->ic_datap);
3695 21570355 : if (idx >= (XLOG_HEADER_CYCLE_SIZE / BBSIZE)) {
3696 4192 : j = idx / (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
3697 4192 : k = idx % (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
3698 4192 : clientid = xlog_get_client_id(
3699 4192 : xhdr[j].hic_xheader.xh_cycle_data[k]);
3700 : } else {
3701 21566163 : clientid = xlog_get_client_id(
3702 21566163 : iclog->ic_header.h_cycle_data[idx]);
3703 : }
3704 : }
3705 2861289715 : if (clientid != XFS_TRANSACTION && clientid != XFS_LOG) {
3706 0 : xfs_warn(log->l_mp,
3707 : "%s: op %d invalid clientid %d op "PTR_FMT" offset 0x%lx",
3708 : __func__, i, clientid, ophead,
3709 : (unsigned long)field_offset);
3710 : }
3711 :
3712 : /* check length */
3713 2861289814 : p = &ophead->oh_len;
3714 2861289814 : field_offset = p - base_ptr;
3715 2861289814 : if (field_offset & 0x1ff) {
3716 2839648067 : op_len = be32_to_cpu(ophead->oh_len);
3717 : } else {
3718 21641747 : idx = BTOBBT((void *)&ophead->oh_len - iclog->ic_datap);
3719 21641747 : if (idx >= (XLOG_HEADER_CYCLE_SIZE / BBSIZE)) {
3720 4216 : j = idx / (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
3721 4216 : k = idx % (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
3722 4216 : op_len = be32_to_cpu(xhdr[j].hic_xheader.xh_cycle_data[k]);
3723 : } else {
3724 21637531 : op_len = be32_to_cpu(iclog->ic_header.h_cycle_data[idx]);
3725 : }
3726 : }
3727 2861289814 : ptr += sizeof(xlog_op_header_t) + op_len;
3728 : }
3729 32324225 : }
3730 : #endif
3731 :
3732 : /*
3733 : * Perform a forced shutdown on the log.
3734 : *
3735 : * This can be called from low level log code to trigger a shutdown, or from the
3736 : * high level mount shutdown code when the mount shuts down.
3737 : *
3738 : * Our main objectives here are to make sure that:
3739 : * a. if the shutdown was not due to a log IO error, flush the logs to
3740 : * disk. Anything modified after this is ignored.
3741 : * b. the log gets atomically marked 'XLOG_IO_ERROR' for all interested
3742 : * parties to find out. Nothing new gets queued after this is done.
3743 : * c. Tasks sleeping on log reservations, pinned objects and
3744 : * other resources get woken up.
3745 : * d. The mount is also marked as shut down so that log triggered shutdowns
3746 : * still behave the same as if they called xfs_forced_shutdown().
3747 : *
3748 : * Return true if the shutdown cause was a log IO error and we actually shut the
3749 : * log down.
3750 : */
3751 : bool
3752 17695 : xlog_force_shutdown(
3753 : struct xlog *log,
3754 : uint32_t shutdown_flags)
3755 : {
3756 17695 : bool log_error = (shutdown_flags & SHUTDOWN_LOG_IO_ERROR);
3757 :
3758 17695 : if (!log)
3759 : return false;
3760 :
3761 : /*
3762 : * Flush all the completed transactions to disk before marking the log
3763 : * being shut down. We need to do this first as shutting down the log
3764 : * before the force will prevent the log force from flushing the iclogs
3765 : * to disk.
3766 : *
3767 : * When we are in recovery, there are no transactions to flush, and
3768 : * we don't want to touch the log because we don't want to perturb the
3769 : * current head/tail for future recovery attempts. Hence we need to
3770 : * avoid a log force in this case.
3771 : *
3772 : * If we are shutting down due to a log IO error, then we must avoid
3773 : * trying to write the log as that may just result in more IO errors and
3774 : * an endless shutdown/force loop.
3775 : */
3776 20601 : if (!log_error && !xlog_in_recovery(log))
3777 2906 : xfs_log_force(log->l_mp, XFS_LOG_SYNC);
3778 :
3779 : /*
3780 : * Atomically set the shutdown state. If the shutdown state is already
3781 : * set, then someone else is performing the shutdown and so we are done
3782 : * here. This should never happen because we should only ever get called
3783 : * once by the first shutdown caller.
3784 : *
3785 : * Much of the log state machine transitions assume that shutdown state
3786 : * cannot change once they hold the log->l_icloglock. Hence we need to
3787 : * hold that lock here, even though we use the atomic test_and_set_bit()
3788 : * operation to set the shutdown state.
3789 : */
3790 17695 : spin_lock(&log->l_icloglock);
3791 35390 : if (test_and_set_bit(XLOG_IO_ERROR, &log->l_opstate)) {
3792 5816 : spin_unlock(&log->l_icloglock);
3793 5816 : return false;
3794 : }
3795 11879 : spin_unlock(&log->l_icloglock);
3796 :
3797 : /*
3798 : * If this log shutdown also sets the mount shutdown state, issue a
3799 : * shutdown warning message.
3800 : */
3801 23758 : if (!test_and_set_bit(XFS_OPSTATE_SHUTDOWN, &log->l_mp->m_opstate)) {
3802 4356 : xfs_alert_tag(log->l_mp, XFS_PTAG_SHUTDOWN_LOGERROR,
3803 : "Filesystem has been shut down due to log error (0x%x).",
3804 : shutdown_flags);
3805 4356 : xfs_alert(log->l_mp,
3806 : "Please unmount the filesystem and rectify the problem(s).");
3807 4356 : if (xfs_error_level >= XFS_ERRLEVEL_HIGH)
3808 0 : xfs_stack_trace();
3809 : }
3810 :
3811 : /*
3812 : * We don't want anybody waiting for log reservations after this. That
3813 : * means we have to wake up everybody queued up on reserveq as well as
3814 : * writeq. In addition, we make sure in xlog_{re}grant_log_space that
3815 : * we don't enqueue anything once the SHUTDOWN flag is set, and this
3816 : * action is protected by the grant locks.
3817 : */
3818 11879 : xlog_grant_head_wake_all(&log->l_reserve_head);
3819 11879 : xlog_grant_head_wake_all(&log->l_write_head);
3820 :
3821 : /*
3822 : * Wake up everybody waiting on xfs_log_force. Wake the CIL push first
3823 : * as if the log writes were completed. The abort handling in the log
3824 : * item committed callback functions will do this again under lock to
3825 : * avoid races.
3826 : */
3827 11879 : spin_lock(&log->l_cilp->xc_push_lock);
3828 11879 : wake_up_all(&log->l_cilp->xc_start_wait);
3829 11879 : wake_up_all(&log->l_cilp->xc_commit_wait);
3830 11879 : spin_unlock(&log->l_cilp->xc_push_lock);
3831 :
3832 11879 : spin_lock(&log->l_icloglock);
3833 11879 : xlog_state_shutdown_callbacks(log);
3834 11879 : spin_unlock(&log->l_icloglock);
3835 :
3836 11879 : wake_up_var(&log->l_opstate);
3837 11879 : return log_error;
3838 : }
3839 :
3840 : STATIC int
3841 189666 : xlog_iclogs_empty(
3842 : struct xlog *log)
3843 : {
3844 189666 : xlog_in_core_t *iclog;
3845 :
3846 189666 : iclog = log->l_iclog;
3847 1506583 : do {
3848 : /* endianness does not matter here, zero is zero in
3849 : * any language.
3850 : */
3851 1506583 : if (iclog->ic_header.h_num_logops)
3852 : return 0;
3853 1499645 : iclog = iclog->ic_next;
3854 1499645 : } while (iclog != log->l_iclog);
3855 : return 1;
3856 : }
3857 :
3858 : /*
3859 : * Verify that an LSN stamped into a piece of metadata is valid. This is
3860 : * intended for use in read verifiers on v5 superblocks.
3861 : */
3862 : bool
3863 118284316 : xfs_log_check_lsn(
3864 : struct xfs_mount *mp,
3865 : xfs_lsn_t lsn)
3866 : {
3867 118284316 : struct xlog *log = mp->m_log;
3868 118284316 : bool valid;
3869 :
3870 : /*
3871 : * norecovery mode skips mount-time log processing and unconditionally
3872 : * resets the in-core LSN. We can't validate in this mode, but
3873 : * modifications are not allowed anyway, so just return true.
3874 : */
3875 118284316 : if (xfs_has_norecovery(mp))
3876 : return true;
3877 :
3878 : /*
3879 : * Some metadata LSNs are initialized to NULL (e.g., the agfl). This is
3880 : * handled by recovery and thus safe to ignore here.
3881 : */
3882 118284200 : if (lsn == NULLCOMMITLSN)
3883 : return true;
3884 :
3885 118010755 : valid = xlog_valid_lsn(mp->m_log, lsn);
3886 :
3887 : /* warn the user about what's gone wrong before verifier failure */
3888 118011818 : if (!valid) {
3889 2 : spin_lock(&log->l_icloglock);
3890 2 : xfs_warn(mp,
3891 : "Corruption warning: Metadata has LSN (%d:%d) ahead of current LSN (%d:%d). "
3892 : "Please unmount and run xfs_repair (>= v4.3) to resolve.",
3893 : CYCLE_LSN(lsn), BLOCK_LSN(lsn),
3894 : log->l_curr_cycle, log->l_curr_block);
3895 2 : spin_unlock(&log->l_icloglock);
3896 : }
3897 :
3898 : return valid;
3899 : }
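/*
 * Hedged usage sketch (hypothetical verifier, not part of xfs_log.c): a v5
 * read verifier rejects metadata whose stamped LSN is ahead of the current
 * head of the log.
 */
static bool example_verify_stamped_lsn(struct xfs_mount *mp, __be64 disk_lsn)
{
	if (!xfs_has_crc(mp))
		return true;	/* only v5 metadata carries LSN stamps */
	return xfs_log_check_lsn(mp, be64_to_cpu(disk_lsn));
}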
3900 :
3901 : /*
3902 : * Notify the log that we're about to start using a feature that is protected
3903 : * by a log incompat feature flag. This will prevent log covering from
3904 : * clearing those flags.
3905 : */
3906 : void
3907 140202634 : xlog_use_incompat_feat(
3908 : struct xlog *log,
3909 : enum xlog_incompat_feat what)
3910 : {
3911 140202634 : switch (what) {
3912 139529861 : case XLOG_INCOMPAT_FEAT_XATTRS:
3913 139529861 : down_read(&log->l_incompat_xattrs);
3914 139529861 : break;
3915 672773 : case XLOG_INCOMPAT_FEAT_SWAPEXT:
3916 672773 : down_read(&log->l_incompat_swapext);
3917 672773 : break;
3918 : }
3919 140208202 : }
3920 :
3921 : /* Notify the log that we've finished using log incompat features. */
3922 : void
3923 140079870 : xlog_drop_incompat_feat(
3924 : struct xlog *log,
3925 : enum xlog_incompat_feat what)
3926 : {
3927 140079870 : switch (what) {
3928 139407094 : case XLOG_INCOMPAT_FEAT_XATTRS:
3929 139407094 : up_read(&log->l_incompat_xattrs);
3930 139407094 : break;
3931 672776 : case XLOG_INCOMPAT_FEAT_SWAPEXT:
3932 672776 : up_read(&log->l_incompat_swapext);
3933 672776 : break;
3934 : }
3935 140123174 : }
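/*
 * Hedged usage sketch (hypothetical caller, not part of xfs_log.c): hold
 * the incompat feature reference across an operation that logs xattr intent
 * items so that log covering cannot clear the incompat flag underneath it.
 */
static void example_logged_xattr_op(struct xlog *log)
{
	xlog_use_incompat_feat(log, XLOG_INCOMPAT_FEAT_XATTRS);
	/* ... commit the transaction(s) that log the xattr intent items ... */
	xlog_drop_incompat_feat(log, XLOG_INCOMPAT_FEAT_XATTRS);
}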
|