// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_mount.h"
#include "xfs_trans.h"
#include "xfs_buf_item.h"
#include "xfs_trans_priv.h"
#include "xfs_trace.h"
#include "xfs_log.h"
#include "xfs_log_priv.h"
#include "xfs_log_recover.h"
#include "xfs_error.h"
#include "xfs_inode.h"
#include "xfs_dir2.h"
#include "xfs_quota.h"
#include "xfs_rtgroup.h"
#include "xfs_rtbitmap.h"

/*
 * This is the number of entries in the l_buf_cancel_table used during
 * recovery.
 */
#define XLOG_BC_TABLE_SIZE	64

#define XLOG_BUF_CANCEL_BUCKET(log, blkno) \
	((log)->l_buf_cancel_table + ((uint64_t)blkno % XLOG_BC_TABLE_SIZE))
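
/*
 * For example, with XLOG_BC_TABLE_SIZE == 64 a cancel record for blkno 4660
 * hashes to bucket 4660 % 64 == 52, so both insertion and lookup for that
 * block only ever walk the short list at l_buf_cancel_table[52].
 */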

/*
 * This structure is used during recovery to record the buf log items which
 * have been canceled and should not be replayed.
 */
struct xfs_buf_cancel {
	xfs_daddr_t		bc_blkno;
	uint			bc_len;
	int			bc_refcount;
	struct list_head	bc_list;
};

static struct xfs_buf_cancel *
xlog_find_buffer_cancelled(
	struct xlog		*log,
	xfs_daddr_t		blkno,
	uint			len)
{
	struct list_head	*bucket;
	struct xfs_buf_cancel	*bcp;

	if (!log->l_buf_cancel_table)
		return NULL;

	bucket = XLOG_BUF_CANCEL_BUCKET(log, blkno);
	list_for_each_entry(bcp, bucket, bc_list) {
		if (bcp->bc_blkno == blkno && bcp->bc_len == len)
			return bcp;
	}

	return NULL;
}

static bool
xlog_add_buffer_cancelled(
	struct xlog		*log,
	xfs_daddr_t		blkno,
	uint			len)
{
	struct xfs_buf_cancel	*bcp;

	/*
	 * If we find an existing cancel record, this indicates that the buffer
	 * was cancelled multiple times. To ensure that during pass 2 we keep
	 * the record in the table until we reach its last occurrence in the
	 * log, a reference count is kept to tell how many times we expect to
	 * see this record during the second pass.
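	 *
	 * For example, a buffer cancelled three times in the log enters the
	 * table with bc_refcount == 1 on its first cancellation and leaves
	 * pass 1 with bc_refcount == 3; pass 2 then drops one reference per
	 * occurrence and frees the record on the third and final put.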
	 */
	bcp = xlog_find_buffer_cancelled(log, blkno, len);
	if (bcp) {
		bcp->bc_refcount++;
		return false;
	}

	bcp = kmem_alloc(sizeof(struct xfs_buf_cancel), 0);
	bcp->bc_blkno = blkno;
	bcp->bc_len = len;
	bcp->bc_refcount = 1;
	list_add_tail(&bcp->bc_list, XLOG_BUF_CANCEL_BUCKET(log, blkno));
	return true;
}

/*
 * Check if there is an entry for blkno, len in the buffer cancel record table.
 */
bool
xlog_is_buffer_cancelled(
	struct xlog		*log,
	xfs_daddr_t		blkno,
	uint			len)
{
	return xlog_find_buffer_cancelled(log, blkno, len) != NULL;
}

/*
 * Check if there is an entry for blkno, len in the buffer cancel record table,
 * and decrement the reference count on it if there is one.
 *
 * Remove the cancel record once the refcount hits zero, so that if the same
 * buffer is re-used again after its last cancellation we actually replay the
 * changes made at that point.
 */
static bool
xlog_put_buffer_cancelled(
	struct xlog		*log,
	xfs_daddr_t		blkno,
	uint			len)
{
	struct xfs_buf_cancel	*bcp;

	bcp = xlog_find_buffer_cancelled(log, blkno, len);
	if (!bcp) {
		ASSERT(0);
		return false;
	}

	if (--bcp->bc_refcount == 0) {
		list_del(&bcp->bc_list);
		kmem_free(bcp);
	}
	return true;
}

/* log buffer item recovery */

/*
 * Sort buffer items for log recovery. Most buffer items should end up on the
 * buffer list and are recovered first, with the following exceptions:
 *
 * 1. XFS_BLF_CANCEL buffers must be processed last because some log items
 *    might depend on the incore cancellation record, and replaying a
 *    cancelled buffer item can remove the incore record.
 *
 * 2. XFS_BLF_INODE_BUF buffers are handled after most regular items so that
 *    we replay di_next_unlinked only after flushing the inode 'free' state
 *    to the inode buffer.
 *
 * See xlog_recover_reorder_trans for more details.
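 *
 * The net effect is that replay walks the plain buffer list first, the
 * inode buffer list after it, and the cancel list last of all.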
 */
STATIC enum xlog_recover_reorder
xlog_recover_buf_reorder(
	struct xlog_recover_item	*item)
{
	struct xfs_buf_log_format	*buf_f = item->ri_buf[0].i_addr;

	if (buf_f->blf_flags & XFS_BLF_CANCEL)
		return XLOG_REORDER_CANCEL_LIST;
	if (buf_f->blf_flags & XFS_BLF_INODE_BUF)
		return XLOG_REORDER_INODE_BUFFER_LIST;
	return XLOG_REORDER_BUFFER_LIST;
}

STATIC void
xlog_recover_buf_ra_pass2(
	struct xlog			*log,
	struct xlog_recover_item	*item)
{
	struct xfs_buf_log_format	*buf_f = item->ri_buf[0].i_addr;

	xlog_buf_readahead(log, buf_f->blf_blkno, buf_f->blf_len, NULL);
}

/*
 * Build up the table of buf cancel records so that we don't replay cancelled
 * data in the second pass.
 */
static int
xlog_recover_buf_commit_pass1(
	struct xlog			*log,
	struct xlog_recover_item	*item)
{
	struct xfs_buf_log_format	*bf = item->ri_buf[0].i_addr;

	if (!xfs_buf_log_check_iovec(&item->ri_buf[0])) {
		xfs_err(log->l_mp, "bad buffer log item size (%d)",
				item->ri_buf[0].i_len);
		return -EFSCORRUPTED;
	}

	if (!(bf->blf_flags & XFS_BLF_CANCEL))
		trace_xfs_log_recover_buf_not_cancel(log, bf);
	else if (xlog_add_buffer_cancelled(log, bf->blf_blkno, bf->blf_len))
		trace_xfs_log_recover_buf_cancel_add(log, bf);
	else
		trace_xfs_log_recover_buf_cancel_ref_inc(log, bf);
	return 0;
}

/*
 * Validate that the recovered buffer is of the correct type and attach the
 * appropriate buffer operations to it for writeback. Magic numbers are in a
 * few places:
 *	the first 16 bits of the buffer (inode buffer, dquot buffer),
 *	the first 32 bits of the buffer (most blocks),
 *	inside a struct xfs_da_blkinfo at the start of the buffer.
 */
static void
xlog_recover_validate_buf_type(
	struct xfs_mount	*mp,
	struct xfs_buf		*bp,
	struct xfs_buf_log_format *buf_f,
	xfs_lsn_t		current_lsn)
{
	struct xfs_da_blkinfo	*info = bp->b_addr;
	uint32_t		magic32;
	uint16_t		magic16;
	uint16_t		magicda;
	char			*warnmsg = NULL;

	/*
	 * We can only do post recovery validation on items on CRC enabled
	 * filesystems as we need to know when the buffer was written to be
	 * able to determine if we should have replayed the item. If we replay
	 * old metadata over a newer buffer, then it will enter a temporarily
	 * inconsistent state resulting in verification failures. Hence for
	 * now just avoid the verification stage for non-crc filesystems.
	 */
234 : return;
235 :
236 15436869 : magic32 = be32_to_cpu(*(__be32 *)bp->b_addr);
237 15436869 : magic16 = be16_to_cpu(*(__be16*)bp->b_addr);
238 15436869 : magicda = be16_to_cpu(info->magic);
239 15436869 : switch (xfs_blft_from_flags(buf_f)) {
240 12600638 : case XFS_BLFT_BTREE_BUF:
241 12600638 : switch (magic32) {
242 1494937 : case XFS_ABTB_CRC_MAGIC:
243 : case XFS_ABTB_MAGIC:
244 1494937 : bp->b_ops = &xfs_bnobt_buf_ops;
245 1494937 : break;
246 1916770 : case XFS_ABTC_CRC_MAGIC:
247 : case XFS_ABTC_MAGIC:
248 1916770 : bp->b_ops = &xfs_cntbt_buf_ops;
249 1916770 : break;
250 388286 : case XFS_IBT_CRC_MAGIC:
251 : case XFS_IBT_MAGIC:
252 388286 : bp->b_ops = &xfs_inobt_buf_ops;
253 388286 : break;
254 385249 : case XFS_FIBT_CRC_MAGIC:
255 : case XFS_FIBT_MAGIC:
256 385249 : bp->b_ops = &xfs_finobt_buf_ops;
257 385249 : break;
258 1188041 : case XFS_BMAP_CRC_MAGIC:
259 : case XFS_BMAP_MAGIC:
260 1188041 : bp->b_ops = &xfs_bmbt_buf_ops;
261 1188041 : break;
262 33898 : case XFS_RTRMAP_CRC_MAGIC:
263 33898 : bp->b_ops = &xfs_rtrmapbt_buf_ops;
264 33898 : break;
265 6594796 : case XFS_RMAP_CRC_MAGIC:
266 6594796 : bp->b_ops = &xfs_rmapbt_buf_ops;
267 6594796 : break;
268 597654 : case XFS_REFC_CRC_MAGIC:
269 597654 : bp->b_ops = &xfs_refcountbt_buf_ops;
270 597654 : break;
271 1007 : case XFS_RTREFC_CRC_MAGIC:
272 1007 : bp->b_ops = &xfs_rtrefcountbt_buf_ops;
273 1007 : break;
274 : default:
275 : warnmsg = "Bad btree block magic!";
276 : break;
277 : }
278 : break;
279 824961 : case XFS_BLFT_AGF_BUF:
280 824961 : if (magic32 != XFS_AGF_MAGIC) {
281 : warnmsg = "Bad AGF block magic!";
282 : break;
283 : }
284 824961 : bp->b_ops = &xfs_agf_buf_ops;
285 824961 : break;
286 21499 : case XFS_BLFT_AGFL_BUF:
287 21499 : if (magic32 != XFS_AGFL_MAGIC) {
288 : warnmsg = "Bad AGFL block magic!";
289 : break;
290 : }
291 21499 : bp->b_ops = &xfs_agfl_buf_ops;
292 21499 : break;
293 427839 : case XFS_BLFT_AGI_BUF:
294 427839 : if (magic32 != XFS_AGI_MAGIC) {
295 : warnmsg = "Bad AGI block magic!";
296 : break;
297 : }
298 427839 : bp->b_ops = &xfs_agi_buf_ops;
299 427839 : break;
300 331450 : case XFS_BLFT_UDQUOT_BUF:
301 : case XFS_BLFT_PDQUOT_BUF:
302 : case XFS_BLFT_GDQUOT_BUF:
303 : #ifdef CONFIG_XFS_QUOTA
304 331450 : if (magic16 != XFS_DQUOT_MAGIC) {
305 : warnmsg = "Bad DQUOT block magic!";
306 : break;
307 : }
308 331450 : bp->b_ops = &xfs_dquot_buf_ops;
309 : #else
310 : xfs_alert(mp,
311 : "Trying to recover dquots without QUOTA support built in!");
312 : ASSERT(0);
313 : #endif
314 331450 : break;
315 0 : case XFS_BLFT_DINO_BUF:
316 0 : if (magic16 != XFS_DINODE_MAGIC) {
317 : warnmsg = "Bad INODE block magic!";
318 : break;
319 : }
320 0 : bp->b_ops = &xfs_inode_buf_ops;
321 0 : break;
322 10493 : case XFS_BLFT_SYMLINK_BUF:
323 10493 : if (magic32 != XFS_SYMLINK_MAGIC) {
324 : warnmsg = "Bad symlink block magic!";
325 : break;
326 : }
327 10493 : bp->b_ops = &xfs_symlink_buf_ops;
328 10493 : break;
329 26105 : case XFS_BLFT_DIR_BLOCK_BUF:
330 26105 : if (magic32 != XFS_DIR2_BLOCK_MAGIC &&
331 26105 : magic32 != XFS_DIR3_BLOCK_MAGIC) {
332 : warnmsg = "Bad dir block magic!";
333 : break;
334 : }
335 26105 : bp->b_ops = &xfs_dir3_block_buf_ops;
336 26105 : break;
337 527300 : case XFS_BLFT_DIR_DATA_BUF:
338 527300 : if (magic32 != XFS_DIR2_DATA_MAGIC &&
339 527300 : magic32 != XFS_DIR3_DATA_MAGIC) {
340 : warnmsg = "Bad dir data magic!";
341 : break;
342 : }
343 527300 : bp->b_ops = &xfs_dir3_data_buf_ops;
344 527300 : break;
345 125406 : case XFS_BLFT_DIR_FREE_BUF:
346 125406 : if (magic32 != XFS_DIR2_FREE_MAGIC &&
347 125406 : magic32 != XFS_DIR3_FREE_MAGIC) {
348 : warnmsg = "Bad dir3 free magic!";
349 : break;
350 : }
351 125406 : bp->b_ops = &xfs_dir3_free_buf_ops;
352 125406 : break;
353 56977 : case XFS_BLFT_DIR_LEAF1_BUF:
354 56977 : if (magicda != XFS_DIR2_LEAF1_MAGIC &&
355 56977 : magicda != XFS_DIR3_LEAF1_MAGIC) {
356 : warnmsg = "Bad dir leaf1 magic!";
357 : break;
358 : }
359 56977 : bp->b_ops = &xfs_dir3_leaf1_buf_ops;
360 56977 : break;
361 378589 : case XFS_BLFT_DIR_LEAFN_BUF:
362 378589 : if (magicda != XFS_DIR2_LEAFN_MAGIC &&
363 378589 : magicda != XFS_DIR3_LEAFN_MAGIC) {
364 : warnmsg = "Bad dir leafn magic!";
365 : break;
366 : }
367 378589 : bp->b_ops = &xfs_dir3_leafn_buf_ops;
368 378589 : break;
369 5405 : case XFS_BLFT_DA_NODE_BUF:
370 5405 : if (magicda != XFS_DA_NODE_MAGIC &&
371 5405 : magicda != XFS_DA3_NODE_MAGIC) {
372 : warnmsg = "Bad da node magic!";
373 : break;
374 : }
375 5405 : bp->b_ops = &xfs_da3_node_buf_ops;
376 5405 : break;
377 28726 : case XFS_BLFT_ATTR_LEAF_BUF:
378 28726 : if (magicda != XFS_ATTR_LEAF_MAGIC &&
379 28726 : magicda != XFS_ATTR3_LEAF_MAGIC) {
380 : warnmsg = "Bad attr leaf magic!";
381 : break;
382 : }
383 28726 : bp->b_ops = &xfs_attr3_leaf_buf_ops;
384 28726 : break;
385 0 : case XFS_BLFT_ATTR_RMT_BUF:
386 0 : if (magic32 != XFS_ATTR3_RMT_MAGIC) {
387 : warnmsg = "Bad attr remote magic!";
388 : break;
389 : }
390 0 : bp->b_ops = &xfs_attr3_rmt_buf_ops;
391 0 : break;
392 16779 : case XFS_BLFT_SB_BUF:
393 16779 : if (magic32 != XFS_SB_MAGIC) {
394 : warnmsg = "Bad SB block magic!";
395 : break;
396 : }
397 16779 : bp->b_ops = &xfs_sb_buf_ops;
398 16779 : break;
399 : #ifdef CONFIG_XFS_RT
400 : case XFS_BLFT_RTBITMAP_BUF:
401 29040 : if (xfs_has_rtgroups(mp) && magic32 != XFS_RTBITMAP_MAGIC) {
402 : warnmsg = "Bad rtbitmap magic!";
403 : break;
404 : }
405 29040 : bp->b_ops = xfs_rtblock_ops(mp, false);
406 29040 : break;
407 : case XFS_BLFT_RTSUMMARY_BUF:
408 25662 : if (xfs_has_rtgroups(mp) && magic32 != XFS_RTSUMMARY_MAGIC) {
409 : warnmsg = "Bad rtsummary magic!";
410 : break;
411 : }
412 25662 : bp->b_ops = xfs_rtblock_ops(mp, true);
413 25662 : break;
414 : #endif /* CONFIG_XFS_RT */
415 : default:
416 0 : xfs_warn(mp, "Unknown buffer type %d!",
417 : xfs_blft_from_flags(buf_f));
418 0 : break;
419 : }
420 :
421 : /*
422 : * Nothing else to do in the case of a NULL current LSN as this means
423 : * the buffer is more recent than the change in the log and will be
424 : * skipped.
425 : */
426 15436869 : if (current_lsn == NULLCOMMITLSN)
427 : return;
428 :
429 15082368 : if (warnmsg) {
430 0 : xfs_warn(mp, warnmsg);
431 0 : ASSERT(0);
432 : }
433 :
434 : /*
435 : * We must update the metadata LSN of the buffer as it is written out to
436 : * ensure that older transactions never replay over this one and corrupt
437 : * the buffer. This can occur if log recovery is interrupted at some
438 : * point after the current transaction completes, at which point a
439 : * subsequent mount starts recovery from the beginning.
440 : *
441 : * Write verifiers update the metadata LSN from log items attached to
442 : * the buffer. Therefore, initialize a bli purely to carry the LSN to
443 : * the verifier.
444 : */
445 15082368 : if (bp->b_ops) {
446 15082368 : struct xfs_buf_log_item *bip;
447 :
448 15082368 : bp->b_flags |= _XBF_LOGRECOVERY;
449 15082368 : xfs_buf_item_init(bp, mp);
450 15082368 : bip = bp->b_log_item;
451 15082368 : bip->bli_item.li_lsn = current_lsn;
452 : }
453 : }

/*
 * Perform a 'normal' buffer recovery. Each logged region of the buffer
 * should be copied over the corresponding region in the given buffer. The
 * bitmap in the buf log format structure indicates where to place the
 * logged data.
 */
STATIC void
xlog_recover_do_reg_buffer(
	struct xfs_mount	*mp,
	struct xlog_recover_item *item,
	struct xfs_buf		*bp,
	struct xfs_buf_log_format *buf_f,
	xfs_lsn_t		current_lsn)
{
	int			i;
	int			bit;
	int			nbits;
	xfs_failaddr_t		fa;
	const size_t		size_disk_dquot = sizeof(struct xfs_disk_dquot);

	trace_xfs_log_recover_buf_reg_buf(mp->m_log, buf_f);

	bit = 0;
	i = 1;	/* 0 is the buf format structure */
	while (1) {
		bit = xfs_next_bit(buf_f->blf_data_map,
				buf_f->blf_map_size, bit);
		if (bit == -1)
			break;
		nbits = xfs_contig_bits(buf_f->blf_data_map,
				buf_f->blf_map_size, bit);
		ASSERT(nbits > 0);
		ASSERT(item->ri_buf[i].i_addr != NULL);
		ASSERT(item->ri_buf[i].i_len % XFS_BLF_CHUNK == 0);
		ASSERT(BBTOB(bp->b_length) >=
		       ((uint)bit << XFS_BLF_SHIFT) + (nbits << XFS_BLF_SHIFT));

		/*
		 * The dirty regions logged in the buffer, even though
		 * contiguous, may span multiple chunks. This is because the
		 * dirty region may span a physical page boundary in a buffer
		 * and hence be split into two separate vectors for writing
		 * into the log. Hence we need to trim nbits back to the
		 * length of the current region being copied out of the log.
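		 *
		 * For example, a contiguous 768-byte dirty region spans six
		 * 128-byte chunks (nbits == 6), but if the first log vector
		 * holds only 512 bytes of it then nbits is trimmed to 4 below
		 * and the remaining two chunks are copied from the next
		 * vector on the following loop iteration.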
		 */
		if (item->ri_buf[i].i_len < (nbits << XFS_BLF_SHIFT))
			nbits = item->ri_buf[i].i_len >> XFS_BLF_SHIFT;

		/*
		 * Do a sanity check if this is a dquot buffer. Just checking
		 * the first dquot in the buffer should do. XXX This is
		 * probably a good thing to do for other buf types also.
		 */
		fa = NULL;
		if (buf_f->blf_flags &
		    (XFS_BLF_UDQUOT_BUF|XFS_BLF_PDQUOT_BUF|XFS_BLF_GDQUOT_BUF)) {
			if (item->ri_buf[i].i_addr == NULL) {
				xfs_alert(mp,
					"XFS: NULL dquot in %s.", __func__);
				goto next;
			}
			if (item->ri_buf[i].i_len < size_disk_dquot) {
				xfs_alert(mp,
					"XFS: dquot too small (%d) in %s.",
					item->ri_buf[i].i_len, __func__);
				goto next;
			}
			fa = xfs_dquot_verify(mp, item->ri_buf[i].i_addr, -1);
			if (fa) {
				xfs_alert(mp,
	"dquot corrupt at %pS trying to replay into block 0x%llx",
					fa, xfs_buf_daddr(bp));
				goto next;
			}
		}

		memcpy(xfs_buf_offset(bp,
			(uint)bit << XFS_BLF_SHIFT),	/* dest */
			item->ri_buf[i].i_addr,		/* source */
			nbits << XFS_BLF_SHIFT);	/* length */
next:
		i++;
		bit += nbits;
	}

	/* Shouldn't be any more regions */
	ASSERT(i == item->ri_total);

	xlog_recover_validate_buf_type(mp, bp, buf_f, current_lsn);
}

/*
 * Perform a dquot buffer recovery.
 * Simple algorithm: if we have found a QUOTAOFF log item of the same type
 * (i.e. USR or GRP), then just toss this buffer away; don't recover it.
 * Else, treat it as a regular buffer and do recovery.
 *
 * Return false if the buffer was tossed and true if we recovered the buffer
 * to indicate to the caller if the buffer needs writing.
 */
STATIC bool
xlog_recover_do_dquot_buffer(
	struct xfs_mount	*mp,
	struct xlog		*log,
	struct xlog_recover_item *item,
	struct xfs_buf		*bp,
	struct xfs_buf_log_format *buf_f)
{
	uint			type;

	trace_xfs_log_recover_buf_dquot_buf(log, buf_f);

	/*
	 * Filesystems are required to send in quota flags at mount time.
	 */
	if (!mp->m_qflags)
		return false;

	type = 0;
	if (buf_f->blf_flags & XFS_BLF_UDQUOT_BUF)
		type |= XFS_DQTYPE_USER;
	if (buf_f->blf_flags & XFS_BLF_PDQUOT_BUF)
		type |= XFS_DQTYPE_PROJ;
	if (buf_f->blf_flags & XFS_BLF_GDQUOT_BUF)
		type |= XFS_DQTYPE_GROUP;
	/*
	 * If this type of quotas was turned off, ignore this buffer.
	 */
	if (log->l_quotaoffs_flag & type)
		return false;

	xlog_recover_do_reg_buffer(mp, item, bp, buf_f, NULLCOMMITLSN);
	return true;
}

/*
 * Perform recovery for a buffer full of inodes. In these buffers, the only
 * data which should be recovered is that which corresponds to the
 * di_next_unlinked pointers in the on disk inode structures. The rest of the
 * data for the inodes is always logged through the inodes themselves rather
 * than the inode buffer and is recovered in xlog_recover_inode_pass2().
 *
 * The only time when buffers full of inodes are fully recovered is when the
 * buffer is full of newly allocated inodes. In this case the buffer will
 * not be marked as an inode buffer and so will be sent to
 * xlog_recover_do_reg_buffer() below during recovery.
602 : STATIC int
603 35870 : xlog_recover_do_inode_buffer(
604 : struct xfs_mount *mp,
605 : struct xlog_recover_item *item,
606 : struct xfs_buf *bp,
607 : struct xfs_buf_log_format *buf_f)
608 : {
609 35870 : int i;
610 35870 : int item_index = 0;
611 35870 : int bit = 0;
612 35870 : int nbits = 0;
613 35870 : int reg_buf_offset = 0;
614 35870 : int reg_buf_bytes = 0;
615 35870 : int next_unlinked_offset;
616 35870 : int inodes_per_buf;
617 35870 : xfs_agino_t *logged_nextp;
618 35870 : xfs_agino_t *buffer_nextp;
619 :
620 35870 : trace_xfs_log_recover_buf_inode_buf(mp->m_log, buf_f);
621 :
622 : /*
623 : * Post recovery validation only works properly on CRC enabled
624 : * filesystems.
625 : */
626 35870 : if (xfs_has_crc(mp))
627 35870 : bp->b_ops = &xfs_inode_buf_ops;
628 :
629 35870 : inodes_per_buf = BBTOB(bp->b_length) >> mp->m_sb.sb_inodelog;
630 1115621 : for (i = 0; i < inodes_per_buf; i++) {
631 1086859 : next_unlinked_offset = (i * mp->m_sb.sb_inodesize) +
632 : offsetof(struct xfs_dinode, di_next_unlinked);
633 :
634 1086859 : while (next_unlinked_offset >=
635 2010665 : (reg_buf_offset + reg_buf_bytes)) {
636 : /*
637 : * The next di_next_unlinked field is beyond
638 : * the current logged region. Find the next
639 : * logged region that contains or is beyond
640 : * the current di_next_unlinked field.
641 : */
642 930914 : bit += nbits;
643 930914 : bit = xfs_next_bit(buf_f->blf_data_map,
644 : buf_f->blf_map_size, bit);
645 :
646 : /*
647 : * If there are no more logged regions in the
648 : * buffer, then we're done.
649 : */
650 930914 : if (bit == -1)
651 : return 0;
652 :
653 923806 : nbits = xfs_contig_bits(buf_f->blf_data_map,
654 : buf_f->blf_map_size, bit);
655 923806 : ASSERT(nbits > 0);
656 923806 : reg_buf_offset = bit << XFS_BLF_SHIFT;
657 923806 : reg_buf_bytes = nbits << XFS_BLF_SHIFT;
658 923806 : item_index++;
659 : }
660 :
661 : /*
662 : * If the current logged region starts after the current
663 : * di_next_unlinked field, then move on to the next
664 : * di_next_unlinked field.
665 : */
666 1079751 : if (next_unlinked_offset < reg_buf_offset)
667 155945 : continue;
668 :
669 923806 : ASSERT(item->ri_buf[item_index].i_addr != NULL);
670 923806 : ASSERT((item->ri_buf[item_index].i_len % XFS_BLF_CHUNK) == 0);
671 923806 : ASSERT((reg_buf_offset + reg_buf_bytes) <= BBTOB(bp->b_length));
672 :
673 : /*
674 : * The current logged region contains a copy of the
675 : * current di_next_unlinked field. Extract its value
676 : * and copy it to the buffer copy.
677 : */
678 923806 : logged_nextp = item->ri_buf[item_index].i_addr +
679 923806 : next_unlinked_offset - reg_buf_offset;
680 923806 : if (XFS_IS_CORRUPT(mp, *logged_nextp == 0)) {
681 0 : xfs_alert(mp,
682 : "Bad inode buffer log record (ptr = "PTR_FMT", bp = "PTR_FMT"). "
683 : "Trying to replay bad (0) inode di_next_unlinked field.",
684 : item, bp);
685 0 : return -EFSCORRUPTED;
686 : }
687 :
688 923806 : buffer_nextp = xfs_buf_offset(bp, next_unlinked_offset);
689 923806 : *buffer_nextp = *logged_nextp;
690 :
691 : /*
692 : * If necessary, recalculate the CRC in the on-disk inode. We
693 : * have to leave the inode in a consistent state for whoever
694 : * reads it next....
695 : */
696 923806 : xfs_dinode_calc_crc(mp,
697 923806 : xfs_buf_offset(bp, i * mp->m_sb.sb_inodesize));
698 :
699 : }
700 :
701 : return 0;
702 : }

/*
 * V5 filesystems know the age of the buffer on disk being recovered. We can
 * have newer objects on disk than we are replaying, and so for these cases we
 * don't want to replay the current change as that will make the buffer
 * contents temporarily invalid on disk.
 *
 * The magic number might not match the buffer type we are going to recover
 * (e.g. reallocated blocks), so we ignore the xfs_buf_log_format flags. Hence
 * extract the LSN of the existing object in the buffer based on its current
 * magic number. If we don't recognise the magic number in the buffer, then
 * return an LSN of -1 so that the caller knows it was an unrecognised block
 * and so can recover the buffer.
 *
 * Note: we cannot rely solely on magic number matches to determine that the
 * buffer has a valid LSN - we also need to verify that it belongs to this
 * filesystem, so we need to extract the object's LSN and compare it to that
 * which we read from the superblock. If the UUIDs don't match, then we've got
 * a stale metadata block from an old filesystem instance that we need to
 * recover over the top of.
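 *
 * For example, a block still carrying XFS_AGF_MAGIC and this filesystem's
 * UUID yields the agf_lsn stamped at its last write, and the caller skips
 * replay when that LSN is at or beyond the transaction being recovered;
 * the same block with a foreign UUID is stale and is recovered immediately.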
 */
static xfs_lsn_t
xlog_recover_get_buf_lsn(
	struct xfs_mount	*mp,
	struct xfs_buf		*bp,
	struct xfs_buf_log_format *buf_f)
{
	uint32_t		magic32;
	uint16_t		magic16;
	uint16_t		magicda;
	void			*blk = bp->b_addr;
	uuid_t			*uuid;
	xfs_lsn_t		lsn = -1;
	uint16_t		blft;

	/* v4 filesystems always recover immediately */
	if (!xfs_has_crc(mp))
		goto recover_immediately;

	/*
	 * realtime bitmap and summary file blocks do not have magic numbers or
	 * UUIDs, so we must recover them immediately.
	 */
	blft = xfs_blft_from_flags(buf_f);
	if (!xfs_has_rtgroups(mp) && (blft == XFS_BLFT_RTBITMAP_BUF ||
	    blft == XFS_BLFT_RTSUMMARY_BUF))
		goto recover_immediately;

	magic32 = be32_to_cpu(*(__be32 *)blk);
	switch (magic32) {
	case XFS_RTSUMMARY_MAGIC:
	case XFS_RTBITMAP_MAGIC: {
		struct xfs_rtbuf_blkinfo *hdr = blk;

		lsn = be64_to_cpu(hdr->rt_lsn);
		uuid = &hdr->rt_uuid;
		break;
	}
	case XFS_ABTB_CRC_MAGIC:
	case XFS_ABTC_CRC_MAGIC:
	case XFS_ABTB_MAGIC:
	case XFS_ABTC_MAGIC:
	case XFS_RMAP_CRC_MAGIC:
	case XFS_REFC_CRC_MAGIC:
	case XFS_FIBT_CRC_MAGIC:
	case XFS_FIBT_MAGIC:
	case XFS_IBT_CRC_MAGIC:
	case XFS_IBT_MAGIC: {
		struct xfs_btree_block *btb = blk;

		lsn = be64_to_cpu(btb->bb_u.s.bb_lsn);
		uuid = &btb->bb_u.s.bb_uuid;
		break;
	}
	case XFS_RTRMAP_CRC_MAGIC:
	case XFS_RTREFC_CRC_MAGIC:
	case XFS_BMAP_CRC_MAGIC:
	case XFS_BMAP_MAGIC: {
		struct xfs_btree_block *btb = blk;

		lsn = be64_to_cpu(btb->bb_u.l.bb_lsn);
		uuid = &btb->bb_u.l.bb_uuid;
		break;
	}
	case XFS_AGF_MAGIC:
		lsn = be64_to_cpu(((struct xfs_agf *)blk)->agf_lsn);
		uuid = &((struct xfs_agf *)blk)->agf_uuid;
		break;
	case XFS_AGFL_MAGIC:
		lsn = be64_to_cpu(((struct xfs_agfl *)blk)->agfl_lsn);
		uuid = &((struct xfs_agfl *)blk)->agfl_uuid;
		break;
	case XFS_AGI_MAGIC:
		lsn = be64_to_cpu(((struct xfs_agi *)blk)->agi_lsn);
		uuid = &((struct xfs_agi *)blk)->agi_uuid;
		break;
	case XFS_SYMLINK_MAGIC:
		lsn = be64_to_cpu(((struct xfs_dsymlink_hdr *)blk)->sl_lsn);
		uuid = &((struct xfs_dsymlink_hdr *)blk)->sl_uuid;
		break;
	case XFS_DIR3_BLOCK_MAGIC:
	case XFS_DIR3_DATA_MAGIC:
	case XFS_DIR3_FREE_MAGIC:
		lsn = be64_to_cpu(((struct xfs_dir3_blk_hdr *)blk)->lsn);
		uuid = &((struct xfs_dir3_blk_hdr *)blk)->uuid;
		break;
	case XFS_ATTR3_RMT_MAGIC:
		/*
		 * Remote attr blocks are written synchronously, rather than
		 * being logged. That means they do not contain a valid LSN
		 * (i.e. transactionally ordered) in them, and hence any time
		 * we see a buffer to replay over the top of a remote attribute
		 * block we should simply do so.
		 */
		goto recover_immediately;
	case XFS_SB_MAGIC:
		/*
		 * superblock uuids are magic. We may or may not have a
		 * sb_meta_uuid on disk, but it will be set in the in-core
		 * superblock. We set the uuid pointer for verification
		 * according to the superblock feature mask to ensure we check
		 * the relevant UUID in the superblock.
		 */
		lsn = be64_to_cpu(((struct xfs_dsb *)blk)->sb_lsn);
		if (xfs_has_metauuid(mp))
			uuid = &((struct xfs_dsb *)blk)->sb_meta_uuid;
		else
			uuid = &((struct xfs_dsb *)blk)->sb_uuid;
		break;
	default:
		break;
	}

	if (lsn != (xfs_lsn_t)-1) {
		if (!uuid_equal(&mp->m_sb.sb_meta_uuid, uuid))
			goto recover_immediately;
		return lsn;
	}

	magicda = be16_to_cpu(((struct xfs_da_blkinfo *)blk)->magic);
	switch (magicda) {
	case XFS_DIR3_LEAF1_MAGIC:
	case XFS_DIR3_LEAFN_MAGIC:
	case XFS_ATTR3_LEAF_MAGIC:
	case XFS_DA3_NODE_MAGIC:
		lsn = be64_to_cpu(((struct xfs_da3_blkinfo *)blk)->lsn);
		uuid = &((struct xfs_da3_blkinfo *)blk)->uuid;
		break;
	default:
		break;
	}

	if (lsn != (xfs_lsn_t)-1) {
		if (!uuid_equal(&mp->m_sb.sb_meta_uuid, uuid))
			goto recover_immediately;
		return lsn;
	}

	/*
	 * We do individual object checks on dquot and inode buffers as they
	 * have their own individual LSN records. Also, we could have a stale
	 * buffer here, so we have to at least recognise these buffer types.
	 *
	 * A noted complexity here is inode unlinked list processing - it logs
	 * the inode directly in the buffer, but we don't know which inodes
	 * have been modified, and there is no global buffer LSN. Hence we
	 * need to recover all inode buffer types immediately. This problem
	 * will be fixed by logical logging of the unlinked list modifications.
	 */
	magic16 = be16_to_cpu(*(__be16 *)blk);
	switch (magic16) {
	case XFS_DQUOT_MAGIC:
	case XFS_DINODE_MAGIC:
		goto recover_immediately;
	default:
		break;
	}

	/* unknown buffer contents, recover immediately */

recover_immediately:
	return (xfs_lsn_t)-1;
}

/*
 * This routine replays a modification made to a buffer at runtime.
 * There are actually two types of buffer, regular and inode, which are
 * handled differently. For inode buffers we only recover a specific set of
 * data, namely the inode di_next_unlinked fields. This is because all other
 * inode data is actually logged via inode records and any data we replay
 * here which overlaps that may be stale.
 *
 * When meta-data buffers are freed at run time we log a buffer item with the
 * XFS_BLF_CANCEL bit set to indicate that previous copies of the buffer in
 * the log should not be replayed at recovery time. This is so that if the
 * blocks covered by the buffer are reused for file data before we crash we
 * don't end up replaying old, freed meta-data into a user's file.
 *
 * To handle the cancellation of buffer log items, we make two passes over
 * the log during recovery. During the first we build a table of those
 * buffers which have been cancelled, and during the second we only replay
 * those buffers which do not have corresponding cancel records in the table.
 * See xlog_recover_buf_commit_pass[1,2] above for more details on the
 * implementation of the table of cancel records.
 */
STATIC int
xlog_recover_buf_commit_pass2(
	struct xlog		*log,
	struct list_head	*buffer_list,
	struct xlog_recover_item *item,
	xfs_lsn_t		current_lsn)
{
	struct xfs_buf_log_format *buf_f = item->ri_buf[0].i_addr;
	struct xfs_mount	*mp = log->l_mp;
	struct xfs_buf		*bp;
	int			error;
	uint			buf_flags;
	xfs_lsn_t		lsn;

	/*
	 * In this pass we only want to recover all the buffers which have
	 * not been cancelled and are not cancellation buffers themselves.
	 */
	if (buf_f->blf_flags & XFS_BLF_CANCEL) {
		if (xlog_put_buffer_cancelled(log, buf_f->blf_blkno,
				buf_f->blf_len))
			goto cancelled;
	} else {
		if (xlog_is_buffer_cancelled(log, buf_f->blf_blkno,
				buf_f->blf_len))
			goto cancelled;
	}

	trace_xfs_log_recover_buf_recover(log, buf_f);

	buf_flags = 0;
	if (buf_f->blf_flags & XFS_BLF_INODE_BUF)
		buf_flags |= XBF_UNMAPPED;

	error = xfs_buf_read(mp->m_ddev_targp, buf_f->blf_blkno,
			buf_f->blf_len, buf_flags, &bp, NULL);
	if (error)
		return error;

	/*
	 * Recover the buffer only if we get an LSN from it and it's less than
	 * the LSN of the transaction we are replaying.
	 *
	 * Note that we have to be extremely careful of readahead here.
	 * Readahead does not attach verifiers to the buffers, so if we don't
	 * actually do any replay after readahead because the LSN we found in
	 * the buffer is more recent than the current transaction, then we
	 * need to attach the verifier directly. Failure to do so can lead to
	 * future recovery actions (e.g. EFI and unlinked list recovery)
	 * operating on the buffers without the verifier attached. This can
	 * lead to blocks on disk having the correct content but a stale CRC.
	 *
	 * It is safe to assume these clean buffers are currently up to date.
	 * If the buffer is dirtied by a later transaction being replayed, then
	 * the verifier will be reset to match whatever recover turns that
	 * buffer into.
	 */
	lsn = xlog_recover_get_buf_lsn(mp, bp, buf_f);
	if (lsn && lsn != -1 && XFS_LSN_CMP(lsn, current_lsn) >= 0) {
		trace_xfs_log_recover_buf_skip(log, buf_f);
		xlog_recover_validate_buf_type(mp, bp, buf_f, NULLCOMMITLSN);

		/*
		 * We're skipping replay of this buffer log item due to the log
		 * item LSN being behind the ondisk buffer. Verify the buffer
		 * contents since we aren't going to run the write verifier.
		 */
		if (bp->b_ops) {
			bp->b_ops->verify_read(bp);
			error = bp->b_error;
		}
		goto out_release;
	}

	if (buf_f->blf_flags & XFS_BLF_INODE_BUF) {
		error = xlog_recover_do_inode_buffer(mp, item, bp, buf_f);
		if (error)
			goto out_release;
	} else if (buf_f->blf_flags &
		  (XFS_BLF_UDQUOT_BUF|XFS_BLF_PDQUOT_BUF|XFS_BLF_GDQUOT_BUF)) {
		bool	dirty;

		dirty = xlog_recover_do_dquot_buffer(mp, log, item, bp, buf_f);
		if (!dirty)
			goto out_release;
	} else {
		xlog_recover_do_reg_buffer(mp, item, bp, buf_f, current_lsn);
	}

	/*
	 * Perform delayed write on the buffer. Asynchronous writes will be
	 * slower when taking into account all the buffers to be flushed.
	 *
	 * Also make sure that only inode buffers with good sizes stay in
	 * the buffer cache. The kernel moves inodes in buffers of 1 block
	 * or inode_cluster_size bytes, whichever is bigger. The inode
	 * buffers in the log can be a different size if the log was generated
	 * by an older kernel using unclustered inode buffers or a newer kernel
	 * running with a different inode cluster size. Regardless, if
	 * the inode buffer size isn't max(blocksize, inode_cluster_size)
	 * for *our* value of inode_cluster_size, then we need to keep
	 * the buffer out of the buffer cache so that the buffer won't
	 * overlap with future reads of those inodes.
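	 *
	 * For example, on a filesystem with 4k blocks and a 16k
	 * inode_cluster_size, a logged inode buffer of any other size is
	 * written straight out via xfs_bwrite() below and marked stale so
	 * it cannot linger in the cache.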
	 */
	if (XFS_DINODE_MAGIC ==
	    be16_to_cpu(*((__be16 *)xfs_buf_offset(bp, 0))) &&
	    (BBTOB(bp->b_length) != M_IGEO(log->l_mp)->inode_cluster_size)) {
		xfs_buf_stale(bp);
		error = xfs_bwrite(bp);
	} else {
		ASSERT(bp->b_mount == mp);
		bp->b_flags |= _XBF_LOGRECOVERY;
		xfs_buf_delwri_queue(bp, buffer_list);

		/*
		 * Update the primary rt super if we just recovered the primary
		 * fs super.
		 */
		if (xfs_has_rtgroups(mp) && bp->b_ops == &xfs_sb_buf_ops) {
			struct xfs_buf	*rtsb_bp = mp->m_rtsb_bp;

			if (rtsb_bp) {
				xfs_buf_lock(rtsb_bp);
				xfs_buf_hold(rtsb_bp);
				xfs_rtgroup_update_super(rtsb_bp, bp);
				rtsb_bp->b_flags |= _XBF_LOGRECOVERY;
				xfs_buf_delwri_queue(rtsb_bp, buffer_list);
				xfs_buf_relse(rtsb_bp);
			}
		}
	}

out_release:
	xfs_buf_relse(bp);
	return error;
cancelled:
	trace_xfs_log_recover_buf_cancel(log, buf_f);
	return 0;
}

const struct xlog_recover_item_ops xlog_buf_item_ops = {
	.item_type	= XFS_LI_BUF,
	.reorder	= xlog_recover_buf_reorder,
	.ra_pass2	= xlog_recover_buf_ra_pass2,
	.commit_pass1	= xlog_recover_buf_commit_pass1,
	.commit_pass2	= xlog_recover_buf_commit_pass2,
};

#ifdef DEBUG
void
xlog_check_buf_cancel_table(
	struct xlog	*log)
{
	int		i;

	for (i = 0; i < XLOG_BC_TABLE_SIZE; i++)
		ASSERT(list_empty(&log->l_buf_cancel_table[i]));
}
#endif

int
xlog_alloc_buf_cancel_table(
	struct xlog	*log)
{
	void		*p;
	int		i;

	ASSERT(log->l_buf_cancel_table == NULL);

	p = kmalloc_array(XLOG_BC_TABLE_SIZE, sizeof(struct list_head),
			GFP_KERNEL);
	if (!p)
		return -ENOMEM;

	log->l_buf_cancel_table = p;
	for (i = 0; i < XLOG_BC_TABLE_SIZE; i++)
		INIT_LIST_HEAD(&log->l_buf_cancel_table[i]);

	return 0;
}

void
xlog_free_buf_cancel_table(
	struct xlog	*log)
{
	int		i;

	if (!log->l_buf_cancel_table)
		return;

	for (i = 0; i < XLOG_BC_TABLE_SIZE; i++) {
		struct xfs_buf_cancel	*bc;

		while ((bc = list_first_entry_or_null(
				&log->l_buf_cancel_table[i],
				struct xfs_buf_cancel, bc_list))) {
			list_del(&bc->bc_list);
			kmem_free(bc);
		}
	}

	kmem_free(log->l_buf_cancel_table);
	log->l_buf_cancel_table = NULL;
}
1116 : }
|