Line data Source code
1 : // SPDX-License-Identifier: GPL-2.0
2 : /*
3 : * Copyright (c) 2000-2006 Silicon Graphics, Inc.
4 : * All Rights Reserved.
5 : */
6 : #include "xfs.h"
7 : #include "xfs_fs.h"
8 : #include "xfs_shared.h"
9 : #include "xfs_format.h"
10 : #include "xfs_log_format.h"
11 : #include "xfs_trans_resv.h"
12 : #include "xfs_bit.h"
13 : #include "xfs_mount.h"
14 : #include "xfs_trans.h"
15 : #include "xfs_buf_item.h"
16 : #include "xfs_trans_priv.h"
17 : #include "xfs_trace.h"
18 : #include "xfs_log.h"
19 : #include "xfs_log_priv.h"
20 : #include "xfs_log_recover.h"
21 : #include "xfs_error.h"
22 : #include "xfs_inode.h"
23 : #include "xfs_dir2.h"
24 : #include "xfs_quota.h"
25 : #include "xfs_rtgroup.h"
26 : #include "xfs_rtbitmap.h"
27 :
28 : /*
29 : * This is the number of entries in the l_buf_cancel_table used during
30 : * recovery.
31 : */
32 : #define XLOG_BC_TABLE_SIZE 64
33 :
34 : #define XLOG_BUF_CANCEL_BUCKET(log, blkno) \
35 : ((log)->l_buf_cancel_table + ((uint64_t)blkno % XLOG_BC_TABLE_SIZE))
36 :
37 : /*
38 : * This structure is used during recovery to record the buf log items which
39 : * have been canceled and should not be replayed.
40 : */
41 : struct xfs_buf_cancel {
42 : xfs_daddr_t bc_blkno;
43 : uint bc_len;
44 : int bc_refcount;
45 : struct list_head bc_list;
46 : };
47 :
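/*
 * Editor's sketch (not part of the driver source): how a cancelled
 * buffer's start block selects one of the XLOG_BC_TABLE_SIZE hash
 * buckets above. A minimal userspace model; the real table chains
 * struct xfs_buf_cancel records off per-bucket list heads.
 */
#include <stdint.h>
#include <stdio.h>

#define BC_TABLE_SIZE 64	/* mirrors XLOG_BC_TABLE_SIZE */

int main(void)
{
	uint64_t blknos[] = { 0, 63, 64, 131, 8480 };
	size_t i;

	for (i = 0; i < sizeof(blknos) / sizeof(blknos[0]); i++)
		printf("blkno %llu -> bucket %llu\n",
		       (unsigned long long)blknos[i],
		       (unsigned long long)(blknos[i] % BC_TABLE_SIZE));
	return 0;
}
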
48 : static struct xfs_buf_cancel *
49 45302926 : xlog_find_buffer_cancelled(
50 : struct xlog *log,
51 : xfs_daddr_t blkno,
52 : uint len)
53 : {
54 45302926 : struct list_head *bucket;
55 45302926 : struct xfs_buf_cancel *bcp;
56 :
57 45302926 : if (!log->l_buf_cancel_table)
58 : return NULL;
59 :
60 45302926 : bucket = XLOG_BUF_CANCEL_BUCKET(log, blkno);
61 302473724 : list_for_each_entry(bcp, bucket, bc_list) {
62 259389782 : if (bcp->bc_blkno == blkno && bcp->bc_len == len)
63 2218984 : return bcp;
64 : }
65 :
66 : return NULL;
67 : }
68 :
69 : static bool
70 389942 : xlog_add_buffer_cancelled(
71 : struct xlog *log,
72 : xfs_daddr_t blkno,
73 : uint len)
74 : {
75 389942 : struct xfs_buf_cancel *bcp;
76 :
77 : /*
78 : * If we find an existing cancel record, this indicates that the buffer
79 : * was cancelled multiple times. To ensure that during pass 2 we keep
80 : * the record in the table until we reach its last occurrence in the
81 : * log, a reference count is kept to tell how many times we expect to
82 : * see this record during the second pass.
83 : */
84 389942 : bcp = xlog_find_buffer_cancelled(log, blkno, len);
85 389942 : if (bcp) {
86 69852 : bcp->bc_refcount++;
87 69852 : return false;
88 : }
89 :
90 320090 : bcp = kmem_alloc(sizeof(struct xfs_buf_cancel), 0);
91 320090 : bcp->bc_blkno = blkno;
92 320090 : bcp->bc_len = len;
93 320090 : bcp->bc_refcount = 1;
94 320090 : list_add_tail(&bcp->bc_list, XLOG_BUF_CANCEL_BUCKET(log, blkno));
95 320090 : return true;
96 : }
97 :
98 : /*
99 : * Check if there is an entry for blkno, len in the buffer cancel record table.
100 : */
101 : bool
102 31277092 : xlog_is_buffer_cancelled(
103 : struct xlog *log,
104 : xfs_daddr_t blkno,
105 : uint len)
106 : {
107 31277092 : return xlog_find_buffer_cancelled(log, blkno, len) != NULL;
108 : }
109 :
110 : /*
111 : * Check if there is an entry for blkno, len in the buffer cancel record table,
112 : * and decrement the reference count on it if there is one.
113 : *
114 : * Remove the cancel record once the refcount hits zero, so that if the same
115 : * buffer is re-used again after its last cancellation we actually replay the
116 : * changes made at that point.
117 : */
118 : static bool
119 389942 : xlog_put_buffer_cancelled(
120 : struct xlog *log,
121 : xfs_daddr_t blkno,
122 : uint len)
123 : {
124 389942 : struct xfs_buf_cancel *bcp;
125 :
126 389942 : bcp = xlog_find_buffer_cancelled(log, blkno, len);
127 389942 : if (!bcp) {
128 0 : ASSERT(0);
129 0 : return false;
130 : }
131 :
132 389942 : if (--bcp->bc_refcount == 0) {
133 320090 : list_del(&bcp->bc_list);
134 320090 : kmem_free(bcp);
135 : }
136 : return true;
137 : }
138 :
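/*
 * Editor's sketch (not part of the driver source): the two-pass
 * refcount protocol modelled in userspace. Pass 1 takes one reference
 * per cancel record seen in the log; pass 2 drops one per occurrence
 * and frees the record at zero, so a buffer re-used after its last
 * cancellation is replayed normally.
 */
#include <assert.h>

struct toy_cancel { int refcount; };

int main(void)
{
	struct toy_cancel rec = { .refcount = 1 };	/* pass 1: first cancel */

	rec.refcount++;			/* pass 1: buffer cancelled again */

	assert(--rec.refcount == 1);	/* pass 2: first occurrence, keep */
	assert(--rec.refcount == 0);	/* pass 2: last occurrence, free */
	return 0;
}
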
139 : /* log buffer item recovery */
140 :
141 : /*
142 : * Sort buffer items for log recovery. Most buffer items should end up on the
143 : * buffer list and are recovered first, with the following exceptions:
144 : *
145 : * 1. XFS_BLF_CANCEL buffers must be processed last because some log items
146 : * might depend on the incore cancellation record, and replaying a cancelled
147 : * buffer item can remove the incore record.
148 : *
149 : * 2. XFS_BLF_INODE_BUF buffers are handled after most regular items so that
150 : * we replay di_next_unlinked only after flushing the inode 'free' state
151 : * to the inode buffer.
152 : *
153 : * See xlog_recover_reorder_trans for more details.
154 : */
155 : STATIC enum xlog_recover_reorder
156 27271784 : xlog_recover_buf_reorder(
157 : struct xlog_recover_item *item)
158 : {
159 27271784 : struct xfs_buf_log_format *buf_f = item->ri_buf[0].i_addr;
160 :
161 27271784 : if (buf_f->blf_flags & XFS_BLF_CANCEL)
162 : return XLOG_REORDER_CANCEL_LIST;
163 26491900 : if (buf_f->blf_flags & XFS_BLF_INODE_BUF)
164 17756 : return XLOG_REORDER_INODE_BUFFER_LIST;
165 : return XLOG_REORDER_BUFFER_LIST;
166 : }
167 :
168 : STATIC void
169 13635892 : xlog_recover_buf_ra_pass2(
170 : struct xlog *log,
171 : struct xlog_recover_item *item)
172 : {
173 13635892 : struct xfs_buf_log_format *buf_f = item->ri_buf[0].i_addr;
174 :
175 13635892 : xlog_buf_readahead(log, buf_f->blf_blkno, buf_f->blf_len, NULL);
176 13635892 : }
177 :
178 : /*
179 : * Build up the table of buf cancel records so that we don't replay cancelled
180 : * data in the second pass.
181 : */
182 : static int
183 13635892 : xlog_recover_buf_commit_pass1(
184 : struct xlog *log,
185 : struct xlog_recover_item *item)
186 : {
187 13635892 : struct xfs_buf_log_format *bf = item->ri_buf[0].i_addr;
188 :
189 13635892 : if (!xfs_buf_log_check_iovec(&item->ri_buf[0])) {
190 0 : xfs_err(log->l_mp, "bad buffer log item size (%d)",
191 : item->ri_buf[0].i_len);
192 0 : return -EFSCORRUPTED;
193 : }
194 :
195 13635892 : if (!(bf->blf_flags & XFS_BLF_CANCEL))
196 13245950 : trace_xfs_log_recover_buf_not_cancel(log, bf);
197 389942 : else if (xlog_add_buffer_cancelled(log, bf->blf_blkno, bf->blf_len))
198 320090 : trace_xfs_log_recover_buf_cancel_add(log, bf);
199 : else
200 69852 : trace_xfs_log_recover_buf_cancel_ref_inc(log, bf);
201 : return 0;
202 : }
203 :
204 : /*
205 : * Validate the recovered buffer is of the correct type and attach the
206 : * appropriate buffer operations to it for writeback. Magic numbers are in a
207 : * few places:
208 : * the first 16 bits of the buffer (inode buffer, dquot buffer),
209 : * the first 32 bits of the buffer (most blocks),
210 : * inside a struct xfs_da_blkinfo at the start of the buffer.
211 : */
212 : static void
213 12786640 : xlog_recover_validate_buf_type(
214 : struct xfs_mount *mp,
215 : struct xfs_buf *bp,
216 : struct xfs_buf_log_format *buf_f,
217 : xfs_lsn_t current_lsn)
218 : {
219 12786640 : struct xfs_da_blkinfo *info = bp->b_addr;
220 12786640 : uint32_t magic32;
221 12786640 : uint16_t magic16;
222 12786640 : uint16_t magicda;
223 12786640 : char *warnmsg = NULL;
224 :
225 : /*
226 : * We can only do post recovery validation on items on CRC enabled
227 : * filesystems as we need to know when the buffer was written to be able
228 : * to determine if we should have replayed the item. If we replay old
229 : * metadata over a newer buffer, then it will enter a temporarily
230 : * inconsistent state resulting in verification failures. Hence for now
231 : * just avoid the verification stage for non-crc filesystems.
232 : */
233 12786640 : if (!xfs_has_crc(mp))
234 : return;
235 :
236 12786640 : magic32 = be32_to_cpu(*(__be32 *)bp->b_addr);
237 12786640 : magic16 = be16_to_cpu(*(__be16*)bp->b_addr);
238 12786640 : magicda = be16_to_cpu(info->magic);
239 12786640 : switch (xfs_blft_from_flags(buf_f)) {
240 10393097 : case XFS_BLFT_BTREE_BUF:
241 10393097 : switch (magic32) {
242 1016177 : case XFS_ABTB_CRC_MAGIC:
243 : case XFS_ABTB_MAGIC:
244 1016177 : bp->b_ops = &xfs_bnobt_buf_ops;
245 1016177 : break;
246 1291841 : case XFS_ABTC_CRC_MAGIC:
247 : case XFS_ABTC_MAGIC:
248 1291841 : bp->b_ops = &xfs_cntbt_buf_ops;
249 1291841 : break;
250 310700 : case XFS_IBT_CRC_MAGIC:
251 : case XFS_IBT_MAGIC:
252 310700 : bp->b_ops = &xfs_inobt_buf_ops;
253 310700 : break;
254 308753 : case XFS_FIBT_CRC_MAGIC:
255 : case XFS_FIBT_MAGIC:
256 308753 : bp->b_ops = &xfs_finobt_buf_ops;
257 308753 : break;
258 1023122 : case XFS_BMAP_CRC_MAGIC:
259 : case XFS_BMAP_MAGIC:
260 1023122 : bp->b_ops = &xfs_bmbt_buf_ops;
261 1023122 : break;
262 0 : case XFS_RTRMAP_CRC_MAGIC:
263 0 : bp->b_ops = &xfs_rtrmapbt_buf_ops;
264 0 : break;
265 5937914 : case XFS_RMAP_CRC_MAGIC:
266 5937914 : bp->b_ops = &xfs_rmapbt_buf_ops;
267 5937914 : break;
268 504590 : case XFS_REFC_CRC_MAGIC:
269 504590 : bp->b_ops = &xfs_refcountbt_buf_ops;
270 504590 : break;
271 0 : case XFS_RTREFC_CRC_MAGIC:
272 0 : bp->b_ops = &xfs_rtrefcountbt_buf_ops;
273 0 : break;
274 : default:
275 : warnmsg = "Bad btree block magic!";
276 : break;
277 : }
278 : break;
279 687621 : case XFS_BLFT_AGF_BUF:
280 687621 : if (magic32 != XFS_AGF_MAGIC) {
281 : warnmsg = "Bad AGF block magic!";
282 : break;
283 : }
284 687621 : bp->b_ops = &xfs_agf_buf_ops;
285 687621 : break;
286 18882 : case XFS_BLFT_AGFL_BUF:
287 18882 : if (magic32 != XFS_AGFL_MAGIC) {
288 : warnmsg = "Bad AGFL block magic!";
289 : break;
290 : }
291 18882 : bp->b_ops = &xfs_agfl_buf_ops;
292 18882 : break;
293 351625 : case XFS_BLFT_AGI_BUF:
294 351625 : if (magic32 != XFS_AGI_MAGIC) {
295 : warnmsg = "Bad AGI block magic!";
296 : break;
297 : }
298 351625 : bp->b_ops = &xfs_agi_buf_ops;
299 351625 : break;
300 297688 : case XFS_BLFT_UDQUOT_BUF:
301 : case XFS_BLFT_PDQUOT_BUF:
302 : case XFS_BLFT_GDQUOT_BUF:
303 : #ifdef CONFIG_XFS_QUOTA
304 297688 : if (magic16 != XFS_DQUOT_MAGIC) {
305 : warnmsg = "Bad DQUOT block magic!";
306 : break;
307 : }
308 297688 : bp->b_ops = &xfs_dquot_buf_ops;
309 : #else
310 : xfs_alert(mp,
311 : "Trying to recover dquots without QUOTA support built in!");
312 : ASSERT(0);
313 : #endif
314 297688 : break;
315 0 : case XFS_BLFT_DINO_BUF:
316 0 : if (magic16 != XFS_DINODE_MAGIC) {
317 : warnmsg = "Bad INODE block magic!";
318 : break;
319 : }
320 0 : bp->b_ops = &xfs_inode_buf_ops;
321 0 : break;
322 7136 : case XFS_BLFT_SYMLINK_BUF:
323 7136 : if (magic32 != XFS_SYMLINK_MAGIC) {
324 : warnmsg = "Bad symlink block magic!";
325 : break;
326 : }
327 7136 : bp->b_ops = &xfs_symlink_buf_ops;
328 7136 : break;
329 12676 : case XFS_BLFT_DIR_BLOCK_BUF:
330 12676 : if (magic32 != XFS_DIR2_BLOCK_MAGIC &&
331 12676 : magic32 != XFS_DIR3_BLOCK_MAGIC) {
332 : warnmsg = "Bad dir block magic!";
333 : break;
334 : }
335 12676 : bp->b_ops = &xfs_dir3_block_buf_ops;
336 12676 : break;
337 475301 : case XFS_BLFT_DIR_DATA_BUF:
338 475301 : if (magic32 != XFS_DIR2_DATA_MAGIC &&
339 475301 : magic32 != XFS_DIR3_DATA_MAGIC) {
340 : warnmsg = "Bad dir data magic!";
341 : break;
342 : }
343 475301 : bp->b_ops = &xfs_dir3_data_buf_ops;
344 475301 : break;
345 123640 : case XFS_BLFT_DIR_FREE_BUF:
346 123640 : if (magic32 != XFS_DIR2_FREE_MAGIC &&
347 123640 : magic32 != XFS_DIR3_FREE_MAGIC) {
348 : warnmsg = "Bad dir3 free magic!";
349 : break;
350 : }
351 123640 : bp->b_ops = &xfs_dir3_free_buf_ops;
352 123640 : break;
353 56456 : case XFS_BLFT_DIR_LEAF1_BUF:
354 56456 : if (magicda != XFS_DIR2_LEAF1_MAGIC &&
355 56456 : magicda != XFS_DIR3_LEAF1_MAGIC) {
356 : warnmsg = "Bad dir leaf1 magic!";
357 : break;
358 : }
359 56456 : bp->b_ops = &xfs_dir3_leaf1_buf_ops;
360 56456 : break;
361 332855 : case XFS_BLFT_DIR_LEAFN_BUF:
362 332855 : if (magicda != XFS_DIR2_LEAFN_MAGIC &&
363 332855 : magicda != XFS_DIR3_LEAFN_MAGIC) {
364 : warnmsg = "Bad dir leafn magic!";
365 : break;
366 : }
367 332855 : bp->b_ops = &xfs_dir3_leafn_buf_ops;
368 332855 : break;
369 823 : case XFS_BLFT_DA_NODE_BUF:
370 823 : if (magicda != XFS_DA_NODE_MAGIC &&
371 823 : magicda != XFS_DA3_NODE_MAGIC) {
372 : warnmsg = "Bad da node magic!";
373 : break;
374 : }
375 823 : bp->b_ops = &xfs_da3_node_buf_ops;
376 823 : break;
377 16699 : case XFS_BLFT_ATTR_LEAF_BUF:
378 16699 : if (magicda != XFS_ATTR_LEAF_MAGIC &&
379 16699 : magicda != XFS_ATTR3_LEAF_MAGIC) {
380 : warnmsg = "Bad attr leaf magic!";
381 : break;
382 : }
383 16699 : bp->b_ops = &xfs_attr3_leaf_buf_ops;
384 16699 : break;
385 0 : case XFS_BLFT_ATTR_RMT_BUF:
386 0 : if (magic32 != XFS_ATTR3_RMT_MAGIC) {
387 : warnmsg = "Bad attr remote magic!";
388 : break;
389 : }
390 0 : bp->b_ops = &xfs_attr3_rmt_buf_ops;
391 0 : break;
392 12141 : case XFS_BLFT_SB_BUF:
393 12141 : if (magic32 != XFS_SB_MAGIC) {
394 : warnmsg = "Bad SB block magic!";
395 : break;
396 : }
397 12141 : bp->b_ops = &xfs_sb_buf_ops;
398 12141 : break;
399 : #ifdef CONFIG_XFS_RT
400 : case XFS_BLFT_RTBITMAP_BUF:
401 0 : if (xfs_has_rtgroups(mp) && magic32 != XFS_RTBITMAP_MAGIC) {
402 : warnmsg = "Bad rtbitmap magic!";
403 : break;
404 : }
405 0 : bp->b_ops = xfs_rtblock_ops(mp, false);
406 0 : break;
407 : case XFS_BLFT_RTSUMMARY_BUF:
408 0 : if (xfs_has_rtgroups(mp) && magic32 != XFS_RTSUMMARY_MAGIC) {
409 : warnmsg = "Bad rtsummary magic!";
410 : break;
411 : }
412 0 : bp->b_ops = xfs_rtblock_ops(mp, true);
413 0 : break;
414 : #endif /* CONFIG_XFS_RT */
415 : default:
416 0 : xfs_warn(mp, "Unknown buffer type %d!",
417 : xfs_blft_from_flags(buf_f));
418 0 : break;
419 : }
420 :
421 : /*
422 : * Nothing else to do in the case of a NULL current LSN as this means
423 : * the buffer is more recent than the change in the log and will be
424 : * skipped.
425 : */
426 12786640 : if (current_lsn == NULLCOMMITLSN)
427 : return;
428 :
429 12473674 : if (warnmsg) {
430 0 : xfs_warn(mp, warnmsg);
431 0 : ASSERT(0);
432 : }
433 :
434 : /*
435 : * We must update the metadata LSN of the buffer as it is written out to
436 : * ensure that older transactions never replay over this one and corrupt
437 : * the buffer. This can occur if log recovery is interrupted at some
438 : * point after the current transaction completes, at which point a
439 : * subsequent mount starts recovery from the beginning.
440 : *
441 : * Write verifiers update the metadata LSN from log items attached to
442 : * the buffer. Therefore, initialize a bli purely to carry the LSN to
443 : * the verifier.
444 : */
445 12473674 : if (bp->b_ops) {
446 12473674 : struct xfs_buf_log_item *bip;
447 :
448 12473674 : bp->b_flags |= _XBF_LOGRECOVERY;
449 12473674 : xfs_buf_item_init(bp, mp);
450 12473674 : bip = bp->b_log_item;
451 12473674 : bip->bli_item.li_lsn = current_lsn;
452 : }
453 : }
454 :
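/*
 * Editor's sketch (not part of the driver source): the magic-number
 * probe at the top of the function, in portable C. The same leading
 * bytes are decoded as a big-endian 16-bit and a 32-bit value, and
 * the switch keys off whichever width the block type defines. The
 * sample bytes "XAGF" stand in for an on-disk magic value.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t get_be32(const unsigned char *p)
{
	return (uint32_t)p[0] << 24 | (uint32_t)p[1] << 16 |
	       (uint32_t)p[2] << 8 | p[3];
}

static uint16_t get_be16(const unsigned char *p)
{
	return (uint16_t)((p[0] << 8) | p[1]);
}

int main(void)
{
	unsigned char blk[4] = { 'X', 'A', 'G', 'F' };

	printf("magic32 0x%08x, magic16 0x%04x\n",
	       get_be32(blk), get_be16(blk));
	return 0;
}
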
455 : /*
456 : * Perform a 'normal' buffer recovery. Each logged region of the
457 : * buffer should be copied over the corresponding region in the
458 : * given buffer. The bitmap in the buf log format structure indicates
459 : * where to place the logged data.
460 : */
461 : STATIC void
462 12771362 : xlog_recover_do_reg_buffer(
463 : struct xfs_mount *mp,
464 : struct xlog_recover_item *item,
465 : struct xfs_buf *bp,
466 : struct xfs_buf_log_format *buf_f,
467 : xfs_lsn_t current_lsn)
468 : {
469 12771362 : int i;
470 12771362 : int bit;
471 12771362 : int nbits;
472 12771362 : xfs_failaddr_t fa;
473 12771362 : const size_t size_disk_dquot = sizeof(struct xfs_disk_dquot);
474 :
475 12771362 : trace_xfs_log_recover_buf_reg_buf(mp->m_log, buf_f);
476 :
477 12771362 : bit = 0;
478 12771362 : i = 1; /* 0 is the buf format structure */
479 50133992 : while (1) {
480 31452677 : bit = xfs_next_bit(buf_f->blf_data_map,
481 : buf_f->blf_map_size, bit);
482 31452677 : if (bit == -1)
483 : break;
484 18681315 : nbits = xfs_contig_bits(buf_f->blf_data_map,
485 : buf_f->blf_map_size, bit);
486 18681315 : ASSERT(nbits > 0);
487 18681315 : ASSERT(item->ri_buf[i].i_addr != NULL);
488 18681315 : ASSERT(item->ri_buf[i].i_len % XFS_BLF_CHUNK == 0);
489 18681315 : ASSERT(BBTOB(bp->b_length) >=
490 : ((uint)bit << XFS_BLF_SHIFT) + (nbits << XFS_BLF_SHIFT));
491 :
492 : /*
493 : * The dirty regions logged in the buffer, even though
494 : * contiguous, may span multiple chunks. This is because the
495 : * dirty region may span a physical page boundary in a buffer
496 : * and hence be split into two separate vectors for writing into
497 : * the log. Hence we need to trim nbits back to the length of
498 : * the current region being copied out of the log.
499 : */
500 18681315 : if (item->ri_buf[i].i_len < (nbits << XFS_BLF_SHIFT))
501 0 : nbits = item->ri_buf[i].i_len >> XFS_BLF_SHIFT;
502 :
503 : /*
504 : * Do a sanity check if this is a dquot buffer. Just checking
505 : * the first dquot in the buffer should do. XXX This is
506 : * probably a good thing to do for other buf types also.
507 : */
508 18681315 : fa = NULL;
509 18681315 : if (buf_f->blf_flags &
510 : (XFS_BLF_UDQUOT_BUF|XFS_BLF_PDQUOT_BUF|XFS_BLF_GDQUOT_BUF)) {
511 297688 : if (item->ri_buf[i].i_addr == NULL) {
512 0 : xfs_alert(mp,
513 : "XFS: NULL dquot in %s.", __func__);
514 0 : goto next;
515 : }
516 297688 : if (item->ri_buf[i].i_len < size_disk_dquot) {
517 0 : xfs_alert(mp,
518 : "XFS: dquot too small (%d) in %s.",
519 : item->ri_buf[i].i_len, __func__);
520 0 : goto next;
521 : }
522 297688 : fa = xfs_dquot_verify(mp, item->ri_buf[i].i_addr, -1);
523 297688 : if (fa) {
524 0 : xfs_alert(mp,
525 : "dquot corrupt at %pS trying to replay into block 0x%llx",
526 : fa, xfs_buf_daddr(bp));
527 0 : goto next;
528 : }
529 : }
530 :
531 37362630 : memcpy(xfs_buf_offset(bp,
532 : (uint)bit << XFS_BLF_SHIFT), /* dest */
533 : item->ri_buf[i].i_addr, /* source */
534 : nbits<<XFS_BLF_SHIFT); /* length */
535 18681315 : next:
536 18681315 : i++;
537 18681315 : bit += nbits;
538 : }
539 :
540 : /* Shouldn't be any more regions */
541 12771362 : ASSERT(i == item->ri_total);
542 :
543 12771362 : xlog_recover_validate_buf_type(mp, bp, buf_f, current_lsn);
544 12771362 : }
545 :
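/*
 * Editor's sketch (not part of the driver source): the dirty-map walk
 * above in miniature, assuming the usual 128-byte chunk size
 * (XFS_BLF_CHUNK), i.e. a shift of 7. Each set bit marks one chunk,
 * and a contiguous run of set bits becomes one copy of
 * nbits << XFS_BLF_SHIFT bytes at offset bit << XFS_BLF_SHIFT.
 */
#include <stdint.h>
#include <stdio.h>

#define CHUNK_SHIFT 7	/* mirrors XFS_BLF_SHIFT: 128-byte chunks */

int main(void)
{
	uint32_t map = 0xc6;	/* bits 1, 2, 6, 7 dirty */
	int bit = 0;

	while (bit < 32) {
		int nbits = 0;

		while (bit < 32 && !(map & (1u << bit)))
			bit++;		/* like xfs_next_bit() */
		if (bit == 32)
			break;
		while (bit + nbits < 32 && (map & (1u << (bit + nbits))))
			nbits++;	/* like xfs_contig_bits() */
		printf("copy %d bytes to offset %d\n",
		       nbits << CHUNK_SHIFT, bit << CHUNK_SHIFT);
		bit += nbits;
	}
	return 0;
}
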
546 : /*
547 : * Perform a dquot buffer recovery.
548 : * Simple algorithm: if we have found a QUOTAOFF log item of the same type
549 : * (i.e. USR or GRP), then just toss this buffer away; don't recover it.
550 : * Else, treat it as a regular buffer and do recovery.
551 : *
552 : * Return false if the buffer was tossed and true if we recovered the buffer to
553 : * indicate to the caller whether the buffer needs writing.
554 : */
555 : STATIC bool
556 297688 : xlog_recover_do_dquot_buffer(
557 : struct xfs_mount *mp,
558 : struct xlog *log,
559 : struct xlog_recover_item *item,
560 : struct xfs_buf *bp,
561 : struct xfs_buf_log_format *buf_f)
562 : {
563 297688 : uint type;
564 :
565 297688 : trace_xfs_log_recover_buf_dquot_buf(log, buf_f);
566 :
567 : /*
568 : * Filesystems are required to send in quota flags at mount time.
569 : */
570 297688 : if (!mp->m_qflags)
571 : return false;
572 :
573 297688 : type = 0;
574 297688 : if (buf_f->blf_flags & XFS_BLF_UDQUOT_BUF)
575 147709 : type |= XFS_DQTYPE_USER;
576 297688 : if (buf_f->blf_flags & XFS_BLF_PDQUOT_BUF)
577 2433 : type |= XFS_DQTYPE_PROJ;
578 297688 : if (buf_f->blf_flags & XFS_BLF_GDQUOT_BUF)
579 147546 : type |= XFS_DQTYPE_GROUP;
580 : /*
581 : * This type of quota was turned off, so ignore this buffer
582 : */
583 297688 : if (log->l_quotaoffs_flag & type)
584 : return false;
585 :
586 297688 : xlog_recover_do_reg_buffer(mp, item, bp, buf_f, NULLCOMMITLSN);
587 297688 : return true;
588 : }
589 :
590 : /*
591 : * Perform recovery for a buffer full of inodes. In these buffers, the only
592 : * data which should be recovered is that which corresponds to the
593 : * di_next_unlinked pointers in the on disk inode structures. The rest of the
594 : * data for the inodes is always logged through the inodes themselves rather
595 : * than the inode buffer and is recovered in xlog_recover_inode_commit_pass2().
596 : *
597 : * The only time when buffers full of inodes are fully recovered is when the
598 : * buffer is full of newly allocated inodes. In this case the buffer will
599 : * not be marked as an inode buffer and so will be sent to
600 : * xlog_recover_do_reg_buffer() below during recovery.
601 : */
602 : STATIC int
603 8847 : xlog_recover_do_inode_buffer(
604 : struct xfs_mount *mp,
605 : struct xlog_recover_item *item,
606 : struct xfs_buf *bp,
607 : struct xfs_buf_log_format *buf_f)
608 : {
609 8847 : int i;
610 8847 : int item_index = 0;
611 8847 : int bit = 0;
612 8847 : int nbits = 0;
613 8847 : int reg_buf_offset = 0;
614 8847 : int reg_buf_bytes = 0;
615 8847 : int next_unlinked_offset;
616 8847 : int inodes_per_buf;
617 8847 : xfs_agino_t *logged_nextp;
618 8847 : xfs_agino_t *buffer_nextp;
619 :
620 8847 : trace_xfs_log_recover_buf_inode_buf(mp->m_log, buf_f);
621 :
622 : /*
623 : * Post recovery validation only works properly on CRC enabled
624 : * filesystems.
625 : */
626 8847 : if (xfs_has_crc(mp))
627 8847 : bp->b_ops = &xfs_inode_buf_ops;
628 :
629 8847 : inodes_per_buf = BBTOB(bp->b_length) >> mp->m_sb.sb_inodelog;
630 242902 : for (i = 0; i < inodes_per_buf; i++) {
631 237648 : next_unlinked_offset = (i * mp->m_sb.sb_inodesize) +
632 : offsetof(struct xfs_dinode, di_next_unlinked);
633 :
634 237648 : while (next_unlinked_offset >=
635 405910 : (reg_buf_offset + reg_buf_bytes)) {
636 : /*
637 : * The next di_next_unlinked field is beyond
638 : * the current logged region. Find the next
639 : * logged region that contains or is beyond
640 : * the current di_next_unlinked field.
641 : */
642 171855 : bit += nbits;
643 171855 : bit = xfs_next_bit(buf_f->blf_data_map,
644 : buf_f->blf_map_size, bit);
645 :
646 : /*
647 : * If there are no more logged regions in the
648 : * buffer, then we're done.
649 : */
650 171855 : if (bit == -1)
651 : return 0;
652 :
653 168262 : nbits = xfs_contig_bits(buf_f->blf_data_map,
654 : buf_f->blf_map_size, bit);
655 168262 : ASSERT(nbits > 0);
656 168262 : reg_buf_offset = bit << XFS_BLF_SHIFT;
657 168262 : reg_buf_bytes = nbits << XFS_BLF_SHIFT;
658 168262 : item_index++;
659 : }
660 :
661 : /*
662 : * If the current logged region starts after the current
663 : * di_next_unlinked field, then move on to the next
664 : * di_next_unlinked field.
665 : */
666 234055 : if (next_unlinked_offset < reg_buf_offset)
667 65793 : continue;
668 :
669 168262 : ASSERT(item->ri_buf[item_index].i_addr != NULL);
670 168262 : ASSERT((item->ri_buf[item_index].i_len % XFS_BLF_CHUNK) == 0);
671 168262 : ASSERT((reg_buf_offset + reg_buf_bytes) <= BBTOB(bp->b_length));
672 :
673 : /*
674 : * The current logged region contains a copy of the
675 : * current di_next_unlinked field. Extract its value
676 : * and copy it to the buffer copy.
677 : */
678 168262 : logged_nextp = item->ri_buf[item_index].i_addr +
679 168262 : next_unlinked_offset - reg_buf_offset;
680 168262 : if (XFS_IS_CORRUPT(mp, *logged_nextp == 0)) {
681 0 : xfs_alert(mp,
682 : "Bad inode buffer log record (ptr = "PTR_FMT", bp = "PTR_FMT"). "
683 : "Trying to replay bad (0) inode di_next_unlinked field.",
684 : item, bp);
685 0 : return -EFSCORRUPTED;
686 : }
687 :
688 168262 : buffer_nextp = xfs_buf_offset(bp, next_unlinked_offset);
689 168262 : *buffer_nextp = *logged_nextp;
690 :
691 : /*
692 : * If necessary, recalculate the CRC in the on-disk inode. We
693 : * have to leave the inode in a consistent state for whoever
694 : * reads it next....
695 : */
696 168262 : xfs_dinode_calc_crc(mp,
697 168262 : xfs_buf_offset(bp, i * mp->m_sb.sb_inodesize));
698 :
699 : }
700 :
701 : return 0;
702 : }
703 :
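/*
 * Editor's sketch (not part of the driver source): the offset
 * arithmetic that locates each di_next_unlinked copy in an inode
 * buffer. The struct below is a toy stand-in for xfs_dinode; the
 * 96-byte core size and the 512-byte inode size are illustrative
 * values only, which is why the real code uses offsetof() and
 * sb_inodesize instead of constants.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct toy_dinode {
	uint8_t		core[96];		/* assumed core size */
	uint32_t	di_next_unlinked;	/* big-endian on disk */
};

int main(void)
{
	size_t inodesize = 512;			/* sample sb_inodesize */
	size_t buflen = 16384;			/* sample cluster buffer */
	size_t i, n = buflen / inodesize;

	for (i = 0; i < n && i < 3; i++)
		printf("inode %zu: di_next_unlinked at byte %zu\n", i,
		       i * inodesize +
		       offsetof(struct toy_dinode, di_next_unlinked));
	return 0;
}
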
704 : /*
705 : * V5 filesystems know the age of the buffer on disk being recovered. We can
706 : * have newer objects on disk than we are replaying, and so for these cases we
707 : * don't want to replay the current change as that will make the buffer contents
708 : * temporarily invalid on disk.
709 : *
710 : * The magic number might not match the buffer type we are going to recover
711 : * (e.g. reallocated blocks), so we ignore the xfs_buf_log_format flags. Hence
712 : * extract the LSN of the existing object in the buffer based on its current
713 : * magic number. If we don't recognise the magic number in the buffer, then
714 : * return an LSN of -1 so that the caller knows it was an unrecognised block and
715 : * so can recover the buffer.
716 : *
717 : * Note: we cannot rely solely on magic number matches to determine that the
718 : * buffer has a valid LSN - we also need to verify that it belongs to this
719 : * filesystem, so we need to extract the object's LSN and compare it to that
720 : * which we read from the superblock. If the UUIDs don't match, then we've got a
721 : * stale metadata block from an old filesystem instance that we need to recover
722 : * over the top of.
723 : */
724 : static xfs_lsn_t
725 12795487 : xlog_recover_get_buf_lsn(
726 : struct xfs_mount *mp,
727 : struct xfs_buf *bp,
728 : struct xfs_buf_log_format *buf_f)
729 : {
730 12795487 : uint32_t magic32;
731 12795487 : uint16_t magic16;
732 12795487 : uint16_t magicda;
733 12795487 : void *blk = bp->b_addr;
734 12795487 : uuid_t *uuid;
735 12795487 : xfs_lsn_t lsn = -1;
736 12795487 : uint16_t blft;
737 :
738 : /* v4 filesystems always recover immediately */
739 12795487 : if (!xfs_has_crc(mp))
740 0 : goto recover_immediately;
741 :
742 : /*
743 : * realtime bitmap and summary file blocks do not have magic numbers or
744 : * UUIDs, so we must recover them immediately.
745 : */
746 12795487 : blft = xfs_blft_from_flags(buf_f);
747 12795487 : if (!xfs_has_rtgroups(mp) && (blft == XFS_BLFT_RTBITMAP_BUF ||
748 : blft == XFS_BLFT_RTSUMMARY_BUF))
749 0 : goto recover_immediately;
750 :
751 12795487 : magic32 = be32_to_cpu(*(__be32 *)blk);
752 12795487 : switch (magic32) {
753 0 : case XFS_RTSUMMARY_MAGIC:
754 : case XFS_RTBITMAP_MAGIC: {
755 0 : struct xfs_rtbuf_blkinfo *hdr = blk;
756 :
757 0 : lsn = be64_to_cpu(hdr->rt_lsn);
758 0 : uuid = &hdr->rt_uuid;
759 0 : break;
760 : }
761 9364481 : case XFS_ABTB_CRC_MAGIC:
762 : case XFS_ABTC_CRC_MAGIC:
763 : case XFS_ABTB_MAGIC:
764 : case XFS_ABTC_MAGIC:
765 : case XFS_RMAP_CRC_MAGIC:
766 : case XFS_REFC_CRC_MAGIC:
767 : case XFS_FIBT_CRC_MAGIC:
768 : case XFS_FIBT_MAGIC:
769 : case XFS_IBT_CRC_MAGIC:
770 : case XFS_IBT_MAGIC: {
771 9364481 : struct xfs_btree_block *btb = blk;
772 :
773 9364481 : lsn = be64_to_cpu(btb->bb_u.s.bb_lsn);
774 9364481 : uuid = &btb->bb_u.s.bb_uuid;
775 9364481 : break;
776 : }
777 1003860 : case XFS_RTRMAP_CRC_MAGIC:
778 : case XFS_RTREFC_CRC_MAGIC:
779 : case XFS_BMAP_CRC_MAGIC:
780 : case XFS_BMAP_MAGIC: {
781 1003860 : struct xfs_btree_block *btb = blk;
782 :
783 1003860 : lsn = be64_to_cpu(btb->bb_u.l.bb_lsn);
784 1003860 : uuid = &btb->bb_u.l.bb_uuid;
785 1003860 : break;
786 : }
787 687635 : case XFS_AGF_MAGIC:
788 687635 : lsn = be64_to_cpu(((struct xfs_agf *)blk)->agf_lsn);
789 687635 : uuid = &((struct xfs_agf *)blk)->agf_uuid;
790 687635 : break;
791 18887 : case XFS_AGFL_MAGIC:
792 18887 : lsn = be64_to_cpu(((struct xfs_agfl *)blk)->agfl_lsn);
793 18887 : uuid = &((struct xfs_agfl *)blk)->agfl_uuid;
794 18887 : break;
795 351643 : case XFS_AGI_MAGIC:
796 351643 : lsn = be64_to_cpu(((struct xfs_agi *)blk)->agi_lsn);
797 351643 : uuid = &((struct xfs_agi *)blk)->agi_uuid;
798 351643 : break;
799 671 : case XFS_SYMLINK_MAGIC:
800 671 : lsn = be64_to_cpu(((struct xfs_dsymlink_hdr *)blk)->sl_lsn);
801 671 : uuid = &((struct xfs_dsymlink_hdr *)blk)->sl_uuid;
802 671 : break;
803 610403 : case XFS_DIR3_BLOCK_MAGIC:
804 : case XFS_DIR3_DATA_MAGIC:
805 : case XFS_DIR3_FREE_MAGIC:
806 610403 : lsn = be64_to_cpu(((struct xfs_dir3_blk_hdr *)blk)->lsn);
807 610403 : uuid = &((struct xfs_dir3_blk_hdr *)blk)->uuid;
808 610403 : break;
809 23 : case XFS_ATTR3_RMT_MAGIC:
810 : /*
811 : * Remote attr blocks are written synchronously, rather than
812 : * being logged. That means they do not contain a valid LSN
813 : * (i.e. transactionally ordered) in them, and hence any time we
814 : * see a buffer to replay over the top of a remote attribute
815 : * block we should simply do so.
816 : */
817 23 : goto recover_immediately;
818 12169 : case XFS_SB_MAGIC:
819 : /*
820 : * superblock uuids are magic. We may or may not have a
821 : * sb_meta_uuid on disk, but it will be set in the in-core
822 : * superblock. We set the uuid pointer for verification
823 : * according to the superblock feature mask to ensure we check
824 : * the relevant UUID in the superblock.
825 : */
826 12169 : lsn = be64_to_cpu(((struct xfs_dsb *)blk)->sb_lsn);
827 12169 : if (xfs_has_metauuid(mp))
828 0 : uuid = &((struct xfs_dsb *)blk)->sb_meta_uuid;
829 : else
830 12169 : uuid = &((struct xfs_dsb *)blk)->sb_uuid;
831 : break;
832 : default:
833 : break;
834 : }
835 :
836 12049749 : if (lsn != (xfs_lsn_t)-1) {
837 12049720 : if (!uuid_equal(&mp->m_sb.sb_meta_uuid, uuid))
838 626 : goto recover_immediately;
839 : return lsn;
840 : }
841 :
842 745744 : magicda = be16_to_cpu(((struct xfs_da_blkinfo *)blk)->magic);
843 745744 : switch (magicda) {
844 404068 : case XFS_DIR3_LEAF1_MAGIC:
845 : case XFS_DIR3_LEAFN_MAGIC:
846 : case XFS_ATTR3_LEAF_MAGIC:
847 : case XFS_DA3_NODE_MAGIC:
848 404068 : lsn = be64_to_cpu(((struct xfs_da3_blkinfo *)blk)->lsn);
849 404068 : uuid = &((struct xfs_da3_blkinfo *)blk)->uuid;
850 404068 : break;
851 : default:
852 : break;
853 : }
854 :
855 745744 : if (lsn != (xfs_lsn_t)-1) {
856 404068 : if (!uuid_equal(&mp->m_sb.sb_meta_uuid, uuid))
857 29 : goto recover_immediately;
858 : return lsn;
859 : }
860 :
861 : /*
862 : * We do individual object checks on dquot and inode buffers as they
863 : * have their own individual LSN records. Also, we could have a stale
864 : * buffer here, so we have to at least recognise these buffer types.
865 : *
866 : * A noted complexity here is inode unlinked list processing - it logs
867 : * the inode directly in the buffer, but we don't know which inodes have
868 : * been modified, and there is no global buffer LSN. Hence we need to
869 : * recover all inode buffer types immediately. This problem will be
870 : * fixed by logical logging of the unlinked list modifications.
871 : */
872 341676 : magic16 = be16_to_cpu(*(__be16 *)blk);
873 341676 : switch (magic16) {
874 10411 : case XFS_DQUOT_MAGIC:
875 : case XFS_DINODE_MAGIC:
876 10411 : goto recover_immediately;
877 : default:
878 : break;
879 : }
880 :
881 : /* unknown buffer contents, recover immediately */
882 :
883 : recover_immediately:
884 : return (xfs_lsn_t)-1;
885 :
886 : }
887 :
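/*
 * Editor's sketch (not part of the driver source): the LSN ordering
 * the caller applies to the value returned above. An LSN packs the
 * log cycle in the high 32 bits and a block number in the low 32
 * bits, mirroring how CYCLE_LSN()/BLOCK_LSN() split the value in
 * XFS; cycles compare first, then blocks, as XFS_LSN_CMP() does.
 */
#include <stdint.h>
#include <stdio.h>

static int lsn_cmp(int64_t a, int64_t b)
{
	uint32_t cycle_a = (uint32_t)(a >> 32);
	uint32_t cycle_b = (uint32_t)(b >> 32);

	if (cycle_a != cycle_b)
		return cycle_a < cycle_b ? -1 : 1;
	return (uint32_t)a < (uint32_t)b ? -1 : (uint32_t)a > (uint32_t)b;
}

int main(void)
{
	int64_t disk_lsn = (2LL << 32) | 100;	/* cycle 2, block 100 */
	int64_t item_lsn = (2LL << 32) | 50;	/* cycle 2, block 50 */

	/* on-disk buffer is newer than the logged change: skip replay */
	printf("replay change? %s\n",
	       lsn_cmp(disk_lsn, item_lsn) >= 0 ? "no" : "yes");
	return 0;
}
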
888 : /*
889 : * This routine replays a modification made to a buffer at runtime.
890 : * There are actually two types of buffer, regular and inode, which
891 : * are handled differently. Inode buffers are handled differently
892 : * in that we only recover a specific set of data from them, namely
893 : * the inode di_next_unlinked fields. This is because all other inode
894 : * data is actually logged via inode records and any data we replay
895 : * here which overlaps that may be stale.
896 : *
897 : * When meta-data buffers are freed at run time we log a buffer item
898 : * with the XFS_BLF_CANCEL bit set to indicate that previous copies
899 : * of the buffer in the log should not be replayed at recovery time.
900 : * This is so that if the blocks covered by the buffer are reused for
901 : * file data before we crash we don't end up replaying old, freed
902 : * meta-data into a user's file.
903 : *
904 : * To handle the cancellation of buffer log items, we make two passes
905 : * over the log during recovery. During the first we build a table of
906 : * those buffers which have been cancelled, and during the second we
907 : * only replay those buffers which do not have corresponding cancel
908 : * records in the table. See xlog_recover_buf_pass[1,2] above
909 : * for more details on the implementation of the table of cancel records.
910 : */
911 : STATIC int
912 13635892 : xlog_recover_buf_commit_pass2(
913 : struct xlog *log,
914 : struct list_head *buffer_list,
915 : struct xlog_recover_item *item,
916 : xfs_lsn_t current_lsn)
917 : {
918 13635892 : struct xfs_buf_log_format *buf_f = item->ri_buf[0].i_addr;
919 13635892 : struct xfs_mount *mp = log->l_mp;
920 13635892 : struct xfs_buf *bp;
921 13635892 : int error;
922 13635892 : uint buf_flags;
923 13635892 : xfs_lsn_t lsn;
924 :
925 : /*
926 : * In this pass we only want to recover all the buffers which have
927 : * not been cancelled and are not cancellation buffers themselves.
928 : */
929 13635892 : if (buf_f->blf_flags & XFS_BLF_CANCEL) {
930 389942 : if (xlog_put_buffer_cancelled(log, buf_f->blf_blkno,
931 389942 : buf_f->blf_len))
932 389942 : goto cancelled;
933 : } else {
934 :
935 13245950 : if (xlog_is_buffer_cancelled(log, buf_f->blf_blkno,
936 13245950 : buf_f->blf_len))
937 450463 : goto cancelled;
938 : }
939 :
940 12795487 : trace_xfs_log_recover_buf_recover(log, buf_f);
941 :
942 12795487 : buf_flags = 0;
943 12795487 : if (buf_f->blf_flags & XFS_BLF_INODE_BUF)
944 8847 : buf_flags |= XBF_UNMAPPED;
945 :
946 12795487 : error = xfs_buf_read(mp->m_ddev_targp, buf_f->blf_blkno, buf_f->blf_len,
947 : buf_flags, &bp, NULL);
948 12795487 : if (error)
949 : return error;
950 :
951 : /*
952 : * Recover the buffer only if we get an LSN from it and it's less than
953 : * the lsn of the transaction we are replaying.
954 : *
955 : * Note that we have to be extremely careful of readahead here.
956 : * Readahead does not attach verifiers to the buffers, so if we don't
957 : * actually do any replay after readahead because the LSN found in
958 : * the buffer is more recent than the current transaction, then we
959 : * need to attach the verifier directly. Failure to do so means that
960 : * future recovery actions (e.g. EFI and unlinked list recovery) can
961 : * operate on the buffers without the verifier attached, which can
962 : * leave blocks on disk with the correct content but a stale
963 : * CRC.
964 : *
965 : * It is safe to assume these clean buffers are currently up to date.
966 : * If the buffer is dirtied by a later transaction being replayed, then
967 : * the verifier will be reset to match whatever recovery turns that
968 : * buffer into.
969 : */
970 12795487 : lsn = xlog_recover_get_buf_lsn(mp, bp, buf_f);
971 12795487 : if (lsn && lsn != -1 && XFS_LSN_CMP(lsn, current_lsn) >= 0) {
972 15278 : trace_xfs_log_recover_buf_skip(log, buf_f);
973 15278 : xlog_recover_validate_buf_type(mp, bp, buf_f, NULLCOMMITLSN);
974 :
975 : /*
976 : * We're skipping replay of this buffer log item due to the log
977 : * item LSN being behind the ondisk buffer. Verify the buffer
978 : * contents since we aren't going to run the write verifier.
979 : */
980 15278 : if (bp->b_ops) {
981 15278 : bp->b_ops->verify_read(bp);
982 15278 : error = bp->b_error;
983 : }
984 15278 : goto out_release;
985 : }
986 :
987 12780209 : if (buf_f->blf_flags & XFS_BLF_INODE_BUF) {
988 8847 : error = xlog_recover_do_inode_buffer(mp, item, bp, buf_f);
989 8847 : if (error)
990 0 : goto out_release;
991 12771362 : } else if (buf_f->blf_flags &
992 : (XFS_BLF_UDQUOT_BUF|XFS_BLF_PDQUOT_BUF|XFS_BLF_GDQUOT_BUF)) {
993 297688 : bool dirty;
994 :
995 297688 : dirty = xlog_recover_do_dquot_buffer(mp, log, item, bp, buf_f);
996 297688 : if (!dirty)
997 0 : goto out_release;
998 : } else {
999 12473674 : xlog_recover_do_reg_buffer(mp, item, bp, buf_f, current_lsn);
1000 : }
1001 :
1002 : /*
1003 : * Perform delayed write on the buffer. Asynchronous writes will be
1004 : * slower when taking into account all the buffers to be flushed.
1005 : *
1006 : * Also make sure that only inode buffers with good sizes stay in
1007 : * the buffer cache. The kernel moves inodes in buffers of 1 block
1008 : * or inode_cluster_size bytes, whichever is bigger. The inode
1009 : * buffers in the log can be a different size if the log was generated
1010 : * by an older kernel using unclustered inode buffers or a newer kernel
1011 : * running with a different inode cluster size. Regardless, if
1012 : * the inode buffer size isn't max(blocksize, inode_cluster_size)
1013 : * for *our* value of inode_cluster_size, then we need to keep
1014 : * the buffer out of the buffer cache so that the buffer won't
1015 : * overlap with future reads of those inodes.
1016 : */
1017 12780209 : if (XFS_DINODE_MAGIC ==
1018 12780209 : be16_to_cpu(*((__be16 *)xfs_buf_offset(bp, 0))) &&
1019 8847 : (BBTOB(bp->b_length) != M_IGEO(log->l_mp)->inode_cluster_size)) {
1020 0 : xfs_buf_stale(bp);
1021 0 : error = xfs_bwrite(bp);
1022 : } else {
1023 12780209 : ASSERT(bp->b_mount == mp);
1024 12780209 : bp->b_flags |= _XBF_LOGRECOVERY;
1025 12780209 : xfs_buf_delwri_queue(bp, buffer_list);
1026 :
1027 : /*
1028 : * Update the primary rt super if we just recovered the primary
1029 : * fs super.
1030 : */
1031 12780209 : if (xfs_has_rtgroups(mp) && bp->b_ops == &xfs_sb_buf_ops) {
1032 10824 : struct xfs_buf *rtsb_bp = mp->m_rtsb_bp;
1033 :
1034 10824 : if (rtsb_bp) {
1035 0 : xfs_buf_lock(rtsb_bp);
1036 0 : xfs_buf_hold(rtsb_bp);
1037 0 : xfs_rtgroup_update_super(rtsb_bp, bp);
1038 0 : rtsb_bp->b_flags |= _XBF_LOGRECOVERY;
1039 0 : xfs_buf_delwri_queue(rtsb_bp, buffer_list);
1040 0 : xfs_buf_relse(rtsb_bp);
1041 : }
1042 : }
1043 : }
1044 :
1045 12780209 : out_release:
1046 12795487 : xfs_buf_relse(bp);
1047 12795487 : return error;
1048 840405 : cancelled:
1049 840405 : trace_xfs_log_recover_buf_cancel(log, buf_f);
1050 840405 : return 0;
1051 : }
1052 :
1053 : const struct xlog_recover_item_ops xlog_buf_item_ops = {
1054 : .item_type = XFS_LI_BUF,
1055 : .reorder = xlog_recover_buf_reorder,
1056 : .ra_pass2 = xlog_recover_buf_ra_pass2,
1057 : .commit_pass1 = xlog_recover_buf_commit_pass1,
1058 : .commit_pass2 = xlog_recover_buf_commit_pass2,
1059 : };
1060 :
1061 : #ifdef DEBUG
1062 : void
1063 11369 : xlog_check_buf_cancel_table(
1064 : struct xlog *log)
1065 : {
1066 11369 : int i;
1067 :
1068 738985 : for (i = 0; i < XLOG_BC_TABLE_SIZE; i++)
1069 727616 : ASSERT(list_empty(&log->l_buf_cancel_table[i]));
1070 11369 : }
1071 : #endif
1072 :
1073 : int
1074 11369 : xlog_alloc_buf_cancel_table(
1075 : struct xlog *log)
1076 : {
1077 11369 : void *p;
1078 11369 : int i;
1079 :
1080 11369 : ASSERT(log->l_buf_cancel_table == NULL);
1081 :
1082 11369 : p = kmalloc_array(XLOG_BC_TABLE_SIZE, sizeof(struct list_head),
1083 : GFP_KERNEL);
1084 11369 : if (!p)
1085 : return -ENOMEM;
1086 :
1087 11369 : log->l_buf_cancel_table = p;
1088 738985 : for (i = 0; i < XLOG_BC_TABLE_SIZE; i++)
1089 727616 : INIT_LIST_HEAD(&log->l_buf_cancel_table[i]);
1090 :
1091 : return 0;
1092 : }
1093 :
1094 : void
1095 11369 : xlog_free_buf_cancel_table(
1096 : struct xlog *log)
1097 : {
1098 11369 : int i;
1099 :
1100 11369 : if (!log->l_buf_cancel_table)
1101 : return;
1102 :
1103 738985 : for (i = 0; i < XLOG_BC_TABLE_SIZE; i++) {
1104 : struct xfs_buf_cancel *bc;
1105 :
1106 727616 : while ((bc = list_first_entry_or_null(
1107 : &log->l_buf_cancel_table[i],
1108 : struct xfs_buf_cancel, bc_list))) {
1109 0 : list_del(&bc->bc_list);
1110 0 : kmem_free(bc);
1111 : }
1112 : }
1113 :
1114 11369 : kmem_free(log->l_buf_cancel_table);
1115 11369 : log->l_buf_cancel_table = NULL;
1116 : }