Line data Source code
1 : // SPDX-License-Identifier: GPL-2.0+
2 : /*
3 : * Copyright (C) 2016 Oracle. All Rights Reserved.
4 : * Author: Darrick J. Wong <darrick.wong@oracle.com>
5 : */
6 : #include "xfs.h"
7 : #include "xfs_fs.h"
8 : #include "xfs_format.h"
9 : #include "xfs_log_format.h"
10 : #include "xfs_trans_resv.h"
11 : #include "xfs_bit.h"
12 : #include "xfs_shared.h"
13 : #include "xfs_mount.h"
14 : #include "xfs_defer.h"
15 : #include "xfs_trans.h"
16 : #include "xfs_trans_priv.h"
17 : #include "xfs_refcount_item.h"
18 : #include "xfs_log.h"
19 : #include "xfs_refcount.h"
20 : #include "xfs_error.h"
21 : #include "xfs_log_priv.h"
22 : #include "xfs_log_recover.h"
23 : #include "xfs_ag.h"
24 : #include "xfs_rtgroup.h"
25 :
struct kmem_cache *xfs_cui_cache;	/* slab cache for CUI log items */
struct kmem_cache *xfs_cud_cache;	/* slab cache for CUD log items */

/* Forward declaration; needed by xfs_cui_init() before the ops table. */
static const struct xfs_item_ops xfs_cui_item_ops;
30 :
/* Convert a generic log item pointer back to its containing CUI item. */
static inline struct xfs_cui_log_item *CUI_ITEM(struct xfs_log_item *lip)
{
	return container_of(lip, struct xfs_cui_log_item, cui_item);
}
35 :
36 : STATIC void
37 144990316 : xfs_cui_item_free(
38 : struct xfs_cui_log_item *cuip)
39 : {
40 144990316 : kmem_free(cuip->cui_item.li_lv_shadow);
41 144990320 : if (cuip->cui_format.cui_nextents > XFS_CUI_MAX_FAST_EXTENTS)
42 0 : kmem_free(cuip);
43 : else
44 144990320 : kmem_cache_free(xfs_cui_cache, cuip);
45 144990320 : }
46 :
/*
 * Freeing the CUI requires that we remove it from the AIL if it has already
 * been placed there. However, the CUI may not yet have been placed in the AIL
 * when called by xfs_cui_release() from CUD processing due to the ordering of
 * committed vs unpin operations in bulk insert operations. Hence the reference
 * count to ensure only the last caller frees the CUI.
 */
STATIC void
xfs_cui_release(
	struct xfs_cui_log_item	*cuip)
{
	ASSERT(atomic_read(&cuip->cui_refcount) > 0);
	if (!atomic_dec_and_test(&cuip->cui_refcount))
		return;

	/* Last reference: pull the item off the AIL (if present) and free. */
	xfs_trans_ail_delete(&cuip->cui_item, 0);
	xfs_cui_item_free(cuip);
}
65 :
66 :
67 : STATIC void
68 144961910 : xfs_cui_item_size(
69 : struct xfs_log_item *lip,
70 : int *nvecs,
71 : int *nbytes)
72 : {
73 144961910 : struct xfs_cui_log_item *cuip = CUI_ITEM(lip);
74 :
75 144961910 : *nvecs += 1;
76 144961910 : *nbytes += xfs_cui_log_format_sizeof(cuip->cui_format.cui_nextents);
77 144961910 : }
78 :
79 : /*
80 : * This is called to fill in the vector of log iovecs for the
81 : * given cui log item. We use only 1 iovec, and we point that
82 : * at the cui_log_format structure embedded in the cui item.
83 : * It is at this point that we assert that all of the extent
84 : * slots in the cui item have been filled.
85 : */
86 : STATIC void
87 144961618 : xfs_cui_item_format(
88 : struct xfs_log_item *lip,
89 : struct xfs_log_vec *lv)
90 : {
91 144961618 : struct xfs_cui_log_item *cuip = CUI_ITEM(lip);
92 144961618 : struct xfs_log_iovec *vecp = NULL;
93 :
94 144961618 : ASSERT(atomic_read(&cuip->cui_next_extent) ==
95 : cuip->cui_format.cui_nextents);
96 :
97 144961618 : cuip->cui_format.cui_type = XFS_LI_CUI;
98 144961618 : cuip->cui_format.cui_size = 1;
99 :
100 144961618 : xlog_copy_iovec(lv, &vecp, XLOG_REG_TYPE_CUI_FORMAT, &cuip->cui_format,
101 144961618 : xfs_cui_log_format_sizeof(cuip->cui_format.cui_nextents));
102 144961838 : }
103 :
104 : /*
105 : * The unpin operation is the last place an CUI is manipulated in the log. It is
106 : * either inserted in the AIL or aborted in the event of a log I/O error. In
107 : * either case, the CUI transaction has been successfully committed to make it
108 : * this far. Therefore, we expect whoever committed the CUI to either construct
109 : * and commit the CUD or drop the CUD's reference in the event of error. Simply
110 : * drop the log's CUI reference now that the log is done with it.
111 : */
112 : STATIC void
113 144961932 : xfs_cui_item_unpin(
114 : struct xfs_log_item *lip,
115 : int remove)
116 : {
117 144961932 : struct xfs_cui_log_item *cuip = CUI_ITEM(lip);
118 :
119 144961932 : xfs_cui_release(cuip);
120 144962045 : }
121 :
/*
 * The CUI has been either committed or aborted if the transaction has been
 * cancelled. If the transaction was cancelled, an CUD isn't going to be
 * constructed and thus we free the CUI here directly.
 */
STATIC void
xfs_cui_item_release(
	struct xfs_log_item	*lip)
{
	/* Drop the creating transaction's reference. */
	xfs_cui_release(CUI_ITEM(lip));
}
133 :
134 : /*
135 : * Allocate and initialize an cui item with the given number of extents.
136 : */
137 : STATIC struct xfs_cui_log_item *
138 144986591 : xfs_cui_init(
139 : struct xfs_mount *mp,
140 : uint nextents)
141 :
142 : {
143 144986591 : struct xfs_cui_log_item *cuip;
144 :
145 144986591 : ASSERT(nextents > 0);
146 144986591 : if (nextents > XFS_CUI_MAX_FAST_EXTENTS)
147 0 : cuip = kmem_zalloc(xfs_cui_log_item_sizeof(nextents),
148 : 0);
149 : else
150 144986591 : cuip = kmem_cache_zalloc(xfs_cui_cache,
151 : GFP_KERNEL | __GFP_NOFAIL);
152 :
153 144985987 : xfs_log_item_init(mp, &cuip->cui_item, XFS_LI_CUI, &xfs_cui_item_ops);
154 144986887 : cuip->cui_format.cui_nextents = nextents;
155 144986887 : cuip->cui_format.cui_id = (uintptr_t)(void *)cuip;
156 144986887 : atomic_set(&cuip->cui_next_extent, 0);
157 144986887 : atomic_set(&cuip->cui_refcount, 2);
158 :
159 144986887 : return cuip;
160 : }
161 :
/* Convert a generic log item pointer back to its containing CUD item. */
static inline struct xfs_cud_log_item *CUD_ITEM(struct xfs_log_item *lip)
{
	return container_of(lip, struct xfs_cud_log_item, cud_item);
}
166 :
/*
 * Report the log space needed for a CUD: one iovec covering the fixed-size
 * cud_log_format structure.
 */
STATIC void
xfs_cud_item_size(
	struct xfs_log_item	*lip,
	int			*nvecs,
	int			*nbytes)
{
	*nvecs += 1;
	*nbytes += sizeof(struct xfs_cud_log_format);
}
176 :
177 : /*
178 : * This is called to fill in the vector of log iovecs for the
179 : * given cud log item. We use only 1 iovec, and we point that
180 : * at the cud_log_format structure embedded in the cud item.
181 : * It is at this point that we assert that all of the extent
182 : * slots in the cud item have been filled.
183 : */
184 : STATIC void
185 149846 : xfs_cud_item_format(
186 : struct xfs_log_item *lip,
187 : struct xfs_log_vec *lv)
188 : {
189 149846 : struct xfs_cud_log_item *cudp = CUD_ITEM(lip);
190 149846 : struct xfs_log_iovec *vecp = NULL;
191 :
192 149846 : cudp->cud_format.cud_type = XFS_LI_CUD;
193 149846 : cudp->cud_format.cud_size = 1;
194 :
195 149846 : xlog_copy_iovec(lv, &vecp, XLOG_REG_TYPE_CUD_FORMAT, &cudp->cud_format,
196 : sizeof(struct xfs_cud_log_format));
197 149846 : }
198 :
/*
 * The CUD is either committed or aborted if the transaction is cancelled. If
 * the transaction is cancelled, drop our reference to the CUI and free the
 * CUD.
 */
STATIC void
xfs_cud_item_release(
	struct xfs_log_item	*lip)
{
	struct xfs_cud_log_item	*cudp = CUD_ITEM(lip);

	/* Drop the reference the CUD held on its intent item. */
	xfs_cui_release(cudp->cud_cuip);
	/* Free the shadow log vector buffer before the item itself. */
	kmem_free(cudp->cud_item.li_lv_shadow);
	kmem_cache_free(xfs_cud_cache, cudp);
}
214 :
215 : static struct xfs_log_item *
216 144960635 : xfs_cud_item_intent(
217 : struct xfs_log_item *lip)
218 : {
219 144960635 : return &CUD_ITEM(lip)->cud_cuip->cui_item;
220 : }
221 :
/* Log item operations for the refcount update done (CUD) item. */
static const struct xfs_item_ops xfs_cud_item_ops = {
	.flags		= XFS_ITEM_RELEASE_WHEN_COMMITTED |
			  XFS_ITEM_INTENT_DONE,
	.iop_size	= xfs_cud_item_size,
	.iop_format	= xfs_cud_item_format,
	.iop_release	= xfs_cud_item_release,
	.iop_intent	= xfs_cud_item_intent,
};
230 :
231 : static struct xfs_cud_log_item *
232 144961493 : xfs_trans_get_cud(
233 : struct xfs_trans *tp,
234 : struct xfs_cui_log_item *cuip)
235 : {
236 144961493 : struct xfs_cud_log_item *cudp;
237 :
238 144961493 : cudp = kmem_cache_zalloc(xfs_cud_cache, GFP_KERNEL | __GFP_NOFAIL);
239 144961714 : xfs_log_item_init(tp->t_mountp, &cudp->cud_item, XFS_LI_CUD,
240 : &xfs_cud_item_ops);
241 144961762 : cudp->cud_cuip = cuip;
242 144961762 : cudp->cud_format.cud_cui_id = cuip->cui_format.cui_id;
243 :
244 144961762 : xfs_trans_add_item(tp, &cudp->cud_item);
245 144961882 : return cudp;
246 : }
247 :
/*
 * Finish an refcount update and log it to the CUD. Note that the
 * transaction is marked dirty regardless of whether the refcount
 * update succeeds or fails to support the CUI/CUD lifecycle rules.
 */
static int
xfs_trans_log_finish_refcount_update(
	struct xfs_trans		*tp,
	struct xfs_cud_log_item		*cudp,
	struct xfs_refcount_intent	*ri,
	struct xfs_btree_cur		**pcur)
{
	int				error;

	error = xfs_refcount_finish_one(tp, ri, pcur);

	/*
	 * Mark the transaction dirty, even on error. This ensures the
	 * transaction is aborted, which:
	 *
	 * 1.) releases the CUI and frees the CUD
	 * 2.) shuts down the filesystem
	 */
	tp->t_flags |= XFS_TRANS_DIRTY | XFS_TRANS_HAS_INTENT_DONE;
	set_bit(XFS_LI_DIRTY, &cudp->cud_item.li_flags);

	return error;
}
276 :
277 : /* Sort refcount intents by AG. */
278 : static int
279 940806 : xfs_refcount_update_diff_items(
280 : void *priv,
281 : const struct list_head *a,
282 : const struct list_head *b)
283 : {
284 940806 : struct xfs_refcount_intent *ra;
285 940806 : struct xfs_refcount_intent *rb;
286 :
287 940806 : ra = container_of(a, struct xfs_refcount_intent, ri_list);
288 940806 : rb = container_of(b, struct xfs_refcount_intent, ri_list);
289 :
290 940806 : ASSERT(ra->ri_realtime == rb->ri_realtime);
291 :
292 940806 : if (ra->ri_realtime)
293 638995 : return ra->ri_rtg->rtg_rgno - rb->ri_rtg->rtg_rgno;
294 :
295 301811 : return ra->ri_pag->pag_agno - rb->ri_pag->pag_agno;
296 : }
297 :
/* Log refcount updates in the intent item. */
STATIC void
xfs_refcount_update_log_item(
	struct xfs_trans		*tp,
	struct xfs_cui_log_item		*cuip,
	struct xfs_refcount_intent	*ri)
{
	uint				next_extent;
	struct xfs_phys_extent		*pmap;

	tp->t_flags |= XFS_TRANS_DIRTY;
	set_bit(XFS_LI_DIRTY, &cuip->cui_item.li_flags);

	/*
	 * atomic_inc_return gives us the value after the increment;
	 * we want to use it as an array index so we need to subtract 1 from
	 * it.
	 */
	next_extent = atomic_inc_return(&cuip->cui_next_extent) - 1;
	ASSERT(next_extent < cuip->cui_format.cui_nextents);
	pmap = &cuip->cui_format.cui_extents[next_extent];
	pmap->pe_startblock = ri->ri_startblock;
	pmap->pe_len = ri->ri_blockcount;

	pmap->pe_flags = 0;
	switch (ri->ri_type) {
	case XFS_REFCOUNT_INCREASE:
	case XFS_REFCOUNT_DECREASE:
	case XFS_REFCOUNT_ALLOC_COW:
	case XFS_REFCOUNT_FREE_COW:
		/* The operation type is encoded directly in the flag bits. */
		pmap->pe_flags |= ri->ri_type;
		break;
	default:
		ASSERT(0);
	}
	if (ri->ri_realtime)
		pmap->pe_flags |= XFS_REFCOUNT_EXTENT_REALTIME;
}
336 :
/*
 * Create a CUI for @count refcount updates, attach it to the transaction,
 * and log every intent from @items into it (sorted by group when @sort).
 */
static struct xfs_log_item *
xfs_refcount_update_create_intent(
	struct xfs_trans		*tp,
	struct list_head		*items,
	unsigned int			count,
	bool				sort)
{
	struct xfs_mount		*mp = tp->t_mountp;
	struct xfs_cui_log_item		*cuip = xfs_cui_init(mp, count);
	struct xfs_refcount_intent	*ri;

	ASSERT(count > 0);

	xfs_trans_add_item(tp, &cuip->cui_item);
	if (sort)
		list_sort(mp, items, xfs_refcount_update_diff_items);
	list_for_each_entry(ri, items, ri_list)
		xfs_refcount_update_log_item(tp, cuip, ri);
	return &cuip->cui_item;
}
357 :
358 : /* Get an CUD so we can process all the deferred refcount updates. */
359 : static struct xfs_log_item *
360 144959354 : xfs_refcount_update_create_done(
361 : struct xfs_trans *tp,
362 : struct xfs_log_item *intent,
363 : unsigned int count)
364 : {
365 144959354 : return &xfs_trans_get_cud(tp, CUI_ITEM(intent))->cud_item;
366 : }
367 :
368 : /* Take a passive ref to the AG containing the space we're refcounting. */
369 : void
370 145901528 : xfs_refcount_update_get_group(
371 : struct xfs_mount *mp,
372 : struct xfs_refcount_intent *ri)
373 : {
374 145901528 : xfs_agnumber_t agno;
375 :
376 145901528 : if (ri->ri_realtime) {
377 28507418 : xfs_rgnumber_t rgno;
378 :
379 28507418 : rgno = xfs_rtb_to_rgno(mp, ri->ri_startblock);
380 28507188 : ri->ri_rtg = xfs_rtgroup_intent_get(mp, rgno);
381 28506810 : return;
382 : }
383 :
384 117394110 : agno = XFS_FSB_TO_AGNO(mp, ri->ri_startblock);
385 117394110 : ri->ri_pag = xfs_perag_intent_get(mp, agno);
386 : }
387 :
388 : /* Release a passive AG ref after finishing refcounting work. */
389 : static inline void
390 145902744 : xfs_refcount_update_put_group(
391 : struct xfs_refcount_intent *ri)
392 : {
393 145902744 : if (ri->ri_realtime) {
394 28508257 : xfs_rtgroup_intent_put(ri->ri_rtg);
395 28508257 : return;
396 : }
397 :
398 117394487 : xfs_perag_intent_put(ri->ri_pag);
399 : }
400 :
/* Process a deferred refcount update. */
STATIC int
xfs_refcount_update_finish_item(
	struct xfs_trans		*tp,
	struct xfs_log_item		*done,
	struct list_head		*item,
	struct xfs_btree_cur		**state)
{
	struct xfs_refcount_intent	*ri;
	int				error;

	ri = container_of(item, struct xfs_refcount_intent, ri_list);
	error = xfs_trans_log_finish_refcount_update(tp, CUD_ITEM(done), ri,
			state);

	/* Did we run out of reservation?  Requeue what we didn't finish. */
	if (!error && ri->ri_blockcount > 0) {
		ASSERT(ri->ri_type == XFS_REFCOUNT_INCREASE ||
		       ri->ri_type == XFS_REFCOUNT_DECREASE);
		return -EAGAIN;
	}

	/* Fully done (or failed hard): drop the group ref and the intent. */
	xfs_refcount_update_put_group(ri);
	kmem_cache_free(xfs_refcount_intent_cache, ri);
	return error;
}
427 :
/* Abort all pending CUIs. */
STATIC void
xfs_refcount_update_abort_intent(
	struct xfs_log_item		*intent)
{
	/* Drop the creating transaction's reference to the CUI. */
	xfs_cui_release(CUI_ITEM(intent));
}
435 :
436 : /* Cancel a deferred refcount update. */
437 : STATIC void
438 1510 : xfs_refcount_update_cancel_item(
439 : struct list_head *item)
440 : {
441 1510 : struct xfs_refcount_intent *ri;
442 :
443 1510 : ri = container_of(item, struct xfs_refcount_intent, ri_list);
444 :
445 1510 : xfs_refcount_update_put_group(ri);
446 1510 : kmem_cache_free(xfs_refcount_intent_cache, ri);
447 1510 : }
448 :
/* Deferred-operation glue for refcount updates. */
const struct xfs_defer_op_type xfs_refcount_update_defer_type = {
	.max_items	= XFS_CUI_MAX_FAST_EXTENTS,
	.create_intent	= xfs_refcount_update_create_intent,
	.abort_intent	= xfs_refcount_update_abort_intent,
	.create_done	= xfs_refcount_update_create_done,
	.finish_item	= xfs_refcount_update_finish_item,
	.finish_cleanup	= xfs_refcount_finish_one_cleanup,
	.cancel_item	= xfs_refcount_update_cancel_item,
};
458 :
459 : /* Is this recovered CUI ok? */
460 : static inline bool
461 1318 : xfs_cui_validate_phys(
462 : struct xfs_mount *mp,
463 : struct xfs_phys_extent *pmap)
464 : {
465 1318 : if (!xfs_has_reflink(mp))
466 : return false;
467 :
468 1318 : if (pmap->pe_flags & ~XFS_REFCOUNT_EXTENT_FLAGS)
469 : return false;
470 :
471 1318 : switch (pmap->pe_flags & XFS_REFCOUNT_EXTENT_TYPE_MASK) {
472 : case XFS_REFCOUNT_INCREASE:
473 : case XFS_REFCOUNT_DECREASE:
474 : case XFS_REFCOUNT_ALLOC_COW:
475 : case XFS_REFCOUNT_FREE_COW:
476 1318 : break;
477 : default:
478 : return false;
479 : }
480 :
481 1318 : if (pmap->pe_flags & XFS_REFCOUNT_EXTENT_REALTIME)
482 0 : return xfs_verify_rtbext(mp, pmap->pe_startblock, pmap->pe_len);
483 :
484 1318 : return xfs_verify_fsbext(mp, pmap->pe_startblock, pmap->pe_len);
485 : }
486 :
/*
 * Process a refcount update intent item that was recovered from the log.
 * We need to update the refcountbt.
 */
STATIC int
xfs_cui_item_recover(
	struct xfs_log_item		*lip,
	struct list_head		*capture_list)
{
	struct xfs_cui_log_item		*cuip = CUI_ITEM(lip);
	struct xfs_cud_log_item		*cudp;
	struct xfs_trans		*tp;
	struct xfs_btree_cur		*rcur = NULL;
	struct xfs_mount		*mp = lip->li_log->l_mp;
	unsigned int			refc_type;
	bool				requeue_only = false;
	int				i;
	int				error = 0;

	/*
	 * First check the validity of the extents described by the
	 * CUI. If any are bad, then assume that all are bad and
	 * just toss the CUI.
	 */
	for (i = 0; i < cuip->cui_format.cui_nextents; i++) {
		if (!xfs_cui_validate_phys(mp,
					&cuip->cui_format.cui_extents[i])) {
			XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp,
					&cuip->cui_format,
					sizeof(cuip->cui_format));
			return -EFSCORRUPTED;
		}
	}

	/*
	 * Under normal operation, refcount updates are deferred, so we
	 * wouldn't be adding them directly to a transaction. All
	 * refcount updates manage reservation usage internally and
	 * dynamically by deferring work that won't fit in the
	 * transaction. Normally, any work that needs to be deferred
	 * gets attached to the same defer_ops that scheduled the
	 * refcount update. However, we're in log recovery here, so we
	 * use the passed in defer_ops and to finish up any work that
	 * doesn't fit. We need to reserve enough blocks to handle a
	 * full btree split on either end of the refcount range.
	 */
	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate,
			mp->m_refc_maxlevels * 2, 0, XFS_TRANS_RESERVE, &tp);
	if (error)
		return error;

	cudp = xfs_trans_get_cud(tp, cuip);

	for (i = 0; i < cuip->cui_format.cui_nextents; i++) {
		struct xfs_refcount_intent	fake = { };
		struct xfs_phys_extent		*pmap;

		pmap = &cuip->cui_format.cui_extents[i];
		refc_type = pmap->pe_flags & XFS_REFCOUNT_EXTENT_TYPE_MASK;
		switch (refc_type) {
		case XFS_REFCOUNT_INCREASE:
		case XFS_REFCOUNT_DECREASE:
		case XFS_REFCOUNT_ALLOC_COW:
		case XFS_REFCOUNT_FREE_COW:
			fake.ri_type = refc_type;
			break;
		default:
			/* Already validated above; defensive check. */
			XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp,
					&cuip->cui_format,
					sizeof(cuip->cui_format));
			error = -EFSCORRUPTED;
			goto abort_error;
		}

		fake.ri_realtime = pmap->pe_flags & XFS_REFCOUNT_EXTENT_REALTIME;
		fake.ri_startblock = pmap->pe_startblock;
		fake.ri_blockcount = pmap->pe_len;

		if (!requeue_only) {
			xfs_refcount_update_get_group(mp, &fake);
			error = xfs_trans_log_finish_refcount_update(tp, cudp,
					&fake, &rcur);
			xfs_refcount_update_put_group(&fake);
		}
		if (error == -EFSCORRUPTED)
			XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp,
					&cuip->cui_format,
					sizeof(cuip->cui_format));
		if (error)
			goto abort_error;

		/* Requeue what we didn't finish. */
		if (fake.ri_blockcount > 0) {
			struct xfs_bmbt_irec	irec = {
				.br_startblock	= fake.ri_startblock,
				.br_blockcount	= fake.ri_blockcount,
			};

			switch (fake.ri_type) {
			case XFS_REFCOUNT_INCREASE:
				xfs_refcount_increase_extent(tp,
						fake.ri_realtime, &irec);
				break;
			case XFS_REFCOUNT_DECREASE:
				xfs_refcount_decrease_extent(tp,
						fake.ri_realtime, &irec);
				break;
			case XFS_REFCOUNT_ALLOC_COW:
				xfs_refcount_alloc_cow_extent(tp,
						fake.ri_realtime,
						irec.br_startblock,
						irec.br_blockcount);
				break;
			case XFS_REFCOUNT_FREE_COW:
				xfs_refcount_free_cow_extent(tp,
						fake.ri_realtime,
						irec.br_startblock,
						irec.br_blockcount);
				break;
			default:
				ASSERT(0);
			}
			/*
			 * Once any step leaves unfinished work, defer all
			 * remaining extents instead of finishing them here.
			 */
			requeue_only = true;
		}
	}

	xfs_refcount_finish_one_cleanup(tp, rcur, error);
	return xfs_defer_ops_capture_and_commit(tp, capture_list);

abort_error:
	xfs_refcount_finish_one_cleanup(tp, rcur, error);
	xfs_trans_cancel(tp);
	return error;
}
621 :
622 : STATIC bool
623 28946 : xfs_cui_item_match(
624 : struct xfs_log_item *lip,
625 : uint64_t intent_id)
626 : {
627 28946 : return CUI_ITEM(lip)->cui_format.cui_id == intent_id;
628 : }
629 :
/* Relog an intent item to push the log tail forward. */
static struct xfs_log_item *
xfs_cui_item_relog(
	struct xfs_log_item	*intent,
	struct xfs_trans	*tp)
{
	struct xfs_cud_log_item	*cudp;
	struct xfs_cui_log_item	*cuip;
	struct xfs_phys_extent	*pmap;
	unsigned int		count;

	count = CUI_ITEM(intent)->cui_format.cui_nextents;
	pmap = CUI_ITEM(intent)->cui_format.cui_extents;

	/* Retire the old intent with a CUD... */
	tp->t_flags |= XFS_TRANS_DIRTY;
	cudp = xfs_trans_get_cud(tp, CUI_ITEM(intent));
	set_bit(XFS_LI_DIRTY, &cudp->cud_item.li_flags);

	/* ...and log a fresh CUI carrying the same extents. */
	cuip = xfs_cui_init(tp->t_mountp, count);
	memcpy(cuip->cui_format.cui_extents, pmap, count * sizeof(*pmap));
	/* All extent slots are pre-filled from the old intent. */
	atomic_set(&cuip->cui_next_extent, count);
	xfs_trans_add_item(tp, &cuip->cui_item);
	set_bit(XFS_LI_DIRTY, &cuip->cui_item.li_flags);
	return &cuip->cui_item;
}
655 :
/* Log item operations for the refcount update intent (CUI) item. */
static const struct xfs_item_ops xfs_cui_item_ops = {
	.flags		= XFS_ITEM_INTENT,
	.iop_size	= xfs_cui_item_size,
	.iop_format	= xfs_cui_item_format,
	.iop_unpin	= xfs_cui_item_unpin,
	.iop_release	= xfs_cui_item_release,
	.iop_recover	= xfs_cui_item_recover,
	.iop_match	= xfs_cui_item_match,
	.iop_relog	= xfs_cui_item_relog,
};
666 :
667 : static inline void
668 28216 : xfs_cui_copy_format(
669 : struct xfs_cui_log_format *dst,
670 : const struct xfs_cui_log_format *src)
671 : {
672 28216 : unsigned int i;
673 :
674 56432 : memcpy(dst, src, offsetof(struct xfs_cui_log_format, cui_extents));
675 :
676 56436 : for (i = 0; i < src->cui_nextents; i++)
677 56440 : memcpy(&dst->cui_extents[i], &src->cui_extents[i],
678 : sizeof(struct xfs_phys_extent));
679 28216 : }
680 :
/*
 * This routine is called to create an in-core extent refcount update
 * item from the cui format structure which was logged on disk.
 * It allocates an in-core cui, copies the extents from the format
 * structure into it, and adds the cui to the AIL with the given
 * LSN.
 */
STATIC int
xlog_recover_cui_commit_pass2(
	struct xlog			*log,
	struct list_head		*buffer_list,
	struct xlog_recover_item	*item,
	xfs_lsn_t			lsn)
{
	struct xfs_mount		*mp = log->l_mp;
	struct xfs_cui_log_item		*cuip;
	struct xfs_cui_log_format	*cui_formatp;
	size_t				len;

	cui_formatp = item->ri_buf[0].i_addr;

	/* The buffer must hold at least a zero-extent format header. */
	if (item->ri_buf[0].i_len < xfs_cui_log_format_sizeof(0)) {
		XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp,
				item->ri_buf[0].i_addr, item->ri_buf[0].i_len);
		return -EFSCORRUPTED;
	}

	/* ...and its length must exactly match what cui_nextents implies. */
	len = xfs_cui_log_format_sizeof(cui_formatp->cui_nextents);
	if (item->ri_buf[0].i_len != len) {
		XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp,
				item->ri_buf[0].i_addr, item->ri_buf[0].i_len);
		return -EFSCORRUPTED;
	}

	cuip = xfs_cui_init(mp, cui_formatp->cui_nextents);
	xfs_cui_copy_format(&cuip->cui_format, cui_formatp);
	atomic_set(&cuip->cui_next_extent, cui_formatp->cui_nextents);
	/*
	 * Insert the intent into the AIL directly and drop one reference so
	 * that finishing or canceling the work will drop the other.
	 */
	xfs_trans_ail_insert(log->l_ailp, &cuip->cui_item, lsn);
	xfs_cui_release(cuip);
	return 0;
}
726 :
/* Log recovery glue for CUI items. */
const struct xlog_recover_item_ops xlog_cui_item_ops = {
	.item_type		= XFS_LI_CUI,
	.commit_pass2		= xlog_recover_cui_commit_pass2,
};
731 :
/*
 * This routine is called when an CUD format structure is found in a committed
 * transaction in the log. Its purpose is to cancel the corresponding CUI if it
 * was still in the log. To do this it searches the AIL for the CUI with an id
 * equal to that in the CUD format structure. If we find it we drop the CUD
 * reference, which removes the CUI from the AIL and frees it.
 */
STATIC int
xlog_recover_cud_commit_pass2(
	struct xlog			*log,
	struct list_head		*buffer_list,
	struct xlog_recover_item	*item,
	xfs_lsn_t			lsn)
{
	struct xfs_cud_log_format	*cud_formatp;

	cud_formatp = item->ri_buf[0].i_addr;
	/* A CUD is fixed size; anything else is corruption. */
	if (item->ri_buf[0].i_len != sizeof(struct xfs_cud_log_format)) {
		XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, log->l_mp,
				item->ri_buf[0].i_addr, item->ri_buf[0].i_len);
		return -EFSCORRUPTED;
	}

	xlog_recover_release_intent(log, XFS_LI_CUI, cud_formatp->cud_cui_id);
	return 0;
}
758 :
/* Log recovery glue for CUD items. */
const struct xlog_recover_item_ops xlog_cud_item_ops = {
	.item_type		= XFS_LI_CUD,
	.commit_pass2		= xlog_recover_cud_commit_pass2,
};
|