Line data Source code
1 : // SPDX-License-Identifier: GPL-2.0+
2 : /*
3 : * Copyright (C) 2016 Oracle. All Rights Reserved.
4 : * Author: Darrick J. Wong <darrick.wong@oracle.com>
5 : */
6 : #include "xfs.h"
7 : #include "xfs_fs.h"
8 : #include "xfs_format.h"
9 : #include "xfs_log_format.h"
10 : #include "xfs_trans_resv.h"
11 : #include "xfs_bit.h"
12 : #include "xfs_shared.h"
13 : #include "xfs_mount.h"
14 : #include "xfs_defer.h"
15 : #include "xfs_trans.h"
16 : #include "xfs_trans_priv.h"
17 : #include "xfs_refcount_item.h"
18 : #include "xfs_log.h"
19 : #include "xfs_refcount.h"
20 : #include "xfs_error.h"
21 : #include "xfs_log_priv.h"
22 : #include "xfs_log_recover.h"
23 : #include "xfs_ag.h"
24 : #include "xfs_rtgroup.h"
25 :
26 : struct kmem_cache *xfs_cui_cache;
27 : struct kmem_cache *xfs_cud_cache;
28 :
29 : static const struct xfs_item_ops xfs_cui_item_ops;
30 :
31 : static inline struct xfs_cui_log_item *CUI_ITEM(struct xfs_log_item *lip)
32 : {
33 : return container_of(lip, struct xfs_cui_log_item, cui_item);
34 : }
35 :
36 : STATIC void
37 291340966 : xfs_cui_item_free(
38 : struct xfs_cui_log_item *cuip)
39 : {
40 291340966 : kmem_free(cuip->cui_item.li_lv_shadow);
41 291340984 : if (cuip->cui_format.cui_nextents > XFS_CUI_MAX_FAST_EXTENTS)
42 0 : kmem_free(cuip);
43 : else
44 291340984 : kmem_cache_free(xfs_cui_cache, cuip);
45 291339861 : }
46 :
47 : /*
48 : * Freeing the CUI requires that we remove it from the AIL if it has already
49 : * been placed there. However, the CUI may not yet have been placed in the AIL
50 : * when xfs_cui_release() is called from CUD processing due to the ordering of
51 : * committed vs unpin operations in bulk insert operations. Hence the reference
52 : * count to ensure only the last caller frees the CUI.
53 : */
54 : STATIC void
55 582650830 : xfs_cui_release(
56 : struct xfs_cui_log_item *cuip)
57 : {
58 582650830 : ASSERT(atomic_read(&cuip->cui_refcount) > 0);
59 582650830 : if (!atomic_dec_and_test(&cuip->cui_refcount))
60 : return;
61 :
62 291340398 : xfs_trans_ail_delete(&cuip->cui_item, 0);
63 291343395 : xfs_cui_item_free(cuip);
64 : }
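
The reference-counting scheme above can be modeled outside the kernel. Below is a minimal userspace sketch using C11 atomics; the names (fake_intent, intent_alloc, intent_put) are invented for illustration. One reference stands in for the log's unpin path and one for the done-item path, and only whichever caller drops the last reference frees the object, mirroring xfs_cui_release().

/* Userspace model only; not the kernel API. */
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct fake_intent {
	atomic_int	refcount;	/* plays the role of cui_refcount */
};

static struct fake_intent *intent_alloc(void)
{
	struct fake_intent *ip = calloc(1, sizeof(*ip));

	/* One reference for the "log" (unpin), one for the done-item path. */
	if (ip)
		atomic_store(&ip->refcount, 2);
	return ip;
}

static void intent_put(struct fake_intent *ip, const char *who)
{
	/* atomic_fetch_sub() returns the old value; 1 means we were last. */
	if (atomic_fetch_sub(&ip->refcount, 1) == 1) {
		printf("%s frees the intent\n", who);
		free(ip);
		return;
	}
	printf("%s drops a reference; intent stays live\n", who);
}

int main(void)
{
	struct fake_intent *ip = intent_alloc();

	if (!ip)
		return 1;
	intent_put(ip, "unpin path");	/* cf. xfs_cui_item_unpin */
	intent_put(ip, "done item");	/* cf. xfs_cud_item_release */
	return 0;
}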
65 :
66 :
67 : STATIC void
68 291304487 : xfs_cui_item_size(
69 : struct xfs_log_item *lip,
70 : int *nvecs,
71 : int *nbytes)
72 : {
73 291304487 : struct xfs_cui_log_item *cuip = CUI_ITEM(lip);
74 :
75 291304487 : *nvecs += 1;
76 291304487 : *nbytes += xfs_cui_log_format_sizeof(cuip->cui_format.cui_nextents);
77 291304487 : }
78 :
79 : /*
80 : * This is called to fill in the vector of log iovecs for the
81 : * given cui log item. We use only 1 iovec, and we point that
82 : * at the cui_log_format structure embedded in the cui item.
83 : * It is at this point that we assert that all of the extent
84 : * slots in the cui item have been filled.
85 : */
86 : STATIC void
87 291302128 : xfs_cui_item_format(
88 : struct xfs_log_item *lip,
89 : struct xfs_log_vec *lv)
90 : {
91 291302128 : struct xfs_cui_log_item *cuip = CUI_ITEM(lip);
92 291302128 : struct xfs_log_iovec *vecp = NULL;
93 :
94 291302128 : ASSERT(atomic_read(&cuip->cui_next_extent) ==
95 : cuip->cui_format.cui_nextents);
96 :
97 291302128 : cuip->cui_format.cui_type = XFS_LI_CUI;
98 291302128 : cuip->cui_format.cui_size = 1;
99 :
100 291302128 : xlog_copy_iovec(lv, &vecp, XLOG_REG_TYPE_CUI_FORMAT, &cuip->cui_format,
101 291302128 : xfs_cui_log_format_sizeof(cuip->cui_format.cui_nextents));
102 291296978 : }
103 :
104 : /*
105 : * The unpin operation is the last place a CUI is manipulated in the log. It is
106 : * either inserted in the AIL or aborted in the event of a log I/O error. In
107 : * either case, the CUI transaction has been successfully committed to make it
108 : * this far. Therefore, we expect whoever committed the CUI to either construct
109 : * and commit the CUD or drop the CUD's reference in the event of error. Simply
110 : * drop the log's CUI reference now that the log is done with it.
111 : */
112 : STATIC void
113 291293865 : xfs_cui_item_unpin(
114 : struct xfs_log_item *lip,
115 : int remove)
116 : {
117 291293865 : struct xfs_cui_log_item *cuip = CUI_ITEM(lip);
118 :
119 291293865 : xfs_cui_release(cuip);
120 291302099 : }
121 :
122 : /*
123 : * The CUI has been either committed or aborted if the transaction has been
124 : * cancelled. If the transaction was cancelled, a CUD isn't going to be
125 : * constructed and thus we free the CUI here directly.
126 : */
127 : STATIC void
128 34937 : xfs_cui_item_release(
129 : struct xfs_log_item *lip)
130 : {
131 34937 : xfs_cui_release(CUI_ITEM(lip));
132 34937 : }
133 :
134 : /*
135 : * Allocate and initialize a cui item with the given number of extents.
136 : */
137 : STATIC struct xfs_cui_log_item *
138 291326807 : xfs_cui_init(
139 : struct xfs_mount *mp,
140 : uint nextents)
141 :
142 : {
143 291326807 : struct xfs_cui_log_item *cuip;
144 :
145 291326807 : ASSERT(nextents > 0);
146 291326807 : if (nextents > XFS_CUI_MAX_FAST_EXTENTS)
147 0 : cuip = kmem_zalloc(xfs_cui_log_item_sizeof(nextents),
148 : 0);
149 : else
150 291326807 : cuip = kmem_cache_zalloc(xfs_cui_cache,
151 : GFP_KERNEL | __GFP_NOFAIL);
152 :
153 291340997 : xfs_log_item_init(mp, &cuip->cui_item, XFS_LI_CUI, &xfs_cui_item_ops);
154 291335211 : cuip->cui_format.cui_nextents = nextents;
155 291335211 : cuip->cui_format.cui_id = (uintptr_t)(void *)cuip;
156 291335211 : atomic_set(&cuip->cui_next_extent, 0);
157 291335211 : atomic_set(&cuip->cui_refcount, 2);
158 :
159 291335211 : return cuip;
160 : }
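
As a hedged illustration of the sizing decision in xfs_cui_init() (small items drawn from a preallocated cache, oversized ones allocated to fit their flexible extent array), here is a userspace approximation; fake_cui, FAKE_MAX_FAST_EXTENTS, and fake_cui_sizeof() are invented stand-ins for the kernel's xfs_cui_log_item_sizeof() logic, and both branches simply use calloc().

/* Userspace approximation; fake_* names are not the kernel's. */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define FAKE_MAX_FAST_EXTENTS	16	/* assumption, standing in for XFS_CUI_MAX_FAST_EXTENTS */

struct fake_extent {
	uint64_t	startblock;
	uint32_t	len;
	uint32_t	flags;
};

struct fake_cui {
	unsigned int		nextents;
	struct fake_extent	extents[];	/* flexible array, like cui_extents */
};

/* Header plus one record per extent. */
static size_t fake_cui_sizeof(unsigned int nextents)
{
	return sizeof(struct fake_cui) + nextents * sizeof(struct fake_extent);
}

static struct fake_cui *fake_cui_init(unsigned int nextents)
{
	struct fake_cui *cuip;

	/*
	 * The real code draws small items from a cache sized for the
	 * fast-path maximum; here both branches just use calloc().
	 */
	if (nextents > FAKE_MAX_FAST_EXTENTS)
		cuip = calloc(1, fake_cui_sizeof(nextents));
	else
		cuip = calloc(1, fake_cui_sizeof(FAKE_MAX_FAST_EXTENTS));
	if (cuip)
		cuip->nextents = nextents;
	return cuip;
}

int main(void)
{
	struct fake_cui *cuip = fake_cui_init(4);

	if (!cuip)
		return 1;
	printf("%u extents, %zu bytes of format data\n", cuip->nextents,
	       fake_cui_sizeof(cuip->nextents));
	free(cuip);
	return 0;
}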
161 :
162 : static inline struct xfs_cud_log_item *CUD_ITEM(struct xfs_log_item *lip)
163 : {
164 : return container_of(lip, struct xfs_cud_log_item, cud_item);
165 : }
166 :
167 : STATIC void
168 291303953 : xfs_cud_item_size(
169 : struct xfs_log_item *lip,
170 : int *nvecs,
171 : int *nbytes)
172 : {
173 291303953 : *nvecs += 1;
174 291303953 : *nbytes += sizeof(struct xfs_cud_log_format);
175 291303953 : }
176 :
177 : /*
178 : * This is called to fill in the vector of log iovecs for the
179 : * given cud log item. We use only 1 iovec, and we point that
180 : * at the cud_log_format structure embedded in the cud item.
181 : * Unlike the cui, the cud carries no extent data of its own; it simply
182 : * records that the corresponding intent has been completed.
183 : */
184 : STATIC void
185 439190 : xfs_cud_item_format(
186 : struct xfs_log_item *lip,
187 : struct xfs_log_vec *lv)
188 : {
189 439190 : struct xfs_cud_log_item *cudp = CUD_ITEM(lip);
190 439190 : struct xfs_log_iovec *vecp = NULL;
191 :
192 439190 : cudp->cud_format.cud_type = XFS_LI_CUD;
193 439190 : cudp->cud_format.cud_size = 1;
194 :
195 439190 : xlog_copy_iovec(lv, &vecp, XLOG_REG_TYPE_CUD_FORMAT, &cudp->cud_format,
196 : sizeof(struct xfs_cud_log_format));
197 439185 : }
198 :
199 : /*
200 : * The CUD is either committed or aborted if the transaction is cancelled. If
201 : * the transaction is cancelled, drop our reference to the CUI and free the
202 : * CUD.
203 : */
204 : STATIC void
205 291308511 : xfs_cud_item_release(
206 : struct xfs_log_item *lip)
207 : {
208 291308511 : struct xfs_cud_log_item *cudp = CUD_ITEM(lip);
209 :
210 291308511 : xfs_cui_release(cudp->cud_cuip);
211 291308654 : kmem_free(cudp->cud_item.li_lv_shadow);
212 291307652 : kmem_cache_free(xfs_cud_cache, cudp);
213 291306264 : }
214 :
215 : static struct xfs_log_item *
216 291261120 : xfs_cud_item_intent(
217 : struct xfs_log_item *lip)
218 : {
219 291261120 : return &CUD_ITEM(lip)->cud_cuip->cui_item;
220 : }
221 :
222 : static const struct xfs_item_ops xfs_cud_item_ops = {
223 : .flags = XFS_ITEM_RELEASE_WHEN_COMMITTED |
224 : XFS_ITEM_INTENT_DONE,
225 : .iop_size = xfs_cud_item_size,
226 : .iop_format = xfs_cud_item_format,
227 : .iop_release = xfs_cud_item_release,
228 : .iop_intent = xfs_cud_item_intent,
229 : };
230 :
231 : static struct xfs_cud_log_item *
232 291303109 : xfs_trans_get_cud(
233 : struct xfs_trans *tp,
234 : struct xfs_cui_log_item *cuip)
235 : {
236 291303109 : struct xfs_cud_log_item *cudp;
237 :
238 291303109 : cudp = kmem_cache_zalloc(xfs_cud_cache, GFP_KERNEL | __GFP_NOFAIL);
239 291305027 : xfs_log_item_init(tp->t_mountp, &cudp->cud_item, XFS_LI_CUD,
240 : &xfs_cud_item_ops);
241 291298600 : cudp->cud_cuip = cuip;
242 291298600 : cudp->cud_format.cud_cui_id = cuip->cui_format.cui_id;
243 :
244 291298600 : xfs_trans_add_item(tp, &cudp->cud_item);
245 291301982 : return cudp;
246 : }
247 :
248 : /*
249 : * Finish a refcount update and log it to the CUD. Note that the
250 : * transaction is marked dirty regardless of whether the refcount
251 : * update succeeds or fails to support the CUI/CUD lifecycle rules.
252 : */
253 : static int
254 294703194 : xfs_trans_log_finish_refcount_update(
255 : struct xfs_trans *tp,
256 : struct xfs_cud_log_item *cudp,
257 : struct xfs_refcount_intent *ri,
258 : struct xfs_btree_cur **pcur)
259 : {
260 294703194 : int error;
261 :
262 294703194 : error = xfs_refcount_finish_one(tp, ri, pcur);
263 :
264 : /*
265 : * Mark the transaction dirty, even on error. This ensures the
266 : * transaction is aborted, which:
267 : *
268 : * 1.) releases the CUI and frees the CUD
269 : * 2.) shuts down the filesystem
270 : */
271 294706558 : tp->t_flags |= XFS_TRANS_DIRTY | XFS_TRANS_HAS_INTENT_DONE;
272 294706558 : set_bit(XFS_LI_DIRTY, &cudp->cud_item.li_flags);
273 :
274 294712753 : return error;
275 : }
276 :
277 : /* Sort refcount intents by AG or realtime group. */
278 : static int
279 3448302 : xfs_refcount_update_diff_items(
280 : void *priv,
281 : const struct list_head *a,
282 : const struct list_head *b)
283 : {
284 3448302 : struct xfs_refcount_intent *ra;
285 3448302 : struct xfs_refcount_intent *rb;
286 :
287 3448302 : ra = container_of(a, struct xfs_refcount_intent, ri_list);
288 3448302 : rb = container_of(b, struct xfs_refcount_intent, ri_list);
289 :
290 3448302 : ASSERT(ra->ri_realtime == rb->ri_realtime);
291 :
292 3448302 : if (ra->ri_realtime)
293 1395139 : return ra->ri_rtg->rtg_rgno - rb->ri_rtg->rtg_rgno;
294 :
295 2053163 : return ra->ri_pag->pag_agno - rb->ri_pag->pag_agno;
296 : }
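
The comparator above follows the usual negative/zero/positive contract so that list_sort() leaves the intents in ascending group order. A rough userspace analogue using qsort() and invented types looks like this:

/* Userspace analogue of the sort comparator; types are invented. */
#include <stdio.h>
#include <stdlib.h>

struct fake_intent {
	unsigned int	group;	/* stands in for pag_agno or rtg_rgno */
};

/* Same contract as the list_sort() callback: <0, 0, >0 by group number. */
static int diff_items(const void *a, const void *b)
{
	const struct fake_intent *ra = a;
	const struct fake_intent *rb = b;

	return (int)ra->group - (int)rb->group;
}

int main(void)
{
	struct fake_intent items[] = {
		{ .group = 3 }, { .group = 0 }, { .group = 2 },
	};
	size_t i;

	qsort(items, sizeof(items) / sizeof(items[0]), sizeof(items[0]),
	      diff_items);
	for (i = 0; i < sizeof(items) / sizeof(items[0]); i++)
		printf("group %u\n", items[i].group);
	return 0;
}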
297 :
298 : /* Log refcount updates in the intent item. */
299 : STATIC void
300 294705297 : xfs_refcount_update_log_item(
301 : struct xfs_trans *tp,
302 : struct xfs_cui_log_item *cuip,
303 : struct xfs_refcount_intent *ri)
304 : {
305 294705297 : uint next_extent;
306 294705297 : struct xfs_phys_extent *pmap;
307 :
308 294705297 : tp->t_flags |= XFS_TRANS_DIRTY;
309 294705297 : set_bit(XFS_LI_DIRTY, &cuip->cui_item.li_flags);
310 :
311 : /*
312 : * atomic_inc_return gives us the value after the increment;
313 : * we want to use it as an array index so we need to subtract 1 from
314 : * it.
315 : */
316 294710942 : next_extent = atomic_inc_return(&cuip->cui_next_extent) - 1;
317 294705229 : ASSERT(next_extent < cuip->cui_format.cui_nextents);
318 294705229 : pmap = &cuip->cui_format.cui_extents[next_extent];
319 294705229 : pmap->pe_startblock = ri->ri_startblock;
320 294705229 : pmap->pe_len = ri->ri_blockcount;
321 :
322 294705229 : pmap->pe_flags = 0;
323 294705229 : switch (ri->ri_type) {
324 294705229 : case XFS_REFCOUNT_INCREASE:
325 : case XFS_REFCOUNT_DECREASE:
326 : case XFS_REFCOUNT_ALLOC_COW:
327 : case XFS_REFCOUNT_FREE_COW:
328 294705229 : pmap->pe_flags |= ri->ri_type;
329 294705229 : break;
330 0 : default:
331 0 : ASSERT(0);
332 : }
333 294705229 : if (ri->ri_realtime)
334 104598447 : pmap->pe_flags |= XFS_REFCOUNT_EXTENT_REALTIME;
335 294705229 : }
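
The "increment, then use the previous value as an array index" pattern described in the comment above can be sketched with C11 atomics. Note that in this made-up example atomic_fetch_add() already returns the old value, so the explicit "- 1" needed with the kernel's atomic_inc_return() is unnecessary.

/* Userspace sketch of slot allocation via an atomic counter. */
#include <stdatomic.h>
#include <stdio.h>

#define NEXTENTS	4

struct fake_cui {
	atomic_uint	next_extent;		/* like cui_next_extent */
	unsigned long	extents[NEXTENTS];	/* like cui_extents[] */
};

/* Claim the next free slot; the returned old value is the array index. */
static unsigned int claim_slot(struct fake_cui *cuip)
{
	return atomic_fetch_add(&cuip->next_extent, 1);
}

int main(void)
{
	struct fake_cui cui = { .next_extent = 0 };
	int i;

	for (i = 0; i < NEXTENTS; i++) {
		unsigned int slot = claim_slot(&cui);

		cui.extents[slot] = 100 + slot;
	}
	printf("next free slot: %u\n", atomic_load(&cui.next_extent));
	return 0;
}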
336 :
337 : static struct xfs_log_item *
338 291253491 : xfs_refcount_update_create_intent(
339 : struct xfs_trans *tp,
340 : struct list_head *items,
341 : unsigned int count,
342 : bool sort)
343 : {
344 291253491 : struct xfs_mount *mp = tp->t_mountp;
345 291253491 : struct xfs_cui_log_item *cuip = xfs_cui_init(mp, count);
346 291256168 : struct xfs_refcount_intent *ri;
347 :
348 291256168 : ASSERT(count > 0);
349 :
350 291256168 : xfs_trans_add_item(tp, &cuip->cui_item);
351 291257830 : if (sort)
352 291256945 : list_sort(mp, items, xfs_refcount_update_diff_items);
353 585962591 : list_for_each_entry(ri, items, ri_list)
354 294704974 : xfs_refcount_update_log_item(tp, cuip, ri);
355 291257617 : return &cuip->cui_item;
356 : }
357 :
358 : /* Get a CUD so we can process all the deferred refcount updates. */
359 : static struct xfs_log_item *
360 291260315 : xfs_refcount_update_create_done(
361 : struct xfs_trans *tp,
362 : struct xfs_log_item *intent,
363 : unsigned int count)
364 : {
365 291260315 : return &xfs_trans_get_cud(tp, CUI_ITEM(intent))->cud_item;
366 : }
367 :
368 : /* Take a passive ref to the AG or rtgroup containing the space we're refcounting. */
369 : void
370 294698890 : xfs_refcount_update_get_group(
371 : struct xfs_mount *mp,
372 : struct xfs_refcount_intent *ri)
373 : {
374 294698890 : xfs_agnumber_t agno;
375 :
376 294698890 : if (ri->ri_realtime) {
377 104598118 : xfs_rgnumber_t rgno;
378 :
379 104598118 : rgno = xfs_rtb_to_rgno(mp, ri->ri_startblock);
380 104597507 : ri->ri_rtg = xfs_rtgroup_intent_get(mp, rgno);
381 104599167 : return;
382 : }
383 :
384 190100772 : agno = XFS_FSB_TO_AGNO(mp, ri->ri_startblock);
385 190100772 : ri->ri_pag = xfs_perag_intent_get(mp, agno);
386 : }
387 :
388 : /* Release a passive AG or rtgroup ref after finishing refcounting work. */
389 : static inline void
390 294709789 : xfs_refcount_update_put_group(
391 : struct xfs_refcount_intent *ri)
392 : {
393 294709789 : if (ri->ri_realtime) {
394 104599381 : xfs_rtgroup_intent_put(ri->ri_rtg);
395 104599381 : return;
396 : }
397 :
398 190110408 : xfs_perag_intent_put(ri->ri_pag);
399 : }
400 :
401 : /* Process a deferred refcount update. */
402 : STATIC int
403 294703674 : xfs_refcount_update_finish_item(
404 : struct xfs_trans *tp,
405 : struct xfs_log_item *done,
406 : struct list_head *item,
407 : struct xfs_btree_cur **state)
408 : {
409 294703674 : struct xfs_refcount_intent *ri;
410 294703674 : int error;
411 :
412 294703674 : ri = container_of(item, struct xfs_refcount_intent, ri_list);
413 294703674 : error = xfs_trans_log_finish_refcount_update(tp, CUD_ITEM(done), ri,
414 : state);
415 :
416 : /* Did we run out of reservation? Requeue what we didn't finish. */
417 294710200 : if (!error && ri->ri_blockcount > 0) {
418 1795 : ASSERT(ri->ri_type == XFS_REFCOUNT_INCREASE ||
419 : ri->ri_type == XFS_REFCOUNT_DECREASE);
420 1795 : return -EAGAIN;
421 : }
422 :
423 294708405 : xfs_refcount_update_put_group(ri);
424 294707332 : kmem_cache_free(xfs_refcount_intent_cache, ri);
425 294707332 : return error;
426 : }
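
To show how the -EAGAIN requeue above plays out from the caller's side (each pass modeling one transaction roll by the deferred-ops machinery), here is a speculative userspace sketch with invented fake_* helpers and an arbitrary per-pass budget.

/* Userspace sketch of the requeue-on-EAGAIN loop; all names are invented. */
#include <errno.h>
#include <stdio.h>

struct fake_intent {
	unsigned long	blockcount;	/* work remaining, like ri_blockcount */
};

/* Do a bounded chunk of work, the way a transaction reservation limits us. */
static int fake_finish_one(struct fake_intent *ri)
{
	unsigned long step = 16;	/* arbitrary per-pass budget */

	ri->blockcount -= (ri->blockcount < step) ? ri->blockcount : step;
	return 0;
}

static int fake_finish_item(struct fake_intent *ri)
{
	int error = fake_finish_one(ri);

	/* Ran out of budget?  Ask the caller to requeue what's left. */
	if (!error && ri->blockcount > 0)
		return -EAGAIN;
	return error;
}

int main(void)
{
	struct fake_intent ri = { .blockcount = 50 };
	int error, passes = 0;

	do {
		error = fake_finish_item(&ri);
		passes++;	/* each pass models one transaction roll */
	} while (error == -EAGAIN);

	printf("finished in %d passes, error %d\n", passes, error);
	return 0;
}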
427 :
428 : /* Abort all pending CUIs. */
429 : STATIC void
430 1686 : xfs_refcount_update_abort_intent(
431 : struct xfs_log_item *intent)
432 : {
433 1686 : xfs_cui_release(CUI_ITEM(intent));
434 1686 : }
435 :
436 : /* Cancel a deferred refcount update. */
437 : STATIC void
438 1700 : xfs_refcount_update_cancel_item(
439 : struct list_head *item)
440 : {
441 1700 : struct xfs_refcount_intent *ri;
442 :
443 1700 : ri = container_of(item, struct xfs_refcount_intent, ri_list);
444 :
445 1700 : xfs_refcount_update_put_group(ri);
446 1700 : kmem_cache_free(xfs_refcount_intent_cache, ri);
447 1700 : }
448 :
449 : const struct xfs_defer_op_type xfs_refcount_update_defer_type = {
450 : .max_items = XFS_CUI_MAX_FAST_EXTENTS,
451 : .create_intent = xfs_refcount_update_create_intent,
452 : .abort_intent = xfs_refcount_update_abort_intent,
453 : .create_done = xfs_refcount_update_create_done,
454 : .finish_item = xfs_refcount_update_finish_item,
455 : .finish_cleanup = xfs_refcount_finish_one_cleanup,
456 : .cancel_item = xfs_refcount_update_cancel_item,
457 : };
458 :
459 : /* Is this recovered CUI ok? */
460 : static inline bool
461 1647 : xfs_cui_validate_phys(
462 : struct xfs_mount *mp,
463 : struct xfs_phys_extent *pmap)
464 : {
465 1647 : if (!xfs_has_reflink(mp))
466 : return false;
467 :
468 1647 : if (pmap->pe_flags & ~XFS_REFCOUNT_EXTENT_FLAGS)
469 : return false;
470 :
471 1647 : switch (pmap->pe_flags & XFS_REFCOUNT_EXTENT_TYPE_MASK) {
472 : case XFS_REFCOUNT_INCREASE:
473 : case XFS_REFCOUNT_DECREASE:
474 : case XFS_REFCOUNT_ALLOC_COW:
475 : case XFS_REFCOUNT_FREE_COW:
476 1647 : break;
477 : default:
478 : return false;
479 : }
480 :
481 1647 : if (pmap->pe_flags & XFS_REFCOUNT_EXTENT_REALTIME)
482 25 : return xfs_verify_rtbext(mp, pmap->pe_startblock, pmap->pe_len);
483 :
484 1622 : return xfs_verify_fsbext(mp, pmap->pe_startblock, pmap->pe_len);
485 : }
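
The validation pattern above (reject unknown flag bits, then switch on the type bits, then bounds-check the extent) generalizes to any recovered log item. The following userspace sketch uses invented FAKE_* masks and a crude bounds check in place of xfs_verify_fsbext()/xfs_verify_rtbext().

/* Userspace sketch of validating a recovered extent record. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define FAKE_TYPE_MASK		0x0fU	/* low bits carry the operation type */
#define FAKE_FLAG_REALTIME	0x10U
#define FAKE_ALL_FLAGS		(FAKE_TYPE_MASK | FAKE_FLAG_REALTIME)

enum { FAKE_INCREASE = 1, FAKE_DECREASE, FAKE_ALLOC_COW, FAKE_FREE_COW };

struct fake_phys_extent {
	uint64_t	startblock;
	uint32_t	len;
	uint32_t	flags;
};

static bool fake_validate_phys(const struct fake_phys_extent *pmap,
			       uint64_t max_blocks)
{
	/* Any flag bit we do not understand makes the whole record suspect. */
	if (pmap->flags & ~FAKE_ALL_FLAGS)
		return false;

	switch (pmap->flags & FAKE_TYPE_MASK) {
	case FAKE_INCREASE:
	case FAKE_DECREASE:
	case FAKE_ALLOC_COW:
	case FAKE_FREE_COW:
		break;
	default:
		return false;
	}

	/* Crude stand-in for a real extent bounds check. */
	return pmap->len > 0 && pmap->startblock + pmap->len <= max_blocks;
}

int main(void)
{
	struct fake_phys_extent good = { .startblock = 10, .len = 4,
					 .flags = FAKE_INCREASE };
	struct fake_phys_extent bad = { .startblock = 10, .len = 4,
					.flags = 0x80 };

	printf("good: %d  bad: %d\n", fake_validate_phys(&good, 1000),
	       fake_validate_phys(&bad, 1000));
	return 0;
}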
486 :
487 : /*
488 : * Process a refcount update intent item that was recovered from the log.
489 : * We need to update the refcountbt.
490 : */
491 : STATIC int
492 1633 : xfs_cui_item_recover(
493 : struct xfs_log_item *lip,
494 : struct list_head *capture_list)
495 : {
496 1633 : struct xfs_cui_log_item *cuip = CUI_ITEM(lip);
497 1633 : struct xfs_cud_log_item *cudp;
498 1633 : struct xfs_trans *tp;
499 1633 : struct xfs_btree_cur *rcur = NULL;
500 1633 : struct xfs_mount *mp = lip->li_log->l_mp;
501 1633 : unsigned int refc_type;
502 1633 : bool requeue_only = false;
503 1633 : int i;
504 1633 : int error = 0;
505 :
506 : /*
507 : * First check the validity of the extents described by the
508 : * CUI. If any are bad, then assume that all are bad and
509 : * just toss the CUI.
510 : */
511 3280 : for (i = 0; i < cuip->cui_format.cui_nextents; i++) {
512 1647 : if (!xfs_cui_validate_phys(mp,
513 : &cuip->cui_format.cui_extents[i])) {
514 0 : XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp,
515 : &cuip->cui_format,
516 : sizeof(cuip->cui_format));
517 0 : return -EFSCORRUPTED;
518 : }
519 : }
520 :
521 : /*
522 : * Under normal operation, refcount updates are deferred, so we
523 : * wouldn't be adding them directly to a transaction. All
524 : * refcount updates manage reservation usage internally and
525 : * dynamically by deferring work that won't fit in the
526 : * transaction. Normally, any work that needs to be deferred
527 : * gets attached to the same defer_ops that scheduled the
528 : * refcount update. However, we're in log recovery here, so we
529 : * use the passed in defer_ops to finish up any work that
530 : * doesn't fit. We need to reserve enough blocks to handle a
531 : * full btree split on either end of the refcount range.
532 : */
533 1633 : error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate,
534 1633 : mp->m_refc_maxlevels * 2, 0, XFS_TRANS_RESERVE, &tp);
535 1633 : if (error)
536 : return error;
537 :
538 1633 : cudp = xfs_trans_get_cud(tp, cuip);
539 :
540 4913 : for (i = 0; i < cuip->cui_format.cui_nextents; i++) {
541 1647 : struct xfs_refcount_intent fake = { };
542 1647 : struct xfs_phys_extent *pmap;
543 :
544 1647 : pmap = &cuip->cui_format.cui_extents[i];
545 1647 : refc_type = pmap->pe_flags & XFS_REFCOUNT_EXTENT_TYPE_MASK;
546 1647 : switch (refc_type) {
547 1647 : case XFS_REFCOUNT_INCREASE:
548 : case XFS_REFCOUNT_DECREASE:
549 : case XFS_REFCOUNT_ALLOC_COW:
550 : case XFS_REFCOUNT_FREE_COW:
551 1647 : fake.ri_type = refc_type;
552 1647 : break;
553 0 : default:
554 0 : XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp,
555 : &cuip->cui_format,
556 : sizeof(cuip->cui_format));
557 0 : error = -EFSCORRUPTED;
558 0 : goto abort_error;
559 : }
560 :
561 1647 : fake.ri_realtime = pmap->pe_flags & XFS_REFCOUNT_EXTENT_REALTIME;
562 1647 : fake.ri_startblock = pmap->pe_startblock;
563 1647 : fake.ri_blockcount = pmap->pe_len;
564 :
565 1647 : if (!requeue_only) {
566 1647 : xfs_refcount_update_get_group(mp, &fake);
567 1647 : error = xfs_trans_log_finish_refcount_update(tp, cudp,
568 : &fake, &rcur);
569 1647 : xfs_refcount_update_put_group(&fake);
570 : }
571 1647 : if (error == -EFSCORRUPTED)
572 0 : XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp,
573 : &cuip->cui_format,
574 : sizeof(cuip->cui_format));
575 1647 : if (error)
576 0 : goto abort_error;
577 :
578 : /* Requeue what we didn't finish. */
579 1647 : if (fake.ri_blockcount > 0) {
580 0 : struct xfs_bmbt_irec irec = {
581 0 : .br_startblock = fake.ri_startblock,
582 0 : .br_blockcount = fake.ri_blockcount,
583 : };
584 :
585 0 : switch (fake.ri_type) {
586 0 : case XFS_REFCOUNT_INCREASE:
587 0 : xfs_refcount_increase_extent(tp,
588 0 : fake.ri_realtime, &irec);
589 0 : break;
590 0 : case XFS_REFCOUNT_DECREASE:
591 0 : xfs_refcount_decrease_extent(tp,
592 0 : fake.ri_realtime, &irec);
593 0 : break;
594 0 : case XFS_REFCOUNT_ALLOC_COW:
595 0 : xfs_refcount_alloc_cow_extent(tp,
596 0 : fake.ri_realtime,
597 : irec.br_startblock,
598 : irec.br_blockcount);
599 0 : break;
600 0 : case XFS_REFCOUNT_FREE_COW:
601 0 : xfs_refcount_free_cow_extent(tp,
602 0 : fake.ri_realtime,
603 : irec.br_startblock,
604 : irec.br_blockcount);
605 0 : break;
606 0 : default:
607 0 : ASSERT(0);
608 : }
609 0 : requeue_only = true;
610 : }
611 : }
612 :
613 1633 : xfs_refcount_finish_one_cleanup(tp, rcur, error);
614 1633 : return xfs_defer_ops_capture_and_commit(tp, capture_list);
615 :
616 : abort_error:
617 0 : xfs_refcount_finish_one_cleanup(tp, rcur, error);
618 0 : xfs_trans_cancel(tp);
619 0 : return error;
620 : }
621 :
622 : STATIC bool
623 37289 : xfs_cui_item_match(
624 : struct xfs_log_item *lip,
625 : uint64_t intent_id)
626 : {
627 37289 : return CUI_ITEM(lip)->cui_format.cui_id == intent_id;
628 : }
629 :
630 : /* Relog an intent item to push the log tail forward. */
631 : static struct xfs_log_item *
632 44395 : xfs_cui_item_relog(
633 : struct xfs_log_item *intent,
634 : struct xfs_trans *tp)
635 : {
636 44395 : struct xfs_cud_log_item *cudp;
637 44395 : struct xfs_cui_log_item *cuip;
638 44395 : struct xfs_phys_extent *pmap;
639 44395 : unsigned int count;
640 :
641 44395 : count = CUI_ITEM(intent)->cui_format.cui_nextents;
642 44395 : pmap = CUI_ITEM(intent)->cui_format.cui_extents;
643 :
644 44395 : tp->t_flags |= XFS_TRANS_DIRTY;
645 44395 : cudp = xfs_trans_get_cud(tp, CUI_ITEM(intent));
646 44395 : set_bit(XFS_LI_DIRTY, &cudp->cud_item.li_flags);
647 :
648 44395 : cuip = xfs_cui_init(tp->t_mountp, count);
649 88788 : memcpy(cuip->cui_format.cui_extents, pmap, count * sizeof(*pmap));
650 44394 : atomic_set(&cuip->cui_next_extent, count);
651 44394 : xfs_trans_add_item(tp, &cuip->cui_item);
652 44395 : set_bit(XFS_LI_DIRTY, &cuip->cui_item.li_flags);
653 44395 : return &cuip->cui_item;
654 : }
655 :
656 : static const struct xfs_item_ops xfs_cui_item_ops = {
657 : .flags = XFS_ITEM_INTENT,
658 : .iop_size = xfs_cui_item_size,
659 : .iop_format = xfs_cui_item_format,
660 : .iop_unpin = xfs_cui_item_unpin,
661 : .iop_release = xfs_cui_item_release,
662 : .iop_recover = xfs_cui_item_recover,
663 : .iop_match = xfs_cui_item_match,
664 : .iop_relog = xfs_cui_item_relog,
665 : };
666 :
667 : static inline void
668 36570 : xfs_cui_copy_format(
669 : struct xfs_cui_log_format *dst,
670 : const struct xfs_cui_log_format *src)
671 : {
672 36570 : unsigned int i;
673 :
674 73140 : memcpy(dst, src, offsetof(struct xfs_cui_log_format, cui_extents));
675 :
676 73217 : for (i = 0; i < src->cui_nextents; i++)
677 73294 : memcpy(&dst->cui_extents[i], &src->cui_extents[i],
678 : sizeof(struct xfs_phys_extent));
679 36570 : }
680 :
681 : /*
682 : * This routine is called to create an in-core extent refcount update
683 : * item from the cui format structure which was logged on disk.
684 : * It allocates an in-core cui, copies the extents from the format
685 : * structure into it, and adds the cui to the AIL with the given
686 : * LSN.
687 : */
688 : STATIC int
689 36570 : xlog_recover_cui_commit_pass2(
690 : struct xlog *log,
691 : struct list_head *buffer_list,
692 : struct xlog_recover_item *item,
693 : xfs_lsn_t lsn)
694 : {
695 36570 : struct xfs_mount *mp = log->l_mp;
696 36570 : struct xfs_cui_log_item *cuip;
697 36570 : struct xfs_cui_log_format *cui_formatp;
698 36570 : size_t len;
699 :
700 36570 : cui_formatp = item->ri_buf[0].i_addr;
701 :
702 36570 : if (item->ri_buf[0].i_len < xfs_cui_log_format_sizeof(0)) {
703 0 : XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp,
704 : item->ri_buf[0].i_addr, item->ri_buf[0].i_len);
705 0 : return -EFSCORRUPTED;
706 : }
707 :
708 36570 : len = xfs_cui_log_format_sizeof(cui_formatp->cui_nextents);
709 36570 : if (item->ri_buf[0].i_len != len) {
710 0 : XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp,
711 : item->ri_buf[0].i_addr, item->ri_buf[0].i_len);
712 0 : return -EFSCORRUPTED;
713 : }
714 :
715 36570 : cuip = xfs_cui_init(mp, cui_formatp->cui_nextents);
716 36570 : xfs_cui_copy_format(&cuip->cui_format, cui_formatp);
717 36570 : atomic_set(&cuip->cui_next_extent, cui_formatp->cui_nextents);
718 : /*
719 : * Insert the intent into the AIL directly and drop one reference so
720 : * that finishing or canceling the work will drop the other.
721 : */
722 36570 : xfs_trans_ail_insert(log->l_ailp, &cuip->cui_item, lsn);
723 36570 : xfs_cui_release(cuip);
724 36570 : return 0;
725 : }
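
The two-stage buffer check above, i.e. require at least a zero-extent header and then require the length to match exactly what the header's extent count implies, applies to any variable-length recovered record. The sketch below is a userspace approximation; the fake_* names are placeholders, not the kernel API.

/* Userspace sketch of validating the length of a variable-size record. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct fake_cui_log_format {
	uint32_t	nextents;
	uint64_t	extents[];	/* one word per extent, for brevity */
};

/* Header plus nextents records, like xfs_cui_log_format_sizeof(). */
static size_t fake_format_sizeof(uint32_t nextents)
{
	return sizeof(struct fake_cui_log_format) +
	       nextents * sizeof(uint64_t);
}

static bool fake_buf_ok(const void *buf, size_t buf_len)
{
	const struct fake_cui_log_format *fmt = buf;

	/* Too short to hold even a zero-extent header?  Corrupt. */
	if (buf_len < fake_format_sizeof(0))
		return false;

	/* The extent count in the header must account for every byte. */
	if (buf_len != fake_format_sizeof(fmt->nextents))
		return false;

	return true;
}

int main(void)
{
	uint32_t nextents = 2;
	size_t len = fake_format_sizeof(nextents);
	struct fake_cui_log_format *fmt = calloc(1, len);

	if (!fmt)
		return 1;
	fmt->nextents = nextents;
	printf("exact length ok: %d\n", fake_buf_ok(fmt, len));
	printf("truncated ok:    %d\n", fake_buf_ok(fmt, len - 8));
	free(fmt);
	return 0;
}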
726 :
727 : const struct xlog_recover_item_ops xlog_cui_item_ops = {
728 : .item_type = XFS_LI_CUI,
729 : .commit_pass2 = xlog_recover_cui_commit_pass2,
730 : };
731 :
732 : /*
733 : * This routine is called when an CUD format structure is found in a committed
734 : * transaction in the log. Its purpose is to cancel the corresponding CUI if it
735 : * was still in the log. To do this it searches the AIL for the CUI with an id
736 : * equal to that in the CUD format structure. If we find it we drop the CUD
737 : * reference, which removes the CUI from the AIL and frees it.
738 : */
739 : STATIC int
740 35068 : xlog_recover_cud_commit_pass2(
741 : struct xlog *log,
742 : struct list_head *buffer_list,
743 : struct xlog_recover_item *item,
744 : xfs_lsn_t lsn)
745 : {
746 35068 : struct xfs_cud_log_format *cud_formatp;
747 :
748 35068 : cud_formatp = item->ri_buf[0].i_addr;
749 35068 : if (item->ri_buf[0].i_len != sizeof(struct xfs_cud_log_format)) {
750 0 : XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, log->l_mp,
751 : item->ri_buf[0].i_addr, item->ri_buf[0].i_len);
752 0 : return -EFSCORRUPTED;
753 : }
754 :
755 35068 : xlog_recover_release_intent(log, XFS_LI_CUI, cud_formatp->cud_cui_id);
756 35068 : return 0;
757 : }
758 :
759 : const struct xlog_recover_item_ops xlog_cud_item_ops = {
760 : .item_type = XFS_LI_CUD,
761 : .commit_pass2 = xlog_recover_cud_commit_pass2,
762 : };
|