Line data Source code
1 : // SPDX-License-Identifier: GPL-2.0+
2 : /*
3 : * Copyright (C) 2016 Oracle. All Rights Reserved.
4 : * Author: Darrick J. Wong <darrick.wong@oracle.com>
5 : */
6 : #include "xfs.h"
7 : #include "xfs_fs.h"
8 : #include "xfs_format.h"
9 : #include "xfs_log_format.h"
10 : #include "xfs_trans_resv.h"
11 : #include "xfs_bit.h"
12 : #include "xfs_shared.h"
13 : #include "xfs_mount.h"
14 : #include "xfs_defer.h"
15 : #include "xfs_trans.h"
16 : #include "xfs_trans_priv.h"
17 : #include "xfs_rmap_item.h"
18 : #include "xfs_log.h"
19 : #include "xfs_rmap.h"
20 : #include "xfs_error.h"
21 : #include "xfs_log_priv.h"
22 : #include "xfs_log_recover.h"
23 : #include "xfs_ag.h"
24 : #include "xfs_rtgroup.h"
25 :
 : /* Slab caches for rmap update intent (RUI) and rmap update done (RUD) items. */
26 : struct kmem_cache *xfs_rui_cache;
27 : struct kmem_cache *xfs_rud_cache;
28 :
 : /* Forward declaration; the ops table is defined near the bottom of this file. */
29 : static const struct xfs_item_ops xfs_rui_item_ops;
30 :
 : /* Convert a generic log item pointer back to its containing RUI. */
31 : static inline struct xfs_rui_log_item *RUI_ITEM(struct xfs_log_item *lip)
32 : {
33 : return container_of(lip, struct xfs_rui_log_item, rui_item);
34 : }
35 :
 : /*
 : * Free an RUI along with the shadow log vector buffer the CIL may have
 : * attached to it. RUIs with more than XFS_RUI_MAX_FAST_EXTENTS extents
 : * were heap allocated; everything else came from the RUI slab cache.
 : */
36 : STATIC void
37 329887206 : xfs_rui_item_free(
38 : struct xfs_rui_log_item *ruip)
39 : {
40 329887206 : kmem_free(ruip->rui_item.li_lv_shadow);
41 329887163 : if (ruip->rui_format.rui_nextents > XFS_RUI_MAX_FAST_EXTENTS)
42 0 : kmem_free(ruip);
43 : else
44 329887163 : kmem_cache_free(xfs_rui_cache, ruip);
45 329886859 : }
46 :
47 : /*
48 : * Freeing the RUI requires that we remove it from the AIL if it has already
49 : * been placed there. However, the RUI may not yet have been placed in the AIL
50 : * when called by xfs_rui_release() from RUD processing due to the ordering of
51 : * committed vs unpin operations in bulk insert operations. Hence the reference
52 : * count to ensure only the last caller frees the RUI.
53 : */
54 : STATIC void
55 659652094 : xfs_rui_release(
56 : struct xfs_rui_log_item *ruip)
57 : {
58 659652094 : ASSERT(atomic_read(&ruip->rui_refcount) > 0);
59 1319383574 : if (!atomic_dec_and_test(&ruip->rui_refcount))
60 : return;
61 :
 : /* Last reference dropped: remove from the AIL (if present) and free. */
62 329886263 : xfs_trans_ail_delete(&ruip->rui_item, 0);
63 329886961 : xfs_rui_item_free(ruip);
64 : }
65 :
 : /*
 : * Report the log space needed for an RUI: a single iovec sized for the
 : * variable-length format structure covering all logged extents.
 : */
66 : STATIC void
67 329845494 : xfs_rui_item_size(
68 : struct xfs_log_item *lip,
69 : int *nvecs,
70 : int *nbytes)
71 : {
72 329845494 : struct xfs_rui_log_item *ruip = RUI_ITEM(lip);
73 :
74 329845494 : *nvecs += 1;
75 329845494 : *nbytes += xfs_rui_log_format_sizeof(ruip->rui_format.rui_nextents);
76 329845494 : }
77 :
78 : /*
79 : * This is called to fill in the vector of log iovecs for the
80 : * given rui log item. We use only 1 iovec, and we point that
81 : * at the rui_log_format structure embedded in the rui item.
82 : * It is at this point that we assert that all of the extent
83 : * slots in the rui item have been filled.
84 : */
85 : STATIC void
86 329845580 : xfs_rui_item_format(
87 : struct xfs_log_item *lip,
88 : struct xfs_log_vec *lv)
89 : {
90 329845580 : struct xfs_rui_log_item *ruip = RUI_ITEM(lip);
91 329845580 : struct xfs_log_iovec *vecp = NULL;
92 :
93 329845580 : ASSERT(atomic_read(&ruip->rui_next_extent) ==
94 : ruip->rui_format.rui_nextents);
95 :
96 329845580 : ruip->rui_format.rui_type = XFS_LI_RUI;
97 329845580 : ruip->rui_format.rui_size = 1;
98 :
99 329845580 : xlog_copy_iovec(lv, &vecp, XLOG_REG_TYPE_RUI_FORMAT, &ruip->rui_format,
100 329845580 : xfs_rui_log_format_sizeof(ruip->rui_format.rui_nextents));
101 329846110 : }
102 :
103 : /*
104 : * The unpin operation is the last place an RUI is manipulated in the log. It is
105 : * either inserted in the AIL or aborted in the event of a log I/O error. In
106 : * either case, the RUI transaction has been successfully committed to make it
107 : * this far. Therefore, we expect whoever committed the RUI to either construct
108 : * and commit the RUD or drop the RUD's reference in the event of error. Simply
109 : * drop the log's RUI reference now that the log is done with it.
110 : */
111 : STATIC void
112 329842755 : xfs_rui_item_unpin(
113 : struct xfs_log_item *lip,
114 : int remove)
115 : {
116 329842755 : struct xfs_rui_log_item *ruip = RUI_ITEM(lip);
117 :
 : /* @remove is not used here; xfs_rui_release() handles AIL removal. */
118 329842755 : xfs_rui_release(ruip);
119 329846102 : }
120 :
121 : /*
122 : * The RUI has been either committed or aborted if the transaction has been
123 : * cancelled. If the transaction was cancelled, an RUD isn't going to be
124 : * constructed and thus we free the RUI here directly.
125 : */
126 : STATIC void
127 39541 : xfs_rui_item_release(
128 : struct xfs_log_item *lip)
129 : {
130 39541 : xfs_rui_release(RUI_ITEM(lip));
131 39541 : }
132 :
133 : /*
134 : * Allocate and initialize an rui item with the given number of extents.
135 : */
136 : STATIC struct xfs_rui_log_item *
137 329843781 : xfs_rui_init(
138 : struct xfs_mount *mp,
139 : uint nextents)
140 :
141 : {
142 329843781 : struct xfs_rui_log_item *ruip;
143 :
144 329843781 : ASSERT(nextents > 0);
 : /* Oversized intents fall back to a heap allocation; see xfs_rui_item_free(). */
145 329843781 : if (nextents > XFS_RUI_MAX_FAST_EXTENTS)
146 0 : ruip = kmem_zalloc(xfs_rui_log_item_sizeof(nextents), 0);
147 : else
148 329843781 : ruip = kmem_cache_zalloc(xfs_rui_cache,
149 : GFP_KERNEL | __GFP_NOFAIL);
150 :
151 329802739 : xfs_log_item_init(mp, &ruip->rui_item, XFS_LI_RUI, &xfs_rui_item_ops);
152 329837350 : ruip->rui_format.rui_nextents = nextents;
153 329837350 : ruip->rui_format.rui_id = (uintptr_t)(void *)ruip;
154 329837350 : atomic_set(&ruip->rui_next_extent, 0);
 : /*
 : * Two references: one dropped by the RUD/transaction release path and
 : * one dropped when the log unpins the item (see xfs_rui_release()).
 : */
155 329837350 : atomic_set(&ruip->rui_refcount, 2);
156 :
157 329837350 : return ruip;
158 : }
159 :
 : /* Convert a generic log item pointer back to its containing RUD. */
160 : static inline struct xfs_rud_log_item *RUD_ITEM(struct xfs_log_item *lip)
161 : {
162 : return container_of(lip, struct xfs_rud_log_item, rud_item);
163 : }
164 :
 : /* RUDs are fixed size: one iovec holding a single xfs_rud_log_format. */
165 : STATIC void
166 329844612 : xfs_rud_item_size(
167 : struct xfs_log_item *lip,
168 : int *nvecs,
169 : int *nbytes)
170 : {
171 329844612 : *nvecs += 1;
172 329844612 : *nbytes += sizeof(struct xfs_rud_log_format);
173 329844612 : }
174 :
175 : /*
176 : * This is called to fill in the vector of log iovecs for the
177 : * given rud log item. We use only 1 iovec, and we point that
178 : * at the rud_log_format structure embedded in the rud item.
179 : * Unlike the RUI, the RUD carries no extent array, so there are
180 : * no extent slots to check here.
181 : */
182 : STATIC void
183 396861 : xfs_rud_item_format(
184 : struct xfs_log_item *lip,
185 : struct xfs_log_vec *lv)
186 : {
187 396861 : struct xfs_rud_log_item *rudp = RUD_ITEM(lip);
188 396861 : struct xfs_log_iovec *vecp = NULL;
189 :
190 396861 : rudp->rud_format.rud_type = XFS_LI_RUD;
191 396861 : rudp->rud_format.rud_size = 1;
192 :
193 396861 : xlog_copy_iovec(lv, &vecp, XLOG_REG_TYPE_RUD_FORMAT, &rudp->rud_format,
194 : sizeof(struct xfs_rud_log_format));
195 396862 : }
196 :
197 : /*
198 : * The RUD is either committed or aborted if the transaction is cancelled. If
199 : * the transaction is cancelled, drop our reference to the RUI and free the
200 : * RUD.
201 : */
202 : STATIC void
203 329846845 : xfs_rud_item_release(
204 : struct xfs_log_item *lip)
205 : {
206 329846845 : struct xfs_rud_log_item *rudp = RUD_ITEM(lip);
207 :
208 329846845 : xfs_rui_release(rudp->rud_ruip);
 : /* Free the CIL shadow buffer before returning the RUD to its cache. */
209 329847032 : kmem_free(rudp->rud_item.li_lv_shadow);
210 329847158 : kmem_cache_free(xfs_rud_cache, rudp);
211 329846982 : }
212 :
 : /* Return the RUI that this RUD relieves (for the log's intent tracking). */
213 : static struct xfs_log_item *
214 329843428 : xfs_rud_item_intent(
215 : struct xfs_log_item *lip)
216 : {
217 329843428 : return &RUD_ITEM(lip)->rud_ruip->rui_item;
218 : }
219 :
220 : static const struct xfs_item_ops xfs_rud_item_ops = {
221 : .flags = XFS_ITEM_RELEASE_WHEN_COMMITTED |
222 : XFS_ITEM_INTENT_DONE,
223 : .iop_size = xfs_rud_item_size,
224 : .iop_format = xfs_rud_item_format,
225 : .iop_release = xfs_rud_item_release,
226 : .iop_intent = xfs_rud_item_intent,
227 : };
228 :
 : /* Allocate an RUD, bind it to @ruip, and join it to transaction @tp. */
229 : static struct xfs_rud_log_item *
230 329839525 : xfs_trans_get_rud(
231 : struct xfs_trans *tp,
232 : struct xfs_rui_log_item *ruip)
233 : {
234 329839525 : struct xfs_rud_log_item *rudp;
235 :
236 329839525 : rudp = kmem_cache_zalloc(xfs_rud_cache, GFP_KERNEL | __GFP_NOFAIL);
237 329844997 : xfs_log_item_init(tp->t_mountp, &rudp->rud_item, XFS_LI_RUD,
238 : &xfs_rud_item_ops);
239 329846714 : rudp->rud_ruip = ruip;
 : /* Record the RUI's id so recovery can pair this RUD with its intent. */
240 329846714 : rudp->rud_format.rud_rui_id = ruip->rui_format.rui_id;
241 :
242 329846714 : xfs_trans_add_item(tp, &rudp->rud_item);
243 329846594 : return rudp;
244 : }
245 :
246 : /*
247 : * Finish an rmap update and log it to the RUD. Note that the transaction is
248 : * marked dirty regardless of whether the rmap update succeeds or fails to
249 : * support the RUI/RUD lifecycle rules.
250 : */
251 : static int
252 351137659 : xfs_trans_log_finish_rmap_update(
253 : struct xfs_trans *tp,
254 : struct xfs_rud_log_item *rudp,
255 : struct xfs_rmap_intent *ri,
256 : struct xfs_btree_cur **pcur)
257 : {
258 351137659 : int error;
259 :
 : /* Apply the rmap change; any error is returned after dirtying tp. */
260 351137659 : error = xfs_rmap_finish_one(tp, ri, pcur);
261 :
262 : /*
263 : * Mark the transaction dirty, even on error. This ensures the
264 : * transaction is aborted, which:
265 : *
266 : * 1.) releases the RUI and frees the RUD
267 : * 2.) shuts down the filesystem
268 : */
269 351137650 : tp->t_flags |= XFS_TRANS_DIRTY | XFS_TRANS_HAS_INTENT_DONE;
270 351137650 : set_bit(XFS_LI_DIRTY, &rudp->rud_item.li_flags);
271 :
272 351138141 : return error;
273 : }
274 :
 : /* Sort rmap intents by AG (or by realtime group for rt intents). */
276 : static int
277 21296444 : xfs_rmap_update_diff_items(
278 : void *priv,
279 : const struct list_head *a,
280 : const struct list_head *b)
281 : {
282 21296444 : struct xfs_rmap_intent *ra;
283 21296444 : struct xfs_rmap_intent *rb;
284 :
285 21296444 : ra = container_of(a, struct xfs_rmap_intent, ri_list);
286 21296444 : rb = container_of(b, struct xfs_rmap_intent, ri_list);
287 :
 : /* Realtime and non-realtime intents are never mixed in one list. */
288 21296444 : ASSERT(ra->ri_realtime == rb->ri_realtime);
289 :
290 21296444 : if (ra->ri_realtime)
291 1474305 : return ra->ri_rtg->rtg_rgno - rb->ri_rtg->rtg_rgno;
292 :
293 19822139 : return ra->ri_pag->pag_agno - rb->ri_pag->pag_agno;
294 : }
295 :
296 : /* Log rmap updates in the intent item. */
297 : STATIC void
298 351102607 : xfs_rmap_update_log_item(
299 : struct xfs_trans *tp,
300 : struct xfs_rui_log_item *ruip,
301 : struct xfs_rmap_intent *ri)
302 : {
303 351102607 : uint next_extent;
304 351102607 : struct xfs_map_extent *map;
305 :
306 351102607 : tp->t_flags |= XFS_TRANS_DIRTY;
307 351102607 : set_bit(XFS_LI_DIRTY, &ruip->rui_item.li_flags);
308 :
309 : /*
310 : * atomic_inc_return gives us the value after the increment;
311 : * we want to use it as an array index so we need to subtract 1 from
312 : * it.
313 : */
314 351109660 : next_extent = atomic_inc_return(&ruip->rui_next_extent) - 1;
315 351137910 : ASSERT(next_extent < ruip->rui_format.rui_nextents);
 : /* Copy the in-core intent into the next on-disk extent slot. */
316 351137910 : map = &ruip->rui_format.rui_extents[next_extent];
317 351137910 : map->me_owner = ri->ri_owner;
318 351137910 : map->me_startblock = ri->ri_bmap.br_startblock;
319 351137910 : map->me_startoff = ri->ri_bmap.br_startoff;
320 351137910 : map->me_len = ri->ri_bmap.br_blockcount;
321 :
 : /* Translate the in-core intent state into on-disk extent flags. */
322 351137910 : map->me_flags = 0;
323 351137910 : if (ri->ri_bmap.br_state == XFS_EXT_UNWRITTEN)
324 65913791 : map->me_flags |= XFS_RMAP_EXTENT_UNWRITTEN;
325 351137910 : if (ri->ri_whichfork == XFS_ATTR_FORK)
326 15635289 : map->me_flags |= XFS_RMAP_EXTENT_ATTR_FORK;
327 351137910 : if (ri->ri_realtime)
328 75344660 : map->me_flags |= XFS_RMAP_EXTENT_REALTIME;
329 351137910 : switch (ri->ri_type) {
330 69060352 : case XFS_RMAP_MAP:
331 69060352 : map->me_flags |= XFS_RMAP_EXTENT_MAP;
332 69060352 : break;
333 102590025 : case XFS_RMAP_MAP_SHARED:
334 102590025 : map->me_flags |= XFS_RMAP_EXTENT_MAP_SHARED;
335 102590025 : break;
336 54562198 : case XFS_RMAP_UNMAP:
337 54562198 : map->me_flags |= XFS_RMAP_EXTENT_UNMAP;
338 54562198 : break;
339 83944935 : case XFS_RMAP_UNMAP_SHARED:
340 83944935 : map->me_flags |= XFS_RMAP_EXTENT_UNMAP_SHARED;
341 83944935 : break;
342 8906442 : case XFS_RMAP_CONVERT:
343 8906442 : map->me_flags |= XFS_RMAP_EXTENT_CONVERT;
344 8906442 : break;
345 27559889 : case XFS_RMAP_CONVERT_SHARED:
346 27559889 : map->me_flags |= XFS_RMAP_EXTENT_CONVERT_SHARED;
347 27559889 : break;
348 757554 : case XFS_RMAP_ALLOC:
349 757554 : map->me_flags |= XFS_RMAP_EXTENT_ALLOC;
350 757554 : break;
351 3756515 : case XFS_RMAP_FREE:
352 3756515 : map->me_flags |= XFS_RMAP_EXTENT_FREE;
353 3756515 : break;
354 0 : default:
355 0 : ASSERT(0);
356 : }
357 351137910 : }
358 :
 : /*
 : * Allocate an RUI to log @count deferred rmap updates, optionally sort the
 : * work items by group, and copy each one into the intent's extent array.
 : */
359 : static struct xfs_log_item *
360 329804926 : xfs_rmap_update_create_intent(
361 : struct xfs_trans *tp,
362 : struct list_head *items,
363 : unsigned int count,
364 : bool sort)
365 : {
366 329804926 : struct xfs_mount *mp = tp->t_mountp;
367 329804926 : struct xfs_rui_log_item *ruip = xfs_rui_init(mp, count);
368 329783895 : struct xfs_rmap_intent *ri;
369 :
370 329783895 : ASSERT(count > 0);
371 :
372 329783895 : xfs_trans_add_item(tp, &ruip->rui_item);
373 329822541 : if (sort)
374 329825079 : list_sort(mp, items, xfs_rmap_update_diff_items);
375 680960630 : list_for_each_entry(ri, items, ri_list)
376 351123840 : xfs_rmap_update_log_item(tp, ruip, ri);
377 329836790 : return &ruip->rui_item;
378 : }
379 :
380 : /* Get an RUD so we can process all the deferred rmap updates. */
381 : static struct xfs_log_item *
382 329842803 : xfs_rmap_update_create_done(
383 : struct xfs_trans *tp,
384 : struct xfs_log_item *intent,
385 : unsigned int count)
386 : {
387 329842803 : return &xfs_trans_get_rud(tp, RUI_ITEM(intent))->rud_item;
388 : }
389 :
390 : /* Take a passive ref to the AG containing the space we're rmapping. */
391 : void
392 351122665 : xfs_rmap_update_get_group(
393 : struct xfs_mount *mp,
394 : struct xfs_rmap_intent *ri)
395 : {
396 351122665 : xfs_agnumber_t agno;
397 :
 : /* Realtime intents pin a realtime group instead of a perag. */
398 351122665 : if (ri->ri_realtime) {
399 75343717 : xfs_rgnumber_t rgno;
400 :
401 75343717 : rgno = xfs_rtb_to_rgno(mp, ri->ri_bmap.br_startblock);
402 75343716 : ri->ri_rtg = xfs_rtgroup_intent_get(mp, rgno);
403 75343299 : return;
404 : }
405 :
406 275778948 : agno = XFS_FSB_TO_AGNO(mp, ri->ri_bmap.br_startblock);
407 275778948 : ri->ri_pag = xfs_perag_intent_get(mp, agno);
408 : }
409 :
 : /*
 : * Release a passive AG ref after finishing rmapping work. The reference
 : * taken in xfs_rmap_update_get_group() is held until the intent is either
 : * finished or cancelled.
 : */
410 : static inline void
411 351138617 : xfs_rmap_update_put_group(
412 : struct xfs_rmap_intent *ri)
413 : {
414 351138617 : if (ri->ri_realtime) {
415 75344754 : xfs_rtgroup_intent_put(ri->ri_rtg);
416 75344754 : return;
417 : }
418 :
419 275793863 : xfs_perag_intent_put(ri->ri_pag);
420 : }
422 :
423 : /* Process a deferred rmap update. */
424 : STATIC int
425 351136517 : xfs_rmap_update_finish_item(
426 : struct xfs_trans *tp,
427 : struct xfs_log_item *done,
428 : struct list_head *item,
429 : struct xfs_btree_cur **state)
430 : {
431 351136517 : struct xfs_rmap_intent *ri;
432 351136517 : int error;
433 :
434 351136517 : ri = container_of(item, struct xfs_rmap_intent, ri_list);
435 :
436 351136517 : error = xfs_trans_log_finish_rmap_update(tp, RUD_ITEM(done), ri,
437 : state);
438 :
 : /* The group ref and the intent itself are released even on error. */
439 351136231 : xfs_rmap_update_put_group(ri);
440 351136805 : kmem_cache_free(xfs_rmap_intent_cache, ri);
441 351137513 : return error;
442 : }
443 :
444 : /* Abort all pending RUIs. */
445 : STATIC void
446 767 : xfs_rmap_update_abort_intent(
447 : struct xfs_log_item *intent)
448 : {
449 767 : xfs_rui_release(RUI_ITEM(intent));
450 767 : }
451 :
452 : /* Cancel a deferred rmap update. */
453 : STATIC void
454 867 : xfs_rmap_update_cancel_item(
455 : struct list_head *item)
456 : {
457 867 : struct xfs_rmap_intent *ri;
458 :
459 867 : ri = container_of(item, struct xfs_rmap_intent, ri_list);
460 :
461 867 : xfs_rmap_update_put_group(ri);
462 867 : kmem_cache_free(xfs_rmap_intent_cache, ri);
463 867 : }
464 :
 : /* Deferred-operation dispatch table for rmap updates. */
465 : const struct xfs_defer_op_type xfs_rmap_update_defer_type = {
466 : .max_items = XFS_RUI_MAX_FAST_EXTENTS,
467 : .create_intent = xfs_rmap_update_create_intent,
468 : .abort_intent = xfs_rmap_update_abort_intent,
469 : .create_done = xfs_rmap_update_create_done,
470 : .finish_item = xfs_rmap_update_finish_item,
471 : .finish_cleanup = xfs_rmap_finish_one_cleanup,
472 : .cancel_item = xfs_rmap_update_cancel_item,
473 : };
474 :
475 : /* Is this recovered RUI ok? */
476 : static inline bool
477 1372 : xfs_rui_validate_map(
478 : struct xfs_mount *mp,
479 : struct xfs_map_extent *map)
480 : {
 : /* RUIs only make sense on filesystems with an rmap btree. */
481 1372 : if (!xfs_has_rmapbt(mp))
482 : return false;
483 :
 : /* Reject flag bits that this kernel does not know about. */
484 1372 : if (map->me_flags & ~XFS_RMAP_EXTENT_FLAGS)
485 : return false;
486 :
 : /* The operation type must be one we recognize. */
487 1372 : switch (map->me_flags & XFS_RMAP_EXTENT_TYPE_MASK) {
488 : case XFS_RMAP_EXTENT_MAP:
489 : case XFS_RMAP_EXTENT_MAP_SHARED:
490 : case XFS_RMAP_EXTENT_UNMAP:
491 : case XFS_RMAP_EXTENT_UNMAP_SHARED:
492 : case XFS_RMAP_EXTENT_CONVERT:
493 : case XFS_RMAP_EXTENT_CONVERT_SHARED:
494 : case XFS_RMAP_EXTENT_ALLOC:
495 : case XFS_RMAP_EXTENT_FREE:
496 1372 : break;
497 : default:
498 : return false;
499 : }
500 :
 : /* Inode owners must reference a plausible inode number. */
501 2736 : if (!XFS_RMAP_NON_INODE_OWNER(map->me_owner) &&
502 1364 : !xfs_verify_ino(mp, map->me_owner))
503 : return false;
504 :
505 1372 : if (!xfs_verify_fileext(mp, map->me_startoff, map->me_len))
506 : return false;
507 :
 : /* Validate the block range against the rt or data device as needed. */
508 1372 : if (map->me_flags & XFS_RMAP_EXTENT_REALTIME)
509 0 : return xfs_verify_rtbext(mp, map->me_startblock, map->me_len);
510 :
511 1372 : return xfs_verify_fsbext(mp, map->me_startblock, map->me_len);
512 : }
513 :
514 : /*
515 : * Process an rmap update intent item that was recovered from the log.
516 : * We need to update the rmapbt.
517 : */
518 : STATIC int
519 1366 : xfs_rui_item_recover(
520 : struct xfs_log_item *lip,
521 : struct list_head *capture_list)
522 : {
523 1366 : struct xfs_rui_log_item *ruip = RUI_ITEM(lip);
524 1366 : struct xfs_rud_log_item *rudp;
525 1366 : struct xfs_trans *tp;
526 1366 : struct xfs_btree_cur *rcur = NULL;
527 1366 : struct xfs_mount *mp = lip->li_log->l_mp;
528 1366 : int i;
529 1366 : int error = 0;
530 :
531 : /*
532 : * First check the validity of the extents described by the
533 : * RUI. If any are bad, then assume that all are bad and
534 : * just toss the RUI.
535 : */
536 2738 : for (i = 0; i < ruip->rui_format.rui_nextents; i++) {
537 1372 : if (!xfs_rui_validate_map(mp,
538 : &ruip->rui_format.rui_extents[i])) {
539 0 : XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp,
540 : &ruip->rui_format,
541 : sizeof(ruip->rui_format));
542 0 : return -EFSCORRUPTED;
543 : }
544 : }
545 :
546 1366 : error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate,
547 : mp->m_rmap_maxlevels, 0, XFS_TRANS_RESERVE, &tp);
548 1366 : if (error)
549 : return error;
550 1366 : rudp = xfs_trans_get_rud(tp, ruip);
551 :
 : /*
 : * Replay each extent: reconstruct an in-core intent from the on-disk
 : * map and finish it against the rmap btree.
 : */
552 4102 : for (i = 0; i < ruip->rui_format.rui_nextents; i++) {
553 1372 : struct xfs_rmap_intent fake = { };
554 1372 : struct xfs_map_extent *map;
555 :
556 1372 : map = &ruip->rui_format.rui_extents[i];
557 1372 : switch (map->me_flags & XFS_RMAP_EXTENT_TYPE_MASK) {
558 : case XFS_RMAP_EXTENT_MAP:
559 : fake.ri_type = XFS_RMAP_MAP;
560 : break;
561 339 : case XFS_RMAP_EXTENT_MAP_SHARED:
562 339 : fake.ri_type = XFS_RMAP_MAP_SHARED;
563 339 : break;
564 35 : case XFS_RMAP_EXTENT_UNMAP:
565 35 : fake.ri_type = XFS_RMAP_UNMAP;
566 35 : break;
567 766 : case XFS_RMAP_EXTENT_UNMAP_SHARED:
568 766 : fake.ri_type = XFS_RMAP_UNMAP_SHARED;
569 766 : break;
570 15 : case XFS_RMAP_EXTENT_CONVERT:
571 15 : fake.ri_type = XFS_RMAP_CONVERT;
572 15 : break;
573 43 : case XFS_RMAP_EXTENT_CONVERT_SHARED:
574 43 : fake.ri_type = XFS_RMAP_CONVERT_SHARED;
575 43 : break;
576 4 : case XFS_RMAP_EXTENT_ALLOC:
577 4 : fake.ri_type = XFS_RMAP_ALLOC;
578 4 : break;
579 4 : case XFS_RMAP_EXTENT_FREE:
580 4 : fake.ri_type = XFS_RMAP_FREE;
581 4 : break;
 : /* Unreachable: xfs_rui_validate_map() already rejected bad types. */
582 0 : default:
583 0 : XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp,
584 : &ruip->rui_format,
585 : sizeof(ruip->rui_format));
586 0 : error = -EFSCORRUPTED;
587 0 : goto abort_error;
588 : }
589 :
590 1372 : fake.ri_realtime = !!(map->me_flags & XFS_RMAP_EXTENT_REALTIME);
591 1372 : fake.ri_owner = map->me_owner;
592 1372 : fake.ri_whichfork = (map->me_flags & XFS_RMAP_EXTENT_ATTR_FORK) ?
593 1372 : XFS_ATTR_FORK : XFS_DATA_FORK;
594 1372 : fake.ri_bmap.br_startblock = map->me_startblock;
595 1372 : fake.ri_bmap.br_startoff = map->me_startoff;
596 1372 : fake.ri_bmap.br_blockcount = map->me_len;
597 1372 : fake.ri_bmap.br_state = (map->me_flags & XFS_RMAP_EXTENT_UNWRITTEN) ?
598 1372 : XFS_EXT_UNWRITTEN : XFS_EXT_NORM;
599 :
600 1372 : xfs_rmap_update_get_group(mp, &fake);
601 1372 : error = xfs_trans_log_finish_rmap_update(tp, rudp, &fake,
602 : &rcur);
603 1372 : if (error == -EFSCORRUPTED)
604 2 : XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp,
605 : map, sizeof(*map));
606 1372 : xfs_rmap_update_put_group(&fake);
607 1372 : if (error)
608 2 : goto abort_error;
609 :
610 : }
611 :
612 1364 : xfs_rmap_finish_one_cleanup(tp, rcur, error);
613 1364 : return xfs_defer_ops_capture_and_commit(tp, capture_list);
614 :
615 : abort_error:
616 2 : xfs_rmap_finish_one_cleanup(tp, rcur, error);
617 2 : xfs_trans_cancel(tp);
618 2 : return error;
619 : }
620 :
 : /* Match an RUI by its intent id; used by recovery to pair RUDs with RUIs. */
621 : STATIC bool
622 42395 : xfs_rui_item_match(
623 : struct xfs_log_item *lip,
624 : uint64_t intent_id)
625 : {
626 42395 : return RUI_ITEM(lip)->rui_format.rui_id == intent_id;
627 : }
628 :
629 : /* Relog an intent item to push the log tail forward. */
630 : static struct xfs_log_item *
631 1164 : xfs_rui_item_relog(
632 : struct xfs_log_item *intent,
633 : struct xfs_trans *tp)
634 : {
635 1164 : struct xfs_rud_log_item *rudp;
636 1164 : struct xfs_rui_log_item *ruip;
637 1164 : struct xfs_map_extent *map;
638 1164 : unsigned int count;
639 :
640 1164 : count = RUI_ITEM(intent)->rui_format.rui_nextents;
641 1164 : map = RUI_ITEM(intent)->rui_format.rui_extents;
642 :
 : /* Retire the old intent with an RUD, then log a fresh copy of it. */
643 1164 : tp->t_flags |= XFS_TRANS_DIRTY;
644 1164 : rudp = xfs_trans_get_rud(tp, RUI_ITEM(intent));
645 1164 : set_bit(XFS_LI_DIRTY, &rudp->rud_item.li_flags);
646 :
647 1164 : ruip = xfs_rui_init(tp->t_mountp, count);
648 2328 : memcpy(ruip->rui_format.rui_extents, map, count * sizeof(*map));
649 1164 : atomic_set(&ruip->rui_next_extent, count);
650 1164 : xfs_trans_add_item(tp, &ruip->rui_item);
651 1164 : set_bit(XFS_LI_DIRTY, &ruip->rui_item.li_flags);
652 1164 : return &ruip->rui_item;
653 : }
654 :
 : /* Log item operations for the RUI (forward-declared at the top of the file). */
655 : static const struct xfs_item_ops xfs_rui_item_ops = {
656 : .flags = XFS_ITEM_INTENT,
657 : .iop_size = xfs_rui_item_size,
658 : .iop_format = xfs_rui_item_format,
659 : .iop_unpin = xfs_rui_item_unpin,
660 : .iop_release = xfs_rui_item_release,
661 : .iop_recover = xfs_rui_item_recover,
662 : .iop_match = xfs_rui_item_match,
663 : .iop_relog = xfs_rui_item_relog,
664 : };
665 :
 : /*
 : * Copy a recovered RUI format into the in-core item: the fixed-size
 : * header first, then each extent record individually.
 : */
666 : static inline void
667 40869 : xfs_rui_copy_format(
668 : struct xfs_rui_log_format *dst,
669 : const struct xfs_rui_log_format *src)
670 : {
671 40869 : unsigned int i;
672 :
673 81738 : memcpy(dst, src, offsetof(struct xfs_rui_log_format, rui_extents));
674 :
675 82010 : for (i = 0; i < src->rui_nextents; i++)
676 82282 : memcpy(&dst->rui_extents[i], &src->rui_extents[i],
677 : sizeof(struct xfs_map_extent));
678 40869 : }
679 :
680 : /*
681 : * This routine is called to create an in-core extent rmap update
682 : * item from the rui format structure which was logged on disk.
683 : * It allocates an in-core rui, copies the extents from the format
684 : * structure into it, and adds the rui to the AIL with the given
685 : * LSN.
686 : */
687 : STATIC int
688 40869 : xlog_recover_rui_commit_pass2(
689 : struct xlog *log,
690 : struct list_head *buffer_list,
691 : struct xlog_recover_item *item,
692 : xfs_lsn_t lsn)
693 : {
694 40869 : struct xfs_mount *mp = log->l_mp;
695 40869 : struct xfs_rui_log_item *ruip;
696 40869 : struct xfs_rui_log_format *rui_formatp;
697 40869 : size_t len;
698 :
699 40869 : rui_formatp = item->ri_buf[0].i_addr;
700 :
 : /* The buffer must at least hold a zero-extent format header. */
701 40869 : if (item->ri_buf[0].i_len < xfs_rui_log_format_sizeof(0)) {
702 0 : XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp,
703 : item->ri_buf[0].i_addr, item->ri_buf[0].i_len);
704 0 : return -EFSCORRUPTED;
705 : }
706 :
 : /* The buffer length must exactly match the claimed extent count. */
707 40869 : len = xfs_rui_log_format_sizeof(rui_formatp->rui_nextents);
708 40869 : if (item->ri_buf[0].i_len != len) {
709 0 : XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp,
710 : item->ri_buf[0].i_addr, item->ri_buf[0].i_len);
711 0 : return -EFSCORRUPTED;
712 : }
713 :
714 40869 : ruip = xfs_rui_init(mp, rui_formatp->rui_nextents);
715 40869 : xfs_rui_copy_format(&ruip->rui_format, rui_formatp);
716 40869 : atomic_set(&ruip->rui_next_extent, rui_formatp->rui_nextents);
717 : /*
718 : * Insert the intent into the AIL directly and drop one reference so
719 : * that finishing or canceling the work will drop the other.
720 : */
721 40869 : xfs_trans_ail_insert(log->l_ailp, &ruip->rui_item, lsn);
722 40869 : xfs_rui_release(ruip);
723 40869 : return 0;
724 : }
725 :
 : /* Recovery dispatch entry for RUI log items. */
726 : const struct xlog_recover_item_ops xlog_rui_item_ops = {
727 : .item_type = XFS_LI_RUI,
728 : .commit_pass2 = xlog_recover_rui_commit_pass2,
729 : };
730 :
731 : /*
732 : * This routine is called when an RUD format structure is found in a committed
733 : * transaction in the log. Its purpose is to cancel the corresponding RUI if it
734 : * was still in the log. To do this it searches the AIL for the RUI with an id
735 : * equal to that in the RUD format structure. If we find it we drop the RUD
736 : * reference, which removes the RUI from the AIL and frees it.
737 : */
738 : STATIC int
739 39650 : xlog_recover_rud_commit_pass2(
740 : struct xlog *log,
741 : struct list_head *buffer_list,
742 : struct xlog_recover_item *item,
743 : xfs_lsn_t lsn)
744 : {
745 39650 : struct xfs_rud_log_format *rud_formatp;
746 :
 : /* RUDs are fixed size; anything else is corruption. */
747 39650 : rud_formatp = item->ri_buf[0].i_addr;
748 39650 : if (item->ri_buf[0].i_len != sizeof(struct xfs_rud_log_format)) {
749 0 : XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, log->l_mp,
750 : rud_formatp, item->ri_buf[0].i_len);
751 0 : return -EFSCORRUPTED;
752 : }
753 :
754 39650 : xlog_recover_release_intent(log, XFS_LI_RUI, rud_formatp->rud_rui_id);
755 39650 : return 0;
756 : }
757 :
 : /* Recovery dispatch entry for RUD log items. */
758 : const struct xlog_recover_item_ops xlog_rud_item_ops = {
759 : .item_type = XFS_LI_RUD,
760 : .commit_pass2 = xlog_recover_rud_commit_pass2,
761 : };
|