Line data Source code
1 : // SPDX-License-Identifier: GPL-2.0-or-later
2 : /*
3 : * Copyright (C) 2020-2023 Oracle. All Rights Reserved.
4 : * Author: Darrick J. Wong <djwong@kernel.org>
5 : */
6 : #include "xfs.h"
7 : #include "xfs_fs.h"
8 : #include "xfs_format.h"
9 : #include "xfs_log_format.h"
10 : #include "xfs_trans_resv.h"
11 : #include "xfs_bit.h"
12 : #include "xfs_shared.h"
13 : #include "xfs_mount.h"
14 : #include "xfs_defer.h"
15 : #include "xfs_inode.h"
16 : #include "xfs_trans.h"
17 : #include "xfs_trans_priv.h"
18 : #include "xfs_swapext_item.h"
19 : #include "xfs_swapext.h"
20 : #include "xfs_log.h"
21 : #include "xfs_bmap.h"
22 : #include "xfs_icache.h"
23 : #include "xfs_bmap_btree.h"
24 : #include "xfs_trans_space.h"
25 : #include "xfs_error.h"
26 : #include "xfs_log_priv.h"
27 : #include "xfs_log_recover.h"
28 : #include "xfs_xchgrange.h"
29 : #include "xfs_trace.h"
30 :
31 : struct kmem_cache *xfs_sxi_cache;
32 : struct kmem_cache *xfs_sxd_cache;
33 :
34 : static const struct xfs_item_ops xfs_sxi_item_ops;
35 :
36 : static inline struct xfs_sxi_log_item *SXI_ITEM(struct xfs_log_item *lip)
37 : {
38 : return container_of(lip, struct xfs_sxi_log_item, sxi_item);
39 : }
40 :
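/* Free the SXI log item and the shadow log vector buffer attached to it. */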
41 : STATIC void
42 4098560 : xfs_sxi_item_free(
43 : struct xfs_sxi_log_item *sxi_lip)
44 : {
45 4098560 : kmem_free(sxi_lip->sxi_item.li_lv_shadow);
46 4098560 : kmem_cache_free(xfs_sxi_cache, sxi_lip);
47 4098560 : }
48 :
49 : /*
50 : * Freeing the SXI requires that we remove it from the AIL if it has already
51 : * been placed there. However, the SXI may not yet have been placed in the AIL
52 : * when xfs_sxi_release() is called from SXD processing, due to the ordering of
53 : * committed vs. unpin operations in bulk insert operations. Hence we use a
54 : * reference count to ensure that only the last caller frees the SXI.
55 : */
56 : STATIC void
57 8197120 : xfs_sxi_release(
58 : struct xfs_sxi_log_item *sxi_lip)
59 : {
60 8197120 : ASSERT(atomic_read(&sxi_lip->sxi_refcount) > 0);
61 8197120 : if (atomic_dec_and_test(&sxi_lip->sxi_refcount)) {
62 4098560 : xfs_trans_ail_delete(&sxi_lip->sxi_item, SHUTDOWN_LOG_IO_ERROR);
63 4098560 : xfs_sxi_item_free(sxi_lip);
64 : }
65 8197120 : }
66 :
67 :
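/*
 * Report the number of log iovecs and the amount of log space needed to log
 * the given sxi item: a single iovec holding the sxi_log_format structure.
 */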
68 : STATIC void
69 4093815 : xfs_sxi_item_size(
70 : struct xfs_log_item *lip,
71 : int *nvecs,
72 : int *nbytes)
73 : {
74 4093815 : *nvecs += 1;
75 4093815 : *nbytes += sizeof(struct xfs_sxi_log_format);
76 4093815 : }
77 :
78 : /*
79 : * This is called to fill in the vector of log iovecs for the given sxi log
80 : * item. We use only 1 iovec, and we point that at the sxi_log_format structure
81 : * embedded in the sxi item.
82 : */
83 : STATIC void
84 4093812 : xfs_sxi_item_format(
85 : struct xfs_log_item *lip,
86 : struct xfs_log_vec *lv)
87 : {
88 4093812 : struct xfs_sxi_log_item *sxi_lip = SXI_ITEM(lip);
89 4093812 : struct xfs_log_iovec *vecp = NULL;
90 :
91 4093812 : sxi_lip->sxi_format.sxi_type = XFS_LI_SXI;
92 4093812 : sxi_lip->sxi_format.sxi_size = 1;
93 :
94 4093812 : xlog_copy_iovec(lv, &vecp, XLOG_REG_TYPE_SXI_FORMAT,
95 4093812 : &sxi_lip->sxi_format,
96 : sizeof(struct xfs_sxi_log_format));
97 4093802 : }
98 :
99 : /*
100 : * The unpin operation is the last place an SXI is manipulated in the log. It
101 : * is either inserted in the AIL or aborted in the event of a log I/O error. In
102 : * either case, the SXI transaction has been successfully committed to make it
103 : * this far. Therefore, we expect whoever committed the SXI to either construct
104 : * and commit the SXD, or drop its reference to the SXI in the event of an
105 : * error. Simply drop the log's SXI reference now that the log is done with it.
106 : */
107 : STATIC void
108 4093818 : xfs_sxi_item_unpin(
109 : struct xfs_log_item *lip,
110 : int remove)
111 : {
112 4093818 : struct xfs_sxi_log_item *sxi_lip = SXI_ITEM(lip);
113 :
114 4093818 : xfs_sxi_release(sxi_lip);
115 4093818 : }
116 :
117 : /*
118 : * By the time this is called, the SXI has been committed (or aborted if the
119 : * transaction was cancelled). If the transaction was cancelled, an SXD isn't
120 : * going to be constructed, so we free the SXI here directly.
121 : */
122 : STATIC void
123 4725 : xfs_sxi_item_release(
124 : struct xfs_log_item *lip)
125 : {
126 4725 : xfs_sxi_release(SXI_ITEM(lip));
127 4725 : }
128 :
129 : /* Allocate and initialize an sxi item. */
130 : STATIC struct xfs_sxi_log_item *
131 4098549 : xfs_sxi_init(
132 : struct xfs_mount *mp)
133 :
134 : {
135 4098549 : struct xfs_sxi_log_item *sxi_lip;
136 :
137 4098549 : sxi_lip = kmem_cache_zalloc(xfs_sxi_cache, GFP_KERNEL | __GFP_NOFAIL);
138 :
139 4098554 : xfs_log_item_init(mp, &sxi_lip->sxi_item, XFS_LI_SXI, &xfs_sxi_item_ops);
140 4098552 : sxi_lip->sxi_format.sxi_id = (uintptr_t)(void *)sxi_lip;
141 4098552 : atomic_set(&sxi_lip->sxi_refcount, 2);
142 :
143 4098552 : return sxi_lip;
144 : }
145 :
146 : static inline struct xfs_sxd_log_item *SXD_ITEM(struct xfs_log_item *lip)
147 : {
148 : return container_of(lip, struct xfs_sxd_log_item, sxd_item);
149 : }
150 :
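/*
 * Report the number of log iovecs and the amount of log space needed to log
 * the given sxd item: a single iovec holding the sxd_log_format structure.
 */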
151 : STATIC void
152 4093811 : xfs_sxd_item_size(
153 : struct xfs_log_item *lip,
154 : int *nvecs,
155 : int *nbytes)
156 : {
157 4093811 : *nvecs += 1;
158 4093811 : *nbytes += sizeof(struct xfs_sxd_log_format);
159 4093811 : }
160 :
161 : /*
162 : * This is called to fill in the vector of log iovecs for the given sxd log
163 : * item. We use only 1 iovec, and we point that at the sxd_log_format structure
164 : * embedded in the sxd item.
165 : */
166 : STATIC void
167 4093814 : xfs_sxd_item_format(
168 : struct xfs_log_item *lip,
169 : struct xfs_log_vec *lv)
170 : {
171 4093814 : struct xfs_sxd_log_item *sxd_lip = SXD_ITEM(lip);
172 4093814 : struct xfs_log_iovec *vecp = NULL;
173 :
174 4093814 : sxd_lip->sxd_format.sxd_type = XFS_LI_SXD;
175 4093814 : sxd_lip->sxd_format.sxd_size = 1;
176 :
177 4093814 : xlog_copy_iovec(lv, &vecp, XLOG_REG_TYPE_SXD_FORMAT, &sxd_lip->sxd_format,
178 : sizeof(struct xfs_sxd_log_format));
179 4093813 : }
180 :
181 : /*
182 : * By the time this is called, the SXD has been committed (or aborted if the
183 : * transaction was cancelled). Either way, drop our reference to the SXI and
184 : * free the SXD.
185 : */
186 : STATIC void
187 4093818 : xfs_sxd_item_release(
188 : struct xfs_log_item *lip)
189 : {
190 4093818 : struct xfs_sxd_log_item *sxd_lip = SXD_ITEM(lip);
191 :
192 4093818 : kmem_free(sxd_lip->sxd_item.li_lv_shadow);
193 4093818 : xfs_sxi_release(sxd_lip->sxd_intent_log_item);
194 4093818 : kmem_cache_free(xfs_sxd_cache, sxd_lip);
195 4093818 : }
196 :
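/* Return the SXI intent item that this SXD completes. */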
197 : static struct xfs_log_item *
198 0 : xfs_sxd_item_intent(
199 : struct xfs_log_item *lip)
200 : {
201 0 : return &SXD_ITEM(lip)->sxd_intent_log_item->sxi_item;
202 : }
203 :
204 : static const struct xfs_item_ops xfs_sxd_item_ops = {
205 : .flags = XFS_ITEM_RELEASE_WHEN_COMMITTED |
206 : XFS_ITEM_INTENT_DONE,
207 : .iop_size = xfs_sxd_item_size,
208 : .iop_format = xfs_sxd_item_format,
209 : .iop_release = xfs_sxd_item_release,
210 : .iop_intent = xfs_sxd_item_intent,
211 : };
212 :
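/*
 * Allocate an SXD log item to mark the completion of the given SXI, point it
 * at the intent, and attach it to the transaction.
 */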
213 : static struct xfs_sxd_log_item *
214 4093803 : xfs_trans_get_sxd(
215 : struct xfs_trans *tp,
216 : struct xfs_sxi_log_item *sxi_lip)
217 : {
218 4093803 : struct xfs_sxd_log_item *sxd_lip;
219 :
220 4093803 : sxd_lip = kmem_cache_zalloc(xfs_sxd_cache, GFP_KERNEL | __GFP_NOFAIL);
221 4093810 : xfs_log_item_init(tp->t_mountp, &sxd_lip->sxd_item, XFS_LI_SXD,
222 : &xfs_sxd_item_ops);
223 4093811 : sxd_lip->sxd_intent_log_item = sxi_lip;
224 4093811 : sxd_lip->sxd_format.sxd_sxi_id = sxi_lip->sxi_format.sxi_id;
225 :
226 4093811 : xfs_trans_add_item(tp, &sxd_lip->sxd_item);
227 4093812 : return sxd_lip;
228 : }
229 :
230 : /*
231 : * Finish a swapext update and log it to the SXD. Note that the transaction is
232 : * marked dirty regardless of whether the swapext update succeeds or fails, in
233 : * order to support the SXI/SXD lifecycle rules.
234 : */
235 : static int
236 5117747 : xfs_swapext_finish_update(
237 : struct xfs_trans *tp,
238 : struct xfs_log_item *done,
239 : struct xfs_swapext_intent *sxi)
240 : {
241 5117747 : int error;
242 :
243 5117747 : error = xfs_swapext_finish_one(tp, sxi);
244 :
245 : /*
246 : * Mark the transaction dirty, even on error. This ensures the
247 : * transaction is aborted, which:
248 : *
249 : * 1.) releases the SXI and frees the SXD
250 : * 2.) shuts down the filesystem
251 : */
252 5117754 : tp->t_flags |= XFS_TRANS_DIRTY;
253 5117754 : if (done)
254 4093801 : set_bit(XFS_LI_DIRTY, &done->li_flags);
255 :
256 5117759 : return error;
257 : }
258 :
259 : /* Log swapext updates in the intent item. */
260 : STATIC struct xfs_log_item *
261 5117767 : xfs_swapext_create_intent(
262 : struct xfs_trans *tp,
263 : struct list_head *items,
264 : unsigned int count,
265 : bool sort)
266 : {
267 5117767 : struct xfs_sxi_log_item *sxi_lip;
268 5117767 : struct xfs_swapext_intent *sxi;
269 5117767 : struct xfs_swap_extent *sx;
270 :
271 5117767 : ASSERT(count == 1);
272 :
273 5117767 : sxi = list_first_entry_or_null(items, struct xfs_swapext_intent,
274 : sxi_list);
275 :
276 : /*
277 : * We use the same defer ops control machinery to perform extent swaps
278 : * even if we aren't using the machinery to track the operation status
279 : * through log items.
280 : */
281 5117767 : if (!(sxi->sxi_op_flags & XFS_SWAP_EXT_OP_LOGGED))
282 : return NULL;
283 :
284 4093799 : sxi_lip = xfs_sxi_init(tp->t_mountp);
285 4093798 : xfs_trans_add_item(tp, &sxi_lip->sxi_item);
286 4093798 : tp->t_flags |= XFS_TRANS_DIRTY;
287 4093798 : set_bit(XFS_LI_DIRTY, &sxi_lip->sxi_item.li_flags);
288 :
289 4093803 : sx = &sxi_lip->sxi_format.sxi_extent;
290 4093803 : sx->sx_inode1 = sxi->sxi_ip1->i_ino;
291 4093803 : sx->sx_inode2 = sxi->sxi_ip2->i_ino;
292 4093803 : sx->sx_startoff1 = sxi->sxi_startoff1;
293 4093803 : sx->sx_startoff2 = sxi->sxi_startoff2;
294 4093803 : sx->sx_blockcount = sxi->sxi_blockcount;
295 4093803 : sx->sx_isize1 = sxi->sxi_isize1;
296 4093803 : sx->sx_isize2 = sxi->sxi_isize2;
297 4093803 : sx->sx_flags = sxi->sxi_flags;
298 :
299 4093803 : return &sxi_lip->sxi_item;
300 : }
301 :
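/*
 * Get an SXD log item to record completion of the swapext intent, or NULL if
 * the operation is not being tracked with log items.
 */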
302 : STATIC struct xfs_log_item *
303 5117727 : xfs_swapext_create_done(
304 : struct xfs_trans *tp,
305 : struct xfs_log_item *intent,
306 : unsigned int count)
307 : {
308 5117727 : if (intent == NULL)
309 : return NULL;
310 4093774 : return &xfs_trans_get_sxd(tp, SXI_ITEM(intent))->sxd_item;
311 : }
312 :
313 : /* Process a deferred swapext update. */
314 : STATIC int
315 5117729 : xfs_swapext_finish_item(
316 : struct xfs_trans *tp,
317 : struct xfs_log_item *done,
318 : struct list_head *item,
319 : struct xfs_btree_cur **state)
320 : {
321 5117729 : struct xfs_swapext_intent *sxi;
322 5117729 : int error;
323 :
324 5117729 : sxi = container_of(item, struct xfs_swapext_intent, sxi_list);
325 :
326 : /*
327 : * Swap one more extent between the two files. If there's still more
328 : * work to do, we want to requeue ourselves after all other pending
329 : * deferred operations have finished. This includes all of the dfops
330 : * that we queued directly as well as any new ones created in the
331 : * process of finishing the others. Doing so prevents us from queuing
332 : * a large number of SXI log items in kernel memory, which in turn
333 : * prevents us from pinning the tail of the log (while logging those
334 : * new SXI items) until the first SXI items can be processed.
335 : */
336 5117729 : error = xfs_swapext_finish_update(tp, done, sxi);
337 5117742 : if (error == -EAGAIN)
338 : return error;
339 :
340 1293095 : kmem_cache_free(xfs_swapext_intent_cache, sxi);
341 1293095 : return error;
342 : }
343 :
344 : /* Abort all pending SXIs. */
345 : STATIC void
346 17 : xfs_swapext_abort_intent(
347 : struct xfs_log_item *intent)
348 : {
349 17 : xfs_sxi_release(SXI_ITEM(intent));
350 17 : }
351 :
352 : /* Cancel a deferred swapext update. */
353 : STATIC void
354 32 : xfs_swapext_cancel_item(
355 : struct list_head *item)
356 : {
357 32 : struct xfs_swapext_intent *sxi;
358 :
359 32 : sxi = container_of(item, struct xfs_swapext_intent, sxi_list);
360 32 : kmem_cache_free(xfs_swapext_intent_cache, sxi);
361 32 : }
362 :
363 : const struct xfs_defer_op_type xfs_swapext_defer_type = {
364 : .max_items = 1,
365 : .create_intent = xfs_swapext_create_intent,
366 : .abort_intent = xfs_swapext_abort_intent,
367 : .create_done = xfs_swapext_create_done,
368 : .finish_item = xfs_swapext_finish_item,
369 : .cancel_item = xfs_swapext_cancel_item,
370 : };
371 :
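/*
 * Illustrative sketch (not part of this file): a caller that has filled out a
 * struct xfs_swapext_intent would hand it to the defer machinery so that
 * xfs_swapext_create_intent() logs the SXI and xfs_swapext_finish_item()
 * performs the swap. The XFS_DEFER_OPS_TYPE_SWAPEXT constant is assumed here
 * to be the type under which xfs_swapext_defer_type is registered.
 */
static inline void
xfs_swapext_defer_example(
	struct xfs_trans		*tp,
	struct xfs_swapext_intent	*sxi)
{
	/* Queue the intent; the defer code drives the SXI/SXD lifecycle. */
	xfs_defer_add(tp, XFS_DEFER_OPS_TYPE_SWAPEXT, &sxi->sxi_list);
}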
372 : /* Is this recovered SXI ok? */
373 : static inline bool
374 17 : xfs_sxi_validate(
375 : struct xfs_mount *mp,
376 : struct xfs_sxi_log_item *sxi_lip)
377 : {
378 17 : struct xfs_swap_extent *sx = &sxi_lip->sxi_format.sxi_extent;
379 :
380 34 : if (!xfs_sb_version_haslogswapext(&mp->m_sb))
381 : return false;
382 :
383 17 : if (sxi_lip->sxi_format.__pad != 0)
384 : return false;
385 :
386 17 : if (sx->sx_flags & ~XFS_SWAP_EXT_FLAGS)
387 : return false;
388 :
389 34 : if (!xfs_verify_ino(mp, sx->sx_inode1) ||
390 17 : !xfs_verify_ino(mp, sx->sx_inode2))
391 0 : return false;
392 :
393 17 : if ((sx->sx_flags & XFS_SWAP_EXT_SET_SIZES) &&
394 6 : (sx->sx_isize1 < 0 || sx->sx_isize2 < 0))
395 : return false;
396 :
397 17 : if (!xfs_verify_fileext(mp, sx->sx_startoff1, sx->sx_blockcount))
398 : return false;
399 :
400 17 : return xfs_verify_fileext(mp, sx->sx_startoff2, sx->sx_blockcount);
401 : }
402 :
403 : /*
404 : * Use the recovered log state to create a new request, estimate resource
405 : * requirements, and create a new incore intent state.
406 : */
407 : STATIC struct xfs_swapext_intent *
408 17 : xfs_sxi_item_recover_intent(
409 : struct xfs_mount *mp,
410 : const struct xfs_swap_extent *sx,
411 : struct xfs_swapext_req *req,
412 : unsigned int *reflink_state)
413 : {
414 17 : struct xfs_inode *ip1, *ip2;
415 17 : int error;
416 :
417 : /*
418 : * Grab both inodes and set IRECOVERY to prevent trimming of post-eof
419 : * extents and freeing of unlinked inodes until we're totally done
420 : * processing files.
421 : */
422 17 : error = xlog_recover_iget(mp, sx->sx_inode1, &ip1);
423 17 : if (error)
424 0 : return ERR_PTR(error);
425 17 : error = xlog_recover_iget(mp, sx->sx_inode2, &ip2);
426 17 : if (error)
427 0 : goto err_rele1;
428 :
429 17 : req->ip1 = ip1;
430 17 : req->ip2 = ip2;
431 17 : req->startoff1 = sx->sx_startoff1;
432 17 : req->startoff2 = sx->sx_startoff2;
433 17 : req->blockcount = sx->sx_blockcount;
434 :
435 17 : if (sx->sx_flags & XFS_SWAP_EXT_ATTR_FORK)
436 0 : req->whichfork = XFS_ATTR_FORK;
437 : else
438 17 : req->whichfork = XFS_DATA_FORK;
439 :
440 17 : if (sx->sx_flags & XFS_SWAP_EXT_SET_SIZES)
441 6 : req->req_flags |= XFS_SWAP_REQ_SET_SIZES;
442 17 : if (sx->sx_flags & XFS_SWAP_EXT_INO1_WRITTEN)
443 12 : req->req_flags |= XFS_SWAP_REQ_INO1_WRITTEN;
444 17 : req->req_flags |= XFS_SWAP_REQ_LOGGED;
445 :
446 17 : xfs_xchg_range_ilock(NULL, ip1, ip2);
447 17 : error = xfs_swapext_estimate(req);
448 17 : xfs_xchg_range_iunlock(ip1, ip2);
449 17 : if (error)
450 0 : goto err_rele2;
451 :
452 17 : return xfs_swapext_init_intent(req, reflink_state);
453 :
454 : err_rele2:
455 0 : xfs_irele(ip2);
456 0 : err_rele1:
457 0 : xfs_irele(ip1);
458 0 : return ERR_PTR(error);
459 : }
460 :
461 : /* Process a swapext update intent item that was recovered from the log. */
462 : STATIC int
463 17 : xfs_sxi_item_recover(
464 : struct xfs_log_item *lip,
465 : struct list_head *capture_list)
466 : {
467 17 : struct xfs_swapext_req req = { .req_flags = 0 };
468 17 : struct xfs_swapext_intent *sxi;
469 17 : struct xfs_sxi_log_item *sxi_lip = SXI_ITEM(lip);
470 17 : struct xfs_mount *mp = lip->li_log->l_mp;
471 17 : struct xfs_swap_extent *sx = &sxi_lip->sxi_format.sxi_extent;
472 17 : struct xfs_sxd_log_item *sxd_lip = NULL;
473 17 : struct xfs_trans *tp;
474 17 : struct xfs_inode *ip1, *ip2;
475 17 : unsigned int reflink_state;
476 17 : int error = 0;
477 :
478 17 : if (!xfs_sxi_validate(mp, sxi_lip)) {
479 0 : XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp,
480 : &sxi_lip->sxi_format,
481 : sizeof(sxi_lip->sxi_format));
482 0 : return -EFSCORRUPTED;
483 : }
484 :
485 17 : sxi = xfs_sxi_item_recover_intent(mp, sx, &req, &reflink_state);
486 17 : if (IS_ERR(sxi))
487 0 : return PTR_ERR(sxi);
488 :
489 17 : trace_xfs_swapext_recover(mp, sxi);
490 :
491 17 : ip1 = sxi->sxi_ip1;
492 17 : ip2 = sxi->sxi_ip2;
493 :
494 17 : error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, req.resblks, 0, 0,
495 : &tp);
496 17 : if (error)
497 0 : goto err_rele;
498 :
499 17 : sxd_lip = xfs_trans_get_sxd(tp, sxi_lip);
500 :
501 17 : xfs_xchg_range_ilock(tp, ip1, ip2);
502 :
503 17 : xfs_swapext_ensure_reflink(tp, sxi, reflink_state);
504 17 : error = xfs_swapext_finish_update(tp, &sxd_lip->sxd_item, sxi);
505 17 : if (error == -EAGAIN) {
506 : /*
507 : * If there's more extent swapping to be done, we have to
508 : * schedule that as a separate deferred operation to be run
509 : * after we've finished replaying all of the intents we
510 : * recovered from the log. Transfer ownership of the sxi to
511 : * the transaction.
512 : */
513 14 : xfs_swapext_schedule(tp, sxi);
514 14 : error = 0;
515 14 : sxi = NULL;
516 : }
517 17 : if (error == -EFSCORRUPTED)
518 0 : XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp, sx,
519 : sizeof(*sx));
520 17 : if (error)
521 0 : goto err_cancel;
522 :
523 : /*
524 : * Commit transaction, which frees the transaction and saves the inodes
525 : * for later replay activities.
526 : */
527 17 : error = xfs_defer_ops_capture_and_commit(tp, capture_list);
528 17 : goto err_unlock;
529 :
530 : err_cancel:
531 0 : xfs_trans_cancel(tp);
532 17 : err_unlock:
533 17 : xfs_xchg_range_iunlock(ip1, ip2);
534 17 : err_rele:
535 17 : if (sxi)
536 3 : kmem_cache_free(xfs_swapext_intent_cache, sxi);
537 17 : xfs_irele(ip2);
538 17 : xfs_irele(ip1);
539 17 : return error;
540 : }
541 :
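/* Match SXI log items against the given intent id. */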
542 : STATIC bool
543 4769 : xfs_sxi_item_match(
544 : struct xfs_log_item *lip,
545 : uint64_t intent_id)
546 : {
547 4769 : return SXI_ITEM(lip)->sxi_format.sxi_id == intent_id;
548 : }
549 :
550 : /* Relog an intent item to push the log tail forward. */
551 : static struct xfs_log_item *
552 12 : xfs_sxi_item_relog(
553 : struct xfs_log_item *intent,
554 : struct xfs_trans *tp)
555 : {
556 12 : struct xfs_sxd_log_item *sxd_lip;
557 12 : struct xfs_sxi_log_item *sxi_lip;
558 12 : struct xfs_swap_extent *sx;
559 :
560 12 : sx = &SXI_ITEM(intent)->sxi_format.sxi_extent;
561 :
562 12 : tp->t_flags |= XFS_TRANS_DIRTY;
563 12 : sxd_lip = xfs_trans_get_sxd(tp, SXI_ITEM(intent));
564 12 : set_bit(XFS_LI_DIRTY, &sxd_lip->sxd_item.li_flags);
565 :
566 12 : sxi_lip = xfs_sxi_init(tp->t_mountp);
567 24 : memcpy(&sxi_lip->sxi_format.sxi_extent, sx, sizeof(*sx));
568 12 : xfs_trans_add_item(tp, &sxi_lip->sxi_item);
569 12 : set_bit(XFS_LI_DIRTY, &sxi_lip->sxi_item.li_flags);
570 12 : return &sxi_lip->sxi_item;
571 : }
572 :
573 : static const struct xfs_item_ops xfs_sxi_item_ops = {
574 : .flags = XFS_ITEM_INTENT,
575 : .iop_size = xfs_sxi_item_size,
576 : .iop_format = xfs_sxi_item_format,
577 : .iop_unpin = xfs_sxi_item_unpin,
578 : .iop_release = xfs_sxi_item_release,
579 : .iop_recover = xfs_sxi_item_recover,
580 : .iop_match = xfs_sxi_item_match,
581 : .iop_relog = xfs_sxi_item_relog,
582 : };
583 :
584 : /*
585 : * This routine is called to create an in-core swapext intent item from the
586 : * sxi format structure which was logged on disk. It allocates an in-core
587 : * sxi, copies the logged format structure into it, and adds the sxi
588 : * to the AIL with the given LSN.
589 : */
590 : STATIC int
591 4742 : xlog_recover_sxi_commit_pass2(
592 : struct xlog *log,
593 : struct list_head *buffer_list,
594 : struct xlog_recover_item *item,
595 : xfs_lsn_t lsn)
596 : {
597 4742 : struct xfs_mount *mp = log->l_mp;
598 4742 : struct xfs_sxi_log_item *sxi_lip;
599 4742 : struct xfs_sxi_log_format *sxi_formatp;
600 4742 : size_t len;
601 :
602 4742 : sxi_formatp = item->ri_buf[0].i_addr;
603 :
604 4742 : len = sizeof(struct xfs_sxi_log_format);
605 4742 : if (item->ri_buf[0].i_len != len) {
606 0 : XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, log->l_mp);
607 0 : return -EFSCORRUPTED;
608 : }
609 :
610 4742 : if (sxi_formatp->__pad != 0) {
611 0 : XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, log->l_mp);
612 0 : return -EFSCORRUPTED;
613 : }
614 :
615 4742 : sxi_lip = xfs_sxi_init(mp);
616 9484 : memcpy(&sxi_lip->sxi_format, sxi_formatp, len);
617 :
618 4742 : xfs_trans_ail_insert(log->l_ailp, &sxi_lip->sxi_item, lsn);
619 4742 : xfs_sxi_release(sxi_lip);
620 4742 : return 0;
621 : }
622 :
623 : const struct xlog_recover_item_ops xlog_sxi_item_ops = {
624 : .item_type = XFS_LI_SXI,
625 : .commit_pass2 = xlog_recover_sxi_commit_pass2,
626 : };
627 :
628 : /*
629 : * This routine is called when an SXD format structure is found in a committed
630 : * transaction in the log. Its purpose is to cancel the corresponding SXI if it
631 : * was still in the log. To do this it searches the AIL for the SXI with an id
632 : * equal to that in the SXD format structure. If we find it, we release the
633 : * SXI, which removes it from the AIL and frees it.
634 : */
635 : STATIC int
636 4725 : xlog_recover_sxd_commit_pass2(
637 : struct xlog *log,
638 : struct list_head *buffer_list,
639 : struct xlog_recover_item *item,
640 : xfs_lsn_t lsn)
641 : {
642 4725 : struct xfs_sxd_log_format *sxd_formatp;
643 :
644 4725 : sxd_formatp = item->ri_buf[0].i_addr;
645 4725 : if (item->ri_buf[0].i_len != sizeof(struct xfs_sxd_log_format)) {
646 0 : XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, log->l_mp);
647 0 : return -EFSCORRUPTED;
648 : }
649 :
650 4725 : xlog_recover_release_intent(log, XFS_LI_SXI, sxd_formatp->sxd_sxi_id);
651 4725 : return 0;
652 : }
653 :
654 : const struct xlog_recover_item_ops xlog_sxd_item_ops = {
655 : .item_type = XFS_LI_SXD,
656 : .commit_pass2 = xlog_recover_sxd_commit_pass2,
657 : };