Line data Source code
1 : // SPDX-License-Identifier: GPL-2.0-or-later
2 : /*
3 : * Copyright (C) 2020-2023 Oracle. All Rights Reserved.
4 : * Author: Darrick J. Wong <djwong@kernel.org>
5 : */
6 : #include "xfs.h"
7 : #include "xfs_fs.h"
8 : #include "xfs_format.h"
9 : #include "xfs_log_format.h"
10 : #include "xfs_trans_resv.h"
11 : #include "xfs_bit.h"
12 : #include "xfs_shared.h"
13 : #include "xfs_mount.h"
14 : #include "xfs_defer.h"
15 : #include "xfs_inode.h"
16 : #include "xfs_trans.h"
17 : #include "xfs_trans_priv.h"
18 : #include "xfs_swapext_item.h"
19 : #include "xfs_swapext.h"
20 : #include "xfs_log.h"
21 : #include "xfs_bmap.h"
22 : #include "xfs_icache.h"
23 : #include "xfs_bmap_btree.h"
24 : #include "xfs_trans_space.h"
25 : #include "xfs_error.h"
26 : #include "xfs_log_priv.h"
27 : #include "xfs_log_recover.h"
28 : #include "xfs_xchgrange.h"
29 : #include "xfs_trace.h"
30 :
/* Slab caches for the SXI (swapext intent) and SXD (swapext done) log items. */
struct kmem_cache *xfs_sxi_cache;
struct kmem_cache *xfs_sxd_cache;

/* Forward declaration so xfs_sxi_init() can install the ops vector. */
static const struct xfs_item_ops xfs_sxi_item_ops;
35 :
/* Convert a generic log item back to its containing SXI log item. */
static inline struct xfs_sxi_log_item *SXI_ITEM(struct xfs_log_item *lip)
{
	return container_of(lip, struct xfs_sxi_log_item, sxi_item);
}
40 :
/* Free an SXI log item along with its shadow log vector buffer. */
STATIC void
xfs_sxi_item_free(
	struct xfs_sxi_log_item	*sxi_lip)
{
	kmem_free(sxi_lip->sxi_item.li_lv_shadow);
	kmem_cache_free(xfs_sxi_cache, sxi_lip);
}
48 :
/*
 * Freeing the SXI requires that we remove it from the AIL if it has already
 * been placed there. However, the SXI may not yet have been placed in the AIL
 * when called by xfs_sxi_release() from SXD processing due to the ordering of
 * committed vs unpin operations in bulk insert operations. Hence the reference
 * count to ensure only the last caller frees the SXI.
 */
STATIC void
xfs_sxi_release(
	struct xfs_sxi_log_item	*sxi_lip)
{
	ASSERT(atomic_read(&sxi_lip->sxi_refcount) > 0);
	if (atomic_dec_and_test(&sxi_lip->sxi_refcount)) {
		/* Last reference: pull the item out of the AIL and free it. */
		xfs_trans_ail_delete(&sxi_lip->sxi_item, SHUTDOWN_LOG_IO_ERROR);
		xfs_sxi_item_free(sxi_lip);
	}
}
66 :
67 :
68 : STATIC void
69 537615 : xfs_sxi_item_size(
70 : struct xfs_log_item *lip,
71 : int *nvecs,
72 : int *nbytes)
73 : {
74 537615 : *nvecs += 1;
75 537615 : *nbytes += sizeof(struct xfs_sxi_log_format);
76 537615 : }
77 :
/*
 * This is called to fill in the vector of log iovecs for the given sxi log
 * item. We use only 1 iovec, and we point that at the sxi_log_format structure
 * embedded in the sxi item.
 */
STATIC void
xfs_sxi_item_format(
	struct xfs_log_item	*lip,
	struct xfs_log_vec	*lv)
{
	struct xfs_sxi_log_item	*sxi_lip = SXI_ITEM(lip);
	struct xfs_log_iovec	*vecp = NULL;

	/* Stamp the type and size just before the item hits the log. */
	sxi_lip->sxi_format.sxi_type = XFS_LI_SXI;
	sxi_lip->sxi_format.sxi_size = 1;

	xlog_copy_iovec(lv, &vecp, XLOG_REG_TYPE_SXI_FORMAT,
			&sxi_lip->sxi_format,
			sizeof(struct xfs_sxi_log_format));
}
98 :
99 : /*
100 : * The unpin operation is the last place an SXI is manipulated in the log. It
101 : * is either inserted in the AIL or aborted in the event of a log I/O error. In
102 : * either case, the SXI transaction has been successfully committed to make it
103 : * this far. Therefore, we expect whoever committed the SXI to either construct
104 : * and commit the SXD or drop the SXD's reference in the event of error. Simply
105 : * drop the log's SXI reference now that the log is done with it.
106 : */
107 : STATIC void
108 537615 : xfs_sxi_item_unpin(
109 : struct xfs_log_item *lip,
110 : int remove)
111 : {
112 537615 : struct xfs_sxi_log_item *sxi_lip = SXI_ITEM(lip);
113 :
114 537615 : xfs_sxi_release(sxi_lip);
115 537615 : }
116 :
/*
 * The SXI has been either committed or aborted if the transaction has been
 * cancelled. If the transaction was cancelled, an SXD isn't going to be
 * constructed and thus we free the SXI here directly.
 */
STATIC void
xfs_sxi_item_release(
	struct xfs_log_item	*lip)
{
	xfs_sxi_release(SXI_ITEM(lip));
}
128 :
/*
 * Allocate and initialize an sxi item.  The intent id is the kernel address
 * of the item; the refcount starts at 2 -- one reference for the log
 * (dropped at unpin) and one for whoever completes or aborts the operation.
 */
STATIC struct xfs_sxi_log_item *
xfs_sxi_init(
	struct xfs_mount	*mp)

{
	struct xfs_sxi_log_item	*sxi_lip;

	sxi_lip = kmem_cache_zalloc(xfs_sxi_cache, GFP_KERNEL | __GFP_NOFAIL);

	xfs_log_item_init(mp, &sxi_lip->sxi_item, XFS_LI_SXI, &xfs_sxi_item_ops);
	sxi_lip->sxi_format.sxi_id = (uintptr_t)(void *)sxi_lip;
	atomic_set(&sxi_lip->sxi_refcount, 2);

	return sxi_lip;
}
145 :
/* Convert a generic log item back to its containing SXD log item. */
static inline struct xfs_sxd_log_item *SXD_ITEM(struct xfs_log_item *lip)
{
	return container_of(lip, struct xfs_sxd_log_item, sxd_item);
}
150 :
151 : STATIC void
152 537615 : xfs_sxd_item_size(
153 : struct xfs_log_item *lip,
154 : int *nvecs,
155 : int *nbytes)
156 : {
157 537615 : *nvecs += 1;
158 537615 : *nbytes += sizeof(struct xfs_sxd_log_format);
159 537615 : }
160 :
/*
 * This is called to fill in the vector of log iovecs for the given sxd log
 * item. We use only 1 iovec, and we point that at the sxd_log_format structure
 * embedded in the sxd item.
 */
STATIC void
xfs_sxd_item_format(
	struct xfs_log_item	*lip,
	struct xfs_log_vec	*lv)
{
	struct xfs_sxd_log_item	*sxd_lip = SXD_ITEM(lip);
	struct xfs_log_iovec	*vecp = NULL;

	/* Stamp the type and size just before the item hits the log. */
	sxd_lip->sxd_format.sxd_type = XFS_LI_SXD;
	sxd_lip->sxd_format.sxd_size = 1;

	xlog_copy_iovec(lv, &vecp, XLOG_REG_TYPE_SXD_FORMAT, &sxd_lip->sxd_format,
			sizeof(struct xfs_sxd_log_format));
}
180 :
/*
 * The SXD is either committed or aborted if the transaction is cancelled. If
 * the transaction is cancelled, drop our reference to the SXI and free the
 * SXD.
 */
STATIC void
xfs_sxd_item_release(
	struct xfs_log_item	*lip)
{
	struct xfs_sxd_log_item	*sxd_lip = SXD_ITEM(lip);

	/* Drop the SXD's reference on the SXI before freeing the SXD itself. */
	kmem_free(sxd_lip->sxd_item.li_lv_shadow);
	xfs_sxi_release(sxd_lip->sxd_intent_log_item);
	kmem_cache_free(xfs_sxd_cache, sxd_lip);
}
196 :
/* Return the SXI that this SXD marks as done. */
static struct xfs_log_item *
xfs_sxd_item_intent(
	struct xfs_log_item	*lip)
{
	return &SXD_ITEM(lip)->sxd_intent_log_item->sxi_item;
}
203 :
/* Ops vector for the SXD (done) log item. */
static const struct xfs_item_ops xfs_sxd_item_ops = {
	.flags		= XFS_ITEM_RELEASE_WHEN_COMMITTED |
			  XFS_ITEM_INTENT_DONE,
	.iop_size	= xfs_sxd_item_size,
	.iop_format	= xfs_sxd_item_format,
	.iop_release	= xfs_sxd_item_release,
	.iop_intent	= xfs_sxd_item_intent,
};
212 :
/*
 * Allocate an SXD log item, point it at the given SXI, and add it to the
 * transaction.  The SXD inherits the SXI's intent id so that log recovery
 * can match intent and done items.
 */
static struct xfs_sxd_log_item *
xfs_trans_get_sxd(
	struct xfs_trans		*tp,
	struct xfs_sxi_log_item		*sxi_lip)
{
	struct xfs_sxd_log_item		*sxd_lip;

	sxd_lip = kmem_cache_zalloc(xfs_sxd_cache, GFP_KERNEL | __GFP_NOFAIL);
	xfs_log_item_init(tp->t_mountp, &sxd_lip->sxd_item, XFS_LI_SXD,
			  &xfs_sxd_item_ops);
	sxd_lip->sxd_intent_log_item = sxi_lip;
	sxd_lip->sxd_format.sxd_sxi_id = sxi_lip->sxi_format.sxi_id;

	xfs_trans_add_item(tp, &sxd_lip->sxd_item);
	return sxd_lip;
}
229 :
/*
 * Finish an swapext update and log it to the SXD. Note that the transaction is
 * marked dirty regardless of whether the swapext update succeeds or fails to
 * support the SXI/SXD lifecycle rules.
 *
 * @done may be NULL when the operation is not being tracked with log items.
 * Returns 0, -EAGAIN if more extents remain to be swapped, or a negative
 * errno from xfs_swapext_finish_one().
 */
static int
xfs_swapext_finish_update(
	struct xfs_trans		*tp,
	struct xfs_log_item		*done,
	struct xfs_swapext_intent	*sxi)
{
	int				error;

	error = xfs_swapext_finish_one(tp, sxi);

	/*
	 * Mark the transaction dirty, even on error. This ensures the
	 * transaction is aborted, which:
	 *
	 * 1.) releases the SXI and frees the SXD
	 * 2.) shuts down the filesystem
	 */
	tp->t_flags |= XFS_TRANS_DIRTY;
	if (done)
		set_bit(XFS_LI_DIRTY, &done->li_flags);

	return error;
}
258 :
/*
 * Log swapext updates in the intent item.  Returns NULL when the operation
 * is not logged (no SXI is created), otherwise the new SXI log item.
 */
STATIC struct xfs_log_item *
xfs_swapext_create_intent(
	struct xfs_trans		*tp,
	struct list_head		*items,
	unsigned int			count,
	bool				sort)
{
	struct xfs_sxi_log_item		*sxi_lip;
	struct xfs_swapext_intent	*sxi;
	struct xfs_swap_extent		*sx;

	/* max_items in xfs_swapext_defer_type is 1, so count must be too. */
	ASSERT(count == 1);

	sxi = list_first_entry_or_null(items, struct xfs_swapext_intent,
			sxi_list);

	/*
	 * We use the same defer ops control machinery to perform extent swaps
	 * even if we aren't using the machinery to track the operation status
	 * through log items.
	 */
	if (!(sxi->sxi_op_flags & XFS_SWAP_EXT_OP_LOGGED))
		return NULL;

	sxi_lip = xfs_sxi_init(tp->t_mountp);
	xfs_trans_add_item(tp, &sxi_lip->sxi_item);
	tp->t_flags |= XFS_TRANS_DIRTY;
	set_bit(XFS_LI_DIRTY, &sxi_lip->sxi_item.li_flags);

	/* Copy the incore intent state into the on-disk log format. */
	sx = &sxi_lip->sxi_format.sxi_extent;
	sx->sx_inode1 = sxi->sxi_ip1->i_ino;
	sx->sx_inode2 = sxi->sxi_ip2->i_ino;
	sx->sx_startoff1 = sxi->sxi_startoff1;
	sx->sx_startoff2 = sxi->sxi_startoff2;
	sx->sx_blockcount = sxi->sxi_blockcount;
	sx->sx_isize1 = sxi->sxi_isize1;
	sx->sx_isize2 = sxi->sxi_isize2;
	sx->sx_flags = sxi->sxi_flags;

	return &sxi_lip->sxi_item;
}
301 :
302 : STATIC struct xfs_log_item *
303 884374 : xfs_swapext_create_done(
304 : struct xfs_trans *tp,
305 : struct xfs_log_item *intent,
306 : unsigned int count)
307 : {
308 884374 : if (intent == NULL)
309 : return NULL;
310 537590 : return &xfs_trans_get_sxd(tp, SXI_ITEM(intent))->sxd_item;
311 : }
312 :
/* Process a deferred swapext update. */
STATIC int
xfs_swapext_finish_item(
	struct xfs_trans		*tp,
	struct xfs_log_item		*done,
	struct list_head		*item,
	struct xfs_btree_cur		**state)
{
	struct xfs_swapext_intent	*sxi;
	int				error;

	sxi = container_of(item, struct xfs_swapext_intent, sxi_list);

	/*
	 * Swap one more extent between the two files. If there's still more
	 * work to do, we want to requeue ourselves after all other pending
	 * deferred operations have finished. This includes all of the dfops
	 * that we queued directly as well as any new ones created in the
	 * process of finishing the others. Doing so prevents us from queuing
	 * a large number of SXI log items in kernel memory, which in turn
	 * prevents us from pinning the tail of the log (while logging those
	 * new SXI items) until the first SXI items can be processed.
	 */
	error = xfs_swapext_finish_update(tp, done, sxi);
	if (error == -EAGAIN)
		return error;	/* requeued; sxi stays alive for the retry */

	/* Done (or hard failure): the incore intent is no longer needed. */
	kmem_cache_free(xfs_swapext_intent_cache, sxi);
	return error;
}
343 :
344 : /* Abort all pending SXIs. */
345 : STATIC void
346 6 : xfs_swapext_abort_intent(
347 : struct xfs_log_item *intent)
348 : {
349 6 : xfs_sxi_release(SXI_ITEM(intent));
350 6 : }
351 :
352 : /* Cancel a deferred swapext update. */
353 : STATIC void
354 12 : xfs_swapext_cancel_item(
355 : struct list_head *item)
356 : {
357 12 : struct xfs_swapext_intent *sxi;
358 :
359 12 : sxi = container_of(item, struct xfs_swapext_intent, sxi_list);
360 12 : kmem_cache_free(xfs_swapext_intent_cache, sxi);
361 12 : }
362 :
/* Deferred-operation type for extent swaps; one swap intent per SXI. */
const struct xfs_defer_op_type xfs_swapext_defer_type = {
	.max_items	= 1,
	.create_intent	= xfs_swapext_create_intent,
	.abort_intent	= xfs_swapext_abort_intent,
	.create_done	= xfs_swapext_create_done,
	.finish_item	= xfs_swapext_finish_item,
	.cancel_item	= xfs_swapext_cancel_item,
};
371 :
/* Is this recovered SXI ok? */
static inline bool
xfs_sxi_validate(
	struct xfs_mount		*mp,
	struct xfs_sxi_log_item		*sxi_lip)
{
	struct xfs_swap_extent		*sx = &sxi_lip->sxi_format.sxi_extent;

	/* The feature bit must be set before we replay a logged swap. */
	if (!xfs_sb_version_haslogswapext(&mp->m_sb))
		return false;

	/* Padding must be zero in the on-disk format. */
	if (sxi_lip->sxi_format.__pad != 0)
		return false;

	if (sx->sx_flags & ~XFS_SWAP_EXT_FLAGS)
		return false;

	if (!xfs_verify_ino(mp, sx->sx_inode1) ||
	    !xfs_verify_ino(mp, sx->sx_inode2))
		return false;

	/* Logged file sizes only matter when we're told to set them. */
	if ((sx->sx_flags & XFS_SWAP_EXT_SET_SIZES) &&
	     (sx->sx_isize1 < 0 || sx->sx_isize2 < 0))
		return false;

	if (!xfs_verify_fileext(mp, sx->sx_startoff1, sx->sx_blockcount))
		return false;

	return xfs_verify_fileext(mp, sx->sx_startoff2, sx->sx_blockcount);
}
402 :
/*
 * Use the recovered log state to create a new request, estimate resource
 * requirements, and create a new incore intent state.
 *
 * On success both inodes are grabbed and referenced; on failure all
 * references taken here are dropped and an ERR_PTR is returned.
 */
STATIC struct xfs_swapext_intent *
xfs_sxi_item_recover_intent(
	struct xfs_mount		*mp,
	const struct xfs_swap_extent	*sx,
	struct xfs_swapext_req		*req,
	unsigned int			*reflink_state)
{
	struct xfs_inode		*ip1, *ip2;
	int				error;

	/*
	 * Grab both inodes and set IRECOVERY to prevent trimming of post-eof
	 * extents and freeing of unlinked inodes until we're totally done
	 * processing files.
	 */
	error = xlog_recover_iget(mp, sx->sx_inode1, &ip1);
	if (error)
		return ERR_PTR(error);
	error = xlog_recover_iget(mp, sx->sx_inode2, &ip2);
	if (error)
		goto err_rele1;

	/* Rebuild the swap request from the logged state. */
	req->ip1 = ip1;
	req->ip2 = ip2;
	req->startoff1 = sx->sx_startoff1;
	req->startoff2 = sx->sx_startoff2;
	req->blockcount = sx->sx_blockcount;

	if (sx->sx_flags & XFS_SWAP_EXT_ATTR_FORK)
		req->whichfork = XFS_ATTR_FORK;
	else
		req->whichfork = XFS_DATA_FORK;

	if (sx->sx_flags & XFS_SWAP_EXT_SET_SIZES)
		req->req_flags |= XFS_SWAP_REQ_SET_SIZES;
	if (sx->sx_flags & XFS_SWAP_EXT_INO1_WRITTEN)
		req->req_flags |= XFS_SWAP_REQ_INO1_WRITTEN;
	req->req_flags |= XFS_SWAP_REQ_LOGGED;

	/* Estimation requires both inodes locked. */
	xfs_xchg_range_ilock(NULL, ip1, ip2);
	error = xfs_swapext_estimate(req);
	xfs_xchg_range_iunlock(ip1, ip2);
	if (error)
		goto err_rele2;

	return xfs_swapext_init_intent(req, reflink_state);

err_rele2:
	xfs_irele(ip2);
err_rele1:
	xfs_irele(ip1);
	return ERR_PTR(error);
}
460 :
/* Process a swapext update intent item that was recovered from the log. */
STATIC int
xfs_sxi_item_recover(
	struct xfs_log_item		*lip,
	struct list_head		*capture_list)
{
	struct xfs_swapext_req		req = { .req_flags = 0 };
	struct xfs_swapext_intent	*sxi;
	struct xfs_sxi_log_item		*sxi_lip = SXI_ITEM(lip);
	struct xfs_mount		*mp = lip->li_log->l_mp;
	struct xfs_swap_extent		*sx = &sxi_lip->sxi_format.sxi_extent;
	struct xfs_sxd_log_item		*sxd_lip = NULL;
	struct xfs_trans		*tp;
	struct xfs_inode		*ip1, *ip2;
	unsigned int			reflink_state;
	int				error = 0;

	/* Reject corrupt intents before touching any inodes. */
	if (!xfs_sxi_validate(mp, sxi_lip)) {
		XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp,
				&sxi_lip->sxi_format,
				sizeof(sxi_lip->sxi_format));
		return -EFSCORRUPTED;
	}

	sxi = xfs_sxi_item_recover_intent(mp, sx, &req, &reflink_state);
	if (IS_ERR(sxi))
		return PTR_ERR(sxi);

	trace_xfs_swapext_recover(mp, sxi);

	ip1 = sxi->sxi_ip1;
	ip2 = sxi->sxi_ip2;

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, req.resblks, 0, 0,
			&tp);
	if (error)
		goto err_rele;

	sxd_lip = xfs_trans_get_sxd(tp, sxi_lip);

	xfs_xchg_range_ilock(tp, ip1, ip2);

	xfs_swapext_ensure_reflink(tp, sxi, reflink_state);
	error = xfs_swapext_finish_update(tp, &sxd_lip->sxd_item, sxi);
	if (error == -EAGAIN) {
		/*
		 * If there's more extent swapping to be done, we have to
		 * schedule that as a separate deferred operation to be run
		 * after we've finished replaying all of the intents we
		 * recovered from the log. Transfer ownership of the sxi to
		 * the transaction.
		 */
		xfs_swapext_schedule(tp, sxi);
		error = 0;
		sxi = NULL;
	}
	if (error == -EFSCORRUPTED)
		XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp, sx,
				sizeof(*sx));
	if (error)
		goto err_cancel;

	/*
	 * Commit transaction, which frees the transaction and saves the inodes
	 * for later replay activities.
	 */
	error = xfs_defer_ops_capture_and_commit(tp, capture_list);
	goto err_unlock;

err_cancel:
	xfs_trans_cancel(tp);
err_unlock:
	xfs_xchg_range_iunlock(ip1, ip2);
err_rele:
	/* sxi is NULL if ownership moved to the transaction above. */
	if (sxi)
		kmem_cache_free(xfs_swapext_intent_cache, sxi);
	xfs_irele(ip2);
	xfs_irele(ip1);
	return error;
}
541 :
542 : STATIC bool
543 396 : xfs_sxi_item_match(
544 : struct xfs_log_item *lip,
545 : uint64_t intent_id)
546 : {
547 396 : return SXI_ITEM(lip)->sxi_format.sxi_id == intent_id;
548 : }
549 :
/* Relog an intent item to push the log tail forward. */
static struct xfs_log_item *
xfs_sxi_item_relog(
	struct xfs_log_item	*intent,
	struct xfs_trans	*tp)
{
	struct xfs_sxd_log_item	*sxd_lip;
	struct xfs_sxi_log_item	*sxi_lip;
	struct xfs_swap_extent	*sx;

	sx = &SXI_ITEM(intent)->sxi_format.sxi_extent;

	/* Log an SXD to retire the old intent... */
	tp->t_flags |= XFS_TRANS_DIRTY;
	sxd_lip = xfs_trans_get_sxd(tp, SXI_ITEM(intent));
	set_bit(XFS_LI_DIRTY, &sxd_lip->sxd_item.li_flags);

	/* ...and log a fresh SXI carrying the same swap description. */
	sxi_lip = xfs_sxi_init(tp->t_mountp);
	memcpy(&sxi_lip->sxi_format.sxi_extent, sx, sizeof(*sx));
	xfs_trans_add_item(tp, &sxi_lip->sxi_item);
	set_bit(XFS_LI_DIRTY, &sxi_lip->sxi_item.li_flags);
	return &sxi_lip->sxi_item;
}
572 :
/* Ops vector for the SXI (intent) log item. */
static const struct xfs_item_ops xfs_sxi_item_ops = {
	.flags		= XFS_ITEM_INTENT,
	.iop_size	= xfs_sxi_item_size,
	.iop_format	= xfs_sxi_item_format,
	.iop_unpin	= xfs_sxi_item_unpin,
	.iop_release	= xfs_sxi_item_release,
	.iop_recover	= xfs_sxi_item_recover,
	.iop_match	= xfs_sxi_item_match,
	.iop_relog	= xfs_sxi_item_relog,
};
583 :
584 : /*
585 : * This routine is called to create an in-core extent swapext update item from
586 : * the sxi format structure which was logged on disk. It allocates an in-core
587 : * sxi, copies the extents from the format structure into it, and adds the sxi
588 : * to the AIL with the given LSN.
589 : */
590 : STATIC int
591 402 : xlog_recover_sxi_commit_pass2(
592 : struct xlog *log,
593 : struct list_head *buffer_list,
594 : struct xlog_recover_item *item,
595 : xfs_lsn_t lsn)
596 : {
597 402 : struct xfs_mount *mp = log->l_mp;
598 402 : struct xfs_sxi_log_item *sxi_lip;
599 402 : struct xfs_sxi_log_format *sxi_formatp;
600 402 : size_t len;
601 :
602 402 : sxi_formatp = item->ri_buf[0].i_addr;
603 :
604 402 : len = sizeof(struct xfs_sxi_log_format);
605 402 : if (item->ri_buf[0].i_len != len) {
606 0 : XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, log->l_mp);
607 0 : return -EFSCORRUPTED;
608 : }
609 :
610 402 : if (sxi_formatp->__pad != 0) {
611 0 : XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, log->l_mp);
612 0 : return -EFSCORRUPTED;
613 : }
614 :
615 402 : sxi_lip = xfs_sxi_init(mp);
616 804 : memcpy(&sxi_lip->sxi_format, sxi_formatp, len);
617 :
618 402 : xfs_trans_ail_insert(log->l_ailp, &sxi_lip->sxi_item, lsn);
619 402 : xfs_sxi_release(sxi_lip);
620 402 : return 0;
621 : }
622 :
/* Log recovery dispatch entry for SXI items. */
const struct xlog_recover_item_ops xlog_sxi_item_ops = {
	.item_type	= XFS_LI_SXI,
	.commit_pass2	= xlog_recover_sxi_commit_pass2,
};
627 :
628 : /*
629 : * This routine is called when an SXD format structure is found in a committed
630 : * transaction in the log. Its purpose is to cancel the corresponding SXI if it
631 : * was still in the log. To do this it searches the AIL for the SXI with an id
632 : * equal to that in the SXD format structure. If we find it we drop the SXD
633 : * reference, which removes the SXI from the AIL and frees it.
634 : */
635 : STATIC int
636 396 : xlog_recover_sxd_commit_pass2(
637 : struct xlog *log,
638 : struct list_head *buffer_list,
639 : struct xlog_recover_item *item,
640 : xfs_lsn_t lsn)
641 : {
642 396 : struct xfs_sxd_log_format *sxd_formatp;
643 :
644 396 : sxd_formatp = item->ri_buf[0].i_addr;
645 396 : if (item->ri_buf[0].i_len != sizeof(struct xfs_sxd_log_format)) {
646 0 : XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, log->l_mp);
647 0 : return -EFSCORRUPTED;
648 : }
649 :
650 396 : xlog_recover_release_intent(log, XFS_LI_SXI, sxd_formatp->sxd_sxi_id);
651 396 : return 0;
652 : }
653 :
/* Log recovery dispatch entry for SXD items. */
const struct xlog_recover_item_ops xlog_sxd_item_ops = {
	.item_type	= XFS_LI_SXD,
	.commit_pass2	= xlog_recover_sxd_commit_pass2,
};
|