Line data Source code
1 : // SPDX-License-Identifier: GPL-2.0+
2 : /*
3 : * Copyright (C) 2016 Oracle. All Rights Reserved.
4 : * Author: Darrick J. Wong <darrick.wong@oracle.com>
5 : */
6 : #include "xfs.h"
7 : #include "xfs_fs.h"
8 : #include "xfs_shared.h"
9 : #include "xfs_format.h"
10 : #include "xfs_log_format.h"
11 : #include "xfs_trans_resv.h"
12 : #include "xfs_mount.h"
13 : #include "xfs_defer.h"
14 : #include "xfs_trans.h"
15 : #include "xfs_buf_item.h"
16 : #include "xfs_inode.h"
17 : #include "xfs_inode_item.h"
18 : #include "xfs_trace.h"
19 : #include "xfs_icache.h"
20 : #include "xfs_log.h"
21 : #include "xfs_rmap.h"
22 : #include "xfs_refcount.h"
23 : #include "xfs_bmap.h"
24 : #include "xfs_alloc.h"
25 : #include "xfs_buf.h"
26 : #include "xfs_da_format.h"
27 : #include "xfs_da_btree.h"
28 : #include "xfs_attr.h"
29 :
30 : static struct kmem_cache *xfs_defer_pending_cache;
31 :
32 : /*
33 : * Deferred Operations in XFS
34 : *
35 : * Due to the way locking rules work in XFS, certain transactions (block
36 : * mapping and unmapping, typically) have permanent reservations so that
37 : * we can roll the transaction to adhere to AG locking order rules and
38 : * to unlock buffers between metadata updates. Prior to rmap/reflink,
39 : * the mapping code had a mechanism to perform these deferrals for
40 : * extents that were going to be freed; this code makes that facility
41 : * more generic.
42 : *
43 : * When adding the reverse mapping and reflink features, it became
44 : * necessary to perform complex multi-transaction remappings to comply
45 : * with AG locking order rules, and to be able to spread a single
46 : * refcount update operation (an operation on an n-block extent can
47 : * update as many as n records!) among multiple transactions. XFS can
48 : * roll a transaction to facilitate this, but using this facility
49 : * requires us to log "intent" items in case log recovery needs to
50 : * redo the operation, and to log "done" items to indicate that redo
51 : * is not necessary.
52 : *
53 : * Deferred work is tracked in xfs_defer_pending items. Each pending
54 : * item tracks one type of deferred work. Incoming work items (which
55 : * have not yet had an intent logged) are attached to a pending item
56 : * on the dop_intake list, where they wait for the caller to finish
57 : * the deferred operations.
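 *
 * For illustration only (not actual xfs_defer.c code), here is a minimal
 * sketch of the pending-item structure as this file uses it. The
 * authoritative definition lives in xfs_defer.h; the types shown below are
 * inferred from the call sites in this file.
 *
 *	struct xfs_defer_pending {
 *		struct list_head	dfp_list;	(entry on t_dfops or dop_pending)
 *		struct list_head	dfp_work;	(work items for this intent)
 *		struct xfs_log_item	*dfp_intent;	(log intent item, once logged)
 *		struct xfs_log_item	*dfp_done;	(log done item, once created)
 *		unsigned int		dfp_count;	(number of attached work items)
 *		enum xfs_defer_ops_type	dfp_type;	(index into defer_op_types[])
 *	};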
58 : *
59 : * Finishing a set of deferred operations is an involved process. To
60 : * start, we define "rolling a deferred-op transaction" as follows:
61 : *
62 : * > For each xfs_defer_pending item on the dop_intake list,
63 : * - Sort the work items in AG order. XFS locking
64 : * order rules require us to lock buffers in AG order.
65 : * - Create a log intent item for that type.
66 : * - Attach it to the pending item.
67 : * - Move the pending item from the dop_intake list to the
68 : * dop_pending list.
69 : * > Roll the transaction.
70 : *
71 : * NOTE: To avoid exceeding the transaction reservation, we limit the
72 : * number of items that we attach to a given xfs_defer_pending.
73 : *
74 : * The actual finishing process looks like this:
75 : *
76 : * > For each xfs_defer_pending in the dop_pending list,
77 : * - Roll the deferred-op transaction as above.
78 : * - Create a log done item for that type, and attach it to the
79 : * log intent item.
80 : * - For each work item attached to the log intent item,
81 : * * Perform the described action.
82 : * * Attach the work item to the log done item.
83 : * * If the result of doing the work was -EAGAIN, ->finish_item
84 : * wants a new transaction. See the "Requesting a Fresh
85 : * Transaction while Finishing Deferred Work" section below for
86 : * details.
87 : *
88 : * The key here is that we must log an intent item for all pending
89 : * work items every time we roll the transaction, and that we must log
90 : * a done item as soon as the work is completed. With this mechanism
91 : * we can perform complex remapping operations, chaining intent items
92 : * as needed.
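 *
 * As a compressed pseudo-C view of the loop just described (an illustrative
 * sketch only; the real control flow is xfs_defer_finish_noroll below, which
 * skips the roll and relog steps when nothing new was logged):
 *
 *	while (work remains on t_dfops or dop_pending) {
 *		xfs_defer_create_intents(tp);		(log intents for new work)
 *		splice t_dfops onto dop_pending;
 *		xfs_defer_trans_roll(&tp);		(commit the intents)
 *		xfs_defer_relog(&tp, &dop_pending);	(keep the log tail moving)
 *		xfs_defer_finish_one(tp, first pending item);
 *	}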
93 : *
94 : * Requesting a Fresh Transaction while Finishing Deferred Work
95 : *
96 : * If ->finish_item decides that it needs a fresh transaction to
97 : * finish the work, it must ask its caller (xfs_defer_finish) for a
98 : * continuation. The most likely cause of this circumstance is the
99 : * refcount adjust functions deciding that they've logged enough items
100 : * to be at risk of exceeding the transaction reservation.
101 : *
102 : * To get a fresh transaction, we want to log the existing log done
103 : * item to prevent the log intent item from replaying, immediately log
104 : * a new log intent item with the unfinished work items, roll the
105 : * transaction, and re-call ->finish_item wherever it left off. The
106 : * log done item and the new log intent item must be in the same
107 : * transaction or atomicity cannot be guaranteed; defer_finish ensures
108 : * that this happens.
109 : *
110 : * This requires some coordination between ->finish_item and
111 : * defer_finish. Upon deciding to request a new transaction,
112 : * ->finish_item should update the current work item to reflect the
113 : * unfinished work. Next, it should reset the log done item's list
114 : * count to the number of items finished, and return -EAGAIN.
115 : * defer_finish sees the -EAGAIN, logs the new log intent item
116 : * with the remaining work items, and leaves the xfs_defer_pending
117 : * item at the head of the dop_pending queue. Then it rolls the
118 : * transaction and picks up processing where it left off.
119 : * ->finish_item must be careful to leave enough transaction
120 : * reservation to fit the new log intent item.
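 *
 * A hedged sketch of a ->finish_item implementation that follows this
 * protocol; the work item type and the helpers are hypothetical, and only
 * the calling convention matches the call site in xfs_defer_finish_one
 * below. The trim helper is assumed to update the work item to describe
 * only the unfinished work (and to trim the done item's count) before
 * -EAGAIN is passed back:
 *
 *	STATIC int
 *	xfs_example_finish_item(struct xfs_trans *tp, struct xfs_log_item *done,
 *			struct list_head *item, struct xfs_btree_cur **state)
 *	{
 *		struct xfs_example_work	*w;
 *		int			error;
 *
 *		w = container_of(item, struct xfs_example_work, ew_list);
 *		error = xfs_example_do_some_work(tp, done, w, state);
 *		if (error == -EAGAIN) {
 *			xfs_example_trim_work(done, w);	(describe what is left)
 *			return -EAGAIN;
 *		}
 *		return error;
 *	}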
121 : *
122 : * This is an example of remapping the extent (E, E+B) into file X at
123 : * offset A and dealing with the extent (C, C+B) already being mapped
124 : * there:
125 : * +-------------------------------------------------+
126 : * | Unmap file X startblock C offset A length B | t0
127 : * | Intent to reduce refcount for extent (C, B) |
128 : * | Intent to remove rmap (X, C, A, B) |
129 : * | Intent to free extent (D, 1) (bmbt block) |
130 : * | Intent to map (X, A, B) at startblock E |
131 : * +-------------------------------------------------+
132 : * | Map file X startblock E offset A length B | t1
133 : * | Done mapping (X, E, A, B) |
134 : * | Intent to increase refcount for extent (E, B) |
135 : * | Intent to add rmap (X, E, A, B) |
136 : * +-------------------------------------------------+
137 : * | Reduce refcount for extent (C, B) | t2
138 : * | Done reducing refcount for extent (C, 9) |
139 : * | Intent to reduce refcount for extent (C+9, B-9) |
140 : * | (ran out of space after 9 refcount updates) |
141 : * +-------------------------------------------------+
142 : * | Reduce refcount for extent (C+9, B-9) | t3
143 : * | Done reducing refcount for extent (C+9, B-9) |
144 : * | Increase refcount for extent (E, B) |
145 : * | Done increasing refcount for extent (E, B) |
146 : * | Intent to free extent (C, B) |
147 : * | Intent to free extent (F, 1) (refcountbt block) |
148 : * | Intent to remove rmap (F, 1, REFC) |
149 : * +-------------------------------------------------+
150 : * | Remove rmap (X, C, A, B) | t4
151 : * | Done removing rmap (X, C, A, B) |
152 : * | Add rmap (X, E, A, B) |
153 : * | Done adding rmap (X, E, A, B) |
154 : * | Remove rmap (F, 1, REFC) |
155 : * | Done removing rmap (F, 1, REFC) |
156 : * +-------------------------------------------------+
157 : * | Free extent (C, B) | t5
158 : * | Done freeing extent (C, B) |
159 : * | Free extent (D, 1) |
160 : * | Done freeing extent (D, 1) |
161 : * | Free extent (F, 1) |
162 : * | Done freeing extent (F, 1) |
163 : * +-------------------------------------------------+
164 : *
165 : * If we should crash before t2 commits, log recovery replays
166 : * the following intent items:
167 : *
168 : * - Intent to reduce refcount for extent (C, B)
169 : * - Intent to remove rmap (X, C, A, B)
170 : * - Intent to free extent (D, 1) (bmbt block)
171 : * - Intent to increase refcount for extent (E, B)
172 : * - Intent to add rmap (X, E, A, B)
173 : *
174 : * In the process of recovering, it should also generate and take care
175 : * of these intent items:
176 : *
177 : * - Intent to free extent (C, B)
178 : * - Intent to free extent (F, 1) (refcountbt block)
179 : * - Intent to remove rmap (F, 1, REFC)
180 : *
181 : * Note that the continuation requested between t2 and t3 is likely to
182 : * reoccur.
183 : */
184 :
185 : static const struct xfs_defer_op_type *defer_op_types[] = {
186 : [XFS_DEFER_OPS_TYPE_BMAP] = &xfs_bmap_update_defer_type,
187 : [XFS_DEFER_OPS_TYPE_REFCOUNT] = &xfs_refcount_update_defer_type,
188 : [XFS_DEFER_OPS_TYPE_RMAP] = &xfs_rmap_update_defer_type,
189 : [XFS_DEFER_OPS_TYPE_FREE] = &xfs_extent_free_defer_type,
190 : [XFS_DEFER_OPS_TYPE_AGFL_FREE] = &xfs_agfl_free_defer_type,
191 : [XFS_DEFER_OPS_TYPE_ATTR] = &xfs_attr_defer_type,
192 : };
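
/*
 * For illustration only (not actual xfs_defer.c code): the callbacks of
 * struct xfs_defer_op_type that this file invokes, with signatures inferred
 * from the call sites below. The authoritative definition is in xfs_defer.h.
 *
 *	struct xfs_defer_op_type {
 *		struct xfs_log_item *(*create_intent)(struct xfs_trans *tp,
 *				struct list_head *items, unsigned int count,
 *				bool sort);
 *		void (*abort_intent)(struct xfs_log_item *intent);
 *		struct xfs_log_item *(*create_done)(struct xfs_trans *tp,
 *				struct xfs_log_item *intent, unsigned int count);
 *		int (*finish_item)(struct xfs_trans *tp,
 *				struct xfs_log_item *done, struct list_head *item,
 *				struct xfs_btree_cur **state);
 *		void (*finish_cleanup)(struct xfs_trans *tp,
 *				struct xfs_btree_cur *state, int error);
 *		void (*cancel_item)(struct list_head *item);
 *		unsigned int		max_items;
 *	};
 */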
193 :
194 : /*
195 : * Ensure there's a log intent item associated with this deferred work item if
196 : * the operation must be restarted on crash. Returns 1 if there's a log item;
197 : * 0 if there isn't; or a negative errno.
198 : */
199 : static int
200 816766217 : xfs_defer_create_intent(
201 : struct xfs_trans *tp,
202 : struct xfs_defer_pending *dfp,
203 : bool sort)
204 : {
205 816766217 : const struct xfs_defer_op_type *ops = defer_op_types[dfp->dfp_type];
206 816757039 : struct xfs_log_item *lip;
207 :
208 816757039 : if (dfp->dfp_intent)
209 : return 1;
210 :
211 816822957 : lip = ops->create_intent(tp, &dfp->dfp_work, dfp->dfp_count, sort);
212 816935781 : if (!lip)
213 : return 0;
214 700254823 : if (IS_ERR(lip))
215 0 : return PTR_ERR(lip);
216 :
217 700254823 : dfp->dfp_intent = lip;
218 700254823 : return 1;
219 : }
220 :
221 : /*
222 : * For each pending item in the transaction's intake (t_dfops) list, log
223 : * its intent item and the associated extents. The caller then splices
224 : * the intake list onto its pending list.
225 : *
226 : * Returns 1 if at least one log item was associated with the deferred work;
227 : * 0 if there are no log items; or a negative errno.
228 : */
229 : static int
230 816839872 : xfs_defer_create_intents(
231 : struct xfs_trans *tp)
232 : {
233 816839872 : struct xfs_defer_pending *dfp;
234 816839872 : int ret = 0;
235 :
236 1607803783 : list_for_each_entry(dfp, &tp->t_dfops, dfp_list) {
237 790889638 : int ret2;
238 :
239 790889638 : trace_xfs_defer_create_intent(tp->t_mountp, dfp);
240 790735982 : ret2 = xfs_defer_create_intent(tp, dfp, true);
241 790963911 : if (ret2 < 0)
242 0 : return ret2;
243 790963911 : ret |= ret2;
244 : }
245 : return ret;
246 : }
247 :
248 : STATIC void
249 5126 : xfs_defer_pending_abort(
250 : struct xfs_mount *mp,
251 : struct list_head *dop_list)
252 : {
253 5126 : struct xfs_defer_pending *dfp;
254 5126 : const struct xfs_defer_op_type *ops;
255 :
256 : /* Abort intent items that don't have a done item. */
257 13805 : list_for_each_entry(dfp, dop_list, dfp_list) {
258 8679 : ops = defer_op_types[dfp->dfp_type];
259 8679 : trace_xfs_defer_pending_abort(mp, dfp);
260 8679 : if (dfp->dfp_intent && !dfp->dfp_done) {
261 3734 : ops->abort_intent(dfp->dfp_intent);
262 3734 : dfp->dfp_intent = NULL;
263 : }
264 : }
265 5126 : }
266 :
267 : /* Abort all the intents that were committed. */
268 : STATIC void
269 5126 : xfs_defer_trans_abort(
270 : struct xfs_trans *tp,
271 : struct list_head *dop_pending)
272 : {
273 5126 : trace_xfs_defer_trans_abort(tp, _RET_IP_);
274 5126 : xfs_defer_pending_abort(tp->t_mountp, dop_pending);
275 5126 : }
276 :
277 : /*
278 : * Capture resources that the caller said not to release ("held") when the
279 : * transaction commits. Caller is responsible for zero-initializing @dres.
280 : */
281 : static int
282 889456779 : xfs_defer_save_resources(
283 : struct xfs_defer_resources *dres,
284 : struct xfs_trans *tp)
285 : {
286 889456779 : struct xfs_buf_log_item *bli;
287 889456779 : struct xfs_inode_log_item *ili;
288 889456779 : struct xfs_log_item *lip;
289 :
290 889456779 : BUILD_BUG_ON(NBBY * sizeof(dres->dr_ordered) < XFS_DEFER_OPS_NR_BUFS);
291 :
292 4512596781 : list_for_each_entry(lip, &tp->t_items, li_trans) {
293 3623016812 : switch (lip->li_type) {
294 1511557538 : case XFS_LI_BUF:
295 1511557538 : bli = container_of(lip, struct xfs_buf_log_item,
296 : bli_item);
297 1511557538 : if (bli->bli_flags & XFS_BLI_HOLD) {
298 2321460 : if (dres->dr_bufs >= XFS_DEFER_OPS_NR_BUFS) {
299 0 : ASSERT(0);
300 0 : return -EFSCORRUPTED;
301 : }
302 2321460 : if (bli->bli_flags & XFS_BLI_ORDERED)
303 49068 : dres->dr_ordered |=
304 49068 : (1U << dres->dr_bufs);
305 : else
306 2272392 : xfs_trans_dirty_buf(tp, bli->bli_buf);
307 2321719 : dres->dr_bp[dres->dr_bufs++] = bli->bli_buf;
308 : }
309 : break;
310 927520072 : case XFS_LI_INODE:
311 927520072 : ili = container_of(lip, struct xfs_inode_log_item,
312 : ili_item);
313 927520072 : if (ili->ili_lock_flags == 0) {
314 882613563 : if (dres->dr_inos >= XFS_DEFER_OPS_NR_INODES) {
315 0 : ASSERT(0);
316 0 : return -EFSCORRUPTED;
317 : }
318 882613563 : xfs_trans_log_inode(tp, ili->ili_inode,
319 : XFS_ILOG_CORE);
320 882724482 : dres->dr_ip[dres->dr_inos++] = ili->ili_inode;
321 : }
322 : break;
323 : default:
324 : break;
325 : }
326 : }
327 :
328 : return 0;
329 : }
330 :
331 : /* Attach the held resources to the transaction. */
332 : static void
333 889615039 : xfs_defer_restore_resources(
334 : struct xfs_trans *tp,
335 : struct xfs_defer_resources *dres)
336 : {
337 889615039 : unsigned short i;
338 :
339 : /* Rejoin the joined inodes. */
340 1772289253 : for (i = 0; i < dres->dr_inos; i++)
341 882682094 : xfs_trans_ijoin(tp, dres->dr_ip[i], 0);
342 :
343 : /* Rejoin the buffers and dirty them so the log moves forward. */
344 891927245 : for (i = 0; i < dres->dr_bufs; i++) {
345 2321472 : xfs_trans_bjoin(tp, dres->dr_bp[i]);
346 2321243 : if (dres->dr_ordered & (1U << i))
347 49068 : xfs_trans_ordered_buf(tp, dres->dr_bp[i]);
348 2321246 : xfs_trans_bhold(tp, dres->dr_bp[i]);
349 : }
350 889605773 : }
351 :
352 : /* Roll a transaction so we can do some deferred op processing. */
353 : STATIC int
354 889485632 : xfs_defer_trans_roll(
355 : struct xfs_trans **tpp)
356 : {
357 889485632 : struct xfs_defer_resources dres = { };
358 889485632 : int error;
359 :
360 889485632 : error = xfs_defer_save_resources(&dres, *tpp);
361 889568252 : if (error)
362 : return error;
363 :
364 889578122 : trace_xfs_defer_trans_roll(*tpp, _RET_IP_);
365 :
366 : /*
367 : * Roll the transaction. Rolling always gives a new transaction (even
368 : * if committing the old one fails!) to hand back to the caller, so we
369 : * join the held resources to the new transaction so that we always
370 : * return with the held resources joined to @tpp, no matter what
371 : * happened.
372 : */
373 889467510 : error = xfs_trans_roll(tpp);
374 :
375 889622529 : xfs_defer_restore_resources(*tpp, &dres);
376 :
377 889601224 : if (error)
378 187 : trace_xfs_defer_trans_roll_error(*tpp, error);
379 : return error;
380 : }
381 :
382 : /*
383 : * Free up any items left in the list.
384 : */
385 : static void
386 10616 : xfs_defer_cancel_list(
387 : struct xfs_mount *mp,
388 : struct list_head *dop_list)
389 : {
390 10616 : struct xfs_defer_pending *dfp;
391 10616 : struct xfs_defer_pending *pli;
392 10616 : struct list_head *pwi;
393 10616 : struct list_head *n;
394 10616 : const struct xfs_defer_op_type *ops;
395 :
396 : /*
397 : * Free the pending items. Caller should already have arranged
398 : * for the intent items to be released.
399 : */
400 19383 : list_for_each_entry_safe(dfp, pli, dop_list, dfp_list) {
401 8767 : ops = defer_op_types[dfp->dfp_type];
402 8768 : trace_xfs_defer_cancel_list(mp, dfp);
403 8767 : list_del(&dfp->dfp_list);
404 12621 : list_for_each_safe(pwi, n, &dfp->dfp_work) {
405 3853 : list_del(pwi);
406 3853 : dfp->dfp_count--;
407 3853 : trace_xfs_defer_cancel_item(mp, dfp, pwi);
408 3853 : ops->cancel_item(pwi);
409 : }
410 8768 : ASSERT(dfp->dfp_count == 0);
411 8768 : kmem_cache_free(xfs_defer_pending_cache, dfp);
412 : }
413 10616 : }
414 :
415 : /*
416 : * Prevent a log intent item from pinning the tail of the log by logging a
417 : * done item to release the intent item; and then log a new intent item.
418 : * The caller should provide a fresh transaction and roll it after we're done.
419 : */
420 : static int
421 725975952 : xfs_defer_relog(
422 : struct xfs_trans **tpp,
423 : struct list_head *dfops)
424 : {
425 725975952 : struct xlog *log = (*tpp)->t_mountp->m_log;
426 725975952 : struct xfs_defer_pending *dfp;
427 725975952 : xfs_lsn_t threshold_lsn = NULLCOMMITLSN;
428 :
429 :
430 725975952 : ASSERT((*tpp)->t_flags & XFS_TRANS_PERM_LOG_RES);
431 :
432 1972763353 : list_for_each_entry(dfp, dfops, dfp_list) {
433 : /*
434 : * If the log intent item for this deferred op is not a part of
435 : * the current log checkpoint, relog the intent item to keep
436 : * the log tail moving forward. We're ok with this being racy
437 : * because an incorrect decision means we'll be a little slower
438 : * at pushing the tail.
439 : */
440 2467032583 : if (dfp->dfp_intent == NULL ||
441 1219919658 : xfs_log_item_in_current_chkpt(dfp->dfp_intent))
442 1246712464 : continue;
443 :
444 : /*
445 : * Figure out where we need the tail to be in order to maintain
446 : * the minimum required free space in the log. Only sample
447 : * the log threshold once per call.
448 : */
449 400461 : if (threshold_lsn == NULLCOMMITLSN) {
450 371310 : threshold_lsn = xlog_grant_push_threshold(log, 0);
451 371311 : if (threshold_lsn == NULLCOMMITLSN)
452 : break;
453 : }
454 74937 : if (XFS_LSN_CMP(dfp->dfp_intent->li_lsn, threshold_lsn) >= 0)
455 4232 : continue;
456 :
457 70705 : trace_xfs_defer_relog_intent((*tpp)->t_mountp, dfp);
458 70705 : XFS_STATS_INC((*tpp)->t_mountp, defer_relog);
459 70705 : dfp->dfp_intent = xfs_trans_item_relog(dfp->dfp_intent, *tpp);
460 : }
461 :
462 725977435 : if ((*tpp)->t_flags & XFS_TRANS_DIRTY)
463 67671 : return xfs_defer_trans_roll(tpp);
464 : return 0;
465 : }
466 :
467 : /*
468 : * Log an intent-done item for the first pending intent, and finish the work
469 : * items.
470 : */
471 : static int
472 816971825 : xfs_defer_finish_one(
473 : struct xfs_trans *tp,
474 : struct xfs_defer_pending *dfp)
475 : {
476 816971825 : const struct xfs_defer_op_type *ops = defer_op_types[dfp->dfp_type];
477 816971382 : struct xfs_btree_cur *state = NULL;
478 816971382 : struct list_head *li, *n;
479 816971382 : int error;
480 :
481 816971382 : trace_xfs_defer_pending_finish(tp->t_mountp, dfp);
482 :
483 816871317 : dfp->dfp_done = ops->create_done(tp, dfp->dfp_intent, dfp->dfp_count);
484 1678109450 : list_for_each_safe(li, n, &dfp->dfp_work) {
485 887081608 : list_del(li);
486 887064941 : dfp->dfp_count--;
487 887064941 : trace_xfs_defer_finish_item(tp->t_mountp, dfp, li);
488 886999341 : error = ops->finish_item(tp, dfp->dfp_done, li, &state);
489 887178268 : if (error == -EAGAIN) {
490 25975035 : int ret;
491 :
492 : /*
493 : * Caller wants a fresh transaction; put the work item
494 : * back on the list and log a new log intent item to
495 : * replace the old one. See "Requesting a Fresh
496 : * Transaction while Finishing Deferred Work" above.
497 : */
498 25975035 : list_add(li, &dfp->dfp_work);
499 25969360 : dfp->dfp_count++;
500 25969360 : dfp->dfp_done = NULL;
501 25969360 : dfp->dfp_intent = NULL;
502 25969360 : ret = xfs_defer_create_intent(tp, dfp, false);
503 25986957 : if (ret < 0)
504 0 : error = ret;
505 : }
506 :
507 887190190 : if (error)
508 25992926 : goto out;
509 : }
510 :
511 : /* Done with the dfp, free it. */
512 791027842 : list_del(&dfp->dfp_list);
513 790927533 : kmem_cache_free(xfs_defer_pending_cache, dfp);
514 816994021 : out:
515 816994021 : if (ops->finish_cleanup)
516 519850305 : ops->finish_cleanup(tp, state, error);
517 817005608 : return error;
518 : }
519 :
520 : /*
521 : * Finish all the pending work. This involves logging intent items for
522 : * any work items that wandered in since the last transaction roll (if
523 : * one has even happened), rolling the transaction, and finishing the
524 : * work items in the first item on the logged-and-pending list.
525 : *
526 : * If an inode is provided, relog it to the new transaction.
527 : */
528 : int
529 891985010 : xfs_defer_finish_noroll(
530 : struct xfs_trans **tp)
531 : {
532 891985010 : struct xfs_defer_pending *dfp = NULL;
533 891985010 : int error = 0;
534 891985010 : LIST_HEAD(dop_pending);
535 :
536 891985010 : ASSERT((*tp)->t_flags & XFS_TRANS_PERM_LOG_RES);
537 :
538 891985010 : trace_xfs_defer_finish(*tp, _RET_IP_);
539 :
540 : /* Until we run out of pending work to finish... */
541 1708425092 : while (!list_empty(&dop_pending) || !list_empty(&(*tp)->t_dfops)) {
542 : /*
543 : * Deferred items that are created in the process of finishing
544 : * other deferred work items should be queued at the head of
545 : * the pending list, which puts them ahead of the deferred work
546 : * that was created by the caller. This keeps the number of
547 : * pending work items to a minimum, which decreases the amount
548 : * of time that any one intent item can stick around in memory,
549 : * pinning the log tail.
550 : */
551 816628396 : int has_intents = xfs_defer_create_intents(*tp);
552 :
553 816918885 : list_splice_init(&(*tp)->t_dfops, &dop_pending);
554 :
555 816918885 : if (has_intents < 0) {
556 0 : error = has_intents;
557 0 : goto out_shutdown;
558 : }
559 816918885 : if (has_intents || dfp) {
560 725862481 : error = xfs_defer_trans_roll(tp);
561 725984694 : if (error)
562 184 : goto out_shutdown;
563 :
564 : /* Relog intent items to keep the log moving. */
565 725984510 : error = xfs_defer_relog(tp, &dop_pending);
566 725965570 : if (error)
567 0 : goto out_shutdown;
568 : }
569 :
570 817021974 : dfp = list_first_entry(&dop_pending, struct xfs_defer_pending,
571 : dfp_list);
572 817021974 : error = xfs_defer_finish_one(*tp, dfp);
573 816928958 : if (error && error != -EAGAIN)
574 4942 : goto out_shutdown;
575 : }
576 :
577 891350997 : trace_xfs_defer_finish_done(*tp, _RET_IP_);
578 891352731 : return 0;
579 :
580 5126 : out_shutdown:
581 5126 : xfs_defer_trans_abort(*tp, &dop_pending);
582 5126 : xfs_force_shutdown((*tp)->t_mountp, SHUTDOWN_CORRUPT_INCORE);
583 5126 : trace_xfs_defer_finish_error(*tp, error);
584 5126 : xfs_defer_cancel_list((*tp)->t_mountp, &dop_pending);
585 5126 : xfs_defer_cancel(*tp);
586 5126 : return error;
587 : }
588 :
589 : int
590 177242044 : xfs_defer_finish(
591 : struct xfs_trans **tp)
592 : {
593 177242044 : int error;
594 :
595 : /*
596 : * Finish and roll the transaction once more to avoid returning to the
597 : * caller with a dirty transaction.
598 : */
599 177242044 : error = xfs_defer_finish_noroll(tp);
600 177215935 : if (error)
601 : return error;
602 177213956 : if ((*tp)->t_flags & XFS_TRANS_DIRTY) {
603 163539311 : error = xfs_defer_trans_roll(tp);
604 163557113 : if (error) {
605 3 : xfs_force_shutdown((*tp)->t_mountp,
606 : SHUTDOWN_CORRUPT_INCORE);
607 3 : return error;
608 : }
609 : }
610 :
611 : /* Reset LOWMODE now that we've finished all the dfops. */
612 177231755 : ASSERT(list_empty(&(*tp)->t_dfops));
613 177231755 : (*tp)->t_flags &= ~XFS_TRANS_LOWMODE;
614 177231755 : return 0;
615 : }
616 :
617 : void
618 5491 : xfs_defer_cancel(
619 : struct xfs_trans *tp)
620 : {
621 5491 : struct xfs_mount *mp = tp->t_mountp;
622 :
623 5491 : trace_xfs_defer_cancel(tp, _RET_IP_);
624 5491 : xfs_defer_cancel_list(mp, &tp->t_dfops);
625 5491 : }
626 :
627 : /* Add an item for later deferred processing. */
628 : void
629 861127683 : xfs_defer_add(
630 : struct xfs_trans *tp,
631 : enum xfs_defer_ops_type type,
632 : struct list_head *li)
633 : {
634 861127683 : struct xfs_defer_pending *dfp = NULL;
635 861127683 : const struct xfs_defer_op_type *ops = defer_op_types[type];
636 :
637 861046337 : ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES);
638 861046337 : BUILD_BUG_ON(ARRAY_SIZE(defer_op_types) != XFS_DEFER_OPS_TYPE_MAX);
639 :
640 : /*
641 : * Add the item to a pending item at the end of the intake list.
642 : * If the last pending item has the same type, reuse it. Else,
643 : * create a new pending item at the end of the intake list.
644 : */
645 861046337 : if (!list_empty(&tp->t_dfops)) {
646 319360845 : dfp = list_last_entry(&tp->t_dfops,
647 : struct xfs_defer_pending, dfp_list);
648 319360845 : if (dfp->dfp_type != type ||
649 71909270 : (ops->max_items && dfp->dfp_count >= ops->max_items))
650 : dfp = NULL;
651 : }
652 70148909 : if (!dfp) {
653 790897428 : dfp = kmem_cache_zalloc(xfs_defer_pending_cache,
654 : GFP_NOFS | __GFP_NOFAIL);
655 790991546 : dfp->dfp_type = type;
656 790991546 : dfp->dfp_intent = NULL;
657 790991546 : dfp->dfp_done = NULL;
658 790991546 : dfp->dfp_count = 0;
659 790991546 : INIT_LIST_HEAD(&dfp->dfp_work);
660 790991546 : list_add_tail(&dfp->dfp_list, &tp->t_dfops);
661 : }
662 :
663 861080716 : list_add_tail(li, &dfp->dfp_work);
664 861138478 : trace_xfs_defer_add_item(tp->t_mountp, dfp, li);
665 861075063 : dfp->dfp_count++;
666 861075063 : }
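
/*
 * For illustration only (not actual xfs_defer.c code): the typical caller
 * pattern for the interfaces above. The work item type and its list_head
 * member are hypothetical; real callers use the extent-free, rmap, refcount,
 * bmap, or attr intent structures defined elsewhere in libxfs.
 *
 *	static int
 *	xfs_example_queue_and_finish(struct xfs_trans **tpp,
 *			struct xfs_example_work *w)
 *	{
 *		(queue the work; the intent item is logged later, when the
 *		 dfops chain is processed)
 *		xfs_defer_add(*tpp, XFS_DEFER_OPS_TYPE_FREE, &w->ew_list);
 *
 *		(log intents, roll as needed, and finish everything queued,
 *		 leaving a clean transaction for the caller to commit)
 *		return xfs_defer_finish(tpp);
 *	}
 */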
667 :
668 : /*
669 : * Move deferred ops from one transaction to another and reset the source to
670 : * initial state. This is primarily used to carry state forward across
671 : * transaction rolls with pending dfops.
672 : */
673 : void
674 891475735 : xfs_defer_move(
675 : struct xfs_trans *dtp,
676 : struct xfs_trans *stp)
677 : {
678 891475735 : list_splice_init(&stp->t_dfops, &dtp->t_dfops);
679 :
680 : /*
681 : * Low free space mode was historically controlled by a dfops field.
682 : * This meant that low mode state potentially carried across multiple
683 : * transaction rolls. Transfer low mode on a dfops move to preserve
684 : * that behavior.
685 : */
686 891475735 : dtp->t_flags |= (stp->t_flags & XFS_TRANS_LOWMODE);
687 891475735 : stp->t_flags &= ~XFS_TRANS_LOWMODE;
688 891475735 : }
689 :
690 : /*
691 : * Prepare a chain of fresh deferred ops work items to be completed later. Log
692 : * recovery requires the ability to put off until later the actual finishing
693 : * work so that it can process unfinished items recovered from the log in
694 : * correct order.
695 : *
696 : * Create and log intent items for all the work that we're capturing so that we
697 : * can be assured that the items will get replayed if the system goes down
698 : * before log recovery gets a chance to finish the work it put off. The entire
699 : * deferred ops state is transferred to the capture structure and the
700 : * transaction is then ready for the caller to commit it. If there are no
701 : * intent items to capture, this function returns NULL.
702 : *
703 : * The capture structure takes extra references to any inodes and buffers
704 : * held by the transaction; callers drop their own references after commit.
705 : */
706 : static struct xfs_defer_capture *
707 5496 : xfs_defer_ops_capture(
708 : struct xfs_trans *tp)
709 : {
710 5496 : struct xfs_defer_capture *dfc;
711 5496 : unsigned short i;
712 5496 : int error;
713 :
714 5496 : if (list_empty(&tp->t_dfops))
715 : return NULL;
716 :
717 1340 : error = xfs_defer_create_intents(tp);
718 1340 : if (error < 0)
719 0 : return ERR_PTR(error);
720 :
721 : /* Create an object to capture the defer ops. */
722 1340 : dfc = kmem_zalloc(sizeof(*dfc), KM_NOFS);
723 1340 : INIT_LIST_HEAD(&dfc->dfc_list);
724 1340 : INIT_LIST_HEAD(&dfc->dfc_dfops);
725 :
726 : /* Move the dfops chain and transaction state to the capture struct. */
727 1340 : list_splice_init(&tp->t_dfops, &dfc->dfc_dfops);
728 1340 : dfc->dfc_tpflags = tp->t_flags & XFS_TRANS_LOWMODE;
729 1340 : tp->t_flags &= ~XFS_TRANS_LOWMODE;
730 :
731 : /* Capture the remaining block reservations along with the dfops. */
732 1340 : dfc->dfc_blkres = tp->t_blk_res - tp->t_blk_res_used;
733 1340 : dfc->dfc_rtxres = tp->t_rtx_res - tp->t_rtx_res_used;
734 :
735 : /* Preserve the log reservation size. */
736 1340 : dfc->dfc_logres = tp->t_log_res;
737 :
738 1340 : error = xfs_defer_save_resources(&dfc->dfc_held, tp);
739 1340 : if (error) {
740 : /*
741 : * Resource capture should never fail, but if it does, we
742 : * still have to shut down the log and release things
743 : * properly.
744 : */
745 0 : xfs_force_shutdown(tp->t_mountp, SHUTDOWN_CORRUPT_INCORE);
746 : }
747 :
748 : /*
749 : * Grab extra references to the inodes and buffers because callers are
750 : * expected to release their held references after we commit the
751 : * transaction.
752 : */
753 2006 : for (i = 0; i < dfc->dfc_held.dr_inos; i++) {
754 666 : ASSERT(xfs_isilocked(dfc->dfc_held.dr_ip[i], XFS_ILOCK_EXCL));
755 666 : ihold(VFS_I(dfc->dfc_held.dr_ip[i]));
756 : }
757 :
758 1340 : for (i = 0; i < dfc->dfc_held.dr_bufs; i++)
759 0 : xfs_buf_hold(dfc->dfc_held.dr_bp[i]);
760 :
761 : return dfc;
762 : }
763 :
764 : /* Release all resources that we used to capture deferred ops. */
765 : void
766 0 : xfs_defer_ops_capture_abort(
767 : struct xfs_mount *mp,
768 : struct xfs_defer_capture *dfc)
769 : {
770 0 : unsigned short i;
771 :
772 0 : xfs_defer_pending_abort(mp, &dfc->dfc_dfops);
773 0 : xfs_defer_cancel_list(mp, &dfc->dfc_dfops);
774 :
775 0 : for (i = 0; i < dfc->dfc_held.dr_bufs; i++)
776 0 : xfs_buf_relse(dfc->dfc_held.dr_bp[i]);
777 :
778 0 : for (i = 0; i < dfc->dfc_held.dr_inos; i++)
779 0 : xfs_irele(dfc->dfc_held.dr_ip[i]);
780 :
781 0 : kmem_free(dfc);
782 0 : }
783 :
784 : /*
785 : * Capture any deferred ops and commit the transaction. This is the last step
786 : * needed to finish a log intent item that we recovered from the log. If any
787 : * of the deferred ops operate on an inode, that inode must be joined to the
788 : * transaction so that a reference can be transferred to the capture
789 : * structure. The caller must hold ILOCK_EXCL on the inode, and must unlock
790 : * it before calling xfs_defer_ops_continue.
791 : */
792 : int
793 5496 : xfs_defer_ops_capture_and_commit(
794 : struct xfs_trans *tp,
795 : struct list_head *capture_list)
796 : {
797 5496 : struct xfs_mount *mp = tp->t_mountp;
798 5496 : struct xfs_defer_capture *dfc;
799 5496 : int error;
800 :
801 : /* If we don't capture anything, commit transaction and exit. */
802 5496 : dfc = xfs_defer_ops_capture(tp);
803 5496 : if (IS_ERR(dfc)) {
804 0 : xfs_trans_cancel(tp);
805 0 : return PTR_ERR(dfc);
806 : }
807 5496 : if (!dfc)
808 4156 : return xfs_trans_commit(tp);
809 :
810 : /* Commit the transaction and add the capture structure to the list. */
811 1340 : error = xfs_trans_commit(tp);
812 1340 : if (error) {
813 0 : xfs_defer_ops_capture_abort(mp, dfc);
814 0 : return error;
815 : }
816 :
817 1340 : list_add_tail(&dfc->dfc_list, capture_list);
818 1340 : return 0;
819 : }
820 :
821 : /*
822 : * Attach a chain of captured deferred ops to a new transaction and free the
823 : * capture structure. If an inode was captured, it will be passed back to the
824 : * caller with ILOCK_EXCL held and joined to the transaction with lockflags==0.
825 : * The caller now owns the inode reference.
826 : */
827 : void
828 1340 : xfs_defer_ops_continue(
829 : struct xfs_defer_capture *dfc,
830 : struct xfs_trans *tp,
831 : struct xfs_defer_resources *dres)
832 : {
833 1340 : unsigned int i;
834 :
835 1340 : ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES);
836 1340 : ASSERT(!(tp->t_flags & XFS_TRANS_DIRTY));
837 :
838 : /* Lock the captured resources to the new transaction. */
839 1340 : if (dfc->dfc_held.dr_inos == 2)
840 0 : xfs_lock_two_inodes(dfc->dfc_held.dr_ip[0], XFS_ILOCK_EXCL,
841 : dfc->dfc_held.dr_ip[1], XFS_ILOCK_EXCL);
842 1340 : else if (dfc->dfc_held.dr_inos == 1)
843 666 : xfs_ilock(dfc->dfc_held.dr_ip[0], XFS_ILOCK_EXCL);
844 :
845 1340 : for (i = 0; i < dfc->dfc_held.dr_bufs; i++)
846 0 : xfs_buf_lock(dfc->dfc_held.dr_bp[i]);
847 :
848 : /* Join the captured resources to the new transaction. */
849 1340 : xfs_defer_restore_resources(tp, &dfc->dfc_held);
850 2680 : memcpy(dres, &dfc->dfc_held, sizeof(struct xfs_defer_resources));
851 1340 : dres->dr_bufs = 0;
852 :
853 : /* Move captured dfops chain and state to the transaction. */
854 1340 : list_splice_init(&dfc->dfc_dfops, &tp->t_dfops);
855 1340 : tp->t_flags |= dfc->dfc_tpflags;
856 :
857 1340 : kmem_free(dfc);
858 1340 : }
859 :
860 : /* Release the resources captured and continued during recovery. */
861 : void
862 1340 : xfs_defer_resources_rele(
863 : struct xfs_defer_resources *dres)
864 : {
865 1340 : unsigned short i;
866 :
867 2006 : for (i = 0; i < dres->dr_inos; i++) {
868 666 : xfs_iunlock(dres->dr_ip[i], XFS_ILOCK_EXCL);
869 666 : xfs_irele(dres->dr_ip[i]);
870 666 : dres->dr_ip[i] = NULL;
871 : }
872 :
873 1340 : for (i = 0; i < dres->dr_bufs; i++) {
874 0 : xfs_buf_relse(dres->dr_bp[i]);
875 0 : dres->dr_bp[i] = NULL;
876 : }
877 :
878 1340 : dres->dr_inos = 0;
879 1340 : dres->dr_bufs = 0;
880 1340 : dres->dr_ordered = 0;
881 1340 : }
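
/*
 * For illustration only (not actual xfs_defer.c code): one way log recovery
 * might drive the capture/continue interfaces above. Transaction allocation
 * and error handling are omitted; the real sequence lives in the intent-item
 * recovery code.
 *
 *	(during recovery: stash unfinished dfops and commit the recovery tp)
 *	error = xfs_defer_ops_capture_and_commit(tp, capture_list);
 *
 *	(later, for each capture structure on the list: allocate a new
 *	 transaction, reattach the captured dfops and resources, let the
 *	 commit finish them, then drop the references taken at capture time)
 *	xfs_defer_ops_continue(dfc, tp, &dres);
 *	error = xfs_trans_commit(tp);
 *	xfs_defer_resources_rele(&dres);
 */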
882 :
883 : static inline int __init
884 50 : xfs_defer_init_cache(void)
885 : {
886 50 : xfs_defer_pending_cache = kmem_cache_create("xfs_defer_pending",
887 : sizeof(struct xfs_defer_pending),
888 : 0, 0, NULL);
889 :
890 50 : return xfs_defer_pending_cache != NULL ? 0 : -ENOMEM;
891 : }
892 :
893 : static inline void
894 : xfs_defer_destroy_cache(void)
895 : {
896 49 : kmem_cache_destroy(xfs_defer_pending_cache);
897 49 : xfs_defer_pending_cache = NULL;
898 : }
899 :
900 : /* Set up caches for deferred work items. */
901 : int __init
902 50 : xfs_defer_init_item_caches(void)
903 : {
904 50 : int error;
905 :
906 50 : error = xfs_defer_init_cache();
907 50 : if (error)
908 : return error;
909 50 : error = xfs_rmap_intent_init_cache();
910 50 : if (error)
911 0 : goto err;
912 50 : error = xfs_refcount_intent_init_cache();
913 50 : if (error)
914 0 : goto err;
915 50 : error = xfs_bmap_intent_init_cache();
916 50 : if (error)
917 0 : goto err;
918 50 : error = xfs_extfree_intent_init_cache();
919 50 : if (error)
920 0 : goto err;
921 50 : error = xfs_attr_intent_init_cache();
922 50 : if (error)
923 0 : goto err;
924 : return 0;
925 0 : err:
926 0 : xfs_defer_destroy_item_caches();
927 0 : return error;
928 : }
929 :
930 : /* Destroy all the deferred work item caches, if they've been allocated. */
931 : void
932 49 : xfs_defer_destroy_item_caches(void)
933 : {
934 49 : xfs_attr_intent_destroy_cache();
935 49 : xfs_extfree_intent_destroy_cache();
936 49 : xfs_bmap_intent_destroy_cache();
937 49 : xfs_refcount_intent_destroy_cache();
938 49 : xfs_rmap_intent_destroy_cache();
939 49 : xfs_defer_destroy_cache();
940 49 : }
|