1 : // SPDX-License-Identifier: GPL-2.0-or-later
2 : /*
3 : * Copyright (C) 2022-2023 Oracle. All Rights Reserved.
4 : * Author: Darrick J. Wong <djwong@kernel.org>
5 : */
6 : #include "xfs.h"
7 : #include "xfs_fs.h"
8 : #include "xfs_shared.h"
9 : #include "xfs_format.h"
10 : #include "xfs_trans_resv.h"
11 : #include "xfs_mount.h"
12 : #include "xfs_btree.h"
13 : #include "xfs_btree_staging.h"
14 : #include "xfs_log_format.h"
15 : #include "xfs_trans.h"
16 : #include "xfs_log.h"
17 : #include "xfs_sb.h"
18 : #include "xfs_inode.h"
19 : #include "xfs_alloc.h"
20 : #include "xfs_rmap.h"
21 : #include "xfs_ag.h"
22 : #include "xfs_defer.h"
23 : #include "xfs_extfree_item.h"
24 : #include "scrub/scrub.h"
25 : #include "scrub/common.h"
26 : #include "scrub/trace.h"
27 : #include "scrub/repair.h"
28 : #include "scrub/newbt.h"
29 :
30 : /*
31 : * Estimate proper slack values for a btree that's being reloaded.
32 : *
33 : * Under most circumstances, we'll take whatever default loading value the
34 : * btree bulk loading code calculates for us. However, there are some
35 : * exceptions to this rule:
36 : *
37 : * (1) If someone turned one of the debug knobs.
38 : * (2) If this is a per-AG btree and the AG has less than ~9% space free.
39 : * (3) If this is an inode btree and the FS has less than ~9% space free.
40 : *
41 : * Note that we actually use 3/32 for the comparison to avoid division.
42 : */
43 : static void
44 182515 : xrep_newbt_estimate_slack(
45 : struct xrep_newbt *xnr)
46 : {
47 182515 : struct xfs_scrub *sc = xnr->sc;
48 182515 : struct xfs_btree_bload *bload = &xnr->bload;
49 182515 : uint64_t free;
50 182515 : uint64_t sz;
51 :
52 : /*
53 : * The xfs_globals values are set to -1 (i.e. take the bload defaults)
54 : * unless someone has set them otherwise, so we just pull the values
55 : * here.
56 : */
57 182515 : bload->leaf_slack = xfs_globals.bload_leaf_slack;
58 182515 : bload->node_slack = xfs_globals.bload_node_slack;
59 :
60 182515 : if (sc->ops->type == ST_PERAG) {
61 121701 : free = sc->sa.pag->pagf_freeblks;
62 121701 : sz = xfs_ag_block_count(sc->mp, sc->sa.pag->pag_agno);
63 : } else {
64 60814 : free = percpu_counter_sum(&sc->mp->m_fdblocks);
65 60814 : sz = sc->mp->m_sb.sb_dblocks;
66 : }
67 :
68 : /* No further changes if at least 3/32 of the space is still free. */
69 182520 : if (free >= ((sz * 3) >> 5))
70 : return;
71 :
72 : /* We're low on space; load the btrees as tightly as possible. */
73 91 : if (bload->leaf_slack < 0)
74 91 : bload->leaf_slack = 0;
75 91 : if (bload->node_slack < 0)
76 91 : bload->node_slack = 0;
77 : }
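
A minimal sketch of the cutoff used above, with a made-up helper name: (sz * 3) >> 5 computes 3/32 of sz without a division, so the test fires when less than roughly 9% of the space is free.

	/*
	 * Illustration only: true if fewer than ~9% (3/32) of @sz blocks
	 * are free.  For example, sz = 1024 gives a cutoff of 96 blocks.
	 */
	static inline bool example_space_is_tight(uint64_t free, uint64_t sz)
	{
		return free < ((sz * 3) >> 5);
	}
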
78 :
79 : /* Initialize accounting resources for staging a new AG btree. */
80 : void
81 182528 : xrep_newbt_init_ag(
82 : struct xrep_newbt *xnr,
83 : struct xfs_scrub *sc,
84 : const struct xfs_owner_info *oinfo,
85 : xfs_fsblock_t alloc_hint,
86 : enum xfs_ag_resv_type resv)
87 : {
88 182528 : memset(xnr, 0, sizeof(struct xrep_newbt));
89 182528 : xnr->sc = sc;
90 182528 : xnr->oinfo = *oinfo; /* structure copy */
91 182528 : xnr->alloc_hint = alloc_hint;
92 182528 : xnr->resv = resv;
93 182528 : INIT_LIST_HEAD(&xnr->resv_list);
94 182528 : xnr->bload.max_dirty = XFS_B_TO_FSBT(sc->mp, 256U << 10); /* 256K */
95 182528 : xrep_newbt_estimate_slack(xnr);
96 182514 : }
97 :
98 : /* Initialize accounting resources for staging a new inode fork btree. */
99 : int
100 60814 : xrep_newbt_init_inode(
101 : struct xrep_newbt *xnr,
102 : struct xfs_scrub *sc,
103 : int whichfork,
104 : const struct xfs_owner_info *oinfo)
105 : {
106 60814 : struct xfs_ifork *ifp;
107 :
108 60814 : ifp = kmem_cache_zalloc(xfs_ifork_cache, XCHK_GFP_FLAGS);
109 60814 : if (!ifp)
110 : return -ENOMEM;
111 :
112 60814 : xrep_newbt_init_ag(xnr, sc, oinfo,
113 60814 : XFS_INO_TO_FSB(sc->mp, sc->ip->i_ino),
114 : XFS_AG_RESV_NONE);
115 60814 : xnr->ifake.if_fork = ifp;
116 60814 : xnr->ifake.if_fork_size = xfs_inode_fork_size(sc->ip, whichfork);
117 60814 : xnr->ifake.if_whichfork = whichfork;
118 60814 : return 0;
119 : }
120 :
121 : /*
122 : * Initialize accounting resources for staging a new btree. Callers are
123 : * expected to add their own reservations (and clean them up) manually.
124 : */
125 : void
126 42124 : xrep_newbt_init_bare(
127 : struct xrep_newbt *xnr,
128 : struct xfs_scrub *sc)
129 : {
130 42124 : xrep_newbt_init_ag(xnr, sc, &XFS_RMAP_OINFO_ANY_OWNER, NULLFSBLOCK,
131 : XFS_AG_RESV_NONE);
132 42144 : }
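
A hedged sketch of how a caller might use the bare variant, assuming it already holds a suitable free extent and stages it via xrep_newbt_add_extent() (below); the agbno/len values are hypothetical and error handling is trimmed:

	struct xrep_newbt	xnr;
	int			error;

	xrep_newbt_init_bare(&xnr, sc);
	/* stage a specific extent instead of allocating a fresh one */
	error = xrep_newbt_add_extent(&xnr, sc->sa.pag, agbno, len);
	if (error)
		goto out_cancel;
	/* ...bulk load the new btree from the staged blocks... */
	return xrep_newbt_commit(&xnr);
out_cancel:
	xrep_newbt_cancel(&xnr);
	return error;
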
133 :
134 : /*
135 : * Set up automatic reaping of the blocks reserved for btree reconstruction by
136 : * logging a deferred free item (EFI) for each extent we allocate, so that all
137 : * of the space can be reclaimed if we crash before the new btree is committed.
138 : * The EFI log item is stashed in the reservation so that automatic reaping
139 : * can be cancelled or completed once the repair finishes.
139 : * automatic reaping if repair is successful.
140 : */
141 : static int
142 91123 : xrep_newbt_schedule_autoreap(
143 : struct xrep_newbt *xnr,
144 : struct xrep_newbt_resv *resv)
145 : {
146 91123 : struct xfs_extent_free_item efi_item = {
147 91123 : .xefi_blockcount = resv->len,
148 91123 : .xefi_owner = xnr->oinfo.oi_owner,
149 : .xefi_flags = XFS_EFI_SKIP_DISCARD,
150 91123 : .xefi_pag = resv->pag,
151 : };
152 91123 : struct xfs_scrub *sc = xnr->sc;
153 91123 : struct xfs_log_item *lip;
154 91123 : LIST_HEAD(items);
155 :
156 91123 : ASSERT(xnr->oinfo.oi_offset == 0);
157 :
158 91123 : efi_item.xefi_startblock = XFS_AGB_TO_FSB(sc->mp, resv->pag->pag_agno,
159 : resv->agbno);
160 91123 : if (xnr->oinfo.oi_flags & XFS_OWNER_INFO_ATTR_FORK)
161 0 : efi_item.xefi_flags |= XFS_EFI_ATTR_FORK;
162 91123 : if (xnr->oinfo.oi_flags & XFS_OWNER_INFO_BMBT_BLOCK)
163 5172 : efi_item.xefi_flags |= XFS_EFI_BMBT_BLOCK;
164 :
165 91123 : INIT_LIST_HEAD(&efi_item.xefi_list);
166 91123 : list_add(&efi_item.xefi_list, &items);
167 :
168 91109 : xfs_perag_intent_hold(resv->pag);
169 91099 : lip = xfs_extent_free_defer_type.create_intent(sc->tp, &items, 1,
170 : false);
171 91118 : ASSERT(lip != NULL && !IS_ERR(lip));
172 :
173 91118 : resv->efi = lip;
174 91118 : return 0;
175 : }
176 :
177 : /*
178 : * Earlier, we logged EFIs for the extents that we allocated to hold the new
179 : * btree so that we could automatically roll back those allocations if the
180 : * system crashed. Now we log an EFD to cancel the EFI, either because the
181 : * repair succeeded and the new blocks are in use; or because the repair was
182 : * cancelled and we're about to free the extents directly.
183 : */
184 : static inline void
185 91118 : xrep_newbt_finish_autoreap(
186 : struct xfs_scrub *sc,
187 : struct xrep_newbt_resv *resv)
188 : {
189 91118 : struct xfs_efd_log_item *efdp;
190 91118 : struct xfs_extent *extp;
191 91118 : struct xfs_log_item *efd_lip;
192 :
193 91118 : efd_lip = xfs_extent_free_defer_type.create_done(sc->tp, resv->efi, 1);
194 91122 : efdp = container_of(efd_lip, struct xfs_efd_log_item, efd_item);
195 91122 : extp = efdp->efd_format.efd_extents;
196 91122 : extp->ext_start = XFS_AGB_TO_FSB(sc->mp, resv->pag->pag_agno,
197 : resv->agbno);
198 91122 : extp->ext_len = resv->len;
199 91122 : efdp->efd_next_extent++;
200 91122 : set_bit(XFS_LI_DIRTY, &efd_lip->li_flags);
201 91123 : xfs_perag_intent_rele(resv->pag);
202 91128 : }
203 :
204 : /* Abort an EFI logged for a new btree block reservation. */
205 : static inline void
206 0 : xrep_newbt_cancel_autoreap(
207 : struct xrep_newbt_resv *resv)
208 : {
209 0 : xfs_extent_free_defer_type.abort_intent(resv->efi);
210 0 : xfs_perag_intent_rele(resv->pag);
211 0 : }
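
Taken together, these helpers give each staged reservation's EFI a small lifecycle: xrep_newbt_schedule_autoreap() logs the intent when the extent is allocated, xrep_newbt_relog_autoreap() (below) may relog it into a newer checkpoint while the repair runs, and either xrep_newbt_finish_autoreap() (which logs the EFD) or xrep_newbt_cancel_autoreap() (which aborts the intent on the shutdown path) retires it when the reservation is freed.
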
212 :
213 : /*
214 : * Relog the EFIs attached to a staging btree so that we don't pin the log
215 : * tail. Same logic as xfs_defer_relog.
216 : */
217 : int
218 186713 : xrep_newbt_relog_autoreap(
219 : struct xrep_newbt *xnr)
220 : {
221 186713 : struct xrep_newbt_resv *resv;
222 186713 : unsigned int efi_bytes = 0;
223 :
224 487657 : list_for_each_entry(resv, &xnr->resv_list, list) {
225 : /*
226 : * If the log intent item for this deferred op is in a
227 : * different checkpoint, relog it to keep the log tail moving
228 : * forward. We're ok with this being racy because an incorrect
229 : * decision means we'll be a little slower at pushing the tail.
230 : */
231 300940 : if (!resv->efi || xfs_log_item_in_current_chkpt(resv->efi))
232 299837 : continue;
233 :
234 1107 : resv->efi = xfs_trans_item_relog(resv->efi, xnr->sc->tp);
235 :
236 : /*
237 : * If free space is very fragmented, it's possible that the new
238 : * btree will be allocated a large number of small extents.
239 : * On an active system, it's possible that so many of those
240 : * EFIs will need relogging here that doing them all in one
241 : * transaction will overflow the reservation.
242 : *
243 : * Each allocation for the new btree (xrep_newbt_resv) points
244 : * to a unique single-mapping EFI, so each relog operation logs
245 : * a single-mapping EFD followed by a new EFI. Each single
246 : * mapping EF[ID] item consumes about 128 bytes, so we'll
247 : * assume 256 bytes per relog. Roll if we consume more than
248 : * half of the transaction reservation.
249 : */
250 1107 : efi_bytes += 256;
251 1107 : if (efi_bytes > xnr->sc->tp->t_log_res / 2) {
252 0 : int error;
253 :
254 0 : error = xrep_roll_trans(xnr->sc);
255 0 : if (error)
256 0 : return error;
257 :
258 : efi_bytes = 0;
259 : }
260 : }
261 :
262 186717 : if (xnr->sc->tp->t_flags & XFS_TRANS_DIRTY)
263 1015 : return xrep_roll_trans(xnr->sc);
264 : return 0;
265 : }
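
For a sense of scale (the reservation size here is a made-up example): with a 64 KiB transaction reservation, the 256-bytes-per-relog estimate above permits roughly 128 relogged EFIs before efi_bytes exceeds t_log_res / 2 and the loop rolls the transaction.
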
266 :
267 : /*
268 : * Designate specific blocks to be used to build our new btree. @pag must be
269 : * a passive reference.
270 : */
271 : STATIC int
272 112219 : xrep_newbt_add_blocks(
273 : struct xrep_newbt *xnr,
274 : struct xfs_perag *pag,
275 : xfs_agblock_t agbno,
276 : xfs_extlen_t len,
277 : bool autoreap)
278 : {
279 112219 : struct xrep_newbt_resv *resv;
280 112219 : int error;
281 :
282 112219 : resv = kmalloc(sizeof(struct xrep_newbt_resv), XCHK_GFP_FLAGS);
283 112166 : if (!resv)
284 : return -ENOMEM;
285 :
286 112166 : INIT_LIST_HEAD(&resv->list);
287 112166 : resv->agbno = agbno;
288 112166 : resv->len = len;
289 112166 : resv->used = 0;
290 112166 : resv->pag = xfs_perag_hold(pag);
291 :
292 112237 : if (autoreap) {
293 91125 : error = xrep_newbt_schedule_autoreap(xnr, resv);
294 91118 : if (error)
295 0 : goto out_pag;
296 : }
297 :
298 112230 : list_add_tail(&resv->list, &xnr->resv_list);
299 112230 : return 0;
300 : out_pag:
301 0 : xfs_perag_put(resv->pag);
302 0 : kfree(resv);
303 0 : return error;
304 : }
305 :
306 : /*
307 : * Add an extent to the new btree reservation pool. Callers are required to
308 : * handle any automatic reaping if the repair is cancelled. @pag must be a
309 : * passive reference.
310 : */
311 : int
312 21063 : xrep_newbt_add_extent(
313 : struct xrep_newbt *xnr,
314 : struct xfs_perag *pag,
315 : xfs_agblock_t agbno,
316 : xfs_extlen_t len)
317 : {
318 21063 : return xrep_newbt_add_blocks(xnr, pag, agbno, len, false);
319 : }
320 :
321 : /* Don't let our allocation hint take us beyond this AG */
322 : static inline void
323 85930 : xrep_newbt_validate_ag_alloc_hint(
324 : struct xrep_newbt *xnr)
325 : {
326 85930 : struct xfs_scrub *sc = xnr->sc;
327 85930 : xfs_agnumber_t agno = XFS_FSB_TO_AGNO(sc->mp, xnr->alloc_hint);
328 :
329 171848 : if (agno == sc->sa.pag->pag_agno &&
330 85928 : xfs_verify_fsbno(sc->mp, xnr->alloc_hint))
331 : return;
332 :
333 0 : xnr->alloc_hint = XFS_AGB_TO_FSB(sc->mp, sc->sa.pag->pag_agno,
334 : XFS_AGFL_BLOCK(sc->mp) + 1);
335 : }
336 :
337 : /* Allocate disk space for a new per-AG btree. */
338 : STATIC int
339 79667 : xrep_newbt_alloc_ag_blocks(
340 : struct xrep_newbt *xnr,
341 : uint64_t nr_blocks)
342 : {
343 79667 : struct xfs_scrub *sc = xnr->sc;
344 79667 : int error = 0;
345 :
346 79667 : ASSERT(sc->sa.pag != NULL);
347 :
348 165619 : while (nr_blocks > 0) {
349 85942 : struct xfs_alloc_arg args = {
350 85942 : .tp = sc->tp,
351 85942 : .mp = sc->mp,
352 : .oinfo = xnr->oinfo,
353 : .minlen = 1,
354 : .maxlen = nr_blocks,
355 : .prod = 1,
356 85942 : .resv = xnr->resv,
357 : };
358 :
359 85942 : xrep_newbt_validate_ag_alloc_hint(xnr);
360 :
361 85932 : if (xnr->alloc_vextent)
362 5474 : error = xnr->alloc_vextent(sc, &args, xnr->alloc_hint);
363 : else
364 80458 : error = xfs_alloc_vextent_near_bno(&args,
365 : xnr->alloc_hint);
366 85946 : if (error)
367 0 : return error;
368 85946 : if (args.fsbno == NULLFSBLOCK)
369 : return -ENOSPC;
370 :
371 85946 : trace_xrep_newbt_alloc_ag_blocks(sc->mp, args.agno, args.agbno,
372 85946 : args.len, xnr->oinfo.oi_owner);
373 :
374 85946 : error = xrep_newbt_add_blocks(xnr, sc->sa.pag, args.agbno,
375 : args.len, true);
376 85952 : if (error)
377 0 : return error;
378 :
379 85952 : nr_blocks -= args.len;
380 85952 : xnr->alloc_hint = args.fsbno + args.len;
381 :
382 85952 : error = xrep_defer_finish(sc);
383 85952 : if (error)
384 0 : return error;
385 : }
386 :
387 : return 0;
388 : }
389 :
390 : /* Don't let our allocation hint take us beyond EOFS */
391 : static inline void
392 5172 : xrep_newbt_validate_file_alloc_hint(
393 : struct xrep_newbt *xnr)
394 : {
395 5172 : struct xfs_scrub *sc = xnr->sc;
396 :
397 5172 : if (xfs_verify_fsbno(sc->mp, xnr->alloc_hint))
398 : return;
399 :
400 0 : xnr->alloc_hint = XFS_AGB_TO_FSB(sc->mp, 0, XFS_AGFL_BLOCK(sc->mp) + 1);
401 : }
402 :
403 : /* Allocate disk space for our new file-based btree. */
404 : STATIC int
405 5163 : xrep_newbt_alloc_file_blocks(
406 : struct xrep_newbt *xnr,
407 : uint64_t nr_blocks)
408 : {
409 5163 : struct xfs_scrub *sc = xnr->sc;
410 5163 : int error = 0;
411 :
412 10335 : while (nr_blocks > 0) {
413 5172 : struct xfs_alloc_arg args = {
414 5172 : .tp = sc->tp,
415 5172 : .mp = sc->mp,
416 : .oinfo = xnr->oinfo,
417 : .minlen = 1,
418 : .maxlen = nr_blocks,
419 : .prod = 1,
420 5172 : .resv = xnr->resv,
421 : };
422 5172 : struct xfs_perag *pag;
423 :
424 5172 : xrep_newbt_validate_file_alloc_hint(xnr);
425 :
426 5172 : if (xnr->alloc_vextent)
427 0 : error = xnr->alloc_vextent(sc, &args, xnr->alloc_hint);
428 : else
429 5172 : error = xfs_alloc_vextent_start_ag(&args,
430 : xnr->alloc_hint);
431 5172 : if (error)
432 0 : return error;
433 5172 : if (args.fsbno == NULLFSBLOCK)
434 : return -ENOSPC;
435 :
436 5172 : trace_xrep_newbt_alloc_file_blocks(sc->mp, args.agno,
437 5172 : args.agbno, args.len, xnr->oinfo.oi_owner);
438 :
439 5172 : pag = xfs_perag_get(sc->mp, args.agno);
440 5172 : if (!pag) {
441 0 : ASSERT(0);
442 0 : return -EFSCORRUPTED;
443 : }
444 :
445 5172 : error = xrep_newbt_add_blocks(xnr, pag, args.agbno, args.len,
446 : true);
447 5172 : xfs_perag_put(pag);
448 5172 : if (error)
449 0 : return error;
450 :
451 5172 : nr_blocks -= args.len;
452 5172 : xnr->alloc_hint = args.fsbno + args.len;
453 :
454 5172 : error = xrep_defer_finish(sc);
455 5172 : if (error)
456 0 : return error;
457 : }
458 :
459 : return 0;
460 : }
461 :
462 : /* Allocate disk space for our new btree. */
463 : int
464 84817 : xrep_newbt_alloc_blocks(
465 : struct xrep_newbt *xnr,
466 : uint64_t nr_blocks)
467 : {
468 84817 : if (xnr->sc->ip)
469 5163 : return xrep_newbt_alloc_file_blocks(xnr, nr_blocks);
470 79654 : return xrep_newbt_alloc_ag_blocks(xnr, nr_blocks);
471 : }
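
A hedged sketch of the more common allocation path, tying the pieces above together; oinfo, alloc_hint, and nr_blocks are placeholders, error handling is trimmed, and the bulk-load step is only gestured at:

	struct xrep_newbt	xnr;
	int			error;

	xrep_newbt_init_ag(&xnr, sc, &oinfo, alloc_hint, XFS_AG_RESV_NONE);
	error = xrep_newbt_alloc_blocks(&xnr, nr_blocks);
	if (error)
		goto out_cancel;
	/*
	 * ...set up a staging btree cursor and bulk load it; the loader
	 * takes each new block from xrep_newbt_claim_block() (below)...
	 */
	return xrep_newbt_commit(&xnr);
out_cancel:
	xrep_newbt_cancel(&xnr);
	return error;
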
472 :
473 : /*
474 : * How many extent freeing items can we attach to a transaction before we want
475 : * to finish the chain so that unreserving new btree blocks doesn't overrun
476 : * the transaction reservation?
477 : */
478 : #define XREP_REAP_MAX_NEWBT_EFIS (128)
479 :
480 : /*
481 : * Free the unused part of an extent. Returns the number of EFIs logged or
482 : * a negative errno.
483 : */
484 : STATIC int
485 91126 : xrep_newbt_free_extent(
486 : struct xrep_newbt *xnr,
487 : struct xrep_newbt_resv *resv,
488 : bool btree_committed)
489 : {
490 91126 : struct xfs_scrub *sc = xnr->sc;
491 91126 : xfs_agblock_t free_agbno = resv->agbno;
492 91126 : xfs_extlen_t free_aglen = resv->len;
493 91126 : xfs_fsblock_t fsbno;
494 91126 : int error;
495 :
496 : /*
497 : * If we used space and committed the btree, remove those blocks from
498 : * the extent before we act on it.
499 : */
500 91126 : if (btree_committed) {
501 91126 : free_agbno += resv->used;
502 91126 : free_aglen -= resv->used;
503 : }
504 :
505 91126 : xrep_newbt_finish_autoreap(sc, resv);
506 :
507 91128 : if (free_aglen == 0)
508 : return 0;
509 :
510 0 : trace_xrep_newbt_free_blocks(sc->mp, resv->pag->pag_agno, free_agbno,
511 0 : free_aglen, xnr->oinfo.oi_owner);
512 :
513 0 : ASSERT(xnr->resv != XFS_AG_RESV_AGFL);
514 0 : ASSERT(xnr->resv != XFS_AG_RESV_IGNORE);
515 :
516 : /*
517 : * Use EFIs to free the reservations. This reduces the chance
518 : * that we leak blocks if the system goes down.
519 : */
520 0 : fsbno = XFS_AGB_TO_FSB(sc->mp, resv->pag->pag_agno, free_agbno);
521 0 : error = __xfs_free_extent_later(sc->tp, fsbno, free_aglen, &xnr->oinfo,
522 : xnr->resv, true);
523 0 : if (error)
524 0 : return error;
525 :
526 : return 1;
527 : }
528 :
529 : /* Free all the accounting info and disk space we reserved for a new btree. */
530 : STATIC int
531 182610 : xrep_newbt_free(
532 : struct xrep_newbt *xnr,
533 : bool btree_committed)
534 : {
535 182610 : struct xfs_scrub *sc = xnr->sc;
536 182610 : struct xrep_newbt_resv *resv, *n;
537 182610 : unsigned int freed = 0;
538 182610 : int error = 0;
539 :
540 : /*
541 : * If the filesystem already went down, we can't free the blocks. Skip
542 : * ahead to freeing the incore metadata because we can't fix anything.
543 : */
544 365220 : if (xfs_is_shutdown(sc->mp))
545 0 : goto junkit;
546 :
547 273732 : list_for_each_entry_safe(resv, n, &xnr->resv_list, list) {
548 91121 : int ret;
549 :
550 91121 : ret = xrep_newbt_free_extent(xnr, resv, btree_committed);
551 91128 : list_del(&resv->list);
552 91128 : xfs_perag_put(resv->pag);
553 91128 : kfree(resv);
554 91122 : if (ret < 0) {
555 0 : error = ret;
556 0 : goto junkit;
557 : }
558 :
559 91122 : freed += ret;
560 91122 : if (freed >= XREP_REAP_MAX_NEWBT_EFIS) {
561 0 : error = xrep_defer_finish(sc);
562 0 : if (error)
563 0 : goto junkit;
564 : freed = 0;
565 : }
566 : }
567 :
568 182611 : if (freed)
569 0 : error = xrep_defer_finish(sc);
570 :
571 182611 : junkit:
572 : /*
573 : * If we still have reservations attached to @newbt, cleanup must have
574 : * failed and the filesystem is about to go down. Clean up the incore
575 : * reservations.
576 : */
577 182614 : list_for_each_entry_safe(resv, n, &xnr->resv_list, list) {
578 0 : xrep_newbt_cancel_autoreap(resv);
579 0 : list_del(&resv->list);
580 0 : xfs_perag_put(resv->pag);
581 0 : kfree(resv);
582 : }
583 :
584 182614 : if (sc->ip) {
585 60813 : kmem_cache_free(xfs_ifork_cache, xnr->ifake.if_fork);
586 60813 : xnr->ifake.if_fork = NULL;
587 : }
588 :
589 182614 : return error;
590 : }
591 :
592 : /*
593 : * Free all the accounting info and unused disk space allocations after
594 : * committing a new btree.
595 : */
596 : int
597 140384 : xrep_newbt_commit(
598 : struct xrep_newbt *xnr)
599 : {
600 140384 : return xrep_newbt_free(xnr, true);
601 : }
602 :
603 : /*
604 : * Free all the accounting info and all of the disk space we reserved for a new
605 : * btree that we're not going to commit. We want to try to roll things back
606 : * cleanly for things like ENOSPC midway through allocation.
607 : */
608 : void
609 42228 : xrep_newbt_cancel(
610 : struct xrep_newbt *xnr)
611 : {
612 42228 : xrep_newbt_free(xnr, false);
613 42228 : }
614 :
615 : /* Feed one of the reserved btree blocks to the bulk loader. */
616 : int
617 435472 : xrep_newbt_claim_block(
618 : struct xfs_btree_cur *cur,
619 : struct xrep_newbt *xnr,
620 : union xfs_btree_ptr *ptr)
621 : {
622 435472 : struct xrep_newbt_resv *resv;
623 435472 : struct xfs_mount *mp = cur->bc_mp;
624 435472 : xfs_agblock_t agbno;
625 :
626 : /*
627 : * The first item in the list should always have a free block unless
628 : * we're completely out.
629 : */
630 435472 : resv = list_first_entry(&xnr->resv_list, struct xrep_newbt_resv, list);
631 435472 : if (resv->used == resv->len)
632 : return -ENOSPC;
633 :
634 : /*
635 : * Peel off a block from the start of the reservation. We allocate
636 : * blocks in order to place blocks on disk in increasing record or key
637 : * order. The block reservations tend to end up on the list in
638 : * decreasing order, which hopefully results in leaf blocks ending up
639 : * together.
640 : */
641 435472 : agbno = resv->agbno + resv->used;
642 435472 : resv->used++;
643 :
644 : /* If we used all the blocks in this reservation, move it to the end. */
645 435472 : if (resv->used == resv->len)
646 112239 : list_move_tail(&resv->list, &xnr->resv_list);
647 :
648 435464 : trace_xrep_newbt_claim_block(mp, resv->pag->pag_agno, agbno, 1,
649 435464 : xnr->oinfo.oi_owner);
650 :
651 435514 : if (cur->bc_flags & XFS_BTREE_LONG_PTRS)
652 5340 : ptr->l = cpu_to_be64(XFS_AGB_TO_FSB(mp, resv->pag->pag_agno,
653 : agbno));
654 : else
655 860348 : ptr->s = cpu_to_be32(agbno);
656 : return 0;
657 : }
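
Repair callers typically reach this function through the bulk loader rather than calling it directly; a hedged sketch of such a wrapper, assuming the xfs_btree_bload claim_block callback takes (cur, ptr, priv) in this kernel and using a made-up xrep_example structure:

	static int
	xrep_example_claim_block(
		struct xfs_btree_cur	*cur,
		union xfs_btree_ptr	*ptr,
		void			*priv)
	{
		struct xrep_example	*rx = priv;

		return xrep_newbt_claim_block(cur, &rx->new_btree, ptr);
	}

	/* wired up before calling xfs_btree_bload(): */
	rx->new_btree.bload.claim_block = xrep_example_claim_block;
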
658 :
659 : /* How many reserved blocks are unused? */
660 : unsigned int
661 42228 : xrep_newbt_unused_blocks(
662 : struct xrep_newbt *xnr)
663 : {
664 42228 : struct xrep_newbt_resv *resv;
665 42228 : unsigned int unused = 0;
666 :
667 63343 : list_for_each_entry(resv, &xnr->resv_list, list)
668 21115 : unused += resv->len - resv->used;
669 42228 : return unused;
670 : }