Line data Source code
1 : // SPDX-License-Identifier: GPL-2.0-or-later
2 : /*
3 : * Copyright (C) 2018-2023 Oracle. All Rights Reserved.
4 : * Author: Darrick J. Wong <djwong@kernel.org>
5 : */
6 : #include "xfs.h"
7 : #include "xfs_fs.h"
8 : #include "xfs_shared.h"
9 : #include "xfs_format.h"
10 : #include "xfs_log_format.h"
11 : #include "xfs_trans_resv.h"
12 : #include "xfs_bit.h"
13 : #include "xfs_sb.h"
14 : #include "xfs_mount.h"
15 : #include "xfs_defer.h"
16 : #include "xfs_inode.h"
17 : #include "xfs_trans.h"
18 : #include "xfs_alloc.h"
19 : #include "xfs_btree.h"
20 : #include "xfs_btree_staging.h"
21 : #include "xfs_imeta.h"
22 : #include "xfs_rmap.h"
23 : #include "xfs_rtrmap_btree.h"
24 : #include "xfs_trace.h"
25 : #include "xfs_cksum.h"
26 : #include "xfs_error.h"
27 : #include "xfs_extent_busy.h"
28 : #include "xfs_rtgroup.h"
29 : #include "xfs_bmap.h"
30 : #include "xfs_health.h"
31 : #include "scrub/xfile.h"
32 : #include "scrub/xfbtree.h"
33 : #include "xfs_btree_mem.h"
34 :
35 : static struct kmem_cache *xfs_rtrmapbt_cur_cache;
36 :
37 : /*
38 : * Realtime Reverse Map btree.
39 : *
40 : * This is a btree used to track the owner(s) of a given extent in the realtime
41 : * device. See the comments in xfs_rmap_btree.c for more information.
42 : *
43 : * This tree is basically the same as the regular rmap btree except that it
44 : * is rooted in an inode and does not live in free space.
45 : */
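For reference, a sketch of the record and key layouts this btree stores; the structures below follow the ondisk rmap definitions in xfs_format.h as I understand them and are reproduced here only to make the key-manipulation helpers further down easier to follow. Since the tree is overlapping, each node entry carries two of these keys (a low key and a high key).

/*
 * Reference sketch: ondisk rmap record and key layouts.  The rtrmap
 * btree stores the same records as the per-AG rmap btree; only the root
 * location (an inode fork) and the long block pointers differ.
 */
struct xfs_rmap_rec {
	__be32		rm_startblock;	/* extent start block */
	__be32		rm_blockcount;	/* extent length */
	__be64		rm_owner;	/* extent owner */
	__be64		rm_offset;	/* offset within the owner, plus flags */
};

struct xfs_rmap_key {
	__be32		rm_startblock;	/* extent start block */
	__be64		rm_owner;	/* extent owner */
	__be64		rm_offset;	/* offset within the owner */
} __attribute__((packed));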
46 :
47 : static struct xfs_btree_cur *
48 19904827 : xfs_rtrmapbt_dup_cursor(
49 : struct xfs_btree_cur *cur)
50 : {
51 19904827 : struct xfs_btree_cur *new;
52 :
53 19904827 : new = xfs_rtrmapbt_init_cursor(cur->bc_mp, cur->bc_tp, cur->bc_ino.rtg,
54 : cur->bc_ino.ip);
55 :
56 : /* Copy the flags values since init cursor doesn't get them. */
57 19904825 : new->bc_ino.flags = cur->bc_ino.flags;
58 :
59 19904825 : return new;
60 : }
61 :
62 : STATIC int
63 24718915 : xfs_rtrmapbt_get_minrecs(
64 : struct xfs_btree_cur *cur,
65 : int level)
66 : {
67 24718915 : if (level == cur->bc_nlevels - 1) {
68 38579 : struct xfs_ifork *ifp = xfs_btree_ifork_ptr(cur);
69 :
70 38579 : return xfs_rtrmapbt_maxrecs(cur->bc_mp, ifp->if_broot_bytes,
71 38579 : level == 0) / 2;
72 : }
73 :
74 24680336 : return cur->bc_mp->m_rtrmap_mnr[level != 0];
75 : }
76 :
77 : STATIC int
78 83648389024 : xfs_rtrmapbt_get_maxrecs(
79 : struct xfs_btree_cur *cur,
80 : int level)
81 : {
82 83648389024 : if (level == cur->bc_nlevels - 1) {
83 383655829 : struct xfs_ifork *ifp = xfs_btree_ifork_ptr(cur);
84 :
85 383659651 : return xfs_rtrmapbt_maxrecs(cur->bc_mp, ifp->if_broot_bytes,
86 : level == 0);
87 : }
88 :
89 83264733195 : return cur->bc_mp->m_rtrmap_mxr[level != 0];
90 : }
91 :
92 : /* Calculate number of records in the ondisk realtime rmap btree inode root. */
93 : unsigned int
94 0 : xfs_rtrmapbt_droot_maxrecs(
95 : unsigned int blocklen,
96 : bool leaf)
97 : {
98 1965660 : blocklen -= sizeof(struct xfs_rtrmap_root);
99 :
100 0 : if (leaf)
101 217446 : return blocklen / sizeof(struct xfs_rmap_rec);
102 1733811 : return blocklen / (2 * sizeof(struct xfs_rmap_key) +
103 : sizeof(xfs_rtrmap_ptr_t));
104 : }
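As a hedged illustration of the arithmetic above, here is a standalone userspace recomputation for a hypothetical 512-byte inode fork. The sizes are assumptions about the ondisk structures (24-byte xfs_rmap_rec, 20-byte packed xfs_rmap_key, 8-byte xfs_rtrmap_ptr_t, 4-byte xfs_rtrmap_root header), not values taken from this file.

#include <stdio.h>

/* assumed ondisk sizes, see the lead-in above */
#define RTRMAP_ROOT_HDR		4	/* bb_level + bb_numrecs */
#define RMAP_REC_SIZE		24	/* sizeof(struct xfs_rmap_rec) */
#define RMAP_KEY_SIZE		20	/* sizeof(struct xfs_rmap_key), packed */
#define RTRMAP_PTR_SIZE		8	/* sizeof(xfs_rtrmap_ptr_t) */

/* same shape as xfs_rtrmapbt_droot_maxrecs, with the sizes spelled out */
static unsigned int droot_maxrecs(unsigned int blocklen, int leaf)
{
	blocklen -= RTRMAP_ROOT_HDR;
	if (leaf)
		return blocklen / RMAP_REC_SIZE;
	return blocklen / (2 * RMAP_KEY_SIZE + RTRMAP_PTR_SIZE);
}

int main(void)
{
	/* (512 - 4) / 24 = 21 records in a leaf root */
	printf("leaf maxrecs: %u\n", droot_maxrecs(512, 1));
	/* (512 - 4) / (2 * 20 + 8) = 10 key pairs plus pointers in a node root */
	printf("node maxrecs: %u\n", droot_maxrecs(512, 0));
	return 0;
}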
105 :
106 : /*
107 : * Get the maximum records we could store in the on-disk format.
108 : *
109 : * For non-root nodes this is equivalent to xfs_rtrmapbt_get_maxrecs, but
110 : * for the root node this checks the available space in the dinode fork
111 : * so that we can resize the in-memory buffer to match it. After a
112 : * resize to the maximum size this function returns the same value
113 : * as xfs_rtrmapbt_get_maxrecs for the root node, too.
114 : */
115 : STATIC int
116 1953271 : xfs_rtrmapbt_get_dmaxrecs(
117 : struct xfs_btree_cur *cur,
118 : int level)
119 : {
120 1953271 : if (level != cur->bc_nlevels - 1)
121 2014 : return cur->bc_mp->m_rtrmap_mxr[level != 0];
122 3902514 : return xfs_rtrmapbt_droot_maxrecs(cur->bc_ino.forksize, level == 0);
123 : }
124 :
125 : /*
126 : * Convert the ondisk record's offset field into the ondisk key's offset field.
127 : * Fork and bmbt are significant parts of the rmap record key, but written
128 : * status is merely a record attribute.
129 : */
130 : static inline __be64 ondisk_rec_offset_to_key(const union xfs_btree_rec *rec)
131 : {
132 >13984*10^7 : return rec->rmap.rm_offset & ~cpu_to_be64(XFS_RMAP_OFF_UNWRITTEN);
133 : }
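A minimal userspace sketch of the masking idea described above, assuming the rm_offset flag layout from xfs_format.h (attr-fork flag in bit 63, bmbt-block flag in bit 62, unwritten flag in bit 61, the file offset in the low bits): two offsets that differ only in the unwritten flag collapse to the same key value, while a bmbt-block difference survives into the key.

#include <stdint.h>
#include <stdio.h>

/* assumed flag layout of the ondisk rm_offset field */
#define RMAP_OFF_ATTR_FORK	(1ULL << 63)
#define RMAP_OFF_BMBT_BLOCK	(1ULL << 62)
#define RMAP_OFF_UNWRITTEN	(1ULL << 61)

/* key comparisons ignore the unwritten bit and nothing else */
static uint64_t key_offset(uint64_t rm_offset)
{
	return rm_offset & ~RMAP_OFF_UNWRITTEN;
}

int main(void)
{
	uint64_t written = 100;
	uint64_t unwritten = 100 | RMAP_OFF_UNWRITTEN;
	uint64_t bmbt = 100 | RMAP_OFF_BMBT_BLOCK;

	/* prints 1: written status is merely a record attribute */
	printf("%d\n", key_offset(written) == key_offset(unwritten));
	/* prints 0: bmbt-ness is a significant part of the key */
	printf("%d\n", key_offset(written) == key_offset(bmbt));
	return 0;
}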
134 :
135 : STATIC void
136 70290819375 : xfs_rtrmapbt_init_key_from_rec(
137 : union xfs_btree_key *key,
138 : const union xfs_btree_rec *rec)
139 : {
140 70290819375 : key->rmap.rm_startblock = rec->rmap.rm_startblock;
141 70290819375 : key->rmap.rm_owner = rec->rmap.rm_owner;
142 70290819375 : key->rmap.rm_offset = ondisk_rec_offset_to_key(rec);
143 70290819375 : }
144 :
145 : STATIC void
146 69524699770 : xfs_rtrmapbt_init_high_key_from_rec(
147 : union xfs_btree_key *key,
148 : const union xfs_btree_rec *rec)
149 : {
150 69524699770 : uint64_t off;
151 69524699770 : int adj;
152 :
153 69524699770 : adj = be32_to_cpu(rec->rmap.rm_blockcount) - 1;
154 :
155 69524699770 : key->rmap.rm_startblock = rec->rmap.rm_startblock;
156 69524699770 : be32_add_cpu(&key->rmap.rm_startblock, adj);
157 69558892783 : key->rmap.rm_owner = rec->rmap.rm_owner;
158 69558892783 : key->rmap.rm_offset = ondisk_rec_offset_to_key(rec);
159 69558892783 : if (XFS_RMAP_NON_INODE_OWNER(be64_to_cpu(rec->rmap.rm_owner)) ||
160 68771242867 : XFS_RMAP_IS_BMBT_BLOCK(be64_to_cpu(rec->rmap.rm_offset)))
161 : return;
162 68771242867 : off = be64_to_cpu(key->rmap.rm_offset);
163 68771242867 : off = (XFS_RMAP_OFF(off) + adj) | (off & ~XFS_RMAP_OFF_MASK);
164 68771242867 : key->rmap.rm_offset = cpu_to_be64(off);
165 : }
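A quick worked example of the adjustment above, with illustrative numbers only: a record with rm_startblock 100, rm_blockcount 8, owner inode 133 and file offset 20 yields a high key of startblock 107 (100 + 8 - 1), owner 133, offset 27 (20 + 7); for a non-inode owner or a bmbt block the offset is left unadjusted because the offset field carries no file offset in those cases.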
166 :
167 : STATIC void
168 343928754 : xfs_rtrmapbt_init_rec_from_cur(
169 : struct xfs_btree_cur *cur,
170 : union xfs_btree_rec *rec)
171 : {
172 343928754 : rec->rmap.rm_startblock = cpu_to_be32(cur->bc_rec.r.rm_startblock);
173 343928754 : rec->rmap.rm_blockcount = cpu_to_be32(cur->bc_rec.r.rm_blockcount);
174 343928754 : rec->rmap.rm_owner = cpu_to_be64(cur->bc_rec.r.rm_owner);
175 343928754 : rec->rmap.rm_offset = cpu_to_be64(
176 : xfs_rmap_irec_offset_pack(&cur->bc_rec.r));
177 343928754 : }
178 :
179 : STATIC void
180 364835976 : xfs_rtrmapbt_init_ptr_from_cur(
181 : struct xfs_btree_cur *cur,
182 : union xfs_btree_ptr *ptr)
183 : {
184 364835976 : ptr->l = 0;
185 364835976 : }
186 :
187 : /*
188 : * Mask the appropriate parts of the ondisk key field for a key comparison.
189 : * Fork and bmbt are significant parts of the rmap record key, but written
190 : * status is merely a record attribute.
191 : */
192 : static inline uint64_t offset_keymask(uint64_t offset)
193 : {
194 969518101 : return offset & ~XFS_RMAP_OFF_UNWRITTEN;
195 : }
196 :
197 : STATIC int64_t
198 3948460669 : xfs_rtrmapbt_key_diff(
199 : struct xfs_btree_cur *cur,
200 : const union xfs_btree_key *key)
201 : {
202 3948460669 : struct xfs_rmap_irec *rec = &cur->bc_rec.r;
203 3948460669 : const struct xfs_rmap_key *kp = &key->rmap;
204 3948460669 : __u64 x, y;
205 3948460669 : int64_t d;
206 :
207 3948460669 : d = (int64_t)be32_to_cpu(kp->rm_startblock) - rec->rm_startblock;
208 3948460669 : if (d)
209 : return d;
210 :
211 313276961 : x = be64_to_cpu(kp->rm_owner);
212 313276961 : y = rec->rm_owner;
213 313276961 : if (x > y)
214 : return 1;
215 224611541 : else if (y > x)
216 : return -1;
217 :
218 121240605 : x = offset_keymask(be64_to_cpu(kp->rm_offset));
219 121240605 : y = offset_keymask(xfs_rmap_irec_offset_pack(rec));
220 121240605 : if (x > y)
221 : return 1;
222 119963877 : else if (y > x)
223 1833390 : return -1;
224 : return 0;
225 : }
226 :
227 : STATIC int64_t
228 >14473*10^7 : xfs_rtrmapbt_diff_two_keys(
229 : struct xfs_btree_cur *cur,
230 : const union xfs_btree_key *k1,
231 : const union xfs_btree_key *k2,
232 : const union xfs_btree_key *mask)
233 : {
234 >14473*10^7 : const struct xfs_rmap_key *kp1 = &k1->rmap;
235 >14473*10^7 : const struct xfs_rmap_key *kp2 = &k2->rmap;
236 >14473*10^7 : int64_t d;
237 >14473*10^7 : __u64 x, y;
238 :
239 : /* Doesn't make sense to mask off the physical space part */
240 >14473*10^7 : ASSERT(!mask || mask->rmap.rm_startblock);
241 :
242 >14473*10^7 : d = (int64_t)be32_to_cpu(kp1->rm_startblock) -
243 >14473*10^7 : be32_to_cpu(kp2->rm_startblock);
244 >14473*10^7 : if (d)
245 : return d;
246 :
247 5044965067 : if (!mask || mask->rmap.rm_owner) {
248 4576353514 : x = be64_to_cpu(kp1->rm_owner);
249 4576353514 : y = be64_to_cpu(kp2->rm_owner);
250 4576353514 : if (x > y)
251 : return 1;
252 1313256376 : else if (y > x)
253 : return -1;
254 : }
255 :
256 887601755 : if (!mask || mask->rmap.rm_offset) {
257 : /* Doesn't make sense to allow offset but not owner */
258 419122588 : ASSERT(!mask || mask->rmap.rm_owner);
259 :
260 419122588 : x = offset_keymask(be64_to_cpu(kp1->rm_offset));
261 419122588 : y = offset_keymask(be64_to_cpu(kp2->rm_offset));
262 419122588 : if (x > y)
263 : return 1;
264 305340914 : else if (y > x)
265 3221835 : return -1;
266 : }
267 :
268 : return 0;
269 : }
270 :
271 : static xfs_failaddr_t
272 22616172 : xfs_rtrmapbt_verify(
273 : struct xfs_buf *bp)
274 : {
275 22616172 : struct xfs_mount *mp = bp->b_target->bt_mount;
276 22616172 : struct xfs_btree_block *block = XFS_BUF_TO_BLOCK(bp);
277 22616172 : xfs_failaddr_t fa;
278 22616172 : int level;
279 :
280 22616172 : if (!xfs_verify_magic(bp, block->bb_magic))
281 0 : return __this_address;
282 :
283 22616129 : if (!xfs_has_rmapbt(mp))
284 0 : return __this_address;
285 22616129 : fa = xfs_btree_lblock_v5hdr_verify(bp, XFS_RMAP_OWN_UNKNOWN);
286 22616154 : if (fa)
287 : return fa;
288 22616153 : level = be16_to_cpu(block->bb_level);
289 22616153 : if (level > mp->m_rtrmap_maxlevels)
290 0 : return __this_address;
291 :
292 22616154 : return xfs_btree_lblock_verify(bp, mp->m_rtrmap_mxr[level != 0]);
293 : }
294 :
295 : static void
296 445995 : xfs_rtrmapbt_read_verify(
297 : struct xfs_buf *bp)
298 : {
299 445995 : xfs_failaddr_t fa;
300 :
301 445995 : if (!xfs_btree_lblock_verify_crc(bp))
302 0 : xfs_verifier_error(bp, -EFSBADCRC, __this_address);
303 : else {
304 445995 : fa = xfs_rtrmapbt_verify(bp);
305 445995 : if (fa)
306 0 : xfs_verifier_error(bp, -EFSCORRUPTED, fa);
307 : }
308 :
309 445995 : if (bp->b_error)
310 0 : trace_xfs_btree_corrupt(bp, _RET_IP_);
311 445995 : }
312 :
313 : static void
314 8455020 : xfs_rtrmapbt_write_verify(
315 : struct xfs_buf *bp)
316 : {
317 8455020 : xfs_failaddr_t fa;
318 :
319 8455020 : fa = xfs_rtrmapbt_verify(bp);
320 8455020 : if (fa) {
321 0 : trace_xfs_btree_corrupt(bp, _RET_IP_);
322 0 : xfs_verifier_error(bp, -EFSCORRUPTED, fa);
323 0 : return;
324 : }
325 8455020 : xfs_btree_lblock_calc_crc(bp);
326 :
327 : }
328 :
329 : const struct xfs_buf_ops xfs_rtrmapbt_buf_ops = {
330 : .name = "xfs_rtrmapbt",
331 : .magic = { 0, cpu_to_be32(XFS_RTRMAP_CRC_MAGIC) },
332 : .verify_read = xfs_rtrmapbt_read_verify,
333 : .verify_write = xfs_rtrmapbt_write_verify,
334 : .verify_struct = xfs_rtrmapbt_verify,
335 : };
336 :
337 : STATIC int
338 4871790 : xfs_rtrmapbt_keys_inorder(
339 : struct xfs_btree_cur *cur,
340 : const union xfs_btree_key *k1,
341 : const union xfs_btree_key *k2)
342 : {
343 4871790 : uint32_t x;
344 4871790 : uint32_t y;
345 4871790 : uint64_t a;
346 4871790 : uint64_t b;
347 :
348 4871790 : x = be32_to_cpu(k1->rmap.rm_startblock);
349 4871790 : y = be32_to_cpu(k2->rmap.rm_startblock);
350 4871790 : if (x < y)
351 : return 1;
352 78834 : else if (x > y)
353 : return 0;
354 78834 : a = be64_to_cpu(k1->rmap.rm_owner);
355 78834 : b = be64_to_cpu(k2->rmap.rm_owner);
356 78834 : if (a < b)
357 : return 1;
358 0 : else if (a > b)
359 : return 0;
360 0 : a = offset_keymask(be64_to_cpu(k1->rmap.rm_offset));
361 0 : b = offset_keymask(be64_to_cpu(k2->rmap.rm_offset));
362 0 : if (a <= b)
363 0 : return 1;
364 : return 0;
365 : }
366 :
367 : STATIC int
368 699233707 : xfs_rtrmapbt_recs_inorder(
369 : struct xfs_btree_cur *cur,
370 : const union xfs_btree_rec *r1,
371 : const union xfs_btree_rec *r2)
372 : {
373 699233707 : uint32_t x;
374 699233707 : uint32_t y;
375 699233707 : uint64_t a;
376 699233707 : uint64_t b;
377 :
378 699233707 : x = be32_to_cpu(r1->rmap.rm_startblock);
379 699233707 : y = be32_to_cpu(r2->rmap.rm_startblock);
380 699233707 : if (x < y)
381 : return 1;
382 96244341 : else if (x > y)
383 : return 0;
384 96244341 : a = be64_to_cpu(r1->rmap.rm_owner);
385 96244341 : b = be64_to_cpu(r2->rmap.rm_owner);
386 96244341 : if (a < b)
387 : return 1;
388 5016160 : else if (a > b)
389 : return 0;
390 5016160 : a = offset_keymask(be64_to_cpu(r1->rmap.rm_offset));
391 5016160 : b = offset_keymask(be64_to_cpu(r2->rmap.rm_offset));
392 5016160 : if (a <= b)
393 5016160 : return 1;
394 : return 0;
395 : }
396 :
397 : STATIC enum xbtree_key_contig
398 5301168572 : xfs_rtrmapbt_keys_contiguous(
399 : struct xfs_btree_cur *cur,
400 : const union xfs_btree_key *key1,
401 : const union xfs_btree_key *key2,
402 : const union xfs_btree_key *mask)
403 : {
404 5301168572 : ASSERT(!mask || mask->rmap.rm_startblock);
405 :
406 : /*
407 : * We only support checking contiguity of the physical space component.
408 : * If any callers ever need more specificity than that, they'll have to
409 : * implement it here.
410 : */
411 5301168572 : ASSERT(!mask || (!mask->rmap.rm_owner && !mask->rmap.rm_offset));
412 :
413 10602337144 : return xbtree_key_contig(be32_to_cpu(key1->rmap.rm_startblock),
414 5301168572 : be32_to_cpu(key2->rmap.rm_startblock));
415 : }
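For context, a sketch of the three-way classification the return value above feeds into; this is a paraphrase of the xbtree_key_contig idea as I understand it, not a copy of the kernel helper: the keyspace is contiguous when the first key value is exactly one below the second, and otherwise there is either a gap or an overlap.

#include <stdint.h>

enum key_contig { KEY_GAP, KEY_CONTIGUOUS, KEY_OVERLAP };

/* illustrative reimplementation; names and enum ordering are my own */
static enum key_contig classify_keys(uint32_t key1, uint32_t key2)
{
	uint64_t next = (uint64_t)key1 + 1;	/* avoid 32-bit wraparound */

	if (next < key2)
		return KEY_GAP;		/* keyspace hole between the keys */
	if (next == key2)
		return KEY_CONTIGUOUS;	/* keys butt up against each other */
	return KEY_OVERLAP;		/* keyspaces intersect */
}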
416 :
417 : /* Move the rtrmap btree root from one incore buffer to another. */
418 : static void
419 325556 : xfs_rtrmapbt_broot_move(
420 : struct xfs_inode *ip,
421 : int whichfork,
422 : struct xfs_btree_block *dst_broot,
423 : size_t dst_bytes,
424 : struct xfs_btree_block *src_broot,
425 : size_t src_bytes,
426 : unsigned int level,
427 : unsigned int numrecs)
428 : {
429 325556 : struct xfs_mount *mp = ip->i_mount;
430 325556 : void *dptr;
431 325556 : void *sptr;
432 :
433 325556 : ASSERT(xfs_rtrmap_droot_space(src_broot) <=
434 : xfs_inode_fork_size(ip, whichfork));
435 :
436 : /*
437 : * We always have to move the pointers because they are not butted
438 : * against the btree block header.
439 : */
440 325556 : if (numrecs && level > 0) {
441 1302 : sptr = xfs_rtrmap_broot_ptr_addr(mp, src_broot, 1, src_bytes);
442 1302 : dptr = xfs_rtrmap_broot_ptr_addr(mp, dst_broot, 1, dst_bytes);
443 2604 : memmove(dptr, sptr, numrecs * sizeof(xfs_fsblock_t));
444 : }
445 :
446 325556 : if (src_broot == dst_broot)
447 : return;
448 :
449 : /*
450 : * If the root is being totally relocated, we have to migrate the block
451 : * header and the keys/records that come after it.
452 : */
453 266442 : memcpy(dst_broot, src_broot, XFS_RTRMAP_BLOCK_LEN);
454 :
455 133221 : if (!numrecs)
456 : return;
457 :
458 133221 : if (level == 0) {
459 132801 : sptr = xfs_rtrmap_rec_addr(src_broot, 1);
460 132801 : dptr = xfs_rtrmap_rec_addr(dst_broot, 1);
461 265602 : memcpy(dptr, sptr, numrecs * sizeof(struct xfs_rmap_rec));
462 : } else {
463 420 : sptr = xfs_rtrmap_key_addr(src_broot, 1);
464 420 : dptr = xfs_rtrmap_key_addr(dst_broot, 1);
465 840 : memcpy(dptr, sptr, numrecs * 2 * sizeof(struct xfs_rmap_key));
466 : }
467 : }
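A hedged sketch of why the pointer move above is unconditional: in an incore inode-rooted block the keys start right after the block header, but the pointer array starts at an offset derived from the root's current byte size (room for the maximum number of key pairs that size could hold), so resizing the root shifts the pointer area even when the header and keys stay put. The helper below assumes that layout, mirroring the bmbt broot convention rather than quoting xfs_rtrmap_broot_ptr_addr itself.

#include <stddef.h>

/*
 * Illustrative only: byte offset of the pointer array inside an incore
 * root block, under the assumed layout described in the lead-in.
 */
static size_t broot_ptr_offset(size_t root_bytes, size_t hdr_bytes,
			       size_t key_bytes, size_t ptr_bytes)
{
	size_t maxrecs = (root_bytes - hdr_bytes) /
			 (2 * key_bytes + ptr_bytes);

	/* keys are butted against the header; pointers are not */
	return hdr_bytes + maxrecs * 2 * key_bytes;
}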
468 :
469 : static const struct xfs_ifork_broot_ops xfs_rtrmapbt_iroot_ops = {
470 : .maxrecs = xfs_rtrmapbt_maxrecs,
471 : .size = xfs_rtrmap_broot_space_calc,
472 : .move = xfs_rtrmapbt_broot_move,
473 : };
474 :
475 : const struct xfs_btree_ops xfs_rtrmapbt_ops = {
476 : .rec_len = sizeof(struct xfs_rmap_rec),
477 : .key_len = 2 * sizeof(struct xfs_rmap_key),
478 : .lru_refs = XFS_RMAP_BTREE_REF,
479 : .geom_flags = XFS_BTREE_LONG_PTRS | XFS_BTREE_ROOT_IN_INODE |
480 : XFS_BTREE_CRC_BLOCKS | XFS_BTREE_OVERLAPPING |
481 : XFS_BTREE_IROOT_RECORDS,
482 :
483 : .dup_cursor = xfs_rtrmapbt_dup_cursor,
484 : .alloc_block = xfs_btree_alloc_imeta_block,
485 : .free_block = xfs_btree_free_imeta_block,
486 : .get_minrecs = xfs_rtrmapbt_get_minrecs,
487 : .get_maxrecs = xfs_rtrmapbt_get_maxrecs,
488 : .get_dmaxrecs = xfs_rtrmapbt_get_dmaxrecs,
489 : .init_key_from_rec = xfs_rtrmapbt_init_key_from_rec,
490 : .init_high_key_from_rec = xfs_rtrmapbt_init_high_key_from_rec,
491 : .init_rec_from_cur = xfs_rtrmapbt_init_rec_from_cur,
492 : .init_ptr_from_cur = xfs_rtrmapbt_init_ptr_from_cur,
493 : .key_diff = xfs_rtrmapbt_key_diff,
494 : .buf_ops = &xfs_rtrmapbt_buf_ops,
495 : .diff_two_keys = xfs_rtrmapbt_diff_two_keys,
496 : .keys_inorder = xfs_rtrmapbt_keys_inorder,
497 : .recs_inorder = xfs_rtrmapbt_recs_inorder,
498 : .keys_contiguous = xfs_rtrmapbt_keys_contiguous,
499 : .iroot_ops = &xfs_rtrmapbt_iroot_ops,
500 : };
501 :
502 : /* Initialize a new rt rmap btree cursor. */
503 : static struct xfs_btree_cur *
504 123724580 : xfs_rtrmapbt_init_common(
505 : struct xfs_mount *mp,
506 : struct xfs_trans *tp,
507 : struct xfs_rtgroup *rtg,
508 : struct xfs_inode *ip)
509 : {
510 123724580 : struct xfs_btree_cur *cur;
511 :
512 123724580 : ASSERT(xfs_isilocked(ip, XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
513 :
514 123723964 : cur = xfs_btree_alloc_cursor(mp, tp, XFS_BTNUM_RTRMAP,
515 123723964 : &xfs_rtrmapbt_ops, mp->m_rtrmap_maxlevels,
516 : xfs_rtrmapbt_cur_cache);
517 123721615 : cur->bc_statoff = XFS_STATS_CALC_INDEX(xs_rmap_2);
518 :
519 123721615 : cur->bc_ino.ip = ip;
520 123721615 : cur->bc_ino.allocated = 0;
521 123721615 : cur->bc_ino.flags = 0;
522 :
523 123721615 : cur->bc_ino.rtg = xfs_rtgroup_hold(rtg);
524 123722457 : return cur;
525 : }
526 :
527 : /* Allocate a new rt rmap btree cursor. */
528 : struct xfs_btree_cur *
529 123724601 : xfs_rtrmapbt_init_cursor(
530 : struct xfs_mount *mp,
531 : struct xfs_trans *tp,
532 : struct xfs_rtgroup *rtg,
533 : struct xfs_inode *ip)
534 : {
535 123724601 : struct xfs_btree_cur *cur;
536 123724601 : struct xfs_ifork *ifp = xfs_ifork_ptr(ip, XFS_DATA_FORK);
537 :
538 123724601 : cur = xfs_rtrmapbt_init_common(mp, tp, rtg, ip);
539 123697353 : cur->bc_nlevels = be16_to_cpu(ifp->if_broot->bb_level) + 1;
540 123697353 : cur->bc_ino.forksize = xfs_inode_fork_size(ip, XFS_DATA_FORK);
541 123697353 : cur->bc_ino.whichfork = XFS_DATA_FORK;
542 123697353 : return cur;
543 : }
544 :
545 : /* Create a new rt reverse mapping btree cursor with a fake root for staging. */
546 : struct xfs_btree_cur *
547 3193 : xfs_rtrmapbt_stage_cursor(
548 : struct xfs_mount *mp,
549 : struct xfs_rtgroup *rtg,
550 : struct xfs_inode *ip,
551 : struct xbtree_ifakeroot *ifake)
552 : {
553 3193 : struct xfs_btree_cur *cur;
554 :
555 3193 : cur = xfs_rtrmapbt_init_common(mp, NULL, rtg, ip);
556 3193 : cur->bc_nlevels = ifake->if_levels;
557 3193 : cur->bc_ino.forksize = ifake->if_fork_size;
558 3193 : cur->bc_ino.whichfork = -1;
559 3193 : xfs_btree_stage_ifakeroot(cur, ifake, NULL);
560 3193 : return cur;
561 : }
562 :
563 : #ifdef CONFIG_XFS_BTREE_IN_XFILE
564 : /*
565 : * Validate an in-memory realtime rmap btree block. Callers are allowed to
566 : * generate an in-memory btree even if the ondisk feature is not enabled.
567 : */
568 : static xfs_failaddr_t
569 11569892 : xfs_rtrmapbt_mem_verify(
570 : struct xfs_buf *bp)
571 : {
572 11569892 : struct xfs_mount *mp = bp->b_mount;
573 11569892 : struct xfs_btree_block *block = XFS_BUF_TO_BLOCK(bp);
574 11569892 : xfs_failaddr_t fa;
575 11569892 : unsigned int level;
576 :
577 11569892 : if (!xfs_verify_magic(bp, block->bb_magic))
578 0 : return __this_address;
579 :
580 11569891 : fa = xfs_btree_lblock_v5hdr_verify(bp, XFS_RMAP_OWN_UNKNOWN);
581 11569891 : if (fa)
582 : return fa;
583 :
584 11569893 : level = be16_to_cpu(block->bb_level);
585 11569893 : if (xfs_has_rmapbt(mp)) {
586 11569893 : if (level >= mp->m_rtrmap_maxlevels)
587 0 : return __this_address;
588 : } else {
589 0 : if (level >= xfs_rtrmapbt_maxlevels_ondisk())
590 0 : return __this_address;
591 : }
592 :
593 11569893 : return xfbtree_lblock_verify(bp,
594 : xfs_rtrmapbt_maxrecs(mp, xfo_to_b(1), level == 0));
595 : }
596 :
597 : static void
598 3251 : xfs_rtrmapbt_mem_rw_verify(
599 : struct xfs_buf *bp)
600 : {
601 3251 : xfs_failaddr_t fa = xfs_rtrmapbt_mem_verify(bp);
602 :
603 3251 : if (fa)
604 0 : xfs_verifier_error(bp, -EFSCORRUPTED, fa);
605 3251 : }
606 :
607 : /* skip crc checks on in-memory btrees to save time */
608 : static const struct xfs_buf_ops xfs_rtrmapbt_mem_buf_ops = {
609 : .name = "xfs_rtrmapbt_mem",
610 : .magic = { 0, cpu_to_be32(XFS_RTRMAP_CRC_MAGIC) },
611 : .verify_read = xfs_rtrmapbt_mem_rw_verify,
612 : .verify_write = xfs_rtrmapbt_mem_rw_verify,
613 : .verify_struct = xfs_rtrmapbt_mem_verify,
614 : };
615 :
616 : static const struct xfs_btree_ops xfs_rtrmapbt_mem_ops = {
617 : .rec_len = sizeof(struct xfs_rmap_rec),
618 : .key_len = 2 * sizeof(struct xfs_rmap_key),
619 : .lru_refs = XFS_RMAP_BTREE_REF,
620 : .geom_flags = XFS_BTREE_CRC_BLOCKS | XFS_BTREE_OVERLAPPING |
621 : XFS_BTREE_LONG_PTRS | XFS_BTREE_IN_XFILE,
622 :
623 : .dup_cursor = xfbtree_dup_cursor,
624 : .set_root = xfbtree_set_root,
625 : .alloc_block = xfbtree_alloc_block,
626 : .free_block = xfbtree_free_block,
627 : .get_minrecs = xfbtree_get_minrecs,
628 : .get_maxrecs = xfbtree_get_maxrecs,
629 : .init_key_from_rec = xfs_rtrmapbt_init_key_from_rec,
630 : .init_high_key_from_rec = xfs_rtrmapbt_init_high_key_from_rec,
631 : .init_rec_from_cur = xfs_rtrmapbt_init_rec_from_cur,
632 : .init_ptr_from_cur = xfbtree_init_ptr_from_cur,
633 : .key_diff = xfs_rtrmapbt_key_diff,
634 : .buf_ops = &xfs_rtrmapbt_mem_buf_ops,
635 : .diff_two_keys = xfs_rtrmapbt_diff_two_keys,
636 : .keys_inorder = xfs_rtrmapbt_keys_inorder,
637 : .recs_inorder = xfs_rtrmapbt_recs_inorder,
638 : .keys_contiguous = xfs_rtrmapbt_keys_contiguous,
639 : };
640 :
641 : /* Create a cursor for an in-memory btree. */
642 : struct xfs_btree_cur *
643 6862766 : xfs_rtrmapbt_mem_cursor(
644 : struct xfs_rtgroup *rtg,
645 : struct xfs_trans *tp,
646 : struct xfs_buf *head_bp,
647 : struct xfbtree *xfbtree)
648 : {
649 6862766 : struct xfs_btree_cur *cur;
650 6862766 : struct xfs_mount *mp = rtg->rtg_mount;
651 :
652 : /* Overlapping btree; 2 keys per pointer. */
653 6862766 : cur = xfs_btree_alloc_cursor(mp, tp, XFS_BTNUM_RTRMAP,
654 6862766 : &xfs_rtrmapbt_mem_ops, mp->m_rtrmap_maxlevels,
655 : xfs_rtrmapbt_cur_cache);
656 6862767 : cur->bc_statoff = XFS_STATS_CALC_INDEX(xs_rmap_2);
657 6862767 : cur->bc_mem.xfbtree = xfbtree;
658 6862767 : cur->bc_mem.head_bp = head_bp;
659 6862767 : cur->bc_nlevels = xfs_btree_mem_head_nlevels(head_bp);
660 :
661 6862769 : cur->bc_mem.rtg = xfs_rtgroup_hold(rtg);
662 6862766 : return cur;
663 : }
664 :
665 : int
666 3249 : xfs_rtrmapbt_mem_create(
667 : struct xfs_mount *mp,
668 : xfs_rgnumber_t rgno,
669 : struct xfs_buftarg *target,
670 : struct xfbtree **xfbtreep)
671 : {
672 3249 : struct xfbtree_config cfg = {
673 : .btree_ops = &xfs_rtrmapbt_mem_ops,
674 : .target = target,
675 : .flags = XFBTREE_DIRECT_MAP,
676 : .owner = rgno,
677 : };
678 :
679 3249 : return xfbtree_create(mp, &cfg, xfbtreep);
680 : }
681 : #endif /* CONFIG_XFS_BTREE_IN_XFILE */
682 :
683 : /*
684 : * Install a new rt reverse mapping btree root. Caller is responsible for
685 : * invalidating and freeing the old btree blocks.
686 : */
687 : void
688 3192 : xfs_rtrmapbt_commit_staged_btree(
689 : struct xfs_btree_cur *cur,
690 : struct xfs_trans *tp)
691 : {
692 3192 : struct xbtree_ifakeroot *ifake = cur->bc_ino.ifake;
693 3192 : struct xfs_ifork *ifp;
694 3192 : int flags = XFS_ILOG_CORE | XFS_ILOG_DBROOT;
695 :
696 3192 : ASSERT(cur->bc_flags & XFS_BTREE_STAGING);
697 3192 : ASSERT(ifake->if_fork->if_format == XFS_DINODE_FMT_RMAP);
698 :
699 : /*
700 : * Free any resources hanging off the real fork, then shallow-copy the
701 : * staging fork's contents into the real fork to transfer everything
702 : * we just built.
703 : */
704 3192 : ifp = xfs_ifork_ptr(cur->bc_ino.ip, XFS_DATA_FORK);
705 3192 : xfs_idestroy_fork(ifp);
706 6386 : memcpy(ifp, ifake->if_fork, sizeof(struct xfs_ifork));
707 :
708 3193 : xfs_trans_log_inode(tp, cur->bc_ino.ip, flags);
709 3192 : xfs_btree_commit_ifakeroot(cur, tp, XFS_DATA_FORK, &xfs_rtrmapbt_ops);
710 3192 : }
711 :
712 : /* Calculate number of records in a rt reverse mapping btree block. */
713 : static inline unsigned int
714 : xfs_rtrmapbt_block_maxrecs(
715 : unsigned int blocklen,
716 : bool leaf)
717 : {
718 395658070 : if (leaf)
719 581823 : return blocklen / sizeof(struct xfs_rmap_rec);
720 385848067 : return blocklen /
721 : (2 * sizeof(struct xfs_rmap_key) + sizeof(xfs_rtrmap_ptr_t));
722 : }
723 :
724 : /*
725 : * Calculate number of records in an rt reverse mapping btree block.
726 : */
727 : unsigned int
728 374242 : xfs_rtrmapbt_maxrecs(
729 : struct xfs_mount *mp,
730 : unsigned int blocklen,
731 : bool leaf)
732 : {
733 395658070 : blocklen -= XFS_RTRMAP_BLOCK_LEN;
734 395658070 : return xfs_rtrmapbt_block_maxrecs(blocklen, leaf);
735 : }
736 :
737 : /* Compute the max possible height for realtime reverse mapping btrees. */
738 : unsigned int
739 12 : xfs_rtrmapbt_maxlevels_ondisk(void)
740 : {
741 12 : unsigned long long max_dblocks;
742 12 : unsigned int minrecs[2];
743 12 : unsigned int blocklen;
744 :
745 12 : blocklen = XFS_MIN_CRC_BLOCKSIZE - XFS_BTREE_LBLOCK_CRC_LEN;
746 :
747 12 : minrecs[0] = xfs_rtrmapbt_block_maxrecs(blocklen, true) / 2;
748 12 : minrecs[1] = xfs_rtrmapbt_block_maxrecs(blocklen, false) / 2;
749 :
750 : /*
751 : * Compute the asymptotic maxlevels for an rtrmapbt on any rtreflink fs.
752 : *
753 : * On a reflink filesystem, each block in an rtgroup can have up to
754 : * 2^32 (per the refcount record format) owners, which means that
755 : * theoretically we could face up to 2^64 rmap records. However, we're
756 : * likely to run out of blocks in the data device long before that
757 : * happens, which means that we must compute the max height based on
758 : * what the btree will look like if it consumes almost all the blocks
759 : * in the data device due to maximal sharing factor.
760 : */
761 12 : max_dblocks = -1U; /* max ag count */
762 12 : max_dblocks *= XFS_MAX_CRC_AG_BLOCKS;
763 12 : return xfs_btree_space_to_height(minrecs, max_dblocks);
764 : }
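The block count passed in above is turned into a height by xfs_btree_space_to_height; as a hedged, standalone illustration of that idea (not the kernel implementation), the estimate below grows the tree one level at a time until the block budget is exhausted, using minrecs so the height bound stays pessimistic.

/* userspace sketch: worst-case btree height for a given block budget */
static unsigned int space_to_height(const unsigned int minrecs[2],
				    unsigned long long blocks)
{
	unsigned long long level_blocks = minrecs[1];	/* first node level */
	unsigned long long blocks_left;
	unsigned int height = 1;

	if (blocks < 1)
		return 0;

	blocks_left = blocks - 1;	/* the root consumes one block */
	while (level_blocks < blocks_left) {
		blocks_left -= level_blocks;
		level_blocks *= minrecs[1];
		height++;
	}
	return height;
}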
765 :
766 : int __init
767 12 : xfs_rtrmapbt_init_cur_cache(void)
768 : {
769 12 : xfs_rtrmapbt_cur_cache = kmem_cache_create("xfs_rtrmapbt_cur",
770 12 : xfs_btree_cur_sizeof(xfs_rtrmapbt_maxlevels_ondisk()),
771 : 0, 0, NULL);
772 :
773 12 : if (!xfs_rtrmapbt_cur_cache)
774 0 : return -ENOMEM;
775 : return 0;
776 : }
777 :
778 : void
779 12 : xfs_rtrmapbt_destroy_cur_cache(void)
780 : {
781 12 : kmem_cache_destroy(xfs_rtrmapbt_cur_cache);
782 12 : xfs_rtrmapbt_cur_cache = NULL;
783 12 : }
784 :
785 : /* Compute the maximum height of an rt reverse mapping btree. */
786 : void
787 24771 : xfs_rtrmapbt_compute_maxlevels(
788 : struct xfs_mount *mp)
789 : {
790 24771 : unsigned int d_maxlevels, r_maxlevels;
791 :
792 24771 : if (!xfs_has_rtrmapbt(mp)) {
793 24525 : mp->m_rtrmap_maxlevels = 0;
794 24525 : return;
795 : }
796 :
797 : /*
798 : * The realtime rmapbt lives on the data device, which means that its
799 : * maximum height is constrained by the size of the data device and
800 : * the height required to store one rmap record for each block in an
801 : * rt group.
802 : *
803 : * On a reflink filesystem, each rt block can have up to 2^32 (per the
804 : * refcount record format) owners, which means that theoretically we
805 : * could face up to 2^64 rmap records. This makes the computation of
806 : * maxlevels based on record count meaningless, so we only consider the
807 : * size of the data device.
808 : */
809 246 : d_maxlevels = xfs_btree_space_to_height(mp->m_rtrmap_mnr,
810 : mp->m_sb.sb_dblocks);
811 246 : if (xfs_has_rtreflink(mp)) {
812 246 : mp->m_rtrmap_maxlevels = d_maxlevels + 1;
813 246 : return;
814 : }
815 :
816 0 : r_maxlevels = xfs_btree_compute_maxlevels(mp->m_rtrmap_mnr,
817 0 : mp->m_sb.sb_rgblocks);
818 :
819 : /* Add one level to handle the inode root level. */
820 0 : mp->m_rtrmap_maxlevels = min(d_maxlevels, r_maxlevels) + 1;
821 : }
822 :
823 : #define XFS_RTRMAP_NAMELEN 17
824 :
825 : /* Create the metadata directory path for an rtrmap btree inode. */
826 : int
827 1221 : xfs_rtrmapbt_create_path(
828 : struct xfs_mount *mp,
829 : xfs_rgnumber_t rgno,
830 : struct xfs_imeta_path **pathp)
831 : {
832 1221 : struct xfs_imeta_path *path;
833 1221 : unsigned char *fname;
834 1221 : int error;
835 :
836 1221 : error = xfs_imeta_create_file_path(mp, 2, &path);
837 1221 : if (error)
838 : return error;
839 :
840 1221 : fname = kmalloc(XFS_RTRMAP_NAMELEN, GFP_KERNEL);
841 1221 : if (!fname) {
842 0 : xfs_imeta_free_path(path);
843 0 : return -ENOMEM;
844 : }
845 :
846 1221 : snprintf(fname, XFS_RTRMAP_NAMELEN, "%u.rmap", rgno);
847 1221 : path->im_path[0] = "realtime";
848 1221 : path->im_path[1] = fname;
849 1221 : path->im_dynamicmask = 0x2;
850 1221 : *pathp = path;
851 1221 : return 0;
852 : }
853 :
854 : /* Calculate the rtrmap btree size for some records. */
855 : unsigned long long
856 43546 : xfs_rtrmapbt_calc_size(
857 : struct xfs_mount *mp,
858 : unsigned long long len)
859 : {
860 43546 : return xfs_btree_calc_size(mp->m_rtrmap_mnr, len);
861 : }
862 :
863 : /*
864 : * Calculate the maximum rmap btree size.
865 : */
866 : static unsigned long long
867 : xfs_rtrmapbt_max_size(
868 : struct xfs_mount *mp,
869 : xfs_rtblock_t rtblocks)
870 : {
871 : /* Bail out if we're uninitialized, which can happen in mkfs. */
872 2876 : if (mp->m_rtrmap_mxr[0] == 0)
873 : return 0;
874 :
875 2876 : return xfs_rtrmapbt_calc_size(mp, rtblocks);
876 : }
877 :
878 : /*
879 : * Figure out how many blocks to reserve and how many are used by this btree.
880 : */
881 : xfs_filblks_t
882 2876 : xfs_rtrmapbt_calc_reserves(
883 : struct xfs_mount *mp)
884 : {
885 2876 : if (!xfs_has_rtrmapbt(mp))
886 : return 0;
887 :
888 : /* 1/64th (~1.5%) of the space, and enough for 1 record per block. */
889 5752 : return max_t(xfs_filblks_t, mp->m_sb.sb_rgblocks >> 6,
890 : xfs_rtrmapbt_max_size(mp, mp->m_sb.sb_rgblocks));
891 : }
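As a worked example of the reservation rule above (illustrative numbers only): for an rt group of 2^20 blocks, the 1/64th floor is 16384 blocks, so the reservation is 16384 unless xfs_rtrmapbt_max_size reports that a btree holding one record per rtgroup block would need more than that, in which case the larger estimate wins.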
892 :
893 : /* Convert on-disk form of btree root to in-memory form. */
894 : STATIC void
895 1211 : xfs_rtrmapbt_from_disk(
896 : struct xfs_inode *ip,
897 : struct xfs_rtrmap_root *dblock,
898 : unsigned int dblocklen,
899 : struct xfs_btree_block *rblock)
900 : {
901 1211 : struct xfs_mount *mp = ip->i_mount;
902 1211 : struct xfs_rmap_key *fkp;
903 1211 : __be64 *fpp;
904 1211 : struct xfs_rmap_key *tkp;
905 1211 : __be64 *tpp;
906 1211 : struct xfs_rmap_rec *frp;
907 1211 : struct xfs_rmap_rec *trp;
908 1211 : unsigned int rblocklen = xfs_rtrmap_broot_space(mp, dblock);
909 1211 : unsigned int numrecs;
910 1211 : unsigned int maxrecs;
911 :
912 1211 : xfs_btree_init_block(mp, rblock, &xfs_rtrmapbt_ops, 0, 0, ip->i_ino);
913 :
914 1211 : rblock->bb_level = dblock->bb_level;
915 1211 : rblock->bb_numrecs = dblock->bb_numrecs;
916 1211 : numrecs = be16_to_cpu(dblock->bb_numrecs);
917 :
918 1211 : if (be16_to_cpu(rblock->bb_level) > 0) {
919 255 : maxrecs = xfs_rtrmapbt_droot_maxrecs(dblocklen, false);
920 255 : fkp = xfs_rtrmap_droot_key_addr(dblock, 1);
921 255 : tkp = xfs_rtrmap_key_addr(rblock, 1);
922 255 : fpp = xfs_rtrmap_droot_ptr_addr(dblock, 1, maxrecs);
923 255 : tpp = xfs_rtrmap_broot_ptr_addr(mp, rblock, 1, rblocklen);
924 510 : memcpy(tkp, fkp, 2 * sizeof(*fkp) * numrecs);
925 510 : memcpy(tpp, fpp, sizeof(*fpp) * numrecs);
926 : } else {
927 956 : frp = xfs_rtrmap_droot_rec_addr(dblock, 1);
928 956 : trp = xfs_rtrmap_rec_addr(rblock, 1);
929 1912 : memcpy(trp, frp, sizeof(*frp) * numrecs);
930 : }
931 1211 : }
932 :
933 : /* Load a realtime reverse mapping btree root in from disk. */
934 : int
935 1211 : xfs_iformat_rtrmap(
936 : struct xfs_inode *ip,
937 : struct xfs_dinode *dip)
938 : {
939 1211 : struct xfs_mount *mp = ip->i_mount;
940 1211 : struct xfs_ifork *ifp = xfs_ifork_ptr(ip, XFS_DATA_FORK);
941 1211 : struct xfs_rtrmap_root *dfp = XFS_DFORK_PTR(dip, XFS_DATA_FORK);
942 1211 : unsigned int numrecs;
943 1211 : unsigned int level;
944 1211 : int dsize;
945 :
946 1211 : dsize = XFS_DFORK_SIZE(dip, mp, XFS_DATA_FORK);
947 1211 : numrecs = be16_to_cpu(dfp->bb_numrecs);
948 1211 : level = be16_to_cpu(dfp->bb_level);
949 :
950 1211 : if (level > mp->m_rtrmap_maxlevels ||
951 1211 : xfs_rtrmap_droot_space_calc(level, numrecs) > dsize) {
952 0 : xfs_inode_mark_sick(ip, XFS_SICK_INO_CORE);
953 0 : return -EFSCORRUPTED;
954 : }
955 :
956 2422 : xfs_iroot_alloc(ip, XFS_DATA_FORK,
957 : xfs_rtrmap_broot_space_calc(mp, level, numrecs));
958 1211 : xfs_rtrmapbt_from_disk(ip, dfp, dsize, ifp->if_broot);
959 1211 : return 0;
960 : }
961 :
962 : /* Convert in-memory form of btree root to on-disk form. */
963 : void
964 18095 : xfs_rtrmapbt_to_disk(
965 : struct xfs_mount *mp,
966 : struct xfs_btree_block *rblock,
967 : unsigned int rblocklen,
968 : struct xfs_rtrmap_root *dblock,
969 : unsigned int dblocklen)
970 : {
971 18095 : struct xfs_rmap_key *fkp;
972 18095 : __be64 *fpp;
973 18095 : struct xfs_rmap_key *tkp;
974 18095 : __be64 *tpp;
975 18095 : struct xfs_rmap_rec *frp;
976 18095 : struct xfs_rmap_rec *trp;
977 18095 : unsigned int numrecs;
978 18095 : unsigned int maxrecs;
979 :
980 18095 : ASSERT(rblock->bb_magic == cpu_to_be32(XFS_RTRMAP_CRC_MAGIC));
981 18095 : ASSERT(uuid_equal(&rblock->bb_u.l.bb_uuid, &mp->m_sb.sb_meta_uuid));
982 18095 : ASSERT(rblock->bb_u.l.bb_blkno == cpu_to_be64(XFS_BUF_DADDR_NULL));
983 18095 : ASSERT(rblock->bb_u.l.bb_leftsib == cpu_to_be64(NULLFSBLOCK));
984 18095 : ASSERT(rblock->bb_u.l.bb_rightsib == cpu_to_be64(NULLFSBLOCK));
985 :
986 18095 : dblock->bb_level = rblock->bb_level;
987 18095 : dblock->bb_numrecs = rblock->bb_numrecs;
988 18095 : numrecs = be16_to_cpu(rblock->bb_numrecs);
989 :
990 18095 : if (be16_to_cpu(rblock->bb_level) > 0) {
991 14148 : maxrecs = xfs_rtrmapbt_droot_maxrecs(dblocklen, false);
992 14148 : fkp = xfs_rtrmap_key_addr(rblock, 1);
993 14148 : tkp = xfs_rtrmap_droot_key_addr(dblock, 1);
994 14148 : fpp = xfs_rtrmap_broot_ptr_addr(mp, rblock, 1, rblocklen);
995 14148 : tpp = xfs_rtrmap_droot_ptr_addr(dblock, 1, maxrecs);
996 28296 : memcpy(tkp, fkp, 2 * sizeof(*fkp) * numrecs);
997 28296 : memcpy(tpp, fpp, sizeof(*fpp) * numrecs);
998 : } else {
999 3947 : frp = xfs_rtrmap_rec_addr(rblock, 1);
1000 3947 : trp = xfs_rtrmap_droot_rec_addr(dblock, 1);
1001 7894 : memcpy(trp, frp, sizeof(*frp) * numrecs);
1002 : }
1003 18095 : }
1004 :
1005 : /* Flush a realtime reverse mapping btree root out to disk. */
1006 : void
1007 18095 : xfs_iflush_rtrmap(
1008 : struct xfs_inode *ip,
1009 : struct xfs_dinode *dip)
1010 : {
1011 18095 : struct xfs_ifork *ifp = xfs_ifork_ptr(ip, XFS_DATA_FORK);
1012 18095 : struct xfs_rtrmap_root *dfp = XFS_DFORK_PTR(dip, XFS_DATA_FORK);
1013 :
1014 18095 : ASSERT(ifp->if_broot != NULL);
1015 18095 : ASSERT(ifp->if_broot_bytes > 0);
1016 18095 : ASSERT(xfs_rtrmap_droot_space(ifp->if_broot) <=
1017 : xfs_inode_fork_size(ip, XFS_DATA_FORK));
1018 18095 : xfs_rtrmapbt_to_disk(ip->i_mount, ifp->if_broot, ifp->if_broot_bytes,
1019 18095 : dfp, XFS_DFORK_SIZE(dip, ip->i_mount, XFS_DATA_FORK));
1020 18095 : }
1021 :
1022 : /*
1023 : * Create a realtime rmap btree inode.
1024 : *
1025 : * Regardless of the return value, the caller must clean up @upd. If a new
1026 : * inode is returned through @*ipp, the caller must finish setting up the incore
1027 : * inode and release it.
1028 : */
1029 : int
1030 10 : xfs_rtrmapbt_create(
1031 : struct xfs_imeta_update *upd,
1032 : struct xfs_inode **ipp)
1033 : {
1034 10 : struct xfs_mount *mp = upd->mp;
1035 10 : struct xfs_ifork *ifp;
1036 10 : int error;
1037 :
1038 10 : error = xfs_imeta_create(upd, S_IFREG, ipp);
1039 10 : if (error)
1040 : return error;
1041 :
1042 10 : ifp = xfs_ifork_ptr(upd->ip, XFS_DATA_FORK);
1043 10 : ifp->if_format = XFS_DINODE_FMT_RMAP;
1044 10 : ASSERT(ifp->if_broot_bytes == 0);
1045 10 : ASSERT(ifp->if_bytes == 0);
1046 :
1047 : /* Initialize the empty incore btree root. */
1048 10 : xfs_iroot_alloc(upd->ip, XFS_DATA_FORK,
1049 : xfs_rtrmap_broot_space_calc(mp, 0, 0));
1050 10 : xfs_btree_init_block(mp, ifp->if_broot, &xfs_rtrmapbt_ops, 0, 0,
1051 10 : upd->ip->i_ino);
1052 10 : xfs_trans_log_inode(upd->tp, upd->ip, XFS_ILOG_CORE | XFS_ILOG_DBROOT);
1053 10 : return 0;
1054 : }
|