Line data Source code
1 : // SPDX-License-Identifier: GPL-2.0-or-later
2 : /*
3 : * Copyright (C) 2018-2023 Oracle. All Rights Reserved.
4 : * Author: Darrick J. Wong <djwong@kernel.org>
5 : */
6 : #include "xfs.h"
7 : #include "xfs_fs.h"
8 : #include "xfs_shared.h"
9 : #include "xfs_format.h"
10 : #include "xfs_log_format.h"
11 : #include "xfs_trans_resv.h"
12 : #include "xfs_bit.h"
13 : #include "xfs_sb.h"
14 : #include "xfs_mount.h"
15 : #include "xfs_defer.h"
16 : #include "xfs_inode.h"
17 : #include "xfs_trans.h"
18 : #include "xfs_alloc.h"
19 : #include "xfs_btree.h"
20 : #include "xfs_btree_staging.h"
21 : #include "xfs_imeta.h"
22 : #include "xfs_rmap.h"
23 : #include "xfs_rtrmap_btree.h"
24 : #include "xfs_trace.h"
25 : #include "xfs_cksum.h"
26 : #include "xfs_error.h"
27 : #include "xfs_extent_busy.h"
28 : #include "xfs_rtgroup.h"
29 : #include "xfs_bmap.h"
30 : #include "xfs_health.h"
31 : #include "scrub/xfile.h"
32 : #include "scrub/xfbtree.h"
33 : #include "xfs_btree_mem.h"
34 :
35 : static struct kmem_cache *xfs_rtrmapbt_cur_cache;
36 :
37 : /*
38 : * Realtime Reverse Map btree.
39 : *
40 : * This is a btree used to track the owner(s) of a given extent in the realtime
41 : * device. See the comments in xfs_rmap_btree.c for more information.
42 : *
43 : * This tree is basically the same as the regular rmap btree except that it
44 : * is rooted in an inode and does not live in free space.
45 : */
46 :
47 : static struct xfs_btree_cur *
48 62804846 : xfs_rtrmapbt_dup_cursor(
49 : struct xfs_btree_cur *cur)
50 : {
51 62804846 : struct xfs_btree_cur *new;
52 :
53 62804846 : new = xfs_rtrmapbt_init_cursor(cur->bc_mp, cur->bc_tp, cur->bc_ino.rtg,
54 : cur->bc_ino.ip);
55 :
56 : /* Copy the flags values since init cursor doesn't get them. */
57 62804970 : new->bc_ino.flags = cur->bc_ino.flags;
58 :
59 62804970 : return new;
60 : }
61 :
62 : STATIC int
63 105415060 : xfs_rtrmapbt_get_minrecs(
64 : struct xfs_btree_cur *cur,
65 : int level)
66 : {
67 105415060 : if (level == cur->bc_nlevels - 1) {
68 102396 : struct xfs_ifork *ifp = xfs_btree_ifork_ptr(cur);
69 :
70 102396 : return xfs_rtrmapbt_maxrecs(cur->bc_mp, ifp->if_broot_bytes,
71 102396 : level == 0) / 2;
72 : }
73 :
74 105312664 : return cur->bc_mp->m_rtrmap_mnr[level != 0];
75 : }
76 :
77 : STATIC int
78 >25708*10^7 : xfs_rtrmapbt_get_maxrecs(
79 : struct xfs_btree_cur *cur,
80 : int level)
81 : {
82 >25708*10^7 : if (level == cur->bc_nlevels - 1) {
83 1280642124 : struct xfs_ifork *ifp = xfs_btree_ifork_ptr(cur);
84 :
85 1280681085 : return xfs_rtrmapbt_maxrecs(cur->bc_mp, ifp->if_broot_bytes,
86 : level == 0);
87 : }
88 :
89 >25580*10^7 : return cur->bc_mp->m_rtrmap_mxr[level != 0];
90 : }
91 :
92 : /* Calculate number of records in the ondisk realtime rmap btree inode root. */
93 : unsigned int
94 0 : xfs_rtrmapbt_droot_maxrecs(
95 : unsigned int blocklen,
96 : bool leaf)
97 : {
98 6746775 : blocklen -= sizeof(struct xfs_rtrmap_root);
99 :
100 0 : if (leaf)
101 760511 : return blocklen / sizeof(struct xfs_rmap_rec);
102 5936599 : return blocklen / (2 * sizeof(struct xfs_rmap_key) +
103 : sizeof(xfs_rtrmap_ptr_t));
104 : }
105 :
106 : /*
107 : * Get the maximum records we could store in the on-disk format.
108 : *
109 : * For non-root nodes this is equivalent to xfs_rtrmapbt_get_maxrecs, but
110 : * for the root node this checks the available space in the dinode fork
111 : * so that we can resize the in-memory buffer to match it. After a
112 : * resize to the maximum size this function returns the same value
113 : * as xfs_rtrmapbt_get_maxrecs for the root node, too.
114 : */
115 : STATIC int
116 6708138 : xfs_rtrmapbt_get_dmaxrecs(
117 : struct xfs_btree_cur *cur,
118 : int level)
119 : {
120 6708138 : if (level != cur->bc_nlevels - 1)
121 11028 : return cur->bc_mp->m_rtrmap_mxr[level != 0];
122 13394220 : return xfs_rtrmapbt_droot_maxrecs(cur->bc_ino.forksize, level == 0);
123 : }
124 :
125 : /*
126 : * Convert the ondisk record's offset field into the ondisk key's offset field.
127 : * Fork and bmbt are significant parts of the rmap record key, but written
128 : * status is merely a record attribute.
129 : */
130 : static inline __be64 ondisk_rec_offset_to_key(const union xfs_btree_rec *rec)
131 : {
132 >28668*10^7 : return rec->rmap.rm_offset & ~cpu_to_be64(XFS_RMAP_OFF_UNWRITTEN);
133 : }
134 :
135 : STATIC void
136 >16978*10^7 : xfs_rtrmapbt_init_key_from_rec(
137 : union xfs_btree_key *key,
138 : const union xfs_btree_rec *rec)
139 : {
140 >16978*10^7 : key->rmap.rm_startblock = rec->rmap.rm_startblock;
141 >16978*10^7 : key->rmap.rm_owner = rec->rmap.rm_owner;
142 >16978*10^7 : key->rmap.rm_offset = ondisk_rec_offset_to_key(rec);
143 >16978*10^7 : }
144 :
145 : STATIC void
146 >11690*10^7 : xfs_rtrmapbt_init_high_key_from_rec(
147 : union xfs_btree_key *key,
148 : const union xfs_btree_rec *rec)
149 : {
150 >11690*10^7 : uint64_t off;
151 >11690*10^7 : int adj;
152 :
153 >11690*10^7 : adj = be32_to_cpu(rec->rmap.rm_blockcount) - 1;
154 :
155 >11690*10^7 : key->rmap.rm_startblock = rec->rmap.rm_startblock;
156 >11690*10^7 : be32_add_cpu(&key->rmap.rm_startblock, adj);
157 >11690*10^7 : key->rmap.rm_owner = rec->rmap.rm_owner;
158 >11690*10^7 : key->rmap.rm_offset = ondisk_rec_offset_to_key(rec);
159 >11690*10^7 : if (XFS_RMAP_NON_INODE_OWNER(be64_to_cpu(rec->rmap.rm_owner)) ||
160 >11559*10^7 : XFS_RMAP_IS_BMBT_BLOCK(be64_to_cpu(rec->rmap.rm_offset)))
161 : return;
162 >11559*10^7 : off = be64_to_cpu(key->rmap.rm_offset);
163 >11559*10^7 : off = (XFS_RMAP_OFF(off) + adj) | (off & ~XFS_RMAP_OFF_MASK);
164 >11559*10^7 : key->rmap.rm_offset = cpu_to_be64(off);
165 : }
166 :
167 : STATIC void
168 1057093614 : xfs_rtrmapbt_init_rec_from_cur(
169 : struct xfs_btree_cur *cur,
170 : union xfs_btree_rec *rec)
171 : {
172 1057093614 : rec->rmap.rm_startblock = cpu_to_be32(cur->bc_rec.r.rm_startblock);
173 1057093614 : rec->rmap.rm_blockcount = cpu_to_be32(cur->bc_rec.r.rm_blockcount);
174 1057093614 : rec->rmap.rm_owner = cpu_to_be64(cur->bc_rec.r.rm_owner);
175 1057093614 : rec->rmap.rm_offset = cpu_to_be64(
176 : xfs_rmap_irec_offset_pack(&cur->bc_rec.r));
177 1057093614 : }
178 :
179 : STATIC void
180 1203076361 : xfs_rtrmapbt_init_ptr_from_cur(
181 : struct xfs_btree_cur *cur,
182 : union xfs_btree_ptr *ptr)
183 : {
184 1203076361 : ptr->l = 0;
185 1203076361 : }
186 :
187 : /*
188 : * Mask the appropriate parts of the ondisk key field for a key comparison.
189 : * Fork and bmbt are significant parts of the rmap record key, but written
190 : * status is merely a record attribute.
191 : */
192 : static inline uint64_t offset_keymask(uint64_t offset)
193 : {
194 10019400885 : return offset & ~XFS_RMAP_OFF_UNWRITTEN;
195 : }
196 :
197 : STATIC int64_t
198 16575666540 : xfs_rtrmapbt_key_diff(
199 : struct xfs_btree_cur *cur,
200 : const union xfs_btree_key *key)
201 : {
202 16575666540 : struct xfs_rmap_irec *rec = &cur->bc_rec.r;
203 16575666540 : const struct xfs_rmap_key *kp = &key->rmap;
204 16575666540 : __u64 x, y;
205 16575666540 : int64_t d;
206 :
207 16575666540 : d = (int64_t)be32_to_cpu(kp->rm_startblock) - rec->rm_startblock;
208 16575666540 : if (d)
209 : return d;
210 :
211 3388675874 : x = be64_to_cpu(kp->rm_owner);
212 3388675874 : y = rec->rm_owner;
213 3388675874 : if (x > y)
214 : return 1;
215 2952107173 : else if (y > x)
216 : return -1;
217 :
218 1525974682 : x = offset_keymask(be64_to_cpu(kp->rm_offset));
219 1525974682 : y = offset_keymask(xfs_rmap_irec_offset_pack(rec));
220 1525974682 : if (x > y)
221 : return 1;
222 1288794199 : else if (y > x)
223 854881581 : return -1;
224 : return 0;
225 : }
226 :
227 : STATIC int64_t
228 >31417*10^7 : xfs_rtrmapbt_diff_two_keys(
229 : struct xfs_btree_cur *cur,
230 : const union xfs_btree_key *k1,
231 : const union xfs_btree_key *k2,
232 : const union xfs_btree_key *mask)
233 : {
234 >31417*10^7 : const struct xfs_rmap_key *kp1 = &k1->rmap;
235 >31417*10^7 : const struct xfs_rmap_key *kp2 = &k2->rmap;
236 >31417*10^7 : int64_t d;
237 >31417*10^7 : __u64 x, y;
238 :
239 : /* Doesn't make sense to mask off the physical space part */
240 >31417*10^7 : ASSERT(!mask || mask->rmap.rm_startblock);
241 :
242 >31417*10^7 : d = (int64_t)be32_to_cpu(kp1->rm_startblock) -
243 >31417*10^7 : be32_to_cpu(kp2->rm_startblock);
244 >31417*10^7 : if (d)
245 : return d;
246 :
247 28837740188 : if (!mask || mask->rmap.rm_owner) {
248 28105028794 : x = be64_to_cpu(kp1->rm_owner);
249 28105028794 : y = be64_to_cpu(kp2->rm_owner);
250 28105028794 : if (x > y)
251 : return 1;
252 10741391646 : else if (y > x)
253 : return -1;
254 : }
255 :
256 9183237407 : if (!mask || mask->rmap.rm_offset) {
257 : /* Doesn't make sense to allow offset but not owner */
258 8452568032 : ASSERT(!mask || mask->rmap.rm_owner);
259 :
260 8452568032 : x = offset_keymask(be64_to_cpu(kp1->rm_offset));
261 8452568032 : y = offset_keymask(be64_to_cpu(kp2->rm_offset));
262 8452568032 : if (x > y)
263 : return 1;
264 1456310706 : else if (y > x)
265 82301171 : return -1;
266 : }
267 :
268 : return 0;
269 : }
270 :
271 : static xfs_failaddr_t
272 27372511 : xfs_rtrmapbt_verify(
273 : struct xfs_buf *bp)
274 : {
275 27372511 : struct xfs_mount *mp = bp->b_target->bt_mount;
276 27372511 : struct xfs_btree_block *block = XFS_BUF_TO_BLOCK(bp);
277 27372511 : xfs_failaddr_t fa;
278 27372511 : int level;
279 :
280 27372511 : if (!xfs_verify_magic(bp, block->bb_magic))
281 0 : return __this_address;
282 :
283 27372414 : if (!xfs_has_rmapbt(mp))
284 0 : return __this_address;
285 27372414 : fa = xfs_btree_lblock_v5hdr_verify(bp, XFS_RMAP_OWN_UNKNOWN);
286 27372376 : if (fa)
287 : return fa;
288 27372394 : level = be16_to_cpu(block->bb_level);
289 27372394 : if (level > mp->m_rtrmap_maxlevels)
290 0 : return __this_address;
291 :
292 27372394 : return xfs_btree_lblock_verify(bp, mp->m_rtrmap_mxr[level != 0]);
293 : }
294 :
295 : static void
296 232795 : xfs_rtrmapbt_read_verify(
297 : struct xfs_buf *bp)
298 : {
299 232795 : xfs_failaddr_t fa;
300 :
301 232795 : if (!xfs_btree_lblock_verify_crc(bp))
302 2 : xfs_verifier_error(bp, -EFSBADCRC, __this_address);
303 : else {
304 232793 : fa = xfs_rtrmapbt_verify(bp);
305 232793 : if (fa)
306 0 : xfs_verifier_error(bp, -EFSCORRUPTED, fa);
307 : }
308 :
309 232795 : if (bp->b_error)
310 2 : trace_xfs_btree_corrupt(bp, _RET_IP_);
311 232795 : }
312 :
313 : static void
314 9958652 : xfs_rtrmapbt_write_verify(
315 : struct xfs_buf *bp)
316 : {
317 9958652 : xfs_failaddr_t fa;
318 :
319 9958652 : fa = xfs_rtrmapbt_verify(bp);
320 9958650 : if (fa) {
321 0 : trace_xfs_btree_corrupt(bp, _RET_IP_);
322 0 : xfs_verifier_error(bp, -EFSCORRUPTED, fa);
323 0 : return;
324 : }
325 9958650 : xfs_btree_lblock_calc_crc(bp);
326 :
327 : }
328 :
329 : const struct xfs_buf_ops xfs_rtrmapbt_buf_ops = {
330 : .name = "xfs_rtrmapbt",
331 : .magic = { 0, cpu_to_be32(XFS_RTRMAP_CRC_MAGIC) },
332 : .verify_read = xfs_rtrmapbt_read_verify,
333 : .verify_write = xfs_rtrmapbt_write_verify,
334 : .verify_struct = xfs_rtrmapbt_verify,
335 : };
336 :
337 : STATIC int
338 7716350 : xfs_rtrmapbt_keys_inorder(
339 : struct xfs_btree_cur *cur,
340 : const union xfs_btree_key *k1,
341 : const union xfs_btree_key *k2)
342 : {
343 7716350 : uint32_t x;
344 7716350 : uint32_t y;
345 7716350 : uint64_t a;
346 7716350 : uint64_t b;
347 :
348 7716350 : x = be32_to_cpu(k1->rmap.rm_startblock);
349 7716350 : y = be32_to_cpu(k2->rmap.rm_startblock);
350 7716350 : if (x < y)
351 : return 1;
352 841952 : else if (x > y)
353 : return 0;
354 841952 : a = be64_to_cpu(k1->rmap.rm_owner);
355 841952 : b = be64_to_cpu(k2->rmap.rm_owner);
356 841952 : if (a < b)
357 : return 1;
358 238468 : else if (a > b)
359 : return 0;
360 238468 : a = offset_keymask(be64_to_cpu(k1->rmap.rm_offset));
361 238468 : b = offset_keymask(be64_to_cpu(k2->rmap.rm_offset));
362 238468 : if (a <= b)
363 238468 : return 1;
364 : return 0;
365 : }
366 :
367 : STATIC int
368 1229521545 : xfs_rtrmapbt_recs_inorder(
369 : struct xfs_btree_cur *cur,
370 : const union xfs_btree_rec *r1,
371 : const union xfs_btree_rec *r2)
372 : {
373 1229521545 : uint32_t x;
374 1229521545 : uint32_t y;
375 1229521545 : uint64_t a;
376 1229521545 : uint64_t b;
377 :
378 1229521545 : x = be32_to_cpu(r1->rmap.rm_startblock);
379 1229521545 : y = be32_to_cpu(r2->rmap.rm_startblock);
380 1229521545 : if (x < y)
381 : return 1;
382 224781458 : else if (x > y)
383 : return 0;
384 224781458 : a = be64_to_cpu(r1->rmap.rm_owner);
385 224781458 : b = be64_to_cpu(r2->rmap.rm_owner);
386 224781458 : if (a < b)
387 : return 1;
388 40619703 : else if (a > b)
389 : return 0;
390 40619703 : a = offset_keymask(be64_to_cpu(r1->rmap.rm_offset));
391 40619703 : b = offset_keymask(be64_to_cpu(r2->rmap.rm_offset));
392 40619703 : if (a <= b)
393 40619703 : return 1;
394 : return 0;
395 : }
396 :
397 : STATIC enum xbtree_key_contig
398 6817076879 : xfs_rtrmapbt_keys_contiguous(
399 : struct xfs_btree_cur *cur,
400 : const union xfs_btree_key *key1,
401 : const union xfs_btree_key *key2,
402 : const union xfs_btree_key *mask)
403 : {
404 6817076879 : ASSERT(!mask || mask->rmap.rm_startblock);
405 :
406 : /*
407 : * We only support checking contiguity of the physical space component.
408 : * If any callers ever need more specificity than that, they'll have to
409 : * implement it here.
410 : */
411 6817076879 : ASSERT(!mask || (!mask->rmap.rm_owner && !mask->rmap.rm_offset));
412 :
413 13634153758 : return xbtree_key_contig(be32_to_cpu(key1->rmap.rm_startblock),
414 6817076879 : be32_to_cpu(key2->rmap.rm_startblock));
415 : }
416 :
417 : /* Move the rtrmap btree root from one incore buffer to another. */
418 : static void
419 1289841 : xfs_rtrmapbt_broot_move(
420 : struct xfs_inode *ip,
421 : int whichfork,
422 : struct xfs_btree_block *dst_broot,
423 : size_t dst_bytes,
424 : struct xfs_btree_block *src_broot,
425 : size_t src_bytes,
426 : unsigned int level,
427 : unsigned int numrecs)
428 : {
429 1289841 : struct xfs_mount *mp = ip->i_mount;
430 1289841 : void *dptr;
431 1289841 : void *sptr;
432 :
433 2579682 : ASSERT(xfs_rtrmap_droot_space(src_broot) <=
434 : xfs_inode_fork_size(ip, whichfork));
435 :
436 : /*
437 : * We always have to move the pointers because they are not butted
438 : * against the btree block header.
439 : */
440 1289841 : if (numrecs && level > 0) {
441 15769 : sptr = xfs_rtrmap_broot_ptr_addr(mp, src_broot, 1, src_bytes);
442 15769 : dptr = xfs_rtrmap_broot_ptr_addr(mp, dst_broot, 1, dst_bytes);
443 31538 : memmove(dptr, sptr, numrecs * sizeof(xfs_fsblock_t));
444 : }
445 :
446 1289841 : if (src_broot == dst_broot)
447 : return;
448 :
449 : /*
450 : * If the root is being totally relocated, we have to migrate the block
451 : * header and the keys/records that come after it.
452 : */
453 1167026 : memcpy(dst_broot, src_broot, XFS_RTRMAP_BLOCK_LEN);
454 :
455 583513 : if (!numrecs)
456 : return;
457 :
458 583513 : if (level == 0) {
459 576912 : sptr = xfs_rtrmap_rec_addr(src_broot, 1);
460 576912 : dptr = xfs_rtrmap_rec_addr(dst_broot, 1);
461 1153824 : memcpy(dptr, sptr, numrecs * sizeof(struct xfs_rmap_rec));
462 : } else {
463 6601 : sptr = xfs_rtrmap_key_addr(src_broot, 1);
464 6601 : dptr = xfs_rtrmap_key_addr(dst_broot, 1);
465 13202 : memcpy(dptr, sptr, numrecs * 2 * sizeof(struct xfs_rmap_key));
466 : }
467 : }
468 :
469 : static const struct xfs_ifork_broot_ops xfs_rtrmapbt_iroot_ops = {
470 : .maxrecs = xfs_rtrmapbt_maxrecs,
471 : .size = xfs_rtrmap_broot_space_calc,
472 : .move = xfs_rtrmapbt_broot_move,
473 : };
474 :
475 : const struct xfs_btree_ops xfs_rtrmapbt_ops = {
476 : .rec_len = sizeof(struct xfs_rmap_rec),
477 : .key_len = 2 * sizeof(struct xfs_rmap_key),
478 : .lru_refs = XFS_RMAP_BTREE_REF,
479 : .geom_flags = XFS_BTREE_LONG_PTRS | XFS_BTREE_ROOT_IN_INODE |
480 : XFS_BTREE_CRC_BLOCKS | XFS_BTREE_OVERLAPPING |
481 : XFS_BTREE_IROOT_RECORDS,
482 :
483 : .dup_cursor = xfs_rtrmapbt_dup_cursor,
484 : .alloc_block = xfs_btree_alloc_imeta_block,
485 : .free_block = xfs_btree_free_imeta_block,
486 : .get_minrecs = xfs_rtrmapbt_get_minrecs,
487 : .get_maxrecs = xfs_rtrmapbt_get_maxrecs,
488 : .get_dmaxrecs = xfs_rtrmapbt_get_dmaxrecs,
489 : .init_key_from_rec = xfs_rtrmapbt_init_key_from_rec,
490 : .init_high_key_from_rec = xfs_rtrmapbt_init_high_key_from_rec,
491 : .init_rec_from_cur = xfs_rtrmapbt_init_rec_from_cur,
492 : .init_ptr_from_cur = xfs_rtrmapbt_init_ptr_from_cur,
493 : .key_diff = xfs_rtrmapbt_key_diff,
494 : .buf_ops = &xfs_rtrmapbt_buf_ops,
495 : .diff_two_keys = xfs_rtrmapbt_diff_two_keys,
496 : .keys_inorder = xfs_rtrmapbt_keys_inorder,
497 : .recs_inorder = xfs_rtrmapbt_recs_inorder,
498 : .keys_contiguous = xfs_rtrmapbt_keys_contiguous,
499 : .iroot_ops = &xfs_rtrmapbt_iroot_ops,
500 : };
501 :
502 : /* Initialize a new rt rmap btree cursor. */
503 : static struct xfs_btree_cur *
504 538434855 : xfs_rtrmapbt_init_common(
505 : struct xfs_mount *mp,
506 : struct xfs_trans *tp,
507 : struct xfs_rtgroup *rtg,
508 : struct xfs_inode *ip)
509 : {
510 538434855 : struct xfs_btree_cur *cur;
511 :
512 538434855 : ASSERT(xfs_isilocked(ip, XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
513 :
514 538368003 : cur = xfs_btree_alloc_cursor(mp, tp, XFS_BTNUM_RTRMAP,
515 538368003 : &xfs_rtrmapbt_ops, mp->m_rtrmap_maxlevels,
516 : xfs_rtrmapbt_cur_cache);
517 538538880 : cur->bc_statoff = XFS_STATS_CALC_INDEX(xs_rmap_2);
518 :
519 538538880 : cur->bc_ino.ip = ip;
520 538538880 : cur->bc_ino.allocated = 0;
521 538538880 : cur->bc_ino.flags = 0;
522 :
523 538538880 : cur->bc_ino.rtg = xfs_rtgroup_hold(rtg);
524 538587357 : return cur;
525 : }
526 :
527 : /* Allocate a new rt rmap btree cursor. */
528 : struct xfs_btree_cur *
529 538439537 : xfs_rtrmapbt_init_cursor(
530 : struct xfs_mount *mp,
531 : struct xfs_trans *tp,
532 : struct xfs_rtgroup *rtg,
533 : struct xfs_inode *ip)
534 : {
535 538439537 : struct xfs_btree_cur *cur;
536 538439537 : struct xfs_ifork *ifp = xfs_ifork_ptr(ip, XFS_DATA_FORK);
537 :
538 538439537 : cur = xfs_rtrmapbt_init_common(mp, tp, rtg, ip);
539 538579611 : cur->bc_nlevels = be16_to_cpu(ifp->if_broot->bb_level) + 1;
540 538579611 : cur->bc_ino.forksize = xfs_inode_fork_size(ip, XFS_DATA_FORK);
541 538579611 : cur->bc_ino.whichfork = XFS_DATA_FORK;
542 538579611 : return cur;
543 : }
544 :
545 : /* Create a new rt reverse mapping btree cursor with a fake root for staging. */
546 : struct xfs_btree_cur *
547 11073 : xfs_rtrmapbt_stage_cursor(
548 : struct xfs_mount *mp,
549 : struct xfs_rtgroup *rtg,
550 : struct xfs_inode *ip,
551 : struct xbtree_ifakeroot *ifake)
552 : {
553 11073 : struct xfs_btree_cur *cur;
554 :
555 11073 : cur = xfs_rtrmapbt_init_common(mp, NULL, rtg, ip);
556 11073 : cur->bc_nlevels = ifake->if_levels;
557 11073 : cur->bc_ino.forksize = ifake->if_fork_size;
558 11073 : cur->bc_ino.whichfork = -1;
559 11073 : xfs_btree_stage_ifakeroot(cur, ifake, NULL);
560 11072 : return cur;
561 : }
562 :
563 : #ifdef CONFIG_XFS_BTREE_IN_XFILE
564 : /*
565 : * Validate an in-memory realtime rmap btree block. Callers are allowed to
566 : * generate an in-memory btree even if the ondisk feature is not enabled.
567 : */
568 : static xfs_failaddr_t
569 114471382 : xfs_rtrmapbt_mem_verify(
570 : struct xfs_buf *bp)
571 : {
572 114471382 : struct xfs_mount *mp = bp->b_mount;
573 114471382 : struct xfs_btree_block *block = XFS_BUF_TO_BLOCK(bp);
574 114471382 : xfs_failaddr_t fa;
575 114471382 : unsigned int level;
576 :
577 114471382 : if (!xfs_verify_magic(bp, block->bb_magic))
578 0 : return __this_address;
579 :
580 114461949 : fa = xfs_btree_lblock_v5hdr_verify(bp, XFS_RMAP_OWN_UNKNOWN);
581 114457748 : if (fa)
582 : return fa;
583 :
584 114469853 : level = be16_to_cpu(block->bb_level);
585 114469853 : if (xfs_has_rmapbt(mp)) {
586 114469853 : if (level >= mp->m_rtrmap_maxlevels)
587 0 : return __this_address;
588 : } else {
589 0 : if (level >= xfs_rtrmapbt_maxlevels_ondisk())
590 0 : return __this_address;
591 : }
592 :
593 114469853 : return xfbtree_lblock_verify(bp,
594 : xfs_rtrmapbt_maxrecs(mp, xfo_to_b(1), level == 0));
595 : }
596 :
597 : static void
598 11143 : xfs_rtrmapbt_mem_rw_verify(
599 : struct xfs_buf *bp)
600 : {
601 11143 : xfs_failaddr_t fa = xfs_rtrmapbt_mem_verify(bp);
602 :
603 11143 : if (fa)
604 0 : xfs_verifier_error(bp, -EFSCORRUPTED, fa);
605 11143 : }
606 :
607 : /* skip crc checks on in-memory btrees to save time */
608 : static const struct xfs_buf_ops xfs_rtrmapbt_mem_buf_ops = {
609 : .name = "xfs_rtrmapbt_mem",
610 : .magic = { 0, cpu_to_be32(XFS_RTRMAP_CRC_MAGIC) },
611 : .verify_read = xfs_rtrmapbt_mem_rw_verify,
612 : .verify_write = xfs_rtrmapbt_mem_rw_verify,
613 : .verify_struct = xfs_rtrmapbt_mem_verify,
614 : };
615 :
616 : static const struct xfs_btree_ops xfs_rtrmapbt_mem_ops = {
617 : .rec_len = sizeof(struct xfs_rmap_rec),
618 : .key_len = 2 * sizeof(struct xfs_rmap_key),
619 : .lru_refs = XFS_RMAP_BTREE_REF,
620 : .geom_flags = XFS_BTREE_CRC_BLOCKS | XFS_BTREE_OVERLAPPING |
621 : XFS_BTREE_LONG_PTRS | XFS_BTREE_IN_XFILE,
622 :
623 : .dup_cursor = xfbtree_dup_cursor,
624 : .set_root = xfbtree_set_root,
625 : .alloc_block = xfbtree_alloc_block,
626 : .free_block = xfbtree_free_block,
627 : .get_minrecs = xfbtree_get_minrecs,
628 : .get_maxrecs = xfbtree_get_maxrecs,
629 : .init_key_from_rec = xfs_rtrmapbt_init_key_from_rec,
630 : .init_high_key_from_rec = xfs_rtrmapbt_init_high_key_from_rec,
631 : .init_rec_from_cur = xfs_rtrmapbt_init_rec_from_cur,
632 : .init_ptr_from_cur = xfbtree_init_ptr_from_cur,
633 : .key_diff = xfs_rtrmapbt_key_diff,
634 : .buf_ops = &xfs_rtrmapbt_mem_buf_ops,
635 : .diff_two_keys = xfs_rtrmapbt_diff_two_keys,
636 : .keys_inorder = xfs_rtrmapbt_keys_inorder,
637 : .recs_inorder = xfs_rtrmapbt_recs_inorder,
638 : .keys_contiguous = xfs_rtrmapbt_keys_contiguous,
639 : };
640 :
641 : /* Create a cursor for an in-memory btree. */
642 : struct xfs_btree_cur *
643 52700556 : xfs_rtrmapbt_mem_cursor(
644 : struct xfs_rtgroup *rtg,
645 : struct xfs_trans *tp,
646 : struct xfs_buf *head_bp,
647 : struct xfbtree *xfbtree)
648 : {
649 52700556 : struct xfs_btree_cur *cur;
650 52700556 : struct xfs_mount *mp = rtg->rtg_mount;
651 :
652 : /* Overlapping btree; 2 keys per pointer. */
653 52700556 : cur = xfs_btree_alloc_cursor(mp, tp, XFS_BTNUM_RTRMAP,
654 52700556 : &xfs_rtrmapbt_mem_ops, mp->m_rtrmap_maxlevels,
655 : xfs_rtrmapbt_cur_cache);
656 52706070 : cur->bc_statoff = XFS_STATS_CALC_INDEX(xs_rmap_2);
657 52706070 : cur->bc_mem.xfbtree = xfbtree;
658 52706070 : cur->bc_mem.head_bp = head_bp;
659 52706070 : cur->bc_nlevels = xfs_btree_mem_head_nlevels(head_bp);
660 :
661 52692317 : cur->bc_mem.rtg = xfs_rtgroup_hold(rtg);
662 52705772 : return cur;
663 : }
664 :
665 : int
666 11143 : xfs_rtrmapbt_mem_create(
667 : struct xfs_mount *mp,
668 : xfs_rgnumber_t rgno,
669 : struct xfs_buftarg *target,
670 : struct xfbtree **xfbtreep)
671 : {
672 11143 : struct xfbtree_config cfg = {
673 : .btree_ops = &xfs_rtrmapbt_mem_ops,
674 : .target = target,
675 : .flags = XFBTREE_DIRECT_MAP,
676 : .owner = rgno,
677 : };
678 :
679 11143 : return xfbtree_create(mp, &cfg, xfbtreep);
680 : }
681 : #endif /* CONFIG_XFS_BTREE_IN_XFILE */
682 :
683 : /*
684 : * Install a new rt reverse mapping btree root. Caller is responsible for
685 : * invalidating and freeing the old btree blocks.
686 : */
687 : void
688 11063 : xfs_rtrmapbt_commit_staged_btree(
689 : struct xfs_btree_cur *cur,
690 : struct xfs_trans *tp)
691 : {
692 11063 : struct xbtree_ifakeroot *ifake = cur->bc_ino.ifake;
693 11063 : struct xfs_ifork *ifp;
694 11063 : int flags = XFS_ILOG_CORE | XFS_ILOG_DBROOT;
695 :
696 11063 : ASSERT(cur->bc_flags & XFS_BTREE_STAGING);
697 11063 : ASSERT(ifake->if_fork->if_format == XFS_DINODE_FMT_RMAP);
698 :
699 : /*
700 : * Free any resources hanging off the real fork, then shallow-copy the
701 : * staging fork's contents into the real fork to transfer everything
702 : * we just built.
703 : */
704 11063 : ifp = xfs_ifork_ptr(cur->bc_ino.ip, XFS_DATA_FORK);
705 11063 : xfs_idestroy_fork(ifp);
706 22138 : memcpy(ifp, ifake->if_fork, sizeof(struct xfs_ifork));
707 :
708 11069 : xfs_trans_log_inode(tp, cur->bc_ino.ip, flags);
709 11064 : xfs_btree_commit_ifakeroot(cur, tp, XFS_DATA_FORK, &xfs_rtrmapbt_ops);
710 11066 : }
711 :
712 : /* Calculate number of records in a rt reverse mapping btree block. */
713 : static inline unsigned int
714 : xfs_rtrmapbt_block_maxrecs(
715 : unsigned int blocklen,
716 : bool leaf)
717 : {
718 1396742574 : if (leaf)
719 2184960 : return blocklen / sizeof(struct xfs_rmap_rec);
720 1316964205 : return blocklen /
721 : (2 * sizeof(struct xfs_rmap_key) + sizeof(xfs_rtrmap_ptr_t));
722 : }
723 :
724 : /*
725 : * Calculate number of records in an rt reverse mapping btree block.
726 : */
727 : unsigned int
728 1423806 : xfs_rtrmapbt_maxrecs(
729 : struct xfs_mount *mp,
730 : unsigned int blocklen,
731 : bool leaf)
732 : {
733 1396742574 : blocklen -= XFS_RTRMAP_BLOCK_LEN;
734 1396742574 : return xfs_rtrmapbt_block_maxrecs(blocklen, leaf);
735 : }
736 :
737 : /* Compute the max possible height for realtime reverse mapping btrees. */
738 : unsigned int
739 59 : xfs_rtrmapbt_maxlevels_ondisk(void)
740 : {
741 59 : unsigned long long max_dblocks;
742 59 : unsigned int minrecs[2];
743 59 : unsigned int blocklen;
744 :
745 59 : blocklen = XFS_MIN_CRC_BLOCKSIZE - XFS_BTREE_LBLOCK_CRC_LEN;
746 :
747 59 : minrecs[0] = xfs_rtrmapbt_block_maxrecs(blocklen, true) / 2;
748 59 : minrecs[1] = xfs_rtrmapbt_block_maxrecs(blocklen, false) / 2;
749 :
750 : /*
751 : * Compute the asymptotic maxlevels for an rtrmapbt on any rtreflink fs.
752 : *
753 : * On a reflink filesystem, each block in an rtgroup can have up to
754 : * 2^32 (per the refcount record format) owners, which means that
755 : * theoretically we could face up to 2^64 rmap records. However, we're
756 : * likely to run out of blocks in the data device long before that
757 : * happens, which means that we must compute the max height based on
758 : * what the btree will look like if it consumes almost all the blocks
759 : * in the data device due to maximal sharing factor.
760 : */
761 59 : max_dblocks = -1U; /* max ag count */
762 59 : max_dblocks *= XFS_MAX_CRC_AG_BLOCKS;
763 59 : return xfs_btree_space_to_height(minrecs, max_dblocks);
764 : }
765 :
766 : int __init
767 59 : xfs_rtrmapbt_init_cur_cache(void)
768 : {
769 59 : xfs_rtrmapbt_cur_cache = kmem_cache_create("xfs_rtrmapbt_cur",
770 59 : xfs_btree_cur_sizeof(xfs_rtrmapbt_maxlevels_ondisk()),
771 : 0, 0, NULL);
772 :
773 59 : if (!xfs_rtrmapbt_cur_cache)
774 0 : return -ENOMEM;
775 : return 0;
776 : }
777 :
778 : void
779 58 : xfs_rtrmapbt_destroy_cur_cache(void)
780 : {
781 58 : kmem_cache_destroy(xfs_rtrmapbt_cur_cache);
782 58 : xfs_rtrmapbt_cur_cache = NULL;
783 58 : }
784 :
785 : /* Compute the maximum height of an rt reverse mapping btree. */
786 : void
787 70799 : xfs_rtrmapbt_compute_maxlevels(
788 : struct xfs_mount *mp)
789 : {
790 70799 : unsigned int d_maxlevels, r_maxlevels;
791 :
792 70799 : if (!xfs_has_rtrmapbt(mp)) {
793 50022 : mp->m_rtrmap_maxlevels = 0;
794 50022 : return;
795 : }
796 :
797 : /*
798 : * The realtime rmapbt lives on the data device, which means that its
799 : * maximum height is constrained by the size of the data device and
800 : * the height required to store one rmap record for each block in an
801 : * rt group.
802 : *
803 : * On a reflink filesystem, each rt block can have up to 2^32 (per the
804 : * refcount record format) owners, which means that theoretically we
805 : * could face up to 2^64 rmap records. This makes the computation of
806 : * maxlevels based on record count meaningless, so we only consider the
807 : * size of the data device.
808 : */
809 20777 : d_maxlevels = xfs_btree_space_to_height(mp->m_rtrmap_mnr,
810 : mp->m_sb.sb_dblocks);
811 20777 : if (xfs_has_rtreflink(mp)) {
812 20755 : mp->m_rtrmap_maxlevels = d_maxlevels + 1;
813 20755 : return;
814 : }
815 :
816 22 : r_maxlevels = xfs_btree_compute_maxlevels(mp->m_rtrmap_mnr,
817 22 : mp->m_sb.sb_rgblocks);
818 :
819 : /* Add one level to handle the inode root level. */
820 22 : mp->m_rtrmap_maxlevels = min(d_maxlevels, r_maxlevels) + 1;
821 : }
822 :
823 : #define XFS_RTRMAP_NAMELEN 17
824 :
825 : /* Create the metadata directory path for an rtrmap btree inode. */
826 : int
827 100500 : xfs_rtrmapbt_create_path(
828 : struct xfs_mount *mp,
829 : xfs_rgnumber_t rgno,
830 : struct xfs_imeta_path **pathp)
831 : {
832 100500 : struct xfs_imeta_path *path;
833 100500 : unsigned char *fname;
834 100500 : int error;
835 :
836 100500 : error = xfs_imeta_create_file_path(mp, 2, &path);
837 100500 : if (error)
838 : return error;
839 :
840 100500 : fname = kmalloc(XFS_RTRMAP_NAMELEN, GFP_KERNEL);
841 100500 : if (!fname) {
842 0 : xfs_imeta_free_path(path);
843 0 : return -ENOMEM;
844 : }
845 :
846 100500 : snprintf(fname, XFS_RTRMAP_NAMELEN, "%u.rmap", rgno);
847 100500 : path->im_path[0] = "realtime";
848 100500 : path->im_path[1] = fname;
849 100500 : path->im_dynamicmask = 0x2;
850 100500 : *pathp = path;
851 100500 : return 0;
852 : }
853 :
854 : /* Calculate the rtrmap btree size for some records. */
855 : unsigned long long
856 95884 : xfs_rtrmapbt_calc_size(
857 : struct xfs_mount *mp,
858 : unsigned long long len)
859 : {
860 95884 : return xfs_btree_calc_size(mp->m_rtrmap_mnr, len);
861 : }
862 :
863 : /*
864 : * Calculate the maximum rmap btree size.
865 : */
866 : static unsigned long long
867 : xfs_rtrmapbt_max_size(
868 : struct xfs_mount *mp,
869 : xfs_rtblock_t rtblocks)
870 : {
871 : /* Bail out if we're uninitialized, which can happen in mkfs. */
872 198275 : if (mp->m_rtrmap_mxr[0] == 0)
873 : return 0;
874 :
875 198275 : return xfs_rtrmapbt_calc_size(mp, rtblocks);
876 : }
877 :
878 : /*
879 : * Figure out how many blocks to reserve and how many are used by this btree.
880 : */
881 : xfs_filblks_t
882 198325 : xfs_rtrmapbt_calc_reserves(
883 : struct xfs_mount *mp)
884 : {
885 198325 : if (!xfs_has_rtrmapbt(mp))
886 : return 0;
887 :
888 : /* 1/64th (~1.5%) of the space, and enough for 1 record per block. */
889 396550 : return max_t(xfs_filblks_t, mp->m_sb.sb_rgblocks >> 6,
890 : xfs_rtrmapbt_max_size(mp, mp->m_sb.sb_rgblocks));
891 : }
892 :
893 : /* Convert on-disk form of btree root to in-memory form. */
894 : STATIC void
895 97411 : xfs_rtrmapbt_from_disk(
896 : struct xfs_inode *ip,
897 : struct xfs_rtrmap_root *dblock,
898 : unsigned int dblocklen,
899 : struct xfs_btree_block *rblock)
900 : {
901 97411 : struct xfs_mount *mp = ip->i_mount;
902 97411 : struct xfs_rmap_key *fkp;
903 97411 : __be64 *fpp;
904 97411 : struct xfs_rmap_key *tkp;
905 97411 : __be64 *tpp;
906 97411 : struct xfs_rmap_rec *frp;
907 97411 : struct xfs_rmap_rec *trp;
908 97411 : unsigned int rblocklen = xfs_rtrmap_broot_space(mp, dblock);
909 97411 : unsigned int numrecs;
910 97411 : unsigned int maxrecs;
911 :
912 97411 : xfs_btree_init_block(mp, rblock, &xfs_rtrmapbt_ops, 0, 0, ip->i_ino);
913 :
914 97411 : rblock->bb_level = dblock->bb_level;
915 97411 : rblock->bb_numrecs = dblock->bb_numrecs;
916 97411 : numrecs = be16_to_cpu(dblock->bb_numrecs);
917 :
918 97411 : if (be16_to_cpu(rblock->bb_level) > 0) {
919 21538 : maxrecs = xfs_rtrmapbt_droot_maxrecs(dblocklen, false);
920 21538 : fkp = xfs_rtrmap_droot_key_addr(dblock, 1);
921 21538 : tkp = xfs_rtrmap_key_addr(rblock, 1);
922 21538 : fpp = xfs_rtrmap_droot_ptr_addr(dblock, 1, maxrecs);
923 21538 : tpp = xfs_rtrmap_broot_ptr_addr(mp, rblock, 1, rblocklen);
924 43076 : memcpy(tkp, fkp, 2 * sizeof(*fkp) * numrecs);
925 43076 : memcpy(tpp, fpp, sizeof(*fpp) * numrecs);
926 : } else {
927 75873 : frp = xfs_rtrmap_droot_rec_addr(dblock, 1);
928 75873 : trp = xfs_rtrmap_rec_addr(rblock, 1);
929 151746 : memcpy(trp, frp, sizeof(*frp) * numrecs);
930 : }
931 97411 : }
932 :
933 : /* Load a realtime reverse mapping btree root in from disk. */
934 : int
935 97411 : xfs_iformat_rtrmap(
936 : struct xfs_inode *ip,
937 : struct xfs_dinode *dip)
938 : {
939 97411 : struct xfs_mount *mp = ip->i_mount;
940 97411 : struct xfs_ifork *ifp = xfs_ifork_ptr(ip, XFS_DATA_FORK);
941 97411 : struct xfs_rtrmap_root *dfp = XFS_DFORK_PTR(dip, XFS_DATA_FORK);
942 97411 : unsigned int numrecs;
943 97411 : unsigned int level;
944 97411 : int dsize;
945 :
946 97411 : dsize = XFS_DFORK_SIZE(dip, mp, XFS_DATA_FORK);
947 97411 : numrecs = be16_to_cpu(dfp->bb_numrecs);
948 97411 : level = be16_to_cpu(dfp->bb_level);
949 :
950 97411 : if (level > mp->m_rtrmap_maxlevels ||
951 97411 : xfs_rtrmap_droot_space_calc(level, numrecs) > dsize) {
952 0 : xfs_inode_mark_sick(ip, XFS_SICK_INO_CORE);
953 0 : return -EFSCORRUPTED;
954 : }
955 :
956 194822 : xfs_iroot_alloc(ip, XFS_DATA_FORK,
957 : xfs_rtrmap_broot_space_calc(mp, level, numrecs));
958 97411 : xfs_rtrmapbt_from_disk(ip, dfp, dsize, ifp->if_broot);
959 97411 : return 0;
960 : }
961 :
962 : /* Convert in-memory form of btree root to on-disk form. */
963 : void
964 65488 : xfs_rtrmapbt_to_disk(
965 : struct xfs_mount *mp,
966 : struct xfs_btree_block *rblock,
967 : unsigned int rblocklen,
968 : struct xfs_rtrmap_root *dblock,
969 : unsigned int dblocklen)
970 : {
971 65488 : struct xfs_rmap_key *fkp;
972 65488 : __be64 *fpp;
973 65488 : struct xfs_rmap_key *tkp;
974 65488 : __be64 *tpp;
975 65488 : struct xfs_rmap_rec *frp;
976 65488 : struct xfs_rmap_rec *trp;
977 65488 : unsigned int numrecs;
978 65488 : unsigned int maxrecs;
979 :
980 65488 : ASSERT(rblock->bb_magic == cpu_to_be32(XFS_RTRMAP_CRC_MAGIC));
981 65488 : ASSERT(uuid_equal(&rblock->bb_u.l.bb_uuid, &mp->m_sb.sb_meta_uuid));
982 65488 : ASSERT(rblock->bb_u.l.bb_blkno == cpu_to_be64(XFS_BUF_DADDR_NULL));
983 65488 : ASSERT(rblock->bb_u.l.bb_leftsib == cpu_to_be64(NULLFSBLOCK));
984 65488 : ASSERT(rblock->bb_u.l.bb_rightsib == cpu_to_be64(NULLFSBLOCK));
985 :
986 65488 : dblock->bb_level = rblock->bb_level;
987 65488 : dblock->bb_numrecs = rblock->bb_numrecs;
988 65488 : numrecs = be16_to_cpu(rblock->bb_numrecs);
989 :
990 65488 : if (be16_to_cpu(rblock->bb_level) > 0) {
991 28127 : maxrecs = xfs_rtrmapbt_droot_maxrecs(dblocklen, false);
992 28127 : fkp = xfs_rtrmap_key_addr(rblock, 1);
993 28127 : tkp = xfs_rtrmap_droot_key_addr(dblock, 1);
994 28127 : fpp = xfs_rtrmap_broot_ptr_addr(mp, rblock, 1, rblocklen);
995 28127 : tpp = xfs_rtrmap_droot_ptr_addr(dblock, 1, maxrecs);
996 56254 : memcpy(tkp, fkp, 2 * sizeof(*fkp) * numrecs);
997 56254 : memcpy(tpp, fpp, sizeof(*fpp) * numrecs);
998 : } else {
999 37361 : frp = xfs_rtrmap_rec_addr(rblock, 1);
1000 37361 : trp = xfs_rtrmap_droot_rec_addr(dblock, 1);
1001 74722 : memcpy(trp, frp, sizeof(*frp) * numrecs);
1002 : }
1003 65488 : }
1004 :
1005 : /* Flush a realtime reverse mapping btree root out to disk. */
1006 : void
1007 44700 : xfs_iflush_rtrmap(
1008 : struct xfs_inode *ip,
1009 : struct xfs_dinode *dip)
1010 : {
1011 44700 : struct xfs_ifork *ifp = xfs_ifork_ptr(ip, XFS_DATA_FORK);
1012 44700 : struct xfs_rtrmap_root *dfp = XFS_DFORK_PTR(dip, XFS_DATA_FORK);
1013 :
1014 44700 : ASSERT(ifp->if_broot != NULL);
1015 44700 : ASSERT(ifp->if_broot_bytes > 0);
1016 89400 : ASSERT(xfs_rtrmap_droot_space(ifp->if_broot) <=
1017 : xfs_inode_fork_size(ip, XFS_DATA_FORK));
1018 44700 : xfs_rtrmapbt_to_disk(ip->i_mount, ifp->if_broot, ifp->if_broot_bytes,
1019 44700 : dfp, XFS_DFORK_SIZE(dip, ip->i_mount, XFS_DATA_FORK));
1020 44700 : }
1021 :
1022 : /*
1023 : * Create a realtime rmap btree inode.
1024 : *
1025 : * Regardless of the return value, the caller must clean up @upd. If a new
1026 : * inode is returned through @*ipp, the caller must finish setting up the incore
1027 : * inode and release it.
1028 : */
1029 : int
1030 3083 : xfs_rtrmapbt_create(
1031 : struct xfs_imeta_update *upd,
1032 : struct xfs_inode **ipp)
1033 : {
1034 3083 : struct xfs_mount *mp = upd->mp;
1035 3083 : struct xfs_ifork *ifp;
1036 3083 : int error;
1037 :
1038 3083 : error = xfs_imeta_create(upd, S_IFREG, ipp);
1039 3083 : if (error)
1040 : return error;
1041 :
1042 3083 : ifp = xfs_ifork_ptr(upd->ip, XFS_DATA_FORK);
1043 3083 : ifp->if_format = XFS_DINODE_FMT_RMAP;
1044 3083 : ASSERT(ifp->if_broot_bytes == 0);
1045 3083 : ASSERT(ifp->if_bytes == 0);
1046 :
1047 : /* Initialize the empty incore btree root. */
1048 3083 : xfs_iroot_alloc(upd->ip, XFS_DATA_FORK,
1049 : xfs_rtrmap_broot_space_calc(mp, 0, 0));
1050 3083 : xfs_btree_init_block(mp, ifp->if_broot, &xfs_rtrmapbt_ops, 0, 0,
1051 3083 : upd->ip->i_ino);
1052 3083 : xfs_trans_log_inode(upd->tp, upd->ip, XFS_ILOG_CORE | XFS_ILOG_DBROOT);
1053 3083 : return 0;
1054 : }
|