Line data Source code
1 : // SPDX-License-Identifier: GPL-2.0-or-later
2 : /*
3 : * Copyright (C) 2022-2023 Oracle. All Rights Reserved.
4 : * Author: Darrick J. Wong <djwong@kernel.org>
5 : */
6 : #include "xfs.h"
7 : #include "xfs_fs.h"
8 : #include "xfs_shared.h"
9 : #include "xfs_format.h"
10 : #include "xfs_trans_resv.h"
11 : #include "xfs_bit.h"
12 : #include "xfs_sb.h"
13 : #include "xfs_mount.h"
14 : #include "xfs_btree.h"
15 : #include "xfs_alloc_btree.h"
16 : #include "xfs_rmap_btree.h"
17 : #include "xfs_alloc.h"
18 : #include "xfs_ialloc.h"
19 : #include "xfs_rmap.h"
20 : #include "xfs_ag.h"
21 : #include "xfs_ag_resv.h"
22 : #include "xfs_health.h"
23 : #include "xfs_error.h"
24 : #include "xfs_bmap.h"
25 : #include "xfs_defer.h"
26 : #include "xfs_log_format.h"
27 : #include "xfs_trans.h"
28 : #include "xfs_trace.h"
29 : #include "xfs_inode.h"
30 : #include "xfs_icache.h"
31 : #include "xfs_buf_item.h"
32 : #include "xfs_rtgroup.h"
33 : #include "xfs_rtbitmap.h"
34 :
35 : /*
36 : * Passive reference counting access wrappers to the rtgroup structures. If
37 : * the rtgroup structure is to be freed, the freeing code is responsible for
38 : * cleaning up objects with passive references before freeing the structure.
39 : */
40 : struct xfs_rtgroup *
41 192188960 : xfs_rtgroup_get(
42 : struct xfs_mount *mp,
43 : xfs_rgnumber_t rgno)
44 : {
45 192188960 : struct xfs_rtgroup *rtg;
46 :
47 192188960 : rcu_read_lock();
48 192187616 : rtg = radix_tree_lookup(&mp->m_rtgroup_tree, rgno);
49 192190984 : if (rtg) {
50 192189763 : trace_xfs_rtgroup_get(rtg, _RET_IP_);
51 192183375 : ASSERT(atomic_read(&rtg->rtg_ref) >= 0);
52 192183375 : atomic_inc(&rtg->rtg_ref);
53 : }
54 192191000 : rcu_read_unlock();
55 192190690 : return rtg;
56 : }
57 :
58 : /* Get a passive reference to the given rtgroup. */
59 : struct xfs_rtgroup *
60 226143869 : xfs_rtgroup_hold(
61 : struct xfs_rtgroup *rtg)
62 : {
63 226143869 : ASSERT(atomic_read(&rtg->rtg_ref) > 0 ||
64 : atomic_read(&rtg->rtg_active_ref) > 0);
65 :
66 226143869 : trace_xfs_rtgroup_hold(rtg, _RET_IP_);
67 226156942 : atomic_inc(&rtg->rtg_ref);
68 226150690 : return rtg;
69 : }
70 :
71 : void
72 418324182 : xfs_rtgroup_put(
73 : struct xfs_rtgroup *rtg)
74 : {
75 418324182 : trace_xfs_rtgroup_put(rtg, _RET_IP_);
76 418343942 : ASSERT(atomic_read(&rtg->rtg_ref) > 0);
77 418343942 : atomic_dec(&rtg->rtg_ref);
78 418348951 : }
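
A minimal caller sketch for the passive reference wrappers above; the helper name below is hypothetical and not part of this file.

/*
 * Illustrative sketch only: look up an rtgroup, hold a passive reference
 * across a short inspection, and drop it again.
 */
static bool
xfs_example_rtgroup_exists(
	struct xfs_mount	*mp,
	xfs_rgnumber_t		rgno)
{
	struct xfs_rtgroup	*rtg;

	rtg = xfs_rtgroup_get(mp, rgno);
	if (!rtg)
		return false;		/* no such group */
	/* ... inspect rtg fields under the passive reference ... */
	xfs_rtgroup_put(rtg);		/* drop the passive reference */
	return true;
}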
79 :
80 : /*
81 : * Active references for rtgroup structures. This is for short term access to
82 : * the rtgroup structures for walking trees or accessing state. If an rtgroup
83 : * is being shrunk or is offline, then this will fail to find that group and
84 : * return NULL instead.
85 : */
86 : struct xfs_rtgroup *
87 1361356 : xfs_rtgroup_grab(
88 : struct xfs_mount *mp,
89 : xfs_rgnumber_t rgno)
90 : {
91 1361356 : struct xfs_rtgroup *rtg;
92 :
93 1361356 : rcu_read_lock();
94 1361352 : rtg = radix_tree_lookup(&mp->m_rtgroup_tree, rgno);
95 1361352 : if (rtg) {
96 1218512 : trace_xfs_rtgroup_grab(rtg, _RET_IP_);
97 2436990 : if (!atomic_inc_not_zero(&rtg->rtg_active_ref))
98 0 : rtg = NULL;
99 : }
100 1361341 : rcu_read_unlock();
101 1361354 : return rtg;
102 : }
103 :
104 : void
105 1219744 : xfs_rtgroup_rele(
106 : struct xfs_rtgroup *rtg)
107 : {
108 1219744 : trace_xfs_rtgroup_rele(rtg, _RET_IP_);
109 2439491 : if (atomic_dec_and_test(&rtg->rtg_active_ref))
110 1231 : wake_up(&rtg->rtg_active_wq);
111 1219747 : }
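
A hedged sketch of the active reference pattern above: xfs_rtgroup_grab() skips groups that are offline or being shrunk, and xfs_rtgroup_rele() wakes any waiter once the last active reference drops. The walker below is hypothetical.

/* Illustrative sketch only: walk every online rtgroup. */
static void
xfs_example_walk_rtgroups(
	struct xfs_mount	*mp)
{
	struct xfs_rtgroup	*rtg;
	xfs_rgnumber_t		rgno;

	for (rgno = 0; rgno < mp->m_sb.sb_rgcount; rgno++) {
		rtg = xfs_rtgroup_grab(mp, rgno);
		if (!rtg)
			continue;	/* offline or being shrunk */
		/* ... walk trees or sample per-group state here ... */
		xfs_rtgroup_rele(rtg);
	}
}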
112 :
113 : int
114 35704 : xfs_initialize_rtgroups(
115 : struct xfs_mount *mp,
116 : xfs_rgnumber_t rgcount)
117 : {
118 35704 : struct xfs_rtgroup *rtg;
119 35704 : xfs_rgnumber_t index;
120 35704 : xfs_rgnumber_t first_initialised = NULLRGNUMBER;
121 35704 : int error;
122 :
123 35704 : if (!xfs_has_rtgroups(mp))
124 : return 0;
125 :
126 : /*
127 : * Walk the current rtgroup tree so we don't try to initialise rt
128 : * groups that already exist (growfs case). Allocate and insert the
129 : * rtgroups that are missing so they are ready for initialisation.
130 : */
131 36256 : for (index = 0; index < rgcount; index++) {
132 1225 : rtg = xfs_rtgroup_get(mp, index);
133 1225 : if (rtg) {
134 4 : xfs_rtgroup_put(rtg);
135 4 : continue;
136 : }
137 :
138 1221 : rtg = kmem_zalloc(sizeof(struct xfs_rtgroup), KM_MAYFAIL);
139 1221 : if (!rtg) {
140 0 : error = -ENOMEM;
141 0 : goto out_unwind_new_rtgs;
142 : }
143 1221 : rtg->rtg_rgno = index;
144 1221 : rtg->rtg_mount = mp;
145 :
146 1221 : error = radix_tree_preload(GFP_NOFS);
147 1221 : if (error)
148 0 : goto out_free_rtg;
149 :
150 1221 : spin_lock(&mp->m_rtgroup_lock);
151 1221 : if (radix_tree_insert(&mp->m_rtgroup_tree, index, rtg)) {
152 0 : WARN_ON_ONCE(1);
153 0 : spin_unlock(&mp->m_rtgroup_lock);
154 0 : radix_tree_preload_end();
155 0 : error = -EEXIST;
156 0 : goto out_free_rtg;
157 : }
158 1221 : spin_unlock(&mp->m_rtgroup_lock);
159 1221 : radix_tree_preload_end();
160 :
161 : #ifdef __KERNEL__
162 : /* Place initialisation of kernel-only structures below this point. */
163 1221 : spin_lock_init(&rtg->rtg_state_lock);
164 1221 : init_waitqueue_head(&rtg->rtg_active_wq);
165 1221 : xfs_defer_drain_init(&rtg->rtg_intents_drain);
166 1221 : xfs_hooks_init(&rtg->rtg_rmap_update_hooks);
167 : #endif /* __KERNEL__ */
168 :
169 : /* Active ref owned by mount indicates rtgroup is online. */
170 1221 : atomic_set(&rtg->rtg_active_ref, 1);
171 :
172 : /* first new rtg is fully initialized */
173 1221 : if (first_initialised == NULLRGNUMBER)
174 241 : first_initialised = index;
175 : }
176 :
177 : return 0;
178 :
179 0 : out_free_rtg:
180 0 : kmem_free(rtg);
181 0 : out_unwind_new_rtgs:
182 : /* unwind any prior newly initialized rtgs */
183 0 : for (index = first_initialised; index < rgcount; index++) {
184 0 : rtg = radix_tree_delete(&mp->m_rtgroup_tree, index);
185 0 : if (!rtg)
186 : break;
187 0 : kmem_free(rtg);
188 : }
189 : return error;
190 : }
191 :
192 : STATIC void
193 1231 : __xfs_free_rtgroups(
194 : struct rcu_head *head)
195 : {
196 1231 : struct xfs_rtgroup *rtg;
197 :
198 1231 : rtg = container_of(head, struct xfs_rtgroup, rcu_head);
199 1231 : kmem_free(rtg);
200 1231 : }
201 :
202 : /*
203 : * Free up the rtgroup resources associated with the mount structure.
204 : */
205 : void
206 24339 : xfs_free_rtgroups(
207 : struct xfs_mount *mp)
208 : {
209 24339 : struct xfs_rtgroup *rtg;
210 24339 : xfs_rgnumber_t rgno;
211 :
212 24339 : if (!xfs_has_rtgroups(mp))
213 : return;
214 :
215 24961 : for (rgno = 0; rgno < mp->m_sb.sb_rgcount; rgno++) {
216 1231 : spin_lock(&mp->m_rtgroup_lock);
217 1231 : rtg = radix_tree_delete(&mp->m_rtgroup_tree, rgno);
218 1231 : spin_unlock(&mp->m_rtgroup_lock);
219 1231 : ASSERT(rtg);
220 1231 : XFS_IS_CORRUPT(mp, atomic_read(&rtg->rtg_ref) != 0);
221 1231 : xfs_defer_drain_free(&rtg->rtg_intents_drain);
222 :
223 : /* drop the mount's active reference */
224 1231 : xfs_rtgroup_rele(rtg);
225 1231 : XFS_IS_CORRUPT(mp, atomic_read(&rtg->rtg_active_ref) != 0);
226 :
227 1231 : call_rcu(&rtg->rcu_head, __xfs_free_rtgroups);
228 : }
229 : }
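
A sketch of the expected setup/teardown pairing for the rtgroup tree: xfs_initialize_rtgroups() gives each new group an active reference owned by the mount, and xfs_free_rtgroups() drops it again. The wrapper below is hypothetical; the real call sites live in the mount and unmount paths.

/* Illustrative sketch only: build the rtgroup tree, then tear it down. */
static int
xfs_example_rtgroup_setup_teardown(
	struct xfs_mount	*mp)
{
	int			error;

	error = xfs_initialize_rtgroups(mp, mp->m_sb.sb_rgcount);
	if (error)
		return error;
	/* ... the filesystem runs with the rtgroups online ... */
	xfs_free_rtgroups(mp);
	return 0;
}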
230 :
231 : /* Find the size of the rtgroup, in blocks. */
232 : static xfs_rgblock_t
233 979547087 : __xfs_rtgroup_block_count(
234 : struct xfs_mount *mp,
235 : xfs_rgnumber_t rgno,
236 : xfs_rgnumber_t rgcount,
237 : xfs_rfsblock_t rblocks)
238 : {
239 979547087 : ASSERT(rgno < rgcount);
240 :
241 979547087 : if (rgno < rgcount - 1)
242 830849057 : return mp->m_sb.sb_rgblocks;
243 148698030 : return xfs_rtb_rounddown_rtx(mp,
244 148698030 : rblocks - (rgno * mp->m_sb.sb_rgblocks));
245 : }
246 :
247 : /* Compute the number of blocks in this realtime group. */
248 : xfs_rgblock_t
249 979498242 : xfs_rtgroup_block_count(
250 : struct xfs_mount *mp,
251 : xfs_rgnumber_t rgno)
252 : {
253 979498242 : return __xfs_rtgroup_block_count(mp, rgno, mp->m_sb.sb_rgcount,
254 : mp->m_sb.sb_rblocks);
255 : }
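
A worked example of the size computation above, using made-up geometry.

/*
 * Worked example (made-up numbers): with sb_rgblocks = 1000,
 * sb_rgcount = 3 and sb_rblocks = 2500, groups 0 and 1 are each 1000
 * blocks long, while group 2 covers the remaining
 * 2500 - 2 * 1000 = 500 blocks, rounded down to a whole rt extent by
 * xfs_rtb_rounddown_rtx().
 */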
256 :
257 : static xfs_failaddr_t
258 90473 : xfs_rtsb_verify(
259 : struct xfs_buf *bp)
260 : {
261 90473 : struct xfs_mount *mp = bp->b_mount;
262 90473 : struct xfs_rtsb *rsb = bp->b_addr;
263 :
264 90473 : if (!xfs_verify_magic(bp, rsb->rsb_magicnum))
265 0 : return __this_address;
266 180946 : if (be32_to_cpu(rsb->rsb_blocksize) != mp->m_sb.sb_blocksize)
267 0 : return __this_address;
268 90473 : if (be64_to_cpu(rsb->rsb_rblocks) != mp->m_sb.sb_rblocks)
269 0 : return __this_address;
270 :
271 90473 : if (be64_to_cpu(rsb->rsb_rextents) != mp->m_sb.sb_rextents)
272 0 : return __this_address;
273 :
274 90473 : if (!uuid_equal(&rsb->rsb_uuid, &mp->m_sb.sb_uuid))
275 0 : return __this_address;
276 :
277 180946 : if (be32_to_cpu(rsb->rsb_rgcount) != mp->m_sb.sb_rgcount)
278 0 : return __this_address;
279 :
280 180946 : if (be32_to_cpu(rsb->rsb_rextsize) != mp->m_sb.sb_rextsize)
281 0 : return __this_address;
282 180946 : if (be32_to_cpu(rsb->rsb_rbmblocks) != mp->m_sb.sb_rbmblocks)
283 0 : return __this_address;
284 :
285 180946 : if (be32_to_cpu(rsb->rsb_rgblocks) != mp->m_sb.sb_rgblocks)
286 0 : return __this_address;
287 90473 : if (rsb->rsb_blocklog != mp->m_sb.sb_blocklog)
288 0 : return __this_address;
289 90473 : if (rsb->rsb_sectlog != mp->m_sb.sb_sectlog)
290 0 : return __this_address;
291 90473 : if (rsb->rsb_rextslog != mp->m_sb.sb_rextslog)
292 0 : return __this_address;
293 90473 : if (rsb->rsb_pad)
294 0 : return __this_address;
295 :
296 90473 : if (rsb->rsb_pad2)
297 0 : return __this_address;
298 :
299 90473 : if (!uuid_equal(&rsb->rsb_meta_uuid, &mp->m_sb.sb_meta_uuid))
300 0 : return __this_address;
301 :
302 : /* Everything to the end of the fs block must be zero */
303 180946 : if (memchr_inv(rsb + 1, 0, BBTOB(bp->b_length) - sizeof(*rsb)))
304 0 : return __this_address;
305 :
306 : return NULL;
307 : }
308 :
309 : static void
310 10813 : xfs_rtsb_read_verify(
311 : struct xfs_buf *bp)
312 : {
313 10813 : xfs_failaddr_t fa;
314 :
315 10813 : if (!xfs_buf_verify_cksum(bp, XFS_RTSB_CRC_OFF))
316 0 : xfs_verifier_error(bp, -EFSBADCRC, __this_address);
317 : else {
318 10813 : fa = xfs_rtsb_verify(bp);
319 10813 : if (fa)
320 0 : xfs_verifier_error(bp, -EFSCORRUPTED, fa);
321 : }
322 10813 : }
323 :
324 : static void
325 79660 : xfs_rtsb_write_verify(
326 : struct xfs_buf *bp)
327 : {
328 79660 : struct xfs_rtsb *rsb = bp->b_addr;
329 79660 : struct xfs_buf_log_item *bip = bp->b_log_item;
330 79660 : xfs_failaddr_t fa;
331 :
332 79660 : fa = xfs_rtsb_verify(bp);
333 79660 : if (fa) {
334 0 : xfs_verifier_error(bp, -EFSCORRUPTED, fa);
335 0 : return;
336 : }
337 :
338 79660 : if (bip)
339 77648 : rsb->rsb_lsn = cpu_to_be64(bip->bli_item.li_lsn);
340 :
341 79660 : xfs_buf_update_cksum(bp, XFS_RTSB_CRC_OFF);
342 : }
343 :
344 : const struct xfs_buf_ops xfs_rtsb_buf_ops = {
345 : .name = "xfs_rtsb",
346 : .magic = { 0, cpu_to_be32(XFS_RTSB_MAGIC) },
347 : .verify_read = xfs_rtsb_read_verify,
348 : .verify_write = xfs_rtsb_write_verify,
349 : .verify_struct = xfs_rtsb_verify,
350 : };
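
A hedged sketch of how the verifier ops above get attached to an uncached read of the primary rt superblock. The helper name is hypothetical, and the assumption that the primary rt super lives at daddr 0 of the rt device is this sketch's, not the file's.

/*
 * Illustrative sketch only: read the primary rt superblock with the
 * verifiers above attached.  The daddr of 0 is an assumption here.
 */
static int
xfs_example_read_rtsb(
	struct xfs_mount	*mp,
	struct xfs_buf		**bpp)
{
	return xfs_buf_read_uncached(mp->m_rtdev_targp, 0,
			XFS_FSB_TO_BB(mp, 1), 0, bpp, &xfs_rtsb_buf_ops);
}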
351 :
352 : /* Update a realtime superblock from the primary fs super */
353 : void
354 102609 : xfs_rtgroup_update_super(
355 : struct xfs_buf *rtsb_bp,
356 : const struct xfs_buf *sb_bp)
357 : {
358 102609 : const struct xfs_dsb *dsb = sb_bp->b_addr;
359 102609 : struct xfs_rtsb *rsb = rtsb_bp->b_addr;
360 102609 : const uuid_t *meta_uuid;
361 :
362 102609 : rsb->rsb_magicnum = cpu_to_be32(XFS_RTSB_MAGIC);
363 102609 : rsb->rsb_blocksize = dsb->sb_blocksize;
364 102609 : rsb->rsb_rblocks = dsb->sb_rblocks;
365 :
366 102609 : rsb->rsb_rextents = dsb->sb_rextents;
367 102609 : rsb->rsb_lsn = 0;
368 :
369 205218 : memcpy(&rsb->rsb_uuid, &dsb->sb_uuid, sizeof(rsb->rsb_uuid));
370 :
371 102609 : rsb->rsb_rgcount = dsb->sb_rgcount;
372 205218 : memcpy(&rsb->rsb_fname, &dsb->sb_fname, XFSLABEL_MAX);
373 :
374 102609 : rsb->rsb_rextsize = dsb->sb_rextsize;
375 102609 : rsb->rsb_rbmblocks = dsb->sb_rbmblocks;
376 :
377 102609 : rsb->rsb_rgblocks = dsb->sb_rgblocks;
378 102609 : rsb->rsb_blocklog = dsb->sb_blocklog;
379 102609 : rsb->rsb_sectlog = dsb->sb_sectlog;
380 102609 : rsb->rsb_rextslog = dsb->sb_rextslog;
381 102609 : rsb->rsb_pad = 0;
382 102609 : rsb->rsb_pad2 = 0;
383 :
384 : /*
385 : * The metadata uuid is the fs uuid if the metauuid feature is not
386 : * enabled.
387 : */
388 102609 : if (dsb->sb_features_incompat &
389 : cpu_to_be32(XFS_SB_FEAT_INCOMPAT_META_UUID))
390 0 : meta_uuid = &dsb->sb_meta_uuid;
391 : else
392 : meta_uuid = &dsb->sb_uuid;
393 205218 : memcpy(&rsb->rsb_meta_uuid, meta_uuid, sizeof(rsb->rsb_meta_uuid));
394 102609 : }
395 :
396 : /*
397 : * Update the primary realtime superblock from a filesystem superblock and
398 : * log it to the given transaction.
399 : */
400 : void
401 262919 : xfs_rtgroup_log_super(
402 : struct xfs_trans *tp,
403 : const struct xfs_buf *sb_bp)
404 : {
405 262919 : struct xfs_buf *rtsb_bp;
406 :
407 262919 : if (!xfs_has_rtgroups(tp->t_mountp))
408 : return;
409 :
410 261219 : rtsb_bp = xfs_trans_getrtsb(tp);
411 261219 : if (!rtsb_bp) {
412 : /*
413 : * It's possible for the rtgroups feature to be enabled
414 : * without an incore rt superblock buffer if the rt geometry
415 : * was specified at mkfs time but the rt section has not yet
416 : * been attached. In this case, rblocks must be zero.
417 : */
418 158610 : ASSERT(tp->t_mountp->m_sb.sb_rblocks == 0);
419 158610 : return;
420 : }
421 :
422 102609 : xfs_rtgroup_update_super(rtsb_bp, sb_bp);
423 102609 : xfs_trans_ordered_buf(tp, rtsb_bp);
424 : }
425 :
426 : /* Initialize a secondary realtime superblock. */
427 : int
428 2012 : xfs_rtgroup_init_secondary_super(
429 : struct xfs_mount *mp,
430 : xfs_rgnumber_t rgno,
431 : struct xfs_buf **bpp)
432 : {
433 2012 : struct xfs_buf *bp;
434 2012 : struct xfs_rtsb *rsb;
435 2012 : xfs_rtblock_t rtbno;
436 2012 : int error;
437 :
438 2012 : ASSERT(rgno != 0);
439 :
440 2012 : error = xfs_buf_get_uncached(mp->m_rtdev_targp, XFS_FSB_TO_BB(mp, 1),
441 : 0, &bp);
442 2012 : if (error)
443 : return error;
444 :
445 2012 : rtbno = xfs_rgbno_to_rtb(mp, rgno, 0);
446 2012 : bp->b_maps[0].bm_bn = xfs_rtb_to_daddr(mp, rtbno);
447 2012 : bp->b_ops = &xfs_rtsb_buf_ops;
448 2012 : xfs_buf_zero(bp, 0, BBTOB(bp->b_length));
449 :
450 2012 : rsb = bp->b_addr;
451 2012 : rsb->rsb_magicnum = cpu_to_be32(XFS_RTSB_MAGIC);
452 2012 : rsb->rsb_blocksize = cpu_to_be32(mp->m_sb.sb_blocksize);
453 2012 : rsb->rsb_rblocks = cpu_to_be64(mp->m_sb.sb_rblocks);
454 :
455 2012 : rsb->rsb_rextents = cpu_to_be64(mp->m_sb.sb_rextents);
456 :
457 4024 : memcpy(&rsb->rsb_uuid, &mp->m_sb.sb_uuid, sizeof(rsb->rsb_uuid));
458 :
459 2012 : rsb->rsb_rgcount = cpu_to_be32(mp->m_sb.sb_rgcount);
460 4024 : memcpy(&rsb->rsb_fname, &mp->m_sb.sb_fname, XFSLABEL_MAX);
461 :
462 2012 : rsb->rsb_rextsize = cpu_to_be32(mp->m_sb.sb_rextsize);
463 2012 : rsb->rsb_rbmblocks = cpu_to_be32(mp->m_sb.sb_rbmblocks);
464 :
465 2012 : rsb->rsb_rgblocks = cpu_to_be32(mp->m_sb.sb_rgblocks);
466 2012 : rsb->rsb_blocklog = mp->m_sb.sb_blocklog;
467 2012 : rsb->rsb_sectlog = mp->m_sb.sb_sectlog;
468 2012 : rsb->rsb_rextslog = mp->m_sb.sb_rextslog;
469 :
470 4024 : memcpy(&rsb->rsb_meta_uuid, &mp->m_sb.sb_meta_uuid,
471 : sizeof(rsb->rsb_meta_uuid));
472 :
473 2012 : *bpp = bp;
474 2012 : return 0;
475 : }
476 :
477 : /*
478 : * Update all the realtime superblocks to match the new state of the primary.
479 : * Because we are completely overwriting all the existing fields in the
480 : * secondary superblock buffers, there is no need to read them in from disk.
481 : * Just get a new buffer, stamp it and write it.
482 : *
483 : * The rt super buffers do not need to be kept in memory once they are
484 : * written, so we mark them as one-shot buffers.
485 : */
486 : int
487 181 : xfs_rtgroup_update_secondary_sbs(
488 : struct xfs_mount *mp)
489 : {
490 181 : LIST_HEAD (buffer_list);
491 181 : struct xfs_rtgroup *rtg;
492 181 : xfs_rgnumber_t start_rgno = 1;
493 181 : int saved_error = 0;
494 181 : int error = 0;
495 :
496 193 : for_each_rtgroup_from(mp, start_rgno, rtg) {
497 12 : struct xfs_buf *bp;
498 :
499 12 : error = xfs_rtgroup_init_secondary_super(mp, rtg->rtg_rgno,
500 : &bp);
501 : /*
502 : * If we get an error reading or writing alternate superblocks,
503 : * continue. If we break early, we'll leave more superblocks
504 : * un-updated than updated.
505 : */
506 12 : if (error) {
507 0 : xfs_warn(mp,
508 : "error allocating secondary superblock for rt group %d",
509 : rtg->rtg_rgno);
510 0 : if (!saved_error)
511 0 : saved_error = error;
512 12 : continue;
513 : }
514 :
515 12 : xfs_buf_oneshot(bp);
516 12 : xfs_buf_delwri_queue(bp, &buffer_list);
517 12 : xfs_buf_relse(bp);
518 :
519 : /* don't hold too many buffers at once */
520 12 : if (rtg->rtg_rgno % 16)
521 12 : continue;
522 :
523 0 : error = xfs_buf_delwri_submit(&buffer_list);
524 0 : if (error) {
525 0 : xfs_warn(mp,
526 : "write error %d updating a secondary superblock near rt group %u",
527 : error, rtg->rtg_rgno);
528 0 : if (!saved_error)
529 0 : saved_error = error;
530 0 : continue;
531 : }
532 : }
533 181 : error = xfs_buf_delwri_submit(&buffer_list);
534 181 : if (error) {
535 0 : xfs_warn(mp,
536 : "write error %d updating a secondary superblock near rt group %u",
537 : error, start_rgno);
538 : }
539 :
540 181 : return saved_error ? saved_error : error;
541 : }
542 :
543 : /* Lock metadata inodes associated with this rt group. */
544 : void
545 145439601 : xfs_rtgroup_lock(
546 : struct xfs_trans *tp,
547 : struct xfs_rtgroup *rtg,
548 : unsigned int rtglock_flags)
549 : {
550 145439601 : ASSERT(!(rtglock_flags & ~XFS_RTGLOCK_ALL_FLAGS));
551 145439601 : ASSERT(!(rtglock_flags & XFS_RTGLOCK_BITMAP_SHARED) ||
552 : !(rtglock_flags & XFS_RTGLOCK_BITMAP));
553 :
554 145439601 : if (rtglock_flags & XFS_RTGLOCK_BITMAP)
555 43763 : xfs_rtbitmap_lock(tp, rtg->rtg_mount);
556 145395838 : else if (rtglock_flags & XFS_RTGLOCK_BITMAP_SHARED)
557 29222785 : xfs_rtbitmap_lock_shared(rtg->rtg_mount, XFS_RBMLOCK_BITMAP);
558 :
559 145440083 : if ((rtglock_flags & XFS_RTGLOCK_RMAP) && rtg->rtg_rmapip) {
560 103764007 : xfs_ilock(rtg->rtg_rmapip, XFS_ILOCK_EXCL);
561 103764587 : if (tp)
562 73870905 : xfs_trans_ijoin(tp, rtg->rtg_rmapip, XFS_ILOCK_EXCL);
563 : }
564 :
565 145440584 : if ((rtglock_flags & XFS_RTGLOCK_REFCOUNT) && rtg->rtg_refcountip) {
566 70970397 : xfs_ilock(rtg->rtg_refcountip, XFS_ILOCK_EXCL);
567 70973292 : if (tp)
568 28206764 : xfs_trans_ijoin(tp, rtg->rtg_refcountip,
569 : XFS_ILOCK_EXCL);
570 : }
571 145443461 : }
572 :
573 : /* Unlock metadata inodes associated with this rt group. */
574 : void
575 43372985 : xfs_rtgroup_unlock(
576 : struct xfs_rtgroup *rtg,
577 : unsigned int rtglock_flags)
578 : {
579 43372985 : ASSERT(!(rtglock_flags & ~XFS_RTGLOCK_ALL_FLAGS));
580 43372985 : ASSERT(!(rtglock_flags & XFS_RTGLOCK_BITMAP_SHARED) ||
581 : !(rtglock_flags & XFS_RTGLOCK_BITMAP));
582 :
583 43372985 : if ((rtglock_flags & XFS_RTGLOCK_REFCOUNT) && rtg->rtg_refcountip)
584 42769736 : xfs_iunlock(rtg->rtg_refcountip, XFS_ILOCK_EXCL);
585 :
586 43372670 : if ((rtglock_flags & XFS_RTGLOCK_RMAP) && rtg->rtg_rmapip)
587 29894932 : xfs_iunlock(rtg->rtg_rmapip, XFS_ILOCK_EXCL);
588 :
589 43372463 : if (rtglock_flags & XFS_RTGLOCK_BITMAP)
590 43763 : xfs_rtbitmap_unlock(rtg->rtg_mount);
591 43328700 : else if (rtglock_flags & XFS_RTGLOCK_BITMAP_SHARED)
592 29224092 : xfs_rtbitmap_unlock_shared(rtg->rtg_mount, XFS_RBMLOCK_BITMAP);
593 43372735 : }
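
A hedged sketch of pairing xfs_rtgroup_lock() with xfs_rtgroup_unlock(); it passes a NULL transaction so no inode is joined to a transaction and the explicit unlock balances the lock. The reader function is hypothetical.

/* Illustrative sketch only: lock the rt group's rmap inode around a read. */
static void
xfs_example_read_rtg_rmap(
	struct xfs_rtgroup	*rtg)
{
	xfs_rtgroup_lock(NULL, rtg, XFS_RTGLOCK_RMAP);
	/* ... read the rt rmap btree here ... */
	xfs_rtgroup_unlock(rtg, XFS_RTGLOCK_RMAP);
}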
594 :
595 : /* Retrieve rt group geometry. */
596 : int
597 396 : xfs_rtgroup_get_geometry(
598 : struct xfs_rtgroup *rtg,
599 : struct xfs_rtgroup_geometry *rgeo)
600 : {
601 : /* Fill out form. */
602 396 : memset(rgeo, 0, sizeof(*rgeo));
603 396 : rgeo->rg_number = rtg->rtg_rgno;
604 396 : rgeo->rg_length = rtg->rtg_blockcount;
605 396 : xfs_rtgroup_geom_health(rtg, rgeo);
606 396 : return 0;
607 : }