Line data Source code
1 : // SPDX-License-Identifier: GPL-2.0
2 : /*
3 : * Copyright (c) 2000-2005 Silicon Graphics, Inc.
4 : * All Rights Reserved.
5 : */
6 : #include "xfs.h"
7 : #include "xfs_fs.h"
8 : #include "xfs_shared.h"
9 : #include "xfs_format.h"
10 : #include "xfs_log_format.h"
11 : #include "xfs_trans_resv.h"
12 : #include "xfs_bit.h"
13 : #include "xfs_sb.h"
14 : #include "xfs_mount.h"
15 : #include "xfs_inode.h"
16 : #include "xfs_iwalk.h"
17 : #include "xfs_quota.h"
18 : #include "xfs_bmap.h"
19 : #include "xfs_bmap_util.h"
20 : #include "xfs_trans.h"
21 : #include "xfs_trans_space.h"
22 : #include "xfs_qm.h"
23 : #include "xfs_trace.h"
24 : #include "xfs_icache.h"
25 : #include "xfs_error.h"
26 : #include "xfs_ag.h"
27 : #include "xfs_ialloc.h"
28 : #include "xfs_log_priv.h"
29 : #include "xfs_health.h"
30 :
31 : /*
32 : * The global quota manager. There is only one of these for the entire
33 : * system, _not_ one per file system. XQM keeps track of the overall
34 : * quota functionality, including maintaining the freelist and hash
35 : * tables of dquots.
36 : */
37 : STATIC int xfs_qm_init_quotainos(struct xfs_mount *mp);
38 : STATIC int xfs_qm_init_quotainfo(struct xfs_mount *mp);
39 :
40 : STATIC void xfs_qm_destroy_quotainos(struct xfs_quotainfo *qi);
41 : STATIC void xfs_qm_dqfree_one(struct xfs_dquot *dqp);
42 : /*
43 : * We use the batch lookup interface to iterate over the dquots as it
44 : * currently is the only interface into the radix tree code that allows
45 : * fuzzy lookups instead of exact matches. Holding the lock over multiple
46 : * operations is fine as all callers are used either during mount/umount
47 : * or quotaoff.
48 : */
49 : #define XFS_DQ_LOOKUP_BATCH 32
50 :
/*
 * Walk all cached dquots of @type in @mp, invoking @execute on each.
 *
 * Dquots are found via batched fuzzy radix-tree lookups under
 * qi_tree_lock (see the comment above XFS_DQ_LOOKUP_BATCH).  An -EAGAIN
 * return from @execute marks the dquot as skipped; once a full pass
 * completes with skips, we delay briefly and restart the whole walk.
 * -EFSCORRUPTED from @execute aborts the walk immediately.  Returns the
 * last non -EAGAIN error seen, or 0.
 */
STATIC int
xfs_qm_dquot_walk(
	struct xfs_mount	*mp,
	xfs_dqtype_t		type,
	int			(*execute)(struct xfs_dquot *dqp, void *data),
	void			*data)
{
	struct xfs_quotainfo	*qi = mp->m_quotainfo;
	struct radix_tree_root	*tree = xfs_dquot_tree(qi, type);
	uint32_t		next_index;
	int			last_error = 0;
	int			skipped;
	int			nr_found;

restart:
	skipped = 0;
	next_index = 0;
	nr_found = 0;

	while (1) {
		struct xfs_dquot *batch[XFS_DQ_LOOKUP_BATCH];
		int		error;
		int		i;

		mutex_lock(&qi->qi_tree_lock);
		nr_found = radix_tree_gang_lookup(tree, (void **)batch,
					next_index, XFS_DQ_LOOKUP_BATCH);
		if (!nr_found) {
			mutex_unlock(&qi->qi_tree_lock);
			break;
		}

		for (i = 0; i < nr_found; i++) {
			struct xfs_dquot *dqp = batch[i];

			/* advance past this id even if execute() fails */
			next_index = dqp->q_id + 1;

			error = execute(batch[i], data);
			if (error == -EAGAIN) {
				skipped++;
				continue;
			}
			if (error && last_error != -EFSCORRUPTED)
				last_error = error;
		}

		mutex_unlock(&qi->qi_tree_lock);

		/* bail out if the filesystem is corrupted. */
		if (last_error == -EFSCORRUPTED) {
			skipped = 0;
			break;
		}
		/* we're done if id overflows back to zero */
		if (!next_index)
			break;
	}

	if (skipped) {
		/* give busy dquots a moment, then rescan from the start */
		delay(1);
		goto restart;
	}

	return last_error;
}
116 :
117 :
/*
 * Purge a dquot from all tracking data structures and free it.
 *
 * Called via xfs_qm_dquot_walk() during unmount/quotaoff.  Returns
 * -EAGAIN (so the walk retries) if the dquot is already being freed,
 * still referenced, or its flush lock is contended; returns 0 once the
 * dquot has been removed from the radix tree and LRU and destroyed.
 */
STATIC int
xfs_qm_dqpurge(
	struct xfs_dquot	*dqp,
	void			*data)
{
	struct xfs_quotainfo	*qi = dqp->q_mount->m_quotainfo;
	int			error = -EAGAIN;

	xfs_dqlock(dqp);
	/* somebody else owns the freeing, or the dquot is still in use */
	if ((dqp->q_flags & XFS_DQFLAG_FREEING) || dqp->q_nrefs != 0)
		goto out_unlock;

	/* prevent new lookups from taking a reference while we tear down */
	dqp->q_flags |= XFS_DQFLAG_FREEING;

	xfs_dqflock(dqp);

	/*
	 * If we are turning this type of quotas off, we don't care
	 * about the dirty metadata sitting in this dquot. OTOH, if
	 * we're unmounting, we do care, so we flush it and wait.
	 */
	if (XFS_DQ_IS_DIRTY(dqp)) {
		struct xfs_buf	*bp = NULL;

		/*
		 * We don't care about getting disk errors here. We need
		 * to purge this dquot anyway, so we go ahead regardless.
		 */
		error = xfs_qm_dqflush(dqp, &bp);
		if (!error) {
			error = xfs_bwrite(bp);
			xfs_buf_relse(bp);
		} else if (error == -EAGAIN) {
			/* flush contention: back out FREEING and retry later */
			dqp->q_flags &= ~XFS_DQFLAG_FREEING;
			goto out_unlock;
		}
		xfs_dqflock(dqp);
	}

	/* must be unpinned and off the AIL unless the log has shut down */
	ASSERT(atomic_read(&dqp->q_pincount) == 0);
	ASSERT(xlog_is_shutdown(dqp->q_logitem.qli_item.li_log) ||
		!test_bit(XFS_LI_IN_AIL, &dqp->q_logitem.qli_item.li_flags));

	xfs_dqfunlock(dqp);
	xfs_dqunlock(dqp);

	radix_tree_delete(xfs_dquot_tree(qi, xfs_dquot_type(dqp)), dqp->q_id);
	qi->qi_dquots--;

	/*
	 * We move dquots to the freelist as soon as their reference count
	 * hits zero, so it really should be on the freelist here.
	 */
	ASSERT(!list_empty(&dqp->q_lru));
	list_lru_del(&qi->qi_lru, &dqp->q_lru);
	XFS_STATS_DEC(dqp->q_mount, xs_qm_dquot_unused);

	xfs_qm_dqdestroy(dqp);
	return 0;

out_unlock:
	xfs_dqunlock(dqp);
	return error;
}
185 :
186 : /*
187 : * Purge the dquot cache.
188 : */
189 : static void
190 36147 : xfs_qm_dqpurge_all(
191 : struct xfs_mount *mp)
192 : {
193 36147 : xfs_qm_dquot_walk(mp, XFS_DQTYPE_USER, xfs_qm_dqpurge, NULL);
194 36147 : xfs_qm_dquot_walk(mp, XFS_DQTYPE_GROUP, xfs_qm_dqpurge, NULL);
195 36147 : xfs_qm_dquot_walk(mp, XFS_DQTYPE_PROJ, xfs_qm_dqpurge, NULL);
196 36147 : }
197 :
198 : /*
199 : * Just destroy the quotainfo structure.
200 : */
201 : void
202 60712 : xfs_qm_unmount(
203 : struct xfs_mount *mp)
204 : {
205 60712 : if (mp->m_quotainfo) {
206 36147 : xfs_qm_dqpurge_all(mp);
207 36147 : xfs_qm_destroy_quotainfo(mp);
208 : }
209 60712 : }
210 :
211 : /*
212 : * Called from the vfsops layer.
213 : */
214 : void
215 60680 : xfs_qm_unmount_quotas(
216 : xfs_mount_t *mp)
217 : {
218 : /*
219 : * Release the dquots that root inode, et al might be holding,
220 : * before we flush quotas and blow away the quotainfo structure.
221 : */
222 60680 : ASSERT(mp->m_rootip);
223 60680 : xfs_qm_dqdetach(mp->m_rootip);
224 60680 : if (mp->m_rbmip)
225 60680 : xfs_qm_dqdetach(mp->m_rbmip);
226 60680 : if (mp->m_rsumip)
227 60680 : xfs_qm_dqdetach(mp->m_rsumip);
228 :
229 : /*
230 : * Release the quota inodes.
231 : */
232 60680 : if (mp->m_quotainfo) {
233 36142 : if (mp->m_quotainfo->qi_uquotaip) {
234 35626 : xfs_irele(mp->m_quotainfo->qi_uquotaip);
235 35626 : mp->m_quotainfo->qi_uquotaip = NULL;
236 : }
237 36142 : if (mp->m_quotainfo->qi_gquotaip) {
238 35270 : xfs_irele(mp->m_quotainfo->qi_gquotaip);
239 35270 : mp->m_quotainfo->qi_gquotaip = NULL;
240 : }
241 36142 : if (mp->m_quotainfo->qi_pquotaip) {
242 35131 : xfs_irele(mp->m_quotainfo->qi_pquotaip);
243 35131 : mp->m_quotainfo->qi_pquotaip = NULL;
244 : }
245 : }
246 60680 : }
247 :
/*
 * Attach a single dquot of @type to @ip, storing it in *@IO_idqpp
 * (one of &ip->i_udquot / &ip->i_gdquot / &ip->i_pdquot).
 *
 * The inode must be ILOCK_EXCL on entry; dqget may cycle the ilock
 * internally.  If @doalloc is true the dquot is allocated on disk when
 * it doesn't exist yet.  Returns 0 with an unlocked, referenced dquot
 * stored in *@IO_idqpp, or a negative errno from dqget.
 */
STATIC int
xfs_qm_dqattach_one(
	struct xfs_inode	*ip,
	xfs_dqtype_t		type,
	bool			doalloc,
	struct xfs_dquot	**IO_idqpp)
{
	struct xfs_dquot	*dqp;
	int			error;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
	error = 0;

	/*
	 * See if we already have it in the inode itself. IO_idqpp is &i_udquot
	 * or &i_gdquot. This made the code look weird, but made the logic a lot
	 * simpler.
	 */
	dqp = *IO_idqpp;
	if (dqp) {
		trace_xfs_dqattach_found(dqp);
		return 0;
	}

	/*
	 * Find the dquot from somewhere. This bumps the reference count of
	 * dquot and returns it locked. This can return ENOENT if dquot didn't
	 * exist on disk and we didn't ask it to allocate; ESRCH if quotas got
	 * turned off suddenly.
	 */
	error = xfs_qm_dqget_inode(ip, type, doalloc, &dqp);
	if (error)
		return error;

	trace_xfs_dqattach_get(dqp);

	/*
	 * dqget may have dropped and re-acquired the ilock, but it guarantees
	 * that the dquot returned is the one that should go in the inode.
	 */
	*IO_idqpp = dqp;
	xfs_dqunlock(dqp);
	return 0;
}
292 :
293 : static bool
294 1940763542 : xfs_qm_need_dqattach(
295 : struct xfs_inode *ip)
296 : {
297 1940763542 : struct xfs_mount *mp = ip->i_mount;
298 :
299 1940763542 : if (!XFS_IS_QUOTA_ON(mp))
300 : return false;
301 1260656616 : if (!XFS_NOT_DQATTACHED(mp, ip))
302 : return false;
303 69196948 : if (xfs_is_quota_inode(&mp->m_sb, ip->i_ino))
304 0 : return false;
305 : return true;
306 : }
307 :
/*
 * Given a locked inode, attach dquot(s) to it, taking U/G/P-QUOTAON
 * into account.
 * If @doalloc is true, the dquot(s) will be allocated if needed.
 * Inode may get unlocked and relocked in here, and the caller must deal with
 * the consequences.
 *
 * On error, dquots attached before the failure are left in place; they
 * are cleaned up later by xfs_qm_dqdetach().
 */
int
xfs_qm_dqattach_locked(
	xfs_inode_t	*ip,
	bool		doalloc)
{
	xfs_mount_t	*mp = ip->i_mount;
	int		error = 0;

	if (!xfs_qm_need_dqattach(ip))
		return 0;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));

	/* attach each enabled quota type that is not yet present */
	if (XFS_IS_UQUOTA_ON(mp) && !ip->i_udquot) {
		error = xfs_qm_dqattach_one(ip, XFS_DQTYPE_USER,
				doalloc, &ip->i_udquot);
		if (error)
			goto done;
		ASSERT(ip->i_udquot);
	}

	if (XFS_IS_GQUOTA_ON(mp) && !ip->i_gdquot) {
		error = xfs_qm_dqattach_one(ip, XFS_DQTYPE_GROUP,
				doalloc, &ip->i_gdquot);
		if (error)
			goto done;
		ASSERT(ip->i_gdquot);
	}

	if (XFS_IS_PQUOTA_ON(mp) && !ip->i_pdquot) {
		error = xfs_qm_dqattach_one(ip, XFS_DQTYPE_PROJ,
				doalloc, &ip->i_pdquot);
		if (error)
			goto done;
		ASSERT(ip->i_pdquot);
	}

done:
	/*
	 * Don't worry about the dquots that we may have attached before any
	 * error - they'll get detached later if it has not already been done.
	 */
	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
	return error;
}
360 :
361 : int
362 1103947940 : xfs_qm_dqattach(
363 : struct xfs_inode *ip)
364 : {
365 1103947940 : int error;
366 :
367 1103947940 : if (!xfs_qm_need_dqattach(ip))
368 : return 0;
369 :
370 17005686 : xfs_ilock(ip, XFS_ILOCK_EXCL);
371 17005847 : error = xfs_qm_dqattach_locked(ip, false);
372 17006389 : xfs_iunlock(ip, XFS_ILOCK_EXCL);
373 :
374 17006389 : return error;
375 : }
376 :
377 : /*
378 : * Release dquots (and their references) if any.
379 : * The inode should be locked EXCL except when this's called by
380 : * xfs_ireclaim.
381 : */
382 : void
383 1115808729 : xfs_qm_dqdetach(
384 : xfs_inode_t *ip)
385 : {
386 1115808729 : if (!(ip->i_udquot || ip->i_gdquot || ip->i_pdquot))
387 : return;
388 :
389 81665810 : trace_xfs_dquot_dqdetach(ip);
390 :
391 163183250 : ASSERT(!xfs_is_quota_inode(&ip->i_mount->m_sb, ip->i_ino));
392 81591625 : if (ip->i_udquot) {
393 81588800 : xfs_qm_dqrele(ip->i_udquot);
394 81588307 : ip->i_udquot = NULL;
395 : }
396 81591132 : if (ip->i_gdquot) {
397 81265096 : xfs_qm_dqrele(ip->i_gdquot);
398 81259401 : ip->i_gdquot = NULL;
399 : }
400 81585437 : if (ip->i_pdquot) {
401 81244654 : xfs_qm_dqrele(ip->i_pdquot);
402 81246380 : ip->i_pdquot = NULL;
403 : }
404 : }
405 :
/*
 * Walk context for dquot LRU reclaim (xfs_qm_dquot_isolate /
 * xfs_qm_shrink_scan).
 */
struct xfs_qm_isolate {
	struct list_head	buffers;	/* delwri queue of flushed dquot buffers */
	struct list_head	dispose;	/* dquots isolated for freeing */
};
410 :
/*
 * LRU isolate callback for dquot reclaim, run under @lru_lock by
 * list_lru_shrink_walk().  Unreferenced, clean dquots are marked
 * XFS_DQFLAG_FREEING and moved to isol->dispose for later freeing;
 * dirty ones are flushed and their buffers queued on isol->buffers.
 * The LRU lock is dropped (and retaken) around the flush.
 */
static enum lru_status
xfs_qm_dquot_isolate(
	struct list_head	*item,
	struct list_lru_one	*lru,
	spinlock_t		*lru_lock,
	void			*arg)
		__releases(lru_lock) __acquires(lru_lock)
{
	struct xfs_dquot	*dqp = container_of(item,
						struct xfs_dquot, q_lru);
	struct xfs_qm_isolate	*isol = arg;

	if (!xfs_dqlock_nowait(dqp))
		goto out_miss_busy;

	/*
	 * If something else is freeing this dquot and hasn't yet removed it
	 * from the LRU, leave it for the freeing task to complete the freeing
	 * process rather than risk it being free from under us here.
	 */
	if (dqp->q_flags & XFS_DQFLAG_FREEING)
		goto out_miss_unlock;

	/*
	 * This dquot has acquired a reference in the meantime remove it from
	 * the freelist and try again.
	 */
	if (dqp->q_nrefs) {
		xfs_dqunlock(dqp);
		XFS_STATS_INC(dqp->q_mount, xs_qm_dqwants);

		trace_xfs_dqreclaim_want(dqp);
		list_lru_isolate(lru, &dqp->q_lru);
		XFS_STATS_DEC(dqp->q_mount, xs_qm_dquot_unused);
		return LRU_REMOVED;
	}

	/*
	 * If the dquot is dirty, flush it. If it's already being flushed, just
	 * skip it so there is time for the IO to complete before we try to
	 * reclaim it again on the next LRU pass.
	 */
	if (!xfs_dqflock_nowait(dqp))
		goto out_miss_unlock;

	if (XFS_DQ_IS_DIRTY(dqp)) {
		struct xfs_buf	*bp = NULL;
		int		error;

		trace_xfs_dqreclaim_dirty(dqp);

		/* we have to drop the LRU lock to flush the dquot */
		spin_unlock(lru_lock);

		error = xfs_qm_dqflush(dqp, &bp);
		if (error)
			goto out_unlock_dirty;

		xfs_buf_delwri_queue(bp, &isol->buffers);
		xfs_buf_relse(bp);
		goto out_unlock_dirty;
	}
	xfs_dqfunlock(dqp);

	/*
	 * Prevent lookups now that we are past the point of no return.
	 */
	dqp->q_flags |= XFS_DQFLAG_FREEING;
	xfs_dqunlock(dqp);

	ASSERT(dqp->q_nrefs == 0);
	list_lru_isolate_move(lru, &dqp->q_lru, &isol->dispose);
	XFS_STATS_DEC(dqp->q_mount, xs_qm_dquot_unused);
	trace_xfs_dqreclaim_done(dqp);
	XFS_STATS_INC(dqp->q_mount, xs_qm_dqreclaims);
	return LRU_REMOVED;

out_miss_unlock:
	xfs_dqunlock(dqp);
out_miss_busy:
	trace_xfs_dqreclaim_busy(dqp);
	XFS_STATS_INC(dqp->q_mount, xs_qm_dqreclaim_misses);
	return LRU_SKIP;

out_unlock_dirty:
	trace_xfs_dqreclaim_busy(dqp);
	XFS_STATS_INC(dqp->q_mount, xs_qm_dqreclaim_misses);
	xfs_dqunlock(dqp);
	/* retake the LRU lock dropped before the flush and retry the item */
	spin_lock(lru_lock);
	return LRU_RETRY;
}
502 :
/*
 * Shrinker scan callback: walk the dquot LRU, flush dirty dquots
 * (submitting their buffers in one delwri batch) and free the isolated
 * clean ones.  Only runs for direct-reclaim, __GFP_FS-capable contexts
 * to avoid recursing into the filesystem.  Returns the number of
 * dquots removed from the LRU.
 */
static unsigned long
xfs_qm_shrink_scan(
	struct shrinker		*shrink,
	struct shrink_control	*sc)
{
	struct xfs_quotainfo	*qi = container_of(shrink,
					struct xfs_quotainfo, qi_shrinker);
	struct xfs_qm_isolate	isol;
	unsigned long		freed;
	int			error;

	if ((sc->gfp_mask & (__GFP_FS|__GFP_DIRECT_RECLAIM)) != (__GFP_FS|__GFP_DIRECT_RECLAIM))
		return 0;

	INIT_LIST_HEAD(&isol.buffers);
	INIT_LIST_HEAD(&isol.dispose);

	freed = list_lru_shrink_walk(&qi->qi_lru, sc,
				     xfs_qm_dquot_isolate, &isol);

	/* write back all the dirty dquot buffers queued by the isolate pass */
	error = xfs_buf_delwri_submit(&isol.buffers);
	if (error)
		xfs_warn(NULL, "%s: dquot reclaim failed", __func__);

	while (!list_empty(&isol.dispose)) {
		struct xfs_dquot	*dqp;

		dqp = list_first_entry(&isol.dispose, struct xfs_dquot, q_lru);
		list_del_init(&dqp->q_lru);
		xfs_qm_dqfree_one(dqp);
	}

	return freed;
}
537 :
538 : static unsigned long
539 3726 : xfs_qm_shrink_count(
540 : struct shrinker *shrink,
541 : struct shrink_control *sc)
542 : {
543 3726 : struct xfs_quotainfo *qi = container_of(shrink,
544 : struct xfs_quotainfo, qi_shrinker);
545 :
546 3726 : return list_lru_shrink_count(&qi->qi_lru, sc);
547 : }
548 :
549 : STATIC void
550 106027 : xfs_qm_set_defquota(
551 : struct xfs_mount *mp,
552 : xfs_dqtype_t type,
553 : struct xfs_quotainfo *qinf)
554 : {
555 106027 : struct xfs_dquot *dqp;
556 106027 : struct xfs_def_quota *defq;
557 106027 : int error;
558 :
559 106027 : error = xfs_qm_dqget_uncached(mp, 0, type, &dqp);
560 106027 : if (error)
561 20289 : return;
562 :
563 85738 : defq = xfs_get_defquota(qinf, xfs_dquot_type(dqp));
564 :
565 : /*
566 : * Timers and warnings have been already set, let's just set the
567 : * default limits for this quota type
568 : */
569 85738 : defq->blk.hard = dqp->q_blk.hardlimit;
570 85738 : defq->blk.soft = dqp->q_blk.softlimit;
571 85738 : defq->ino.hard = dqp->q_ino.hardlimit;
572 85738 : defq->ino.soft = dqp->q_ino.softlimit;
573 85738 : defq->rtb.hard = dqp->q_rtb.hardlimit;
574 85738 : defq->rtb.soft = dqp->q_rtb.softlimit;
575 85738 : xfs_qm_dqdestroy(dqp);
576 : }
577 :
578 : /* Initialize quota time limits from the root dquot. */
579 : static void
580 108426 : xfs_qm_init_timelimits(
581 : struct xfs_mount *mp,
582 : xfs_dqtype_t type)
583 : {
584 108426 : struct xfs_quotainfo *qinf = mp->m_quotainfo;
585 108426 : struct xfs_def_quota *defq;
586 108426 : struct xfs_dquot *dqp;
587 108426 : int error;
588 :
589 108426 : defq = xfs_get_defquota(qinf, type);
590 :
591 108426 : defq->blk.time = XFS_QM_BTIMELIMIT;
592 108426 : defq->ino.time = XFS_QM_ITIMELIMIT;
593 108426 : defq->rtb.time = XFS_QM_RTBTIMELIMIT;
594 :
595 : /*
596 : * We try to get the limits from the superuser's limits fields.
597 : * This is quite hacky, but it is standard quota practice.
598 : *
599 : * Since we may not have done a quotacheck by this point, just read
600 : * the dquot without attaching it to any hashtables or lists.
601 : */
602 108426 : error = xfs_qm_dqget_uncached(mp, 0, type, &dqp);
603 108426 : if (error)
604 22688 : return;
605 :
606 : /*
607 : * The warnings and timers set the grace period given to
608 : * a user or group before he or she can not perform any
609 : * more writing. If it is zero, a default is used.
610 : */
611 85738 : if (dqp->q_blk.timer)
612 150 : defq->blk.time = dqp->q_blk.timer;
613 85738 : if (dqp->q_ino.timer)
614 144 : defq->ino.time = dqp->q_ino.timer;
615 85738 : if (dqp->q_rtb.timer)
616 24 : defq->rtb.time = dqp->q_rtb.timer;
617 :
618 85738 : xfs_qm_dqdestroy(dqp);
619 : }
620 :
/*
 * This initializes all the quota information that's kept in the
 * mount structure: allocates the quotainfo, sets up the quota inodes,
 * radix trees, default limits/timers, and registers the dquot
 * shrinker.  On failure everything allocated so far is unwound and
 * mp->m_quotainfo is left NULL.
 */
STATIC int
xfs_qm_init_quotainfo(
	struct xfs_mount	*mp)
{
	struct xfs_quotainfo	*qinf;
	int			error;

	ASSERT(XFS_IS_QUOTA_ON(mp));

	qinf = mp->m_quotainfo = kmem_zalloc(sizeof(struct xfs_quotainfo), 0);

	error = list_lru_init(&qinf->qi_lru);
	if (error)
		goto out_free_qinf;

	/*
	 * See if quotainodes are setup, and if not, allocate them,
	 * and change the superblock accordingly.
	 */
	error = xfs_qm_init_quotainos(mp);
	if (error)
		goto out_free_lru;

	INIT_RADIX_TREE(&qinf->qi_uquota_tree, GFP_NOFS);
	INIT_RADIX_TREE(&qinf->qi_gquota_tree, GFP_NOFS);
	INIT_RADIX_TREE(&qinf->qi_pquota_tree, GFP_NOFS);
	mutex_init(&qinf->qi_tree_lock);

	/* mutex used to serialize quotaoffs */
	mutex_init(&qinf->qi_quotaofflock);

	/* Precalc some constants */
	qinf->qi_dqchunklen = XFS_FSB_TO_BB(mp, XFS_DQUOT_CLUSTER_SIZE_FSB);
	qinf->qi_dqperchunk = xfs_calc_dquots_per_chunk(qinf->qi_dqchunklen);
	/* expiry clamp range depends on the on-disk timestamp format */
	if (xfs_has_bigtime(mp)) {
		qinf->qi_expiry_min =
			xfs_dq_bigtime_to_unix(XFS_DQ_BIGTIME_EXPIRY_MIN);
		qinf->qi_expiry_max =
			xfs_dq_bigtime_to_unix(XFS_DQ_BIGTIME_EXPIRY_MAX);
	} else {
		qinf->qi_expiry_min = XFS_DQ_LEGACY_EXPIRY_MIN;
		qinf->qi_expiry_max = XFS_DQ_LEGACY_EXPIRY_MAX;
	}
	trace_xfs_quota_expiry_range(mp, qinf->qi_expiry_min,
			qinf->qi_expiry_max);

	mp->m_qflags |= (mp->m_sb.sb_qflags & XFS_ALL_QUOTA_CHKD);

	xfs_qm_init_timelimits(mp, XFS_DQTYPE_USER);
	xfs_qm_init_timelimits(mp, XFS_DQTYPE_GROUP);
	xfs_qm_init_timelimits(mp, XFS_DQTYPE_PROJ);

	if (XFS_IS_UQUOTA_ON(mp))
		xfs_qm_set_defquota(mp, XFS_DQTYPE_USER, qinf);
	if (XFS_IS_GQUOTA_ON(mp))
		xfs_qm_set_defquota(mp, XFS_DQTYPE_GROUP, qinf);
	if (XFS_IS_PQUOTA_ON(mp))
		xfs_qm_set_defquota(mp, XFS_DQTYPE_PROJ, qinf);

	qinf->qi_shrinker.count_objects = xfs_qm_shrink_count;
	qinf->qi_shrinker.scan_objects = xfs_qm_shrink_scan;
	qinf->qi_shrinker.seeks = DEFAULT_SEEKS;
	qinf->qi_shrinker.flags = SHRINKER_NUMA_AWARE;

	error = register_shrinker(&qinf->qi_shrinker, "xfs-qm:%s",
				  mp->m_super->s_id);
	if (error)
		goto out_free_inos;

	xfs_hooks_init(&qinf->qi_mod_ino_dqtrx_hooks);
	xfs_hooks_init(&qinf->qi_apply_dqtrx_hooks);

	return 0;

out_free_inos:
	mutex_destroy(&qinf->qi_quotaofflock);
	mutex_destroy(&qinf->qi_tree_lock);
	xfs_qm_destroy_quotainos(qinf);
out_free_lru:
	list_lru_destroy(&qinf->qi_lru);
out_free_qinf:
	kmem_free(qinf);
	mp->m_quotainfo = NULL;
	return error;
}
710 :
711 : /*
712 : * Gets called when unmounting a filesystem or when all quotas get
713 : * turned off.
714 : * This purges the quota inodes, destroys locks and frees itself.
715 : */
716 : void
717 36147 : xfs_qm_destroy_quotainfo(
718 : struct xfs_mount *mp)
719 : {
720 36147 : struct xfs_quotainfo *qi;
721 :
722 36147 : qi = mp->m_quotainfo;
723 36147 : ASSERT(qi != NULL);
724 :
725 36147 : unregister_shrinker(&qi->qi_shrinker);
726 36147 : list_lru_destroy(&qi->qi_lru);
727 36147 : xfs_qm_destroy_quotainos(qi);
728 36147 : mutex_destroy(&qi->qi_tree_lock);
729 36147 : mutex_destroy(&qi->qi_quotaofflock);
730 36147 : kmem_free(qi);
731 36147 : mp->m_quotainfo = NULL;
732 36147 : }
733 :
/*
 * Create an inode and return with a reference already taken, but unlocked
 * This is how we create quota inodes
 *
 * @flags selects which quota inode (XFS_QMOPT_[UGP]QUOTA) and whether
 * the superblock quota feature bit must be set (XFS_QMOPT_SBVERSION).
 * On pre-pquotino superblocks an existing group/project inode may be
 * reused instead of allocating a new one.
 */
STATIC int
xfs_qm_qino_alloc(
	struct xfs_mount	*mp,
	struct xfs_inode	**ipp,
	unsigned int		flags)
{
	struct xfs_trans	*tp;
	int			error;
	bool			need_alloc = true;

	*ipp = NULL;
	/*
	 * With superblock that doesn't have separate pquotino, we
	 * share an inode between gquota and pquota. If the on-disk
	 * superblock has GQUOTA and the filesystem is now mounted
	 * with PQUOTA, just use sb_gquotino for sb_pquotino and
	 * vice-versa.
	 */
	if (!xfs_has_pquotino(mp) &&
			(flags & (XFS_QMOPT_PQUOTA|XFS_QMOPT_GQUOTA))) {
		xfs_ino_t ino = NULLFSINO;

		if ((flags & XFS_QMOPT_PQUOTA) &&
			     (mp->m_sb.sb_gquotino != NULLFSINO)) {
			ino = mp->m_sb.sb_gquotino;
			/* both slots set at once means a corrupt superblock */
			if (XFS_IS_CORRUPT(mp,
					   mp->m_sb.sb_pquotino != NULLFSINO)) {
				xfs_fs_mark_sick(mp, XFS_SICK_FS_PQUOTA);
				return -EFSCORRUPTED;
			}
		} else if ((flags & XFS_QMOPT_GQUOTA) &&
			     (mp->m_sb.sb_pquotino != NULLFSINO)) {
			ino = mp->m_sb.sb_pquotino;
			if (XFS_IS_CORRUPT(mp,
					   mp->m_sb.sb_gquotino != NULLFSINO)) {
				xfs_fs_mark_sick(mp, XFS_SICK_FS_GQUOTA);
				return -EFSCORRUPTED;
			}
		}
		if (ino != NULLFSINO) {
			/* reuse the shared inode rather than allocating */
			error = xfs_iget(mp, NULL, ino, 0, 0, ipp);
			if (error)
				return error;
			mp->m_sb.sb_gquotino = NULLFSINO;
			mp->m_sb.sb_pquotino = NULLFSINO;
			need_alloc = false;
		}
	}

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_create,
			need_alloc ? XFS_QM_QINOCREATE_SPACE_RES(mp) : 0,
			0, 0, &tp);
	if (error)
		return error;

	if (need_alloc) {
		xfs_ino_t	ino;

		error = xfs_dialloc(&tp, 0, S_IFREG, &ino);
		if (!error)
			error = xfs_init_new_inode(&nop_mnt_idmap, tp, NULL, ino,
					S_IFREG, 1, 0, 0, false, ipp);
		if (error) {
			xfs_trans_cancel(tp);
			return error;
		}
	}

	/*
	 * Make the changes in the superblock, and log those too.
	 * sbfields arg may contain fields other than *QUOTINO;
	 * VERSIONNUM for example.
	 */
	spin_lock(&mp->m_sb_lock);
	if (flags & XFS_QMOPT_SBVERSION) {
		ASSERT(!xfs_has_quota(mp));

		xfs_add_quota(mp);
		mp->m_sb.sb_uquotino = NULLFSINO;
		mp->m_sb.sb_gquotino = NULLFSINO;
		mp->m_sb.sb_pquotino = NULLFSINO;

		/* qflags will get updated fully _after_ quotacheck */
		mp->m_sb.sb_qflags = mp->m_qflags & XFS_ALL_QUOTA_ACCT;
	}
	if (flags & XFS_QMOPT_UQUOTA)
		mp->m_sb.sb_uquotino = (*ipp)->i_ino;
	else if (flags & XFS_QMOPT_GQUOTA)
		mp->m_sb.sb_gquotino = (*ipp)->i_ino;
	else
		mp->m_sb.sb_pquotino = (*ipp)->i_ino;
	spin_unlock(&mp->m_sb_lock);
	xfs_log_sb(tp);

	error = xfs_trans_commit(tp);
	if (error) {
		ASSERT(xfs_is_shutdown(mp));
		xfs_alert(mp, "%s failed (error %d)!", __func__, error);
	}
	if (need_alloc) {
		/* finish the deferred inode setup now that the commit is done */
		xfs_iunlock(*ipp, XFS_ILOCK_EXCL);
		xfs_finish_inode_setup(*ipp);
	}
	return error;
}
843 :
844 :
/*
 * Reset every on-disk dquot in buffer @bp (one cluster of dquots
 * starting at id @id) in preparation for quotacheck: zero the usage
 * counters and timers, repair any dquots that fail verification, force
 * the quota type to @type, and recompute CRCs where the filesystem has
 * them.
 */
STATIC void
xfs_qm_reset_dqcounts(
	struct xfs_mount	*mp,
	struct xfs_buf		*bp,
	xfs_dqid_t		id,
	xfs_dqtype_t		type)
{
	struct xfs_dqblk	*dqb;
	int			j;

	trace_xfs_reset_dqcounts(bp, _RET_IP_);

	/*
	 * Reset all counters and timers. They'll be
	 * started afresh by xfs_qm_quotacheck.
	 */
#ifdef DEBUG
	j = (int)XFS_FSB_TO_B(mp, XFS_DQUOT_CLUSTER_SIZE_FSB) /
		sizeof(struct xfs_dqblk);
	ASSERT(mp->m_quotainfo->qi_dqperchunk == j);
#endif
	dqb = bp->b_addr;
	for (j = 0; j < mp->m_quotainfo->qi_dqperchunk; j++) {
		struct xfs_disk_dquot	*ddq;

		ddq = (struct xfs_disk_dquot *)&dqb[j];

		/*
		 * Do a sanity check, and if needed, repair the dqblk. Don't
		 * output any warnings because it's perfectly possible to
		 * find uninitialised dquot blks. See comment in
		 * xfs_dquot_verify.
		 */
		if (xfs_dqblk_verify(mp, &dqb[j], id + j) ||
		    (dqb[j].dd_diskdq.d_type & XFS_DQTYPE_REC_MASK) != type)
			xfs_dqblk_repair(mp, &dqb[j], id + j, type);

		/*
		 * Reset type in case we are reusing group quota file for
		 * project quotas or vice versa
		 */
		ddq->d_type = type;
		ddq->d_bcount = 0;
		ddq->d_icount = 0;
		ddq->d_rtbcount = 0;

		/*
		 * dquot id 0 stores the default grace period and the maximum
		 * warning limit that were set by the administrator, so we
		 * should not reset them.
		 */
		if (ddq->d_id != 0) {
			ddq->d_btimer = 0;
			ddq->d_itimer = 0;
			ddq->d_rtbtimer = 0;
			ddq->d_bwarns = 0;
			ddq->d_iwarns = 0;
			ddq->d_rtbwarns = 0;
			if (xfs_has_bigtime(mp))
				ddq->d_type |= XFS_DQTYPE_BIGTIME;
		}

		if (xfs_has_crc(mp)) {
			/* zeroing the counters invalidated the on-disk CRC */
			xfs_update_cksum((char *)&dqb[j],
					 sizeof(struct xfs_dqblk),
					 XFS_DQUOT_CRC_OFF);
		}
	}
}
914 :
/*
 * Read @blkcnt dquot cluster buffers starting at fsblock @bno
 * (first dquot id @firstid), reset their counters via
 * xfs_qm_reset_dqcounts(), and queue each buffer on @buffer_list for
 * delayed write.  Corrupt buffers are re-read without verification so
 * the reset pass can repair them in place.
 */
STATIC int
xfs_qm_reset_dqcounts_all(
	struct xfs_mount	*mp,
	xfs_dqid_t		firstid,
	xfs_fsblock_t		bno,
	xfs_filblks_t		blkcnt,
	xfs_dqtype_t		type,
	struct list_head	*buffer_list)
{
	struct xfs_buf		*bp;
	int			error = 0;

	ASSERT(blkcnt > 0);

	/*
	 * Blkcnt arg can be a very big number, and might even be
	 * larger than the log itself. So, we have to break it up into
	 * manageable-sized transactions.
	 * Note that we don't start a permanent transaction here; we might
	 * not be able to get a log reservation for the whole thing up front,
	 * and we don't really care to either, because we just discard
	 * everything if we were to crash in the middle of this loop.
	 */
	while (blkcnt--) {
		error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp,
			      XFS_FSB_TO_DADDR(mp, bno),
			      mp->m_quotainfo->qi_dqchunklen, 0, &bp,
			      &xfs_dquot_buf_ops);

		/*
		 * CRC and validation errors will return a EFSCORRUPTED here. If
		 * this occurs, re-read without CRC validation so that we can
		 * repair the damage via xfs_qm_reset_dqcounts(). This process
		 * will leave a trace in the log indicating corruption has
		 * been detected.
		 */
		if (error == -EFSCORRUPTED) {
			error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp,
				      XFS_FSB_TO_DADDR(mp, bno),
				      mp->m_quotainfo->qi_dqchunklen, 0, &bp,
				      NULL);
		}

		if (error)
			break;

		/*
		 * A corrupt buffer might not have a verifier attached, so
		 * make sure we have the correct one attached before writeback
		 * occurs.
		 */
		bp->b_ops = &xfs_dquot_buf_ops;
		xfs_qm_reset_dqcounts(mp, bp, firstid, type);
		xfs_buf_delwri_queue(bp, buffer_list);
		xfs_buf_relse(bp);

		/* goto the next block. */
		bno++;
		firstid += mp->m_quotainfo->qi_dqperchunk;
	}

	return error;
}
978 :
/*
 * Iterate over all allocated dquot blocks in this quota inode, zeroing all
 * counters for every chunk of dquots that we find.
 *
 * Walks the quota inode's extent map in XFS_DQITER_MAP_SIZE batches,
 * skipping holes, issuing readahead on the next extent, and resetting
 * each allocated extent via xfs_qm_reset_dqcounts_all().  Buffers end
 * up on @buffer_list for the caller to submit.
 */
STATIC int
xfs_qm_reset_dqcounts_buf(
	struct xfs_mount	*mp,
	struct xfs_inode	*qip,
	xfs_dqtype_t		type,
	struct list_head	*buffer_list)
{
	struct xfs_bmbt_irec	*map;
	int			i, nmaps;	/* number of map entries */
	int			error;		/* return value */
	xfs_fileoff_t		lblkno;
	xfs_filblks_t		maxlblkcnt;
	xfs_dqid_t		firstid;
	xfs_fsblock_t		rablkno;
	xfs_filblks_t		rablkcnt;

	error = 0;
	/*
	 * This looks racy, but we can't keep an inode lock across a
	 * trans_reserve. But, this gets called during quotacheck, and that
	 * happens only at mount time which is single threaded.
	 */
	if (qip->i_nblocks == 0)
		return 0;

	map = kmem_alloc(XFS_DQITER_MAP_SIZE * sizeof(*map), 0);

	lblkno = 0;
	maxlblkcnt = XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes);
	do {
		uint		lock_mode;

		nmaps = XFS_DQITER_MAP_SIZE;
		/*
		 * We aren't changing the inode itself. Just changing
		 * some of its data. No new blocks are added here, and
		 * the inode is never added to the transaction.
		 */
		lock_mode = xfs_ilock_data_map_shared(qip);
		error = xfs_bmapi_read(qip, lblkno, maxlblkcnt - lblkno,
				       map, &nmaps, 0);
		xfs_iunlock(qip, lock_mode);
		if (error)
			break;

		ASSERT(nmaps <= XFS_DQITER_MAP_SIZE);
		for (i = 0; i < nmaps; i++) {
			ASSERT(map[i].br_startblock != DELAYSTARTBLOCK);
			ASSERT(map[i].br_blockcount);


			lblkno += map[i].br_blockcount;

			/* holes contain no dquots to reset */
			if (map[i].br_startblock == HOLESTARTBLOCK)
				continue;

			firstid = (xfs_dqid_t) map[i].br_startoff *
				mp->m_quotainfo->qi_dqperchunk;
			/*
			 * Do a read-ahead on the next extent.
			 */
			if ((i+1 < nmaps) &&
			    (map[i+1].br_startblock != HOLESTARTBLOCK)) {
				rablkcnt =  map[i+1].br_blockcount;
				rablkno = map[i+1].br_startblock;
				while (rablkcnt--) {
					xfs_buf_readahead(mp->m_ddev_targp,
						XFS_FSB_TO_DADDR(mp, rablkno),
						mp->m_quotainfo->qi_dqchunklen,
						&xfs_dquot_buf_ops);
					rablkno++;
				}
			}
			/*
			 * Iterate thru all the blks in the extent and
			 * reset the counters of all the dquots inside them.
			 */
			error = xfs_qm_reset_dqcounts_all(mp, firstid,
						   map[i].br_startblock,
						   map[i].br_blockcount,
						   type, buffer_list);
			if (error)
				goto out;
		}
	} while (nmaps > 0);

out:
	kmem_free(map);
	return error;
}
1073 :
1074 : /*
1075 : * Called by dqusage_adjust in doing a quotacheck.
1076 : *
1077 : * Given the inode, and a dquot id this updates both the incore dqout as well
1078 : * as the buffer copy. This is so that once the quotacheck is done, we can
1079 : * just log all the buffers, as opposed to logging numerous updates to
1080 : * individual dquots.
1081 : */
1082 : STATIC int
1083 356191 : xfs_qm_quotacheck_dqadjust(
1084 : struct xfs_inode *ip,
1085 : xfs_dqtype_t type,
1086 : xfs_qcnt_t nblks,
1087 : xfs_qcnt_t rtblks)
1088 : {
1089 356191 : struct xfs_mount *mp = ip->i_mount;
1090 356191 : struct xfs_dquot *dqp;
1091 356191 : xfs_dqid_t id;
1092 356191 : int error;
1093 :
1094 356191 : id = xfs_qm_id_for_quotatype(ip, type);
1095 356220 : error = xfs_qm_dqget(mp, id, type, true, &dqp);
1096 356443 : if (error) {
1097 : /*
1098 : * Shouldn't be able to turn off quotas here.
1099 : */
1100 0 : ASSERT(error != -ESRCH);
1101 0 : ASSERT(error != -ENOENT);
1102 0 : return error;
1103 : }
1104 :
1105 356443 : trace_xfs_dqadjust(dqp);
1106 :
1107 : /*
1108 : * Adjust the inode count and the block count to reflect this inode's
1109 : * resource usage.
1110 : */
1111 356442 : dqp->q_ino.count++;
1112 356442 : dqp->q_ino.reserved++;
1113 356442 : if (nblks) {
1114 21505 : dqp->q_blk.count += nblks;
1115 21505 : dqp->q_blk.reserved += nblks;
1116 : }
1117 356442 : if (rtblks) {
1118 0 : dqp->q_rtb.count += rtblks;
1119 0 : dqp->q_rtb.reserved += rtblks;
1120 : }
1121 :
1122 : /*
1123 : * Set default limits, adjust timers (since we changed usages)
1124 : *
1125 : * There are no timers for the default values set in the root dquot.
1126 : */
1127 356442 : if (dqp->q_id) {
1128 11016 : xfs_qm_adjust_dqlimits(dqp);
1129 11016 : xfs_qm_adjust_dqtimers(dqp);
1130 : }
1131 :
1132 356442 : dqp->q_flags |= XFS_DQFLAG_DIRTY;
1133 356442 : xfs_qm_dqput(dqp);
1134 356442 : return 0;
1135 : }
1136 :
1137 : /*
1138 : * callback routine supplied to bulkstat(). Given an inumber, find its
1139 : * dquots and update them to account for resources taken by that inode.
1140 : */
1141 : /* ARGSUSED */
1142 : STATIC int
1143 141189 : xfs_qm_dqusage_adjust(
1144 : struct xfs_mount *mp,
1145 : struct xfs_trans *tp,
1146 : xfs_ino_t ino,
1147 : void *data)
1148 : {
1149 141189 : struct xfs_inode *ip;
1150 141189 : xfs_qcnt_t nblks;
1151 141189 : xfs_filblks_t rtblks = 0; /* total rt blks */
1152 141189 : int error;
1153 :
1154 141189 : ASSERT(XFS_IS_QUOTA_ON(mp));
1155 :
1156 : /*
1157 : * rootino must have its resources accounted for, not so with the quota
1158 : * inodes.
1159 : */
1160 275234 : if (xfs_is_quota_inode(&mp->m_sb, ino))
1161 : return 0;
1162 :
1163 : /*
1164 : * We don't _need_ to take the ilock EXCL here because quotacheck runs
1165 : * at mount time and therefore nobody will be racing chown/chproj.
1166 : */
1167 120307 : error = xfs_iget(mp, tp, ino, XFS_IGET_DONTCACHE, 0, &ip);
1168 120479 : if (error == -EINVAL || error == -ENOENT)
1169 : return 0;
1170 120479 : if (error)
1171 : return error;
1172 :
1173 120479 : ASSERT(ip->i_delayed_blks == 0);
1174 :
1175 120479 : if (XFS_IS_REALTIME_INODE(ip)) {
1176 0 : struct xfs_ifork *ifp = xfs_ifork_ptr(ip, XFS_DATA_FORK);
1177 :
1178 0 : error = xfs_iread_extents(tp, ip, XFS_DATA_FORK);
1179 0 : if (error)
1180 0 : goto error0;
1181 :
1182 0 : xfs_bmap_count_leaves(ifp, &rtblks);
1183 : }
1184 :
1185 120479 : nblks = (xfs_qcnt_t)ip->i_nblocks - rtblks;
1186 :
1187 : /*
1188 : * Add the (disk blocks and inode) resources occupied by this
1189 : * inode to its dquots. We do this adjustment in the incore dquot,
1190 : * and also copy the changes to its buffer.
1191 : * We don't care about putting these changes in a transaction
1192 : * envelope because if we crash in the middle of a 'quotacheck'
1193 : * we have to start from the beginning anyway.
1194 : * Once we're done, we'll log all the dquot bufs.
1195 : *
1196 : * The *QUOTA_ON checks below may look pretty racy, but quotachecks
1197 : * and quotaoffs don't race. (Quotachecks happen at mount time only).
1198 : */
1199 120479 : if (XFS_IS_UQUOTA_ON(mp)) {
1200 119223 : error = xfs_qm_quotacheck_dqadjust(ip, XFS_DQTYPE_USER, nblks,
1201 : rtblks);
1202 119282 : if (error)
1203 0 : goto error0;
1204 : }
1205 :
1206 120538 : if (XFS_IS_GQUOTA_ON(mp)) {
1207 118778 : error = xfs_qm_quotacheck_dqadjust(ip, XFS_DQTYPE_GROUP, nblks,
1208 : rtblks);
1209 118779 : if (error)
1210 0 : goto error0;
1211 : }
1212 :
1213 120539 : if (XFS_IS_PQUOTA_ON(mp)) {
1214 118380 : error = xfs_qm_quotacheck_dqadjust(ip, XFS_DQTYPE_PROJ, nblks,
1215 : rtblks);
1216 118380 : if (error)
1217 0 : goto error0;
1218 : }
1219 :
1220 120539 : error0:
1221 120539 : xfs_irele(ip);
1222 120539 : return error;
1223 : }
1224 :
/*
 * Flush one dirty dquot to its backing buffer and queue that buffer for
 * delayed write.  Walk callback for xfs_qm_dquot_walk() during quotacheck;
 * @data is the caller's delwri buffer list.  Returns 0, -EAGAIN if the
 * flush lock had to be cycled (caller retries the walk), or a flush error.
 */
STATIC int
xfs_qm_flush_one(
	struct xfs_dquot	*dqp,
	void			*data)
{
	struct xfs_mount	*mp = dqp->q_mount;
	struct list_head	*buffer_list = data;
	struct xfs_buf		*bp = NULL;
	int			error = 0;

	xfs_dqlock(dqp);
	if (dqp->q_flags & XFS_DQFLAG_FREEING)
		goto out_unlock;
	if (!XFS_DQ_IS_DIRTY(dqp))
		goto out_unlock;

	/*
	 * The only way the dquot is already flush locked by the time quotacheck
	 * gets here is if reclaim flushed it before the dqadjust walk dirtied
	 * it for the final time. Quotacheck collects all dquot bufs in the
	 * local delwri queue before dquots are dirtied, so reclaim can't have
	 * possibly queued it for I/O. The only way out is to push the buffer to
	 * cycle the flush lock.
	 */
	if (!xfs_dqflock_nowait(dqp)) {
		/* buf is pinned in-core by delwri list */
		error = xfs_buf_incore(mp->m_ddev_targp, dqp->q_blkno,
				mp->m_quotainfo->qi_dqchunklen, 0, &bp);
		if (error)
			goto out_unlock;

		if (!(bp->b_flags & _XBF_DELWRI_Q)) {
			error = -EAGAIN;
			xfs_buf_relse(bp);
			goto out_unlock;
		}
		xfs_buf_unlock(bp);

		/* Force the buffer out to release the dquot flush lock. */
		xfs_buf_delwri_pushbuf(bp, buffer_list);
		xfs_buf_rele(bp);

		error = -EAGAIN;
		goto out_unlock;
	}

	/* Copy the incore dquot into its buffer; returns the locked buffer. */
	error = xfs_qm_dqflush(dqp, &bp);
	if (error)
		goto out_unlock;

	xfs_buf_delwri_queue(bp, buffer_list);
	xfs_buf_relse(bp);
out_unlock:
	xfs_dqunlock(dqp);
	return error;
}
1280 :
1281 : /*
1282 : * Walk thru all the filesystem inodes and construct a consistent view
1283 : * of the disk quota world. If the quotacheck fails, disable quotas.
1284 : */
1285 : STATIC int
1286 7318 : xfs_qm_quotacheck(
1287 : xfs_mount_t *mp)
1288 : {
1289 7318 : int error, error2;
1290 7318 : uint flags;
1291 7318 : LIST_HEAD (buffer_list);
1292 7318 : struct xfs_inode *uip = mp->m_quotainfo->qi_uquotaip;
1293 7318 : struct xfs_inode *gip = mp->m_quotainfo->qi_gquotaip;
1294 7318 : struct xfs_inode *pip = mp->m_quotainfo->qi_pquotaip;
1295 :
1296 7318 : flags = 0;
1297 :
1298 7318 : ASSERT(uip || gip || pip);
1299 7318 : ASSERT(XFS_IS_QUOTA_ON(mp));
1300 :
1301 7318 : xfs_notice(mp, "Quotacheck needed: Please wait.");
1302 :
1303 : /*
1304 : * First we go thru all the dquots on disk, USR and GRP/PRJ, and reset
1305 : * their counters to zero. We need a clean slate.
1306 : * We don't log our changes till later.
1307 : */
1308 7318 : if (uip) {
1309 7046 : error = xfs_qm_reset_dqcounts_buf(mp, uip, XFS_DQTYPE_USER,
1310 : &buffer_list);
1311 7046 : if (error)
1312 0 : goto error_return;
1313 : flags |= XFS_UQUOTA_CHKD;
1314 : }
1315 :
1316 7318 : if (gip) {
1317 6865 : error = xfs_qm_reset_dqcounts_buf(mp, gip, XFS_DQTYPE_GROUP,
1318 : &buffer_list);
1319 6865 : if (error)
1320 0 : goto error_return;
1321 6865 : flags |= XFS_GQUOTA_CHKD;
1322 : }
1323 :
1324 7318 : if (pip) {
1325 6807 : error = xfs_qm_reset_dqcounts_buf(mp, pip, XFS_DQTYPE_PROJ,
1326 : &buffer_list);
1327 6807 : if (error)
1328 0 : goto error_return;
1329 6807 : flags |= XFS_PQUOTA_CHKD;
1330 : }
1331 :
1332 7318 : error = xfs_iwalk_threaded(mp, 0, 0, xfs_qm_dqusage_adjust, 0, true,
1333 : NULL);
1334 :
1335 : /*
1336 : * On error, the inode walk may have partially populated the dquot
1337 : * caches. We must purge them before disabling quota and tearing down
1338 : * the quotainfo, or else the dquots will leak.
1339 : */
1340 7318 : if (error)
1341 0 : goto error_purge;
1342 :
1343 : /*
1344 : * We've made all the changes that we need to make incore. Flush them
1345 : * down to disk buffers if everything was updated successfully.
1346 : */
1347 7318 : if (XFS_IS_UQUOTA_ON(mp)) {
1348 7046 : error = xfs_qm_dquot_walk(mp, XFS_DQTYPE_USER, xfs_qm_flush_one,
1349 : &buffer_list);
1350 : }
1351 7318 : if (XFS_IS_GQUOTA_ON(mp)) {
1352 6865 : error2 = xfs_qm_dquot_walk(mp, XFS_DQTYPE_GROUP, xfs_qm_flush_one,
1353 : &buffer_list);
1354 6865 : if (!error)
1355 6865 : error = error2;
1356 : }
1357 7318 : if (XFS_IS_PQUOTA_ON(mp)) {
1358 6807 : error2 = xfs_qm_dquot_walk(mp, XFS_DQTYPE_PROJ, xfs_qm_flush_one,
1359 : &buffer_list);
1360 6807 : if (!error)
1361 6807 : error = error2;
1362 : }
1363 :
1364 7318 : error2 = xfs_buf_delwri_submit(&buffer_list);
1365 7318 : if (!error)
1366 7318 : error = error2;
1367 :
1368 : /*
1369 : * We can get this error if we couldn't do a dquot allocation inside
1370 : * xfs_qm_dqusage_adjust (via bulkstat). We don't care about the
1371 : * dirty dquots that might be cached, we just want to get rid of them
1372 : * and turn quotaoff. The dquots won't be attached to any of the inodes
1373 : * at this point (because we intentionally didn't in dqget_noattach).
1374 : */
1375 7318 : if (error)
1376 0 : goto error_purge;
1377 :
1378 : /*
1379 : * If one type of quotas is off, then it will lose its
1380 : * quotachecked status, since we won't be doing accounting for
1381 : * that type anymore.
1382 : */
1383 7318 : mp->m_qflags &= ~XFS_ALL_QUOTA_CHKD;
1384 7318 : mp->m_qflags |= flags;
1385 :
1386 7318 : error_return:
1387 7318 : xfs_buf_delwri_cancel(&buffer_list);
1388 :
1389 7318 : if (error) {
1390 0 : xfs_warn(mp,
1391 : "Quotacheck: Unsuccessful (Error %d): Disabling quotas.",
1392 : error);
1393 : /*
1394 : * We must turn off quotas.
1395 : */
1396 0 : ASSERT(mp->m_quotainfo != NULL);
1397 0 : xfs_qm_destroy_quotainfo(mp);
1398 0 : if (xfs_mount_reset_sbqflags(mp)) {
1399 0 : xfs_warn(mp,
1400 : "Quotacheck: Failed to reset quota flags.");
1401 : }
1402 0 : xfs_fs_mark_sick(mp, XFS_SICK_FS_QUOTACHECK);
1403 : } else {
1404 7318 : xfs_notice(mp, "Quotacheck: Done.");
1405 7318 : xfs_fs_mark_healthy(mp, XFS_SICK_FS_QUOTACHECK);
1406 : }
1407 :
1408 7318 : return error;
1409 :
1410 0 : error_purge:
1411 : /*
1412 : * On error, we may have inodes queued for inactivation. This may try
1413 : * to attach dquots to the inode before running cleanup operations on
1414 : * the inode and this can race with the xfs_qm_destroy_quotainfo() call
1415 : * below that frees mp->m_quotainfo. To avoid this race, flush all the
1416 : * pending inodegc operations before we purge the dquots from memory,
1417 : * ensuring that background inactivation is idle whilst we turn off
1418 : * quotas.
1419 : */
1420 0 : xfs_inodegc_flush(mp);
1421 0 : xfs_qm_dqpurge_all(mp);
1422 0 : goto error_return;
1423 :
1424 : }
1425 :
1426 : /*
1427 : * This is called from xfs_mountfs to start quotas and initialize all
1428 : * necessary data structures like quotainfo. This is also responsible for
1429 : * running a quotacheck as necessary. We are guaranteed that the superblock
1430 : * is consistently read in at this point.
1431 : *
1432 : * If we fail here, the mount will continue with quota turned off. We don't
1433 : * need to inidicate success or failure at all.
1434 : */
1435 : void
1436 51417 : xfs_qm_mount_quotas(
1437 : struct xfs_mount *mp)
1438 : {
1439 51417 : int error = 0;
1440 51417 : uint sbf;
1441 :
1442 : /*
1443 : * If quotas on realtime volumes is not supported, we disable
1444 : * quotas immediately.
1445 : */
1446 51417 : if (mp->m_sb.sb_rextents) {
1447 15206 : xfs_notice(mp, "Cannot turn on quotas for realtime filesystem");
1448 15206 : mp->m_qflags = 0;
1449 15206 : goto write_changes;
1450 : }
1451 :
1452 36211 : ASSERT(XFS_IS_QUOTA_ON(mp));
1453 :
1454 : /*
1455 : * Allocate the quotainfo structure inside the mount struct, and
1456 : * create quotainode(s), and change/rev superblock if necessary.
1457 : */
1458 36211 : error = xfs_qm_init_quotainfo(mp);
1459 36211 : if (error) {
1460 : /*
1461 : * We must turn off quotas.
1462 : */
1463 69 : ASSERT(mp->m_quotainfo == NULL);
1464 69 : mp->m_qflags = 0;
1465 69 : goto write_changes;
1466 : }
1467 : /*
1468 : * If any of the quotas are not consistent, do a quotacheck.
1469 : */
1470 36142 : if (XFS_QM_NEED_QUOTACHECK(mp)) {
1471 7318 : error = xfs_qm_quotacheck(mp);
1472 7318 : if (error) {
1473 : /* Quotacheck failed and disabled quotas. */
1474 : return;
1475 : }
1476 : }
1477 : /*
1478 : * If one type of quotas is off, then it will lose its
1479 : * quotachecked status, since we won't be doing accounting for
1480 : * that type anymore.
1481 : */
1482 36142 : if (!XFS_IS_UQUOTA_ON(mp))
1483 516 : mp->m_qflags &= ~XFS_UQUOTA_CHKD;
1484 36142 : if (!XFS_IS_GQUOTA_ON(mp))
1485 872 : mp->m_qflags &= ~XFS_GQUOTA_CHKD;
1486 36142 : if (!XFS_IS_PQUOTA_ON(mp))
1487 1011 : mp->m_qflags &= ~XFS_PQUOTA_CHKD;
1488 :
1489 35131 : write_changes:
1490 : /*
1491 : * We actually don't have to acquire the m_sb_lock at all.
1492 : * This can only be called from mount, and that's single threaded. XXX
1493 : */
1494 51417 : spin_lock(&mp->m_sb_lock);
1495 51417 : sbf = mp->m_sb.sb_qflags;
1496 51417 : mp->m_sb.sb_qflags = mp->m_qflags & XFS_MOUNT_QUOTA_ALL;
1497 51417 : spin_unlock(&mp->m_sb_lock);
1498 :
1499 51417 : if (sbf != (mp->m_qflags & XFS_MOUNT_QUOTA_ALL)) {
1500 7449 : if (xfs_sync_sb(mp, false)) {
1501 : /*
1502 : * We could only have been turning quotas off.
1503 : * We aren't in very good shape actually because
1504 : * the incore structures are convinced that quotas are
1505 : * off, but the on disk superblock doesn't know that !
1506 : */
1507 0 : ASSERT(!(XFS_IS_QUOTA_ON(mp)));
1508 0 : xfs_alert(mp, "%s: Superblock update failed!",
1509 : __func__);
1510 : }
1511 : }
1512 :
1513 51417 : if (error) {
1514 69 : xfs_warn(mp, "Failed to initialize disk quotas.");
1515 69 : return;
1516 : }
1517 : }
1518 :
1519 : /*
1520 : * This is called after the superblock has been read in and we're ready to
1521 : * iget the quota inodes.
1522 : */
1523 : STATIC int
1524 36211 : xfs_qm_init_quotainos(
1525 : xfs_mount_t *mp)
1526 : {
1527 36211 : struct xfs_inode *uip = NULL;
1528 36211 : struct xfs_inode *gip = NULL;
1529 36211 : struct xfs_inode *pip = NULL;
1530 36211 : int error;
1531 36211 : uint flags = 0;
1532 :
1533 36211 : ASSERT(mp->m_quotainfo);
1534 :
1535 : /*
1536 : * Get the uquota and gquota inodes
1537 : */
1538 36211 : if (xfs_has_quota(mp)) {
1539 29174 : if (XFS_IS_UQUOTA_ON(mp) &&
1540 28808 : mp->m_sb.sb_uquotino != NULLFSINO) {
1541 28778 : ASSERT(mp->m_sb.sb_uquotino > 0);
1542 28778 : error = xfs_iget(mp, NULL, mp->m_sb.sb_uquotino,
1543 : 0, 0, &uip);
1544 28778 : if (error)
1545 : return error;
1546 : }
1547 29174 : if (XFS_IS_GQUOTA_ON(mp) &&
1548 28673 : mp->m_sb.sb_gquotino != NULLFSINO) {
1549 28560 : ASSERT(mp->m_sb.sb_gquotino > 0);
1550 28560 : error = xfs_iget(mp, NULL, mp->m_sb.sb_gquotino,
1551 : 0, 0, &gip);
1552 28560 : if (error)
1553 0 : goto error_rele;
1554 : }
1555 29174 : if (XFS_IS_PQUOTA_ON(mp) &&
1556 28562 : mp->m_sb.sb_pquotino != NULLFSINO) {
1557 28445 : ASSERT(mp->m_sb.sb_pquotino > 0);
1558 28445 : error = xfs_iget(mp, NULL, mp->m_sb.sb_pquotino,
1559 : 0, 0, &pip);
1560 28445 : if (error)
1561 0 : goto error_rele;
1562 : }
1563 : } else {
1564 : flags |= XFS_QMOPT_SBVERSION;
1565 : }
1566 :
1567 : /*
1568 : * Create the three inodes, if they don't exist already. The changes
1569 : * made above will get added to a transaction and logged in one of
1570 : * the qino_alloc calls below. If the device is readonly,
1571 : * temporarily switch to read-write to do this.
1572 : */
1573 36211 : if (XFS_IS_UQUOTA_ON(mp) && uip == NULL) {
1574 6917 : error = xfs_qm_qino_alloc(mp, &uip,
1575 : flags | XFS_QMOPT_UQUOTA);
1576 6917 : if (error)
1577 69 : goto error_rele;
1578 :
1579 : flags &= ~XFS_QMOPT_SBVERSION;
1580 : }
1581 36142 : if (XFS_IS_GQUOTA_ON(mp) && gip == NULL) {
1582 6710 : error = xfs_qm_qino_alloc(mp, &gip,
1583 : flags | XFS_QMOPT_GQUOTA);
1584 6710 : if (error)
1585 0 : goto error_rele;
1586 :
1587 : flags &= ~XFS_QMOPT_SBVERSION;
1588 : }
1589 36142 : if (XFS_IS_PQUOTA_ON(mp) && pip == NULL) {
1590 6686 : error = xfs_qm_qino_alloc(mp, &pip,
1591 : flags | XFS_QMOPT_PQUOTA);
1592 6686 : if (error)
1593 0 : goto error_rele;
1594 : }
1595 :
1596 36142 : mp->m_quotainfo->qi_uquotaip = uip;
1597 36142 : mp->m_quotainfo->qi_gquotaip = gip;
1598 36142 : mp->m_quotainfo->qi_pquotaip = pip;
1599 :
1600 36142 : return 0;
1601 :
1602 69 : error_rele:
1603 69 : if (uip)
1604 0 : xfs_irele(uip);
1605 69 : if (gip)
1606 0 : xfs_irele(gip);
1607 69 : if (pip)
1608 0 : xfs_irele(pip);
1609 : return error;
1610 : }
1611 :
1612 : STATIC void
1613 36147 : xfs_qm_destroy_quotainos(
1614 : struct xfs_quotainfo *qi)
1615 : {
1616 36147 : if (qi->qi_uquotaip) {
1617 5 : xfs_irele(qi->qi_uquotaip);
1618 5 : qi->qi_uquotaip = NULL; /* paranoia */
1619 : }
1620 36147 : if (qi->qi_gquotaip) {
1621 5 : xfs_irele(qi->qi_gquotaip);
1622 5 : qi->qi_gquotaip = NULL;
1623 : }
1624 36147 : if (qi->qi_pquotaip) {
1625 5 : xfs_irele(qi->qi_pquotaip);
1626 5 : qi->qi_pquotaip = NULL;
1627 : }
1628 36147 : }
1629 :
1630 : STATIC void
1631 796665 : xfs_qm_dqfree_one(
1632 : struct xfs_dquot *dqp)
1633 : {
1634 796665 : struct xfs_mount *mp = dqp->q_mount;
1635 796665 : struct xfs_quotainfo *qi = mp->m_quotainfo;
1636 :
1637 796665 : mutex_lock(&qi->qi_tree_lock);
1638 796665 : radix_tree_delete(xfs_dquot_tree(qi, xfs_dquot_type(dqp)), dqp->q_id);
1639 :
1640 796665 : qi->qi_dquots--;
1641 796665 : mutex_unlock(&qi->qi_tree_lock);
1642 :
1643 796665 : xfs_qm_dqdestroy(dqp);
1644 796665 : }
1645 :
1646 : /* --------------- utility functions for vnodeops ---------------- */
1647 :
1648 :
1649 : /*
1650 : * Given an inode, a uid, gid and prid make sure that we have
1651 : * allocated relevant dquot(s) on disk, and that we won't exceed inode
1652 : * quotas by creating this file.
1653 : * This also attaches dquot(s) to the given inode after locking it,
1654 : * and returns the dquots corresponding to the uid and/or gid.
1655 : *
1656 : * in : inode (unlocked)
1657 : * out : udquot, gdquot with references taken and unlocked
1658 : */
1659 : int
1660 155206366 : xfs_qm_vop_dqalloc(
1661 : struct xfs_inode *ip,
1662 : kuid_t uid,
1663 : kgid_t gid,
1664 : prid_t prid,
1665 : uint flags,
1666 : struct xfs_dquot **O_udqpp,
1667 : struct xfs_dquot **O_gdqpp,
1668 : struct xfs_dquot **O_pdqpp)
1669 : {
1670 155206366 : struct xfs_mount *mp = ip->i_mount;
1671 155206366 : struct inode *inode = VFS_I(ip);
1672 155206366 : struct user_namespace *user_ns = inode->i_sb->s_user_ns;
1673 155206366 : struct xfs_dquot *uq = NULL;
1674 155206366 : struct xfs_dquot *gq = NULL;
1675 155206366 : struct xfs_dquot *pq = NULL;
1676 155206366 : int error;
1677 155206366 : uint lockflags;
1678 :
1679 155206366 : if (!XFS_IS_QUOTA_ON(mp))
1680 : return 0;
1681 :
1682 80643537 : lockflags = XFS_ILOCK_EXCL;
1683 80643537 : xfs_ilock(ip, lockflags);
1684 :
1685 81211031 : if ((flags & XFS_QMOPT_INHERIT) && XFS_INHERIT_GID(ip))
1686 435 : gid = inode->i_gid;
1687 :
1688 : /*
1689 : * Attach the dquot(s) to this inode, doing a dquot allocation
1690 : * if necessary. The dquot(s) will not be locked.
1691 : */
1692 81211031 : if (XFS_NOT_DQATTACHED(mp, ip)) {
1693 412880 : error = xfs_qm_dqattach_locked(ip, true);
1694 412889 : if (error) {
1695 1368 : xfs_iunlock(ip, lockflags);
1696 1368 : return error;
1697 : }
1698 : }
1699 :
1700 81209672 : if ((flags & XFS_QMOPT_UQUOTA) && XFS_IS_UQUOTA_ON(mp)) {
1701 80728223 : ASSERT(O_udqpp);
1702 80728223 : if (!uid_eq(inode->i_uid, uid)) {
1703 : /*
1704 : * What we need is the dquot that has this uid, and
1705 : * if we send the inode to dqget, the uid of the inode
1706 : * takes priority over what's sent in the uid argument.
1707 : * We must unlock inode here before calling dqget if
1708 : * we're not sending the inode, because otherwise
1709 : * we'll deadlock by doing trans_reserve while
1710 : * holding ilock.
1711 : */
1712 10084480 : xfs_iunlock(ip, lockflags);
1713 10111474 : error = xfs_qm_dqget(mp, from_kuid(user_ns, uid),
1714 : XFS_DQTYPE_USER, true, &uq);
1715 10146775 : if (error) {
1716 22842 : ASSERT(error != -ENOENT);
1717 22842 : return error;
1718 : }
1719 : /*
1720 : * Get the ilock in the right order.
1721 : */
1722 10123933 : xfs_dqunlock(uq);
1723 10123577 : lockflags = XFS_ILOCK_SHARED;
1724 10123577 : xfs_ilock(ip, lockflags);
1725 : } else {
1726 : /*
1727 : * Take an extra reference, because we'll return
1728 : * this to caller
1729 : */
1730 70643743 : ASSERT(ip->i_udquot);
1731 70643743 : uq = xfs_qm_dqhold(ip->i_udquot);
1732 : }
1733 : }
1734 81665811 : if ((flags & XFS_QMOPT_GQUOTA) && XFS_IS_GQUOTA_ON(mp)) {
1735 80853587 : ASSERT(O_gdqpp);
1736 80853587 : if (!gid_eq(inode->i_gid, gid)) {
1737 10108770 : xfs_iunlock(ip, lockflags);
1738 10106283 : error = xfs_qm_dqget(mp, from_kgid(user_ns, gid),
1739 : XFS_DQTYPE_GROUP, true, &gq);
1740 10114248 : if (error) {
1741 6430 : ASSERT(error != -ENOENT);
1742 6430 : goto error_rele;
1743 : }
1744 10107818 : xfs_dqunlock(gq);
1745 10107788 : lockflags = XFS_ILOCK_SHARED;
1746 10107788 : xfs_ilock(ip, lockflags);
1747 : } else {
1748 70744817 : ASSERT(ip->i_gdquot);
1749 70744817 : gq = xfs_qm_dqhold(ip->i_gdquot);
1750 : }
1751 : }
1752 81631283 : if ((flags & XFS_QMOPT_PQUOTA) && XFS_IS_PQUOTA_ON(mp)) {
1753 73094413 : ASSERT(O_pdqpp);
1754 73094413 : if (ip->i_projid != prid) {
1755 408225 : xfs_iunlock(ip, lockflags);
1756 408226 : error = xfs_qm_dqget(mp, prid,
1757 : XFS_DQTYPE_PROJ, true, &pq);
1758 408229 : if (error) {
1759 1324 : ASSERT(error != -ENOENT);
1760 1324 : goto error_rele;
1761 : }
1762 406905 : xfs_dqunlock(pq);
1763 406905 : lockflags = XFS_ILOCK_SHARED;
1764 406905 : xfs_ilock(ip, lockflags);
1765 : } else {
1766 72686188 : ASSERT(ip->i_pdquot);
1767 72686188 : pq = xfs_qm_dqhold(ip->i_pdquot);
1768 : }
1769 : }
1770 81655216 : trace_xfs_dquot_dqalloc(ip);
1771 :
1772 81591814 : xfs_iunlock(ip, lockflags);
1773 81568757 : if (O_udqpp)
1774 81089674 : *O_udqpp = uq;
1775 : else
1776 479083 : xfs_qm_dqrele(uq);
1777 81568751 : if (O_gdqpp)
1778 81089674 : *O_gdqpp = gq;
1779 : else
1780 479077 : xfs_qm_dqrele(gq);
1781 81568753 : if (O_pdqpp)
1782 73398466 : *O_pdqpp = pq;
1783 : else
1784 8170287 : xfs_qm_dqrele(pq);
1785 : return 0;
1786 :
1787 7754 : error_rele:
1788 7754 : xfs_qm_dqrele(gq);
1789 7754 : xfs_qm_dqrele(uq);
1790 7754 : return error;
1791 : }
1792 :
1793 : /*
1794 : * Actually transfer ownership, and do dquot modifications.
1795 : * These were already reserved.
1796 : */
1797 : struct xfs_dquot *
1798 15988670 : xfs_qm_vop_chown(
1799 : struct xfs_trans *tp,
1800 : struct xfs_inode *ip,
1801 : struct xfs_dquot **IO_olddq,
1802 : struct xfs_dquot *newdq)
1803 : {
1804 15988670 : struct xfs_dquot *prevdq;
1805 0 : uint bfield = XFS_IS_REALTIME_INODE(ip) ?
1806 15988670 : XFS_TRANS_DQ_RTBCOUNT : XFS_TRANS_DQ_BCOUNT;
1807 :
1808 :
1809 15988670 : ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
1810 15980627 : ASSERT(XFS_IS_QUOTA_ON(ip->i_mount));
1811 :
1812 : /* old dquot */
1813 15980627 : prevdq = *IO_olddq;
1814 15980627 : ASSERT(prevdq);
1815 15980627 : ASSERT(prevdq != newdq);
1816 :
1817 15980627 : xfs_trans_mod_ino_dquot(tp, ip, prevdq, bfield, -(ip->i_nblocks));
1818 15961164 : xfs_trans_mod_ino_dquot(tp, ip, prevdq, XFS_TRANS_DQ_ICOUNT, -1);
1819 :
1820 : /* the sparkling new dquot */
1821 15968308 : xfs_trans_mod_ino_dquot(tp, ip, newdq, bfield, ip->i_nblocks);
1822 15972570 : xfs_trans_mod_ino_dquot(tp, ip, newdq, XFS_TRANS_DQ_ICOUNT, 1);
1823 :
1824 : /*
1825 : * Back when we made quota reservations for the chown, we reserved the
1826 : * ondisk blocks + delalloc blocks with the new dquot. Now that we've
1827 : * switched the dquots, decrease the new dquot's block reservation
1828 : * (having already bumped up the real counter) so that we don't have
1829 : * any reservation to give back when we commit.
1830 : */
1831 15984347 : xfs_trans_mod_dquot(tp, newdq, XFS_TRANS_DQ_RES_BLKS,
1832 15984347 : -ip->i_delayed_blks);
1833 :
1834 : /*
1835 : * Give the incore reservation for delalloc blocks back to the old
1836 : * dquot. We don't normally handle delalloc quota reservations
1837 : * transactionally, so just lock the dquot and subtract from the
1838 : * reservation. Dirty the transaction because it's too late to turn
1839 : * back now.
1840 : */
1841 15976216 : tp->t_flags |= XFS_TRANS_DIRTY;
1842 15976216 : xfs_dqlock(prevdq);
1843 16007636 : ASSERT(prevdq->q_blk.reserved >= ip->i_delayed_blks);
1844 16007636 : prevdq->q_blk.reserved -= ip->i_delayed_blks;
1845 16007636 : xfs_dqunlock(prevdq);
1846 :
1847 : /*
1848 : * Take an extra reference, because the inode is going to keep
1849 : * this dquot pointer even after the trans_commit.
1850 : */
1851 16005284 : *IO_olddq = xfs_qm_dqhold(newdq);
1852 :
1853 16004303 : return prevdq;
1854 : }
1855 :
1856 : int
1857 47360953 : xfs_qm_vop_rename_dqattach(
1858 : struct xfs_inode **i_tab)
1859 : {
1860 47360953 : struct xfs_mount *mp = i_tab[0]->i_mount;
1861 47360953 : int i;
1862 :
1863 47360953 : if (!XFS_IS_QUOTA_ON(mp))
1864 : return 0;
1865 :
1866 106584835 : for (i = 0; (i < 4 && i_tab[i]); i++) {
1867 82132557 : struct xfs_inode *ip = i_tab[i];
1868 82132557 : int error;
1869 :
1870 : /*
1871 : * Watch out for duplicate entries in the table.
1872 : */
1873 82132557 : if (i == 0 || ip != i_tab[i-1]) {
1874 80285494 : if (XFS_NOT_DQATTACHED(mp, ip)) {
1875 866459 : error = xfs_qm_dqattach(ip);
1876 866464 : if (error)
1877 906 : return error;
1878 : }
1879 : }
1880 : }
1881 : return 0;
1882 : }
1883 :
/*
 * Attach pre-acquired dquots (from xfs_qm_vop_dqalloc) to a freshly
 * created inode inside transaction @tp, and account the new inode
 * against those dquots.  Each non-NULL dquot whose quota type is on
 * gets an extra reference stashed on the inode.
 */
void
xfs_qm_vop_create_dqattach(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	struct xfs_dquot	*udqp,
	struct xfs_dquot	*gdqp,
	struct xfs_dquot	*pdqp)
{
	struct xfs_mount	*mp = tp->t_mountp;

	if (!XFS_IS_QUOTA_ON(mp))
		return;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));

	if (udqp && XFS_IS_UQUOTA_ON(mp)) {
		ASSERT(ip->i_udquot == NULL);
		ASSERT(i_uid_read(VFS_I(ip)) == udqp->q_id);

		ip->i_udquot = xfs_qm_dqhold(udqp);
	}
	if (gdqp && XFS_IS_GQUOTA_ON(mp)) {
		ASSERT(ip->i_gdquot == NULL);
		ASSERT(i_gid_read(VFS_I(ip)) == gdqp->q_id);

		ip->i_gdquot = xfs_qm_dqhold(gdqp);
	}
	if (pdqp && XFS_IS_PQUOTA_ON(mp)) {
		ASSERT(ip->i_pdquot == NULL);
		ASSERT(ip->i_projid == pdqp->q_id);

		ip->i_pdquot = xfs_qm_dqhold(pdqp);
	}

	/* Charge the new inode itself to the attached dquots. */
	xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_ICOUNT, 1);
}
1920 :
1921 : /* Decide if this inode's dquot is near an enforcement boundary. */
1922 : bool
1923 207323704 : xfs_inode_near_dquot_enforcement(
1924 : struct xfs_inode *ip,
1925 : xfs_dqtype_t type)
1926 : {
1927 207323704 : struct xfs_dquot *dqp;
1928 207323704 : int64_t freesp;
1929 :
1930 : /* We only care for quotas that are enabled and enforced. */
1931 207323704 : dqp = xfs_inode_dquot(ip, type);
1932 207323704 : if (!dqp || !xfs_dquot_is_enforced(dqp))
1933 107144450 : return false;
1934 :
1935 100131299 : if (xfs_dquot_res_over_limits(&dqp->q_ino) ||
1936 100130831 : xfs_dquot_res_over_limits(&dqp->q_rtb))
1937 : return true;
1938 :
1939 : /* For space on the data device, check the various thresholds. */
1940 100130831 : if (!dqp->q_prealloc_hi_wmark)
1941 : return false;
1942 :
1943 28602 : if (dqp->q_blk.reserved < dqp->q_prealloc_lo_wmark)
1944 : return false;
1945 :
1946 364 : if (dqp->q_blk.reserved >= dqp->q_prealloc_hi_wmark)
1947 : return true;
1948 :
1949 240 : freesp = dqp->q_prealloc_hi_wmark - dqp->q_blk.reserved;
1950 240 : if (freesp < dqp->q_low_space[XFS_QLOWSP_5_PCNT])
1951 0 : return true;
1952 :
1953 : return false;
1954 : }
|