Line data Source code
1 : // SPDX-License-Identifier: GPL-2.0
2 : /*
3 : * Copyright (c) 2000-2005 Silicon Graphics, Inc.
4 : * All Rights Reserved.
5 : */
6 : #include "xfs.h"
7 : #include "xfs_fs.h"
8 : #include "xfs_shared.h"
9 : #include "xfs_format.h"
10 : #include "xfs_log_format.h"
11 : #include "xfs_trans_resv.h"
12 : #include "xfs_bit.h"
13 : #include "xfs_sb.h"
14 : #include "xfs_mount.h"
15 : #include "xfs_inode.h"
16 : #include "xfs_iwalk.h"
17 : #include "xfs_quota.h"
18 : #include "xfs_bmap.h"
19 : #include "xfs_bmap_util.h"
20 : #include "xfs_trans.h"
21 : #include "xfs_trans_space.h"
22 : #include "xfs_qm.h"
23 : #include "xfs_trace.h"
24 : #include "xfs_icache.h"
25 : #include "xfs_error.h"
26 : #include "xfs_ag.h"
27 : #include "xfs_ialloc.h"
28 : #include "xfs_log_priv.h"
29 :
30 : /*
31 : * The global quota manager. There is only one of these for the entire
32 : * system, _not_ one per file system. XQM keeps track of the overall
33 : * quota functionality, including maintaining the freelist and hash
34 : * tables of dquots.
35 : */
36 : STATIC int xfs_qm_init_quotainos(struct xfs_mount *mp);
37 : STATIC int xfs_qm_init_quotainfo(struct xfs_mount *mp);
38 :
39 : STATIC void xfs_qm_destroy_quotainos(struct xfs_quotainfo *qi);
40 : STATIC void xfs_qm_dqfree_one(struct xfs_dquot *dqp);
41 : /*
42 : * We use the batch lookup interface to iterate over the dquots as it
43 : * currently is the only interface into the radix tree code that allows
44 : * fuzzy lookups instead of exact matches. Holding the lock over multiple
 * operations is fine because all callers run only during mount/umount
 * or quotaoff.
47 : */
48 : #define XFS_DQ_LOOKUP_BATCH 32
49 :
/*
 * Walk every dquot of @type cached in this mount's radix tree and call
 * @execute on each.  If @execute returns -EAGAIN for any dquot, the whole
 * walk is restarted after a short delay; -EFSCORRUPTED aborts the walk
 * immediately; any other error is remembered and returned at the end.
 */
STATIC int
xfs_qm_dquot_walk(
	struct xfs_mount	*mp,
	xfs_dqtype_t		type,
	int			(*execute)(struct xfs_dquot *dqp, void *data),
	void			*data)
{
	struct xfs_quotainfo	*qi = mp->m_quotainfo;
	struct radix_tree_root	*tree = xfs_dquot_tree(qi, type);
	uint32_t		next_index;
	int			last_error = 0;
	int			skipped;
	int			nr_found;

restart:
	skipped = 0;
	next_index = 0;
	nr_found = 0;

	while (1) {
		struct xfs_dquot *batch[XFS_DQ_LOOKUP_BATCH];
		int		error;
		int		i;

		/* qi_tree_lock is held across the whole batch of callbacks. */
		mutex_lock(&qi->qi_tree_lock);
		nr_found = radix_tree_gang_lookup(tree, (void **)batch,
					next_index, XFS_DQ_LOOKUP_BATCH);
		if (!nr_found) {
			mutex_unlock(&qi->qi_tree_lock);
			break;
		}

		for (i = 0; i < nr_found; i++) {
			struct xfs_dquot *dqp = batch[i];

			/* Resume the next lookup just past this dquot's id. */
			next_index = dqp->q_id + 1;

			error = execute(batch[i], data);
			if (error == -EAGAIN) {
				/* Skipped entries force a full restart below. */
				skipped++;
				continue;
			}
			if (error && last_error != -EFSCORRUPTED)
				last_error = error;
		}

		mutex_unlock(&qi->qi_tree_lock);

		/* bail out if the filesystem is corrupted. */
		if (last_error == -EFSCORRUPTED) {
			skipped = 0;
			break;
		}
		/* we're done if id overflows back to zero */
		if (!next_index)
			break;
	}

	if (skipped) {
		/* Give whoever holds the skipped dquots a chance to finish. */
		delay(1);
		goto restart;
	}

	return last_error;
}
115 :
116 :
/*
 * Purge a dquot from all tracking data structures and free it.
 *
 * Called via xfs_qm_dquot_walk() during quotaoff/unmount.  Returns 0 when
 * the dquot was destroyed, or -EAGAIN when it is busy (still referenced,
 * already being freed, or flush-locked) so the walker retries.
 */
STATIC int
xfs_qm_dqpurge(
	struct xfs_dquot	*dqp,
	void			*data)
{
	struct xfs_quotainfo	*qi = dqp->q_mount->m_quotainfo;
	int			error = -EAGAIN;

	xfs_dqlock(dqp);
	/* Busy: someone else is freeing it, or it still has references. */
	if ((dqp->q_flags & XFS_DQFLAG_FREEING) || dqp->q_nrefs != 0)
		goto out_unlock;

	/* Mark it freeing so concurrent lookups back off. */
	dqp->q_flags |= XFS_DQFLAG_FREEING;

	xfs_dqflock(dqp);

	/*
	 * If we are turning this type of quotas off, we don't care
	 * about the dirty metadata sitting in this dquot. OTOH, if
	 * we're unmounting, we do care, so we flush it and wait.
	 */
	if (XFS_DQ_IS_DIRTY(dqp)) {
		struct xfs_buf	*bp = NULL;

		/*
		 * We don't care about getting disk errors here. We need
		 * to purge this dquot anyway, so we go ahead regardless.
		 */
		error = xfs_qm_dqflush(dqp, &bp);
		if (!error) {
			error = xfs_bwrite(bp);
			xfs_buf_relse(bp);
		} else if (error == -EAGAIN) {
			/* Flush lost a race: undo FREEING and let caller retry. */
			dqp->q_flags &= ~XFS_DQFLAG_FREEING;
			goto out_unlock;
		}
		xfs_dqflock(dqp);
	}

	ASSERT(atomic_read(&dqp->q_pincount) == 0);
	ASSERT(xlog_is_shutdown(dqp->q_logitem.qli_item.li_log) ||
		!test_bit(XFS_LI_IN_AIL, &dqp->q_logitem.qli_item.li_flags));

	xfs_dqfunlock(dqp);
	xfs_dqunlock(dqp);

	radix_tree_delete(xfs_dquot_tree(qi, xfs_dquot_type(dqp)), dqp->q_id);
	qi->qi_dquots--;

	/*
	 * We move dquots to the freelist as soon as their reference count
	 * hits zero, so it really should be on the freelist here.
	 */
	ASSERT(!list_empty(&dqp->q_lru));
	list_lru_del(&qi->qi_lru, &dqp->q_lru);
	XFS_STATS_DEC(dqp->q_mount, xs_qm_dquot_unused);

	xfs_qm_dqdestroy(dqp);
	return 0;

out_unlock:
	xfs_dqunlock(dqp);
	return error;
}
184 :
185 : /*
186 : * Purge the dquot cache.
187 : */
188 : static void
189 20323 : xfs_qm_dqpurge_all(
190 : struct xfs_mount *mp)
191 : {
192 20323 : xfs_qm_dquot_walk(mp, XFS_DQTYPE_USER, xfs_qm_dqpurge, NULL);
193 20323 : xfs_qm_dquot_walk(mp, XFS_DQTYPE_GROUP, xfs_qm_dqpurge, NULL);
194 20323 : xfs_qm_dquot_walk(mp, XFS_DQTYPE_PROJ, xfs_qm_dqpurge, NULL);
195 20323 : }
196 :
197 : /*
198 : * Just destroy the quotainfo structure.
199 : */
200 : void
201 22482 : xfs_qm_unmount(
202 : struct xfs_mount *mp)
203 : {
204 22482 : if (mp->m_quotainfo) {
205 20323 : xfs_qm_dqpurge_all(mp);
206 20323 : xfs_qm_destroy_quotainfo(mp);
207 : }
208 22482 : }
209 :
210 : /*
211 : * Called from the vfsops layer.
212 : */
213 : void
214 22474 : xfs_qm_unmount_quotas(
215 : xfs_mount_t *mp)
216 : {
217 : /*
218 : * Release the dquots that root inode, et al might be holding,
219 : * before we flush quotas and blow away the quotainfo structure.
220 : */
221 22474 : ASSERT(mp->m_rootip);
222 22474 : xfs_qm_dqdetach(mp->m_rootip);
223 22474 : if (mp->m_rbmip)
224 22474 : xfs_qm_dqdetach(mp->m_rbmip);
225 22474 : if (mp->m_rsumip)
226 22474 : xfs_qm_dqdetach(mp->m_rsumip);
227 :
228 : /*
229 : * Release the quota inodes.
230 : */
231 22474 : if (mp->m_quotainfo) {
232 20321 : if (mp->m_quotainfo->qi_uquotaip) {
233 20157 : xfs_irele(mp->m_quotainfo->qi_uquotaip);
234 20157 : mp->m_quotainfo->qi_uquotaip = NULL;
235 : }
236 20321 : if (mp->m_quotainfo->qi_gquotaip) {
237 20037 : xfs_irele(mp->m_quotainfo->qi_gquotaip);
238 20037 : mp->m_quotainfo->qi_gquotaip = NULL;
239 : }
240 20321 : if (mp->m_quotainfo->qi_pquotaip) {
241 20021 : xfs_irele(mp->m_quotainfo->qi_pquotaip);
242 20021 : mp->m_quotainfo->qi_pquotaip = NULL;
243 : }
244 : }
245 22474 : }
246 :
/*
 * Attach one dquot of @type to @ip, storing the reference in *@IO_idqpp
 * (one of &ip->i_udquot, &ip->i_gdquot, &ip->i_pdquot).  Caller holds the
 * inode ILOCK exclusively.  If @doalloc is true, the dquot is allocated
 * on disk if it does not yet exist.  Returns 0 with a referenced,
 * unlocked dquot stored in *@IO_idqpp, or a negative errno.
 */
STATIC int
xfs_qm_dqattach_one(
	struct xfs_inode	*ip,
	xfs_dqtype_t		type,
	bool			doalloc,
	struct xfs_dquot	**IO_idqpp)
{
	struct xfs_dquot	*dqp;
	int			error;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
	error = 0;

	/*
	 * See if we already have it in the inode itself. IO_idqpp is &i_udquot
	 * or &i_gdquot. This made the code look weird, but made the logic a lot
	 * simpler.
	 */
	dqp = *IO_idqpp;
	if (dqp) {
		trace_xfs_dqattach_found(dqp);
		return 0;
	}

	/*
	 * Find the dquot from somewhere. This bumps the reference count of
	 * dquot and returns it locked. This can return ENOENT if dquot didn't
	 * exist on disk and we didn't ask it to allocate; ESRCH if quotas got
	 * turned off suddenly.
	 */
	error = xfs_qm_dqget_inode(ip, type, doalloc, &dqp);
	if (error)
		return error;

	trace_xfs_dqattach_get(dqp);

	/*
	 * dqget may have dropped and re-acquired the ilock, but it guarantees
	 * that the dquot returned is the one that should go in the inode.
	 */
	*IO_idqpp = dqp;
	xfs_dqunlock(dqp);
	return 0;
}
291 :
292 : static bool
293 823489613 : xfs_qm_need_dqattach(
294 : struct xfs_inode *ip)
295 : {
296 823489613 : struct xfs_mount *mp = ip->i_mount;
297 :
298 823489613 : if (!XFS_IS_QUOTA_ON(mp))
299 : return false;
300 631675093 : if (!XFS_NOT_DQATTACHED(mp, ip))
301 : return false;
302 55762494 : if (xfs_is_quota_inode(&mp->m_sb, ip->i_ino))
303 0 : return false;
304 : return true;
305 : }
306 :
/*
 * Given a locked inode, attach dquot(s) to it, taking U/G/P-QUOTAON
 * into account.
 * If @doalloc is true, the dquot(s) will be allocated if needed.
 * Inode may get unlocked and relocked in here, and the caller must deal with
 * the consequences.
 *
 * Returns 0, or the first attachment error.  Dquots attached before a
 * failure are deliberately left in place (see comment at "done").
 */
int
xfs_qm_dqattach_locked(
	xfs_inode_t	*ip,
	bool		doalloc)
{
	xfs_mount_t	*mp = ip->i_mount;
	int		error = 0;

	if (!xfs_qm_need_dqattach(ip))
		return 0;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));

	/* Attach each enabled quota type that isn't already present. */
	if (XFS_IS_UQUOTA_ON(mp) && !ip->i_udquot) {
		error = xfs_qm_dqattach_one(ip, XFS_DQTYPE_USER,
				doalloc, &ip->i_udquot);
		if (error)
			goto done;
		ASSERT(ip->i_udquot);
	}

	if (XFS_IS_GQUOTA_ON(mp) && !ip->i_gdquot) {
		error = xfs_qm_dqattach_one(ip, XFS_DQTYPE_GROUP,
				doalloc, &ip->i_gdquot);
		if (error)
			goto done;
		ASSERT(ip->i_gdquot);
	}

	if (XFS_IS_PQUOTA_ON(mp) && !ip->i_pdquot) {
		error = xfs_qm_dqattach_one(ip, XFS_DQTYPE_PROJ,
				doalloc, &ip->i_pdquot);
		if (error)
			goto done;
		ASSERT(ip->i_pdquot);
	}

done:
	/*
	 * Don't worry about the dquots that we may have attached before any
	 * error - they'll get detached later if it has not already been done.
	 */
	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
	return error;
}
359 :
360 : int
361 433342867 : xfs_qm_dqattach(
362 : struct xfs_inode *ip)
363 : {
364 433342867 : int error;
365 :
366 433342867 : if (!xfs_qm_need_dqattach(ip))
367 : return 0;
368 :
369 13045911 : xfs_ilock(ip, XFS_ILOCK_EXCL);
370 13045916 : error = xfs_qm_dqattach_locked(ip, false);
371 13045955 : xfs_iunlock(ip, XFS_ILOCK_EXCL);
372 :
373 13045955 : return error;
374 : }
375 :
376 : /*
377 : * Release dquots (and their references) if any.
378 : * The inode should be locked EXCL except when this's called by
379 : * xfs_ireclaim.
380 : */
381 : void
382 988792466 : xfs_qm_dqdetach(
383 : xfs_inode_t *ip)
384 : {
385 988792466 : if (!(ip->i_udquot || ip->i_gdquot || ip->i_pdquot))
386 : return;
387 :
388 44776029 : trace_xfs_dquot_dqdetach(ip);
389 :
390 89654364 : ASSERT(!xfs_is_quota_inode(&ip->i_mount->m_sb, ip->i_ino));
391 44827182 : if (ip->i_udquot) {
392 44826100 : xfs_qm_dqrele(ip->i_udquot);
393 44826107 : ip->i_udquot = NULL;
394 : }
395 44827189 : if (ip->i_gdquot) {
396 44717378 : xfs_qm_dqrele(ip->i_gdquot);
397 44717382 : ip->i_gdquot = NULL;
398 : }
399 44827193 : if (ip->i_pdquot) {
400 44717045 : xfs_qm_dqrele(ip->i_pdquot);
401 44717067 : ip->i_pdquot = NULL;
402 : }
403 : }
404 :
/*
 * Per-scan state handed to xfs_qm_dquot_isolate() during an LRU walk.
 */
struct xfs_qm_isolate {
	struct list_head	buffers;	/* dirty dquot buffers queued for delwri submit */
	struct list_head	dispose;	/* isolated dquots to be freed by the caller */
};
409 :
/*
 * Decide the fate of one dquot on the LRU during shrinker reclaim:
 * - busy (lock contended or FREEING or flush-locked): skip it;
 * - re-referenced: just remove it from the LRU;
 * - dirty: flush it, queue the buffer on isol->buffers, and retry later;
 * - clean and unreferenced: mark FREEING and move to isol->dispose.
 */
static enum lru_status
xfs_qm_dquot_isolate(
	struct list_head	*item,
	struct list_lru_one	*lru,
	spinlock_t		*lru_lock,
	void			*arg)
		__releases(lru_lock) __acquires(lru_lock)
{
	struct xfs_dquot	*dqp = container_of(item,
						struct xfs_dquot, q_lru);
	struct xfs_qm_isolate	*isol = arg;

	if (!xfs_dqlock_nowait(dqp))
		goto out_miss_busy;

	/*
	 * If something else is freeing this dquot and hasn't yet removed it
	 * from the LRU, leave it for the freeing task to complete the freeing
	 * process rather than risk it being free from under us here.
	 */
	if (dqp->q_flags & XFS_DQFLAG_FREEING)
		goto out_miss_unlock;

	/*
	 * This dquot has acquired a reference in the meantime remove it from
	 * the freelist and try again.
	 */
	if (dqp->q_nrefs) {
		xfs_dqunlock(dqp);
		XFS_STATS_INC(dqp->q_mount, xs_qm_dqwants);

		trace_xfs_dqreclaim_want(dqp);
		list_lru_isolate(lru, &dqp->q_lru);
		XFS_STATS_DEC(dqp->q_mount, xs_qm_dquot_unused);
		return LRU_REMOVED;
	}

	/*
	 * If the dquot is dirty, flush it. If it's already being flushed, just
	 * skip it so there is time for the IO to complete before we try to
	 * reclaim it again on the next LRU pass.
	 */
	if (!xfs_dqflock_nowait(dqp))
		goto out_miss_unlock;

	if (XFS_DQ_IS_DIRTY(dqp)) {
		struct xfs_buf	*bp = NULL;
		int		error;

		trace_xfs_dqreclaim_dirty(dqp);

		/* we have to drop the LRU lock to flush the dquot */
		spin_unlock(lru_lock);

		error = xfs_qm_dqflush(dqp, &bp);
		if (error)
			goto out_unlock_dirty;

		xfs_buf_delwri_queue(bp, &isol->buffers);
		xfs_buf_relse(bp);
		goto out_unlock_dirty;
	}
	xfs_dqfunlock(dqp);

	/*
	 * Prevent lookups now that we are past the point of no return.
	 */
	dqp->q_flags |= XFS_DQFLAG_FREEING;
	xfs_dqunlock(dqp);

	ASSERT(dqp->q_nrefs == 0);
	list_lru_isolate_move(lru, &dqp->q_lru, &isol->dispose);
	XFS_STATS_DEC(dqp->q_mount, xs_qm_dquot_unused);
	trace_xfs_dqreclaim_done(dqp);
	XFS_STATS_INC(dqp->q_mount, xs_qm_dqreclaims);
	return LRU_REMOVED;

out_miss_unlock:
	xfs_dqunlock(dqp);
out_miss_busy:
	trace_xfs_dqreclaim_busy(dqp);
	XFS_STATS_INC(dqp->q_mount, xs_qm_dqreclaim_misses);
	return LRU_SKIP;

out_unlock_dirty:
	trace_xfs_dqreclaim_busy(dqp);
	XFS_STATS_INC(dqp->q_mount, xs_qm_dqreclaim_misses);
	xfs_dqunlock(dqp);
	/* retake the LRU lock before returning LRU_RETRY to the walker */
	spin_lock(lru_lock);
	return LRU_RETRY;
}
501 :
/*
 * Shrinker scan callback: walk this mount's dquot LRU, write back the
 * dirty dquot buffers that were isolated, then free the clean isolated
 * dquots.  Returns the number of dquots freed.
 */
static unsigned long
xfs_qm_shrink_scan(
	struct shrinker		*shrink,
	struct shrink_control	*sc)
{
	struct xfs_quotainfo	*qi = container_of(shrink,
					struct xfs_quotainfo, qi_shrinker);
	struct xfs_qm_isolate	isol;
	unsigned long		freed;
	int			error;

	/* Only reclaim when the allocation context allows fs recursion and blocking. */
	if ((sc->gfp_mask & (__GFP_FS|__GFP_DIRECT_RECLAIM)) != (__GFP_FS|__GFP_DIRECT_RECLAIM))
		return 0;

	INIT_LIST_HEAD(&isol.buffers);
	INIT_LIST_HEAD(&isol.dispose);

	freed = list_lru_shrink_walk(&qi->qi_lru, sc,
				     xfs_qm_dquot_isolate, &isol);

	/* Submit the dirty buffers queued by the isolate callback. */
	error = xfs_buf_delwri_submit(&isol.buffers);
	if (error)
		xfs_warn(NULL, "%s: dquot reclaim failed", __func__);

	/* Free everything that was successfully isolated. */
	while (!list_empty(&isol.dispose)) {
		struct xfs_dquot	*dqp;

		dqp = list_first_entry(&isol.dispose, struct xfs_dquot, q_lru);
		list_del_init(&dqp->q_lru);
		xfs_qm_dqfree_one(dqp);
	}

	return freed;
}
536 :
537 : static unsigned long
538 3825 : xfs_qm_shrink_count(
539 : struct shrinker *shrink,
540 : struct shrink_control *sc)
541 : {
542 3825 : struct xfs_quotainfo *qi = container_of(shrink,
543 : struct xfs_quotainfo, qi_shrinker);
544 :
545 3825 : return list_lru_shrink_count(&qi->qi_lru, sc);
546 : }
547 :
548 : STATIC void
549 60212 : xfs_qm_set_defquota(
550 : struct xfs_mount *mp,
551 : xfs_dqtype_t type,
552 : struct xfs_quotainfo *qinf)
553 : {
554 60212 : struct xfs_dquot *dqp;
555 60212 : struct xfs_def_quota *defq;
556 60212 : int error;
557 :
558 60212 : error = xfs_qm_dqget_uncached(mp, 0, type, &dqp);
559 60212 : if (error)
560 8108 : return;
561 :
562 52104 : defq = xfs_get_defquota(qinf, xfs_dquot_type(dqp));
563 :
564 : /*
565 : * Timers and warnings have been already set, let's just set the
566 : * default limits for this quota type
567 : */
568 52104 : defq->blk.hard = dqp->q_blk.hardlimit;
569 52104 : defq->blk.soft = dqp->q_blk.softlimit;
570 52104 : defq->ino.hard = dqp->q_ino.hardlimit;
571 52104 : defq->ino.soft = dqp->q_ino.softlimit;
572 52104 : defq->rtb.hard = dqp->q_rtb.hardlimit;
573 52104 : defq->rtb.soft = dqp->q_rtb.softlimit;
574 52104 : xfs_qm_dqdestroy(dqp);
575 : }
576 :
577 : /* Initialize quota time limits from the root dquot. */
578 : static void
579 60960 : xfs_qm_init_timelimits(
580 : struct xfs_mount *mp,
581 : xfs_dqtype_t type)
582 : {
583 60960 : struct xfs_quotainfo *qinf = mp->m_quotainfo;
584 60960 : struct xfs_def_quota *defq;
585 60960 : struct xfs_dquot *dqp;
586 60960 : int error;
587 :
588 60960 : defq = xfs_get_defquota(qinf, type);
589 :
590 60960 : defq->blk.time = XFS_QM_BTIMELIMIT;
591 60960 : defq->ino.time = XFS_QM_ITIMELIMIT;
592 60960 : defq->rtb.time = XFS_QM_RTBTIMELIMIT;
593 :
594 : /*
595 : * We try to get the limits from the superuser's limits fields.
596 : * This is quite hacky, but it is standard quota practice.
597 : *
598 : * Since we may not have done a quotacheck by this point, just read
599 : * the dquot without attaching it to any hashtables or lists.
600 : */
601 60960 : error = xfs_qm_dqget_uncached(mp, 0, type, &dqp);
602 60960 : if (error)
603 8856 : return;
604 :
605 : /*
606 : * The warnings and timers set the grace period given to
607 : * a user or group before he or she can not perform any
608 : * more writing. If it is zero, a default is used.
609 : */
610 52104 : if (dqp->q_blk.timer)
611 50 : defq->blk.time = dqp->q_blk.timer;
612 52104 : if (dqp->q_ino.timer)
613 48 : defq->ino.time = dqp->q_ino.timer;
614 52104 : if (dqp->q_rtb.timer)
615 8 : defq->rtb.time = dqp->q_rtb.timer;
616 :
617 52104 : xfs_qm_dqdestroy(dqp);
618 : }
619 :
/*
 * This initializes all the quota information that's kept in the
 * mount structure: the quotainfo allocation, the dquot LRU, the quota
 * inodes, the per-type radix trees, default limits/timers and the
 * memory shrinker.  On failure everything set up so far is torn down
 * and mp->m_quotainfo is reset to NULL.
 */
STATIC int
xfs_qm_init_quotainfo(
	struct xfs_mount	*mp)
{
	struct xfs_quotainfo	*qinf;
	int			error;

	ASSERT(XFS_IS_QUOTA_ON(mp));

	qinf = mp->m_quotainfo = kmem_zalloc(sizeof(struct xfs_quotainfo), 0);

	error = list_lru_init(&qinf->qi_lru);
	if (error)
		goto out_free_qinf;

	/*
	 * See if quotainodes are setup, and if not, allocate them,
	 * and change the superblock accordingly.
	 */
	error = xfs_qm_init_quotainos(mp);
	if (error)
		goto out_free_lru;

	INIT_RADIX_TREE(&qinf->qi_uquota_tree, GFP_NOFS);
	INIT_RADIX_TREE(&qinf->qi_gquota_tree, GFP_NOFS);
	INIT_RADIX_TREE(&qinf->qi_pquota_tree, GFP_NOFS);
	mutex_init(&qinf->qi_tree_lock);

	/* mutex used to serialize quotaoffs */
	mutex_init(&qinf->qi_quotaofflock);

	/* Precalc some constants */
	qinf->qi_dqchunklen = XFS_FSB_TO_BB(mp, XFS_DQUOT_CLUSTER_SIZE_FSB);
	qinf->qi_dqperchunk = xfs_calc_dquots_per_chunk(qinf->qi_dqchunklen);
	/* Expiry clamp range depends on the on-disk timestamp format. */
	if (xfs_has_bigtime(mp)) {
		qinf->qi_expiry_min =
			xfs_dq_bigtime_to_unix(XFS_DQ_BIGTIME_EXPIRY_MIN);
		qinf->qi_expiry_max =
			xfs_dq_bigtime_to_unix(XFS_DQ_BIGTIME_EXPIRY_MAX);
	} else {
		qinf->qi_expiry_min = XFS_DQ_LEGACY_EXPIRY_MIN;
		qinf->qi_expiry_max = XFS_DQ_LEGACY_EXPIRY_MAX;
	}
	trace_xfs_quota_expiry_range(mp, qinf->qi_expiry_min,
			qinf->qi_expiry_max);

	mp->m_qflags |= (mp->m_sb.sb_qflags & XFS_ALL_QUOTA_CHKD);

	/* Pull default timers and limits from each type's id-zero dquot. */
	xfs_qm_init_timelimits(mp, XFS_DQTYPE_USER);
	xfs_qm_init_timelimits(mp, XFS_DQTYPE_GROUP);
	xfs_qm_init_timelimits(mp, XFS_DQTYPE_PROJ);

	if (XFS_IS_UQUOTA_ON(mp))
		xfs_qm_set_defquota(mp, XFS_DQTYPE_USER, qinf);
	if (XFS_IS_GQUOTA_ON(mp))
		xfs_qm_set_defquota(mp, XFS_DQTYPE_GROUP, qinf);
	if (XFS_IS_PQUOTA_ON(mp))
		xfs_qm_set_defquota(mp, XFS_DQTYPE_PROJ, qinf);

	qinf->qi_shrinker.count_objects = xfs_qm_shrink_count;
	qinf->qi_shrinker.scan_objects = xfs_qm_shrink_scan;
	qinf->qi_shrinker.seeks = DEFAULT_SEEKS;
	qinf->qi_shrinker.flags = SHRINKER_NUMA_AWARE;

	error = register_shrinker(&qinf->qi_shrinker, "xfs-qm:%s",
				  mp->m_super->s_id);
	if (error)
		goto out_free_inos;

	return 0;

	/* Error unwind: each label intentionally falls through to the next. */
out_free_inos:
	mutex_destroy(&qinf->qi_quotaofflock);
	mutex_destroy(&qinf->qi_tree_lock);
	xfs_qm_destroy_quotainos(qinf);
out_free_lru:
	list_lru_destroy(&qinf->qi_lru);
out_free_qinf:
	kmem_free(qinf);
	mp->m_quotainfo = NULL;
	return error;
}
706 :
707 : /*
708 : * Gets called when unmounting a filesystem or when all quotas get
709 : * turned off.
710 : * This purges the quota inodes, destroys locks and frees itself.
711 : */
712 : void
713 20323 : xfs_qm_destroy_quotainfo(
714 : struct xfs_mount *mp)
715 : {
716 20323 : struct xfs_quotainfo *qi;
717 :
718 20323 : qi = mp->m_quotainfo;
719 20323 : ASSERT(qi != NULL);
720 :
721 20323 : unregister_shrinker(&qi->qi_shrinker);
722 20323 : list_lru_destroy(&qi->qi_lru);
723 20323 : xfs_qm_destroy_quotainos(qi);
724 20323 : mutex_destroy(&qi->qi_tree_lock);
725 20323 : mutex_destroy(&qi->qi_quotaofflock);
726 20323 : kmem_free(qi);
727 20323 : mp->m_quotainfo = NULL;
728 20323 : }
729 :
/*
 * Create an inode and return with a reference already taken, but unlocked
 * This is how we create quota inodes
 *
 * @flags selects which superblock quota-inode field to set
 * (XFS_QMOPT_UQUOTA/GQUOTA/PQUOTA) and whether to upgrade the superblock
 * version (XFS_QMOPT_SBVERSION).
 */
STATIC int
xfs_qm_qino_alloc(
	struct xfs_mount	*mp,
	struct xfs_inode	**ipp,
	unsigned int		flags)
{
	struct xfs_trans	*tp;
	int			error;
	bool			need_alloc = true;

	*ipp = NULL;
	/*
	 * With superblock that doesn't have separate pquotino, we
	 * share an inode between gquota and pquota. If the on-disk
	 * superblock has GQUOTA and the filesystem is now mounted
	 * with PQUOTA, just use sb_gquotino for sb_pquotino and
	 * vice-versa.
	 */
	if (!xfs_has_pquotino(mp) &&
			(flags & (XFS_QMOPT_PQUOTA|XFS_QMOPT_GQUOTA))) {
		xfs_ino_t ino = NULLFSINO;

		if ((flags & XFS_QMOPT_PQUOTA) &&
			     (mp->m_sb.sb_gquotino != NULLFSINO)) {
			ino = mp->m_sb.sb_gquotino;
			/* Both g and p set at once means a corrupt superblock. */
			if (XFS_IS_CORRUPT(mp,
					   mp->m_sb.sb_pquotino != NULLFSINO))
				return -EFSCORRUPTED;
		} else if ((flags & XFS_QMOPT_GQUOTA) &&
			     (mp->m_sb.sb_pquotino != NULLFSINO)) {
			ino = mp->m_sb.sb_pquotino;
			if (XFS_IS_CORRUPT(mp,
					   mp->m_sb.sb_gquotino != NULLFSINO))
				return -EFSCORRUPTED;
		}
		if (ino != NULLFSINO) {
			/* Reuse the shared inode rather than allocating one. */
			error = xfs_iget(mp, NULL, ino, 0, 0, ipp);
			if (error)
				return error;
			mp->m_sb.sb_gquotino = NULLFSINO;
			mp->m_sb.sb_pquotino = NULLFSINO;
			need_alloc = false;
		}
	}

	/* No space reservation is needed when reusing an existing inode. */
	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_create,
			need_alloc ? XFS_QM_QINOCREATE_SPACE_RES(mp) : 0,
			0, 0, &tp);
	if (error)
		return error;

	if (need_alloc) {
		xfs_ino_t	ino;

		error = xfs_dialloc(&tp, 0, S_IFREG, &ino);
		if (!error)
			error = xfs_init_new_inode(&nop_mnt_idmap, tp, NULL, ino,
					S_IFREG, 1, 0, 0, false, ipp);
		if (error) {
			xfs_trans_cancel(tp);
			return error;
		}
	}

	/*
	 * Make the changes in the superblock, and log those too.
	 * sbfields arg may contain fields other than *QUOTINO;
	 * VERSIONNUM for example.
	 */
	spin_lock(&mp->m_sb_lock);
	if (flags & XFS_QMOPT_SBVERSION) {
		ASSERT(!xfs_has_quota(mp));

		xfs_add_quota(mp);
		mp->m_sb.sb_uquotino = NULLFSINO;
		mp->m_sb.sb_gquotino = NULLFSINO;
		mp->m_sb.sb_pquotino = NULLFSINO;

		/* qflags will get updated fully _after_ quotacheck */
		mp->m_sb.sb_qflags = mp->m_qflags & XFS_ALL_QUOTA_ACCT;
	}
	if (flags & XFS_QMOPT_UQUOTA)
		mp->m_sb.sb_uquotino = (*ipp)->i_ino;
	else if (flags & XFS_QMOPT_GQUOTA)
		mp->m_sb.sb_gquotino = (*ipp)->i_ino;
	else
		mp->m_sb.sb_pquotino = (*ipp)->i_ino;
	spin_unlock(&mp->m_sb_lock);
	xfs_log_sb(tp);

	error = xfs_trans_commit(tp);
	if (error) {
		ASSERT(xfs_is_shutdown(mp));
		xfs_alert(mp, "%s failed (error %d)!", __func__, error);
	}
	if (need_alloc)
		xfs_finish_inode_setup(*ipp);
	return error;
}
833 :
834 :
/*
 * Reset the counters and timers of every dquot in buffer @bp (one chunk
 * of @type dquots whose ids start at @id), repairing any dqblk that fails
 * verification.  Used by quotacheck before recounting usage.
 */
STATIC void
xfs_qm_reset_dqcounts(
	struct xfs_mount	*mp,
	struct xfs_buf		*bp,
	xfs_dqid_t		id,
	xfs_dqtype_t		type)
{
	struct xfs_dqblk	*dqb;
	int			j;

	trace_xfs_reset_dqcounts(bp, _RET_IP_);

	/*
	 * Reset all counters and timers. They'll be
	 * started afresh by xfs_qm_quotacheck.
	 */
#ifdef DEBUG
	j = (int)XFS_FSB_TO_B(mp, XFS_DQUOT_CLUSTER_SIZE_FSB) /
		sizeof(struct xfs_dqblk);
	ASSERT(mp->m_quotainfo->qi_dqperchunk == j);
#endif
	dqb = bp->b_addr;
	for (j = 0; j < mp->m_quotainfo->qi_dqperchunk; j++) {
		struct xfs_disk_dquot	*ddq;

		ddq = (struct xfs_disk_dquot *)&dqb[j];

		/*
		 * Do a sanity check, and if needed, repair the dqblk. Don't
		 * output any warnings because it's perfectly possible to
		 * find uninitialised dquot blks. See comment in
		 * xfs_dquot_verify.
		 */
		if (xfs_dqblk_verify(mp, &dqb[j], id + j) ||
		    (dqb[j].dd_diskdq.d_type & XFS_DQTYPE_REC_MASK) != type)
			xfs_dqblk_repair(mp, &dqb[j], id + j, type);

		/*
		 * Reset type in case we are reusing group quota file for
		 * project quotas or vice versa
		 */
		ddq->d_type = type;
		ddq->d_bcount = 0;
		ddq->d_icount = 0;
		ddq->d_rtbcount = 0;

		/*
		 * dquot id 0 stores the default grace period and the maximum
		 * warning limit that were set by the administrator, so we
		 * should not reset them.
		 */
		if (ddq->d_id != 0) {
			ddq->d_btimer = 0;
			ddq->d_itimer = 0;
			ddq->d_rtbtimer = 0;
			ddq->d_bwarns = 0;
			ddq->d_iwarns = 0;
			ddq->d_rtbwarns = 0;
			if (xfs_has_bigtime(mp))
				ddq->d_type |= XFS_DQTYPE_BIGTIME;
		}

		/* Recompute the on-disk CRC after the in-place edits above. */
		if (xfs_has_crc(mp)) {
			xfs_update_cksum((char *)&dqb[j],
					 sizeof(struct xfs_dqblk),
					 XFS_DQUOT_CRC_OFF);
		}
	}
}
904 :
/*
 * Read each of the @blkcnt dquot buffers starting at fsblock @bno, reset
 * the dquots inside (ids starting at @firstid) and queue each buffer on
 * @buffer_list for delayed write.
 */
STATIC int
xfs_qm_reset_dqcounts_all(
	struct xfs_mount	*mp,
	xfs_dqid_t		firstid,
	xfs_fsblock_t		bno,
	xfs_filblks_t		blkcnt,
	xfs_dqtype_t		type,
	struct list_head	*buffer_list)
{
	struct xfs_buf		*bp;
	int			error = 0;

	ASSERT(blkcnt > 0);

	/*
	 * Blkcnt arg can be a very big number, and might even be
	 * larger than the log itself. So, we have to break it up into
	 * manageable-sized transactions.
	 * Note that we don't start a permanent transaction here; we might
	 * not be able to get a log reservation for the whole thing up front,
	 * and we don't really care to either, because we just discard
	 * everything if we were to crash in the middle of this loop.
	 */
	while (blkcnt--) {
		error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp,
			      XFS_FSB_TO_DADDR(mp, bno),
			      mp->m_quotainfo->qi_dqchunklen, 0, &bp,
			      &xfs_dquot_buf_ops);

		/*
		 * CRC and validation errors will return a EFSCORRUPTED here. If
		 * this occurs, re-read without CRC validation so that we can
		 * repair the damage via xfs_qm_reset_dqcounts(). This process
		 * will leave a trace in the log indicating corruption has
		 * been detected.
		 */
		if (error == -EFSCORRUPTED) {
			error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp,
				      XFS_FSB_TO_DADDR(mp, bno),
				      mp->m_quotainfo->qi_dqchunklen, 0, &bp,
				      NULL);
		}

		if (error)
			break;

		/*
		 * A corrupt buffer might not have a verifier attached, so
		 * make sure we have the correct one attached before writeback
		 * occurs.
		 */
		bp->b_ops = &xfs_dquot_buf_ops;
		xfs_qm_reset_dqcounts(mp, bp, firstid, type);
		xfs_buf_delwri_queue(bp, buffer_list);
		xfs_buf_relse(bp);

		/* goto the next block. */
		bno++;
		firstid += mp->m_quotainfo->qi_dqperchunk;
	}

	return error;
}
968 :
/*
 * Iterate over all allocated dquot blocks in this quota inode, zeroing all
 * counters for every chunk of dquots that we find.
 *
 * Reads the inode's extent map in XFS_DQITER_MAP_SIZE batches, skipping
 * holes, read-ahead on the next extent, and resetting each mapped block
 * via xfs_qm_reset_dqcounts_all().
 */
STATIC int
xfs_qm_reset_dqcounts_buf(
	struct xfs_mount	*mp,
	struct xfs_inode	*qip,
	xfs_dqtype_t		type,
	struct list_head	*buffer_list)
{
	struct xfs_bmbt_irec	*map;
	int			i, nmaps;	/* number of map entries */
	int			error;		/* return value */
	xfs_fileoff_t		lblkno;
	xfs_filblks_t		maxlblkcnt;
	xfs_dqid_t		firstid;
	xfs_fsblock_t		rablkno;
	xfs_filblks_t		rablkcnt;

	error = 0;
	/*
	 * This looks racy, but we can't keep an inode lock across a
	 * trans_reserve. But, this gets called during quotacheck, and that
	 * happens only at mount time which is single threaded.
	 */
	if (qip->i_nblocks == 0)
		return 0;

	map = kmem_alloc(XFS_DQITER_MAP_SIZE * sizeof(*map), 0);

	lblkno = 0;
	maxlblkcnt = XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes);
	do {
		uint		lock_mode;

		nmaps = XFS_DQITER_MAP_SIZE;
		/*
		 * We aren't changing the inode itself. Just changing
		 * some of its data. No new blocks are added here, and
		 * the inode is never added to the transaction.
		 */
		lock_mode = xfs_ilock_data_map_shared(qip);
		error = xfs_bmapi_read(qip, lblkno, maxlblkcnt - lblkno,
				       map, &nmaps, 0);
		xfs_iunlock(qip, lock_mode);
		if (error)
			break;

		ASSERT(nmaps <= XFS_DQITER_MAP_SIZE);
		for (i = 0; i < nmaps; i++) {
			ASSERT(map[i].br_startblock != DELAYSTARTBLOCK);
			ASSERT(map[i].br_blockcount);


			lblkno += map[i].br_blockcount;

			/* Holes contain no dquots to reset. */
			if (map[i].br_startblock == HOLESTARTBLOCK)
				continue;

			firstid = (xfs_dqid_t) map[i].br_startoff *
				mp->m_quotainfo->qi_dqperchunk;
			/*
			 * Do a read-ahead on the next extent.
			 */
			if ((i+1 < nmaps) &&
			    (map[i+1].br_startblock != HOLESTARTBLOCK)) {
				rablkcnt = map[i+1].br_blockcount;
				rablkno = map[i+1].br_startblock;
				while (rablkcnt--) {
					xfs_buf_readahead(mp->m_ddev_targp,
						XFS_FSB_TO_DADDR(mp, rablkno),
						mp->m_quotainfo->qi_dqchunklen,
						&xfs_dquot_buf_ops);
					rablkno++;
				}
			}
			/*
			 * Iterate thru all the blks in the extent and
			 * reset the counters of all the dquots inside them.
			 */
			error = xfs_qm_reset_dqcounts_all(mp, firstid,
						   map[i].br_startblock,
						   map[i].br_blockcount,
						   type, buffer_list);
			if (error)
				goto out;
		}
	} while (nmaps > 0);

out:
	kmem_free(map);
	return error;
}
1063 :
1064 : /*
1065 : * Called by dqusage_adjust in doing a quotacheck.
1066 : *
 * Given the inode, and a dquot id, this updates both the incore dquot as well
1068 : * as the buffer copy. This is so that once the quotacheck is done, we can
1069 : * just log all the buffers, as opposed to logging numerous updates to
1070 : * individual dquots.
1071 : */
STATIC int
xfs_qm_quotacheck_dqadjust(
	struct xfs_inode	*ip,
	xfs_dqtype_t		type,
	xfs_qcnt_t		nblks,
	xfs_qcnt_t		rtblks)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_dquot	*dqp;
	xfs_dqid_t		id;
	int			error;

	/* Map the inode's uid/gid/projid to the dquot id for this type. */
	id = xfs_qm_id_for_quotatype(ip, type);
	/* Get a referenced dquot for this id, allocating it if necessary. */
	error = xfs_qm_dqget(mp, id, type, true, &dqp);
	if (error) {
		/*
		 * Shouldn't be able to turn off quotas here.
		 */
		ASSERT(error != -ESRCH);
		ASSERT(error != -ENOENT);
		return error;
	}

	trace_xfs_dqadjust(dqp);

	/*
	 * Adjust the inode count and the block count to reflect this inode's
	 * resource usage.
	 */
	dqp->q_ino.count++;
	dqp->q_ino.reserved++;
	if (nblks) {
		dqp->q_blk.count += nblks;
		dqp->q_blk.reserved += nblks;
	}
	if (rtblks) {
		dqp->q_rtb.count += rtblks;
		dqp->q_rtb.reserved += rtblks;
	}

	/*
	 * Set default limits, adjust timers (since we changed usages)
	 *
	 * There are no timers for the default values set in the root dquot.
	 */
	if (dqp->q_id) {
		xfs_qm_adjust_dqlimits(dqp);
		xfs_qm_adjust_dqtimers(dqp);
	}

	/* Mark dirty so quotacheck's flush pass writes this dquot back. */
	dqp->q_flags |= XFS_DQFLAG_DIRTY;
	xfs_qm_dqput(dqp);
	return 0;
}
1126 :
1127 : /*
1128 : * callback routine supplied to bulkstat(). Given an inumber, find its
1129 : * dquots and update them to account for resources taken by that inode.
1130 : */
1131 : /* ARGSUSED */
STATIC int
xfs_qm_dqusage_adjust(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	xfs_ino_t		ino,
	void			*data)
{
	struct xfs_inode	*ip;
	xfs_qcnt_t		nblks;
	xfs_filblks_t		rtblks = 0;	/* total rt blks */
	int			error;

	ASSERT(XFS_IS_QUOTA_ON(mp));

	/*
	 * rootino must have its resources accounted for, not so with the quota
	 * inodes.
	 */
	if (xfs_is_quota_inode(&mp->m_sb, ino))
		return 0;

	/*
	 * We don't _need_ to take the ilock EXCL here because quotacheck runs
	 * at mount time and therefore nobody will be racing chown/chproj.
	 */
	error = xfs_iget(mp, tp, ino, XFS_IGET_DONTCACHE, 0, &ip);
	if (error == -EINVAL || error == -ENOENT)
		return 0;	/* treated as "skip this inode", not failure */
	if (error)
		return error;

	ASSERT(ip->i_delayed_blks == 0);

	if (XFS_IS_REALTIME_INODE(ip)) {
		struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, XFS_DATA_FORK);

		/* Count realtime blocks so they can be charged separately. */
		error = xfs_iread_extents(tp, ip, XFS_DATA_FORK);
		if (error)
			goto error0;

		xfs_bmap_count_leaves(ifp, &rtblks);
	}

	/* Data-device blocks are whatever isn't on the realtime device. */
	nblks = (xfs_qcnt_t)ip->i_nblocks - rtblks;

	/*
	 * Add the (disk blocks and inode) resources occupied by this
	 * inode to its dquots. We do this adjustment in the incore dquot,
	 * and also copy the changes to its buffer.
	 * We don't care about putting these changes in a transaction
	 * envelope because if we crash in the middle of a 'quotacheck'
	 * we have to start from the beginning anyway.
	 * Once we're done, we'll log all the dquot bufs.
	 *
	 * The *QUOTA_ON checks below may look pretty racy, but quotachecks
	 * and quotaoffs don't race. (Quotachecks happen at mount time only).
	 */
	if (XFS_IS_UQUOTA_ON(mp)) {
		error = xfs_qm_quotacheck_dqadjust(ip, XFS_DQTYPE_USER, nblks,
				rtblks);
		if (error)
			goto error0;
	}

	if (XFS_IS_GQUOTA_ON(mp)) {
		error = xfs_qm_quotacheck_dqadjust(ip, XFS_DQTYPE_GROUP, nblks,
				rtblks);
		if (error)
			goto error0;
	}

	if (XFS_IS_PQUOTA_ON(mp)) {
		error = xfs_qm_quotacheck_dqadjust(ip, XFS_DQTYPE_PROJ, nblks,
				rtblks);
		if (error)
			goto error0;
	}

	/* Shared success/failure exit: 'error' is zero on the success path. */
error0:
	xfs_irele(ip);
	return error;
}
1214 :
/*
 * Flush one dirty dquot to its backing buffer and queue that buffer on the
 * caller's delwri list (passed via @data).  Used as the xfs_qm_dquot_walk
 * callback during quotacheck's final write-back pass.
 */
STATIC int
xfs_qm_flush_one(
	struct xfs_dquot	*dqp,
	void			*data)
{
	struct xfs_mount	*mp = dqp->q_mount;
	struct list_head	*buffer_list = data;
	struct xfs_buf		*bp = NULL;
	int			error = 0;

	xfs_dqlock(dqp);
	/* Skip dquots being torn down or with nothing to write back. */
	if (dqp->q_flags & XFS_DQFLAG_FREEING)
		goto out_unlock;
	if (!XFS_DQ_IS_DIRTY(dqp))
		goto out_unlock;

	/*
	 * The only way the dquot is already flush locked by the time quotacheck
	 * gets here is if reclaim flushed it before the dqadjust walk dirtied
	 * it for the final time. Quotacheck collects all dquot bufs in the
	 * local delwri queue before dquots are dirtied, so reclaim can't have
	 * possibly queued it for I/O. The only way out is to push the buffer to
	 * cycle the flush lock.
	 */
	if (!xfs_dqflock_nowait(dqp)) {
		/* buf is pinned in-core by delwri list */
		error = xfs_buf_incore(mp->m_ddev_targp, dqp->q_blkno,
				mp->m_quotainfo->qi_dqchunklen, 0, &bp);
		if (error)
			goto out_unlock;

		if (!(bp->b_flags & _XBF_DELWRI_Q)) {
			error = -EAGAIN;
			xfs_buf_relse(bp);
			goto out_unlock;
		}
		xfs_buf_unlock(bp);

		xfs_buf_delwri_pushbuf(bp, buffer_list);
		xfs_buf_rele(bp);

		/* NOTE(review): -EAGAIN presumably makes the walker revisit this dquot — confirm in xfs_qm_dquot_walk. */
		error = -EAGAIN;
		goto out_unlock;
	}

	error = xfs_qm_dqflush(dqp, &bp);
	if (error)
		goto out_unlock;

	/* Queue the buffer for deferred write-out and drop our hold on it. */
	xfs_buf_delwri_queue(bp, buffer_list);
	xfs_buf_relse(bp);
out_unlock:
	xfs_dqunlock(dqp);
	return error;
}
1270 :
1271 : /*
1272 : * Walk thru all the filesystem inodes and construct a consistent view
1273 : * of the disk quota world. If the quotacheck fails, disable quotas.
1274 : */
STATIC int
xfs_qm_quotacheck(
	xfs_mount_t	*mp)
{
	int			error, error2;
	uint			flags;
	LIST_HEAD	(buffer_list);
	struct xfs_inode	*uip = mp->m_quotainfo->qi_uquotaip;
	struct xfs_inode	*gip = mp->m_quotainfo->qi_gquotaip;
	struct xfs_inode	*pip = mp->m_quotainfo->qi_pquotaip;

	flags = 0;

	ASSERT(uip || gip || pip);
	ASSERT(XFS_IS_QUOTA_ON(mp));

	xfs_notice(mp, "Quotacheck needed: Please wait.");

	/*
	 * First we go thru all the dquots on disk, USR and GRP/PRJ, and reset
	 * their counters to zero. We need a clean slate.
	 * We don't log our changes till later.
	 */
	if (uip) {
		error = xfs_qm_reset_dqcounts_buf(mp, uip, XFS_DQTYPE_USER,
					 &buffer_list);
		if (error)
			goto error_return;
		flags |= XFS_UQUOTA_CHKD;
	}

	if (gip) {
		error = xfs_qm_reset_dqcounts_buf(mp, gip, XFS_DQTYPE_GROUP,
					 &buffer_list);
		if (error)
			goto error_return;
		flags |= XFS_GQUOTA_CHKD;
	}

	if (pip) {
		error = xfs_qm_reset_dqcounts_buf(mp, pip, XFS_DQTYPE_PROJ,
					 &buffer_list);
		if (error)
			goto error_return;
		flags |= XFS_PQUOTA_CHKD;
	}

	/* Walk every inode, adding its usage back into the zeroed dquots. */
	error = xfs_iwalk_threaded(mp, 0, 0, xfs_qm_dqusage_adjust, 0, true,
			NULL);

	/*
	 * On error, the inode walk may have partially populated the dquot
	 * caches.  We must purge them before disabling quota and tearing down
	 * the quotainfo, or else the dquots will leak.
	 */
	if (error)
		goto error_purge;

	/*
	 * We've made all the changes that we need to make incore.  Flush them
	 * down to disk buffers if everything was updated successfully.
	 */
	if (XFS_IS_UQUOTA_ON(mp)) {
		error = xfs_qm_dquot_walk(mp, XFS_DQTYPE_USER, xfs_qm_flush_one,
					  &buffer_list);
	}
	/* Keep the first error but still flush the remaining quota types. */
	if (XFS_IS_GQUOTA_ON(mp)) {
		error2 = xfs_qm_dquot_walk(mp, XFS_DQTYPE_GROUP, xfs_qm_flush_one,
					   &buffer_list);
		if (!error)
			error = error2;
	}
	if (XFS_IS_PQUOTA_ON(mp)) {
		error2 = xfs_qm_dquot_walk(mp, XFS_DQTYPE_PROJ, xfs_qm_flush_one,
					   &buffer_list);
		if (!error)
			error = error2;
	}

	/* Submit all queued dquot buffers for write-out in one batch. */
	error2 = xfs_buf_delwri_submit(&buffer_list);
	if (!error)
		error = error2;

	/*
	 * We can get this error if we couldn't do a dquot allocation inside
	 * xfs_qm_dqusage_adjust (via bulkstat). We don't care about the
	 * dirty dquots that might be cached, we just want to get rid of them
	 * and turn quotaoff. The dquots won't be attached to any of the inodes
	 * at this point (because we intentionally didn't in dqget_noattach).
	 */
	if (error)
		goto error_purge;

	/*
	 * If one type of quotas is off, then it will lose its
	 * quotachecked status, since we won't be doing accounting for
	 * that type anymore.
	 */
	mp->m_qflags &= ~XFS_ALL_QUOTA_CHKD;
	mp->m_qflags |= flags;

 error_return:
	/* Drop any buffers still queued but not submitted. */
	xfs_buf_delwri_cancel(&buffer_list);

	if (error) {
		xfs_warn(mp,
	"Quotacheck: Unsuccessful (Error %d): Disabling quotas.",
			error);
		/*
		 * We must turn off quotas.
		 */
		ASSERT(mp->m_quotainfo != NULL);
		xfs_qm_destroy_quotainfo(mp);
		if (xfs_mount_reset_sbqflags(mp)) {
			xfs_warn(mp,
				"Quotacheck: Failed to reset quota flags.");
		}
	} else
		xfs_notice(mp, "Quotacheck: Done.");
	return error;

error_purge:
	/*
	 * On error, we may have inodes queued for inactivation. This may try
	 * to attach dquots to the inode before running cleanup operations on
	 * the inode and this can race with the xfs_qm_destroy_quotainfo() call
	 * below that frees mp->m_quotainfo. To avoid this race, flush all the
	 * pending inodegc operations before we purge the dquots from memory,
	 * ensuring that background inactivation is idle whilst we turn off
	 * quotas.
	 */
	xfs_inodegc_flush(mp);
	xfs_qm_dqpurge_all(mp);
	goto error_return;

}
1411 :
1412 : /*
1413 : * This is called from xfs_mountfs to start quotas and initialize all
1414 : * necessary data structures like quotainfo. This is also responsible for
1415 : * running a quotacheck as necessary. We are guaranteed that the superblock
1416 : * is consistently read in at this point.
1417 : *
1418 : * If we fail here, the mount will continue with quota turned off. We don't
 * need to indicate success or failure at all.
1420 : */
void
xfs_qm_mount_quotas(
	struct xfs_mount	*mp)
{
	int			error = 0;
	uint			sbf;

	/*
	 * If quotas on realtime volumes is not supported, we disable
	 * quotas immediately.
	 */
	if (mp->m_sb.sb_rextents) {
		xfs_notice(mp, "Cannot turn on quotas for realtime filesystem");
		mp->m_qflags = 0;
		goto write_changes;
	}

	ASSERT(XFS_IS_QUOTA_ON(mp));

	/*
	 * Allocate the quotainfo structure inside the mount struct, and
	 * create quotainode(s), and change/rev superblock if necessary.
	 */
	error = xfs_qm_init_quotainfo(mp);
	if (error) {
		/*
		 * We must turn off quotas.
		 */
		ASSERT(mp->m_quotainfo == NULL);
		mp->m_qflags = 0;
		goto write_changes;
	}
	/*
	 * If any of the quotas are not consistent, do a quotacheck.
	 */
	if (XFS_QM_NEED_QUOTACHECK(mp)) {
		error = xfs_qm_quotacheck(mp);
		if (error) {
			/* Quotacheck failed and disabled quotas. */
			return;
		}
	}
	/*
	 * If one type of quotas is off, then it will lose its
	 * quotachecked status, since we won't be doing accounting for
	 * that type anymore.
	 */
	if (!XFS_IS_UQUOTA_ON(mp))
		mp->m_qflags &= ~XFS_UQUOTA_CHKD;
	if (!XFS_IS_GQUOTA_ON(mp))
		mp->m_qflags &= ~XFS_GQUOTA_CHKD;
	if (!XFS_IS_PQUOTA_ON(mp))
		mp->m_qflags &= ~XFS_PQUOTA_CHKD;

 write_changes:
	/*
	 * We actually don't have to acquire the m_sb_lock at all.
	 * This can only be called from mount, and that's single threaded. XXX
	 */
	spin_lock(&mp->m_sb_lock);
	sbf = mp->m_sb.sb_qflags;
	mp->m_sb.sb_qflags = mp->m_qflags & XFS_MOUNT_QUOTA_ALL;
	spin_unlock(&mp->m_sb_lock);

	/* Only write the superblock if the on-disk qflags actually changed. */
	if (sbf != (mp->m_qflags & XFS_MOUNT_QUOTA_ALL)) {
		if (xfs_sync_sb(mp, false)) {
			/*
			 * We could only have been turning quotas off.
			 * We aren't in very good shape actually because
			 * the incore structures are convinced that quotas are
			 * off, but the on disk superblock doesn't know that !
			 */
			ASSERT(!(XFS_IS_QUOTA_ON(mp)));
			xfs_alert(mp, "%s: Superblock update failed!",
				__func__);
		}
	}

	if (error) {
		xfs_warn(mp, "Failed to initialize disk quotas.");
		return;
	}
}
1504 :
1505 : /*
1506 : * This is called after the superblock has been read in and we're ready to
1507 : * iget the quota inodes.
1508 : */
STATIC int
xfs_qm_init_quotainos(
	xfs_mount_t	*mp)
{
	struct xfs_inode	*uip = NULL;
	struct xfs_inode	*gip = NULL;
	struct xfs_inode	*pip = NULL;
	int			error;
	uint			flags = 0;

	ASSERT(mp->m_quotainfo);

	/*
	 * Get the uquota and gquota inodes
	 */
	if (xfs_has_quota(mp)) {
		if (XFS_IS_UQUOTA_ON(mp) &&
		    mp->m_sb.sb_uquotino != NULLFSINO) {
			ASSERT(mp->m_sb.sb_uquotino > 0);
			error = xfs_iget(mp, NULL, mp->m_sb.sb_uquotino,
					     0, 0, &uip);
			if (error)
				return error;
		}
		if (XFS_IS_GQUOTA_ON(mp) &&
		    mp->m_sb.sb_gquotino != NULLFSINO) {
			ASSERT(mp->m_sb.sb_gquotino > 0);
			error = xfs_iget(mp, NULL, mp->m_sb.sb_gquotino,
					     0, 0, &gip);
			if (error)
				goto error_rele;
		}
		if (XFS_IS_PQUOTA_ON(mp) &&
		    mp->m_sb.sb_pquotino != NULLFSINO) {
			ASSERT(mp->m_sb.sb_pquotino > 0);
			error = xfs_iget(mp, NULL, mp->m_sb.sb_pquotino,
					     0, 0, &pip);
			if (error)
				goto error_rele;
		}
	} else {
		/* Superblock lacks the quota feature: ask qino_alloc to update it. */
		flags |= XFS_QMOPT_SBVERSION;
	}

	/*
	 * Create the three inodes, if they don't exist already. The changes
	 * made above will get added to a transaction and logged in one of
	 * the qino_alloc calls below.  If the device is readonly,
	 * temporarily switch to read-write to do this.
	 */
	if (XFS_IS_UQUOTA_ON(mp) && uip == NULL) {
		error = xfs_qm_qino_alloc(mp, &uip,
					      flags | XFS_QMOPT_UQUOTA);
		if (error)
			goto error_rele;

		/* Only the first successful allocation needs SBVERSION. */
		flags &= ~XFS_QMOPT_SBVERSION;
	}
	if (XFS_IS_GQUOTA_ON(mp) && gip == NULL) {
		error = xfs_qm_qino_alloc(mp, &gip,
					  flags | XFS_QMOPT_GQUOTA);
		if (error)
			goto error_rele;

		flags &= ~XFS_QMOPT_SBVERSION;
	}
	if (XFS_IS_PQUOTA_ON(mp) && pip == NULL) {
		error = xfs_qm_qino_alloc(mp, &pip,
					  flags | XFS_QMOPT_PQUOTA);
		if (error)
			goto error_rele;
	}

	mp->m_quotainfo->qi_uquotaip = uip;
	mp->m_quotainfo->qi_gquotaip = gip;
	mp->m_quotainfo->qi_pquotaip = pip;

	return 0;

error_rele:
	/* Release whichever quota inodes we managed to grab before failing. */
	if (uip)
		xfs_irele(uip);
	if (gip)
		xfs_irele(gip);
	if (pip)
		xfs_irele(pip);
	return error;
}
1597 :
1598 : STATIC void
1599 20323 : xfs_qm_destroy_quotainos(
1600 : struct xfs_quotainfo *qi)
1601 : {
1602 20323 : if (qi->qi_uquotaip) {
1603 2 : xfs_irele(qi->qi_uquotaip);
1604 2 : qi->qi_uquotaip = NULL; /* paranoia */
1605 : }
1606 20323 : if (qi->qi_gquotaip) {
1607 2 : xfs_irele(qi->qi_gquotaip);
1608 2 : qi->qi_gquotaip = NULL;
1609 : }
1610 20323 : if (qi->qi_pquotaip) {
1611 2 : xfs_irele(qi->qi_pquotaip);
1612 2 : qi->qi_pquotaip = NULL;
1613 : }
1614 20323 : }
1615 :
1616 : STATIC void
1617 2721382 : xfs_qm_dqfree_one(
1618 : struct xfs_dquot *dqp)
1619 : {
1620 2721382 : struct xfs_mount *mp = dqp->q_mount;
1621 2721382 : struct xfs_quotainfo *qi = mp->m_quotainfo;
1622 :
1623 2721382 : mutex_lock(&qi->qi_tree_lock);
1624 2721382 : radix_tree_delete(xfs_dquot_tree(qi, xfs_dquot_type(dqp)), dqp->q_id);
1625 :
1626 2721382 : qi->qi_dquots--;
1627 2721382 : mutex_unlock(&qi->qi_tree_lock);
1628 :
1629 2721382 : xfs_qm_dqdestroy(dqp);
1630 2721382 : }
1631 :
1632 : /* --------------- utility functions for vnodeops ---------------- */
1633 :
1634 :
1635 : /*
1636 : * Given an inode, a uid, gid and prid make sure that we have
1637 : * allocated relevant dquot(s) on disk, and that we won't exceed inode
1638 : * quotas by creating this file.
1639 : * This also attaches dquot(s) to the given inode after locking it,
1640 : * and returns the dquots corresponding to the uid and/or gid.
1641 : *
1642 : * in : inode (unlocked)
1643 : * out : udquot, gdquot with references taken and unlocked
1644 : */
int
xfs_qm_vop_dqalloc(
	struct xfs_inode	*ip,
	kuid_t			uid,
	kgid_t			gid,
	prid_t			prid,
	uint			flags,
	struct xfs_dquot	**O_udqpp,
	struct xfs_dquot	**O_gdqpp,
	struct xfs_dquot	**O_pdqpp)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct inode		*inode = VFS_I(ip);
	struct user_namespace	*user_ns = inode->i_sb->s_user_ns;
	struct xfs_dquot	*uq = NULL;
	struct xfs_dquot	*gq = NULL;
	struct xfs_dquot	*pq = NULL;
	int			error;
	uint			lockflags;

	if (!XFS_IS_QUOTA_ON(mp))
		return 0;

	lockflags = XFS_ILOCK_EXCL;
	xfs_ilock(ip, lockflags);

	/* Honor BSD-style setgid directory inheritance if requested. */
	if ((flags & XFS_QMOPT_INHERIT) && XFS_INHERIT_GID(ip))
		gid = inode->i_gid;

	/*
	 * Attach the dquot(s) to this inode, doing a dquot allocation
	 * if necessary. The dquot(s) will not be locked.
	 */
	if (XFS_NOT_DQATTACHED(mp, ip)) {
		error = xfs_qm_dqattach_locked(ip, true);
		if (error) {
			xfs_iunlock(ip, lockflags);
			return error;
		}
	}

	if ((flags & XFS_QMOPT_UQUOTA) && XFS_IS_UQUOTA_ON(mp)) {
		ASSERT(O_udqpp);
		if (!uid_eq(inode->i_uid, uid)) {
			/*
			 * What we need is the dquot that has this uid, and
			 * if we send the inode to dqget, the uid of the inode
			 * takes priority over what's sent in the uid argument.
			 * We must unlock inode here before calling dqget if
			 * we're not sending the inode, because otherwise
			 * we'll deadlock by doing trans_reserve while
			 * holding ilock.
			 */
			xfs_iunlock(ip, lockflags);
			error = xfs_qm_dqget(mp, from_kuid(user_ns, uid),
					XFS_DQTYPE_USER, true, &uq);
			if (error) {
				ASSERT(error != -ENOENT);
				return error;
			}
			/*
			 * Get the ilock in the right order.
			 */
			xfs_dqunlock(uq);
			lockflags = XFS_ILOCK_SHARED;
			xfs_ilock(ip, lockflags);
		} else {
			/*
			 * Take an extra reference, because we'll return
			 * this to caller
			 */
			ASSERT(ip->i_udquot);
			uq = xfs_qm_dqhold(ip->i_udquot);
		}
	}
	if ((flags & XFS_QMOPT_GQUOTA) && XFS_IS_GQUOTA_ON(mp)) {
		ASSERT(O_gdqpp);
		if (!gid_eq(inode->i_gid, gid)) {
			/* Same unlock/dqget/relock dance as for the uid above. */
			xfs_iunlock(ip, lockflags);
			error = xfs_qm_dqget(mp, from_kgid(user_ns, gid),
					XFS_DQTYPE_GROUP, true, &gq);
			if (error) {
				ASSERT(error != -ENOENT);
				goto error_rele;
			}
			xfs_dqunlock(gq);
			lockflags = XFS_ILOCK_SHARED;
			xfs_ilock(ip, lockflags);
		} else {
			ASSERT(ip->i_gdquot);
			gq = xfs_qm_dqhold(ip->i_gdquot);
		}
	}
	if ((flags & XFS_QMOPT_PQUOTA) && XFS_IS_PQUOTA_ON(mp)) {
		ASSERT(O_pdqpp);
		if (ip->i_projid != prid) {
			xfs_iunlock(ip, lockflags);
			error = xfs_qm_dqget(mp, prid,
					XFS_DQTYPE_PROJ, true, &pq);
			if (error) {
				ASSERT(error != -ENOENT);
				goto error_rele;
			}
			xfs_dqunlock(pq);
			lockflags = XFS_ILOCK_SHARED;
			xfs_ilock(ip, lockflags);
		} else {
			ASSERT(ip->i_pdquot);
			pq = xfs_qm_dqhold(ip->i_pdquot);
		}
	}
	trace_xfs_dquot_dqalloc(ip);

	xfs_iunlock(ip, lockflags);
	/* Hand out the references the caller asked for; drop the rest. */
	if (O_udqpp)
		*O_udqpp = uq;
	else
		xfs_qm_dqrele(uq);
	if (O_gdqpp)
		*O_gdqpp = gq;
	else
		xfs_qm_dqrele(gq);
	if (O_pdqpp)
		*O_pdqpp = pq;
	else
		xfs_qm_dqrele(pq);
	return 0;

error_rele:
	/* uq/gq may hold references by now; pq cannot (it is acquired last). */
	xfs_qm_dqrele(gq);
	xfs_qm_dqrele(uq);
	return error;
}
1778 :
1779 : /*
1780 : * Actually transfer ownership, and do dquot modifications.
1781 : * These were already reserved.
1782 : */
struct xfs_dquot *
xfs_qm_vop_chown(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	struct xfs_dquot	**IO_olddq,
	struct xfs_dquot	*newdq)
{
	struct xfs_dquot	*prevdq;
	/* Realtime inodes charge blocks against the rt counter instead. */
	uint			bfield = XFS_IS_REALTIME_INODE(ip) ?
				 XFS_TRANS_DQ_RTBCOUNT : XFS_TRANS_DQ_BCOUNT;


	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
	ASSERT(XFS_IS_QUOTA_ON(ip->i_mount));

	/* old dquot */
	prevdq = *IO_olddq;
	ASSERT(prevdq);
	ASSERT(prevdq != newdq);

	/* Move this inode's block and inode usage off the old dquot... */
	xfs_trans_mod_dquot(tp, prevdq, bfield, -(ip->i_nblocks));
	xfs_trans_mod_dquot(tp, prevdq, XFS_TRANS_DQ_ICOUNT, -1);

	/* the sparkling new dquot */
	xfs_trans_mod_dquot(tp, newdq, bfield, ip->i_nblocks);
	xfs_trans_mod_dquot(tp, newdq, XFS_TRANS_DQ_ICOUNT, 1);

	/*
	 * Back when we made quota reservations for the chown, we reserved the
	 * ondisk blocks + delalloc blocks with the new dquot. Now that we've
	 * switched the dquots, decrease the new dquot's block reservation
	 * (having already bumped up the real counter) so that we don't have
	 * any reservation to give back when we commit.
	 */
	xfs_trans_mod_dquot(tp, newdq, XFS_TRANS_DQ_RES_BLKS,
			-ip->i_delayed_blks);

	/*
	 * Give the incore reservation for delalloc blocks back to the old
	 * dquot. We don't normally handle delalloc quota reservations
	 * transactionally, so just lock the dquot and subtract from the
	 * reservation. Dirty the transaction because it's too late to turn
	 * back now.
	 */
	tp->t_flags |= XFS_TRANS_DIRTY;
	xfs_dqlock(prevdq);
	ASSERT(prevdq->q_blk.reserved >= ip->i_delayed_blks);
	prevdq->q_blk.reserved -= ip->i_delayed_blks;
	xfs_dqunlock(prevdq);

	/*
	 * Take an extra reference, because the inode is going to keep
	 * this dquot pointer even after the trans_commit.
	 */
	*IO_olddq = xfs_qm_dqhold(newdq);

	/* Return the previous dquot — caller presumably drops this reference. */
	return prevdq;
}
1841 :
1842 : int
1843 22741065 : xfs_qm_vop_rename_dqattach(
1844 : struct xfs_inode **i_tab)
1845 : {
1846 22741065 : struct xfs_mount *mp = i_tab[0]->i_mount;
1847 22741065 : int i;
1848 :
1849 22741065 : if (!XFS_IS_QUOTA_ON(mp))
1850 : return 0;
1851 :
1852 63575830 : for (i = 0; (i < 4 && i_tab[i]); i++) {
1853 49017553 : struct xfs_inode *ip = i_tab[i];
1854 49017553 : int error;
1855 :
1856 : /*
1857 : * Watch out for duplicate entries in the table.
1858 : */
1859 49017553 : if (i == 0 || ip != i_tab[i-1]) {
1860 47647342 : if (XFS_NOT_DQATTACHED(mp, ip)) {
1861 3026328 : error = xfs_qm_dqattach(ip);
1862 3026328 : if (error)
1863 730 : return error;
1864 : }
1865 : }
1866 : }
1867 : return 0;
1868 : }
1869 :
1870 : void
1871 47333092 : xfs_qm_vop_create_dqattach(
1872 : struct xfs_trans *tp,
1873 : struct xfs_inode *ip,
1874 : struct xfs_dquot *udqp,
1875 : struct xfs_dquot *gdqp,
1876 : struct xfs_dquot *pdqp)
1877 : {
1878 47333092 : struct xfs_mount *mp = tp->t_mountp;
1879 :
1880 47333092 : if (!XFS_IS_QUOTA_ON(mp))
1881 : return;
1882 :
1883 29996217 : ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
1884 :
1885 29996364 : if (udqp && XFS_IS_UQUOTA_ON(mp)) {
1886 29995654 : ASSERT(ip->i_udquot == NULL);
1887 29995654 : ASSERT(i_uid_read(VFS_I(ip)) == udqp->q_id);
1888 :
1889 29992584 : ip->i_udquot = xfs_qm_dqhold(udqp);
1890 29995719 : xfs_trans_mod_dquot(tp, udqp, XFS_TRANS_DQ_ICOUNT, 1);
1891 : }
1892 29996561 : if (gdqp && XFS_IS_GQUOTA_ON(mp)) {
1893 29887683 : ASSERT(ip->i_gdquot == NULL);
1894 29887683 : ASSERT(i_gid_read(VFS_I(ip)) == gdqp->q_id);
1895 :
1896 29885232 : ip->i_gdquot = xfs_qm_dqhold(gdqp);
1897 29887489 : xfs_trans_mod_dquot(tp, gdqp, XFS_TRANS_DQ_ICOUNT, 1);
1898 : }
1899 29996618 : if (pdqp && XFS_IS_PQUOTA_ON(mp)) {
1900 29886037 : ASSERT(ip->i_pdquot == NULL);
1901 29886037 : ASSERT(ip->i_projid == pdqp->q_id);
1902 :
1903 29886037 : ip->i_pdquot = xfs_qm_dqhold(pdqp);
1904 29887477 : xfs_trans_mod_dquot(tp, pdqp, XFS_TRANS_DQ_ICOUNT, 1);
1905 : }
1906 : }
1907 :
1908 : /* Decide if this inode's dquot is near an enforcement boundary. */
1909 : bool
1910 86536201 : xfs_inode_near_dquot_enforcement(
1911 : struct xfs_inode *ip,
1912 : xfs_dqtype_t type)
1913 : {
1914 86536201 : struct xfs_dquot *dqp;
1915 86536201 : int64_t freesp;
1916 :
1917 : /* We only care for quotas that are enabled and enforced. */
1918 86536201 : dqp = xfs_inode_dquot(ip, type);
1919 86536201 : if (!dqp || !xfs_dquot_is_enforced(dqp))
1920 37534850 : return false;
1921 :
1922 49001073 : if (xfs_dquot_res_over_limits(&dqp->q_ino) ||
1923 49000929 : xfs_dquot_res_over_limits(&dqp->q_rtb))
1924 : return true;
1925 :
1926 : /* For space on the data device, check the various thresholds. */
1927 49000929 : if (!dqp->q_prealloc_hi_wmark)
1928 : return false;
1929 :
1930 9636 : if (dqp->q_blk.reserved < dqp->q_prealloc_lo_wmark)
1931 : return false;
1932 :
1933 141 : if (dqp->q_blk.reserved >= dqp->q_prealloc_hi_wmark)
1934 : return true;
1935 :
1936 80 : freesp = dqp->q_prealloc_hi_wmark - dqp->q_blk.reserved;
1937 80 : if (freesp < dqp->q_low_space[XFS_QLOWSP_5_PCNT])
1938 0 : return true;
1939 :
1940 : return false;
1941 : }
|