Line data Source code
1 : // SPDX-License-Identifier: GPL-2.0
2 : /*
3 : * Copyright (c) 2000-2005 Silicon Graphics, Inc.
4 : * All Rights Reserved.
5 : */
6 : #include "xfs.h"
7 : #include "xfs_fs.h"
8 : #include "xfs_shared.h"
9 : #include "xfs_format.h"
10 : #include "xfs_log_format.h"
11 : #include "xfs_trans_resv.h"
12 : #include "xfs_bit.h"
13 : #include "xfs_sb.h"
14 : #include "xfs_mount.h"
15 : #include "xfs_inode.h"
16 : #include "xfs_iwalk.h"
17 : #include "xfs_quota.h"
18 : #include "xfs_bmap.h"
19 : #include "xfs_bmap_util.h"
20 : #include "xfs_trans.h"
21 : #include "xfs_trans_space.h"
22 : #include "xfs_qm.h"
23 : #include "xfs_trace.h"
24 : #include "xfs_icache.h"
25 : #include "xfs_error.h"
26 : #include "xfs_ag.h"
27 : #include "xfs_ialloc.h"
28 : #include "xfs_log_priv.h"
29 :
30 : /*
31 : * The global quota manager. There is only one of these for the entire
32 : * system, _not_ one per file system. XQM keeps track of the overall
33 : * quota functionality, including maintaining the freelist and radix
34 : * trees of incore dquots.
35 : */
36 : STATIC int xfs_qm_init_quotainos(struct xfs_mount *mp);
37 : STATIC int xfs_qm_init_quotainfo(struct xfs_mount *mp);
38 :
39 : STATIC void xfs_qm_destroy_quotainos(struct xfs_quotainfo *qi);
40 : STATIC void xfs_qm_dqfree_one(struct xfs_dquot *dqp);
41 : /*
42 : * We use the batch lookup interface to iterate over the dquots as it
43 : * currently is the only interface into the radix tree code that allows
44 : * fuzzy lookups instead of exact matches. Holding the lock over multiple
45 : * operations is fine as all callers are used either during mount/umount
46 : * or quotaoff.
47 : */
48 : #define XFS_DQ_LOOKUP_BATCH 32
49 :
/*
 * Walk every dquot of @type in @mp's radix tree and call @execute on each.
 *
 * The tree is scanned in batches of XFS_DQ_LOOKUP_BATCH entries under
 * qi_tree_lock; @execute runs with that mutex held.  If @execute returns
 * -EAGAIN the dquot is counted as skipped and the whole walk is retried
 * after a short delay.  The first non-EAGAIN error is remembered and
 * returned, with -EFSCORRUPTED taking priority and aborting the walk.
 *
 * Returns 0 on success or the last (sticky) error from @execute.
 */
STATIC int
xfs_qm_dquot_walk(
	struct xfs_mount	*mp,
	xfs_dqtype_t		type,
	int			(*execute)(struct xfs_dquot *dqp, void *data),
	void			*data)
{
	struct xfs_quotainfo	*qi = mp->m_quotainfo;
	struct radix_tree_root	*tree = xfs_dquot_tree(qi, type);
	uint32_t		next_index;
	int			last_error = 0;
	int			skipped;
	int			nr_found;

restart:
	skipped = 0;
	next_index = 0;
	nr_found = 0;

	while (1) {
		struct xfs_dquot *batch[XFS_DQ_LOOKUP_BATCH];
		int		error;
		int		i;

		mutex_lock(&qi->qi_tree_lock);
		nr_found = radix_tree_gang_lookup(tree, (void **)batch,
					next_index, XFS_DQ_LOOKUP_BATCH);
		if (!nr_found) {
			mutex_unlock(&qi->qi_tree_lock);
			break;
		}

		for (i = 0; i < nr_found; i++) {
			struct xfs_dquot *dqp = batch[i];

			/* resume the next gang lookup after this id */
			next_index = dqp->q_id + 1;

			error = execute(batch[i], data);
			if (error == -EAGAIN) {
				skipped++;
				continue;
			}
			if (error && last_error != -EFSCORRUPTED)
				last_error = error;
		}

		mutex_unlock(&qi->qi_tree_lock);

		/* bail out if the filesystem is corrupted.  */
		if (last_error == -EFSCORRUPTED) {
			skipped = 0;
			break;
		}
		/* we're done if id overflows back to zero */
		if (!next_index)
			break;
	}

	if (skipped) {
		/* back off briefly, then rescan for the skipped dquots */
		delay(1);
		goto restart;
	}

	return last_error;
}
115 :
116 :
117 : /*
118 : * Purge a dquot from all tracking data structures and free it.
119 : */
/*
 * Purge a dquot from all tracking data structures and free it.
 *
 * Called via xfs_qm_dquot_walk(), so returning -EAGAIN (the initial value
 * of @error, used when the dquot is busy or a flush says try-again) makes
 * the walker retry this dquot on its next pass.  On success the dquot is
 * removed from the radix tree and the LRU and destroyed; returns 0.
 */
STATIC int
xfs_qm_dqpurge(
	struct xfs_dquot	*dqp,
	void			*data)
{
	struct xfs_quotainfo	*qi = dqp->q_mount->m_quotainfo;
	int			error = -EAGAIN;

	xfs_dqlock(dqp);
	/* someone else is freeing it, or it still has references: skip */
	if ((dqp->q_flags & XFS_DQFLAG_FREEING) || dqp->q_nrefs != 0)
		goto out_unlock;

	dqp->q_flags |= XFS_DQFLAG_FREEING;

	xfs_dqflock(dqp);

	/*
	 * If we are turning this type of quotas off, we don't care
	 * about the dirty metadata sitting in this dquot. OTOH, if
	 * we're unmounting, we do care, so we flush it and wait.
	 */
	if (XFS_DQ_IS_DIRTY(dqp)) {
		struct xfs_buf	*bp = NULL;

		/*
		 * We don't care about getting disk errors here. We need
		 * to purge this dquot anyway, so we go ahead regardless.
		 */
		error = xfs_qm_dqflush(dqp, &bp);
		if (!error) {
			error = xfs_bwrite(bp);
			xfs_buf_relse(bp);
		} else if (error == -EAGAIN) {
			/* flush is busy; undo FREEING so the retry can win */
			dqp->q_flags &= ~XFS_DQFLAG_FREEING;
			goto out_unlock;
		}
		/* re-take the flush lock dropped by the flush/write path */
		xfs_dqflock(dqp);
	}

	ASSERT(atomic_read(&dqp->q_pincount) == 0);
	ASSERT(xlog_is_shutdown(dqp->q_logitem.qli_item.li_log) ||
		!test_bit(XFS_LI_IN_AIL, &dqp->q_logitem.qli_item.li_flags));

	xfs_dqfunlock(dqp);
	xfs_dqunlock(dqp);

	radix_tree_delete(xfs_dquot_tree(qi, xfs_dquot_type(dqp)), dqp->q_id);
	qi->qi_dquots--;

	/*
	 * We move dquots to the freelist as soon as their reference count
	 * hits zero, so it really should be on the freelist here.
	 */
	ASSERT(!list_empty(&dqp->q_lru));
	list_lru_del(&qi->qi_lru, &dqp->q_lru);
	XFS_STATS_DEC(dqp->q_mount, xs_qm_dquot_unused);

	xfs_qm_dqdestroy(dqp);
	return 0;

out_unlock:
	xfs_dqunlock(dqp);
	return error;
}
184 :
185 : /*
186 : * Purge the dquot cache.
187 : */
188 : static void
189 34771 : xfs_qm_dqpurge_all(
190 : struct xfs_mount *mp)
191 : {
192 34771 : xfs_qm_dquot_walk(mp, XFS_DQTYPE_USER, xfs_qm_dqpurge, NULL);
193 34771 : xfs_qm_dquot_walk(mp, XFS_DQTYPE_GROUP, xfs_qm_dqpurge, NULL);
194 34771 : xfs_qm_dquot_walk(mp, XFS_DQTYPE_PROJ, xfs_qm_dqpurge, NULL);
195 34771 : }
196 :
197 : /*
198 : * Just destroy the quotainfo structure.
199 : */
200 : void
201 59234 : xfs_qm_unmount(
202 : struct xfs_mount *mp)
203 : {
204 59234 : if (mp->m_quotainfo) {
205 34771 : xfs_qm_dqpurge_all(mp);
206 34771 : xfs_qm_destroy_quotainfo(mp);
207 : }
208 59234 : }
209 :
210 : /*
211 : * Called from the vfsops layer.
212 : */
213 : void
214 59203 : xfs_qm_unmount_quotas(
215 : xfs_mount_t *mp)
216 : {
217 : /*
218 : * Release the dquots that root inode, et al might be holding,
219 : * before we flush quotas and blow away the quotainfo structure.
220 : */
221 59203 : ASSERT(mp->m_rootip);
222 59203 : xfs_qm_dqdetach(mp->m_rootip);
223 59203 : if (mp->m_rbmip)
224 59203 : xfs_qm_dqdetach(mp->m_rbmip);
225 59203 : if (mp->m_rsumip)
226 59203 : xfs_qm_dqdetach(mp->m_rsumip);
227 :
228 : /*
229 : * Release the quota inodes.
230 : */
231 59203 : if (mp->m_quotainfo) {
232 34766 : if (mp->m_quotainfo->qi_uquotaip) {
233 34250 : xfs_irele(mp->m_quotainfo->qi_uquotaip);
234 34250 : mp->m_quotainfo->qi_uquotaip = NULL;
235 : }
236 34766 : if (mp->m_quotainfo->qi_gquotaip) {
237 33894 : xfs_irele(mp->m_quotainfo->qi_gquotaip);
238 33894 : mp->m_quotainfo->qi_gquotaip = NULL;
239 : }
240 34766 : if (mp->m_quotainfo->qi_pquotaip) {
241 33791 : xfs_irele(mp->m_quotainfo->qi_pquotaip);
242 33791 : mp->m_quotainfo->qi_pquotaip = NULL;
243 : }
244 : }
245 59203 : }
246 :
/*
 * Attach one dquot of @type to @ip, storing it in *IO_idqpp (which points
 * at i_udquot, i_gdquot or i_pdquot).  If the slot is already populated
 * there is nothing to do.  Otherwise look the dquot up (allocating it on
 * disk if @doalloc), take a reference, and park it in the inode.
 *
 * Caller must hold the inode's ILOCK exclusively.  Returns 0 or a
 * negative errno from xfs_qm_dqget_inode() (e.g. -ENOENT when the dquot
 * doesn't exist on disk and @doalloc is false).
 */
STATIC int
xfs_qm_dqattach_one(
	struct xfs_inode	*ip,
	xfs_dqtype_t		type,
	bool			doalloc,
	struct xfs_dquot	**IO_idqpp)
{
	struct xfs_dquot	*dqp;
	int			error;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
	error = 0;

	/*
	 * See if we already have it in the inode itself. IO_idqpp is &i_udquot
	 * or &i_gdquot. This made the code look weird, but made the logic a lot
	 * simpler.
	 */
	dqp = *IO_idqpp;
	if (dqp) {
		trace_xfs_dqattach_found(dqp);
		return 0;
	}

	/*
	 * Find the dquot from somewhere. This bumps the reference count of
	 * dquot and returns it locked.  This can return ENOENT if dquot didn't
	 * exist on disk and we didn't ask it to allocate; ESRCH if quotas got
	 * turned off suddenly.
	 */
	error = xfs_qm_dqget_inode(ip, type, doalloc, &dqp);
	if (error)
		return error;

	trace_xfs_dqattach_get(dqp);

	/*
	 * dqget may have dropped and re-acquired the ilock, but it guarantees
	 * that the dquot returned is the one that should go in the inode.
	 */
	*IO_idqpp = dqp;
	xfs_dqunlock(dqp);
	return 0;
}
291 :
292 : static bool
293 1521657454 : xfs_qm_need_dqattach(
294 : struct xfs_inode *ip)
295 : {
296 1521657454 : struct xfs_mount *mp = ip->i_mount;
297 :
298 1521657454 : if (!XFS_IS_QUOTA_ON(mp))
299 : return false;
300 924046076 : if (!XFS_NOT_DQATTACHED(mp, ip))
301 : return false;
302 20080768 : if (xfs_is_quota_inode(&mp->m_sb, ip->i_ino))
303 0 : return false;
304 : return true;
305 : }
306 :
307 : /*
308 : * Given a locked inode, attach dquot(s) to it, taking U/G/P-QUOTAON
309 : * into account.
310 : * If @doalloc is true, the dquot(s) will be allocated if needed.
311 : * Inode may get unlocked and relocked in here, and the caller must deal with
312 : * the consequences.
313 : */
/*
 * Given a locked inode, attach dquot(s) to it, taking U/G/P-QUOTAON
 * into account.
 * If @doalloc is true, the dquot(s) will be allocated if needed.
 * Inode may get unlocked and relocked in here, and the caller must deal with
 * the consequences.
 *
 * Returns 0, or the first error from xfs_qm_dqattach_one(); dquots already
 * attached before an error are left in place for later detach.
 */
int
xfs_qm_dqattach_locked(
	xfs_inode_t	*ip,
	bool		doalloc)
{
	xfs_mount_t	*mp = ip->i_mount;
	int		error = 0;

	if (!xfs_qm_need_dqattach(ip))
		return 0;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));

	/* attach each enabled quota type that isn't already present */
	if (XFS_IS_UQUOTA_ON(mp) && !ip->i_udquot) {
		error = xfs_qm_dqattach_one(ip, XFS_DQTYPE_USER,
				doalloc, &ip->i_udquot);
		if (error)
			goto done;
		ASSERT(ip->i_udquot);
	}

	if (XFS_IS_GQUOTA_ON(mp) && !ip->i_gdquot) {
		error = xfs_qm_dqattach_one(ip, XFS_DQTYPE_GROUP,
				doalloc, &ip->i_gdquot);
		if (error)
			goto done;
		ASSERT(ip->i_gdquot);
	}

	if (XFS_IS_PQUOTA_ON(mp) && !ip->i_pdquot) {
		error = xfs_qm_dqattach_one(ip, XFS_DQTYPE_PROJ,
				doalloc, &ip->i_pdquot);
		if (error)
			goto done;
		ASSERT(ip->i_pdquot);
	}

done:
	/*
	 * Don't worry about the dquots that we may have attached before any
	 * error - they'll get detached later if it has not already been done.
	 */
	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
	return error;
}
359 :
360 : int
361 866781398 : xfs_qm_dqattach(
362 : struct xfs_inode *ip)
363 : {
364 866781398 : int error;
365 :
366 866781398 : if (!xfs_qm_need_dqattach(ip))
367 : return 0;
368 :
369 4783445 : xfs_ilock(ip, XFS_ILOCK_EXCL);
370 4783468 : error = xfs_qm_dqattach_locked(ip, false);
371 4783569 : xfs_iunlock(ip, XFS_ILOCK_EXCL);
372 :
373 4783569 : return error;
374 : }
375 :
376 : /*
377 : * Release dquots (and their references) if any.
378 : * The inode should be locked EXCL except when this's called by
379 : * xfs_ireclaim.
380 : */
/*
 * Release dquots (and their references) if any.
 * The inode should be locked EXCL except when this's called by
 * xfs_ireclaim.
 */
void
xfs_qm_dqdetach(
	xfs_inode_t	*ip)
{
	/* fast path: nothing attached, nothing to do */
	if (!(ip->i_udquot || ip->i_gdquot || ip->i_pdquot))
		return;

	trace_xfs_dquot_dqdetach(ip);

	/* quota inodes themselves never carry attached dquots */
	ASSERT(!xfs_is_quota_inode(&ip->i_mount->m_sb, ip->i_ino));
	if (ip->i_udquot) {
		xfs_qm_dqrele(ip->i_udquot);
		ip->i_udquot = NULL;
	}
	if (ip->i_gdquot) {
		xfs_qm_dqrele(ip->i_gdquot);
		ip->i_gdquot = NULL;
	}
	if (ip->i_pdquot) {
		xfs_qm_dqrele(ip->i_pdquot);
		ip->i_pdquot = NULL;
	}
}
404 :
/*
 * Per-scan state for dquot LRU reclaim (see xfs_qm_dquot_isolate() and
 * xfs_qm_shrink_scan()).
 */
struct xfs_qm_isolate {
	struct list_head	buffers;	/* delwri queue of flushed dquot buffers */
	struct list_head	dispose;	/* isolated dquots awaiting freeing */
};
409 :
/*
 * list_lru walk callback that decides, for one dquot on the LRU, whether
 * it can be reclaimed right now.
 *
 * Outcomes:
 *  - LRU_REMOVED: the dquot regained a reference (just drop it from the
 *    LRU), or it was successfully marked FREEING and moved to
 *    isol->dispose for the caller to destroy.
 *  - LRU_SKIP: the dquot is locked, being freed elsewhere, or mid-flush.
 *  - LRU_RETRY: the dquot was dirty; its flush was queued (or failed) and
 *    the LRU lock was dropped and re-taken, so the walk must restart.
 */
static enum lru_status
xfs_qm_dquot_isolate(
	struct list_head	*item,
	struct list_lru_one	*lru,
	spinlock_t		*lru_lock,
	void			*arg)
		__releases(lru_lock) __acquires(lru_lock)
{
	struct xfs_dquot	*dqp = container_of(item,
						struct xfs_dquot, q_lru);
	struct xfs_qm_isolate	*isol = arg;

	if (!xfs_dqlock_nowait(dqp))
		goto out_miss_busy;

	/*
	 * If something else is freeing this dquot and hasn't yet removed it
	 * from the LRU, leave it for the freeing task to complete the freeing
	 * process rather than risk it being free from under us here.
	 */
	if (dqp->q_flags & XFS_DQFLAG_FREEING)
		goto out_miss_unlock;

	/*
	 * This dquot has acquired a reference in the meantime remove it from
	 * the freelist and try again.
	 */
	if (dqp->q_nrefs) {
		xfs_dqunlock(dqp);
		XFS_STATS_INC(dqp->q_mount, xs_qm_dqwants);

		trace_xfs_dqreclaim_want(dqp);
		list_lru_isolate(lru, &dqp->q_lru);
		XFS_STATS_DEC(dqp->q_mount, xs_qm_dquot_unused);
		return LRU_REMOVED;
	}

	/*
	 * If the dquot is dirty, flush it. If it's already being flushed, just
	 * skip it so there is time for the IO to complete before we try to
	 * reclaim it again on the next LRU pass.
	 */
	if (!xfs_dqflock_nowait(dqp))
		goto out_miss_unlock;

	if (XFS_DQ_IS_DIRTY(dqp)) {
		struct xfs_buf	*bp = NULL;
		int		error;

		trace_xfs_dqreclaim_dirty(dqp);

		/* we have to drop the LRU lock to flush the dquot */
		spin_unlock(lru_lock);

		error = xfs_qm_dqflush(dqp, &bp);
		if (error)
			goto out_unlock_dirty;

		/* queue the buffer; the caller submits the delwri list */
		xfs_buf_delwri_queue(bp, &isol->buffers);
		xfs_buf_relse(bp);
		goto out_unlock_dirty;
	}
	xfs_dqfunlock(dqp);

	/*
	 * Prevent lookups now that we are past the point of no return.
	 */
	dqp->q_flags |= XFS_DQFLAG_FREEING;
	xfs_dqunlock(dqp);

	ASSERT(dqp->q_nrefs == 0);
	list_lru_isolate_move(lru, &dqp->q_lru, &isol->dispose);
	XFS_STATS_DEC(dqp->q_mount, xs_qm_dquot_unused);
	trace_xfs_dqreclaim_done(dqp);
	XFS_STATS_INC(dqp->q_mount, xs_qm_dqreclaims);
	return LRU_REMOVED;

out_miss_unlock:
	xfs_dqunlock(dqp);
out_miss_busy:
	trace_xfs_dqreclaim_busy(dqp);
	XFS_STATS_INC(dqp->q_mount, xs_qm_dqreclaim_misses);
	return LRU_SKIP;

out_unlock_dirty:
	trace_xfs_dqreclaim_busy(dqp);
	XFS_STATS_INC(dqp->q_mount, xs_qm_dqreclaim_misses);
	xfs_dqunlock(dqp);
	/* re-take the LRU lock dropped above before returning LRU_RETRY */
	spin_lock(lru_lock);
	return LRU_RETRY;
}
501 :
/*
 * Shrinker scan callback: reclaim dquots from the LRU under memory
 * pressure.  Dirty dquots are flushed and their buffers submitted as a
 * delwri batch; clean, unreferenced dquots are collected on a dispose
 * list and freed here.
 *
 * Bails out (returning 0) unless the reclaim context allows both FS
 * recursion and direct reclaim.  Returns the number of dquots freed.
 */
static unsigned long
xfs_qm_shrink_scan(
	struct shrinker		*shrink,
	struct shrink_control	*sc)
{
	struct xfs_quotainfo	*qi = container_of(shrink,
					struct xfs_quotainfo, qi_shrinker);
	struct xfs_qm_isolate	isol;
	unsigned long		freed;
	int			error;

	if ((sc->gfp_mask & (__GFP_FS|__GFP_DIRECT_RECLAIM)) != (__GFP_FS|__GFP_DIRECT_RECLAIM))
		return 0;

	INIT_LIST_HEAD(&isol.buffers);
	INIT_LIST_HEAD(&isol.dispose);

	freed = list_lru_shrink_walk(&qi->qi_lru, sc,
				     xfs_qm_dquot_isolate, &isol);

	/* write back the dirty dquot buffers queued by the isolate pass */
	error = xfs_buf_delwri_submit(&isol.buffers);
	if (error)
		xfs_warn(NULL, "%s: dquot reclaim failed", __func__);

	while (!list_empty(&isol.dispose)) {
		struct xfs_dquot *dqp;

		dqp = list_first_entry(&isol.dispose, struct xfs_dquot, q_lru);
		list_del_init(&dqp->q_lru);
		xfs_qm_dqfree_one(dqp);
	}

	return freed;
}
536 :
537 : static unsigned long
538 3505 : xfs_qm_shrink_count(
539 : struct shrinker *shrink,
540 : struct shrink_control *sc)
541 : {
542 3505 : struct xfs_quotainfo *qi = container_of(shrink,
543 : struct xfs_quotainfo, qi_shrinker);
544 :
545 3505 : return list_lru_shrink_count(&qi->qi_lru, sc);
546 : }
547 :
/*
 * Copy the administrator-set default limits for @type out of the id-0
 * dquot into the in-core xfs_def_quota.  If the root dquot cannot be
 * read, the defaults already in @qinf are left untouched.
 */
STATIC void
xfs_qm_set_defquota(
	struct xfs_mount	*mp,
	xfs_dqtype_t		type,
	struct xfs_quotainfo	*qinf)
{
	struct xfs_dquot	*dqp;
	struct xfs_def_quota	*defq;
	int			error;

	/* read dquot 0 without inserting it into any caches */
	error = xfs_qm_dqget_uncached(mp, 0, type, &dqp);
	if (error)
		return;

	defq = xfs_get_defquota(qinf, xfs_dquot_type(dqp));

	/*
	 * Timers and warnings have been already set, let's just set the
	 * default limits for this quota type
	 */
	defq->blk.hard = dqp->q_blk.hardlimit;
	defq->blk.soft = dqp->q_blk.softlimit;
	defq->ino.hard = dqp->q_ino.hardlimit;
	defq->ino.soft = dqp->q_ino.softlimit;
	defq->rtb.hard = dqp->q_rtb.hardlimit;
	defq->rtb.soft = dqp->q_rtb.softlimit;
	xfs_qm_dqdestroy(dqp);
}
576 :
577 : /* Initialize quota time limits from the root dquot. */
/*
 * Initialize the grace-period time limits for @type: start from the
 * compile-time defaults, then override each one with any non-zero timer
 * stored in the id-0 (root) dquot.  Failure to read the root dquot just
 * leaves the defaults in place.
 */
static void
xfs_qm_init_timelimits(
	struct xfs_mount	*mp,
	xfs_dqtype_t		type)
{
	struct xfs_quotainfo	*qinf = mp->m_quotainfo;
	struct xfs_def_quota	*defq;
	struct xfs_dquot	*dqp;
	int			error;

	defq = xfs_get_defquota(qinf, type);

	/* built-in defaults, possibly overridden below */
	defq->blk.time = XFS_QM_BTIMELIMIT;
	defq->ino.time = XFS_QM_ITIMELIMIT;
	defq->rtb.time = XFS_QM_RTBTIMELIMIT;

	/*
	 * We try to get the limits from the superuser's limits fields.
	 * This is quite hacky, but it is standard quota practice.
	 *
	 * Since we may not have done a quotacheck by this point, just read
	 * the dquot without attaching it to any hashtables or lists.
	 */
	error = xfs_qm_dqget_uncached(mp, 0, type, &dqp);
	if (error)
		return;

	/*
	 * The warnings and timers set the grace period given to
	 * a user or group before he or she can not perform any
	 * more writing. If it is zero, a default is used.
	 */
	if (dqp->q_blk.timer)
		defq->blk.time = dqp->q_blk.timer;
	if (dqp->q_ino.timer)
		defq->ino.time = dqp->q_ino.timer;
	if (dqp->q_rtb.timer)
		defq->rtb.time = dqp->q_rtb.timer;

	xfs_qm_dqdestroy(dqp);
}
619 :
620 : /*
621 : * This initializes all the quota information that's kept in the
622 : * mount structure
623 : */
/*
 * This initializes all the quota information that's kept in the
 * mount structure.
 *
 * Allocates mp->m_quotainfo, sets up the LRU, quota inodes, radix trees
 * and locks, precomputes per-chunk constants and expiry ranges, loads
 * default limits/timers from the root dquots, and registers the memory
 * shrinker.  On error everything built so far is unwound and
 * mp->m_quotainfo is reset to NULL.
 */
STATIC int
xfs_qm_init_quotainfo(
	struct xfs_mount	*mp)
{
	struct xfs_quotainfo	*qinf;
	int			error;

	ASSERT(XFS_IS_QUOTA_ON(mp));

	qinf = mp->m_quotainfo = kmem_zalloc(sizeof(struct xfs_quotainfo), 0);

	error = list_lru_init(&qinf->qi_lru);
	if (error)
		goto out_free_qinf;

	/*
	 * See if quotainodes are setup, and if not, allocate them,
	 * and change the superblock accordingly.
	 */
	error = xfs_qm_init_quotainos(mp);
	if (error)
		goto out_free_lru;

	INIT_RADIX_TREE(&qinf->qi_uquota_tree, GFP_NOFS);
	INIT_RADIX_TREE(&qinf->qi_gquota_tree, GFP_NOFS);
	INIT_RADIX_TREE(&qinf->qi_pquota_tree, GFP_NOFS);
	mutex_init(&qinf->qi_tree_lock);

	/* mutex used to serialize quotaoffs */
	mutex_init(&qinf->qi_quotaofflock);

	/* Precalc some constants */
	qinf->qi_dqchunklen = XFS_FSB_TO_BB(mp, XFS_DQUOT_CLUSTER_SIZE_FSB);
	qinf->qi_dqperchunk = xfs_calc_dquots_per_chunk(qinf->qi_dqchunklen);
	/* expiry clamp range depends on the on-disk timestamp format */
	if (xfs_has_bigtime(mp)) {
		qinf->qi_expiry_min =
			xfs_dq_bigtime_to_unix(XFS_DQ_BIGTIME_EXPIRY_MIN);
		qinf->qi_expiry_max =
			xfs_dq_bigtime_to_unix(XFS_DQ_BIGTIME_EXPIRY_MAX);
	} else {
		qinf->qi_expiry_min = XFS_DQ_LEGACY_EXPIRY_MIN;
		qinf->qi_expiry_max = XFS_DQ_LEGACY_EXPIRY_MAX;
	}
	trace_xfs_quota_expiry_range(mp, qinf->qi_expiry_min,
			qinf->qi_expiry_max);

	mp->m_qflags |= (mp->m_sb.sb_qflags & XFS_ALL_QUOTA_CHKD);

	xfs_qm_init_timelimits(mp, XFS_DQTYPE_USER);
	xfs_qm_init_timelimits(mp, XFS_DQTYPE_GROUP);
	xfs_qm_init_timelimits(mp, XFS_DQTYPE_PROJ);

	if (XFS_IS_UQUOTA_ON(mp))
		xfs_qm_set_defquota(mp, XFS_DQTYPE_USER, qinf);
	if (XFS_IS_GQUOTA_ON(mp))
		xfs_qm_set_defquota(mp, XFS_DQTYPE_GROUP, qinf);
	if (XFS_IS_PQUOTA_ON(mp))
		xfs_qm_set_defquota(mp, XFS_DQTYPE_PROJ, qinf);

	qinf->qi_shrinker.count_objects = xfs_qm_shrink_count;
	qinf->qi_shrinker.scan_objects = xfs_qm_shrink_scan;
	qinf->qi_shrinker.seeks = DEFAULT_SEEKS;
	qinf->qi_shrinker.flags = SHRINKER_NUMA_AWARE;

	error = register_shrinker(&qinf->qi_shrinker, "xfs-qm:%s",
				  mp->m_super->s_id);
	if (error)
		goto out_free_inos;

	return 0;

out_free_inos:
	mutex_destroy(&qinf->qi_quotaofflock);
	mutex_destroy(&qinf->qi_tree_lock);
	xfs_qm_destroy_quotainos(qinf);
out_free_lru:
	list_lru_destroy(&qinf->qi_lru);
out_free_qinf:
	kmem_free(qinf);
	mp->m_quotainfo = NULL;
	return error;
}
706 :
707 : /*
708 : * Gets called when unmounting a filesystem or when all quotas get
709 : * turned off.
710 : * This purges the quota inodes, destroys locks and frees itself.
711 : */
712 : void
713 34771 : xfs_qm_destroy_quotainfo(
714 : struct xfs_mount *mp)
715 : {
716 34771 : struct xfs_quotainfo *qi;
717 :
718 34771 : qi = mp->m_quotainfo;
719 34771 : ASSERT(qi != NULL);
720 :
721 34771 : unregister_shrinker(&qi->qi_shrinker);
722 34771 : list_lru_destroy(&qi->qi_lru);
723 34771 : xfs_qm_destroy_quotainos(qi);
724 34771 : mutex_destroy(&qi->qi_tree_lock);
725 34771 : mutex_destroy(&qi->qi_quotaofflock);
726 34771 : kmem_free(qi);
727 34771 : mp->m_quotainfo = NULL;
728 34771 : }
729 :
730 : /*
731 : * Create an inode and return with a reference already taken, but unlocked
732 : * This is how we create quota inodes
733 : */
/*
 * Create an inode and return with a reference already taken, but unlocked.
 * This is how we create quota inodes.
 *
 * @flags selects which quota inode is being created (XFS_QMOPT_[UGP]QUOTA)
 * and whether the superblock quota feature bit must be added
 * (XFS_QMOPT_SBVERSION).  On pre-pquotino superblocks a group/project
 * inode may be reused instead of allocated.  The superblock pointer fields
 * are updated and logged in the same transaction.
 */
STATIC int
xfs_qm_qino_alloc(
	struct xfs_mount	*mp,
	struct xfs_inode	**ipp,
	unsigned int		flags)
{
	struct xfs_trans	*tp;
	int			error;
	bool			need_alloc = true;

	*ipp = NULL;
	/*
	 * With superblock that doesn't have separate pquotino, we
	 * share an inode between gquota and pquota. If the on-disk
	 * superblock has GQUOTA and the filesystem is now mounted
	 * with PQUOTA, just use sb_gquotino for sb_pquotino and
	 * vice-versa.
	 */
	if (!xfs_has_pquotino(mp) &&
			(flags & (XFS_QMOPT_PQUOTA|XFS_QMOPT_GQUOTA))) {
		xfs_ino_t ino = NULLFSINO;

		if ((flags & XFS_QMOPT_PQUOTA) &&
			     (mp->m_sb.sb_gquotino != NULLFSINO)) {
			ino = mp->m_sb.sb_gquotino;
			/* both slots set at once would be corruption */
			if (XFS_IS_CORRUPT(mp,
					   mp->m_sb.sb_pquotino != NULLFSINO))
				return -EFSCORRUPTED;
		} else if ((flags & XFS_QMOPT_GQUOTA) &&
			     (mp->m_sb.sb_pquotino != NULLFSINO)) {
			ino = mp->m_sb.sb_pquotino;
			if (XFS_IS_CORRUPT(mp,
					   mp->m_sb.sb_gquotino != NULLFSINO))
				return -EFSCORRUPTED;
		}
		if (ino != NULLFSINO) {
			/* reuse the existing inode rather than allocating */
			error = xfs_iget(mp, NULL, ino, 0, 0, ipp);
			if (error)
				return error;
			mp->m_sb.sb_gquotino = NULLFSINO;
			mp->m_sb.sb_pquotino = NULLFSINO;
			need_alloc = false;
		}
	}

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_create,
			need_alloc ? XFS_QM_QINOCREATE_SPACE_RES(mp) : 0,
			0, 0, &tp);
	if (error)
		return error;

	if (need_alloc) {
		xfs_ino_t	ino;

		error = xfs_dialloc(&tp, 0, S_IFREG, &ino);
		if (!error)
			error = xfs_init_new_inode(&nop_mnt_idmap, tp, NULL, ino,
					S_IFREG, 1, 0, 0, false, ipp);
		if (error) {
			xfs_trans_cancel(tp);
			return error;
		}
	}

	/*
	 * Make the changes in the superblock, and log those too.
	 * sbfields arg may contain fields other than *QUOTINO;
	 * VERSIONNUM for example.
	 */
	spin_lock(&mp->m_sb_lock);
	if (flags & XFS_QMOPT_SBVERSION) {
		ASSERT(!xfs_has_quota(mp));

		xfs_add_quota(mp);
		mp->m_sb.sb_uquotino = NULLFSINO;
		mp->m_sb.sb_gquotino = NULLFSINO;
		mp->m_sb.sb_pquotino = NULLFSINO;

		/* qflags will get updated fully _after_ quotacheck */
		mp->m_sb.sb_qflags = mp->m_qflags & XFS_ALL_QUOTA_ACCT;
	}
	if (flags & XFS_QMOPT_UQUOTA)
		mp->m_sb.sb_uquotino = (*ipp)->i_ino;
	else if (flags & XFS_QMOPT_GQUOTA)
		mp->m_sb.sb_gquotino = (*ipp)->i_ino;
	else
		mp->m_sb.sb_pquotino = (*ipp)->i_ino;
	spin_unlock(&mp->m_sb_lock);
	xfs_log_sb(tp);

	error = xfs_trans_commit(tp);
	if (error) {
		/* commit can only fail if the fs has shut down */
		ASSERT(xfs_is_shutdown(mp));
		xfs_alert(mp, "%s failed (error %d)!", __func__, error);
	}
	if (need_alloc)
		xfs_finish_inode_setup(*ipp);
	return error;
}
833 :
834 :
/*
 * Zero the counters (and, for non-zero ids, the timers and warnings) of
 * every on-disk dquot in buffer @bp, repairing any dqblk that fails
 * verification.  @id is the dquot id of the first entry in the buffer.
 * Used by quotacheck, which recomputes the counters from scratch.
 */
STATIC void
xfs_qm_reset_dqcounts(
	struct xfs_mount	*mp,
	struct xfs_buf		*bp,
	xfs_dqid_t		id,
	xfs_dqtype_t		type)
{
	struct xfs_dqblk	*dqb;
	int			j;

	trace_xfs_reset_dqcounts(bp, _RET_IP_);

	/*
	 * Reset all counters and timers. They'll be
	 * started afresh by xfs_qm_quotacheck.
	 */
#ifdef DEBUG
	j = (int)XFS_FSB_TO_B(mp, XFS_DQUOT_CLUSTER_SIZE_FSB) /
		sizeof(struct xfs_dqblk);
	ASSERT(mp->m_quotainfo->qi_dqperchunk == j);
#endif
	dqb = bp->b_addr;
	for (j = 0; j < mp->m_quotainfo->qi_dqperchunk; j++) {
		struct xfs_disk_dquot	*ddq;

		ddq = (struct xfs_disk_dquot *)&dqb[j];

		/*
		 * Do a sanity check, and if needed, repair the dqblk. Don't
		 * output any warnings because it's perfectly possible to
		 * find uninitialised dquot blks. See comment in
		 * xfs_dquot_verify.
		 */
		if (xfs_dqblk_verify(mp, &dqb[j], id + j) ||
		    (dqb[j].dd_diskdq.d_type & XFS_DQTYPE_REC_MASK) != type)
			xfs_dqblk_repair(mp, &dqb[j], id + j, type);

		/*
		 * Reset type in case we are reusing group quota file for
		 * project quotas or vice versa
		 */
		ddq->d_type = type;
		ddq->d_bcount = 0;
		ddq->d_icount = 0;
		ddq->d_rtbcount = 0;

		/*
		 * dquot id 0 stores the default grace period and the maximum
		 * warning limit that were set by the administrator, so we
		 * should not reset them.
		 */
		if (ddq->d_id != 0) {
			ddq->d_btimer = 0;
			ddq->d_itimer = 0;
			ddq->d_rtbtimer = 0;
			ddq->d_bwarns = 0;
			ddq->d_iwarns = 0;
			ddq->d_rtbwarns = 0;
			if (xfs_has_bigtime(mp))
				ddq->d_type |= XFS_DQTYPE_BIGTIME;
		}

		/* recompute the per-dqblk CRC on v5 filesystems */
		if (xfs_has_crc(mp)) {
			xfs_update_cksum((char *)&dqb[j],
					 sizeof(struct xfs_dqblk),
					 XFS_DQUOT_CRC_OFF);
		}
	}
}
904 :
/*
 * Reset the dquot counters in each of the @blkcnt dquot blocks starting
 * at @bno (dquot ids starting at @firstid), queueing the modified buffers
 * on @buffer_list for later delwri submission.  Buffers that fail CRC
 * verification are re-read without a verifier so xfs_qm_reset_dqcounts()
 * can repair them.  Returns 0 or the first read error.
 */
STATIC int
xfs_qm_reset_dqcounts_all(
	struct xfs_mount	*mp,
	xfs_dqid_t		firstid,
	xfs_fsblock_t		bno,
	xfs_filblks_t		blkcnt,
	xfs_dqtype_t		type,
	struct list_head	*buffer_list)
{
	struct xfs_buf		*bp;
	int			error = 0;

	ASSERT(blkcnt > 0);

	/*
	 * Blkcnt arg can be a very big number, and might even be
	 * larger than the log itself. So, we have to break it up into
	 * manageable-sized transactions.
	 * Note that we don't start a permanent transaction here; we might
	 * not be able to get a log reservation for the whole thing up front,
	 * and we don't really care to either, because we just discard
	 * everything if we were to crash in the middle of this loop.
	 */
	while (blkcnt--) {
		error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp,
			      XFS_FSB_TO_DADDR(mp, bno),
			      mp->m_quotainfo->qi_dqchunklen, 0, &bp,
			      &xfs_dquot_buf_ops);

		/*
		 * CRC and validation errors will return a EFSCORRUPTED here. If
		 * this occurs, re-read without CRC validation so that we can
		 * repair the damage via xfs_qm_reset_dqcounts(). This process
		 * will leave a trace in the log indicating corruption has
		 * been detected.
		 */
		if (error == -EFSCORRUPTED) {
			error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp,
				      XFS_FSB_TO_DADDR(mp, bno),
				      mp->m_quotainfo->qi_dqchunklen, 0, &bp,
				      NULL);
		}

		if (error)
			break;

		/*
		 * A corrupt buffer might not have a verifier attached, so
		 * make sure we have the correct one attached before writeback
		 * occurs.
		 */
		bp->b_ops = &xfs_dquot_buf_ops;
		xfs_qm_reset_dqcounts(mp, bp, firstid, type);
		xfs_buf_delwri_queue(bp, buffer_list);
		xfs_buf_relse(bp);

		/* goto the next block. */
		bno++;
		firstid += mp->m_quotainfo->qi_dqperchunk;
	}

	return error;
}
968 :
969 : /*
970 : * Iterate over all allocated dquot blocks in this quota inode, zeroing all
971 : * counters for every chunk of dquots that we find.
972 : */
/*
 * Iterate over all allocated dquot blocks in this quota inode, zeroing all
 * counters for every chunk of dquots that we find.
 *
 * Walks @qip's data fork extent map in XFS_DQITER_MAP_SIZE-sized batches,
 * skipping holes, read-ahead-ing the next extent, and resetting every
 * chunk via xfs_qm_reset_dqcounts_all(); dirty buffers are queued on
 * @buffer_list.  Returns 0 (trivially so if the inode has no blocks) or
 * the first mapping/reset error.
 */
STATIC int
xfs_qm_reset_dqcounts_buf(
	struct xfs_mount	*mp,
	struct xfs_inode	*qip,
	xfs_dqtype_t		type,
	struct list_head	*buffer_list)
{
	struct xfs_bmbt_irec	*map;
	int			i, nmaps;	/* number of map entries */
	int			error;		/* return value */
	xfs_fileoff_t		lblkno;
	xfs_filblks_t		maxlblkcnt;
	xfs_dqid_t		firstid;
	xfs_fsblock_t		rablkno;
	xfs_filblks_t		rablkcnt;

	error = 0;
	/*
	 * This looks racy, but we can't keep an inode lock across a
	 * trans_reserve. But, this gets called during quotacheck, and that
	 * happens only at mount time which is single threaded.
	 */
	if (qip->i_nblocks == 0)
		return 0;

	map = kmem_alloc(XFS_DQITER_MAP_SIZE * sizeof(*map), 0);

	lblkno = 0;
	maxlblkcnt = XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes);
	do {
		uint		lock_mode;

		nmaps = XFS_DQITER_MAP_SIZE;
		/*
		 * We aren't changing the inode itself. Just changing
		 * some of its data. No new blocks are added here, and
		 * the inode is never added to the transaction.
		 */
		lock_mode = xfs_ilock_data_map_shared(qip);
		error = xfs_bmapi_read(qip, lblkno, maxlblkcnt - lblkno,
				       map, &nmaps, 0);
		xfs_iunlock(qip, lock_mode);
		if (error)
			break;

		ASSERT(nmaps <= XFS_DQITER_MAP_SIZE);
		for (i = 0; i < nmaps; i++) {
			ASSERT(map[i].br_startblock != DELAYSTARTBLOCK);
			ASSERT(map[i].br_blockcount);


			lblkno += map[i].br_blockcount;

			/* holes contain no dquots to reset */
			if (map[i].br_startblock == HOLESTARTBLOCK)
				continue;

			firstid = (xfs_dqid_t) map[i].br_startoff *
				mp->m_quotainfo->qi_dqperchunk;
			/*
			 * Do a read-ahead on the next extent.
			 */
			if ((i+1 < nmaps) &&
			    (map[i+1].br_startblock != HOLESTARTBLOCK)) {
				rablkcnt =  map[i+1].br_blockcount;
				rablkno = map[i+1].br_startblock;
				while (rablkcnt--) {
					xfs_buf_readahead(mp->m_ddev_targp,
						XFS_FSB_TO_DADDR(mp, rablkno),
						mp->m_quotainfo->qi_dqchunklen,
						&xfs_dquot_buf_ops);
					rablkno++;
				}
			}
			/*
			 * Iterate thru all the blks in the extent and
			 * reset the counters of all the dquots inside them.
			 */
			error = xfs_qm_reset_dqcounts_all(mp, firstid,
						   map[i].br_startblock,
						   map[i].br_blockcount,
						   type, buffer_list);
			if (error)
				goto out;
		}
	} while (nmaps > 0);

out:
	kmem_free(map);
	return error;
}
1063 :
1064 : /*
1065 : * Called by dqusage_adjust in doing a quotacheck.
1066 : *
1067 : * Given the inode, and a dquot id this updates both the incore dqout as well
1068 : * as the buffer copy. This is so that once the quotacheck is done, we can
1069 : * just log all the buffers, as opposed to logging numerous updates to
1070 : * individual dquots.
1071 : */
STATIC int
xfs_qm_quotacheck_dqadjust(
	struct xfs_inode	*ip,
	xfs_dqtype_t		type,
	xfs_qcnt_t		nblks,
	xfs_qcnt_t		rtblks)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_dquot	*dqp;
	xfs_dqid_t		id;
	int			error;

	/* Map the inode's uid/gid/projid to the dquot id for this type. */
	id = xfs_qm_id_for_quotatype(ip, type);
	/*
	 * Look up (allocating if necessary) the dquot; on success it is
	 * returned locked (see the xfs_dqunlock() calls after dqget
	 * elsewhere in this file).
	 */
	error = xfs_qm_dqget(mp, id, type, true, &dqp);
	if (error) {
		/*
		 * Shouldn't be able to turn off quotas here.
		 */
		ASSERT(error != -ESRCH);
		ASSERT(error != -ENOENT);
		return error;
	}

	trace_xfs_dqadjust(dqp);

	/*
	 * Adjust the inode count and the block count to reflect this inode's
	 * resource usage.
	 */
	dqp->q_ino.count++;
	dqp->q_ino.reserved++;
	if (nblks) {
		dqp->q_blk.count += nblks;
		dqp->q_blk.reserved += nblks;
	}
	if (rtblks) {
		dqp->q_rtb.count += rtblks;
		dqp->q_rtb.reserved += rtblks;
	}

	/*
	 * Set default limits, adjust timers (since we changed usages)
	 *
	 * There are no timers for the default values set in the root dquot.
	 */
	if (dqp->q_id) {
		xfs_qm_adjust_dqlimits(dqp);
		xfs_qm_adjust_dqtimers(dqp);
	}

	/* Mark dirty so quotacheck's flush pass writes this dquot back. */
	dqp->q_flags |= XFS_DQFLAG_DIRTY;
	xfs_qm_dqput(dqp);
	return 0;
}
1126 :
1127 : /*
1128 : * callback routine supplied to bulkstat(). Given an inumber, find its
1129 : * dquots and update them to account for resources taken by that inode.
1130 : */
1131 : /* ARGSUSED */
STATIC int
xfs_qm_dqusage_adjust(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	xfs_ino_t		ino,
	void			*data)
{
	struct xfs_inode	*ip;
	xfs_qcnt_t		nblks;
	xfs_filblks_t		rtblks = 0;	/* total rt blks */
	int			error;

	ASSERT(XFS_IS_QUOTA_ON(mp));

	/*
	 * rootino must have its resources accounted for, not so with the quota
	 * inodes.
	 */
	if (xfs_is_quota_inode(&mp->m_sb, ino))
		return 0;

	/*
	 * We don't _need_ to take the ilock EXCL here because quotacheck runs
	 * at mount time and therefore nobody will be racing chown/chproj.
	 */
	error = xfs_iget(mp, tp, ino, XFS_IGET_DONTCACHE, 0, &ip);
	/* Inodes freed or invalidated during the walk are simply skipped. */
	if (error == -EINVAL || error == -ENOENT)
		return 0;
	if (error)
		return error;

	ASSERT(ip->i_delayed_blks == 0);

	if (XFS_IS_REALTIME_INODE(ip)) {
		struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, XFS_DATA_FORK);

		/* Bring the extent list incore so we can count rt blocks. */
		error = xfs_iread_extents(tp, ip, XFS_DATA_FORK);
		if (error)
			goto error0;

		xfs_bmap_count_leaves(ifp, &rtblks);
	}

	/* Whatever isn't realtime is charged against the data device. */
	nblks = (xfs_qcnt_t)ip->i_nblocks - rtblks;

	/*
	 * Add the (disk blocks and inode) resources occupied by this
	 * inode to its dquots. We do this adjustment in the incore dquot,
	 * and also copy the changes to its buffer.
	 * We don't care about putting these changes in a transaction
	 * envelope because if we crash in the middle of a 'quotacheck'
	 * we have to start from the beginning anyway.
	 * Once we're done, we'll log all the dquot bufs.
	 *
	 * The *QUOTA_ON checks below may look pretty racy, but quotachecks
	 * and quotaoffs don't race. (Quotachecks happen at mount time only).
	 */
	if (XFS_IS_UQUOTA_ON(mp)) {
		error = xfs_qm_quotacheck_dqadjust(ip, XFS_DQTYPE_USER, nblks,
				rtblks);
		if (error)
			goto error0;
	}

	if (XFS_IS_GQUOTA_ON(mp)) {
		error = xfs_qm_quotacheck_dqadjust(ip, XFS_DQTYPE_GROUP, nblks,
				rtblks);
		if (error)
			goto error0;
	}

	if (XFS_IS_PQUOTA_ON(mp)) {
		error = xfs_qm_quotacheck_dqadjust(ip, XFS_DQTYPE_PROJ, nblks,
				rtblks);
		if (error)
			goto error0;
	}

	/* Success path also lands here; error is 0 in that case. */
error0:
	xfs_irele(ip);
	return error;
}
1214 :
STATIC int
xfs_qm_flush_one(
	struct xfs_dquot	*dqp,
	void			*data)
{
	struct xfs_mount	*mp = dqp->q_mount;
	struct list_head	*buffer_list = data;
	struct xfs_buf		*bp = NULL;
	int			error = 0;

	xfs_dqlock(dqp);
	/* Skip dquots being torn down, or with nothing to write back. */
	if (dqp->q_flags & XFS_DQFLAG_FREEING)
		goto out_unlock;
	if (!XFS_DQ_IS_DIRTY(dqp))
		goto out_unlock;

	/*
	 * The only way the dquot is already flush locked by the time quotacheck
	 * gets here is if reclaim flushed it before the dqadjust walk dirtied
	 * it for the final time. Quotacheck collects all dquot bufs in the
	 * local delwri queue before dquots are dirtied, so reclaim can't have
	 * possibly queued it for I/O. The only way out is to push the buffer to
	 * cycle the flush lock.
	 */
	if (!xfs_dqflock_nowait(dqp)) {
		/* buf is pinned in-core by delwri list */
		error = xfs_buf_incore(mp->m_ddev_targp, dqp->q_blkno,
				mp->m_quotainfo->qi_dqchunklen, 0, &bp);
		if (error)
			goto out_unlock;

		if (!(bp->b_flags & _XBF_DELWRI_Q)) {
			error = -EAGAIN;
			xfs_buf_relse(bp);
			goto out_unlock;
		}
		xfs_buf_unlock(bp);

		xfs_buf_delwri_pushbuf(bp, buffer_list);
		xfs_buf_rele(bp);

		/* Ask the caller to come back to this dquot later. */
		error = -EAGAIN;
		goto out_unlock;
	}

	/* Write the dirty incore dquot into its backing buffer... */
	error = xfs_qm_dqflush(dqp, &bp);
	if (error)
		goto out_unlock;

	/* ...and queue that buffer on the caller's delwri list. */
	xfs_buf_delwri_queue(bp, buffer_list);
	xfs_buf_relse(bp);
out_unlock:
	xfs_dqunlock(dqp);
	return error;
}
1270 :
1271 : /*
1272 : * Walk thru all the filesystem inodes and construct a consistent view
1273 : * of the disk quota world. If the quotacheck fails, disable quotas.
1274 : */
STATIC int
xfs_qm_quotacheck(
	xfs_mount_t	*mp)
{
	int			error, error2;
	uint			flags;
	LIST_HEAD		(buffer_list);
	struct xfs_inode	*uip = mp->m_quotainfo->qi_uquotaip;
	struct xfs_inode	*gip = mp->m_quotainfo->qi_gquotaip;
	struct xfs_inode	*pip = mp->m_quotainfo->qi_pquotaip;

	flags = 0;

	ASSERT(uip || gip || pip);
	ASSERT(XFS_IS_QUOTA_ON(mp));

	xfs_notice(mp, "Quotacheck needed: Please wait.");

	/*
	 * First we go thru all the dquots on disk, USR and GRP/PRJ, and reset
	 * their counters to zero. We need a clean slate.
	 * We don't log our changes till later.
	 */
	if (uip) {
		error = xfs_qm_reset_dqcounts_buf(mp, uip, XFS_DQTYPE_USER,
					 &buffer_list);
		if (error)
			goto error_return;
		flags |= XFS_UQUOTA_CHKD;
	}

	if (gip) {
		error = xfs_qm_reset_dqcounts_buf(mp, gip, XFS_DQTYPE_GROUP,
					 &buffer_list);
		if (error)
			goto error_return;
		flags |= XFS_GQUOTA_CHKD;
	}

	if (pip) {
		error = xfs_qm_reset_dqcounts_buf(mp, pip, XFS_DQTYPE_PROJ,
					 &buffer_list);
		if (error)
			goto error_return;
		flags |= XFS_PQUOTA_CHKD;
	}

	/* Walk every inode and accumulate its usage into the incore dquots. */
	error = xfs_iwalk_threaded(mp, 0, 0, xfs_qm_dqusage_adjust, 0, true,
			NULL);

	/*
	 * On error, the inode walk may have partially populated the dquot
	 * caches.  We must purge them before disabling quota and tearing down
	 * the quotainfo, or else the dquots will leak.
	 */
	if (error)
		goto error_purge;

	/*
	 * We've made all the changes that we need to make incore.  Flush them
	 * down to disk buffers if everything was updated successfully.
	 * Note that the first error is remembered while the remaining walks
	 * still run, so all queues are drained.
	 */
	if (XFS_IS_UQUOTA_ON(mp)) {
		error = xfs_qm_dquot_walk(mp, XFS_DQTYPE_USER, xfs_qm_flush_one,
					  &buffer_list);
	}
	if (XFS_IS_GQUOTA_ON(mp)) {
		error2 = xfs_qm_dquot_walk(mp, XFS_DQTYPE_GROUP, xfs_qm_flush_one,
					   &buffer_list);
		if (!error)
			error = error2;
	}
	if (XFS_IS_PQUOTA_ON(mp)) {
		error2 = xfs_qm_dquot_walk(mp, XFS_DQTYPE_PROJ, xfs_qm_flush_one,
					   &buffer_list);
		if (!error)
			error = error2;
	}

	/* Submit all the dquot buffers queued for delayed write. */
	error2 = xfs_buf_delwri_submit(&buffer_list);
	if (!error)
		error = error2;

	/*
	 * We can get this error if we couldn't do a dquot allocation inside
	 * xfs_qm_dqusage_adjust (via bulkstat). We don't care about the
	 * dirty dquots that might be cached, we just want to get rid of them
	 * and turn quotaoff. The dquots won't be attached to any of the inodes
	 * at this point (because we intentionally didn't in dqget_noattach).
	 */
	if (error)
		goto error_purge;

	/*
	 * If one type of quotas is off, then it will lose its
	 * quotachecked status, since we won't be doing accounting for
	 * that type anymore.
	 */
	mp->m_qflags &= ~XFS_ALL_QUOTA_CHKD;
	mp->m_qflags |= flags;

error_return:
	/* Drop any buffers that never made it to xfs_buf_delwri_submit(). */
	xfs_buf_delwri_cancel(&buffer_list);

	if (error) {
		xfs_warn(mp,
	"Quotacheck: Unsuccessful (Error %d): Disabling quotas.",
			error);
		/*
		 * We must turn off quotas.
		 */
		ASSERT(mp->m_quotainfo != NULL);
		xfs_qm_destroy_quotainfo(mp);
		if (xfs_mount_reset_sbqflags(mp)) {
			xfs_warn(mp,
				"Quotacheck: Failed to reset quota flags.");
		}
	} else
		xfs_notice(mp, "Quotacheck: Done.");
	return error;

error_purge:
	/*
	 * On error, we may have inodes queued for inactivation. This may try
	 * to attach dquots to the inode before running cleanup operations on
	 * the inode and this can race with the xfs_qm_destroy_quotainfo() call
	 * below that frees mp->m_quotainfo. To avoid this race, flush all the
	 * pending inodegc operations before we purge the dquots from memory,
	 * ensuring that background inactivation is idle whilst we turn off
	 * quotas.
	 */
	xfs_inodegc_flush(mp);
	xfs_qm_dqpurge_all(mp);
	goto error_return;

}
1411 :
1412 : /*
1413 : * This is called from xfs_mountfs to start quotas and initialize all
1414 : * necessary data structures like quotainfo. This is also responsible for
1415 : * running a quotacheck as necessary. We are guaranteed that the superblock
1416 : * is consistently read in at this point.
1417 : *
1418 : * If we fail here, the mount will continue with quota turned off. We don't
 * need to indicate success or failure at all.
1420 : */
void
xfs_qm_mount_quotas(
	struct xfs_mount	*mp)
{
	int			error = 0;
	uint			sbf;

	/*
	 * If quotas on realtime volumes is not supported, we disable
	 * quotas immediately.
	 */
	if (mp->m_sb.sb_rextents) {
		xfs_notice(mp, "Cannot turn on quotas for realtime filesystem");
		mp->m_qflags = 0;
		goto write_changes;
	}

	ASSERT(XFS_IS_QUOTA_ON(mp));

	/*
	 * Allocate the quotainfo structure inside the mount struct, and
	 * create quotainode(s), and change/rev superblock if necessary.
	 */
	error = xfs_qm_init_quotainfo(mp);
	if (error) {
		/*
		 * We must turn off quotas.
		 */
		ASSERT(mp->m_quotainfo == NULL);
		mp->m_qflags = 0;
		goto write_changes;
	}
	/*
	 * If any of the quotas are not consistent, do a quotacheck.
	 */
	if (XFS_QM_NEED_QUOTACHECK(mp)) {
		error = xfs_qm_quotacheck(mp);
		if (error) {
			/* Quotacheck failed and disabled quotas. */
			return;
		}
	}
	/*
	 * If one type of quotas is off, then it will lose its
	 * quotachecked status, since we won't be doing accounting for
	 * that type anymore.
	 */
	if (!XFS_IS_UQUOTA_ON(mp))
		mp->m_qflags &= ~XFS_UQUOTA_CHKD;
	if (!XFS_IS_GQUOTA_ON(mp))
		mp->m_qflags &= ~XFS_GQUOTA_CHKD;
	if (!XFS_IS_PQUOTA_ON(mp))
		mp->m_qflags &= ~XFS_PQUOTA_CHKD;

 write_changes:
	/*
	 * We actually don't have to acquire the m_sb_lock at all.
	 * This can only be called from mount, and that's single threaded. XXX
	 */
	spin_lock(&mp->m_sb_lock);
	sbf = mp->m_sb.sb_qflags;
	mp->m_sb.sb_qflags = mp->m_qflags & XFS_MOUNT_QUOTA_ALL;
	spin_unlock(&mp->m_sb_lock);

	/* Only write the superblock when the quota flags actually changed. */
	if (sbf != (mp->m_qflags & XFS_MOUNT_QUOTA_ALL)) {
		if (xfs_sync_sb(mp, false)) {
			/*
			 * We could only have been turning quotas off.
			 * We aren't in very good shape actually because
			 * the incore structures are convinced that quotas are
			 * off, but the on disk superblock doesn't know that !
			 */
			ASSERT(!(XFS_IS_QUOTA_ON(mp)));
			xfs_alert(mp, "%s: Superblock update failed!",
				__func__);
		}
	}

	if (error) {
		xfs_warn(mp, "Failed to initialize disk quotas.");
		return;
	}
}
1504 :
1505 : /*
1506 : * This is called after the superblock has been read in and we're ready to
1507 : * iget the quota inodes.
1508 : */
STATIC int
xfs_qm_init_quotainos(
	xfs_mount_t	*mp)
{
	struct xfs_inode	*uip = NULL;
	struct xfs_inode	*gip = NULL;
	struct xfs_inode	*pip = NULL;
	int			error;
	uint			flags = 0;

	ASSERT(mp->m_quotainfo);

	/*
	 * Get the uquota and gquota inodes
	 */
	if (xfs_has_quota(mp)) {
		if (XFS_IS_UQUOTA_ON(mp) &&
		    mp->m_sb.sb_uquotino != NULLFSINO) {
			ASSERT(mp->m_sb.sb_uquotino > 0);
			error = xfs_iget(mp, NULL, mp->m_sb.sb_uquotino,
					     0, 0, &uip);
			if (error)
				return error;
		}
		if (XFS_IS_GQUOTA_ON(mp) &&
		    mp->m_sb.sb_gquotino != NULLFSINO) {
			ASSERT(mp->m_sb.sb_gquotino > 0);
			error = xfs_iget(mp, NULL, mp->m_sb.sb_gquotino,
					     0, 0, &gip);
			if (error)
				goto error_rele;
		}
		if (XFS_IS_PQUOTA_ON(mp) &&
		    mp->m_sb.sb_pquotino != NULLFSINO) {
			ASSERT(mp->m_sb.sb_pquotino > 0);
			error = xfs_iget(mp, NULL, mp->m_sb.sb_pquotino,
					     0, 0, &pip);
			if (error)
				goto error_rele;
		}
	} else {
		/*
		 * Superblock lacks the quota feature; ask the first
		 * qino_alloc below to apply XFS_QMOPT_SBVERSION.
		 */
		flags |= XFS_QMOPT_SBVERSION;
	}

	/*
	 * Create the three inodes, if they don't exist already. The changes
	 * made above will get added to a transaction and logged in one of
	 * the qino_alloc calls below.  If the device is readonly,
	 * temporarily switch to read-write to do this.
	 */
	if (XFS_IS_UQUOTA_ON(mp) && uip == NULL) {
		error = xfs_qm_qino_alloc(mp, &uip,
					      flags | XFS_QMOPT_UQUOTA);
		if (error)
			goto error_rele;

		/* Superblock version handled; don't pass SBVERSION again. */
		flags &= ~XFS_QMOPT_SBVERSION;
	}
	if (XFS_IS_GQUOTA_ON(mp) && gip == NULL) {
		error = xfs_qm_qino_alloc(mp, &gip,
					  flags | XFS_QMOPT_GQUOTA);
		if (error)
			goto error_rele;

		flags &= ~XFS_QMOPT_SBVERSION;
	}
	if (XFS_IS_PQUOTA_ON(mp) && pip == NULL) {
		error = xfs_qm_qino_alloc(mp, &pip,
					  flags | XFS_QMOPT_PQUOTA);
		if (error)
			goto error_rele;
	}

	mp->m_quotainfo->qi_uquotaip = uip;
	mp->m_quotainfo->qi_gquotaip = gip;
	mp->m_quotainfo->qi_pquotaip = pip;

	return 0;

error_rele:
	/* Drop references to whichever quota inodes we already grabbed. */
	if (uip)
		xfs_irele(uip);
	if (gip)
		xfs_irele(gip);
	if (pip)
		xfs_irele(pip);
	return error;
}
1597 :
1598 : STATIC void
1599 34771 : xfs_qm_destroy_quotainos(
1600 : struct xfs_quotainfo *qi)
1601 : {
1602 34771 : if (qi->qi_uquotaip) {
1603 5 : xfs_irele(qi->qi_uquotaip);
1604 5 : qi->qi_uquotaip = NULL; /* paranoia */
1605 : }
1606 34771 : if (qi->qi_gquotaip) {
1607 5 : xfs_irele(qi->qi_gquotaip);
1608 5 : qi->qi_gquotaip = NULL;
1609 : }
1610 34771 : if (qi->qi_pquotaip) {
1611 5 : xfs_irele(qi->qi_pquotaip);
1612 5 : qi->qi_pquotaip = NULL;
1613 : }
1614 34771 : }
1615 :
1616 : STATIC void
1617 494851 : xfs_qm_dqfree_one(
1618 : struct xfs_dquot *dqp)
1619 : {
1620 494851 : struct xfs_mount *mp = dqp->q_mount;
1621 494851 : struct xfs_quotainfo *qi = mp->m_quotainfo;
1622 :
1623 494851 : mutex_lock(&qi->qi_tree_lock);
1624 494851 : radix_tree_delete(xfs_dquot_tree(qi, xfs_dquot_type(dqp)), dqp->q_id);
1625 :
1626 494851 : qi->qi_dquots--;
1627 494851 : mutex_unlock(&qi->qi_tree_lock);
1628 :
1629 494851 : xfs_qm_dqdestroy(dqp);
1630 494851 : }
1631 :
1632 : /* --------------- utility functions for vnodeops ---------------- */
1633 :
1634 :
1635 : /*
1636 : * Given an inode, a uid, gid and prid make sure that we have
1637 : * allocated relevant dquot(s) on disk, and that we won't exceed inode
1638 : * quotas by creating this file.
1639 : * This also attaches dquot(s) to the given inode after locking it,
1640 : * and returns the dquots corresponding to the uid and/or gid.
1641 : *
1642 : * in : inode (unlocked)
1643 : * out : udquot, gdquot with references taken and unlocked
1644 : */
int
xfs_qm_vop_dqalloc(
	struct xfs_inode	*ip,
	kuid_t			uid,
	kgid_t			gid,
	prid_t			prid,
	uint			flags,
	struct xfs_dquot	**O_udqpp,
	struct xfs_dquot	**O_gdqpp,
	struct xfs_dquot	**O_pdqpp)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct inode		*inode = VFS_I(ip);
	struct user_namespace	*user_ns = inode->i_sb->s_user_ns;
	struct xfs_dquot	*uq = NULL;
	struct xfs_dquot	*gq = NULL;
	struct xfs_dquot	*pq = NULL;
	int			error;
	uint			lockflags;

	if (!XFS_IS_QUOTA_ON(mp))
		return 0;

	lockflags = XFS_ILOCK_EXCL;
	xfs_ilock(ip, lockflags);

	/* Inherit the parent directory's gid if requested. */
	if ((flags & XFS_QMOPT_INHERIT) && XFS_INHERIT_GID(ip))
		gid = inode->i_gid;

	/*
	 * Attach the dquot(s) to this inode, doing a dquot allocation
	 * if necessary. The dquot(s) will not be locked.
	 */
	if (XFS_NOT_DQATTACHED(mp, ip)) {
		error = xfs_qm_dqattach_locked(ip, true);
		if (error) {
			xfs_iunlock(ip, lockflags);
			return error;
		}
	}

	if ((flags & XFS_QMOPT_UQUOTA) && XFS_IS_UQUOTA_ON(mp)) {
		ASSERT(O_udqpp);
		if (!uid_eq(inode->i_uid, uid)) {
			/*
			 * What we need is the dquot that has this uid, and
			 * if we send the inode to dqget, the uid of the inode
			 * takes priority over what's sent in the uid argument.
			 * We must unlock inode here before calling dqget if
			 * we're not sending the inode, because otherwise
			 * we'll deadlock by doing trans_reserve while
			 * holding ilock.
			 */
			xfs_iunlock(ip, lockflags);
			error = xfs_qm_dqget(mp, from_kuid(user_ns, uid),
					XFS_DQTYPE_USER, true, &uq);
			if (error) {
				ASSERT(error != -ENOENT);
				return error;
			}
			/*
			 * Get the ilock in the right order.
			 */
			xfs_dqunlock(uq);
			lockflags = XFS_ILOCK_SHARED;
			xfs_ilock(ip, lockflags);
		} else {
			/*
			 * Take an extra reference, because we'll return
			 * this to caller
			 */
			ASSERT(ip->i_udquot);
			uq = xfs_qm_dqhold(ip->i_udquot);
		}
	}
	if ((flags & XFS_QMOPT_GQUOTA) && XFS_IS_GQUOTA_ON(mp)) {
		ASSERT(O_gdqpp);
		if (!gid_eq(inode->i_gid, gid)) {
			/* Same unlock/dqget/relock dance as for the uid. */
			xfs_iunlock(ip, lockflags);
			error = xfs_qm_dqget(mp, from_kgid(user_ns, gid),
					XFS_DQTYPE_GROUP, true, &gq);
			if (error) {
				ASSERT(error != -ENOENT);
				goto error_rele;
			}
			xfs_dqunlock(gq);
			lockflags = XFS_ILOCK_SHARED;
			xfs_ilock(ip, lockflags);
		} else {
			ASSERT(ip->i_gdquot);
			gq = xfs_qm_dqhold(ip->i_gdquot);
		}
	}
	if ((flags & XFS_QMOPT_PQUOTA) && XFS_IS_PQUOTA_ON(mp)) {
		ASSERT(O_pdqpp);
		if (ip->i_projid != prid) {
			xfs_iunlock(ip, lockflags);
			error = xfs_qm_dqget(mp, prid,
					XFS_DQTYPE_PROJ, true, &pq);
			if (error) {
				ASSERT(error != -ENOENT);
				goto error_rele;
			}
			xfs_dqunlock(pq);
			lockflags = XFS_ILOCK_SHARED;
			xfs_ilock(ip, lockflags);
		} else {
			ASSERT(ip->i_pdquot);
			pq = xfs_qm_dqhold(ip->i_pdquot);
		}
	}
	trace_xfs_dquot_dqalloc(ip);

	/* Hand the references to the caller, or drop the unwanted ones. */
	xfs_iunlock(ip, lockflags);
	if (O_udqpp)
		*O_udqpp = uq;
	else
		xfs_qm_dqrele(uq);
	if (O_gdqpp)
		*O_gdqpp = gq;
	else
		xfs_qm_dqrele(gq);
	if (O_pdqpp)
		*O_pdqpp = pq;
	else
		xfs_qm_dqrele(pq);
	return 0;

error_rele:
	/* uq/gq are the only references that can be held on this path. */
	xfs_qm_dqrele(gq);
	xfs_qm_dqrele(uq);
	return error;
}
1778 :
1779 : /*
1780 : * Actually transfer ownership, and do dquot modifications.
1781 : * These were already reserved.
1782 : */
struct xfs_dquot *
xfs_qm_vop_chown(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	struct xfs_dquot	**IO_olddq,
	struct xfs_dquot	*newdq)
{
	struct xfs_dquot	*prevdq;
	/* Realtime inodes account blocks against the rt counter instead. */
	uint			bfield = XFS_IS_REALTIME_INODE(ip) ?
				 XFS_TRANS_DQ_RTBCOUNT : XFS_TRANS_DQ_BCOUNT;


	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
	ASSERT(XFS_IS_QUOTA_ON(ip->i_mount));

	/* old dquot */
	prevdq = *IO_olddq;
	ASSERT(prevdq);
	ASSERT(prevdq != newdq);

	/* Move this inode's block and inode usage off the old dquot... */
	xfs_trans_mod_dquot(tp, prevdq, bfield, -(ip->i_nblocks));
	xfs_trans_mod_dquot(tp, prevdq, XFS_TRANS_DQ_ICOUNT, -1);

	/* the sparkling new dquot */
	xfs_trans_mod_dquot(tp, newdq, bfield, ip->i_nblocks);
	xfs_trans_mod_dquot(tp, newdq, XFS_TRANS_DQ_ICOUNT, 1);

	/*
	 * Back when we made quota reservations for the chown, we reserved the
	 * ondisk blocks + delalloc blocks with the new dquot.  Now that we've
	 * switched the dquots, decrease the new dquot's block reservation
	 * (having already bumped up the real counter) so that we don't have
	 * any reservation to give back when we commit.
	 */
	xfs_trans_mod_dquot(tp, newdq, XFS_TRANS_DQ_RES_BLKS,
			-ip->i_delayed_blks);

	/*
	 * Give the incore reservation for delalloc blocks back to the old
	 * dquot.  We don't normally handle delalloc quota reservations
	 * transactionally, so just lock the dquot and subtract from the
	 * reservation.  Dirty the transaction because it's too late to turn
	 * back now.
	 */
	tp->t_flags |= XFS_TRANS_DIRTY;
	xfs_dqlock(prevdq);
	ASSERT(prevdq->q_blk.reserved >= ip->i_delayed_blks);
	prevdq->q_blk.reserved -= ip->i_delayed_blks;
	xfs_dqunlock(prevdq);

	/*
	 * Take an extra reference, because the inode is going to keep
	 * this dquot pointer even after the trans_commit.
	 */
	*IO_olddq = xfs_qm_dqhold(newdq);

	/* NOTE(review): caller appears to own the returned prevdq ref. */
	return prevdq;
}
1841 :
1842 : int
1843 46002603 : xfs_qm_vop_rename_dqattach(
1844 : struct xfs_inode **i_tab)
1845 : {
1846 46002603 : struct xfs_mount *mp = i_tab[0]->i_mount;
1847 46002603 : int i;
1848 :
1849 46002603 : if (!XFS_IS_QUOTA_ON(mp))
1850 : return 0;
1851 :
1852 101034670 : for (i = 0; (i < 4 && i_tab[i]); i++) {
1853 77814430 : struct xfs_inode *ip = i_tab[i];
1854 77814430 : int error;
1855 :
1856 : /*
1857 : * Watch out for duplicate entries in the table.
1858 : */
1859 77814430 : if (i == 0 || ip != i_tab[i-1]) {
1860 75546534 : if (XFS_NOT_DQATTACHED(mp, ip)) {
1861 659297 : error = xfs_qm_dqattach(ip);
1862 659300 : if (error)
1863 792 : return error;
1864 : }
1865 : }
1866 : }
1867 : return 0;
1868 : }
1869 :
/*
 * Attach the dquots reserved for a newly created inode and charge the
 * new inode against each of them inside the given transaction.
 */
void
xfs_qm_vop_create_dqattach(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	struct xfs_dquot	*udqp,
	struct xfs_dquot	*gdqp,
	struct xfs_dquot	*pdqp)
{
	struct xfs_mount	*mp = tp->t_mountp;

	if (!XFS_IS_QUOTA_ON(mp))
		return;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));

	if (udqp && XFS_IS_UQUOTA_ON(mp)) {
		/* The inode must not already carry a user dquot. */
		ASSERT(ip->i_udquot == NULL);
		ASSERT(i_uid_read(VFS_I(ip)) == udqp->q_id);

		ip->i_udquot = xfs_qm_dqhold(udqp);
		xfs_trans_mod_dquot(tp, udqp, XFS_TRANS_DQ_ICOUNT, 1);
	}
	if (gdqp && XFS_IS_GQUOTA_ON(mp)) {
		ASSERT(ip->i_gdquot == NULL);
		ASSERT(i_gid_read(VFS_I(ip)) == gdqp->q_id);

		ip->i_gdquot = xfs_qm_dqhold(gdqp);
		xfs_trans_mod_dquot(tp, gdqp, XFS_TRANS_DQ_ICOUNT, 1);
	}
	if (pdqp && XFS_IS_PQUOTA_ON(mp)) {
		ASSERT(ip->i_pdquot == NULL);
		ASSERT(ip->i_projid == pdqp->q_id);

		ip->i_pdquot = xfs_qm_dqhold(pdqp);
		xfs_trans_mod_dquot(tp, pdqp, XFS_TRANS_DQ_ICOUNT, 1);
	}
}
1907 :
1908 : /* Decide if this inode's dquot is near an enforcement boundary. */
1909 : bool
1910 126400027 : xfs_inode_near_dquot_enforcement(
1911 : struct xfs_inode *ip,
1912 : xfs_dqtype_t type)
1913 : {
1914 126400027 : struct xfs_dquot *dqp;
1915 126400027 : int64_t freesp;
1916 :
1917 : /* We only care for quotas that are enabled and enforced. */
1918 126400027 : dqp = xfs_inode_dquot(ip, type);
1919 126400027 : if (!dqp || !xfs_dquot_is_enforced(dqp))
1920 64558083 : return false;
1921 :
1922 61813624 : if (xfs_dquot_res_over_limits(&dqp->q_ino) ||
1923 61813176 : xfs_dquot_res_over_limits(&dqp->q_rtb))
1924 : return true;
1925 :
1926 : /* For space on the data device, check the various thresholds. */
1927 61813176 : if (!dqp->q_prealloc_hi_wmark)
1928 : return false;
1929 :
1930 25555 : if (dqp->q_blk.reserved < dqp->q_prealloc_lo_wmark)
1931 : return false;
1932 :
1933 453 : if (dqp->q_blk.reserved >= dqp->q_prealloc_hi_wmark)
1934 : return true;
1935 :
1936 240 : freesp = dqp->q_prealloc_hi_wmark - dqp->q_blk.reserved;
1937 240 : if (freesp < dqp->q_low_space[XFS_QLOWSP_5_PCNT])
1938 0 : return true;
1939 :
1940 : return false;
1941 : }
|