Line data Source code
1 : // SPDX-License-Identifier: GPL-2.0
2 : /*
3 : * Copyright (c) 2000-2005 Silicon Graphics, Inc.
4 : * All Rights Reserved.
5 : */
6 : #include "xfs.h"
7 : #include "xfs_fs.h"
8 : #include "xfs_shared.h"
9 : #include "xfs_format.h"
10 : #include "xfs_log_format.h"
11 : #include "xfs_trans_resv.h"
12 : #include "xfs_bit.h"
13 : #include "xfs_sb.h"
14 : #include "xfs_mount.h"
15 : #include "xfs_inode.h"
16 : #include "xfs_iwalk.h"
17 : #include "xfs_quota.h"
18 : #include "xfs_bmap.h"
19 : #include "xfs_bmap_util.h"
20 : #include "xfs_trans.h"
21 : #include "xfs_trans_space.h"
22 : #include "xfs_qm.h"
23 : #include "xfs_trace.h"
24 : #include "xfs_icache.h"
25 : #include "xfs_error.h"
26 : #include "xfs_ag.h"
27 : #include "xfs_ialloc.h"
28 : #include "xfs_log_priv.h"
29 : #include "xfs_health.h"
30 :
31 : /*
32 : * The global quota manager. There is only one of these for the entire
33 : * system, _not_ one per file system. XQM keeps track of the overall
34 : * quota functionality, including maintaining the freelist and hash
35 : * tables of dquots.
36 : */
37 : STATIC int xfs_qm_init_quotainos(struct xfs_mount *mp);
38 : STATIC int xfs_qm_init_quotainfo(struct xfs_mount *mp);
39 :
40 : STATIC void xfs_qm_destroy_quotainos(struct xfs_quotainfo *qi);
41 : STATIC void xfs_qm_dqfree_one(struct xfs_dquot *dqp);
42 : /*
43 : * We use the batch lookup interface to iterate over the dquots as it
44 : * currently is the only interface into the radix tree code that allows
45 : * fuzzy lookups instead of exact matches. Holding the lock over multiple
46 : * operations is fine as all callers are used either during mount/umount
47 : * or quotaoff.
48 : */
49 : #define XFS_DQ_LOOKUP_BATCH 32
50 :
 : /*
 : * Walk every dquot of @type in the per-mount radix tree and call @execute
 : * on each.  An -EAGAIN return from @execute marks the dquot as skipped;
 : * after a full pass, any skipped dquots cause a short delay and a complete
 : * restart of the walk.  The first error other than -EAGAIN is remembered
 : * and returned; -EFSCORRUPTED aborts the walk immediately.
 : */
51 : STATIC int
52 74144 : xfs_qm_dquot_walk(
53 : struct xfs_mount *mp,
54 : xfs_dqtype_t type,
55 : int (*execute)(struct xfs_dquot *dqp, void *data),
56 : void *data)
57 : {
58 74144 : struct xfs_quotainfo *qi = mp->m_quotainfo;
59 74144 : struct radix_tree_root *tree = xfs_dquot_tree(qi, type);
60 74144 : uint32_t next_index;
61 74144 : int last_error = 0;
62 74144 : int skipped;
63 74144 : int nr_found;
64 :
65 74144 : restart:
66 74144 : skipped = 0;
67 74144 : next_index = 0;
68 74144 : nr_found = 0;
69 :
70 292048 : while (1) {
71 366192 : struct xfs_dquot *batch[XFS_DQ_LOOKUP_BATCH];
72 366192 : int error;
73 366192 : int i;
74 :
 : /* qi_tree_lock is held across the whole batch; see comment above. */
75 366192 : mutex_lock(&qi->qi_tree_lock);
76 366192 : nr_found = radix_tree_gang_lookup(tree, (void **)batch,
77 : next_index, XFS_DQ_LOOKUP_BATCH);
78 366192 : if (!nr_found) {
79 74142 : mutex_unlock(&qi->qi_tree_lock);
80 74142 : break;
81 : }
82 :
83 8214202 : for (i = 0; i < nr_found; i++) {
84 7922152 : struct xfs_dquot *dqp = batch[i];
85 :
 : /* Resume the next gang lookup just past this id. */
86 7922152 : next_index = dqp->q_id + 1;
87 :
88 7922152 : error = execute(batch[i], data);
89 7922152 : if (error == -EAGAIN) {
90 0 : skipped++;
91 0 : continue;
92 : }
93 7922152 : if (error && last_error != -EFSCORRUPTED)
94 0 : last_error = error;
95 : }
96 :
97 292050 : mutex_unlock(&qi->qi_tree_lock);
98 :
99 : /* bail out if the filesystem is corrupted. */
100 292050 : if (last_error == -EFSCORRUPTED) {
101 : skipped = 0;
102 : break;
103 : }
104 : /* we're done if id overflows back to zero */
105 292050 : if (!next_index)
106 : break;
107 : }
108 :
109 74144 : if (skipped) {
110 0 : delay(1);
111 0 : goto restart;
112 : }
113 :
114 74144 : return last_error;
115 : }
116 :
117 :
118 : /*
119 : * Purge a dquot from all tracking data structures and free it.
120 : */
121 : STATIC int
122 7910950 : xfs_qm_dqpurge(
123 : struct xfs_dquot *dqp,
124 : void *data)
125 : {
126 7910950 : struct xfs_quotainfo *qi = dqp->q_mount->m_quotainfo;
 : /* -EAGAIN tells xfs_qm_dquot_walk() to retry this dquot later. */
127 7910950 : int error = -EAGAIN;
128 :
129 7910950 : xfs_dqlock(dqp);
 : /* Someone else is freeing it, or it's still referenced: skip for now. */
130 7910950 : if ((dqp->q_flags & XFS_DQFLAG_FREEING) || dqp->q_nrefs != 0)
131 0 : goto out_unlock;
132 :
133 7910950 : dqp->q_flags |= XFS_DQFLAG_FREEING;
134 :
135 7910950 : xfs_dqflock(dqp);
136 :
137 : /*
138 : * If we are turning this type of quotas off, we don't care
139 : * about the dirty metadata sitting in this dquot. OTOH, if
140 : * we're unmounting, we do care, so we flush it and wait.
141 : */
142 7910950 : if (XFS_DQ_IS_DIRTY(dqp)) {
143 40097 : struct xfs_buf *bp = NULL;
144 :
145 : /*
146 : * We don't care about getting disk errors here. We need
147 : * to purge this dquot anyway, so we go ahead regardless.
148 : */
149 40097 : error = xfs_qm_dqflush(dqp, &bp);
150 40097 : if (!error) {
151 0 : error = xfs_bwrite(bp);
152 0 : xfs_buf_relse(bp);
 : /* Flush couldn't lock the buffer; back off and retry later. */
153 40097 : } else if (error == -EAGAIN) {
154 0 : dqp->q_flags &= ~XFS_DQFLAG_FREEING;
155 0 : goto out_unlock;
156 : }
 : /* Re-take the flush lock so we tear down with it held. */
157 40097 : xfs_dqflock(dqp);
158 : }
159 :
160 7910950 : ASSERT(atomic_read(&dqp->q_pincount) == 0);
161 15821900 : ASSERT(xlog_is_shutdown(dqp->q_logitem.qli_item.li_log) ||
162 : !test_bit(XFS_LI_IN_AIL, &dqp->q_logitem.qli_item.li_flags));
163 :
164 7910950 : xfs_dqfunlock(dqp);
165 7910950 : xfs_dqunlock(dqp);
166 :
167 7910950 : radix_tree_delete(xfs_dquot_tree(qi, xfs_dquot_type(dqp)), dqp->q_id);
168 7910950 : qi->qi_dquots--;
169 :
170 : /*
171 : * We move dquots to the freelist as soon as their reference count
172 : * hits zero, so it really should be on the freelist here.
173 : */
174 7910950 : ASSERT(!list_empty(&dqp->q_lru));
175 7910950 : list_lru_del(&qi->qi_lru, &dqp->q_lru);
176 7910950 : XFS_STATS_DEC(dqp->q_mount, xs_qm_dquot_unused);
177 :
178 7910950 : xfs_qm_dqdestroy(dqp);
179 7910950 : return 0;
180 :
181 0 : out_unlock:
182 0 : xfs_dqunlock(dqp);
183 0 : return error;
184 : }
185 :
186 : /*
187 : * Purge the dquot cache.
188 : */
 : /* Purge user, group and project dquots in turn; dqpurge retries via walk. */
189 : static void
190 21883 : xfs_qm_dqpurge_all(
191 : struct xfs_mount *mp)
192 : {
193 21883 : xfs_qm_dquot_walk(mp, XFS_DQTYPE_USER, xfs_qm_dqpurge, NULL);
194 21883 : xfs_qm_dquot_walk(mp, XFS_DQTYPE_GROUP, xfs_qm_dqpurge, NULL);
195 21883 : xfs_qm_dquot_walk(mp, XFS_DQTYPE_PROJ, xfs_qm_dqpurge, NULL);
196 21883 : }
197 :
198 : /*
199 : * Just destroy the quotainfo structure.
200 : */
201 : void
202 24110 : xfs_qm_unmount(
203 : struct xfs_mount *mp)
204 : {
 : /* Nothing to do if quotas were never initialized on this mount. */
205 24110 : if (mp->m_quotainfo) {
206 21883 : xfs_qm_dqpurge_all(mp);
207 21883 : xfs_qm_destroy_quotainfo(mp);
208 : }
209 24110 : }
210 :
211 : /*
212 : * Called from the vfsops layer.
213 : */
214 : void
215 24104 : xfs_qm_unmount_quotas(
216 : xfs_mount_t *mp)
217 : {
218 : /*
219 : * Release the dquots that root inode, et al might be holding,
220 : * before we flush quotas and blow away the quotainfo structure.
221 : */
222 24104 : ASSERT(mp->m_rootip);
223 24104 : xfs_qm_dqdetach(mp->m_rootip);
 : /* Realtime bitmap/summary inodes may also hold dquot references. */
224 24104 : if (mp->m_rbmip)
225 24104 : xfs_qm_dqdetach(mp->m_rbmip);
226 24104 : if (mp->m_rsumip)
227 24104 : xfs_qm_dqdetach(mp->m_rsumip);
228 :
229 : /*
230 : * Release the quota inodes.
231 : */
232 24104 : if (mp->m_quotainfo) {
233 21881 : if (mp->m_quotainfo->qi_uquotaip) {
234 21717 : xfs_irele(mp->m_quotainfo->qi_uquotaip);
235 21717 : mp->m_quotainfo->qi_uquotaip = NULL;
236 : }
237 21881 : if (mp->m_quotainfo->qi_gquotaip) {
238 21597 : xfs_irele(mp->m_quotainfo->qi_gquotaip);
239 21597 : mp->m_quotainfo->qi_gquotaip = NULL;
240 : }
241 21881 : if (mp->m_quotainfo->qi_pquotaip) {
242 21569 : xfs_irele(mp->m_quotainfo->qi_pquotaip);
243 21569 : mp->m_quotainfo->qi_pquotaip = NULL;
244 : }
245 : }
246 24104 : }
247 :
 : /*
 : * Attach one dquot of @type to @ip, storing it in *@IO_idqpp (which points
 : * at the inode's i_udquot/i_gdquot/i_pdquot slot).  Returns 0 with the
 : * dquot referenced and stored, or a negative errno from dqget.
 : */
248 : STATIC int
249 60280871 : xfs_qm_dqattach_one(
250 : struct xfs_inode *ip,
251 : xfs_dqtype_t type,
252 : bool doalloc,
253 : struct xfs_dquot **IO_idqpp)
254 : {
255 60280871 : struct xfs_dquot *dqp;
256 60280871 : int error;
257 :
258 60280871 : ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
259 60281046 : error = 0;
260 :
261 : /*
262 : * See if we already have it in the inode itself. IO_idqpp is &i_udquot
263 : * or &i_gdquot. This made the code look weird, but made the logic a lot
264 : * simpler.
265 : */
266 60281046 : dqp = *IO_idqpp;
267 60281046 : if (dqp) {
268 0 : trace_xfs_dqattach_found(dqp);
269 0 : return 0;
270 : }
271 :
272 : /*
273 : * Find the dquot from somewhere. This bumps the reference count of
274 : * dquot and returns it locked. This can return ENOENT if dquot didn't
275 : * exist on disk and we didn't ask it to allocate; ESRCH if quotas got
276 : * turned off suddenly.
277 : */
278 60281046 : error = xfs_qm_dqget_inode(ip, type, doalloc, &dqp);
279 60282025 : if (error)
280 : return error;
281 :
282 60273966 : trace_xfs_dqattach_get(dqp);
283 :
284 : /*
285 : * dqget may have dropped and re-acquired the ilock, but it guarantees
286 : * that the dquot returned is the one that should go in the inode.
287 : */
288 60274009 : *IO_idqpp = dqp;
289 60274009 : xfs_dqunlock(dqp);
290 60274009 : return 0;
291 : }
292 :
 : /*
 : * Decide whether @ip needs dquots attached: quotas must be on, the inode
 : * must be missing at least one enabled dquot, and it must not itself be
 : * one of the quota inodes.
 : */
293 : static bool
294 1101372300 : xfs_qm_need_dqattach(
295 : struct xfs_inode *ip)
296 : {
297 1101372300 : struct xfs_mount *mp = ip->i_mount;
298 :
299 1101372300 : if (!XFS_IS_QUOTA_ON(mp))
300 : return false;
301 848943258 : if (!XFS_NOT_DQATTACHED(mp, ip))
302 : return false;
303 75357826 : if (xfs_is_quota_inode(&mp->m_sb, ip->i_ino))
304 0 : return false;
305 : return true;
306 : }
307 :
308 : /*
309 : * Given a locked inode, attach dquot(s) to it, taking U/G/P-QUOTAON
310 : * into account.
311 : * If @doalloc is true, the dquot(s) will be allocated if needed.
312 : * Inode may get unlocked and relocked in here, and the caller must deal with
313 : * the consequences.
314 : */
315 : int
316 514702151 : xfs_qm_dqattach_locked(
317 : xfs_inode_t *ip,
318 : bool doalloc)
319 : {
320 514702151 : xfs_mount_t *mp = ip->i_mount;
321 514702151 : int error = 0;
322 :
323 514702151 : if (!xfs_qm_need_dqattach(ip))
324 : return 0;
325 :
326 20099165 : ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
327 :
 : /* Attach each enabled quota type that the inode doesn't already hold. */
328 20099167 : if (XFS_IS_UQUOTA_ON(mp) && !ip->i_udquot) {
329 20098244 : error = xfs_qm_dqattach_one(ip, XFS_DQTYPE_USER,
330 : doalloc, &ip->i_udquot);
331 20098430 : if (error)
332 6502 : goto done;
333 20091928 : ASSERT(ip->i_udquot);
334 : }
335 :
336 20092851 : if (XFS_IS_GQUOTA_ON(mp) && !ip->i_gdquot) {
337 20092484 : error = xfs_qm_dqattach_one(ip, XFS_DQTYPE_GROUP,
338 : doalloc, &ip->i_gdquot);
339 20092494 : if (error)
340 1281 : goto done;
341 20091213 : ASSERT(ip->i_gdquot);
342 : }
343 :
344 20091580 : if (XFS_IS_PQUOTA_ON(mp) && !ip->i_pdquot) {
345 20091114 : error = xfs_qm_dqattach_one(ip, XFS_DQTYPE_PROJ,
346 : doalloc, &ip->i_pdquot);
347 20091131 : if (error)
348 290 : goto done;
349 20090841 : ASSERT(ip->i_pdquot);
350 : }
351 :
352 20091307 : done:
353 : /*
354 : * Don't worry about the dquots that we may have attached before any
355 : * error - they'll get detached later if it has not already been done.
356 : */
357 20099380 : ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
358 : return error;
359 : }
360 :
 : /*
 : * Lock the inode and attach its dquots.  Convenience wrapper around
 : * xfs_qm_dqattach_locked() for callers that don't hold the ilock.
 : */
361 : int
362 586858976 : xfs_qm_dqattach(
363 : struct xfs_inode *ip)
364 : {
365 586858976 : int error;
366 :
 : /* Unlocked pre-check avoids taking the ilock in the common case. */
367 586858976 : if (!xfs_qm_need_dqattach(ip))
368 : return 0;
369 :
370 17580117 : xfs_ilock(ip, XFS_ILOCK_EXCL);
371 17580134 : error = xfs_qm_dqattach_locked(ip, false);
372 17580291 : xfs_iunlock(ip, XFS_ILOCK_EXCL);
373 :
374 17580291 : return error;
375 : }
376 :
377 : /*
378 : * Release dquots (and their references) if any.
379 : * The inode should be locked EXCL except when this's called by
380 : * xfs_ireclaim.
381 : */
382 : void
383 1055902244 : xfs_qm_dqdetach(
384 : xfs_inode_t *ip)
385 : {
 : /* Fast path: nothing attached, nothing to release. */
386 1055902244 : if (!(ip->i_udquot || ip->i_gdquot || ip->i_pdquot))
387 : return;
388 :
389 59125045 : trace_xfs_dquot_dqdetach(ip);
390 :
391 118342178 : ASSERT(!xfs_is_quota_inode(&ip->i_mount->m_sb, ip->i_ino));
 : /* Drop each attached dquot reference and clear the inode's pointer. */
392 59171089 : if (ip->i_udquot) {
393 59170017 : xfs_qm_dqrele(ip->i_udquot);
394 59170007 : ip->i_udquot = NULL;
395 : }
396 59171079 : if (ip->i_gdquot) {
397 59061812 : xfs_qm_dqrele(ip->i_gdquot);
398 59061837 : ip->i_gdquot = NULL;
399 : }
400 59171104 : if (ip->i_pdquot) {
401 59061455 : xfs_qm_dqrele(ip->i_pdquot);
402 59061494 : ip->i_pdquot = NULL;
403 : }
404 : }
405 :
 : /* Scratch state for one shrinker pass over the dquot LRU. */
406 : struct xfs_qm_isolate {
407 : struct list_head buffers; /* delwri buffers from flushing dirty dquots */
408 : struct list_head dispose; /* dquots isolated from the LRU, to be freed */
409 : };
410 :
 : /*
 : * LRU-walk callback: decide the fate of one unreferenced dquot.  Clean,
 : * unreferenced dquots are moved to the dispose list for freeing; dirty
 : * ones are flushed and retried; busy ones are skipped for this pass.
 : */
411 : static enum lru_status
412 8864586 : xfs_qm_dquot_isolate(
413 : struct list_head *item,
414 : struct list_lru_one *lru,
415 : spinlock_t *lru_lock,
416 : void *arg)
417 : __releases(lru_lock) __acquires(lru_lock)
418 : {
419 8864586 : struct xfs_dquot *dqp = container_of(item,
420 : struct xfs_dquot, q_lru);
421 8864586 : struct xfs_qm_isolate *isol = arg;
422 :
423 8864586 : if (!xfs_dqlock_nowait(dqp))
424 731 : goto out_miss_busy;
425 :
426 : /*
427 : * If something else is freeing this dquot and hasn't yet removed it
428 : * from the LRU, leave it for the freeing task to complete the freeing
429 : * process rather than risk it being freed from under us here.
430 : */
431 8863855 : if (dqp->q_flags & XFS_DQFLAG_FREEING)
432 0 : goto out_miss_unlock;
433 :
434 : /*
435 : * This dquot has acquired a reference in the meantime remove it from
436 : * the freelist and try again.
437 : */
438 8863855 : if (dqp->q_nrefs) {
439 1836224 : xfs_dqunlock(dqp);
440 1836224 : XFS_STATS_INC(dqp->q_mount, xs_qm_dqwants);
441 :
442 1836224 : trace_xfs_dqreclaim_want(dqp);
443 1836224 : list_lru_isolate(lru, &dqp->q_lru);
444 1836224 : XFS_STATS_DEC(dqp->q_mount, xs_qm_dquot_unused);
445 1836224 : return LRU_REMOVED;
446 : }
447 :
448 : /*
449 : * If the dquot is dirty, flush it. If it's already being flushed, just
450 : * skip it so there is time for the IO to complete before we try to
451 : * reclaim it again on the next LRU pass.
452 : */
453 7027631 : if (!xfs_dqflock_nowait(dqp))
454 2325271 : goto out_miss_unlock;
455 :
456 4702360 : if (XFS_DQ_IS_DIRTY(dqp)) {
457 95890 : struct xfs_buf *bp = NULL;
458 95890 : int error;
459 :
460 95890 : trace_xfs_dqreclaim_dirty(dqp);
461 :
462 : /* we have to drop the LRU lock to flush the dquot */
463 95890 : spin_unlock(lru_lock);
464 :
465 95890 : error = xfs_qm_dqflush(dqp, &bp);
466 95890 : if (error)
467 17494 : goto out_unlock_dirty;
468 :
 : /* Queue the flushed buffer; shrink_scan submits the delwri list. */
469 78396 : xfs_buf_delwri_queue(bp, &isol->buffers);
470 78396 : xfs_buf_relse(bp);
471 78396 : goto out_unlock_dirty;
472 : }
473 4606470 : xfs_dqfunlock(dqp);
474 :
475 : /*
476 : * Prevent lookups now that we are past the point of no return.
477 : */
478 4606470 : dqp->q_flags |= XFS_DQFLAG_FREEING;
479 4606470 : xfs_dqunlock(dqp);
480 :
481 4606470 : ASSERT(dqp->q_nrefs == 0);
482 4606470 : list_lru_isolate_move(lru, &dqp->q_lru, &isol->dispose);
483 4606470 : XFS_STATS_DEC(dqp->q_mount, xs_qm_dquot_unused);
484 4606470 : trace_xfs_dqreclaim_done(dqp);
485 4606470 : XFS_STATS_INC(dqp->q_mount, xs_qm_dqreclaims);
486 4606470 : return LRU_REMOVED;
487 :
488 2325271 : out_miss_unlock:
489 2325271 : xfs_dqunlock(dqp);
490 2326002 : out_miss_busy:
491 2326002 : trace_xfs_dqreclaim_busy(dqp);
492 2326002 : XFS_STATS_INC(dqp->q_mount, xs_qm_dqreclaim_misses);
493 2326002 : return LRU_SKIP;
494 :
495 : out_unlock_dirty:
496 95890 : trace_xfs_dqreclaim_busy(dqp);
497 95890 : XFS_STATS_INC(dqp->q_mount, xs_qm_dqreclaim_misses);
498 95890 : xfs_dqunlock(dqp);
 : /* Retake the LRU lock dropped above before returning LRU_RETRY. */
499 95890 : spin_lock(lru_lock);
500 95890 : return LRU_RETRY;
501 : }
502 :
 : /*
 : * Shrinker scan callback: walk the dquot LRU, write back dirty dquots and
 : * free the clean ones isolated by xfs_qm_dquot_isolate().  Returns the
 : * number of objects freed.
 : */
503 : static unsigned long
504 102739 : xfs_qm_shrink_scan(
505 : struct shrinker *shrink,
506 : struct shrink_control *sc)
507 : {
508 102739 : struct xfs_quotainfo *qi = container_of(shrink,
509 : struct xfs_quotainfo, qi_shrinker);
510 102739 : struct xfs_qm_isolate isol;
511 102739 : unsigned long freed;
512 102739 : int error;
513 :
 : /* Only reclaim when the caller can both do FS work and block. */
514 102739 : if ((sc->gfp_mask & (__GFP_FS|__GFP_DIRECT_RECLAIM)) != (__GFP_FS|__GFP_DIRECT_RECLAIM))
515 : return 0;
516 :
517 102731 : INIT_LIST_HEAD(&isol.buffers);
518 102731 : INIT_LIST_HEAD(&isol.dispose);
519 :
520 102731 : freed = list_lru_shrink_walk(&qi->qi_lru, sc,
521 : xfs_qm_dquot_isolate, &isol);
522 :
523 102731 : error = xfs_buf_delwri_submit(&isol.buffers);
524 102731 : if (error)
525 0 : xfs_warn(NULL, "%s: dquot reclaim failed", __func__);
526 :
527 4709201 : while (!list_empty(&isol.dispose)) {
528 4606470 : struct xfs_dquot *dqp;
529 :
530 4606470 : dqp = list_first_entry(&isol.dispose, struct xfs_dquot, q_lru);
531 4606470 : list_del_init(&dqp->q_lru);
532 4606470 : xfs_qm_dqfree_one(dqp);
533 : }
534 :
535 : return freed;
536 : }
537 :
 : /* Shrinker count callback: report the number of reclaimable dquots. */
538 : static unsigned long
539 5050 : xfs_qm_shrink_count(
540 : struct shrinker *shrink,
541 : struct shrink_control *sc)
542 : {
543 5050 : struct xfs_quotainfo *qi = container_of(shrink,
544 : struct xfs_quotainfo, qi_shrinker);
545 :
546 5050 : return list_lru_shrink_count(&qi->qi_lru, sc);
547 : }
548 :
 : /*
 : * Copy the default limits for @type from the id-0 ("root") dquot into the
 : * in-core defaults.  If that dquot can't be read, silently keep the
 : * existing defaults.
 : */
549 : STATIC void
550 64874 : xfs_qm_set_defquota(
551 : struct xfs_mount *mp,
552 : xfs_dqtype_t type,
553 : struct xfs_quotainfo *qinf)
554 : {
555 64874 : struct xfs_dquot *dqp;
556 64874 : struct xfs_def_quota *defq;
557 64874 : int error;
558 :
559 64874 : error = xfs_qm_dqget_uncached(mp, 0, type, &dqp);
560 64874 : if (error)
561 8314 : return;
562 :
563 56560 : defq = xfs_get_defquota(qinf, xfs_dquot_type(dqp));
564 :
565 : /*
566 : * Timers and warnings have been already set, let's just set the
567 : * default limits for this quota type
568 : */
569 56560 : defq->blk.hard = dqp->q_blk.hardlimit;
570 56560 : defq->blk.soft = dqp->q_blk.softlimit;
571 56560 : defq->ino.hard = dqp->q_ino.hardlimit;
572 56560 : defq->ino.soft = dqp->q_ino.softlimit;
573 56560 : defq->rtb.hard = dqp->q_rtb.hardlimit;
574 56560 : defq->rtb.soft = dqp->q_rtb.softlimit;
 : /* Uncached dquot: destroy it rather than release a reference. */
575 56560 : xfs_qm_dqdestroy(dqp);
576 : }
577 :
578 : /* Initialize quota time limits from the root dquot. */
579 : static void
580 65634 : xfs_qm_init_timelimits(
581 : struct xfs_mount *mp,
582 : xfs_dqtype_t type)
583 : {
584 65634 : struct xfs_quotainfo *qinf = mp->m_quotainfo;
585 65634 : struct xfs_def_quota *defq;
586 65634 : struct xfs_dquot *dqp;
587 65634 : int error;
588 :
589 65634 : defq = xfs_get_defquota(qinf, type);
590 :
 : /* Start from the compile-time default grace periods. */
591 65634 : defq->blk.time = XFS_QM_BTIMELIMIT;
592 65634 : defq->ino.time = XFS_QM_ITIMELIMIT;
593 65634 : defq->rtb.time = XFS_QM_RTBTIMELIMIT;
594 :
595 : /*
596 : * We try to get the limits from the superuser's limits fields.
597 : * This is quite hacky, but it is standard quota practice.
598 : *
599 : * Since we may not have done a quotacheck by this point, just read
600 : * the dquot without attaching it to any hashtables or lists.
601 : */
602 65634 : error = xfs_qm_dqget_uncached(mp, 0, type, &dqp);
603 65634 : if (error)
604 9074 : return;
605 :
606 : /*
607 : * The timers in the id-0 dquot set the grace period granted to a
608 : * user or group before further writes are refused.  A value of
609 : * zero means the compile-time default set above is kept.
610 : */
611 56560 : if (dqp->q_blk.timer)
612 50 : defq->blk.time = dqp->q_blk.timer;
613 56560 : if (dqp->q_ino.timer)
614 48 : defq->ino.time = dqp->q_ino.timer;
615 56560 : if (dqp->q_rtb.timer)
616 8 : defq->rtb.time = dqp->q_rtb.timer;
617 :
618 56560 : xfs_qm_dqdestroy(dqp);
619 : }
620 :
621 : /*
622 : * This initializes all the quota information that's kept in the
623 : * mount structure
624 : */
625 : STATIC int
626 21894 : xfs_qm_init_quotainfo(
627 : struct xfs_mount *mp)
628 : {
629 21894 : struct xfs_quotainfo *qinf;
630 21894 : int error;
631 :
632 21894 : ASSERT(XFS_IS_QUOTA_ON(mp));
633 :
634 21894 : qinf = mp->m_quotainfo = kmem_zalloc(sizeof(struct xfs_quotainfo), 0);
635 :
636 21894 : error = list_lru_init(&qinf->qi_lru);
637 21894 : if (error)
638 0 : goto out_free_qinf;
639 :
640 : /*
641 : * See if quotainodes are setup, and if not, allocate them,
642 : * and change the superblock accordingly.
643 : */
644 21894 : error = xfs_qm_init_quotainos(mp);
645 21894 : if (error)
646 16 : goto out_free_lru;
647 :
648 21878 : INIT_RADIX_TREE(&qinf->qi_uquota_tree, GFP_NOFS);
649 21878 : INIT_RADIX_TREE(&qinf->qi_gquota_tree, GFP_NOFS);
650 21878 : INIT_RADIX_TREE(&qinf->qi_pquota_tree, GFP_NOFS);
651 21878 : mutex_init(&qinf->qi_tree_lock);
652 :
653 : /* mutex used to serialize quotaoffs */
654 21878 : mutex_init(&qinf->qi_quotaofflock);
655 :
656 : /* Precalc some constants */
657 21878 : qinf->qi_dqchunklen = XFS_FSB_TO_BB(mp, XFS_DQUOT_CLUSTER_SIZE_FSB);
658 21878 : qinf->qi_dqperchunk = xfs_calc_dquots_per_chunk(qinf->qi_dqchunklen);
 : /* Clamp quota expiry timestamps to the on-disk timestamp format. */
659 21878 : if (xfs_has_bigtime(mp)) {
660 21825 : qinf->qi_expiry_min =
661 : xfs_dq_bigtime_to_unix(XFS_DQ_BIGTIME_EXPIRY_MIN);
662 21825 : qinf->qi_expiry_max =
663 : xfs_dq_bigtime_to_unix(XFS_DQ_BIGTIME_EXPIRY_MAX);
664 : } else {
665 53 : qinf->qi_expiry_min = XFS_DQ_LEGACY_EXPIRY_MIN;
666 53 : qinf->qi_expiry_max = XFS_DQ_LEGACY_EXPIRY_MAX;
667 : }
668 21878 : trace_xfs_quota_expiry_range(mp, qinf->qi_expiry_min,
669 : qinf->qi_expiry_max);
670 :
671 21878 : mp->m_qflags |= (mp->m_sb.sb_qflags & XFS_ALL_QUOTA_CHKD);
672 :
673 21878 : xfs_qm_init_timelimits(mp, XFS_DQTYPE_USER);
674 21878 : xfs_qm_init_timelimits(mp, XFS_DQTYPE_GROUP);
675 21878 : xfs_qm_init_timelimits(mp, XFS_DQTYPE_PROJ);
676 :
677 21878 : if (XFS_IS_UQUOTA_ON(mp))
678 21714 : xfs_qm_set_defquota(mp, XFS_DQTYPE_USER, qinf);
679 21878 : if (XFS_IS_GQUOTA_ON(mp))
680 21594 : xfs_qm_set_defquota(mp, XFS_DQTYPE_GROUP, qinf);
681 21878 : if (XFS_IS_PQUOTA_ON(mp))
682 21566 : xfs_qm_set_defquota(mp, XFS_DQTYPE_PROJ, qinf);
683 :
 : /* Register the memory shrinker that reclaims unused dquots. */
684 21878 : qinf->qi_shrinker.count_objects = xfs_qm_shrink_count;
685 21878 : qinf->qi_shrinker.scan_objects = xfs_qm_shrink_scan;
686 21878 : qinf->qi_shrinker.seeks = DEFAULT_SEEKS;
687 21878 : qinf->qi_shrinker.flags = SHRINKER_NUMA_AWARE;
688 :
689 21878 : error = register_shrinker(&qinf->qi_shrinker, "xfs-qm:%s",
690 21878 : mp->m_super->s_id);
691 21878 : if (error)
692 0 : goto out_free_inos;
693 :
694 21878 : xfs_hooks_init(&qinf->qi_mod_ino_dqtrx_hooks);
695 21878 : xfs_hooks_init(&qinf->qi_apply_dqtrx_hooks);
696 :
697 21878 : return 0;
698 :
 : /* Error unwinding: release resources in reverse order of acquisition. */
699 : out_free_inos:
700 0 : mutex_destroy(&qinf->qi_quotaofflock);
701 0 : mutex_destroy(&qinf->qi_tree_lock);
702 0 : xfs_qm_destroy_quotainos(qinf);
703 16 : out_free_lru:
704 16 : list_lru_destroy(&qinf->qi_lru);
705 16 : out_free_qinf:
706 16 : kmem_free(qinf);
707 16 : mp->m_quotainfo = NULL;
708 16 : return error;
709 : }
710 :
711 : /*
712 : * Gets called when unmounting a filesystem or when all quotas get
713 : * turned off.
714 : * This purges the quota inodes, destroys locks and frees itself.
715 : */
716 : void
717 21883 : xfs_qm_destroy_quotainfo(
718 : struct xfs_mount *mp)
719 : {
720 21883 : struct xfs_quotainfo *qi;
721 :
722 21883 : qi = mp->m_quotainfo;
723 21883 : ASSERT(qi != NULL);
724 :
 : /* Tear down in reverse order of xfs_qm_init_quotainfo(). */
725 21883 : unregister_shrinker(&qi->qi_shrinker);
726 21883 : list_lru_destroy(&qi->qi_lru);
727 21883 : xfs_qm_destroy_quotainos(qi);
728 21883 : mutex_destroy(&qi->qi_tree_lock);
729 21883 : mutex_destroy(&qi->qi_quotaofflock);
730 21883 : kmem_free(qi);
731 21883 : mp->m_quotainfo = NULL;
732 21883 : }
733 :
734 : /*
735 : * Create an inode and return with a reference already taken, but unlocked
736 : * This is how we create quota inodes
737 : */
738 : STATIC int
739 8313 : xfs_qm_qino_alloc(
740 : struct xfs_mount *mp,
741 : struct xfs_inode **ipp,
742 : unsigned int flags)
743 : {
744 8313 : struct xfs_trans *tp;
745 8313 : int error;
746 8313 : bool need_alloc = true;
747 :
748 8313 : *ipp = NULL;
749 : /*
750 : * With superblock that doesn't have separate pquotino, we
751 : * share an inode between gquota and pquota. If the on-disk
752 : * superblock has GQUOTA and the filesystem is now mounted
753 : * with PQUOTA, just use sb_gquotino for sb_pquotino and
754 : * vice-versa.
755 : */
756 8313 : if (!xfs_has_pquotino(mp) &&
757 18 : (flags & (XFS_QMOPT_PQUOTA|XFS_QMOPT_GQUOTA))) {
758 10 : xfs_ino_t ino = NULLFSINO;
759 :
760 10 : if ((flags & XFS_QMOPT_PQUOTA) &&
761 8 : (mp->m_sb.sb_gquotino != NULLFSINO)) {
762 2 : ino = mp->m_sb.sb_gquotino;
 : /* Both quotino fields set on a pre-pquotino sb is corruption. */
763 2 : if (XFS_IS_CORRUPT(mp,
764 : mp->m_sb.sb_pquotino != NULLFSINO)) {
765 0 : xfs_fs_mark_sick(mp, XFS_SICK_FS_PQUOTA);
766 0 : return -EFSCORRUPTED;
767 : }
768 8 : } else if ((flags & XFS_QMOPT_GQUOTA) &&
769 2 : (mp->m_sb.sb_pquotino != NULLFSINO)) {
770 0 : ino = mp->m_sb.sb_pquotino;
771 0 : if (XFS_IS_CORRUPT(mp,
772 : mp->m_sb.sb_gquotino != NULLFSINO)) {
773 0 : xfs_fs_mark_sick(mp, XFS_SICK_FS_GQUOTA);
774 0 : return -EFSCORRUPTED;
775 : }
776 : }
777 2 : if (ino != NULLFSINO) {
 : /* Reuse the shared quota inode instead of allocating one. */
778 2 : error = xfs_iget(mp, NULL, ino, 0, 0, ipp);
779 2 : if (error)
780 : return error;
781 2 : mp->m_sb.sb_gquotino = NULLFSINO;
782 2 : mp->m_sb.sb_pquotino = NULLFSINO;
783 2 : need_alloc = false;
784 : }
785 : }
786 :
787 16624 : error = xfs_trans_alloc(mp, &M_RES(mp)->tr_create,
788 8339 : need_alloc ? XFS_QM_QINOCREATE_SPACE_RES(mp) : 0,
789 : 0, 0, &tp);
790 8313 : if (error)
791 : return error;
792 :
793 8297 : if (need_alloc) {
794 8295 : xfs_ino_t ino;
795 :
796 8295 : error = xfs_dialloc(&tp, 0, S_IFREG, &ino);
797 8295 : if (!error)
798 8295 : error = xfs_init_new_inode(&nop_mnt_idmap, tp, NULL, ino,
799 : S_IFREG, 1, 0, 0, false, ipp);
800 8295 : if (error) {
801 0 : xfs_trans_cancel(tp);
802 0 : return error;
803 : }
804 : }
805 :
806 : /*
807 : * Make the changes in the superblock, and log those too.
808 : * sbfields arg may contain fields other than *QUOTINO;
809 : * VERSIONNUM for example.
810 : */
811 8297 : spin_lock(&mp->m_sb_lock);
812 8297 : if (flags & XFS_QMOPT_SBVERSION) {
813 2833 : ASSERT(!xfs_has_quota(mp));
814 :
815 2833 : xfs_add_quota(mp);
816 2833 : mp->m_sb.sb_uquotino = NULLFSINO;
817 2833 : mp->m_sb.sb_gquotino = NULLFSINO;
818 2833 : mp->m_sb.sb_pquotino = NULLFSINO;
819 :
820 : /* qflags will get updated fully _after_ quotacheck */
821 2833 : mp->m_sb.sb_qflags = mp->m_qflags & XFS_ALL_QUOTA_ACCT;
822 : }
823 8297 : if (flags & XFS_QMOPT_UQUOTA)
824 2795 : mp->m_sb.sb_uquotino = (*ipp)->i_ino;
825 5502 : else if (flags & XFS_QMOPT_GQUOTA)
826 2751 : mp->m_sb.sb_gquotino = (*ipp)->i_ino;
827 : else
828 2751 : mp->m_sb.sb_pquotino = (*ipp)->i_ino;
829 8297 : spin_unlock(&mp->m_sb_lock);
830 8297 : xfs_log_sb(tp);
831 :
832 8297 : error = xfs_trans_commit(tp);
833 8297 : if (error) {
834 0 : ASSERT(xfs_is_shutdown(mp));
835 0 : xfs_alert(mp, "%s failed (error %d)!", __func__, error);
836 : }
837 8297 : if (need_alloc) {
 : /* Finish setting up the freshly allocated inode. */
838 8295 : xfs_iunlock(*ipp, XFS_ILOCK_EXCL);
839 8295 : xfs_finish_inode_setup(*ipp);
840 : }
841 : return error;
842 : }
843 :
844 :
 : /*
 : * Zero the counters (and, for non-root ids, the timers and warnings) of
 : * every dquot in the buffer @bp, repairing any corrupt dqblks found,
 : * ahead of a quotacheck recount.
 : */
845 : STATIC void
846 803 : xfs_qm_reset_dqcounts(
847 : struct xfs_mount *mp,
848 : struct xfs_buf *bp,
849 : xfs_dqid_t id,
850 : xfs_dqtype_t type)
851 : {
852 803 : struct xfs_dqblk *dqb;
853 803 : int j;
854 :
855 803 : trace_xfs_reset_dqcounts(bp, _RET_IP_);
856 :
857 : /*
858 : * Reset all counters and timers. They'll be
859 : * started afresh by xfs_qm_quotacheck.
860 : */
861 : #ifdef DEBUG
862 803 : j = (int)XFS_FSB_TO_B(mp, XFS_DQUOT_CLUSTER_SIZE_FSB) /
863 : sizeof(struct xfs_dqblk);
864 803 : ASSERT(mp->m_quotainfo->qi_dqperchunk == j);
865 : #endif
866 803 : dqb = bp->b_addr;
867 36713 : for (j = 0; j < mp->m_quotainfo->qi_dqperchunk; j++) {
868 35910 : struct xfs_disk_dquot *ddq;
869 :
870 35910 : ddq = (struct xfs_disk_dquot *)&dqb[j];
871 :
872 : /*
873 : * Do a sanity check, and if needed, repair the dqblk. Don't
874 : * output any warnings because it's perfectly possible to
875 : * find uninitialised dquot blks. See comment in
876 : * xfs_dquot_verify.
877 : */
878 35910 : if (xfs_dqblk_verify(mp, &dqb[j], id + j) ||
879 35820 : (dqb[j].dd_diskdq.d_type & XFS_DQTYPE_REC_MASK) != type)
880 180 : xfs_dqblk_repair(mp, &dqb[j], id + j, type);
881 :
882 : /*
883 : * Reset type in case we are reusing group quota file for
884 : * project quotas or vice versa
885 : */
886 35910 : ddq->d_type = type;
887 35910 : ddq->d_bcount = 0;
888 35910 : ddq->d_icount = 0;
889 35910 : ddq->d_rtbcount = 0;
890 :
891 : /*
892 : * dquot id 0 stores the default grace period and the maximum
893 : * warning limit that were set by the administrator, so we
894 : * should not reset them.
895 : */
896 35910 : if (ddq->d_id != 0) {
897 35724 : ddq->d_btimer = 0;
898 35724 : ddq->d_itimer = 0;
899 35724 : ddq->d_rtbtimer = 0;
900 35724 : ddq->d_bwarns = 0;
901 35724 : ddq->d_iwarns = 0;
902 35724 : ddq->d_rtbwarns = 0;
903 35724 : if (xfs_has_bigtime(mp))
904 35284 : ddq->d_type |= XFS_DQTYPE_BIGTIME;
905 : }
906 :
 : /* Recompute the per-dqblk CRC after the in-place modifications. */
907 35910 : if (xfs_has_crc(mp)) {
908 35460 : xfs_update_cksum((char *)&dqb[j],
909 : sizeof(struct xfs_dqblk),
910 : XFS_DQUOT_CRC_OFF);
911 : }
912 : }
913 803 : }
914 :
 : /*
 : * Read @blkcnt dquot blocks starting at @bno, reset the counters of every
 : * dquot within (starting at id @firstid) and queue the buffers on
 : * @buffer_list for delayed write.
 : */
915 : STATIC int
916 753 : xfs_qm_reset_dqcounts_all(
917 : struct xfs_mount *mp,
918 : xfs_dqid_t firstid,
919 : xfs_fsblock_t bno,
920 : xfs_filblks_t blkcnt,
921 : xfs_dqtype_t type,
922 : struct list_head *buffer_list)
923 : {
924 753 : struct xfs_buf *bp;
925 753 : int error = 0;
926 :
927 753 : ASSERT(blkcnt > 0);
928 :
929 : /*
930 : * Blkcnt arg can be a very big number, and might even be
931 : * larger than the log itself. So, we have to break it up into
932 : * manageable-sized transactions.
933 : * Note that we don't start a permanent transaction here; we might
934 : * not be able to get a log reservation for the whole thing up front,
935 : * and we don't really care to either, because we just discard
936 : * everything if we were to crash in the middle of this loop.
937 : */
938 1556 : while (blkcnt--) {
939 803 : error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp,
940 803 : XFS_FSB_TO_DADDR(mp, bno),
941 803 : mp->m_quotainfo->qi_dqchunklen, 0, &bp,
942 : &xfs_dquot_buf_ops);
943 :
944 : /*
945 : * CRC and validation errors will return a EFSCORRUPTED here. If
946 : * this occurs, re-read without CRC validation so that we can
947 : * repair the damage via xfs_qm_reset_dqcounts(). This process
948 : * will leave a trace in the log indicating corruption has
949 : * been detected.
950 : */
951 803 : if (error == -EFSCORRUPTED) {
952 3 : error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp,
953 3 : XFS_FSB_TO_DADDR(mp, bno),
954 3 : mp->m_quotainfo->qi_dqchunklen, 0, &bp,
955 : NULL);
956 : }
957 :
958 803 : if (error)
959 : break;
960 :
961 : /*
962 : * A corrupt buffer might not have a verifier attached, so
963 : * make sure we have the correct one attached before writeback
964 : * occurs.
965 : */
966 803 : bp->b_ops = &xfs_dquot_buf_ops;
967 803 : xfs_qm_reset_dqcounts(mp, bp, firstid, type);
968 803 : xfs_buf_delwri_queue(bp, buffer_list);
969 803 : xfs_buf_relse(bp);
970 :
971 : /* goto the next block. */
972 803 : bno++;
973 803 : firstid += mp->m_quotainfo->qi_dqperchunk;
974 : }
975 :
976 753 : return error;
977 : }
978 :
979 : /*
980 : * Iterate over all allocated dquot blocks in this quota inode, zeroing all
981 : * counters for every chunk of dquots that we find.
982 : */
983 : STATIC int
984 8495 : xfs_qm_reset_dqcounts_buf(
985 : struct xfs_mount *mp,
986 : struct xfs_inode *qip,
987 : xfs_dqtype_t type,
988 : struct list_head *buffer_list)
989 : {
990 8495 : struct xfs_bmbt_irec *map;
991 8495 : int i, nmaps; /* number of map entries */
992 8495 : int error; /* return value */
993 8495 : xfs_fileoff_t lblkno;
994 8495 : xfs_filblks_t maxlblkcnt;
995 8495 : xfs_dqid_t firstid;
996 8495 : xfs_fsblock_t rablkno;
997 8495 : xfs_filblks_t rablkcnt;
998 :
999 8495 : error = 0;
1000 : /*
1001 : * This looks racy, but we can't keep an inode lock across a
1002 : * trans_reserve. But, this gets called during quotacheck, and that
1003 : * happens only at mount time which is single threaded.
1004 : */
1005 8495 : if (qip->i_nblocks == 0)
1006 : return 0;
1007 :
1008 184 : map = kmem_alloc(XFS_DQITER_MAP_SIZE * sizeof(*map), 0);
1009 :
1010 184 : lblkno = 0;
1011 184 : maxlblkcnt = XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes);
1012 472 : do {
1013 472 : uint lock_mode;
1014 :
1015 472 : nmaps = XFS_DQITER_MAP_SIZE;
1016 : /*
1017 : * We aren't changing the inode itself. Just changing
1018 : * some of its data. No new blocks are added here, and
1019 : * the inode is never added to the transaction.
1020 : */
1021 472 : lock_mode = xfs_ilock_data_map_shared(qip);
1022 472 : error = xfs_bmapi_read(qip, lblkno, maxlblkcnt - lblkno,
1023 : map, &nmaps, 0);
1024 472 : xfs_iunlock(qip, lock_mode);
1025 472 : if (error)
1026 : break;
1027 :
1028 472 : ASSERT(nmaps <= XFS_DQITER_MAP_SIZE);
1029 1967 : for (i = 0; i < nmaps; i++) {
1030 1495 : ASSERT(map[i].br_startblock != DELAYSTARTBLOCK);
1031 1495 : ASSERT(map[i].br_blockcount);
1032 :
1033 :
1034 1495 : lblkno += map[i].br_blockcount;
1035 :
 : /* Holes contain no dquots; skip them. */
1036 1495 : if (map[i].br_startblock == HOLESTARTBLOCK)
1037 742 : continue;
1038 :
1039 753 : firstid = (xfs_dqid_t) map[i].br_startoff *
1040 753 : mp->m_quotainfo->qi_dqperchunk;
1041 : /*
1042 : * Do a read-ahead on the next extent.
1043 : */
1044 753 : if ((i+1 < nmaps) &&
1045 741 : (map[i+1].br_startblock != HOLESTARTBLOCK)) {
1046 10 : rablkcnt = map[i+1].br_blockcount;
1047 10 : rablkno = map[i+1].br_startblock;
1048 20 : while (rablkcnt--) {
1049 10 : xfs_buf_readahead(mp->m_ddev_targp,
1050 10 : XFS_FSB_TO_DADDR(mp, rablkno),
1051 10 : mp->m_quotainfo->qi_dqchunklen,
1052 : &xfs_dquot_buf_ops);
1053 10 : rablkno++;
1054 : }
1055 : }
1056 : /*
1057 : * Iterate thru all the blks in the extent and
1058 : * reset the counters of all the dquots inside them.
1059 : */
1060 753 : error = xfs_qm_reset_dqcounts_all(mp, firstid,
1061 : map[i].br_startblock,
1062 : map[i].br_blockcount,
1063 : type, buffer_list);
1064 753 : if (error)
1065 0 : goto out;
1066 : }
1067 472 : } while (nmaps > 0);
1068 :
1069 184 : out:
1070 184 : kmem_free(map);
1071 184 : return error;
1072 : }
1073 :
1074 : /*
1075 : * Called by dqusage_adjust in doing a quotacheck.
1076 : *
1077 : * Given the inode, and a dquot id this updates both the incore dqout as well
1078 : * as the buffer copy. This is so that once the quotacheck is done, we can
1079 : * just log all the buffers, as opposed to logging numerous updates to
1080 : * individual dquots.
1081 : */
STATIC int
xfs_qm_quotacheck_dqadjust(
	struct xfs_inode	*ip,		/* inode whose usage we account */
	xfs_dqtype_t		type,		/* user/group/project */
	xfs_qcnt_t		nblks,		/* data device blocks used */
	xfs_qcnt_t		rtblks)		/* realtime blocks used */
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_dquot	*dqp;
	xfs_dqid_t		id;
	int			error;

	id = xfs_qm_id_for_quotatype(ip, type);
	/* Grab (allocating on disk if necessary) the dquot for this id. */
	error = xfs_qm_dqget(mp, id, type, true, &dqp);
	if (error) {
		/*
		 * Shouldn't be able to turn off quotas here.
		 */
		ASSERT(error != -ESRCH);
		ASSERT(error != -ENOENT);
		return error;
	}

	trace_xfs_dqadjust(dqp);

	/*
	 * Adjust the inode count and the block count to reflect this inode's
	 * resource usage.
	 */
	dqp->q_ino.count++;
	dqp->q_ino.reserved++;
	if (nblks) {
		dqp->q_blk.count += nblks;
		dqp->q_blk.reserved += nblks;
	}
	if (rtblks) {
		dqp->q_rtb.count += rtblks;
		dqp->q_rtb.reserved += rtblks;
	}

	/*
	 * Set default limits, adjust timers (since we changed usages)
	 *
	 * There are no timers for the default values set in the root dquot.
	 */
	if (dqp->q_id) {
		xfs_qm_adjust_dqlimits(dqp);
		xfs_qm_adjust_dqtimers(dqp);
	}

	/* Mark dirty so the final quotacheck flush writes it back. */
	dqp->q_flags |= XFS_DQFLAG_DIRTY;
	xfs_qm_dqput(dqp);
	return 0;
}
1136 :
1137 : /*
1138 : * callback routine supplied to bulkstat(). Given an inumber, find its
1139 : * dquots and update them to account for resources taken by that inode.
1140 : */
1141 : /* ARGSUSED */
STATIC int
xfs_qm_dqusage_adjust(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	xfs_ino_t		ino,
	void			*data)		/* unused */
{
	struct xfs_inode	*ip;
	xfs_qcnt_t		nblks;
	xfs_filblks_t		rtblks = 0;	/* total rt blks */
	int			error;

	ASSERT(XFS_IS_QUOTA_ON(mp));

	/*
	 * rootino must have its resources accounted for, not so with the quota
	 * inodes.
	 */
	if (xfs_is_quota_inode(&mp->m_sb, ino))
		return 0;

	/*
	 * We don't _need_ to take the ilock EXCL here because quotacheck runs
	 * at mount time and therefore nobody will be racing chown/chproj.
	 */
	error = xfs_iget(mp, tp, ino, XFS_IGET_DONTCACHE, 0, &ip);
	if (error == -EINVAL || error == -ENOENT)
		return 0;
	if (error)
		return error;

	ASSERT(ip->i_delayed_blks == 0);

	if (XFS_IS_REALTIME_INODE(ip)) {
		struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, XFS_DATA_FORK);

		/* Realtime extents are counted separately into rtblks. */
		error = xfs_iread_extents(tp, ip, XFS_DATA_FORK);
		if (error)
			goto error0;

		xfs_bmap_count_leaves(ifp, &rtblks);
	}

	/* Data device blocks are the total minus the realtime blocks. */
	nblks = (xfs_qcnt_t)ip->i_nblocks - rtblks;

	/*
	 * Add the (disk blocks and inode) resources occupied by this
	 * inode to its dquots. We do this adjustment in the incore dquot,
	 * and also copy the changes to its buffer.
	 * We don't care about putting these changes in a transaction
	 * envelope because if we crash in the middle of a 'quotacheck'
	 * we have to start from the beginning anyway.
	 * Once we're done, we'll log all the dquot bufs.
	 *
	 * The *QUOTA_ON checks below may look pretty racy, but quotachecks
	 * and quotaoffs don't race. (Quotachecks happen at mount time only).
	 */
	if (XFS_IS_UQUOTA_ON(mp)) {
		error = xfs_qm_quotacheck_dqadjust(ip, XFS_DQTYPE_USER, nblks,
				rtblks);
		if (error)
			goto error0;
	}

	if (XFS_IS_GQUOTA_ON(mp)) {
		error = xfs_qm_quotacheck_dqadjust(ip, XFS_DQTYPE_GROUP, nblks,
				rtblks);
		if (error)
			goto error0;
	}

	if (XFS_IS_PQUOTA_ON(mp)) {
		error = xfs_qm_quotacheck_dqadjust(ip, XFS_DQTYPE_PROJ, nblks,
				rtblks);
		if (error)
			goto error0;
	}

error0:
	/* Drop the reference taken by xfs_iget; reached on success too. */
	xfs_irele(ip);
	return error;
}
1224 :
/*
 * Flush one dirty dquot to its backing buffer and queue that buffer for
 * delayed write.  Used as the dquot-walk callback at the end of quotacheck.
 * Returns -EAGAIN if the caller should retry this dquot after pushing the
 * delwri queue.
 */
STATIC int
xfs_qm_flush_one(
	struct xfs_dquot	*dqp,
	void			*data)		/* struct list_head delwri queue */
{
	struct xfs_mount	*mp = dqp->q_mount;
	struct list_head	*buffer_list = data;
	struct xfs_buf		*bp = NULL;
	int			error = 0;

	xfs_dqlock(dqp);
	if (dqp->q_flags & XFS_DQFLAG_FREEING)
		goto out_unlock;
	if (!XFS_DQ_IS_DIRTY(dqp))
		goto out_unlock;

	/*
	 * The only way the dquot is already flush locked by the time quotacheck
	 * gets here is if reclaim flushed it before the dqadjust walk dirtied
	 * it for the final time. Quotacheck collects all dquot bufs in the
	 * local delwri queue before dquots are dirtied, so reclaim can't have
	 * possibly queued it for I/O. The only way out is to push the buffer to
	 * cycle the flush lock.
	 */
	if (!xfs_dqflock_nowait(dqp)) {
		/* buf is pinned in-core by delwri list */
		error = xfs_buf_incore(mp->m_ddev_targp, dqp->q_blkno,
				mp->m_quotainfo->qi_dqchunklen, 0, &bp);
		if (error)
			goto out_unlock;

		if (!(bp->b_flags & _XBF_DELWRI_Q)) {
			error = -EAGAIN;
			xfs_buf_relse(bp);
			goto out_unlock;
		}
		xfs_buf_unlock(bp);

		xfs_buf_delwri_pushbuf(bp, buffer_list);
		xfs_buf_rele(bp);

		/* Buffer pushed; tell the caller to come back for this dquot. */
		error = -EAGAIN;
		goto out_unlock;
	}

	/* Got the flush lock: write the dquot into its buffer... */
	error = xfs_qm_dqflush(dqp, &bp);
	if (error)
		goto out_unlock;

	/* ...and queue the buffer for delayed write. */
	xfs_buf_delwri_queue(bp, buffer_list);
	xfs_buf_relse(bp);
out_unlock:
	xfs_dqunlock(dqp);
	return error;
}
1280 :
1281 : /*
1282 : * Walk thru all the filesystem inodes and construct a consistent view
1283 : * of the disk quota world. If the quotacheck fails, disable quotas.
1284 : */
STATIC int
xfs_qm_quotacheck(
	xfs_mount_t	*mp)
{
	int		error, error2;
	uint		flags;
	LIST_HEAD	(buffer_list);
	struct xfs_inode *uip = mp->m_quotainfo->qi_uquotaip;
	struct xfs_inode *gip = mp->m_quotainfo->qi_gquotaip;
	struct xfs_inode *pip = mp->m_quotainfo->qi_pquotaip;

	flags = 0;

	ASSERT(uip || gip || pip);
	ASSERT(XFS_IS_QUOTA_ON(mp));

	xfs_notice(mp, "Quotacheck needed: Please wait.");

	/*
	 * First we go thru all the dquots on disk, USR and GRP/PRJ, and reset
	 * their counters to zero. We need a clean slate.
	 * We don't log our changes till later.
	 */
	if (uip) {
		error = xfs_qm_reset_dqcounts_buf(mp, uip, XFS_DQTYPE_USER,
					 &buffer_list);
		if (error)
			goto error_return;
		flags |= XFS_UQUOTA_CHKD;
	}

	if (gip) {
		error = xfs_qm_reset_dqcounts_buf(mp, gip, XFS_DQTYPE_GROUP,
					 &buffer_list);
		if (error)
			goto error_return;
		flags |= XFS_GQUOTA_CHKD;
	}

	if (pip) {
		error = xfs_qm_reset_dqcounts_buf(mp, pip, XFS_DQTYPE_PROJ,
					 &buffer_list);
		if (error)
			goto error_return;
		flags |= XFS_PQUOTA_CHKD;
	}

	/* Walk every inode and accumulate its usage into the dquots. */
	error = xfs_iwalk_threaded(mp, 0, 0, xfs_qm_dqusage_adjust, 0, true,
			NULL);

	/*
	 * On error, the inode walk may have partially populated the dquot
	 * caches. We must purge them before disabling quota and tearing down
	 * the quotainfo, or else the dquots will leak.
	 */
	if (error)
		goto error_purge;

	/*
	 * We've made all the changes that we need to make incore. Flush them
	 * down to disk buffers if everything was updated successfully.
	 */
	if (XFS_IS_UQUOTA_ON(mp)) {
		error = xfs_qm_dquot_walk(mp, XFS_DQTYPE_USER, xfs_qm_flush_one,
					  &buffer_list);
	}
	if (XFS_IS_GQUOTA_ON(mp)) {
		error2 = xfs_qm_dquot_walk(mp, XFS_DQTYPE_GROUP, xfs_qm_flush_one,
					   &buffer_list);
		if (!error)
			error = error2;
	}
	if (XFS_IS_PQUOTA_ON(mp)) {
		error2 = xfs_qm_dquot_walk(mp, XFS_DQTYPE_PROJ, xfs_qm_flush_one,
					   &buffer_list);
		if (!error)
			error = error2;
	}

	/* Write out everything we queued above; keep the first error. */
	error2 = xfs_buf_delwri_submit(&buffer_list);
	if (!error)
		error = error2;

	/*
	 * We can get this error if we couldn't do a dquot allocation inside
	 * xfs_qm_dqusage_adjust (via bulkstat). We don't care about the
	 * dirty dquots that might be cached, we just want to get rid of them
	 * and turn quotaoff. The dquots won't be attached to any of the inodes
	 * at this point (because we intentionally didn't in dqget_noattach).
	 */
	if (error)
		goto error_purge;

	/*
	 * If one type of quotas is off, then it will lose its
	 * quotachecked status, since we won't be doing accounting for
	 * that type anymore.
	 */
	mp->m_qflags &= ~XFS_ALL_QUOTA_CHKD;
	mp->m_qflags |= flags;

error_return:
	xfs_buf_delwri_cancel(&buffer_list);

	if (error) {
		xfs_warn(mp,
	"Quotacheck: Unsuccessful (Error %d): Disabling quotas.",
			error);
		/*
		 * We must turn off quotas.
		 */
		ASSERT(mp->m_quotainfo != NULL);
		xfs_qm_destroy_quotainfo(mp);
		if (xfs_mount_reset_sbqflags(mp)) {
			xfs_warn(mp,
				"Quotacheck: Failed to reset quota flags.");
		}
		xfs_fs_mark_sick(mp, XFS_SICK_FS_QUOTACHECK);
	} else {
		xfs_notice(mp, "Quotacheck: Done.");
		xfs_fs_mark_healthy(mp, XFS_SICK_FS_QUOTACHECK);
	}

	return error;

error_purge:
	/*
	 * On error, we may have inodes queued for inactivation. This may try
	 * to attach dquots to the inode before running cleanup operations on
	 * the inode and this can race with the xfs_qm_destroy_quotainfo() call
	 * below that frees mp->m_quotainfo. To avoid this race, flush all the
	 * pending inodegc operations before we purge the dquots from memory,
	 * ensuring that background inactivation is idle whilst we turn off
	 * quotas.
	 */
	xfs_inodegc_flush(mp);
	xfs_qm_dqpurge_all(mp);
	goto error_return;

}
1425 :
1426 : /*
1427 : * This is called from xfs_mountfs to start quotas and initialize all
1428 : * necessary data structures like quotainfo. This is also responsible for
1429 : * running a quotacheck as necessary. We are guaranteed that the superblock
1430 : * is consistently read in at this point.
1431 : *
1432 : * If we fail here, the mount will continue with quota turned off. We don't
1433 : * need to inidicate success or failure at all.
1434 : */
void
xfs_qm_mount_quotas(
	struct xfs_mount	*mp)
{
	int			error = 0;
	uint			sbf;

	/*
	 * If quotas on realtime volumes is not supported, we disable
	 * quotas immediately.
	 */
	if (mp->m_sb.sb_rextents) {
		xfs_notice(mp, "Cannot turn on quotas for realtime filesystem");
		mp->m_qflags = 0;
		goto write_changes;
	}

	ASSERT(XFS_IS_QUOTA_ON(mp));

	/*
	 * Allocate the quotainfo structure inside the mount struct, and
	 * create quotainode(s), and change/rev superblock if necessary.
	 */
	error = xfs_qm_init_quotainfo(mp);
	if (error) {
		/*
		 * We must turn off quotas.
		 */
		ASSERT(mp->m_quotainfo == NULL);
		mp->m_qflags = 0;
		goto write_changes;
	}
	/*
	 * If any of the quotas are not consistent, do a quotacheck.
	 */
	if (XFS_QM_NEED_QUOTACHECK(mp)) {
		error = xfs_qm_quotacheck(mp);
		if (error) {
			/* Quotacheck failed and disabled quotas. */
			return;
		}
	}
	/*
	 * If one type of quotas is off, then it will lose its
	 * quotachecked status, since we won't be doing accounting for
	 * that type anymore.
	 */
	if (!XFS_IS_UQUOTA_ON(mp))
		mp->m_qflags &= ~XFS_UQUOTA_CHKD;
	if (!XFS_IS_GQUOTA_ON(mp))
		mp->m_qflags &= ~XFS_GQUOTA_CHKD;
	if (!XFS_IS_PQUOTA_ON(mp))
		mp->m_qflags &= ~XFS_PQUOTA_CHKD;

write_changes:
	/*
	 * We actually don't have to acquire the m_sb_lock at all.
	 * This can only be called from mount, and that's single threaded. XXX
	 */
	spin_lock(&mp->m_sb_lock);
	sbf = mp->m_sb.sb_qflags;
	mp->m_sb.sb_qflags = mp->m_qflags & XFS_MOUNT_QUOTA_ALL;
	spin_unlock(&mp->m_sb_lock);

	/* Only touch the superblock if the quota flags actually changed. */
	if (sbf != (mp->m_qflags & XFS_MOUNT_QUOTA_ALL)) {
		if (xfs_sync_sb(mp, false)) {
			/*
			 * We could only have been turning quotas off.
			 * We aren't in very good shape actually because
			 * the incore structures are convinced that quotas are
			 * off, but the on disk superblock doesn't know that !
			 */
			ASSERT(!(XFS_IS_QUOTA_ON(mp)));
			xfs_alert(mp, "%s: Superblock update failed!",
				__func__);
		}
	}

	if (error) {
		xfs_warn(mp, "Failed to initialize disk quotas.");
		return;
	}
}
1518 :
1519 : /*
1520 : * This is called after the superblock has been read in and we're ready to
1521 : * iget the quota inodes.
1522 : */
STATIC int
xfs_qm_init_quotainos(
	xfs_mount_t	*mp)
{
	struct xfs_inode	*uip = NULL;
	struct xfs_inode	*gip = NULL;
	struct xfs_inode	*pip = NULL;
	int			error;
	uint			flags = 0;

	ASSERT(mp->m_quotainfo);

	/*
	 * Get the uquota and gquota inodes
	 */
	if (xfs_has_quota(mp)) {
		if (XFS_IS_UQUOTA_ON(mp) &&
		    mp->m_sb.sb_uquotino != NULLFSINO) {
			ASSERT(mp->m_sb.sb_uquotino > 0);
			error = xfs_iget(mp, NULL, mp->m_sb.sb_uquotino,
					     0, 0, &uip);
			if (error)
				return error;
		}
		if (XFS_IS_GQUOTA_ON(mp) &&
		    mp->m_sb.sb_gquotino != NULLFSINO) {
			ASSERT(mp->m_sb.sb_gquotino > 0);
			error = xfs_iget(mp, NULL, mp->m_sb.sb_gquotino,
					     0, 0, &gip);
			if (error)
				goto error_rele;
		}
		if (XFS_IS_PQUOTA_ON(mp) &&
		    mp->m_sb.sb_pquotino != NULLFSINO) {
			ASSERT(mp->m_sb.sb_pquotino > 0);
			error = xfs_iget(mp, NULL, mp->m_sb.sb_pquotino,
					     0, 0, &pip);
			if (error)
				goto error_rele;
		}
	} else {
		/* No on-disk quota feature yet: first alloc revs the sb. */
		flags |= XFS_QMOPT_SBVERSION;
	}

	/*
	 * Create the three inodes, if they don't exist already. The changes
	 * made above will get added to a transaction and logged in one of
	 * the qino_alloc calls below.  If the device is readonly,
	 * temporarily switch to read-write to do this.
	 */
	if (XFS_IS_UQUOTA_ON(mp) && uip == NULL) {
		error = xfs_qm_qino_alloc(mp, &uip,
					      flags | XFS_QMOPT_UQUOTA);
		if (error)
			goto error_rele;

		/* sb version bump (if any) happens only once. */
		flags &= ~XFS_QMOPT_SBVERSION;
	}
	if (XFS_IS_GQUOTA_ON(mp) && gip == NULL) {
		error = xfs_qm_qino_alloc(mp, &gip,
					  flags | XFS_QMOPT_GQUOTA);
		if (error)
			goto error_rele;

		flags &= ~XFS_QMOPT_SBVERSION;
	}
	if (XFS_IS_PQUOTA_ON(mp) && pip == NULL) {
		error = xfs_qm_qino_alloc(mp, &pip,
					  flags | XFS_QMOPT_PQUOTA);
		if (error)
			goto error_rele;
	}

	mp->m_quotainfo->qi_uquotaip = uip;
	mp->m_quotainfo->qi_gquotaip = gip;
	mp->m_quotainfo->qi_pquotaip = pip;

	return 0;

error_rele:
	/* Drop whatever inode references we managed to take above. */
	if (uip)
		xfs_irele(uip);
	if (gip)
		xfs_irele(gip);
	if (pip)
		xfs_irele(pip);
	return error;
}
1611 :
1612 : STATIC void
1613 21883 : xfs_qm_destroy_quotainos(
1614 : struct xfs_quotainfo *qi)
1615 : {
1616 21883 : if (qi->qi_uquotaip) {
1617 2 : xfs_irele(qi->qi_uquotaip);
1618 2 : qi->qi_uquotaip = NULL; /* paranoia */
1619 : }
1620 21883 : if (qi->qi_gquotaip) {
1621 2 : xfs_irele(qi->qi_gquotaip);
1622 2 : qi->qi_gquotaip = NULL;
1623 : }
1624 21883 : if (qi->qi_pquotaip) {
1625 2 : xfs_irele(qi->qi_pquotaip);
1626 2 : qi->qi_pquotaip = NULL;
1627 : }
1628 21883 : }
1629 :
1630 : STATIC void
1631 4606470 : xfs_qm_dqfree_one(
1632 : struct xfs_dquot *dqp)
1633 : {
1634 4606470 : struct xfs_mount *mp = dqp->q_mount;
1635 4606470 : struct xfs_quotainfo *qi = mp->m_quotainfo;
1636 :
1637 4606470 : mutex_lock(&qi->qi_tree_lock);
1638 4606470 : radix_tree_delete(xfs_dquot_tree(qi, xfs_dquot_type(dqp)), dqp->q_id);
1639 :
1640 4606470 : qi->qi_dquots--;
1641 4606470 : mutex_unlock(&qi->qi_tree_lock);
1642 :
1643 4606470 : xfs_qm_dqdestroy(dqp);
1644 4606470 : }
1645 :
1646 : /* --------------- utility functions for vnodeops ---------------- */
1647 :
1648 :
1649 : /*
1650 : * Given an inode, a uid, gid and prid make sure that we have
1651 : * allocated relevant dquot(s) on disk, and that we won't exceed inode
1652 : * quotas by creating this file.
1653 : * This also attaches dquot(s) to the given inode after locking it,
1654 : * and returns the dquots corresponding to the uid and/or gid.
1655 : *
1656 : * in : inode (unlocked)
1657 : * out : udquot, gdquot with references taken and unlocked
1658 : */
int
xfs_qm_vop_dqalloc(
	struct xfs_inode	*ip,
	kuid_t			uid,
	kgid_t			gid,
	prid_t			prid,
	uint			flags,		/* XFS_QMOPT_{U,G,P}QUOTA|INHERIT */
	struct xfs_dquot	**O_udqpp,	/* out: held user dquot */
	struct xfs_dquot	**O_gdqpp,	/* out: held group dquot */
	struct xfs_dquot	**O_pdqpp)	/* out: held project dquot */
{
	struct xfs_mount	*mp = ip->i_mount;
	struct inode		*inode = VFS_I(ip);
	struct user_namespace	*user_ns = inode->i_sb->s_user_ns;
	struct xfs_dquot	*uq = NULL;
	struct xfs_dquot	*gq = NULL;
	struct xfs_dquot	*pq = NULL;
	int			error;
	uint			lockflags;

	if (!XFS_IS_QUOTA_ON(mp))
		return 0;

	lockflags = XFS_ILOCK_EXCL;
	xfs_ilock(ip, lockflags);

	if ((flags & XFS_QMOPT_INHERIT) && XFS_INHERIT_GID(ip))
		gid = inode->i_gid;

	/*
	 * Attach the dquot(s) to this inode, doing a dquot allocation
	 * if necessary. The dquot(s) will not be locked.
	 */
	if (XFS_NOT_DQATTACHED(mp, ip)) {
		error = xfs_qm_dqattach_locked(ip, true);
		if (error) {
			xfs_iunlock(ip, lockflags);
			return error;
		}
	}

	if ((flags & XFS_QMOPT_UQUOTA) && XFS_IS_UQUOTA_ON(mp)) {
		ASSERT(O_udqpp);
		if (!uid_eq(inode->i_uid, uid)) {
			/*
			 * What we need is the dquot that has this uid, and
			 * if we send the inode to dqget, the uid of the inode
			 * takes priority over what's sent in the uid argument.
			 * We must unlock inode here before calling dqget if
			 * we're not sending the inode, because otherwise
			 * we'll deadlock by doing trans_reserve while
			 * holding ilock.
			 */
			xfs_iunlock(ip, lockflags);
			error = xfs_qm_dqget(mp, from_kuid(user_ns, uid),
					XFS_DQTYPE_USER, true, &uq);
			if (error) {
				ASSERT(error != -ENOENT);
				return error;
			}
			/*
			 * Get the ilock in the right order.
			 */
			xfs_dqunlock(uq);
			lockflags = XFS_ILOCK_SHARED;
			xfs_ilock(ip, lockflags);
		} else {
			/*
			 * Take an extra reference, because we'll return
			 * this to caller
			 */
			ASSERT(ip->i_udquot);
			uq = xfs_qm_dqhold(ip->i_udquot);
		}
	}
	if ((flags & XFS_QMOPT_GQUOTA) && XFS_IS_GQUOTA_ON(mp)) {
		ASSERT(O_gdqpp);
		if (!gid_eq(inode->i_gid, gid)) {
			/* Same ilock/dqget ordering dance as for the uid. */
			xfs_iunlock(ip, lockflags);
			error = xfs_qm_dqget(mp, from_kgid(user_ns, gid),
					XFS_DQTYPE_GROUP, true, &gq);
			if (error) {
				ASSERT(error != -ENOENT);
				goto error_rele;
			}
			xfs_dqunlock(gq);
			lockflags = XFS_ILOCK_SHARED;
			xfs_ilock(ip, lockflags);
		} else {
			ASSERT(ip->i_gdquot);
			gq = xfs_qm_dqhold(ip->i_gdquot);
		}
	}
	if ((flags & XFS_QMOPT_PQUOTA) && XFS_IS_PQUOTA_ON(mp)) {
		ASSERT(O_pdqpp);
		if (ip->i_projid != prid) {
			xfs_iunlock(ip, lockflags);
			error = xfs_qm_dqget(mp, prid,
					XFS_DQTYPE_PROJ, true, &pq);
			if (error) {
				ASSERT(error != -ENOENT);
				goto error_rele;
			}
			xfs_dqunlock(pq);
			lockflags = XFS_ILOCK_SHARED;
			xfs_ilock(ip, lockflags);
		} else {
			ASSERT(ip->i_pdquot);
			pq = xfs_qm_dqhold(ip->i_pdquot);
		}
	}
	trace_xfs_dquot_dqalloc(ip);

	/* Hand held references to the caller, or drop them if unwanted. */
	xfs_iunlock(ip, lockflags);
	if (O_udqpp)
		*O_udqpp = uq;
	else
		xfs_qm_dqrele(uq);
	if (O_gdqpp)
		*O_gdqpp = gq;
	else
		xfs_qm_dqrele(gq);
	if (O_pdqpp)
		*O_pdqpp = pq;
	else
		xfs_qm_dqrele(pq);
	return 0;

error_rele:
	/* Release whatever dquot references we already took. */
	xfs_qm_dqrele(gq);
	xfs_qm_dqrele(uq);
	return error;
}
1792 :
1793 : /*
1794 : * Actually transfer ownership, and do dquot modifications.
1795 : * These were already reserved.
1796 : */
struct xfs_dquot *
xfs_qm_vop_chown(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	struct xfs_dquot	**IO_olddq,	/* in: old dquot; out: held new dquot */
	struct xfs_dquot	*newdq)
{
	struct xfs_dquot	*prevdq;
	uint			bfield = XFS_IS_REALTIME_INODE(ip) ?
				 XFS_TRANS_DQ_RTBCOUNT : XFS_TRANS_DQ_BCOUNT;


	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
	ASSERT(XFS_IS_QUOTA_ON(ip->i_mount));

	/* old dquot */
	prevdq = *IO_olddq;
	ASSERT(prevdq);
	ASSERT(prevdq != newdq);

	/* Move the inode's blocks and inode count off the old dquot... */
	xfs_trans_mod_ino_dquot(tp, ip, prevdq, bfield, -(ip->i_nblocks));
	xfs_trans_mod_ino_dquot(tp, ip, prevdq, XFS_TRANS_DQ_ICOUNT, -1);

	/* the sparkling new dquot */
	xfs_trans_mod_ino_dquot(tp, ip, newdq, bfield, ip->i_nblocks);
	xfs_trans_mod_ino_dquot(tp, ip, newdq, XFS_TRANS_DQ_ICOUNT, 1);

	/*
	 * Back when we made quota reservations for the chown, we reserved the
	 * ondisk blocks + delalloc blocks with the new dquot.  Now that we've
	 * switched the dquots, decrease the new dquot's block reservation
	 * (having already bumped up the real counter) so that we don't have
	 * any reservation to give back when we commit.
	 */
	xfs_trans_mod_dquot(tp, newdq, XFS_TRANS_DQ_RES_BLKS,
			-ip->i_delayed_blks);

	/*
	 * Give the incore reservation for delalloc blocks back to the old
	 * dquot. We don't normally handle delalloc quota reservations
	 * transactionally, so just lock the dquot and subtract from the
	 * reservation. Dirty the transaction because it's too late to turn
	 * back now.
	 */
	tp->t_flags |= XFS_TRANS_DIRTY;
	xfs_dqlock(prevdq);
	ASSERT(prevdq->q_blk.reserved >= ip->i_delayed_blks);
	prevdq->q_blk.reserved -= ip->i_delayed_blks;
	xfs_dqunlock(prevdq);

	/*
	 * Take an extra reference, because the inode is going to keep
	 * this dquot pointer even after the trans_commit.
	 */
	*IO_olddq = xfs_qm_dqhold(newdq);

	return prevdq;
}
1855 :
1856 : int
1857 29482268 : xfs_qm_vop_rename_dqattach(
1858 : struct xfs_inode **i_tab)
1859 : {
1860 29482268 : struct xfs_mount *mp = i_tab[0]->i_mount;
1861 29482268 : int i;
1862 :
1863 29482268 : if (!XFS_IS_QUOTA_ON(mp))
1864 : return 0;
1865 :
1866 79553766 : for (i = 0; (i < 4 && i_tab[i]); i++) {
1867 61273193 : struct xfs_inode *ip = i_tab[i];
1868 61273193 : int error;
1869 :
1870 : /*
1871 : * Watch out for duplicate entries in the table.
1872 : */
1873 61273193 : if (i == 0 || ip != i_tab[i-1]) {
1874 60183160 : if (XFS_NOT_DQATTACHED(mp, ip)) {
1875 4083276 : error = xfs_qm_dqattach(ip);
1876 4083276 : if (error)
1877 803 : return error;
1878 : }
1879 : }
1880 : }
1881 : return 0;
1882 : }
1883 :
/*
 * Attach the dquots reserved for a newly created inode and charge the new
 * inode to them.  Each non-NULL dquot whose quota type is enabled gets an
 * extra reference that the inode keeps until it is released.
 */
void
xfs_qm_vop_create_dqattach(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	struct xfs_dquot	*udqp,
	struct xfs_dquot	*gdqp,
	struct xfs_dquot	*pdqp)
{
	struct xfs_mount	*mp = tp->t_mountp;

	if (!XFS_IS_QUOTA_ON(mp))
		return;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));

	if (udqp && XFS_IS_UQUOTA_ON(mp)) {
		ASSERT(ip->i_udquot == NULL);
		ASSERT(i_uid_read(VFS_I(ip)) == udqp->q_id);

		ip->i_udquot = xfs_qm_dqhold(udqp);
	}
	if (gdqp && XFS_IS_GQUOTA_ON(mp)) {
		ASSERT(ip->i_gdquot == NULL);
		ASSERT(i_gid_read(VFS_I(ip)) == gdqp->q_id);

		ip->i_gdquot = xfs_qm_dqhold(gdqp);
	}
	if (pdqp && XFS_IS_PQUOTA_ON(mp)) {
		ASSERT(ip->i_pdquot == NULL);
		ASSERT(ip->i_projid == pdqp->q_id);

		ip->i_pdquot = xfs_qm_dqhold(pdqp);
	}

	/* Account the new inode against the attached dquots. */
	xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_ICOUNT, 1);
}
1920 :
1921 : /* Decide if this inode's dquot is near an enforcement boundary. */
1922 : bool
1923 123471665 : xfs_inode_near_dquot_enforcement(
1924 : struct xfs_inode *ip,
1925 : xfs_dqtype_t type)
1926 : {
1927 123471665 : struct xfs_dquot *dqp;
1928 123471665 : int64_t freesp;
1929 :
1930 : /* We only care for quotas that are enabled and enforced. */
1931 123471665 : dqp = xfs_inode_dquot(ip, type);
1932 123471665 : if (!dqp || !xfs_dquot_is_enforced(dqp))
1933 65897418 : return false;
1934 :
1935 57569265 : if (xfs_dquot_res_over_limits(&dqp->q_ino) ||
1936 57569118 : xfs_dquot_res_over_limits(&dqp->q_rtb))
1937 : return true;
1938 :
1939 : /* For space on the data device, check the various thresholds. */
1940 57569118 : if (!dqp->q_prealloc_hi_wmark)
1941 : return false;
1942 :
1943 9046 : if (dqp->q_blk.reserved < dqp->q_prealloc_lo_wmark)
1944 : return false;
1945 :
1946 106 : if (dqp->q_blk.reserved >= dqp->q_prealloc_hi_wmark)
1947 : return true;
1948 :
1949 80 : freesp = dqp->q_prealloc_hi_wmark - dqp->q_blk.reserved;
1950 80 : if (freesp < dqp->q_low_space[XFS_QLOWSP_5_PCNT])
1951 0 : return true;
1952 :
1953 : return false;
1954 : }
|