// SPDX-License-Identifier: GPL-2.0

#include "misc.h"
#include "ctree.h"
#include "block-rsv.h"
#include "space-info.h"
#include "transaction.h"
#include "block-group.h"
#include "disk-io.h"
#include "fs.h"
#include "accessors.h"

/*
 * HOW DO BLOCK RESERVES WORK
 *
 * Think of block_rsv's as buckets for logically grouped metadata
 * reservations.  Each block_rsv has a ->size and a ->reserved.  ->size is
 * how large we want our block rsv to be, ->reserved is how much space is
 * currently reserved for this block reserve.
 *
 * ->failfast exists for the truncate case, and is described below.
 *
 * NORMAL OPERATION
 *
 * -> Reserve
 *	Entrance: btrfs_block_rsv_add, btrfs_block_rsv_refill
 *
 *	We call into btrfs_reserve_metadata_bytes() with our bytes, which are
 *	accounted for in space_info->bytes_may_use, and then add the bytes to
 *	->reserved, and ->size in the case of btrfs_block_rsv_add.
 *
 *	->size is an over-estimation of how much we may use for a particular
 *	operation.
 *
 * -> Use
 *	Entrance: btrfs_use_block_rsv
 *
 *	When we do a btrfs_alloc_tree_block() we call into btrfs_use_block_rsv()
 *	to determine the appropriate block_rsv to use, and then verify that
 *	->reserved has enough space for our tree block allocation.  Once
 *	successful we subtract fs_info->nodesize from ->reserved.
 *
 * -> Finish
 *	Entrance: btrfs_block_rsv_release
 *
 *	We are finished with our operation, subtract our individual reservation
 *	from ->size, and then subtract ->size from ->reserved and free up the
 *	excess if there is any.
 *
 *	There is some logic here to refill the delayed refs rsv or the global
 *	rsv as needed, otherwise the excess is subtracted from
 *	space_info->bytes_may_use.
 *
 * (A sketch of this reserve/use/release cycle follows this comment.)
 *
 * TYPES OF BLOCK RESERVES
 *
 * BLOCK_RSV_TRANS, BLOCK_RSV_DELOPS, BLOCK_RSV_CHUNK
 *	These behave normally, as described above, just within the confines of
 *	the lifetime of their particular operation (transaction for the whole
 *	trans handle lifetime, for example).
 *
 * BLOCK_RSV_GLOBAL
 *	It is impossible to properly account for all the space that may be
 *	required to make our extent tree updates.  This block reserve acts as
 *	an overflow buffer in case our delayed refs reserve does not reserve
 *	enough space to update the extent tree.
 *
 *	We can steal from this in some cases as well, notably on evict() or
 *	truncate() in order to help users recover from ENOSPC conditions.
 *
 * BLOCK_RSV_DELALLOC
 *	The individual item sizes are determined by the per-inode size
 *	calculations, which are described with the delalloc code.  This is
 *	pretty straightforward, it's just that the calculation of ->size
 *	encodes a lot of different items, and thus it gets used when updating
 *	inodes, inserting file extents, and inserting checksums.
 *
 * BLOCK_RSV_DELREFS
 *	We keep a running tally of how many delayed refs we have on the system.
 *	We assume each one of these delayed refs is going to use a full
 *	reservation.  We use the transaction items and pre-reserve space for
 *	every operation, and use this reservation to refill any gap between
 *	->size and ->reserved that may exist.
 *
 *	From there it's straightforward, removing a delayed ref means we remove
 *	its count from ->size and free up reservations as necessary.  Since
 *	this is the most dynamic block reserve in the system, we will try to
 *	refill this block reserve first with any excess returned by any other
 *	block reserve.
 *
 * BLOCK_RSV_EMPTY
 *	This is the fallback block reserve to make us try to reserve space if
 *	we don't have a specific bucket for this allocation.  It is mostly used
 *	for updating the device tree and such; since that is a separate pool,
 *	we're content to just reserve space from the space_info on demand.
 *
 * BLOCK_RSV_TEMP
 *	This is used by things like truncate and iput.  We will temporarily
 *	allocate a block reserve, set it to some size, and then truncate bytes
 *	until we have no space left.  With ->failfast set we'll simply return
 *	ENOSPC from btrfs_use_block_rsv() to signal that we need to unwind and
 *	try to make a new reservation.  This is because these operations are
 *	unbounded, so we want to do as much work as we can, and then back off
 *	and re-reserve.  (A sketch of this pattern sits right after
 *	btrfs_use_block_rsv() below.)
 */
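
/*
 * A minimal sketch of the reserve/use/release cycle described above, as a
 * hypothetical caller might drive it.  The names "rsv" and "num_bytes" and
 * the flush mode are illustrative only; real callers pick a reserve type
 * and flush strategy that match their context:
 *
 *	struct btrfs_block_rsv rsv;
 *	int ret;
 *
 *	btrfs_init_metadata_block_rsv(fs_info, &rsv, BTRFS_BLOCK_RSV_TEMP);
 *
 *	Reserve:
 *	ret = btrfs_block_rsv_add(fs_info, &rsv, num_bytes,
 *				  BTRFS_RESERVE_FLUSH_ALL);
 *	if (ret)
 *		return ret;
 *
 *	Use: happens inside btrfs_alloc_tree_block(), which calls
 *	btrfs_use_block_rsv() and subtracts fs_info->nodesize from
 *	rsv.reserved for each tree block it hands out.
 *
 *	Finish: release everything still unused, letting the excess refill
 *	the delayed refs or global rsv if either is short:
 *	btrfs_block_rsv_release(fs_info, &rsv, (u64)-1, NULL);
 */
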
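/*
 * Shrink @block_rsv by @num_bytes ((u64)-1 means "the whole ->size") and
 * hand any space now sitting above ->size either to @dest (up to whatever
 * @dest is missing) or back to the space_info's bytes_may_use.  Returns the
 * number of bytes freed from ->reserved; if @qgroup_to_release_ret is
 * non-NULL it is set to the qgroup reservation the caller should release.
 */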
static u64 block_rsv_release_bytes(struct btrfs_fs_info *fs_info,
				   struct btrfs_block_rsv *block_rsv,
				   struct btrfs_block_rsv *dest, u64 num_bytes,
				   u64 *qgroup_to_release_ret)
{
	struct btrfs_space_info *space_info = block_rsv->space_info;
	u64 qgroup_to_release = 0;
	u64 ret;

	spin_lock(&block_rsv->lock);
	if (num_bytes == (u64)-1) {
		num_bytes = block_rsv->size;
		qgroup_to_release = block_rsv->qgroup_rsv_size;
	}
	block_rsv->size -= num_bytes;
	if (block_rsv->reserved >= block_rsv->size) {
		num_bytes = block_rsv->reserved - block_rsv->size;
		block_rsv->reserved = block_rsv->size;
		block_rsv->full = true;
	} else {
		num_bytes = 0;
	}
	if (qgroup_to_release_ret &&
	    block_rsv->qgroup_rsv_reserved >= block_rsv->qgroup_rsv_size) {
		qgroup_to_release = block_rsv->qgroup_rsv_reserved -
				    block_rsv->qgroup_rsv_size;
		block_rsv->qgroup_rsv_reserved = block_rsv->qgroup_rsv_size;
	} else {
		qgroup_to_release = 0;
	}
	spin_unlock(&block_rsv->lock);

	ret = num_bytes;
	if (num_bytes > 0) {
		if (dest) {
			spin_lock(&dest->lock);
			if (!dest->full) {
				u64 bytes_to_add;

				bytes_to_add = dest->size - dest->reserved;
				bytes_to_add = min(num_bytes, bytes_to_add);
				dest->reserved += bytes_to_add;
				if (dest->reserved >= dest->size)
					dest->full = true;
				num_bytes -= bytes_to_add;
			}
			spin_unlock(&dest->lock);
		}
		if (num_bytes)
			btrfs_space_info_free_bytes_may_use(fs_info,
							    space_info,
							    num_bytes);
	}
	if (qgroup_to_release_ret)
		*qgroup_to_release_ret = qgroup_to_release;
	return ret;
}

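/*
 * Move @num_bytes of reserved space from @src to @dst, failing with -ENOSPC
 * if @src does not have that much reserved.  @update_size controls whether
 * @dst's ->size grows along with its ->reserved.
 */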
int btrfs_block_rsv_migrate(struct btrfs_block_rsv *src,
			    struct btrfs_block_rsv *dst, u64 num_bytes,
			    bool update_size)
{
	int ret;

	ret = btrfs_block_rsv_use_bytes(src, num_bytes);
	if (ret)
		return ret;

	btrfs_block_rsv_add_bytes(dst, num_bytes, update_size);
	return 0;
}

void btrfs_init_block_rsv(struct btrfs_block_rsv *rsv, enum btrfs_rsv_type type)
{
	memset(rsv, 0, sizeof(*rsv));
	spin_lock_init(&rsv->lock);
	rsv->type = type;
}

void btrfs_init_metadata_block_rsv(struct btrfs_fs_info *fs_info,
				   struct btrfs_block_rsv *rsv,
				   enum btrfs_rsv_type type)
{
	btrfs_init_block_rsv(rsv, type);
	rsv->space_info = btrfs_find_space_info(fs_info,
						BTRFS_BLOCK_GROUP_METADATA);
}

struct btrfs_block_rsv *btrfs_alloc_block_rsv(struct btrfs_fs_info *fs_info,
					      enum btrfs_rsv_type type)
{
	struct btrfs_block_rsv *block_rsv;

	block_rsv = kmalloc(sizeof(*block_rsv), GFP_NOFS);
	if (!block_rsv)
		return NULL;

	btrfs_init_metadata_block_rsv(fs_info, block_rsv, type);
	return block_rsv;
}

void btrfs_free_block_rsv(struct btrfs_fs_info *fs_info,
			  struct btrfs_block_rsv *rsv)
{
	if (!rsv)
		return;
	btrfs_block_rsv_release(fs_info, rsv, (u64)-1, NULL);
	kfree(rsv);
}

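/*
 * Reserve @num_bytes from the space_info backing @block_rsv and grow both
 * ->reserved and ->size by that amount.
 */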
int btrfs_block_rsv_add(struct btrfs_fs_info *fs_info,
			struct btrfs_block_rsv *block_rsv, u64 num_bytes,
			enum btrfs_reserve_flush_enum flush)
{
	int ret;

	if (num_bytes == 0)
		return 0;

	ret = btrfs_reserve_metadata_bytes(fs_info, block_rsv, num_bytes, flush);
	if (!ret)
		btrfs_block_rsv_add_bytes(block_rsv, num_bytes, true);

	return ret;
}

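/*
 * Return 0 if at least @min_percent percent of @block_rsv's ->size is
 * actually reserved, -ENOSPC otherwise.
 */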
int btrfs_block_rsv_check(struct btrfs_block_rsv *block_rsv, int min_percent)
{
	u64 num_bytes = 0;
	int ret = -ENOSPC;

	spin_lock(&block_rsv->lock);
	num_bytes = mult_perc(block_rsv->size, min_percent);
	if (block_rsv->reserved >= num_bytes)
		ret = 0;
	spin_unlock(&block_rsv->lock);

	return ret;
}

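/*
 * Top up ->reserved of @block_rsv to @num_bytes without touching ->size,
 * reserving only the shortfall from the space_info.  Contrast with
 * btrfs_block_rsv_add(), which grows ->size as well.
 */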
int btrfs_block_rsv_refill(struct btrfs_fs_info *fs_info,
			   struct btrfs_block_rsv *block_rsv, u64 num_bytes,
			   enum btrfs_reserve_flush_enum flush)
{
	int ret = -ENOSPC;

	if (!block_rsv)
		return 0;

	spin_lock(&block_rsv->lock);
	if (block_rsv->reserved >= num_bytes)
		ret = 0;
	else
		num_bytes -= block_rsv->reserved;
	spin_unlock(&block_rsv->lock);

	if (!ret)
		return 0;

	ret = btrfs_reserve_metadata_bytes(fs_info, block_rsv, num_bytes, flush);
	if (!ret) {
		btrfs_block_rsv_add_bytes(block_rsv, num_bytes, false);
		return 0;
	}

	return ret;
}

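/*
 * Release @num_bytes from @block_rsv.  Excess reservation is used to refill
 * the delayed refs rsv (or the global rsv, when releasing from the delayed
 * refs rsv itself) before anything is returned to the space_info.
 */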
u64 btrfs_block_rsv_release(struct btrfs_fs_info *fs_info,
			    struct btrfs_block_rsv *block_rsv, u64 num_bytes,
			    u64 *qgroup_to_release)
{
	struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
	struct btrfs_block_rsv *delayed_rsv = &fs_info->delayed_refs_rsv;
	struct btrfs_block_rsv *target = NULL;

	/*
	 * If we are the delayed_rsv then push to the global rsv, otherwise dump
	 * into the delayed rsv if it is not full.
	 */
	if (block_rsv == delayed_rsv)
		target = global_rsv;
	else if (block_rsv != global_rsv && !btrfs_block_rsv_full(delayed_rsv))
		target = delayed_rsv;

	if (target && block_rsv->space_info != target->space_info)
		target = NULL;

	return block_rsv_release_bytes(fs_info, block_rsv, target, num_bytes,
				       qgroup_to_release);
}

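/*
 * Consume @num_bytes of previously reserved space, failing with -ENOSPC if
 * @block_rsv does not have that much in ->reserved.
 */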
int btrfs_block_rsv_use_bytes(struct btrfs_block_rsv *block_rsv, u64 num_bytes)
{
	int ret = -ENOSPC;

	spin_lock(&block_rsv->lock);
	if (block_rsv->reserved >= num_bytes) {
		block_rsv->reserved -= num_bytes;
		if (block_rsv->reserved < block_rsv->size)
			block_rsv->full = false;
		ret = 0;
	}
	spin_unlock(&block_rsv->lock);
	return ret;
}

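/*
 * Credit @num_bytes of already-reserved space to @block_rsv, optionally
 * growing ->size by the same amount.
 */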
void btrfs_block_rsv_add_bytes(struct btrfs_block_rsv *block_rsv,
			       u64 num_bytes, bool update_size)
{
	spin_lock(&block_rsv->lock);
	block_rsv->reserved += num_bytes;
	if (update_size)
		block_rsv->size += num_bytes;
	else if (block_rsv->reserved >= block_rsv->size)
		block_rsv->full = true;
	spin_unlock(&block_rsv->lock);
}

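/*
 * Recalculate the size of the global block reserve from the space used by
 * the tree root and the global roots, then bring ->reserved in line with
 * the new ->size, taking from or giving back to the space_info as needed.
 */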
void btrfs_update_global_block_rsv(struct btrfs_fs_info *fs_info)
{
	struct btrfs_block_rsv *block_rsv = &fs_info->global_block_rsv;
	struct btrfs_space_info *sinfo = block_rsv->space_info;
	struct btrfs_root *root, *tmp;
	u64 num_bytes = btrfs_root_used(&fs_info->tree_root->root_item);
	unsigned int min_items = 1;

	/*
	 * The global block rsv is based on the size of the extent tree, the
	 * checksum tree and the root tree.  If the fs is empty we want to set
	 * it to a minimal amount for safety.
	 *
	 * We also are going to need to modify the minimum of the tree root and
	 * any global roots we could touch.
	 */
	read_lock(&fs_info->global_root_lock);
	rbtree_postorder_for_each_entry_safe(root, tmp, &fs_info->global_root_tree,
					     rb_node) {
		if (root->root_key.objectid == BTRFS_EXTENT_TREE_OBJECTID ||
		    root->root_key.objectid == BTRFS_CSUM_TREE_OBJECTID ||
		    root->root_key.objectid == BTRFS_FREE_SPACE_TREE_OBJECTID) {
			num_bytes += btrfs_root_used(&root->root_item);
			min_items++;
		}
	}
	read_unlock(&fs_info->global_root_lock);

	/*
	 * But we also want to reserve enough space so we can do the fallback
	 * global reserve for an unlink, which is an additional
	 * BTRFS_UNLINK_METADATA_UNITS items.
	 *
	 * We also need space for the delayed ref updates from the unlink, so
	 * add BTRFS_UNLINK_METADATA_UNITS units for delayed refs, one for
	 * each unlink metadata item.
	 */
	min_items += BTRFS_UNLINK_METADATA_UNITS;

	num_bytes = max_t(u64, num_bytes,
			  btrfs_calc_insert_metadata_size(fs_info, min_items) +
			  btrfs_calc_delayed_ref_bytes(fs_info,
						       BTRFS_UNLINK_METADATA_UNITS));

	spin_lock(&sinfo->lock);
	spin_lock(&block_rsv->lock);

	block_rsv->size = min_t(u64, num_bytes, SZ_512M);

	if (block_rsv->reserved < block_rsv->size) {
		num_bytes = block_rsv->size - block_rsv->reserved;
		btrfs_space_info_update_bytes_may_use(fs_info, sinfo,
						      num_bytes);
		block_rsv->reserved = block_rsv->size;
	} else if (block_rsv->reserved > block_rsv->size) {
		num_bytes = block_rsv->reserved - block_rsv->size;
		btrfs_space_info_update_bytes_may_use(fs_info, sinfo,
						      -num_bytes);
		block_rsv->reserved = block_rsv->size;
		btrfs_try_granting_tickets(fs_info, sinfo);
	}

	block_rsv->full = (block_rsv->reserved == block_rsv->size);

	if (block_rsv->size >= sinfo->total_bytes)
		sinfo->force_alloc = CHUNK_ALLOC_FORCE;
	spin_unlock(&block_rsv->lock);
	spin_unlock(&sinfo->lock);
}

void btrfs_init_root_block_rsv(struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;

	switch (root->root_key.objectid) {
	case BTRFS_CSUM_TREE_OBJECTID:
	case BTRFS_EXTENT_TREE_OBJECTID:
	case BTRFS_FREE_SPACE_TREE_OBJECTID:
	case BTRFS_BLOCK_GROUP_TREE_OBJECTID:
		root->block_rsv = &fs_info->delayed_refs_rsv;
		break;
	case BTRFS_ROOT_TREE_OBJECTID:
	case BTRFS_DEV_TREE_OBJECTID:
	case BTRFS_QUOTA_TREE_OBJECTID:
		root->block_rsv = &fs_info->global_block_rsv;
		break;
	case BTRFS_CHUNK_TREE_OBJECTID:
		root->block_rsv = &fs_info->chunk_block_rsv;
		break;
	default:
		root->block_rsv = NULL;
		break;
	}
}

void btrfs_init_global_block_rsv(struct btrfs_fs_info *fs_info)
{
	struct btrfs_space_info *space_info;

	space_info = btrfs_find_space_info(fs_info, BTRFS_BLOCK_GROUP_SYSTEM);
	fs_info->chunk_block_rsv.space_info = space_info;

	space_info = btrfs_find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
	fs_info->global_block_rsv.space_info = space_info;
	fs_info->trans_block_rsv.space_info = space_info;
	fs_info->empty_block_rsv.space_info = space_info;
	fs_info->delayed_block_rsv.space_info = space_info;
	fs_info->delayed_refs_rsv.space_info = space_info;

	btrfs_update_global_block_rsv(fs_info);
}

void btrfs_release_global_block_rsv(struct btrfs_fs_info *fs_info)
{
	btrfs_block_rsv_release(fs_info, &fs_info->global_block_rsv, (u64)-1,
				NULL);
	WARN_ON(fs_info->trans_block_rsv.size > 0);
	WARN_ON(fs_info->trans_block_rsv.reserved > 0);
	WARN_ON(fs_info->chunk_block_rsv.size > 0);
	WARN_ON(fs_info->chunk_block_rsv.reserved > 0);
	WARN_ON(fs_info->delayed_block_rsv.size > 0);
	WARN_ON(fs_info->delayed_block_rsv.reserved > 0);
	WARN_ON(fs_info->delayed_refs_rsv.reserved > 0);
	WARN_ON(fs_info->delayed_refs_rsv.size > 0);
}

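/*
 * Pick the block reserve backing an allocation: the transaction's rsv for
 * shareable roots (plus the uuid root and csum insertion special cases),
 * otherwise the root's own rsv, falling back to the empty rsv.
 */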
static struct btrfs_block_rsv *get_block_rsv(
					const struct btrfs_trans_handle *trans,
					const struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_block_rsv *block_rsv = NULL;

	if (test_bit(BTRFS_ROOT_SHAREABLE, &root->state) ||
	    (root == fs_info->uuid_root) ||
	    (trans->adding_csums &&
	     root->root_key.objectid == BTRFS_CSUM_TREE_OBJECTID))
		block_rsv = trans->block_rsv;

	if (!block_rsv)
		block_rsv = root->block_rsv;

	if (!block_rsv)
		block_rsv = &fs_info->empty_block_rsv;

	return block_rsv;
}

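/*
 * Carve one tree block's worth (@blocksize) out of the appropriate block
 * reserve.  On a miss we fall back to a direct no-flush reservation, then
 * to stealing from the global rsv, and finally to an emergency flush before
 * giving up; with ->failfast set we bail out with ERR_PTR(-ENOSPC) at the
 * first miss instead.
 */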
struct btrfs_block_rsv *btrfs_use_block_rsv(struct btrfs_trans_handle *trans,
					    struct btrfs_root *root,
					    u32 blocksize)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_block_rsv *block_rsv;
	struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
	int ret;
	bool global_updated = false;

	block_rsv = get_block_rsv(trans, root);

	if (unlikely(block_rsv->size == 0))
		goto try_reserve;
again:
	ret = btrfs_block_rsv_use_bytes(block_rsv, blocksize);
	if (!ret)
		return block_rsv;

	if (block_rsv->failfast)
		return ERR_PTR(ret);

	if (block_rsv->type == BTRFS_BLOCK_RSV_GLOBAL && !global_updated) {
		global_updated = true;
		btrfs_update_global_block_rsv(fs_info);
		goto again;
	}

	/*
	 * The global reserve still exists to save us from ourselves, so don't
	 * warn_on if we are short on our delayed refs reserve.
	 */
	if (block_rsv->type != BTRFS_BLOCK_RSV_DELREFS &&
	    btrfs_test_opt(fs_info, ENOSPC_DEBUG)) {
		static DEFINE_RATELIMIT_STATE(_rs,
					      DEFAULT_RATELIMIT_INTERVAL * 10,
					      /*DEFAULT_RATELIMIT_BURST*/ 1);
		if (__ratelimit(&_rs))
			WARN(1, KERN_DEBUG
				"BTRFS: block rsv %d returned %d\n",
				block_rsv->type, ret);
	}
try_reserve:
	ret = btrfs_reserve_metadata_bytes(fs_info, block_rsv, blocksize,
					   BTRFS_RESERVE_NO_FLUSH);
	if (!ret)
		return block_rsv;
	/*
	 * If we couldn't reserve metadata bytes try and use some from
	 * the global reserve if its space type is the same as the global
	 * reservation.
	 */
	if (block_rsv->type != BTRFS_BLOCK_RSV_GLOBAL &&
	    block_rsv->space_info == global_rsv->space_info) {
		ret = btrfs_block_rsv_use_bytes(global_rsv, blocksize);
		if (!ret)
			return global_rsv;
	}

	/*
	 * All hope is lost, but of course our reservations are overly
	 * pessimistic, so instead of possibly having an ENOSPC abort here, try
	 * one last time to force a reservation if there's enough actual space
	 * on disk to make the reservation.
	 */
	ret = btrfs_reserve_metadata_bytes(fs_info, block_rsv, blocksize,
					   BTRFS_RESERVE_FLUSH_EMERGENCY);
	if (!ret)
		return block_rsv;

	return ERR_PTR(ret);
}
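
/*
 * A sketch of the ->failfast pattern described at the top of this file, as
 * a truncate-like caller might use it.  The names "work_left", "size" and
 * "drop_some_items" are hypothetical stand-ins for the caller's actual
 * state and work loop; the real truncate code lives elsewhere:
 *
 *	struct btrfs_block_rsv *rsv;
 *	int ret;
 *
 *	rsv = btrfs_alloc_block_rsv(fs_info, BTRFS_BLOCK_RSV_TEMP);
 *	if (!rsv)
 *		return -ENOMEM;
 *	rsv->failfast = true;
 *
 *	while (work_left) {
 *		ret = btrfs_block_rsv_refill(fs_info, rsv, size,
 *					     BTRFS_RESERVE_FLUSH_ALL);
 *		if (ret)
 *			break;
 *		ret = drop_some_items(rsv);
 *		if (ret != -ENOSPC)
 *			break;
 *		-ENOSPC here came from btrfs_use_block_rsv() via
 *		->failfast: back off and loop around to re-reserve.
 *	}
 *	btrfs_free_block_rsv(fs_info, rsv);
 */
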
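/*
 * Check that @rsv has enough reserved for a free space cache truncation:
 * one inserted item's worth of slack plus one inode update.
 */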
int btrfs_check_trunc_cache_free_space(struct btrfs_fs_info *fs_info,
				       struct btrfs_block_rsv *rsv)
{
	u64 needed_bytes;
	int ret;

	/* 1 for slack space, 1 for updating the inode */
	needed_bytes = btrfs_calc_insert_metadata_size(fs_info, 1) +
		       btrfs_calc_metadata_size(fs_info, 1);

	spin_lock(&rsv->lock);
	if (rsv->reserved < needed_bytes)
		ret = -ENOSPC;
	else
		ret = 0;
	spin_unlock(&rsv->lock);
	return ret;
}