// SPDX-License-Identifier: GPL-2.0

#include "misc.h"
#include "ctree.h"
#include "block-rsv.h"
#include "space-info.h"
#include "transaction.h"
#include "block-group.h"
#include "disk-io.h"
#include "fs.h"
#include "accessors.h"

/*
 * HOW DO BLOCK RESERVES WORK
 *
 *   Think of block_rsv's as buckets for logically grouped metadata
 *   reservations. Each block_rsv has a ->size and a ->reserved. ->size is
 *   how large we want our block rsv to be; ->reserved is how much space is
 *   currently reserved for this block reserve.
 *
 *   ->failfast exists for the truncate case, and is described below.
 *
 * NORMAL OPERATION
 *
 *   -> Reserve
 *	Entrance: btrfs_block_rsv_add, btrfs_block_rsv_refill
 *
 *	We call into btrfs_reserve_metadata_bytes() with our bytes, which are
 *	accounted for in space_info->bytes_may_use, and then add the bytes to
 *	->reserved, and to ->size in the case of btrfs_block_rsv_add.
 *
 *	->size is an over-estimation of how much we may use for a particular
 *	operation.
 *
 *   -> Use
 *	Entrance: btrfs_use_block_rsv
 *
 *	When we do a btrfs_alloc_tree_block() we call into btrfs_use_block_rsv()
 *	to determine the appropriate block_rsv to use, and then verify that
 *	->reserved has enough space for our tree block allocation. Once
 *	successful we subtract fs_info->nodesize from ->reserved.
 *
 *   -> Finish
 *	Entrance: btrfs_block_rsv_release
 *
 *	We are finished with our operation, so we subtract our individual
 *	reservation from ->size, then subtract ->size from ->reserved and free
 *	up the excess if there is any.
 *
 *	There is some logic here to refill the delayed refs rsv or the global
 *	rsv as needed; otherwise the excess is subtracted from
 *	space_info->bytes_may_use.
 *
 *   (The whole reserve -> use -> release cycle is spelled out in the
 *   illustrative sketch right after this comment.)
 *
 * TYPES OF BLOCK RESERVES
 *
 * BLOCK_RSV_TRANS, BLOCK_RSV_DELOPS, BLOCK_RSV_CHUNK
 *   These behave normally, as described above, just within the confines of
 *   the lifetime of their particular operation (transaction for the whole
 *   trans handle lifetime, for example).
 *
 * BLOCK_RSV_GLOBAL
 *   It is impossible to properly account for all the space that may be
 *   required to make our extent tree updates. This block reserve acts as an
 *   overflow buffer in case our delayed refs reserve does not reserve enough
 *   space to update the extent tree.
 *
 *   We can steal from this in some cases as well, notably on evict() or
 *   truncate() in order to help users recover from ENOSPC conditions.
 *
 * BLOCK_RSV_DELALLOC
 *   The individual item sizes are determined by the per-inode size
 *   calculations, which are described with the delalloc code. This is pretty
 *   straightforward; it's just that the calculation of ->size encodes a lot
 *   of different items, and thus it gets used when updating inodes, inserting
 *   file extents, and inserting checksums.
 *
 * BLOCK_RSV_DELREFS
 *   We keep a running tally of how many delayed refs we have on the system.
 *   We assume each one of these delayed refs is going to use a full
 *   reservation. We use the transaction items and pre-reserve space for every
 *   operation, and use this reservation to refill any gap between ->size and
 *   ->reserved that may exist.
 *
 *   From there it's straightforward: removing a delayed ref means we remove
 *   its count from ->size and free up reservations as necessary. Since this
 *   is the most dynamic block reserve in the system, we will try to refill
 *   this block reserve first with any excess returned by any other block
 *   reserve.
 *
 * BLOCK_RSV_EMPTY
 *   This is the fallback block reserve to make us try to reserve space if we
 *   don't have a specific bucket for this allocation. It is mostly used for
 *   updating the device tree and such, since that is a separate pool we're
 *   content to just reserve space from the space_info on demand.
 *
 * BLOCK_RSV_TEMP
 *   This is used by things like truncate and iput. We will temporarily
 *   allocate a block reserve, set it to some size, and then truncate bytes
 *   until we have no space left. With ->failfast set we'll simply return
 *   ENOSPC from btrfs_use_block_rsv() to signal that we need to unwind and
 *   try to make a new reservation. This is because these operations are
 *   unbounded, so we want to do as much work as we can, and then back off and
 *   re-reserve.
 */
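
/*
 * Illustrative sketch of the reserve -> use -> release cycle described
 * above. This helper is hypothetical (illustration only, not part of the
 * btrfs API); the real "use" step happens via btrfs_use_block_rsv() from
 * btrfs_alloc_tree_block(), stood in for here by a direct
 * btrfs_block_rsv_use_bytes() call.
 */
static int __maybe_unused example_block_rsv_cycle(struct btrfs_fs_info *fs_info)
{
	struct btrfs_block_rsv rsv;
	int ret;

	btrfs_init_metadata_block_rsv(fs_info, &rsv, BTRFS_BLOCK_RSV_TEMP);

	/* Reserve: bytes land in space_info->bytes_may_use and in ->reserved/->size. */
	ret = btrfs_block_rsv_add(fs_info, &rsv, fs_info->nodesize,
				  BTRFS_RESERVE_FLUSH_ALL);
	if (ret)
		return ret;

	/* Use: one tree block's worth comes out of ->reserved. */
	ret = btrfs_block_rsv_use_bytes(&rsv, fs_info->nodesize);

	/*
	 * Finish: (u64)-1 releases everything still held; the excess refills
	 * the delayed refs rsv / global rsv or returns to bytes_may_use.
	 */
	btrfs_block_rsv_release(fs_info, &rsv, (u64)-1, NULL);
	return ret;
}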

static u64 block_rsv_release_bytes(struct btrfs_fs_info *fs_info,
				   struct btrfs_block_rsv *block_rsv,
				   struct btrfs_block_rsv *dest, u64 num_bytes,
				   u64 *qgroup_to_release_ret)
{
	struct btrfs_space_info *space_info = block_rsv->space_info;
	u64 qgroup_to_release = 0;
	u64 ret;

	spin_lock(&block_rsv->lock);
	/* A num_bytes of (u64)-1 means "drain the reserve completely". */
	if (num_bytes == (u64)-1) {
		num_bytes = block_rsv->size;
		qgroup_to_release = block_rsv->qgroup_rsv_size;
	}
	/*
	 * Shrink the target size and work out how much of ->reserved is now
	 * excess that can be handed back.
	 */
	block_rsv->size -= num_bytes;
	if (block_rsv->reserved >= block_rsv->size) {
		num_bytes = block_rsv->reserved - block_rsv->size;
		block_rsv->reserved = block_rsv->size;
		block_rsv->full = true;
	} else {
		num_bytes = 0;
	}
	if (qgroup_to_release_ret &&
	    block_rsv->qgroup_rsv_reserved >= block_rsv->qgroup_rsv_size) {
		qgroup_to_release = block_rsv->qgroup_rsv_reserved -
				    block_rsv->qgroup_rsv_size;
		block_rsv->qgroup_rsv_reserved = block_rsv->qgroup_rsv_size;
	} else {
		qgroup_to_release = 0;
	}
	spin_unlock(&block_rsv->lock);

	ret = num_bytes;
	if (num_bytes > 0) {
		/* Top up @dest first, then return the remainder to the space_info. */
		if (dest) {
			spin_lock(&dest->lock);
			if (!dest->full) {
				u64 bytes_to_add;

				bytes_to_add = dest->size - dest->reserved;
				bytes_to_add = min(num_bytes, bytes_to_add);
				dest->reserved += bytes_to_add;
				if (dest->reserved >= dest->size)
					dest->full = true;
				num_bytes -= bytes_to_add;
			}
			spin_unlock(&dest->lock);
		}
		if (num_bytes)
			btrfs_space_info_free_bytes_may_use(fs_info,
							    space_info,
							    num_bytes);
	}
	if (qgroup_to_release_ret)
		*qgroup_to_release_ret = qgroup_to_release;
	return ret;
}

int btrfs_block_rsv_migrate(struct btrfs_block_rsv *src,
			    struct btrfs_block_rsv *dst, u64 num_bytes,
			    bool update_size)
{
	int ret;

	ret = btrfs_block_rsv_use_bytes(src, num_bytes);
	if (ret)
		return ret;

	btrfs_block_rsv_add_bytes(dst, num_bytes, update_size);
	return 0;
}
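
/*
 * Illustrative sketch of btrfs_block_rsv_migrate(): moving bytes that are
 * already reserved from one bucket to another without touching the
 * space_info counters. The helper is hypothetical; real users include the
 * delayed inode code, which migrates bytes out of the transaction reserve.
 */
static int __maybe_unused example_migrate_from_trans(struct btrfs_trans_handle *trans,
						     struct btrfs_block_rsv *dst,
						     u64 num_bytes)
{
	/*
	 * With update_size set, both ->reserved and ->size of @dst grow; on
	 * failure the source reserve is left untouched and -ENOSPC is
	 * returned.
	 */
	return btrfs_block_rsv_migrate(trans->block_rsv, dst, num_bytes, true);
}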

void btrfs_init_block_rsv(struct btrfs_block_rsv *rsv, enum btrfs_rsv_type type)
{
	memset(rsv, 0, sizeof(*rsv));
	spin_lock_init(&rsv->lock);
	rsv->type = type;
}

void btrfs_init_metadata_block_rsv(struct btrfs_fs_info *fs_info,
				   struct btrfs_block_rsv *rsv,
				   enum btrfs_rsv_type type)
{
	btrfs_init_block_rsv(rsv, type);
	rsv->space_info = btrfs_find_space_info(fs_info,
						BTRFS_BLOCK_GROUP_METADATA);
}

struct btrfs_block_rsv *btrfs_alloc_block_rsv(struct btrfs_fs_info *fs_info,
					      enum btrfs_rsv_type type)
{
	struct btrfs_block_rsv *block_rsv;

	block_rsv = kmalloc(sizeof(*block_rsv), GFP_NOFS);
	if (!block_rsv)
		return NULL;

	btrfs_init_metadata_block_rsv(fs_info, block_rsv, type);
	return block_rsv;
}

void btrfs_free_block_rsv(struct btrfs_fs_info *fs_info,
			  struct btrfs_block_rsv *rsv)
{
	if (!rsv)
		return;
	btrfs_block_rsv_release(fs_info, rsv, (u64)-1, NULL);
	kfree(rsv);
}

int btrfs_block_rsv_add(struct btrfs_fs_info *fs_info,
			struct btrfs_block_rsv *block_rsv, u64 num_bytes,
			enum btrfs_reserve_flush_enum flush)
{
	int ret;

	if (num_bytes == 0)
		return 0;

	ret = btrfs_reserve_metadata_bytes(fs_info, block_rsv, num_bytes, flush);
	if (!ret)
		btrfs_block_rsv_add_bytes(block_rsv, num_bytes, true);

	return ret;
}

int btrfs_block_rsv_check(struct btrfs_block_rsv *block_rsv, int min_percent)
{
	u64 num_bytes = 0;
	int ret = -ENOSPC;

	spin_lock(&block_rsv->lock);
	num_bytes = mult_perc(block_rsv->size, min_percent);
	if (block_rsv->reserved >= num_bytes)
		ret = 0;
	spin_unlock(&block_rsv->lock);

	return ret;
}
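
/*
 * Illustrative sketch of btrfs_block_rsv_check(): it asks "is ->reserved at
 * least min_percent of ->size?". A hypothetical caller gating work on the
 * global reserve being at least half full (the 50 is an arbitrary example
 * threshold) could do:
 */
static bool __maybe_unused example_global_rsv_half_full(struct btrfs_fs_info *fs_info)
{
	/* Returns 0 when ->reserved >= 50% of ->size, -ENOSPC otherwise. */
	return btrfs_block_rsv_check(&fs_info->global_block_rsv, 50) == 0;
}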

int btrfs_block_rsv_refill(struct btrfs_fs_info *fs_info,
			   struct btrfs_block_rsv *block_rsv, u64 num_bytes,
			   enum btrfs_reserve_flush_enum flush)
{
	int ret = -ENOSPC;

	if (!block_rsv)
		return 0;

	spin_lock(&block_rsv->lock);
	if (block_rsv->reserved >= num_bytes)
		ret = 0;
	else
		/* Only reserve the shortfall, not the full amount. */
		num_bytes -= block_rsv->reserved;
	spin_unlock(&block_rsv->lock);

	if (!ret)
		return 0;

	ret = btrfs_reserve_metadata_bytes(fs_info, block_rsv, num_bytes, flush);
	if (!ret) {
		/* A refill tops up ->reserved without growing ->size. */
		btrfs_block_rsv_add_bytes(block_rsv, num_bytes, false);
		return 0;
	}

	return ret;
}
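
/*
 * Illustrative sketch of the add/refill distinction: btrfs_block_rsv_add()
 * reserves num_bytes *more* and grows ->size, while btrfs_block_rsv_refill()
 * only tops ->reserved back up *to* num_bytes and leaves ->size alone. The
 * helper below is hypothetical, illustration only.
 */
static int __maybe_unused example_keep_one_item_reserved(struct btrfs_fs_info *fs_info,
							 struct btrfs_block_rsv *rsv)
{
	/* No-op (returns 0) if ->reserved already covers one item's worth. */
	return btrfs_block_rsv_refill(fs_info, rsv,
				      btrfs_calc_metadata_size(fs_info, 1),
				      BTRFS_RESERVE_FLUSH_ALL);
}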

u64 btrfs_block_rsv_release(struct btrfs_fs_info *fs_info,
			    struct btrfs_block_rsv *block_rsv, u64 num_bytes,
			    u64 *qgroup_to_release)
{
	struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
	struct btrfs_block_rsv *delayed_rsv = &fs_info->delayed_refs_rsv;
	struct btrfs_block_rsv *target = NULL;

	/*
	 * If we are the delayed_rsv then push to the global rsv, otherwise dump
	 * into the delayed rsv if it is not full.
	 */
	if (block_rsv == delayed_rsv)
		target = global_rsv;
	else if (block_rsv != global_rsv && !btrfs_block_rsv_full(delayed_rsv))
		target = delayed_rsv;

	if (target && block_rsv->space_info != target->space_info)
		target = NULL;

	return block_rsv_release_bytes(fs_info, block_rsv, target, num_bytes,
				       qgroup_to_release);
}

int btrfs_block_rsv_use_bytes(struct btrfs_block_rsv *block_rsv, u64 num_bytes)
{
	int ret = -ENOSPC;

	spin_lock(&block_rsv->lock);
	if (block_rsv->reserved >= num_bytes) {
		block_rsv->reserved -= num_bytes;
		if (block_rsv->reserved < block_rsv->size)
			block_rsv->full = false;
		ret = 0;
	}
	spin_unlock(&block_rsv->lock);
	return ret;
}

void btrfs_block_rsv_add_bytes(struct btrfs_block_rsv *block_rsv,
			       u64 num_bytes, bool update_size)
{
	spin_lock(&block_rsv->lock);
	block_rsv->reserved += num_bytes;
	if (update_size)
		block_rsv->size += num_bytes;
	else if (block_rsv->reserved >= block_rsv->size)
		block_rsv->full = true;
	spin_unlock(&block_rsv->lock);
}

void btrfs_update_global_block_rsv(struct btrfs_fs_info *fs_info)
{
	struct btrfs_block_rsv *block_rsv = &fs_info->global_block_rsv;
	struct btrfs_space_info *sinfo = block_rsv->space_info;
	struct btrfs_root *root, *tmp;
	u64 num_bytes = btrfs_root_used(&fs_info->tree_root->root_item);
	unsigned int min_items = 1;

	/*
	 * The global block rsv is based on the size of the extent tree, the
	 * checksum tree and the root tree. If the fs is empty we want to set
	 * it to a minimal amount for safety.
	 *
	 * We are also going to need to modify the minimum of the tree root and
	 * any global roots we could touch.
	 */
	read_lock(&fs_info->global_root_lock);
	rbtree_postorder_for_each_entry_safe(root, tmp, &fs_info->global_root_tree,
					     rb_node) {
		if (root->root_key.objectid == BTRFS_EXTENT_TREE_OBJECTID ||
		    root->root_key.objectid == BTRFS_CSUM_TREE_OBJECTID ||
		    root->root_key.objectid == BTRFS_FREE_SPACE_TREE_OBJECTID) {
			num_bytes += btrfs_root_used(&root->root_item);
			min_items++;
		}
	}
	read_unlock(&fs_info->global_root_lock);

	if (btrfs_fs_compat_ro(fs_info, BLOCK_GROUP_TREE)) {
		num_bytes += btrfs_root_used(&fs_info->block_group_root->root_item);
		min_items++;
	}

	/*
	 * We also want to reserve enough space so we can do the fallback
	 * global reserve for an unlink, which is an additional
	 * BTRFS_UNLINK_METADATA_UNITS items.
	 *
	 * We additionally need space for the delayed ref updates from the
	 * unlink, so add BTRFS_UNLINK_METADATA_UNITS units for delayed refs,
	 * one for each unlink metadata item.
	 */
	min_items += BTRFS_UNLINK_METADATA_UNITS;

	num_bytes = max_t(u64, num_bytes,
			  btrfs_calc_insert_metadata_size(fs_info, min_items) +
			  btrfs_calc_delayed_ref_bytes(fs_info,
						       BTRFS_UNLINK_METADATA_UNITS));

	spin_lock(&sinfo->lock);
	spin_lock(&block_rsv->lock);

	block_rsv->size = min_t(u64, num_bytes, SZ_512M);

	if (block_rsv->reserved < block_rsv->size) {
		num_bytes = block_rsv->size - block_rsv->reserved;
		btrfs_space_info_update_bytes_may_use(fs_info, sinfo,
						      num_bytes);
		block_rsv->reserved = block_rsv->size;
	} else if (block_rsv->reserved > block_rsv->size) {
		num_bytes = block_rsv->reserved - block_rsv->size;
		btrfs_space_info_update_bytes_may_use(fs_info, sinfo,
						      -num_bytes);
		block_rsv->reserved = block_rsv->size;
		btrfs_try_granting_tickets(fs_info, sinfo);
	}

	block_rsv->full = (block_rsv->reserved == block_rsv->size);

	if (block_rsv->size >= sinfo->total_bytes)
		sinfo->force_alloc = CHUNK_ALLOC_FORCE;
	spin_unlock(&block_rsv->lock);
	spin_unlock(&sinfo->lock);
}
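
/*
 * Worked example of the sizing logic above: the target is
 *
 *	size = min(max(bytes used by the counted roots,
 *		       insert size for min_items items +
 *		       delayed ref bytes for BTRFS_UNLINK_METADATA_UNITS refs),
 *		   SZ_512M)
 *
 * so an empty filesystem still gets a floor large enough for the unlink
 * fallback described above, while a heavily used one is capped at 512M.
 * (The shape of the formula follows directly from the code above; the
 * concrete byte values depend on the nodesize.)
 */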

void btrfs_init_root_block_rsv(struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;

	switch (root->root_key.objectid) {
	case BTRFS_CSUM_TREE_OBJECTID:
	case BTRFS_EXTENT_TREE_OBJECTID:
	case BTRFS_FREE_SPACE_TREE_OBJECTID:
	case BTRFS_BLOCK_GROUP_TREE_OBJECTID:
		root->block_rsv = &fs_info->delayed_refs_rsv;
		break;
	case BTRFS_ROOT_TREE_OBJECTID:
	case BTRFS_DEV_TREE_OBJECTID:
	case BTRFS_QUOTA_TREE_OBJECTID:
		root->block_rsv = &fs_info->global_block_rsv;
		break;
	case BTRFS_CHUNK_TREE_OBJECTID:
		root->block_rsv = &fs_info->chunk_block_rsv;
		break;
	default:
		root->block_rsv = NULL;
		break;
	}
}

void btrfs_init_global_block_rsv(struct btrfs_fs_info *fs_info)
{
	struct btrfs_space_info *space_info;

	space_info = btrfs_find_space_info(fs_info, BTRFS_BLOCK_GROUP_SYSTEM);
	fs_info->chunk_block_rsv.space_info = space_info;

	space_info = btrfs_find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
	fs_info->global_block_rsv.space_info = space_info;
	fs_info->trans_block_rsv.space_info = space_info;
	fs_info->empty_block_rsv.space_info = space_info;
	fs_info->delayed_block_rsv.space_info = space_info;
	fs_info->delayed_refs_rsv.space_info = space_info;

	btrfs_update_global_block_rsv(fs_info);
}

void btrfs_release_global_block_rsv(struct btrfs_fs_info *fs_info)
{
	btrfs_block_rsv_release(fs_info, &fs_info->global_block_rsv, (u64)-1,
				NULL);
	WARN_ON(fs_info->trans_block_rsv.size > 0);
	WARN_ON(fs_info->trans_block_rsv.reserved > 0);
	WARN_ON(fs_info->chunk_block_rsv.size > 0);
	WARN_ON(fs_info->chunk_block_rsv.reserved > 0);
	WARN_ON(fs_info->delayed_block_rsv.size > 0);
	WARN_ON(fs_info->delayed_block_rsv.reserved > 0);
	WARN_ON(fs_info->delayed_refs_rsv.reserved > 0);
	WARN_ON(fs_info->delayed_refs_rsv.size > 0);
}

static struct btrfs_block_rsv *get_block_rsv(
					const struct btrfs_trans_handle *trans,
					const struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_block_rsv *block_rsv = NULL;

	/*
	 * Pick the transaction's rsv for shareable roots, the uuid root and
	 * csum insertion; fall back to the root's own rsv, and finally to the
	 * empty rsv (reserve from the space_info on demand).
	 */
	if (test_bit(BTRFS_ROOT_SHAREABLE, &root->state) ||
	    (root == fs_info->uuid_root) ||
	    (trans->adding_csums &&
	     root->root_key.objectid == BTRFS_CSUM_TREE_OBJECTID))
		block_rsv = trans->block_rsv;

	if (!block_rsv)
		block_rsv = root->block_rsv;

	if (!block_rsv)
		block_rsv = &fs_info->empty_block_rsv;

	return block_rsv;
}

struct btrfs_block_rsv *btrfs_use_block_rsv(struct btrfs_trans_handle *trans,
					    struct btrfs_root *root,
					    u32 blocksize)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_block_rsv *block_rsv;
	struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
	int ret;
	bool global_updated = false;

	block_rsv = get_block_rsv(trans, root);

	if (unlikely(block_rsv->size == 0))
		goto try_reserve;
again:
	ret = btrfs_block_rsv_use_bytes(block_rsv, blocksize);
	if (!ret)
		return block_rsv;

	if (block_rsv->failfast)
		return ERR_PTR(ret);

	if (block_rsv->type == BTRFS_BLOCK_RSV_GLOBAL && !global_updated) {
		global_updated = true;
		btrfs_update_global_block_rsv(fs_info);
		goto again;
	}

	/*
	 * The global reserve still exists to save us from ourselves, so don't
	 * warn_on if we are short on our delayed refs reserve.
	 */
	if (block_rsv->type != BTRFS_BLOCK_RSV_DELREFS &&
	    btrfs_test_opt(fs_info, ENOSPC_DEBUG)) {
		static DEFINE_RATELIMIT_STATE(_rs,
					      DEFAULT_RATELIMIT_INTERVAL * 10,
					      /*DEFAULT_RATELIMIT_BURST*/ 1);
		if (__ratelimit(&_rs))
			WARN(1, KERN_DEBUG
				"BTRFS: block rsv %d returned %d\n",
				block_rsv->type, ret);
	}
try_reserve:
	ret = btrfs_reserve_metadata_bytes(fs_info, block_rsv, blocksize,
					   BTRFS_RESERVE_NO_FLUSH);
	if (!ret)
		return block_rsv;
	/*
	 * If we couldn't reserve metadata bytes, try and use some from
	 * the global reserve if its space info is the same as the global
	 * reserve's.
	 */
	if (block_rsv->type != BTRFS_BLOCK_RSV_GLOBAL &&
	    block_rsv->space_info == global_rsv->space_info) {
		ret = btrfs_block_rsv_use_bytes(global_rsv, blocksize);
		if (!ret)
			return global_rsv;
	}

	/*
	 * All hope is lost, but of course our reservations are overly
	 * pessimistic, so instead of possibly having an ENOSPC abort here, try
	 * one last time to force a reservation if there's enough actual space
	 * on disk to make the reservation.
	 */
	ret = btrfs_reserve_metadata_bytes(fs_info, block_rsv, blocksize,
					   BTRFS_RESERVE_FLUSH_EMERGENCY);
	if (!ret)
		return block_rsv;

	return ERR_PTR(ret);
}
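
/*
 * Illustrative sketch of the ->failfast pattern from the header comment: a
 * hypothetical unbounded-work loop (truncate-style) that consumes its
 * temporary reserve in batches and re-reserves whenever the reserve runs
 * dry. Both helpers are illustration-only stand-ins, not btrfs API; with
 * ->failfast set, btrfs_use_block_rsv() returns the error to its caller
 * instead of dipping into other reserves, which is what lets the loop back
 * off cleanly.
 */
static int __maybe_unused example_do_one_batch(struct btrfs_fs_info *fs_info,
					       struct btrfs_block_rsv *rsv)
{
	/* Pretend one batch of work costs one metadata item. */
	return btrfs_block_rsv_use_bytes(rsv, btrfs_calc_metadata_size(fs_info, 1));
}

static int __maybe_unused example_failfast_loop(struct btrfs_fs_info *fs_info)
{
	struct btrfs_block_rsv *rsv;
	int ret;

	rsv = btrfs_alloc_block_rsv(fs_info, BTRFS_BLOCK_RSV_TEMP);
	if (!rsv)
		return -ENOMEM;
	rsv->failfast = true;

	do {
		/* Top the bucket back up before the next batch. */
		ret = btrfs_block_rsv_refill(fs_info, rsv,
					     btrfs_calc_metadata_size(fs_info, 1),
					     BTRFS_RESERVE_FLUSH_ALL);
		if (ret)
			break;
		ret = example_do_one_batch(fs_info, rsv);
	} while (ret == -ENOSPC);

	btrfs_free_block_rsv(fs_info, rsv);
	return ret;
}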

int btrfs_check_trunc_cache_free_space(struct btrfs_fs_info *fs_info,
				       struct btrfs_block_rsv *rsv)
{
	u64 needed_bytes;
	int ret;

	/* 1 for slack space, 1 for updating the inode */
	needed_bytes = btrfs_calc_insert_metadata_size(fs_info, 1) +
		       btrfs_calc_metadata_size(fs_info, 1);

	spin_lock(&rsv->lock);
	if (rsv->reserved < needed_bytes)
		ret = -ENOSPC;
	else
		ret = 0;
	spin_unlock(&rsv->lock);
	return ret;
}