// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2009 Oracle. All rights reserved.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/sort.h>
#include "messages.h"
#include "ctree.h"
#include "delayed-ref.h"
#include "transaction.h"
#include "qgroup.h"
#include "space-info.h"
#include "tree-mod-log.h"
#include "fs.h"

struct kmem_cache *btrfs_delayed_ref_head_cachep;
struct kmem_cache *btrfs_delayed_tree_ref_cachep;
struct kmem_cache *btrfs_delayed_data_ref_cachep;
struct kmem_cache *btrfs_delayed_extent_op_cachep;
/*
 * Delayed back reference update tracking.  For subvolume trees we queue up
 * extent allocations and backref maintenance for delayed processing.  This
 * avoids deep call chains where we add extents in the middle of
 * btrfs_search_slot(), and it allows us to buffer up frequently modified
 * backrefs in an rb tree instead of hammering updates on the extent
 * allocation tree.
 */

bool btrfs_check_space_for_delayed_refs(struct btrfs_fs_info *fs_info)
{
	struct btrfs_block_rsv *delayed_refs_rsv = &fs_info->delayed_refs_rsv;
	struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
	bool ret = false;
	u64 reserved;

	spin_lock(&global_rsv->lock);
	reserved = global_rsv->reserved;
	spin_unlock(&global_rsv->lock);

	/*
	 * Since the global reserve is just kind of magic we don't really want
	 * to rely on it to save our bacon, so if our size is more than the
	 * delayed_refs_rsv and the global rsv then it's time to think about
	 * bailing.
	 */
	spin_lock(&delayed_refs_rsv->lock);
	reserved += delayed_refs_rsv->reserved;
	if (delayed_refs_rsv->size >= reserved)
		ret = true;
	spin_unlock(&delayed_refs_rsv->lock);
	return ret;
}
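
/*
 * A worked example for the check above (illustrative numbers only): with
 * delayed_refs_rsv->size = 8M, global_rsv->reserved = 2M and
 * delayed_refs_rsv->reserved = 4M we compare 8M >= (2M + 4M) and return
 * true: the tracked updates have outgrown what is actually reserved, so
 * callers (e.g. btrfs_should_end_transaction()) should start winding down.
 */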

/*
 * Release a ref head's reservation.
 *
 * @fs_info: the filesystem
 * @nr:      number of items to drop
 *
 * Drops the delayed ref head's count from the delayed refs rsv and frees
 * any excess reservation we had.
 */
void btrfs_delayed_refs_rsv_release(struct btrfs_fs_info *fs_info, int nr)
{
	struct btrfs_block_rsv *block_rsv = &fs_info->delayed_refs_rsv;
	const u64 num_bytes = btrfs_calc_delayed_ref_bytes(fs_info, nr);
	u64 released = 0;

	released = btrfs_block_rsv_release(fs_info, block_rsv, num_bytes, NULL);
	if (released)
		trace_btrfs_space_reservation(fs_info, "delayed_refs_rsv",
					      0, released, 0);
}
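
/*
 * Note: btrfs_calc_delayed_ref_bytes() converts a count of delayed ref
 * "items" into a byte reservation.  The exact item-to-bytes formula lives
 * in delayed-ref.h and varies between kernel versions, so treat it as an
 * opaque conversion here.
 */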

/*
 * Adjust the size of the delayed refs rsv.
 *
 * This is to be called any time we may have adjusted
 * trans->delayed_ref_updates; it will calculate the additional size and
 * add it to the delayed_refs_rsv.
 */
void btrfs_update_delayed_refs_rsv(struct btrfs_trans_handle *trans)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_block_rsv *delayed_rsv = &fs_info->delayed_refs_rsv;
	u64 num_bytes;

	if (!trans->delayed_ref_updates)
		return;

	num_bytes = btrfs_calc_delayed_ref_bytes(fs_info,
						 trans->delayed_ref_updates);

	spin_lock(&delayed_rsv->lock);
	delayed_rsv->size += num_bytes;
	delayed_rsv->full = false;
	spin_unlock(&delayed_rsv->lock);
	trans->delayed_ref_updates = 0;
}

/*
 * Transfer bytes to our delayed refs rsv.
 *
 * @fs_info:   the filesystem
 * @src:       source block rsv to transfer from
 * @num_bytes: number of bytes to transfer
 *
 * This transfers up to the num_bytes amount from the src rsv to the
 * delayed_refs_rsv.  Any extra bytes are returned to the space info.
 */
void btrfs_migrate_to_delayed_refs_rsv(struct btrfs_fs_info *fs_info,
				       struct btrfs_block_rsv *src,
				       u64 num_bytes)
{
	struct btrfs_block_rsv *delayed_refs_rsv = &fs_info->delayed_refs_rsv;
	u64 to_free = 0;

	spin_lock(&src->lock);
	src->reserved -= num_bytes;
	src->size -= num_bytes;
	spin_unlock(&src->lock);

	spin_lock(&delayed_refs_rsv->lock);
	if (delayed_refs_rsv->size > delayed_refs_rsv->reserved) {
		u64 delta = delayed_refs_rsv->size -
			delayed_refs_rsv->reserved;
		if (num_bytes > delta) {
			to_free = num_bytes - delta;
			num_bytes = delta;
		}
	} else {
		to_free = num_bytes;
		num_bytes = 0;
	}

	if (num_bytes)
		delayed_refs_rsv->reserved += num_bytes;
	if (delayed_refs_rsv->reserved >= delayed_refs_rsv->size)
		delayed_refs_rsv->full = true;
	spin_unlock(&delayed_refs_rsv->lock);

	if (num_bytes)
		trace_btrfs_space_reservation(fs_info, "delayed_refs_rsv",
					      0, num_bytes, 1);
	if (to_free)
		btrfs_space_info_free_bytes_may_use(fs_info,
				delayed_refs_rsv->space_info, to_free);
}
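
/*
 * A walk-through of the transfer above: if the rsv is short by
 * delta = size - reserved = 1M and the caller migrates num_bytes = 3M,
 * only 1M is added to delayed_refs_rsv->reserved (making the rsv full)
 * and the remaining to_free = 2M is handed back to the space_info as
 * unused.
 */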

/*
 * Refill based on our delayed refs usage.
 *
 * @fs_info: the filesystem
 * @flush:   control how we can flush for this reservation.
 *
 * This will refill the delayed block_rsv up to one item's worth of space
 * and will return -ENOSPC if we can't make the reservation.
 */
int btrfs_delayed_refs_rsv_refill(struct btrfs_fs_info *fs_info,
				  enum btrfs_reserve_flush_enum flush)
{
	struct btrfs_block_rsv *block_rsv = &fs_info->delayed_refs_rsv;
	u64 limit = btrfs_calc_delayed_ref_bytes(fs_info, 1);
	u64 num_bytes = 0;
	int ret = -ENOSPC;

	spin_lock(&block_rsv->lock);
	if (block_rsv->reserved < block_rsv->size) {
		num_bytes = block_rsv->size - block_rsv->reserved;
		num_bytes = min(num_bytes, limit);
	}
	spin_unlock(&block_rsv->lock);

	if (!num_bytes)
		return 0;

	ret = btrfs_reserve_metadata_bytes(fs_info, block_rsv, num_bytes, flush);
	if (ret)
		return ret;
	btrfs_block_rsv_add_bytes(block_rsv, num_bytes, false);
	trace_btrfs_space_reservation(fs_info, "delayed_refs_rsv",
				      0, num_bytes, 1);
	return 0;
}

/*
 * Compare two delayed tree backrefs with the same bytenr and type.
 */
static int comp_tree_refs(struct btrfs_delayed_tree_ref *ref1,
			  struct btrfs_delayed_tree_ref *ref2)
{
	if (ref1->node.type == BTRFS_TREE_BLOCK_REF_KEY) {
		if (ref1->root < ref2->root)
			return -1;
		if (ref1->root > ref2->root)
			return 1;
	} else {
		if (ref1->parent < ref2->parent)
			return -1;
		if (ref1->parent > ref2->parent)
			return 1;
	}
	return 0;
}

/*
 * Compare two delayed data backrefs with the same bytenr and type.
 */
static int comp_data_refs(struct btrfs_delayed_data_ref *ref1,
			  struct btrfs_delayed_data_ref *ref2)
{
	if (ref1->node.type == BTRFS_EXTENT_DATA_REF_KEY) {
		if (ref1->root < ref2->root)
			return -1;
		if (ref1->root > ref2->root)
			return 1;
		if (ref1->objectid < ref2->objectid)
			return -1;
		if (ref1->objectid > ref2->objectid)
			return 1;
		if (ref1->offset < ref2->offset)
			return -1;
		if (ref1->offset > ref2->offset)
			return 1;
	} else {
		if (ref1->parent < ref2->parent)
			return -1;
		if (ref1->parent > ref2->parent)
			return 1;
	}
	return 0;
}

static int comp_refs(struct btrfs_delayed_ref_node *ref1,
		     struct btrfs_delayed_ref_node *ref2,
		     bool check_seq)
{
	int ret = 0;

	if (ref1->type < ref2->type)
		return -1;
	if (ref1->type > ref2->type)
		return 1;
	if (ref1->type == BTRFS_TREE_BLOCK_REF_KEY ||
	    ref1->type == BTRFS_SHARED_BLOCK_REF_KEY)
		ret = comp_tree_refs(btrfs_delayed_node_to_tree_ref(ref1),
				     btrfs_delayed_node_to_tree_ref(ref2));
	else
		ret = comp_data_refs(btrfs_delayed_node_to_data_ref(ref1),
				     btrfs_delayed_node_to_data_ref(ref2));
	if (ret)
		return ret;
	if (check_seq) {
		if (ref1->seq < ref2->seq)
			return -1;
		if (ref1->seq > ref2->seq)
			return 1;
	}
	return 0;
}
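
/*
 * The comparators above define the total order used by each head's ref
 * rbtree: first the ref type, then the type specific fields (root or
 * parent, plus objectid/offset for data refs) and finally, when check_seq
 * is set, the tree mod log sequence number, so refs differing only by seq
 * sort as distinct nodes.
 */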

/* Insert a new ref head into the head ref rbtree. */
static struct btrfs_delayed_ref_head *htree_insert(struct rb_root_cached *root,
						   struct rb_node *node)
{
	struct rb_node **p = &root->rb_root.rb_node;
	struct rb_node *parent_node = NULL;
	struct btrfs_delayed_ref_head *entry;
	struct btrfs_delayed_ref_head *ins;
	u64 bytenr;
	bool leftmost = true;

	ins = rb_entry(node, struct btrfs_delayed_ref_head, href_node);
	bytenr = ins->bytenr;
	while (*p) {
		parent_node = *p;
		entry = rb_entry(parent_node, struct btrfs_delayed_ref_head,
				 href_node);

		if (bytenr < entry->bytenr) {
			p = &(*p)->rb_left;
		} else if (bytenr > entry->bytenr) {
			p = &(*p)->rb_right;
			leftmost = false;
		} else {
			return entry;
		}
	}

	rb_link_node(node, parent_node, p);
	rb_insert_color_cached(node, root, leftmost);
	return NULL;
}
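
/*
 * htree_insert() returns the existing head instead of inserting when one
 * is already present for the same bytenr; the caller
 * (add_delayed_ref_head()) is then responsible for merging the new head
 * into the existing one.
 */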

static struct btrfs_delayed_ref_node *tree_insert(struct rb_root_cached *root,
						  struct btrfs_delayed_ref_node *ins)
{
	struct rb_node **p = &root->rb_root.rb_node;
	struct rb_node *node = &ins->ref_node;
	struct rb_node *parent_node = NULL;
	struct btrfs_delayed_ref_node *entry;
	bool leftmost = true;

	while (*p) {
		int comp;

		parent_node = *p;
		entry = rb_entry(parent_node, struct btrfs_delayed_ref_node,
				 ref_node);
		comp = comp_refs(ins, entry, true);
		if (comp < 0) {
			p = &(*p)->rb_left;
		} else if (comp > 0) {
			p = &(*p)->rb_right;
			leftmost = false;
		} else {
			return entry;
		}
	}

	rb_link_node(node, parent_node, p);
	rb_insert_color_cached(node, root, leftmost);
	return NULL;
}

static struct btrfs_delayed_ref_head *find_first_ref_head(
		struct btrfs_delayed_ref_root *dr)
{
	struct rb_node *n;
	struct btrfs_delayed_ref_head *entry;

	n = rb_first_cached(&dr->href_root);
	if (!n)
		return NULL;

	entry = rb_entry(n, struct btrfs_delayed_ref_head, href_node);

	return entry;
}

/*
 * Find a head entry based on bytenr.  This returns the delayed ref head if
 * it was able to find one, or NULL if nothing was in that spot.  If
 * return_bigger is given, the next bigger entry is returned if no exact
 * match is found.
 */
static struct btrfs_delayed_ref_head *find_ref_head(
		struct btrfs_delayed_ref_root *dr, u64 bytenr,
		bool return_bigger)
{
	struct rb_root *root = &dr->href_root.rb_root;
	struct rb_node *n;
	struct btrfs_delayed_ref_head *entry;

	n = root->rb_node;
	entry = NULL;
	while (n) {
		entry = rb_entry(n, struct btrfs_delayed_ref_head, href_node);

		if (bytenr < entry->bytenr)
			n = n->rb_left;
		else if (bytenr > entry->bytenr)
			n = n->rb_right;
		else
			return entry;
	}
	if (entry && return_bigger) {
		if (bytenr > entry->bytenr) {
			n = rb_next(&entry->href_node);
			if (!n)
				return NULL;
			entry = rb_entry(n, struct btrfs_delayed_ref_head,
					 href_node);
		}
		return entry;
	}
	return NULL;
}
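
/*
 * The return_bigger mode is what lets btrfs_select_ref_head() below walk
 * heads in bytenr order: it restarts the search at run_delayed_start and
 * takes the next head at or after that address instead of requiring an
 * exact match.
 */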

int btrfs_delayed_ref_lock(struct btrfs_delayed_ref_root *delayed_refs,
			   struct btrfs_delayed_ref_head *head)
{
	lockdep_assert_held(&delayed_refs->lock);
	if (mutex_trylock(&head->mutex))
		return 0;

	refcount_inc(&head->refs);
	spin_unlock(&delayed_refs->lock);

	mutex_lock(&head->mutex);
	spin_lock(&delayed_refs->lock);
	if (RB_EMPTY_NODE(&head->href_node)) {
		mutex_unlock(&head->mutex);
		btrfs_put_delayed_ref_head(head);
		return -EAGAIN;
	}
	btrfs_put_delayed_ref_head(head);
	return 0;
}
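
/*
 * If we had to drop delayed_refs->lock to sleep on the head's mutex, the
 * head may have been run and erased from the rbtree in the meantime; the
 * RB_EMPTY_NODE() check above catches that case and -EAGAIN tells the
 * caller to look the head up again.
 */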

static inline void drop_delayed_ref(struct btrfs_delayed_ref_root *delayed_refs,
				    struct btrfs_delayed_ref_head *head,
				    struct btrfs_delayed_ref_node *ref)
{
	lockdep_assert_held(&head->lock);
	rb_erase_cached(&ref->ref_node, &head->ref_tree);
	RB_CLEAR_NODE(&ref->ref_node);
	if (!list_empty(&ref->add_list))
		list_del(&ref->add_list);
	btrfs_put_delayed_ref(ref);
	atomic_dec(&delayed_refs->num_entries);
}

static bool merge_ref(struct btrfs_delayed_ref_root *delayed_refs,
		      struct btrfs_delayed_ref_head *head,
		      struct btrfs_delayed_ref_node *ref,
		      u64 seq)
{
	struct btrfs_delayed_ref_node *next;
	struct rb_node *node = rb_next(&ref->ref_node);
	bool done = false;

	while (!done && node) {
		int mod;

		next = rb_entry(node, struct btrfs_delayed_ref_node, ref_node);
		node = rb_next(node);
		if (seq && next->seq >= seq)
			break;
		if (comp_refs(ref, next, false))
			break;

		if (ref->action == next->action) {
			mod = next->ref_mod;
		} else {
			if (ref->ref_mod < next->ref_mod) {
				swap(ref, next);
				done = true;
			}
			mod = -next->ref_mod;
		}

		drop_delayed_ref(delayed_refs, head, next);
		ref->ref_mod += mod;
		if (ref->ref_mod == 0) {
			drop_delayed_ref(delayed_refs, head, ref);
			done = true;
		} else {
			/*
			 * Can't have multiples of the same ref on a tree block.
			 */
			WARN_ON(ref->type == BTRFS_TREE_BLOCK_REF_KEY ||
				ref->type == BTRFS_SHARED_BLOCK_REF_KEY);
		}
	}

	return done;
}
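
/*
 * Merge example: an ADD ref with ref_mod 1 followed by an otherwise equal
 * DROP ref with ref_mod 1 gives mod = -1; the DROP is removed and the
 * ADD's ref_mod becomes 0, so it is dropped as well and the two operations
 * cancel without ever touching the extent tree.
 */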

void btrfs_merge_delayed_refs(struct btrfs_fs_info *fs_info,
			      struct btrfs_delayed_ref_root *delayed_refs,
			      struct btrfs_delayed_ref_head *head)
{
	struct btrfs_delayed_ref_node *ref;
	struct rb_node *node;
	u64 seq = 0;

	lockdep_assert_held(&head->lock);

	if (RB_EMPTY_ROOT(&head->ref_tree.rb_root))
		return;

	/* We don't have too many refs to merge for data. */
	if (head->is_data)
		return;

	seq = btrfs_tree_mod_log_lowest_seq(fs_info);
again:
	for (node = rb_first_cached(&head->ref_tree); node;
	     node = rb_next(node)) {
		ref = rb_entry(node, struct btrfs_delayed_ref_node, ref_node);
		if (seq && ref->seq >= seq)
			continue;
		if (merge_ref(delayed_refs, head, ref, seq))
			goto again;
	}
}

int btrfs_check_delayed_seq(struct btrfs_fs_info *fs_info, u64 seq)
{
	int ret = 0;
	u64 min_seq = btrfs_tree_mod_log_lowest_seq(fs_info);

	if (min_seq != 0 && seq >= min_seq) {
		btrfs_debug(fs_info,
			    "holding back delayed_ref %llu, lowest is %llu",
			    seq, min_seq);
		ret = 1;
	}

	return ret;
}

struct btrfs_delayed_ref_head *btrfs_select_ref_head(
		struct btrfs_delayed_ref_root *delayed_refs)
{
	struct btrfs_delayed_ref_head *head;

	lockdep_assert_held(&delayed_refs->lock);
again:
	head = find_ref_head(delayed_refs, delayed_refs->run_delayed_start,
			     true);
	if (!head && delayed_refs->run_delayed_start != 0) {
		delayed_refs->run_delayed_start = 0;
		head = find_first_ref_head(delayed_refs);
	}
	if (!head)
		return NULL;

	while (head->processing) {
		struct rb_node *node;

		node = rb_next(&head->href_node);
		if (!node) {
			if (delayed_refs->run_delayed_start == 0)
				return NULL;
			delayed_refs->run_delayed_start = 0;
			goto again;
		}
		head = rb_entry(node, struct btrfs_delayed_ref_head,
				href_node);
	}

	head->processing = true;
	WARN_ON(delayed_refs->num_heads_ready == 0);
	delayed_refs->num_heads_ready--;
	delayed_refs->run_delayed_start = head->bytenr +
		head->num_bytes;
	return head;
}
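
/*
 * Selection above scans heads in bytenr order starting at
 * run_delayed_start, skips heads that are already being processed, and
 * wraps around to offset 0 at most once before giving up, so repeated
 * calls effectively round-robin across the whole rbtree.
 */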

void btrfs_delete_ref_head(struct btrfs_delayed_ref_root *delayed_refs,
			   struct btrfs_delayed_ref_head *head)
{
	lockdep_assert_held(&delayed_refs->lock);
	lockdep_assert_held(&head->lock);

	rb_erase_cached(&head->href_node, &delayed_refs->href_root);
	RB_CLEAR_NODE(&head->href_node);
	atomic_dec(&delayed_refs->num_entries);
	delayed_refs->num_heads--;
	if (!head->processing)
		delayed_refs->num_heads_ready--;
}

/*
 * Helper to insert the ref_node into the head's ref tree, or to merge it
 * with an existing ref that compares equal.
 *
 * Return false if the ref was inserted.
 * Return true if the ref was merged into an existing one (and therefore
 * can be freed by the caller).
 */
static bool insert_delayed_ref(struct btrfs_delayed_ref_root *root,
			       struct btrfs_delayed_ref_head *href,
			       struct btrfs_delayed_ref_node *ref)
{
	struct btrfs_delayed_ref_node *exist;
	int mod;

	spin_lock(&href->lock);
	exist = tree_insert(&href->ref_tree, ref);
	if (!exist) {
		if (ref->action == BTRFS_ADD_DELAYED_REF)
			list_add_tail(&ref->add_list, &href->ref_add_list);
		atomic_inc(&root->num_entries);
		spin_unlock(&href->lock);
		return false;
	}

	/* Now we are sure we can merge */
	if (exist->action == ref->action) {
		mod = ref->ref_mod;
	} else {
		/* Need to change action */
		if (exist->ref_mod < ref->ref_mod) {
			exist->action = ref->action;
			mod = -exist->ref_mod;
			exist->ref_mod = ref->ref_mod;
			if (ref->action == BTRFS_ADD_DELAYED_REF)
				list_add_tail(&exist->add_list,
					      &href->ref_add_list);
			else if (ref->action == BTRFS_DROP_DELAYED_REF) {
				ASSERT(!list_empty(&exist->add_list));
				list_del(&exist->add_list);
			} else {
				ASSERT(0);
			}
		} else
			mod = -ref->ref_mod;
	}
	exist->ref_mod += mod;

	/* Remove the existing ref if its ref_mod dropped to zero. */
	if (exist->ref_mod == 0)
		drop_delayed_ref(root, href, exist);
	spin_unlock(&href->lock);
	return true;
}
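
/*
 * For example, inserting a DROP ref while an equal-comparing ADD ref with
 * ref_mod 1 is already queued: the actions differ and exist->ref_mod is
 * not smaller, so mod = -1 and the existing ref's ref_mod drops to 0,
 * removing it; the caller frees the new ref since true is returned.
 */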

/*
 * Helper function to update the accounting in the head ref.  The existing
 * and update head refs must have the same bytenr.
 */
static noinline void update_existing_head_ref(struct btrfs_trans_handle *trans,
					      struct btrfs_delayed_ref_head *existing,
					      struct btrfs_delayed_ref_head *update)
{
	struct btrfs_delayed_ref_root *delayed_refs =
		&trans->transaction->delayed_refs;
	struct btrfs_fs_info *fs_info = trans->fs_info;
	int old_ref_mod;

	BUG_ON(existing->is_data != update->is_data);

	spin_lock(&existing->lock);
	if (update->must_insert_reserved) {
		/*
		 * If the extent was freed and then reallocated before the
		 * delayed ref entries were processed, we can end up with an
		 * existing head ref without the must_insert_reserved flag
		 * set.  Set it again here.
		 */
		existing->must_insert_reserved = update->must_insert_reserved;

		/*
		 * Update the num_bytes so we make sure the accounting is done
		 * correctly.
		 */
		existing->num_bytes = update->num_bytes;
	}

	if (update->extent_op) {
		if (!existing->extent_op) {
			existing->extent_op = update->extent_op;
		} else {
			if (update->extent_op->update_key) {
				memcpy(&existing->extent_op->key,
				       &update->extent_op->key,
				       sizeof(update->extent_op->key));
				existing->extent_op->update_key = true;
			}
			if (update->extent_op->update_flags) {
				existing->extent_op->flags_to_set |=
					update->extent_op->flags_to_set;
				existing->extent_op->update_flags = true;
			}
			btrfs_free_delayed_extent_op(update->extent_op);
		}
	}
	/*
	 * Update the reference mod on the head to reflect this new operation.
	 * We only need the lock for this case because we could be processing
	 * it currently; for refs we just added we know we're a-ok.
	 */
	old_ref_mod = existing->total_ref_mod;
	existing->ref_mod += update->ref_mod;
	existing->total_ref_mod += update->ref_mod;

	/*
	 * If we are going from a positive ref mod to a negative or vice
	 * versa we need to make sure to adjust pending_csums accordingly.
	 */
	if (existing->is_data) {
		u64 csum_leaves =
			btrfs_csum_bytes_to_leaves(fs_info,
						   existing->num_bytes);

		if (existing->total_ref_mod >= 0 && old_ref_mod < 0) {
			delayed_refs->pending_csums -= existing->num_bytes;
			btrfs_delayed_refs_rsv_release(fs_info, csum_leaves);
		}
		if (existing->total_ref_mod < 0 && old_ref_mod >= 0) {
			delayed_refs->pending_csums += existing->num_bytes;
			trans->delayed_ref_updates += csum_leaves;
		}
	}

	spin_unlock(&existing->lock);
}
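
/*
 * The pending_csums accounting above in numbers: a head with
 * total_ref_mod -1 (a pending free) counts its bytes in pending_csums
 * because running it will delete csum items; if a later ADD brings
 * total_ref_mod back to 0, the csum leaves reserved for that deletion are
 * released again.
 */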

static void init_delayed_ref_head(struct btrfs_delayed_ref_head *head_ref,
				  struct btrfs_qgroup_extent_record *qrecord,
				  u64 bytenr, u64 num_bytes, u64 ref_root,
				  u64 reserved, int action, bool is_data,
				  bool is_system)
{
	int count_mod = 1;
	bool must_insert_reserved = false;

	/* If reserved is provided, it must be a data extent. */
	BUG_ON(!is_data && reserved);

	switch (action) {
	case BTRFS_UPDATE_DELAYED_HEAD:
		count_mod = 0;
		break;
	case BTRFS_DROP_DELAYED_REF:
		/*
		 * The head node stores the sum of all the mods, so dropping a
		 * ref should drop the sum in the head node by one.
		 */
		count_mod = -1;
		break;
	case BTRFS_ADD_DELAYED_EXTENT:
		/*
		 * BTRFS_ADD_DELAYED_EXTENT means that we need to update the
		 * reserved accounting when the extent is finally added, or if
		 * a later modification deletes the delayed ref without ever
		 * inserting the extent into the extent allocation tree.
		 * ref->must_insert_reserved is the flag used to record that
		 * accounting mods are required.
		 *
		 * Once we record must_insert_reserved, switch the action to
		 * BTRFS_ADD_DELAYED_REF because other special casing is not
		 * required.
		 */
		must_insert_reserved = true;
		break;
	}

	refcount_set(&head_ref->refs, 1);
	head_ref->bytenr = bytenr;
	head_ref->num_bytes = num_bytes;
	head_ref->ref_mod = count_mod;
	head_ref->must_insert_reserved = must_insert_reserved;
	head_ref->is_data = is_data;
	head_ref->is_system = is_system;
	head_ref->ref_tree = RB_ROOT_CACHED;
	INIT_LIST_HEAD(&head_ref->ref_add_list);
	RB_CLEAR_NODE(&head_ref->href_node);
	head_ref->processing = false;
	head_ref->total_ref_mod = count_mod;
	spin_lock_init(&head_ref->lock);
	mutex_init(&head_ref->mutex);

	if (qrecord) {
		if (ref_root && reserved) {
			qrecord->data_rsv = reserved;
			qrecord->data_rsv_refroot = ref_root;
		}
		qrecord->bytenr = bytenr;
		qrecord->num_bytes = num_bytes;
		qrecord->old_roots = NULL;
	}
}

/*
 * Helper function to actually insert a head node into the rbtree.  This
 * does all the dirty work in terms of maintaining the correct overall
 * modification count.
 */
static noinline struct btrfs_delayed_ref_head *
add_delayed_ref_head(struct btrfs_trans_handle *trans,
		     struct btrfs_delayed_ref_head *head_ref,
		     struct btrfs_qgroup_extent_record *qrecord,
		     int action, bool *qrecord_inserted_ret)
{
	struct btrfs_delayed_ref_head *existing;
	struct btrfs_delayed_ref_root *delayed_refs;
	bool qrecord_inserted = false;

	delayed_refs = &trans->transaction->delayed_refs;

	/* Record qgroup extent info if provided */
	if (qrecord) {
		if (btrfs_qgroup_trace_extent_nolock(trans->fs_info,
						     delayed_refs, qrecord))
			kfree(qrecord);
		else
			qrecord_inserted = true;
	}

	trace_add_delayed_ref_head(trans->fs_info, head_ref, action);

	existing = htree_insert(&delayed_refs->href_root,
				&head_ref->href_node);
	if (existing) {
		update_existing_head_ref(trans, existing, head_ref);
		/*
		 * We've updated the existing ref, free the newly allocated
		 * ref.
		 */
		kmem_cache_free(btrfs_delayed_ref_head_cachep, head_ref);
		head_ref = existing;
	} else {
		if (head_ref->is_data && head_ref->ref_mod < 0) {
			delayed_refs->pending_csums += head_ref->num_bytes;
			trans->delayed_ref_updates +=
				btrfs_csum_bytes_to_leaves(trans->fs_info,
							   head_ref->num_bytes);
		}
		delayed_refs->num_heads++;
		delayed_refs->num_heads_ready++;
		atomic_inc(&delayed_refs->num_entries);
		trans->delayed_ref_updates++;
	}
	if (qrecord_inserted_ret)
		*qrecord_inserted_ret = qrecord_inserted;

	return head_ref;
}
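
/*
 * Note on qrecord ownership: if btrfs_qgroup_trace_extent_nolock() did not
 * take the record (non-zero return, e.g. a record for this bytenr already
 * exists), it is freed right here; otherwise it now belongs to
 * delayed_refs and must not be freed by the caller.
 */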

/*
 * init_delayed_ref_common - Initialize the structure which represents a
 *			     modification to an extent.
 *
 * @fs_info:    Internal to the mounted filesystem mount structure.
 *
 * @ref:	The structure which is going to be initialized.
 *
 * @bytenr:	The logical address of the extent for which a modification
 *		is going to be recorded.
 *
 * @num_bytes:  Size of the extent whose modification is being recorded.
 *
 * @ref_root:	The id of the root where this modification has originated,
 *		this can be either one of the well-known metadata trees or
 *		the subvolume id which references this extent.
 *
 * @action:	Can be one of BTRFS_ADD_DELAYED_REF/BTRFS_DROP_DELAYED_REF
 *		or BTRFS_ADD_DELAYED_EXTENT
 *
 * @ref_type:	Holds the type of the extent which is being recorded, can
 *		be one of BTRFS_SHARED_BLOCK_REF_KEY/BTRFS_TREE_BLOCK_REF_KEY
 *		when recording a metadata extent or
 *		BTRFS_SHARED_DATA_REF_KEY/BTRFS_EXTENT_DATA_REF_KEY when
 *		recording a data extent
 */
static void init_delayed_ref_common(struct btrfs_fs_info *fs_info,
				    struct btrfs_delayed_ref_node *ref,
				    u64 bytenr, u64 num_bytes, u64 ref_root,
				    int action, u8 ref_type)
{
	u64 seq = 0;

	if (action == BTRFS_ADD_DELAYED_EXTENT)
		action = BTRFS_ADD_DELAYED_REF;

	if (is_fstree(ref_root))
		seq = atomic64_read(&fs_info->tree_mod_seq);

	refcount_set(&ref->refs, 1);
	ref->bytenr = bytenr;
	ref->num_bytes = num_bytes;
	ref->ref_mod = 1;
	ref->action = action;
	ref->seq = seq;
	ref->type = ref_type;
	RB_CLEAR_NODE(&ref->ref_node);
	INIT_LIST_HEAD(&ref->add_list);
}
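
/*
 * The seq stamp taken from tree_mod_seq for fs-tree roots is what ties
 * delayed refs to the tree mod log: btrfs_check_delayed_seq() holds back
 * refs whose seq is at or above the lowest active sequence number, and
 * btrfs_merge_delayed_refs() skips such refs, so that in-flight backref
 * walks keep seeing a consistent view.
 */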

/*
 * Add a delayed tree ref.  This does all of the accounting required to
 * make sure the delayed ref is eventually processed before this
 * transaction commits.
 */
int btrfs_add_delayed_tree_ref(struct btrfs_trans_handle *trans,
			       struct btrfs_ref *generic_ref,
			       struct btrfs_delayed_extent_op *extent_op)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_delayed_tree_ref *ref;
	struct btrfs_delayed_ref_head *head_ref;
	struct btrfs_delayed_ref_root *delayed_refs;
	struct btrfs_qgroup_extent_record *record = NULL;
	bool qrecord_inserted;
	bool is_system;
	bool merged;
	int action = generic_ref->action;
	int level = generic_ref->tree_ref.level;
	u64 bytenr = generic_ref->bytenr;
	u64 num_bytes = generic_ref->len;
	u64 parent = generic_ref->parent;
	u8 ref_type;

	is_system = (generic_ref->tree_ref.owning_root == BTRFS_CHUNK_TREE_OBJECTID);

	ASSERT(generic_ref->type == BTRFS_REF_METADATA && generic_ref->action);
	ref = kmem_cache_alloc(btrfs_delayed_tree_ref_cachep, GFP_NOFS);
	if (!ref)
		return -ENOMEM;

	head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
	if (!head_ref) {
		kmem_cache_free(btrfs_delayed_tree_ref_cachep, ref);
		return -ENOMEM;
	}

	if (test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) &&
	    !generic_ref->skip_qgroup) {
		record = kzalloc(sizeof(*record), GFP_NOFS);
		if (!record) {
			kmem_cache_free(btrfs_delayed_tree_ref_cachep, ref);
			kmem_cache_free(btrfs_delayed_ref_head_cachep, head_ref);
			return -ENOMEM;
		}
	}

	if (parent)
		ref_type = BTRFS_SHARED_BLOCK_REF_KEY;
	else
		ref_type = BTRFS_TREE_BLOCK_REF_KEY;

	init_delayed_ref_common(fs_info, &ref->node, bytenr, num_bytes,
				generic_ref->tree_ref.owning_root, action,
				ref_type);
	ref->root = generic_ref->tree_ref.owning_root;
	ref->parent = parent;
	ref->level = level;

	init_delayed_ref_head(head_ref, record, bytenr, num_bytes,
			      generic_ref->tree_ref.owning_root, 0, action,
			      false, is_system);
	head_ref->extent_op = extent_op;

	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);

	/*
	 * insert both the head node and the new ref without dropping
	 * the spin lock
	 */
	head_ref = add_delayed_ref_head(trans, head_ref, record,
					action, &qrecord_inserted);

	merged = insert_delayed_ref(delayed_refs, head_ref, &ref->node);
	spin_unlock(&delayed_refs->lock);

	/*
	 * Need to update the delayed_refs_rsv with any changes we may have
	 * made.
	 */
	btrfs_update_delayed_refs_rsv(trans);

	trace_add_delayed_tree_ref(fs_info, &ref->node, ref,
				   action == BTRFS_ADD_DELAYED_EXTENT ?
				   BTRFS_ADD_DELAYED_REF : action);
	if (merged)
		kmem_cache_free(btrfs_delayed_tree_ref_cachep, ref);

	if (qrecord_inserted)
		btrfs_qgroup_trace_extent_post(trans, record);

	return 0;
}

/*
 * Add a delayed data ref.  It is similar to btrfs_add_delayed_tree_ref().
 */
int btrfs_add_delayed_data_ref(struct btrfs_trans_handle *trans,
			       struct btrfs_ref *generic_ref,
			       u64 reserved)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_delayed_data_ref *ref;
	struct btrfs_delayed_ref_head *head_ref;
	struct btrfs_delayed_ref_root *delayed_refs;
	struct btrfs_qgroup_extent_record *record = NULL;
	bool qrecord_inserted;
	int action = generic_ref->action;
	bool merged;
	u64 bytenr = generic_ref->bytenr;
	u64 num_bytes = generic_ref->len;
	u64 parent = generic_ref->parent;
	u64 ref_root = generic_ref->data_ref.owning_root;
	u64 owner = generic_ref->data_ref.ino;
	u64 offset = generic_ref->data_ref.offset;
	u8 ref_type;

	ASSERT(generic_ref->type == BTRFS_REF_DATA && action);
	ref = kmem_cache_alloc(btrfs_delayed_data_ref_cachep, GFP_NOFS);
	if (!ref)
		return -ENOMEM;

	if (parent)
		ref_type = BTRFS_SHARED_DATA_REF_KEY;
	else
		ref_type = BTRFS_EXTENT_DATA_REF_KEY;
	init_delayed_ref_common(fs_info, &ref->node, bytenr, num_bytes,
				ref_root, action, ref_type);
	ref->root = ref_root;
	ref->parent = parent;
	ref->objectid = owner;
	ref->offset = offset;

	head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
	if (!head_ref) {
		kmem_cache_free(btrfs_delayed_data_ref_cachep, ref);
		return -ENOMEM;
	}

	if (test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) &&
	    !generic_ref->skip_qgroup) {
		record = kzalloc(sizeof(*record), GFP_NOFS);
		if (!record) {
			kmem_cache_free(btrfs_delayed_data_ref_cachep, ref);
			kmem_cache_free(btrfs_delayed_ref_head_cachep,
					head_ref);
			return -ENOMEM;
		}
	}

	init_delayed_ref_head(head_ref, record, bytenr, num_bytes, ref_root,
			      reserved, action, true, false);
	head_ref->extent_op = NULL;

	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);

	/*
	 * insert both the head node and the new ref without dropping
	 * the spin lock
	 */
	head_ref = add_delayed_ref_head(trans, head_ref, record,
					action, &qrecord_inserted);

	merged = insert_delayed_ref(delayed_refs, head_ref, &ref->node);
	spin_unlock(&delayed_refs->lock);

	/*
	 * Need to update the delayed_refs_rsv with any changes we may have
	 * made.
	 */
	btrfs_update_delayed_refs_rsv(trans);

	trace_add_delayed_data_ref(trans->fs_info, &ref->node, ref,
				   action == BTRFS_ADD_DELAYED_EXTENT ?
				   BTRFS_ADD_DELAYED_REF : action);
	if (merged)
		kmem_cache_free(btrfs_delayed_data_ref_cachep, ref);

	if (qrecord_inserted)
		return btrfs_qgroup_trace_extent_post(trans, record);
	return 0;
}

int btrfs_add_delayed_extent_op(struct btrfs_trans_handle *trans,
				u64 bytenr, u64 num_bytes,
				struct btrfs_delayed_extent_op *extent_op)
{
	struct btrfs_delayed_ref_head *head_ref;
	struct btrfs_delayed_ref_root *delayed_refs;

	head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
	if (!head_ref)
		return -ENOMEM;

	init_delayed_ref_head(head_ref, NULL, bytenr, num_bytes, 0, 0,
			      BTRFS_UPDATE_DELAYED_HEAD, false, false);
	head_ref->extent_op = extent_op;

	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);

	add_delayed_ref_head(trans, head_ref, NULL, BTRFS_UPDATE_DELAYED_HEAD,
			     NULL);

	spin_unlock(&delayed_refs->lock);

	/*
	 * Need to update the delayed_refs_rsv with any changes we may have
	 * made.
	 */
	btrfs_update_delayed_refs_rsv(trans);
	return 0;
}
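
/*
 * BTRFS_UPDATE_DELAYED_HEAD queues only an extent_op (a key or flags
 * update on the extent item) with count_mod = 0, so no reference counts
 * change; it simply rides the delayed ref machinery so the extent item is
 * modified the next time the head is run.
 */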

/*
 * This does a simple search for the head node for a given extent.  Returns
 * the head node if found, or NULL if not.
 */
struct btrfs_delayed_ref_head *
btrfs_find_delayed_ref_head(struct btrfs_delayed_ref_root *delayed_refs, u64 bytenr)
{
	lockdep_assert_held(&delayed_refs->lock);

	return find_ref_head(delayed_refs, bytenr, false);
}

void __cold btrfs_delayed_ref_exit(void)
{
	kmem_cache_destroy(btrfs_delayed_ref_head_cachep);
	kmem_cache_destroy(btrfs_delayed_tree_ref_cachep);
	kmem_cache_destroy(btrfs_delayed_data_ref_cachep);
	kmem_cache_destroy(btrfs_delayed_extent_op_cachep);
}

int __init btrfs_delayed_ref_init(void)
{
	btrfs_delayed_ref_head_cachep = kmem_cache_create(
			"btrfs_delayed_ref_head",
			sizeof(struct btrfs_delayed_ref_head), 0,
			SLAB_MEM_SPREAD, NULL);
	if (!btrfs_delayed_ref_head_cachep)
		goto fail;

	btrfs_delayed_tree_ref_cachep = kmem_cache_create(
			"btrfs_delayed_tree_ref",
			sizeof(struct btrfs_delayed_tree_ref), 0,
			SLAB_MEM_SPREAD, NULL);
	if (!btrfs_delayed_tree_ref_cachep)
		goto fail;

	btrfs_delayed_data_ref_cachep = kmem_cache_create(
			"btrfs_delayed_data_ref",
			sizeof(struct btrfs_delayed_data_ref), 0,
			SLAB_MEM_SPREAD, NULL);
	if (!btrfs_delayed_data_ref_cachep)
		goto fail;

	btrfs_delayed_extent_op_cachep = kmem_cache_create(
			"btrfs_delayed_extent_op",
			sizeof(struct btrfs_delayed_extent_op), 0,
			SLAB_MEM_SPREAD, NULL);
	if (!btrfs_delayed_extent_op_cachep)
		goto fail;

	return 0;
fail:
	btrfs_delayed_ref_exit();
	return -ENOMEM;
}