Line data Source code
1 : // SPDX-License-Identifier: GPL-2.0
2 : /*
3 : * Copyright (C) 2008 Red Hat. All rights reserved.
4 : */
5 :
6 : #include <linux/pagemap.h>
7 : #include <linux/sched.h>
8 : #include <linux/sched/signal.h>
9 : #include <linux/slab.h>
10 : #include <linux/math64.h>
11 : #include <linux/ratelimit.h>
12 : #include <linux/error-injection.h>
13 : #include <linux/sched/mm.h>
14 : #include "ctree.h"
15 : #include "fs.h"
16 : #include "messages.h"
17 : #include "misc.h"
18 : #include "free-space-cache.h"
19 : #include "transaction.h"
20 : #include "disk-io.h"
21 : #include "extent_io.h"
22 : #include "volumes.h"
23 : #include "space-info.h"
24 : #include "delalloc-space.h"
25 : #include "block-group.h"
26 : #include "discard.h"
27 : #include "subpage.h"
28 : #include "inode-item.h"
29 : #include "accessors.h"
30 : #include "file-item.h"
31 : #include "file.h"
32 : #include "super.h"
33 :
34 : #define BITS_PER_BITMAP (PAGE_SIZE * 8UL)
35 : #define MAX_CACHE_BYTES_PER_GIG SZ_64K
36 : #define FORCE_EXTENT_THRESHOLD SZ_1M
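/*
 * Editorial note (not part of the original source): with 4 KiB pages,
 * BITS_PER_BITMAP is 32768, so one bitmap entry covers
 * 32768 * ctl->unit bytes; 128 MiB when the unit (sectorsize) is 4 KiB.
 * MAX_CACHE_BYTES_PER_GIG caps the cache's in-memory footprint at
 * 64 KiB per 1 GiB of block group space, and free extents of at least
 * FORCE_EXTENT_THRESHOLD (1 MiB) are pulled back out of bitmaps into
 * extent entries (see recalculate_thresholds() below).
 */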
37 :
38 : static struct kmem_cache *btrfs_free_space_cachep;
39 : static struct kmem_cache *btrfs_free_space_bitmap_cachep;
40 :
41 : struct btrfs_trim_range {
42 : u64 start;
43 : u64 bytes;
44 : struct list_head list;
45 : };
46 :
47 : static int link_free_space(struct btrfs_free_space_ctl *ctl,
48 : struct btrfs_free_space *info);
49 : static void unlink_free_space(struct btrfs_free_space_ctl *ctl,
50 : struct btrfs_free_space *info, bool update_stat);
51 : static int search_bitmap(struct btrfs_free_space_ctl *ctl,
52 : struct btrfs_free_space *bitmap_info, u64 *offset,
53 : u64 *bytes, bool for_alloc);
54 : static void free_bitmap(struct btrfs_free_space_ctl *ctl,
55 : struct btrfs_free_space *bitmap_info);
56 : static void bitmap_clear_bits(struct btrfs_free_space_ctl *ctl,
57 : struct btrfs_free_space *info, u64 offset,
58 : u64 bytes, bool update_stats);
59 :
60 26372 : static void __btrfs_remove_free_space_cache(struct btrfs_free_space_ctl *ctl)
61 : {
62 26372 : struct btrfs_free_space *info;
63 26372 : struct rb_node *node;
64 :
65 159065 : while ((node = rb_last(&ctl->free_space_offset)) != NULL) {
66 132693 : info = rb_entry(node, struct btrfs_free_space, offset_index);
67 132693 : if (!info->bitmap) {
68 132401 : unlink_free_space(ctl, info, true);
69 132401 : kmem_cache_free(btrfs_free_space_cachep, info);
70 : } else {
71 292 : free_bitmap(ctl, info);
72 : }
73 :
74 132693 : cond_resched_lock(&ctl->tree_lock);
75 : }
76 26372 : }
77 :
78 1069 : static struct inode *__lookup_free_space_inode(struct btrfs_root *root,
79 : struct btrfs_path *path,
80 : u64 offset)
81 : {
82 1069 : struct btrfs_fs_info *fs_info = root->fs_info;
83 1069 : struct btrfs_key key;
84 1069 : struct btrfs_key location;
85 1069 : struct btrfs_disk_key disk_key;
86 1069 : struct btrfs_free_space_header *header;
87 1069 : struct extent_buffer *leaf;
88 1069 : struct inode *inode = NULL;
89 1069 : unsigned nofs_flag;
90 1069 : int ret;
91 :
92 1069 : key.objectid = BTRFS_FREE_SPACE_OBJECTID;
93 1069 : key.offset = offset;
94 1069 : key.type = 0;
95 :
96 1069 : ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
97 1069 : if (ret < 0)
98 0 : return ERR_PTR(ret);
99 1069 : if (ret > 0) {
100 1062 : btrfs_release_path(path);
101 1062 : return ERR_PTR(-ENOENT);
102 : }
103 :
104 7 : leaf = path->nodes[0];
105 7 : header = btrfs_item_ptr(leaf, path->slots[0],
106 : struct btrfs_free_space_header);
107 7 : btrfs_free_space_key(leaf, header, &disk_key);
108 7 : btrfs_disk_key_to_cpu(&location, &disk_key);
109 7 : btrfs_release_path(path);
110 :
111 : /*
112 : * We are often under a trans handle at this point, so we need to make
113 : * sure NOFS is set to keep us from deadlocking.
114 : */
115 7 : nofs_flag = memalloc_nofs_save();
116 7 : inode = btrfs_iget_path(fs_info->sb, location.objectid, root, path);
117 7 : btrfs_release_path(path);
118 7 : memalloc_nofs_restore(nofs_flag);
119 7 : if (IS_ERR(inode))
120 : return inode;
121 :
122 7 : mapping_set_gfp_mask(inode->i_mapping,
123 : mapping_gfp_constraint(inode->i_mapping,
124 : ~(__GFP_FS | __GFP_HIGHMEM)));
125 :
126 7 : return inode;
127 : }
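
/*
 * Editorial sketch (hypothetical helper, not in free-space-cache.c) of
 * the memalloc_nofs_save()/restore() idiom used above: every allocation
 * made inside the saved scope implicitly behaves as GFP_NOFS, so
 * lookups done while holding a transaction handle cannot recurse into
 * filesystem reclaim and deadlock.
 */
static void *alloc_under_transaction(size_t size)
{
	unsigned int nofs_flag;
	void *p;

	nofs_flag = memalloc_nofs_save();	/* enter NOFS scope */
	p = kmalloc(size, GFP_KERNEL);		/* treated as GFP_NOFS here */
	memalloc_nofs_restore(nofs_flag);	/* leave NOFS scope */
	return p;
}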
128 :
129 1145 : struct inode *lookup_free_space_inode(struct btrfs_block_group *block_group,
130 : struct btrfs_path *path)
131 : {
132 1145 : struct btrfs_fs_info *fs_info = block_group->fs_info;
133 1145 : struct inode *inode = NULL;
134 1145 : u32 flags = BTRFS_INODE_NODATASUM | BTRFS_INODE_NODATACOW;
135 :
136 1145 : spin_lock(&block_group->lock);
137 1145 : if (block_group->inode)
138 76 : inode = igrab(block_group->inode);
139 1145 : spin_unlock(&block_group->lock);
140 1145 : if (inode)
141 : return inode;
142 :
143 1069 : inode = __lookup_free_space_inode(fs_info->tree_root, path,
144 : block_group->start);
145 1069 : if (IS_ERR(inode))
146 : return inode;
147 :
148 7 : spin_lock(&block_group->lock);
149 7 : if (!((BTRFS_I(inode)->flags & flags) == flags)) {
150 0 : btrfs_info(fs_info, "Old style space inode found, converting.");
151 0 : BTRFS_I(inode)->flags |= BTRFS_INODE_NODATASUM |
152 : BTRFS_INODE_NODATACOW;
153 0 : block_group->disk_cache_state = BTRFS_DC_CLEAR;
154 : }
155 :
156 7 : if (!test_and_set_bit(BLOCK_GROUP_FLAG_IREF, &block_group->runtime_flags))
157 7 : block_group->inode = igrab(inode);
158 7 : spin_unlock(&block_group->lock);
159 :
160 7 : return inode;
161 : }
162 :
163 4 : static int __create_free_space_inode(struct btrfs_root *root,
164 : struct btrfs_trans_handle *trans,
165 : struct btrfs_path *path,
166 : u64 ino, u64 offset)
167 : {
168 4 : struct btrfs_key key;
169 4 : struct btrfs_disk_key disk_key;
170 4 : struct btrfs_free_space_header *header;
171 4 : struct btrfs_inode_item *inode_item;
172 4 : struct extent_buffer *leaf;
173 : /* We inline CRCs for the free disk space cache */
174 4 : const u64 flags = BTRFS_INODE_NOCOMPRESS | BTRFS_INODE_PREALLOC |
175 : BTRFS_INODE_NODATASUM | BTRFS_INODE_NODATACOW;
176 4 : int ret;
177 :
178 4 : ret = btrfs_insert_empty_inode(trans, root, path, ino);
179 4 : if (ret)
180 : return ret;
181 :
182 4 : leaf = path->nodes[0];
183 4 : inode_item = btrfs_item_ptr(leaf, path->slots[0],
184 : struct btrfs_inode_item);
185 4 : btrfs_item_key(leaf, &disk_key, path->slots[0]);
186 4 : memzero_extent_buffer(leaf, (unsigned long)inode_item,
187 : sizeof(*inode_item));
188 4 : btrfs_set_inode_generation(leaf, inode_item, trans->transid);
189 4 : btrfs_set_inode_size(leaf, inode_item, 0);
190 4 : btrfs_set_inode_nbytes(leaf, inode_item, 0);
191 4 : btrfs_set_inode_uid(leaf, inode_item, 0);
192 4 : btrfs_set_inode_gid(leaf, inode_item, 0);
193 4 : btrfs_set_inode_mode(leaf, inode_item, S_IFREG | 0600);
194 4 : btrfs_set_inode_flags(leaf, inode_item, flags);
195 4 : btrfs_set_inode_nlink(leaf, inode_item, 1);
196 4 : btrfs_set_inode_transid(leaf, inode_item, trans->transid);
197 4 : btrfs_set_inode_block_group(leaf, inode_item, offset);
198 4 : btrfs_mark_buffer_dirty(leaf);
199 4 : btrfs_release_path(path);
200 :
201 4 : key.objectid = BTRFS_FREE_SPACE_OBJECTID;
202 4 : key.offset = offset;
203 4 : key.type = 0;
204 4 : ret = btrfs_insert_empty_item(trans, root, path, &key,
205 : sizeof(struct btrfs_free_space_header));
206 4 : if (ret < 0) {
207 0 : btrfs_release_path(path);
208 0 : return ret;
209 : }
210 :
211 4 : leaf = path->nodes[0];
212 4 : header = btrfs_item_ptr(leaf, path->slots[0],
213 : struct btrfs_free_space_header);
214 4 : memzero_extent_buffer(leaf, (unsigned long)header, sizeof(*header));
215 4 : btrfs_set_free_space_key(leaf, header, &disk_key);
216 4 : btrfs_mark_buffer_dirty(leaf);
217 4 : btrfs_release_path(path);
218 :
219 4 : return 0;
220 : }
221 :
222 4 : int create_free_space_inode(struct btrfs_trans_handle *trans,
223 : struct btrfs_block_group *block_group,
224 : struct btrfs_path *path)
225 : {
226 4 : int ret;
227 4 : u64 ino;
228 :
229 4 : ret = btrfs_get_free_objectid(trans->fs_info->tree_root, &ino);
230 4 : if (ret < 0)
231 : return ret;
232 :
233 4 : return __create_free_space_inode(trans->fs_info->tree_root, trans, path,
234 : ino, block_group->start);
235 : }
236 :
237 : /*
238 : * inode is an optional sink: if it is NULL, btrfs_remove_free_space_inode
239 : * handles lookup, otherwise it takes ownership and iputs the inode.
240 : * Don't reuse an inode pointer after passing it into this function.
241 : */
242 538 : int btrfs_remove_free_space_inode(struct btrfs_trans_handle *trans,
243 : struct inode *inode,
244 : struct btrfs_block_group *block_group)
245 : {
246 538 : struct btrfs_path *path;
247 538 : struct btrfs_key key;
248 538 : int ret = 0;
249 :
250 538 : path = btrfs_alloc_path();
251 538 : if (!path)
252 : return -ENOMEM;
253 :
254 538 : if (!inode)
255 6 : inode = lookup_free_space_inode(block_group, path);
256 538 : if (IS_ERR(inode)) {
257 536 : if (PTR_ERR(inode) != -ENOENT)
258 0 : ret = PTR_ERR(inode);
259 536 : goto out;
260 : }
261 2 : ret = btrfs_orphan_add(trans, BTRFS_I(inode));
262 2 : if (ret) {
263 0 : btrfs_add_delayed_iput(BTRFS_I(inode));
264 0 : goto out;
265 : }
266 2 : clear_nlink(inode);
267 : /* One for the block groups ref */
268 2 : spin_lock(&block_group->lock);
269 2 : if (test_and_clear_bit(BLOCK_GROUP_FLAG_IREF, &block_group->runtime_flags)) {
270 2 : block_group->inode = NULL;
271 2 : spin_unlock(&block_group->lock);
272 2 : iput(inode);
273 : } else {
274 0 : spin_unlock(&block_group->lock);
275 : }
276 : /* One for the lookup ref */
277 2 : btrfs_add_delayed_iput(BTRFS_I(inode));
278 :
279 2 : key.objectid = BTRFS_FREE_SPACE_OBJECTID;
280 2 : key.type = 0;
281 2 : key.offset = block_group->start;
282 2 : ret = btrfs_search_slot(trans, trans->fs_info->tree_root, &key, path,
283 : -1, 1);
284 2 : if (ret) {
285 0 : if (ret > 0)
286 : ret = 0;
287 0 : goto out;
288 : }
289 2 : ret = btrfs_del_item(trans, trans->fs_info->tree_root, path);
290 538 : out:
291 538 : btrfs_free_path(path);
292 538 : return ret;
293 : }
294 :
295 12 : int btrfs_truncate_free_space_cache(struct btrfs_trans_handle *trans,
296 : struct btrfs_block_group *block_group,
297 : struct inode *vfs_inode)
298 : {
299 12 : struct btrfs_truncate_control control = {
300 : .inode = BTRFS_I(vfs_inode),
301 : .new_size = 0,
302 : .ino = btrfs_ino(BTRFS_I(vfs_inode)),
303 : .min_type = BTRFS_EXTENT_DATA_KEY,
304 : .clear_extent_range = true,
305 : };
306 : struct btrfs_inode *inode = BTRFS_I(vfs_inode);
307 12 : struct btrfs_root *root = inode->root;
308 12 : struct extent_state *cached_state = NULL;
309 12 : int ret = 0;
310 12 : bool locked = false;
311 :
312 12 : if (block_group) {
313 0 : struct btrfs_path *path = btrfs_alloc_path();
314 :
315 0 : if (!path) {
316 0 : ret = -ENOMEM;
317 0 : goto fail;
318 : }
319 0 : locked = true;
320 0 : mutex_lock(&trans->transaction->cache_write_mutex);
321 0 : if (!list_empty(&block_group->io_list)) {
322 0 : list_del_init(&block_group->io_list);
323 :
324 0 : btrfs_wait_cache_io(trans, block_group, path);
325 0 : btrfs_put_block_group(block_group);
326 : }
327 :
328 : /*
329 : * now that we've truncated the cache away, its no longer
330 : * setup or written
331 : */
332 0 : spin_lock(&block_group->lock);
333 0 : block_group->disk_cache_state = BTRFS_DC_CLEAR;
334 0 : spin_unlock(&block_group->lock);
335 0 : btrfs_free_path(path);
336 : }
337 :
338 12 : btrfs_i_size_write(inode, 0);
339 12 : truncate_pagecache(vfs_inode, 0);
340 :
341 12 : lock_extent(&inode->io_tree, 0, (u64)-1, &cached_state);
342 12 : btrfs_drop_extent_map_range(inode, 0, (u64)-1, false);
343 :
344 : /*
345 : * We skip the throttling logic for free space cache inodes, so we don't
346 : * need to check for -EAGAIN.
347 : */
348 12 : ret = btrfs_truncate_inode_items(trans, root, &control);
349 :
350 12 : inode_sub_bytes(&inode->vfs_inode, control.sub_bytes);
351 12 : btrfs_inode_safe_disk_i_size_write(inode, control.last_size);
352 :
353 12 : unlock_extent(&inode->io_tree, 0, (u64)-1, &cached_state);
354 12 : if (ret)
355 0 : goto fail;
356 :
357 12 : ret = btrfs_update_inode(trans, root, inode);
358 :
359 12 : fail:
360 12 : if (locked)
361 0 : mutex_unlock(&trans->transaction->cache_write_mutex);
362 12 : if (ret)
363 0 : btrfs_abort_transaction(trans, ret);
364 :
365 12 : return ret;
366 : }
367 :
368 1 : static void readahead_cache(struct inode *inode)
369 : {
370 1 : struct file_ra_state ra;
371 1 : unsigned long last_index;
372 :
373 1 : file_ra_state_init(&ra, inode->i_mapping);
374 1 : last_index = (i_size_read(inode) - 1) >> PAGE_SHIFT;
375 :
376 1 : page_cache_sync_readahead(inode->i_mapping, &ra, NULL, 0, last_index);
377 1 : }
378 :
379 41 : static int io_ctl_init(struct btrfs_io_ctl *io_ctl, struct inode *inode,
380 : int write)
381 : {
382 41 : int num_pages;
383 :
384 41 : num_pages = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
385 :
386 : /* Make sure we can fit our crcs and generation into the first page */
387 41 : if (write && (num_pages * sizeof(u32) + sizeof(u64)) > PAGE_SIZE)
388 : return -ENOSPC;
389 :
390 41 : memset(io_ctl, 0, sizeof(struct btrfs_io_ctl));
391 :
392 41 : io_ctl->pages = kcalloc(num_pages, sizeof(struct page *), GFP_NOFS);
393 41 : if (!io_ctl->pages)
394 : return -ENOMEM;
395 :
396 41 : io_ctl->num_pages = num_pages;
397 41 : io_ctl->fs_info = btrfs_sb(inode->i_sb);
398 41 : io_ctl->inode = inode;
399 :
400 41 : return 0;
401 : }
402 : ALLOW_ERROR_INJECTION(io_ctl_init, ERRNO);
403 :
404 : static void io_ctl_free(struct btrfs_io_ctl *io_ctl)
405 : {
406 41 : kfree(io_ctl->pages);
407 41 : io_ctl->pages = NULL;
408 : }
409 :
410 : static void io_ctl_unmap_page(struct btrfs_io_ctl *io_ctl)
411 : {
412 682 : if (io_ctl->cur) {
413 641 : io_ctl->cur = NULL;
414 641 : io_ctl->orig = NULL;
415 : }
416 : }
417 :
418 641 : static void io_ctl_map_page(struct btrfs_io_ctl *io_ctl, int clear)
419 : {
420 641 : ASSERT(io_ctl->index < io_ctl->num_pages);
421 641 : io_ctl->page = io_ctl->pages[io_ctl->index++];
422 641 : io_ctl->cur = page_address(io_ctl->page);
423 641 : io_ctl->orig = io_ctl->cur;
424 641 : io_ctl->size = PAGE_SIZE;
425 641 : if (clear)
426 640 : clear_page(io_ctl->cur);
427 641 : }
428 :
429 41 : static void io_ctl_drop_pages(struct btrfs_io_ctl *io_ctl)
430 : {
431 41 : int i;
432 :
433 41 : io_ctl_unmap_page(io_ctl);
434 :
435 697 : for (i = 0; i < io_ctl->num_pages; i++) {
436 656 : if (io_ctl->pages[i]) {
437 656 : btrfs_page_clear_checked(io_ctl->fs_info,
438 : io_ctl->pages[i],
439 : page_offset(io_ctl->pages[i]),
440 : PAGE_SIZE);
441 656 : unlock_page(io_ctl->pages[i]);
442 656 : put_page(io_ctl->pages[i]);
443 : }
444 : }
445 41 : }
446 :
447 41 : static int io_ctl_prepare_pages(struct btrfs_io_ctl *io_ctl, bool uptodate)
448 : {
449 41 : struct page *page;
450 41 : struct inode *inode = io_ctl->inode;
451 41 : gfp_t mask = btrfs_alloc_write_mask(inode->i_mapping);
452 41 : int i;
453 :
454 697 : for (i = 0; i < io_ctl->num_pages; i++) {
455 656 : int ret;
456 :
457 656 : page = find_or_create_page(inode->i_mapping, i, mask);
458 656 : if (!page) {
459 0 : io_ctl_drop_pages(io_ctl);
460 0 : return -ENOMEM;
461 : }
462 :
463 656 : ret = set_page_extent_mapped(page);
464 656 : if (ret < 0) {
465 0 : unlock_page(page);
466 0 : put_page(page);
467 0 : io_ctl_drop_pages(io_ctl);
468 0 : return ret;
469 : }
470 :
471 656 : io_ctl->pages[i] = page;
472 656 : if (uptodate && !PageUptodate(page)) {
473 0 : btrfs_read_folio(NULL, page_folio(page));
474 0 : lock_page(page);
475 0 : if (page->mapping != inode->i_mapping) {
476 0 : btrfs_err(BTRFS_I(inode)->root->fs_info,
477 : "free space cache page truncated");
478 0 : io_ctl_drop_pages(io_ctl);
479 0 : return -EIO;
480 : }
481 0 : if (!PageUptodate(page)) {
482 0 : btrfs_err(BTRFS_I(inode)->root->fs_info,
483 : "error reading free space cache");
484 0 : io_ctl_drop_pages(io_ctl);
485 0 : return -EIO;
486 : }
487 : }
488 : }
489 :
490 697 : for (i = 0; i < io_ctl->num_pages; i++)
491 656 : clear_page_dirty_for_io(io_ctl->pages[i]);
492 :
493 : return 0;
494 : }
495 :
496 40 : static void io_ctl_set_generation(struct btrfs_io_ctl *io_ctl, u64 generation)
497 : {
498 40 : io_ctl_map_page(io_ctl, 1);
499 :
500 : /*
501 : * Skip the csum areas. If we don't check crcs then we just have a
502 : * 64bit chunk at the front of the first page.
503 : */
504 40 : io_ctl->cur += (sizeof(u32) * io_ctl->num_pages);
505 40 : io_ctl->size -= sizeof(u64) + (sizeof(u32) * io_ctl->num_pages);
506 :
507 40 : put_unaligned_le64(generation, io_ctl->cur);
508 40 : io_ctl->cur += sizeof(u64);
509 40 : }
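
/*
 * Editorial note on the layout implied above: the first page of the
 * cache file begins with one u32 crc32c per page, immediately followed
 * by the le64 generation, and only then entry data:
 *
 *	[crc page 0][crc page 1]...[crc page N-1][le64 generation][entries...]
 *
 * That is why both the writer above and the reader below advance
 * io_ctl->cur by sizeof(u32) * num_pages before touching the
 * generation, and account for both in io_ctl->size.
 */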
510 :
511 1 : static int io_ctl_check_generation(struct btrfs_io_ctl *io_ctl, u64 generation)
512 : {
513 1 : u64 cache_gen;
514 :
515 : /*
516 : * Skip the crc area. If we don't check crcs then we just have a 64bit
517 : * chunk at the front of the first page.
518 : */
519 1 : io_ctl->cur += sizeof(u32) * io_ctl->num_pages;
520 1 : io_ctl->size -= sizeof(u64) + (sizeof(u32) * io_ctl->num_pages);
521 :
522 1 : cache_gen = get_unaligned_le64(io_ctl->cur);
523 1 : if (cache_gen != generation) {
524 0 : btrfs_err_rl(io_ctl->fs_info,
525 : "space cache generation (%llu) does not match inode (%llu)",
526 : cache_gen, generation);
527 0 : io_ctl_unmap_page(io_ctl);
528 0 : return -EIO;
529 : }
530 1 : io_ctl->cur += sizeof(u64);
531 1 : return 0;
532 : }
533 :
534 640 : static void io_ctl_set_crc(struct btrfs_io_ctl *io_ctl, int index)
535 : {
536 640 : u32 *tmp;
537 640 : u32 crc = ~(u32)0;
538 640 : unsigned offset = 0;
539 :
540 640 : if (index == 0)
541 40 : offset = sizeof(u32) * io_ctl->num_pages;
542 :
543 640 : crc = btrfs_crc32c(crc, io_ctl->orig + offset, PAGE_SIZE - offset);
544 640 : btrfs_crc32c_final(crc, (u8 *)&crc);
545 640 : io_ctl_unmap_page(io_ctl);
546 640 : tmp = page_address(io_ctl->pages[0]);
547 640 : tmp += index;
548 640 : *tmp = crc;
549 640 : }
550 :
551 1 : static int io_ctl_check_crc(struct btrfs_io_ctl *io_ctl, int index)
552 : {
553 1 : u32 *tmp, val;
554 1 : u32 crc = ~(u32)0;
555 1 : unsigned offset = 0;
556 :
557 1 : if (index == 0)
558 1 : offset = sizeof(u32) * io_ctl->num_pages;
559 :
560 1 : tmp = page_address(io_ctl->pages[0]);
561 1 : tmp += index;
562 1 : val = *tmp;
563 :
564 1 : io_ctl_map_page(io_ctl, 0);
565 1 : crc = btrfs_crc32c(crc, io_ctl->orig + offset, PAGE_SIZE - offset);
566 1 : btrfs_crc32c_final(crc, (u8 *)&crc);
567 1 : if (val != crc) {
568 0 : btrfs_err_rl(io_ctl->fs_info,
569 : "csum mismatch on free space cache");
570 0 : io_ctl_unmap_page(io_ctl);
571 0 : return -EIO;
572 : }
573 :
574 : return 0;
575 : }
576 :
577 219 : static int io_ctl_add_entry(struct btrfs_io_ctl *io_ctl, u64 offset, u64 bytes,
578 : void *bitmap)
579 : {
580 219 : struct btrfs_free_space_entry *entry;
581 :
582 219 : if (!io_ctl->cur)
583 : return -ENOSPC;
584 :
585 219 : entry = io_ctl->cur;
586 219 : put_unaligned_le64(offset, &entry->offset);
587 219 : put_unaligned_le64(bytes, &entry->bytes);
588 219 : entry->type = (bitmap) ? BTRFS_FREE_SPACE_BITMAP :
589 : BTRFS_FREE_SPACE_EXTENT;
590 219 : io_ctl->cur += sizeof(struct btrfs_free_space_entry);
591 219 : io_ctl->size -= sizeof(struct btrfs_free_space_entry);
592 :
593 219 : if (io_ctl->size >= sizeof(struct btrfs_free_space_entry))
594 : return 0;
595 :
596 0 : io_ctl_set_crc(io_ctl, io_ctl->index - 1);
597 :
598 : /* No more pages to map */
599 0 : if (io_ctl->index >= io_ctl->num_pages)
600 : return 0;
601 :
602 : /* map the next page */
603 0 : io_ctl_map_page(io_ctl, 1);
604 0 : return 0;
605 : }
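
/*
 * Editorial note: as the accessors above show, each serialized entry is
 * a packed struct btrfs_free_space_entry holding a le64 offset, a le64
 * byte count and a one-byte type (BTRFS_FREE_SPACE_EXTENT or
 * BTRFS_FREE_SPACE_BITMAP). A bitmap entry only records its header
 * here; the PAGE_SIZE bitmap payload itself is written separately by
 * io_ctl_add_bitmap() below.
 */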
606 :
607 0 : static int io_ctl_add_bitmap(struct btrfs_io_ctl *io_ctl, void *bitmap)
608 : {
609 0 : if (!io_ctl->cur)
610 : return -ENOSPC;
611 :
612 : /*
613 : * If we aren't at the start of the current page, unmap this one and
614 : * map the next one if there is any left.
615 : */
616 0 : if (io_ctl->cur != io_ctl->orig) {
617 0 : io_ctl_set_crc(io_ctl, io_ctl->index - 1);
618 0 : if (io_ctl->index >= io_ctl->num_pages)
619 : return -ENOSPC;
620 0 : io_ctl_map_page(io_ctl, 0);
621 : }
622 :
623 0 : copy_page(io_ctl->cur, bitmap);
624 0 : io_ctl_set_crc(io_ctl, io_ctl->index - 1);
625 0 : if (io_ctl->index < io_ctl->num_pages)
626 0 : io_ctl_map_page(io_ctl, 0);
627 : return 0;
628 : }
629 :
630 40 : static void io_ctl_zero_remaining_pages(struct btrfs_io_ctl *io_ctl)
631 : {
632 : /*
633 : * If we're not on the boundary we know we've modified the page and we
634 : * need to crc the page.
635 : */
636 40 : if (io_ctl->cur != io_ctl->orig)
637 40 : io_ctl_set_crc(io_ctl, io_ctl->index - 1);
638 : else
639 0 : io_ctl_unmap_page(io_ctl);
640 :
641 640 : while (io_ctl->index < io_ctl->num_pages) {
642 600 : io_ctl_map_page(io_ctl, 1);
643 600 : io_ctl_set_crc(io_ctl, io_ctl->index - 1);
644 : }
645 40 : }
646 :
647 6 : static int io_ctl_read_entry(struct btrfs_io_ctl *io_ctl,
648 : struct btrfs_free_space *entry, u8 *type)
649 : {
650 6 : struct btrfs_free_space_entry *e;
651 6 : int ret;
652 :
653 6 : if (!io_ctl->cur) {
654 0 : ret = io_ctl_check_crc(io_ctl, io_ctl->index);
655 0 : if (ret)
656 : return ret;
657 : }
658 :
659 6 : e = io_ctl->cur;
660 6 : entry->offset = get_unaligned_le64(&e->offset);
661 6 : entry->bytes = get_unaligned_le64(&e->bytes);
662 6 : *type = e->type;
663 6 : io_ctl->cur += sizeof(struct btrfs_free_space_entry);
664 6 : io_ctl->size -= sizeof(struct btrfs_free_space_entry);
665 :
666 6 : if (io_ctl->size >= sizeof(struct btrfs_free_space_entry))
667 : return 0;
668 :
669 0 : io_ctl_unmap_page(io_ctl);
670 :
671 : return 0;
672 : }
673 :
674 0 : static int io_ctl_read_bitmap(struct btrfs_io_ctl *io_ctl,
675 : struct btrfs_free_space *entry)
676 : {
677 0 : int ret;
678 :
679 0 : ret = io_ctl_check_crc(io_ctl, io_ctl->index);
680 0 : if (ret)
681 : return ret;
682 :
683 0 : copy_page(entry->bitmap, io_ctl->cur);
684 0 : io_ctl_unmap_page(io_ctl);
685 :
686 : return 0;
687 : }
688 :
689 3214 : static void recalculate_thresholds(struct btrfs_free_space_ctl *ctl)
690 : {
691 3214 : struct btrfs_block_group *block_group = ctl->block_group;
692 3214 : u64 max_bytes;
693 3214 : u64 bitmap_bytes;
694 3214 : u64 extent_bytes;
695 3214 : u64 size = block_group->length;
696 3214 : u64 bytes_per_bg = BITS_PER_BITMAP * ctl->unit;
697 3214 : u64 max_bitmaps = div64_u64(size + bytes_per_bg - 1, bytes_per_bg);
698 :
699 3214 : max_bitmaps = max_t(u64, max_bitmaps, 1);
700 :
701 3214 : if (ctl->total_bitmaps > max_bitmaps)
702 0 : btrfs_err(block_group->fs_info,
703 : "invalid free space control: bg start=%llu len=%llu total_bitmaps=%u unit=%u max_bitmaps=%llu bytes_per_bg=%llu",
704 : block_group->start, block_group->length,
705 : ctl->total_bitmaps, ctl->unit, max_bitmaps,
706 : bytes_per_bg);
707 3214 : ASSERT(ctl->total_bitmaps <= max_bitmaps);
708 :
709 : /*
710 : * We are trying to keep the total amount of memory used per 1GiB of
711 : * space to be MAX_CACHE_BYTES_PER_GIG. However, with a reclamation
712 : * mechanism of pulling extents >= FORCE_EXTENT_THRESHOLD out of
713 : * bitmaps, we may end up using more memory than this.
714 : */
715 3214 : if (size < SZ_1G)
716 : max_bytes = MAX_CACHE_BYTES_PER_GIG;
717 : else
718 916 : max_bytes = MAX_CACHE_BYTES_PER_GIG * div_u64(size, SZ_1G);
719 :
720 3214 : bitmap_bytes = ctl->total_bitmaps * ctl->unit;
721 :
722 : /*
723 : * We want the extent entry byte budget to be whatever is left after
724 : * the bitmaps, capped at 1/2 of the max bytes we can have.
725 : */
726 3214 : extent_bytes = max_bytes - bitmap_bytes;
727 3214 : extent_bytes = min_t(u64, extent_bytes, max_bytes >> 1);
728 :
729 3214 : ctl->extents_thresh =
730 : div_u64(extent_bytes, sizeof(struct btrfs_free_space));
731 3214 : }
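
/*
 * Editorial worked example, assuming 4 KiB pages and a 4 KiB unit: for
 * a 1 GiB block group, max_bytes = MAX_CACHE_BYTES_PER_GIG = 64 KiB.
 * With one bitmap in use, bitmap_bytes = 4 KiB, so
 * extent_bytes = min(64K - 4K, 64K / 2) = 32 KiB, and extents_thresh is
 * 32 KiB divided by sizeof(struct btrfs_free_space). Roughly speaking,
 * once more extent entries than that accumulate, new free space starts
 * being tracked in bitmaps instead of individual extent entries.
 */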
732 :
733 1 : static int __load_free_space_cache(struct btrfs_root *root, struct inode *inode,
734 : struct btrfs_free_space_ctl *ctl,
735 : struct btrfs_path *path, u64 offset)
736 : {
737 1 : struct btrfs_fs_info *fs_info = root->fs_info;
738 1 : struct btrfs_free_space_header *header;
739 1 : struct extent_buffer *leaf;
740 1 : struct btrfs_io_ctl io_ctl;
741 1 : struct btrfs_key key;
742 1 : struct btrfs_free_space *e, *n;
743 1 : LIST_HEAD(bitmaps);
744 1 : u64 num_entries;
745 1 : u64 num_bitmaps;
746 1 : u64 generation;
747 1 : u8 type;
748 1 : int ret = 0;
749 :
750 : /* Nothing in the space cache, goodbye */
751 1 : if (!i_size_read(inode))
752 : return 0;
753 :
754 1 : key.objectid = BTRFS_FREE_SPACE_OBJECTID;
755 1 : key.offset = offset;
756 1 : key.type = 0;
757 :
758 1 : ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
759 1 : if (ret < 0)
760 : return 0;
761 1 : else if (ret > 0) {
762 0 : btrfs_release_path(path);
763 0 : return 0;
764 : }
765 :
766 1 : ret = -1;
767 :
768 1 : leaf = path->nodes[0];
769 1 : header = btrfs_item_ptr(leaf, path->slots[0],
770 : struct btrfs_free_space_header);
771 1 : num_entries = btrfs_free_space_entries(leaf, header);
772 1 : num_bitmaps = btrfs_free_space_bitmaps(leaf, header);
773 1 : generation = btrfs_free_space_generation(leaf, header);
774 1 : btrfs_release_path(path);
775 :
776 1 : if (!BTRFS_I(inode)->generation) {
777 0 : btrfs_info(fs_info,
778 : "the free space cache file (%llu) is invalid, skip it",
779 : offset);
780 0 : return 0;
781 : }
782 :
783 1 : if (BTRFS_I(inode)->generation != generation) {
784 0 : btrfs_err(fs_info,
785 : "free space inode generation (%llu) did not match free space cache generation (%llu)",
786 : BTRFS_I(inode)->generation, generation);
787 0 : return 0;
788 : }
789 :
790 1 : if (!num_entries)
791 : return 0;
792 :
793 1 : ret = io_ctl_init(&io_ctl, inode, 0);
794 1 : if (ret)
795 : return ret;
796 :
797 1 : readahead_cache(inode);
798 :
799 1 : ret = io_ctl_prepare_pages(&io_ctl, true);
800 1 : if (ret)
801 0 : goto out;
802 :
803 1 : ret = io_ctl_check_crc(&io_ctl, 0);
804 1 : if (ret)
805 0 : goto free_cache;
806 :
807 1 : ret = io_ctl_check_generation(&io_ctl, generation);
808 1 : if (ret)
809 0 : goto free_cache;
810 :
811 7 : while (num_entries) {
812 6 : e = kmem_cache_zalloc(btrfs_free_space_cachep,
813 : GFP_NOFS);
814 6 : if (!e) {
815 0 : ret = -ENOMEM;
816 0 : goto free_cache;
817 : }
818 :
819 6 : ret = io_ctl_read_entry(&io_ctl, e, &type);
820 6 : if (ret) {
821 0 : kmem_cache_free(btrfs_free_space_cachep, e);
822 0 : goto free_cache;
823 : }
824 :
825 6 : if (!e->bytes) {
826 0 : ret = -1;
827 0 : kmem_cache_free(btrfs_free_space_cachep, e);
828 0 : goto free_cache;
829 : }
830 :
831 6 : if (type == BTRFS_FREE_SPACE_EXTENT) {
832 6 : spin_lock(&ctl->tree_lock);
833 6 : ret = link_free_space(ctl, e);
834 6 : spin_unlock(&ctl->tree_lock);
835 6 : if (ret) {
836 0 : btrfs_err(fs_info,
837 : "Duplicate entries in free space cache, dumping");
838 0 : kmem_cache_free(btrfs_free_space_cachep, e);
839 0 : goto free_cache;
840 : }
841 : } else {
842 0 : ASSERT(num_bitmaps);
843 0 : num_bitmaps--;
844 0 : e->bitmap = kmem_cache_zalloc(
845 : btrfs_free_space_bitmap_cachep, GFP_NOFS);
846 0 : if (!e->bitmap) {
847 0 : ret = -ENOMEM;
848 0 : kmem_cache_free(
849 : btrfs_free_space_cachep, e);
850 0 : goto free_cache;
851 : }
852 0 : spin_lock(&ctl->tree_lock);
853 0 : ret = link_free_space(ctl, e);
854 0 : if (ret) {
855 0 : spin_unlock(&ctl->tree_lock);
856 0 : btrfs_err(fs_info,
857 : "Duplicate entries in free space cache, dumping");
858 0 : kmem_cache_free(btrfs_free_space_cachep, e);
859 0 : goto free_cache;
860 : }
861 0 : ctl->total_bitmaps++;
862 0 : recalculate_thresholds(ctl);
863 0 : spin_unlock(&ctl->tree_lock);
864 0 : list_add_tail(&e->list, &bitmaps);
865 : }
866 :
867 6 : num_entries--;
868 : }
869 :
870 1 : io_ctl_unmap_page(&io_ctl);
871 :
872 : /*
873 : * The bitmap payloads are stored after all the entries, so we can
874 : * only fill in the queued bitmap entries now.
875 : */
876 1 : list_for_each_entry_safe(e, n, &bitmaps, list) {
877 0 : list_del_init(&e->list);
878 0 : ret = io_ctl_read_bitmap(&io_ctl, e);
879 0 : if (ret)
880 0 : goto free_cache;
881 : }
882 :
883 1 : io_ctl_drop_pages(&io_ctl);
884 1 : ret = 1;
885 1 : out:
886 1 : io_ctl_free(&io_ctl);
887 1 : return ret;
888 0 : free_cache:
889 0 : io_ctl_drop_pages(&io_ctl);
890 :
891 0 : spin_lock(&ctl->tree_lock);
892 0 : __btrfs_remove_free_space_cache(ctl);
893 0 : spin_unlock(&ctl->tree_lock);
894 0 : goto out;
895 : }
896 :
897 1 : static int copy_free_space_cache(struct btrfs_block_group *block_group,
898 : struct btrfs_free_space_ctl *ctl)
899 : {
900 1 : struct btrfs_free_space *info;
901 1 : struct rb_node *n;
902 1 : int ret = 0;
903 :
904 7 : while (!ret && (n = rb_first(&ctl->free_space_offset)) != NULL) {
905 6 : info = rb_entry(n, struct btrfs_free_space, offset_index);
906 6 : if (!info->bitmap) {
907 6 : const u64 offset = info->offset;
908 6 : const u64 bytes = info->bytes;
909 :
910 6 : unlink_free_space(ctl, info, true);
911 6 : spin_unlock(&ctl->tree_lock);
912 6 : kmem_cache_free(btrfs_free_space_cachep, info);
913 6 : ret = btrfs_add_free_space(block_group, offset, bytes);
914 6 : spin_lock(&ctl->tree_lock);
915 : } else {
916 0 : u64 offset = info->offset;
917 0 : u64 bytes = ctl->unit;
918 :
919 0 : ret = search_bitmap(ctl, info, &offset, &bytes, false);
920 0 : if (ret == 0) {
921 0 : bitmap_clear_bits(ctl, info, offset, bytes, true);
922 0 : spin_unlock(&ctl->tree_lock);
923 0 : ret = btrfs_add_free_space(block_group, offset,
924 : bytes);
925 0 : spin_lock(&ctl->tree_lock);
926 : } else {
927 0 : free_bitmap(ctl, info);
928 0 : ret = 0;
929 : }
930 : }
931 6 : cond_resched_lock(&ctl->tree_lock);
932 : }
933 1 : return ret;
934 : }
935 :
936 : static struct lock_class_key btrfs_free_space_inode_key;
937 :
938 22 : int load_free_space_cache(struct btrfs_block_group *block_group)
939 : {
940 22 : struct btrfs_fs_info *fs_info = block_group->fs_info;
941 22 : struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
942 22 : struct btrfs_free_space_ctl tmp_ctl = {};
943 22 : struct inode *inode;
944 22 : struct btrfs_path *path;
945 22 : int ret = 0;
946 22 : bool matched;
947 22 : u64 used = block_group->used;
948 :
949 : /*
950 : * Because we could potentially discard our loaded free space, we want
951 : * to load everything into a temporary structure first, and then if it's
952 : * valid copy it all into the actual free space ctl.
953 : */
954 22 : btrfs_init_free_space_ctl(block_group, &tmp_ctl);
955 :
956 : /*
957 : * If this block group has been marked to be cleared for one reason or
958 : * another then we can't trust the on disk cache, so just return.
959 : */
960 22 : spin_lock(&block_group->lock);
961 22 : if (block_group->disk_cache_state != BTRFS_DC_WRITTEN) {
962 20 : spin_unlock(&block_group->lock);
963 20 : return 0;
964 : }
965 2 : spin_unlock(&block_group->lock);
966 :
967 2 : path = btrfs_alloc_path();
968 2 : if (!path)
969 : return 0;
970 2 : path->search_commit_root = 1;
971 2 : path->skip_locking = 1;
972 :
973 : /*
974 : * We must pass a path with search_commit_root set to btrfs_iget in
975 : * order to avoid a deadlock when allocating extents for the tree root.
976 : *
977 : * When we are COWing an extent buffer from the tree root, when looking
978 : * for a free extent, at extent-tree.c:find_free_extent(), we can find
979 : * block group without its free space cache loaded. When we find one
980 : * we must load its space cache which requires reading its free space
981 : * cache's inode item from the root tree. If this inode item is located
982 : * in the same leaf that we started COWing before, then we end up in
983 : * deadlock on the extent buffer (trying to read lock it when we
984 : * previously write locked it).
985 : *
986 : * It's safe to read the inode item using the commit root because
987 : * block groups, once loaded, stay in memory forever (until they are
988 : * removed) as well as their space caches once loaded. New block groups
989 : * once created get their ->cached field set to BTRFS_CACHE_FINISHED so
990 : * we will never try to read their inode item while the fs is mounted.
991 : */
992 2 : inode = lookup_free_space_inode(block_group, path);
993 2 : if (IS_ERR(inode)) {
994 1 : btrfs_free_path(path);
995 1 : return 0;
996 : }
997 :
998 : /* We may have converted the inode and made the cache invalid. */
999 1 : spin_lock(&block_group->lock);
1000 1 : if (block_group->disk_cache_state != BTRFS_DC_WRITTEN) {
1001 0 : spin_unlock(&block_group->lock);
1002 0 : btrfs_free_path(path);
1003 0 : goto out;
1004 : }
1005 1 : spin_unlock(&block_group->lock);
1006 :
1007 : /*
1008 : * Reinitialize the class of struct inode's mapping->invalidate_lock for
1009 : * free space inodes to prevent false positives related to locks for normal
1010 : * inodes.
1011 : */
1012 1 : lockdep_set_class(&(&inode->i_data)->invalidate_lock,
1013 : &btrfs_free_space_inode_key);
1014 :
1015 1 : ret = __load_free_space_cache(fs_info->tree_root, inode, &tmp_ctl,
1016 : path, block_group->start);
1017 1 : btrfs_free_path(path);
1018 1 : if (ret <= 0)
1019 0 : goto out;
1020 :
1021 1 : matched = (tmp_ctl.free_space == (block_group->length - used -
1022 1 : block_group->bytes_super));
1023 :
1024 1 : if (matched) {
1025 1 : spin_lock(&tmp_ctl.tree_lock);
1026 1 : ret = copy_free_space_cache(block_group, &tmp_ctl);
1027 1 : spin_unlock(&tmp_ctl.tree_lock);
1028 : /*
1029 : * ret == 1 means we successfully loaded the free space cache,
1030 : * so we need to re-set it here.
1031 : */
1032 1 : if (ret == 0)
1033 : ret = 1;
1034 : } else {
1035 : /*
1036 : * We need to call the _locked variant so we don't try to update
1037 : * the discard counters.
1038 : */
1039 0 : spin_lock(&tmp_ctl.tree_lock);
1040 0 : __btrfs_remove_free_space_cache(&tmp_ctl);
1041 0 : spin_unlock(&tmp_ctl.tree_lock);
1042 0 : btrfs_warn(fs_info,
1043 : "block group %llu has wrong amount of free space",
1044 : block_group->start);
1045 0 : ret = -1;
1046 : }
1047 0 : out:
1048 0 : if (ret < 0) {
1049 : /* This cache is bogus, make sure it gets cleared */
1050 0 : spin_lock(&block_group->lock);
1051 0 : block_group->disk_cache_state = BTRFS_DC_CLEAR;
1052 0 : spin_unlock(&block_group->lock);
1053 0 : ret = 0;
1054 :
1055 0 : btrfs_warn(fs_info,
1056 : "failed to load free space cache for block group %llu, rebuilding it now",
1057 : block_group->start);
1058 : }
1059 :
1060 1 : spin_lock(&ctl->tree_lock);
1061 1 : btrfs_discard_update_discardable(block_group);
1062 1 : spin_unlock(&ctl->tree_lock);
1063 1 : iput(inode);
1064 1 : return ret;
1065 : }
1066 :
1067 : static noinline_for_stack
1068 40 : int write_cache_extent_entries(struct btrfs_io_ctl *io_ctl,
1069 : struct btrfs_free_space_ctl *ctl,
1070 : struct btrfs_block_group *block_group,
1071 : int *entries, int *bitmaps,
1072 : struct list_head *bitmap_list)
1073 : {
1074 40 : int ret;
1075 40 : struct btrfs_free_cluster *cluster = NULL;
1076 40 : struct btrfs_free_cluster *cluster_locked = NULL;
1077 40 : struct rb_node *node = rb_first(&ctl->free_space_offset);
1078 40 : struct btrfs_trim_range *trim_entry;
1079 :
1080 : /* Get the cluster for this block_group if it exists */
1081 40 : if (block_group && !list_empty(&block_group->cluster_list)) {
1082 40 : cluster = list_entry(block_group->cluster_list.next,
1083 : struct btrfs_free_cluster,
1084 : block_group_list);
1085 : }
1086 :
1087 40 : if (!node && cluster) {
1088 14 : cluster_locked = cluster;
1089 14 : spin_lock(&cluster_locked->lock);
1090 14 : node = rb_first(&cluster->root);
1091 14 : cluster = NULL;
1092 : }
1093 :
1094 : /* Write out the extent entries */
1095 206 : while (node) {
1096 166 : struct btrfs_free_space *e;
1097 :
1098 166 : e = rb_entry(node, struct btrfs_free_space, offset_index);
1099 166 : *entries += 1;
1100 :
1101 166 : ret = io_ctl_add_entry(io_ctl, e->offset, e->bytes,
1102 166 : e->bitmap);
1103 166 : if (ret)
1104 0 : goto fail;
1105 :
1106 166 : if (e->bitmap) {
1107 0 : list_add_tail(&e->list, bitmap_list);
1108 0 : *bitmaps += 1;
1109 : }
1110 166 : node = rb_next(node);
1111 166 : if (!node && cluster) {
1112 26 : node = rb_first(&cluster->root);
1113 26 : cluster_locked = cluster;
1114 26 : spin_lock(&cluster_locked->lock);
1115 26 : cluster = NULL;
1116 : }
1117 : }
1118 40 : if (cluster_locked) {
1119 40 : spin_unlock(&cluster_locked->lock);
1120 40 : cluster_locked = NULL;
1121 : }
1122 :
1123 : /*
1124 : * Make sure we don't miss any range that was removed from our rbtree
1125 : * because trimming is running. Otherwise after a umount+mount (or crash
1126 : * after committing the transaction) we would leak free space and get
1127 : * an inconsistent free space cache report from fsck.
1128 : */
1129 40 : list_for_each_entry(trim_entry, &ctl->trimming_ranges, list) {
1130 0 : ret = io_ctl_add_entry(io_ctl, trim_entry->start,
1131 : trim_entry->bytes, NULL);
1132 0 : if (ret)
1133 0 : goto fail;
1134 0 : *entries += 1;
1135 : }
1136 :
1137 : return 0;
1138 0 : fail:
1139 0 : if (cluster_locked)
1140 0 : spin_unlock(&cluster_locked->lock);
1141 : return -ENOSPC;
1142 : }
1143 :
1144 : static noinline_for_stack int
1145 40 : update_cache_item(struct btrfs_trans_handle *trans,
1146 : struct btrfs_root *root,
1147 : struct inode *inode,
1148 : struct btrfs_path *path, u64 offset,
1149 : int entries, int bitmaps)
1150 : {
1151 40 : struct btrfs_key key;
1152 40 : struct btrfs_free_space_header *header;
1153 40 : struct extent_buffer *leaf;
1154 40 : int ret;
1155 :
1156 40 : key.objectid = BTRFS_FREE_SPACE_OBJECTID;
1157 40 : key.offset = offset;
1158 40 : key.type = 0;
1159 :
1160 40 : ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
1161 40 : if (ret < 0) {
1162 0 : clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, inode->i_size - 1,
1163 : EXTENT_DELALLOC, NULL);
1164 0 : goto fail;
1165 : }
1166 40 : leaf = path->nodes[0];
1167 40 : if (ret > 0) {
1168 0 : struct btrfs_key found_key;
1169 0 : ASSERT(path->slots[0]);
1170 0 : path->slots[0]--;
1171 0 : btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
1172 0 : if (found_key.objectid != BTRFS_FREE_SPACE_OBJECTID ||
1173 0 : found_key.offset != offset) {
1174 0 : clear_extent_bit(&BTRFS_I(inode)->io_tree, 0,
1175 0 : inode->i_size - 1, EXTENT_DELALLOC,
1176 : NULL);
1177 0 : btrfs_release_path(path);
1178 0 : goto fail;
1179 : }
1180 : }
1181 :
1182 40 : BTRFS_I(inode)->generation = trans->transid;
1183 40 : header = btrfs_item_ptr(leaf, path->slots[0],
1184 : struct btrfs_free_space_header);
1185 40 : btrfs_set_free_space_entries(leaf, header, entries);
1186 40 : btrfs_set_free_space_bitmaps(leaf, header, bitmaps);
1187 40 : btrfs_set_free_space_generation(leaf, header, trans->transid);
1188 40 : btrfs_mark_buffer_dirty(leaf);
1189 40 : btrfs_release_path(path);
1190 :
1191 40 : return 0;
1192 :
1193 : fail:
1194 : return -1;
1195 : }
1196 :
1197 40 : static noinline_for_stack int write_pinned_extent_entries(
1198 : struct btrfs_trans_handle *trans,
1199 : struct btrfs_block_group *block_group,
1200 : struct btrfs_io_ctl *io_ctl,
1201 : int *entries)
1202 : {
1203 40 : u64 start, extent_start, extent_end, len;
1204 40 : struct extent_io_tree *unpin = NULL;
1205 40 : int ret;
1206 :
1207 40 : if (!block_group)
1208 : return 0;
1209 :
1210 : /*
1211 : * We want to add any pinned extents to our free space cache
1212 : * so we don't leak the space.
1213 : *
1214 : * We shouldn't have switched the pinned extents yet, so this is
1215 : * the right one.
1216 : */
1217 40 : unpin = &trans->transaction->pinned_extents;
1218 :
1219 40 : start = block_group->start;
1220 :
1221 93 : while (start < block_group->start + block_group->length) {
1222 93 : ret = find_first_extent_bit(unpin, start,
1223 : &extent_start, &extent_end,
1224 : EXTENT_DIRTY, NULL);
1225 93 : if (ret)
1226 : return 0;
1227 :
1228 : /* This pinned extent is out of our range */
1229 53 : if (extent_start >= block_group->start + block_group->length)
1230 : return 0;
1231 :
1232 53 : extent_start = max(extent_start, start);
1233 53 : extent_end = min(block_group->start + block_group->length,
1234 : extent_end + 1);
1235 53 : len = extent_end - extent_start;
1236 :
1237 53 : *entries += 1;
1238 53 : ret = io_ctl_add_entry(io_ctl, extent_start, len, NULL);
1239 53 : if (ret)
1240 : return -ENOSPC;
1241 :
1242 53 : start = extent_end;
1243 : }
1244 :
1245 : return 0;
1246 : }
1247 :
1248 : static noinline_for_stack int
1249 40 : write_bitmap_entries(struct btrfs_io_ctl *io_ctl, struct list_head *bitmap_list)
1250 : {
1251 40 : struct btrfs_free_space *entry, *next;
1252 40 : int ret;
1253 :
1254 : /* Write out the bitmaps */
1255 40 : list_for_each_entry_safe(entry, next, bitmap_list, list) {
1256 0 : ret = io_ctl_add_bitmap(io_ctl, entry->bitmap);
1257 0 : if (ret)
1258 : return -ENOSPC;
1259 0 : list_del_init(&entry->list);
1260 : }
1261 :
1262 : return 0;
1263 : }
1264 :
1265 40 : static int flush_dirty_cache(struct inode *inode)
1266 : {
1267 40 : int ret;
1268 :
1269 40 : ret = btrfs_wait_ordered_range(inode, 0, (u64)-1);
1270 40 : if (ret)
1271 0 : clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, inode->i_size - 1,
1272 : EXTENT_DELALLOC, NULL);
1273 :
1274 40 : return ret;
1275 : }
1276 :
1277 : static void noinline_for_stack
1278 0 : cleanup_bitmap_list(struct list_head *bitmap_list)
1279 : {
1280 0 : struct btrfs_free_space *entry, *next;
1281 :
1282 0 : list_for_each_entry_safe(entry, next, bitmap_list, list)
1283 0 : list_del_init(&entry->list);
1284 0 : }
1285 :
1286 : static void noinline_for_stack
1287 0 : cleanup_write_cache_enospc(struct inode *inode,
1288 : struct btrfs_io_ctl *io_ctl,
1289 : struct extent_state **cached_state)
1290 : {
1291 0 : io_ctl_drop_pages(io_ctl);
1292 0 : unlock_extent(&BTRFS_I(inode)->io_tree, 0, i_size_read(inode) - 1,
1293 : cached_state);
1294 0 : }
1295 :
1296 40 : static int __btrfs_wait_cache_io(struct btrfs_root *root,
1297 : struct btrfs_trans_handle *trans,
1298 : struct btrfs_block_group *block_group,
1299 : struct btrfs_io_ctl *io_ctl,
1300 : struct btrfs_path *path, u64 offset)
1301 : {
1302 40 : int ret;
1303 40 : struct inode *inode = io_ctl->inode;
1304 :
1305 40 : if (!inode)
1306 : return 0;
1307 :
1308 : /* Flush the dirty pages in the cache file. */
1309 40 : ret = flush_dirty_cache(inode);
1310 40 : if (ret)
1311 0 : goto out;
1312 :
1313 : /* Update the cache item to tell everyone this cache file is valid. */
1314 40 : ret = update_cache_item(trans, root, inode, path, offset,
1315 : io_ctl->entries, io_ctl->bitmaps);
1316 40 : out:
1317 40 : if (ret) {
1318 0 : invalidate_inode_pages2(inode->i_mapping);
1319 0 : BTRFS_I(inode)->generation = 0;
1320 0 : if (block_group)
1321 : btrfs_debug(root->fs_info,
1322 : "failed to write free space cache for block group %llu error %d",
1323 : block_group->start, ret);
1324 : }
1325 40 : btrfs_update_inode(trans, root, BTRFS_I(inode));
1326 :
1327 40 : if (block_group) {
1328 : /* the dirty list is protected by the dirty_bgs_lock */
1329 40 : spin_lock(&trans->transaction->dirty_bgs_lock);
1330 :
1331 : /* the disk_cache_state is protected by the block group lock */
1332 40 : spin_lock(&block_group->lock);
1333 :
1334 : /*
1335 : * Only mark this as written if we didn't get put back on
1336 : * the dirty list while waiting for IO. Otherwise our
1337 : * cache state won't be right, and we won't get written again.
1338 : */
1339 40 : if (!ret && list_empty(&block_group->dirty_list))
1340 16 : block_group->disk_cache_state = BTRFS_DC_WRITTEN;
1341 24 : else if (ret)
1342 0 : block_group->disk_cache_state = BTRFS_DC_ERROR;
1343 :
1344 40 : spin_unlock(&block_group->lock);
1345 40 : spin_unlock(&trans->transaction->dirty_bgs_lock);
1346 40 : io_ctl->inode = NULL;
1347 40 : iput(inode);
1348 : }
1349 :
1350 : return ret;
1351 :
1352 : }
1353 :
1354 40 : int btrfs_wait_cache_io(struct btrfs_trans_handle *trans,
1355 : struct btrfs_block_group *block_group,
1356 : struct btrfs_path *path)
1357 : {
1358 40 : return __btrfs_wait_cache_io(block_group->fs_info->tree_root, trans,
1359 : block_group, &block_group->io_ctl,
1360 : path, block_group->start);
1361 : }
1362 :
1363 : /*
1364 : * Write out cached info to an inode.
1365 : *
1366 : * @root: root the inode belongs to
1367 : * @inode: freespace inode we are writing out
1368 : * @ctl: free space cache we are going to write out
1369 : * @block_group: block_group for this cache if it belongs to a block_group
1370 : * @io_ctl: holds context for the io
1371 : * @trans: the trans handle
1372 : *
1373 : * This function writes out a free space cache struct to disk for quick recovery
1374 : * on mount. This will return 0 if it was successful in writing the cache out,
1375 : * or an errno if it was not.
1376 : */
1377 40 : static int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode,
1378 : struct btrfs_free_space_ctl *ctl,
1379 : struct btrfs_block_group *block_group,
1380 : struct btrfs_io_ctl *io_ctl,
1381 : struct btrfs_trans_handle *trans)
1382 : {
1383 40 : struct extent_state *cached_state = NULL;
1384 40 : LIST_HEAD(bitmap_list);
1385 40 : int entries = 0;
1386 40 : int bitmaps = 0;
1387 40 : int ret;
1388 40 : int must_iput = 0;
1389 :
1390 40 : if (!i_size_read(inode))
1391 : return -EIO;
1392 :
1393 40 : WARN_ON(io_ctl->pages);
1394 40 : ret = io_ctl_init(io_ctl, inode, 1);
1395 40 : if (ret)
1396 : return ret;
1397 :
1398 40 : if (block_group && (block_group->flags & BTRFS_BLOCK_GROUP_DATA)) {
1399 0 : down_write(&block_group->data_rwsem);
1400 0 : spin_lock(&block_group->lock);
1401 0 : if (block_group->delalloc_bytes) {
1402 0 : block_group->disk_cache_state = BTRFS_DC_WRITTEN;
1403 0 : spin_unlock(&block_group->lock);
1404 0 : up_write(&block_group->data_rwsem);
1405 0 : BTRFS_I(inode)->generation = 0;
1406 0 : ret = 0;
1407 0 : must_iput = 1;
1408 0 : goto out;
1409 : }
1410 0 : spin_unlock(&block_group->lock);
1411 : }
1412 :
1413 : /* Lock all pages first so we can lock the extent safely. */
1414 40 : ret = io_ctl_prepare_pages(io_ctl, false);
1415 40 : if (ret)
1416 0 : goto out_unlock;
1417 :
1418 40 : lock_extent(&BTRFS_I(inode)->io_tree, 0, i_size_read(inode) - 1,
1419 : &cached_state);
1420 :
1421 40 : io_ctl_set_generation(io_ctl, trans->transid);
1422 :
1423 40 : mutex_lock(&ctl->cache_writeout_mutex);
1424 : /* Write out the extent entries in the free space cache */
1425 40 : spin_lock(&ctl->tree_lock);
1426 40 : ret = write_cache_extent_entries(io_ctl, ctl,
1427 : block_group, &entries, &bitmaps,
1428 : &bitmap_list);
1429 40 : if (ret)
1430 0 : goto out_nospc_locked;
1431 :
1432 : /*
1433 : * Some spaces that are freed in the current transaction are pinned;
1434 : * they will be added into the free space cache after the transaction
1435 : * is committed, so we shouldn't lose them.
1436 : *
1437 : * If this changes while we are working we'll get added back to
1438 : * the dirty list and redo it. No locking needed
1439 : */
1440 40 : ret = write_pinned_extent_entries(trans, block_group, io_ctl, &entries);
1441 40 : if (ret)
1442 0 : goto out_nospc_locked;
1443 :
1444 : /*
1445 : * Finally, write out all the bitmaps, keeping cache_writeout_mutex
1446 : * locked while doing it because a concurrent trim can be manipulating
1447 : * or freeing a bitmap.
1448 : */
1449 40 : ret = write_bitmap_entries(io_ctl, &bitmap_list);
1450 40 : spin_unlock(&ctl->tree_lock);
1451 40 : mutex_unlock(&ctl->cache_writeout_mutex);
1452 40 : if (ret)
1453 0 : goto out_nospc;
1454 :
1455 : /* Zero out the rest of the pages just to make sure */
1456 40 : io_ctl_zero_remaining_pages(io_ctl);
1457 :
1458 : /* Everything is written out, now we dirty the pages in the file. */
1459 40 : ret = btrfs_dirty_pages(BTRFS_I(inode), io_ctl->pages,
1460 40 : io_ctl->num_pages, 0, i_size_read(inode),
1461 : &cached_state, false);
1462 40 : if (ret)
1463 0 : goto out_nospc;
1464 :
1465 40 : if (block_group && (block_group->flags & BTRFS_BLOCK_GROUP_DATA))
1466 0 : up_write(&block_group->data_rwsem);
1467 : /*
1468 : * Release the pages and unlock the extent; we will flush
1469 : * them out later.
1470 : */
1471 40 : io_ctl_drop_pages(io_ctl);
1472 40 : io_ctl_free(io_ctl);
1473 :
1474 40 : unlock_extent(&BTRFS_I(inode)->io_tree, 0, i_size_read(inode) - 1,
1475 : &cached_state);
1476 :
1477 : /*
1478 : * At this point the pages are under IO and we're happy.
1479 : * The caller is responsible for waiting on them and updating
1480 : * the cache and the inode.
1481 : */
1482 40 : io_ctl->entries = entries;
1483 40 : io_ctl->bitmaps = bitmaps;
1484 :
1485 40 : ret = btrfs_fdatawrite_range(inode, 0, (u64)-1);
1486 40 : if (ret)
1487 0 : goto out;
1488 :
1489 : return 0;
1490 :
1491 0 : out_nospc_locked:
1492 0 : cleanup_bitmap_list(&bitmap_list);
1493 0 : spin_unlock(&ctl->tree_lock);
1494 0 : mutex_unlock(&ctl->cache_writeout_mutex);
1495 :
1496 0 : out_nospc:
1497 0 : cleanup_write_cache_enospc(inode, io_ctl, &cached_state);
1498 :
1499 0 : out_unlock:
1500 0 : if (block_group && (block_group->flags & BTRFS_BLOCK_GROUP_DATA))
1501 0 : up_write(&block_group->data_rwsem);
1502 :
1503 0 : out:
1504 0 : io_ctl->inode = NULL;
1505 0 : io_ctl_free(io_ctl);
1506 0 : if (ret) {
1507 0 : invalidate_inode_pages2(inode->i_mapping);
1508 0 : BTRFS_I(inode)->generation = 0;
1509 : }
1510 0 : btrfs_update_inode(trans, root, BTRFS_I(inode));
1511 0 : if (must_iput)
1512 0 : iput(inode);
1513 : return ret;
1514 : }
1515 :
1516 40 : int btrfs_write_out_cache(struct btrfs_trans_handle *trans,
1517 : struct btrfs_block_group *block_group,
1518 : struct btrfs_path *path)
1519 : {
1520 40 : struct btrfs_fs_info *fs_info = trans->fs_info;
1521 40 : struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
1522 40 : struct inode *inode;
1523 40 : int ret = 0;
1524 :
1525 40 : spin_lock(&block_group->lock);
1526 40 : if (block_group->disk_cache_state < BTRFS_DC_SETUP) {
1527 0 : spin_unlock(&block_group->lock);
1528 0 : return 0;
1529 : }
1530 40 : spin_unlock(&block_group->lock);
1531 :
1532 40 : inode = lookup_free_space_inode(block_group, path);
1533 40 : if (IS_ERR(inode))
1534 : return 0;
1535 :
1536 40 : ret = __btrfs_write_out_cache(fs_info->tree_root, inode, ctl,
1537 : block_group, &block_group->io_ctl, trans);
1538 40 : if (ret) {
1539 0 : btrfs_debug(fs_info,
1540 : "failed to write free space cache for block group %llu error %d",
1541 : block_group->start, ret);
1542 0 : spin_lock(&block_group->lock);
1543 0 : block_group->disk_cache_state = BTRFS_DC_ERROR;
1544 0 : spin_unlock(&block_group->lock);
1545 :
1546 0 : block_group->io_ctl.inode = NULL;
1547 0 : iput(inode);
1548 : }
1549 :
1550 : /*
1551 : * If ret == 0, the caller is expected to call btrfs_wait_cache_io
1552 : * to wait for IO and put the inode.
1553 : */
1554 :
1555 : return ret;
1556 : }
1557 :
1558 : static inline unsigned long offset_to_bit(u64 bitmap_start, u32 unit,
1559 : u64 offset)
1560 : {
1561 5204665 : ASSERT(offset >= bitmap_start);
1562 5204665 : offset -= bitmap_start;
1563 5204665 : return (unsigned long)(div_u64(offset, unit));
1564 : }
1565 :
1566 : static inline unsigned long bytes_to_bits(u64 bytes, u32 unit)
1567 : {
1568 4455016 : return (unsigned long)(div_u64(bytes, unit));
1569 : }
1570 :
1571 : static inline u64 offset_to_bitmap(struct btrfs_free_space_ctl *ctl,
1572 : u64 offset)
1573 : {
1574 4417106 : u64 bitmap_start;
1575 4417106 : u64 bytes_per_bitmap;
1576 :
1577 4417106 : bytes_per_bitmap = BITS_PER_BITMAP * ctl->unit;
1578 4417106 : bitmap_start = offset - ctl->start;
1579 4417106 : bitmap_start = div64_u64(bitmap_start, bytes_per_bitmap);
1580 4417106 : bitmap_start *= bytes_per_bitmap;
1581 4417106 : bitmap_start += ctl->start;
1582 :
1583 3489042 : return bitmap_start;
1584 : }
1585 :
1586 7301470 : static int tree_insert_offset(struct btrfs_free_space_ctl *ctl,
1587 : struct btrfs_free_cluster *cluster,
1588 : struct btrfs_free_space *new_entry)
1589 : {
1590 7301470 : struct rb_root *root;
1591 7301470 : struct rb_node **p;
1592 7301470 : struct rb_node *parent = NULL;
1593 :
1594 7301470 : lockdep_assert_held(&ctl->tree_lock);
1595 :
1596 7301470 : if (cluster) {
1597 1210018 : lockdep_assert_held(&cluster->lock);
1598 1210018 : root = &cluster->root;
1599 : } else {
1600 6091452 : root = &ctl->free_space_offset;
1601 : }
1602 :
1603 7301470 : p = &root->rb_node;
1604 :
1605 54650957 : while (*p) {
1606 47349487 : struct btrfs_free_space *info;
1607 :
1608 47349487 : parent = *p;
1609 47349487 : info = rb_entry(parent, struct btrfs_free_space, offset_index);
1610 :
1611 47349487 : if (new_entry->offset < info->offset) {
1612 9257911 : p = &(*p)->rb_left;
1613 38091576 : } else if (new_entry->offset > info->offset) {
1614 38081361 : p = &(*p)->rb_right;
1615 : } else {
1616 : /*
1617 : * we could have a bitmap entry and an extent entry
1618 : * share the same offset. If this is the case, we want
1619 : * the extent entry to always be found first if we do a
1620 : * linear search through the tree, since we want to have
1621 : * the quickest allocation time, and allocating from an
1622 : * extent is faster than allocating from a bitmap. So
1623 : * if we're inserting a bitmap and we find an entry at
1624 : * this offset, we want to go right, or after this entry
1625 : * logically. If we are inserting an extent and we've
1626 : * found a bitmap, we want to go left, or before
1627 : * logically.
1628 : */
1629 10215 : if (new_entry->bitmap) {
1630 174 : if (info->bitmap) {
1631 0 : WARN_ON_ONCE(1);
1632 0 : return -EEXIST;
1633 : }
1634 174 : p = &(*p)->rb_right;
1635 : } else {
1636 10041 : if (!info->bitmap) {
1637 0 : WARN_ON_ONCE(1);
1638 0 : return -EEXIST;
1639 : }
1640 10041 : p = &(*p)->rb_left;
1641 : }
1642 : }
1643 : }
1644 :
1645 7301470 : rb_link_node(&new_entry->offset_index, parent, p);
1646 7301470 : rb_insert_color(&new_entry->offset_index, root);
1647 :
1648 7301470 : return 0;
1649 : }
1650 :
1651 : /*
1652 : * This is a little subtle. We *only* have ->max_extent_size set if we actually
1653 : * searched through the bitmap and figured out the largest ->max_extent_size,
1654 : * otherwise it's 0. In the case that it's 0 we don't want to tell the
1655 : * allocator the wrong thing, we want to use the actual real max_extent_size
1656 : * we've found already if it's larger, or we want to use ->bytes.
1657 : *
1658 : * This matters because find_free_space() will skip entries whose ->bytes is
1659 : * less than the required bytes. So if we didn't search down this bitmap, we
1660 : * may pick some previous entry that has a smaller ->max_extent_size than we
1661 : * have. For example, assume we have two entries, one that has
1662 : * ->max_extent_size set to 4K and ->bytes set to 1M. A second entry hasn't set
1663 : * ->max_extent_size yet, has ->bytes set to 8K and it's contiguous. We will
1664 : * call into find_free_space(), and return with max_extent_size == 4K, because
1665 : * that first bitmap entry had ->max_extent_size set, but the second one did
1666 : * not. If instead we returned 8K we'd come in searching for 8K, and find the
1667 : * 8K contiguous range.
1668 : *
1669 : * Consider the other case, we have 2 8K chunks in that second entry and still
1670 : * don't have ->max_extent_size set. We'll return 16K, and the next time the
1671 : * allocator comes in it'll fully search our second bitmap, and this time it'll
1672 : * get an uptodate value of 8K as the maximum chunk size. Then we'll get the
1673 : * right allocation the next loop through.
1674 : */
1675 : static inline u64 get_max_extent_size(const struct btrfs_free_space *entry)
1676 : {
1677 48664396 : if (entry->bitmap && entry->max_extent_size)
1678 : return entry->max_extent_size;
1679 94047565 : return entry->bytes;
1680 : }
1681 :
1682 : /*
1683 : * We want the largest entry to be leftmost, so this is inverted from what you'd
1684 : * normally expect.
1685 : */
1686 46691068 : static bool entry_less(struct rb_node *node, const struct rb_node *parent)
1687 : {
1688 46691068 : const struct btrfs_free_space *entry, *exist;
1689 :
1690 46691068 : entry = rb_entry(node, struct btrfs_free_space, bytes_index);
1691 46691068 : exist = rb_entry(parent, struct btrfs_free_space, bytes_index);
1692 46691068 : return get_max_extent_size(exist) < get_max_extent_size(entry);
1693 : }
1694 :
1695 : /*
1696 : * searches the tree for the given offset.
1697 : *
1698 : * fuzzy - If this is set, then we are trying to make an allocation, and we just
1699 : * want a section that is at least 'bytes' in size and comes at or after the given
1700 : * offset.
1701 : */
1702 : static struct btrfs_free_space *
1703 11743876 : tree_search_offset(struct btrfs_free_space_ctl *ctl,
1704 : u64 offset, int bitmap_only, int fuzzy)
1705 : {
1706 11743876 : struct rb_node *n = ctl->free_space_offset.rb_node;
1707 11743876 : struct btrfs_free_space *entry = NULL, *prev = NULL;
1708 :
1709 11743876 : lockdep_assert_held(&ctl->tree_lock);
1710 :
1711 : /* find entry that is closest to the 'offset' */
1712 98956588 : while (n) {
1713 89571505 : entry = rb_entry(n, struct btrfs_free_space, offset_index);
1714 89571505 : prev = entry;
1715 :
1716 89571505 : if (offset < entry->offset)
1717 28672576 : n = n->rb_left;
1718 60898929 : else if (offset > entry->offset)
1719 58540136 : n = n->rb_right;
1720 : else
1721 : break;
1722 :
1723 : entry = NULL;
1724 : }
1725 :
1726 11743876 : if (bitmap_only) {
1727 2081301 : if (!entry)
1728 : return NULL;
1729 1900917 : if (entry->bitmap)
1730 : return entry;
1731 :
1732 : /*
1733 : * A bitmap entry and an extent entry may share the same offset;
1734 : * in that case, the bitmap entry comes after the extent entry.
1735 : */
1736 146501 : n = rb_next(n);
1737 146501 : if (!n)
1738 : return NULL;
1739 145789 : entry = rb_entry(n, struct btrfs_free_space, offset_index);
1740 145789 : if (entry->offset != offset)
1741 : return NULL;
1742 :
1743 142558 : WARN_ON(!entry->bitmap);
1744 : return entry;
1745 9662575 : } else if (entry) {
1746 457881 : if (entry->bitmap) {
1747 : /*
1748 : * if previous extent entry covers the offset,
1749 : * we should return it instead of the bitmap entry
1750 : */
1751 48927 : n = rb_prev(&entry->offset_index);
1752 48927 : if (n) {
1753 36230 : prev = rb_entry(n, struct btrfs_free_space,
1754 : offset_index);
1755 36230 : if (!prev->bitmap &&
1756 32244 : prev->offset + prev->bytes > offset)
1757 3069 : entry = prev;
1758 : }
1759 : }
1760 457881 : return entry;
1761 : }
1762 :
1763 9204694 : if (!prev)
1764 : return NULL;
1765 :
1766 : /* find last entry before the 'offset' */
1767 9130014 : entry = prev;
1768 9130014 : if (entry->offset > offset) {
1769 3386568 : n = rb_prev(&entry->offset_index);
1770 3386568 : if (n) {
1771 : entry = rb_entry(n, struct btrfs_free_space,
1772 : offset_index);
1773 : ASSERT(entry->offset <= offset);
1774 : } else {
1775 644464 : if (fuzzy)
1776 : return entry;
1777 : else
1778 14904 : return NULL;
1779 : }
1780 : }
1781 :
1782 8485550 : if (entry->bitmap) {
1783 169095 : n = rb_prev(&entry->offset_index);
1784 169095 : if (n) {
1785 129129 : prev = rb_entry(n, struct btrfs_free_space,
1786 : offset_index);
1787 129129 : if (!prev->bitmap &&
1788 114234 : prev->offset + prev->bytes > offset)
1789 : return prev;
1790 : }
1791 169081 : if (entry->offset + BITS_PER_BITMAP * ctl->unit > offset)
1792 : return entry;
1793 8316455 : } else if (entry->offset + entry->bytes > offset)
1794 : return entry;
1795 :
1796 7941426 : if (!fuzzy)
1797 : return NULL;
1798 :
1799 1052855 : while (1) {
1800 1052855 : n = rb_next(&entry->offset_index);
1801 1052856 : if (!n)
1802 : return NULL;
1803 1031944 : entry = rb_entry(n, struct btrfs_free_space, offset_index);
1804 1031944 : if (entry->bitmap) {
1805 979 : if (entry->offset + BITS_PER_BITMAP *
1806 979 : ctl->unit > offset)
1807 : break;
1808 : } else {
1809 1030965 : if (entry->offset + entry->bytes > offset)
1810 : break;
1811 : }
1812 : }
1813 : return entry;
1814 : }
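
To make the fuzzy semantics concrete with hypothetical entries: given extent
entries covering [0, 4K) and [16K, 20K), a lookup of offset 8K with fuzzy == 0
returns NULL because no entry covers 8K, while the same lookup with fuzzy == 1
walks forward through the offset index and returns the [16K, 20K) entry, the
first free space at or after the requested offset.
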
1815 :
1816 4881418 : static inline void unlink_free_space(struct btrfs_free_space_ctl *ctl,
1817 : struct btrfs_free_space *info,
1818 : bool update_stat)
1819 : {
1820 4881418 : lockdep_assert_held(&ctl->tree_lock);
1821 :
1822 4881418 : rb_erase(&info->offset_index, &ctl->free_space_offset);
1823 4881419 : rb_erase_cached(&info->bytes_index, &ctl->free_space_bytes);
1824 4881414 : ctl->free_extents--;
1825 :
1826 4881414 : if (!info->bitmap && !btrfs_free_space_trimmed(info)) {
1827 2843010 : ctl->discardable_extents[BTRFS_STAT_CURR]--;
1828 2843010 : ctl->discardable_bytes[BTRFS_STAT_CURR] -= info->bytes;
1829 : }
1830 :
1831 4881414 : if (update_stat)
1832 4879137 : ctl->free_space -= info->bytes;
1833 4881414 : }
1834 :
1835 6062821 : static int link_free_space(struct btrfs_free_space_ctl *ctl,
1836 : struct btrfs_free_space *info)
1837 : {
1838 6062821 : int ret = 0;
1839 :
1840 6062821 : lockdep_assert_held(&ctl->tree_lock);
1841 :
1842 6062821 : ASSERT(info->bytes || info->bitmap);
1843 6062821 : ret = tree_insert_offset(ctl, NULL, info);
1844 6062818 : if (ret)
1845 : return ret;
1846 :
1847 6062818 : rb_add_cached(&info->bytes_index, &ctl->free_space_bytes, entry_less);
1848 :
1849 6062825 : if (!info->bitmap && !btrfs_free_space_trimmed(info)) {
1850 4009996 : ctl->discardable_extents[BTRFS_STAT_CURR]++;
1851 4009996 : ctl->discardable_bytes[BTRFS_STAT_CURR] += info->bytes;
1852 : }
1853 :
1854 6062825 : ctl->free_space += info->bytes;
1855 6062825 : ctl->free_extents++;
1856 6062825 : return ret;
1857 : }
1858 :
1859 3114647 : static void relink_bitmap_entry(struct btrfs_free_space_ctl *ctl,
1860 : struct btrfs_free_space *info)
1861 : {
1862 3114647 : ASSERT(info->bitmap);
1863 :
1864 : /*
1865 : * If our entry is empty it's because we're on a cluster and we don't
1866 : * want to re-link it into our ctl bytes index.
1867 : */
1868 3114647 : if (RB_EMPTY_NODE(&info->bytes_index))
1869 : return;
1870 :
1871 1532208 : lockdep_assert_held(&ctl->tree_lock);
1872 :
1873 1532208 : rb_erase_cached(&info->bytes_index, &ctl->free_space_bytes);
1874 1532212 : rb_add_cached(&info->bytes_index, &ctl->free_space_bytes, entry_less);
1875 : }
1876 :
1877 1484839 : static inline void bitmap_clear_bits(struct btrfs_free_space_ctl *ctl,
1878 : struct btrfs_free_space *info,
1879 : u64 offset, u64 bytes, bool update_stat)
1880 : {
1881 1484839 : unsigned long start, count, end;
1882 1484839 : int extent_delta = -1;
1883 :
1884 1484839 : start = offset_to_bit(info->offset, ctl->unit, offset);
1885 1484839 : count = bytes_to_bits(bytes, ctl->unit);
1886 1484839 : end = start + count;
1887 1484839 : ASSERT(end <= BITS_PER_BITMAP);
1888 :
1889 1484839 : bitmap_clear(info->bitmap, start, count);
1890 :
1891 1484838 : info->bytes -= bytes;
1892 1484838 : if (info->max_extent_size > ctl->unit)
1893 37813 : info->max_extent_size = 0;
1894 :
1895 1484838 : relink_bitmap_entry(ctl, info);
1896 :
1897 2969500 : if (start && test_bit(start - 1, info->bitmap))
1898 622 : extent_delta++;
1899 :
1900 2969440 : if (end < BITS_PER_BITMAP && test_bit(end, info->bitmap))
1901 1006340 : extent_delta++;
1902 :
1903 1484826 : info->bitmap_extents += extent_delta;
1904 1484826 : if (!btrfs_free_space_trimmed(info)) {
1905 1474188 : ctl->discardable_extents[BTRFS_STAT_CURR] += extent_delta;
1906 1474188 : ctl->discardable_bytes[BTRFS_STAT_CURR] -= bytes;
1907 : }
1908 :
1909 1484826 : if (update_stat)
1910 348704 : ctl->free_space -= bytes;
1911 1484826 : }
1912 :
1913 1572838 : static void bitmap_set_bits(struct btrfs_free_space_ctl *ctl,
1914 : struct btrfs_free_space *info, u64 offset,
1915 : u64 bytes)
1916 : {
1917 1572838 : unsigned long start, count, end;
1918 1572838 : int extent_delta = 1;
1919 :
1920 1572838 : start = offset_to_bit(info->offset, ctl->unit, offset);
1921 1572838 : count = bytes_to_bits(bytes, ctl->unit);
1922 1572838 : end = start + count;
1923 1572838 : ASSERT(end <= BITS_PER_BITMAP);
1924 :
1925 1572838 : bitmap_set(info->bitmap, start, count);
1926 :
1927 : /*
1928 : * We set some bytes, so we have no idea what the max extent size is
1929 : * anymore.
1930 : */
1931 1572837 : info->max_extent_size = 0;
1932 1572837 : info->bytes += bytes;
1933 1572837 : ctl->free_space += bytes;
1934 :
1935 1572837 : relink_bitmap_entry(ctl, info);
1936 :
1937 3145492 : if (start && test_bit(start - 1, info->bitmap))
1938 511569 : extent_delta--;
1939 :
1940 3145429 : if (end < BITS_PER_BITMAP && test_bit(end, info->bitmap))
1941 514421 : extent_delta--;
1942 :
1943 1572837 : info->bitmap_extents += extent_delta;
1944 1572837 : if (!btrfs_free_space_trimmed(info)) {
1945 1545147 : ctl->discardable_extents[BTRFS_STAT_CURR] += extent_delta;
1946 1545147 : ctl->discardable_bytes[BTRFS_STAT_CURR] += bytes;
1947 : }
1948 1572837 : }
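
The extent_delta accounting above and in bitmap_clear_bits() only ever needs to
look at the two bits adjacent to the modified run: a newly set run adds one
free extent, minus one for each occupied neighbor it merges with. A stand-alone
sketch of the set-side rule, with a single 64-bit word standing in for the
kernel bitmap and hypothetical values:

#include <stdio.h>

/* How the number of set-bit runs changes when bits
 * [start, start + count) are set in a 64-bit word. */
static int extent_delta_on_set(unsigned long long map, int start, int count)
{
	int delta = 1;	/* the new run itself */

	if (start > 0 && (map >> (start - 1)) & 1)
		delta--;	/* merges with the run on the left */
	if (start + count < 64 && (map >> (start + count)) & 1)
		delta--;	/* merges with the run on the right */
	return delta;
}

int main(void)
{
	/* 0xC has bits 2-3 set; setting bits 0-1 merges with it: delta 0. */
	printf("%d\n", extent_delta_on_set(0xCULL, 0, 2));
	/* Empty map: setting bits 0-1 creates a new run: delta 1. */
	printf("%d\n", extent_delta_on_set(0x0ULL, 0, 2));
	return 0;
}
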
1949 :
1950 : /*
1951 : * If we cannot find a suitable extent, we will use 'bytes' to record
1952 : * the size of the largest extent found.
1953 : */
1954 1848685 : static int search_bitmap(struct btrfs_free_space_ctl *ctl,
1955 : struct btrfs_free_space *bitmap_info, u64 *offset,
1956 : u64 *bytes, bool for_alloc)
1957 : {
1958 1848685 : unsigned long found_bits = 0;
1959 1848685 : unsigned long max_bits = 0;
1960 1848685 : unsigned long bits, i;
1961 1848685 : unsigned long next_zero;
1962 1848685 : unsigned long extent_bits;
1963 :
1964 : /*
1965 : * Skip searching the bitmap if we don't have a contiguous section that
1966 : * is large enough for this allocation.
1967 : */
1968 1848685 : if (for_alloc &&
1969 1827764 : bitmap_info->max_extent_size &&
1970 490085 : bitmap_info->max_extent_size < *bytes) {
1971 452129 : *bytes = bitmap_info->max_extent_size;
1972 452129 : return -1;
1973 : }
1974 :
1975 1396556 : i = offset_to_bit(bitmap_info->offset, ctl->unit,
1976 1396556 : max_t(u64, *offset, bitmap_info->offset));
1977 1396556 : bits = bytes_to_bits(*bytes, ctl->unit);
1978 :
1979 10788677 : for_each_set_bit_from(i, bitmap_info->bitmap, BITS_PER_BITMAP) {
1980 10731376 : if (for_alloc && bits == 1) {
1981 : found_bits = 1;
1982 : break;
1983 : }
1984 10626028 : next_zero = find_next_zero_bit(bitmap_info->bitmap,
1985 : BITS_PER_BITMAP, i);
1986 10626360 : extent_bits = next_zero - i;
1987 10626360 : if (extent_bits >= bits) {
1988 : found_bits = extent_bits;
1989 : break;
1990 9392121 : } else if (extent_bits > max_bits) {
1991 : max_bits = extent_bits;
1992 : }
1993 9392121 : i = next_zero;
1994 : }
1995 :
1996 1396562 : if (found_bits) {
1997 1339587 : *offset = (u64)(i * ctl->unit) + bitmap_info->offset;
1998 1339587 : *bytes = (u64)(found_bits) * ctl->unit;
1999 1339587 : return 0;
2000 : }
2001 :
2002 56975 : *bytes = (u64)(max_bits) * ctl->unit;
2003 56975 : bitmap_info->max_extent_size = *bytes;
2004 56975 : relink_bitmap_entry(ctl, bitmap_info);
2005 56975 : return -1;
2006 : }
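
The contract of search_bitmap() is: return the first run of set bits at least
as large as the request, or fail and report the largest run seen so the caller
can cache it as max_extent_size. Roughly sketched in user space, with plain bit
loops instead of find_next_zero_bit() and hypothetical sizes:

#include <stdio.h>

/* First fit over a 64-bit "free space" word; on failure, *max_run
 * reports the largest run found, like the max_extent_size cache. */
static int first_fit(unsigned long long map, int want, int *max_run)
{
	int run = 0, best = 0, start = -1;

	for (int i = 0; i < 64; i++) {
		if ((map >> i) & 1) {
			if (run == 0)
				start = i;
			if (++run >= want) {
				*max_run = run;
				return start;
			}
		} else {
			if (run > best)
				best = run;
			run = 0;
		}
	}
	*max_run = run > best ? run : best;
	return -1;
}

int main(void)
{
	int max_run;
	/* Bits 0-2 and 8-12 are free; ask for 4 contiguous bits. */
	int off = first_fit(0x1F07ULL, 4, &max_run);

	printf("offset %d, max_run %d\n", off, max_run); /* offset 8, max_run 4 */
	return 0;
}
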
2007 :
2008 : /* Cache the size of the max extent in bytes */
2009 : static struct btrfs_free_space *
2010 4302153 : find_free_space(struct btrfs_free_space_ctl *ctl, u64 *offset, u64 *bytes,
2011 : unsigned long align, u64 *max_extent_size, bool use_bytes_index)
2012 : {
2013 4302153 : struct btrfs_free_space *entry;
2014 4302153 : struct rb_node *node;
2015 4302153 : u64 tmp;
2016 4302153 : u64 align_off;
2017 4302153 : int ret;
2018 :
2019 4302153 : if (!ctl->free_space_offset.rb_node)
2020 1081 : goto out;
2021 4301072 : again:
2022 4348347 : if (use_bytes_index) {
2023 2674771 : node = rb_first_cached(&ctl->free_space_bytes);
2024 : } else {
2025 1673576 : entry = tree_search_offset(ctl, offset_to_bitmap(ctl, *offset),
2026 : 0, 1);
2027 1673577 : if (!entry)
2028 20459 : goto out;
2029 1653118 : node = &entry->offset_index;
2030 : }
2031 :
2032 5938076 : for (; node; node = rb_next(node)) {
2033 5906342 : if (use_bytes_index)
2034 3117799 : entry = rb_entry(node, struct btrfs_free_space,
2035 : bytes_index);
2036 : else
2037 : entry = rb_entry(node, struct btrfs_free_space,
2038 : offset_index);
2039 :
2040 : /*
2041 : * If we are using the bytes index then all subsequent entries
2042 : * in this tree are going to be < bytes, so simply set the max
2043 : * extent size and exit the loop.
2044 : *
2045 : * If we're using the offset index then we need to keep going
2046 : * through the rest of the tree.
2047 : */
2048 5906342 : if (entry->bytes < *bytes) {
2049 1411726 : *max_extent_size = max(get_max_extent_size(entry),
2050 : *max_extent_size);
2051 1411726 : if (use_bytes_index)
2052 : break;
2053 1150360 : continue;
2054 : }
2055 :
2056 : /* Make sure the space returned is big enough to match our
2057 : * requested alignment.
2058 : */
2059 4494616 : if (*bytes >= align) {
2060 4494616 : tmp = entry->offset - ctl->start + align - 1;
2061 4494616 : tmp = div64_u64(tmp, align);
2062 4494616 : tmp = tmp * align + ctl->start;
2063 4494616 : align_off = tmp - entry->offset;
2064 : } else {
2065 0 : align_off = 0;
2066 0 : tmp = entry->offset;
2067 : }
2068 :
2069 : /*
2070 : * We don't break here if we're using the bytes index because we
2071 : * may have another entry that has the correct alignment that is
2072 : * the right size, so we don't want to miss that possibility.
2073 : * At worst this adds another loop through the logic, but if we
2074 : * broke here we could prematurely ENOSPC.
2075 : */
2076 4494616 : if (entry->bytes < *bytes + align_off) {
2077 0 : *max_extent_size = max(get_max_extent_size(entry),
2078 : *max_extent_size);
2079 0 : continue;
2080 : }
2081 :
2082 4494616 : if (entry->bitmap) {
2083 692065 : struct rb_node *old_next = rb_next(node);
2084 692066 : u64 size = *bytes;
2085 :
2086 692066 : ret = search_bitmap(ctl, entry, &tmp, &size, true);
2087 692066 : if (!ret) {
2088 184965 : *offset = tmp;
2089 184965 : *bytes = size;
2090 184965 : return entry;
2091 : } else {
2092 1014202 : *max_extent_size =
2093 507101 : max(get_max_extent_size(entry),
2094 : *max_extent_size);
2095 : }
2096 :
2097 : /*
2098 : * The bitmap may have gotten re-arranged in the space
2099 : * index here because the max_extent_size may have been
2100 : * updated. Start from the beginning again if this
2101 : * happened.
2102 : */
2103 507101 : if (use_bytes_index && old_next != rb_next(node))
2104 47275 : goto again;
2105 459827 : continue;
2106 : }
2107 :
2108 3802551 : *offset = tmp;
2109 3802551 : *bytes = entry->bytes - align_off;
2110 3802551 : return entry;
2111 : }
2112 293098 : out:
2113 : return NULL;
2114 : }
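
The tmp/align_off arithmetic in the loop above is a round-up to the next
alignment boundary measured from ctl->start rather than from absolute offset
zero, so stripes stay aligned within the block group. A hedged stand-alone
version, with hypothetical names and values:

#include <stdio.h>

/* Round offset up to the next multiple of align measured from base,
 * mirroring the tmp computation in find_free_space(). */
static unsigned long long align_up_from(unsigned long long offset,
					unsigned long long base,
					unsigned long long align)
{
	unsigned long long tmp = offset - base + align - 1;

	tmp /= align;
	return tmp * align + base;
}

int main(void)
{
	/* A block group at 1 MiB, an entry 4 KiB into it, and a
	 * 64 KiB full-stripe alignment (all hypothetical). */
	unsigned long long base = 1048576, off = base + 4096;

	printf("%llu\n", align_up_from(off, base, 65536)); /* 1114112 */
	return 0;
}
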
2115 :
2116 1607 : static void add_new_bitmap(struct btrfs_free_space_ctl *ctl,
2117 : struct btrfs_free_space *info, u64 offset)
2118 : {
2119 1607 : info->offset = offset_to_bitmap(ctl, offset);
2120 1607 : info->bytes = 0;
2121 1607 : info->bitmap_extents = 0;
2122 1607 : INIT_LIST_HEAD(&info->list);
2123 1607 : link_free_space(ctl, info);
2124 1607 : ctl->total_bitmaps++;
2125 1607 : recalculate_thresholds(ctl);
2126 1607 : }
2127 :
2128 1278 : static void free_bitmap(struct btrfs_free_space_ctl *ctl,
2129 : struct btrfs_free_space *bitmap_info)
2130 : {
2131 : /*
2132 : * Normally when this is called, the bitmap is completely empty. However,
2133 : * if we are blowing up the free space cache for one reason or another
2134 : * via __btrfs_remove_free_space_cache(), then the bitmap may not be
2135 : * empty and we may leave stats on the table.
2136 : */
2137 1278 : if (bitmap_info->bytes && !btrfs_free_space_trimmed(bitmap_info)) {
2138 230 : ctl->discardable_extents[BTRFS_STAT_CURR] -=
2139 230 : bitmap_info->bitmap_extents;
2140 230 : ctl->discardable_bytes[BTRFS_STAT_CURR] -= bitmap_info->bytes;
2141 :
2142 : }
2143 1278 : unlink_free_space(ctl, bitmap_info, true);
2144 1278 : kmem_cache_free(btrfs_free_space_bitmap_cachep, bitmap_info->bitmap);
2145 1278 : kmem_cache_free(btrfs_free_space_cachep, bitmap_info);
2146 1278 : ctl->total_bitmaps--;
2147 1278 : recalculate_thresholds(ctl);
2148 1278 : }
2149 :
2150 1365 : static noinline int remove_from_bitmap(struct btrfs_free_space_ctl *ctl,
2151 : struct btrfs_free_space *bitmap_info,
2152 : u64 *offset, u64 *bytes)
2153 : {
2154 1365 : u64 end;
2155 1365 : u64 search_start, search_bytes;
2156 1365 : int ret;
2157 :
2158 1365 : again:
2159 1365 : end = bitmap_info->offset + (u64)(BITS_PER_BITMAP * ctl->unit) - 1;
2160 :
2161 : /*
2162 : * We need to search for bits in this bitmap. We could only cover some
2163 : * of the extent in this bitmap thanks to how we add space, so we need
2164 : * to search for as much of it as we can and clear that amount, and then
2165 : * go searching for the next bit.
2166 : */
2167 1365 : search_start = *offset;
2168 1365 : search_bytes = ctl->unit;
2169 1365 : search_bytes = min(search_bytes, end - search_start + 1);
2170 1365 : ret = search_bitmap(ctl, bitmap_info, &search_start, &search_bytes,
2171 : false);
2172 1365 : if (ret < 0 || search_start != *offset)
2173 : return -EINVAL;
2174 :
2175 : /* We may have found more bits than what we need */
2176 1365 : search_bytes = min(search_bytes, *bytes);
2177 :
2178 : /* Cannot clear past the end of the bitmap */
2179 1365 : search_bytes = min(search_bytes, end - search_start + 1);
2180 :
2181 1365 : bitmap_clear_bits(ctl, bitmap_info, search_start, search_bytes, true);
2182 1365 : *offset += search_bytes;
2183 1365 : *bytes -= search_bytes;
2184 :
2185 1365 : if (*bytes) {
2186 0 : struct rb_node *next = rb_next(&bitmap_info->offset_index);
2187 0 : if (!bitmap_info->bytes)
2188 0 : free_bitmap(ctl, bitmap_info);
2189 :
2190 : /*
2191 : * no entry after this bitmap, but we still have bytes to
2192 : * remove, so something has gone wrong.
2193 : */
2194 0 : if (!next)
2195 : return -EINVAL;
2196 :
2197 0 : bitmap_info = rb_entry(next, struct btrfs_free_space,
2198 : offset_index);
2199 :
2200 : /*
2201 : * if the next entry isn't a bitmap we need to return to let the
2202 : * extent stuff do its work.
2203 : */
2204 0 : if (!bitmap_info->bitmap)
2205 : return -EAGAIN;
2206 :
2207 : /*
2208 : * OK, the next item is a bitmap, but it may not actually hold
2209 : * the information for the rest of this free space stuff, so
2210 : * look for it, and if we don't find it return so we can try
2211 : * everything over again.
2212 : */
2213 0 : search_start = *offset;
2214 0 : search_bytes = ctl->unit;
2215 0 : ret = search_bitmap(ctl, bitmap_info, &search_start,
2216 : &search_bytes, false);
2217 0 : if (ret < 0 || search_start != *offset)
2218 : return -EAGAIN;
2219 :
2220 0 : goto again;
2221 1365 : } else if (!bitmap_info->bytes)
2222 11 : free_bitmap(ctl, bitmap_info);
2223 :
2224 : return 0;
2225 : }
2226 :
2227 1572838 : static u64 add_bytes_to_bitmap(struct btrfs_free_space_ctl *ctl,
2228 : struct btrfs_free_space *info, u64 offset,
2229 : u64 bytes, enum btrfs_trim_state trim_state)
2230 : {
2231 1572838 : u64 bytes_to_set = 0;
2232 1572838 : u64 end;
2233 :
2234 : /*
2235 : * This is a tradeoff to make bitmap trim state minimal. We mark the
2236 : * whole bitmap untrimmed if at any point we add untrimmed regions.
2237 : */
2238 1572838 : if (trim_state == BTRFS_TRIM_STATE_UNTRIMMED) {
2239 1529318 : if (btrfs_free_space_trimmed(info)) {
2240 1666 : ctl->discardable_extents[BTRFS_STAT_CURR] +=
2241 1666 : info->bitmap_extents;
2242 1666 : ctl->discardable_bytes[BTRFS_STAT_CURR] += info->bytes;
2243 : }
2244 1529318 : info->trim_state = BTRFS_TRIM_STATE_UNTRIMMED;
2245 : }
2246 :
2247 1572838 : end = info->offset + (u64)(BITS_PER_BITMAP * ctl->unit);
2248 :
2249 1572838 : bytes_to_set = min(end - offset, bytes);
2250 :
2251 1572838 : bitmap_set_bits(ctl, info, offset, bytes_to_set);
2252 :
2253 1572837 : return bytes_to_set;
2254 :
2255 : }
2256 :
2257 3326046 : static bool use_bitmap(struct btrfs_free_space_ctl *ctl,
2258 : struct btrfs_free_space *info)
2259 : {
2260 3326046 : struct btrfs_block_group *block_group = ctl->block_group;
2261 3326046 : struct btrfs_fs_info *fs_info = block_group->fs_info;
2262 3326046 : bool forced = false;
2263 :
2264 : #ifdef CONFIG_BTRFS_DEBUG
2265 : if (btrfs_should_fragment_free_space(block_group))
2266 : forced = true;
2267 : #endif
2268 :
2269 : /* This is a way to reclaim large regions from the bitmaps. */
2270 3326046 : if (!forced && info->bytes >= FORCE_EXTENT_THRESHOLD)
2271 : return false;
2272 :
2273 : /*
2274 : * If we are below the extents threshold then we can add this as an
2275 : * extent, and don't have to deal with the bitmap
2276 : */
2277 3280375 : if (!forced && ctl->free_extents < ctl->extents_thresh) {
2278 : /*
2279 : * If this block group has some small extents we don't want to
2280 : * use up all of our free slots in the cache with them; we want
2281 : * to reserve them for larger extents. However, if we have plenty
2282 : * of cache left then go ahead and add them, no sense in adding
2283 : * the overhead of a bitmap if we don't have to.
2284 : */
2285 1858298 : if (info->bytes <= fs_info->sectorsize * 8) {
2286 1641083 : if (ctl->free_extents * 3 <= ctl->extents_thresh)
2287 : return false;
2288 : } else {
2289 : return false;
2290 : }
2291 : }
2292 :
2293 : /*
2294 : * The original block groups from mkfs can be really small, like 8
2295 : * megabytes, so don't bother with a bitmap for those entries. However
2296 : * some block groups can be smaller than what a bitmap would cover but
2297 : * are still large enough that they could overflow the 32k memory limit,
2298 : * so still allow those block groups to have a bitmap
2299 : * entry.
2300 : */
2301 2818951 : if (((BITS_PER_BITMAP * ctl->unit) >> 1) > block_group->length)
2302 1246149 : return false;
2303 :
2304 : return true;
2305 : }
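
For scale: on a typical configuration with 4 KiB pages and a 4 KiB sectorsize,
BITS_PER_BITMAP is 32768, so a single bitmap entry spans 32768 * 4096 bytes =
128 MiB. The final check above therefore keeps any block group smaller than
64 MiB (half a bitmap's coverage) on pure extent entries.
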
2306 :
2307 : static const struct btrfs_free_space_op free_space_op = {
2308 : .use_bitmap = use_bitmap,
2309 : };
2310 :
2311 3326044 : static int insert_into_bitmap(struct btrfs_free_space_ctl *ctl,
2312 : struct btrfs_free_space *info)
2313 : {
2314 3326044 : struct btrfs_free_space *bitmap_info;
2315 3326044 : struct btrfs_block_group *block_group = NULL;
2316 3326044 : int added = 0;
2317 3326044 : u64 bytes, offset, bytes_added;
2318 3326044 : enum btrfs_trim_state trim_state;
2319 3326044 : int ret;
2320 :
2321 3326044 : bytes = info->bytes;
2322 3326044 : offset = info->offset;
2323 3326044 : trim_state = info->trim_state;
2324 :
2325 3326044 : if (!ctl->op->use_bitmap(ctl, info))
2326 : return 0;
2327 :
2328 1572801 : if (ctl->op == &free_space_op)
2329 1572801 : block_group = ctl->block_group;
2330 1572801 : again:
2331 : /*
2332 : * Since we link bitmaps right into the cluster we need to see if we
2333 : * have a cluster here, and if so and it has our bitmap we need to add
2334 : * the free space to that bitmap.
2335 : */
2336 1576052 : if (block_group && !list_empty(&block_group->cluster_list)) {
2337 1353329 : struct btrfs_free_cluster *cluster;
2338 1353329 : struct rb_node *node;
2339 1353329 : struct btrfs_free_space *entry;
2340 :
2341 1353329 : cluster = list_entry(block_group->cluster_list.next,
2342 : struct btrfs_free_cluster,
2343 : block_group_list);
2344 1353329 : spin_lock(&cluster->lock);
2345 1353329 : node = rb_first(&cluster->root);
2346 1353329 : if (!node) {
2347 118 : spin_unlock(&cluster->lock);
2348 118 : goto no_cluster_bitmap;
2349 : }
2350 :
2351 1353211 : entry = rb_entry(node, struct btrfs_free_space, offset_index);
2352 1353211 : if (!entry->bitmap) {
2353 691500 : spin_unlock(&cluster->lock);
2354 691500 : goto no_cluster_bitmap;
2355 : }
2356 :
2357 661711 : if (entry->offset == offset_to_bitmap(ctl, offset)) {
2358 446711 : bytes_added = add_bytes_to_bitmap(ctl, entry, offset,
2359 : bytes, trim_state);
2360 446711 : bytes -= bytes_added;
2361 446711 : offset += bytes_added;
2362 : }
2363 661711 : spin_unlock(&cluster->lock);
2364 661711 : if (!bytes) {
2365 446711 : ret = 1;
2366 446711 : goto out;
2367 : }
2368 : }
2369 :
2370 437723 : no_cluster_bitmap:
2371 1129341 : bitmap_info = tree_search_offset(ctl, offset_to_bitmap(ctl, offset),
2372 : 1, 0);
2373 1129341 : if (!bitmap_info) {
2374 3214 : ASSERT(added == 0);
2375 3214 : goto new_bitmap;
2376 : }
2377 :
2378 1126127 : bytes_added = add_bytes_to_bitmap(ctl, bitmap_info, offset, bytes,
2379 : trim_state);
2380 1126128 : bytes -= bytes_added;
2381 1126128 : offset += bytes_added;
2382 1126128 : added = 0;
2383 :
2384 1126128 : if (!bytes) {
2385 1126091 : ret = 1;
2386 1126091 : goto out;
2387 : } else
2388 37 : goto again;
2389 :
2390 : new_bitmap:
2391 3214 : if (info && info->bitmap) {
2392 1607 : add_new_bitmap(ctl, info, offset);
2393 1607 : added = 1;
2394 1607 : info = NULL;
2395 1607 : goto again;
2396 : } else {
2397 1607 : spin_unlock(&ctl->tree_lock);
2398 :
2399 : /* no pre-allocated info, allocate a new one */
2400 1607 : if (!info) {
2401 0 : info = kmem_cache_zalloc(btrfs_free_space_cachep,
2402 : GFP_NOFS);
2403 0 : if (!info) {
2404 0 : spin_lock(&ctl->tree_lock);
2405 0 : ret = -ENOMEM;
2406 0 : goto out;
2407 : }
2408 : }
2409 :
2410 : /* allocate the bitmap */
2411 1607 : info->bitmap = kmem_cache_zalloc(btrfs_free_space_bitmap_cachep,
2412 : GFP_NOFS);
2413 1607 : info->trim_state = BTRFS_TRIM_STATE_TRIMMED;
2414 1607 : spin_lock(&ctl->tree_lock);
2415 1607 : if (!info->bitmap) {
2416 0 : ret = -ENOMEM;
2417 0 : goto out;
2418 : }
2419 1607 : goto again;
2420 : }
2421 :
2422 1572802 : out:
2423 1572802 : if (info) {
2424 1571195 : if (info->bitmap)
2425 0 : kmem_cache_free(btrfs_free_space_bitmap_cachep,
2426 : info->bitmap);
2427 1571195 : kmem_cache_free(btrfs_free_space_cachep, info);
2428 : }
2429 :
2430 : return ret;
2431 : }
2432 :
2433 : /*
2434 : * Free space merging rules:
2435 : * 1) Merge trimmed areas together
2436 : * 2) Let untrimmed areas coalesce with trimmed areas
2437 : * 3) Always pull neighboring regions from bitmaps
2438 : *
2439 : * The above rules are for when we merge free space based on btrfs_trim_state.
2440 : * Rules 2 and 3 are subtle because they are suboptimal, but are done for the
2441 : * same reason: to promote larger extent regions which makes life easier for
2442 : * find_free_extent(). Rule 2 enables coalescing based on the common path
2443 : * being returning free space from btrfs_finish_extent_commit(). So when free
2444 : * space is trimmed, it will prevent aggregating trimmed new region and
2445 : * untrimmed regions in the rb_tree. Rule 3 is purely to obtain larger extents
2446 : * and provide find_free_extent() with the largest extents possible hoping for
2447 : * the reuse path.
2448 : */
2449 4156743 : static bool try_merge_free_space(struct btrfs_free_space_ctl *ctl,
2450 : struct btrfs_free_space *info, bool update_stat)
2451 : {
2452 4156743 : struct btrfs_free_space *left_info = NULL;
2453 4156743 : struct btrfs_free_space *right_info;
2454 4156743 : bool merged = false;
2455 4156743 : u64 offset = info->offset;
2456 4156743 : u64 bytes = info->bytes;
2457 4156743 : const bool is_trimmed = btrfs_free_space_trimmed(info);
2458 4156743 : struct rb_node *right_prev = NULL;
2459 :
2460 : /*
2461 : * First we want to see if there is free space adjacent to the range we
2462 : * are adding; if there is, remove that struct and add a new one to
2463 : * cover the entire range.
2464 : */
2465 4156743 : right_info = tree_search_offset(ctl, offset + bytes, 0, 0);
2466 4156742 : if (right_info)
2467 521832 : right_prev = rb_prev(&right_info->offset_index);
2468 :
2469 521832 : if (right_prev)
2470 : left_info = rb_entry(right_prev, struct btrfs_free_space, offset_index);
2471 3676191 : else if (!right_info)
2472 3634910 : left_info = tree_search_offset(ctl, offset - 1, 0, 0);
2473 :
2474 : /* See try_merge_free_space() comment. */
2475 4156741 : if (right_info && !right_info->bitmap &&
2476 3167 : (!is_trimmed || btrfs_free_space_trimmed(right_info))) {
2477 352129 : unlink_free_space(ctl, right_info, update_stat);
2478 352129 : info->bytes += right_info->bytes;
2479 352129 : kmem_cache_free(btrfs_free_space_cachep, right_info);
2480 352129 : merged = true;
2481 : }
2482 :
2483 : /* See try_merge_free_space() comment. */
2484 4156741 : if (left_info && !left_info->bitmap &&
2485 756666 : left_info->offset + left_info->bytes == offset &&
2486 3496 : (!is_trimmed || btrfs_free_space_trimmed(left_info))) {
2487 488712 : unlink_free_space(ctl, left_info, update_stat);
2488 488712 : info->offset = left_info->offset;
2489 488712 : info->bytes += left_info->bytes;
2490 488712 : kmem_cache_free(btrfs_free_space_cachep, left_info);
2491 488712 : merged = true;
2492 : }
2493 :
2494 4156741 : return merged;
2495 : }
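
Stripped of the trim-state rules and the rb_tree plumbing, the merge itself is
plain coalescing of exactly-touching [offset, offset + bytes) ranges. A minimal
sketch with a hypothetical range struct:

#include <stdbool.h>
#include <stdio.h>

struct range {
	unsigned long long offset;
	unsigned long long bytes;
};

/* Absorb left/right neighbors into *info when they touch exactly. */
static bool try_merge(struct range *info, const struct range *left,
		      const struct range *right)
{
	bool merged = false;

	if (right && info->offset + info->bytes == right->offset) {
		info->bytes += right->bytes;
		merged = true;
	}
	if (left && left->offset + left->bytes == info->offset) {
		info->offset = left->offset;
		info->bytes += left->bytes;
		merged = true;
	}
	return merged;
}

int main(void)
{
	struct range left = { 0, 4096 };
	struct range info = { 4096, 4096 };
	struct range right = { 8192, 8192 };

	try_merge(&info, &left, &right);
	/* Prints [0, 16384): one contiguous free region. */
	printf("[%llu, %llu)\n", info.offset, info.offset + info.bytes);
	return 0;
}
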
2496 :
2497 460740 : static bool steal_from_bitmap_to_end(struct btrfs_free_space_ctl *ctl,
2498 : struct btrfs_free_space *info,
2499 : bool update_stat)
2500 : {
2501 460740 : struct btrfs_free_space *bitmap;
2502 460740 : unsigned long i;
2503 460740 : unsigned long j;
2504 460740 : const u64 end = info->offset + info->bytes;
2505 460740 : const u64 bitmap_offset = offset_to_bitmap(ctl, end);
2506 460740 : u64 bytes;
2507 :
2508 460740 : bitmap = tree_search_offset(ctl, bitmap_offset, 1, 0);
2509 460740 : if (!bitmap)
2510 : return false;
2511 :
2512 385171 : i = offset_to_bit(bitmap->offset, ctl->unit, end);
2513 385171 : j = find_next_zero_bit(bitmap->bitmap, BITS_PER_BITMAP, i);
2514 385171 : if (j == i)
2515 : return false;
2516 100833 : bytes = (j - i) * ctl->unit;
2517 100833 : info->bytes += bytes;
2518 :
2519 : /* See try_merge_free_space() comment. */
2520 100833 : if (!btrfs_free_space_trimmed(bitmap))
2521 97974 : info->trim_state = BTRFS_TRIM_STATE_UNTRIMMED;
2522 :
2523 100833 : bitmap_clear_bits(ctl, bitmap, end, bytes, update_stat);
2524 :
2525 100833 : if (!bitmap->bytes)
2526 354 : free_bitmap(ctl, bitmap);
2527 :
2528 : return true;
2529 : }
2530 :
2531 460522 : static bool steal_from_bitmap_to_front(struct btrfs_free_space_ctl *ctl,
2532 : struct btrfs_free_space *info,
2533 : bool update_stat)
2534 : {
2535 460522 : struct btrfs_free_space *bitmap;
2536 460522 : u64 bitmap_offset;
2537 460522 : unsigned long i;
2538 460522 : unsigned long j;
2539 460522 : unsigned long prev_j;
2540 460522 : u64 bytes;
2541 :
2542 460522 : bitmap_offset = offset_to_bitmap(ctl, info->offset);
2543 : /* If we're on a boundary, try the previous logical bitmap. */
2544 460522 : if (bitmap_offset == info->offset) {
2545 20914 : if (info->offset == 0)
2546 : return false;
2547 20914 : bitmap_offset = offset_to_bitmap(ctl, info->offset - 1);
2548 : }
2549 :
2550 460522 : bitmap = tree_search_offset(ctl, bitmap_offset, 1, 0);
2551 460522 : if (!bitmap)
2552 : return false;
2553 :
2554 364478 : i = offset_to_bit(bitmap->offset, ctl->unit, info->offset) - 1;
2555 364478 : j = 0;
2556 364478 : prev_j = (unsigned long)-1;
2557 3944854648 : for_each_clear_bit_from(j, bitmap->bitmap, BITS_PER_BITMAP) {
2558 3944854616 : if (j > i)
2559 : break;
2560 3944490170 : prev_j = j;
2561 : }
2562 364478 : if (prev_j == i)
2563 : return false;
2564 :
2565 45828 : if (prev_j == (unsigned long)-1)
2566 27 : bytes = (i + 1) * ctl->unit;
2567 : else
2568 45801 : bytes = (i - prev_j) * ctl->unit;
2569 :
2570 45828 : info->offset -= bytes;
2571 45828 : info->bytes += bytes;
2572 :
2573 : /* See try_merge_free_space() comment. */
2574 45828 : if (!btrfs_free_space_trimmed(bitmap))
2575 45789 : info->trim_state = BTRFS_TRIM_STATE_UNTRIMMED;
2576 :
2577 45828 : bitmap_clear_bits(ctl, bitmap, info->offset, bytes, update_stat);
2578 :
2579 45828 : if (!bitmap->bytes)
2580 30 : free_bitmap(ctl, bitmap);
2581 :
2582 : return true;
2583 : }
2584 :
2585 : /*
2586 : * We prefer always to allocate from extent entries, both for clustered and
2587 : * non-clustered allocation requests. So when attempting to add a new extent
2588 : * entry, try to see if there's adjacent free space in bitmap entries, and if
2589 : * there is, migrate that space from the bitmaps to the extent.
2590 : * Like this we get better chances of satisfying space allocation requests
2591 : * because we attempt to satisfy them based on a single cache entry, and never
2592 : * on 2 or more entries - even if the entries represent a contiguous free space
2593 : * region (e.g. 1 extent entry + 1 bitmap entry starting where the extent entry
2594 : * ends).
2595 : */
2596 2445463 : static void steal_from_bitmap(struct btrfs_free_space_ctl *ctl,
2597 : struct btrfs_free_space *info,
2598 : bool update_stat)
2599 : {
2600 : /*
2601 : * Only work with disconnected entries, as we can change their offset;
2602 : * they must also be extent entries.
2603 : */
2604 2445463 : ASSERT(!info->bitmap);
2605 2445463 : ASSERT(RB_EMPTY_NODE(&info->offset_index));
2606 :
2607 2445463 : if (ctl->total_bitmaps > 0) {
2608 460740 : bool stole_end;
2609 460740 : bool stole_front = false;
2610 :
2611 460740 : stole_end = steal_from_bitmap_to_end(ctl, info, update_stat);
2612 460740 : if (ctl->total_bitmaps > 0)
2613 460522 : stole_front = steal_from_bitmap_to_front(ctl, info,
2614 : update_stat);
2615 :
2616 460740 : if (stole_end || stole_front)
2617 138478 : try_merge_free_space(ctl, info, update_stat);
2618 : }
2619 2445463 : }
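
As a hypothetical example of the migration: if an extent entry covers
[1M, 1M + 64K) and the bitmap covering the byte at 1M + 64K has its next 32K
worth of bits set, steal_from_bitmap_to_end() clears those bits and grows the
extent entry to 96K, so a later 96K allocation can be satisfied from a single
entry instead of two.
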
2620 :
2621 3989902 : int __btrfs_add_free_space(struct btrfs_block_group *block_group,
2622 : u64 offset, u64 bytes,
2623 : enum btrfs_trim_state trim_state)
2624 : {
2625 3989902 : struct btrfs_fs_info *fs_info = block_group->fs_info;
2626 3989902 : struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
2627 3989902 : struct btrfs_free_space *info;
2628 3989902 : int ret = 0;
2629 3989902 : u64 filter_bytes = bytes;
2630 :
2631 3989902 : ASSERT(!btrfs_is_zoned(fs_info));
2632 :
2633 3989902 : info = kmem_cache_zalloc(btrfs_free_space_cachep, GFP_NOFS);
2634 3989904 : if (!info)
2635 : return -ENOMEM;
2636 :
2637 3989904 : info->offset = offset;
2638 3989904 : info->bytes = bytes;
2639 3989904 : info->trim_state = trim_state;
2640 3989904 : RB_CLEAR_NODE(&info->offset_index);
2641 3989904 : RB_CLEAR_NODE(&info->bytes_index);
2642 :
2643 3989904 : spin_lock(&ctl->tree_lock);
2644 :
2645 3989906 : if (try_merge_free_space(ctl, info, true))
2646 663860 : goto link;
2647 :
2648 : /*
2649 : * There was no extent directly to the left or right of this new
2650 : * extent, so we know we're going to have to allocate a new extent;
2651 : * before we do that, see if we need to drop this into a bitmap.
2652 : */
2653 3326044 : ret = insert_into_bitmap(ctl, info);
2654 3326047 : if (ret < 0) {
2655 0 : goto out;
2656 3326047 : } else if (ret) {
2657 1572803 : ret = 0;
2658 1572803 : goto out;
2659 : }
2660 1753244 : link:
2661 : /*
2662 : * Only steal free space from adjacent bitmaps if we're sure we're not
2663 : * going to add the new free space to existing bitmap entries - because
2664 : * that would mean unnecessary work that would be reverted. Therefore
2665 : * attempt to steal space from bitmaps if we're adding an extent entry.
2666 : */
2667 2417104 : steal_from_bitmap(ctl, info, true);
2668 :
2669 2417104 : filter_bytes = max(filter_bytes, info->bytes);
2670 :
2671 2417104 : ret = link_free_space(ctl, info);
2672 2417104 : if (ret)
2673 0 : kmem_cache_free(btrfs_free_space_cachep, info);
2674 2417104 : out:
2675 3989907 : btrfs_discard_update_discardable(block_group);
2676 3989906 : spin_unlock(&ctl->tree_lock);
2677 :
2678 3989906 : if (ret) {
2679 0 : btrfs_crit(fs_info, "unable to add free space :%d", ret);
2680 3989906 : ASSERT(ret != -EEXIST);
2681 : }
2682 :
2683 3989906 : if (trim_state != BTRFS_TRIM_STATE_TRIMMED) {
2684 3817814 : btrfs_discard_check_filter(block_group, filter_bytes);
2685 3817814 : btrfs_discard_queue_work(&fs_info->discard_ctl, block_group);
2686 : }
2687 :
2688 : return ret;
2689 : }
2690 :
2691 0 : static int __btrfs_add_free_space_zoned(struct btrfs_block_group *block_group,
2692 : u64 bytenr, u64 size, bool used)
2693 : {
2694 0 : struct btrfs_space_info *sinfo = block_group->space_info;
2695 0 : struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
2696 0 : u64 offset = bytenr - block_group->start;
2697 0 : u64 to_free, to_unusable;
2698 0 : int bg_reclaim_threshold = 0;
2699 0 : bool initial = (size == block_group->length);
2700 0 : u64 reclaimable_unusable;
2701 :
2702 0 : WARN_ON(!initial && offset + size > block_group->zone_capacity);
2703 :
2704 0 : if (!initial)
2705 0 : bg_reclaim_threshold = READ_ONCE(sinfo->bg_reclaim_threshold);
2706 :
2707 0 : spin_lock(&ctl->tree_lock);
2708 : /* Count initial region as zone_unusable until it gets activated. */
2709 0 : if (!used)
2710 : to_free = size;
2711 0 : else if (initial &&
2712 0 : test_bit(BTRFS_FS_ACTIVE_ZONE_TRACKING, &block_group->fs_info->flags) &&
2713 0 : (block_group->flags & (BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_SYSTEM)))
2714 : to_free = 0;
2715 0 : else if (initial)
2716 0 : to_free = block_group->zone_capacity;
2717 0 : else if (offset >= block_group->alloc_offset)
2718 : to_free = size;
2719 0 : else if (offset + size <= block_group->alloc_offset)
2720 : to_free = 0;
2721 : else
2722 0 : to_free = offset + size - block_group->alloc_offset;
2723 0 : to_unusable = size - to_free;
2724 :
2725 0 : ctl->free_space += to_free;
2726 : /*
2727 : * If the block group is read-only, we should account freed space into
2728 : * bytes_readonly.
2729 : */
2730 0 : if (!block_group->ro)
2731 0 : block_group->zone_unusable += to_unusable;
2732 0 : spin_unlock(&ctl->tree_lock);
2733 0 : if (!used) {
2734 0 : spin_lock(&block_group->lock);
2735 0 : block_group->alloc_offset -= size;
2736 0 : spin_unlock(&block_group->lock);
2737 : }
2738 :
2739 0 : reclaimable_unusable = block_group->zone_unusable -
2740 0 : (block_group->length - block_group->zone_capacity);
2741 : /* All the region is now unusable. Mark it as unused and reclaim */
2742 0 : if (block_group->zone_unusable == block_group->length &&
2743 0 : block_group->alloc_offset) {
2744 0 : btrfs_mark_bg_unused(block_group);
2745 0 : } else if (bg_reclaim_threshold &&
2746 : reclaimable_unusable >=
2747 0 : mult_perc(block_group->zone_capacity, bg_reclaim_threshold)) {
2748 0 : btrfs_mark_bg_to_reclaim(block_group);
2749 : }
2750 :
2751 0 : return 0;
2752 : }
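
As a worked example of the to_free/to_unusable split, with hypothetical
numbers: freeing a 4 MiB region at offset 8 MiB in a zoned block group whose
alloc_offset is 10 MiB takes the final branch, so to_free = 8 MiB + 4 MiB -
10 MiB = 2 MiB and to_unusable = 2 MiB; the 2 MiB behind the write pointer can
only be recovered by a zone reset and is therefore accounted as zone_unusable.
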
2753 :
2754 3804785 : int btrfs_add_free_space(struct btrfs_block_group *block_group,
2755 : u64 bytenr, u64 size)
2756 : {
2757 3804785 : enum btrfs_trim_state trim_state = BTRFS_TRIM_STATE_UNTRIMMED;
2758 :
2759 3804785 : if (btrfs_is_zoned(block_group->fs_info))
2760 0 : return __btrfs_add_free_space_zoned(block_group, bytenr, size,
2761 : true);
2762 :
2763 3804785 : if (btrfs_test_opt(block_group->fs_info, DISCARD_SYNC))
2764 39 : trim_state = BTRFS_TRIM_STATE_TRIMMED;
2765 :
2766 3804785 : return __btrfs_add_free_space(block_group, bytenr, size, trim_state);
2767 : }
2768 :
2769 0 : int btrfs_add_free_space_unused(struct btrfs_block_group *block_group,
2770 : u64 bytenr, u64 size)
2771 : {
2772 0 : if (btrfs_is_zoned(block_group->fs_info))
2773 0 : return __btrfs_add_free_space_zoned(block_group, bytenr, size,
2774 : false);
2775 :
2776 0 : return btrfs_add_free_space(block_group, bytenr, size);
2777 : }
2778 :
2779 : /*
2780 : * This is a subtle distinction because when adding free space back in general,
2781 : * we want it to be added as untrimmed for async discard. But in the case where
2782 : * we add it on loading of a block group, we want to consider it trimmed.
2783 : */
2784 124580 : int btrfs_add_free_space_async_trimmed(struct btrfs_block_group *block_group,
2785 : u64 bytenr, u64 size)
2786 : {
2787 124580 : enum btrfs_trim_state trim_state = BTRFS_TRIM_STATE_UNTRIMMED;
2788 :
2789 124580 : if (btrfs_is_zoned(block_group->fs_info))
2790 0 : return __btrfs_add_free_space_zoned(block_group, bytenr, size,
2791 : true);
2792 :
2793 124580 : if (btrfs_test_opt(block_group->fs_info, DISCARD_SYNC) ||
2794 : btrfs_test_opt(block_group->fs_info, DISCARD_ASYNC))
2795 111525 : trim_state = BTRFS_TRIM_STATE_TRIMMED;
2796 :
2797 124580 : return __btrfs_add_free_space(block_group, bytenr, size, trim_state);
2798 : }
2799 :
2800 88184 : int btrfs_remove_free_space(struct btrfs_block_group *block_group,
2801 : u64 offset, u64 bytes)
2802 : {
2803 88184 : struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
2804 88184 : struct btrfs_free_space *info;
2805 88184 : int ret;
2806 88184 : bool re_search = false;
2807 :
2808 88184 : if (btrfs_is_zoned(block_group->fs_info)) {
2809 : /*
2810 : * This can happen with conventional zones when replaying log.
2811 : * Since the allocation info of tree-log nodes are not recorded
2812 : * to the extent-tree, calculate_alloc_pointer() failed to
2813 : * advance the allocation pointer after last allocated tree log
2814 : * node blocks.
2815 : *
2816 : * This function is called from
2817 : * btrfs_pin_extent_for_log_replay() when replaying the log.
2818 : * Advance the pointer not to overwrite the tree-log nodes.
2819 : */
2820 0 : if (block_group->start + block_group->alloc_offset <
2821 0 : offset + bytes) {
2822 0 : block_group->alloc_offset =
2823 0 : offset + bytes - block_group->start;
2824 : }
2825 0 : return 0;
2826 : }
2827 :
2828 88184 : spin_lock(&ctl->tree_lock);
2829 :
2830 : again:
2831 133478 : ret = 0;
2832 133478 : if (!bytes)
2833 45294 : goto out_lock;
2834 :
2835 88184 : info = tree_search_offset(ctl, offset, 0, 0);
2836 88184 : if (!info) {
2837 : /*
2838 : * Oops, we didn't find an extent that matched the space we wanted
2839 : * to remove; look for a bitmap instead.
2840 : */
2841 1359 : info = tree_search_offset(ctl, offset_to_bitmap(ctl, offset),
2842 : 1, 0);
2843 1359 : if (!info) {
2844 : /*
2845 : * If we found a partial bit of our free space in a
2846 : * bitmap but then couldn't find the other part this may
2847 : * be a problem, so WARN about it.
2848 : */
2849 0 : WARN_ON(re_search);
2850 0 : goto out_lock;
2851 : }
2852 : }
2853 :
2854 88184 : re_search = false;
2855 88184 : if (!info->bitmap) {
2856 86819 : unlink_free_space(ctl, info, true);
2857 86819 : if (offset == info->offset) {
2858 45294 : u64 to_free = min(bytes, info->bytes);
2859 :
2860 45294 : info->bytes -= to_free;
2861 45294 : info->offset += to_free;
2862 45294 : if (info->bytes) {
2863 20596 : ret = link_free_space(ctl, info);
2864 20596 : WARN_ON(ret);
2865 : } else {
2866 24698 : kmem_cache_free(btrfs_free_space_cachep, info);
2867 : }
2868 :
2869 45294 : offset += to_free;
2870 45294 : bytes -= to_free;
2871 45294 : goto again;
2872 : } else {
2873 41525 : u64 old_end = info->bytes + info->offset;
2874 :
2875 41525 : info->bytes = offset - info->offset;
2876 41525 : ret = link_free_space(ctl, info);
2877 41525 : WARN_ON(ret);
2878 41525 : if (ret)
2879 0 : goto out_lock;
2880 :
2881 : /* Not enough bytes in this entry to satisfy us */
2882 41525 : if (old_end < offset + bytes) {
2883 0 : bytes -= old_end - offset;
2884 0 : offset = old_end;
2885 0 : goto again;
2886 41525 : } else if (old_end == offset + bytes) {
2887 : /* all done */
2888 14923 : goto out_lock;
2889 : }
2890 26602 : spin_unlock(&ctl->tree_lock);
2891 :
2892 26602 : ret = __btrfs_add_free_space(block_group,
2893 : offset + bytes,
2894 : old_end - (offset + bytes),
2895 : info->trim_state);
2896 26602 : WARN_ON(ret);
2897 26602 : goto out;
2898 : }
2899 : }
2900 :
2901 1365 : ret = remove_from_bitmap(ctl, info, &offset, &bytes);
2902 1365 : if (ret == -EAGAIN) {
2903 0 : re_search = true;
2904 0 : goto again;
2905 : }
2906 1365 : out_lock:
2907 61582 : btrfs_discard_update_discardable(block_group);
2908 61582 : spin_unlock(&ctl->tree_lock);
2909 : out:
2910 : return ret;
2911 : }
2912 :
2913 0 : void btrfs_dump_free_space(struct btrfs_block_group *block_group,
2914 : u64 bytes)
2915 : {
2916 0 : struct btrfs_fs_info *fs_info = block_group->fs_info;
2917 0 : struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
2918 0 : struct btrfs_free_space *info;
2919 0 : struct rb_node *n;
2920 0 : int count = 0;
2921 :
2922 : /*
2923 : * Zoned btrfs does not use the free space tree or clusters. Just print
2924 : * out the free space after the allocation offset.
2925 : */
2926 0 : if (btrfs_is_zoned(fs_info)) {
2927 0 : btrfs_info(fs_info, "free space %llu active %d",
2928 : block_group->zone_capacity - block_group->alloc_offset,
2929 : test_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE,
2930 : &block_group->runtime_flags));
2931 0 : return;
2932 : }
2933 :
2934 0 : spin_lock(&ctl->tree_lock);
2935 0 : for (n = rb_first(&ctl->free_space_offset); n; n = rb_next(n)) {
2936 0 : info = rb_entry(n, struct btrfs_free_space, offset_index);
2937 0 : if (info->bytes >= bytes && !block_group->ro)
2938 0 : count++;
2939 0 : btrfs_crit(fs_info, "entry offset %llu, bytes %llu, bitmap %s",
2940 : info->offset, info->bytes,
2941 : (info->bitmap) ? "yes" : "no");
2942 : }
2943 0 : spin_unlock(&ctl->tree_lock);
2944 0 : btrfs_info(fs_info, "block group has cluster?: %s",
2945 : list_empty(&block_group->cluster_list) ? "no" : "yes");
2946 0 : btrfs_info(fs_info,
2947 : "%d blocks of free space at or bigger than bytes is", count);
2948 : }
2949 :
2950 26429 : void btrfs_init_free_space_ctl(struct btrfs_block_group *block_group,
2951 : struct btrfs_free_space_ctl *ctl)
2952 : {
2953 26429 : struct btrfs_fs_info *fs_info = block_group->fs_info;
2954 :
2955 26429 : spin_lock_init(&ctl->tree_lock);
2956 26429 : ctl->unit = fs_info->sectorsize;
2957 26429 : ctl->start = block_group->start;
2958 26429 : ctl->block_group = block_group;
2959 26429 : ctl->op = &free_space_op;
2960 26429 : ctl->free_space_bytes = RB_ROOT_CACHED;
2961 26429 : INIT_LIST_HEAD(&ctl->trimming_ranges);
2962 26429 : mutex_init(&ctl->cache_writeout_mutex);
2963 :
2964 : /*
2965 : * We only want to have 32k of RAM per block group for keeping
2966 : * track of free space, and if we pass 1/2 of that we want to
2967 : * start converting things over to using bitmaps.
2968 : */
2969 26429 : ctl->extents_thresh = (SZ_32K / 2) / sizeof(struct btrfs_free_space);
2970 26429 : }
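
Concretely, the budget here is SZ_32K / 2 = 16384 bytes. With
sizeof(struct btrfs_free_space) on the order of 100 bytes on 64-bit builds
(the exact size depends on the kernel version and config), that yields an
initial extents_thresh of roughly 150-160 entries before use_bitmap() starts
steering small extents into bitmaps.
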
2971 :
2972 : /*
2973 : * for a given cluster, put all of its extents back into the free
2974 : * space cache. If the block group passed doesn't match the block group
2975 : * pointed to by the cluster, someone else raced in and freed the
2976 : * cluster already. In that case, we just return without changing anything
2977 : */
2978 9613 : static void __btrfs_return_cluster_to_free_space(
2979 : struct btrfs_block_group *block_group,
2980 : struct btrfs_free_cluster *cluster)
2981 : {
2982 9613 : struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
2983 9613 : struct rb_node *node;
2984 :
2985 9613 : lockdep_assert_held(&ctl->tree_lock);
2986 :
2987 9613 : spin_lock(&cluster->lock);
2988 9613 : if (cluster->block_group != block_group) {
2989 0 : spin_unlock(&cluster->lock);
2990 0 : return;
2991 : }
2992 :
2993 9613 : cluster->block_group = NULL;
2994 9613 : cluster->window_start = 0;
2995 9613 : list_del_init(&cluster->block_group_list);
2996 :
2997 9613 : node = rb_first(&cluster->root);
2998 38247 : while (node) {
2999 28634 : struct btrfs_free_space *entry;
3000 :
3001 28634 : entry = rb_entry(node, struct btrfs_free_space, offset_index);
3002 28634 : node = rb_next(&entry->offset_index);
3003 28634 : rb_erase(&entry->offset_index, &cluster->root);
3004 28634 : RB_CLEAR_NODE(&entry->offset_index);
3005 :
3006 28634 : if (!entry->bitmap) {
3007 : /* Merging treats extents as if they were new */
3008 28359 : if (!btrfs_free_space_trimmed(entry)) {
3009 2817 : ctl->discardable_extents[BTRFS_STAT_CURR]--;
3010 2817 : ctl->discardable_bytes[BTRFS_STAT_CURR] -=
3011 2817 : entry->bytes;
3012 : }
3013 :
3014 28359 : try_merge_free_space(ctl, entry, false);
3015 28359 : steal_from_bitmap(ctl, entry, false);
3016 :
3017 : /* As we insert directly, update these statistics */
3018 28359 : if (!btrfs_free_space_trimmed(entry)) {
3019 2889 : ctl->discardable_extents[BTRFS_STAT_CURR]++;
3020 2889 : ctl->discardable_bytes[BTRFS_STAT_CURR] +=
3021 2889 : entry->bytes;
3022 : }
3023 : }
3024 28634 : tree_insert_offset(ctl, NULL, entry);
3025 28634 : rb_add_cached(&entry->bytes_index, &ctl->free_space_bytes,
3026 : entry_less);
3027 : }
3028 9613 : cluster->root = RB_ROOT;
3029 9613 : spin_unlock(&cluster->lock);
3030 9613 : btrfs_put_block_group(block_group);
3031 : }
3032 :
3033 26372 : void btrfs_remove_free_space_cache(struct btrfs_block_group *block_group)
3034 : {
3035 26372 : struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
3036 26372 : struct btrfs_free_cluster *cluster;
3037 26372 : struct list_head *head;
3038 :
3039 26372 : spin_lock(&ctl->tree_lock);
3040 29521 : while ((head = block_group->cluster_list.next) !=
3041 29521 : &block_group->cluster_list) {
3042 3149 : cluster = list_entry(head, struct btrfs_free_cluster,
3043 : block_group_list);
3044 :
3045 3149 : WARN_ON(cluster->block_group != block_group);
3046 3149 : __btrfs_return_cluster_to_free_space(block_group, cluster);
3047 :
3048 3149 : cond_resched_lock(&ctl->tree_lock);
3049 : }
3050 26372 : __btrfs_remove_free_space_cache(ctl);
3051 26372 : btrfs_discard_update_discardable(block_group);
3052 26372 : spin_unlock(&ctl->tree_lock);
3053 :
3054 26372 : }
3055 :
3056 : /*
3057 : * Walk @block_group's free space rb_tree to determine if everything is trimmed.
3058 : */
3059 267 : bool btrfs_is_free_space_trimmed(struct btrfs_block_group *block_group)
3060 : {
3061 267 : struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
3062 267 : struct btrfs_free_space *info;
3063 267 : struct rb_node *node;
3064 267 : bool ret = true;
3065 :
3066 267 : spin_lock(&ctl->tree_lock);
3067 267 : node = rb_first(&ctl->free_space_offset);
3068 :
3069 424 : while (node) {
3070 227 : info = rb_entry(node, struct btrfs_free_space, offset_index);
3071 :
3072 227 : if (!btrfs_free_space_trimmed(info)) {
3073 : ret = false;
3074 : break;
3075 : }
3076 :
3077 157 : node = rb_next(node);
3078 : }
3079 :
3080 267 : spin_unlock(&ctl->tree_lock);
3081 267 : return ret;
3082 : }
3083 :
3084 4301983 : u64 btrfs_find_space_for_alloc(struct btrfs_block_group *block_group,
3085 : u64 offset, u64 bytes, u64 empty_size,
3086 : u64 *max_extent_size)
3087 : {
3088 4301983 : struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
3089 4301983 : struct btrfs_discard_ctl *discard_ctl =
3090 4301983 : &block_group->fs_info->discard_ctl;
3091 4301983 : struct btrfs_free_space *entry = NULL;
3092 4301983 : u64 bytes_search = bytes + empty_size;
3093 4301983 : u64 ret = 0;
3094 4301983 : u64 align_gap = 0;
3095 4301983 : u64 align_gap_len = 0;
3096 4301983 : enum btrfs_trim_state align_gap_trim_state = BTRFS_TRIM_STATE_UNTRIMMED;
3097 4301983 : bool use_bytes_index = (offset == block_group->start);
3098 :
3099 4301983 : ASSERT(!btrfs_is_zoned(block_group->fs_info));
3100 :
3101 4301983 : spin_lock(&ctl->tree_lock);
3102 4302154 : entry = find_free_space(ctl, &offset, &bytes_search,
3103 : block_group->full_stripe_len, max_extent_size,
3104 : use_bytes_index);
3105 4302150 : if (!entry)
3106 314637 : goto out;
3107 :
3108 3987513 : ret = offset;
3109 3987513 : if (entry->bitmap) {
3110 184965 : bitmap_clear_bits(ctl, entry, offset, bytes, true);
3111 :
3112 184965 : if (!btrfs_free_space_trimmed(entry))
3113 178775 : atomic64_add(bytes, &discard_ctl->discard_bytes_saved);
3114 :
3115 184965 : if (!entry->bytes)
3116 590 : free_bitmap(ctl, entry);
3117 : } else {
3118 3802548 : unlink_free_space(ctl, entry, true);
3119 3802546 : align_gap_len = offset - entry->offset;
3120 3802546 : align_gap = entry->offset;
3121 3802546 : align_gap_trim_state = entry->trim_state;
3122 :
3123 3802546 : if (!btrfs_free_space_trimmed(entry))
3124 1939611 : atomic64_add(bytes, &discard_ctl->discard_bytes_saved);
3125 :
3126 3802547 : entry->offset = offset + bytes;
3127 3802547 : WARN_ON(entry->bytes < bytes + align_gap_len);
3128 :
3129 3802547 : entry->bytes -= bytes + align_gap_len;
3130 3802547 : if (!entry->bytes)
3131 221533 : kmem_cache_free(btrfs_free_space_cachep, entry);
3132 : else
3133 3581014 : link_free_space(ctl, entry);
3134 : }
3135 4302152 : out:
3136 4302152 : btrfs_discard_update_discardable(block_group);
3137 4302138 : spin_unlock(&ctl->tree_lock);
3138 :
3139 4302158 : if (align_gap_len)
3140 1 : __btrfs_add_free_space(block_group, align_gap, align_gap_len,
3141 : align_gap_trim_state);
3142 4302158 : return ret;
3143 : }
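
Note the align_gap handling: if full_stripe_len alignment pushed the returned
offset past the start of the chosen extent entry, the skipped bytes in front
of it are handed back through __btrfs_add_free_space() only after the tree
lock is dropped, with their original trim state preserved.
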
3144 :
3145 : /*
3146 : * given a cluster, put all of its extents back into the free space
3147 : * cache. If a block group is passed, this function will only free
3148 : * a cluster that belongs to the passed block group.
3149 : *
3150 : * Otherwise, it'll get a reference on the block group pointed to by the
3151 : * cluster and remove the cluster from it.
3152 : */
3153 12857 : void btrfs_return_cluster_to_free_space(
3154 : struct btrfs_block_group *block_group,
3155 : struct btrfs_free_cluster *cluster)
3156 : {
3157 12857 : struct btrfs_free_space_ctl *ctl;
3158 :
3159 : /* first, get a safe pointer to the block group */
3160 12857 : spin_lock(&cluster->lock);
3161 12857 : if (!block_group) {
3162 11793 : block_group = cluster->block_group;
3163 11793 : if (!block_group) {
3164 5329 : spin_unlock(&cluster->lock);
3165 5329 : return;
3166 : }
3167 1064 : } else if (cluster->block_group != block_group) {
3168 : /* someone else has already freed it, don't redo their work */
3169 1064 : spin_unlock(&cluster->lock);
3170 1064 : return;
3171 : }
3172 6464 : btrfs_get_block_group(block_group);
3173 6464 : spin_unlock(&cluster->lock);
3174 :
3175 6464 : ctl = block_group->free_space_ctl;
3176 :
3177 : /* now return any extents the cluster had on it */
3178 6464 : spin_lock(&ctl->tree_lock);
3179 6464 : __btrfs_return_cluster_to_free_space(block_group, cluster);
3180 6464 : spin_unlock(&ctl->tree_lock);
3181 :
3182 6464 : btrfs_discard_queue_work(&block_group->fs_info->discard_ctl, block_group);
3183 :
3184 : /* finally drop our ref */
3185 6464 : btrfs_put_block_group(block_group);
3186 : }
3187 :
3188 1135728 : static u64 btrfs_alloc_from_bitmap(struct btrfs_block_group *block_group,
3189 : struct btrfs_free_cluster *cluster,
3190 : struct btrfs_free_space *entry,
3191 : u64 bytes, u64 min_start,
3192 : u64 *max_extent_size)
3193 : {
3194 1135728 : struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
3195 1135728 : int err;
3196 1135728 : u64 search_start = cluster->window_start;
3197 1135728 : u64 search_bytes = bytes;
3198 1135728 : u64 ret = 0;
3199 :
3200 1135728 : search_start = min_start;
3201 1135728 : search_bytes = bytes;
3202 :
3203 1135728 : err = search_bitmap(ctl, entry, &search_start, &search_bytes, true);
3204 1135728 : if (err) {
3205 274 : *max_extent_size = max(get_max_extent_size(entry),
3206 : *max_extent_size);
3207 274 : return 0;
3208 : }
3209 :
3210 1135454 : ret = search_start;
3211 1135454 : bitmap_clear_bits(ctl, entry, ret, bytes, false);
3212 :
3213 1135454 : return ret;
3214 : }
3215 :
3216 : /*
3217 : * Given a cluster, try to allocate 'bytes' from it; returns 0
3218 : * if it couldn't find anything suitably large, or a logical disk offset
3219 : * if things worked out.
3220 : */
3221 8675976 : u64 btrfs_alloc_from_cluster(struct btrfs_block_group *block_group,
3222 : struct btrfs_free_cluster *cluster, u64 bytes,
3223 : u64 min_start, u64 *max_extent_size)
3224 : {
3225 8675976 : struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
3226 8675976 : struct btrfs_discard_ctl *discard_ctl =
3227 8675976 : &block_group->fs_info->discard_ctl;
3228 8675976 : struct btrfs_free_space *entry = NULL;
3229 8675976 : struct rb_node *node;
3230 8675976 : u64 ret = 0;
3231 :
3232 8675976 : ASSERT(!btrfs_is_zoned(block_group->fs_info));
3233 :
3234 8675976 : spin_lock(&cluster->lock);
3235 8675976 : if (bytes > cluster->max_size)
3236 0 : goto out;
3237 :
3238 8675976 : if (cluster->block_group != block_group)
3239 0 : goto out;
3240 :
3241 8675976 : node = rb_first(&cluster->root);
3242 8675976 : if (!node)
3243 6102 : goto out;
3244 :
3245 : entry = rb_entry(node, struct btrfs_free_space, offset_index);
3246 8669989 : while (1) {
3247 8669989 : if (entry->bytes < bytes)
3248 0 : *max_extent_size = max(get_max_extent_size(entry),
3249 : *max_extent_size);
3250 :
3251 8669989 : if (entry->bytes < bytes ||
3252 8669989 : (!entry->bitmap && entry->offset < min_start)) {
3253 115 : node = rb_next(&entry->offset_index);
3254 115 : if (!node)
3255 : break;
3256 115 : entry = rb_entry(node, struct btrfs_free_space,
3257 : offset_index);
3258 115 : continue;
3259 : }
3260 :
3261 8669874 : if (entry->bitmap) {
3262 1135728 : ret = btrfs_alloc_from_bitmap(block_group,
3263 : cluster, entry, bytes,
3264 : cluster->window_start,
3265 : max_extent_size);
3266 1135728 : if (ret == 0) {
3267 274 : node = rb_next(&entry->offset_index);
3268 274 : if (!node)
3269 : break;
3270 0 : entry = rb_entry(node, struct btrfs_free_space,
3271 : offset_index);
3272 0 : continue;
3273 : }
3274 1135454 : cluster->window_start += bytes;
3275 : } else {
3276 7534146 : ret = entry->offset;
3277 :
3278 7534146 : entry->offset += bytes;
3279 7534146 : entry->bytes -= bytes;
3280 : }
3281 :
3282 : break;
3283 : }
3284 274 : out:
3285 8675976 : spin_unlock(&cluster->lock);
3286 :
3287 8675976 : if (!ret)
3288 : return 0;
3289 :
3290 8669600 : spin_lock(&ctl->tree_lock);
3291 :
3292 8669600 : if (!btrfs_free_space_trimmed(entry))
3293 5789128 : atomic64_add(bytes, &discard_ctl->discard_bytes_saved);
3294 :
3295 8669600 : ctl->free_space -= bytes;
3296 8669600 : if (!entry->bitmap && !btrfs_free_space_trimmed(entry))
3297 4653859 : ctl->discardable_bytes[BTRFS_STAT_CURR] -= bytes;
3298 :
3299 8669600 : spin_lock(&cluster->lock);
3300 8669600 : if (entry->bytes == 0) {
3301 1181384 : rb_erase(&entry->offset_index, &cluster->root);
3302 1181384 : ctl->free_extents--;
3303 1181384 : if (entry->bitmap) {
3304 329 : kmem_cache_free(btrfs_free_space_bitmap_cachep,
3305 : entry->bitmap);
3306 329 : ctl->total_bitmaps--;
3307 329 : recalculate_thresholds(ctl);
3308 1181055 : } else if (!btrfs_free_space_trimmed(entry)) {
3309 1167025 : ctl->discardable_extents[BTRFS_STAT_CURR]--;
3310 : }
3311 1181384 : kmem_cache_free(btrfs_free_space_cachep, entry);
3312 : }
3313 :
3314 8669600 : spin_unlock(&cluster->lock);
3315 8669600 : spin_unlock(&ctl->tree_lock);
3316 :
3317 8669600 : return ret;
3318 : }
3319 :
3320 783 : static int btrfs_bitmap_cluster(struct btrfs_block_group *block_group,
3321 : struct btrfs_free_space *entry,
3322 : struct btrfs_free_cluster *cluster,
3323 : u64 offset, u64 bytes,
3324 : u64 cont1_bytes, u64 min_bytes)
3325 : {
3326 783 : struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
3327 783 : unsigned long next_zero;
3328 783 : unsigned long i;
3329 783 : unsigned long want_bits;
3330 783 : unsigned long min_bits;
3331 783 : unsigned long found_bits;
3332 783 : unsigned long max_bits = 0;
3333 783 : unsigned long start = 0;
3334 783 : unsigned long total_found = 0;
3335 783 : int ret;
3336 :
3337 783 : lockdep_assert_held(&ctl->tree_lock);
3338 :
3339 783 : i = offset_to_bit(entry->offset, ctl->unit,
3340 783 : max_t(u64, offset, entry->offset));
3341 783 : want_bits = bytes_to_bits(bytes, ctl->unit);
3342 783 : min_bits = bytes_to_bits(min_bytes, ctl->unit);
3343 :
3344 : /*
3345 : * Don't bother looking for a cluster in this bitmap if it's heavily
3346 : * fragmented.
3347 : */
3348 783 : if (entry->max_extent_size &&
3349 : entry->max_extent_size < cont1_bytes)
3350 : return -ENOSPC;
3351 783 : again:
3352 2553 : found_bits = 0;
3353 2553 : for_each_set_bit_from(i, entry->bitmap, BITS_PER_BITMAP) {
3354 2374 : next_zero = find_next_zero_bit(entry->bitmap,
3355 : BITS_PER_BITMAP, i);
3356 2374 : if (next_zero - i >= min_bits) {
3357 2374 : found_bits = next_zero - i;
3358 2374 : if (found_bits > max_bits)
3359 : max_bits = found_bits;
3360 : break;
3361 : }
3362 0 : if (next_zero - i > max_bits)
3363 : max_bits = next_zero - i;
3364 0 : i = next_zero;
3365 : }
3366 :
3367 2553 : if (!found_bits) {
3368 179 : entry->max_extent_size = (u64)max_bits * ctl->unit;
3369 179 : return -ENOSPC;
3370 : }
3371 :
3372 2374 : if (!total_found) {
3373 611 : start = i;
3374 611 : cluster->max_size = 0;
3375 : }
3376 :
3377 2374 : total_found += found_bits;
3378 :
3379 2374 : if (cluster->max_size < found_bits * ctl->unit)
3380 837 : cluster->max_size = found_bits * ctl->unit;
3381 :
3382 2374 : if (total_found < want_bits || cluster->max_size < cont1_bytes) {
3383 1770 : i = next_zero + 1;
3384 1770 : goto again;
3385 : }
3386 :
3387 604 : cluster->window_start = start * ctl->unit + entry->offset;
3388 604 : rb_erase(&entry->offset_index, &ctl->free_space_offset);
3389 604 : rb_erase_cached(&entry->bytes_index, &ctl->free_space_bytes);
3390 :
3391 : /*
3392 : * We need to know if we're currently on the normal space index when we
3393 : * manipulate the bitmap so that we know we need to remove and re-insert
3394 : * it into the space_index tree. Clear the bytes_index node here so the
3395 : * bitmap manipulation helpers know not to mess with the space_index
3396 : * until this bitmap entry is added back into the normal cache.
3397 : */
3398 604 : RB_CLEAR_NODE(&entry->bytes_index);
3399 :
3400 604 : ret = tree_insert_offset(ctl, cluster, entry);
3401 604 : ASSERT(!ret); /* -EEXIST; Logic error */
3402 :
3403 604 : trace_btrfs_setup_cluster(block_group, cluster,
3404 604 : total_found * ctl->unit, 1);
3405 604 : return 0;
3406 : }
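
/*
 * Editor's sketch (not kernel code): a userspace analog of the bit scan in
 * btrfs_bitmap_cluster() above.  Starting from bit i, find the first run of
 * set bits at least min_bits long, remembering the largest run seen; the
 * kernel caches that as entry->max_extent_size so a later search can bail
 * out early.  find_next_bit()/find_next_zero_bit() become naive loops here.
 */
#include <stdbool.h>
#include <stdio.h>

#define NBITS 64

static bool bit_set(unsigned long long map, unsigned int i)
{
	return (map >> i) & 1ULL;
}

/* Returns the start of the run, or NBITS if none; *max_run gets the biggest run. */
static unsigned int find_run(unsigned long long map, unsigned int i,
			     unsigned int min_bits, unsigned int *max_run)
{
	*max_run = 0;
	while (i < NBITS) {
		unsigned int next_zero;

		while (i < NBITS && !bit_set(map, i))
			i++;				/* skip to the next set bit */
		next_zero = i;
		while (next_zero < NBITS && bit_set(map, next_zero))
			next_zero++;			/* end of this run of ones */
		if (next_zero - i > *max_run)
			*max_run = next_zero - i;
		if (next_zero - i >= min_bits)
			return i;			/* big enough, take it */
		i = next_zero;
	}
	return NBITS;
}

int main(void)
{
	/* bits 2-3 set (run of 2), bits 8-12 set (run of 5) */
	unsigned long long map = 0x1f0cULL;
	unsigned int max_run;

	printf("run of >= 4 bits starts at %u\n", find_run(map, 0, 4, &max_run));
	return 0;
}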
3407 :
3408 : /*
3409 : * This searches the block group for just extents to fill the cluster with.
3410 : * Try to find a cluster with at least 'bytes' total bytes, at least one
3411 : * extent of cont1_bytes, and other extents of at least min_bytes.
3412 : */
3413 : static noinline int
3414 10392 : setup_cluster_no_bitmap(struct btrfs_block_group *block_group,
3415 : struct btrfs_free_cluster *cluster,
3416 : struct list_head *bitmaps, u64 offset, u64 bytes,
3417 : u64 cont1_bytes, u64 min_bytes)
3418 : {
3419 10392 : struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
3420 10392 : struct btrfs_free_space *first = NULL;
3421 10392 : struct btrfs_free_space *entry = NULL;
3422 10392 : struct btrfs_free_space *last;
3423 10392 : struct rb_node *node;
3424 10392 : u64 window_free;
3425 10392 : u64 max_extent;
3426 10392 : u64 total_size = 0;
3427 :
3428 10392 : lockdep_assert_held(&ctl->tree_lock);
3429 :
3430 10392 : entry = tree_search_offset(ctl, offset, 0, 1);
3431 10392 : if (!entry)
3432 : return -ENOSPC;
3433 :
3434 : /*
3435 : * We don't want bitmaps, so just move along until we find a normal
3436 : * extent entry.
3437 : */
3438 10975 : while (entry->bitmap || entry->bytes < min_bytes) {
3439 1676 : if (entry->bitmap && list_empty(&entry->list))
3440 1673 : list_add_tail(&entry->list, bitmaps);
3441 1676 : node = rb_next(&entry->offset_index);
3442 1676 : if (!node)
3443 : return -ENOSPC;
3444 : entry = rb_entry(node, struct btrfs_free_space, offset_index);
3445 : }
3446 :
3447 9299 : window_free = entry->bytes;
3448 9299 : max_extent = entry->bytes;
3449 9299 : first = entry;
3450 9299 : last = entry;
3451 :
3452 1210865 : for (node = rb_next(&entry->offset_index); node;
3453 1201566 : node = rb_next(&entry->offset_index)) {
3454 1201566 : entry = rb_entry(node, struct btrfs_free_space, offset_index);
3455 :
3456 1201566 : if (entry->bitmap) {
3457 903 : if (list_empty(&entry->list))
3458 903 : list_add_tail(&entry->list, bitmaps);
3459 903 : continue;
3460 : }
3461 :
3462 1200663 : if (entry->bytes < min_bytes)
3463 0 : continue;
3464 :
3465 1200663 : last = entry;
3466 1200663 : window_free += entry->bytes;
3467 1200663 : if (entry->bytes > max_extent)
3468 : max_extent = entry->bytes;
3469 : }
3470 :
3471 9299 : if (window_free < bytes || max_extent < cont1_bytes)
3472 : return -ENOSPC;
3473 :
3474 9009 : cluster->window_start = first->offset;
3475 :
3476 9009 : node = &first->offset_index;
3477 :
3478 : /*
3479 : * Now that we've found our entries, pull them out of the free space
3480 : * cache and put them into the cluster rbtree.
3481 : */
3482 1210213 : do {
3483 1210213 : int ret;
3484 :
3485 1210213 : entry = rb_entry(node, struct btrfs_free_space, offset_index);
3486 1210213 : node = rb_next(&entry->offset_index);
3487 1210213 : if (entry->bitmap || entry->bytes < min_bytes)
3488 799 : continue;
3489 :
3490 1209414 : rb_erase(&entry->offset_index, &ctl->free_space_offset);
3491 1209414 : rb_erase_cached(&entry->bytes_index, &ctl->free_space_bytes);
3492 1209414 : ret = tree_insert_offset(ctl, cluster, entry);
3493 1209414 : total_size += entry->bytes;
3494 1210213 : ASSERT(!ret); /* -EEXIST; Logic error */
3495 1210213 : } while (node && entry != last);
3496 :
3497 9009 : cluster->max_size = max_extent;
3498 9009 : trace_btrfs_setup_cluster(block_group, cluster, total_size, 0);
3499 9009 : return 0;
3500 : }
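
/*
 * Editor's sketch (not kernel code): the admission test from
 * setup_cluster_no_bitmap() above, over a plain array instead of an rbtree.
 * The window is accepted only if the qualifying extents together cover
 * 'bytes' (window_free) and at least one of them is cont1_bytes long
 * (max_extent); extents under min_bytes are ignored entirely.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct extent { uint64_t offset, bytes; };

static bool window_ok(const struct extent *e, int n, uint64_t bytes,
		      uint64_t cont1_bytes, uint64_t min_bytes)
{
	uint64_t window_free = 0, max_extent = 0;

	for (int i = 0; i < n; i++) {
		if (e[i].bytes < min_bytes)
			continue;			/* too small to count */
		window_free += e[i].bytes;
		if (e[i].bytes > max_extent)
			max_extent = e[i].bytes;
	}
	return window_free >= bytes && max_extent >= cont1_bytes;
}

int main(void)
{
	struct extent e[] = {
		{ 0, 64 << 10 }, { 1 << 20, 192 << 10 }, { 4 << 20, 16 << 10 },
	};

	/* 256K usable with a 192K contiguous piece: passes for 128K/128K/32K. */
	printf("%d\n", window_ok(e, 3, 128 << 10, 128 << 10, 32 << 10));
	return 0;
}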
3501 :
3502 : /*
3503 : * This specifically looks for bitmaps that may work in the cluster; we assume
3504 : * that we have already failed to find extents that will work.
3505 : */
3506 : static noinline int
3507 1383 : setup_cluster_bitmap(struct btrfs_block_group *block_group,
3508 : struct btrfs_free_cluster *cluster,
3509 : struct list_head *bitmaps, u64 offset, u64 bytes,
3510 : u64 cont1_bytes, u64 min_bytes)
3511 : {
3512 1383 : struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
3513 1383 : struct btrfs_free_space *entry = NULL;
3514 1383 : int ret = -ENOSPC;
3515 1383 : u64 bitmap_offset = offset_to_bitmap(ctl, offset);
3516 :
3517 1383 : if (ctl->total_bitmaps == 0)
3518 : return -ENOSPC;
3519 :
3520 : /*
3521 : * The bitmap that covers offset won't be in the list unless offset
3522 : * is just its start offset.
3523 : */
3524 1089 : if (!list_empty(bitmaps))
3525 934 : entry = list_first_entry(bitmaps, struct btrfs_free_space, list);
3526 :
3527 934 : if (!entry || entry->offset != bitmap_offset) {
3528 525 : entry = tree_search_offset(ctl, bitmap_offset, 1, 0);
3529 525 : if (entry && list_empty(&entry->list))
3530 287 : list_add(&entry->list, bitmaps);
3531 : }
3532 :
3533 1664 : list_for_each_entry(entry, bitmaps, list) {
3534 1179 : if (entry->bytes < bytes)
3535 396 : continue;
3536 783 : ret = btrfs_bitmap_cluster(block_group, entry, cluster, offset,
3537 : bytes, cont1_bytes, min_bytes);
3538 783 : if (!ret)
3539 : return 0;
3540 : }
3541 :
3542 : /*
3543 : * The bitmaps list has all the bitmaps that record free space
3544 : * starting after offset, so no more search is required.
3545 : */
3546 : return -ENOSPC;
3547 : }
3548 :
3549 : /*
3550 : * Here we try to find a cluster of blocks in a block group. The goal
3551 : * is to find at least bytes + empty_size of free space.
3552 : * We might not find them all in one contiguous area.
3553 : *
3554 : * Returns zero and sets up the cluster if things worked out, otherwise
3555 : * it returns -ENOSPC.
3556 : */
3557 17159 : int btrfs_find_space_cluster(struct btrfs_block_group *block_group,
3558 : struct btrfs_free_cluster *cluster,
3559 : u64 offset, u64 bytes, u64 empty_size)
3560 : {
3561 17159 : struct btrfs_fs_info *fs_info = block_group->fs_info;
3562 17159 : struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
3563 17159 : struct btrfs_free_space *entry, *tmp;
3564 17159 : LIST_HEAD(bitmaps);
3565 17159 : u64 min_bytes;
3566 17159 : u64 cont1_bytes;
3567 17159 : int ret;
3568 :
3569 : /*
3570 : * Choose the minimum extent size we'll require for this
3571 : * cluster. For SSD_SPREAD, don't allow any fragmentation.
3572 : * For metadata, allow allocations with smaller extents. For
3573 : * data, keep it dense.
3574 : */
3575 17159 : if (btrfs_test_opt(fs_info, SSD_SPREAD)) {
3576 3 : cont1_bytes = bytes + empty_size;
3577 3 : min_bytes = cont1_bytes;
3578 17156 : } else if (block_group->flags & BTRFS_BLOCK_GROUP_METADATA) {
3579 17156 : cont1_bytes = bytes;
3580 17156 : min_bytes = fs_info->sectorsize;
3581 : } else {
3582 0 : cont1_bytes = max(bytes, (bytes + empty_size) >> 2);
3583 0 : min_bytes = fs_info->sectorsize;
3584 : }
3585 :
3586 17159 : spin_lock(&ctl->tree_lock);
3587 :
3588 : /*
3589 : * If we know we don't have enough space to make a cluster don't even
3590 : * bother doing all the work to try and find one.
3591 : */
3592 17159 : if (ctl->free_space < bytes) {
3593 6767 : spin_unlock(&ctl->tree_lock);
3594 6767 : return -ENOSPC;
3595 : }
3596 :
3597 10392 : spin_lock(&cluster->lock);
3598 :
3599 : /* someone already found a cluster, hooray */
3600 10392 : if (cluster->block_group) {
3601 0 : ret = 0;
3602 0 : goto out;
3603 : }
3604 :
3605 10392 : trace_btrfs_find_cluster(block_group, offset, bytes, empty_size,
3606 : min_bytes);
3607 :
3608 10392 : ret = setup_cluster_no_bitmap(block_group, cluster, &bitmaps, offset,
3609 : bytes + empty_size,
3610 : cont1_bytes, min_bytes);
3611 10392 : if (ret)
3612 1383 : ret = setup_cluster_bitmap(block_group, cluster, &bitmaps,
3613 : offset, bytes + empty_size,
3614 : cont1_bytes, min_bytes);
3615 :
3616 : /* Clear our temporary list */
3617 13255 : list_for_each_entry_safe(entry, tmp, &bitmaps, list)
3618 2863 : list_del_init(&entry->list);
3619 :
3620 10392 : if (!ret) {
3621 9613 : btrfs_get_block_group(block_group);
3622 9613 : list_add_tail(&cluster->block_group_list,
3623 : &block_group->cluster_list);
3624 9613 : cluster->block_group = block_group;
3625 : } else {
3626 779 : trace_btrfs_failed_cluster_setup(block_group);
3627 : }
3628 10392 : out:
3629 10392 : spin_unlock(&cluster->lock);
3630 10392 : spin_unlock(&ctl->tree_lock);
3631 :
3632 10392 : return ret;
3633 : }
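
/*
 * Editor's sketch (not kernel code): the three sizing policies chosen at
 * the top of btrfs_find_space_cluster(), lifted into a pure function.
 * SSD_SPREAD demands one fully contiguous run, metadata accepts
 * sector-sized pieces as long as one extent can cover the request, and
 * data requires the single largest piece to cover at least a quarter of
 * the total request.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static void pick_cluster_limits(bool ssd_spread, bool metadata,
				uint64_t bytes, uint64_t empty_size,
				uint64_t sectorsize,
				uint64_t *cont1_bytes, uint64_t *min_bytes)
{
	if (ssd_spread) {
		*cont1_bytes = bytes + empty_size;	/* no fragmentation */
		*min_bytes = *cont1_bytes;
	} else if (metadata) {
		*cont1_bytes = bytes;			/* smaller extents OK */
		*min_bytes = sectorsize;
	} else {
		uint64_t quarter = (bytes + empty_size) >> 2;

		*cont1_bytes = bytes > quarter ? bytes : quarter; /* keep data dense */
		*min_bytes = sectorsize;
	}
}

int main(void)
{
	uint64_t c, m;

	pick_cluster_limits(false, false, 1 << 20, 3 << 20, 4096, &c, &m);
	printf("data: cont1=%llu min=%llu\n",
	       (unsigned long long)c, (unsigned long long)m);
	return 0;
}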
3634 :
3635 : /*
3636 : * Simple code to initialize a cluster to an empty state.
3637 : */
3638 6944 : void btrfs_init_free_cluster(struct btrfs_free_cluster *cluster)
3639 : {
3640 6944 : spin_lock_init(&cluster->lock);
3641 6944 : spin_lock_init(&cluster->refill_lock);
3642 6944 : cluster->root = RB_ROOT;
3643 6944 : cluster->max_size = 0;
3644 6944 : cluster->fragmented = false;
3645 6944 : INIT_LIST_HEAD(&cluster->block_group_list);
3646 6944 : cluster->block_group = NULL;
3647 6944 : }
3648 :
3649 33924 : static int do_trimming(struct btrfs_block_group *block_group,
3650 : u64 *total_trimmed, u64 start, u64 bytes,
3651 : u64 reserved_start, u64 reserved_bytes,
3652 : enum btrfs_trim_state reserved_trim_state,
3653 : struct btrfs_trim_range *trim_entry)
3654 : {
3655 33924 : struct btrfs_space_info *space_info = block_group->space_info;
3656 33924 : struct btrfs_fs_info *fs_info = block_group->fs_info;
3657 33924 : struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
3658 33924 : int ret;
3659 33924 : int update = 0;
3660 33924 : const u64 end = start + bytes;
3661 33924 : const u64 reserved_end = reserved_start + reserved_bytes;
3662 33924 : enum btrfs_trim_state trim_state = BTRFS_TRIM_STATE_UNTRIMMED;
3663 33924 : u64 trimmed = 0;
3664 :
3665 33924 : spin_lock(&space_info->lock);
3666 33924 : spin_lock(&block_group->lock);
3667 33924 : if (!block_group->ro) {
3668 33924 : block_group->reserved += reserved_bytes;
3669 33924 : space_info->bytes_reserved += reserved_bytes;
3670 33924 : update = 1;
3671 : }
3672 33924 : spin_unlock(&block_group->lock);
3673 33924 : spin_unlock(&space_info->lock);
3674 :
3675 33924 : ret = btrfs_discard_extent(fs_info, start, bytes, &trimmed);
3676 33924 : if (!ret) {
3677 33924 : *total_trimmed += trimmed;
3678 33924 : trim_state = BTRFS_TRIM_STATE_TRIMMED;
3679 : }
3680 :
3681 33924 : mutex_lock(&ctl->cache_writeout_mutex);
3682 33924 : if (reserved_start < start)
3683 5 : __btrfs_add_free_space(block_group, reserved_start,
3684 : start - reserved_start,
3685 : reserved_trim_state);
3686 33924 : if (end < reserved_end)
3687 6 : __btrfs_add_free_space(block_group, end, reserved_end - end,
3688 : reserved_trim_state);
3689 33924 : __btrfs_add_free_space(block_group, start, bytes, trim_state);
3690 33924 : list_del(&trim_entry->list);
3691 33924 : mutex_unlock(&ctl->cache_writeout_mutex);
3692 :
3693 33924 : if (update) {
3694 33924 : spin_lock(&space_info->lock);
3695 33924 : spin_lock(&block_group->lock);
3696 33924 : if (block_group->ro)
3697 0 : space_info->bytes_readonly += reserved_bytes;
3698 33924 : block_group->reserved -= reserved_bytes;
3699 33924 : space_info->bytes_reserved -= reserved_bytes;
3700 33924 : spin_unlock(&block_group->lock);
3701 33924 : spin_unlock(&space_info->lock);
3702 : }
3703 :
3704 33924 : return ret;
3705 : }
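
/*
 * Editor's sketch (not kernel code): do_trimming() may discard only a
 * sub-range [start, start + bytes) of the reservation [reserved_start,
 * reserved_start + reserved_bytes).  Whatever was reserved but not trimmed
 * is handed back to the free space cache on either side, and the trimmed
 * middle is re-added as BTRFS_TRIM_STATE_TRIMMED.  The split looks like:
 */
#include <stdint.h>
#include <stdio.h>

static void readd_after_trim(uint64_t reserved_start, uint64_t reserved_bytes,
			     uint64_t start, uint64_t bytes)
{
	uint64_t end = start + bytes;
	uint64_t reserved_end = reserved_start + reserved_bytes;

	if (reserved_start < start)	/* untrimmed head, keeps its old state */
		printf("re-add head   [%llu, %llu)\n",
		       (unsigned long long)reserved_start,
		       (unsigned long long)start);
	if (end < reserved_end)		/* untrimmed tail, keeps its old state */
		printf("re-add tail   [%llu, %llu)\n",
		       (unsigned long long)end,
		       (unsigned long long)reserved_end);
	printf("re-add middle [%llu, %llu) as trimmed\n",
	       (unsigned long long)start, (unsigned long long)end);
}

int main(void)
{
	readd_after_trim(4096, 16384, 8192, 4096);
	return 0;
}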
3706 :
3707 : /*
3708 : * If @async is set, then we will trim 1 region and return.
3709 : */
3710 4640 : static int trim_no_bitmap(struct btrfs_block_group *block_group,
3711 : u64 *total_trimmed, u64 start, u64 end, u64 minlen,
3712 : bool async)
3713 : {
3714 4640 : struct btrfs_discard_ctl *discard_ctl =
3715 4640 : &block_group->fs_info->discard_ctl;
3716 4640 : struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
3717 4640 : struct btrfs_free_space *entry;
3718 4640 : struct rb_node *node;
3719 4640 : int ret = 0;
3720 4640 : u64 extent_start;
3721 4640 : u64 extent_bytes;
3722 4640 : enum btrfs_trim_state extent_trim_state;
3723 4640 : u64 bytes;
3724 4640 : const u64 max_discard_size = READ_ONCE(discard_ctl->max_discard_size);
3725 :
3726 100693 : while (start < end) {
3727 99326 : struct btrfs_trim_range trim_entry;
3728 :
3729 99326 : mutex_lock(&ctl->cache_writeout_mutex);
3730 99326 : spin_lock(&ctl->tree_lock);
3731 :
3732 99326 : if (ctl->free_space < minlen)
3733 553 : goto out_unlock;
3734 :
3735 98773 : entry = tree_search_offset(ctl, start, 0, 1);
3736 98773 : if (!entry)
3737 282 : goto out_unlock;
3738 :
3739 : /* Skip bitmaps and if async, already trimmed entries */
3740 102765 : while (entry->bitmap ||
3741 16940 : (async && btrfs_free_space_trimmed(entry))) {
3742 4332 : node = rb_next(&entry->offset_index);
3743 4332 : if (!node)
3744 58 : goto out_unlock;
3745 : entry = rb_entry(node, struct btrfs_free_space,
3746 : offset_index);
3747 : }
3748 :
3749 98433 : if (entry->offset >= end)
3750 2 : goto out_unlock;
3751 :
3752 98431 : extent_start = entry->offset;
3753 98431 : extent_bytes = entry->bytes;
3754 98431 : extent_trim_state = entry->trim_state;
3755 98431 : if (async) {
3756 14406 : start = entry->offset;
3757 14406 : bytes = entry->bytes;
3758 14406 : if (bytes < minlen) {
3759 12031 : spin_unlock(&ctl->tree_lock);
3760 12031 : mutex_unlock(&ctl->cache_writeout_mutex);
3761 12031 : goto next;
3762 : }
3763 2375 : unlink_free_space(ctl, entry, true);
3764 : /*
3765 : * Let bytes = BTRFS_MAX_DISCARD_SIZE + X.
3766 : * If X < BTRFS_ASYNC_DISCARD_MIN_FILTER, we won't trim
3767 : * X when we come back around. So trim it now.
3768 : */
3769 2375 : if (max_discard_size &&
3770 2375 : bytes >= (max_discard_size +
3771 : BTRFS_ASYNC_DISCARD_MIN_FILTER)) {
3772 981 : bytes = max_discard_size;
3773 981 : extent_bytes = max_discard_size;
3774 981 : entry->offset += max_discard_size;
3775 981 : entry->bytes -= max_discard_size;
3776 981 : link_free_space(ctl, entry);
3777 : } else {
3778 1394 : kmem_cache_free(btrfs_free_space_cachep, entry);
3779 : }
3780 : } else {
3781 84025 : start = max(start, extent_start);
3782 84025 : bytes = min(extent_start + extent_bytes, end) - start;
3783 84025 : if (bytes < minlen) {
3784 68873 : spin_unlock(&ctl->tree_lock);
3785 68873 : mutex_unlock(&ctl->cache_writeout_mutex);
3786 68873 : goto next;
3787 : }
3788 :
3789 15152 : unlink_free_space(ctl, entry, true);
3790 15152 : kmem_cache_free(btrfs_free_space_cachep, entry);
3791 : }
3792 :
3793 17527 : spin_unlock(&ctl->tree_lock);
3794 17527 : trim_entry.start = extent_start;
3795 17527 : trim_entry.bytes = extent_bytes;
3796 17527 : list_add_tail(&trim_entry.list, &ctl->trimming_ranges);
3797 17527 : mutex_unlock(&ctl->cache_writeout_mutex);
3798 :
3799 17527 : ret = do_trimming(block_group, total_trimmed, start, bytes,
3800 : extent_start, extent_bytes, extent_trim_state,
3801 : &trim_entry);
3802 17527 : if (ret) {
3803 0 : block_group->discard_cursor = start + bytes;
3804 2378 : break;
3805 : }
3806 17527 : next:
3807 98431 : start += bytes;
3808 98431 : block_group->discard_cursor = start;
3809 98431 : if (async && *total_trimmed)
3810 : break;
3811 :
3812 96056 : if (fatal_signal_pending(current)) {
3813 : ret = -ERESTARTSYS;
3814 : break;
3815 : }
3816 :
3817 96053 : cond_resched();
3818 : }
3819 :
3820 : return ret;
3821 :
3822 : out_unlock:
3823 895 : block_group->discard_cursor = btrfs_block_group_end(block_group);
3824 895 : spin_unlock(&ctl->tree_lock);
3825 895 : mutex_unlock(&ctl->cache_writeout_mutex);
3826 :
3827 895 : return ret;
3828 : }
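
/*
 * Editor's sketch (not kernel code): the async clamp used in
 * trim_no_bitmap() above.  If an extent is max_discard_size + X long and X
 * is below the minimum filter, the discard loop would never come back for
 * X, so the whole extent is trimmed now; otherwise only max_discard_size is
 * taken and the remainder is re-linked as free space.  MIN_FILTER stands in
 * for BTRFS_ASYNC_DISCARD_MIN_FILTER and its value here is an assumption.
 */
#include <stdint.h>
#include <stdio.h>

#define MIN_FILTER (32ULL * 1024)	/* assumed filter for this sketch */

static uint64_t clamp_discard(uint64_t bytes, uint64_t max_discard_size)
{
	if (max_discard_size && bytes >= max_discard_size + MIN_FILTER)
		return max_discard_size;	/* remainder stays queued */
	return bytes;				/* small tail: trim it all now */
}

int main(void)
{
	uint64_t cap = 64ULL << 20;	/* a 64M per-discard cap */

	/* tail below the filter: the full length is trimmed in one go */
	printf("%llu\n", (unsigned long long)clamp_discard(cap + (16ULL << 10), cap));
	/* tail above the filter: only the cap is trimmed this round */
	printf("%llu\n", (unsigned long long)clamp_discard(cap + (64ULL << 10), cap));
	return 0;
}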
3829 :
3830 : /*
3831 : * If we break out of trimming a bitmap prematurely, we should reset the
3832 : * trimming bit. In a rather contrived case, it's possible to race here so
3833 : * reset the state to BTRFS_TRIM_STATE_UNTRIMMED.
3834 : *
3835 : * start = start of bitmap
3836 : * end = near end of bitmap
3837 : *
3838 : * Thread 1:                      Thread 2:
3839 : * trim_bitmaps(start)
3840 : *                                trim_bitmaps(end)
3841 : *                                end_trimming_bitmap()
3842 : * reset_trimming_bitmap()
3843 : */
3844 2141 : static void reset_trimming_bitmap(struct btrfs_free_space_ctl *ctl, u64 offset)
3845 : {
3846 2141 : struct btrfs_free_space *entry;
3847 :
3848 2141 : spin_lock(&ctl->tree_lock);
3849 2141 : entry = tree_search_offset(ctl, offset, 1, 0);
3850 2141 : if (entry) {
3851 3 : if (btrfs_free_space_trimmed(entry)) {
3852 0 : ctl->discardable_extents[BTRFS_STAT_CURR] +=
3853 0 : entry->bitmap_extents;
3854 0 : ctl->discardable_bytes[BTRFS_STAT_CURR] += entry->bytes;
3855 : }
3856 3 : entry->trim_state = BTRFS_TRIM_STATE_UNTRIMMED;
3857 : }
3858 :
3859 2141 : spin_unlock(&ctl->tree_lock);
3860 2141 : }
3861 :
3862 : static void end_trimming_bitmap(struct btrfs_free_space_ctl *ctl,
3863 : struct btrfs_free_space *entry)
3864 : {
3865 381 : if (btrfs_free_space_trimming_bitmap(entry)) {
3866 374 : entry->trim_state = BTRFS_TRIM_STATE_TRIMMED;
3867 374 : ctl->discardable_extents[BTRFS_STAT_CURR] -=
3868 374 : entry->bitmap_extents;
3869 374 : ctl->discardable_bytes[BTRFS_STAT_CURR] -= entry->bytes;
3870 : }
3871 : }
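
/*
 * Editor's sketch (not kernel code): reset_trimming_bitmap() and
 * end_trimming_bitmap() keep the discardable_* statistics in step with trim
 * state.  Trimmed space must not be counted as discardable, so finishing a
 * bitmap subtracts its extents and bytes, and undoing the trim (the race
 * described above) adds them back.  A toy model of that accounting:
 */
#include <stdint.h>
#include <stdio.h>

struct discard_stats { int64_t extents, bytes; };

static void account_trimmed(struct discard_stats *s, int64_t extents, int64_t bytes)
{
	s->extents -= extents;		/* no longer discardable */
	s->bytes -= bytes;
}

static void account_untrimmed(struct discard_stats *s, int64_t extents, int64_t bytes)
{
	s->extents += extents;		/* discardable again */
	s->bytes += bytes;
}

int main(void)
{
	struct discard_stats s = { 10, 1 << 20 };

	account_trimmed(&s, 3, 256 << 10);
	account_untrimmed(&s, 3, 256 << 10);	/* trim undone: back to the start */
	printf("extents=%lld bytes=%lld\n", (long long)s.extents, (long long)s.bytes);
	return 0;
}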
3872 :
3873 : /*
3874 : * If @async is set, then we will trim 1 region and return.
3875 : */
3876 3812 : static int trim_bitmaps(struct btrfs_block_group *block_group,
3877 : u64 *total_trimmed, u64 start, u64 end, u64 minlen,
3878 : u64 maxlen, bool async)
3879 : {
3880 3812 : struct btrfs_discard_ctl *discard_ctl =
3881 3812 : &block_group->fs_info->discard_ctl;
3882 3812 : struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
3883 3812 : struct btrfs_free_space *entry;
3884 3812 : int ret = 0;
3885 3812 : int ret2;
3886 3812 : u64 bytes;
3887 3812 : u64 offset = offset_to_bitmap(ctl, start);
3888 3812 : const u64 max_discard_size = READ_ONCE(discard_ctl->max_discard_size);
3889 :
3890 29079 : while (offset < end) {
3891 27227 : bool next_bitmap = false;
3892 27227 : struct btrfs_trim_range trim_entry;
3893 :
3894 27227 : mutex_lock(&ctl->cache_writeout_mutex);
3895 27227 : spin_lock(&ctl->tree_lock);
3896 :
3897 27227 : if (ctl->free_space < minlen) {
3898 553 : block_group->discard_cursor =
3899 : btrfs_block_group_end(block_group);
3900 553 : spin_unlock(&ctl->tree_lock);
3901 553 : mutex_unlock(&ctl->cache_writeout_mutex);
3902 1106 : break;
3903 : }
3904 :
3905 26674 : entry = tree_search_offset(ctl, offset, 1, 0);
3906 : /*
3907 : * Bitmaps are marked trimmed lossily now to prevent constant
3908 : * discarding of the same bitmap (the reason why we are bound
3909 : * by the filters). So, retrim the block group bitmaps when we
3910 : * are preparing to punt to the unused_bgs list. This uses
3911 : * @minlen to determine if we are in BTRFS_DISCARD_INDEX_UNUSED
3912 : * which is the only discard index which sets minlen to 0.
3913 : */
3914 26674 : if (!entry || (async && minlen && start == offset &&
3915 : btrfs_free_space_trimmed(entry))) {
3916 7142 : spin_unlock(&ctl->tree_lock);
3917 7142 : mutex_unlock(&ctl->cache_writeout_mutex);
3918 7142 : next_bitmap = true;
3919 7142 : goto next;
3920 : }
3921 :
3922 : /*
3923 : * Async discard bitmap trimming begins by setting the start
3924 : * to be key.objectid, and offset_to_bitmap() aligns it to the
3925 : * start of the bitmap. This lets us know we are fully
3926 : * scanning the bitmap rather than only some portion of it.
3927 : */
3928 19532 : if (start == offset)
3929 1787 : entry->trim_state = BTRFS_TRIM_STATE_TRIMMING;
3930 :
3931 19532 : bytes = minlen;
3932 19532 : ret2 = search_bitmap(ctl, entry, &start, &bytes, false);
3933 19532 : if (ret2 || start >= end) {
3934 : /*
3935 : * We lossily consider a bitmap trimmed if we only skip
3936 : * over regions <= BTRFS_ASYNC_DISCARD_MIN_FILTER.
3937 : */
3938 1728 : if (ret2 && minlen <= BTRFS_ASYNC_DISCARD_MIN_FILTER)
3939 381 : end_trimming_bitmap(ctl, entry);
3940 : else
3941 1347 : entry->trim_state = BTRFS_TRIM_STATE_UNTRIMMED;
3942 1728 : spin_unlock(&ctl->tree_lock);
3943 1728 : mutex_unlock(&ctl->cache_writeout_mutex);
3944 1728 : next_bitmap = true;
3945 1728 : goto next;
3946 : }
3947 :
3948 : /*
3949 : * We already trimmed a region, but are using the locking above
3950 : * to reset the trim_state.
3951 : */
3952 17804 : if (async && *total_trimmed) {
3953 1407 : spin_unlock(&ctl->tree_lock);
3954 1407 : mutex_unlock(&ctl->cache_writeout_mutex);
3955 1407 : goto out;
3956 : }
3957 :
3958 16397 : bytes = min(bytes, end - start);
3959 16397 : if (bytes < minlen || (async && maxlen && bytes > maxlen)) {
3960 0 : spin_unlock(&ctl->tree_lock);
3961 0 : mutex_unlock(&ctl->cache_writeout_mutex);
3962 0 : goto next;
3963 : }
3964 :
3965 : /*
3966 : * Let bytes = BTRFS_MAX_DISCARD_SIZE + X.
3967 : * If X < @minlen, we won't trim X when we come back around.
3968 : * So trim it now. We differ here from trimming extents as we
3969 : * don't keep individual state per bit.
3970 : */
3971 16397 : if (async &&
3972 1418 : max_discard_size &&
3973 1418 : bytes > (max_discard_size + minlen))
3974 0 : bytes = max_discard_size;
3975 :
3976 16397 : bitmap_clear_bits(ctl, entry, start, bytes, true);
3977 16397 : if (entry->bytes == 0)
3978 1 : free_bitmap(ctl, entry);
3979 :
3980 16397 : spin_unlock(&ctl->tree_lock);
3981 16397 : trim_entry.start = start;
3982 16397 : trim_entry.bytes = bytes;
3983 16397 : list_add_tail(&trim_entry.list, &ctl->trimming_ranges);
3984 16397 : mutex_unlock(&ctl->cache_writeout_mutex);
3985 :
3986 16397 : ret = do_trimming(block_group, total_trimmed, start, bytes,
3987 : start, bytes, 0, &trim_entry);
3988 16397 : if (ret) {
3989 0 : reset_trimming_bitmap(ctl, offset);
3990 0 : block_group->discard_cursor =
3991 : btrfs_block_group_end(block_group);
3992 0 : break;
3993 : }
3994 16397 : next:
3995 8870 : if (next_bitmap) {
3996 8870 : offset += BITS_PER_BITMAP * ctl->unit;
3997 8870 : start = offset;
3998 : } else {
3999 16397 : start += bytes;
4000 : }
4001 25267 : block_group->discard_cursor = start;
4002 :
4003 25267 : if (fatal_signal_pending(current)) {
4004 0 : if (start != offset)
4005 0 : reset_trimming_bitmap(ctl, offset);
4006 : ret = -ERESTARTSYS;
4007 : break;
4008 : }
4009 :
4010 25267 : cond_resched();
4011 : }
4012 :
4013 2405 : if (offset >= end)
4014 1852 : block_group->discard_cursor = end;
4015 :
4016 553 : out:
4017 3812 : return ret;
4018 : }
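
/*
 * Editor's sketch (not kernel code): trim_bitmaps() walks the block group
 * one bitmap window at a time.  Each bitmap covers BITS_PER_BITMAP units,
 * so skipping to the next bitmap advances the cursor by that many bytes;
 * offset_to_bitmap() (defined earlier in this file) rounds a start offset
 * down to the same granularity, approximated here with plain modulo.
 */
#include <stdint.h>
#include <stdio.h>

#define SKETCH_BITS_PER_BITMAP (4096ULL * 8)	/* PAGE_SIZE * 8 on 4K pages */

int main(void)
{
	uint64_t unit = 4096;					/* ctl->unit: one sector */
	uint64_t bitmap_bytes = SKETCH_BITS_PER_BITMAP * unit;	/* 128M per bitmap */
	uint64_t start = 300ULL << 20, end = 600ULL << 20;
	uint64_t offset = start - (start % bitmap_bytes);	/* offset_to_bitmap() analog */

	while (offset < end) {
		printf("bitmap window [%llu, %llu)\n",
		       (unsigned long long)offset,
		       (unsigned long long)(offset + bitmap_bytes));
		offset += bitmap_bytes;				/* the next_bitmap step */
	}
	return 0;
}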
4019 :
4020 2144 : int btrfs_trim_block_group(struct btrfs_block_group *block_group,
4021 : u64 *trimmed, u64 start, u64 end, u64 minlen)
4022 : {
4023 2144 : struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
4024 2144 : int ret;
4025 2144 : u64 rem = 0;
4026 :
4027 2144 : ASSERT(!btrfs_is_zoned(block_group->fs_info));
4028 :
4029 2144 : *trimmed = 0;
4030 :
4031 2144 : spin_lock(&block_group->lock);
4032 4288 : if (test_bit(BLOCK_GROUP_FLAG_REMOVED, &block_group->runtime_flags)) {
4033 0 : spin_unlock(&block_group->lock);
4034 0 : return 0;
4035 : }
4036 2144 : btrfs_freeze_block_group(block_group);
4037 2144 : spin_unlock(&block_group->lock);
4038 :
4039 2144 : ret = trim_no_bitmap(block_group, trimmed, start, end, minlen, false);
4040 2144 : if (ret)
4041 3 : goto out;
4042 :
4043 2141 : ret = trim_bitmaps(block_group, trimmed, start, end, minlen, 0, false);
4044 2141 : div64_u64_rem(end, BITS_PER_BITMAP * ctl->unit, &rem);
4045 : /* If we ended in the middle of a bitmap, reset the trimming flag */
4046 2141 : if (rem)
4047 2141 : reset_trimming_bitmap(ctl, offset_to_bitmap(ctl, end));
4048 0 : out:
4049 2144 : btrfs_unfreeze_block_group(block_group);
4050 2144 : return ret;
4051 : }
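
/*
 * Editor's sketch (not kernel code): btrfs_trim_block_group() resets the
 * trimming flag only when 'end' stops partway through a bitmap, i.e. when
 * end is not a multiple of one bitmap's coverage.  div64_u64_rem() is the
 * kernel's 64-bit modulo; plain '%' is enough in this userspace analog.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool ends_mid_bitmap(uint64_t end, uint64_t bits_per_bitmap, uint64_t unit)
{
	return (end % (bits_per_bitmap * unit)) != 0;	/* rem != 0 => mid-bitmap */
}

int main(void)
{
	uint64_t bits = 4096 * 8, unit = 4096;		/* 128M per bitmap */

	printf("%d\n", ends_mid_bitmap(256ULL << 20, bits, unit));	/* 0: aligned */
	printf("%d\n", ends_mid_bitmap(300ULL << 20, bits, unit));	/* 1: reset needed */
	return 0;
}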
4052 :
4053 2496 : int btrfs_trim_block_group_extents(struct btrfs_block_group *block_group,
4054 : u64 *trimmed, u64 start, u64 end, u64 minlen,
4055 : bool async)
4056 : {
4057 2496 : int ret;
4058 :
4059 2496 : *trimmed = 0;
4060 :
4061 2496 : spin_lock(&block_group->lock);
4062 4992 : if (test_bit(BLOCK_GROUP_FLAG_REMOVED, &block_group->runtime_flags)) {
4063 0 : spin_unlock(&block_group->lock);
4064 0 : return 0;
4065 : }
4066 2496 : btrfs_freeze_block_group(block_group);
4067 2496 : spin_unlock(&block_group->lock);
4068 :
4069 2496 : ret = trim_no_bitmap(block_group, trimmed, start, end, minlen, async);
4070 2496 : btrfs_unfreeze_block_group(block_group);
4071 :
4072 2496 : return ret;
4073 : }
4074 :
4075 1671 : int btrfs_trim_block_group_bitmaps(struct btrfs_block_group *block_group,
4076 : u64 *trimmed, u64 start, u64 end, u64 minlen,
4077 : u64 maxlen, bool async)
4078 : {
4079 1671 : int ret;
4080 :
4081 1671 : *trimmed = 0;
4082 :
4083 1671 : spin_lock(&block_group->lock);
4084 3342 : if (test_bit(BLOCK_GROUP_FLAG_REMOVED, &block_group->runtime_flags)) {
4085 0 : spin_unlock(&block_group->lock);
4086 0 : return 0;
4087 : }
4088 1671 : btrfs_freeze_block_group(block_group);
4089 1671 : spin_unlock(&block_group->lock);
4090 :
4091 1671 : ret = trim_bitmaps(block_group, trimmed, start, end, minlen, maxlen,
4092 : async);
4093 :
4094 1671 : btrfs_unfreeze_block_group(block_group);
4095 :
4096 1671 : return ret;
4097 : }
4098 :
4099 1240150 : bool btrfs_free_space_cache_v1_active(struct btrfs_fs_info *fs_info)
4100 : {
4101 1240150 : return btrfs_super_cache_generation(fs_info->super_copy);
4102 : }
4103 :
4104 2 : static int cleanup_free_space_cache_v1(struct btrfs_fs_info *fs_info,
4105 : struct btrfs_trans_handle *trans)
4106 : {
4107 2 : struct btrfs_block_group *block_group;
4108 2 : struct rb_node *node;
4109 2 : int ret = 0;
4110 :
4111 2 : btrfs_info(fs_info, "cleaning free space cache v1");
4112 :
4113 2 : node = rb_first_cached(&fs_info->block_group_cache_tree);
4114 8 : while (node) {
4115 6 : block_group = rb_entry(node, struct btrfs_block_group, cache_node);
4116 6 : ret = btrfs_remove_free_space_inode(trans, NULL, block_group);
4117 6 : if (ret)
4118 0 : goto out;
4119 6 : node = rb_next(node);
4120 : }
4121 2 : out:
4122 2 : return ret;
4123 : }
4124 :
4125 2 : int btrfs_set_free_space_cache_v1_active(struct btrfs_fs_info *fs_info, bool active)
4126 : {
4127 2 : struct btrfs_trans_handle *trans;
4128 2 : int ret;
4129 :
4130 : /*
4131 : * update_super_roots will appropriately set or unset
4132 : * super_copy->cache_generation based on SPACE_CACHE and
4133 : * BTRFS_FS_CLEANUP_SPACE_CACHE_V1. For this reason, we need a
4134 : * transaction commit whether we are enabling space cache v1 and don't
4135 : * have any other work to do, or are disabling it and removing free
4136 : * space inodes.
4137 : */
4138 2 : trans = btrfs_start_transaction(fs_info->tree_root, 0);
4139 2 : if (IS_ERR(trans))
4140 0 : return PTR_ERR(trans);
4141 :
4142 2 : if (!active) {
4143 2 : set_bit(BTRFS_FS_CLEANUP_SPACE_CACHE_V1, &fs_info->flags);
4144 2 : ret = cleanup_free_space_cache_v1(fs_info, trans);
4145 2 : if (ret) {
4146 0 : btrfs_abort_transaction(trans, ret);
4147 0 : btrfs_end_transaction(trans);
4148 0 : goto out;
4149 : }
4150 : }
4151 :
4152 2 : ret = btrfs_commit_transaction(trans);
4153 2 : out:
4154 2 : clear_bit(BTRFS_FS_CLEANUP_SPACE_CACHE_V1, &fs_info->flags);
4155 :
4156 2 : return ret;
4157 : }
4158 :
4159 11 : int __init btrfs_free_space_init(void)
4160 : {
4161 11 : btrfs_free_space_cachep = kmem_cache_create("btrfs_free_space",
4162 : sizeof(struct btrfs_free_space), 0,
4163 : SLAB_MEM_SPREAD, NULL);
4164 11 : if (!btrfs_free_space_cachep)
4165 : return -ENOMEM;
4166 :
4167 11 : btrfs_free_space_bitmap_cachep = kmem_cache_create("btrfs_free_space_bitmap",
4168 : PAGE_SIZE, PAGE_SIZE,
4169 : SLAB_MEM_SPREAD, NULL);
4170 11 : if (!btrfs_free_space_bitmap_cachep) {
4171 0 : kmem_cache_destroy(btrfs_free_space_cachep);
4172 0 : return -ENOMEM;
4173 : }
4174 :
4175 : return 0;
4176 : }
4177 :
4178 0 : void __cold btrfs_free_space_exit(void)
4179 : {
4180 0 : kmem_cache_destroy(btrfs_free_space_cachep);
4181 0 : kmem_cache_destroy(btrfs_free_space_bitmap_cachep);
4182 0 : }
4183 :
4184 : #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
4185 : /*
4186 : * Use this if you need to make a bitmap or extent entry specifically. It
4187 : * doesn't do any of the merging that add_free_space does; it acts a lot like
4188 : * how the free space cache loading works, so you can create really weird
4189 : * configurations.
4190 : */
4191 : int test_add_free_space_entry(struct btrfs_block_group *cache,
4192 : u64 offset, u64 bytes, bool bitmap)
4193 : {
4194 : struct btrfs_free_space_ctl *ctl = cache->free_space_ctl;
4195 : struct btrfs_free_space *info = NULL, *bitmap_info;
4196 : void *map = NULL;
4197 : enum btrfs_trim_state trim_state = BTRFS_TRIM_STATE_TRIMMED;
4198 : u64 bytes_added;
4199 : int ret;
4200 :
4201 : again:
4202 : if (!info) {
4203 : info = kmem_cache_zalloc(btrfs_free_space_cachep, GFP_NOFS);
4204 : if (!info)
4205 : return -ENOMEM;
4206 : }
4207 :
4208 : if (!bitmap) {
4209 : spin_lock(&ctl->tree_lock);
4210 : info->offset = offset;
4211 : info->bytes = bytes;
4212 : info->max_extent_size = 0;
4213 : ret = link_free_space(ctl, info);
4214 : spin_unlock(&ctl->tree_lock);
4215 : if (ret)
4216 : kmem_cache_free(btrfs_free_space_cachep, info);
4217 : return ret;
4218 : }
4219 :
4220 : if (!map) {
4221 : map = kmem_cache_zalloc(btrfs_free_space_bitmap_cachep, GFP_NOFS);
4222 : if (!map) {
4223 : kmem_cache_free(btrfs_free_space_cachep, info);
4224 : return -ENOMEM;
4225 : }
4226 : }
4227 :
4228 : spin_lock(&ctl->tree_lock);
4229 : bitmap_info = tree_search_offset(ctl, offset_to_bitmap(ctl, offset),
4230 : 1, 0);
4231 : if (!bitmap_info) {
4232 : info->bitmap = map;
4233 : map = NULL;
4234 : add_new_bitmap(ctl, info, offset);
4235 : bitmap_info = info;
4236 : info = NULL;
4237 : }
4238 :
4239 : bytes_added = add_bytes_to_bitmap(ctl, bitmap_info, offset, bytes,
4240 : trim_state);
4241 :
4242 : bytes -= bytes_added;
4243 : offset += bytes_added;
4244 : spin_unlock(&ctl->tree_lock);
4245 :
4246 : if (bytes)
4247 : goto again;
4248 :
4249 : if (info)
4250 : kmem_cache_free(btrfs_free_space_cachep, info);
4251 : if (map)
4252 : kmem_cache_free(btrfs_free_space_bitmap_cachep, map);
4253 : return 0;
4254 : }
4255 :
4256 : /*
4257 : * Checks to see if the given range is in the free space cache. This is really
4258 : * just used to check the absence of space, so if there is free space in the
4259 : * range at all we will return 1.
4260 : */
4261 : int test_check_exists(struct btrfs_block_group *cache,
4262 : u64 offset, u64 bytes)
4263 : {
4264 : struct btrfs_free_space_ctl *ctl = cache->free_space_ctl;
4265 : struct btrfs_free_space *info;
4266 : int ret = 0;
4267 :
4268 : spin_lock(&ctl->tree_lock);
4269 : info = tree_search_offset(ctl, offset, 0, 0);
4270 : if (!info) {
4271 : info = tree_search_offset(ctl, offset_to_bitmap(ctl, offset),
4272 : 1, 0);
4273 : if (!info)
4274 : goto out;
4275 : }
4276 :
4277 : have_info:
4278 : if (info->bitmap) {
4279 : u64 bit_off, bit_bytes;
4280 : struct rb_node *n;
4281 : struct btrfs_free_space *tmp;
4282 :
4283 : bit_off = offset;
4284 : bit_bytes = ctl->unit;
4285 : ret = search_bitmap(ctl, info, &bit_off, &bit_bytes, false);
4286 : if (!ret) {
4287 : if (bit_off == offset) {
4288 : ret = 1;
4289 : goto out;
4290 : } else if (bit_off > offset &&
4291 : offset + bytes > bit_off) {
4292 : ret = 1;
4293 : goto out;
4294 : }
4295 : }
4296 :
4297 : n = rb_prev(&info->offset_index);
4298 : while (n) {
4299 : tmp = rb_entry(n, struct btrfs_free_space,
4300 : offset_index);
4301 : if (tmp->offset + tmp->bytes < offset)
4302 : break;
4303 : if (offset + bytes < tmp->offset) {
4304 : n = rb_prev(&tmp->offset_index);
4305 : continue;
4306 : }
4307 : info = tmp;
4308 : goto have_info;
4309 : }
4310 :
4311 : n = rb_next(&info->offset_index);
4312 : while (n) {
4313 : tmp = rb_entry(n, struct btrfs_free_space,
4314 : offset_index);
4315 : if (offset + bytes < tmp->offset)
4316 : break;
4317 : if (tmp->offset + tmp->bytes < offset) {
4318 : n = rb_next(&tmp->offset_index);
4319 : continue;
4320 : }
4321 : info = tmp;
4322 : goto have_info;
4323 : }
4324 :
4325 : ret = 0;
4326 : goto out;
4327 : }
4328 :
4329 : if (info->offset == offset) {
4330 : ret = 1;
4331 : goto out;
4332 : }
4333 :
4334 : if (offset > info->offset && offset < info->offset + info->bytes)
4335 : ret = 1;
4336 : out:
4337 : spin_unlock(&ctl->tree_lock);
4338 : return ret;
4339 : }
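
/*
 * Editor's sketch: a hedged usage example for the two helpers above, in the
 * spirit of tests/free-space-tests.c.  It assumes the sanity-test harness
 * (CONFIG_BTRFS_FS_RUN_SANITY_TESTS) already produced a dummy block group;
 * the function name is hypothetical and it is not compiled as part of this
 * file.
 */
static int sketch_extent_roundtrip(struct btrfs_block_group *cache)
{
	int ret;

	/* Add a raw 4MiB extent entry (last argument false: not a bitmap). */
	ret = test_add_free_space_entry(cache, 0, SZ_4M, false);
	if (ret)
		return ret;

	/* The whole range must now be visible to the cache. */
	if (!test_check_exists(cache, 0, SZ_4M))
		return -EINVAL;

	/* Drop the first half and confirm only that half disappeared. */
	ret = btrfs_remove_free_space(cache, 0, SZ_2M);
	if (ret)
		return ret;
	if (test_check_exists(cache, 0, SZ_2M))
		return -EINVAL;
	return test_check_exists(cache, SZ_2M, SZ_2M) ? 0 : -EINVAL;
}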
4340 : #endif /* CONFIG_BTRFS_FS_RUN_SANITY_TESTS */