Line data Source code
1 : // SPDX-License-Identifier: GPL-2.0
2 : /*
3 : * Copyright (C) 2008 Red Hat. All rights reserved.
4 : */
5 :
6 : #include <linux/pagemap.h>
7 : #include <linux/sched.h>
8 : #include <linux/sched/signal.h>
9 : #include <linux/slab.h>
10 : #include <linux/math64.h>
11 : #include <linux/ratelimit.h>
12 : #include <linux/error-injection.h>
13 : #include <linux/sched/mm.h>
14 : #include "ctree.h"
15 : #include "fs.h"
16 : #include "messages.h"
17 : #include "misc.h"
18 : #include "free-space-cache.h"
19 : #include "transaction.h"
20 : #include "disk-io.h"
21 : #include "extent_io.h"
22 : #include "volumes.h"
23 : #include "space-info.h"
24 : #include "delalloc-space.h"
25 : #include "block-group.h"
26 : #include "discard.h"
27 : #include "subpage.h"
28 : #include "inode-item.h"
29 : #include "accessors.h"
30 : #include "file-item.h"
31 : #include "file.h"
32 : #include "super.h"
33 :
34 : #define BITS_PER_BITMAP (PAGE_SIZE * 8UL)
35 : #define MAX_CACHE_BYTES_PER_GIG SZ_64K
36 : #define FORCE_EXTENT_THRESHOLD SZ_1M
37 :
38 : static struct kmem_cache *btrfs_free_space_cachep;
39 : static struct kmem_cache *btrfs_free_space_bitmap_cachep;
40 :
41 : struct btrfs_trim_range {
42 : u64 start;
43 : u64 bytes;
44 : struct list_head list;
45 : };
46 :
47 : static int link_free_space(struct btrfs_free_space_ctl *ctl,
48 : struct btrfs_free_space *info);
49 : static void unlink_free_space(struct btrfs_free_space_ctl *ctl,
50 : struct btrfs_free_space *info, bool update_stat);
51 : static int search_bitmap(struct btrfs_free_space_ctl *ctl,
52 : struct btrfs_free_space *bitmap_info, u64 *offset,
53 : u64 *bytes, bool for_alloc);
54 : static void free_bitmap(struct btrfs_free_space_ctl *ctl,
55 : struct btrfs_free_space *bitmap_info);
56 : static void bitmap_clear_bits(struct btrfs_free_space_ctl *ctl,
57 : struct btrfs_free_space *info, u64 offset,
58 : u64 bytes, bool update_stats);
59 :
60 22201 : static void __btrfs_remove_free_space_cache(struct btrfs_free_space_ctl *ctl)
61 : {
62 22201 : struct btrfs_free_space *info;
63 22201 : struct rb_node *node;
64 :
65 153645 : while ((node = rb_last(&ctl->free_space_offset)) != NULL) {
66 131444 : info = rb_entry(node, struct btrfs_free_space, offset_index);
67 131444 : if (!info->bitmap) {
68 131157 : unlink_free_space(ctl, info, true);
69 131157 : kmem_cache_free(btrfs_free_space_cachep, info);
70 : } else {
71 287 : free_bitmap(ctl, info);
72 : }
73 :
74 131444 : cond_resched_lock(&ctl->tree_lock);
75 : }
76 22201 : }
77 :
78 1028 : static struct inode *__lookup_free_space_inode(struct btrfs_root *root,
79 : struct btrfs_path *path,
80 : u64 offset)
81 : {
82 1028 : struct btrfs_fs_info *fs_info = root->fs_info;
83 1028 : struct btrfs_key key;
84 1028 : struct btrfs_key location;
85 1028 : struct btrfs_disk_key disk_key;
86 1028 : struct btrfs_free_space_header *header;
87 1028 : struct extent_buffer *leaf;
88 1028 : struct inode *inode = NULL;
89 1028 : unsigned nofs_flag;
90 1028 : int ret;
91 :
92 1028 : key.objectid = BTRFS_FREE_SPACE_OBJECTID;
93 1028 : key.offset = offset;
94 1028 : key.type = 0;
95 :
96 1028 : ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
97 1028 : if (ret < 0)
98 0 : return ERR_PTR(ret);
99 1028 : if (ret > 0) {
100 1020 : btrfs_release_path(path);
101 1020 : return ERR_PTR(-ENOENT);
102 : }
103 :
104 8 : leaf = path->nodes[0];
105 8 : header = btrfs_item_ptr(leaf, path->slots[0],
106 : struct btrfs_free_space_header);
107 8 : btrfs_free_space_key(leaf, header, &disk_key);
108 8 : btrfs_disk_key_to_cpu(&location, &disk_key);
109 8 : btrfs_release_path(path);
110 :
111 : /*
112 : * We are often under a trans handle at this point, so we need to make
113 : * sure NOFS is set to keep us from deadlocking.
114 : */
115 8 : nofs_flag = memalloc_nofs_save();
116 8 : inode = btrfs_iget_path(fs_info->sb, location.objectid, root, path);
117 8 : btrfs_release_path(path);
118 8 : memalloc_nofs_restore(nofs_flag);
119 8 : if (IS_ERR(inode))
120 : return inode;
121 :
122 8 : mapping_set_gfp_mask(inode->i_mapping,
123 : mapping_gfp_constraint(inode->i_mapping,
124 : ~(__GFP_FS | __GFP_HIGHMEM)));
125 :
126 8 : return inode;
127 : }
128 :
129 1105 : struct inode *lookup_free_space_inode(struct btrfs_block_group *block_group,
130 : struct btrfs_path *path)
131 : {
132 1105 : struct btrfs_fs_info *fs_info = block_group->fs_info;
133 1105 : struct inode *inode = NULL;
134 1105 : u32 flags = BTRFS_INODE_NODATASUM | BTRFS_INODE_NODATACOW;
135 :
136 1105 : spin_lock(&block_group->lock);
137 1105 : if (block_group->inode)
138 77 : inode = igrab(block_group->inode);
139 1105 : spin_unlock(&block_group->lock);
140 1105 : if (inode)
141 : return inode;
142 :
143 1028 : inode = __lookup_free_space_inode(fs_info->tree_root, path,
144 : block_group->start);
145 1028 : if (IS_ERR(inode))
146 : return inode;
147 :
148 8 : spin_lock(&block_group->lock);
149 8 : if (!((BTRFS_I(inode)->flags & flags) == flags)) {
150 0 : btrfs_info(fs_info, "Old style space inode found, converting.");
151 0 : BTRFS_I(inode)->flags |= BTRFS_INODE_NODATASUM |
152 : BTRFS_INODE_NODATACOW;
153 0 : block_group->disk_cache_state = BTRFS_DC_CLEAR;
154 : }
155 :
156 8 : if (!test_and_set_bit(BLOCK_GROUP_FLAG_IREF, &block_group->runtime_flags))
157 8 : block_group->inode = igrab(inode);
158 8 : spin_unlock(&block_group->lock);
159 :
160 8 : return inode;
161 : }
162 :
163 5 : static int __create_free_space_inode(struct btrfs_root *root,
164 : struct btrfs_trans_handle *trans,
165 : struct btrfs_path *path,
166 : u64 ino, u64 offset)
167 : {
168 5 : struct btrfs_key key;
169 5 : struct btrfs_disk_key disk_key;
170 5 : struct btrfs_free_space_header *header;
171 5 : struct btrfs_inode_item *inode_item;
172 5 : struct extent_buffer *leaf;
173 : /* We inline CRCs for the free disk space cache */
174 5 : const u64 flags = BTRFS_INODE_NOCOMPRESS | BTRFS_INODE_PREALLOC |
175 : BTRFS_INODE_NODATASUM | BTRFS_INODE_NODATACOW;
176 5 : int ret;
177 :
178 5 : ret = btrfs_insert_empty_inode(trans, root, path, ino);
179 5 : if (ret)
180 : return ret;
181 :
182 5 : leaf = path->nodes[0];
183 5 : inode_item = btrfs_item_ptr(leaf, path->slots[0],
184 : struct btrfs_inode_item);
185 5 : btrfs_item_key(leaf, &disk_key, path->slots[0]);
186 5 : memzero_extent_buffer(leaf, (unsigned long)inode_item,
187 : sizeof(*inode_item));
188 5 : btrfs_set_inode_generation(leaf, inode_item, trans->transid);
189 5 : btrfs_set_inode_size(leaf, inode_item, 0);
190 5 : btrfs_set_inode_nbytes(leaf, inode_item, 0);
191 5 : btrfs_set_inode_uid(leaf, inode_item, 0);
192 5 : btrfs_set_inode_gid(leaf, inode_item, 0);
193 5 : btrfs_set_inode_mode(leaf, inode_item, S_IFREG | 0600);
194 5 : btrfs_set_inode_flags(leaf, inode_item, flags);
195 5 : btrfs_set_inode_nlink(leaf, inode_item, 1);
196 5 : btrfs_set_inode_transid(leaf, inode_item, trans->transid);
197 5 : btrfs_set_inode_block_group(leaf, inode_item, offset);
198 5 : btrfs_mark_buffer_dirty(leaf);
199 5 : btrfs_release_path(path);
200 :
201 5 : key.objectid = BTRFS_FREE_SPACE_OBJECTID;
202 5 : key.offset = offset;
203 5 : key.type = 0;
204 5 : ret = btrfs_insert_empty_item(trans, root, path, &key,
205 : sizeof(struct btrfs_free_space_header));
206 5 : if (ret < 0) {
207 0 : btrfs_release_path(path);
208 0 : return ret;
209 : }
210 :
211 5 : leaf = path->nodes[0];
212 5 : header = btrfs_item_ptr(leaf, path->slots[0],
213 : struct btrfs_free_space_header);
214 5 : memzero_extent_buffer(leaf, (unsigned long)header, sizeof(*header));
215 5 : btrfs_set_free_space_key(leaf, header, &disk_key);
216 5 : btrfs_mark_buffer_dirty(leaf);
217 5 : btrfs_release_path(path);
218 :
219 5 : return 0;
220 : }
221 :
222 5 : int create_free_space_inode(struct btrfs_trans_handle *trans,
223 : struct btrfs_block_group *block_group,
224 : struct btrfs_path *path)
225 : {
226 5 : int ret;
227 5 : u64 ino;
228 :
229 5 : ret = btrfs_get_free_objectid(trans->fs_info->tree_root, &ino);
230 5 : if (ret < 0)
231 : return ret;
232 :
233 5 : return __create_free_space_inode(trans->fs_info->tree_root, trans, path,
234 : ino, block_group->start);
235 : }
236 :
237 : /*
238 : * inode is an optional sink: if it is NULL, btrfs_remove_free_space_inode
239 : * handles lookup, otherwise it takes ownership and iputs the inode.
240 : * Don't reuse an inode pointer after passing it into this function.
241 : */
242 517 : int btrfs_remove_free_space_inode(struct btrfs_trans_handle *trans,
243 : struct inode *inode,
244 : struct btrfs_block_group *block_group)
245 : {
246 517 : struct btrfs_path *path;
247 517 : struct btrfs_key key;
248 517 : int ret = 0;
249 :
250 517 : path = btrfs_alloc_path();
251 517 : if (!path)
252 : return -ENOMEM;
253 :
254 517 : if (!inode)
255 6 : inode = lookup_free_space_inode(block_group, path);
256 517 : if (IS_ERR(inode)) {
257 515 : if (PTR_ERR(inode) != -ENOENT)
258 0 : ret = PTR_ERR(inode);
259 515 : goto out;
260 : }
261 2 : ret = btrfs_orphan_add(trans, BTRFS_I(inode));
262 2 : if (ret) {
263 0 : btrfs_add_delayed_iput(BTRFS_I(inode));
264 0 : goto out;
265 : }
266 2 : clear_nlink(inode);
267 : /* One for the block groups ref */
268 2 : spin_lock(&block_group->lock);
269 2 : if (test_and_clear_bit(BLOCK_GROUP_FLAG_IREF, &block_group->runtime_flags)) {
270 2 : block_group->inode = NULL;
271 2 : spin_unlock(&block_group->lock);
272 2 : iput(inode);
273 : } else {
274 0 : spin_unlock(&block_group->lock);
275 : }
276 : /* One for the lookup ref */
277 2 : btrfs_add_delayed_iput(BTRFS_I(inode));
278 :
279 2 : key.objectid = BTRFS_FREE_SPACE_OBJECTID;
280 2 : key.type = 0;
281 2 : key.offset = block_group->start;
282 2 : ret = btrfs_search_slot(trans, trans->fs_info->tree_root, &key, path,
283 : -1, 1);
284 2 : if (ret) {
285 0 : if (ret > 0)
286 : ret = 0;
287 0 : goto out;
288 : }
289 2 : ret = btrfs_del_item(trans, trans->fs_info->tree_root, path);
290 517 : out:
291 517 : btrfs_free_path(path);
292 517 : return ret;
293 : }
294 :
295 12 : int btrfs_truncate_free_space_cache(struct btrfs_trans_handle *trans,
296 : struct btrfs_block_group *block_group,
297 : struct inode *vfs_inode)
298 : {
299 12 : struct btrfs_truncate_control control = {
300 : .inode = BTRFS_I(vfs_inode),
301 : .new_size = 0,
302 : .ino = btrfs_ino(BTRFS_I(vfs_inode)),
303 : .min_type = BTRFS_EXTENT_DATA_KEY,
304 : .clear_extent_range = true,
305 : };
306 : struct btrfs_inode *inode = BTRFS_I(vfs_inode);
307 12 : struct btrfs_root *root = inode->root;
308 12 : struct extent_state *cached_state = NULL;
309 12 : int ret = 0;
310 12 : bool locked = false;
311 :
312 12 : if (block_group) {
313 0 : struct btrfs_path *path = btrfs_alloc_path();
314 :
315 0 : if (!path) {
316 0 : ret = -ENOMEM;
317 0 : goto fail;
318 : }
319 0 : locked = true;
320 0 : mutex_lock(&trans->transaction->cache_write_mutex);
321 0 : if (!list_empty(&block_group->io_list)) {
322 0 : list_del_init(&block_group->io_list);
323 :
324 0 : btrfs_wait_cache_io(trans, block_group, path);
325 0 : btrfs_put_block_group(block_group);
326 : }
327 :
328 : /*
329 : * now that we've truncated the cache away, it's no longer
330 : * set up or written
331 : */
332 0 : spin_lock(&block_group->lock);
333 0 : block_group->disk_cache_state = BTRFS_DC_CLEAR;
334 0 : spin_unlock(&block_group->lock);
335 0 : btrfs_free_path(path);
336 : }
337 :
338 12 : btrfs_i_size_write(inode, 0);
339 12 : truncate_pagecache(vfs_inode, 0);
340 :
341 12 : lock_extent(&inode->io_tree, 0, (u64)-1, &cached_state);
342 12 : btrfs_drop_extent_map_range(inode, 0, (u64)-1, false);
343 :
344 : /*
345 : * We skip the throttling logic for free space cache inodes, so we don't
346 : * need to check for -EAGAIN.
347 : */
348 12 : ret = btrfs_truncate_inode_items(trans, root, &control);
349 :
350 12 : inode_sub_bytes(&inode->vfs_inode, control.sub_bytes);
351 12 : btrfs_inode_safe_disk_i_size_write(inode, control.last_size);
352 :
353 12 : unlock_extent(&inode->io_tree, 0, (u64)-1, &cached_state);
354 12 : if (ret)
355 0 : goto fail;
356 :
357 12 : ret = btrfs_update_inode(trans, root, inode);
358 :
359 12 : fail:
360 12 : if (locked)
361 0 : mutex_unlock(&trans->transaction->cache_write_mutex);
362 12 : if (ret)
363 0 : btrfs_abort_transaction(trans, ret);
364 :
365 12 : return ret;
366 : }
367 :
368 1 : static void readahead_cache(struct inode *inode)
369 : {
370 1 : struct file_ra_state ra;
371 1 : unsigned long last_index;
372 :
373 1 : file_ra_state_init(&ra, inode->i_mapping);
374 1 : last_index = (i_size_read(inode) - 1) >> PAGE_SHIFT;
375 :
376 1 : page_cache_sync_readahead(inode->i_mapping, &ra, NULL, 0, last_index);
377 1 : }
378 :
379 42 : static int io_ctl_init(struct btrfs_io_ctl *io_ctl, struct inode *inode,
380 : int write)
381 : {
382 42 : int num_pages;
383 :
384 42 : num_pages = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
385 :
386 : /* Make sure we can fit our crcs and generation into the first page */
387 42 : if (write && (num_pages * sizeof(u32) + sizeof(u64)) > PAGE_SIZE)
388 : return -ENOSPC;
389 :
390 42 : memset(io_ctl, 0, sizeof(struct btrfs_io_ctl));
391 :
392 42 : io_ctl->pages = kcalloc(num_pages, sizeof(struct page *), GFP_NOFS);
393 42 : if (!io_ctl->pages)
394 : return -ENOMEM;
395 :
396 42 : io_ctl->num_pages = num_pages;
397 42 : io_ctl->fs_info = btrfs_sb(inode->i_sb);
398 42 : io_ctl->inode = inode;
399 :
400 42 : return 0;
401 : }
402 : ALLOW_ERROR_INJECTION(io_ctl_init, ERRNO);
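
/*
 * A back-of-the-envelope reading of the -ENOSPC guard in io_ctl_init()
 * above, assuming 4 KiB pages (PAGE_SIZE is architecture dependent):
 * page 0 must hold one u32 crc per cache page plus the u64 generation,
 * so the number of pages is bounded. The helper below is illustrative,
 * not part of this file.
 */
static inline unsigned long example_max_cache_pages(void)
{
	/* 4096 bytes minus the 8-byte generation, 4 bytes per crc */
	return (4096 - sizeof(u64)) / sizeof(u32);	/* = 1022 pages */
}
/* 1022 pages * 4 KiB/page caps the cache file size just under 4 MiB. */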
403 :
404 : static void io_ctl_free(struct btrfs_io_ctl *io_ctl)
405 : {
406 42 : kfree(io_ctl->pages);
407 42 : io_ctl->pages = NULL;
408 : }
409 :
410 : static void io_ctl_unmap_page(struct btrfs_io_ctl *io_ctl)
411 : {
412 747 : if (io_ctl->cur) {
413 705 : io_ctl->cur = NULL;
414 705 : io_ctl->orig = NULL;
415 : }
416 : }
417 :
418 705 : static void io_ctl_map_page(struct btrfs_io_ctl *io_ctl, int clear)
419 : {
420 705 : ASSERT(io_ctl->index < io_ctl->num_pages);
421 705 : io_ctl->page = io_ctl->pages[io_ctl->index++];
422 705 : io_ctl->cur = page_address(io_ctl->page);
423 705 : io_ctl->orig = io_ctl->cur;
424 705 : io_ctl->size = PAGE_SIZE;
425 705 : if (clear)
426 704 : clear_page(io_ctl->cur);
427 705 : }
428 :
429 42 : static void io_ctl_drop_pages(struct btrfs_io_ctl *io_ctl)
430 : {
431 42 : int i;
432 :
433 42 : io_ctl_unmap_page(io_ctl);
434 :
435 762 : for (i = 0; i < io_ctl->num_pages; i++) {
436 720 : if (io_ctl->pages[i]) {
437 720 : btrfs_page_clear_checked(io_ctl->fs_info,
438 : io_ctl->pages[i],
439 : page_offset(io_ctl->pages[i]),
440 : PAGE_SIZE);
441 720 : unlock_page(io_ctl->pages[i]);
442 720 : put_page(io_ctl->pages[i]);
443 : }
444 : }
445 42 : }
446 :
447 42 : static int io_ctl_prepare_pages(struct btrfs_io_ctl *io_ctl, bool uptodate)
448 : {
449 42 : struct page *page;
450 42 : struct inode *inode = io_ctl->inode;
451 42 : gfp_t mask = btrfs_alloc_write_mask(inode->i_mapping);
452 42 : int i;
453 :
454 762 : for (i = 0; i < io_ctl->num_pages; i++) {
455 720 : int ret;
456 :
457 720 : page = find_or_create_page(inode->i_mapping, i, mask);
458 720 : if (!page) {
459 0 : io_ctl_drop_pages(io_ctl);
460 0 : return -ENOMEM;
461 : }
462 :
463 720 : ret = set_page_extent_mapped(page);
464 720 : if (ret < 0) {
465 0 : unlock_page(page);
466 0 : put_page(page);
467 0 : io_ctl_drop_pages(io_ctl);
468 0 : return ret;
469 : }
470 :
471 720 : io_ctl->pages[i] = page;
472 720 : if (uptodate && !PageUptodate(page)) {
473 0 : btrfs_read_folio(NULL, page_folio(page));
474 0 : lock_page(page);
475 0 : if (page->mapping != inode->i_mapping) {
476 0 : btrfs_err(BTRFS_I(inode)->root->fs_info,
477 : "free space cache page truncated");
478 0 : io_ctl_drop_pages(io_ctl);
479 0 : return -EIO;
480 : }
481 0 : if (!PageUptodate(page)) {
482 0 : btrfs_err(BTRFS_I(inode)->root->fs_info,
483 : "error reading free space cache");
484 0 : io_ctl_drop_pages(io_ctl);
485 0 : return -EIO;
486 : }
487 : }
488 : }
489 :
490 762 : for (i = 0; i < io_ctl->num_pages; i++)
491 720 : clear_page_dirty_for_io(io_ctl->pages[i]);
492 :
493 : return 0;
494 : }
495 :
496 41 : static void io_ctl_set_generation(struct btrfs_io_ctl *io_ctl, u64 generation)
497 : {
498 41 : io_ctl_map_page(io_ctl, 1);
499 :
500 : /*
501 : * Skip the csum areas. If we don't check crcs then we just have a
502 : * 64bit chunk at the front of the first page.
503 : */
504 41 : io_ctl->cur += (sizeof(u32) * io_ctl->num_pages);
505 41 : io_ctl->size -= sizeof(u64) + (sizeof(u32) * io_ctl->num_pages);
506 :
507 41 : put_unaligned_le64(generation, io_ctl->cur);
508 41 : io_ctl->cur += sizeof(u64);
509 41 : }
510 :
511 1 : static int io_ctl_check_generation(struct btrfs_io_ctl *io_ctl, u64 generation)
512 : {
513 1 : u64 cache_gen;
514 :
515 : /*
516 : * Skip the crc area. If we don't check crcs then we just have a 64bit
517 : * chunk at the front of the first page.
518 : */
519 1 : io_ctl->cur += sizeof(u32) * io_ctl->num_pages;
520 1 : io_ctl->size -= sizeof(u64) + (sizeof(u32) * io_ctl->num_pages);
521 :
522 1 : cache_gen = get_unaligned_le64(io_ctl->cur);
523 1 : if (cache_gen != generation) {
524 0 : btrfs_err_rl(io_ctl->fs_info,
525 : "space cache generation (%llu) does not match inode (%llu)",
526 : cache_gen, generation);
527 0 : io_ctl_unmap_page(io_ctl);
528 0 : return -EIO;
529 : }
530 1 : io_ctl->cur += sizeof(u64);
531 1 : return 0;
532 : }
533 :
534 704 : static void io_ctl_set_crc(struct btrfs_io_ctl *io_ctl, int index)
535 : {
536 704 : u32 *tmp;
537 704 : u32 crc = ~(u32)0;
538 704 : unsigned offset = 0;
539 :
540 704 : if (index == 0)
541 41 : offset = sizeof(u32) * io_ctl->num_pages;
542 :
543 704 : crc = btrfs_crc32c(crc, io_ctl->orig + offset, PAGE_SIZE - offset);
544 704 : btrfs_crc32c_final(crc, (u8 *)&crc);
545 704 : io_ctl_unmap_page(io_ctl);
546 704 : tmp = page_address(io_ctl->pages[0]);
547 704 : tmp += index;
548 704 : *tmp = crc;
549 704 : }
550 :
551 1 : static int io_ctl_check_crc(struct btrfs_io_ctl *io_ctl, int index)
552 : {
553 1 : u32 *tmp, val;
554 1 : u32 crc = ~(u32)0;
555 1 : unsigned offset = 0;
556 :
557 1 : if (index == 0)
558 1 : offset = sizeof(u32) * io_ctl->num_pages;
559 :
560 1 : tmp = page_address(io_ctl->pages[0]);
561 1 : tmp += index;
562 1 : val = *tmp;
563 :
564 1 : io_ctl_map_page(io_ctl, 0);
565 1 : crc = btrfs_crc32c(crc, io_ctl->orig + offset, PAGE_SIZE - offset);
566 1 : btrfs_crc32c_final(crc, (u8 *)&crc);
567 1 : if (val != crc) {
568 0 : btrfs_err_rl(io_ctl->fs_info,
569 : "csum mismatch on free space cache");
570 0 : io_ctl_unmap_page(io_ctl);
571 0 : return -EIO;
572 : }
573 :
574 : return 0;
575 : }
576 :
577 220 : static int io_ctl_add_entry(struct btrfs_io_ctl *io_ctl, u64 offset, u64 bytes,
578 : void *bitmap)
579 : {
580 220 : struct btrfs_free_space_entry *entry;
581 :
582 220 : if (!io_ctl->cur)
583 : return -ENOSPC;
584 :
585 220 : entry = io_ctl->cur;
586 220 : put_unaligned_le64(offset, &entry->offset);
587 220 : put_unaligned_le64(bytes, &entry->bytes);
588 220 : entry->type = (bitmap) ? BTRFS_FREE_SPACE_BITMAP :
589 : BTRFS_FREE_SPACE_EXTENT;
590 220 : io_ctl->cur += sizeof(struct btrfs_free_space_entry);
591 220 : io_ctl->size -= sizeof(struct btrfs_free_space_entry);
592 :
593 220 : if (io_ctl->size >= sizeof(struct btrfs_free_space_entry))
594 : return 0;
595 :
596 0 : io_ctl_set_crc(io_ctl, io_ctl->index - 1);
597 :
598 : /* No more pages to map */
599 0 : if (io_ctl->index >= io_ctl->num_pages)
600 : return 0;
601 :
602 : /* map the next page */
603 0 : io_ctl_map_page(io_ctl, 1);
604 0 : return 0;
605 : }
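
/*
 * For reference, the record serialized by io_ctl_add_entry() above has
 * this shape, mirroring struct btrfs_free_space_entry. The local copy
 * below is illustrative; the fields are stored unaligned and
 * little-endian via the put_unaligned_le64() calls.
 */
struct example_free_space_entry {
	__le64 offset;	/* start of the free range, or of the bitmap */
	__le64 bytes;	/* length of the range (for bitmaps: free bytes represented) */
	__u8 type;	/* BTRFS_FREE_SPACE_EXTENT or BTRFS_FREE_SPACE_BITMAP */
} __attribute__((__packed__));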
606 :
607 0 : static int io_ctl_add_bitmap(struct btrfs_io_ctl *io_ctl, void *bitmap)
608 : {
609 0 : if (!io_ctl->cur)
610 : return -ENOSPC;
611 :
612 : /*
613 : * If we aren't at the start of the current page, unmap this one and
614 : * map the next one if there is any left.
615 : */
616 0 : if (io_ctl->cur != io_ctl->orig) {
617 0 : io_ctl_set_crc(io_ctl, io_ctl->index - 1);
618 0 : if (io_ctl->index >= io_ctl->num_pages)
619 : return -ENOSPC;
620 0 : io_ctl_map_page(io_ctl, 0);
621 : }
622 :
623 0 : copy_page(io_ctl->cur, bitmap);
624 0 : io_ctl_set_crc(io_ctl, io_ctl->index - 1);
625 0 : if (io_ctl->index < io_ctl->num_pages)
626 0 : io_ctl_map_page(io_ctl, 0);
627 : return 0;
628 : }
629 :
630 41 : static void io_ctl_zero_remaining_pages(struct btrfs_io_ctl *io_ctl)
631 : {
632 : /*
633 : * If we're not on the boundary we know we've modified the page and we
634 : * need to crc the page.
635 : */
636 41 : if (io_ctl->cur != io_ctl->orig)
637 41 : io_ctl_set_crc(io_ctl, io_ctl->index - 1);
638 : else
639 0 : io_ctl_unmap_page(io_ctl);
640 :
641 704 : while (io_ctl->index < io_ctl->num_pages) {
642 663 : io_ctl_map_page(io_ctl, 1);
643 663 : io_ctl_set_crc(io_ctl, io_ctl->index - 1);
644 : }
645 41 : }
646 :
647 6 : static int io_ctl_read_entry(struct btrfs_io_ctl *io_ctl,
648 : struct btrfs_free_space *entry, u8 *type)
649 : {
650 6 : struct btrfs_free_space_entry *e;
651 6 : int ret;
652 :
653 6 : if (!io_ctl->cur) {
654 0 : ret = io_ctl_check_crc(io_ctl, io_ctl->index);
655 0 : if (ret)
656 : return ret;
657 : }
658 :
659 6 : e = io_ctl->cur;
660 6 : entry->offset = get_unaligned_le64(&e->offset);
661 6 : entry->bytes = get_unaligned_le64(&e->bytes);
662 6 : *type = e->type;
663 6 : io_ctl->cur += sizeof(struct btrfs_free_space_entry);
664 6 : io_ctl->size -= sizeof(struct btrfs_free_space_entry);
665 :
666 6 : if (io_ctl->size >= sizeof(struct btrfs_free_space_entry))
667 : return 0;
668 :
669 0 : io_ctl_unmap_page(io_ctl);
670 :
671 : return 0;
672 : }
673 :
674 0 : static int io_ctl_read_bitmap(struct btrfs_io_ctl *io_ctl,
675 : struct btrfs_free_space *entry)
676 : {
677 0 : int ret;
678 :
679 0 : ret = io_ctl_check_crc(io_ctl, io_ctl->index);
680 0 : if (ret)
681 : return ret;
682 :
683 0 : copy_page(entry->bitmap, io_ctl->cur);
684 0 : io_ctl_unmap_page(io_ctl);
685 :
686 : return 0;
687 : }
688 :
689 2640 : static void recalculate_thresholds(struct btrfs_free_space_ctl *ctl)
690 : {
691 2640 : struct btrfs_block_group *block_group = ctl->block_group;
692 2640 : u64 max_bytes;
693 2640 : u64 bitmap_bytes;
694 2640 : u64 extent_bytes;
695 2640 : u64 size = block_group->length;
696 2640 : u64 bytes_per_bg = BITS_PER_BITMAP * ctl->unit;
697 2640 : u64 max_bitmaps = div64_u64(size + bytes_per_bg - 1, bytes_per_bg);
698 :
699 2640 : max_bitmaps = max_t(u64, max_bitmaps, 1);
700 :
701 2640 : if (ctl->total_bitmaps > max_bitmaps)
702 0 : btrfs_err(block_group->fs_info,
703 : "invalid free space control: bg start=%llu len=%llu total_bitmaps=%u unit=%u max_bitmaps=%llu bytes_per_bg=%llu",
704 : block_group->start, block_group->length,
705 : ctl->total_bitmaps, ctl->unit, max_bitmaps,
706 : bytes_per_bg);
707 2640 : ASSERT(ctl->total_bitmaps <= max_bitmaps);
708 :
709 : /*
710 : * We are trying to keep the total amount of memory used per 1GiB of
711 : * space to be MAX_CACHE_BYTES_PER_GIG. However, with a reclamation
712 : * mechanism of pulling extents >= FORCE_EXTENT_THRESHOLD out of
713 : * bitmaps, we may end up using more memory than this.
714 : */
715 2640 : if (size < SZ_1G)
716 : max_bytes = MAX_CACHE_BYTES_PER_GIG;
717 : else
718 1036 : max_bytes = MAX_CACHE_BYTES_PER_GIG * div_u64(size, SZ_1G);
719 :
720 2640 : bitmap_bytes = ctl->total_bitmaps * ctl->unit;
721 :
722 : /*
723 : * we want the extent entry threshold to always be at most 1/2 the max
724 : * bytes we can have, or whatever is less than that.
725 : */
726 2640 : extent_bytes = max_bytes - bitmap_bytes;
727 2640 : extent_bytes = min_t(u64, extent_bytes, max_bytes >> 1);
728 :
729 2640 : ctl->extents_thresh =
730 : div_u64(extent_bytes, sizeof(struct btrfs_free_space));
731 2640 : }
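
/*
 * A worked instance of the threshold math above (assuming ctl->unit ==
 * 4096 and no bitmaps allocated yet): for a 1 GiB block group,
 *
 *	max_bytes      = MAX_CACHE_BYTES_PER_GIG        = 64 KiB
 *	bitmap_bytes   = 0 * 4096                       = 0
 *	extent_bytes   = min(64 KiB - 0, 64 KiB >> 1)   = 32 KiB
 *	extents_thresh = 32 KiB / sizeof(struct btrfs_free_space)
 *
 * With an entry size on the order of 100 bytes on 64-bit (an estimate,
 * not a fixed ABI value), that allows a few hundred extent entries
 * before new free ranges start being folded into bitmaps.
 */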
732 :
733 1 : static int __load_free_space_cache(struct btrfs_root *root, struct inode *inode,
734 : struct btrfs_free_space_ctl *ctl,
735 : struct btrfs_path *path, u64 offset)
736 : {
737 1 : struct btrfs_fs_info *fs_info = root->fs_info;
738 1 : struct btrfs_free_space_header *header;
739 1 : struct extent_buffer *leaf;
740 1 : struct btrfs_io_ctl io_ctl;
741 1 : struct btrfs_key key;
742 1 : struct btrfs_free_space *e, *n;
743 1 : LIST_HEAD(bitmaps);
744 1 : u64 num_entries;
745 1 : u64 num_bitmaps;
746 1 : u64 generation;
747 1 : u8 type;
748 1 : int ret = 0;
749 :
750 : /* Nothing in the space cache, goodbye */
751 1 : if (!i_size_read(inode))
752 : return 0;
753 :
754 1 : key.objectid = BTRFS_FREE_SPACE_OBJECTID;
755 1 : key.offset = offset;
756 1 : key.type = 0;
757 :
758 1 : ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
759 1 : if (ret < 0)
760 : return 0;
761 1 : else if (ret > 0) {
762 0 : btrfs_release_path(path);
763 0 : return 0;
764 : }
765 :
766 1 : ret = -1;
767 :
768 1 : leaf = path->nodes[0];
769 1 : header = btrfs_item_ptr(leaf, path->slots[0],
770 : struct btrfs_free_space_header);
771 1 : num_entries = btrfs_free_space_entries(leaf, header);
772 1 : num_bitmaps = btrfs_free_space_bitmaps(leaf, header);
773 1 : generation = btrfs_free_space_generation(leaf, header);
774 1 : btrfs_release_path(path);
775 :
776 1 : if (!BTRFS_I(inode)->generation) {
777 0 : btrfs_info(fs_info,
778 : "the free space cache file (%llu) is invalid, skip it",
779 : offset);
780 0 : return 0;
781 : }
782 :
783 1 : if (BTRFS_I(inode)->generation != generation) {
784 0 : btrfs_err(fs_info,
785 : "free space inode generation (%llu) did not match free space cache generation (%llu)",
786 : BTRFS_I(inode)->generation, generation);
787 0 : return 0;
788 : }
789 :
790 1 : if (!num_entries)
791 : return 0;
792 :
793 1 : ret = io_ctl_init(&io_ctl, inode, 0);
794 1 : if (ret)
795 : return ret;
796 :
797 1 : readahead_cache(inode);
798 :
799 1 : ret = io_ctl_prepare_pages(&io_ctl, true);
800 1 : if (ret)
801 0 : goto out;
802 :
803 1 : ret = io_ctl_check_crc(&io_ctl, 0);
804 1 : if (ret)
805 0 : goto free_cache;
806 :
807 1 : ret = io_ctl_check_generation(&io_ctl, generation);
808 1 : if (ret)
809 0 : goto free_cache;
810 :
811 7 : while (num_entries) {
812 6 : e = kmem_cache_zalloc(btrfs_free_space_cachep,
813 : GFP_NOFS);
814 6 : if (!e) {
815 0 : ret = -ENOMEM;
816 0 : goto free_cache;
817 : }
818 :
819 6 : ret = io_ctl_read_entry(&io_ctl, e, &type);
820 6 : if (ret) {
821 0 : kmem_cache_free(btrfs_free_space_cachep, e);
822 0 : goto free_cache;
823 : }
824 :
825 6 : if (!e->bytes) {
826 0 : ret = -1;
827 0 : kmem_cache_free(btrfs_free_space_cachep, e);
828 0 : goto free_cache;
829 : }
830 :
831 6 : if (type == BTRFS_FREE_SPACE_EXTENT) {
832 6 : spin_lock(&ctl->tree_lock);
833 6 : ret = link_free_space(ctl, e);
834 6 : spin_unlock(&ctl->tree_lock);
835 6 : if (ret) {
836 0 : btrfs_err(fs_info,
837 : "Duplicate entries in free space cache, dumping");
838 0 : kmem_cache_free(btrfs_free_space_cachep, e);
839 0 : goto free_cache;
840 : }
841 : } else {
842 0 : ASSERT(num_bitmaps);
843 0 : num_bitmaps--;
844 0 : e->bitmap = kmem_cache_zalloc(
845 : btrfs_free_space_bitmap_cachep, GFP_NOFS);
846 0 : if (!e->bitmap) {
847 0 : ret = -ENOMEM;
848 0 : kmem_cache_free(
849 : btrfs_free_space_cachep, e);
850 0 : goto free_cache;
851 : }
852 0 : spin_lock(&ctl->tree_lock);
853 0 : ret = link_free_space(ctl, e);
854 0 : if (ret) {
855 0 : spin_unlock(&ctl->tree_lock);
856 0 : btrfs_err(fs_info,
857 : "Duplicate entries in free space cache, dumping");
858 0 : kmem_cache_free(btrfs_free_space_cachep, e);
859 0 : goto free_cache;
860 : }
861 0 : ctl->total_bitmaps++;
862 0 : recalculate_thresholds(ctl);
863 0 : spin_unlock(&ctl->tree_lock);
864 0 : list_add_tail(&e->list, &bitmaps);
865 : }
866 :
867 6 : num_entries--;
868 : }
869 :
870 1 : io_ctl_unmap_page(&io_ctl);
871 :
872 : /*
873 : * The bitmaps are stored on disk after all the extent entries, so
874 : * read them last and only then add the bitmap entries to the cache.
875 : */
876 1 : list_for_each_entry_safe(e, n, &bitmaps, list) {
877 0 : list_del_init(&e->list);
878 0 : ret = io_ctl_read_bitmap(&io_ctl, e);
879 0 : if (ret)
880 0 : goto free_cache;
881 : }
882 :
883 1 : io_ctl_drop_pages(&io_ctl);
884 1 : ret = 1;
885 1 : out:
886 1 : io_ctl_free(&io_ctl);
887 1 : return ret;
888 0 : free_cache:
889 0 : io_ctl_drop_pages(&io_ctl);
890 :
891 0 : spin_lock(&ctl->tree_lock);
892 0 : __btrfs_remove_free_space_cache(ctl);
893 0 : spin_unlock(&ctl->tree_lock);
894 0 : goto out;
895 : }
896 :
897 1 : static int copy_free_space_cache(struct btrfs_block_group *block_group,
898 : struct btrfs_free_space_ctl *ctl)
899 : {
900 1 : struct btrfs_free_space *info;
901 1 : struct rb_node *n;
902 1 : int ret = 0;
903 :
904 7 : while (!ret && (n = rb_first(&ctl->free_space_offset)) != NULL) {
905 6 : info = rb_entry(n, struct btrfs_free_space, offset_index);
906 6 : if (!info->bitmap) {
907 6 : const u64 offset = info->offset;
908 6 : const u64 bytes = info->bytes;
909 :
910 6 : unlink_free_space(ctl, info, true);
911 6 : spin_unlock(&ctl->tree_lock);
912 6 : kmem_cache_free(btrfs_free_space_cachep, info);
913 6 : ret = btrfs_add_free_space(block_group, offset, bytes);
914 6 : spin_lock(&ctl->tree_lock);
915 : } else {
916 0 : u64 offset = info->offset;
917 0 : u64 bytes = ctl->unit;
918 :
919 0 : ret = search_bitmap(ctl, info, &offset, &bytes, false);
920 0 : if (ret == 0) {
921 0 : bitmap_clear_bits(ctl, info, offset, bytes, true);
922 0 : spin_unlock(&ctl->tree_lock);
923 0 : ret = btrfs_add_free_space(block_group, offset,
924 : bytes);
925 0 : spin_lock(&ctl->tree_lock);
926 : } else {
927 0 : free_bitmap(ctl, info);
928 0 : ret = 0;
929 : }
930 : }
931 6 : cond_resched_lock(&ctl->tree_lock);
932 : }
933 1 : return ret;
934 : }
935 :
936 : static struct lock_class_key btrfs_free_space_inode_key;
937 :
938 23 : int load_free_space_cache(struct btrfs_block_group *block_group)
939 : {
940 23 : struct btrfs_fs_info *fs_info = block_group->fs_info;
941 23 : struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
942 23 : struct btrfs_free_space_ctl tmp_ctl = {};
943 23 : struct inode *inode;
944 23 : struct btrfs_path *path;
945 23 : int ret = 0;
946 23 : bool matched;
947 23 : u64 used = block_group->used;
948 :
949 : /*
950 : * Because we could potentially discard our loaded free space, we want
951 : * to load everything into a temporary structure first, and then if it's
952 : * valid copy it all into the actual free space ctl.
953 : */
954 23 : btrfs_init_free_space_ctl(block_group, &tmp_ctl);
955 :
956 : /*
957 : * If this block group has been marked to be cleared for one reason or
958 : * another then we can't trust the on disk cache, so just return.
959 : */
960 23 : spin_lock(&block_group->lock);
961 23 : if (block_group->disk_cache_state != BTRFS_DC_WRITTEN) {
962 21 : spin_unlock(&block_group->lock);
963 21 : return 0;
964 : }
965 2 : spin_unlock(&block_group->lock);
966 :
967 2 : path = btrfs_alloc_path();
968 2 : if (!path)
969 : return 0;
970 2 : path->search_commit_root = 1;
971 2 : path->skip_locking = 1;
972 :
973 : /*
974 : * We must pass a path with search_commit_root set to btrfs_iget in
975 : * order to avoid a deadlock when allocating extents for the tree root.
976 : *
977 : * When we are COWing an extent buffer from the tree root, when looking
978 : * for a free extent, at extent-tree.c:find_free_extent(), we can find
979 : * block group without its free space cache loaded. When we find one
980 : * we must load its space cache which requires reading its free space
981 : * cache's inode item from the root tree. If this inode item is located
982 : * in the same leaf that we started COWing before, then we end up in
983 : * deadlock on the extent buffer (trying to read lock it when we
984 : * previously write locked it).
985 : *
986 : * It's safe to read the inode item using the commit root because
987 : * block groups, once loaded, stay in memory forever (until they are
988 : * removed) as well as their space caches once loaded. New block groups
989 : * once created get their ->cached field set to BTRFS_CACHE_FINISHED so
990 : * we will never try to read their inode item while the fs is mounted.
991 : */
992 2 : inode = lookup_free_space_inode(block_group, path);
993 2 : if (IS_ERR(inode)) {
994 1 : btrfs_free_path(path);
995 1 : return 0;
996 : }
997 :
998 : /* We may have converted the inode and made the cache invalid. */
999 1 : spin_lock(&block_group->lock);
1000 1 : if (block_group->disk_cache_state != BTRFS_DC_WRITTEN) {
1001 0 : spin_unlock(&block_group->lock);
1002 0 : btrfs_free_path(path);
1003 0 : goto out;
1004 : }
1005 1 : spin_unlock(&block_group->lock);
1006 :
1007 : /*
1008 : * Reinitialize the class of struct inode's mapping->invalidate_lock for
1009 : * free space inodes to prevent false positives related to locks for normal
1010 : * inodes.
1011 : */
1012 1 : lockdep_set_class(&(&inode->i_data)->invalidate_lock,
1013 : &btrfs_free_space_inode_key);
1014 :
1015 1 : ret = __load_free_space_cache(fs_info->tree_root, inode, &tmp_ctl,
1016 : path, block_group->start);
1017 1 : btrfs_free_path(path);
1018 1 : if (ret <= 0)
1019 0 : goto out;
1020 :
1021 1 : matched = (tmp_ctl.free_space == (block_group->length - used -
1022 1 : block_group->bytes_super));
1023 :
1024 1 : if (matched) {
1025 1 : spin_lock(&tmp_ctl.tree_lock);
1026 1 : ret = copy_free_space_cache(block_group, &tmp_ctl);
1027 1 : spin_unlock(&tmp_ctl.tree_lock);
1028 : /*
1029 : * ret == 1 means we successfully loaded the free space cache,
1030 : * so we need to re-set it here.
1031 : */
1032 1 : if (ret == 0)
1033 : ret = 1;
1034 : } else {
1035 : /*
1036 : * We need to call the _locked variant so we don't try to update
1037 : * the discard counters.
1038 : */
1039 0 : spin_lock(&tmp_ctl.tree_lock);
1040 0 : __btrfs_remove_free_space_cache(&tmp_ctl);
1041 0 : spin_unlock(&tmp_ctl.tree_lock);
1042 0 : btrfs_warn(fs_info,
1043 : "block group %llu has wrong amount of free space",
1044 : block_group->start);
1045 0 : ret = -1;
1046 : }
1047 0 : out:
1048 0 : if (ret < 0) {
1049 : /* This cache is bogus, make sure it gets cleared */
1050 0 : spin_lock(&block_group->lock);
1051 0 : block_group->disk_cache_state = BTRFS_DC_CLEAR;
1052 0 : spin_unlock(&block_group->lock);
1053 0 : ret = 0;
1054 :
1055 0 : btrfs_warn(fs_info,
1056 : "failed to load free space cache for block group %llu, rebuilding it now",
1057 : block_group->start);
1058 : }
1059 :
1060 1 : spin_lock(&ctl->tree_lock);
1061 1 : btrfs_discard_update_discardable(block_group);
1062 1 : spin_unlock(&ctl->tree_lock);
1063 1 : iput(inode);
1064 1 : return ret;
1065 : }
1066 :
1067 : static noinline_for_stack
1068 41 : int write_cache_extent_entries(struct btrfs_io_ctl *io_ctl,
1069 : struct btrfs_free_space_ctl *ctl,
1070 : struct btrfs_block_group *block_group,
1071 : int *entries, int *bitmaps,
1072 : struct list_head *bitmap_list)
1073 : {
1074 41 : int ret;
1075 41 : struct btrfs_free_cluster *cluster = NULL;
1076 41 : struct btrfs_free_cluster *cluster_locked = NULL;
1077 41 : struct rb_node *node = rb_first(&ctl->free_space_offset);
1078 41 : struct btrfs_trim_range *trim_entry;
1079 :
1080 : /* Get the cluster for this block_group if it exists */
1081 41 : if (block_group && !list_empty(&block_group->cluster_list)) {
1082 40 : cluster = list_entry(block_group->cluster_list.next,
1083 : struct btrfs_free_cluster,
1084 : block_group_list);
1085 : }
1086 :
1087 41 : if (!node && cluster) {
1088 14 : cluster_locked = cluster;
1089 14 : spin_lock(&cluster_locked->lock);
1090 14 : node = rb_first(&cluster->root);
1091 14 : cluster = NULL;
1092 : }
1093 :
1094 : /* Write out the extent entries */
1095 208 : while (node) {
1096 167 : struct btrfs_free_space *e;
1097 :
1098 167 : e = rb_entry(node, struct btrfs_free_space, offset_index);
1099 167 : *entries += 1;
1100 :
1101 167 : ret = io_ctl_add_entry(io_ctl, e->offset, e->bytes,
1102 167 : e->bitmap);
1103 167 : if (ret)
1104 0 : goto fail;
1105 :
1106 167 : if (e->bitmap) {
1107 0 : list_add_tail(&e->list, bitmap_list);
1108 0 : *bitmaps += 1;
1109 : }
1110 167 : node = rb_next(node);
1111 167 : if (!node && cluster) {
1112 26 : node = rb_first(&cluster->root);
1113 26 : cluster_locked = cluster;
1114 26 : spin_lock(&cluster_locked->lock);
1115 26 : cluster = NULL;
1116 : }
1117 : }
1118 41 : if (cluster_locked) {
1119 40 : spin_unlock(&cluster_locked->lock);
1120 40 : cluster_locked = NULL;
1121 : }
1122 :
1123 : /*
1124 : * Make sure we don't miss any range that was removed from our rbtree
1125 : * because trimming is running. Otherwise after a umount+mount (or crash
1126 : * after committing the transaction) we would leak free space and get
1127 : * an inconsistent free space cache report from fsck.
1128 : */
1129 41 : list_for_each_entry(trim_entry, &ctl->trimming_ranges, list) {
1130 0 : ret = io_ctl_add_entry(io_ctl, trim_entry->start,
1131 : trim_entry->bytes, NULL);
1132 0 : if (ret)
1133 0 : goto fail;
1134 0 : *entries += 1;
1135 : }
1136 :
1137 : return 0;
1138 0 : fail:
1139 0 : if (cluster_locked)
1140 0 : spin_unlock(&cluster_locked->lock);
1141 : return -ENOSPC;
1142 : }
1143 :
1144 : static noinline_for_stack int
1145 41 : update_cache_item(struct btrfs_trans_handle *trans,
1146 : struct btrfs_root *root,
1147 : struct inode *inode,
1148 : struct btrfs_path *path, u64 offset,
1149 : int entries, int bitmaps)
1150 : {
1151 41 : struct btrfs_key key;
1152 41 : struct btrfs_free_space_header *header;
1153 41 : struct extent_buffer *leaf;
1154 41 : int ret;
1155 :
1156 41 : key.objectid = BTRFS_FREE_SPACE_OBJECTID;
1157 41 : key.offset = offset;
1158 41 : key.type = 0;
1159 :
1160 41 : ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
1161 41 : if (ret < 0) {
1162 0 : clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, inode->i_size - 1,
1163 : EXTENT_DELALLOC, NULL);
1164 0 : goto fail;
1165 : }
1166 41 : leaf = path->nodes[0];
1167 41 : if (ret > 0) {
1168 0 : struct btrfs_key found_key;
1169 0 : ASSERT(path->slots[0]);
1170 0 : path->slots[0]--;
1171 0 : btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
1172 0 : if (found_key.objectid != BTRFS_FREE_SPACE_OBJECTID ||
1173 0 : found_key.offset != offset) {
1174 0 : clear_extent_bit(&BTRFS_I(inode)->io_tree, 0,
1175 0 : inode->i_size - 1, EXTENT_DELALLOC,
1176 : NULL);
1177 0 : btrfs_release_path(path);
1178 0 : goto fail;
1179 : }
1180 : }
1181 :
1182 41 : BTRFS_I(inode)->generation = trans->transid;
1183 41 : header = btrfs_item_ptr(leaf, path->slots[0],
1184 : struct btrfs_free_space_header);
1185 41 : btrfs_set_free_space_entries(leaf, header, entries);
1186 41 : btrfs_set_free_space_bitmaps(leaf, header, bitmaps);
1187 41 : btrfs_set_free_space_generation(leaf, header, trans->transid);
1188 41 : btrfs_mark_buffer_dirty(leaf);
1189 41 : btrfs_release_path(path);
1190 :
1191 41 : return 0;
1192 :
1193 : fail:
1194 : return -1;
1195 : }
1196 :
1197 41 : static noinline_for_stack int write_pinned_extent_entries(
1198 : struct btrfs_trans_handle *trans,
1199 : struct btrfs_block_group *block_group,
1200 : struct btrfs_io_ctl *io_ctl,
1201 : int *entries)
1202 : {
1203 41 : u64 start, extent_start, extent_end, len;
1204 41 : struct extent_io_tree *unpin = NULL;
1205 41 : int ret;
1206 :
1207 41 : if (!block_group)
1208 : return 0;
1209 :
1210 : /*
1211 : * We want to add any pinned extents to our free space cache
1212 : * so we don't leak the space.
1213 : *
1214 : * We shouldn't have switched the pinned extents yet so this is the
1215 : * right one.
1216 : */
1217 41 : unpin = &trans->transaction->pinned_extents;
1218 :
1219 41 : start = block_group->start;
1220 :
1221 94 : while (start < block_group->start + block_group->length) {
1222 94 : ret = find_first_extent_bit(unpin, start,
1223 : &extent_start, &extent_end,
1224 : EXTENT_DIRTY, NULL);
1225 94 : if (ret)
1226 : return 0;
1227 :
1228 : /* This pinned extent is out of our range */
1229 53 : if (extent_start >= block_group->start + block_group->length)
1230 : return 0;
1231 :
1232 53 : extent_start = max(extent_start, start);
1233 53 : extent_end = min(block_group->start + block_group->length,
1234 : extent_end + 1);
1235 53 : len = extent_end - extent_start;
1236 :
1237 53 : *entries += 1;
1238 53 : ret = io_ctl_add_entry(io_ctl, extent_start, len, NULL);
1239 53 : if (ret)
1240 : return -ENOSPC;
1241 :
1242 53 : start = extent_end;
1243 : }
1244 :
1245 : return 0;
1246 : }
1247 :
1248 : static noinline_for_stack int
1249 41 : write_bitmap_entries(struct btrfs_io_ctl *io_ctl, struct list_head *bitmap_list)
1250 : {
1251 41 : struct btrfs_free_space *entry, *next;
1252 41 : int ret;
1253 :
1254 : /* Write out the bitmaps */
1255 41 : list_for_each_entry_safe(entry, next, bitmap_list, list) {
1256 0 : ret = io_ctl_add_bitmap(io_ctl, entry->bitmap);
1257 0 : if (ret)
1258 : return -ENOSPC;
1259 0 : list_del_init(&entry->list);
1260 : }
1261 :
1262 : return 0;
1263 : }
1264 :
1265 41 : static int flush_dirty_cache(struct inode *inode)
1266 : {
1267 41 : int ret;
1268 :
1269 41 : ret = btrfs_wait_ordered_range(inode, 0, (u64)-1);
1270 41 : if (ret)
1271 0 : clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, inode->i_size - 1,
1272 : EXTENT_DELALLOC, NULL);
1273 :
1274 41 : return ret;
1275 : }
1276 :
1277 : static void noinline_for_stack
1278 0 : cleanup_bitmap_list(struct list_head *bitmap_list)
1279 : {
1280 0 : struct btrfs_free_space *entry, *next;
1281 :
1282 0 : list_for_each_entry_safe(entry, next, bitmap_list, list)
1283 0 : list_del_init(&entry->list);
1284 0 : }
1285 :
1286 : static void noinline_for_stack
1287 0 : cleanup_write_cache_enospc(struct inode *inode,
1288 : struct btrfs_io_ctl *io_ctl,
1289 : struct extent_state **cached_state)
1290 : {
1291 0 : io_ctl_drop_pages(io_ctl);
1292 0 : unlock_extent(&BTRFS_I(inode)->io_tree, 0, i_size_read(inode) - 1,
1293 : cached_state);
1294 0 : }
1295 :
1296 41 : static int __btrfs_wait_cache_io(struct btrfs_root *root,
1297 : struct btrfs_trans_handle *trans,
1298 : struct btrfs_block_group *block_group,
1299 : struct btrfs_io_ctl *io_ctl,
1300 : struct btrfs_path *path, u64 offset)
1301 : {
1302 41 : int ret;
1303 41 : struct inode *inode = io_ctl->inode;
1304 :
1305 41 : if (!inode)
1306 : return 0;
1307 :
1308 : /* Flush the dirty pages in the cache file. */
1309 41 : ret = flush_dirty_cache(inode);
1310 41 : if (ret)
1311 0 : goto out;
1312 :
1313 : /* Update the cache item to tell everyone this cache file is valid. */
1314 41 : ret = update_cache_item(trans, root, inode, path, offset,
1315 : io_ctl->entries, io_ctl->bitmaps);
1316 41 : out:
1317 41 : if (ret) {
1318 0 : invalidate_inode_pages2(inode->i_mapping);
1319 0 : BTRFS_I(inode)->generation = 0;
1320 0 : if (block_group)
1321 : btrfs_debug(root->fs_info,
1322 : "failed to write free space cache for block group %llu error %d",
1323 : block_group->start, ret);
1324 : }
1325 41 : btrfs_update_inode(trans, root, BTRFS_I(inode));
1326 :
1327 41 : if (block_group) {
1328 : /* the dirty list is protected by the dirty_bgs_lock */
1329 41 : spin_lock(&trans->transaction->dirty_bgs_lock);
1330 :
1331 : /* the disk_cache_state is protected by the block group lock */
1332 41 : spin_lock(&block_group->lock);
1333 :
1334 : /*
1335 : * only mark this as written if we didn't get put back on
1336 : * the dirty list while waiting for IO. Otherwise our
1337 : * cache state won't be right, and we won't get written again
1338 : */
1339 41 : if (!ret && list_empty(&block_group->dirty_list))
1340 17 : block_group->disk_cache_state = BTRFS_DC_WRITTEN;
1341 24 : else if (ret)
1342 0 : block_group->disk_cache_state = BTRFS_DC_ERROR;
1343 :
1344 41 : spin_unlock(&block_group->lock);
1345 41 : spin_unlock(&trans->transaction->dirty_bgs_lock);
1346 41 : io_ctl->inode = NULL;
1347 41 : iput(inode);
1348 : }
1349 :
1350 : return ret;
1351 :
1352 : }
1353 :
1354 41 : int btrfs_wait_cache_io(struct btrfs_trans_handle *trans,
1355 : struct btrfs_block_group *block_group,
1356 : struct btrfs_path *path)
1357 : {
1358 41 : return __btrfs_wait_cache_io(block_group->fs_info->tree_root, trans,
1359 : block_group, &block_group->io_ctl,
1360 : path, block_group->start);
1361 : }
1362 :
1363 : /*
1364 : * Write out cached info to an inode.
1365 : *
1366 : * @root: root the inode belongs to
1367 : * @inode: free space inode we are writing out
1368 : * @ctl: free space cache we are going to write out
1369 : * @block_group: block_group for this cache if it belongs to a block_group
1370 : * @io_ctl: holds context for the io
1371 : * @trans: the trans handle
1372 : *
1373 : * This function writes out a free space cache struct to disk for quick recovery
1374 : * on mount. This will return 0 if it was successful in writing the cache out,
1375 : * or an errno if it was not.
1376 : */
1377 41 : static int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode,
1378 : struct btrfs_free_space_ctl *ctl,
1379 : struct btrfs_block_group *block_group,
1380 : struct btrfs_io_ctl *io_ctl,
1381 : struct btrfs_trans_handle *trans)
1382 : {
1383 41 : struct extent_state *cached_state = NULL;
1384 41 : LIST_HEAD(bitmap_list);
1385 41 : int entries = 0;
1386 41 : int bitmaps = 0;
1387 41 : int ret;
1388 41 : int must_iput = 0;
1389 :
1390 41 : if (!i_size_read(inode))
1391 : return -EIO;
1392 :
1393 41 : WARN_ON(io_ctl->pages);
1394 41 : ret = io_ctl_init(io_ctl, inode, 1);
1395 41 : if (ret)
1396 : return ret;
1397 :
1398 41 : if (block_group && (block_group->flags & BTRFS_BLOCK_GROUP_DATA)) {
1399 1 : down_write(&block_group->data_rwsem);
1400 1 : spin_lock(&block_group->lock);
1401 1 : if (block_group->delalloc_bytes) {
1402 0 : block_group->disk_cache_state = BTRFS_DC_WRITTEN;
1403 0 : spin_unlock(&block_group->lock);
1404 0 : up_write(&block_group->data_rwsem);
1405 0 : BTRFS_I(inode)->generation = 0;
1406 0 : ret = 0;
1407 0 : must_iput = 1;
1408 0 : goto out;
1409 : }
1410 1 : spin_unlock(&block_group->lock);
1411 : }
1412 :
1413 : /* Lock all pages first so we can lock the extent safely. */
1414 41 : ret = io_ctl_prepare_pages(io_ctl, false);
1415 41 : if (ret)
1416 0 : goto out_unlock;
1417 :
1418 41 : lock_extent(&BTRFS_I(inode)->io_tree, 0, i_size_read(inode) - 1,
1419 : &cached_state);
1420 :
1421 41 : io_ctl_set_generation(io_ctl, trans->transid);
1422 :
1423 41 : mutex_lock(&ctl->cache_writeout_mutex);
1424 : /* Write out the extent entries in the free space cache */
1425 41 : spin_lock(&ctl->tree_lock);
1426 41 : ret = write_cache_extent_entries(io_ctl, ctl,
1427 : block_group, &entries, &bitmaps,
1428 : &bitmap_list);
1429 41 : if (ret)
1430 0 : goto out_nospc_locked;
1431 :
1432 : /*
1433 : * Some spaces that are freed in the current transaction are pinned,
1434 : * they will be added into free space cache after the transaction is
1435 : * committed, we shouldn't lose them.
1436 : *
1437 : * If this changes while we are working we'll get added back to
1438 : * the dirty list and redo it. No locking needed
1439 : */
1440 41 : ret = write_pinned_extent_entries(trans, block_group, io_ctl, &entries);
1441 41 : if (ret)
1442 0 : goto out_nospc_locked;
1443 :
1444 : /*
1445 : * At last, we write out all the bitmaps and keep cache_writeout_mutex
1446 : * locked while doing it because a concurrent trim can be manipulating
1447 : * or freeing the bitmap.
1448 : */
1449 41 : ret = write_bitmap_entries(io_ctl, &bitmap_list);
1450 41 : spin_unlock(&ctl->tree_lock);
1451 41 : mutex_unlock(&ctl->cache_writeout_mutex);
1452 41 : if (ret)
1453 0 : goto out_nospc;
1454 :
1455 : /* Zero out the rest of the pages just to make sure */
1456 41 : io_ctl_zero_remaining_pages(io_ctl);
1457 :
1458 : /* Everything is written out, now we dirty the pages in the file. */
1459 41 : ret = btrfs_dirty_pages(BTRFS_I(inode), io_ctl->pages,
1460 41 : io_ctl->num_pages, 0, i_size_read(inode),
1461 : &cached_state, false);
1462 41 : if (ret)
1463 0 : goto out_nospc;
1464 :
1465 41 : if (block_group && (block_group->flags & BTRFS_BLOCK_GROUP_DATA))
1466 1 : up_write(&block_group->data_rwsem);
1467 : /*
1468 : * Release the pages and unlock the extent, we will flush
1469 : * them out later
1470 : */
1471 41 : io_ctl_drop_pages(io_ctl);
1472 41 : io_ctl_free(io_ctl);
1473 :
1474 41 : unlock_extent(&BTRFS_I(inode)->io_tree, 0, i_size_read(inode) - 1,
1475 : &cached_state);
1476 :
1477 : /*
1478 : * At this point the pages are under IO and we're happy.
1479 : * The caller is responsible for waiting on them and updating
1480 : * the cache and the inode.
1481 : */
1482 41 : io_ctl->entries = entries;
1483 41 : io_ctl->bitmaps = bitmaps;
1484 :
1485 41 : ret = btrfs_fdatawrite_range(inode, 0, (u64)-1);
1486 41 : if (ret)
1487 0 : goto out;
1488 :
1489 : return 0;
1490 :
1491 0 : out_nospc_locked:
1492 0 : cleanup_bitmap_list(&bitmap_list);
1493 0 : spin_unlock(&ctl->tree_lock);
1494 0 : mutex_unlock(&ctl->cache_writeout_mutex);
1495 :
1496 0 : out_nospc:
1497 0 : cleanup_write_cache_enospc(inode, io_ctl, &cached_state);
1498 :
1499 0 : out_unlock:
1500 0 : if (block_group && (block_group->flags & BTRFS_BLOCK_GROUP_DATA))
1501 0 : up_write(&block_group->data_rwsem);
1502 :
1503 0 : out:
1504 0 : io_ctl->inode = NULL;
1505 0 : io_ctl_free(io_ctl);
1506 0 : if (ret) {
1507 0 : invalidate_inode_pages2(inode->i_mapping);
1508 0 : BTRFS_I(inode)->generation = 0;
1509 : }
1510 0 : btrfs_update_inode(trans, root, BTRFS_I(inode));
1511 0 : if (must_iput)
1512 0 : iput(inode);
1513 : return ret;
1514 : }
1515 :
1516 41 : int btrfs_write_out_cache(struct btrfs_trans_handle *trans,
1517 : struct btrfs_block_group *block_group,
1518 : struct btrfs_path *path)
1519 : {
1520 41 : struct btrfs_fs_info *fs_info = trans->fs_info;
1521 41 : struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
1522 41 : struct inode *inode;
1523 41 : int ret = 0;
1524 :
1525 41 : spin_lock(&block_group->lock);
1526 41 : if (block_group->disk_cache_state < BTRFS_DC_SETUP) {
1527 0 : spin_unlock(&block_group->lock);
1528 0 : return 0;
1529 : }
1530 41 : spin_unlock(&block_group->lock);
1531 :
1532 41 : inode = lookup_free_space_inode(block_group, path);
1533 41 : if (IS_ERR(inode))
1534 : return 0;
1535 :
1536 41 : ret = __btrfs_write_out_cache(fs_info->tree_root, inode, ctl,
1537 : block_group, &block_group->io_ctl, trans);
1538 41 : if (ret) {
1539 0 : btrfs_debug(fs_info,
1540 : "failed to write free space cache for block group %llu error %d",
1541 : block_group->start, ret);
1542 0 : spin_lock(&block_group->lock);
1543 0 : block_group->disk_cache_state = BTRFS_DC_ERROR;
1544 0 : spin_unlock(&block_group->lock);
1545 :
1546 0 : block_group->io_ctl.inode = NULL;
1547 0 : iput(inode);
1548 : }
1549 :
1550 : /*
1551 : * if ret == 0 the caller is expected to call btrfs_wait_cache_io
1552 : * to wait for IO and put the inode
1553 : */
1554 :
1555 : return ret;
1556 : }
1557 :
1558 : static inline unsigned long offset_to_bit(u64 bitmap_start, u32 unit,
1559 : u64 offset)
1560 : {
1561 5137955 : ASSERT(offset >= bitmap_start);
1562 5137955 : offset -= bitmap_start;
1563 5137955 : return (unsigned long)(div_u64(offset, unit));
1564 : }
1565 :
1566 : static inline unsigned long bytes_to_bits(u64 bytes, u32 unit)
1567 : {
1568 4418321 : return (unsigned long)(div_u64(bytes, unit));
1569 : }
1570 :
1571 : static inline u64 offset_to_bitmap(struct btrfs_free_space_ctl *ctl,
1572 : u64 offset)
1573 : {
1574 4384424 : u64 bitmap_start;
1575 4384424 : u64 bytes_per_bitmap;
1576 :
1577 4384424 : bytes_per_bitmap = BITS_PER_BITMAP * ctl->unit;
1578 4384424 : bitmap_start = offset - ctl->start;
1579 4384424 : bitmap_start = div64_u64(bitmap_start, bytes_per_bitmap);
1580 4384424 : bitmap_start *= bytes_per_bitmap;
1581 4384424 : bitmap_start += ctl->start;
1582 :
1583 3479998 : return bitmap_start;
1584 : }
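
/*
 * Granularity sketch for the three helpers above, assuming 4 KiB pages
 * and ctl->unit == 4096 (both assumptions; the unit can differ, e.g.
 * for metadata block groups):
 *
 *	BITS_PER_BITMAP  = 4096 * 8      = 32768 bits
 *	bytes_per_bitmap = 32768 * 4096  = 128 MiB
 *
 * so each bitmap entry tracks a 128 MiB window, and offset_to_bitmap()
 * rounds an offset down to the nearest 128 MiB boundary relative to
 * ctl->start.
 */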
1585 :
1586 7086194 : static int tree_insert_offset(struct btrfs_free_space_ctl *ctl,
1587 : struct btrfs_free_cluster *cluster,
1588 : struct btrfs_free_space *new_entry)
1589 : {
1590 7086194 : struct rb_root *root;
1591 7086194 : struct rb_node **p;
1592 7086194 : struct rb_node *parent = NULL;
1593 :
1594 7086194 : lockdep_assert_held(&ctl->tree_lock);
1595 :
1596 7086194 : if (cluster) {
1597 1203323 : lockdep_assert_held(&cluster->lock);
1598 1203323 : root = &cluster->root;
1599 : } else {
1600 5882871 : root = &ctl->free_space_offset;
1601 : }
1602 :
1603 7086194 : p = &root->rb_node;
1604 :
1605 51997899 : while (*p) {
1606 44911705 : struct btrfs_free_space *info;
1607 :
1608 44911705 : parent = *p;
1609 44911705 : info = rb_entry(parent, struct btrfs_free_space, offset_index);
1610 :
1611 44911705 : if (new_entry->offset < info->offset) {
1612 10497768 : p = &(*p)->rb_left;
1613 34413937 : } else if (new_entry->offset > info->offset) {
1614 34404634 : p = &(*p)->rb_right;
1615 : } else {
1616 : /*
1617 : * we could have a bitmap entry and an extent entry
1618 : * share the same offset. If this is the case, we want
1619 : * the extent entry to always be found first if we do a
1620 : * linear search through the tree, since we want to have
1621 : * the quickest allocation time, and allocating from an
1622 : * extent is faster than allocating from a bitmap. So
1623 : * if we're inserting a bitmap and we find an entry at
1624 : * this offset, we want to go right, or after this entry
1625 : * logically. If we are inserting an extent and we've
1626 : * found a bitmap, we want to go left, or before
1627 : * logically.
1628 : */
1629 9303 : if (new_entry->bitmap) {
1630 145 : if (info->bitmap) {
1631 0 : WARN_ON_ONCE(1);
1632 0 : return -EEXIST;
1633 : }
1634 145 : p = &(*p)->rb_right;
1635 : } else {
1636 9158 : if (!info->bitmap) {
1637 0 : WARN_ON_ONCE(1);
1638 0 : return -EEXIST;
1639 : }
1640 9158 : p = &(*p)->rb_left;
1641 : }
1642 : }
1643 : }
1644 :
1645 7086194 : rb_link_node(&new_entry->offset_index, parent, p);
1646 7086194 : rb_insert_color(&new_entry->offset_index, root);
1647 :
1648 7086194 : return 0;
1649 : }
1650 :
1651 : /*
1652 : * This is a little subtle. We *only* have ->max_extent_size set if we actually
1653 : * searched through the bitmap and figured out the largest ->max_extent_size,
1654 : * otherwise it's 0. In the case that it's 0 we don't want to tell the
1655 : * allocator the wrong thing, we want to use the actual real max_extent_size
1656 : * we've found already if it's larger, or we want to use ->bytes.
1657 : *
1658 : * This matters because find_free_space() will skip entries who's ->bytes is
1659 : * less than the required bytes. So if we didn't search down this bitmap, we
1660 : * may pick some previous entry that has a smaller ->max_extent_size than we
1661 : * have. For example, assume we have two entries, one that has
1662 : * ->max_extent_size set to 4K and ->bytes set to 1M. A second entry hasn't set
1663 : * ->max_extent_size yet, has ->bytes set to 8K and it's contiguous. We will
1664 : * call into find_free_space(), and return with max_extent_size == 4K, because
1665 : * that first bitmap entry had ->max_extent_size set, but the second one did
1666 : * not. If instead we returned 8K we'd come in searching for 8K, and find the
1667 : * 8K contiguous range.
1668 : *
1669 : * Consider the other case, we have 2 8K chunks in that second entry and still
1670 : * don't have ->max_extent_size set. We'll return 16K, and the next time the
1671 : * allocator comes in it'll fully search our second bitmap, and this time it'll
1672 : * get an uptodate value of 8K as the maximum chunk size. Then we'll get the
1673 : * right allocation the next loop through.
1674 : */
1675 : static inline u64 get_max_extent_size(const struct btrfs_free_space *entry)
1676 : {
1677 46457807 : if (entry->bitmap && entry->max_extent_size)
1678 : return entry->max_extent_size;
1679 89555377 : return entry->bytes;
1680 : }
1681 :
1682 : /*
1683 : * We want the largest entry to be leftmost, so this is inverted from what you'd
1684 : * normally expect.
1685 : */
1686 44496664 : static bool entry_less(struct rb_node *node, const struct rb_node *parent)
1687 : {
1688 44496664 : const struct btrfs_free_space *entry, *exist;
1689 :
1690 44496664 : entry = rb_entry(node, struct btrfs_free_space, bytes_index);
1691 44496664 : exist = rb_entry(parent, struct btrfs_free_space, bytes_index);
1692 44496664 : return get_max_extent_size(exist) < get_max_extent_size(entry);
1693 : }
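
/*
 * A minimal sketch of how a comparator shaped like entry_less() plugs
 * into the cached-rbtree API: rb_add_cached() keeps the leftmost node
 * cached, and since "less" here means "larger max_extent_size", the
 * cached node is always the biggest entry, reachable in O(1) via
 * rb_first_cached(). The helper name below is illustrative.
 */
static inline void example_index_by_bytes(struct btrfs_free_space_ctl *ctl,
					  struct btrfs_free_space *info)
{
	rb_add_cached(&info->bytes_index, &ctl->free_space_bytes, entry_less);
}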
1694 :
1695 : /*
1696 : * searches the tree for the given offset.
1697 : *
1698 : * fuzzy - If this is set, then we are trying to make an allocation, and we just
1699 : * want a section that has at least bytes size and comes at or after the given
1700 : * offset.
1701 : */
1702 : static struct btrfs_free_space *
1703 11444674 : tree_search_offset(struct btrfs_free_space_ctl *ctl,
1704 : u64 offset, int bitmap_only, int fuzzy)
1705 : {
1706 11444674 : struct rb_node *n = ctl->free_space_offset.rb_node;
1707 11444674 : struct btrfs_free_space *entry = NULL, *prev = NULL;
1708 :
1709 11444674 : lockdep_assert_held(&ctl->tree_lock);
1710 :
1711 : /* find entry that is closest to the 'offset' */
1712 95761144 : while (n) {
1713 86576941 : entry = rb_entry(n, struct btrfs_free_space, offset_index);
1714 86576941 : prev = entry;
1715 :
1716 86576941 : if (offset < entry->offset)
1717 28805275 : n = n->rb_left;
1718 57771666 : else if (offset > entry->offset)
1719 55511195 : n = n->rb_right;
1720 : else
1721 : break;
1722 :
1723 : entry = NULL;
1724 : }
1725 :
1726 11444674 : if (bitmap_only) {
1727 2023698 : if (!entry)
1728 : return NULL;
1729 1836595 : if (entry->bitmap)
1730 : return entry;
1731 :
1732 : /*
1733 : * A bitmap entry and an extent entry may share the same offset;
1734 : * in that case, the bitmap entry comes after the extent entry.
1735 : */
1736 44638 : n = rb_next(n);
1737 44638 : if (!n)
1738 : return NULL;
1739 44245 : entry = rb_entry(n, struct btrfs_free_space, offset_index);
1740 44245 : if (entry->offset != offset)
1741 : return NULL;
1742 :
1743 40828 : WARN_ON(!entry->bitmap);
1744 : return entry;
1745 9420976 : } else if (entry) {
1746 423876 : if (entry->bitmap) {
1747 : /*
1748 : * if the previous extent entry covers the offset,
1749 : * we should return it instead of the bitmap entry
1750 : */
1751 45979 : n = rb_prev(&entry->offset_index);
1752 45979 : if (n) {
1753 37217 : prev = rb_entry(n, struct btrfs_free_space,
1754 : offset_index);
1755 37217 : if (!prev->bitmap &&
1756 32659 : prev->offset + prev->bytes > offset)
1757 971 : entry = prev;
1758 : }
1759 : }
1760 423876 : return entry;
1761 : }
1762 :
1763 8997100 : if (!prev)
1764 : return NULL;
1765 :
1766 : /* find last entry before the 'offset' */
1767 8933149 : entry = prev;
1768 8933149 : if (entry->offset > offset) {
1769 3299270 : n = rb_prev(&entry->offset_index);
1770 3299271 : if (n) {
1771 : entry = rb_entry(n, struct btrfs_free_space,
1772 : offset_index);
1773 : ASSERT(entry->offset <= offset);
1774 : } else {
1775 1033840 : if (fuzzy)
1776 : return entry;
1777 : else
1778 14706 : return NULL;
1779 : }
1780 : }
1781 :
1782 7899310 : if (entry->bitmap) {
1783 172597 : n = rb_prev(&entry->offset_index);
1784 172597 : if (n) {
1785 135018 : prev = rb_entry(n, struct btrfs_free_space,
1786 : offset_index);
1787 135018 : if (!prev->bitmap &&
1788 118969 : prev->offset + prev->bytes > offset)
1789 : return prev;
1790 : }
1791 172583 : if (entry->offset + BITS_PER_BITMAP * ctl->unit > offset)
1792 : return entry;
1793 7726713 : } else if (entry->offset + entry->bytes > offset)
1794 : return entry;
1795 :
1796 7287325 : if (!fuzzy)
1797 : return NULL;
1798 :
1799 564407 : while (1) {
1800 564407 : n = rb_next(&entry->offset_index);
1801 564410 : if (!n)
1802 : return NULL;
1803 564022 : entry = rb_entry(n, struct btrfs_free_space, offset_index);
1804 564022 : if (entry->bitmap) {
1805 4147 : if (entry->offset + BITS_PER_BITMAP *
1806 4147 : ctl->unit > offset)
1807 : break;
1808 : } else {
1809 559875 : if (entry->offset + entry->bytes > offset)
1810 : break;
1811 : }
1812 : }
1813 : return entry;
1814 : }
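/*
 * Worked example (illustrative offsets, no bitmap entries): with extent
 * entries [0, 64K) and [1M, 1M+128K), looking up offset 1M+4K with
 * fuzzy=0 returns the [1M, ...) entry because it covers the offset.
 * Looking up offset 512K with fuzzy=0 returns NULL, since [0, 64K) ends
 * before it; the same lookup with fuzzy=1 walks forward and returns the
 * [1M, ...) entry, the first free space at or after the requested offset.
 */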
1815 :
1816 4679563 : static inline void unlink_free_space(struct btrfs_free_space_ctl *ctl,
1817 : struct btrfs_free_space *info,
1818 : bool update_stat)
1819 : {
1820 4679563 : lockdep_assert_held(&ctl->tree_lock);
1821 :
1822 4679563 : rb_erase(&info->offset_index, &ctl->free_space_offset);
1823 4679565 : rb_erase_cached(&info->bytes_index, &ctl->free_space_bytes);
1824 4679563 : ctl->free_extents--;
1825 :
1826 4679563 : if (!info->bitmap && !btrfs_free_space_trimmed(info)) {
1827 2711136 : ctl->discardable_extents[BTRFS_STAT_CURR]--;
1828 2711136 : ctl->discardable_bytes[BTRFS_STAT_CURR] -= info->bytes;
1829 : }
1830 :
1831 4679563 : if (update_stat)
1832 4677820 : ctl->free_space -= info->bytes;
1833 4679563 : }
1834 :
1835 5848422 : static int link_free_space(struct btrfs_free_space_ctl *ctl,
1836 : struct btrfs_free_space *info)
1837 : {
1838 5848422 : int ret = 0;
1839 :
1840 5848422 : lockdep_assert_held(&ctl->tree_lock);
1841 :
1842 5848422 : ASSERT(info->bytes || info->bitmap);
1843 5848422 : ret = tree_insert_offset(ctl, NULL, info);
1844 5848421 : if (ret)
1845 : return ret;
1846 :
1847 5848421 : rb_add_cached(&info->bytes_index, &ctl->free_space_bytes, entry_less);
1848 :
1849 5848420 : if (!info->bitmap && !btrfs_free_space_trimmed(info)) {
1850 3865485 : ctl->discardable_extents[BTRFS_STAT_CURR]++;
1851 3865485 : ctl->discardable_bytes[BTRFS_STAT_CURR] += info->bytes;
1852 : }
1853 :
1854 5848420 : ctl->free_space += info->bytes;
1855 5848420 : ctl->free_extents++;
1856 5848420 : return ret;
1857 : }
1858 :
1859 3085613 : static void relink_bitmap_entry(struct btrfs_free_space_ctl *ctl,
1860 : struct btrfs_free_space *info)
1861 : {
1862 3085613 : ASSERT(info->bitmap);
1863 :
1864 : /*
1865 : * If our bytes_index node is empty it's because we're on a cluster and we don't
1866 : * want to re-link it into our ctl bytes index.
1867 : */
1868 3085613 : if (RB_EMPTY_NODE(&info->bytes_index))
1869 : return;
1870 :
1871 1480789 : lockdep_assert_held(&ctl->tree_lock);
1872 :
1873 1480789 : rb_erase_cached(&info->bytes_index, &ctl->free_space_bytes);
1874 1480784 : rb_add_cached(&info->bytes_index, &ctl->free_space_bytes, entry_less);
1875 : }
1876 :
1877 1470359 : static inline void bitmap_clear_bits(struct btrfs_free_space_ctl *ctl,
1878 : struct btrfs_free_space *info,
1879 : u64 offset, u64 bytes, bool update_stat)
1880 : {
1881 1470359 : unsigned long start, count, end;
1882 1470359 : int extent_delta = -1;
1883 :
1884 1470359 : start = offset_to_bit(info->offset, ctl->unit, offset);
1885 1470359 : count = bytes_to_bits(bytes, ctl->unit);
1886 1470359 : end = start + count;
1887 1470359 : ASSERT(end <= BITS_PER_BITMAP);
1888 :
1889 1470359 : bitmap_clear(info->bitmap, start, count);
1890 :
1891 1470354 : info->bytes -= bytes;
1892 1470354 : if (info->max_extent_size > ctl->unit)
1893 37812 : info->max_extent_size = 0;
1894 :
1895 1470354 : relink_bitmap_entry(ctl, info);
1896 :
1897 2940557 : if (start && test_bit(start - 1, info->bitmap))
1898 627 : extent_delta++;
1899 :
1900 2940488 : if (end < BITS_PER_BITMAP && test_bit(end, info->bitmap))
1901 1007156 : extent_delta++;
1902 :
1903 1470351 : info->bitmap_extents += extent_delta;
1904 1470351 : if (!btrfs_free_space_trimmed(info)) {
1905 1457486 : ctl->discardable_extents[BTRFS_STAT_CURR] += extent_delta;
1906 1457486 : ctl->discardable_bytes[BTRFS_STAT_CURR] -= bytes;
1907 : }
1908 :
1909 1470351 : if (update_stat)
1910 328778 : ctl->free_space -= bytes;
1911 1470351 : }
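/*
 * Example of the extent_delta accounting above (illustrative bit
 * pattern): clearing the middle of a set run, 111111 -> 110011, starts
 * at delta -1 and both neighbour tests then find set bits, for a net
 * delta of +1 -- one free extent became two. Clearing an isolated run
 * entirely, 011110 -> 000000, leaves delta at -1.
 */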
1912 :
1913 1558463 : static void bitmap_set_bits(struct btrfs_free_space_ctl *ctl,
1914 : struct btrfs_free_space *info, u64 offset,
1915 : u64 bytes)
1916 : {
1917 1558463 : unsigned long start, count, end;
1918 1558463 : int extent_delta = 1;
1919 :
1920 1558463 : start = offset_to_bit(info->offset, ctl->unit, offset);
1921 1558463 : count = bytes_to_bits(bytes, ctl->unit);
1922 1558463 : end = start + count;
1923 1558463 : ASSERT(end <= BITS_PER_BITMAP);
1924 :
1925 1558463 : bitmap_set(info->bitmap, start, count);
1926 :
1927 : /*
1928 : * We set some bytes, we have no idea what the max extent size is
1929 : * anymore.
1930 : */
1931 1558464 : info->max_extent_size = 0;
1932 1558464 : info->bytes += bytes;
1933 1558464 : ctl->free_space += bytes;
1934 :
1935 1558464 : relink_bitmap_entry(ctl, info);
1936 :
1937 3116750 : if (start && test_bit(start - 1, info->bitmap))
1938 511909 : extent_delta--;
1939 :
1940 3116680 : if (end < BITS_PER_BITMAP && test_bit(end, info->bitmap))
1941 514105 : extent_delta--;
1942 :
1943 1558464 : info->bitmap_extents += extent_delta;
1944 1558464 : if (!btrfs_free_space_trimmed(info)) {
1945 1529881 : ctl->discardable_extents[BTRFS_STAT_CURR] += extent_delta;
1946 1529881 : ctl->discardable_bytes[BTRFS_STAT_CURR] += bytes;
1947 : }
1948 1558464 : }
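/*
 * The mirror image of the accounting in bitmap_clear_bits(), again with
 * an illustrative pattern: setting the gap in 110011 -> 111111 starts
 * at delta +1 and both neighbour tests subtract one, for a net delta of
 * -1 -- two free extents merged into one. Setting bits with no set
 * neighbours adds exactly one extent.
 */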
1949 :
1950 : /*
1951 : * If we cannot find a suitable extent, we will use @bytes to record
1952 : * the size of the largest extent found.
1953 : */
1954 1861496 : static int search_bitmap(struct btrfs_free_space_ctl *ctl,
1955 : struct btrfs_free_space *bitmap_info, u64 *offset,
1956 : u64 *bytes, bool for_alloc)
1957 : {
1958 1861496 : unsigned long found_bits = 0;
1959 1861496 : unsigned long max_bits = 0;
1960 1861496 : unsigned long bits, i;
1961 1861496 : unsigned long next_zero;
1962 1861496 : unsigned long extent_bits;
1963 :
1964 : /*
1965 : * Skip searching the bitmap if we don't have a contiguous section that
1966 : * is large enough for this allocation.
1967 : */
1968 1861496 : if (for_alloc &&
1969 1843810 : bitmap_info->max_extent_size &&
1970 510764 : bitmap_info->max_extent_size < *bytes) {
1971 472755 : *bytes = bitmap_info->max_extent_size;
1972 472755 : return -1;
1973 : }
1974 :
1975 1388741 : i = offset_to_bit(bitmap_info->offset, ctl->unit,
1976 1388741 : max_t(u64, *offset, bitmap_info->offset));
1977 1388741 : bits = bytes_to_bits(*bytes, ctl->unit);
1978 :
1979 10630563 : for_each_set_bit_from(i, bitmap_info->bitmap, BITS_PER_BITMAP) {
1980 10573210 : if (for_alloc && bits == 1) {
1981 : found_bits = 1;
1982 : break;
1983 : }
1984 10472988 : next_zero = find_next_zero_bit(bitmap_info->bitmap,
1985 : BITS_PER_BITMAP, i);
1986 10473558 : extent_bits = next_zero - i;
1987 10473558 : if (extent_bits >= bits) {
1988 : found_bits = extent_bits;
1989 : break;
1990 9241822 : } else if (extent_bits > max_bits) {
1991 : max_bits = extent_bits;
1992 : }
1993 9241822 : i = next_zero;
1994 : }
1995 :
1996 1388747 : if (found_bits) {
1997 1331958 : *offset = (u64)(i * ctl->unit) + bitmap_info->offset;
1998 1331958 : *bytes = (u64)(found_bits) * ctl->unit;
1999 1331958 : return 0;
2000 : }
2001 :
2002 56789 : *bytes = (u64)(max_bits) * ctl->unit;
2003 56789 : bitmap_info->max_extent_size = *bytes;
2004 56789 : relink_bitmap_entry(ctl, bitmap_info);
2005 56789 : return -1;
2006 : }
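/*
 * A sketch of the early-exit behaviour above (names and sizes are
 * illustrative): if a previous scan of this bitmap recorded
 * ->max_extent_size == 16K, then a later
 * search_bitmap(ctl, info, &offset, &bytes, true) asking for 64K
 * returns -1 immediately and rewrites *bytes to 16K, so the caller can
 * propagate the largest available size without rescanning the bitmap.
 */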
2007 :
2008 : /* Cache the size of the max extent in bytes */
2009 : static struct btrfs_free_space *
2010 4114107 : find_free_space(struct btrfs_free_space_ctl *ctl, u64 *offset, u64 *bytes,
2011 : unsigned long align, u64 *max_extent_size, bool use_bytes_index)
2012 : {
2013 4114107 : struct btrfs_free_space *entry;
2014 4114107 : struct rb_node *node;
2015 4114107 : u64 tmp;
2016 4114107 : u64 align_off;
2017 4114107 : int ret;
2018 :
2019 4114107 : if (!ctl->free_space_offset.rb_node)
2020 1125 : goto out;
2021 4112982 : again:
2022 4160298 : if (use_bytes_index) {
2023 2489876 : node = rb_first_cached(&ctl->free_space_bytes);
2024 : } else {
2025 1670422 : entry = tree_search_offset(ctl, offset_to_bitmap(ctl, *offset),
2026 : 0, 1);
2027 1670425 : if (!entry)
2028 39 : goto out;
2029 1670386 : node = &entry->offset_index;
2030 : }
2031 :
2032 5708037 : for (; node; node = rb_next(node)) {
2033 5668222 : if (use_bytes_index)
2034 2952539 : entry = rb_entry(node, struct btrfs_free_space,
2035 : bytes_index);
2036 : else
2037 : entry = rb_entry(node, struct btrfs_free_space,
2038 : offset_index);
2039 :
2040 : /*
2041 : * If we are using the bytes index then all subsequent entries
2042 : * in this tree are going to be < bytes, so simply set the max
2043 : * extent size and exit the loop.
2044 : *
2045 : * If we're using the offset index then we need to keep going
2046 : * through the rest of the tree.
2047 : */
2048 5668222 : if (entry->bytes < *bytes) {
2049 1298339 : *max_extent_size = max(get_max_extent_size(entry),
2050 : *max_extent_size);
2051 1298339 : if (use_bytes_index)
2052 : break;
2053 1067208 : continue;
2054 : }
2055 :
2056 : /* make sure the space returned is big enough
2057 : * to match our requested alignment
2058 : */
2059 4369883 : if (*bytes >= align) {
2060 4369883 : tmp = entry->offset - ctl->start + align - 1;
2061 4369883 : tmp = div64_u64(tmp, align);
2062 4369883 : tmp = tmp * align + ctl->start;
2063 4369883 : align_off = tmp - entry->offset;
2064 : } else {
2065 0 : align_off = 0;
2066 0 : tmp = entry->offset;
2067 : }
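		/*
		 * The math above rounds entry->offset up to the next align
		 * boundary relative to ctl->start. Illustrative numbers:
		 * with ctl->start = 0, align = 64K and entry->offset = 100K,
		 * tmp = (100K + 64K - 1) / 64K * 64K = 128K, so
		 * align_off = 28K; the check below verifies the entry still
		 * has room for the request plus that gap.
		 */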
2068 :
2069 : /*
2070 : * We don't break here if we're using the bytes index because we
2071 : * may have another entry that has the correct alignment that is
2072 : * the right size, so we don't want to miss that possibility.
2073 : * At worst this adds another loop through the logic, but if we
2074 : * broke here we could prematurely ENOSPC.
2075 : */
2076 4369883 : if (entry->bytes < *bytes + align_off) {
2077 0 : *max_extent_size = max(get_max_extent_size(entry),
2078 : *max_extent_size);
2079 0 : continue;
2080 : }
2081 :
2082 4369883 : if (entry->bitmap) {
2083 702306 : struct rb_node *old_next = rb_next(node);
2084 702305 : u64 size = *bytes;
2085 :
2086 702305 : ret = search_bitmap(ctl, entry, &tmp, &size, true);
2087 702309 : if (!ret) {
2088 174428 : *offset = tmp;
2089 174428 : *bytes = size;
2090 174428 : return entry;
2091 : } else {
2092 1055762 : *max_extent_size =
2093 527881 : max(get_max_extent_size(entry),
2094 : *max_extent_size);
2095 : }
2096 :
2097 : /*
2098 : * The bitmap may have gotten re-arranged in the space
2099 : * index here because the max_extent_size may have been
2100 : * updated. Start from the beginning again if this
2101 : * happened.
2102 : */
2103 527881 : if (use_bytes_index && old_next != rb_next(node))
2104 47316 : goto again;
2105 480567 : continue;
2106 : }
2107 :
2108 3667577 : *offset = tmp;
2109 3667577 : *bytes = entry->bytes - align_off;
2110 3667577 : return entry;
2111 : }
2112 270943 : out:
2113 : return NULL;
2114 : }
2115 :
2116 1320 : static void add_new_bitmap(struct btrfs_free_space_ctl *ctl,
2117 : struct btrfs_free_space *info, u64 offset)
2118 : {
2119 1320 : info->offset = offset_to_bitmap(ctl, offset);
2120 1320 : info->bytes = 0;
2121 1320 : info->bitmap_extents = 0;
2122 1320 : INIT_LIST_HEAD(&info->list);
2123 1320 : link_free_space(ctl, info);
2124 1320 : ctl->total_bitmaps++;
2125 1320 : recalculate_thresholds(ctl);
2126 1320 : }
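/*
 * For scale (assuming 4K pages and a 4K sectorsize, a common but not
 * the only configuration): BITS_PER_BITMAP is 32768, so one bitmap
 * entry tracks 32768 * 4K = 128M of the block group, and
 * offset_to_bitmap() rounds the offset down to that 128M granularity.
 */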
2127 :
2128 1053 : static void free_bitmap(struct btrfs_free_space_ctl *ctl,
2129 : struct btrfs_free_space *bitmap_info)
2130 : {
2131 : /*
2132 : * Normally when this is called, the bitmap is completely empty. However,
2133 : * if we are blowing up the free space cache for one reason or another
2134 : * via __btrfs_remove_free_space_cache(), then the bitmap may not be
2135 : * empty and we may leave stale stats behind.
2136 : */
2137 1053 : if (bitmap_info->bytes && !btrfs_free_space_trimmed(bitmap_info)) {
2138 228 : ctl->discardable_extents[BTRFS_STAT_CURR] -=
2139 228 : bitmap_info->bitmap_extents;
2140 228 : ctl->discardable_bytes[BTRFS_STAT_CURR] -= bitmap_info->bytes;
2141 :
2142 : }
2143 1053 : unlink_free_space(ctl, bitmap_info, true);
2144 1053 : kmem_cache_free(btrfs_free_space_bitmap_cachep, bitmap_info->bitmap);
2145 1053 : kmem_cache_free(btrfs_free_space_cachep, bitmap_info);
2146 1053 : ctl->total_bitmaps--;
2147 1053 : recalculate_thresholds(ctl);
2148 1053 : }
2149 :
2150 1366 : static noinline int remove_from_bitmap(struct btrfs_free_space_ctl *ctl,
2151 : struct btrfs_free_space *bitmap_info,
2152 : u64 *offset, u64 *bytes)
2153 : {
2154 1366 : u64 end;
2155 1366 : u64 search_start, search_bytes;
2156 1366 : int ret;
2157 :
2158 1366 : again:
2159 1366 : end = bitmap_info->offset + (u64)(BITS_PER_BITMAP * ctl->unit) - 1;
2160 :
2161 : /*
2162 : * We need to search for bits in this bitmap. We could only cover some
2163 : * of the extent in this bitmap thanks to how we add space, so we need
2164 : * to search for as much of it as we can and clear that amount, and then
2165 : * go searching for the next bit.
2166 : */
2167 1366 : search_start = *offset;
2168 1366 : search_bytes = ctl->unit;
2169 1366 : search_bytes = min(search_bytes, end - search_start + 1);
2170 1366 : ret = search_bitmap(ctl, bitmap_info, &search_start, &search_bytes,
2171 : false);
2172 1366 : if (ret < 0 || search_start != *offset)
2173 : return -EINVAL;
2174 :
2175 : /* We may have found more bits than what we need */
2176 1366 : search_bytes = min(search_bytes, *bytes);
2177 :
2178 : /* Cannot clear past the end of the bitmap */
2179 1366 : search_bytes = min(search_bytes, end - search_start + 1);
2180 :
2181 1366 : bitmap_clear_bits(ctl, bitmap_info, search_start, search_bytes, true);
2182 1366 : *offset += search_bytes;
2183 1366 : *bytes -= search_bytes;
2184 :
2185 1366 : if (*bytes) {
2186 0 : struct rb_node *next = rb_next(&bitmap_info->offset_index);
2187 0 : if (!bitmap_info->bytes)
2188 0 : free_bitmap(ctl, bitmap_info);
2189 :
2190 : /*
2191 : * no entry after this bitmap, but we still have bytes to
2192 : * remove, so something has gone wrong.
2193 : */
2194 0 : if (!next)
2195 : return -EINVAL;
2196 :
2197 0 : bitmap_info = rb_entry(next, struct btrfs_free_space,
2198 : offset_index);
2199 :
2200 : /*
2201 : * if the next entry isn't a bitmap we need to return to let the
2202 : * extent stuff do its work.
2203 : */
2204 0 : if (!bitmap_info->bitmap)
2205 : return -EAGAIN;
2206 :
2207 : /*
2208 : * Ok the next item is a bitmap, but it may not actually hold
2209 : * the information for the rest of this free space stuff, so
2210 : * look for it, and if we don't find it return so we can try
2211 : * everything over again.
2212 : */
2213 0 : search_start = *offset;
2214 0 : search_bytes = ctl->unit;
2215 0 : ret = search_bitmap(ctl, bitmap_info, &search_start,
2216 : &search_bytes, false);
2217 0 : if (ret < 0 || search_start != *offset)
2218 : return -EAGAIN;
2219 :
2220 0 : goto again;
2221 1366 : } else if (!bitmap_info->bytes)
2222 12 : free_bitmap(ctl, bitmap_info);
2223 :
2224 : return 0;
2225 : }
2226 :
2227 1558463 : static u64 add_bytes_to_bitmap(struct btrfs_free_space_ctl *ctl,
2228 : struct btrfs_free_space *info, u64 offset,
2229 : u64 bytes, enum btrfs_trim_state trim_state)
2230 : {
2231 1558463 : u64 bytes_to_set = 0;
2232 1558463 : u64 end;
2233 :
2234 : /*
2235 : * This is a tradeoff to make bitmap trim state minimal. We mark the
2236 : * whole bitmap untrimmed if at any point we add untrimmed regions.
2237 : */
2238 1558463 : if (trim_state == BTRFS_TRIM_STATE_UNTRIMMED) {
2239 1516274 : if (btrfs_free_space_trimmed(info)) {
2240 1361 : ctl->discardable_extents[BTRFS_STAT_CURR] +=
2241 1361 : info->bitmap_extents;
2242 1361 : ctl->discardable_bytes[BTRFS_STAT_CURR] += info->bytes;
2243 : }
2244 1516274 : info->trim_state = BTRFS_TRIM_STATE_UNTRIMMED;
2245 : }
2246 :
2247 1558463 : end = info->offset + (u64)(BITS_PER_BITMAP * ctl->unit);
2248 :
2249 1558463 : bytes_to_set = min(end - offset, bytes);
2250 :
2251 1558463 : bitmap_set_bits(ctl, info, offset, bytes_to_set);
2252 :
2253 1558464 : return bytes_to_set;
2254 :
2255 : }
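/*
 * bytes_to_set is clamped to the end of this bitmap's window, so a
 * region straddling two bitmaps is only partially consumed here. A
 * sketch of the spill, using the 128M window from the example above:
 * adding [127M, 129M) to the bitmap covering [0, 128M) sets bits for
 * [127M, 128M) and returns 1M; the caller (the "goto again" loop in
 * insert_into_bitmap()) then retries with the remaining [128M, 129M).
 */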
2256 :
2257 3252294 : static bool use_bitmap(struct btrfs_free_space_ctl *ctl,
2258 : struct btrfs_free_space *info)
2259 : {
2260 3252294 : struct btrfs_block_group *block_group = ctl->block_group;
2261 3252294 : struct btrfs_fs_info *fs_info = block_group->fs_info;
2262 3252294 : bool forced = false;
2263 :
2264 : #ifdef CONFIG_BTRFS_DEBUG
2265 : if (btrfs_should_fragment_free_space(block_group))
2266 : forced = true;
2267 : #endif
2268 :
2269 : /* This is a way to reclaim large regions from the bitmaps. */
2270 3252294 : if (!forced && info->bytes >= FORCE_EXTENT_THRESHOLD)
2271 : return false;
2272 :
2273 : /*
2274 : * If we are below the extents threshold then we can add this as an
2275 : * extent, and don't have to deal with the bitmap
2276 : */
2277 3212288 : if (!forced && ctl->free_extents < ctl->extents_thresh) {
2278 : /*
2279 : * If this block group has some small extents we don't want to
2280 : * use up all of our free slots in the cache with them, we want
2281 : * to reserve those slots for larger extents. However, if we have plenty
2282 : * of cache left then go ahead and add them, no sense in adding
2283 : * the overhead of a bitmap if we don't have to.
2284 : */
2285 1838337 : if (info->bytes <= fs_info->sectorsize * 8) {
2286 1624518 : if (ctl->free_extents * 3 <= ctl->extents_thresh)
2287 : return false;
2288 : } else {
2289 : return false;
2290 : }
2291 : }
2292 :
2293 : /*
2294 : * The original block groups from mkfs can be really small, like 8
2295 : * megabytes, so don't bother with a bitmap for those entries. However
2296 : * some block groups can be smaller than what a bitmap would cover but
2297 : * are still large enough that they could overflow the 32k memory limit,
2298 : * so still allow those block groups to have a bitmap
2299 : * entry.
2300 : */
2301 2768655 : if (((BITS_PER_BITMAP * ctl->unit) >> 1) > block_group->length)
2302 1210229 : return false;
2303 :
2304 : return true;
2305 : }
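/*
 * Putting the thresholds together, an illustrative walk-through with a
 * 4K sectorsize: a 2M extent exceeds FORCE_EXTENT_THRESHOLD and always
 * stays an extent entry; a 16K extent (<= sectorsize * 8 = 32K) stays
 * an extent entry while no more than a third of the extent slots are in
 * use; and in a block group smaller than half a bitmap's window (64M
 * here), the answer is always false, as plain extents stay cheap enough.
 */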
2306 :
2307 : static const struct btrfs_free_space_op free_space_op = {
2308 : .use_bitmap = use_bitmap,
2309 : };
2310 :
2311 3252292 : static int insert_into_bitmap(struct btrfs_free_space_ctl *ctl,
2312 : struct btrfs_free_space *info)
2313 : {
2314 3252292 : struct btrfs_free_space *bitmap_info;
2315 3252292 : struct btrfs_block_group *block_group = NULL;
2316 3252292 : int added = 0;
2317 3252292 : u64 bytes, offset, bytes_added;
2318 3252292 : enum btrfs_trim_state trim_state;
2319 3252292 : int ret;
2320 :
2321 3252292 : bytes = info->bytes;
2322 3252292 : offset = info->offset;
2323 3252292 : trim_state = info->trim_state;
2324 :
2325 3252292 : if (!ctl->op->use_bitmap(ctl, info))
2326 : return 0;
2327 :
2328 1558426 : if (ctl->op == &free_space_op)
2329 1558426 : block_group = ctl->block_group;
2330 1558426 : again:
2331 : /*
2332 : * Since we link bitmaps right into the cluster we need to see if we
2333 : * have a cluster here; if so, and it has our bitmap, we need to add
2334 : * the free space to that bitmap.
2335 : */
2336 1561103 : if (block_group && !list_empty(&block_group->cluster_list)) {
2337 1358992 : struct btrfs_free_cluster *cluster;
2338 1358992 : struct rb_node *node;
2339 1358992 : struct btrfs_free_space *entry;
2340 :
2341 1358992 : cluster = list_entry(block_group->cluster_list.next,
2342 : struct btrfs_free_cluster,
2343 : block_group_list);
2344 1358992 : spin_lock(&cluster->lock);
2345 1358992 : node = rb_first(&cluster->root);
2346 1358992 : if (!node) {
2347 186 : spin_unlock(&cluster->lock);
2348 186 : goto no_cluster_bitmap;
2349 : }
2350 :
2351 1358806 : entry = rb_entry(node, struct btrfs_free_space, offset_index);
2352 1358806 : if (!entry->bitmap) {
2353 669772 : spin_unlock(&cluster->lock);
2354 669772 : goto no_cluster_bitmap;
2355 : }
2356 :
2357 689034 : if (entry->offset == offset_to_bitmap(ctl, offset)) {
2358 463278 : bytes_added = add_bytes_to_bitmap(ctl, entry, offset,
2359 : bytes, trim_state);
2360 463278 : bytes -= bytes_added;
2361 463278 : offset += bytes_added;
2362 : }
2363 689034 : spin_unlock(&cluster->lock);
2364 689034 : if (!bytes) {
2365 463277 : ret = 1;
2366 463277 : goto out;
2367 : }
2368 : }
2369 :
2370 427868 : no_cluster_bitmap:
2371 1097826 : bitmap_info = tree_search_offset(ctl, offset_to_bitmap(ctl, offset),
2372 : 1, 0);
2373 1097826 : if (!bitmap_info) {
2374 2640 : ASSERT(added == 0);
2375 2640 : goto new_bitmap;
2376 : }
2377 :
2378 1095186 : bytes_added = add_bytes_to_bitmap(ctl, bitmap_info, offset, bytes,
2379 : trim_state);
2380 1095186 : bytes -= bytes_added;
2381 1095186 : offset += bytes_added;
2382 1095186 : added = 0;
2383 :
2384 1095186 : if (!bytes) {
2385 1095149 : ret = 1;
2386 1095149 : goto out;
2387 : } else
2388 37 : goto again;
2389 :
2390 : new_bitmap:
2391 2640 : if (info && info->bitmap) {
2392 1320 : add_new_bitmap(ctl, info, offset);
2393 1320 : added = 1;
2394 1320 : info = NULL;
2395 1320 : goto again;
2396 : } else {
2397 1320 : spin_unlock(&ctl->tree_lock);
2398 :
2399 : /* no pre-allocated info, allocate a new one */
2400 1320 : if (!info) {
2401 0 : info = kmem_cache_zalloc(btrfs_free_space_cachep,
2402 : GFP_NOFS);
2403 0 : if (!info) {
2404 0 : spin_lock(&ctl->tree_lock);
2405 0 : ret = -ENOMEM;
2406 0 : goto out;
2407 : }
2408 : }
2409 :
2410 : /* allocate the bitmap */
2411 1320 : info->bitmap = kmem_cache_zalloc(btrfs_free_space_bitmap_cachep,
2412 : GFP_NOFS);
2413 1320 : info->trim_state = BTRFS_TRIM_STATE_TRIMMED;
2414 1320 : spin_lock(&ctl->tree_lock);
2415 1320 : if (!info->bitmap) {
2416 0 : ret = -ENOMEM;
2417 0 : goto out;
2418 : }
2419 1320 : goto again;
2420 : }
2421 :
2422 1558426 : out:
2423 1558426 : if (info) {
2424 1557106 : if (info->bitmap)
2425 0 : kmem_cache_free(btrfs_free_space_bitmap_cachep,
2426 : info->bitmap);
2427 1557106 : kmem_cache_free(btrfs_free_space_cachep, info);
2428 : }
2429 :
2430 : return ret;
2431 : }
2432 :
2433 : /*
2434 : * Free space merging rules:
2435 : * 1) Merge trimmed areas together
2436 : * 2) Let untrimmed areas coalesce with trimmed areas
2437 : * 3) Always pull neighboring regions from bitmaps
2438 : *
2439 : * The above rules are for when we merge free space based on btrfs_trim_state.
2440 : * Rules 2 and 3 are subtle because they are suboptimal, but are done for the
2441 : * same reason: to promote larger extent regions which makes life easier for
2442 : * find_free_extent(). Rule 2 enables coalescing based on the common path,
2443 : * which is returning free space from btrfs_finish_extent_commit(). So when
2444 : * free space is trimmed, it will prevent aggregating a newly trimmed region
2445 : * with untrimmed regions in the rb_tree. Rule 3 is purely to obtain larger extents
2446 : * and provide find_free_extent() with the largest extents possible hoping for
2447 : * the reuse path.
2448 : */
2449 4038579 : static bool try_merge_free_space(struct btrfs_free_space_ctl *ctl,
2450 : struct btrfs_free_space *info, bool update_stat)
2451 : {
2452 4038579 : struct btrfs_free_space *left_info = NULL;
2453 4038579 : struct btrfs_free_space *right_info;
2454 4038579 : bool merged = false;
2455 4038579 : u64 offset = info->offset;
2456 4038579 : u64 bytes = info->bytes;
2457 4038579 : const bool is_trimmed = btrfs_free_space_trimmed(info);
2458 4038579 : struct rb_node *right_prev = NULL;
2459 :
2460 : /*
2461 : * first we want to see if there is free space adjacent to the range we
2462 : * are adding, if there is remove that struct and add a new one to
2463 : * cover the entire range
2464 : */
2465 4038579 : right_info = tree_search_offset(ctl, offset + bytes, 0, 0);
2466 4038580 : if (right_info)
2467 498850 : right_prev = rb_prev(&right_info->offset_index);
2468 :
2469 498850 : if (right_prev)
2470 : left_info = rb_entry(right_prev, struct btrfs_free_space, offset_index);
2471 3578468 : else if (!right_info)
2472 3539730 : left_info = tree_search_offset(ctl, offset - 1, 0, 0);
2473 :
2474 : /* See try_merge_free_space() comment. */
2475 4038578 : if (right_info && !right_info->bitmap &&
2476 2985 : (!is_trimmed || btrfs_free_space_trimmed(right_info))) {
2477 324654 : unlink_free_space(ctl, right_info, update_stat);
2478 324654 : info->bytes += right_info->bytes;
2479 324654 : kmem_cache_free(btrfs_free_space_cachep, right_info);
2480 324654 : merged = true;
2481 : }
2482 :
2483 : /* See try_merge_free_space() comment. */
2484 4038578 : if (left_info && !left_info->bitmap &&
2485 721299 : left_info->offset + left_info->bytes == offset &&
2486 3357 : (!is_trimmed || btrfs_free_space_trimmed(left_info))) {
2487 459521 : unlink_free_space(ctl, left_info, update_stat);
2488 459521 : info->offset = left_info->offset;
2489 459521 : info->bytes += left_info->bytes;
2490 459521 : kmem_cache_free(btrfs_free_space_cachep, left_info);
2491 459521 : merged = true;
2492 : }
2493 :
2494 4038578 : return merged;
2495 : }
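/*
 * Merging sketch with illustrative ranges: when [64K, 128K) is added
 * and trimmed extent entries [0, 64K) and [128K, 192K) already exist,
 * both neighbours are unlinked and folded in, so the info being added
 * grows to [0, 192K) before the caller links it (or steals into it).
 * Per rule 2 above, an untrimmed neighbour would block the merge of a
 * trimmed info, but not the other way around.
 */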
2496 :
2497 449155 : static bool steal_from_bitmap_to_end(struct btrfs_free_space_ctl *ctl,
2498 : struct btrfs_free_space *info,
2499 : bool update_stat)
2500 : {
2501 449155 : struct btrfs_free_space *bitmap;
2502 449155 : unsigned long i;
2503 449155 : unsigned long j;
2504 449155 : const u64 end = info->offset + info->bytes;
2505 449155 : const u64 bitmap_offset = offset_to_bitmap(ctl, end);
2506 449155 : u64 bytes;
2507 :
2508 449155 : bitmap = tree_search_offset(ctl, bitmap_offset, 1, 0);
2509 449155 : if (!bitmap)
2510 : return false;
2511 :
2512 369544 : i = offset_to_bit(bitmap->offset, ctl->unit, end);
2513 369544 : j = find_next_zero_bit(bitmap->bitmap, BITS_PER_BITMAP, i);
2514 369544 : if (j == i)
2515 : return false;
2516 95005 : bytes = (j - i) * ctl->unit;
2517 95005 : info->bytes += bytes;
2518 :
2519 : /* See try_merge_free_space() comment. */
2520 95005 : if (!btrfs_free_space_trimmed(bitmap))
2521 93209 : info->trim_state = BTRFS_TRIM_STATE_UNTRIMMED;
2522 :
2523 95005 : bitmap_clear_bits(ctl, bitmap, end, bytes, update_stat);
2524 :
2525 95005 : if (!bitmap->bytes)
2526 336 : free_bitmap(ctl, bitmap);
2527 :
2528 : return true;
2529 : }
2530 :
2531 448950 : static bool steal_from_bitmap_to_front(struct btrfs_free_space_ctl *ctl,
2532 : struct btrfs_free_space *info,
2533 : bool update_stat)
2534 : {
2535 448950 : struct btrfs_free_space *bitmap;
2536 448950 : u64 bitmap_offset;
2537 448950 : unsigned long i;
2538 448950 : unsigned long j;
2539 448950 : unsigned long prev_j;
2540 448950 : u64 bytes;
2541 :
2542 448950 : bitmap_offset = offset_to_bitmap(ctl, info->offset);
2543 : /* If we're on a boundary, try the previous logical bitmap. */
2544 448950 : if (bitmap_offset == info->offset) {
2545 19380 : if (info->offset == 0)
2546 : return false;
2547 19380 : bitmap_offset = offset_to_bitmap(ctl, info->offset - 1);
2548 : }
2549 :
2550 448950 : bitmap = tree_search_offset(ctl, bitmap_offset, 1, 0);
2551 448950 : if (!bitmap)
2552 : return false;
2553 :
2554 350090 : i = offset_to_bit(bitmap->offset, ctl->unit, info->offset) - 1;
2555 350090 : j = 0;
2556 350090 : prev_j = (unsigned long)-1;
2557 3786192405 : for_each_clear_bit_from(j, bitmap->bitmap, BITS_PER_BITMAP) {
2558 3786192374 : if (j > i)
2559 : break;
2560 3785842315 : prev_j = j;
2561 : }
2562 350090 : if (prev_j == i)
2563 : return false;
2564 :
2565 44643 : if (prev_j == (unsigned long)-1)
2566 26 : bytes = (i + 1) * ctl->unit;
2567 : else
2568 44617 : bytes = (i - prev_j) * ctl->unit;
2569 :
2570 44643 : info->offset -= bytes;
2571 44643 : info->bytes += bytes;
2572 :
2573 : /* See try_merge_free_space() comment. */
2574 44643 : if (!btrfs_free_space_trimmed(bitmap))
2575 44630 : info->trim_state = BTRFS_TRIM_STATE_UNTRIMMED;
2576 :
2577 44643 : bitmap_clear_bits(ctl, bitmap, info->offset, bytes, update_stat);
2578 :
2579 44643 : if (!bitmap->bytes)
2580 18 : free_bitmap(ctl, bitmap);
2581 :
2582 : return true;
2583 : }
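/*
 * Worked example for the loop above (illustrative bits): if info starts
 * at bit 10 of the previous bitmap's window, then i = 9. With bits 6-9
 * set and bit 5 clear, prev_j lands on 5, so bytes = (9 - 5) * unit
 * covers the four trailing set bits; info is widened downwards by that
 * amount and those bits are then cleared from the bitmap.
 */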
2584 :
2585 : /*
2586 : * We always prefer to allocate from extent entries, both for clustered and
2587 : * non-clustered allocation requests. So when attempting to add a new extent
2588 : * entry, try to see if there's adjacent free space in bitmap entries, and if
2589 : * there is, migrate that space from the bitmaps to the extent.
2590 : * Like this we get better chances of satisfying space allocation requests
2591 : * because we attempt to satisfy them based on a single cache entry, and never
2592 : * on 2 or more entries - even if the entries represent a contiguous free space
2593 : * region (e.g. 1 extent entry + 1 bitmap entry starting where the extent entry
2594 : * ends).
2595 : */
2596 2349154 : static void steal_from_bitmap(struct btrfs_free_space_ctl *ctl,
2597 : struct btrfs_free_space *info,
2598 : bool update_stat)
2599 : {
2600 : /*
2601 : * Only work with disconnected entries, as we can change their offset,
2602 : * and they must be extent entries.
2603 : */
2604 2349154 : ASSERT(!info->bitmap);
2605 2349154 : ASSERT(RB_EMPTY_NODE(&info->offset_index));
2606 :
2607 2349154 : if (ctl->total_bitmaps > 0) {
2608 449155 : bool stole_end;
2609 449155 : bool stole_front = false;
2610 :
2611 449155 : stole_end = steal_from_bitmap_to_end(ctl, info, update_stat);
2612 449155 : if (ctl->total_bitmaps > 0)
2613 448950 : stole_front = steal_from_bitmap_to_front(ctl, info,
2614 : update_stat);
2615 :
2616 449155 : if (stole_end || stole_front)
2617 131000 : try_merge_free_space(ctl, info, update_stat);
2618 : }
2619 2349154 : }
2620 :
2621 3873433 : int __btrfs_add_free_space(struct btrfs_block_group *block_group,
2622 : u64 offset, u64 bytes,
2623 : enum btrfs_trim_state trim_state)
2624 : {
2625 3873433 : struct btrfs_fs_info *fs_info = block_group->fs_info;
2626 3873433 : struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
2627 3873433 : struct btrfs_free_space *info;
2628 3873433 : int ret = 0;
2629 3873433 : u64 filter_bytes = bytes;
2630 :
2631 3873433 : ASSERT(!btrfs_is_zoned(fs_info));
2632 :
2633 3873433 : info = kmem_cache_zalloc(btrfs_free_space_cachep, GFP_NOFS);
2634 3873433 : if (!info)
2635 : return -ENOMEM;
2636 :
2637 3873433 : info->offset = offset;
2638 3873433 : info->bytes = bytes;
2639 3873433 : info->trim_state = trim_state;
2640 3873433 : RB_CLEAR_NODE(&info->offset_index);
2641 3873433 : RB_CLEAR_NODE(&info->bytes_index);
2642 :
2643 3873433 : spin_lock(&ctl->tree_lock);
2644 :
2645 3873434 : if (try_merge_free_space(ctl, info, true))
2646 621141 : goto link;
2647 :
2648 : /*
2649 : * There was no extent directly to the left or right of this new
2650 : * extent, so we know we're going to have to allocate a new extent;
2651 : * before we do that, see if we need to drop this into a bitmap
2652 : */
2653 3252292 : ret = insert_into_bitmap(ctl, info);
2654 3252294 : if (ret < 0) {
2655 0 : goto out;
2656 3252294 : } else if (ret) {
2657 1558426 : ret = 0;
2658 1558426 : goto out;
2659 : }
2660 1693868 : link:
2661 : /*
2662 : * Only steal free space from adjacent bitmaps if we're sure we're not
2663 : * going to add the new free space to existing bitmap entries - because
2664 : * that would mean unnecessary work that would be reverted. Therefore
2665 : * attempt to steal space from bitmaps if we're adding an extent entry.
2666 : */
2667 2315009 : steal_from_bitmap(ctl, info, true);
2668 :
2669 2315009 : filter_bytes = max(filter_bytes, info->bytes);
2670 :
2671 2315009 : ret = link_free_space(ctl, info);
2672 2315009 : if (ret)
2673 0 : kmem_cache_free(btrfs_free_space_cachep, info);
2674 2315009 : out:
2675 3873435 : btrfs_discard_update_discardable(block_group);
2676 3873435 : spin_unlock(&ctl->tree_lock);
2677 :
2678 3873435 : if (ret) {
2679 0 : btrfs_crit(fs_info, "unable to add free space :%d", ret);
2680 3873435 : ASSERT(ret != -EEXIST);
2681 : }
2682 :
2683 3873435 : if (trim_state != BTRFS_TRIM_STATE_TRIMMED) {
2684 3704935 : btrfs_discard_check_filter(block_group, filter_bytes);
2685 3704935 : btrfs_discard_queue_work(&fs_info->discard_ctl, block_group);
2686 : }
2687 :
2688 : return ret;
2689 : }
2690 :
2691 : static int __btrfs_add_free_space_zoned(struct btrfs_block_group *block_group,
2692 : u64 bytenr, u64 size, bool used)
2693 : {
2694 : struct btrfs_space_info *sinfo = block_group->space_info;
2695 : struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
2696 : u64 offset = bytenr - block_group->start;
2697 : u64 to_free, to_unusable;
2698 : int bg_reclaim_threshold = 0;
2699 : bool initial = (size == block_group->length);
2700 : u64 reclaimable_unusable;
2701 :
2702 : WARN_ON(!initial && offset + size > block_group->zone_capacity);
2703 :
2704 : if (!initial)
2705 : bg_reclaim_threshold = READ_ONCE(sinfo->bg_reclaim_threshold);
2706 :
2707 : spin_lock(&ctl->tree_lock);
2708 : /* Count initial region as zone_unusable until it gets activated. */
2709 : if (!used)
2710 : to_free = size;
2711 : else if (initial &&
2712 : test_bit(BTRFS_FS_ACTIVE_ZONE_TRACKING, &block_group->fs_info->flags) &&
2713 : (block_group->flags & (BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_SYSTEM)))
2714 : to_free = 0;
2715 : else if (initial)
2716 : to_free = block_group->zone_capacity;
2717 : else if (offset >= block_group->alloc_offset)
2718 : to_free = size;
2719 : else if (offset + size <= block_group->alloc_offset)
2720 : to_free = 0;
2721 : else
2722 : to_free = offset + size - block_group->alloc_offset;
2723 : to_unusable = size - to_free;
2724 :
2725 : ctl->free_space += to_free;
2726 : /*
2727 : * If the block group is read-only, we should account freed space into
2728 : * bytes_readonly.
2729 : */
2730 : if (!block_group->ro)
2731 : block_group->zone_unusable += to_unusable;
2732 : spin_unlock(&ctl->tree_lock);
2733 : if (!used) {
2734 : spin_lock(&block_group->lock);
2735 : block_group->alloc_offset -= size;
2736 : spin_unlock(&block_group->lock);
2737 : }
2738 :
2739 : reclaimable_unusable = block_group->zone_unusable -
2740 : (block_group->length - block_group->zone_capacity);
2741 : /* All the region is now unusable. Mark it as unused and reclaim */
2742 : if (block_group->zone_unusable == block_group->length &&
2743 : block_group->alloc_offset) {
2744 : btrfs_mark_bg_unused(block_group);
2745 : } else if (bg_reclaim_threshold &&
2746 : reclaimable_unusable >=
2747 : mult_perc(block_group->zone_capacity, bg_reclaim_threshold)) {
2748 : btrfs_mark_bg_to_reclaim(block_group);
2749 : }
2750 :
2751 : return 0;
2752 : }
2753 :
2754 3695623 : int btrfs_add_free_space(struct btrfs_block_group *block_group,
2755 : u64 bytenr, u64 size)
2756 : {
2757 3695623 : enum btrfs_trim_state trim_state = BTRFS_TRIM_STATE_UNTRIMMED;
2758 :
2759 3695623 : if (btrfs_is_zoned(block_group->fs_info))
2760 : return __btrfs_add_free_space_zoned(block_group, bytenr, size,
2761 : true);
2762 :
2763 3695623 : if (btrfs_test_opt(block_group->fs_info, DISCARD_SYNC))
2764 39 : trim_state = BTRFS_TRIM_STATE_TRIMMED;
2765 :
2766 3695623 : return __btrfs_add_free_space(block_group, bytenr, size, trim_state);
2767 : }
2768 :
2769 0 : int btrfs_add_free_space_unused(struct btrfs_block_group *block_group,
2770 : u64 bytenr, u64 size)
2771 : {
2772 0 : if (btrfs_is_zoned(block_group->fs_info))
2773 : return __btrfs_add_free_space_zoned(block_group, bytenr, size,
2774 : false);
2775 :
2776 0 : return btrfs_add_free_space(block_group, bytenr, size);
2777 : }
2778 :
2779 : /*
2780 : * This is a subtle distinction because when adding free space back in general,
2781 : * we want it to be added as untrimmed for async discard. But in the case where we add
2782 : * it on loading of a block group, we want to consider it trimmed.
2783 : */
2784 122794 : int btrfs_add_free_space_async_trimmed(struct btrfs_block_group *block_group,
2785 : u64 bytenr, u64 size)
2786 : {
2787 122794 : enum btrfs_trim_state trim_state = BTRFS_TRIM_STATE_UNTRIMMED;
2788 :
2789 122794 : if (btrfs_is_zoned(block_group->fs_info))
2790 : return __btrfs_add_free_space_zoned(block_group, bytenr, size,
2791 : true);
2792 :
2793 122794 : if (btrfs_test_opt(block_group->fs_info, DISCARD_SYNC) ||
2794 : btrfs_test_opt(block_group->fs_info, DISCARD_ASYNC))
2795 113455 : trim_state = BTRFS_TRIM_STATE_TRIMMED;
2796 :
2797 122794 : return __btrfs_add_free_space(block_group, bytenr, size, trim_state);
2798 : }
2799 :
2800 81644 : int btrfs_remove_free_space(struct btrfs_block_group *block_group,
2801 : u64 offset, u64 bytes)
2802 : {
2803 81644 : struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
2804 81644 : struct btrfs_free_space *info;
2805 81644 : int ret;
2806 81644 : bool re_search = false;
2807 :
2808 81644 : if (btrfs_is_zoned(block_group->fs_info)) {
2809 : /*
2810 : * This can happen with conventional zones when replaying log.
2811 : * Since the allocation info of tree-log nodes is not recorded
2812 : * to the extent-tree, calculate_alloc_pointer() failed to
2813 : * advance the allocation pointer after last allocated tree log
2814 : * node blocks.
2815 : *
2816 : * This function is called from
2817 : * btrfs_pin_extent_for_log_replay() when replaying the log.
2818 : * Advance the pointer not to overwrite the tree-log nodes.
2819 : */
2820 : if (block_group->start + block_group->alloc_offset <
2821 : offset + bytes) {
2822 : block_group->alloc_offset =
2823 : offset + bytes - block_group->start;
2824 : }
2825 : return 0;
2826 : }
2827 :
2828 81644 : spin_lock(&ctl->tree_lock);
2829 :
2830 : again:
2831 123646 : ret = 0;
2832 123646 : if (!bytes)
2833 42002 : goto out_lock;
2834 :
2835 81644 : info = tree_search_offset(ctl, offset, 0, 0);
2836 81644 : if (!info) {
2837 : /*
2838 : * Oops, we didn't find an extent that matched the space we wanted
2839 : * to remove, look for a bitmap instead
2840 : */
2841 1361 : info = tree_search_offset(ctl, offset_to_bitmap(ctl, offset),
2842 : 1, 0);
2843 1361 : if (!info) {
2844 : /*
2845 : * If we found a partial bit of our free space in a
2846 : * bitmap but then couldn't find the other part, this may
2847 : * be a problem, so WARN about it.
2848 : */
2849 0 : WARN_ON(re_search);
2850 0 : goto out_lock;
2851 : }
2852 : }
2853 :
2854 81644 : re_search = false;
2855 81644 : if (!info->bitmap) {
2856 80278 : unlink_free_space(ctl, info, true);
2857 80278 : if (offset == info->offset) {
2858 42002 : u64 to_free = min(bytes, info->bytes);
2859 :
2860 42002 : info->bytes -= to_free;
2861 42002 : info->offset += to_free;
2862 42002 : if (info->bytes) {
2863 17959 : ret = link_free_space(ctl, info);
2864 17959 : WARN_ON(ret);
2865 : } else {
2866 24043 : kmem_cache_free(btrfs_free_space_cachep, info);
2867 : }
2868 :
2869 42002 : offset += to_free;
2870 42002 : bytes -= to_free;
2871 42002 : goto again;
2872 : } else {
2873 38276 : u64 old_end = info->bytes + info->offset;
2874 :
2875 38276 : info->bytes = offset - info->offset;
2876 38276 : ret = link_free_space(ctl, info);
2877 38276 : WARN_ON(ret);
2878 38276 : if (ret)
2879 0 : goto out_lock;
2880 :
2881 : /* Not enough bytes in this entry to satisfy us */
2882 38276 : if (old_end < offset + bytes) {
2883 0 : bytes -= old_end - offset;
2884 0 : offset = old_end;
2885 0 : goto again;
2886 38276 : } else if (old_end == offset + bytes) {
2887 : /* all done */
2888 12269 : goto out_lock;
2889 : }
2890 26007 : spin_unlock(&ctl->tree_lock);
2891 :
2892 26007 : ret = __btrfs_add_free_space(block_group,
2893 : offset + bytes,
2894 : old_end - (offset + bytes),
2895 : info->trim_state);
2896 26007 : WARN_ON(ret);
2897 26007 : goto out;
2898 : }
2899 : }
2900 :
2901 1366 : ret = remove_from_bitmap(ctl, info, &offset, &bytes);
2902 1366 : if (ret == -EAGAIN) {
2903 0 : re_search = true;
2904 0 : goto again;
2905 : }
2906 1366 : out_lock:
2907 55637 : btrfs_discard_update_discardable(block_group);
2908 55637 : spin_unlock(&ctl->tree_lock);
2909 81644 : out:
2910 81644 : return ret;
2911 : }
2912 :
2913 0 : void btrfs_dump_free_space(struct btrfs_block_group *block_group,
2914 : u64 bytes)
2915 : {
2916 0 : struct btrfs_fs_info *fs_info = block_group->fs_info;
2917 0 : struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
2918 0 : struct btrfs_free_space *info;
2919 0 : struct rb_node *n;
2920 0 : int count = 0;
2921 :
2922 : /*
2923 : * Zoned btrfs does not use the free space tree or clusters. Just print
2924 : * out the free space after the allocation offset.
2925 : */
2926 0 : if (btrfs_is_zoned(fs_info)) {
2927 : btrfs_info(fs_info, "free space %llu active %d",
2928 : block_group->zone_capacity - block_group->alloc_offset,
2929 : test_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE,
2930 : &block_group->runtime_flags));
2931 : return;
2932 : }
2933 :
2934 0 : spin_lock(&ctl->tree_lock);
2935 0 : for (n = rb_first(&ctl->free_space_offset); n; n = rb_next(n)) {
2936 0 : info = rb_entry(n, struct btrfs_free_space, offset_index);
2937 0 : if (info->bytes >= bytes && !block_group->ro)
2938 0 : count++;
2939 0 : btrfs_crit(fs_info, "entry offset %llu, bytes %llu, bitmap %s",
2940 : info->offset, info->bytes,
2941 : (info->bitmap) ? "yes" : "no");
2942 : }
2943 0 : spin_unlock(&ctl->tree_lock);
2944 0 : btrfs_info(fs_info, "block group has cluster?: %s",
2945 : list_empty(&block_group->cluster_list) ? "no" : "yes");
2946 0 : btrfs_info(fs_info,
2947 : "%d blocks of free space at or bigger than bytes is", count);
2948 : }
2949 :
2950 22221 : void btrfs_init_free_space_ctl(struct btrfs_block_group *block_group,
2951 : struct btrfs_free_space_ctl *ctl)
2952 : {
2953 22221 : struct btrfs_fs_info *fs_info = block_group->fs_info;
2954 :
2955 22221 : spin_lock_init(&ctl->tree_lock);
2956 22221 : ctl->unit = fs_info->sectorsize;
2957 22221 : ctl->start = block_group->start;
2958 22221 : ctl->block_group = block_group;
2959 22221 : ctl->op = &free_space_op;
2960 22221 : ctl->free_space_bytes = RB_ROOT_CACHED;
2961 22221 : INIT_LIST_HEAD(&ctl->trimming_ranges);
2962 22221 : mutex_init(&ctl->cache_writeout_mutex);
2963 :
2964 : /*
2965 : * we only want to have 32k of RAM per block group for keeping
2966 : * track of free space, and if we pass 1/2 of that we want to
2967 : * start converting things over to using bitmaps
2968 : */
2969 22221 : ctl->extents_thresh = (SZ_32K / 2) / sizeof(struct btrfs_free_space);
2970 22221 : }
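/*
 * A quick sanity check of that threshold with a hypothetical entry
 * size: SZ_32K / 2 is 16384 bytes, so if sizeof(struct btrfs_free_space)
 * were 64 bytes the initial extents_thresh would be 256 entries.
 * recalculate_thresholds() rebalances this later as bitmaps are added
 * and freed.
 */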
2971 :
2972 : /*
2973 : * for a given cluster, put all of its extents back into the free
2974 : * space cache. If the block group passed doesn't match the block group
2975 : * pointed to by the cluster, someone else raced in and freed the
2976 : * cluster already. In that case, we just return without changing anything
2977 : */
2978 9539 : static void __btrfs_return_cluster_to_free_space(
2979 : struct btrfs_block_group *block_group,
2980 : struct btrfs_free_cluster *cluster)
2981 : {
2982 9539 : struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
2983 9539 : struct rb_node *node;
2984 :
2985 9539 : lockdep_assert_held(&ctl->tree_lock);
2986 :
2987 9539 : spin_lock(&cluster->lock);
2988 9539 : if (cluster->block_group != block_group) {
2989 0 : spin_unlock(&cluster->lock);
2990 0 : return;
2991 : }
2992 :
2993 9539 : cluster->block_group = NULL;
2994 9539 : cluster->window_start = 0;
2995 9539 : list_del_init(&cluster->block_group_list);
2996 :
2997 9539 : node = rb_first(&cluster->root);
2998 43994 : while (node) {
2999 34455 : struct btrfs_free_space *entry;
3000 :
3001 34455 : entry = rb_entry(node, struct btrfs_free_space, offset_index);
3002 34455 : node = rb_next(&entry->offset_index);
3003 34455 : rb_erase(&entry->offset_index, &cluster->root);
3004 34455 : RB_CLEAR_NODE(&entry->offset_index);
3005 :
3006 34455 : if (!entry->bitmap) {
3007 : /* Merging treats extents as if they were new */
3008 34145 : if (!btrfs_free_space_trimmed(entry)) {
3009 2388 : ctl->discardable_extents[BTRFS_STAT_CURR]--;
3010 2388 : ctl->discardable_bytes[BTRFS_STAT_CURR] -=
3011 2388 : entry->bytes;
3012 : }
3013 :
3014 34145 : try_merge_free_space(ctl, entry, false);
3015 34145 : steal_from_bitmap(ctl, entry, false);
3016 :
3017 : /* As we insert directly, update these statistics */
3018 34145 : if (!btrfs_free_space_trimmed(entry)) {
3019 2440 : ctl->discardable_extents[BTRFS_STAT_CURR]++;
3020 2440 : ctl->discardable_bytes[BTRFS_STAT_CURR] +=
3021 2440 : entry->bytes;
3022 : }
3023 : }
3024 34455 : tree_insert_offset(ctl, NULL, entry);
3025 34455 : rb_add_cached(&entry->bytes_index, &ctl->free_space_bytes,
3026 : entry_less);
3027 : }
3028 9539 : cluster->root = RB_ROOT;
3029 9539 : spin_unlock(&cluster->lock);
3030 9539 : btrfs_put_block_group(block_group);
3031 : }
3032 :
3033 22201 : void btrfs_remove_free_space_cache(struct btrfs_block_group *block_group)
3034 : {
3035 22201 : struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
3036 22201 : struct btrfs_free_cluster *cluster;
3037 22201 : struct list_head *head;
3038 :
3039 22201 : spin_lock(&ctl->tree_lock);
3040 25347 : while ((head = block_group->cluster_list.next) !=
3041 25347 : &block_group->cluster_list) {
3042 3146 : cluster = list_entry(head, struct btrfs_free_cluster,
3043 : block_group_list);
3044 :
3045 3146 : WARN_ON(cluster->block_group != block_group);
3046 3146 : __btrfs_return_cluster_to_free_space(block_group, cluster);
3047 :
3048 3146 : cond_resched_lock(&ctl->tree_lock);
3049 : }
3050 22201 : __btrfs_remove_free_space_cache(ctl);
3051 22201 : btrfs_discard_update_discardable(block_group);
3052 22201 : spin_unlock(&ctl->tree_lock);
3053 :
3054 22201 : }
3055 :
3056 : /*
3057 : * Walk @block_group's free space rb_tree to determine if everything is trimmed.
3058 : */
3059 258 : bool btrfs_is_free_space_trimmed(struct btrfs_block_group *block_group)
3060 : {
3061 258 : struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
3062 258 : struct btrfs_free_space *info;
3063 258 : struct rb_node *node;
3064 258 : bool ret = true;
3065 :
3066 258 : spin_lock(&ctl->tree_lock);
3067 258 : node = rb_first(&ctl->free_space_offset);
3068 :
3069 417 : while (node) {
3070 217 : info = rb_entry(node, struct btrfs_free_space, offset_index);
3071 :
3072 217 : if (!btrfs_free_space_trimmed(info)) {
3073 : ret = false;
3074 : break;
3075 : }
3076 :
3077 159 : node = rb_next(node);
3078 : }
3079 :
3080 258 : spin_unlock(&ctl->tree_lock);
3081 258 : return ret;
3082 : }
3083 :
3084 4113991 : u64 btrfs_find_space_for_alloc(struct btrfs_block_group *block_group,
3085 : u64 offset, u64 bytes, u64 empty_size,
3086 : u64 *max_extent_size)
3087 : {
3088 4113991 : struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
3089 4113991 : struct btrfs_discard_ctl *discard_ctl =
3090 4113991 : &block_group->fs_info->discard_ctl;
3091 4113991 : struct btrfs_free_space *entry = NULL;
3092 4113991 : u64 bytes_search = bytes + empty_size;
3093 4113991 : u64 ret = 0;
3094 4113991 : u64 align_gap = 0;
3095 4113991 : u64 align_gap_len = 0;
3096 4113991 : enum btrfs_trim_state align_gap_trim_state = BTRFS_TRIM_STATE_UNTRIMMED;
3097 4113991 : bool use_bytes_index = (offset == block_group->start);
3098 :
3099 4113991 : ASSERT(!btrfs_is_zoned(block_group->fs_info));
3100 :
3101 4113991 : spin_lock(&ctl->tree_lock);
3102 4114112 : entry = find_free_space(ctl, &offset, &bytes_search,
3103 : block_group->full_stripe_len, max_extent_size,
3104 : use_bytes_index);
3105 4114113 : if (!entry)
3106 272107 : goto out;
3107 :
3108 3842006 : ret = offset;
3109 3842006 : if (entry->bitmap) {
3110 174428 : bitmap_clear_bits(ctl, entry, offset, bytes, true);
3111 :
3112 174428 : if (!btrfs_free_space_trimmed(entry))
3113 165521 : atomic64_add(bytes, &discard_ctl->discard_bytes_saved);
3114 :
3115 174429 : if (!entry->bytes)
3116 399 : free_bitmap(ctl, entry);
3117 : } else {
3118 3667578 : unlink_free_space(ctl, entry, true);
3119 3667574 : align_gap_len = offset - entry->offset;
3120 3667574 : align_gap = entry->offset;
3121 3667574 : align_gap_trim_state = entry->trim_state;
3122 :
3123 3667574 : if (!btrfs_free_space_trimmed(entry))
3124 1865477 : atomic64_add(bytes, &discard_ctl->discard_bytes_saved);
3125 :
3126 3667575 : entry->offset = offset + bytes;
3127 3667575 : WARN_ON(entry->bytes < bytes + align_gap_len);
3128 :
3129 3667575 : entry->bytes -= bytes + align_gap_len;
3130 3667575 : if (!entry->bytes)
3131 192604 : kmem_cache_free(btrfs_free_space_cachep, entry);
3132 : else
3133 3474971 : link_free_space(ctl, entry);
3134 : }
3135 4114112 : out:
3136 4114112 : btrfs_discard_update_discardable(block_group);
3137 4114082 : spin_unlock(&ctl->tree_lock);
3138 :
3139 4114103 : if (align_gap_len)
3140 0 : __btrfs_add_free_space(block_group, align_gap, align_gap_len,
3141 : align_gap_trim_state);
3142 4114103 : return ret;
3143 : }
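/*
 * Sketch of the align_gap path with made-up numbers: if the chosen
 * extent entry is [100K, 356K) and a 64K-aligned 64K allocation is
 * requested, find_free_space() returns offset 128K, the entry shrinks
 * to [192K, 356K), and the 28K front gap [100K, 128K) is handed back
 * via __btrfs_add_free_space() once the tree lock has been dropped.
 */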
3144 :
3145 : /*
3146 : * given a cluster, put all of its extents back into the free space
3147 : * cache. If a block group is passed, this function will only free
3148 : * a cluster that belongs to the passed block group.
3149 : *
3150 : * Otherwise, it'll get a reference on the block group pointed to by the
3151 : * cluster and remove the cluster from it.
3152 : */
3153 12155 : void btrfs_return_cluster_to_free_space(
3154 : struct btrfs_block_group *block_group,
3155 : struct btrfs_free_cluster *cluster)
3156 : {
3157 12155 : struct btrfs_free_space_ctl *ctl;
3158 :
3159 : /* first, get a safe pointer to the block group */
3160 12155 : spin_lock(&cluster->lock);
3161 12155 : if (!block_group) {
3162 11133 : block_group = cluster->block_group;
3163 11133 : if (!block_group) {
3164 4740 : spin_unlock(&cluster->lock);
3165 4740 : return;
3166 : }
3167 1022 : } else if (cluster->block_group != block_group) {
3168 : /* someone else has already freed it don't redo their work */
3169 1022 : spin_unlock(&cluster->lock);
3170 1022 : return;
3171 : }
3172 6393 : btrfs_get_block_group(block_group);
3173 6393 : spin_unlock(&cluster->lock);
3174 :
3175 6393 : ctl = block_group->free_space_ctl;
3176 :
3177 : /* now return any extents the cluster had on it */
3178 6393 : spin_lock(&ctl->tree_lock);
3179 6393 : __btrfs_return_cluster_to_free_space(block_group, cluster);
3180 6393 : spin_unlock(&ctl->tree_lock);
3181 :
3182 6393 : btrfs_discard_queue_work(&block_group->fs_info->discard_ctl, block_group);
3183 :
3184 : /* finally drop our ref */
3185 6393 : btrfs_put_block_group(block_group);
3186 : }
3187 :
3188 1141546 : static u64 btrfs_alloc_from_bitmap(struct btrfs_block_group *block_group,
3189 : struct btrfs_free_cluster *cluster,
3190 : struct btrfs_free_space *entry,
3191 : u64 bytes, u64 min_start,
3192 : u64 *max_extent_size)
3193 : {
3194 1141546 : struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
3195 1141546 : int err;
3196 1141546 : u64 search_start = cluster->window_start;
3197 1141546 : u64 search_bytes = bytes;
3198 1141546 : u64 ret = 0;
3199 :
3200 1141546 : search_start = min_start;
3201 1141546 : search_bytes = bytes;
3202 :
3203 1141546 : err = search_bitmap(ctl, entry, &search_start, &search_bytes, true);
3204 1141546 : if (err) {
3205 307 : *max_extent_size = max(get_max_extent_size(entry),
3206 : *max_extent_size);
3207 307 : return 0;
3208 : }
3209 :
3210 1141239 : ret = search_start;
3211 1141239 : bitmap_clear_bits(ctl, entry, ret, bytes, false);
3212 :
3213 1141239 : return ret;
3214 : }
3215 :
3216 : /*
3217 : * given a cluster, try to allocate 'bytes' from it. Returns 0
3218 : * if it couldn't find anything suitably large, or a logical disk offset
3219 : * if things worked out
3220 : */
3221 8519786 : u64 btrfs_alloc_from_cluster(struct btrfs_block_group *block_group,
3222 : struct btrfs_free_cluster *cluster, u64 bytes,
3223 : u64 min_start, u64 *max_extent_size)
3224 : {
3225 8519786 : struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
3226 8519786 : struct btrfs_discard_ctl *discard_ctl =
3227 8519786 : &block_group->fs_info->discard_ctl;
3228 8519786 : struct btrfs_free_space *entry = NULL;
3229 8519786 : struct rb_node *node;
3230 8519786 : u64 ret = 0;
3231 :
3232 8519786 : ASSERT(!btrfs_is_zoned(block_group->fs_info));
3233 :
3234 8519786 : spin_lock(&cluster->lock);
3235 8519786 : if (bytes > cluster->max_size)
3236 0 : goto out;
3237 :
3238 8519786 : if (cluster->block_group != block_group)
3239 0 : goto out;
3240 :
3241 8519786 : node = rb_first(&cluster->root);
3242 8519786 : if (!node)
3243 6000 : goto out;
3244 :
3245 : entry = rb_entry(node, struct btrfs_free_space, offset_index);
3246 8513908 : while (1) {
3247 8513908 : if (entry->bytes < bytes)
3248 0 : *max_extent_size = max(get_max_extent_size(entry),
3249 : *max_extent_size);
3250 :
3251 8513908 : if (entry->bytes < bytes ||
3252 8513908 : (!entry->bitmap && entry->offset < min_start)) {
3253 122 : node = rb_next(&entry->offset_index);
3254 122 : if (!node)
3255 : break;
3256 122 : entry = rb_entry(node, struct btrfs_free_space,
3257 : offset_index);
3258 122 : continue;
3259 : }
3260 :
3261 8513786 : if (entry->bitmap) {
3262 1141546 : ret = btrfs_alloc_from_bitmap(block_group,
3263 : cluster, entry, bytes,
3264 : cluster->window_start,
3265 : max_extent_size);
3266 1141546 : if (ret == 0) {
3267 307 : node = rb_next(&entry->offset_index);
3268 307 : if (!node)
3269 : break;
3270 0 : entry = rb_entry(node, struct btrfs_free_space,
3271 : offset_index);
3272 0 : continue;
3273 : }
3274 1141239 : cluster->window_start += bytes;
3275 : } else {
3276 7372240 : ret = entry->offset;
3277 :
3278 7372240 : entry->offset += bytes;
3279 7372240 : entry->bytes -= bytes;
3280 : }
3281 :
3282 : break;
3283 : }
3284 307 : out:
3285 8519786 : spin_unlock(&cluster->lock);
3286 :
3287 8519786 : if (!ret)
3288 : return 0;
3289 :
3290 8513479 : spin_lock(&ctl->tree_lock);
3291 :
3292 8513479 : if (!btrfs_free_space_trimmed(entry))
3293 5690150 : atomic64_add(bytes, &discard_ctl->discard_bytes_saved);
3294 :
3295 8513479 : ctl->free_space -= bytes;
3296 8513479 : if (!entry->bitmap && !btrfs_free_space_trimmed(entry))
3297 4549694 : ctl->discardable_bytes[BTRFS_STAT_CURR] -= bytes;
3298 :
3299 8513479 : spin_lock(&cluster->lock);
3300 8513479 : if (entry->bytes == 0) {
3301 1168868 : rb_erase(&entry->offset_index, &cluster->root);
3302 1168868 : ctl->free_extents--;
3303 1168868 : if (entry->bitmap) {
3304 267 : kmem_cache_free(btrfs_free_space_bitmap_cachep,
3305 : entry->bitmap);
3306 267 : ctl->total_bitmaps--;
3307 267 : recalculate_thresholds(ctl);
3308 1168601 : } else if (!btrfs_free_space_trimmed(entry)) {
3309 1154403 : ctl->discardable_extents[BTRFS_STAT_CURR]--;
3310 : }
3311 1168868 : kmem_cache_free(btrfs_free_space_cachep, entry);
3312 : }
3313 :
3314 8513479 : spin_unlock(&cluster->lock);
3315 8513479 : spin_unlock(&ctl->tree_lock);
3316 :
3317 8513479 : return ret;
3318 : }
3319 :
3320 758 : static int btrfs_bitmap_cluster(struct btrfs_block_group *block_group,
3321 : struct btrfs_free_space *entry,
3322 : struct btrfs_free_cluster *cluster,
3323 : u64 offset, u64 bytes,
3324 : u64 cont1_bytes, u64 min_bytes)
3325 : {
3326 758 : struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
3327 758 : unsigned long next_zero;
3328 758 : unsigned long i;
3329 758 : unsigned long want_bits;
3330 758 : unsigned long min_bits;
3331 758 : unsigned long found_bits;
3332 758 : unsigned long max_bits = 0;
3333 758 : unsigned long start = 0;
3334 758 : unsigned long total_found = 0;
3335 758 : int ret;
3336 :
3337 758 : lockdep_assert_held(&ctl->tree_lock);
3338 :
3339 758 : i = offset_to_bit(entry->offset, ctl->unit,
3340 758 : max_t(u64, offset, entry->offset));
3341 758 : want_bits = bytes_to_bits(bytes, ctl->unit);
3342 758 : min_bits = bytes_to_bits(min_bytes, ctl->unit);
3343 :
3344 : /*
3345 : * Don't bother looking for a cluster in this bitmap if it's heavily
3346 : * fragmented.
3347 : */
3348 758 : if (entry->max_extent_size &&
3349 : entry->max_extent_size < cont1_bytes)
3350 : return -ENOSPC;
3351 758 : again:
3352 2468 : found_bits = 0;
3353 2468 : for_each_set_bit_from(i, entry->bitmap, BITS_PER_BITMAP) {
3354 2287 : next_zero = find_next_zero_bit(entry->bitmap,
3355 : BITS_PER_BITMAP, i);
3356 2287 : if (next_zero - i >= min_bits) {
3357 2287 : found_bits = next_zero - i;
3358 2287 : if (found_bits > max_bits)
3359 : max_bits = found_bits;
3360 : break;
3361 : }
3362 0 : if (next_zero - i > max_bits)
3363 : max_bits = next_zero - i;
3364 0 : i = next_zero;
3365 : }
3366 :
3367 2468 : if (!found_bits) {
3368 181 : entry->max_extent_size = (u64)max_bits * ctl->unit;
3369 181 : return -ENOSPC;
3370 : }
3371 :
3372 2287 : if (!total_found) {
3373 588 : start = i;
3374 588 : cluster->max_size = 0;
3375 : }
3376 :
3377 2287 : total_found += found_bits;
3378 :
3379 2287 : if (cluster->max_size < found_bits * ctl->unit)
3380 803 : cluster->max_size = found_bits * ctl->unit;
3381 :
3382 2287 : if (total_found < want_bits || cluster->max_size < cont1_bytes) {
3383 1710 : i = next_zero + 1;
3384 1710 : goto again;
3385 : }
3386 :
3387 577 : cluster->window_start = start * ctl->unit + entry->offset;
3388 577 : rb_erase(&entry->offset_index, &ctl->free_space_offset);
3389 577 : rb_erase_cached(&entry->bytes_index, &ctl->free_space_bytes);
3390 :
3391 : /*
3392 : * We need to know if we're currently on the normal space index when we
3393 : * manipulate the bitmap so that we know we need to remove and re-insert
3394 : * it into the space_index tree. Clear the bytes_index node here so the
3395 : * bitmap manipulation helpers know not to mess with the space_index
3396 : * until this bitmap entry is added back into the normal cache.
3397 : */
3398 577 : RB_CLEAR_NODE(&entry->bytes_index);
3399 :
3400 577 : ret = tree_insert_offset(ctl, cluster, entry);
3401 577 : ASSERT(!ret); /* -EEXIST; Logic error */
3402 :
3403 577 : trace_btrfs_setup_cluster(block_group, cluster,
3404 577 : total_found * ctl->unit, 1);
3405 577 : return 0;
3406 : }
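
The scan above walks runs of set bits, remembers the longest run seen (which feeds entry->max_extent_size on failure), and stops at the first run of at least min_bits. Here is a self-contained sketch of the same scan, with a hand-rolled bit test standing in for the kernel's for_each_set_bit_from()/find_next_zero_bit(); everything in it is illustrative, not kernel code.

	#include <stdbool.h>
	#include <stddef.h>
	#include <stdio.h>

	static bool test_bit_(const unsigned long *map, size_t i)
	{
		return (map[i / (8 * sizeof(long))] >> (i % (8 * sizeof(long)))) & 1;
	}

	/* Return the start of the first run of >= min_bits set bits, or -1.
	 * *max_run reports the longest run seen before stopping. */
	static long find_run(const unsigned long *map, size_t nbits,
			     size_t min_bits, size_t *max_run)
	{
		size_t i = 0;

		*max_run = 0;
		while (i < nbits) {
			if (!test_bit_(map, i)) {
				i++;
				continue;
			}
			size_t next_zero = i;

			while (next_zero < nbits && test_bit_(map, next_zero))
				next_zero++;
			if (next_zero - i > *max_run)
				*max_run = next_zero - i;
			if (next_zero - i >= min_bits)
				return (long)i;
			i = next_zero;
		}
		return -1;
	}

	int main(void)
	{
		unsigned long map[1] = { 0x1CFUL };	/* set bits: 0-3 and 6-8 */
		size_t max_run;

		printf("run >= 4 starts at %ld (longest run %zu)\n",
		       find_run(map, 10, 4, &max_run), max_run);
		return 0;
	}
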
3407 :
3408 : /*
3409 : * This searches the block group for just extents to fill the cluster with.
3410 : * Try to find a cluster with at least bytes total bytes, at least one
3411 : * extent of cont1_bytes, and other extents of at least min_bytes.
3412 : */
3413 : static noinline int
3414 10476 : setup_cluster_no_bitmap(struct btrfs_block_group *block_group,
3415 : struct btrfs_free_cluster *cluster,
3416 : struct list_head *bitmaps, u64 offset, u64 bytes,
3417 : u64 cont1_bytes, u64 min_bytes)
3418 : {
3419 10476 : struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
3420 10476 : struct btrfs_free_space *first = NULL;
3421 10476 : struct btrfs_free_space *entry = NULL;
3422 10476 : struct btrfs_free_space *last;
3423 10476 : struct rb_node *node;
3424 10476 : u64 window_free;
3425 10476 : u64 max_extent;
3426 10476 : u64 total_size = 0;
3427 :
3428 10476 : lockdep_assert_held(&ctl->tree_lock);
3429 :
3430 10476 : entry = tree_search_offset(ctl, offset, 0, 1);
3431 10476 : if (!entry)
3432 : return -ENOSPC;
3433 :
3434 : /*
3435 : * We don't want bitmaps, so just move along until we find a normal
3436 : * extent entry.
3437 : */
3438 11104 : while (entry->bitmap || entry->bytes < min_bytes) {
3439 1844 : if (entry->bitmap && list_empty(&entry->list))
3440 1841 : list_add_tail(&entry->list, bitmaps);
3441 1844 : node = rb_next(&entry->offset_index);
3442 1844 : if (!node)
3443 : return -ENOSPC;
3444 : entry = rb_entry(node, struct btrfs_free_space, offset_index);
3445 : }
3446 :
3447 9260 : window_free = entry->bytes;
3448 9260 : max_extent = entry->bytes;
3449 9260 : first = entry;
3450 9260 : last = entry;
3451 :
3452 1204188 : for (node = rb_next(&entry->offset_index); node;
3453 1194928 : node = rb_next(&entry->offset_index)) {
3454 1194928 : entry = rb_entry(node, struct btrfs_free_space, offset_index);
3455 :
3456 1194928 : if (entry->bitmap) {
3457 873 : if (list_empty(&entry->list))
3458 873 : list_add_tail(&entry->list, bitmaps);
3459 873 : continue;
3460 : }
3461 :
3462 1194055 : if (entry->bytes < min_bytes)
3463 0 : continue;
3464 :
3465 1194055 : last = entry;
3466 1194055 : window_free += entry->bytes;
3467 1194055 : if (entry->bytes > max_extent)
3468 : max_extent = entry->bytes;
3469 : }
3470 :
3471 9260 : if (window_free < bytes || max_extent < cont1_bytes)
3472 : return -ENOSPC;
3473 :
3474 8962 : cluster->window_start = first->offset;
3475 :
3476 8962 : node = &first->offset_index;
3477 :
3478 : /*
3479 : * Now that we've found our entries, pull them out of the free space
3480 : * cache and put them into the cluster rbtree.
3481 : */
3482 1203509 : do {
3483 1203509 : int ret;
3484 :
3485 1203509 : entry = rb_entry(node, struct btrfs_free_space, offset_index);
3486 1203509 : node = rb_next(&entry->offset_index);
3487 1203509 : if (entry->bitmap || entry->bytes < min_bytes)
3488 763 : continue;
3489 :
3490 1202746 : rb_erase(&entry->offset_index, &ctl->free_space_offset);
3491 1202746 : rb_erase_cached(&entry->bytes_index, &ctl->free_space_bytes);
3492 1202746 : ret = tree_insert_offset(ctl, cluster, entry);
3493 1202746 : total_size += entry->bytes;
3494 1203509 : ASSERT(!ret); /* -EEXIST; Logic error */
3495 1203509 : } while (node && entry != last);
3496 :
3497 8962 : cluster->max_size = max_extent;
3498 8962 : trace_btrfs_setup_cluster(block_group, cluster, total_size, 0);
3499 8962 : return 0;
3500 : }
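
Stripped of the rbtree and list bookkeeping, the decision above is a single pass that accumulates window_free and max_extent. A hedged model of just that check follows; types and names are illustrative.

	#include <stdbool.h>
	#include <stdint.h>

	struct fs_extent { uint64_t offset, bytes; };

	/* True if these extents can seed a cluster: total free >= bytes and
	 * at least one extent >= cont1_bytes, skipping entries below
	 * min_bytes, mirroring the window_free/max_extent checks above. */
	static bool cluster_window_ok(const struct fs_extent *e, int nr,
				      uint64_t bytes, uint64_t cont1_bytes,
				      uint64_t min_bytes)
	{
		uint64_t window_free = 0, max_extent = 0;

		for (int i = 0; i < nr; i++) {
			if (e[i].bytes < min_bytes)
				continue;
			window_free += e[i].bytes;
			if (e[i].bytes > max_extent)
				max_extent = e[i].bytes;
		}
		return window_free >= bytes && max_extent >= cont1_bytes;
	}
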
3501 :
3502 : /*
3503 : * This specifically looks for bitmaps that may work in the cluster; we assume
3504 : * that we have already failed to find extents that will work.
3505 : */
3506 : static noinline int
3507 1514 : setup_cluster_bitmap(struct btrfs_block_group *block_group,
3508 : struct btrfs_free_cluster *cluster,
3509 : struct list_head *bitmaps, u64 offset, u64 bytes,
3510 : u64 cont1_bytes, u64 min_bytes)
3511 : {
3512 1514 : struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
3513 1514 : struct btrfs_free_space *entry = NULL;
3514 1514 : int ret = -ENOSPC;
3515 1514 : u64 bitmap_offset = offset_to_bitmap(ctl, offset);
3516 :
3517 1514 : if (ctl->total_bitmaps == 0)
3518 : return -ENOSPC;
3519 :
3520 : /*
3521 : * The bitmap that covers offset won't be in the list unless offset
3522 : * is just its start offset.
3523 : */
3524 1218 : if (!list_empty(bitmaps))
3525 1051 : entry = list_first_entry(bitmaps, struct btrfs_free_space, list);
3526 :
3527 1051 : if (!entry || entry->offset != bitmap_offset) {
3528 482 : entry = tree_search_offset(ctl, bitmap_offset, 1, 0);
3529 482 : if (entry && list_empty(&entry->list))
3530 286 : list_add(&entry->list, bitmaps);
3531 : }
3532 :
3533 1982 : list_for_each_entry(entry, bitmaps, list) {
3534 1341 : if (entry->bytes < bytes)
3535 583 : continue;
3536 758 : ret = btrfs_bitmap_cluster(block_group, entry, cluster, offset,
3537 : bytes, cont1_bytes, min_bytes);
3538 758 : if (!ret)
3539 : return 0;
3540 : }
3541 :
3542 : /*
3543 : * The bitmaps list has all the bitmaps that record free space
3544 : * starting after offset, so no more search is required.
3545 : */
3546 : return -ENOSPC;
3547 : }
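
The "covers offset" subtlety above comes from offset_to_bitmap() rounding an offset down to a bitmap boundary relative to the block group start. Below is a sketch of that rounding, under the assumption that it mirrors the kernel helper defined earlier in this file; parameter names are illustrative stand-ins for ctl->start, ctl->unit and BITS_PER_BITMAP.

	#include <stdint.h>

	/* Round offset down to the start of the bitmap window covering it. */
	static uint64_t bitmap_window_start(uint64_t offset, uint64_t ctl_start,
					    uint64_t unit, uint64_t bits_per_bitmap)
	{
		uint64_t bytes_per_bitmap = unit * bits_per_bitmap;

		return ctl_start + ((offset - ctl_start) / bytes_per_bitmap) *
				   bytes_per_bitmap;
	}
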
3548 :
3549 : /*
3550 : * Here we try to find a cluster of blocks in a block group. The goal
3551 : * is to find at least bytes + empty_size.
3552 : * We might not find them all in one contiguous area.
3553 : *
3554 : * Returns zero and sets up the cluster if things worked out; otherwise
3555 : * it returns -ENOSPC.
3556 : */
3557 16533 : int btrfs_find_space_cluster(struct btrfs_block_group *block_group,
3558 : struct btrfs_free_cluster *cluster,
3559 : u64 offset, u64 bytes, u64 empty_size)
3560 : {
3561 16533 : struct btrfs_fs_info *fs_info = block_group->fs_info;
3562 16533 : struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
3563 16533 : struct btrfs_free_space *entry, *tmp;
3564 16533 : LIST_HEAD(bitmaps);
3565 16533 : u64 min_bytes;
3566 16533 : u64 cont1_bytes;
3567 16533 : int ret;
3568 :
3569 : /*
3570 : * Choose the minimum extent size we'll require for this
3571 : * cluster. For SSD_SPREAD, don't allow any fragmentation.
3572 : * For metadata, allow allocations with smaller extents. For
3573 : * data, keep it dense.
3574 : */
3575 16533 : if (btrfs_test_opt(fs_info, SSD_SPREAD)) {
3576 3 : cont1_bytes = bytes + empty_size;
3577 3 : min_bytes = cont1_bytes;
3578 16530 : } else if (block_group->flags & BTRFS_BLOCK_GROUP_METADATA) {
3579 16530 : cont1_bytes = bytes;
3580 16530 : min_bytes = fs_info->sectorsize;
3581 : } else {
3582 0 : cont1_bytes = max(bytes, (bytes + empty_size) >> 2);
3583 0 : min_bytes = fs_info->sectorsize;
3584 : }
3585 :
3586 16533 : spin_lock(&ctl->tree_lock);
3587 :
3588 : /*
3589 : * If we know we don't have enough space to make a cluster don't even
3590 : * bother doing all the work to try and find one.
3591 : */
3592 16533 : if (ctl->free_space < bytes) {
3593 6057 : spin_unlock(&ctl->tree_lock);
3594 6057 : return -ENOSPC;
3595 : }
3596 :
3597 10476 : spin_lock(&cluster->lock);
3598 :
3599 : /* someone already found a cluster, hooray */
3600 10476 : if (cluster->block_group) {
3601 0 : ret = 0;
3602 0 : goto out;
3603 : }
3604 :
3605 10476 : trace_btrfs_find_cluster(block_group, offset, bytes, empty_size,
3606 : min_bytes);
3607 :
3608 10476 : ret = setup_cluster_no_bitmap(block_group, cluster, &bitmaps, offset,
3609 : bytes + empty_size,
3610 : cont1_bytes, min_bytes);
3611 10476 : if (ret)
3612 1514 : ret = setup_cluster_bitmap(block_group, cluster, &bitmaps,
3613 : offset, bytes + empty_size,
3614 : cont1_bytes, min_bytes);
3615 :
3616 : /* Clear our temporary list */
3617 13476 : list_for_each_entry_safe(entry, tmp, &bitmaps, list)
3618 3000 : list_del_init(&entry->list);
3619 :
3620 10476 : if (!ret) {
3621 9539 : btrfs_get_block_group(block_group);
3622 9539 : list_add_tail(&cluster->block_group_list,
3623 : &block_group->cluster_list);
3624 9539 : cluster->block_group = block_group;
3625 : } else {
3626 937 : trace_btrfs_failed_cluster_setup(block_group);
3627 : }
3628 10476 : out:
3629 10476 : spin_unlock(&cluster->lock);
3630 10476 : spin_unlock(&ctl->tree_lock);
3631 :
3632 10476 : return ret;
3633 : }
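
The threshold policy in the branch chain above is easier to see pulled out into one helper. A worked sketch with illustrative names; the SSD_SPREAD/metadata/data split matches the code, and a 4K sectorsize is assumed in the comments.

	#include <stdint.h>

	struct cluster_req { uint64_t cont1_bytes, min_bytes; };

	static struct cluster_req cluster_thresholds(int ssd_spread, int metadata,
						     uint64_t bytes,
						     uint64_t empty_size,
						     uint64_t sectorsize)
	{
		struct cluster_req r;

		if (ssd_spread) {
			/* No fragmentation: one extent covers the whole request. */
			r.cont1_bytes = bytes + empty_size;
			r.min_bytes = r.cont1_bytes;
		} else if (metadata) {
			/* Sector-sized (e.g. 4K) extents may pad the cluster. */
			r.cont1_bytes = bytes;
			r.min_bytes = sectorsize;
		} else {
			/* Data: the largest extent must be at least a quarter
			 * of the window, max(bytes, (bytes + empty_size) / 4). */
			r.cont1_bytes = bytes > (bytes + empty_size) / 4 ?
					bytes : (bytes + empty_size) / 4;
			r.min_bytes = sectorsize;
		}
		return r;
	}
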
3634 :
3635 : /*
3636 : * Simple code to zero out a cluster.
3637 : */
3638 6934 : void btrfs_init_free_cluster(struct btrfs_free_cluster *cluster)
3639 : {
3640 6934 : spin_lock_init(&cluster->lock);
3641 6934 : spin_lock_init(&cluster->refill_lock);
3642 6934 : cluster->root = RB_ROOT;
3643 6934 : cluster->max_size = 0;
3644 6934 : cluster->fragmented = false;
3645 6934 : INIT_LIST_HEAD(&cluster->block_group_list);
3646 6934 : cluster->block_group = NULL;
3647 6934 : }
3648 :
3649 29001 : static int do_trimming(struct btrfs_block_group *block_group,
3650 : u64 *total_trimmed, u64 start, u64 bytes,
3651 : u64 reserved_start, u64 reserved_bytes,
3652 : enum btrfs_trim_state reserved_trim_state,
3653 : struct btrfs_trim_range *trim_entry)
3654 : {
3655 29001 : struct btrfs_space_info *space_info = block_group->space_info;
3656 29001 : struct btrfs_fs_info *fs_info = block_group->fs_info;
3657 29001 : struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
3658 29001 : int ret;
3659 29001 : int update = 0;
3660 29001 : const u64 end = start + bytes;
3661 29001 : const u64 reserved_end = reserved_start + reserved_bytes;
3662 29001 : enum btrfs_trim_state trim_state = BTRFS_TRIM_STATE_UNTRIMMED;
3663 29001 : u64 trimmed = 0;
3664 :
3665 29001 : spin_lock(&space_info->lock);
3666 29001 : spin_lock(&block_group->lock);
3667 29001 : if (!block_group->ro) {
3668 29001 : block_group->reserved += reserved_bytes;
3669 29001 : space_info->bytes_reserved += reserved_bytes;
3670 29001 : update = 1;
3671 : }
3672 29001 : spin_unlock(&block_group->lock);
3673 29001 : spin_unlock(&space_info->lock);
3674 :
3675 29001 : ret = btrfs_discard_extent(fs_info, start, bytes, &trimmed);
3676 29001 : if (!ret) {
3677 29001 : *total_trimmed += trimmed;
3678 29001 : trim_state = BTRFS_TRIM_STATE_TRIMMED;
3679 : }
3680 :
3681 29001 : mutex_lock(&ctl->cache_writeout_mutex);
3682 29001 : if (reserved_start < start)
3683 1 : __btrfs_add_free_space(block_group, reserved_start,
3684 : start - reserved_start,
3685 : reserved_trim_state);
3686 29001 : if (end < reserved_end)
3687 6 : __btrfs_add_free_space(block_group, end, reserved_end - end,
3688 : reserved_trim_state);
3689 29001 : __btrfs_add_free_space(block_group, start, bytes, trim_state);
3690 29001 : list_del(&trim_entry->list);
3691 29001 : mutex_unlock(&ctl->cache_writeout_mutex);
3692 :
3693 29001 : if (update) {
3694 29001 : spin_lock(&space_info->lock);
3695 29001 : spin_lock(&block_group->lock);
3696 29001 : if (block_group->ro)
3697 0 : space_info->bytes_readonly += reserved_bytes;
3698 29001 : block_group->reserved -= reserved_bytes;
3699 29001 : space_info->bytes_reserved -= reserved_bytes;
3700 29001 : spin_unlock(&block_group->lock);
3701 29001 : spin_unlock(&space_info->lock);
3702 : }
3703 :
3704 29001 : return ret;
3705 : }
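
do_trimming() reserves a range that may be wider than what it actually discards, then hands the pieces back: the untrimmed head and tail keep their old state and the middle returns marked trimmed (the kernel re-adds each piece via __btrfs_add_free_space()). A sketch of that split with illustrative types, assuming the discard succeeded:

	#include <stdint.h>

	enum trim_state { UNTRIMMED, TRIMMED };

	struct range { uint64_t start, bytes; enum trim_state state; };

	/* out[] receives up to three ranges: untrimmed head, untrimmed tail,
	 * then the trimmed middle. Returns the count. */
	static int split_after_discard(struct range reserved, struct range trimmed,
				       struct range *out)
	{
		uint64_t end = trimmed.start + trimmed.bytes;
		uint64_t rend = reserved.start + reserved.bytes;
		int n = 0;

		if (reserved.start < trimmed.start)	/* untrimmed head */
			out[n++] = (struct range){ reserved.start,
						   trimmed.start - reserved.start,
						   reserved.state };
		if (end < rend)				/* untrimmed tail */
			out[n++] = (struct range){ end, rend - end,
						   reserved.state };
		out[n++] = (struct range){ trimmed.start, trimmed.bytes, TRIMMED };
		return n;
	}
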
3706 :
3707 : /*
3708 : * If @async is set, then we will trim 1 region and return.
3709 : */
3710 4891 : static int trim_no_bitmap(struct btrfs_block_group *block_group,
3711 : u64 *total_trimmed, u64 start, u64 end, u64 minlen,
3712 : bool async)
3713 : {
3714 4891 : struct btrfs_discard_ctl *discard_ctl =
3715 4891 : &block_group->fs_info->discard_ctl;
3716 4891 : struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
3717 4891 : struct btrfs_free_space *entry;
3718 4891 : struct rb_node *node;
3719 4891 : int ret = 0;
3720 4891 : u64 extent_start;
3721 4891 : u64 extent_bytes;
3722 4891 : enum btrfs_trim_state extent_trim_state;
3723 4891 : u64 bytes;
3724 4891 : const u64 max_discard_size = READ_ONCE(discard_ctl->max_discard_size);
3725 :
3726 81897 : while (start < end) {
3727 80591 : struct btrfs_trim_range trim_entry;
3728 :
3729 80591 : mutex_lock(&ctl->cache_writeout_mutex);
3730 80591 : spin_lock(&ctl->tree_lock);
3731 :
3732 80591 : if (ctl->free_space < minlen)
3733 468 : goto out_unlock;
3734 :
3735 80123 : entry = tree_search_offset(ctl, start, 0, 1);
3736 80123 : if (!entry)
3737 243 : goto out_unlock;
3738 :
3739 : /* Skip bitmaps and if async, already trimmed entries */
3740 84913 : while (entry->bitmap ||
3741 17224 : (async && btrfs_free_space_trimmed(entry))) {
3742 5120 : node = rb_next(&entry->offset_index);
3743 5120 : if (!node)
3744 87 : goto out_unlock;
3745 : entry = rb_entry(node, struct btrfs_free_space,
3746 : offset_index);
3747 : }
3748 :
3749 79793 : if (entry->offset >= end)
3750 0 : goto out_unlock;
3751 :
3752 79793 : extent_start = entry->offset;
3753 79793 : extent_bytes = entry->bytes;
3754 79793 : extent_trim_state = entry->trim_state;
3755 79793 : if (async) {
3756 13485 : start = entry->offset;
3757 13485 : bytes = entry->bytes;
3758 13485 : if (bytes < minlen) {
3759 10703 : spin_unlock(&ctl->tree_lock);
3760 10703 : mutex_unlock(&ctl->cache_writeout_mutex);
3761 10703 : goto next;
3762 : }
3763 2782 : unlink_free_space(ctl, entry, true);
3764 : /*
3765 : * Let bytes = BTRFS_MAX_DISCARD_SIZE + X.
3766 : * If X < BTRFS_ASYNC_DISCARD_MIN_FILTER, we won't trim
3767 : * X when we come back around. So trim it now.
3768 : */
3769 2782 : if (max_discard_size &&
3770 2782 : bytes >= (max_discard_size +
3771 : BTRFS_ASYNC_DISCARD_MIN_FILTER)) {
3772 889 : bytes = max_discard_size;
3773 889 : extent_bytes = max_discard_size;
3774 889 : entry->offset += max_discard_size;
3775 889 : entry->bytes -= max_discard_size;
3776 889 : link_free_space(ctl, entry);
3777 : } else {
3778 1893 : kmem_cache_free(btrfs_free_space_cachep, entry);
3779 : }
3780 : } else {
3781 66308 : start = max(start, extent_start);
3782 66308 : bytes = min(extent_start + extent_bytes, end) - start;
3783 66308 : if (bytes < minlen) {
3784 53771 : spin_unlock(&ctl->tree_lock);
3785 53771 : mutex_unlock(&ctl->cache_writeout_mutex);
3786 53771 : goto next;
3787 : }
3788 :
3789 12537 : unlink_free_space(ctl, entry, true);
3790 12537 : kmem_cache_free(btrfs_free_space_cachep, entry);
3791 : }
3792 :
3793 15319 : spin_unlock(&ctl->tree_lock);
3794 15319 : trim_entry.start = extent_start;
3795 15319 : trim_entry.bytes = extent_bytes;
3796 15319 : list_add_tail(&trim_entry.list, &ctl->trimming_ranges);
3797 15319 : mutex_unlock(&ctl->cache_writeout_mutex);
3798 :
3799 15319 : ret = do_trimming(block_group, total_trimmed, start, bytes,
3800 : extent_start, extent_bytes, extent_trim_state,
3801 : &trim_entry);
3802 15319 : if (ret) {
3803 0 : block_group->discard_cursor = start + bytes;
3804 2787 : break;
3805 : }
3806 15319 : next:
3807 79793 : start += bytes;
3808 79793 : block_group->discard_cursor = start;
3809 79793 : if (async && *total_trimmed)
3810 : break;
3811 :
3812 77011 : if (fatal_signal_pending(current)) {
3813 : ret = -ERESTARTSYS;
3814 : break;
3815 : }
3816 :
3817 77006 : cond_resched();
3818 : }
3819 :
3820 : return ret;
3821 :
3822 : out_unlock:
3823 798 : block_group->discard_cursor = btrfs_block_group_end(block_group);
3824 798 : spin_unlock(&ctl->tree_lock);
3825 798 : mutex_unlock(&ctl->cache_writeout_mutex);
3826 :
3827 798 : return ret;
3828 : }
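
The splitting rule in the async branch deserves a second look: if clamping to max_discard_size would leave a remainder smaller than the async-discard filter, that remainder would never be revisited, so the whole entry is discarded at once instead. A sketch of the rule; the parameter names are illustrative, and the real constants live in discard.h.

	#include <stdint.h>

	/* Size of the chunk to discard now; *remainder is re-linked for a
	 * later pass and is always either zero or >= min_filter. */
	static uint64_t discard_chunk(uint64_t bytes, uint64_t max_discard_size,
				      uint64_t min_filter, uint64_t *remainder)
	{
		if (max_discard_size && bytes >= max_discard_size + min_filter) {
			*remainder = bytes - max_discard_size;
			return max_discard_size;
		}
		*remainder = 0;	/* a sub-filter tail is absorbed, not orphaned */
		return bytes;
	}
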
3829 :
3830 : /*
3831 : * If we break out of trimming a bitmap prematurely, we should reset the
3832 : * trimming bit. In a rather contrived case, it's possible to race here, so
3833 : * reset the state to BTRFS_TRIM_STATE_UNTRIMMED.
3834 : *
3835 : * start = start of bitmap
3836 : * end = near end of bitmap
3837 : *
3838 : * Thread 1: Thread 2:
3839 : * trim_bitmaps(start)
3840 : * trim_bitmaps(end)
3841 : * end_trimming_bitmap()
3842 : * reset_trimming_bitmap()
3843 : */
3844 1975 : static void reset_trimming_bitmap(struct btrfs_free_space_ctl *ctl, u64 offset)
3845 : {
3846 1975 : struct btrfs_free_space *entry;
3847 :
3848 1975 : spin_lock(&ctl->tree_lock);
3849 1975 : entry = tree_search_offset(ctl, offset, 1, 0);
3850 1975 : if (entry) {
3851 2 : if (btrfs_free_space_trimmed(entry)) {
3852 0 : ctl->discardable_extents[BTRFS_STAT_CURR] +=
3853 0 : entry->bitmap_extents;
3854 0 : ctl->discardable_bytes[BTRFS_STAT_CURR] += entry->bytes;
3855 : }
3856 2 : entry->trim_state = BTRFS_TRIM_STATE_UNTRIMMED;
3857 : }
3858 :
3859 1975 : spin_unlock(&ctl->tree_lock);
3860 1975 : }
3861 :
3862 : static void end_trimming_bitmap(struct btrfs_free_space_ctl *ctl,
3863 : struct btrfs_free_space *entry)
3864 : {
3865 337 : if (btrfs_free_space_trimming_bitmap(entry)) {
3866 334 : entry->trim_state = BTRFS_TRIM_STATE_TRIMMED;
3867 334 : ctl->discardable_extents[BTRFS_STAT_CURR] -=
3868 334 : entry->bitmap_extents;
3869 334 : ctl->discardable_bytes[BTRFS_STAT_CURR] -= entry->bytes;
3870 : }
3871 : }
3872 :
3873 : /*
3874 : * If @async is set, then we will trim 1 region and return.
3875 : */
3876 3487 : static int trim_bitmaps(struct btrfs_block_group *block_group,
3877 : u64 *total_trimmed, u64 start, u64 end, u64 minlen,
3878 : u64 maxlen, bool async)
3879 : {
3880 3487 : struct btrfs_discard_ctl *discard_ctl =
3881 3487 : &block_group->fs_info->discard_ctl;
3882 3487 : struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
3883 3487 : struct btrfs_free_space *entry;
3884 3487 : int ret = 0;
3885 3487 : int ret2;
3886 3487 : u64 bytes;
3887 3487 : u64 offset = offset_to_bitmap(ctl, start);
3888 3487 : const u64 max_discard_size = READ_ONCE(discard_ctl->max_discard_size);
3889 :
3890 26191 : while (offset < end) {
3891 24419 : bool next_bitmap = false;
3892 24419 : struct btrfs_trim_range trim_entry;
3893 :
3894 24419 : mutex_lock(&ctl->cache_writeout_mutex);
3895 24419 : spin_lock(&ctl->tree_lock);
3896 :
3897 24419 : if (ctl->free_space < minlen) {
3898 470 : block_group->discard_cursor =
3899 : btrfs_block_group_end(block_group);
3900 470 : spin_unlock(&ctl->tree_lock);
3901 470 : mutex_unlock(&ctl->cache_writeout_mutex);
3902 940 : break;
3903 : }
3904 :
3905 23949 : entry = tree_search_offset(ctl, offset, 1, 0);
3906 : /*
3907 : * Bitmaps are marked trimmed lossily now to prevent constant
3908 : * discarding of the same bitmap (the reason why we are bound
3909 : * by the filters). So, retrim the block group bitmaps when we
3910 : * are preparing to punt to the unused_bgs list. This uses
3911 : * @minlen to determine if we are in BTRFS_DISCARD_INDEX_UNUSED
3912 : * which is the only discard index that sets minlen to 0.
3913 : */
3914 23949 : if (!entry || (async && minlen && start == offset &&
3915 : btrfs_free_space_trimmed(entry))) {
3916 7664 : spin_unlock(&ctl->tree_lock);
3917 7664 : mutex_unlock(&ctl->cache_writeout_mutex);
3918 7664 : next_bitmap = true;
3919 7664 : goto next;
3920 : }
3921 :
3922 : /*
3923 : * Async discard bitmap trimming begins by setting the start
3924 : * to be key.objectid and the offset_to_bitmap() aligns to the
3925 : * start of the bitmap. This lets us know we are fully
3926 : * scanning the bitmap rather than only some portion of it.
3927 : */
3928 16285 : if (start == offset)
3929 1414 : entry->trim_state = BTRFS_TRIM_STATE_TRIMMING;
3930 :
3931 16285 : bytes = minlen;
3932 16285 : ret2 = search_bitmap(ctl, entry, &start, &bytes, false);
3933 16285 : if (ret2 || start >= end) {
3934 : /*
3935 : * We lossily consider a bitmap trimmed if we only skip
3936 : * over regions <= BTRFS_ASYNC_DISCARD_MIN_FILTER.
3937 : */
3938 1358 : if (ret2 && minlen <= BTRFS_ASYNC_DISCARD_MIN_FILTER)
3939 337 : end_trimming_bitmap(ctl, entry);
3940 : else
3941 1021 : entry->trim_state = BTRFS_TRIM_STATE_UNTRIMMED;
3942 1358 : spin_unlock(&ctl->tree_lock);
3943 1358 : mutex_unlock(&ctl->cache_writeout_mutex);
3944 1358 : next_bitmap = true;
3945 1358 : goto next;
3946 : }
3947 :
3948 : /*
3949 : * We already trimmed a region, but are using the locking above
3950 : * to reset the trim_state.
3951 : */
3952 14927 : if (async && *total_trimmed) {
3953 1245 : spin_unlock(&ctl->tree_lock);
3954 1245 : mutex_unlock(&ctl->cache_writeout_mutex);
3955 1245 : goto out;
3956 : }
3957 :
3958 13682 : bytes = min(bytes, end - start);
3959 13682 : if (bytes < minlen || (async && maxlen && bytes > maxlen)) {
3960 0 : spin_unlock(&ctl->tree_lock);
3961 0 : mutex_unlock(&ctl->cache_writeout_mutex);
3962 0 : goto next;
3963 : }
3964 :
3965 : /*
3966 : * Let bytes = BTRFS_MAX_DISCARD_SIZE + X.
3967 : * If X < @minlen, we won't trim X when we come back around.
3968 : * So trim it now. We differ here from trimming extents as we
3969 : * don't keep individual state per bit.
3970 : */
3971 13682 : if (async &&
3972 1259 : max_discard_size &&
3973 1259 : bytes > (max_discard_size + minlen))
3974 0 : bytes = max_discard_size;
3975 :
3976 13682 : bitmap_clear_bits(ctl, entry, start, bytes, true);
3977 13682 : if (entry->bytes == 0)
3978 1 : free_bitmap(ctl, entry);
3979 :
3980 13682 : spin_unlock(&ctl->tree_lock);
3981 13682 : trim_entry.start = start;
3982 13682 : trim_entry.bytes = bytes;
3983 13682 : list_add_tail(&trim_entry.list, &ctl->trimming_ranges);
3984 13682 : mutex_unlock(&ctl->cache_writeout_mutex);
3985 :
3986 13682 : ret = do_trimming(block_group, total_trimmed, start, bytes,
3987 : start, bytes, 0, &trim_entry);
3988 13682 : if (ret) {
3989 0 : reset_trimming_bitmap(ctl, offset);
3990 0 : block_group->discard_cursor =
3991 : btrfs_block_group_end(block_group);
3992 0 : break;
3993 : }
3994 13682 : next:
3995 9022 : if (next_bitmap) {
3996 9022 : offset += BITS_PER_BITMAP * ctl->unit;
3997 9022 : start = offset;
3998 : } else {
3999 13682 : start += bytes;
4000 : }
4001 22704 : block_group->discard_cursor = start;
4002 :
4003 22704 : if (fatal_signal_pending(current)) {
4004 0 : if (start != offset)
4005 0 : reset_trimming_bitmap(ctl, offset);
4006 : ret = -ERESTARTSYS;
4007 : break;
4008 : }
4009 :
4010 22704 : cond_resched();
4011 : }
4012 :
4013 2242 : if (offset >= end)
4014 1772 : block_group->discard_cursor = end;
4015 :
4016 470 : out:
4017 3487 : return ret;
4018 : }
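
The end-of-scan handling above boils down to one decision: a failed search still counts as "trimmed" when the pass can only have skipped regions at or below the minimum async-discard filter; otherwise the TRIMMING state is rolled back. A minimal sketch of that decision (names are illustrative; the kernel additionally requires the bitmap to have been fully scanned, i.e. currently in the TRIMMING state, before marking it trimmed):

	#include <stdint.h>

	enum bitmap_trim_state { BITMAP_UNTRIMMED, BITMAP_TRIMMING, BITMAP_TRIMMED };

	/* Mirrors: ret2 && minlen <= BTRFS_ASYNC_DISCARD_MIN_FILTER. */
	static enum bitmap_trim_state finish_bitmap_pass(int search_failed,
							 uint64_t minlen,
							 uint64_t min_filter)
	{
		if (search_failed && minlen <= min_filter)
			return BITMAP_TRIMMED;	/* lossily consider it done */
		return BITMAP_UNTRIMMED;	/* force a retrim later */
	}
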
4019 :
4020 1980 : int btrfs_trim_block_group(struct btrfs_block_group *block_group,
4021 : u64 *trimmed, u64 start, u64 end, u64 minlen)
4022 : {
4023 1980 : struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
4024 1980 : int ret;
4025 1980 : u64 rem = 0;
4026 :
4027 1980 : ASSERT(!btrfs_is_zoned(block_group->fs_info));
4028 :
4029 1980 : *trimmed = 0;
4030 :
4031 1980 : spin_lock(&block_group->lock);
4032 3960 : if (test_bit(BLOCK_GROUP_FLAG_REMOVED, &block_group->runtime_flags)) {
4033 0 : spin_unlock(&block_group->lock);
4034 0 : return 0;
4035 : }
4036 1980 : btrfs_freeze_block_group(block_group);
4037 1980 : spin_unlock(&block_group->lock);
4038 :
4039 1980 : ret = trim_no_bitmap(block_group, trimmed, start, end, minlen, false);
4040 1980 : if (ret)
4041 5 : goto out;
4042 :
4043 1975 : ret = trim_bitmaps(block_group, trimmed, start, end, minlen, 0, false);
4044 1975 : div64_u64_rem(end, BITS_PER_BITMAP * ctl->unit, &rem);
4045 : /* If we ended in the middle of a bitmap, reset the trimming flag */
4046 1975 : if (rem)
4047 1975 : reset_trimming_bitmap(ctl, offset_to_bitmap(ctl, end));
4048 0 : out:
4049 1980 : btrfs_unfreeze_block_group(block_group);
4050 1980 : return ret;
4051 : }
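
The div64_u64_rem() check works because one bitmap covers BITS_PER_BITMAP * ctl->unit bytes, so a non-zero remainder means the trim range ended inside a bitmap that was only partially scanned. A worked example, assuming 4K pages and a 4K unit (32768 bits of 4K, i.e. 128M per bitmap):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint64_t unit = 4096;			/* ctl->unit (sectorsize) */
		uint64_t bits_per_bitmap = 4096 * 8;	/* PAGE_SIZE * 8 */
		uint64_t bitmap_bytes = bits_per_bitmap * unit;	/* 128M */
		uint64_t end = 300ULL << 20;		/* trim ends at 300M */

		/* 300M % 128M = 44M: non-zero, so the third bitmap was only
		 * partially scanned and its trim state must be reset. */
		printf("rem = %llu\n", (unsigned long long)(end % bitmap_bytes));
		return 0;
	}
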
4052 :
4053 2911 : int btrfs_trim_block_group_extents(struct btrfs_block_group *block_group,
4054 : u64 *trimmed, u64 start, u64 end, u64 minlen,
4055 : bool async)
4056 : {
4057 2911 : int ret;
4058 :
4059 2911 : *trimmed = 0;
4060 :
4061 2911 : spin_lock(&block_group->lock);
4062 5822 : if (test_bit(BLOCK_GROUP_FLAG_REMOVED, &block_group->runtime_flags)) {
4063 0 : spin_unlock(&block_group->lock);
4064 0 : return 0;
4065 : }
4066 2911 : btrfs_freeze_block_group(block_group);
4067 2911 : spin_unlock(&block_group->lock);
4068 :
4069 2911 : ret = trim_no_bitmap(block_group, trimmed, start, end, minlen, async);
4070 2911 : btrfs_unfreeze_block_group(block_group);
4071 :
4072 2911 : return ret;
4073 : }
4074 :
4075 1512 : int btrfs_trim_block_group_bitmaps(struct btrfs_block_group *block_group,
4076 : u64 *trimmed, u64 start, u64 end, u64 minlen,
4077 : u64 maxlen, bool async)
4078 : {
4079 1512 : int ret;
4080 :
4081 1512 : *trimmed = 0;
4082 :
4083 1512 : spin_lock(&block_group->lock);
4084 3024 : if (test_bit(BLOCK_GROUP_FLAG_REMOVED, &block_group->runtime_flags)) {
4085 0 : spin_unlock(&block_group->lock);
4086 0 : return 0;
4087 : }
4088 1512 : btrfs_freeze_block_group(block_group);
4089 1512 : spin_unlock(&block_group->lock);
4090 :
4091 1512 : ret = trim_bitmaps(block_group, trimmed, start, end, minlen, maxlen,
4092 : async);
4093 :
4094 1512 : btrfs_unfreeze_block_group(block_group);
4095 :
4096 1512 : return ret;
4097 : }
4098 :
4099 1255536 : bool btrfs_free_space_cache_v1_active(struct btrfs_fs_info *fs_info)
4100 : {
4101 1255536 : return btrfs_super_cache_generation(fs_info->super_copy);
4102 : }
4103 :
4104 2 : static int cleanup_free_space_cache_v1(struct btrfs_fs_info *fs_info,
4105 : struct btrfs_trans_handle *trans)
4106 : {
4107 2 : struct btrfs_block_group *block_group;
4108 2 : struct rb_node *node;
4109 2 : int ret = 0;
4110 :
4111 2 : btrfs_info(fs_info, "cleaning free space cache v1");
4112 :
4113 2 : node = rb_first_cached(&fs_info->block_group_cache_tree);
4114 8 : while (node) {
4115 6 : block_group = rb_entry(node, struct btrfs_block_group, cache_node);
4116 6 : ret = btrfs_remove_free_space_inode(trans, NULL, block_group);
4117 6 : if (ret)
4118 0 : goto out;
4119 6 : node = rb_next(node);
4120 : }
4121 2 : out:
4122 2 : return ret;
4123 : }
4124 :
4125 2 : int btrfs_set_free_space_cache_v1_active(struct btrfs_fs_info *fs_info, bool active)
4126 : {
4127 2 : struct btrfs_trans_handle *trans;
4128 2 : int ret;
4129 :
4130 : /*
4131 : * update_super_roots will appropriately set or unset
4132 : * super_copy->cache_generation based on SPACE_CACHE and
4133 : * BTRFS_FS_CLEANUP_SPACE_CACHE_V1. For this reason, we need a
4134 : * transaction commit whether we are enabling space cache v1 and don't
4135 : * have any other work to do, or are disabling it and removing free
4136 : * space inodes.
4137 : */
4138 2 : trans = btrfs_start_transaction(fs_info->tree_root, 0);
4139 2 : if (IS_ERR(trans))
4140 0 : return PTR_ERR(trans);
4141 :
4142 2 : if (!active) {
4143 2 : set_bit(BTRFS_FS_CLEANUP_SPACE_CACHE_V1, &fs_info->flags);
4144 2 : ret = cleanup_free_space_cache_v1(fs_info, trans);
4145 2 : if (ret) {
4146 0 : btrfs_abort_transaction(trans, ret);
4147 0 : btrfs_end_transaction(trans);
4148 0 : goto out;
4149 : }
4150 : }
4151 :
4152 2 : ret = btrfs_commit_transaction(trans);
4153 2 : out:
4154 2 : clear_bit(BTRFS_FS_CLEANUP_SPACE_CACHE_V1, &fs_info->flags);
4155 :
4156 2 : return ret;
4157 : }
4158 :
4159 11 : int __init btrfs_free_space_init(void)
4160 : {
4161 11 : btrfs_free_space_cachep = kmem_cache_create("btrfs_free_space",
4162 : sizeof(struct btrfs_free_space), 0,
4163 : SLAB_MEM_SPREAD, NULL);
4164 11 : if (!btrfs_free_space_cachep)
4165 : return -ENOMEM;
4166 :
4167 11 : btrfs_free_space_bitmap_cachep = kmem_cache_create("btrfs_free_space_bitmap",
4168 : PAGE_SIZE, PAGE_SIZE,
4169 : SLAB_MEM_SPREAD, NULL);
4170 11 : if (!btrfs_free_space_bitmap_cachep) {
4171 0 : kmem_cache_destroy(btrfs_free_space_cachep);
4172 0 : return -ENOMEM;
4173 : }
4174 :
4175 : return 0;
4176 : }
4177 :
4178 0 : void __cold btrfs_free_space_exit(void)
4179 : {
4180 0 : kmem_cache_destroy(btrfs_free_space_cachep);
4181 0 : kmem_cache_destroy(btrfs_free_space_bitmap_cachep);
4182 0 : }
4183 :
4184 : #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
4185 : /*
4186 : * Use this if you need to make a bitmap or extent entry specifically. It
4187 : * doesn't do any of the merging that add_free_space does; it acts a lot like
4188 : * how the free space cache loading works, so you can get really weird
4189 : * configurations.
4190 : */
4191 : int test_add_free_space_entry(struct btrfs_block_group *cache,
4192 : u64 offset, u64 bytes, bool bitmap)
4193 : {
4194 : struct btrfs_free_space_ctl *ctl = cache->free_space_ctl;
4195 : struct btrfs_free_space *info = NULL, *bitmap_info;
4196 : void *map = NULL;
4197 : enum btrfs_trim_state trim_state = BTRFS_TRIM_STATE_TRIMMED;
4198 : u64 bytes_added;
4199 : int ret;
4200 :
4201 : again:
4202 : if (!info) {
4203 : info = kmem_cache_zalloc(btrfs_free_space_cachep, GFP_NOFS);
4204 : if (!info)
4205 : return -ENOMEM;
4206 : }
4207 :
4208 : if (!bitmap) {
4209 : spin_lock(&ctl->tree_lock);
4210 : info->offset = offset;
4211 : info->bytes = bytes;
4212 : info->max_extent_size = 0;
4213 : ret = link_free_space(ctl, info);
4214 : spin_unlock(&ctl->tree_lock);
4215 : if (ret)
4216 : kmem_cache_free(btrfs_free_space_cachep, info);
4217 : return ret;
4218 : }
4219 :
4220 : if (!map) {
4221 : map = kmem_cache_zalloc(btrfs_free_space_bitmap_cachep, GFP_NOFS);
4222 : if (!map) {
4223 : kmem_cache_free(btrfs_free_space_cachep, info);
4224 : return -ENOMEM;
4225 : }
4226 : }
4227 :
4228 : spin_lock(&ctl->tree_lock);
4229 : bitmap_info = tree_search_offset(ctl, offset_to_bitmap(ctl, offset),
4230 : 1, 0);
4231 : if (!bitmap_info) {
4232 : info->bitmap = map;
4233 : map = NULL;
4234 : add_new_bitmap(ctl, info, offset);
4235 : bitmap_info = info;
4236 : info = NULL;
4237 : }
4238 :
4239 : bytes_added = add_bytes_to_bitmap(ctl, bitmap_info, offset, bytes,
4240 : trim_state);
4241 :
4242 : bytes -= bytes_added;
4243 : offset += bytes_added;
4244 : spin_unlock(&ctl->tree_lock);
4245 :
4246 : if (bytes)
4247 : goto again;
4248 :
4249 : if (info)
4250 : kmem_cache_free(btrfs_free_space_cachep, info);
4251 : if (map)
4252 : kmem_cache_free(btrfs_free_space_bitmap_cachep, map);
4253 : return 0;
4254 : }
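
A hedged usage sketch of this helper, modelled loosely on the free-space self-tests: seed a block group with one extent entry and one bitmap entry, then probe for overlap. The demo_* wrapper is hypothetical, and error handling is minimal.

	static int demo_free_space_entries(struct btrfs_block_group *cache)
	{
		int ret;

		/* 0..4M as a plain extent entry */
		ret = test_add_free_space_entry(cache, 0, SZ_4M, false);
		if (ret)
			return ret;

		/* 4M..8M recorded in a bitmap instead */
		ret = test_add_free_space_entry(cache, SZ_4M, SZ_4M, true);
		if (ret)
			return ret;

		/* Returns 1 because free space overlaps [1M, 1M + 4K). */
		return test_check_exists(cache, SZ_1M, SZ_4K);
	}
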
4255 :
4256 : /*
4257 : * Checks to see if the given range is in the free space cache. This is really
4258 : * just used to check the absence of space, so if there is free space in the
4259 : * range at all we will return 1.
4260 : */
4261 : int test_check_exists(struct btrfs_block_group *cache,
4262 : u64 offset, u64 bytes)
4263 : {
4264 : struct btrfs_free_space_ctl *ctl = cache->free_space_ctl;
4265 : struct btrfs_free_space *info;
4266 : int ret = 0;
4267 :
4268 : spin_lock(&ctl->tree_lock);
4269 : info = tree_search_offset(ctl, offset, 0, 0);
4270 : if (!info) {
4271 : info = tree_search_offset(ctl, offset_to_bitmap(ctl, offset),
4272 : 1, 0);
4273 : if (!info)
4274 : goto out;
4275 : }
4276 :
4277 : have_info:
4278 : if (info->bitmap) {
4279 : u64 bit_off, bit_bytes;
4280 : struct rb_node *n;
4281 : struct btrfs_free_space *tmp;
4282 :
4283 : bit_off = offset;
4284 : bit_bytes = ctl->unit;
4285 : ret = search_bitmap(ctl, info, &bit_off, &bit_bytes, false);
4286 : if (!ret) {
4287 : if (bit_off == offset) {
4288 : ret = 1;
4289 : goto out;
4290 : } else if (bit_off > offset &&
4291 : offset + bytes > bit_off) {
4292 : ret = 1;
4293 : goto out;
4294 : }
4295 : }
4296 :
4297 : n = rb_prev(&info->offset_index);
4298 : while (n) {
4299 : tmp = rb_entry(n, struct btrfs_free_space,
4300 : offset_index);
4301 : if (tmp->offset + tmp->bytes < offset)
4302 : break;
4303 : if (offset + bytes < tmp->offset) {
4304 : n = rb_prev(&tmp->offset_index);
4305 : continue;
4306 : }
4307 : info = tmp;
4308 : goto have_info;
4309 : }
4310 :
4311 : n = rb_next(&info->offset_index);
4312 : while (n) {
4313 : tmp = rb_entry(n, struct btrfs_free_space,
4314 : offset_index);
4315 : if (offset + bytes < tmp->offset)
4316 : break;
4317 : if (tmp->offset + tmp->bytes < offset) {
4318 : n = rb_next(&tmp->offset_index);
4319 : continue;
4320 : }
4321 : info = tmp;
4322 : goto have_info;
4323 : }
4324 :
4325 : ret = 0;
4326 : goto out;
4327 : }
4328 :
4329 : if (info->offset == offset) {
4330 : ret = 1;
4331 : goto out;
4332 : }
4333 :
4334 : if (offset > info->offset && offset < info->offset + info->bytes)
4335 : ret = 1;
4336 : out:
4337 : spin_unlock(&ctl->tree_lock);
4338 : return ret;
4339 : }
4340 : #endif /* CONFIG_BTRFS_FS_RUN_SANITY_TESTS */
|