Line data Source code
1 : // SPDX-License-Identifier: GPL-2.0
2 :
3 : #include "misc.h"
4 : #include "ctree.h"
5 : #include "space-info.h"
6 : #include "sysfs.h"
7 : #include "volumes.h"
8 : #include "free-space-cache.h"
9 : #include "ordered-data.h"
10 : #include "transaction.h"
11 : #include "block-group.h"
12 : #include "zoned.h"
13 : #include "fs.h"
14 : #include "accessors.h"
15 : #include "extent-tree.h"
16 :
17 : /*
18 : * HOW DOES SPACE RESERVATION WORK
19 : *
20 : * If you want to know about delalloc specifically, there is a separate comment
21 : * for that with the delalloc code. This comment is about how the whole system
22 : * works generally.
23 : *
24 : * BASIC CONCEPTS
25 : *
26 : * 1) space_info. This is the ultimate arbiter of how much space we can use.
27 : * There's a description of the bytes_ fields with the struct declaration,
28 : * refer to that for specifics on each field. Suffice it to say that for
29 : * reservations we care about total_bytes - SUM(space_info->bytes_) when
30 : * determining if there is space to make an allocation. There is a space_info
31 : * for METADATA, SYSTEM, and DATA areas.
32 : *
33 : * 2) block_rsv's. These are basically buckets for every different type of
34 : * metadata reservation we have. You can see the comment in the block_rsv
35 : * code on the rules for each type, but generally block_rsv->reserved is how
36 : * much space is accounted for in space_info->bytes_may_use.
37 : *
38 : * 3) btrfs_calc*_size. These are the worst case calculations we use based
39 : * on the number of items we will want to modify. We have one for changing
40 : * items, and one for inserting new items. Generally we use these helpers to
41 : * determine the size of the block reserves, and then use the actual bytes
42 : * values to adjust the space_info counters.
43 : *
44 : * MAKING RESERVATIONS, THE NORMAL CASE
45 : *
46 : * We call into either btrfs_reserve_data_bytes() or
47 : * btrfs_reserve_metadata_bytes(), depending on which we're looking for, with
48 : * num_bytes we want to reserve.
49 : *
50 : * ->reserve
51 : * space_info->bytes_may_use += num_bytes
52 : *
53 : * ->extent allocation
54 : * Call btrfs_add_reserved_bytes() which does
55 : * space_info->bytes_may_use -= num_bytes
56 : * space_info->bytes_reserved += extent_bytes
57 : *
58 : * ->insert reference
59 : * Call btrfs_update_block_group() which does
60 : * space_info->bytes_reserved -= extent_bytes
61 : * space_info->bytes_used += extent_bytes
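 *
 * As a worked example (numbers purely illustrative), reserving and then
 * allocating a 16KiB metadata extent moves the counters like this:
 *
 *   ->reserve:           bytes_may_use  += 16K  (worst case reservation)
 *   ->extent allocation: bytes_may_use  -= 16K
 *                        bytes_reserved += 16K  (a real extent was chosen)
 *   ->insert reference:  bytes_reserved -= 16K
 *                        bytes_used     += 16K  (the extent is now used)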
62 : *
63 : * MAKING RESERVATIONS, FLUSHING NORMALLY (non-priority)
64 : *
65 : * Assume we are unable to simply make the reservation because we do not have
66 : * enough space.
67 : *
68 : * -> __reserve_bytes
69 : * create a reserve_ticket with ->bytes set to our reservation, add it to
70 : * the tail of space_info->tickets, kick async flush thread
71 : *
72 : * ->handle_reserve_ticket
73 : * wait on ticket->wait for ->bytes to be reduced to 0, or ->error to be set
74 : * on the ticket.
75 : *
76 : * -> btrfs_async_reclaim_metadata_space/btrfs_async_reclaim_data_space
77 : * Flushes various things attempting to free up space.
78 : *
79 : * -> btrfs_try_granting_tickets()
80 : * This is called by anything that either subtracts space from
81 : * space_info->bytes_may_use, ->bytes_pinned, etc, or adds to the
82 : * space_info->total_bytes. This loops through the ->priority_tickets and
83 : * then the ->tickets list checking to see if the reservation can be
84 : * completed. If it can, the space is added to space_info->bytes_may_use and
85 : * the ticket is woken up.
86 : *
87 : * -> ticket wakeup
88 : * Check if ->bytes == 0; if it is, we got our reservation and we can carry
89 : * on, if not we return the appropriate error (ENOSPC, but it can be EINTR
90 : * if we were interrupted).
91 : *
92 : * MAKING RESERVATIONS, FLUSHING HIGH PRIORITY
93 : *
94 : * Same as the above, except we add ourselves to the
95 : * space_info->priority_tickets, and we do not use ticket->wait, we simply
96 : * call flush_space() ourselves for the states that are safe for us to call
97 : * without deadlocking and hope for the best.
98 : *
99 : * THE FLUSHING STATES
100 : *
101 : * Generally speaking we will have two cases for each state, a "nice" state
102 : * and an "ALL THE THINGS" state. In btrfs we delay a lot of work in order to
103 : * reduce the locking overhead on the various trees, and even to keep from
104 : * doing any work at all in the case of delayed refs. Each of these delayed
105 : * things however hold reservations, and so letting them run allows us to
106 : * reclaim space so we can make new reservations.
107 : *
108 : * FLUSH_DELAYED_ITEMS
109 : * Every inode has a delayed item to update the inode. Take a simple write
110 : * for example, we would update the inode item at write time to update the
111 : * mtime, and then again at finish_ordered_io() time in order to update the
112 : * isize or bytes. We keep these delayed items to coalesce these operations
113 : * into a single operation done on demand. These are an easy way to reclaim
114 : * metadata space.
115 : *
116 : * FLUSH_DELALLOC
117 : * Look at the delalloc comment to get an idea of how much space is reserved
118 : * for delayed allocation. We can reclaim some of this space simply by
119 : * running delalloc, but usually we need to wait for ordered extents to
120 : * reclaim the bulk of this space.
121 : *
122 : * FLUSH_DELAYED_REFS
123 : * We have a block reserve for the outstanding delayed refs space, and every
124 : * delayed ref operation holds a reservation. Running these is a quick way
125 : * to reclaim space, but we want to hold this until the end because COW can
126 : * churn a lot and we can avoid making some extent tree modifications if we
127 : * are able to delay for as long as possible.
128 : *
129 : * ALLOC_CHUNK
130 : * We will skip this the first time through space reservation because of
131 : * overcommit: we don't want to have a lot of useless metadata space when
132 : * our worst case reservations will likely never come true.
133 : *
134 : * RUN_DELAYED_IPUTS
135 : * If we're freeing inodes we're likely freeing checksums, file extent
136 : * items, and extent tree items. Loads of space could be freed up by these
137 : * operations, however they won't be usable until the transaction commits.
138 : *
139 : * COMMIT_TRANS
140 : * This will commit the transaction. Historically we had a lot of logic
141 : * surrounding whether or not we'd commit the transaction, but this was born
142 : * out of a pre-tickets era where we could end up committing the transaction
143 : * thousands of times in a row without making progress. Now thanks to our
144 : * ticketing system we know if we're not making progress and can error
145 : * everybody out after a few commits rather than burning the disk hoping for
146 : * a different answer.
147 : *
148 : * OVERCOMMIT
149 : *
150 : * Because we hold so many reservations for metadata we will allow you to
151 : * reserve more space than is currently free in the currently allocated
152 : * metadata space. This only happens with metadata, data does not allow
153 : * overcommitting.
154 : *
155 : * You can see the current logic for when we allow overcommit in
156 : * btrfs_can_overcommit(), but it only applies to unallocated space. If there
157 : * is no unallocated space to be had, all reservations are kept within the
158 : * free space in the allocated metadata chunks.
159 : *
160 : * Because of overcommitting, you generally want to use the
161 : * btrfs_can_overcommit() logic for metadata allocations, as it does the right
162 : * thing with or without extra unallocated space.
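 *
 * As an illustrative example (all values assumed): with 8GiB of unallocated
 * disk, a single device metadata profile (factor 1) and
 * BTRFS_RESERVE_FLUSH_ALL, calc_available_free_space() allows
 * 8GiB >> 3 = 1GiB of overcommit, so a reservation succeeds as long as
 * used + bytes < total_bytes + 1GiB even if the allocated metadata chunks
 * are themselves full.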
163 : */
164 :
165 275275 : u64 __pure btrfs_space_info_used(struct btrfs_space_info *s_info,
166 : bool may_use_included)
167 : {
168 173300515 : ASSERT(s_info);
169 173300515 : return s_info->bytes_used + s_info->bytes_reserved +
170 173300515 : s_info->bytes_pinned + s_info->bytes_readonly +
171 173575790 : s_info->bytes_zone_unusable +
172 275275 : (may_use_included ? s_info->bytes_may_use : 0);
173 : }
174 :
175 : /*
176 : * after adding space to the filesystem, we need to clear the full flags
177 : * on all the space infos.
178 : */
179 1360 : void btrfs_clear_space_info_full(struct btrfs_fs_info *info)
180 : {
181 1360 : struct list_head *head = &info->space_info;
182 1360 : struct btrfs_space_info *found;
183 :
184 5440 : list_for_each_entry(found, head, list)
185 4080 : found->full = 0;
186 1360 : }
187 :
188 : /*
189 : * Block groups with more than this value (percents) of unusable space will be
190 : * scheduled for background reclaim.
191 : */
192 : #define BTRFS_DEFAULT_ZONED_RECLAIM_THRESH (75)
193 :
194 : /*
195 : * Calculate chunk size depending on volume type (regular or zoned).
196 : */
197 : static u64 calc_chunk_size(const struct btrfs_fs_info *fs_info, u64 flags)
198 : {
199 9621 : if (btrfs_is_zoned(fs_info))
200 : return fs_info->zone_size;
201 :
202 9621 : ASSERT(flags & BTRFS_BLOCK_GROUP_TYPE_MASK);
203 :
204 9621 : if (flags & BTRFS_BLOCK_GROUP_DATA)
205 : return BTRFS_MAX_DATA_CHUNK_SIZE;
206 6404 : else if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
207 : return SZ_32M;
208 :
209 : /* Handle BTRFS_BLOCK_GROUP_METADATA */
210 3187 : if (fs_info->fs_devices->total_rw_bytes > 50ULL * SZ_1G)
211 93 : return SZ_1G;
212 :
213 : return SZ_256M;
214 : }
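/*
 * To make the defaults above concrete (values assumed from the usual
 * constants, e.g. BTRFS_MAX_DATA_CHUNK_SIZE being 10GiB): a 100GiB non-zoned
 * filesystem gets 10GiB data chunks, 32MiB system chunks and, because
 * total_rw_bytes > 50GiB, 1GiB metadata chunks. The chunk allocator may
 * still create smaller chunks than this when the devices are nearly full.
 */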
215 :
216 : /*
217 : * Update default chunk size.
218 : */
219 0 : void btrfs_update_space_info_chunk_size(struct btrfs_space_info *space_info,
220 : u64 chunk_size)
221 : {
222 9621 : WRITE_ONCE(space_info->chunk_size, chunk_size);
223 0 : }
224 :
225 9621 : static int create_space_info(struct btrfs_fs_info *info, u64 flags)
226 : {
227 :
228 9621 : struct btrfs_space_info *space_info;
229 9621 : int i;
230 9621 : int ret;
231 :
232 9621 : space_info = kzalloc(sizeof(*space_info), GFP_NOFS);
233 9621 : if (!space_info)
234 : return -ENOMEM;
235 :
236 96210 : for (i = 0; i < BTRFS_NR_RAID_TYPES; i++)
237 86589 : INIT_LIST_HEAD(&space_info->block_groups[i]);
238 9621 : init_rwsem(&space_info->groups_sem);
239 9621 : spin_lock_init(&space_info->lock);
240 9621 : space_info->flags = flags & BTRFS_BLOCK_GROUP_TYPE_MASK;
241 9621 : space_info->force_alloc = CHUNK_ALLOC_NO_FORCE;
242 9621 : INIT_LIST_HEAD(&space_info->ro_bgs);
243 9621 : INIT_LIST_HEAD(&space_info->tickets);
244 9621 : INIT_LIST_HEAD(&space_info->priority_tickets);
245 9621 : space_info->clamp = 1;
246 9621 : btrfs_update_space_info_chunk_size(space_info, calc_chunk_size(info, flags));
247 :
248 9621 : if (btrfs_is_zoned(info))
249 : space_info->bg_reclaim_threshold = BTRFS_DEFAULT_ZONED_RECLAIM_THRESH;
250 :
251 9621 : ret = btrfs_sysfs_add_space_info_type(info, space_info);
252 9621 : if (ret)
253 : return ret;
254 :
255 9621 : list_add(&space_info->list, &info->space_info);
256 9621 : if (flags & BTRFS_BLOCK_GROUP_DATA)
257 3217 : info->data_sinfo = space_info;
258 :
259 : return ret;
260 : }
261 :
262 3217 : int btrfs_init_space_info(struct btrfs_fs_info *fs_info)
263 : {
264 3217 : struct btrfs_super_block *disk_super;
265 3217 : u64 features;
266 3217 : u64 flags;
267 3217 : int mixed = 0;
268 3217 : int ret;
269 :
270 3217 : disk_super = fs_info->super_copy;
271 3217 : if (!btrfs_super_root(disk_super))
272 : return -EINVAL;
273 :
274 3217 : features = btrfs_super_incompat_flags(disk_super);
275 3217 : if (features & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS)
276 30 : mixed = 1;
277 :
278 3217 : flags = BTRFS_BLOCK_GROUP_SYSTEM;
279 3217 : ret = create_space_info(fs_info, flags);
280 3217 : if (ret)
281 0 : goto out;
282 :
283 3217 : if (mixed) {
284 30 : flags = BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_DATA;
285 30 : ret = create_space_info(fs_info, flags);
286 : } else {
287 3187 : flags = BTRFS_BLOCK_GROUP_METADATA;
288 3187 : ret = create_space_info(fs_info, flags);
289 3187 : if (ret)
290 0 : goto out;
291 :
292 3187 : flags = BTRFS_BLOCK_GROUP_DATA;
293 3187 : ret = create_space_info(fs_info, flags);
294 : }
295 : out:
296 : return ret;
297 : }
298 :
299 30776 : void btrfs_add_bg_to_space_info(struct btrfs_fs_info *info,
300 : struct btrfs_block_group *block_group)
301 : {
302 30776 : struct btrfs_space_info *found;
303 30776 : int factor, index;
304 :
305 30776 : factor = btrfs_bg_type_to_factor(block_group->flags);
306 :
307 30776 : found = btrfs_find_space_info(info, block_group->flags);
308 30776 : ASSERT(found);
309 30776 : spin_lock(&found->lock);
310 30776 : found->total_bytes += block_group->length;
311 30776 : found->disk_total += block_group->length * factor;
312 30776 : found->bytes_used += block_group->used;
313 30776 : found->disk_used += block_group->used * factor;
314 30776 : found->bytes_readonly += block_group->bytes_super;
315 30776 : found->bytes_zone_unusable += block_group->zone_unusable;
316 30776 : if (block_group->length > 0)
317 30776 : found->full = 0;
318 30776 : btrfs_try_granting_tickets(info, found);
319 30776 : spin_unlock(&found->lock);
320 :
321 30776 : block_group->space_info = found;
322 :
323 30776 : index = btrfs_bg_flags_to_raid_index(block_group->flags);
324 30776 : down_write(&found->groups_sem);
325 30776 : list_add_tail(&block_group->list, &found->block_groups[index]);
326 30776 : up_write(&found->groups_sem);
327 30776 : }
328 :
329 37824674 : struct btrfs_space_info *btrfs_find_space_info(struct btrfs_fs_info *info,
330 : u64 flags)
331 : {
332 38999847 : struct list_head *head = &info->space_info;
333 38999847 : struct btrfs_space_info *found;
334 :
335 38999847 : flags &= BTRFS_BLOCK_GROUP_TYPE_MASK;
336 :
337 73548845 : list_for_each_entry(found, head, list) {
338 73543504 : if (found->flags & flags)
339 37819333 : return found;
340 : }
341 : return NULL;
342 : }
343 :
344 71032536 : static u64 calc_available_free_space(struct btrfs_fs_info *fs_info,
345 : struct btrfs_space_info *space_info,
346 : enum btrfs_reserve_flush_enum flush)
347 : {
348 71032536 : u64 profile;
349 71032536 : u64 avail;
350 71032536 : int factor;
351 :
352 71032536 : if (space_info->flags & BTRFS_BLOCK_GROUP_SYSTEM)
353 98 : profile = btrfs_system_alloc_profile(fs_info);
354 : else
355 71032438 : profile = btrfs_metadata_alloc_profile(fs_info);
356 :
357 71032532 : avail = atomic64_read(&fs_info->free_chunk_space);
358 :
359 : /*
360 : * If we have dup, raid1 or raid10 then only half of the free
361 : * space is actually usable. For raid56, the space info used
362 : * doesn't include the parity drive, so we don't have to
363 : * change the math.
364 : */
365 71032532 : factor = btrfs_bg_type_to_factor(profile);
366 71032527 : avail = div_u64(avail, factor);
367 :
368 : /*
369 : * If we aren't flushing all things, let us overcommit up to
370 : * half of the space. If we can flush, don't let us overcommit
371 : * too much; let it overcommit up to 1/8 of the space.
372 : */
373 71032527 : if (flush == BTRFS_RESERVE_FLUSH_ALL)
374 64506447 : avail >>= 3;
375 : else
376 6526080 : avail >>= 1;
377 71032527 : return avail;
378 : }
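/*
 * Rough numbers for the helper above (illustrative only): with RAID1
 * metadata (factor 2) and free_chunk_space at 16GiB, only 8GiB is usable.
 * A BTRFS_RESERVE_FLUSH_ALL caller may then overcommit by 8GiB >> 3 = 1GiB,
 * while a caller that cannot flush (e.g. BTRFS_RESERVE_NO_FLUSH) is allowed
 * the larger 8GiB >> 1 = 4GiB, since it has no way to reclaim space itself.
 */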
379 :
380 44406729 : int btrfs_can_overcommit(struct btrfs_fs_info *fs_info,
381 : struct btrfs_space_info *space_info, u64 bytes,
382 : enum btrfs_reserve_flush_enum flush)
383 : {
384 44406729 : u64 avail;
385 44406729 : u64 used;
386 :
387 : /* Don't overcommit data space (this also covers mixed block groups) */
388 44406729 : if (space_info->flags & BTRFS_BLOCK_GROUP_DATA)
389 : return 0;
390 :
391 43675725 : used = btrfs_space_info_used(space_info, true);
392 43675725 : if (test_bit(BTRFS_FS_ACTIVE_ZONE_TRACKING, &fs_info->flags) &&
393 0 : (space_info->flags & BTRFS_BLOCK_GROUP_METADATA))
394 : avail = 0;
395 : else
396 43675725 : avail = calc_available_free_space(fs_info, space_info, flush);
397 :
398 43675717 : if (used + bytes < space_info->total_bytes + avail)
399 38496672 : return 1;
400 : return 0;
401 : }
402 :
403 2608711 : static void remove_ticket(struct btrfs_space_info *space_info,
404 : struct reserve_ticket *ticket)
405 : {
406 2608711 : if (!list_empty(&ticket->list)) {
407 2608711 : list_del_init(&ticket->list);
408 2608711 : ASSERT(space_info->reclaim_size >= ticket->bytes);
409 2608711 : space_info->reclaim_size -= ticket->bytes;
410 : }
411 2608711 : }
412 :
413 : /*
414 : * This is for space we already have accounted in space_info->bytes_may_use, so
415 : * basically when we're returning space from block_rsv's.
416 : */
417 142761436 : void btrfs_try_granting_tickets(struct btrfs_fs_info *fs_info,
418 : struct btrfs_space_info *space_info)
419 : {
420 142761436 : struct list_head *head;
421 142761436 : enum btrfs_reserve_flush_enum flush = BTRFS_RESERVE_NO_FLUSH;
422 :
423 142761436 : lockdep_assert_held(&space_info->lock);
424 :
425 142761436 : head = &space_info->priority_tickets;
426 : again:
427 288016413 : while (!list_empty(head)) {
428 6721808 : struct reserve_ticket *ticket;
429 6721808 : u64 used = btrfs_space_info_used(space_info, true);
430 :
431 6721808 : ticket = list_first_entry(head, struct reserve_ticket, list);
432 :
433 : /* Check and see if our ticket can be satisfied now. */
434 12778693 : if ((used + ticket->bytes <= space_info->total_bytes) ||
435 6056890 : btrfs_can_overcommit(fs_info, space_info, ticket->bytes,
436 : flush)) {
437 2477903 : btrfs_space_info_update_bytes_may_use(fs_info,
438 : space_info,
439 2477903 : ticket->bytes);
440 2477905 : remove_ticket(space_info, ticket);
441 2477905 : ticket->bytes = 0;
442 2477905 : space_info->tickets_id++;
443 2477905 : wake_up(&ticket->wait);
444 : } else {
445 : break;
446 : }
447 : }
448 :
449 285538505 : if (head == &space_info->priority_tickets) {
450 142766340 : head = &space_info->tickets;
451 142766340 : flush = BTRFS_RESERVE_FLUSH_ALL;
452 142766340 : goto again;
453 : }
454 142772165 : }
455 :
456 : #define DUMP_BLOCK_RSV(fs_info, rsv_name) \
457 : do { \
458 : struct btrfs_block_rsv *__rsv = &(fs_info)->rsv_name; \
459 : spin_lock(&__rsv->lock); \
460 : btrfs_info(fs_info, #rsv_name ": size %llu reserved %llu", \
461 : __rsv->size, __rsv->reserved); \
462 : spin_unlock(&__rsv->lock); \
463 : } while (0)
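/*
 * The do { } while (0) wrapper above is the usual trick to make a
 * multi-statement macro behave like a single statement, so that e.g.
 * "if (cond) DUMP_BLOCK_RSV(fs_info, trans_block_rsv);" expands safely.
 */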
464 :
465 : static const char *space_info_flag_to_str(const struct btrfs_space_info *space_info)
466 : {
467 0 : switch (space_info->flags) {
468 : case BTRFS_BLOCK_GROUP_SYSTEM:
469 : return "SYSTEM";
470 : case BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_DATA:
471 : return "DATA+METADATA";
472 : case BTRFS_BLOCK_GROUP_DATA:
473 : return "DATA";
474 : case BTRFS_BLOCK_GROUP_METADATA:
475 : return "METADATA";
476 : default:
477 : return "UNKNOWN";
478 : }
479 : }
480 :
481 0 : static void dump_global_block_rsv(struct btrfs_fs_info *fs_info)
482 : {
483 0 : DUMP_BLOCK_RSV(fs_info, global_block_rsv);
484 0 : DUMP_BLOCK_RSV(fs_info, trans_block_rsv);
485 0 : DUMP_BLOCK_RSV(fs_info, chunk_block_rsv);
486 0 : DUMP_BLOCK_RSV(fs_info, delayed_block_rsv);
487 0 : DUMP_BLOCK_RSV(fs_info, delayed_refs_rsv);
488 0 : }
489 :
490 0 : static void __btrfs_dump_space_info(struct btrfs_fs_info *fs_info,
491 : struct btrfs_space_info *info)
492 : {
493 0 : const char *flag_str = space_info_flag_to_str(info);
494 0 : lockdep_assert_held(&info->lock);
495 :
496 : /* The free space could be negative in case of overcommit */
497 0 : btrfs_info(fs_info, "space_info %s has %lld free, is %sfull",
498 : flag_str,
499 : (s64)(info->total_bytes - btrfs_space_info_used(info, true)),
500 : info->full ? "" : "not ");
501 0 : btrfs_info(fs_info,
502 : "space_info total=%llu, used=%llu, pinned=%llu, reserved=%llu, may_use=%llu, readonly=%llu zone_unusable=%llu",
503 : info->total_bytes, info->bytes_used, info->bytes_pinned,
504 : info->bytes_reserved, info->bytes_may_use,
505 : info->bytes_readonly, info->bytes_zone_unusable);
506 0 : }
507 :
508 0 : void btrfs_dump_space_info(struct btrfs_fs_info *fs_info,
509 : struct btrfs_space_info *info, u64 bytes,
510 : int dump_block_groups)
511 : {
512 0 : struct btrfs_block_group *cache;
513 0 : int index = 0;
514 :
515 0 : spin_lock(&info->lock);
516 0 : __btrfs_dump_space_info(fs_info, info);
517 0 : dump_global_block_rsv(fs_info);
518 0 : spin_unlock(&info->lock);
519 :
520 0 : if (!dump_block_groups)
521 : return;
522 :
523 0 : down_read(&info->groups_sem);
524 0 : again:
525 0 : list_for_each_entry(cache, &info->block_groups[index], list) {
526 0 : spin_lock(&cache->lock);
527 0 : btrfs_info(fs_info,
528 : "block group %llu has %llu bytes, %llu used %llu pinned %llu reserved %llu zone_unusable %s",
529 : cache->start, cache->length, cache->used, cache->pinned,
530 : cache->reserved, cache->zone_unusable,
531 : cache->ro ? "[readonly]" : "");
532 0 : spin_unlock(&cache->lock);
533 0 : btrfs_dump_free_space(cache, bytes);
534 : }
535 0 : if (++index < BTRFS_NR_RAID_TYPES)
536 0 : goto again;
537 0 : up_read(&info->groups_sem);
538 : }
539 :
540 : static inline u64 calc_reclaim_items_nr(const struct btrfs_fs_info *fs_info,
541 : u64 to_reclaim)
542 : {
543 1055250 : u64 bytes;
544 1055250 : u64 nr;
545 :
546 1055250 : bytes = btrfs_calc_insert_metadata_size(fs_info, 1);
547 1055250 : nr = div64_u64(to_reclaim, bytes);
548 1055250 : if (!nr)
549 21 : nr = 1;
550 1055250 : return nr;
551 : }
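/*
 * Rough numbers for the helper above (assuming a 16KiB nodesize, where
 * btrfs_calc_insert_metadata_size(fs_info, 1) works out to
 * 16KiB * BTRFS_MAX_LEVEL * 2 = 256KiB): a request to reclaim 1MiB becomes
 * nr = 4 items, and anything below one item's worth is rounded up to 1.
 */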
552 :
553 : static inline u64 calc_delayed_refs_nr(const struct btrfs_fs_info *fs_info,
554 : u64 to_reclaim)
555 : {
556 917190 : const u64 bytes = btrfs_calc_delayed_ref_bytes(fs_info, 1);
557 917190 : u64 nr;
558 :
559 917190 : nr = div64_u64(to_reclaim, bytes);
560 917190 : if (!nr)
561 60 : nr = 1;
562 917190 : return nr;
563 : }
564 :
565 : #define EXTENT_SIZE_PER_ITEM SZ_256K
566 :
567 : /*
568 : * shrink metadata reservation for delalloc
569 : */
570 331781 : static void shrink_delalloc(struct btrfs_fs_info *fs_info,
571 : struct btrfs_space_info *space_info,
572 : u64 to_reclaim, bool wait_ordered,
573 : bool for_preempt)
574 : {
575 331781 : struct btrfs_trans_handle *trans;
576 331781 : u64 delalloc_bytes;
577 331781 : u64 ordered_bytes;
578 331781 : u64 items;
579 331781 : long time_left;
580 331781 : int loops;
581 :
582 331781 : delalloc_bytes = percpu_counter_sum_positive(&fs_info->delalloc_bytes);
583 331781 : ordered_bytes = percpu_counter_sum_positive(&fs_info->ordered_bytes);
584 331781 : if (delalloc_bytes == 0 && ordered_bytes == 0)
585 : return;
586 :
587 : /* Calc the number of the pages we need flush for space reservation */
588 24842 : if (to_reclaim == U64_MAX) {
589 : items = U64_MAX;
590 : } else {
591 : /*
592 : * to_reclaim is set to however much metadata we need to
593 : * reclaim, but reclaiming that much data doesn't really track
594 : * exactly. What we really want to do is reclaim a full inode's
595 : * worth of reservations, however that's not available to us
596 : * here. We will take a fraction of the delalloc bytes for our
597 : * flushing loops and hope for the best. Delalloc will expand
598 : * the amount we write to cover an entire dirty extent, which
599 : * will reclaim the metadata reservation for that range. If
600 : * it's not enough subsequent flush stages will be more
601 : * aggressive.
602 : */
603 18101 : to_reclaim = max(to_reclaim, delalloc_bytes >> 3);
604 18101 : items = calc_reclaim_items_nr(fs_info, to_reclaim) * 2;
605 : }
606 :
607 24842 : trans = current->journal_info;
608 :
609 : /*
610 : * If we are doing more ordered than delalloc we need to just wait on
611 : * ordered extents, otherwise we'll waste time trying to flush delalloc
612 : * that likely won't give us the space back we need.
613 : */
614 24842 : if (ordered_bytes > delalloc_bytes && !for_preempt)
615 1639 : wait_ordered = true;
616 :
617 24842 : loops = 0;
618 52277 : while ((delalloc_bytes || ordered_bytes) && loops < 3) {
619 32946 : u64 temp = min(delalloc_bytes, to_reclaim) >> PAGE_SHIFT;
620 32946 : long nr_pages = min_t(u64, temp, LONG_MAX);
621 32946 : int async_pages;
622 :
623 32946 : btrfs_start_delalloc_roots(fs_info, nr_pages, true);
624 :
625 : /*
626 : * We need to make sure any outstanding async pages are now
627 : * processed before we continue. This is because things like
628 : * sync_inode() try to be smart and skip writing if the inode is
629 : * marked clean. We don't use filemap_fdatawrite() for flushing
630 : * because we want to control how many pages we write out at a
631 : * time, thus this is the only safe way to make sure we've
632 : * waited for outstanding compressed workers to have started
633 : * their jobs and thus have ordered extents set up properly.
634 : *
635 : * This exists because we do not want to wait for each
636 : * individual inode to finish its async work, we simply want to
637 : * start the IO on everybody, and then come back here and wait
638 : * for all of the async work to catch up. Once we're done with
639 : * that we know we'll have ordered extents for everything and we
640 : * can decide if we wait for that or not.
641 : *
642 : * If we choose to replace this in the future, make absolutely
643 : * sure that the proper waiting is being done in the async case,
644 : * as there have been bugs in that area before.
645 : */
646 32946 : async_pages = atomic_read(&fs_info->async_delalloc_pages);
647 32946 : if (!async_pages)
648 32946 : goto skip_async;
649 :
650 : /*
651 : * We don't want to wait forever; if we wrote fewer pages in this
652 : * loop than we have outstanding, only wait for that number of
653 : * pages, otherwise we can wait for all async pages to finish
654 : * before continuing.
655 : */
656 0 : if (async_pages > nr_pages)
657 0 : async_pages -= nr_pages;
658 : else
659 : async_pages = 0;
660 0 : wait_event(fs_info->async_submit_wait,
661 : atomic_read(&fs_info->async_delalloc_pages) <=
662 : async_pages);
663 0 : skip_async:
664 32946 : loops++;
665 32946 : if (wait_ordered && !trans) {
666 9154 : btrfs_wait_ordered_roots(fs_info, items, 0, (u64)-1);
667 : } else {
668 23792 : time_left = schedule_timeout_killable(1);
669 23792 : if (time_left)
670 : break;
671 : }
672 :
673 : /*
674 : * If this is for preemption we just want a one-shot of delalloc
675 : * flushing so we can stop flushing if we decide we don't need
676 : * to anymore.
677 : */
678 32946 : if (for_preempt)
679 : break;
680 :
681 31296 : spin_lock(&space_info->lock);
682 31295 : if (list_empty(&space_info->tickets) &&
683 3861 : list_empty(&space_info->priority_tickets)) {
684 3861 : spin_unlock(&space_info->lock);
685 : break;
686 : }
687 27434 : spin_unlock(&space_info->lock);
688 :
689 27434 : delalloc_bytes = percpu_counter_sum_positive(
690 : &fs_info->delalloc_bytes);
691 27435 : ordered_bytes = percpu_counter_sum_positive(
692 : &fs_info->ordered_bytes);
693 : }
694 : }
695 :
696 : /*
697 : * Try to flush some data based on the policy set by @state. This is only advisory
698 : * and may fail for various reasons. The caller is supposed to examine the
699 : * state of @space_info to detect the outcome.
700 : */
701 3873253 : static void flush_space(struct btrfs_fs_info *fs_info,
702 : struct btrfs_space_info *space_info, u64 num_bytes,
703 : enum btrfs_flush_state state, bool for_preempt)
704 : {
705 3873253 : struct btrfs_root *root = fs_info->tree_root;
706 3873253 : struct btrfs_trans_handle *trans;
707 3873253 : int nr;
708 3873253 : int ret = 0;
709 :
710 3873253 : switch (state) {
711 1921667 : case FLUSH_DELAYED_ITEMS_NR:
712 : case FLUSH_DELAYED_ITEMS:
713 1921667 : if (state == FLUSH_DELAYED_ITEMS_NR)
714 1037170 : nr = calc_reclaim_items_nr(fs_info, num_bytes) * 2;
715 : else
716 : nr = -1;
717 :
718 1921667 : trans = btrfs_join_transaction(root);
719 1921668 : if (IS_ERR(trans)) {
720 0 : ret = PTR_ERR(trans);
721 0 : break;
722 : }
723 1921668 : ret = btrfs_run_delayed_items_nr(trans, nr);
724 1921667 : btrfs_end_transaction(trans);
725 1921667 : break;
726 331781 : case FLUSH_DELALLOC:
727 : case FLUSH_DELALLOC_WAIT:
728 : case FLUSH_DELALLOC_FULL:
729 331781 : if (state == FLUSH_DELALLOC_FULL)
730 121367 : num_bytes = U64_MAX;
731 331781 : shrink_delalloc(fs_info, space_info, num_bytes,
732 : state != FLUSH_DELALLOC, for_preempt);
733 331781 : break;
734 1054926 : case FLUSH_DELAYED_REFS_NR:
735 : case FLUSH_DELAYED_REFS:
736 1054926 : trans = btrfs_join_transaction(root);
737 1054926 : if (IS_ERR(trans)) {
738 0 : ret = PTR_ERR(trans);
739 0 : break;
740 : }
741 1054926 : if (state == FLUSH_DELAYED_REFS_NR)
742 1834380 : nr = calc_delayed_refs_nr(fs_info, num_bytes);
743 : else
744 : nr = 0;
745 1054926 : btrfs_run_delayed_refs(trans, nr);
746 1054925 : btrfs_end_transaction(trans);
747 1054925 : break;
748 : case ALLOC_CHUNK:
749 : case ALLOC_CHUNK_FORCE:
750 : /*
751 : * For metadata space on a zoned filesystem, reaching here means we
752 : * don't have enough space left in active_total_bytes. Try to
753 : * activate a block group first, because we may have an inactive
754 : * block group already allocated.
755 : */
756 218411 : ret = btrfs_zoned_activate_one_bg(fs_info, space_info, false);
757 218411 : if (ret < 0)
758 : break;
759 218411 : else if (ret == 1)
760 : break;
761 :
762 218411 : trans = btrfs_join_transaction(root);
763 218411 : if (IS_ERR(trans)) {
764 0 : ret = PTR_ERR(trans);
765 0 : break;
766 : }
767 337000 : ret = btrfs_chunk_alloc(trans,
768 : btrfs_get_alloc_profile(fs_info, space_info->flags),
769 : (state == ALLOC_CHUNK) ? CHUNK_ALLOC_NO_FORCE :
770 : CHUNK_ALLOC_FORCE);
771 218411 : btrfs_end_transaction(trans);
772 :
773 : /*
774 : * For metadata space on a zoned filesystem, allocating a new chunk
775 : * is not enough. We still need to activate the block group.
776 : * Activate the newly allocated block group by (maybe) finishing
777 : * a block group.
778 : */
779 218411 : if (ret == 1) {
780 701 : ret = btrfs_zoned_activate_one_bg(fs_info, space_info, true);
781 : /*
782 : * Revert to the original ret regardless we could finish
783 : * one block group or not.
784 : */
785 701 : if (ret >= 0)
786 701 : ret = 1;
787 : }
788 :
789 218411 : if (ret > 0 || ret == -ENOSPC)
790 190430 : ret = 0;
791 : break;
792 172841 : case RUN_DELAYED_IPUTS:
793 : /*
794 : * If we have pending delayed iputs then we could free up a
795 : * bunch of pinned space, so make sure we run the iputs before
796 : * we do our pinned bytes check below.
797 : */
798 172841 : btrfs_run_delayed_iputs(fs_info);
799 172841 : btrfs_wait_on_delayed_iputs(fs_info);
800 172841 : break;
801 : case COMMIT_TRANS:
802 173627 : ASSERT(current->journal_info == NULL);
803 173627 : trans = btrfs_join_transaction(root);
804 173627 : if (IS_ERR(trans)) {
805 0 : ret = PTR_ERR(trans);
806 0 : break;
807 : }
808 173627 : ret = btrfs_commit_transaction(trans);
809 173627 : break;
810 : default:
811 : ret = -ENOSPC;
812 : break;
813 : }
814 :
815 3873245 : trace_btrfs_flush_space(fs_info, space_info->flags, num_bytes, state,
816 : ret, for_preempt);
817 3873232 : return;
818 : }
819 :
820 : static inline u64
821 3780123 : btrfs_calc_reclaim_metadata_size(struct btrfs_fs_info *fs_info,
822 : struct btrfs_space_info *space_info)
823 : {
824 3780123 : u64 used;
825 3780123 : u64 avail;
826 3780123 : u64 to_reclaim = space_info->reclaim_size;
827 :
828 3780123 : lockdep_assert_held(&space_info->lock);
829 :
830 3780123 : avail = calc_available_free_space(fs_info, space_info,
831 : BTRFS_RESERVE_FLUSH_ALL);
832 3780123 : used = btrfs_space_info_used(space_info, true);
833 :
834 : /*
835 : * We may be flushing because suddenly we have less space than we had
836 : * before, and now we're well over-committed based on our current free
837 : * space. If that's the case add in our overage so we make sure to put
838 : * appropriate pressure on the flushing state machine.
839 : */
840 3780123 : if (space_info->total_bytes + avail < used)
841 4745 : to_reclaim += used - (space_info->total_bytes + avail);
842 :
843 3780123 : return to_reclaim;
844 : }
845 :
846 61972728 : static bool need_preemptive_reclaim(struct btrfs_fs_info *fs_info,
847 : struct btrfs_space_info *space_info)
848 : {
849 61972728 : u64 global_rsv_size = fs_info->global_block_rsv.reserved;
850 61972728 : u64 ordered, delalloc;
851 61972728 : u64 thresh;
852 61972728 : u64 used;
853 :
854 61972728 : thresh = mult_perc(space_info->total_bytes, 90);
855 :
856 61972728 : lockdep_assert_held(&space_info->lock);
857 :
858 : /* If we're just plain full then async reclaim just slows us down. */
859 61972728 : if ((space_info->bytes_used + space_info->bytes_reserved +
860 : global_rsv_size) >= thresh)
861 : return false;
862 :
863 60302164 : used = space_info->bytes_may_use + space_info->bytes_pinned;
864 :
865 : /* The total flushable belongs to the global rsv, don't flush. */
866 60302164 : if (global_rsv_size >= used)
867 : return false;
868 :
869 : /*
870 : * 128MiB is 1/4 of the maximum global rsv size. If we have less than
871 : * that devoted to other reservations then there's no sense in flushing,
872 : * we don't have a lot of things that need flushing.
873 : */
874 60302164 : if (used - global_rsv_size <= SZ_128M)
875 : return false;
876 :
877 : /*
878 : * We have tickets queued, bail so we don't compete with the async
879 : * flushers.
880 : */
881 23638805 : if (space_info->reclaim_size)
882 : return false;
883 :
884 : /*
885 : * If we have over half of the free space occupied by reservations or
886 : * pinned then we want to start flushing.
887 : *
888 : * We do not do the traditional thing here, which is to say
889 : *
890 : * if (used >= ((total_bytes + avail) / 2))
891 : * return 1;
892 : *
893 : * because this doesn't quite work how we want. If we had more than 50%
894 : * of the space_info used by bytes_used and we had 0 available we'd just
895 : * constantly run the background flusher. Instead we want it to kick in
896 : * if our reclaimable space exceeds our clamped free space.
897 : *
898 : * Our clamping range is 2^1 -> 2^8. Practically speaking that means
899 : * the following:
900 : *
901 : * Amount of RAM Minimum threshold Maximum threshold
902 : *
903 : * 256GiB 1GiB 128GiB
904 : * 128GiB 512MiB 64GiB
905 : * 64GiB 256MiB 32GiB
906 : * 32GiB 128MiB 16GiB
907 : * 16GiB 64MiB 8GiB
908 : *
909 : * These are the range our thresholds will fall in, corresponding to how
910 : * much delalloc we need for the background flusher to kick in.
911 : */
912 :
913 23576692 : thresh = calc_available_free_space(fs_info, space_info,
914 : BTRFS_RESERVE_FLUSH_ALL);
915 23576692 : used = space_info->bytes_used + space_info->bytes_reserved +
916 23576692 : space_info->bytes_readonly + global_rsv_size;
917 23576692 : if (used < space_info->total_bytes)
918 23460354 : thresh += space_info->total_bytes - used;
919 23576692 : thresh >>= space_info->clamp;
920 :
921 23576692 : used = space_info->bytes_pinned;
922 :
923 : /*
924 : * If we have more ordered bytes than delalloc bytes then we're either
925 : * doing a lot of DIO, or we simply don't have a lot of delalloc waiting
926 : * around. Preemptive flushing is only useful in that it can free up
927 : * space before tickets need to wait for things to finish. In the case
928 : * of ordered extents, preemptively waiting on ordered extents gets us
929 : * nothing, if our reservations are tied up in ordered extents we'll
930 : * simply have to slow down writers by forcing them to wait on ordered
931 : * extents.
932 : *
933 : * In the case that ordered is larger than delalloc, only include the
934 : * block reserves that we would actually be able to directly reclaim
935 : * from. In this case if we're heavy on metadata operations this will
936 : * clearly be heavy enough to warrant preemptive flushing. In the case
937 : * of heavy DIO or ordered reservations, preemptive flushing will just
938 : * waste time and cause us to slow down.
939 : *
940 : * We want to make sure we truly are maxed out on ordered however, so
941 : * cut ordered in half, and if it's still higher than delalloc then we
942 : * can keep flushing. This is to avoid the case where we start
943 : * flushing, and now delalloc == ordered and we stop preemptively
944 : * flushing when we could still have several gigs of delalloc to flush.
945 : */
946 23576692 : ordered = percpu_counter_read_positive(&fs_info->ordered_bytes) >> 1;
947 23576692 : delalloc = percpu_counter_read_positive(&fs_info->delalloc_bytes);
948 23576692 : if (ordered >= delalloc)
949 14621439 : used += fs_info->delayed_refs_rsv.reserved +
950 14621439 : fs_info->delayed_block_rsv.reserved;
951 : else
952 8955253 : used += space_info->bytes_may_use - global_rsv_size;
953 :
954 23696064 : return (used >= thresh && !btrfs_fs_closing(fs_info) &&
955 59686 : !test_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state));
956 : }
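/*
 * To make the threshold above concrete (all numbers assumed): with
 * total_bytes = 8GiB, 2GiB consumed by used + reserved + readonly + global
 * rsv, and clamp = 2, the trigger point is (avail + 6GiB) >> 2. Preemptive
 * flushing then starts once the reclaimable reservations (pinned plus either
 * the delayed rsvs or the delalloc estimate, as selected above) exceed that.
 */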
957 :
958 130682 : static bool steal_from_global_rsv(struct btrfs_fs_info *fs_info,
959 : struct btrfs_space_info *space_info,
960 : struct reserve_ticket *ticket)
961 : {
962 130682 : struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
963 130682 : u64 min_bytes;
964 :
965 130682 : if (!ticket->steal)
966 : return false;
967 :
968 0 : if (global_rsv->space_info != space_info)
969 : return false;
970 :
971 0 : spin_lock(&global_rsv->lock);
972 0 : min_bytes = mult_perc(global_rsv->size, 10);
973 0 : if (global_rsv->reserved < min_bytes + ticket->bytes) {
974 0 : spin_unlock(&global_rsv->lock);
975 0 : return false;
976 : }
977 0 : global_rsv->reserved -= ticket->bytes;
978 0 : remove_ticket(space_info, ticket);
979 0 : ticket->bytes = 0;
980 0 : wake_up(&ticket->wait);
981 0 : space_info->tickets_id++;
982 0 : if (global_rsv->reserved < global_rsv->size)
983 0 : global_rsv->full = 0;
984 0 : spin_unlock(&global_rsv->lock);
985 :
986 0 : return true;
987 : }
988 :
989 : /*
990 : * maybe_fail_all_tickets - we've exhausted our flushing, start failing tickets
991 : * @fs_info - fs_info for this fs
992 : * @space_info - the space info we were flushing
993 : *
994 : * We call this when we've exhausted our flushing ability and haven't made
995 : * progress in satisfying tickets. The reservation code handles tickets in
996 : * order, so if there is a large ticket first and then smaller ones we could
997 : * very well satisfy the smaller tickets. This will attempt to wake up any
998 : * tickets in the list to catch this case.
999 : *
1000 : * This function returns true if it was able to make progress by clearing out
1001 : * other tickets, or if it stumbles across a ticket that was smaller than the
1002 : * first ticket.
1003 : */
1004 94668 : static bool maybe_fail_all_tickets(struct btrfs_fs_info *fs_info,
1005 : struct btrfs_space_info *space_info)
1006 : {
1007 94668 : struct reserve_ticket *ticket;
1008 94668 : u64 tickets_id = space_info->tickets_id;
1009 94668 : const bool aborted = BTRFS_FS_ERROR(fs_info);
1010 :
1011 94668 : trace_btrfs_fail_all_tickets(fs_info, space_info);
1012 :
1013 94668 : if (btrfs_test_opt(fs_info, ENOSPC_DEBUG)) {
1014 0 : btrfs_info(fs_info, "cannot satisfy tickets, dumping space info");
1015 0 : __btrfs_dump_space_info(fs_info, space_info);
1016 : }
1017 :
1018 225350 : while (!list_empty(&space_info->tickets) &&
1019 131338 : tickets_id == space_info->tickets_id) {
1020 130682 : ticket = list_first_entry(&space_info->tickets,
1021 : struct reserve_ticket, list);
1022 :
1023 130682 : if (!aborted && steal_from_global_rsv(fs_info, space_info, ticket))
1024 : return true;
1025 :
1026 130682 : if (!aborted && btrfs_test_opt(fs_info, ENOSPC_DEBUG))
1027 0 : btrfs_info(fs_info, "failing ticket with %llu bytes",
1028 : ticket->bytes);
1029 :
1030 130682 : remove_ticket(space_info, ticket);
1031 130682 : if (aborted)
1032 0 : ticket->error = -EIO;
1033 : else
1034 130682 : ticket->error = -ENOSPC;
1035 130682 : wake_up(&ticket->wait);
1036 :
1037 : /*
1038 : * We're just throwing tickets away, so more flushing may not
1039 : * trip over btrfs_try_granting_tickets, so we need to call it
1040 : * here to see if we can make progress with the next ticket in
1041 : * the list.
1042 : */
1043 130682 : if (!aborted)
1044 130682 : btrfs_try_granting_tickets(fs_info, space_info);
1045 : }
1046 94668 : return (tickets_id != space_info->tickets_id);
1047 : }
1048 :
1049 : /*
1050 : * This is for normal flushers, we can wait all goddamned day if we want to. We
1051 : * will loop and continuously try to flush as long as we are making progress.
1052 : * We count progress as clearing off tickets each time we have to loop.
1053 : */
1054 1153404 : static void btrfs_async_reclaim_metadata_space(struct work_struct *work)
1055 : {
1056 1153404 : struct btrfs_fs_info *fs_info;
1057 1153404 : struct btrfs_space_info *space_info;
1058 1153404 : u64 to_reclaim;
1059 1153404 : enum btrfs_flush_state flush_state;
1060 1153404 : int commit_cycles = 0;
1061 1153404 : u64 last_tickets_id;
1062 :
1063 1153404 : fs_info = container_of(work, struct btrfs_fs_info, async_reclaim_work);
1064 1153404 : space_info = btrfs_find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
1065 :
1066 1153404 : spin_lock(&space_info->lock);
1067 1153404 : to_reclaim = btrfs_calc_reclaim_metadata_size(fs_info, space_info);
1068 1153404 : if (!to_reclaim) {
1069 231845 : space_info->flush = 0;
1070 231845 : spin_unlock(&space_info->lock);
1071 231845 : return;
1072 : }
1073 921559 : last_tickets_id = space_info->tickets_id;
1074 921559 : spin_unlock(&space_info->lock);
1075 :
1076 921559 : flush_state = FLUSH_DELAYED_ITEMS_NR;
1077 3519912 : do {
1078 3519912 : flush_space(fs_info, space_info, to_reclaim, flush_state, false);
1079 3519912 : spin_lock(&space_info->lock);
1080 3519912 : if (list_empty(&space_info->tickets)) {
1081 898811 : space_info->flush = 0;
1082 898811 : spin_unlock(&space_info->lock);
1083 898811 : return;
1084 : }
1085 2621101 : to_reclaim = btrfs_calc_reclaim_metadata_size(fs_info,
1086 : space_info);
1087 2621101 : if (last_tickets_id == space_info->tickets_id) {
1088 2558076 : flush_state++;
1089 : } else {
1090 63025 : last_tickets_id = space_info->tickets_id;
1091 63025 : flush_state = FLUSH_DELAYED_ITEMS_NR;
1092 63025 : if (commit_cycles)
1093 1189 : commit_cycles--;
1094 : }
1095 :
1096 : /*
1097 : * We do not want to empty the system of delalloc unless we're
1098 : * under heavy pressure, so allow one trip through the flushing
1099 : * logic before we start doing a FLUSH_DELALLOC_FULL.
1100 : */
1101 2621101 : if (flush_state == FLUSH_DELALLOC_FULL && !commit_cycles)
1102 53039 : flush_state++;
1103 :
1104 : /*
1105 : * We don't want to force a chunk allocation until we've tried
1106 : * pretty hard to reclaim space. Think of the case where we
1107 : * freed up a bunch of space and so have a lot of pinned space
1108 : * to reclaim. We would rather use that than possibly create an
1109 : * underutilized metadata chunk. So if this is our first run
1110 : * through the flushing state machine skip ALLOC_CHUNK_FORCE and
1111 : * commit the transaction. If nothing has changed the next go
1112 : * around then we can force a chunk allocation.
1113 : */
1114 2621101 : if (flush_state == ALLOC_CHUNK_FORCE && !commit_cycles)
1115 52966 : flush_state++;
1116 :
1117 2621101 : if (flush_state > COMMIT_TRANS) {
1118 69503 : commit_cycles++;
1119 69503 : if (commit_cycles > 2) {
1120 22772 : if (maybe_fail_all_tickets(fs_info, space_info)) {
1121 : flush_state = FLUSH_DELAYED_ITEMS_NR;
1122 : commit_cycles--;
1123 : } else {
1124 22748 : space_info->flush = 0;
1125 : }
1126 : } else {
1127 : flush_state = FLUSH_DELAYED_ITEMS_NR;
1128 : }
1129 : }
1130 2621101 : spin_unlock(&space_info->lock);
1131 2621101 : } while (flush_state <= COMMIT_TRANS);
1132 : }
1133 :
1134 : /*
1135 : * This handles pre-flushing of metadata space before we get to the point that
1136 : * we need to start blocking threads on tickets. The logic here is different
1137 : * from the other flush paths because it doesn't rely on tickets to tell us how
1138 : * much we need to flush; instead it attempts to keep us below the 80% full
1139 : * watermark of space by flushing whichever reservation pool is currently the
1140 : * largest.
1141 : */
1142 21769 : static void btrfs_preempt_reclaim_metadata_space(struct work_struct *work)
1143 : {
1144 21769 : struct btrfs_fs_info *fs_info;
1145 21769 : struct btrfs_space_info *space_info;
1146 21769 : struct btrfs_block_rsv *delayed_block_rsv;
1147 21769 : struct btrfs_block_rsv *delayed_refs_rsv;
1148 21769 : struct btrfs_block_rsv *global_rsv;
1149 21769 : struct btrfs_block_rsv *trans_rsv;
1150 21769 : int loops = 0;
1151 :
1152 21769 : fs_info = container_of(work, struct btrfs_fs_info,
1153 : preempt_reclaim_work);
1154 21769 : space_info = btrfs_find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
1155 21769 : delayed_block_rsv = &fs_info->delayed_block_rsv;
1156 21769 : delayed_refs_rsv = &fs_info->delayed_refs_rsv;
1157 21769 : global_rsv = &fs_info->global_block_rsv;
1158 21769 : trans_rsv = &fs_info->trans_block_rsv;
1159 :
1160 21769 : spin_lock(&space_info->lock);
1161 59686 : while (need_preemptive_reclaim(fs_info, space_info)) {
1162 37917 : enum btrfs_flush_state flush;
1163 37917 : u64 delalloc_size = 0;
1164 37917 : u64 to_reclaim, block_rsv_size;
1165 37917 : u64 global_rsv_size = global_rsv->reserved;
1166 :
1167 37917 : loops++;
1168 :
1169 : /*
1170 : * We don't have a precise counter for the metadata being
1171 : * reserved for delalloc, so we'll approximate it by subtracting
1172 : * out the block rsv's space from the bytes_may_use. If that
1173 : * amount is higher than the individual reserves, then we can
1174 : * assume it's tied up in delalloc reservations.
1175 : */
1176 37917 : block_rsv_size = global_rsv_size +
1177 37917 : delayed_block_rsv->reserved +
1178 37917 : delayed_refs_rsv->reserved +
1179 37917 : trans_rsv->reserved;
1180 37917 : if (block_rsv_size < space_info->bytes_may_use)
1181 36051 : delalloc_size = space_info->bytes_may_use - block_rsv_size;
1182 :
1183 : /*
1184 : * We don't want to include the global_rsv in our calculation,
1185 : * because that's space we can't touch. Subtract it from the
1186 : * block_rsv_size for the next checks.
1187 : */
1188 37917 : block_rsv_size -= global_rsv_size;
1189 :
1190 : /*
1191 : * We really want to avoid flushing delalloc too much, as it
1192 : * could result in poor allocation patterns, so only flush it if
1193 : * it's larger than the rest of the pools combined.
1194 : */
1195 37917 : if (delalloc_size > block_rsv_size) {
1196 : to_reclaim = delalloc_size;
1197 : flush = FLUSH_DELALLOC;
1198 36267 : } else if (space_info->bytes_pinned >
1199 36267 : (delayed_block_rsv->reserved +
1200 : delayed_refs_rsv->reserved)) {
1201 : to_reclaim = space_info->bytes_pinned;
1202 : flush = COMMIT_TRANS;
1203 36228 : } else if (delayed_block_rsv->reserved >
1204 : delayed_refs_rsv->reserved) {
1205 : to_reclaim = delayed_block_rsv->reserved;
1206 : flush = FLUSH_DELAYED_ITEMS_NR;
1207 : } else {
1208 36029 : to_reclaim = delayed_refs_rsv->reserved;
1209 36029 : flush = FLUSH_DELAYED_REFS_NR;
1210 : }
1211 :
1212 37917 : spin_unlock(&space_info->lock);
1213 :
1214 : /*
1215 : * We don't want to reclaim everything, just a portion, so scale
1216 : * down the to_reclaim to 1/4. If it takes us down to 0,
1217 : * reclaim 1 item's worth.
1218 : */
1219 37917 : to_reclaim >>= 2;
1220 37917 : if (!to_reclaim)
1221 0 : to_reclaim = btrfs_calc_insert_metadata_size(fs_info, 1);
1222 37917 : flush_space(fs_info, space_info, to_reclaim, flush, true);
1223 37917 : cond_resched();
1224 37917 : spin_lock(&space_info->lock);
1225 : }
1226 :
1227 : /* We only went through once, back off our clamping. */
1228 21769 : if (loops == 1 && !space_info->reclaim_size)
1229 15894 : space_info->clamp = max(1, space_info->clamp - 1);
1230 21769 : trace_btrfs_done_preemptive_reclaim(fs_info, space_info);
1231 21769 : spin_unlock(&space_info->lock);
1232 21769 : }
1233 :
1234 : /*
1235 : * FLUSH_DELALLOC_WAIT:
1236 : * Space is freed from flushing delalloc in one of two ways.
1237 : *
1238 : * 1) compression is on and we allocate less space than we reserved
1239 : * 2) we are overwriting existing space
1240 : *
1241 : * For #1 that extra space is reclaimed as soon as the delalloc pages are
1242 : * COWed, by way of btrfs_add_reserved_bytes() which adds the actual extent
1243 : * length to ->bytes_reserved, and subtracts the reserved space from
1244 : * ->bytes_may_use.
1245 : *
1246 : * For #2 this is trickier. Once the ordered extent runs we will drop the
1247 : * extent in the range we are overwriting, which creates a delayed ref for
1248 : * that freed extent. This however is not reclaimed until the transaction
1249 : * commits, thus the next stages.
1250 : *
1251 : * RUN_DELAYED_IPUTS
1252 : * If we are freeing inodes, we want to make sure all delayed iputs have
1253 : * completed, because they could have been on an inode with i_nlink == 0, and
1254 : * thus have been truncated and freed up space. But again this space is not
1255 : * immediately re-usable, it comes in the form of a delayed ref, which must be
1256 : * run and then the transaction must be committed.
1257 : *
1258 : * COMMIT_TRANS
1259 : * This is where we reclaim all of the pinned space generated by running the
1260 : * iputs.
1261 : *
1262 : * ALLOC_CHUNK_FORCE
1263 : * For data we start with alloc chunk force, however we could have been full
1264 : * before, and then the transaction commit could have freed new block groups,
1265 : * so if we now have space to allocate do the force chunk allocation.
1266 : */
1267 : static const enum btrfs_flush_state data_flush_states[] = {
1268 : FLUSH_DELALLOC_FULL,
1269 : RUN_DELAYED_IPUTS,
1270 : COMMIT_TRANS,
1271 : ALLOC_CHUNK_FORCE,
1272 : };
1273 :
1274 73033 : static void btrfs_async_reclaim_data_space(struct work_struct *work)
1275 : {
1276 73033 : struct btrfs_fs_info *fs_info;
1277 73033 : struct btrfs_space_info *space_info;
1278 73033 : u64 last_tickets_id;
1279 73033 : enum btrfs_flush_state flush_state = 0;
1280 :
1281 73033 : fs_info = container_of(work, struct btrfs_fs_info, async_data_reclaim_work);
1282 73033 : space_info = fs_info->data_sinfo;
1283 :
1284 73033 : spin_lock(&space_info->lock);
1285 73033 : if (list_empty(&space_info->tickets)) {
1286 33 : space_info->flush = 0;
1287 33 : spin_unlock(&space_info->lock);
1288 33 : return;
1289 : }
1290 73000 : last_tickets_id = space_info->tickets_id;
1291 73000 : spin_unlock(&space_info->lock);
1292 :
1293 73199 : while (!space_info->full) {
1294 670 : flush_space(fs_info, space_info, U64_MAX, ALLOC_CHUNK_FORCE, false);
1295 670 : spin_lock(&space_info->lock);
1296 670 : if (list_empty(&space_info->tickets)) {
1297 471 : space_info->flush = 0;
1298 471 : spin_unlock(&space_info->lock);
1299 471 : return;
1300 : }
1301 :
1302 : /* Something happened, fail everything and bail. */
1303 199 : if (BTRFS_FS_ERROR(fs_info))
1304 0 : goto aborted_fs;
1305 199 : last_tickets_id = space_info->tickets_id;
1306 199 : spin_unlock(&space_info->lock);
1307 : }
1308 :
1309 365476 : while (flush_state < ARRAY_SIZE(data_flush_states)) {
1310 294228 : flush_space(fs_info, space_info, U64_MAX,
1311 294228 : data_flush_states[flush_state], false);
1312 294228 : spin_lock(&space_info->lock);
1313 294228 : if (list_empty(&space_info->tickets)) {
1314 1281 : space_info->flush = 0;
1315 1281 : spin_unlock(&space_info->lock);
1316 1281 : return;
1317 : }
1318 :
1319 292947 : if (last_tickets_id == space_info->tickets_id) {
1320 291540 : flush_state++;
1321 : } else {
1322 : last_tickets_id = space_info->tickets_id;
1323 : flush_state = 0;
1324 : }
1325 :
1326 291540 : if (flush_state >= ARRAY_SIZE(data_flush_states)) {
1327 71896 : if (space_info->full) {
1328 71896 : if (maybe_fail_all_tickets(fs_info, space_info))
1329 : flush_state = 0;
1330 : else
1331 71248 : space_info->flush = 0;
1332 : } else {
1333 : flush_state = 0;
1334 : }
1335 :
1336 : /* Something happened, fail everything and bail. */
1337 71896 : if (BTRFS_FS_ERROR(fs_info))
1338 0 : goto aborted_fs;
1339 :
1340 : }
1341 292947 : spin_unlock(&space_info->lock);
1342 : }
1343 : return;
1344 :
1345 0 : aborted_fs:
1346 0 : maybe_fail_all_tickets(fs_info, space_info);
1347 0 : space_info->flush = 0;
1348 0 : spin_unlock(&space_info->lock);
1349 : }
1350 :
1351 3473 : void btrfs_init_async_reclaim_work(struct btrfs_fs_info *fs_info)
1352 : {
1353 3473 : INIT_WORK(&fs_info->async_reclaim_work, btrfs_async_reclaim_metadata_space);
1354 3473 : INIT_WORK(&fs_info->async_data_reclaim_work, btrfs_async_reclaim_data_space);
1355 3473 : INIT_WORK(&fs_info->preempt_reclaim_work,
1356 : btrfs_preempt_reclaim_metadata_space);
1357 3473 : }
1358 :
1359 : static const enum btrfs_flush_state priority_flush_states[] = {
1360 : FLUSH_DELAYED_ITEMS_NR,
1361 : FLUSH_DELAYED_ITEMS,
1362 : ALLOC_CHUNK,
1363 : };
1364 :
1365 : static const enum btrfs_flush_state evict_flush_states[] = {
1366 : FLUSH_DELAYED_ITEMS_NR,
1367 : FLUSH_DELAYED_ITEMS,
1368 : FLUSH_DELAYED_REFS_NR,
1369 : FLUSH_DELAYED_REFS,
1370 : FLUSH_DELALLOC,
1371 : FLUSH_DELALLOC_WAIT,
1372 : FLUSH_DELALLOC_FULL,
1373 : ALLOC_CHUNK,
1374 : COMMIT_TRANS,
1375 : };
1376 :
1377 5618 : static void priority_reclaim_metadata_space(struct btrfs_fs_info *fs_info,
1378 : struct btrfs_space_info *space_info,
1379 : struct reserve_ticket *ticket,
1380 : const enum btrfs_flush_state *states,
1381 : int states_nr)
1382 : {
1383 5618 : u64 to_reclaim;
1384 5618 : int flush_state = 0;
1385 :
1386 5618 : spin_lock(&space_info->lock);
1387 5618 : to_reclaim = btrfs_calc_reclaim_metadata_size(fs_info, space_info);
1388 : /*
1389 : * This is the priority reclaim path, so to_reclaim could be >0 still
1390 : * because we may have only satisfied the priority tickets and still
1391 : * left non-priority tickets on the list. We would then have
1392 : * to_reclaim but ->bytes == 0.
1393 : */
1394 5618 : if (ticket->bytes == 0) {
1395 7 : spin_unlock(&space_info->lock);
1396 7 : return;
1397 : }
1398 :
1399 20528 : while (flush_state < states_nr) {
1400 20528 : spin_unlock(&space_info->lock);
1401 20528 : flush_space(fs_info, space_info, to_reclaim, states[flush_state],
1402 : false);
1403 20523 : flush_state++;
1404 20523 : spin_lock(&space_info->lock);
1405 20528 : if (ticket->bytes == 0) {
1406 5611 : spin_unlock(&space_info->lock);
1407 5611 : return;
1408 : }
1409 : }
1410 :
1411 : /* Attempt to steal from the global rsv if we can. */
1412 0 : if (!steal_from_global_rsv(fs_info, space_info, ticket)) {
1413 0 : ticket->error = -ENOSPC;
1414 0 : remove_ticket(space_info, ticket);
1415 : }
1416 :
1417 : /*
1418 : * We must run try_granting_tickets here because we could be a large
1419 : * ticket in front of a smaller ticket that can now be satisfied with
1420 : * the available space.
1421 : */
1422 0 : btrfs_try_granting_tickets(fs_info, space_info);
1423 0 : spin_unlock(&space_info->lock);
1424 : }
1425 :
1426 0 : static void priority_reclaim_data_space(struct btrfs_fs_info *fs_info,
1427 : struct btrfs_space_info *space_info,
1428 : struct reserve_ticket *ticket)
1429 : {
1430 0 : spin_lock(&space_info->lock);
1431 :
1432 : /* We could have been granted before we got here. */
1433 0 : if (ticket->bytes == 0) {
1434 0 : spin_unlock(&space_info->lock);
1435 0 : return;
1436 : }
1437 :
1438 0 : while (!space_info->full) {
1439 0 : spin_unlock(&space_info->lock);
1440 0 : flush_space(fs_info, space_info, U64_MAX, ALLOC_CHUNK_FORCE, false);
1441 0 : spin_lock(&space_info->lock);
1442 0 : if (ticket->bytes == 0) {
1443 0 : spin_unlock(&space_info->lock);
1444 0 : return;
1445 : }
1446 : }
1447 :
1448 0 : ticket->error = -ENOSPC;
1449 0 : remove_ticket(space_info, ticket);
1450 0 : btrfs_try_granting_tickets(fs_info, space_info);
1451 0 : spin_unlock(&space_info->lock);
1452 : }
1453 :
1454 2602858 : static void wait_reserve_ticket(struct btrfs_fs_info *fs_info,
1455 : struct btrfs_space_info *space_info,
1456 : struct reserve_ticket *ticket)
1457 :
1458 : {
1459 2602858 : DEFINE_WAIT(wait);
1460 2602858 : int ret = 0;
1461 :
1462 2602858 : spin_lock(&space_info->lock);
1463 5025291 : while (ticket->bytes > 0 && ticket->error == 0) {
1464 2422323 : ret = prepare_to_wait_event(&ticket->wait, &wait, TASK_KILLABLE);
1465 2422322 : if (ret) {
1466 : /*
1467 : * Delete us from the list. After we unlock the space
1468 : * info, we don't want the async reclaim job to reserve
1469 : * space for this ticket. If that would happen, then the
1470 : * ticket's task would not known that space was reserved
1471 : * ticket's task would not know that space was reserved
1472 : * (bytes_may_use counter of our space_info).
1473 : */
1474 124 : remove_ticket(space_info, ticket);
1475 124 : ticket->error = -EINTR;
1476 124 : break;
1477 : }
1478 2422198 : spin_unlock(&space_info->lock);
1479 :
1480 2422193 : schedule();
1481 :
1482 2419268 : finish_wait(&ticket->wait, &wait);
1483 2412798 : spin_lock(&space_info->lock);
1484 : }
1485 2603092 : spin_unlock(&space_info->lock);
1486 2602944 : }
1487 :
1488 : /*
1489 : * Do the appropriate flushing and waiting for a ticket.
1490 : *
1491 : * @fs_info: the filesystem
1492 : * @space_info: space info for the reservation
1493 : * @ticket: ticket for the reservation
1494 : * @start_ns: timestamp when the reservation started
1495 : * @orig_bytes: amount of bytes originally reserved
1496 :  * @flush:      how we are allowed to flush
1497 : *
1498 : * This does the work of figuring out how to flush for the ticket, waiting for
1499 : * the reservation, and returning the appropriate error if there is one.
1500 : */
1501 2608501 : static int handle_reserve_ticket(struct btrfs_fs_info *fs_info,
1502 : struct btrfs_space_info *space_info,
1503 : struct reserve_ticket *ticket,
1504 : u64 start_ns, u64 orig_bytes,
1505 : enum btrfs_reserve_flush_enum flush)
1506 : {
1507 2608501 : int ret;
1508 :
1509 2608501 : switch (flush) {
1510 2602883 : case BTRFS_RESERVE_FLUSH_DATA:
1511 : case BTRFS_RESERVE_FLUSH_ALL:
1512 : case BTRFS_RESERVE_FLUSH_ALL_STEAL:
1513 2602883 : wait_reserve_ticket(fs_info, space_info, ticket);
1514 2602883 : break;
1515 0 : case BTRFS_RESERVE_FLUSH_LIMIT:
1516 0 : priority_reclaim_metadata_space(fs_info, space_info, ticket,
1517 : priority_flush_states,
1518 : ARRAY_SIZE(priority_flush_states));
1519 0 : break;
1520 5618 : case BTRFS_RESERVE_FLUSH_EVICT:
1521 5618 : priority_reclaim_metadata_space(fs_info, space_info, ticket,
1522 : evict_flush_states,
1523 : ARRAY_SIZE(evict_flush_states));
1524 5618 : break;
1525 0 : case BTRFS_RESERVE_FLUSH_FREE_SPACE_INODE:
1526 0 : priority_reclaim_data_space(fs_info, space_info, ticket);
1527 0 : break;
1528 : default:
1529 : ASSERT(0);
1530 : break;
1531 : }
1532 :
1533 2608501 : ret = ticket->error;
1534 2608501 : ASSERT(list_empty(&ticket->list));
1535 : /*
1536 : * Check that we can't have an error set if the reservation succeeded,
1537 : * as that would confuse tasks and lead them to error out without
1538 : * releasing reserved space (if an error happens the expectation is that
1539 : * space wasn't reserved at all).
1540 : */
1541 2608501 : ASSERT(!(ticket->bytes == 0 && ticket->error));
1542 2608501 : trace_btrfs_reserve_ticket(fs_info, space_info->flags, orig_bytes,
1543 : start_ns, flush, ticket->error);
1544 2608295 : return ret;
1545 : }
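     : /*
     :  * Editor's note: a condensed map of the dispatch above. The first
     :  * three modes sleep in wait_reserve_ticket() and rely on the async
     :  * reclaim workers; the others flush synchronously from this task:
     :  *
     :  *	FLUSH_DATA / FLUSH_ALL / FLUSH_ALL_STEAL -> wait_reserve_ticket()
     :  *	FLUSH_LIMIT            -> priority reclaim, priority_flush_states
     :  *	FLUSH_EVICT            -> priority reclaim, evict_flush_states
     :  *	FLUSH_FREE_SPACE_INODE -> priority_reclaim_data_space()
     :  */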
1546 :
1547 : /*
1548 :  * This returns true if this flush mode will go through the ordinary flushing
1549 :  * code.
1550 : */
1551 : static inline bool is_normal_flushing(enum btrfs_reserve_flush_enum flush)
1552 : {
1553 118847584 : return (flush == BTRFS_RESERVE_FLUSH_ALL) ||
1554 : (flush == BTRFS_RESERVE_FLUSH_ALL_STEAL);
1555 : }
1556 :
1557 1226437 : static inline void maybe_clamp_preempt(struct btrfs_fs_info *fs_info,
1558 : struct btrfs_space_info *space_info)
1559 : {
1560 1226437 : u64 ordered = percpu_counter_sum_positive(&fs_info->ordered_bytes);
1561 1226437 : u64 delalloc = percpu_counter_sum_positive(&fs_info->delalloc_bytes);
1562 :
1563 : /*
1564 : * If we're heavy on ordered operations then clamping won't help us. We
1565 : * need to clamp specifically to keep up with dirty'ing buffered
1566 :  * need to clamp specifically to keep up with dirtying buffered
1567 : * and freeing space, like there is with flushing delayed refs or
1568 : * delayed nodes. If we're already more ordered than delalloc then
1569 : * we're keeping up, otherwise we aren't and should probably clamp.
1570 : */
1571 1226437 : if (ordered < delalloc)
1572 61088 : space_info->clamp = min(space_info->clamp + 1, 8);
1573 1226437 : }
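     : /*
     :  * Editor's note: a worked example of the heuristic above. With, say,
     :  * 1GiB of outstanding delalloc but only 256MiB of ordered extents,
     :  * writeback is not keeping up with dirtying, ordered < delalloc holds,
     :  * and clamp is bumped by one (capped at 8). need_preemptive_reclaim()
     :  * uses the clamp to shrink its threshold, so a larger clamp makes
     :  * preemptive background flushing start earlier.
     :  */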
1574 :
1575 : static inline bool can_steal(enum btrfs_reserve_flush_enum flush)
1576 : {
1577 2608695 : return (flush == BTRFS_RESERVE_FLUSH_ALL_STEAL ||
1578 2608695 : flush == BTRFS_RESERVE_FLUSH_EVICT);
1579 : }
1580 :
1581 : /*
1582 :  * NO_FLUSH and FLUSH_EMERGENCY don't want to create a ticket; they just
1583 :  * want to fail as quickly as possible.
1584 : */
1585 : static inline bool can_ticket(enum btrfs_reserve_flush_enum flush)
1586 : {
1587 5753113 : return (flush != BTRFS_RESERVE_NO_FLUSH &&
1588 5753113 : flush != BTRFS_RESERVE_FLUSH_EMERGENCY);
1589 : }
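     : /*
     :  * Editor's note: how the three helpers above plus the ticket setup in
     :  * __reserve_bytes() classify each flush mode (derived from this file):
     :  *
     :  *	mode			ticket?	list		can steal?
     :  *	NO_FLUSH		no	-		no
     :  *	FLUSH_EMERGENCY		no	-		no
     :  *	FLUSH_ALL		yes	tickets		no
     :  *	FLUSH_ALL_STEAL		yes	tickets		yes
     :  *	FLUSH_DATA		yes	tickets		no
     :  *	FLUSH_LIMIT		yes	priority	no
     :  *	FLUSH_EVICT		yes	priority	yes
     :  *	FLUSH_FREE_SPACE_INODE	yes	priority	no
     :  */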
1590 :
1591 : /*
1592 :  * Try to reserve bytes from the given space_info.
1593 : *
1594 : * @fs_info: the filesystem
1595 : * @space_info: space info we want to allocate from
1596 : * @orig_bytes: number of bytes we want
1597 : * @flush: whether or not we can flush to make our reservation
1598 : *
1599 :  * This will reserve orig_bytes number of bytes from the given space_info.
1600 :  * If there is not enough space it will make an attempt to flush out space
1601 :  * to make room. It will do this by flushing delalloc if possible or
1602 :  * committing the transaction. If flush is BTRFS_RESERVE_NO_FLUSH then no
1603 :  * attempts to regain reservations will be made and this will fail if there
1604 :  * is not enough space already.
1605 : */
1606 118548104 : static int __reserve_bytes(struct btrfs_fs_info *fs_info,
1607 : struct btrfs_space_info *space_info, u64 orig_bytes,
1608 : enum btrfs_reserve_flush_enum flush)
1609 : {
1610 118548104 : struct work_struct *async_work;
1611 118548104 : struct reserve_ticket ticket;
1612 118548104 : u64 start_ns = 0;
1613 118548104 : u64 used;
1614 118548104 : int ret = -ENOSPC;
1615 118548104 : bool pending_tickets;
1616 :
1617 118548104 : ASSERT(orig_bytes);
1618 : /*
1619 :  * If we have a transaction handle (current->journal_info != NULL), then
1620 :  * the flush method can be neither BTRFS_RESERVE_FLUSH_ALL* nor
1621 :  * BTRFS_RESERVE_FLUSH_EVICT, as we could deadlock because those
1622 :  * flushing methods can trigger transaction commits.
1623 : */
1624 118548104 : if (current->journal_info) {
1625 : /* One assert per line for easier debugging. */
1626 118548104 : ASSERT(flush != BTRFS_RESERVE_FLUSH_ALL);
1627 118548104 : ASSERT(flush != BTRFS_RESERVE_FLUSH_ALL_STEAL);
1628 118548104 : ASSERT(flush != BTRFS_RESERVE_FLUSH_EVICT);
1629 : }
1630 :
1631 118548104 : if (flush == BTRFS_RESERVE_FLUSH_DATA)
1632 40110825 : async_work = &fs_info->async_data_reclaim_work;
1633 : else
1634 78437279 : async_work = &fs_info->async_reclaim_work;
1635 :
1636 118548104 : spin_lock(&space_info->lock);
1637 118847584 : used = btrfs_space_info_used(space_info, true);
1638 :
1639 : /*
1640 :  * We don't want NO_FLUSH allocations to jump everybody; they can
1641 : * generally handle ENOSPC in a different way, so treat them the same as
1642 : * normal flushers when it comes to skipping pending tickets.
1643 : */
1644 118847584 : if (is_normal_flushing(flush) || (flush == BTRFS_RESERVE_NO_FLUSH))
1645 74942972 : pending_tickets = !list_empty(&space_info->tickets) ||
1646 73732945 : !list_empty(&space_info->priority_tickets);
1647 : else
1648 43905130 : pending_tickets = !list_empty(&space_info->priority_tickets);
1649 :
1650 : /*
1651 :  * Carry on if we have enough space (short-circuit) OR call
1652 :  * btrfs_can_overcommit() to ensure we can overcommit to continue.
1653 : */
1654 118847584 : if (!pending_tickets &&
1655 155960331 : ((used + orig_bytes <= space_info->total_bytes) ||
1656 38349526 : btrfs_can_overcommit(fs_info, space_info, orig_bytes, flush))) {
1657 115944661 : btrfs_space_info_update_bytes_may_use(fs_info, space_info,
1658 : orig_bytes);
1659 115944661 : ret = 0;
1660 : }
1661 :
1662 : /*
1663 :  * Things are dire: we need to make a reservation so we don't abort. We
1664 : * will let this reservation go through as long as we have actual space
1665 : * left to allocate for the block.
1666 : */
1667 118847578 : if (ret && unlikely(flush == BTRFS_RESERVE_FLUSH_EMERGENCY)) {
1668 0 : used = btrfs_space_info_used(space_info, false);
1669 0 : if (used + orig_bytes <= space_info->total_bytes) {
1670 0 : btrfs_space_info_update_bytes_may_use(fs_info, space_info,
1671 : orig_bytes);
1672 0 : ret = 0;
1673 : }
1674 : }
1675 :
1676 : /*
1677 :  * If we couldn't make a reservation then set up our reservation ticket
1678 : * and kick the async worker if it's not already running.
1679 : *
1680 : * If we are a priority flusher then we just need to add our ticket to
1681 : * the list and we will do our own flushing further down.
1682 : */
1683 118828213 : if (ret && can_ticket(flush)) {
1684 2608696 : ticket.bytes = orig_bytes;
1685 2608696 : ticket.error = 0;
1686 2608696 : space_info->reclaim_size += ticket.bytes;
1687 2608696 : init_waitqueue_head(&ticket.wait);
1688 2608695 : ticket.steal = can_steal(flush);
1689 2608695 : if (trace_btrfs_reserve_ticket_enabled())
1690 0 : start_ns = ktime_get_ns();
1691 :
1692 2608701 : if (flush == BTRFS_RESERVE_FLUSH_ALL ||
1693 2608701 : flush == BTRFS_RESERVE_FLUSH_ALL_STEAL ||
1694 2608701 : flush == BTRFS_RESERVE_FLUSH_DATA) {
1695 2603083 : list_add_tail(&ticket.list, &space_info->tickets);
1696 2603087 : if (!space_info->flush) {
1697 : /*
1698 : * We were forced to add a reserve ticket, so
1699 : * our preemptive flushing is unable to keep
1700 : * up. Clamp down on the threshold for the
1701 : * preemptive flushing in order to keep up with
1702 : * the workload.
1703 : */
1704 1226437 : maybe_clamp_preempt(fs_info, space_info);
1705 :
1706 1226437 : space_info->flush = 1;
1707 1226437 : trace_btrfs_trigger_flush(fs_info,
1708 : space_info->flags,
1709 : orig_bytes, flush,
1710 : "enospc");
1711 1226437 : queue_work(system_unbound_wq, async_work);
1712 : }
1713 : } else {
1714 5618 : list_add_tail(&ticket.list,
1715 : &space_info->priority_tickets);
1716 : }
1717 116219517 : } else if (!ret && space_info->flags & BTRFS_BLOCK_GROUP_METADATA) {
1718 : /*
1719 : * We will do the space reservation dance during log replay,
1720 : * which means we won't have fs_info->fs_root set, so don't do
1721 : * the async reclaim as we will panic.
1722 : */
1723 153126187 : if (!test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags) &&
1724 138472985 : !work_busy(&fs_info->preempt_reclaim_work) &&
1725 61913042 : need_preemptive_reclaim(fs_info, space_info)) {
1726 21769 : trace_btrfs_trigger_flush(fs_info, space_info->flags,
1727 : orig_bytes, flush, "preempt");
1728 21769 : queue_work(system_unbound_wq,
1729 : &fs_info->preempt_reclaim_work);
1730 : }
1731 : }
1732 118828232 : spin_unlock(&space_info->lock);
1733 118823563 : if (!ret || !can_ticket(flush))
1734 : return ret;
1735 :
1736 2608701 : return handle_reserve_ticket(fs_info, space_info, &ticket, start_ns,
1737 : orig_bytes, flush);
1738 : }
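     : /*
     :  * Editor's note: a condensed sketch (not compilable as-is) of the
     :  * fast path above when there are no pending tickets; the field names
     :  * are the space_info bytes_ counters summed by btrfs_space_info_used():
     :  *
     :  *	used = bytes_used + bytes_reserved + bytes_pinned +
     :  *	       bytes_readonly + bytes_zone_unusable + bytes_may_use;
     :  *	if (used + orig_bytes <= total_bytes ||
     :  *	    btrfs_can_overcommit(fs_info, space_info, orig_bytes, flush)) {
     :  *		bytes_may_use += orig_bytes;	/* success, ret = 0 */
     :  *	} else {
     :  *		/* ticket + flushing, or -ENOSPC if !can_ticket(flush) */
     :  *	}
     :  */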
1739 :
1740 : /*
1741 : * Try to reserve metadata bytes from the block_rsv's space.
1742 : *
1743 : * @fs_info: the filesystem
1744 : * @block_rsv: block_rsv we're allocating for
1745 : * @orig_bytes: number of bytes we want
1746 : * @flush: whether or not we can flush to make our reservation
1747 : *
1748 : * This will reserve orig_bytes number of bytes from the space info associated
1749 : * with the block_rsv. If there is not enough space it will make an attempt to
1750 : * flush out space to make room. It will do this by flushing delalloc if
1751 :  * possible or committing the transaction. If flush is BTRFS_RESERVE_NO_FLUSH
1752 :  * then no attempts to regain reservations will be made and this will fail
1753 :  * if there is not enough space already.
1754 : */
1755 78429804 : int btrfs_reserve_metadata_bytes(struct btrfs_fs_info *fs_info,
1756 : struct btrfs_block_rsv *block_rsv,
1757 : u64 orig_bytes,
1758 : enum btrfs_reserve_flush_enum flush)
1759 : {
1760 78429804 : int ret;
1761 :
1762 78429804 : ret = __reserve_bytes(fs_info, block_rsv->space_info, orig_bytes, flush);
1763 78514380 : if (ret == -ENOSPC) {
1764 290670 : trace_btrfs_space_reservation(fs_info, "space_info:enospc",
1765 290670 : block_rsv->space_info->flags,
1766 : orig_bytes, 1);
1767 :
1768 290587 : if (btrfs_test_opt(fs_info, ENOSPC_DEBUG))
1769 0 : btrfs_dump_space_info(fs_info, block_rsv->space_info,
1770 : orig_bytes, 0);
1771 : }
1772 78514297 : return ret;
1773 : }
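     : /*
     :  * Editor's note: a hypothetical caller sketch; apart from the reserve
     :  * call itself, the sizing helper and item count are illustrative:
     :  *
     :  *	u64 num_bytes = btrfs_calc_insert_metadata_size(fs_info, 1);
     :  *	int ret;
     :  *
     :  *	ret = btrfs_reserve_metadata_bytes(fs_info, block_rsv, num_bytes,
     :  *					   BTRFS_RESERVE_FLUSH_ALL);
     :  *	if (ret)
     :  *		return ret;	/* on error nothing was reserved */
     :  */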
1774 :
1775 : /*
1776 : * Try to reserve data bytes for an allocation.
1777 : *
1778 : * @fs_info: the filesystem
1779 : * @bytes: number of bytes we need
1780 : * @flush: how we are allowed to flush
1781 : *
1782 : * This will reserve bytes from the data space info. If there is not enough
1783 : * space then we will attempt to flush space as specified by flush.
1784 : */
1785 40100102 : int btrfs_reserve_data_bytes(struct btrfs_fs_info *fs_info, u64 bytes,
1786 : enum btrfs_reserve_flush_enum flush)
1787 : {
1788 40100102 : struct btrfs_space_info *data_sinfo = fs_info->data_sinfo;
1789 40100102 : int ret;
1790 :
1791 40100102 : ASSERT(flush == BTRFS_RESERVE_FLUSH_DATA ||
1792 : flush == BTRFS_RESERVE_FLUSH_FREE_SPACE_INODE ||
1793 : flush == BTRFS_RESERVE_NO_FLUSH);
1794 40100102 : ASSERT(!current->journal_info || flush != BTRFS_RESERVE_FLUSH_DATA);
1795 :
1796 40100102 : ret = __reserve_bytes(fs_info, data_sinfo, bytes, flush);
1797 40331684 : if (ret == -ENOSPC) {
1798 107709 : trace_btrfs_space_reservation(fs_info, "space_info:enospc",
1799 : data_sinfo->flags, bytes, 1);
1800 107681 : if (btrfs_test_opt(fs_info, ENOSPC_DEBUG))
1801 0 : btrfs_dump_space_info(fs_info, data_sinfo, bytes, 0);
1802 : }
1803 40331656 : return ret;
1804 : }
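     : /*
     :  * Editor's note: a hypothetical caller sketch. Real callers (e.g. the
     :  * delalloc code) round the length to sectorsize granularity before
     :  * reserving:
     :  *
     :  *	u64 bytes = round_up(len, fs_info->sectorsize);
     :  *	int ret;
     :  *
     :  *	ret = btrfs_reserve_data_bytes(fs_info, bytes,
     :  *				       BTRFS_RESERVE_FLUSH_DATA);
     :  */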
1805 :
1806 : /* Dump all the space infos when we abort a transaction due to ENOSPC. */
1807 0 : __cold void btrfs_dump_space_info_for_trans_abort(struct btrfs_fs_info *fs_info)
1808 : {
1809 0 : struct btrfs_space_info *space_info;
1810 :
1811 0 : btrfs_info(fs_info, "dumping space info:");
1812 0 : list_for_each_entry(space_info, &fs_info->space_info, list) {
1813 0 : spin_lock(&space_info->lock);
1814 0 : __btrfs_dump_space_info(fs_info, space_info);
1815 0 : spin_unlock(&space_info->lock);
1816 : }
1817 0 : dump_global_block_rsv(fs_info);
1818 0 : }
1819 :
1820 : /*
1821 :  * Account the unused space of all the readonly block groups in the
1822 :  * space_info. Takes mirrors into account.
1823 : */
1824 2617924 : u64 btrfs_account_ro_block_groups_free_space(struct btrfs_space_info *sinfo)
1825 : {
1826 2617924 : struct btrfs_block_group *block_group;
1827 2617924 : u64 free_bytes = 0;
1828 2617924 : int factor;
1829 :
1830 : /* It's df, we don't care if it's racy */
1831 2617924 : if (list_empty(&sinfo->ro_bgs))
1832 : return 0;
1833 :
1834 76 : spin_lock(&sinfo->lock);
1835 152 : list_for_each_entry(block_group, &sinfo->ro_bgs, ro_list) {
1836 76 : spin_lock(&block_group->lock);
1837 :
1838 76 : if (!block_group->ro) {
1839 0 : spin_unlock(&block_group->lock);
1840 0 : continue;
1841 : }
1842 :
1843 76 : factor = btrfs_bg_type_to_factor(block_group->flags);
1844 76 : free_bytes += (block_group->length -
1845 76 : block_group->used) * factor;
1846 :
1847 76 : spin_unlock(&block_group->lock);
1848 : }
1849 76 : spin_unlock(&sinfo->lock);
1850 :
1851 76 : return free_bytes;
1852 : }
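     : /*
     :  * Editor's note: a worked example of the mirror factor above. For a
     :  * read-only RAID1 block group with length 1GiB and 256MiB used,
     :  * btrfs_bg_type_to_factor() returns 2, so the unused space reported is
     :  * (1GiB - 256MiB) * 2 = 1536MiB of raw device capacity.
     :  */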