Line data Source code
1 : // SPDX-License-Identifier: GPL-2.0
2 : /*
3 : * Copyright (C) 2008 Oracle. All rights reserved.
4 : */
5 :
6 : #include <linux/sched.h>
7 : #include <linux/slab.h>
8 : #include <linux/blkdev.h>
9 : #include <linux/list_sort.h>
10 : #include <linux/iversion.h>
11 : #include "misc.h"
12 : #include "ctree.h"
13 : #include "tree-log.h"
14 : #include "disk-io.h"
15 : #include "locking.h"
16 : #include "print-tree.h"
17 : #include "backref.h"
18 : #include "compression.h"
19 : #include "qgroup.h"
20 : #include "block-group.h"
21 : #include "space-info.h"
22 : #include "zoned.h"
23 : #include "inode-item.h"
24 : #include "fs.h"
25 : #include "accessors.h"
26 : #include "extent-tree.h"
27 : #include "root-tree.h"
28 : #include "dir-item.h"
29 : #include "file-item.h"
30 : #include "file.h"
31 : #include "orphan.h"
32 : #include "tree-checker.h"
33 :
34 : #define MAX_CONFLICT_INODES 10
35 :
36 : /* magic values for the inode_only field in btrfs_log_inode:
37 : *
38 : * LOG_INODE_ALL means to log everything
39 : * LOG_INODE_EXISTS means to log just enough to recreate the inode
40 : * during log replay
41 : */
42 : enum {
43 : LOG_INODE_ALL,
44 : LOG_INODE_EXISTS,
45 : };
46 :
47 : /*
48 : * directory trouble cases
49 : *
50 : * 1) on rename or unlink, if the inode being unlinked isn't in the fsync
51 : * log, we must force a full commit before doing an fsync of the directory
52 : * where the unlink was done.
53 : * ---> record transid of last unlink/rename per directory
54 : *
55 : * mkdir foo/some_dir
56 : * normal commit
57 : * rename foo/some_dir foo2/some_dir
58 : * mkdir foo/some_dir
59 : * fsync foo/some_dir/some_file
60 : *
61 : * The fsync above will unlink the original some_dir without recording
62 : * it in its new location (foo2). After a crash, some_dir will be gone
63 : * unless the fsync of some_file forces a full commit
64 : *
65 : * 2) we must log any new names for any file or dir that is in the fsync
66 : * log. ---> check inode while renaming/linking.
67 : *
68 : * 2a) we must log any new names for any file or dir during rename
69 : * when the directory they are being removed from was logged.
70 : * ---> check inode and old parent dir during rename
71 : *
72 : * 2a is actually the more important variant. With the extra logging
73 : * a crash might unlink the old name without recreating the new one
74 : *
75 : * 3) after a crash, we must go through any directories with a link count
76 : * of zero and redo the rm -rf
77 : *
78 : * mkdir f1/foo
79 : * normal commit
80 : * rm -rf f1/foo
81 : * fsync(f1)
82 : *
83 : * The directory f1 was fully removed from the FS, but fsync was never
84 : * called on f1, only its parent dir. After a crash the rm -rf must
85 : * be replayed. This must be able to recurse down the entire
86 : * directory tree. The inode link count fixup code takes care of the
87 : * ugly details.
88 : */
89 :
90 : /*
91 : * stages for the tree walking. The first
92 : * stage (0) is to only pin down the blocks we find
93 : * the second stage (1) is to make sure that all the inodes
94 : * we find in the log are created in the subvolume.
95 : *
96 : * The last stage is to deal with directories and links and extents
97 : * and all the other fun semantics
98 : */
99 : enum {
100 : LOG_WALK_PIN_ONLY,
101 : LOG_WALK_REPLAY_INODES,
102 : LOG_WALK_REPLAY_DIR_INDEX,
103 : LOG_WALK_REPLAY_ALL,
104 : };
105 :
106 : static int btrfs_log_inode(struct btrfs_trans_handle *trans,
107 : struct btrfs_inode *inode,
108 : int inode_only,
109 : struct btrfs_log_ctx *ctx);
110 : static int link_to_fixup_dir(struct btrfs_trans_handle *trans,
111 : struct btrfs_root *root,
112 : struct btrfs_path *path, u64 objectid);
113 : static noinline int replay_dir_deletes(struct btrfs_trans_handle *trans,
114 : struct btrfs_root *root,
115 : struct btrfs_root *log,
116 : struct btrfs_path *path,
117 : u64 dirid, int del_all);
118 : static void wait_log_commit(struct btrfs_root *root, int transid);
119 :
120 : /*
121 : * tree logging is a special write ahead log used to make sure that
122 : * fsyncs and O_SYNCs can happen without doing full tree commits.
123 : *
124 : * Full tree commits are expensive because they require commonly
125 : * modified blocks to be recowed, creating many dirty pages in the
126 : * extent tree an 4x-6x higher write load than ext3.
127 : *
128 : * Instead of doing a tree commit on every fsync, we use the
129 : * key ranges and transaction ids to find items for a given file or directory
130 : * that have changed in this transaction. Those items are copied into
131 : * a special tree (one per subvolume root), that tree is written to disk
132 : * and then the fsync is considered complete.
133 : *
134 : * After a crash, items are copied out of the log-tree back into the
135 : * subvolume tree. Any file data extents found are recorded in the extent
136 : * allocation tree, and the log-tree freed.
137 : *
138 : * The log tree is read three times, once to pin down all the extents it is
139 : * using in ram and once, once to create all the inodes logged in the tree
140 : * and once to do all the other items.
141 : */
142 :
143 : /*
144 : * start a sub transaction and setup the log tree
145 : * this increments the log tree writer count to make the people
146 : * syncing the tree wait for us to finish
147 : */
148 0 : static int start_log_trans(struct btrfs_trans_handle *trans,
149 : struct btrfs_root *root,
150 : struct btrfs_log_ctx *ctx)
151 : {
152 0 : struct btrfs_fs_info *fs_info = root->fs_info;
153 0 : struct btrfs_root *tree_root = fs_info->tree_root;
154 0 : const bool zoned = btrfs_is_zoned(fs_info);
155 0 : int ret = 0;
156 0 : bool created = false;
157 :
158 : /*
159 : * First check if the log root tree was already created. If not, create
160 : * it before locking the root's log_mutex, just to keep lockdep happy.
161 : */
162 0 : if (!test_bit(BTRFS_ROOT_HAS_LOG_TREE, &tree_root->state)) {
163 0 : mutex_lock(&tree_root->log_mutex);
164 0 : if (!fs_info->log_root_tree) {
165 0 : ret = btrfs_init_log_root_tree(trans, fs_info);
166 0 : if (!ret) {
167 0 : set_bit(BTRFS_ROOT_HAS_LOG_TREE, &tree_root->state);
168 : created = true;
169 : }
170 : }
171 0 : mutex_unlock(&tree_root->log_mutex);
172 0 : if (ret)
173 : return ret;
174 : }
175 :
176 0 : mutex_lock(&root->log_mutex);
177 :
178 0 : again:
179 0 : if (root->log_root) {
180 0 : int index = (root->log_transid + 1) % 2;
181 :
182 0 : if (btrfs_need_log_full_commit(trans)) {
183 0 : ret = BTRFS_LOG_FORCE_COMMIT;
184 0 : goto out;
185 : }
186 :
187 0 : if (zoned && atomic_read(&root->log_commit[index])) {
188 0 : wait_log_commit(root, root->log_transid - 1);
189 0 : goto again;
190 : }
191 :
192 0 : if (!root->log_start_pid) {
193 0 : clear_bit(BTRFS_ROOT_MULTI_LOG_TASKS, &root->state);
194 0 : root->log_start_pid = current->pid;
195 0 : } else if (root->log_start_pid != current->pid) {
196 0 : set_bit(BTRFS_ROOT_MULTI_LOG_TASKS, &root->state);
197 : }
198 : } else {
199 : /*
200 : * This means fs_info->log_root_tree was already created
201 : * for some other FS trees. Do the full commit not to mix
202 : * nodes from multiple log transactions to do sequential
203 : * writing.
204 : */
205 0 : if (zoned && !created) {
206 0 : ret = BTRFS_LOG_FORCE_COMMIT;
207 0 : goto out;
208 : }
209 :
210 0 : ret = btrfs_add_log_tree(trans, root);
211 0 : if (ret)
212 0 : goto out;
213 :
214 0 : set_bit(BTRFS_ROOT_HAS_LOG_TREE, &root->state);
215 0 : clear_bit(BTRFS_ROOT_MULTI_LOG_TASKS, &root->state);
216 0 : root->log_start_pid = current->pid;
217 : }
218 :
219 0 : atomic_inc(&root->log_writers);
220 0 : if (!ctx->logging_new_name) {
221 0 : int index = root->log_transid % 2;
222 0 : list_add_tail(&ctx->list, &root->log_ctxs[index]);
223 0 : ctx->log_transid = root->log_transid;
224 : }
225 :
226 0 : out:
227 0 : mutex_unlock(&root->log_mutex);
228 0 : return ret;
229 : }
230 :
231 : /*
232 : * returns 0 if there was a log transaction running and we were able
233 : * to join, or returns -ENOENT if there were not transactions
234 : * in progress
235 : */
236 0 : static int join_running_log_trans(struct btrfs_root *root)
237 : {
238 0 : const bool zoned = btrfs_is_zoned(root->fs_info);
239 0 : int ret = -ENOENT;
240 :
241 0 : if (!test_bit(BTRFS_ROOT_HAS_LOG_TREE, &root->state))
242 : return ret;
243 :
244 0 : mutex_lock(&root->log_mutex);
245 0 : again:
246 0 : if (root->log_root) {
247 0 : int index = (root->log_transid + 1) % 2;
248 :
249 0 : ret = 0;
250 0 : if (zoned && atomic_read(&root->log_commit[index])) {
251 0 : wait_log_commit(root, root->log_transid - 1);
252 0 : goto again;
253 : }
254 0 : atomic_inc(&root->log_writers);
255 : }
256 0 : mutex_unlock(&root->log_mutex);
257 0 : return ret;
258 : }
259 :
260 : /*
261 : * This either makes the current running log transaction wait
262 : * until you call btrfs_end_log_trans() or it makes any future
263 : * log transactions wait until you call btrfs_end_log_trans()
264 : */
265 0 : void btrfs_pin_log_trans(struct btrfs_root *root)
266 : {
267 0 : atomic_inc(&root->log_writers);
268 0 : }
269 :
270 : /*
271 : * indicate we're done making changes to the log tree
272 : * and wake up anyone waiting to do a sync
273 : */
274 0 : void btrfs_end_log_trans(struct btrfs_root *root)
275 : {
276 0 : if (atomic_dec_and_test(&root->log_writers)) {
277 : /* atomic_dec_and_test implies a barrier */
278 0 : cond_wake_up_nomb(&root->log_writer_wait);
279 : }
280 0 : }
281 :
282 : /*
283 : * the walk control struct is used to pass state down the chain when
284 : * processing the log tree. The stage field tells us which part
285 : * of the log tree processing we are currently doing. The others
286 : * are state fields used for that specific part
287 : */
288 : struct walk_control {
289 : /* should we free the extent on disk when done? This is used
290 : * at transaction commit time while freeing a log tree
291 : */
292 : int free;
293 :
294 : /* pin only walk, we record which extents on disk belong to the
295 : * log trees
296 : */
297 : int pin;
298 :
299 : /* what stage of the replay code we're currently in */
300 : int stage;
301 :
302 : /*
303 : * Ignore any items from the inode currently being processed. Needs
304 : * to be set every time we find a BTRFS_INODE_ITEM_KEY and we are in
305 : * the LOG_WALK_REPLAY_INODES stage.
306 : */
307 : bool ignore_cur_inode;
308 :
309 : /* the root we are currently replaying */
310 : struct btrfs_root *replay_dest;
311 :
312 : /* the trans handle for the current replay */
313 : struct btrfs_trans_handle *trans;
314 :
315 : /* the function that gets used to process blocks we find in the
316 : * tree. Note the extent_buffer might not be up to date when it is
317 : * passed in, and it must be checked or read if you need the data
318 : * inside it
319 : */
320 : int (*process_func)(struct btrfs_root *log, struct extent_buffer *eb,
321 : struct walk_control *wc, u64 gen, int level);
322 : };
323 :
324 : /*
325 : * process_func used to pin down extents, write them or wait on them
326 : */
327 0 : static int process_one_buffer(struct btrfs_root *log,
328 : struct extent_buffer *eb,
329 : struct walk_control *wc, u64 gen, int level)
330 : {
331 0 : struct btrfs_fs_info *fs_info = log->fs_info;
332 0 : int ret = 0;
333 :
334 : /*
335 : * If this fs is mixed then we need to be able to process the leaves to
336 : * pin down any logged extents, so we have to read the block.
337 : */
338 0 : if (btrfs_fs_incompat(fs_info, MIXED_GROUPS)) {
339 0 : struct btrfs_tree_parent_check check = {
340 : .level = level,
341 : .transid = gen
342 : };
343 :
344 0 : ret = btrfs_read_extent_buffer(eb, &check);
345 0 : if (ret)
346 0 : return ret;
347 : }
348 :
349 0 : if (wc->pin) {
350 0 : ret = btrfs_pin_extent_for_log_replay(wc->trans, eb->start,
351 0 : eb->len);
352 0 : if (ret)
353 : return ret;
354 :
355 0 : if (btrfs_buffer_uptodate(eb, gen, 0) &&
356 : btrfs_header_level(eb) == 0)
357 0 : ret = btrfs_exclude_logged_extents(eb);
358 : }
359 : return ret;
360 : }
361 :
362 : /*
363 : * Item overwrite used by replay and tree logging. eb, slot and key all refer
364 : * to the src data we are copying out.
365 : *
366 : * root is the tree we are copying into, and path is a scratch
367 : * path for use in this function (it should be released on entry and
368 : * will be released on exit).
369 : *
370 : * If the key is already in the destination tree the existing item is
371 : * overwritten. If the existing item isn't big enough, it is extended.
372 : * If it is too large, it is truncated.
373 : *
374 : * If the key isn't in the destination yet, a new item is inserted.
375 : */
376 0 : static int overwrite_item(struct btrfs_trans_handle *trans,
377 : struct btrfs_root *root,
378 : struct btrfs_path *path,
379 : struct extent_buffer *eb, int slot,
380 : struct btrfs_key *key)
381 : {
382 0 : int ret;
383 0 : u32 item_size;
384 0 : u64 saved_i_size = 0;
385 0 : int save_old_i_size = 0;
386 0 : unsigned long src_ptr;
387 0 : unsigned long dst_ptr;
388 0 : bool inode_item = key->type == BTRFS_INODE_ITEM_KEY;
389 :
390 : /*
391 : * This is only used during log replay, so the root is always from a
392 : * fs/subvolume tree. In case we ever need to support a log root, then
393 : * we'll have to clone the leaf in the path, release the path and use
394 : * the leaf before writing into the log tree. See the comments at
395 : * copy_items() for more details.
396 : */
397 0 : ASSERT(root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID);
398 :
399 0 : item_size = btrfs_item_size(eb, slot);
400 0 : src_ptr = btrfs_item_ptr_offset(eb, slot);
401 :
402 : /* Look for the key in the destination tree. */
403 0 : ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
404 0 : if (ret < 0)
405 : return ret;
406 :
407 0 : if (ret == 0) {
408 0 : char *src_copy;
409 0 : char *dst_copy;
410 0 : u32 dst_size = btrfs_item_size(path->nodes[0],
411 : path->slots[0]);
412 0 : if (dst_size != item_size)
413 0 : goto insert;
414 :
415 0 : if (item_size == 0) {
416 0 : btrfs_release_path(path);
417 0 : return 0;
418 : }
419 0 : dst_copy = kmalloc(item_size, GFP_NOFS);
420 0 : src_copy = kmalloc(item_size, GFP_NOFS);
421 0 : if (!dst_copy || !src_copy) {
422 0 : btrfs_release_path(path);
423 0 : kfree(dst_copy);
424 0 : kfree(src_copy);
425 0 : return -ENOMEM;
426 : }
427 :
428 0 : read_extent_buffer(eb, src_copy, src_ptr, item_size);
429 :
430 0 : dst_ptr = btrfs_item_ptr_offset(path->nodes[0], path->slots[0]);
431 0 : read_extent_buffer(path->nodes[0], dst_copy, dst_ptr,
432 : item_size);
433 0 : ret = memcmp(dst_copy, src_copy, item_size);
434 :
435 0 : kfree(dst_copy);
436 0 : kfree(src_copy);
437 : /*
438 : * they have the same contents, just return, this saves
439 : * us from cowing blocks in the destination tree and doing
440 : * extra writes that may not have been done by a previous
441 : * sync
442 : */
443 0 : if (ret == 0) {
444 0 : btrfs_release_path(path);
445 0 : return 0;
446 : }
447 :
448 : /*
449 : * We need to load the old nbytes into the inode so when we
450 : * replay the extents we've logged we get the right nbytes.
451 : */
452 0 : if (inode_item) {
453 0 : struct btrfs_inode_item *item;
454 0 : u64 nbytes;
455 0 : u32 mode;
456 :
457 0 : item = btrfs_item_ptr(path->nodes[0], path->slots[0],
458 : struct btrfs_inode_item);
459 0 : nbytes = btrfs_inode_nbytes(path->nodes[0], item);
460 0 : item = btrfs_item_ptr(eb, slot,
461 : struct btrfs_inode_item);
462 0 : btrfs_set_inode_nbytes(eb, item, nbytes);
463 :
464 : /*
465 : * If this is a directory we need to reset the i_size to
466 : * 0 so that we can set it up properly when replaying
467 : * the rest of the items in this log.
468 : */
469 0 : mode = btrfs_inode_mode(eb, item);
470 0 : if (S_ISDIR(mode))
471 0 : btrfs_set_inode_size(eb, item, 0);
472 : }
473 0 : } else if (inode_item) {
474 0 : struct btrfs_inode_item *item;
475 0 : u32 mode;
476 :
477 : /*
478 : * New inode, set nbytes to 0 so that the nbytes comes out
479 : * properly when we replay the extents.
480 : */
481 0 : item = btrfs_item_ptr(eb, slot, struct btrfs_inode_item);
482 0 : btrfs_set_inode_nbytes(eb, item, 0);
483 :
484 : /*
485 : * If this is a directory we need to reset the i_size to 0 so
486 : * that we can set it up properly when replaying the rest of
487 : * the items in this log.
488 : */
489 0 : mode = btrfs_inode_mode(eb, item);
490 0 : if (S_ISDIR(mode))
491 0 : btrfs_set_inode_size(eb, item, 0);
492 : }
493 0 : insert:
494 0 : btrfs_release_path(path);
495 : /* try to insert the key into the destination tree */
496 0 : path->skip_release_on_error = 1;
497 0 : ret = btrfs_insert_empty_item(trans, root, path,
498 : key, item_size);
499 0 : path->skip_release_on_error = 0;
500 :
501 : /* make sure any existing item is the correct size */
502 0 : if (ret == -EEXIST || ret == -EOVERFLOW) {
503 0 : u32 found_size;
504 0 : found_size = btrfs_item_size(path->nodes[0],
505 : path->slots[0]);
506 0 : if (found_size > item_size)
507 0 : btrfs_truncate_item(path, item_size, 1);
508 0 : else if (found_size < item_size)
509 0 : btrfs_extend_item(path, item_size - found_size);
510 0 : } else if (ret) {
511 : return ret;
512 : }
513 0 : dst_ptr = btrfs_item_ptr_offset(path->nodes[0],
514 : path->slots[0]);
515 :
516 : /* don't overwrite an existing inode if the generation number
517 : * was logged as zero. This is done when the tree logging code
518 : * is just logging an inode to make sure it exists after recovery.
519 : *
520 : * Also, don't overwrite i_size on directories during replay.
521 : * log replay inserts and removes directory items based on the
522 : * state of the tree found in the subvolume, and i_size is modified
523 : * as it goes
524 : */
525 0 : if (key->type == BTRFS_INODE_ITEM_KEY && ret == -EEXIST) {
526 0 : struct btrfs_inode_item *src_item;
527 0 : struct btrfs_inode_item *dst_item;
528 :
529 0 : src_item = (struct btrfs_inode_item *)src_ptr;
530 0 : dst_item = (struct btrfs_inode_item *)dst_ptr;
531 :
532 0 : if (btrfs_inode_generation(eb, src_item) == 0) {
533 0 : struct extent_buffer *dst_eb = path->nodes[0];
534 0 : const u64 ino_size = btrfs_inode_size(eb, src_item);
535 :
536 : /*
537 : * For regular files an ino_size == 0 is used only when
538 : * logging that an inode exists, as part of a directory
539 : * fsync, and the inode wasn't fsynced before. In this
540 : * case don't set the size of the inode in the fs/subvol
541 : * tree, otherwise we would be throwing valid data away.
542 : */
543 0 : if (S_ISREG(btrfs_inode_mode(eb, src_item)) &&
544 0 : S_ISREG(btrfs_inode_mode(dst_eb, dst_item)) &&
545 : ino_size != 0)
546 0 : btrfs_set_inode_size(dst_eb, dst_item, ino_size);
547 0 : goto no_copy;
548 : }
549 :
550 0 : if (S_ISDIR(btrfs_inode_mode(eb, src_item)) &&
551 0 : S_ISDIR(btrfs_inode_mode(path->nodes[0], dst_item))) {
552 0 : save_old_i_size = 1;
553 0 : saved_i_size = btrfs_inode_size(path->nodes[0],
554 : dst_item);
555 : }
556 : }
557 :
558 0 : copy_extent_buffer(path->nodes[0], eb, dst_ptr,
559 : src_ptr, item_size);
560 :
561 0 : if (save_old_i_size) {
562 0 : struct btrfs_inode_item *dst_item;
563 0 : dst_item = (struct btrfs_inode_item *)dst_ptr;
564 0 : btrfs_set_inode_size(path->nodes[0], dst_item, saved_i_size);
565 : }
566 :
567 : /* make sure the generation is filled in */
568 0 : if (key->type == BTRFS_INODE_ITEM_KEY) {
569 0 : struct btrfs_inode_item *dst_item;
570 0 : dst_item = (struct btrfs_inode_item *)dst_ptr;
571 0 : if (btrfs_inode_generation(path->nodes[0], dst_item) == 0) {
572 0 : btrfs_set_inode_generation(path->nodes[0], dst_item,
573 : trans->transid);
574 : }
575 : }
576 0 : no_copy:
577 0 : btrfs_mark_buffer_dirty(path->nodes[0]);
578 0 : btrfs_release_path(path);
579 0 : return 0;
580 : }
581 :
582 0 : static int read_alloc_one_name(struct extent_buffer *eb, void *start, int len,
583 : struct fscrypt_str *name)
584 : {
585 0 : char *buf;
586 :
587 0 : buf = kmalloc(len, GFP_NOFS);
588 0 : if (!buf)
589 : return -ENOMEM;
590 :
591 0 : read_extent_buffer(eb, buf, (unsigned long)start, len);
592 0 : name->name = buf;
593 0 : name->len = len;
594 0 : return 0;
595 : }
596 :
597 : /*
598 : * simple helper to read an inode off the disk from a given root
599 : * This can only be called for subvolume roots and not for the log
600 : */
601 0 : static noinline struct inode *read_one_inode(struct btrfs_root *root,
602 : u64 objectid)
603 : {
604 0 : struct inode *inode;
605 :
606 0 : inode = btrfs_iget(root->fs_info->sb, objectid, root);
607 0 : if (IS_ERR(inode))
608 0 : inode = NULL;
609 0 : return inode;
610 : }
611 :
612 : /* replays a single extent in 'eb' at 'slot' with 'key' into the
613 : * subvolume 'root'. path is released on entry and should be released
614 : * on exit.
615 : *
616 : * extents in the log tree have not been allocated out of the extent
617 : * tree yet. So, this completes the allocation, taking a reference
618 : * as required if the extent already exists or creating a new extent
619 : * if it isn't in the extent allocation tree yet.
620 : *
621 : * The extent is inserted into the file, dropping any existing extents
622 : * from the file that overlap the new one.
623 : */
624 0 : static noinline int replay_one_extent(struct btrfs_trans_handle *trans,
625 : struct btrfs_root *root,
626 : struct btrfs_path *path,
627 : struct extent_buffer *eb, int slot,
628 : struct btrfs_key *key)
629 : {
630 0 : struct btrfs_drop_extents_args drop_args = { 0 };
631 0 : struct btrfs_fs_info *fs_info = root->fs_info;
632 0 : int found_type;
633 0 : u64 extent_end;
634 0 : u64 start = key->offset;
635 0 : u64 nbytes = 0;
636 0 : struct btrfs_file_extent_item *item;
637 0 : struct inode *inode = NULL;
638 0 : unsigned long size;
639 0 : int ret = 0;
640 :
641 0 : item = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
642 0 : found_type = btrfs_file_extent_type(eb, item);
643 :
644 0 : if (found_type == BTRFS_FILE_EXTENT_REG ||
645 : found_type == BTRFS_FILE_EXTENT_PREALLOC) {
646 0 : nbytes = btrfs_file_extent_num_bytes(eb, item);
647 0 : extent_end = start + nbytes;
648 :
649 : /*
650 : * We don't add to the inodes nbytes if we are prealloc or a
651 : * hole.
652 : */
653 0 : if (btrfs_file_extent_disk_bytenr(eb, item) == 0)
654 0 : nbytes = 0;
655 0 : } else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
656 0 : size = btrfs_file_extent_ram_bytes(eb, item);
657 0 : nbytes = btrfs_file_extent_ram_bytes(eb, item);
658 0 : extent_end = ALIGN(start + size,
659 : fs_info->sectorsize);
660 : } else {
661 0 : ret = 0;
662 0 : goto out;
663 : }
664 :
665 0 : inode = read_one_inode(root, key->objectid);
666 0 : if (!inode) {
667 0 : ret = -EIO;
668 0 : goto out;
669 : }
670 :
671 : /*
672 : * first check to see if we already have this extent in the
673 : * file. This must be done before the btrfs_drop_extents run
674 : * so we don't try to drop this extent.
675 : */
676 0 : ret = btrfs_lookup_file_extent(trans, root, path,
677 : btrfs_ino(BTRFS_I(inode)), start, 0);
678 :
679 0 : if (ret == 0 &&
680 : (found_type == BTRFS_FILE_EXTENT_REG ||
681 : found_type == BTRFS_FILE_EXTENT_PREALLOC)) {
682 0 : struct btrfs_file_extent_item cmp1;
683 0 : struct btrfs_file_extent_item cmp2;
684 0 : struct btrfs_file_extent_item *existing;
685 0 : struct extent_buffer *leaf;
686 :
687 0 : leaf = path->nodes[0];
688 0 : existing = btrfs_item_ptr(leaf, path->slots[0],
689 : struct btrfs_file_extent_item);
690 :
691 0 : read_extent_buffer(eb, &cmp1, (unsigned long)item,
692 : sizeof(cmp1));
693 0 : read_extent_buffer(leaf, &cmp2, (unsigned long)existing,
694 : sizeof(cmp2));
695 :
696 : /*
697 : * we already have a pointer to this exact extent,
698 : * we don't have to do anything
699 : */
700 0 : if (memcmp(&cmp1, &cmp2, sizeof(cmp1)) == 0) {
701 0 : btrfs_release_path(path);
702 0 : goto out;
703 : }
704 : }
705 0 : btrfs_release_path(path);
706 :
707 : /* drop any overlapping extents */
708 0 : drop_args.start = start;
709 0 : drop_args.end = extent_end;
710 0 : drop_args.drop_cache = true;
711 0 : ret = btrfs_drop_extents(trans, root, BTRFS_I(inode), &drop_args);
712 0 : if (ret)
713 0 : goto out;
714 :
715 0 : if (found_type == BTRFS_FILE_EXTENT_REG ||
716 : found_type == BTRFS_FILE_EXTENT_PREALLOC) {
717 0 : u64 offset;
718 0 : unsigned long dest_offset;
719 0 : struct btrfs_key ins;
720 :
721 0 : if (btrfs_file_extent_disk_bytenr(eb, item) == 0 &&
722 0 : btrfs_fs_incompat(fs_info, NO_HOLES))
723 0 : goto update_inode;
724 :
725 0 : ret = btrfs_insert_empty_item(trans, root, path, key,
726 : sizeof(*item));
727 0 : if (ret)
728 0 : goto out;
729 0 : dest_offset = btrfs_item_ptr_offset(path->nodes[0],
730 : path->slots[0]);
731 0 : copy_extent_buffer(path->nodes[0], eb, dest_offset,
732 : (unsigned long)item, sizeof(*item));
733 :
734 0 : ins.objectid = btrfs_file_extent_disk_bytenr(eb, item);
735 0 : ins.offset = btrfs_file_extent_disk_num_bytes(eb, item);
736 0 : ins.type = BTRFS_EXTENT_ITEM_KEY;
737 0 : offset = key->offset - btrfs_file_extent_offset(eb, item);
738 :
739 : /*
740 : * Manually record dirty extent, as here we did a shallow
741 : * file extent item copy and skip normal backref update,
742 : * but modifying extent tree all by ourselves.
743 : * So need to manually record dirty extent for qgroup,
744 : * as the owner of the file extent changed from log tree
745 : * (doesn't affect qgroup) to fs/file tree(affects qgroup)
746 : */
747 0 : ret = btrfs_qgroup_trace_extent(trans,
748 : btrfs_file_extent_disk_bytenr(eb, item),
749 : btrfs_file_extent_disk_num_bytes(eb, item));
750 0 : if (ret < 0)
751 0 : goto out;
752 :
753 0 : if (ins.objectid > 0) {
754 0 : struct btrfs_ref ref = { 0 };
755 0 : u64 csum_start;
756 0 : u64 csum_end;
757 0 : LIST_HEAD(ordered_sums);
758 :
759 : /*
760 : * is this extent already allocated in the extent
761 : * allocation tree? If so, just add a reference
762 : */
763 0 : ret = btrfs_lookup_data_extent(fs_info, ins.objectid,
764 : ins.offset);
765 0 : if (ret < 0) {
766 0 : goto out;
767 0 : } else if (ret == 0) {
768 0 : btrfs_init_generic_ref(&ref,
769 : BTRFS_ADD_DELAYED_REF,
770 : ins.objectid, ins.offset, 0);
771 0 : btrfs_init_data_ref(&ref,
772 : root->root_key.objectid,
773 : key->objectid, offset, 0, false);
774 0 : ret = btrfs_inc_extent_ref(trans, &ref);
775 0 : if (ret)
776 0 : goto out;
777 : } else {
778 : /*
779 : * insert the extent pointer in the extent
780 : * allocation tree
781 : */
782 0 : ret = btrfs_alloc_logged_file_extent(trans,
783 : root->root_key.objectid,
784 : key->objectid, offset, &ins);
785 0 : if (ret)
786 0 : goto out;
787 : }
788 0 : btrfs_release_path(path);
789 :
790 0 : if (btrfs_file_extent_compression(eb, item)) {
791 0 : csum_start = ins.objectid;
792 0 : csum_end = csum_start + ins.offset;
793 : } else {
794 0 : csum_start = ins.objectid +
795 : btrfs_file_extent_offset(eb, item);
796 0 : csum_end = csum_start +
797 : btrfs_file_extent_num_bytes(eb, item);
798 : }
799 :
800 0 : ret = btrfs_lookup_csums_list(root->log_root,
801 : csum_start, csum_end - 1,
802 : &ordered_sums, 0, false);
803 0 : if (ret)
804 0 : goto out;
805 : /*
806 : * Now delete all existing cums in the csum root that
807 : * cover our range. We do this because we can have an
808 : * extent that is completely referenced by one file
809 : * extent item and partially referenced by another
810 : * file extent item (like after using the clone or
811 : * extent_same ioctls). In this case if we end up doing
812 : * the replay of the one that partially references the
813 : * extent first, and we do not do the csum deletion
814 : * below, we can get 2 csum items in the csum tree that
815 : * overlap each other. For example, imagine our log has
816 : * the two following file extent items:
817 : *
818 : * key (257 EXTENT_DATA 409600)
819 : * extent data disk byte 12845056 nr 102400
820 : * extent data offset 20480 nr 20480 ram 102400
821 : *
822 : * key (257 EXTENT_DATA 819200)
823 : * extent data disk byte 12845056 nr 102400
824 : * extent data offset 0 nr 102400 ram 102400
825 : *
826 : * Where the second one fully references the 100K extent
827 : * that starts at disk byte 12845056, and the log tree
828 : * has a single csum item that covers the entire range
829 : * of the extent:
830 : *
831 : * key (EXTENT_CSUM EXTENT_CSUM 12845056) itemsize 100
832 : *
833 : * After the first file extent item is replayed, the
834 : * csum tree gets the following csum item:
835 : *
836 : * key (EXTENT_CSUM EXTENT_CSUM 12865536) itemsize 20
837 : *
838 : * Which covers the 20K sub-range starting at offset 20K
839 : * of our extent. Now when we replay the second file
840 : * extent item, if we do not delete existing csum items
841 : * that cover any of its blocks, we end up getting two
842 : * csum items in our csum tree that overlap each other:
843 : *
844 : * key (EXTENT_CSUM EXTENT_CSUM 12845056) itemsize 100
845 : * key (EXTENT_CSUM EXTENT_CSUM 12865536) itemsize 20
846 : *
847 : * Which is a problem, because after this anyone trying
848 : * to lookup up for the checksum of any block of our
849 : * extent starting at an offset of 40K or higher, will
850 : * end up looking at the second csum item only, which
851 : * does not contain the checksum for any block starting
852 : * at offset 40K or higher of our extent.
853 : */
854 0 : while (!list_empty(&ordered_sums)) {
855 0 : struct btrfs_ordered_sum *sums;
856 0 : struct btrfs_root *csum_root;
857 :
858 0 : sums = list_entry(ordered_sums.next,
859 : struct btrfs_ordered_sum,
860 : list);
861 0 : csum_root = btrfs_csum_root(fs_info,
862 : sums->logical);
863 0 : if (!ret)
864 0 : ret = btrfs_del_csums(trans, csum_root,
865 : sums->logical,
866 0 : sums->len);
867 0 : if (!ret)
868 0 : ret = btrfs_csum_file_blocks(trans,
869 : csum_root,
870 : sums);
871 0 : list_del(&sums->list);
872 0 : kfree(sums);
873 : }
874 0 : if (ret)
875 0 : goto out;
876 : } else {
877 0 : btrfs_release_path(path);
878 : }
879 0 : } else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
880 : /* inline extents are easy, we just overwrite them */
881 0 : ret = overwrite_item(trans, root, path, eb, slot, key);
882 0 : if (ret)
883 0 : goto out;
884 : }
885 :
886 0 : ret = btrfs_inode_set_file_extent_range(BTRFS_I(inode), start,
887 : extent_end - start);
888 0 : if (ret)
889 0 : goto out;
890 :
891 0 : update_inode:
892 0 : btrfs_update_inode_bytes(BTRFS_I(inode), nbytes, drop_args.bytes_found);
893 0 : ret = btrfs_update_inode(trans, root, BTRFS_I(inode));
894 0 : out:
895 0 : iput(inode);
896 0 : return ret;
897 : }
898 :
899 0 : static int unlink_inode_for_log_replay(struct btrfs_trans_handle *trans,
900 : struct btrfs_inode *dir,
901 : struct btrfs_inode *inode,
902 : const struct fscrypt_str *name)
903 : {
904 0 : int ret;
905 :
906 0 : ret = btrfs_unlink_inode(trans, dir, inode, name);
907 0 : if (ret)
908 : return ret;
909 : /*
910 : * Whenever we need to check if a name exists or not, we check the
911 : * fs/subvolume tree. So after an unlink we must run delayed items, so
912 : * that future checks for a name during log replay see that the name
913 : * does not exists anymore.
914 : */
915 0 : return btrfs_run_delayed_items(trans);
916 : }
917 :
918 : /*
919 : * when cleaning up conflicts between the directory names in the
920 : * subvolume, directory names in the log and directory names in the
921 : * inode back references, we may have to unlink inodes from directories.
922 : *
923 : * This is a helper function to do the unlink of a specific directory
924 : * item
925 : */
926 0 : static noinline int drop_one_dir_item(struct btrfs_trans_handle *trans,
927 : struct btrfs_path *path,
928 : struct btrfs_inode *dir,
929 : struct btrfs_dir_item *di)
930 : {
931 0 : struct btrfs_root *root = dir->root;
932 0 : struct inode *inode;
933 0 : struct fscrypt_str name;
934 0 : struct extent_buffer *leaf;
935 0 : struct btrfs_key location;
936 0 : int ret;
937 :
938 0 : leaf = path->nodes[0];
939 :
940 0 : btrfs_dir_item_key_to_cpu(leaf, di, &location);
941 0 : ret = read_alloc_one_name(leaf, di + 1, btrfs_dir_name_len(leaf, di), &name);
942 0 : if (ret)
943 : return -ENOMEM;
944 :
945 0 : btrfs_release_path(path);
946 :
947 0 : inode = read_one_inode(root, location.objectid);
948 0 : if (!inode) {
949 0 : ret = -EIO;
950 0 : goto out;
951 : }
952 :
953 0 : ret = link_to_fixup_dir(trans, root, path, location.objectid);
954 0 : if (ret)
955 0 : goto out;
956 :
957 0 : ret = unlink_inode_for_log_replay(trans, dir, BTRFS_I(inode), &name);
958 0 : out:
959 0 : kfree(name.name);
960 0 : iput(inode);
961 0 : return ret;
962 : }
963 :
964 : /*
965 : * See if a given name and sequence number found in an inode back reference are
966 : * already in a directory and correctly point to this inode.
967 : *
968 : * Returns: < 0 on error, 0 if the directory entry does not exists and 1 if it
969 : * exists.
970 : */
971 0 : static noinline int inode_in_dir(struct btrfs_root *root,
972 : struct btrfs_path *path,
973 : u64 dirid, u64 objectid, u64 index,
974 : struct fscrypt_str *name)
975 : {
976 0 : struct btrfs_dir_item *di;
977 0 : struct btrfs_key location;
978 0 : int ret = 0;
979 :
980 0 : di = btrfs_lookup_dir_index_item(NULL, root, path, dirid,
981 : index, name, 0);
982 0 : if (IS_ERR(di)) {
983 0 : ret = PTR_ERR(di);
984 0 : goto out;
985 0 : } else if (di) {
986 0 : btrfs_dir_item_key_to_cpu(path->nodes[0], di, &location);
987 0 : if (location.objectid != objectid)
988 0 : goto out;
989 : } else {
990 0 : goto out;
991 : }
992 :
993 0 : btrfs_release_path(path);
994 0 : di = btrfs_lookup_dir_item(NULL, root, path, dirid, name, 0);
995 0 : if (IS_ERR(di)) {
996 0 : ret = PTR_ERR(di);
997 0 : goto out;
998 0 : } else if (di) {
999 0 : btrfs_dir_item_key_to_cpu(path->nodes[0], di, &location);
1000 0 : if (location.objectid == objectid)
1001 0 : ret = 1;
1002 : }
1003 0 : out:
1004 0 : btrfs_release_path(path);
1005 0 : return ret;
1006 : }
1007 :
1008 : /*
1009 : * helper function to check a log tree for a named back reference in
1010 : * an inode. This is used to decide if a back reference that is
1011 : * found in the subvolume conflicts with what we find in the log.
1012 : *
1013 : * inode backreferences may have multiple refs in a single item,
1014 : * during replay we process one reference at a time, and we don't
1015 : * want to delete valid links to a file from the subvolume if that
1016 : * link is also in the log.
1017 : */
1018 0 : static noinline int backref_in_log(struct btrfs_root *log,
1019 : struct btrfs_key *key,
1020 : u64 ref_objectid,
1021 : const struct fscrypt_str *name)
1022 : {
1023 0 : struct btrfs_path *path;
1024 0 : int ret;
1025 :
1026 0 : path = btrfs_alloc_path();
1027 0 : if (!path)
1028 : return -ENOMEM;
1029 :
1030 0 : ret = btrfs_search_slot(NULL, log, key, path, 0, 0);
1031 0 : if (ret < 0) {
1032 0 : goto out;
1033 0 : } else if (ret == 1) {
1034 0 : ret = 0;
1035 0 : goto out;
1036 : }
1037 :
1038 0 : if (key->type == BTRFS_INODE_EXTREF_KEY)
1039 0 : ret = !!btrfs_find_name_in_ext_backref(path->nodes[0],
1040 : path->slots[0],
1041 : ref_objectid, name);
1042 : else
1043 0 : ret = !!btrfs_find_name_in_backref(path->nodes[0],
1044 : path->slots[0], name);
1045 0 : out:
1046 0 : btrfs_free_path(path);
1047 0 : return ret;
1048 : }
1049 :
1050 0 : static inline int __add_inode_ref(struct btrfs_trans_handle *trans,
1051 : struct btrfs_root *root,
1052 : struct btrfs_path *path,
1053 : struct btrfs_root *log_root,
1054 : struct btrfs_inode *dir,
1055 : struct btrfs_inode *inode,
1056 : u64 inode_objectid, u64 parent_objectid,
1057 : u64 ref_index, struct fscrypt_str *name)
1058 : {
1059 0 : int ret;
1060 0 : struct extent_buffer *leaf;
1061 0 : struct btrfs_dir_item *di;
1062 0 : struct btrfs_key search_key;
1063 0 : struct btrfs_inode_extref *extref;
1064 :
1065 : again:
1066 : /* Search old style refs */
1067 0 : search_key.objectid = inode_objectid;
1068 0 : search_key.type = BTRFS_INODE_REF_KEY;
1069 0 : search_key.offset = parent_objectid;
1070 0 : ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0);
1071 0 : if (ret == 0) {
1072 0 : struct btrfs_inode_ref *victim_ref;
1073 0 : unsigned long ptr;
1074 0 : unsigned long ptr_end;
1075 :
1076 0 : leaf = path->nodes[0];
1077 :
1078 : /* are we trying to overwrite a back ref for the root directory
1079 : * if so, just jump out, we're done
1080 : */
1081 0 : if (search_key.objectid == search_key.offset)
1082 : return 1;
1083 :
1084 : /* check all the names in this back reference to see
1085 : * if they are in the log. if so, we allow them to stay
1086 : * otherwise they must be unlinked as a conflict
1087 : */
1088 0 : ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
1089 0 : ptr_end = ptr + btrfs_item_size(leaf, path->slots[0]);
1090 0 : while (ptr < ptr_end) {
1091 0 : struct fscrypt_str victim_name;
1092 :
1093 0 : victim_ref = (struct btrfs_inode_ref *)ptr;
1094 0 : ret = read_alloc_one_name(leaf, (victim_ref + 1),
1095 : btrfs_inode_ref_name_len(leaf, victim_ref),
1096 : &victim_name);
1097 0 : if (ret)
1098 0 : return ret;
1099 :
1100 0 : ret = backref_in_log(log_root, &search_key,
1101 : parent_objectid, &victim_name);
1102 0 : if (ret < 0) {
1103 0 : kfree(victim_name.name);
1104 0 : return ret;
1105 0 : } else if (!ret) {
1106 0 : inc_nlink(&inode->vfs_inode);
1107 0 : btrfs_release_path(path);
1108 :
1109 0 : ret = unlink_inode_for_log_replay(trans, dir, inode,
1110 : &victim_name);
1111 0 : kfree(victim_name.name);
1112 0 : if (ret)
1113 0 : return ret;
1114 0 : goto again;
1115 : }
1116 0 : kfree(victim_name.name);
1117 :
1118 0 : ptr = (unsigned long)(victim_ref + 1) + victim_name.len;
1119 : }
1120 : }
1121 0 : btrfs_release_path(path);
1122 :
1123 : /* Same search but for extended refs */
1124 0 : extref = btrfs_lookup_inode_extref(NULL, root, path, name,
1125 : inode_objectid, parent_objectid, 0,
1126 : 0);
1127 0 : if (IS_ERR(extref)) {
1128 0 : return PTR_ERR(extref);
1129 0 : } else if (extref) {
1130 0 : u32 item_size;
1131 0 : u32 cur_offset = 0;
1132 0 : unsigned long base;
1133 0 : struct inode *victim_parent;
1134 :
1135 0 : leaf = path->nodes[0];
1136 :
1137 0 : item_size = btrfs_item_size(leaf, path->slots[0]);
1138 0 : base = btrfs_item_ptr_offset(leaf, path->slots[0]);
1139 :
1140 0 : while (cur_offset < item_size) {
1141 0 : struct fscrypt_str victim_name;
1142 :
1143 0 : extref = (struct btrfs_inode_extref *)(base + cur_offset);
1144 :
1145 0 : if (btrfs_inode_extref_parent(leaf, extref) != parent_objectid)
1146 0 : goto next;
1147 :
1148 0 : ret = read_alloc_one_name(leaf, &extref->name,
1149 : btrfs_inode_extref_name_len(leaf, extref),
1150 : &victim_name);
1151 0 : if (ret)
1152 0 : return ret;
1153 :
1154 0 : search_key.objectid = inode_objectid;
1155 0 : search_key.type = BTRFS_INODE_EXTREF_KEY;
1156 0 : search_key.offset = btrfs_extref_hash(parent_objectid,
1157 0 : victim_name.name,
1158 0 : victim_name.len);
1159 0 : ret = backref_in_log(log_root, &search_key,
1160 : parent_objectid, &victim_name);
1161 0 : if (ret < 0) {
1162 0 : kfree(victim_name.name);
1163 0 : return ret;
1164 0 : } else if (!ret) {
1165 0 : ret = -ENOENT;
1166 0 : victim_parent = read_one_inode(root,
1167 : parent_objectid);
1168 0 : if (victim_parent) {
1169 0 : inc_nlink(&inode->vfs_inode);
1170 0 : btrfs_release_path(path);
1171 :
1172 0 : ret = unlink_inode_for_log_replay(trans,
1173 : BTRFS_I(victim_parent),
1174 : inode, &victim_name);
1175 : }
1176 0 : iput(victim_parent);
1177 0 : kfree(victim_name.name);
1178 0 : if (ret)
1179 0 : return ret;
1180 0 : goto again;
1181 : }
1182 0 : kfree(victim_name.name);
1183 0 : next:
1184 0 : cur_offset += victim_name.len + sizeof(*extref);
1185 : }
1186 : }
1187 0 : btrfs_release_path(path);
1188 :
1189 : /* look for a conflicting sequence number */
1190 0 : di = btrfs_lookup_dir_index_item(trans, root, path, btrfs_ino(dir),
1191 : ref_index, name, 0);
1192 0 : if (IS_ERR(di)) {
1193 0 : return PTR_ERR(di);
1194 0 : } else if (di) {
1195 0 : ret = drop_one_dir_item(trans, path, dir, di);
1196 0 : if (ret)
1197 : return ret;
1198 : }
1199 0 : btrfs_release_path(path);
1200 :
1201 : /* look for a conflicting name */
1202 0 : di = btrfs_lookup_dir_item(trans, root, path, btrfs_ino(dir), name, 0);
1203 0 : if (IS_ERR(di)) {
1204 0 : return PTR_ERR(di);
1205 0 : } else if (di) {
1206 0 : ret = drop_one_dir_item(trans, path, dir, di);
1207 0 : if (ret)
1208 : return ret;
1209 : }
1210 0 : btrfs_release_path(path);
1211 :
1212 0 : return 0;
1213 : }
1214 :
1215 0 : static int extref_get_fields(struct extent_buffer *eb, unsigned long ref_ptr,
1216 : struct fscrypt_str *name, u64 *index,
1217 : u64 *parent_objectid)
1218 : {
1219 0 : struct btrfs_inode_extref *extref;
1220 0 : int ret;
1221 :
1222 0 : extref = (struct btrfs_inode_extref *)ref_ptr;
1223 :
1224 0 : ret = read_alloc_one_name(eb, &extref->name,
1225 : btrfs_inode_extref_name_len(eb, extref), name);
1226 0 : if (ret)
1227 : return ret;
1228 :
1229 0 : if (index)
1230 0 : *index = btrfs_inode_extref_index(eb, extref);
1231 0 : if (parent_objectid)
1232 0 : *parent_objectid = btrfs_inode_extref_parent(eb, extref);
1233 :
1234 : return 0;
1235 : }
1236 :
1237 0 : static int ref_get_fields(struct extent_buffer *eb, unsigned long ref_ptr,
1238 : struct fscrypt_str *name, u64 *index)
1239 : {
1240 0 : struct btrfs_inode_ref *ref;
1241 0 : int ret;
1242 :
1243 0 : ref = (struct btrfs_inode_ref *)ref_ptr;
1244 :
1245 0 : ret = read_alloc_one_name(eb, ref + 1, btrfs_inode_ref_name_len(eb, ref),
1246 : name);
1247 0 : if (ret)
1248 : return ret;
1249 :
1250 0 : if (index)
1251 0 : *index = btrfs_inode_ref_index(eb, ref);
1252 :
1253 : return 0;
1254 : }
1255 :
1256 : /*
1257 : * Take an inode reference item from the log tree and iterate all names from the
1258 : * inode reference item in the subvolume tree with the same key (if it exists).
1259 : * For any name that is not in the inode reference item from the log tree, do a
1260 : * proper unlink of that name (that is, remove its entry from the inode
1261 : * reference item and both dir index keys).
1262 : */
1263 0 : static int unlink_old_inode_refs(struct btrfs_trans_handle *trans,
1264 : struct btrfs_root *root,
1265 : struct btrfs_path *path,
1266 : struct btrfs_inode *inode,
1267 : struct extent_buffer *log_eb,
1268 : int log_slot,
1269 : struct btrfs_key *key)
1270 : {
1271 0 : int ret;
1272 0 : unsigned long ref_ptr;
1273 0 : unsigned long ref_end;
1274 0 : struct extent_buffer *eb;
1275 :
1276 0 : again:
1277 0 : btrfs_release_path(path);
1278 0 : ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
1279 0 : if (ret > 0) {
1280 0 : ret = 0;
1281 0 : goto out;
1282 : }
1283 0 : if (ret < 0)
1284 0 : goto out;
1285 :
1286 0 : eb = path->nodes[0];
1287 0 : ref_ptr = btrfs_item_ptr_offset(eb, path->slots[0]);
1288 0 : ref_end = ref_ptr + btrfs_item_size(eb, path->slots[0]);
1289 0 : while (ref_ptr < ref_end) {
1290 0 : struct fscrypt_str name;
1291 0 : u64 parent_id;
1292 :
1293 0 : if (key->type == BTRFS_INODE_EXTREF_KEY) {
1294 0 : ret = extref_get_fields(eb, ref_ptr, &name,
1295 : NULL, &parent_id);
1296 : } else {
1297 0 : parent_id = key->offset;
1298 0 : ret = ref_get_fields(eb, ref_ptr, &name, NULL);
1299 : }
1300 0 : if (ret)
1301 0 : goto out;
1302 :
1303 0 : if (key->type == BTRFS_INODE_EXTREF_KEY)
1304 0 : ret = !!btrfs_find_name_in_ext_backref(log_eb, log_slot,
1305 : parent_id, &name);
1306 : else
1307 0 : ret = !!btrfs_find_name_in_backref(log_eb, log_slot, &name);
1308 :
1309 0 : if (!ret) {
1310 0 : struct inode *dir;
1311 :
1312 0 : btrfs_release_path(path);
1313 0 : dir = read_one_inode(root, parent_id);
1314 0 : if (!dir) {
1315 0 : ret = -ENOENT;
1316 0 : kfree(name.name);
1317 0 : goto out;
1318 : }
1319 0 : ret = unlink_inode_for_log_replay(trans, BTRFS_I(dir),
1320 : inode, &name);
1321 0 : kfree(name.name);
1322 0 : iput(dir);
1323 0 : if (ret)
1324 0 : goto out;
1325 0 : goto again;
1326 : }
1327 :
1328 0 : kfree(name.name);
1329 0 : ref_ptr += name.len;
1330 0 : if (key->type == BTRFS_INODE_EXTREF_KEY)
1331 0 : ref_ptr += sizeof(struct btrfs_inode_extref);
1332 : else
1333 0 : ref_ptr += sizeof(struct btrfs_inode_ref);
1334 : }
1335 : ret = 0;
1336 0 : out:
1337 0 : btrfs_release_path(path);
1338 0 : return ret;
1339 : }
1340 :
1341 : /*
1342 : * replay one inode back reference item found in the log tree.
1343 : * eb, slot and key refer to the buffer and key found in the log tree.
1344 : * root is the destination we are replaying into, and path is for temp
1345 : * use by this function. (it should be released on return).
1346 : */
1347 0 : static noinline int add_inode_ref(struct btrfs_trans_handle *trans,
1348 : struct btrfs_root *root,
1349 : struct btrfs_root *log,
1350 : struct btrfs_path *path,
1351 : struct extent_buffer *eb, int slot,
1352 : struct btrfs_key *key)
1353 : {
1354 0 : struct inode *dir = NULL;
1355 0 : struct inode *inode = NULL;
1356 0 : unsigned long ref_ptr;
1357 0 : unsigned long ref_end;
1358 0 : struct fscrypt_str name;
1359 0 : int ret;
1360 0 : int log_ref_ver = 0;
1361 0 : u64 parent_objectid;
1362 0 : u64 inode_objectid;
1363 0 : u64 ref_index = 0;
1364 0 : int ref_struct_size;
1365 :
1366 0 : ref_ptr = btrfs_item_ptr_offset(eb, slot);
1367 0 : ref_end = ref_ptr + btrfs_item_size(eb, slot);
1368 :
1369 0 : if (key->type == BTRFS_INODE_EXTREF_KEY) {
1370 0 : struct btrfs_inode_extref *r;
1371 :
1372 0 : ref_struct_size = sizeof(struct btrfs_inode_extref);
1373 0 : log_ref_ver = 1;
1374 0 : r = (struct btrfs_inode_extref *)ref_ptr;
1375 0 : parent_objectid = btrfs_inode_extref_parent(eb, r);
1376 : } else {
1377 0 : ref_struct_size = sizeof(struct btrfs_inode_ref);
1378 0 : parent_objectid = key->offset;
1379 : }
1380 0 : inode_objectid = key->objectid;
1381 :
1382 : /*
1383 : * it is possible that we didn't log all the parent directories
1384 : * for a given inode. If we don't find the dir, just don't
1385 : * copy the back ref in. The link count fixup code will take
1386 : * care of the rest
1387 : */
1388 0 : dir = read_one_inode(root, parent_objectid);
1389 0 : if (!dir) {
1390 0 : ret = -ENOENT;
1391 0 : goto out;
1392 : }
1393 :
1394 0 : inode = read_one_inode(root, inode_objectid);
1395 0 : if (!inode) {
1396 0 : ret = -EIO;
1397 0 : goto out;
1398 : }
1399 :
1400 0 : while (ref_ptr < ref_end) {
1401 0 : if (log_ref_ver) {
1402 0 : ret = extref_get_fields(eb, ref_ptr, &name,
1403 : &ref_index, &parent_objectid);
1404 : /*
1405 : * parent object can change from one array
1406 : * item to another.
1407 : */
1408 0 : if (!dir)
1409 0 : dir = read_one_inode(root, parent_objectid);
1410 0 : if (!dir) {
1411 0 : ret = -ENOENT;
1412 0 : goto out;
1413 : }
1414 : } else {
1415 0 : ret = ref_get_fields(eb, ref_ptr, &name, &ref_index);
1416 : }
1417 0 : if (ret)
1418 0 : goto out;
1419 :
1420 0 : ret = inode_in_dir(root, path, btrfs_ino(BTRFS_I(dir)),
1421 : btrfs_ino(BTRFS_I(inode)), ref_index, &name);
1422 0 : if (ret < 0) {
1423 0 : goto out;
1424 0 : } else if (ret == 0) {
1425 : /*
1426 : * look for a conflicting back reference in the
1427 : * metadata. if we find one we have to unlink that name
1428 : * of the file before we add our new link. Later on, we
1429 : * overwrite any existing back reference, and we don't
1430 : * want to create dangling pointers in the directory.
1431 : */
1432 0 : ret = __add_inode_ref(trans, root, path, log,
1433 : BTRFS_I(dir), BTRFS_I(inode),
1434 : inode_objectid, parent_objectid,
1435 : ref_index, &name);
1436 0 : if (ret) {
1437 0 : if (ret == 1)
1438 0 : ret = 0;
1439 0 : goto out;
1440 : }
1441 :
1442 : /* insert our name */
1443 0 : ret = btrfs_add_link(trans, BTRFS_I(dir), BTRFS_I(inode),
1444 : &name, 0, ref_index);
1445 0 : if (ret)
1446 0 : goto out;
1447 :
1448 0 : ret = btrfs_update_inode(trans, root, BTRFS_I(inode));
1449 0 : if (ret)
1450 0 : goto out;
1451 : }
1452 : /* Else, ret == 1, we already have a perfect match, we're done. */
1453 :
1454 0 : ref_ptr = (unsigned long)(ref_ptr + ref_struct_size) + name.len;
1455 0 : kfree(name.name);
1456 0 : name.name = NULL;
1457 0 : if (log_ref_ver) {
1458 0 : iput(dir);
1459 0 : dir = NULL;
1460 : }
1461 : }
1462 :
1463 : /*
1464 : * Before we overwrite the inode reference item in the subvolume tree
1465 : * with the item from the log tree, we must unlink all names from the
1466 : * parent directory that are in the subvolume's tree inode reference
1467 : * item, otherwise we end up with an inconsistent subvolume tree where
1468 : * dir index entries exist for a name but there is no inode reference
1469 : * item with the same name.
1470 : */
1471 0 : ret = unlink_old_inode_refs(trans, root, path, BTRFS_I(inode), eb, slot,
1472 : key);
1473 0 : if (ret)
1474 0 : goto out;
1475 :
1476 : /* finally write the back reference in the inode */
1477 0 : ret = overwrite_item(trans, root, path, eb, slot, key);
1478 0 : out:
1479 0 : btrfs_release_path(path);
1480 0 : kfree(name.name);
1481 0 : iput(dir);
1482 0 : iput(inode);
1483 0 : return ret;
1484 : }
1485 :
1486 0 : static int count_inode_extrefs(struct btrfs_root *root,
1487 : struct btrfs_inode *inode, struct btrfs_path *path)
1488 : {
1489 0 : int ret = 0;
1490 0 : int name_len;
1491 0 : unsigned int nlink = 0;
1492 0 : u32 item_size;
1493 0 : u32 cur_offset = 0;
1494 0 : u64 inode_objectid = btrfs_ino(inode);
1495 0 : u64 offset = 0;
1496 0 : unsigned long ptr;
1497 0 : struct btrfs_inode_extref *extref;
1498 0 : struct extent_buffer *leaf;
1499 :
1500 0 : while (1) {
1501 0 : ret = btrfs_find_one_extref(root, inode_objectid, offset, path,
1502 : &extref, &offset);
1503 0 : if (ret)
1504 : break;
1505 :
1506 0 : leaf = path->nodes[0];
1507 0 : item_size = btrfs_item_size(leaf, path->slots[0]);
1508 0 : ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
1509 0 : cur_offset = 0;
1510 :
1511 0 : while (cur_offset < item_size) {
1512 0 : extref = (struct btrfs_inode_extref *) (ptr + cur_offset);
1513 0 : name_len = btrfs_inode_extref_name_len(leaf, extref);
1514 :
1515 0 : nlink++;
1516 :
1517 0 : cur_offset += name_len + sizeof(*extref);
1518 : }
1519 :
1520 0 : offset++;
1521 0 : btrfs_release_path(path);
1522 : }
1523 0 : btrfs_release_path(path);
1524 :
1525 0 : if (ret < 0 && ret != -ENOENT)
1526 : return ret;
1527 0 : return nlink;
1528 : }
1529 :
1530 0 : static int count_inode_refs(struct btrfs_root *root,
1531 : struct btrfs_inode *inode, struct btrfs_path *path)
1532 : {
1533 0 : int ret;
1534 0 : struct btrfs_key key;
1535 0 : unsigned int nlink = 0;
1536 0 : unsigned long ptr;
1537 0 : unsigned long ptr_end;
1538 0 : int name_len;
1539 0 : u64 ino = btrfs_ino(inode);
1540 :
1541 0 : key.objectid = ino;
1542 0 : key.type = BTRFS_INODE_REF_KEY;
1543 0 : key.offset = (u64)-1;
1544 :
1545 0 : while (1) {
1546 0 : ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
1547 0 : if (ret < 0)
1548 : break;
1549 0 : if (ret > 0) {
1550 0 : if (path->slots[0] == 0)
1551 : break;
1552 0 : path->slots[0]--;
1553 : }
1554 0 : process_slot:
1555 0 : btrfs_item_key_to_cpu(path->nodes[0], &key,
1556 : path->slots[0]);
1557 0 : if (key.objectid != ino ||
1558 0 : key.type != BTRFS_INODE_REF_KEY)
1559 : break;
1560 0 : ptr = btrfs_item_ptr_offset(path->nodes[0], path->slots[0]);
1561 0 : ptr_end = ptr + btrfs_item_size(path->nodes[0],
1562 : path->slots[0]);
1563 0 : while (ptr < ptr_end) {
1564 0 : struct btrfs_inode_ref *ref;
1565 :
1566 0 : ref = (struct btrfs_inode_ref *)ptr;
1567 0 : name_len = btrfs_inode_ref_name_len(path->nodes[0],
1568 : ref);
1569 0 : ptr = (unsigned long)(ref + 1) + name_len;
1570 0 : nlink++;
1571 : }
1572 :
1573 0 : if (key.offset == 0)
1574 : break;
1575 0 : if (path->slots[0] > 0) {
1576 0 : path->slots[0]--;
1577 0 : goto process_slot;
1578 : }
1579 0 : key.offset--;
1580 0 : btrfs_release_path(path);
1581 : }
1582 0 : btrfs_release_path(path);
1583 :
1584 0 : return nlink;
1585 : }
1586 :
1587 : /*
1588 : * There are a few corners where the link count of the file can't
1589 : * be properly maintained during replay. So, instead of adding
1590 : * lots of complexity to the log code, we just scan the backrefs
1591 : * for any file that has been through replay.
1592 : *
1593 : * The scan will update the link count on the inode to reflect the
1594 : * number of back refs found. If it goes down to zero, the iput
1595 : * will free the inode.
1596 : */
1597 0 : static noinline int fixup_inode_link_count(struct btrfs_trans_handle *trans,
1598 : struct btrfs_root *root,
1599 : struct inode *inode)
1600 : {
1601 0 : struct btrfs_path *path;
1602 0 : int ret;
1603 0 : u64 nlink = 0;
1604 0 : u64 ino = btrfs_ino(BTRFS_I(inode));
1605 :
1606 0 : path = btrfs_alloc_path();
1607 0 : if (!path)
1608 : return -ENOMEM;
1609 :
1610 0 : ret = count_inode_refs(root, BTRFS_I(inode), path);
1611 0 : if (ret < 0)
1612 0 : goto out;
1613 :
1614 0 : nlink = ret;
1615 :
1616 0 : ret = count_inode_extrefs(root, BTRFS_I(inode), path);
1617 0 : if (ret < 0)
1618 0 : goto out;
1619 :
1620 0 : nlink += ret;
1621 :
1622 0 : ret = 0;
1623 :
1624 0 : if (nlink != inode->i_nlink) {
1625 0 : set_nlink(inode, nlink);
1626 0 : ret = btrfs_update_inode(trans, root, BTRFS_I(inode));
1627 0 : if (ret)
1628 0 : goto out;
1629 : }
1630 0 : BTRFS_I(inode)->index_cnt = (u64)-1;
1631 :
1632 0 : if (inode->i_nlink == 0) {
1633 0 : if (S_ISDIR(inode->i_mode)) {
1634 0 : ret = replay_dir_deletes(trans, root, NULL, path,
1635 : ino, 1);
1636 0 : if (ret)
1637 0 : goto out;
1638 : }
1639 0 : ret = btrfs_insert_orphan_item(trans, root, ino);
1640 0 : if (ret == -EEXIST)
1641 0 : ret = 0;
1642 : }
1643 :
1644 0 : out:
1645 0 : btrfs_free_path(path);
1646 0 : return ret;
1647 : }
1648 :
1649 0 : static noinline int fixup_inode_link_counts(struct btrfs_trans_handle *trans,
1650 : struct btrfs_root *root,
1651 : struct btrfs_path *path)
1652 : {
1653 0 : int ret;
1654 0 : struct btrfs_key key;
1655 0 : struct inode *inode;
1656 :
1657 0 : key.objectid = BTRFS_TREE_LOG_FIXUP_OBJECTID;
1658 0 : key.type = BTRFS_ORPHAN_ITEM_KEY;
1659 0 : key.offset = (u64)-1;
1660 0 : while (1) {
1661 0 : ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1662 0 : if (ret < 0)
1663 : break;
1664 :
1665 0 : if (ret == 1) {
1666 0 : ret = 0;
1667 0 : if (path->slots[0] == 0)
1668 : break;
1669 0 : path->slots[0]--;
1670 : }
1671 :
1672 0 : btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
1673 0 : if (key.objectid != BTRFS_TREE_LOG_FIXUP_OBJECTID ||
1674 0 : key.type != BTRFS_ORPHAN_ITEM_KEY)
1675 : break;
1676 :
1677 0 : ret = btrfs_del_item(trans, root, path);
1678 0 : if (ret)
1679 : break;
1680 :
1681 0 : btrfs_release_path(path);
1682 0 : inode = read_one_inode(root, key.offset);
1683 0 : if (!inode) {
1684 : ret = -EIO;
1685 : break;
1686 : }
1687 :
1688 0 : ret = fixup_inode_link_count(trans, root, inode);
1689 0 : iput(inode);
1690 0 : if (ret)
1691 : break;
1692 :
1693 : /*
1694 : * fixup on a directory may create new entries,
1695 : * make sure we always look for the highset possible
1696 : * offset
1697 : */
1698 0 : key.offset = (u64)-1;
1699 : }
1700 0 : btrfs_release_path(path);
1701 0 : return ret;
1702 : }
1703 :
1704 :
1705 : /*
1706 : * record a given inode in the fixup dir so we can check its link
1707 : * count when replay is done. The link count is incremented here
1708 : * so the inode won't go away until we check it
1709 : */
1710 0 : static noinline int link_to_fixup_dir(struct btrfs_trans_handle *trans,
1711 : struct btrfs_root *root,
1712 : struct btrfs_path *path,
1713 : u64 objectid)
1714 : {
1715 0 : struct btrfs_key key;
1716 0 : int ret = 0;
1717 0 : struct inode *inode;
1718 :
1719 0 : inode = read_one_inode(root, objectid);
1720 0 : if (!inode)
1721 : return -EIO;
1722 :
1723 0 : key.objectid = BTRFS_TREE_LOG_FIXUP_OBJECTID;
1724 0 : key.type = BTRFS_ORPHAN_ITEM_KEY;
1725 0 : key.offset = objectid;
1726 :
1727 0 : ret = btrfs_insert_empty_item(trans, root, path, &key, 0);
1728 :
1729 0 : btrfs_release_path(path);
1730 0 : if (ret == 0) {
1731 0 : if (!inode->i_nlink)
1732 0 : set_nlink(inode, 1);
1733 : else
1734 0 : inc_nlink(inode);
1735 0 : ret = btrfs_update_inode(trans, root, BTRFS_I(inode));
1736 0 : } else if (ret == -EEXIST) {
1737 0 : ret = 0;
1738 : }
1739 0 : iput(inode);
1740 :
1741 0 : return ret;
1742 : }
1743 :
1744 : /*
1745 : * when replaying the log for a directory, we only insert names
1746 : * for inodes that actually exist. This means an fsync on a directory
1747 : * does not implicitly fsync all the new files in it
1748 : */
1749 0 : static noinline int insert_one_name(struct btrfs_trans_handle *trans,
1750 : struct btrfs_root *root,
1751 : u64 dirid, u64 index,
1752 : const struct fscrypt_str *name,
1753 : struct btrfs_key *location)
1754 : {
1755 0 : struct inode *inode;
1756 0 : struct inode *dir;
1757 0 : int ret;
1758 :
1759 0 : inode = read_one_inode(root, location->objectid);
1760 0 : if (!inode)
1761 : return -ENOENT;
1762 :
1763 0 : dir = read_one_inode(root, dirid);
1764 0 : if (!dir) {
1765 0 : iput(inode);
1766 0 : return -EIO;
1767 : }
1768 :
1769 0 : ret = btrfs_add_link(trans, BTRFS_I(dir), BTRFS_I(inode), name,
1770 : 1, index);
1771 :
1772 : /* FIXME, put inode into FIXUP list */
1773 :
1774 0 : iput(inode);
1775 0 : iput(dir);
1776 0 : return ret;
1777 : }
1778 :
1779 0 : static int delete_conflicting_dir_entry(struct btrfs_trans_handle *trans,
1780 : struct btrfs_inode *dir,
1781 : struct btrfs_path *path,
1782 : struct btrfs_dir_item *dst_di,
1783 : const struct btrfs_key *log_key,
1784 : u8 log_flags,
1785 : bool exists)
1786 : {
1787 0 : struct btrfs_key found_key;
1788 :
1789 0 : btrfs_dir_item_key_to_cpu(path->nodes[0], dst_di, &found_key);
1790 : /* The existing dentry points to the same inode, don't delete it. */
1791 0 : if (found_key.objectid == log_key->objectid &&
1792 0 : found_key.type == log_key->type &&
1793 0 : found_key.offset == log_key->offset &&
1794 0 : btrfs_dir_flags(path->nodes[0], dst_di) == log_flags)
1795 : return 1;
1796 :
1797 : /*
1798 : * Don't drop the conflicting directory entry if the inode for the new
1799 : * entry doesn't exist.
1800 : */
1801 0 : if (!exists)
1802 : return 0;
1803 :
1804 0 : return drop_one_dir_item(trans, path, dir, dst_di);
1805 : }
1806 :
1807 : /*
1808 : * take a single entry in a log directory item and replay it into
1809 : * the subvolume.
1810 : *
1811 : * if a conflicting item exists in the subdirectory already,
1812 : * the inode it points to is unlinked and put into the link count
1813 : * fix up tree.
1814 : *
1815 : * If a name from the log points to a file or directory that does
1816 : * not exist in the FS, it is skipped. fsyncs on directories
1817 : * do not force down inodes inside that directory, just changes to the
1818 : * names or unlinks in a directory.
1819 : *
1820 : * Returns < 0 on error, 0 if the name wasn't replayed (dentry points to a
1821 : * non-existing inode) and 1 if the name was replayed.
1822 : */
1823 0 : static noinline int replay_one_name(struct btrfs_trans_handle *trans,
1824 : struct btrfs_root *root,
1825 : struct btrfs_path *path,
1826 : struct extent_buffer *eb,
1827 : struct btrfs_dir_item *di,
1828 : struct btrfs_key *key)
1829 : {
1830 0 : struct fscrypt_str name;
1831 0 : struct btrfs_dir_item *dir_dst_di;
1832 0 : struct btrfs_dir_item *index_dst_di;
1833 0 : bool dir_dst_matches = false;
1834 0 : bool index_dst_matches = false;
1835 0 : struct btrfs_key log_key;
1836 0 : struct btrfs_key search_key;
1837 0 : struct inode *dir;
1838 0 : u8 log_flags;
1839 0 : bool exists;
1840 0 : int ret;
1841 0 : bool update_size = true;
1842 0 : bool name_added = false;
1843 :
1844 0 : dir = read_one_inode(root, key->objectid);
1845 0 : if (!dir)
1846 : return -EIO;
1847 :
1848 0 : ret = read_alloc_one_name(eb, di + 1, btrfs_dir_name_len(eb, di), &name);
1849 0 : if (ret)
1850 0 : goto out;
1851 :
1852 0 : log_flags = btrfs_dir_flags(eb, di);
1853 0 : btrfs_dir_item_key_to_cpu(eb, di, &log_key);
1854 0 : ret = btrfs_lookup_inode(trans, root, path, &log_key, 0);
1855 0 : btrfs_release_path(path);
1856 0 : if (ret < 0)
1857 0 : goto out;
1858 0 : exists = (ret == 0);
1859 0 : ret = 0;
1860 :
1861 0 : dir_dst_di = btrfs_lookup_dir_item(trans, root, path, key->objectid,
1862 : &name, 1);
1863 0 : if (IS_ERR(dir_dst_di)) {
1864 0 : ret = PTR_ERR(dir_dst_di);
1865 0 : goto out;
1866 0 : } else if (dir_dst_di) {
1867 0 : ret = delete_conflicting_dir_entry(trans, BTRFS_I(dir), path,
1868 : dir_dst_di, &log_key,
1869 : log_flags, exists);
1870 0 : if (ret < 0)
1871 0 : goto out;
1872 0 : dir_dst_matches = (ret == 1);
1873 : }
1874 :
1875 0 : btrfs_release_path(path);
1876 :
1877 0 : index_dst_di = btrfs_lookup_dir_index_item(trans, root, path,
1878 : key->objectid, key->offset,
1879 : &name, 1);
1880 0 : if (IS_ERR(index_dst_di)) {
1881 0 : ret = PTR_ERR(index_dst_di);
1882 0 : goto out;
1883 0 : } else if (index_dst_di) {
1884 0 : ret = delete_conflicting_dir_entry(trans, BTRFS_I(dir), path,
1885 : index_dst_di, &log_key,
1886 : log_flags, exists);
1887 0 : if (ret < 0)
1888 0 : goto out;
1889 0 : index_dst_matches = (ret == 1);
1890 : }
1891 :
1892 0 : btrfs_release_path(path);
1893 :
1894 0 : if (dir_dst_matches && index_dst_matches) {
1895 0 : ret = 0;
1896 0 : update_size = false;
1897 0 : goto out;
1898 : }
1899 :
1900 : /*
1901 : * Check if the inode reference exists in the log for the given name,
1902 : * inode and parent inode
1903 : */
1904 0 : search_key.objectid = log_key.objectid;
1905 0 : search_key.type = BTRFS_INODE_REF_KEY;
1906 0 : search_key.offset = key->objectid;
1907 0 : ret = backref_in_log(root->log_root, &search_key, 0, &name);
1908 0 : if (ret < 0) {
1909 0 : goto out;
1910 0 : } else if (ret) {
1911 : /* The dentry will be added later. */
1912 0 : ret = 0;
1913 0 : update_size = false;
1914 0 : goto out;
1915 : }
1916 :
1917 0 : search_key.objectid = log_key.objectid;
1918 0 : search_key.type = BTRFS_INODE_EXTREF_KEY;
1919 0 : search_key.offset = key->objectid;
1920 0 : ret = backref_in_log(root->log_root, &search_key, key->objectid, &name);
1921 0 : if (ret < 0) {
1922 0 : goto out;
1923 0 : } else if (ret) {
1924 : /* The dentry will be added later. */
1925 0 : ret = 0;
1926 0 : update_size = false;
1927 0 : goto out;
1928 : }
1929 0 : btrfs_release_path(path);
1930 0 : ret = insert_one_name(trans, root, key->objectid, key->offset,
1931 : &name, &log_key);
1932 0 : if (ret && ret != -ENOENT && ret != -EEXIST)
1933 0 : goto out;
1934 0 : if (!ret)
1935 0 : name_added = true;
1936 : update_size = false;
1937 : ret = 0;
1938 :
1939 0 : out:
1940 0 : if (!ret && update_size) {
1941 0 : btrfs_i_size_write(BTRFS_I(dir), dir->i_size + name.len * 2);
1942 0 : ret = btrfs_update_inode(trans, root, BTRFS_I(dir));
1943 : }
1944 0 : kfree(name.name);
1945 0 : iput(dir);
1946 0 : if (!ret && name_added)
1947 0 : ret = 1;
1948 : return ret;
1949 : }
1950 :
1951 : /* Replay one dir item from a BTRFS_DIR_INDEX_KEY key. */
1952 0 : static noinline int replay_one_dir_item(struct btrfs_trans_handle *trans,
1953 : struct btrfs_root *root,
1954 : struct btrfs_path *path,
1955 : struct extent_buffer *eb, int slot,
1956 : struct btrfs_key *key)
1957 : {
1958 0 : int ret;
1959 0 : struct btrfs_dir_item *di;
1960 :
1961 : /* We only log dir index keys, which only contain a single dir item. */
1962 0 : ASSERT(key->type == BTRFS_DIR_INDEX_KEY);
1963 :
1964 0 : di = btrfs_item_ptr(eb, slot, struct btrfs_dir_item);
1965 0 : ret = replay_one_name(trans, root, path, eb, di, key);
1966 0 : if (ret < 0)
1967 : return ret;
1968 :
1969 : /*
1970 : * If this entry refers to a non-directory (directories can not have a
1971 : * link count > 1) and it was added in the transaction that was not
1972 : * committed, make sure we fixup the link count of the inode the entry
1973 : * points to. Otherwise something like the following would result in a
1974 : * directory pointing to an inode with a wrong link that does not account
1975 : * for this dir entry:
1976 : *
1977 : * mkdir testdir
1978 : * touch testdir/foo
1979 : * touch testdir/bar
1980 : * sync
1981 : *
1982 : * ln testdir/bar testdir/bar_link
1983 : * ln testdir/foo testdir/foo_link
1984 : * xfs_io -c "fsync" testdir/bar
1985 : *
1986 : * <power failure>
1987 : *
1988 : * mount fs, log replay happens
1989 : *
1990 : * File foo would remain with a link count of 1 when it has two entries
1991 : * pointing to it in the directory testdir. This would make it impossible
1992 : * to ever delete the parent directory has it would result in stale
1993 : * dentries that can never be deleted.
1994 : */
1995 0 : if (ret == 1 && btrfs_dir_ftype(eb, di) != BTRFS_FT_DIR) {
1996 0 : struct btrfs_path *fixup_path;
1997 0 : struct btrfs_key di_key;
1998 :
1999 0 : fixup_path = btrfs_alloc_path();
2000 0 : if (!fixup_path)
2001 0 : return -ENOMEM;
2002 :
2003 0 : btrfs_dir_item_key_to_cpu(eb, di, &di_key);
2004 0 : ret = link_to_fixup_dir(trans, root, fixup_path, di_key.objectid);
2005 0 : btrfs_free_path(fixup_path);
2006 : }
2007 :
2008 : return ret;
2009 : }
2010 :
2011 : /*
2012 : * directory replay has two parts. There are the standard directory
2013 : * items in the log copied from the subvolume, and range items
2014 : * created in the log while the subvolume was logged.
2015 : *
2016 : * The range items tell us which parts of the key space the log
2017 : * is authoritative for. During replay, if a key in the subvolume
2018 : * directory is in a logged range item, but not actually in the log
2019 : * that means it was deleted from the directory before the fsync
2020 : * and should be removed.
2021 : */
2022 0 : static noinline int find_dir_range(struct btrfs_root *root,
2023 : struct btrfs_path *path,
2024 : u64 dirid,
2025 : u64 *start_ret, u64 *end_ret)
2026 : {
2027 0 : struct btrfs_key key;
2028 0 : u64 found_end;
2029 0 : struct btrfs_dir_log_item *item;
2030 0 : int ret;
2031 0 : int nritems;
2032 :
2033 0 : if (*start_ret == (u64)-1)
2034 : return 1;
2035 :
2036 0 : key.objectid = dirid;
2037 0 : key.type = BTRFS_DIR_LOG_INDEX_KEY;
2038 0 : key.offset = *start_ret;
2039 :
2040 0 : ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
2041 0 : if (ret < 0)
2042 0 : goto out;
2043 0 : if (ret > 0) {
2044 0 : if (path->slots[0] == 0)
2045 0 : goto out;
2046 0 : path->slots[0]--;
2047 : }
2048 0 : if (ret != 0)
2049 0 : btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
2050 :
2051 0 : if (key.type != BTRFS_DIR_LOG_INDEX_KEY || key.objectid != dirid) {
2052 0 : ret = 1;
2053 0 : goto next;
2054 : }
2055 0 : item = btrfs_item_ptr(path->nodes[0], path->slots[0],
2056 : struct btrfs_dir_log_item);
2057 0 : found_end = btrfs_dir_log_end(path->nodes[0], item);
2058 :
2059 0 : if (*start_ret >= key.offset && *start_ret <= found_end) {
2060 0 : ret = 0;
2061 0 : *start_ret = key.offset;
2062 0 : *end_ret = found_end;
2063 0 : goto out;
2064 : }
2065 : ret = 1;
2066 0 : next:
2067 : /* check the next slot in the tree to see if it is a valid item */
2068 0 : nritems = btrfs_header_nritems(path->nodes[0]);
2069 0 : path->slots[0]++;
2070 0 : if (path->slots[0] >= nritems) {
2071 0 : ret = btrfs_next_leaf(root, path);
2072 0 : if (ret)
2073 0 : goto out;
2074 : }
2075 :
2076 0 : btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
2077 :
2078 0 : if (key.type != BTRFS_DIR_LOG_INDEX_KEY || key.objectid != dirid) {
2079 0 : ret = 1;
2080 0 : goto out;
2081 : }
2082 0 : item = btrfs_item_ptr(path->nodes[0], path->slots[0],
2083 : struct btrfs_dir_log_item);
2084 0 : found_end = btrfs_dir_log_end(path->nodes[0], item);
2085 0 : *start_ret = key.offset;
2086 0 : *end_ret = found_end;
2087 0 : ret = 0;
2088 0 : out:
2089 0 : btrfs_release_path(path);
2090 0 : return ret;
2091 : }
2092 :
2093 : /*
2094 : * this looks for a given directory item in the log. If the directory
2095 : * item is not in the log, the item is removed and the inode it points
2096 : * to is unlinked
2097 : */
2098 0 : static noinline int check_item_in_log(struct btrfs_trans_handle *trans,
2099 : struct btrfs_root *log,
2100 : struct btrfs_path *path,
2101 : struct btrfs_path *log_path,
2102 : struct inode *dir,
2103 : struct btrfs_key *dir_key)
2104 : {
2105 0 : struct btrfs_root *root = BTRFS_I(dir)->root;
2106 0 : int ret;
2107 0 : struct extent_buffer *eb;
2108 0 : int slot;
2109 0 : struct btrfs_dir_item *di;
2110 0 : struct fscrypt_str name;
2111 0 : struct inode *inode = NULL;
2112 0 : struct btrfs_key location;
2113 :
2114 : /*
2115 : * Currently we only log dir index keys. Even if we replay a log created
2116 : * by an older kernel that logged both dir index and dir item keys, all
2117 : * we need to do is process the dir index keys, we (and our caller) can
2118 : * safely ignore dir item keys (key type BTRFS_DIR_ITEM_KEY).
2119 : */
2120 0 : ASSERT(dir_key->type == BTRFS_DIR_INDEX_KEY);
2121 :
2122 0 : eb = path->nodes[0];
2123 0 : slot = path->slots[0];
2124 0 : di = btrfs_item_ptr(eb, slot, struct btrfs_dir_item);
2125 0 : ret = read_alloc_one_name(eb, di + 1, btrfs_dir_name_len(eb, di), &name);
2126 0 : if (ret)
2127 0 : goto out;
2128 :
2129 0 : if (log) {
2130 0 : struct btrfs_dir_item *log_di;
2131 :
2132 0 : log_di = btrfs_lookup_dir_index_item(trans, log, log_path,
2133 : dir_key->objectid,
2134 : dir_key->offset, &name, 0);
2135 0 : if (IS_ERR(log_di)) {
2136 0 : ret = PTR_ERR(log_di);
2137 0 : goto out;
2138 0 : } else if (log_di) {
2139 : /* The dentry exists in the log, we have nothing to do. */
2140 0 : ret = 0;
2141 0 : goto out;
2142 : }
2143 : }
2144 :
2145 0 : btrfs_dir_item_key_to_cpu(eb, di, &location);
2146 0 : btrfs_release_path(path);
2147 0 : btrfs_release_path(log_path);
2148 0 : inode = read_one_inode(root, location.objectid);
2149 0 : if (!inode) {
2150 0 : ret = -EIO;
2151 0 : goto out;
2152 : }
2153 :
2154 0 : ret = link_to_fixup_dir(trans, root, path, location.objectid);
2155 0 : if (ret)
2156 0 : goto out;
2157 :
2158 0 : inc_nlink(inode);
2159 0 : ret = unlink_inode_for_log_replay(trans, BTRFS_I(dir), BTRFS_I(inode),
2160 : &name);
2161 : /*
2162 : * Unlike dir item keys, dir index keys can only have one name (entry) in
2163 : * them, as there are no key collisions since each key has a unique offset
2164 : * (an index number), so we're done.
2165 : */
2166 0 : out:
2167 0 : btrfs_release_path(path);
2168 0 : btrfs_release_path(log_path);
2169 0 : kfree(name.name);
2170 0 : iput(inode);
2171 0 : return ret;
2172 : }
2173 :
2174 0 : static int replay_xattr_deletes(struct btrfs_trans_handle *trans,
2175 : struct btrfs_root *root,
2176 : struct btrfs_root *log,
2177 : struct btrfs_path *path,
2178 : const u64 ino)
2179 : {
2180 0 : struct btrfs_key search_key;
2181 0 : struct btrfs_path *log_path;
2182 0 : int i;
2183 0 : int nritems;
2184 0 : int ret;
2185 :
2186 0 : log_path = btrfs_alloc_path();
2187 0 : if (!log_path)
2188 : return -ENOMEM;
2189 :
2190 0 : search_key.objectid = ino;
2191 0 : search_key.type = BTRFS_XATTR_ITEM_KEY;
2192 0 : search_key.offset = 0;
2193 0 : again:
2194 0 : ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0);
2195 0 : if (ret < 0)
2196 0 : goto out;
2197 0 : process_leaf:
2198 0 : nritems = btrfs_header_nritems(path->nodes[0]);
2199 0 : for (i = path->slots[0]; i < nritems; i++) {
2200 0 : struct btrfs_key key;
2201 0 : struct btrfs_dir_item *di;
2202 0 : struct btrfs_dir_item *log_di;
2203 0 : u32 total_size;
2204 0 : u32 cur;
2205 :
2206 0 : btrfs_item_key_to_cpu(path->nodes[0], &key, i);
2207 0 : if (key.objectid != ino || key.type != BTRFS_XATTR_ITEM_KEY) {
2208 0 : ret = 0;
2209 0 : goto out;
2210 : }
2211 :
2212 0 : di = btrfs_item_ptr(path->nodes[0], i, struct btrfs_dir_item);
2213 0 : total_size = btrfs_item_size(path->nodes[0], i);
2214 0 : cur = 0;
2215 0 : while (cur < total_size) {
2216 0 : u16 name_len = btrfs_dir_name_len(path->nodes[0], di);
2217 0 : u16 data_len = btrfs_dir_data_len(path->nodes[0], di);
2218 0 : u32 this_len = sizeof(*di) + name_len + data_len;
2219 0 : char *name;
2220 :
2221 0 : name = kmalloc(name_len, GFP_NOFS);
2222 0 : if (!name) {
2223 0 : ret = -ENOMEM;
2224 0 : goto out;
2225 : }
2226 0 : read_extent_buffer(path->nodes[0], name,
2227 0 : (unsigned long)(di + 1), name_len);
2228 :
2229 0 : log_di = btrfs_lookup_xattr(NULL, log, log_path, ino,
2230 : name, name_len, 0);
2231 0 : btrfs_release_path(log_path);
2232 0 : if (!log_di) {
2233 : /* Doesn't exist in log tree, so delete it. */
2234 0 : btrfs_release_path(path);
2235 0 : di = btrfs_lookup_xattr(trans, root, path, ino,
2236 : name, name_len, -1);
2237 0 : kfree(name);
2238 0 : if (IS_ERR(di)) {
2239 0 : ret = PTR_ERR(di);
2240 0 : goto out;
2241 : }
2242 0 : ASSERT(di);
2243 0 : ret = btrfs_delete_one_dir_name(trans, root,
2244 : path, di);
2245 0 : if (ret)
2246 0 : goto out;
2247 0 : btrfs_release_path(path);
2248 0 : search_key = key;
2249 0 : goto again;
2250 : }
2251 0 : kfree(name);
2252 0 : if (IS_ERR(log_di)) {
2253 0 : ret = PTR_ERR(log_di);
2254 0 : goto out;
2255 : }
2256 0 : cur += this_len;
2257 0 : di = (struct btrfs_dir_item *)((char *)di + this_len);
2258 : }
2259 : }
2260 0 : ret = btrfs_next_leaf(root, path);
2261 0 : if (ret > 0)
2262 : ret = 0;
2263 0 : else if (ret == 0)
2264 0 : goto process_leaf;
2265 0 : out:
2266 0 : btrfs_free_path(log_path);
2267 0 : btrfs_release_path(path);
2268 0 : return ret;
2269 : }
2270 :
2271 :
2272 : /*
2273 : * deletion replay happens before we copy any new directory items
2274 : * out of the log or out of backreferences from inodes. It
2275 : * scans the log to find ranges of keys that log is authoritative for,
2276 : * and then scans the directory to find items in those ranges that are
2277 : * not present in the log.
2278 : *
2279 : * Anything we don't find in the log is unlinked and removed from the
2280 : * directory.
2281 : */
2282 0 : static noinline int replay_dir_deletes(struct btrfs_trans_handle *trans,
2283 : struct btrfs_root *root,
2284 : struct btrfs_root *log,
2285 : struct btrfs_path *path,
2286 : u64 dirid, int del_all)
2287 : {
2288 0 : u64 range_start;
2289 0 : u64 range_end;
2290 0 : int ret = 0;
2291 0 : struct btrfs_key dir_key;
2292 0 : struct btrfs_key found_key;
2293 0 : struct btrfs_path *log_path;
2294 0 : struct inode *dir;
2295 :
2296 0 : dir_key.objectid = dirid;
2297 0 : dir_key.type = BTRFS_DIR_INDEX_KEY;
2298 0 : log_path = btrfs_alloc_path();
2299 0 : if (!log_path)
2300 : return -ENOMEM;
2301 :
2302 0 : dir = read_one_inode(root, dirid);
2303 : /* it isn't an error if the inode isn't there, that can happen
2304 : * because we replay the deletes before we copy in the inode item
2305 : * from the log
2306 : */
2307 0 : if (!dir) {
2308 0 : btrfs_free_path(log_path);
2309 0 : return 0;
2310 : }
2311 :
2312 0 : range_start = 0;
2313 0 : range_end = 0;
2314 0 : while (1) {
2315 0 : if (del_all)
2316 0 : range_end = (u64)-1;
2317 : else {
2318 0 : ret = find_dir_range(log, path, dirid,
2319 : &range_start, &range_end);
2320 0 : if (ret < 0)
2321 0 : goto out;
2322 0 : else if (ret > 0)
2323 : break;
2324 : }
2325 :
2326 0 : dir_key.offset = range_start;
2327 0 : while (1) {
2328 0 : int nritems;
2329 0 : ret = btrfs_search_slot(NULL, root, &dir_key, path,
2330 : 0, 0);
2331 0 : if (ret < 0)
2332 0 : goto out;
2333 :
2334 0 : nritems = btrfs_header_nritems(path->nodes[0]);
2335 0 : if (path->slots[0] >= nritems) {
2336 0 : ret = btrfs_next_leaf(root, path);
2337 0 : if (ret == 1)
2338 : break;
2339 0 : else if (ret < 0)
2340 0 : goto out;
2341 : }
2342 0 : btrfs_item_key_to_cpu(path->nodes[0], &found_key,
2343 : path->slots[0]);
2344 0 : if (found_key.objectid != dirid ||
2345 0 : found_key.type != dir_key.type) {
2346 0 : ret = 0;
2347 0 : goto out;
2348 : }
2349 :
2350 0 : if (found_key.offset > range_end)
2351 : break;
2352 :
2353 0 : ret = check_item_in_log(trans, log, path,
2354 : log_path, dir,
2355 : &found_key);
2356 0 : if (ret)
2357 0 : goto out;
2358 0 : if (found_key.offset == (u64)-1)
2359 : break;
2360 0 : dir_key.offset = found_key.offset + 1;
2361 : }
2362 0 : btrfs_release_path(path);
2363 0 : if (range_end == (u64)-1)
2364 : break;
2365 0 : range_start = range_end + 1;
2366 : }
2367 : ret = 0;
2368 0 : out:
2369 0 : btrfs_release_path(path);
2370 0 : btrfs_free_path(log_path);
2371 0 : iput(dir);
2372 0 : return ret;
2373 : }
2374 :
2375 : /*
2376 : * the process_func used to replay items from the log tree. This
2377 : * gets called in two different stages. The first stage just looks
2378 : * for inodes and makes sure they are all copied into the subvolume.
2379 : *
2380 : * The second stage copies all the other item types from the log into
2381 : * the subvolume. The two stage approach is slower, but gets rid of
2382 : * lots of complexity around inodes referencing other inodes that exist
2383 : * only in the log (references come from either directory items or inode
2384 : * back refs).
2385 : */
2386 0 : static int replay_one_buffer(struct btrfs_root *log, struct extent_buffer *eb,
2387 : struct walk_control *wc, u64 gen, int level)
2388 : {
2389 0 : int nritems;
2390 0 : struct btrfs_tree_parent_check check = {
2391 : .transid = gen,
2392 : .level = level
2393 : };
2394 0 : struct btrfs_path *path;
2395 0 : struct btrfs_root *root = wc->replay_dest;
2396 0 : struct btrfs_key key;
2397 0 : int i;
2398 0 : int ret;
2399 :
2400 0 : ret = btrfs_read_extent_buffer(eb, &check);
2401 0 : if (ret)
2402 : return ret;
2403 :
2404 0 : level = btrfs_header_level(eb);
2405 :
2406 0 : if (level != 0)
2407 : return 0;
2408 :
2409 0 : path = btrfs_alloc_path();
2410 0 : if (!path)
2411 : return -ENOMEM;
2412 :
2413 0 : nritems = btrfs_header_nritems(eb);
2414 0 : for (i = 0; i < nritems; i++) {
2415 0 : btrfs_item_key_to_cpu(eb, &key, i);
2416 :
2417 : /* inode keys are done during the first stage */
2418 0 : if (key.type == BTRFS_INODE_ITEM_KEY &&
2419 0 : wc->stage == LOG_WALK_REPLAY_INODES) {
2420 0 : struct btrfs_inode_item *inode_item;
2421 0 : u32 mode;
2422 :
2423 0 : inode_item = btrfs_item_ptr(eb, i,
2424 : struct btrfs_inode_item);
2425 : /*
2426 : * If we have a tmpfile (O_TMPFILE) that got fsync'ed
2427 : * and never got linked before the fsync, skip it, as
2428 : * replaying it is pointless since it would be deleted
2429 : * later. We skip logging tmpfiles, but it's always
2430 : * possible we are replaying a log created with a kernel
2431 : * that used to log tmpfiles.
2432 : */
2433 0 : if (btrfs_inode_nlink(eb, inode_item) == 0) {
2434 0 : wc->ignore_cur_inode = true;
2435 0 : continue;
2436 : } else {
2437 0 : wc->ignore_cur_inode = false;
2438 : }
2439 0 : ret = replay_xattr_deletes(wc->trans, root, log,
2440 : path, key.objectid);
2441 0 : if (ret)
2442 : break;
2443 0 : mode = btrfs_inode_mode(eb, inode_item);
2444 0 : if (S_ISDIR(mode)) {
2445 0 : ret = replay_dir_deletes(wc->trans,
2446 : root, log, path, key.objectid, 0);
2447 0 : if (ret)
2448 : break;
2449 : }
2450 0 : ret = overwrite_item(wc->trans, root, path,
2451 : eb, i, &key);
2452 0 : if (ret)
2453 : break;
2454 :
2455 : /*
2456 : * Before replaying extents, truncate the inode to its
2457 : * size. We need to do it now and not after log replay
2458 : * because before an fsync we can have prealloc extents
2459 : * added beyond the inode's i_size. If we did it after,
2460 : * through orphan cleanup for example, we would drop
2461 : * those prealloc extents just after replaying them.
2462 : */
2463 0 : if (S_ISREG(mode)) {
2464 0 : struct btrfs_drop_extents_args drop_args = { 0 };
2465 0 : struct inode *inode;
2466 0 : u64 from;
2467 :
2468 0 : inode = read_one_inode(root, key.objectid);
2469 0 : if (!inode) {
2470 : ret = -EIO;
2471 0 : break;
2472 : }
2473 0 : from = ALIGN(i_size_read(inode),
2474 : root->fs_info->sectorsize);
2475 0 : drop_args.start = from;
2476 0 : drop_args.end = (u64)-1;
2477 0 : drop_args.drop_cache = true;
2478 0 : ret = btrfs_drop_extents(wc->trans, root,
2479 : BTRFS_I(inode),
2480 : &drop_args);
2481 0 : if (!ret) {
2482 0 : inode_sub_bytes(inode,
2483 0 : drop_args.bytes_found);
2484 : /* Update the inode's nbytes. */
2485 0 : ret = btrfs_update_inode(wc->trans,
2486 : root, BTRFS_I(inode));
2487 : }
2488 0 : iput(inode);
2489 0 : if (ret)
2490 : break;
2491 : }
2492 :
2493 0 : ret = link_to_fixup_dir(wc->trans, root,
2494 : path, key.objectid);
2495 0 : if (ret)
2496 : break;
2497 : }
2498 :
2499 0 : if (wc->ignore_cur_inode)
2500 0 : continue;
2501 :
2502 0 : if (key.type == BTRFS_DIR_INDEX_KEY &&
2503 0 : wc->stage == LOG_WALK_REPLAY_DIR_INDEX) {
2504 0 : ret = replay_one_dir_item(wc->trans, root, path,
2505 : eb, i, &key);
2506 0 : if (ret)
2507 : break;
2508 : }
2509 :
2510 0 : if (wc->stage < LOG_WALK_REPLAY_ALL)
2511 0 : continue;
2512 :
2513 : /* these keys are simply copied */
2514 0 : if (key.type == BTRFS_XATTR_ITEM_KEY) {
2515 0 : ret = overwrite_item(wc->trans, root, path,
2516 : eb, i, &key);
2517 0 : if (ret)
2518 : break;
2519 0 : } else if (key.type == BTRFS_INODE_REF_KEY ||
2520 : key.type == BTRFS_INODE_EXTREF_KEY) {
2521 0 : ret = add_inode_ref(wc->trans, root, log, path,
2522 : eb, i, &key);
2523 0 : if (ret && ret != -ENOENT)
2524 : break;
2525 : ret = 0;
2526 0 : } else if (key.type == BTRFS_EXTENT_DATA_KEY) {
2527 0 : ret = replay_one_extent(wc->trans, root, path,
2528 : eb, i, &key);
2529 0 : if (ret)
2530 : break;
2531 : }
2532 : /*
2533 : * We don't log BTRFS_DIR_ITEM_KEY keys anymore, only the
2534 : * BTRFS_DIR_INDEX_KEY items which we use to derive the
2535 : * BTRFS_DIR_ITEM_KEY items. If we are replaying a log from an
2536 : * older kernel with such keys, ignore them.
2537 : */
2538 : }
2539 0 : btrfs_free_path(path);
2540 0 : return ret;
2541 : }
2542 :
2543 : /*
2544 : * Correctly adjust the reserved bytes occupied by a log tree extent buffer
2545 : */
2546 0 : static void unaccount_log_buffer(struct btrfs_fs_info *fs_info, u64 start)
2547 : {
2548 0 : struct btrfs_block_group *cache;
2549 :
2550 0 : cache = btrfs_lookup_block_group(fs_info, start);
2551 0 : if (!cache) {
2552 0 : btrfs_err(fs_info, "unable to find block group for %llu", start);
2553 0 : return;
2554 : }
2555 :
2556 0 : spin_lock(&cache->space_info->lock);
2557 0 : spin_lock(&cache->lock);
2558 0 : cache->reserved -= fs_info->nodesize;
2559 0 : cache->space_info->bytes_reserved -= fs_info->nodesize;
2560 0 : spin_unlock(&cache->lock);
2561 0 : spin_unlock(&cache->space_info->lock);
2562 :
2563 0 : btrfs_put_block_group(cache);
2564 : }
2565 :
2566 0 : static int clean_log_buffer(struct btrfs_trans_handle *trans,
2567 : struct extent_buffer *eb)
2568 : {
2569 0 : int ret;
2570 :
2571 0 : btrfs_tree_lock(eb);
2572 0 : btrfs_clear_buffer_dirty(trans, eb);
2573 0 : wait_on_extent_buffer_writeback(eb);
2574 0 : btrfs_tree_unlock(eb);
2575 :
2576 0 : if (trans) {
2577 0 : ret = btrfs_pin_reserved_extent(trans, eb->start, eb->len);
2578 0 : if (ret)
2579 : return ret;
2580 0 : btrfs_redirty_list_add(trans->transaction, eb);
2581 : } else {
2582 0 : unaccount_log_buffer(eb->fs_info, eb->start);
2583 : }
2584 :
2585 : return 0;
2586 : }
2587 :
2588 0 : static noinline int walk_down_log_tree(struct btrfs_trans_handle *trans,
2589 : struct btrfs_root *root,
2590 : struct btrfs_path *path, int *level,
2591 : struct walk_control *wc)
2592 : {
2593 0 : struct btrfs_fs_info *fs_info = root->fs_info;
2594 0 : u64 bytenr;
2595 0 : u64 ptr_gen;
2596 0 : struct extent_buffer *next;
2597 0 : struct extent_buffer *cur;
2598 0 : int ret = 0;
2599 :
2600 0 : while (*level > 0) {
2601 0 : struct btrfs_tree_parent_check check = { 0 };
2602 :
2603 0 : cur = path->nodes[*level];
2604 :
2605 0 : WARN_ON(btrfs_header_level(cur) != *level);
2606 :
2607 0 : if (path->slots[*level] >=
2608 : btrfs_header_nritems(cur))
2609 : break;
2610 :
2611 0 : bytenr = btrfs_node_blockptr(cur, path->slots[*level]);
2612 0 : ptr_gen = btrfs_node_ptr_generation(cur, path->slots[*level]);
2613 0 : check.transid = ptr_gen;
2614 0 : check.level = *level - 1;
2615 0 : check.has_first_key = true;
2616 0 : btrfs_node_key_to_cpu(cur, &check.first_key, path->slots[*level]);
2617 :
2618 0 : next = btrfs_find_create_tree_block(fs_info, bytenr,
2619 : btrfs_header_owner(cur),
2620 0 : *level - 1);
2621 0 : if (IS_ERR(next))
2622 0 : return PTR_ERR(next);
2623 :
2624 0 : if (*level == 1) {
2625 0 : ret = wc->process_func(root, next, wc, ptr_gen,
2626 : *level - 1);
2627 0 : if (ret) {
2628 0 : free_extent_buffer(next);
2629 0 : return ret;
2630 : }
2631 :
2632 0 : path->slots[*level]++;
2633 0 : if (wc->free) {
2634 0 : ret = btrfs_read_extent_buffer(next, &check);
2635 0 : if (ret) {
2636 0 : free_extent_buffer(next);
2637 0 : return ret;
2638 : }
2639 :
2640 0 : ret = clean_log_buffer(trans, next);
2641 0 : if (ret) {
2642 0 : free_extent_buffer(next);
2643 0 : return ret;
2644 : }
2645 : }
2646 0 : free_extent_buffer(next);
2647 0 : continue;
2648 : }
2649 0 : ret = btrfs_read_extent_buffer(next, &check);
2650 0 : if (ret) {
2651 0 : free_extent_buffer(next);
2652 0 : return ret;
2653 : }
2654 :
2655 0 : if (path->nodes[*level-1])
2656 0 : free_extent_buffer(path->nodes[*level-1]);
2657 0 : path->nodes[*level-1] = next;
2658 0 : *level = btrfs_header_level(next);
2659 0 : path->slots[*level] = 0;
2660 0 : cond_resched();
2661 : }
2662 0 : path->slots[*level] = btrfs_header_nritems(path->nodes[*level]);
2663 :
2664 0 : cond_resched();
2665 0 : return 0;
2666 : }
2667 :
2668 0 : static noinline int walk_up_log_tree(struct btrfs_trans_handle *trans,
2669 : struct btrfs_root *root,
2670 : struct btrfs_path *path, int *level,
2671 : struct walk_control *wc)
2672 : {
2673 0 : int i;
2674 0 : int slot;
2675 0 : int ret;
2676 :
2677 0 : for (i = *level; i < BTRFS_MAX_LEVEL - 1 && path->nodes[i]; i++) {
2678 0 : slot = path->slots[i];
2679 0 : if (slot + 1 < btrfs_header_nritems(path->nodes[i])) {
2680 0 : path->slots[i]++;
2681 0 : *level = i;
2682 0 : WARN_ON(*level == 0);
2683 0 : return 0;
2684 : } else {
2685 0 : ret = wc->process_func(root, path->nodes[*level], wc,
2686 : btrfs_header_generation(path->nodes[*level]),
2687 : *level);
2688 0 : if (ret)
2689 0 : return ret;
2690 :
2691 0 : if (wc->free) {
2692 0 : ret = clean_log_buffer(trans, path->nodes[*level]);
2693 0 : if (ret)
2694 0 : return ret;
2695 : }
2696 0 : free_extent_buffer(path->nodes[*level]);
2697 0 : path->nodes[*level] = NULL;
2698 0 : *level = i + 1;
2699 : }
2700 : }
2701 : return 1;
2702 : }
2703 :
2704 : /*
2705 : * drop the reference count on the tree rooted at 'snap'. This traverses
2706 : * the tree freeing any blocks that have a ref count of zero after being
2707 : * decremented.
2708 : */
2709 0 : static int walk_log_tree(struct btrfs_trans_handle *trans,
2710 : struct btrfs_root *log, struct walk_control *wc)
2711 : {
2712 0 : int ret = 0;
2713 0 : int wret;
2714 0 : int level;
2715 0 : struct btrfs_path *path;
2716 0 : int orig_level;
2717 :
2718 0 : path = btrfs_alloc_path();
2719 0 : if (!path)
2720 : return -ENOMEM;
2721 :
2722 0 : level = btrfs_header_level(log->node);
2723 0 : orig_level = level;
2724 0 : path->nodes[level] = log->node;
2725 0 : atomic_inc(&log->node->refs);
2726 0 : path->slots[level] = 0;
2727 :
2728 0 : while (1) {
2729 0 : wret = walk_down_log_tree(trans, log, path, &level, wc);
2730 0 : if (wret > 0)
2731 : break;
2732 0 : if (wret < 0) {
2733 0 : ret = wret;
2734 0 : goto out;
2735 : }
2736 :
2737 0 : wret = walk_up_log_tree(trans, log, path, &level, wc);
2738 0 : if (wret > 0)
2739 : break;
2740 0 : if (wret < 0) {
2741 0 : ret = wret;
2742 0 : goto out;
2743 : }
2744 : }
2745 :
2746 : /* was the root node processed? if not, catch it here */
2747 0 : if (path->nodes[orig_level]) {
2748 0 : ret = wc->process_func(log, path->nodes[orig_level], wc,
2749 : btrfs_header_generation(path->nodes[orig_level]),
2750 : orig_level);
2751 0 : if (ret)
2752 0 : goto out;
2753 0 : if (wc->free)
2754 0 : ret = clean_log_buffer(trans, path->nodes[orig_level]);
2755 : }
2756 :
2757 0 : out:
2758 0 : btrfs_free_path(path);
2759 0 : return ret;
2760 : }
2761 :
2762 : /*
2763 : * helper function to update the item for a given subvolumes log root
2764 : * in the tree of log roots
2765 : */
2766 0 : static int update_log_root(struct btrfs_trans_handle *trans,
2767 : struct btrfs_root *log,
2768 : struct btrfs_root_item *root_item)
2769 : {
2770 0 : struct btrfs_fs_info *fs_info = log->fs_info;
2771 0 : int ret;
2772 :
2773 0 : if (log->log_transid == 1) {
2774 : /* insert root item on the first sync */
2775 0 : ret = btrfs_insert_root(trans, fs_info->log_root_tree,
2776 0 : &log->root_key, root_item);
2777 : } else {
2778 0 : ret = btrfs_update_root(trans, fs_info->log_root_tree,
2779 : &log->root_key, root_item);
2780 : }
2781 0 : return ret;
2782 : }
2783 :
2784 0 : static void wait_log_commit(struct btrfs_root *root, int transid)
2785 : {
2786 0 : DEFINE_WAIT(wait);
2787 0 : int index = transid % 2;
2788 :
2789 : /*
2790 : * we only allow two pending log transactions at a time,
2791 : * so we know that if ours is more than 2 older than the
2792 : * current transaction, we're done
2793 : */
2794 0 : for (;;) {
2795 0 : prepare_to_wait(&root->log_commit_wait[index],
2796 : &wait, TASK_UNINTERRUPTIBLE);
2797 :
2798 0 : if (!(root->log_transid_committed < transid &&
2799 0 : atomic_read(&root->log_commit[index])))
2800 : break;
2801 :
2802 0 : mutex_unlock(&root->log_mutex);
2803 0 : schedule();
2804 0 : mutex_lock(&root->log_mutex);
2805 : }
2806 0 : finish_wait(&root->log_commit_wait[index], &wait);
2807 0 : }
2808 :
2809 0 : static void wait_for_writer(struct btrfs_root *root)
2810 : {
2811 0 : DEFINE_WAIT(wait);
2812 :
2813 0 : for (;;) {
2814 0 : prepare_to_wait(&root->log_writer_wait, &wait,
2815 : TASK_UNINTERRUPTIBLE);
2816 0 : if (!atomic_read(&root->log_writers))
2817 : break;
2818 :
2819 0 : mutex_unlock(&root->log_mutex);
2820 0 : schedule();
2821 0 : mutex_lock(&root->log_mutex);
2822 : }
2823 0 : finish_wait(&root->log_writer_wait, &wait);
2824 0 : }
2825 :
2826 0 : static inline void btrfs_remove_log_ctx(struct btrfs_root *root,
2827 : struct btrfs_log_ctx *ctx)
2828 : {
2829 0 : mutex_lock(&root->log_mutex);
2830 0 : list_del_init(&ctx->list);
2831 0 : mutex_unlock(&root->log_mutex);
2832 0 : }
2833 :
2834 : /*
2835 : * Invoked in log mutex context, or be sure there is no other task which
2836 : * can access the list.
2837 : */
2838 0 : static inline void btrfs_remove_all_log_ctxs(struct btrfs_root *root,
2839 : int index, int error)
2840 : {
2841 0 : struct btrfs_log_ctx *ctx;
2842 0 : struct btrfs_log_ctx *safe;
2843 :
2844 0 : list_for_each_entry_safe(ctx, safe, &root->log_ctxs[index], list) {
2845 0 : list_del_init(&ctx->list);
2846 0 : ctx->log_ret = error;
2847 : }
2848 0 : }
2849 :
2850 : /*
2851 : * btrfs_sync_log does sends a given tree log down to the disk and
2852 : * updates the super blocks to record it. When this call is done,
2853 : * you know that any inodes previously logged are safely on disk only
2854 : * if it returns 0.
2855 : *
2856 : * Any other return value means you need to call btrfs_commit_transaction.
2857 : * Some of the edge cases for fsyncing directories that have had unlinks
2858 : * or renames done in the past mean that sometimes the only safe
2859 : * fsync is to commit the whole FS. When btrfs_sync_log returns -EAGAIN,
2860 : * that has happened.
2861 : */
2862 0 : int btrfs_sync_log(struct btrfs_trans_handle *trans,
2863 : struct btrfs_root *root, struct btrfs_log_ctx *ctx)
2864 : {
2865 0 : int index1;
2866 0 : int index2;
2867 0 : int mark;
2868 0 : int ret;
2869 0 : struct btrfs_fs_info *fs_info = root->fs_info;
2870 0 : struct btrfs_root *log = root->log_root;
2871 0 : struct btrfs_root *log_root_tree = fs_info->log_root_tree;
2872 0 : struct btrfs_root_item new_root_item;
2873 0 : int log_transid = 0;
2874 0 : struct btrfs_log_ctx root_log_ctx;
2875 0 : struct blk_plug plug;
2876 0 : u64 log_root_start;
2877 0 : u64 log_root_level;
2878 :
2879 0 : mutex_lock(&root->log_mutex);
2880 0 : log_transid = ctx->log_transid;
2881 0 : if (root->log_transid_committed >= log_transid) {
2882 0 : mutex_unlock(&root->log_mutex);
2883 0 : return ctx->log_ret;
2884 : }
2885 :
2886 0 : index1 = log_transid % 2;
2887 0 : if (atomic_read(&root->log_commit[index1])) {
2888 0 : wait_log_commit(root, log_transid);
2889 0 : mutex_unlock(&root->log_mutex);
2890 0 : return ctx->log_ret;
2891 : }
2892 0 : ASSERT(log_transid == root->log_transid);
2893 0 : atomic_set(&root->log_commit[index1], 1);
2894 :
2895 : /* wait for previous tree log sync to complete */
2896 0 : if (atomic_read(&root->log_commit[(index1 + 1) % 2]))
2897 0 : wait_log_commit(root, log_transid - 1);
2898 :
2899 0 : while (1) {
2900 0 : int batch = atomic_read(&root->log_batch);
2901 : /* when we're on an ssd, just kick the log commit out */
2902 0 : if (!btrfs_test_opt(fs_info, SSD) &&
2903 0 : test_bit(BTRFS_ROOT_MULTI_LOG_TASKS, &root->state)) {
2904 0 : mutex_unlock(&root->log_mutex);
2905 0 : schedule_timeout_uninterruptible(1);
2906 0 : mutex_lock(&root->log_mutex);
2907 : }
2908 0 : wait_for_writer(root);
2909 0 : if (batch == atomic_read(&root->log_batch))
2910 : break;
2911 : }
2912 :
2913 : /* bail out if we need to do a full commit */
2914 0 : if (btrfs_need_log_full_commit(trans)) {
2915 0 : ret = BTRFS_LOG_FORCE_COMMIT;
2916 0 : mutex_unlock(&root->log_mutex);
2917 0 : goto out;
2918 : }
2919 :
2920 0 : if (log_transid % 2 == 0)
2921 : mark = EXTENT_DIRTY;
2922 : else
2923 0 : mark = EXTENT_NEW;
2924 :
2925 : /* we start IO on all the marked extents here, but we don't actually
2926 : * wait for them until later.
2927 : */
2928 0 : blk_start_plug(&plug);
2929 0 : ret = btrfs_write_marked_extents(fs_info, &log->dirty_log_pages, mark);
2930 : /*
2931 : * -EAGAIN happens when someone, e.g., a concurrent transaction
2932 : * commit, writes a dirty extent in this tree-log commit. This
2933 : * concurrent write will create a hole writing out the extents,
2934 : * and we cannot proceed on a zoned filesystem, requiring
2935 : * sequential writing. While we can bail out to a full commit
2936 : * here, but we can continue hoping the concurrent writing fills
2937 : * the hole.
2938 : */
2939 0 : if (ret == -EAGAIN && btrfs_is_zoned(fs_info))
2940 : ret = 0;
2941 0 : if (ret) {
2942 0 : blk_finish_plug(&plug);
2943 0 : btrfs_set_log_full_commit(trans);
2944 0 : mutex_unlock(&root->log_mutex);
2945 0 : goto out;
2946 : }
2947 :
2948 : /*
2949 : * We _must_ update under the root->log_mutex in order to make sure we
2950 : * have a consistent view of the log root we are trying to commit at
2951 : * this moment.
2952 : *
2953 : * We _must_ copy this into a local copy, because we are not holding the
2954 : * log_root_tree->log_mutex yet. This is important because when we
2955 : * commit the log_root_tree we must have a consistent view of the
2956 : * log_root_tree when we update the super block to point at the
2957 : * log_root_tree bytenr. If we update the log_root_tree here we'll race
2958 : * with the commit and possibly point at the new block which we may not
2959 : * have written out.
2960 : */
2961 0 : btrfs_set_root_node(&log->root_item, log->node);
2962 0 : memcpy(&new_root_item, &log->root_item, sizeof(new_root_item));
2963 :
2964 0 : root->log_transid++;
2965 0 : log->log_transid = root->log_transid;
2966 0 : root->log_start_pid = 0;
2967 : /*
2968 : * IO has been started, blocks of the log tree have WRITTEN flag set
2969 : * in their headers. new modifications of the log will be written to
2970 : * new positions. so it's safe to allow log writers to go in.
2971 : */
2972 0 : mutex_unlock(&root->log_mutex);
2973 :
2974 0 : if (btrfs_is_zoned(fs_info)) {
2975 0 : mutex_lock(&fs_info->tree_root->log_mutex);
2976 0 : if (!log_root_tree->node) {
2977 0 : ret = btrfs_alloc_log_tree_node(trans, log_root_tree);
2978 0 : if (ret) {
2979 0 : mutex_unlock(&fs_info->tree_root->log_mutex);
2980 0 : blk_finish_plug(&plug);
2981 0 : goto out;
2982 : }
2983 : }
2984 0 : mutex_unlock(&fs_info->tree_root->log_mutex);
2985 : }
2986 :
2987 0 : btrfs_init_log_ctx(&root_log_ctx, NULL);
2988 :
2989 0 : mutex_lock(&log_root_tree->log_mutex);
2990 :
2991 0 : index2 = log_root_tree->log_transid % 2;
2992 0 : list_add_tail(&root_log_ctx.list, &log_root_tree->log_ctxs[index2]);
2993 0 : root_log_ctx.log_transid = log_root_tree->log_transid;
2994 :
2995 : /*
2996 : * Now we are safe to update the log_root_tree because we're under the
2997 : * log_mutex, and we're a current writer so we're holding the commit
2998 : * open until we drop the log_mutex.
2999 : */
3000 0 : ret = update_log_root(trans, log, &new_root_item);
3001 0 : if (ret) {
3002 0 : if (!list_empty(&root_log_ctx.list))
3003 0 : list_del_init(&root_log_ctx.list);
3004 :
3005 0 : blk_finish_plug(&plug);
3006 0 : btrfs_set_log_full_commit(trans);
3007 0 : if (ret != -ENOSPC)
3008 0 : btrfs_err(fs_info,
3009 : "failed to update log for root %llu ret %d",
3010 : root->root_key.objectid, ret);
3011 0 : btrfs_wait_tree_log_extents(log, mark);
3012 0 : mutex_unlock(&log_root_tree->log_mutex);
3013 0 : goto out;
3014 : }
3015 :
3016 0 : if (log_root_tree->log_transid_committed >= root_log_ctx.log_transid) {
3017 0 : blk_finish_plug(&plug);
3018 0 : list_del_init(&root_log_ctx.list);
3019 0 : mutex_unlock(&log_root_tree->log_mutex);
3020 0 : ret = root_log_ctx.log_ret;
3021 0 : goto out;
3022 : }
3023 :
3024 0 : index2 = root_log_ctx.log_transid % 2;
3025 0 : if (atomic_read(&log_root_tree->log_commit[index2])) {
3026 0 : blk_finish_plug(&plug);
3027 0 : ret = btrfs_wait_tree_log_extents(log, mark);
3028 0 : wait_log_commit(log_root_tree,
3029 : root_log_ctx.log_transid);
3030 0 : mutex_unlock(&log_root_tree->log_mutex);
3031 0 : if (!ret)
3032 0 : ret = root_log_ctx.log_ret;
3033 0 : goto out;
3034 : }
3035 0 : ASSERT(root_log_ctx.log_transid == log_root_tree->log_transid);
3036 0 : atomic_set(&log_root_tree->log_commit[index2], 1);
3037 :
3038 0 : if (atomic_read(&log_root_tree->log_commit[(index2 + 1) % 2])) {
3039 0 : wait_log_commit(log_root_tree,
3040 : root_log_ctx.log_transid - 1);
3041 : }
3042 :
3043 : /*
3044 : * now that we've moved on to the tree of log tree roots,
3045 : * check the full commit flag again
3046 : */
3047 0 : if (btrfs_need_log_full_commit(trans)) {
3048 0 : blk_finish_plug(&plug);
3049 0 : btrfs_wait_tree_log_extents(log, mark);
3050 0 : mutex_unlock(&log_root_tree->log_mutex);
3051 0 : ret = BTRFS_LOG_FORCE_COMMIT;
3052 0 : goto out_wake_log_root;
3053 : }
3054 :
3055 0 : ret = btrfs_write_marked_extents(fs_info,
3056 : &log_root_tree->dirty_log_pages,
3057 : EXTENT_DIRTY | EXTENT_NEW);
3058 0 : blk_finish_plug(&plug);
3059 : /*
3060 : * As described above, -EAGAIN indicates a hole in the extents. We
3061 : * cannot wait for these write outs since the waiting cause a
3062 : * deadlock. Bail out to the full commit instead.
3063 : */
3064 0 : if (ret == -EAGAIN && btrfs_is_zoned(fs_info)) {
3065 0 : btrfs_set_log_full_commit(trans);
3066 0 : btrfs_wait_tree_log_extents(log, mark);
3067 0 : mutex_unlock(&log_root_tree->log_mutex);
3068 0 : goto out_wake_log_root;
3069 0 : } else if (ret) {
3070 0 : btrfs_set_log_full_commit(trans);
3071 0 : mutex_unlock(&log_root_tree->log_mutex);
3072 0 : goto out_wake_log_root;
3073 : }
3074 0 : ret = btrfs_wait_tree_log_extents(log, mark);
3075 0 : if (!ret)
3076 0 : ret = btrfs_wait_tree_log_extents(log_root_tree,
3077 : EXTENT_NEW | EXTENT_DIRTY);
3078 0 : if (ret) {
3079 0 : btrfs_set_log_full_commit(trans);
3080 0 : mutex_unlock(&log_root_tree->log_mutex);
3081 0 : goto out_wake_log_root;
3082 : }
3083 :
3084 0 : log_root_start = log_root_tree->node->start;
3085 0 : log_root_level = btrfs_header_level(log_root_tree->node);
3086 0 : log_root_tree->log_transid++;
3087 0 : mutex_unlock(&log_root_tree->log_mutex);
3088 :
3089 : /*
3090 : * Here we are guaranteed that nobody is going to write the superblock
3091 : * for the current transaction before us and that neither we do write
3092 : * our superblock before the previous transaction finishes its commit
3093 : * and writes its superblock, because:
3094 : *
3095 : * 1) We are holding a handle on the current transaction, so no body
3096 : * can commit it until we release the handle;
3097 : *
3098 : * 2) Before writing our superblock we acquire the tree_log_mutex, so
3099 : * if the previous transaction is still committing, and hasn't yet
3100 : * written its superblock, we wait for it to do it, because a
3101 : * transaction commit acquires the tree_log_mutex when the commit
3102 : * begins and releases it only after writing its superblock.
3103 : */
3104 0 : mutex_lock(&fs_info->tree_log_mutex);
3105 :
3106 : /*
3107 : * The previous transaction writeout phase could have failed, and thus
3108 : * marked the fs in an error state. We must not commit here, as we
3109 : * could have updated our generation in the super_for_commit and
3110 : * writing the super here would result in transid mismatches. If there
3111 : * is an error here just bail.
3112 : */
3113 0 : if (BTRFS_FS_ERROR(fs_info)) {
3114 0 : ret = -EIO;
3115 0 : btrfs_set_log_full_commit(trans);
3116 0 : btrfs_abort_transaction(trans, ret);
3117 0 : mutex_unlock(&fs_info->tree_log_mutex);
3118 0 : goto out_wake_log_root;
3119 : }
3120 :
3121 0 : btrfs_set_super_log_root(fs_info->super_for_commit, log_root_start);
3122 0 : btrfs_set_super_log_root_level(fs_info->super_for_commit, log_root_level);
3123 0 : ret = write_all_supers(fs_info, 1);
3124 0 : mutex_unlock(&fs_info->tree_log_mutex);
3125 0 : if (ret) {
3126 0 : btrfs_set_log_full_commit(trans);
3127 0 : btrfs_abort_transaction(trans, ret);
3128 0 : goto out_wake_log_root;
3129 : }
3130 :
3131 : /*
3132 : * We know there can only be one task here, since we have not yet set
3133 : * root->log_commit[index1] to 0 and any task attempting to sync the
3134 : * log must wait for the previous log transaction to commit if it's
3135 : * still in progress or wait for the current log transaction commit if
3136 : * someone else already started it. We use <= and not < because the
3137 : * first log transaction has an ID of 0.
3138 : */
3139 0 : ASSERT(root->last_log_commit <= log_transid);
3140 0 : root->last_log_commit = log_transid;
3141 :
3142 0 : out_wake_log_root:
3143 0 : mutex_lock(&log_root_tree->log_mutex);
3144 0 : btrfs_remove_all_log_ctxs(log_root_tree, index2, ret);
3145 :
3146 0 : log_root_tree->log_transid_committed++;
3147 0 : atomic_set(&log_root_tree->log_commit[index2], 0);
3148 0 : mutex_unlock(&log_root_tree->log_mutex);
3149 :
3150 : /*
3151 : * The barrier before waitqueue_active (in cond_wake_up) is needed so
3152 : * all the updates above are seen by the woken threads. It might not be
3153 : * necessary, but proving that seems to be hard.
3154 : */
3155 0 : cond_wake_up(&log_root_tree->log_commit_wait[index2]);
3156 0 : out:
3157 0 : mutex_lock(&root->log_mutex);
3158 0 : btrfs_remove_all_log_ctxs(root, index1, ret);
3159 0 : root->log_transid_committed++;
3160 0 : atomic_set(&root->log_commit[index1], 0);
3161 0 : mutex_unlock(&root->log_mutex);
3162 :
3163 : /*
3164 : * The barrier before waitqueue_active (in cond_wake_up) is needed so
3165 : * all the updates above are seen by the woken threads. It might not be
3166 : * necessary, but proving that seems to be hard.
3167 : */
3168 0 : cond_wake_up(&root->log_commit_wait[index1]);
3169 0 : return ret;
3170 : }
3171 :
3172 0 : static void free_log_tree(struct btrfs_trans_handle *trans,
3173 : struct btrfs_root *log)
3174 : {
3175 0 : int ret;
3176 0 : struct walk_control wc = {
3177 : .free = 1,
3178 : .process_func = process_one_buffer
3179 : };
3180 :
3181 0 : if (log->node) {
3182 0 : ret = walk_log_tree(trans, log, &wc);
3183 0 : if (ret) {
3184 : /*
3185 : * We weren't able to traverse the entire log tree, the
3186 : * typical scenario is getting an -EIO when reading an
3187 : * extent buffer of the tree, due to a previous writeback
3188 : * failure of it.
3189 : */
3190 0 : set_bit(BTRFS_FS_STATE_LOG_CLEANUP_ERROR,
3191 0 : &log->fs_info->fs_state);
3192 :
3193 : /*
3194 : * Some extent buffers of the log tree may still be dirty
3195 : * and not yet written back to storage, because we may
3196 : * have updates to a log tree without syncing a log tree,
3197 : * such as during rename and link operations. So flush
3198 : * them out and wait for their writeback to complete, so
3199 : * that we properly cleanup their state and pages.
3200 : */
3201 0 : btrfs_write_marked_extents(log->fs_info,
3202 : &log->dirty_log_pages,
3203 : EXTENT_DIRTY | EXTENT_NEW);
3204 0 : btrfs_wait_tree_log_extents(log,
3205 : EXTENT_DIRTY | EXTENT_NEW);
3206 :
3207 0 : if (trans)
3208 0 : btrfs_abort_transaction(trans, ret);
3209 : else
3210 0 : btrfs_handle_fs_error(log->fs_info, ret, NULL);
3211 : }
3212 : }
3213 :
3214 0 : clear_extent_bits(&log->dirty_log_pages, 0, (u64)-1,
3215 : EXTENT_DIRTY | EXTENT_NEW | EXTENT_NEED_WAIT);
3216 0 : extent_io_tree_release(&log->log_csum_range);
3217 :
3218 0 : btrfs_put_root(log);
3219 0 : }
3220 :
3221 : /*
3222 : * free all the extents used by the tree log. This should be called
3223 : * at commit time of the full transaction
3224 : */
3225 0 : int btrfs_free_log(struct btrfs_trans_handle *trans, struct btrfs_root *root)
3226 : {
3227 0 : if (root->log_root) {
3228 0 : free_log_tree(trans, root->log_root);
3229 0 : root->log_root = NULL;
3230 0 : clear_bit(BTRFS_ROOT_HAS_LOG_TREE, &root->state);
3231 : }
3232 0 : return 0;
3233 : }
3234 :
3235 0 : int btrfs_free_log_root_tree(struct btrfs_trans_handle *trans,
3236 : struct btrfs_fs_info *fs_info)
3237 : {
3238 0 : if (fs_info->log_root_tree) {
3239 0 : free_log_tree(trans, fs_info->log_root_tree);
3240 0 : fs_info->log_root_tree = NULL;
3241 0 : clear_bit(BTRFS_ROOT_HAS_LOG_TREE, &fs_info->tree_root->state);
3242 : }
3243 0 : return 0;
3244 : }
3245 :
3246 : /*
3247 : * Check if an inode was logged in the current transaction. This correctly deals
3248 : * with the case where the inode was logged but has a logged_trans of 0, which
3249 : * happens if the inode is evicted and loaded again, as logged_trans is an in
3250 : * memory only field (not persisted).
3251 : *
3252 : * Returns 1 if the inode was logged before in the transaction, 0 if it was not,
3253 : * and < 0 on error.
3254 : */
3255 0 : static int inode_logged(const struct btrfs_trans_handle *trans,
3256 : struct btrfs_inode *inode,
3257 : struct btrfs_path *path_in)
3258 : {
3259 0 : struct btrfs_path *path = path_in;
3260 0 : struct btrfs_key key;
3261 0 : int ret;
3262 :
3263 0 : if (inode->logged_trans == trans->transid)
3264 : return 1;
3265 :
3266 : /*
3267 : * If logged_trans is not 0, then we know the inode logged was not logged
3268 : * in this transaction, so we can return false right away.
3269 : */
3270 0 : if (inode->logged_trans > 0)
3271 : return 0;
3272 :
3273 : /*
3274 : * If no log tree was created for this root in this transaction, then
3275 : * the inode can not have been logged in this transaction. In that case
3276 : * set logged_trans to anything greater than 0 and less than the current
3277 : * transaction's ID, to avoid the search below in a future call in case
3278 : * a log tree gets created after this.
3279 : */
3280 0 : if (!test_bit(BTRFS_ROOT_HAS_LOG_TREE, &inode->root->state)) {
3281 0 : inode->logged_trans = trans->transid - 1;
3282 0 : return 0;
3283 : }
3284 :
3285 : /*
3286 : * We have a log tree and the inode's logged_trans is 0. We can't tell
3287 : * for sure if the inode was logged before in this transaction by looking
3288 : * only at logged_trans. We could be pessimistic and assume it was, but
3289 : * that can lead to unnecessarily logging an inode during rename and link
3290 : * operations, and then further updating the log in followup rename and
3291 : * link operations, specially if it's a directory, which adds latency
3292 : * visible to applications doing a series of rename or link operations.
3293 : *
3294 : * A logged_trans of 0 here can mean several things:
3295 : *
3296 : * 1) The inode was never logged since the filesystem was mounted, and may
3297 : * or may have not been evicted and loaded again;
3298 : *
3299 : * 2) The inode was logged in a previous transaction, then evicted and
3300 : * then loaded again;
3301 : *
3302 : * 3) The inode was logged in the current transaction, then evicted and
3303 : * then loaded again.
3304 : *
3305 : * For cases 1) and 2) we don't want to return true, but we need to detect
3306 : * case 3) and return true. So we do a search in the log root for the inode
3307 : * item.
3308 : */
3309 0 : key.objectid = btrfs_ino(inode);
3310 0 : key.type = BTRFS_INODE_ITEM_KEY;
3311 0 : key.offset = 0;
3312 :
3313 0 : if (!path) {
3314 0 : path = btrfs_alloc_path();
3315 0 : if (!path)
3316 : return -ENOMEM;
3317 : }
3318 :
3319 0 : ret = btrfs_search_slot(NULL, inode->root->log_root, &key, path, 0, 0);
3320 :
3321 0 : if (path_in)
3322 0 : btrfs_release_path(path);
3323 : else
3324 0 : btrfs_free_path(path);
3325 :
3326 : /*
3327 : * Logging an inode always results in logging its inode item. So if we
3328 : * did not find the item we know the inode was not logged for sure.
3329 : */
3330 0 : if (ret < 0) {
3331 : return ret;
3332 0 : } else if (ret > 0) {
3333 : /*
3334 : * Set logged_trans to a value greater than 0 and less then the
3335 : * current transaction to avoid doing the search in future calls.
3336 : */
3337 0 : inode->logged_trans = trans->transid - 1;
3338 0 : return 0;
3339 : }
3340 :
3341 : /*
3342 : * The inode was previously logged and then evicted, set logged_trans to
3343 : * the current transacion's ID, to avoid future tree searches as long as
3344 : * the inode is not evicted again.
3345 : */
3346 0 : inode->logged_trans = trans->transid;
3347 :
3348 : /*
3349 : * If it's a directory, then we must set last_dir_index_offset to the
3350 : * maximum possible value, so that the next attempt to log the inode does
3351 : * not skip checking if dir index keys found in modified subvolume tree
3352 : * leaves have been logged before, otherwise it would result in attempts
3353 : * to insert duplicate dir index keys in the log tree. This must be done
3354 : * because last_dir_index_offset is an in-memory only field, not persisted
3355 : * in the inode item or any other on-disk structure, so its value is lost
3356 : * once the inode is evicted.
3357 : */
3358 0 : if (S_ISDIR(inode->vfs_inode.i_mode))
3359 0 : inode->last_dir_index_offset = (u64)-1;
3360 :
3361 : return 1;
3362 : }
3363 :
3364 : /*
3365 : * Delete a directory entry from the log if it exists.
3366 : *
3367 : * Returns < 0 on error
3368 : * 1 if the entry does not exists
3369 : * 0 if the entry existed and was successfully deleted
3370 : */
3371 0 : static int del_logged_dentry(struct btrfs_trans_handle *trans,
3372 : struct btrfs_root *log,
3373 : struct btrfs_path *path,
3374 : u64 dir_ino,
3375 : const struct fscrypt_str *name,
3376 : u64 index)
3377 : {
3378 0 : struct btrfs_dir_item *di;
3379 :
3380 : /*
3381 : * We only log dir index items of a directory, so we don't need to look
3382 : * for dir item keys.
3383 : */
3384 0 : di = btrfs_lookup_dir_index_item(trans, log, path, dir_ino,
3385 : index, name, -1);
3386 0 : if (IS_ERR(di))
3387 0 : return PTR_ERR(di);
3388 0 : else if (!di)
3389 : return 1;
3390 :
3391 : /*
3392 : * We do not need to update the size field of the directory's
3393 : * inode item because on log replay we update the field to reflect
3394 : * all existing entries in the directory (see overwrite_item()).
3395 : */
3396 0 : return btrfs_delete_one_dir_name(trans, log, path, di);
3397 : }
3398 :
3399 : /*
3400 : * If both a file and directory are logged, and unlinks or renames are
3401 : * mixed in, we have a few interesting corners:
3402 : *
3403 : * create file X in dir Y
3404 : * link file X to X.link in dir Y
3405 : * fsync file X
3406 : * unlink file X but leave X.link
3407 : * fsync dir Y
3408 : *
3409 : * After a crash we would expect only X.link to exist. But file X
3410 : * didn't get fsync'd again so the log has back refs for X and X.link.
3411 : *
3412 : * We solve this by removing directory entries and inode backrefs from the
3413 : * log when a file that was logged in the current transaction is
3414 : * unlinked. Any later fsync will include the updated log entries, and
3415 : * we'll be able to reconstruct the proper directory items from backrefs.
3416 : *
3417 : * This optimizations allows us to avoid relogging the entire inode
3418 : * or the entire directory.
3419 : */
3420 0 : void btrfs_del_dir_entries_in_log(struct btrfs_trans_handle *trans,
3421 : struct btrfs_root *root,
3422 : const struct fscrypt_str *name,
3423 : struct btrfs_inode *dir, u64 index)
3424 : {
3425 0 : struct btrfs_path *path;
3426 0 : int ret;
3427 :
3428 0 : ret = inode_logged(trans, dir, NULL);
3429 0 : if (ret == 0)
3430 : return;
3431 0 : else if (ret < 0) {
3432 0 : btrfs_set_log_full_commit(trans);
3433 0 : return;
3434 : }
3435 :
3436 0 : ret = join_running_log_trans(root);
3437 0 : if (ret)
3438 : return;
3439 :
3440 0 : mutex_lock(&dir->log_mutex);
3441 :
3442 0 : path = btrfs_alloc_path();
3443 0 : if (!path) {
3444 0 : ret = -ENOMEM;
3445 0 : goto out_unlock;
3446 : }
3447 :
3448 0 : ret = del_logged_dentry(trans, root->log_root, path, btrfs_ino(dir),
3449 : name, index);
3450 0 : btrfs_free_path(path);
3451 0 : out_unlock:
3452 0 : mutex_unlock(&dir->log_mutex);
3453 0 : if (ret < 0)
3454 0 : btrfs_set_log_full_commit(trans);
3455 0 : btrfs_end_log_trans(root);
3456 : }
3457 :
3458 : /* see comments for btrfs_del_dir_entries_in_log */
3459 0 : void btrfs_del_inode_ref_in_log(struct btrfs_trans_handle *trans,
3460 : struct btrfs_root *root,
3461 : const struct fscrypt_str *name,
3462 : struct btrfs_inode *inode, u64 dirid)
3463 : {
3464 0 : struct btrfs_root *log;
3465 0 : u64 index;
3466 0 : int ret;
3467 :
3468 0 : ret = inode_logged(trans, inode, NULL);
3469 0 : if (ret == 0)
3470 0 : return;
3471 0 : else if (ret < 0) {
3472 0 : btrfs_set_log_full_commit(trans);
3473 0 : return;
3474 : }
3475 :
3476 0 : ret = join_running_log_trans(root);
3477 0 : if (ret)
3478 : return;
3479 0 : log = root->log_root;
3480 0 : mutex_lock(&inode->log_mutex);
3481 :
3482 0 : ret = btrfs_del_inode_ref(trans, log, name, btrfs_ino(inode),
3483 : dirid, &index);
3484 0 : mutex_unlock(&inode->log_mutex);
3485 0 : if (ret < 0 && ret != -ENOENT)
3486 0 : btrfs_set_log_full_commit(trans);
3487 0 : btrfs_end_log_trans(root);
3488 : }
3489 :
3490 : /*
3491 : * creates a range item in the log for 'dirid'. first_offset and
3492 : * last_offset tell us which parts of the key space the log should
3493 : * be considered authoritative for.
3494 : */
3495 0 : static noinline int insert_dir_log_key(struct btrfs_trans_handle *trans,
3496 : struct btrfs_root *log,
3497 : struct btrfs_path *path,
3498 : u64 dirid,
3499 : u64 first_offset, u64 last_offset)
3500 : {
3501 0 : int ret;
3502 0 : struct btrfs_key key;
3503 0 : struct btrfs_dir_log_item *item;
3504 :
3505 0 : key.objectid = dirid;
3506 0 : key.offset = first_offset;
3507 0 : key.type = BTRFS_DIR_LOG_INDEX_KEY;
3508 0 : ret = btrfs_insert_empty_item(trans, log, path, &key, sizeof(*item));
3509 : /*
3510 : * -EEXIST is fine and can happen sporadically when we are logging a
3511 : * directory and have concurrent insertions in the subvolume's tree for
3512 : * items from other inodes and that result in pushing off some dir items
3513 : * from one leaf to another in order to accommodate for the new items.
3514 : * This results in logging the same dir index range key.
3515 : */
3516 0 : if (ret && ret != -EEXIST)
3517 : return ret;
3518 :
3519 0 : item = btrfs_item_ptr(path->nodes[0], path->slots[0],
3520 : struct btrfs_dir_log_item);
3521 0 : if (ret == -EEXIST) {
3522 0 : const u64 curr_end = btrfs_dir_log_end(path->nodes[0], item);
3523 :
3524 : /*
3525 : * btrfs_del_dir_entries_in_log() might have been called during
3526 : * an unlink between the initial insertion of this key and the
3527 : * current update, or we might be logging a single entry deletion
3528 : * during a rename, so set the new last_offset to the max value.
3529 : */
3530 0 : last_offset = max(last_offset, curr_end);
3531 : }
3532 0 : btrfs_set_dir_log_end(path->nodes[0], item, last_offset);
3533 0 : btrfs_mark_buffer_dirty(path->nodes[0]);
3534 0 : btrfs_release_path(path);
3535 0 : return 0;
3536 : }
3537 :
3538 0 : static int flush_dir_items_batch(struct btrfs_trans_handle *trans,
3539 : struct btrfs_inode *inode,
3540 : struct extent_buffer *src,
3541 : struct btrfs_path *dst_path,
3542 : int start_slot,
3543 : int count)
3544 : {
3545 0 : struct btrfs_root *log = inode->root->log_root;
3546 0 : char *ins_data = NULL;
3547 0 : struct btrfs_item_batch batch;
3548 0 : struct extent_buffer *dst;
3549 0 : unsigned long src_offset;
3550 0 : unsigned long dst_offset;
3551 0 : u64 last_index;
3552 0 : struct btrfs_key key;
3553 0 : u32 item_size;
3554 0 : int ret;
3555 0 : int i;
3556 :
3557 0 : ASSERT(count > 0);
3558 0 : batch.nr = count;
3559 :
3560 0 : if (count == 1) {
3561 0 : btrfs_item_key_to_cpu(src, &key, start_slot);
3562 0 : item_size = btrfs_item_size(src, start_slot);
3563 0 : batch.keys = &key;
3564 0 : batch.data_sizes = &item_size;
3565 0 : batch.total_data_size = item_size;
3566 : } else {
3567 0 : struct btrfs_key *ins_keys;
3568 0 : u32 *ins_sizes;
3569 :
3570 0 : ins_data = kmalloc(count * sizeof(u32) +
3571 : count * sizeof(struct btrfs_key), GFP_NOFS);
3572 0 : if (!ins_data)
3573 : return -ENOMEM;
3574 :
3575 0 : ins_sizes = (u32 *)ins_data;
3576 0 : ins_keys = (struct btrfs_key *)(ins_data + count * sizeof(u32));
3577 0 : batch.keys = ins_keys;
3578 0 : batch.data_sizes = ins_sizes;
3579 0 : batch.total_data_size = 0;
3580 :
3581 0 : for (i = 0; i < count; i++) {
3582 0 : const int slot = start_slot + i;
3583 :
3584 0 : btrfs_item_key_to_cpu(src, &ins_keys[i], slot);
3585 0 : ins_sizes[i] = btrfs_item_size(src, slot);
3586 0 : batch.total_data_size += ins_sizes[i];
3587 : }
3588 : }
3589 :
3590 0 : ret = btrfs_insert_empty_items(trans, log, dst_path, &batch);
3591 0 : if (ret)
3592 0 : goto out;
3593 :
3594 0 : dst = dst_path->nodes[0];
3595 : /*
3596 : * Copy all the items in bulk, in a single copy operation. Item data is
3597 : * organized such that it's placed at the end of a leaf and from right
3598 : * to left. For example, the data for the second item ends at an offset
3599 : * that matches the offset where the data for the first item starts, the
3600 : * data for the third item ends at an offset that matches the offset
3601 : * where the data of the second items starts, and so on.
3602 : * Therefore our source and destination start offsets for copy match the
3603 : * offsets of the last items (highest slots).
3604 : */
3605 0 : dst_offset = btrfs_item_ptr_offset(dst, dst_path->slots[0] + count - 1);
3606 0 : src_offset = btrfs_item_ptr_offset(src, start_slot + count - 1);
3607 0 : copy_extent_buffer(dst, src, dst_offset, src_offset, batch.total_data_size);
3608 0 : btrfs_release_path(dst_path);
3609 :
3610 0 : last_index = batch.keys[count - 1].offset;
3611 0 : ASSERT(last_index > inode->last_dir_index_offset);
3612 :
3613 : /*
3614 : * If for some unexpected reason the last item's index is not greater
3615 : * than the last index we logged, warn and force a transaction commit.
3616 : */
3617 0 : if (WARN_ON(last_index <= inode->last_dir_index_offset))
3618 : ret = BTRFS_LOG_FORCE_COMMIT;
3619 : else
3620 0 : inode->last_dir_index_offset = last_index;
3621 :
3622 0 : if (btrfs_get_first_dir_index_to_log(inode) == 0)
3623 0 : btrfs_set_first_dir_index_to_log(inode, batch.keys[0].offset);
3624 0 : out:
3625 0 : kfree(ins_data);
3626 :
3627 0 : return ret;
3628 : }
3629 :
3630 0 : static int process_dir_items_leaf(struct btrfs_trans_handle *trans,
3631 : struct btrfs_inode *inode,
3632 : struct btrfs_path *path,
3633 : struct btrfs_path *dst_path,
3634 : struct btrfs_log_ctx *ctx,
3635 : u64 *last_old_dentry_offset)
3636 : {
3637 0 : struct btrfs_root *log = inode->root->log_root;
3638 0 : struct extent_buffer *src;
3639 0 : const int nritems = btrfs_header_nritems(path->nodes[0]);
3640 0 : const u64 ino = btrfs_ino(inode);
3641 0 : bool last_found = false;
3642 0 : int batch_start = 0;
3643 0 : int batch_size = 0;
3644 0 : int i;
3645 :
3646 : /*
3647 : * We need to clone the leaf, release the read lock on it, and use the
3648 : * clone before modifying the log tree. See the comment at copy_items()
3649 : * about why we need to do this.
3650 : */
3651 0 : src = btrfs_clone_extent_buffer(path->nodes[0]);
3652 0 : if (!src)
3653 : return -ENOMEM;
3654 :
3655 0 : i = path->slots[0];
3656 0 : btrfs_release_path(path);
3657 0 : path->nodes[0] = src;
3658 0 : path->slots[0] = i;
3659 :
3660 0 : for (; i < nritems; i++) {
3661 0 : struct btrfs_dir_item *di;
3662 0 : struct btrfs_key key;
3663 0 : int ret;
3664 :
3665 0 : btrfs_item_key_to_cpu(src, &key, i);
3666 :
3667 0 : if (key.objectid != ino || key.type != BTRFS_DIR_INDEX_KEY) {
3668 0 : last_found = true;
3669 0 : break;
3670 : }
3671 :
3672 0 : di = btrfs_item_ptr(src, i, struct btrfs_dir_item);
3673 :
3674 : /*
3675 : * Skip ranges of items that consist only of dir item keys created
3676 : * in past transactions. However if we find a gap, we must log a
3677 : * dir index range item for that gap, so that index keys in that
3678 : * gap are deleted during log replay.
3679 : */
3680 0 : if (btrfs_dir_transid(src, di) < trans->transid) {
3681 0 : if (key.offset > *last_old_dentry_offset + 1) {
3682 0 : ret = insert_dir_log_key(trans, log, dst_path,
3683 : ino, *last_old_dentry_offset + 1,
3684 : key.offset - 1);
3685 0 : if (ret < 0)
3686 0 : return ret;
3687 : }
3688 :
3689 0 : *last_old_dentry_offset = key.offset;
3690 0 : continue;
3691 : }
3692 :
3693 : /* If we logged this dir index item before, we can skip it. */
3694 0 : if (key.offset <= inode->last_dir_index_offset)
3695 0 : continue;
3696 :
3697 : /*
3698 : * We must make sure that when we log a directory entry, the
3699 : * corresponding inode, after log replay, has a matching link
3700 : * count. For example:
3701 : *
3702 : * touch foo
3703 : * mkdir mydir
3704 : * sync
3705 : * ln foo mydir/bar
3706 : * xfs_io -c "fsync" mydir
3707 : * <crash>
3708 : * <mount fs and log replay>
3709 : *
3710 : * Would result in a fsync log that when replayed, our file inode
3711 : * would have a link count of 1, but we get two directory entries
3712 : * pointing to the same inode. After removing one of the names,
3713 : * it would not be possible to remove the other name, which
3714 : * resulted always in stale file handle errors, and would not be
3715 : * possible to rmdir the parent directory, since its i_size could
3716 : * never be decremented to the value BTRFS_EMPTY_DIR_SIZE,
3717 : * resulting in -ENOTEMPTY errors.
3718 : */
3719 0 : if (!ctx->log_new_dentries) {
3720 0 : struct btrfs_key di_key;
3721 :
3722 0 : btrfs_dir_item_key_to_cpu(src, di, &di_key);
3723 0 : if (di_key.type != BTRFS_ROOT_ITEM_KEY)
3724 0 : ctx->log_new_dentries = true;
3725 : }
3726 :
3727 0 : if (batch_size == 0)
3728 0 : batch_start = i;
3729 0 : batch_size++;
3730 : }
3731 :
3732 0 : if (batch_size > 0) {
3733 0 : int ret;
3734 :
3735 0 : ret = flush_dir_items_batch(trans, inode, src, dst_path,
3736 : batch_start, batch_size);
3737 0 : if (ret < 0)
3738 : return ret;
3739 : }
3740 :
3741 0 : return last_found ? 1 : 0;
3742 : }
3743 :
3744 : /*
3745 : * log all the items included in the current transaction for a given
3746 : * directory. This also creates the range items in the log tree required
3747 : * to replay anything deleted before the fsync
3748 : */
3749 0 : static noinline int log_dir_items(struct btrfs_trans_handle *trans,
3750 : struct btrfs_inode *inode,
3751 : struct btrfs_path *path,
3752 : struct btrfs_path *dst_path,
3753 : struct btrfs_log_ctx *ctx,
3754 : u64 min_offset, u64 *last_offset_ret)
3755 : {
3756 0 : struct btrfs_key min_key;
3757 0 : struct btrfs_root *root = inode->root;
3758 0 : struct btrfs_root *log = root->log_root;
3759 0 : int ret;
3760 0 : u64 last_old_dentry_offset = min_offset - 1;
3761 0 : u64 last_offset = (u64)-1;
3762 0 : u64 ino = btrfs_ino(inode);
3763 :
3764 0 : min_key.objectid = ino;
3765 0 : min_key.type = BTRFS_DIR_INDEX_KEY;
3766 0 : min_key.offset = min_offset;
3767 :
3768 0 : ret = btrfs_search_forward(root, &min_key, path, trans->transid);
3769 :
3770 : /*
3771 : * we didn't find anything from this transaction, see if there
3772 : * is anything at all
3773 : */
3774 0 : if (ret != 0 || min_key.objectid != ino ||
3775 0 : min_key.type != BTRFS_DIR_INDEX_KEY) {
3776 0 : min_key.objectid = ino;
3777 0 : min_key.type = BTRFS_DIR_INDEX_KEY;
3778 0 : min_key.offset = (u64)-1;
3779 0 : btrfs_release_path(path);
3780 0 : ret = btrfs_search_slot(NULL, root, &min_key, path, 0, 0);
3781 0 : if (ret < 0) {
3782 0 : btrfs_release_path(path);
3783 0 : return ret;
3784 : }
3785 0 : ret = btrfs_previous_item(root, path, ino, BTRFS_DIR_INDEX_KEY);
3786 :
3787 : /* if ret == 0 there are items for this type,
3788 : * create a range to tell us the last key of this type.
3789 : * otherwise, there are no items in this directory after
3790 : * *min_offset, and we create a range to indicate that.
3791 : */
3792 0 : if (ret == 0) {
3793 0 : struct btrfs_key tmp;
3794 :
3795 0 : btrfs_item_key_to_cpu(path->nodes[0], &tmp,
3796 : path->slots[0]);
3797 0 : if (tmp.type == BTRFS_DIR_INDEX_KEY)
3798 0 : last_old_dentry_offset = tmp.offset;
3799 0 : } else if (ret > 0) {
3800 : ret = 0;
3801 : }
3802 :
3803 0 : goto done;
3804 : }
3805 :
3806 : /* go backward to find any previous key */
3807 0 : ret = btrfs_previous_item(root, path, ino, BTRFS_DIR_INDEX_KEY);
3808 0 : if (ret == 0) {
3809 0 : struct btrfs_key tmp;
3810 :
3811 0 : btrfs_item_key_to_cpu(path->nodes[0], &tmp, path->slots[0]);
3812 : /*
3813 : * The dir index key before the first one we found that needs to
3814 : * be logged might be in a previous leaf, and there might be a
3815 : * gap between these keys, meaning that we had deletions that
3816 : * happened. So the key range item we log (key type
3817 : * BTRFS_DIR_LOG_INDEX_KEY) must cover a range that starts at the
3818 : * previous key's offset plus 1, so that those deletes are replayed.
3819 : */
3820 0 : if (tmp.type == BTRFS_DIR_INDEX_KEY)
3821 0 : last_old_dentry_offset = tmp.offset;
3822 0 : } else if (ret < 0) {
3823 0 : goto done;
3824 : }
3825 :
3826 0 : btrfs_release_path(path);
3827 :
3828 : /*
3829 : * Find the first key from this transaction again or the one we were at
3830 : * in the loop below in case we had to reschedule. We may be logging the
3831 : * directory without holding its VFS lock, which happen when logging new
3832 : * dentries (through log_new_dir_dentries()) or in some cases when we
3833 : * need to log the parent directory of an inode. This means a dir index
3834 : * key might be deleted from the inode's root, and therefore we may not
3835 : * find it anymore. If we can't find it, just move to the next key. We
3836 : * can not bail out and ignore, because if we do that we will simply
3837 : * not log dir index keys that come after the one that was just deleted
3838 : * and we can end up logging a dir index range that ends at (u64)-1
3839 : * (@last_offset is initialized to that), resulting in removing dir
3840 : * entries we should not remove at log replay time.
3841 : */
3842 0 : search:
3843 0 : ret = btrfs_search_slot(NULL, root, &min_key, path, 0, 0);
3844 0 : if (ret > 0) {
3845 0 : ret = btrfs_next_item(root, path);
3846 0 : if (ret > 0) {
3847 : /* There are no more keys in the inode's root. */
3848 0 : ret = 0;
3849 0 : goto done;
3850 : }
3851 : }
3852 0 : if (ret < 0)
3853 0 : goto done;
3854 :
3855 : /*
3856 : * we have a block from this transaction, log every item in it
3857 : * from our directory
3858 : */
3859 0 : while (1) {
3860 0 : ret = process_dir_items_leaf(trans, inode, path, dst_path, ctx,
3861 : &last_old_dentry_offset);
3862 0 : if (ret != 0) {
3863 0 : if (ret > 0)
3864 : ret = 0;
3865 0 : goto done;
3866 : }
3867 0 : path->slots[0] = btrfs_header_nritems(path->nodes[0]);
3868 :
3869 : /*
3870 : * look ahead to the next item and see if it is also
3871 : * from this directory and from this transaction
3872 : */
3873 0 : ret = btrfs_next_leaf(root, path);
3874 0 : if (ret) {
3875 0 : if (ret == 1) {
3876 0 : last_offset = (u64)-1;
3877 0 : ret = 0;
3878 : }
3879 0 : goto done;
3880 : }
3881 0 : btrfs_item_key_to_cpu(path->nodes[0], &min_key, path->slots[0]);
3882 0 : if (min_key.objectid != ino || min_key.type != BTRFS_DIR_INDEX_KEY) {
3883 0 : last_offset = (u64)-1;
3884 0 : goto done;
3885 : }
3886 0 : if (btrfs_header_generation(path->nodes[0]) != trans->transid) {
3887 : /*
3888 : * The next leaf was not changed in the current transaction
3889 : * and has at least one dir index key.
3890 : * We check for the next key because there might have been
3891 : * one or more deletions between the last key we logged and
3892 : * that next key. So the key range item we log (key type
3893 : * BTRFS_DIR_LOG_INDEX_KEY) must end at the next key's
3894 : * offset minus 1, so that those deletes are replayed.
3895 : */
3896 0 : last_offset = min_key.offset - 1;
3897 0 : goto done;
3898 : }
3899 0 : if (need_resched()) {
3900 0 : btrfs_release_path(path);
3901 0 : cond_resched();
3902 0 : goto search;
3903 : }
3904 : }
3905 0 : done:
3906 0 : btrfs_release_path(path);
3907 0 : btrfs_release_path(dst_path);
3908 :
3909 0 : if (ret == 0) {
3910 0 : *last_offset_ret = last_offset;
3911 : /*
3912 : * In case the leaf was changed in the current transaction but
3913 : * all its dir items are from a past transaction, the last item
3914 : * in the leaf is a dir item and there's no gap between that last
3915 : * dir item and the first one on the next leaf (which did not
3916 : * change in the current transaction), then we don't need to log
3917 : * a range, last_old_dentry_offset is == to last_offset.
3918 : */
3919 0 : ASSERT(last_old_dentry_offset <= last_offset);
3920 0 : if (last_old_dentry_offset < last_offset)
3921 0 : ret = insert_dir_log_key(trans, log, path, ino,
3922 : last_old_dentry_offset + 1,
3923 : last_offset);
3924 : }
3925 :
3926 : return ret;
3927 : }
3928 :
3929 : /*
3930 : * If the inode was logged before and it was evicted, then its
3931 : * last_dir_index_offset is (u64)-1, so we don't the value of the last index
3932 : * key offset. If that's the case, search for it and update the inode. This
3933 : * is to avoid lookups in the log tree every time we try to insert a dir index
3934 : * key from a leaf changed in the current transaction, and to allow us to always
3935 : * do batch insertions of dir index keys.
3936 : */
3937 0 : static int update_last_dir_index_offset(struct btrfs_inode *inode,
3938 : struct btrfs_path *path,
3939 : const struct btrfs_log_ctx *ctx)
3940 : {
3941 0 : const u64 ino = btrfs_ino(inode);
3942 0 : struct btrfs_key key;
3943 0 : int ret;
3944 :
3945 0 : lockdep_assert_held(&inode->log_mutex);
3946 :
3947 0 : if (inode->last_dir_index_offset != (u64)-1)
3948 : return 0;
3949 :
3950 0 : if (!ctx->logged_before) {
3951 0 : inode->last_dir_index_offset = BTRFS_DIR_START_INDEX - 1;
3952 0 : return 0;
3953 : }
3954 :
3955 0 : key.objectid = ino;
3956 0 : key.type = BTRFS_DIR_INDEX_KEY;
3957 0 : key.offset = (u64)-1;
3958 :
3959 0 : ret = btrfs_search_slot(NULL, inode->root->log_root, &key, path, 0, 0);
3960 : /*
3961 : * An error happened or we actually have an index key with an offset
3962 : * value of (u64)-1. Bail out, we're done.
3963 : */
3964 0 : if (ret <= 0)
3965 0 : goto out;
3966 :
3967 0 : ret = 0;
3968 0 : inode->last_dir_index_offset = BTRFS_DIR_START_INDEX - 1;
3969 :
3970 : /*
3971 : * No dir index items, bail out and leave last_dir_index_offset with
3972 : * the value right before the first valid index value.
3973 : */
3974 0 : if (path->slots[0] == 0)
3975 0 : goto out;
3976 :
3977 : /*
3978 : * btrfs_search_slot() left us at one slot beyond the slot with the last
3979 : * index key, or beyond the last key of the directory that is not an
3980 : * index key. If we have an index key before, set last_dir_index_offset
3981 : * to its offset value, otherwise leave it with a value right before the
3982 : * first valid index value, as it means we have an empty directory.
3983 : */
3984 0 : btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0] - 1);
3985 0 : if (key.objectid == ino && key.type == BTRFS_DIR_INDEX_KEY)
3986 0 : inode->last_dir_index_offset = key.offset;
3987 :
3988 0 : out:
3989 0 : btrfs_release_path(path);
3990 :
3991 0 : return ret;
3992 : }
3993 :
3994 : /*
3995 : * logging directories is very similar to logging inodes, We find all the items
3996 : * from the current transaction and write them to the log.
3997 : *
3998 : * The recovery code scans the directory in the subvolume, and if it finds a
3999 : * key in the range logged that is not present in the log tree, then it means
4000 : * that dir entry was unlinked during the transaction.
4001 : *
4002 : * In order for that scan to work, we must include one key smaller than
4003 : * the smallest logged by this transaction and one key larger than the largest
4004 : * key logged by this transaction.
4005 : */
4006 0 : static noinline int log_directory_changes(struct btrfs_trans_handle *trans,
4007 : struct btrfs_inode *inode,
4008 : struct btrfs_path *path,
4009 : struct btrfs_path *dst_path,
4010 : struct btrfs_log_ctx *ctx)
4011 : {
4012 0 : u64 min_key;
4013 0 : u64 max_key;
4014 0 : int ret;
4015 :
4016 0 : ret = update_last_dir_index_offset(inode, path, ctx);
4017 0 : if (ret)
4018 : return ret;
4019 :
4020 0 : min_key = BTRFS_DIR_START_INDEX;
4021 0 : max_key = 0;
4022 :
4023 0 : while (1) {
4024 0 : ret = log_dir_items(trans, inode, path, dst_path,
4025 : ctx, min_key, &max_key);
4026 0 : if (ret)
4027 0 : return ret;
4028 0 : if (max_key == (u64)-1)
4029 : break;
4030 0 : min_key = max_key + 1;
4031 : }
4032 :
4033 : return 0;
4034 : }
4035 :
4036 : /*
4037 : * a helper function to drop items from the log before we relog an
4038 : * inode. max_key_type indicates the highest item type to remove.
4039 : * This cannot be run for file data extents because it does not
4040 : * free the extents they point to.
4041 : */
4042 0 : static int drop_inode_items(struct btrfs_trans_handle *trans,
4043 : struct btrfs_root *log,
4044 : struct btrfs_path *path,
4045 : struct btrfs_inode *inode,
4046 : int max_key_type)
4047 : {
4048 0 : int ret;
4049 0 : struct btrfs_key key;
4050 0 : struct btrfs_key found_key;
4051 0 : int start_slot;
4052 :
4053 0 : key.objectid = btrfs_ino(inode);
4054 0 : key.type = max_key_type;
4055 0 : key.offset = (u64)-1;
4056 :
4057 0 : while (1) {
4058 0 : ret = btrfs_search_slot(trans, log, &key, path, -1, 1);
4059 0 : if (ret < 0) {
4060 : break;
4061 0 : } else if (ret > 0) {
4062 0 : if (path->slots[0] == 0)
4063 : break;
4064 0 : path->slots[0]--;
4065 : }
4066 :
4067 0 : btrfs_item_key_to_cpu(path->nodes[0], &found_key,
4068 : path->slots[0]);
4069 :
4070 0 : if (found_key.objectid != key.objectid)
4071 : break;
4072 :
4073 0 : found_key.offset = 0;
4074 0 : found_key.type = 0;
4075 0 : ret = btrfs_bin_search(path->nodes[0], 0, &found_key, &start_slot);
4076 0 : if (ret < 0)
4077 : break;
4078 :
4079 0 : ret = btrfs_del_items(trans, log, path, start_slot,
4080 0 : path->slots[0] - start_slot + 1);
4081 : /*
4082 : * If start slot isn't 0 then we don't need to re-search, we've
4083 : * found the last guy with the objectid in this tree.
4084 : */
4085 0 : if (ret || start_slot != 0)
4086 : break;
4087 0 : btrfs_release_path(path);
4088 : }
4089 0 : btrfs_release_path(path);
4090 0 : if (ret > 0)
4091 : ret = 0;
4092 0 : return ret;
4093 : }
4094 :
4095 0 : static int truncate_inode_items(struct btrfs_trans_handle *trans,
4096 : struct btrfs_root *log_root,
4097 : struct btrfs_inode *inode,
4098 : u64 new_size, u32 min_type)
4099 : {
4100 0 : struct btrfs_truncate_control control = {
4101 : .new_size = new_size,
4102 : .ino = btrfs_ino(inode),
4103 : .min_type = min_type,
4104 : .skip_ref_updates = true,
4105 : };
4106 :
4107 0 : return btrfs_truncate_inode_items(trans, log_root, &control);
4108 : }
4109 :
4110 0 : static void fill_inode_item(struct btrfs_trans_handle *trans,
4111 : struct extent_buffer *leaf,
4112 : struct btrfs_inode_item *item,
4113 : struct inode *inode, int log_inode_only,
4114 : u64 logged_isize)
4115 : {
4116 0 : struct btrfs_map_token token;
4117 0 : u64 flags;
4118 :
4119 0 : btrfs_init_map_token(&token, leaf);
4120 :
4121 0 : if (log_inode_only) {
4122 : /* set the generation to zero so the recover code
4123 : * can tell the difference between an logging
4124 : * just to say 'this inode exists' and a logging
4125 : * to say 'update this inode with these values'
4126 : */
4127 0 : btrfs_set_token_inode_generation(&token, item, 0);
4128 0 : btrfs_set_token_inode_size(&token, item, logged_isize);
4129 : } else {
4130 0 : btrfs_set_token_inode_generation(&token, item,
4131 : BTRFS_I(inode)->generation);
4132 0 : btrfs_set_token_inode_size(&token, item, inode->i_size);
4133 : }
4134 :
4135 0 : btrfs_set_token_inode_uid(&token, item, i_uid_read(inode));
4136 0 : btrfs_set_token_inode_gid(&token, item, i_gid_read(inode));
4137 0 : btrfs_set_token_inode_mode(&token, item, inode->i_mode);
4138 0 : btrfs_set_token_inode_nlink(&token, item, inode->i_nlink);
4139 :
4140 0 : btrfs_set_token_timespec_sec(&token, &item->atime,
4141 0 : inode->i_atime.tv_sec);
4142 0 : btrfs_set_token_timespec_nsec(&token, &item->atime,
4143 0 : inode->i_atime.tv_nsec);
4144 :
4145 0 : btrfs_set_token_timespec_sec(&token, &item->mtime,
4146 0 : inode->i_mtime.tv_sec);
4147 0 : btrfs_set_token_timespec_nsec(&token, &item->mtime,
4148 0 : inode->i_mtime.tv_nsec);
4149 :
4150 0 : btrfs_set_token_timespec_sec(&token, &item->ctime,
4151 0 : inode->i_ctime.tv_sec);
4152 0 : btrfs_set_token_timespec_nsec(&token, &item->ctime,
4153 0 : inode->i_ctime.tv_nsec);
4154 :
4155 : /*
4156 : * We do not need to set the nbytes field, in fact during a fast fsync
4157 : * its value may not even be correct, since a fast fsync does not wait
4158 : * for ordered extent completion, which is where we update nbytes, it
4159 : * only waits for writeback to complete. During log replay as we find
4160 : * file extent items and replay them, we adjust the nbytes field of the
4161 : * inode item in subvolume tree as needed (see overwrite_item()).
4162 : */
4163 :
4164 0 : btrfs_set_token_inode_sequence(&token, item, inode_peek_iversion(inode));
4165 0 : btrfs_set_token_inode_transid(&token, item, trans->transid);
4166 0 : btrfs_set_token_inode_rdev(&token, item, inode->i_rdev);
4167 0 : flags = btrfs_inode_combine_flags(BTRFS_I(inode)->flags,
4168 : BTRFS_I(inode)->ro_flags);
4169 0 : btrfs_set_token_inode_flags(&token, item, flags);
4170 0 : btrfs_set_token_inode_block_group(&token, item, 0);
4171 0 : }
4172 :
4173 0 : static int log_inode_item(struct btrfs_trans_handle *trans,
4174 : struct btrfs_root *log, struct btrfs_path *path,
4175 : struct btrfs_inode *inode, bool inode_item_dropped)
4176 : {
4177 0 : struct btrfs_inode_item *inode_item;
4178 0 : int ret;
4179 :
4180 : /*
4181 : * If we are doing a fast fsync and the inode was logged before in the
4182 : * current transaction, then we know the inode was previously logged and
4183 : * it exists in the log tree. For performance reasons, in this case use
4184 : * btrfs_search_slot() directly with ins_len set to 0 so that we never
4185 : * attempt a write lock on the leaf's parent, which adds unnecessary lock
4186 : * contention in case there are concurrent fsyncs for other inodes of the
4187 : * same subvolume. Using btrfs_insert_empty_item() when the inode item
4188 : * already exists can also result in unnecessarily splitting a leaf.
4189 : */
4190 0 : if (!inode_item_dropped && inode->logged_trans == trans->transid) {
4191 0 : ret = btrfs_search_slot(trans, log, &inode->location, path, 0, 1);
4192 0 : ASSERT(ret <= 0);
4193 0 : if (ret > 0)
4194 : ret = -ENOENT;
4195 : } else {
4196 : /*
4197 : * This means it is the first fsync in the current transaction,
4198 : * so the inode item is not in the log and we need to insert it.
4199 : * We can never get -EEXIST because we are only called for a fast
4200 : * fsync and in case an inode eviction happens after the inode was
4201 : * logged before in the current transaction, when we load again
4202 : * the inode, we set BTRFS_INODE_NEEDS_FULL_SYNC on its runtime
4203 : * flags and set ->logged_trans to 0.
4204 : */
4205 0 : ret = btrfs_insert_empty_item(trans, log, path, &inode->location,
4206 : sizeof(*inode_item));
4207 0 : ASSERT(ret != -EEXIST);
4208 : }
4209 0 : if (ret)
4210 0 : return ret;
4211 0 : inode_item = btrfs_item_ptr(path->nodes[0], path->slots[0],
4212 : struct btrfs_inode_item);
4213 0 : fill_inode_item(trans, path->nodes[0], inode_item, &inode->vfs_inode,
4214 : 0, 0);
4215 0 : btrfs_release_path(path);
4216 0 : return 0;
4217 : }
4218 :
4219 0 : static int log_csums(struct btrfs_trans_handle *trans,
4220 : struct btrfs_inode *inode,
4221 : struct btrfs_root *log_root,
4222 : struct btrfs_ordered_sum *sums)
4223 : {
4224 0 : const u64 lock_end = sums->logical + sums->len - 1;
4225 0 : struct extent_state *cached_state = NULL;
4226 0 : int ret;
4227 :
4228 : /*
4229 : * If this inode was not used for reflink operations in the current
4230 : * transaction with new extents, then do the fast path, no need to
4231 : * worry about logging checksum items with overlapping ranges.
4232 : */
4233 0 : if (inode->last_reflink_trans < trans->transid)
4234 0 : return btrfs_csum_file_blocks(trans, log_root, sums);
4235 :
4236 : /*
4237 : * Serialize logging for checksums. This is to avoid racing with the
4238 : * same checksum being logged by another task that is logging another
4239 : * file which happens to refer to the same extent as well. Such races
4240 : * can leave checksum items in the log with overlapping ranges.
4241 : */
4242 0 : ret = lock_extent(&log_root->log_csum_range, sums->logical, lock_end,
4243 : &cached_state);
4244 0 : if (ret)
4245 : return ret;
4246 : /*
4247 : * Due to extent cloning, we might have logged a csum item that covers a
4248 : * subrange of a cloned extent, and later we can end up logging a csum
4249 : * item for a larger subrange of the same extent or the entire range.
4250 : * This would leave csum items in the log tree that cover the same range
4251 : * and break the searches for checksums in the log tree, resulting in
4252 : * some checksums missing in the fs/subvolume tree. So just delete (or
4253 : * trim and adjust) any existing csum items in the log for this range.
4254 : */
4255 0 : ret = btrfs_del_csums(trans, log_root, sums->logical, sums->len);
4256 0 : if (!ret)
4257 0 : ret = btrfs_csum_file_blocks(trans, log_root, sums);
4258 :
4259 0 : unlock_extent(&log_root->log_csum_range, sums->logical, lock_end,
4260 : &cached_state);
4261 :
4262 0 : return ret;
4263 : }
4264 :
4265 0 : static noinline int copy_items(struct btrfs_trans_handle *trans,
4266 : struct btrfs_inode *inode,
4267 : struct btrfs_path *dst_path,
4268 : struct btrfs_path *src_path,
4269 : int start_slot, int nr, int inode_only,
4270 : u64 logged_isize)
4271 : {
4272 0 : struct btrfs_root *log = inode->root->log_root;
4273 0 : struct btrfs_file_extent_item *extent;
4274 0 : struct extent_buffer *src;
4275 0 : int ret = 0;
4276 0 : struct btrfs_key *ins_keys;
4277 0 : u32 *ins_sizes;
4278 0 : struct btrfs_item_batch batch;
4279 0 : char *ins_data;
4280 0 : int i;
4281 0 : int dst_index;
4282 0 : const bool skip_csum = (inode->flags & BTRFS_INODE_NODATASUM);
4283 0 : const u64 i_size = i_size_read(&inode->vfs_inode);
4284 :
4285 : /*
4286 : * To keep lockdep happy and avoid deadlocks, clone the source leaf and
4287 : * use the clone. This is because otherwise we would be changing the log
4288 : * tree, to insert items from the subvolume tree or insert csum items,
4289 : * while holding a read lock on a leaf from the subvolume tree, which
4290 : * creates a nasty lock dependency when COWing log tree nodes/leaves:
4291 : *
4292 : * 1) Modifying the log tree triggers an extent buffer allocation while
4293 : * holding a write lock on a parent extent buffer from the log tree.
4294 : * Allocating the pages for an extent buffer, or the extent buffer
4295 : * struct, can trigger inode eviction and finally the inode eviction
4296 : * will trigger a release/remove of a delayed node, which requires
4297 : * taking the delayed node's mutex;
4298 : *
4299 : * 2) Allocating a metadata extent for a log tree can trigger the async
4300 : * reclaim thread and make us wait for it to release enough space and
4301 : * unblock our reservation ticket. The reclaim thread can start
4302 : * flushing delayed items, and that in turn results in the need to
4303 : * lock delayed node mutexes and in the need to write lock extent
4304 : * buffers of a subvolume tree - all this while holding a write lock
4305 : * on the parent extent buffer in the log tree.
4306 : *
4307 : * So one task in scenario 1) running in parallel with another task in
4308 : * scenario 2) could lead to a deadlock, one wanting to lock a delayed
4309 : * node mutex while having a read lock on a leaf from the subvolume,
4310 : * while the other is holding the delayed node's mutex and wants to
4311 : * write lock the same subvolume leaf for flushing delayed items.
4312 : */
4313 0 : src = btrfs_clone_extent_buffer(src_path->nodes[0]);
4314 0 : if (!src)
4315 : return -ENOMEM;
4316 :
4317 0 : i = src_path->slots[0];
4318 0 : btrfs_release_path(src_path);
4319 0 : src_path->nodes[0] = src;
4320 0 : src_path->slots[0] = i;
4321 :
4322 0 : ins_data = kmalloc(nr * sizeof(struct btrfs_key) +
4323 : nr * sizeof(u32), GFP_NOFS);
4324 0 : if (!ins_data)
4325 : return -ENOMEM;
4326 :
4327 0 : ins_sizes = (u32 *)ins_data;
4328 0 : ins_keys = (struct btrfs_key *)(ins_data + nr * sizeof(u32));
4329 0 : batch.keys = ins_keys;
4330 0 : batch.data_sizes = ins_sizes;
4331 0 : batch.total_data_size = 0;
4332 0 : batch.nr = 0;
4333 :
4334 0 : dst_index = 0;
4335 0 : for (i = 0; i < nr; i++) {
4336 0 : const int src_slot = start_slot + i;
4337 0 : struct btrfs_root *csum_root;
4338 0 : struct btrfs_ordered_sum *sums;
4339 0 : struct btrfs_ordered_sum *sums_next;
4340 0 : LIST_HEAD(ordered_sums);
4341 0 : u64 disk_bytenr;
4342 0 : u64 disk_num_bytes;
4343 0 : u64 extent_offset;
4344 0 : u64 extent_num_bytes;
4345 0 : bool is_old_extent;
4346 :
4347 0 : btrfs_item_key_to_cpu(src, &ins_keys[dst_index], src_slot);
4348 :
4349 0 : if (ins_keys[dst_index].type != BTRFS_EXTENT_DATA_KEY)
4350 0 : goto add_to_batch;
4351 :
4352 0 : extent = btrfs_item_ptr(src, src_slot,
4353 : struct btrfs_file_extent_item);
4354 :
4355 0 : is_old_extent = (btrfs_file_extent_generation(src, extent) <
4356 0 : trans->transid);
4357 :
4358 : /*
4359 : * Don't copy extents from past generations. That would make us
4360 : * log a lot more metadata for common cases like doing only a
4361 : * few random writes into a file and then fsync it for the first
4362 : * time or after the full sync flag is set on the inode. We can
4363 : * get leaves full of extent items, most of which are from past
4364 : * generations, so we can skip them - as long as the inode has
4365 : * not been the target of a reflink operation in this transaction,
4366 : * as in that case it might have had file extent items with old
4367 : * generations copied into it. We also must always log prealloc
4368 : * extents that start at or beyond eof, otherwise we would lose
4369 : * them on log replay.
4370 : */
4371 0 : if (is_old_extent &&
4372 0 : ins_keys[dst_index].offset < i_size &&
4373 0 : inode->last_reflink_trans < trans->transid)
4374 0 : continue;
4375 :
4376 0 : if (skip_csum)
4377 0 : goto add_to_batch;
4378 :
4379 : /* Only regular extents have checksums. */
4380 0 : if (btrfs_file_extent_type(src, extent) != BTRFS_FILE_EXTENT_REG)
4381 0 : goto add_to_batch;
4382 :
4383 : /*
4384 : * If it's an extent created in a past transaction, then its
4385 : * checksums are already accessible from the committed csum tree,
4386 : * no need to log them.
4387 : */
4388 0 : if (is_old_extent)
4389 0 : goto add_to_batch;
4390 :
4391 0 : disk_bytenr = btrfs_file_extent_disk_bytenr(src, extent);
4392 : /* If it's an explicit hole, there are no checksums. */
4393 0 : if (disk_bytenr == 0)
4394 0 : goto add_to_batch;
4395 :
4396 0 : disk_num_bytes = btrfs_file_extent_disk_num_bytes(src, extent);
4397 :
4398 0 : if (btrfs_file_extent_compression(src, extent)) {
4399 : extent_offset = 0;
4400 : extent_num_bytes = disk_num_bytes;
4401 : } else {
4402 0 : extent_offset = btrfs_file_extent_offset(src, extent);
4403 0 : extent_num_bytes = btrfs_file_extent_num_bytes(src, extent);
4404 : }
4405 :
4406 0 : csum_root = btrfs_csum_root(trans->fs_info, disk_bytenr);
4407 0 : disk_bytenr += extent_offset;
4408 0 : ret = btrfs_lookup_csums_list(csum_root, disk_bytenr,
4409 0 : disk_bytenr + extent_num_bytes - 1,
4410 : &ordered_sums, 0, false);
4411 0 : if (ret)
4412 0 : goto out;
4413 :
4414 0 : list_for_each_entry_safe(sums, sums_next, &ordered_sums, list) {
4415 0 : if (!ret)
4416 0 : ret = log_csums(trans, inode, log, sums);
4417 0 : list_del(&sums->list);
4418 0 : kfree(sums);
4419 : }
4420 0 : if (ret)
4421 0 : goto out;
4422 :
4423 0 : add_to_batch:
4424 0 : ins_sizes[dst_index] = btrfs_item_size(src, src_slot);
4425 0 : batch.total_data_size += ins_sizes[dst_index];
4426 0 : batch.nr++;
4427 0 : dst_index++;
4428 : }
4429 :
4430 : /*
4431 : * We have a leaf full of old extent items that don't need to be logged,
4432 : * so we don't need to do anything.
4433 : */
4434 0 : if (batch.nr == 0)
4435 0 : goto out;
4436 :
4437 0 : ret = btrfs_insert_empty_items(trans, log, dst_path, &batch);
4438 0 : if (ret)
4439 0 : goto out;
4440 :
4441 : dst_index = 0;
4442 0 : for (i = 0; i < nr; i++) {
4443 0 : const int src_slot = start_slot + i;
4444 0 : const int dst_slot = dst_path->slots[0] + dst_index;
4445 0 : struct btrfs_key key;
4446 0 : unsigned long src_offset;
4447 0 : unsigned long dst_offset;
4448 :
4449 : /*
4450 : * We're done, all the remaining items in the source leaf
4451 : * correspond to old file extent items.
4452 : */
4453 0 : if (dst_index >= batch.nr)
4454 : break;
4455 :
4456 0 : btrfs_item_key_to_cpu(src, &key, src_slot);
4457 :
4458 0 : if (key.type != BTRFS_EXTENT_DATA_KEY)
4459 0 : goto copy_item;
4460 :
4461 0 : extent = btrfs_item_ptr(src, src_slot,
4462 : struct btrfs_file_extent_item);
4463 :
4464 : /* See the comment in the previous loop, same logic. */
4465 0 : if (btrfs_file_extent_generation(src, extent) < trans->transid &&
4466 0 : key.offset < i_size &&
4467 0 : inode->last_reflink_trans < trans->transid)
4468 0 : continue;
4469 :
4470 0 : copy_item:
4471 0 : dst_offset = btrfs_item_ptr_offset(dst_path->nodes[0], dst_slot);
4472 0 : src_offset = btrfs_item_ptr_offset(src, src_slot);
4473 :
4474 0 : if (key.type == BTRFS_INODE_ITEM_KEY) {
4475 0 : struct btrfs_inode_item *inode_item;
4476 :
4477 0 : inode_item = btrfs_item_ptr(dst_path->nodes[0], dst_slot,
4478 : struct btrfs_inode_item);
4479 0 : fill_inode_item(trans, dst_path->nodes[0], inode_item,
4480 : &inode->vfs_inode,
4481 : inode_only == LOG_INODE_EXISTS,
4482 : logged_isize);
4483 : } else {
4484 0 : copy_extent_buffer(dst_path->nodes[0], src, dst_offset,
4485 0 : src_offset, ins_sizes[dst_index]);
4486 : }
4487 :
4488 0 : dst_index++;
4489 : }
4490 :
4491 0 : btrfs_mark_buffer_dirty(dst_path->nodes[0]);
4492 0 : btrfs_release_path(dst_path);
4493 0 : out:
4494 0 : kfree(ins_data);
4495 :
4496 0 : return ret;
4497 : }
4498 :
4499 0 : static int extent_cmp(void *priv, const struct list_head *a,
4500 : const struct list_head *b)
4501 : {
4502 0 : const struct extent_map *em1, *em2;
4503 :
4504 0 : em1 = list_entry(a, struct extent_map, list);
4505 0 : em2 = list_entry(b, struct extent_map, list);
4506 :
4507 0 : if (em1->start < em2->start)
4508 : return -1;
4509 0 : else if (em1->start > em2->start)
4510 0 : return 1;
4511 : return 0;
4512 : }
4513 :
4514 0 : static int log_extent_csums(struct btrfs_trans_handle *trans,
4515 : struct btrfs_inode *inode,
4516 : struct btrfs_root *log_root,
4517 : const struct extent_map *em,
4518 : struct btrfs_log_ctx *ctx)
4519 : {
4520 0 : struct btrfs_ordered_extent *ordered;
4521 0 : struct btrfs_root *csum_root;
4522 0 : u64 csum_offset;
4523 0 : u64 csum_len;
4524 0 : u64 mod_start = em->mod_start;
4525 0 : u64 mod_len = em->mod_len;
4526 0 : LIST_HEAD(ordered_sums);
4527 0 : int ret = 0;
4528 :
4529 0 : if (inode->flags & BTRFS_INODE_NODATASUM ||
4530 0 : test_bit(EXTENT_FLAG_PREALLOC, &em->flags) ||
4531 0 : em->block_start == EXTENT_MAP_HOLE)
4532 : return 0;
4533 :
4534 0 : list_for_each_entry(ordered, &ctx->ordered_extents, log_list) {
4535 0 : const u64 ordered_end = ordered->file_offset + ordered->num_bytes;
4536 0 : const u64 mod_end = mod_start + mod_len;
4537 0 : struct btrfs_ordered_sum *sums;
4538 :
4539 0 : if (mod_len == 0)
4540 : break;
4541 :
4542 0 : if (ordered_end <= mod_start)
4543 0 : continue;
4544 0 : if (mod_end <= ordered->file_offset)
4545 : break;
4546 :
4547 : /*
4548 : * We are going to copy all the csums on this ordered extent, so
4549 : * go ahead and adjust mod_start and mod_len in case this ordered
4550 : * extent has already been logged.
4551 : */
4552 0 : if (ordered->file_offset > mod_start) {
4553 0 : if (ordered_end >= mod_end)
4554 0 : mod_len = ordered->file_offset - mod_start;
4555 : /*
4556 : * If we have this case
4557 : *
4558 : * |--------- logged extent ---------|
4559 : * |----- ordered extent ----|
4560 : *
4561 : * Just don't mess with mod_start and mod_len, we'll
4562 : * just end up logging more csums than we need and it
4563 : * will be ok.
4564 : */
4565 : } else {
4566 0 : if (ordered_end < mod_end) {
4567 0 : mod_len = mod_end - ordered_end;
4568 0 : mod_start = ordered_end;
4569 : } else {
4570 : mod_len = 0;
4571 : }
4572 : }
4573 :
4574 : /*
4575 : * To keep us from looping for the above case of an ordered
4576 : * extent that falls inside of the logged extent.
4577 : */
4578 0 : if (test_and_set_bit(BTRFS_ORDERED_LOGGED_CSUM, &ordered->flags))
4579 0 : continue;
4580 :
4581 0 : list_for_each_entry(sums, &ordered->list, list) {
4582 0 : ret = log_csums(trans, inode, log_root, sums);
4583 0 : if (ret)
4584 0 : return ret;
4585 : }
4586 : }
4587 :
4588 : /* We're done, found all csums in the ordered extents. */
4589 0 : if (mod_len == 0)
4590 : return 0;
4591 :
4592 : /* If we're compressed we have to save the entire range of csums. */
4593 0 : if (em->compress_type) {
4594 0 : csum_offset = 0;
4595 0 : csum_len = max(em->block_len, em->orig_block_len);
4596 : } else {
4597 0 : csum_offset = mod_start - em->start;
4598 0 : csum_len = mod_len;
4599 : }
4600 :
4601 : /* block start is already adjusted for the file extent offset. */
4602 0 : csum_root = btrfs_csum_root(trans->fs_info, em->block_start);
4603 0 : ret = btrfs_lookup_csums_list(csum_root, em->block_start + csum_offset,
4604 0 : em->block_start + csum_offset +
4605 : csum_len - 1, &ordered_sums, 0, false);
4606 0 : if (ret)
4607 : return ret;
4608 :
4609 0 : while (!list_empty(&ordered_sums)) {
4610 0 : struct btrfs_ordered_sum *sums = list_entry(ordered_sums.next,
4611 : struct btrfs_ordered_sum,
4612 : list);
4613 0 : if (!ret)
4614 0 : ret = log_csums(trans, inode, log_root, sums);
4615 0 : list_del(&sums->list);
4616 0 : kfree(sums);
4617 : }
4618 :
4619 : return ret;
4620 : }
4621 :
4622 0 : static int log_one_extent(struct btrfs_trans_handle *trans,
4623 : struct btrfs_inode *inode,
4624 : const struct extent_map *em,
4625 : struct btrfs_path *path,
4626 : struct btrfs_log_ctx *ctx)
4627 : {
4628 0 : struct btrfs_drop_extents_args drop_args = { 0 };
4629 0 : struct btrfs_root *log = inode->root->log_root;
4630 0 : struct btrfs_file_extent_item fi = { 0 };
4631 0 : struct extent_buffer *leaf;
4632 0 : struct btrfs_key key;
4633 0 : u64 extent_offset = em->start - em->orig_start;
4634 0 : u64 block_len;
4635 0 : int ret;
4636 :
4637 0 : btrfs_set_stack_file_extent_generation(&fi, trans->transid);
4638 0 : if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
4639 0 : btrfs_set_stack_file_extent_type(&fi, BTRFS_FILE_EXTENT_PREALLOC);
4640 : else
4641 0 : btrfs_set_stack_file_extent_type(&fi, BTRFS_FILE_EXTENT_REG);
4642 :
4643 0 : block_len = max(em->block_len, em->orig_block_len);
4644 0 : if (em->compress_type != BTRFS_COMPRESS_NONE) {
4645 0 : btrfs_set_stack_file_extent_disk_bytenr(&fi, em->block_start);
4646 0 : btrfs_set_stack_file_extent_disk_num_bytes(&fi, block_len);
4647 0 : } else if (em->block_start < EXTENT_MAP_LAST_BYTE) {
4648 0 : btrfs_set_stack_file_extent_disk_bytenr(&fi, em->block_start -
4649 : extent_offset);
4650 0 : btrfs_set_stack_file_extent_disk_num_bytes(&fi, block_len);
4651 : }
4652 :
4653 0 : btrfs_set_stack_file_extent_offset(&fi, extent_offset);
4654 0 : btrfs_set_stack_file_extent_num_bytes(&fi, em->len);
4655 0 : btrfs_set_stack_file_extent_ram_bytes(&fi, em->ram_bytes);
4656 0 : btrfs_set_stack_file_extent_compression(&fi, em->compress_type);
4657 :
4658 0 : ret = log_extent_csums(trans, inode, log, em, ctx);
4659 0 : if (ret)
4660 : return ret;
4661 :
4662 : /*
4663 : * If this is the first time we are logging the inode in the current
4664 : * transaction, we can avoid btrfs_drop_extents(), which is expensive
4665 : * because it does a deletion search, which always acquires write locks
4666 : * for extent buffers at levels 2, 1 and 0. This not only wastes time
4667 : * but also adds significant contention in a log tree, since log trees
4668 : * are small, with a root at level 2 or 3 at most, due to their short
4669 : * life span.
4670 : */
4671 0 : if (ctx->logged_before) {
4672 0 : drop_args.path = path;
4673 0 : drop_args.start = em->start;
4674 0 : drop_args.end = em->start + em->len;
4675 0 : drop_args.replace_extent = true;
4676 0 : drop_args.extent_item_size = sizeof(fi);
4677 0 : ret = btrfs_drop_extents(trans, log, inode, &drop_args);
4678 0 : if (ret)
4679 : return ret;
4680 : }
4681 :
4682 0 : if (!drop_args.extent_inserted) {
4683 0 : key.objectid = btrfs_ino(inode);
4684 0 : key.type = BTRFS_EXTENT_DATA_KEY;
4685 0 : key.offset = em->start;
4686 :
4687 0 : ret = btrfs_insert_empty_item(trans, log, path, &key,
4688 : sizeof(fi));
4689 0 : if (ret)
4690 : return ret;
4691 : }
4692 0 : leaf = path->nodes[0];
4693 0 : write_extent_buffer(leaf, &fi,
4694 0 : btrfs_item_ptr_offset(leaf, path->slots[0]),
4695 : sizeof(fi));
4696 0 : btrfs_mark_buffer_dirty(leaf);
4697 :
4698 0 : btrfs_release_path(path);
4699 :
4700 0 : return ret;
4701 : }
4702 :
4703 : /*
4704 : * Log all prealloc extents beyond the inode's i_size to make sure we do not
4705 : * lose them after doing a full/fast fsync and replaying the log. We scan the
4706 : * subvolume's root instead of iterating the inode's extent map tree because
4707 : * otherwise we can log incorrect extent items based on extent map conversion.
4708 : * That can happen due to the fact that extent maps are merged when they
4709 : * are not in the extent map tree's list of modified extents.
4710 : */
4711 0 : static int btrfs_log_prealloc_extents(struct btrfs_trans_handle *trans,
4712 : struct btrfs_inode *inode,
4713 : struct btrfs_path *path)
4714 : {
4715 0 : struct btrfs_root *root = inode->root;
4716 0 : struct btrfs_key key;
4717 0 : const u64 i_size = i_size_read(&inode->vfs_inode);
4718 0 : const u64 ino = btrfs_ino(inode);
4719 0 : struct btrfs_path *dst_path = NULL;
4720 0 : bool dropped_extents = false;
4721 0 : u64 truncate_offset = i_size;
4722 0 : struct extent_buffer *leaf;
4723 0 : int slot;
4724 0 : int ins_nr = 0;
4725 0 : int start_slot;
4726 0 : int ret;
4727 :
4728 0 : if (!(inode->flags & BTRFS_INODE_PREALLOC))
4729 : return 0;
4730 :
4731 0 : key.objectid = ino;
4732 0 : key.type = BTRFS_EXTENT_DATA_KEY;
4733 0 : key.offset = i_size;
4734 0 : ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
4735 0 : if (ret < 0)
4736 0 : goto out;
4737 :
4738 : /*
4739 : * We must check if there is a prealloc extent that starts before the
4740 : * i_size and crosses the i_size boundary. This is to ensure later we
4741 : * truncate down to the end of that extent and not to the i_size, as
4742 : * otherwise we end up losing part of the prealloc extent after a log
4743 : * replay and with an implicit hole if there is another prealloc extent
4744 : * that starts at an offset beyond i_size.
4745 : */
4746 0 : ret = btrfs_previous_item(root, path, ino, BTRFS_EXTENT_DATA_KEY);
4747 0 : if (ret < 0)
4748 0 : goto out;
4749 :
4750 0 : if (ret == 0) {
4751 0 : struct btrfs_file_extent_item *ei;
4752 :
4753 0 : leaf = path->nodes[0];
4754 0 : slot = path->slots[0];
4755 0 : ei = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
4756 :
4757 0 : if (btrfs_file_extent_type(leaf, ei) ==
4758 : BTRFS_FILE_EXTENT_PREALLOC) {
4759 0 : u64 extent_end;
4760 :
4761 0 : btrfs_item_key_to_cpu(leaf, &key, slot);
4762 0 : extent_end = key.offset +
4763 : btrfs_file_extent_num_bytes(leaf, ei);
4764 :
4765 0 : if (extent_end > i_size)
4766 : truncate_offset = extent_end;
4767 : }
4768 : } else {
4769 : ret = 0;
4770 : }
4771 :
4772 0 : while (true) {
4773 0 : leaf = path->nodes[0];
4774 0 : slot = path->slots[0];
4775 :
4776 0 : if (slot >= btrfs_header_nritems(leaf)) {
4777 0 : if (ins_nr > 0) {
4778 0 : ret = copy_items(trans, inode, dst_path, path,
4779 : start_slot, ins_nr, 1, 0);
4780 0 : if (ret < 0)
4781 0 : goto out;
4782 : ins_nr = 0;
4783 : }
4784 0 : ret = btrfs_next_leaf(root, path);
4785 0 : if (ret < 0)
4786 0 : goto out;
4787 0 : if (ret > 0) {
4788 : ret = 0;
4789 : break;
4790 : }
4791 0 : continue;
4792 : }
4793 :
4794 0 : btrfs_item_key_to_cpu(leaf, &key, slot);
4795 0 : if (key.objectid > ino)
4796 : break;
4797 0 : if (WARN_ON_ONCE(key.objectid < ino) ||
4798 0 : key.type < BTRFS_EXTENT_DATA_KEY ||
4799 0 : key.offset < i_size) {
4800 0 : path->slots[0]++;
4801 0 : continue;
4802 : }
4803 0 : if (!dropped_extents) {
4804 : /*
4805 : * Avoid logging extent items logged in past fsync calls
4806 : * and leading to duplicate keys in the log tree.
4807 : */
4808 0 : ret = truncate_inode_items(trans, root->log_root, inode,
4809 : truncate_offset,
4810 : BTRFS_EXTENT_DATA_KEY);
4811 0 : if (ret)
4812 0 : goto out;
4813 : dropped_extents = true;
4814 : }
4815 0 : if (ins_nr == 0)
4816 0 : start_slot = slot;
4817 0 : ins_nr++;
4818 0 : path->slots[0]++;
4819 0 : if (!dst_path) {
4820 0 : dst_path = btrfs_alloc_path();
4821 0 : if (!dst_path) {
4822 0 : ret = -ENOMEM;
4823 0 : goto out;
4824 : }
4825 : }
4826 : }
4827 0 : if (ins_nr > 0)
4828 0 : ret = copy_items(trans, inode, dst_path, path,
4829 : start_slot, ins_nr, 1, 0);
4830 0 : out:
4831 0 : btrfs_release_path(path);
4832 0 : btrfs_free_path(dst_path);
4833 0 : return ret;
4834 : }
4835 :
4836 0 : static int btrfs_log_changed_extents(struct btrfs_trans_handle *trans,
4837 : struct btrfs_inode *inode,
4838 : struct btrfs_path *path,
4839 : struct btrfs_log_ctx *ctx)
4840 : {
4841 0 : struct btrfs_ordered_extent *ordered;
4842 0 : struct btrfs_ordered_extent *tmp;
4843 0 : struct extent_map *em, *n;
4844 0 : struct list_head extents;
4845 0 : struct extent_map_tree *tree = &inode->extent_tree;
4846 0 : int ret = 0;
4847 0 : int num = 0;
4848 :
4849 0 : INIT_LIST_HEAD(&extents);
4850 :
4851 0 : write_lock(&tree->lock);
4852 :
4853 0 : list_for_each_entry_safe(em, n, &tree->modified_extents, list) {
4854 0 : list_del_init(&em->list);
4855 : /*
4856 : * Just an arbitrary number, this can be really CPU intensive
4857 : * once we start getting a lot of extents, and really once we
4858 : * have a bunch of extents we just want to commit since it will
4859 : * be faster.
4860 : */
4861 0 : if (++num > 32768) {
4862 0 : list_del_init(&tree->modified_extents);
4863 0 : ret = -EFBIG;
4864 0 : goto process;
4865 : }
4866 :
4867 0 : if (em->generation < trans->transid)
4868 0 : continue;
4869 :
4870 : /* We log prealloc extents beyond eof later. */
4871 0 : if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags) &&
4872 0 : em->start >= i_size_read(&inode->vfs_inode))
4873 0 : continue;
4874 :
4875 : /* Need a ref to keep it from getting evicted from cache */
4876 0 : refcount_inc(&em->refs);
4877 0 : set_bit(EXTENT_FLAG_LOGGING, &em->flags);
4878 0 : list_add_tail(&em->list, &extents);
4879 0 : num++;
4880 : }
4881 :
4882 0 : list_sort(NULL, &extents, extent_cmp);
4883 : process:
4884 0 : while (!list_empty(&extents)) {
4885 0 : em = list_entry(extents.next, struct extent_map, list);
4886 :
4887 0 : list_del_init(&em->list);
4888 :
4889 : /*
4890 : * If we had an error we just need to delete everybody from our
4891 : * private list.
4892 : */
4893 0 : if (ret) {
4894 0 : clear_em_logging(tree, em);
4895 0 : free_extent_map(em);
4896 0 : continue;
4897 : }
4898 :
4899 0 : write_unlock(&tree->lock);
4900 :
4901 0 : ret = log_one_extent(trans, inode, em, path, ctx);
4902 0 : write_lock(&tree->lock);
4903 0 : clear_em_logging(tree, em);
4904 0 : free_extent_map(em);
4905 : }
4906 0 : WARN_ON(!list_empty(&extents));
4907 0 : write_unlock(&tree->lock);
4908 :
4909 0 : if (!ret)
4910 0 : ret = btrfs_log_prealloc_extents(trans, inode, path);
4911 0 : if (ret)
4912 : return ret;
4913 :
4914 : /*
4915 : * We have logged all extents successfully, now make sure the commit of
4916 : * the current transaction waits for the ordered extents to complete
4917 : * before it commits and wipes out the log trees, otherwise we would
4918 : * lose data if an ordered extents completes after the transaction
4919 : * commits and a power failure happens after the transaction commit.
4920 : */
4921 0 : list_for_each_entry_safe(ordered, tmp, &ctx->ordered_extents, log_list) {
4922 0 : list_del_init(&ordered->log_list);
4923 0 : set_bit(BTRFS_ORDERED_LOGGED, &ordered->flags);
4924 :
4925 0 : if (!test_bit(BTRFS_ORDERED_COMPLETE, &ordered->flags)) {
4926 0 : spin_lock_irq(&inode->ordered_tree.lock);
4927 0 : if (!test_bit(BTRFS_ORDERED_COMPLETE, &ordered->flags)) {
4928 0 : set_bit(BTRFS_ORDERED_PENDING, &ordered->flags);
4929 0 : atomic_inc(&trans->transaction->pending_ordered);
4930 : }
4931 0 : spin_unlock_irq(&inode->ordered_tree.lock);
4932 : }
4933 0 : btrfs_put_ordered_extent(ordered);
4934 : }
4935 :
4936 : return 0;
4937 : }
4938 :
4939 0 : static int logged_inode_size(struct btrfs_root *log, struct btrfs_inode *inode,
4940 : struct btrfs_path *path, u64 *size_ret)
4941 : {
4942 0 : struct btrfs_key key;
4943 0 : int ret;
4944 :
4945 0 : key.objectid = btrfs_ino(inode);
4946 0 : key.type = BTRFS_INODE_ITEM_KEY;
4947 0 : key.offset = 0;
4948 :
4949 0 : ret = btrfs_search_slot(NULL, log, &key, path, 0, 0);
4950 0 : if (ret < 0) {
4951 : return ret;
4952 0 : } else if (ret > 0) {
4953 0 : *size_ret = 0;
4954 : } else {
4955 0 : struct btrfs_inode_item *item;
4956 :
4957 0 : item = btrfs_item_ptr(path->nodes[0], path->slots[0],
4958 : struct btrfs_inode_item);
4959 0 : *size_ret = btrfs_inode_size(path->nodes[0], item);
4960 : /*
4961 : * If the in-memory inode's i_size is smaller then the inode
4962 : * size stored in the btree, return the inode's i_size, so
4963 : * that we get a correct inode size after replaying the log
4964 : * when before a power failure we had a shrinking truncate
4965 : * followed by addition of a new name (rename / new hard link).
4966 : * Otherwise return the inode size from the btree, to avoid
4967 : * data loss when replaying a log due to previously doing a
4968 : * write that expands the inode's size and logging a new name
4969 : * immediately after.
4970 : */
4971 0 : if (*size_ret > inode->vfs_inode.i_size)
4972 0 : *size_ret = inode->vfs_inode.i_size;
4973 : }
4974 :
4975 0 : btrfs_release_path(path);
4976 0 : return 0;
4977 : }
4978 :
4979 : /*
4980 : * At the moment we always log all xattrs. This is to figure out at log replay
4981 : * time which xattrs must have their deletion replayed. If a xattr is missing
4982 : * in the log tree and exists in the fs/subvol tree, we delete it. This is
4983 : * because if a xattr is deleted, the inode is fsynced and a power failure
4984 : * happens, causing the log to be replayed the next time the fs is mounted,
4985 : * we want the xattr to not exist anymore (same behaviour as other filesystems
4986 : * with a journal, ext3/4, xfs, f2fs, etc).
4987 : */
4988 0 : static int btrfs_log_all_xattrs(struct btrfs_trans_handle *trans,
4989 : struct btrfs_inode *inode,
4990 : struct btrfs_path *path,
4991 : struct btrfs_path *dst_path)
4992 : {
4993 0 : struct btrfs_root *root = inode->root;
4994 0 : int ret;
4995 0 : struct btrfs_key key;
4996 0 : const u64 ino = btrfs_ino(inode);
4997 0 : int ins_nr = 0;
4998 0 : int start_slot = 0;
4999 0 : bool found_xattrs = false;
5000 :
5001 0 : if (test_bit(BTRFS_INODE_NO_XATTRS, &inode->runtime_flags))
5002 : return 0;
5003 :
5004 0 : key.objectid = ino;
5005 0 : key.type = BTRFS_XATTR_ITEM_KEY;
5006 0 : key.offset = 0;
5007 :
5008 0 : ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
5009 0 : if (ret < 0)
5010 : return ret;
5011 :
5012 0 : while (true) {
5013 0 : int slot = path->slots[0];
5014 0 : struct extent_buffer *leaf = path->nodes[0];
5015 0 : int nritems = btrfs_header_nritems(leaf);
5016 :
5017 0 : if (slot >= nritems) {
5018 0 : if (ins_nr > 0) {
5019 0 : ret = copy_items(trans, inode, dst_path, path,
5020 : start_slot, ins_nr, 1, 0);
5021 0 : if (ret < 0)
5022 0 : return ret;
5023 : ins_nr = 0;
5024 : }
5025 0 : ret = btrfs_next_leaf(root, path);
5026 0 : if (ret < 0)
5027 0 : return ret;
5028 0 : else if (ret > 0)
5029 : break;
5030 0 : continue;
5031 : }
5032 :
5033 0 : btrfs_item_key_to_cpu(leaf, &key, slot);
5034 0 : if (key.objectid != ino || key.type != BTRFS_XATTR_ITEM_KEY)
5035 : break;
5036 :
5037 0 : if (ins_nr == 0)
5038 0 : start_slot = slot;
5039 0 : ins_nr++;
5040 0 : path->slots[0]++;
5041 0 : found_xattrs = true;
5042 0 : cond_resched();
5043 : }
5044 0 : if (ins_nr > 0) {
5045 0 : ret = copy_items(trans, inode, dst_path, path,
5046 : start_slot, ins_nr, 1, 0);
5047 0 : if (ret < 0)
5048 : return ret;
5049 : }
5050 :
5051 0 : if (!found_xattrs)
5052 0 : set_bit(BTRFS_INODE_NO_XATTRS, &inode->runtime_flags);
5053 :
5054 : return 0;
5055 : }
5056 :
5057 : /*
5058 : * When using the NO_HOLES feature if we punched a hole that causes the
5059 : * deletion of entire leafs or all the extent items of the first leaf (the one
5060 : * that contains the inode item and references) we may end up not processing
5061 : * any extents, because there are no leafs with a generation matching the
5062 : * current transaction that have extent items for our inode. So we need to find
5063 : * if any holes exist and then log them. We also need to log holes after any
5064 : * truncate operation that changes the inode's size.
5065 : */
5066 0 : static int btrfs_log_holes(struct btrfs_trans_handle *trans,
5067 : struct btrfs_inode *inode,
5068 : struct btrfs_path *path)
5069 : {
5070 0 : struct btrfs_root *root = inode->root;
5071 0 : struct btrfs_fs_info *fs_info = root->fs_info;
5072 0 : struct btrfs_key key;
5073 0 : const u64 ino = btrfs_ino(inode);
5074 0 : const u64 i_size = i_size_read(&inode->vfs_inode);
5075 0 : u64 prev_extent_end = 0;
5076 0 : int ret;
5077 :
5078 0 : if (!btrfs_fs_incompat(fs_info, NO_HOLES) || i_size == 0)
5079 : return 0;
5080 :
5081 0 : key.objectid = ino;
5082 0 : key.type = BTRFS_EXTENT_DATA_KEY;
5083 0 : key.offset = 0;
5084 :
5085 0 : ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
5086 0 : if (ret < 0)
5087 : return ret;
5088 :
5089 0 : while (true) {
5090 0 : struct extent_buffer *leaf = path->nodes[0];
5091 :
5092 0 : if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
5093 0 : ret = btrfs_next_leaf(root, path);
5094 0 : if (ret < 0)
5095 0 : return ret;
5096 0 : if (ret > 0) {
5097 : ret = 0;
5098 : break;
5099 : }
5100 0 : leaf = path->nodes[0];
5101 : }
5102 :
5103 0 : btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
5104 0 : if (key.objectid != ino || key.type != BTRFS_EXTENT_DATA_KEY)
5105 : break;
5106 :
5107 : /* We have a hole, log it. */
5108 0 : if (prev_extent_end < key.offset) {
5109 0 : const u64 hole_len = key.offset - prev_extent_end;
5110 :
5111 : /*
5112 : * Release the path to avoid deadlocks with other code
5113 : * paths that search the root while holding locks on
5114 : * leafs from the log root.
5115 : */
5116 0 : btrfs_release_path(path);
5117 0 : ret = btrfs_insert_hole_extent(trans, root->log_root,
5118 : ino, prev_extent_end,
5119 : hole_len);
5120 0 : if (ret < 0)
5121 0 : return ret;
5122 :
5123 : /*
5124 : * Search for the same key again in the root. Since it's
5125 : * an extent item and we are holding the inode lock, the
5126 : * key must still exist. If it doesn't just emit warning
5127 : * and return an error to fall back to a transaction
5128 : * commit.
5129 : */
5130 0 : ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
5131 0 : if (ret < 0)
5132 0 : return ret;
5133 0 : if (WARN_ON(ret > 0))
5134 : return -ENOENT;
5135 : leaf = path->nodes[0];
5136 : }
5137 :
5138 0 : prev_extent_end = btrfs_file_extent_end(path);
5139 0 : path->slots[0]++;
5140 0 : cond_resched();
5141 : }
5142 :
5143 0 : if (prev_extent_end < i_size) {
5144 0 : u64 hole_len;
5145 :
5146 0 : btrfs_release_path(path);
5147 0 : hole_len = ALIGN(i_size - prev_extent_end, fs_info->sectorsize);
5148 0 : ret = btrfs_insert_hole_extent(trans, root->log_root, ino,
5149 : prev_extent_end, hole_len);
5150 0 : if (ret < 0)
5151 : return ret;
5152 : }
5153 :
5154 : return 0;
5155 : }
5156 :
5157 : /*
5158 : * When we are logging a new inode X, check if it doesn't have a reference that
5159 : * matches the reference from some other inode Y created in a past transaction
5160 : * and that was renamed in the current transaction. If we don't do this, then at
5161 : * log replay time we can lose inode Y (and all its files if it's a directory):
5162 : *
5163 : * mkdir /mnt/x
5164 : * echo "hello world" > /mnt/x/foobar
5165 : * sync
5166 : * mv /mnt/x /mnt/y
5167 : * mkdir /mnt/x # or touch /mnt/x
5168 : * xfs_io -c fsync /mnt/x
5169 : * <power fail>
5170 : * mount fs, trigger log replay
5171 : *
5172 : * After the log replay procedure, we would lose the first directory and all its
5173 : * files (file foobar).
5174 : * For the case where inode Y is not a directory we simply end up losing it:
5175 : *
5176 : * echo "123" > /mnt/foo
5177 : * sync
5178 : * mv /mnt/foo /mnt/bar
5179 : * echo "abc" > /mnt/foo
5180 : * xfs_io -c fsync /mnt/foo
5181 : * <power fail>
5182 : *
5183 : * We also need this for cases where a snapshot entry is replaced by some other
5184 : * entry (file or directory) otherwise we end up with an unreplayable log due to
5185 : * attempts to delete the snapshot entry (entry of type BTRFS_ROOT_ITEM_KEY) as
5186 : * if it were a regular entry:
5187 : *
5188 : * mkdir /mnt/x
5189 : * btrfs subvolume snapshot /mnt /mnt/x/snap
5190 : * btrfs subvolume delete /mnt/x/snap
5191 : * rmdir /mnt/x
5192 : * mkdir /mnt/x
5193 : * fsync /mnt/x or fsync some new file inside it
5194 : * <power fail>
5195 : *
5196 : * The snapshot delete, rmdir of x, mkdir of a new x and the fsync all happen in
5197 : * the same transaction.
5198 : */
5199 0 : static int btrfs_check_ref_name_override(struct extent_buffer *eb,
5200 : const int slot,
5201 : const struct btrfs_key *key,
5202 : struct btrfs_inode *inode,
5203 : u64 *other_ino, u64 *other_parent)
5204 : {
5205 0 : int ret;
5206 0 : struct btrfs_path *search_path;
5207 0 : char *name = NULL;
5208 0 : u32 name_len = 0;
5209 0 : u32 item_size = btrfs_item_size(eb, slot);
5210 0 : u32 cur_offset = 0;
5211 0 : unsigned long ptr = btrfs_item_ptr_offset(eb, slot);
5212 :
5213 0 : search_path = btrfs_alloc_path();
5214 0 : if (!search_path)
5215 : return -ENOMEM;
5216 0 : search_path->search_commit_root = 1;
5217 0 : search_path->skip_locking = 1;
5218 :
5219 0 : while (cur_offset < item_size) {
5220 0 : u64 parent;
5221 0 : u32 this_name_len;
5222 0 : u32 this_len;
5223 0 : unsigned long name_ptr;
5224 0 : struct btrfs_dir_item *di;
5225 0 : struct fscrypt_str name_str;
5226 :
5227 0 : if (key->type == BTRFS_INODE_REF_KEY) {
5228 0 : struct btrfs_inode_ref *iref;
5229 :
5230 0 : iref = (struct btrfs_inode_ref *)(ptr + cur_offset);
5231 0 : parent = key->offset;
5232 0 : this_name_len = btrfs_inode_ref_name_len(eb, iref);
5233 0 : name_ptr = (unsigned long)(iref + 1);
5234 0 : this_len = sizeof(*iref) + this_name_len;
5235 : } else {
5236 0 : struct btrfs_inode_extref *extref;
5237 :
5238 0 : extref = (struct btrfs_inode_extref *)(ptr +
5239 : cur_offset);
5240 0 : parent = btrfs_inode_extref_parent(eb, extref);
5241 0 : this_name_len = btrfs_inode_extref_name_len(eb, extref);
5242 0 : name_ptr = (unsigned long)&extref->name;
5243 0 : this_len = sizeof(*extref) + this_name_len;
5244 : }
5245 :
5246 0 : if (this_name_len > name_len) {
5247 0 : char *new_name;
5248 :
5249 0 : new_name = krealloc(name, this_name_len, GFP_NOFS);
5250 0 : if (!new_name) {
5251 0 : ret = -ENOMEM;
5252 0 : goto out;
5253 : }
5254 : name_len = this_name_len;
5255 : name = new_name;
5256 : }
5257 :
5258 0 : read_extent_buffer(eb, name, name_ptr, this_name_len);
5259 :
5260 0 : name_str.name = name;
5261 0 : name_str.len = this_name_len;
5262 0 : di = btrfs_lookup_dir_item(NULL, inode->root, search_path,
5263 : parent, &name_str, 0);
5264 0 : if (di && !IS_ERR(di)) {
5265 0 : struct btrfs_key di_key;
5266 :
5267 0 : btrfs_dir_item_key_to_cpu(search_path->nodes[0],
5268 : di, &di_key);
5269 0 : if (di_key.type == BTRFS_INODE_ITEM_KEY) {
5270 0 : if (di_key.objectid != key->objectid) {
5271 0 : ret = 1;
5272 0 : *other_ino = di_key.objectid;
5273 0 : *other_parent = parent;
5274 : } else {
5275 : ret = 0;
5276 : }
5277 : } else {
5278 : ret = -EAGAIN;
5279 : }
5280 0 : goto out;
5281 0 : } else if (IS_ERR(di)) {
5282 0 : ret = PTR_ERR(di);
5283 0 : goto out;
5284 : }
5285 0 : btrfs_release_path(search_path);
5286 :
5287 0 : cur_offset += this_len;
5288 : }
5289 : ret = 0;
5290 0 : out:
5291 0 : btrfs_free_path(search_path);
5292 0 : kfree(name);
5293 0 : return ret;
5294 : }
5295 :
5296 : /*
5297 : * Check if we need to log an inode. This is used in contexts where while
5298 : * logging an inode we need to log another inode (either that it exists or in
5299 : * full mode). This is used instead of btrfs_inode_in_log() because the later
5300 : * requires the inode to be in the log and have the log transaction committed,
5301 : * while here we do not care if the log transaction was already committed - our
5302 : * caller will commit the log later - and we want to avoid logging an inode
5303 : * multiple times when multiple tasks have joined the same log transaction.
5304 : */
5305 0 : static bool need_log_inode(const struct btrfs_trans_handle *trans,
5306 : struct btrfs_inode *inode)
5307 : {
5308 : /*
5309 : * If a directory was not modified, no dentries added or removed, we can
5310 : * and should avoid logging it.
5311 : */
5312 0 : if (S_ISDIR(inode->vfs_inode.i_mode) && inode->last_trans < trans->transid)
5313 : return false;
5314 :
5315 : /*
5316 : * If this inode does not have new/updated/deleted xattrs since the last
5317 : * time it was logged and is flagged as logged in the current transaction,
5318 : * we can skip logging it. As for new/deleted names, those are updated in
5319 : * the log by link/unlink/rename operations.
5320 : * In case the inode was logged and then evicted and reloaded, its
5321 : * logged_trans will be 0, in which case we have to fully log it since
5322 : * logged_trans is a transient field, not persisted.
5323 : */
5324 0 : if (inode_logged(trans, inode, NULL) == 1 &&
5325 0 : !test_bit(BTRFS_INODE_COPY_EVERYTHING, &inode->runtime_flags))
5326 0 : return false;
5327 :
5328 : return true;
5329 : }
5330 :
5331 : struct btrfs_dir_list {
5332 : u64 ino;
5333 : struct list_head list;
5334 : };
5335 :
5336 : /*
5337 : * Log the inodes of the new dentries of a directory.
5338 : * See process_dir_items_leaf() for details about why it is needed.
5339 : * This is a recursive operation - if an existing dentry corresponds to a
5340 : * directory, that directory's new entries are logged too (same behaviour as
5341 : * ext3/4, xfs, f2fs, reiserfs, nilfs2). Note that when logging the inodes
5342 : * the dentries point to we do not acquire their VFS lock, otherwise lockdep
5343 : * complains about the following circular lock dependency / possible deadlock:
5344 : *
5345 : * CPU0 CPU1
5346 : * ---- ----
5347 : * lock(&type->i_mutex_dir_key#3/2);
5348 : * lock(sb_internal#2);
5349 : * lock(&type->i_mutex_dir_key#3/2);
5350 : * lock(&sb->s_type->i_mutex_key#14);
5351 : *
5352 : * Where sb_internal is the lock (a counter that works as a lock) acquired by
5353 : * sb_start_intwrite() in btrfs_start_transaction().
5354 : * Not acquiring the VFS lock of the inodes is still safe because:
5355 : *
5356 : * 1) For regular files we log with a mode of LOG_INODE_EXISTS. It's possible
5357 : * that while logging the inode new references (names) are added or removed
5358 : * from the inode, leaving the logged inode item with a link count that does
5359 : * not match the number of logged inode reference items. This is fine because
5360 : * at log replay time we compute the real number of links and correct the
5361 : * link count in the inode item (see replay_one_buffer() and
5362 : * link_to_fixup_dir());
5363 : *
5364 : * 2) For directories we log with a mode of LOG_INODE_ALL. It's possible that
5365 : * while logging the inode's items new index items (key type
5366 : * BTRFS_DIR_INDEX_KEY) are added to fs/subvol tree and the logged inode item
5367 : * has a size that doesn't match the sum of the lengths of all the logged
5368 : * names - this is ok, not a problem, because at log replay time we set the
5369 : * directory's i_size to the correct value (see replay_one_name() and
5370 : * overwrite_item()).
5371 : */
5372 0 : static int log_new_dir_dentries(struct btrfs_trans_handle *trans,
5373 : struct btrfs_inode *start_inode,
5374 : struct btrfs_log_ctx *ctx)
5375 : {
5376 0 : struct btrfs_root *root = start_inode->root;
5377 0 : struct btrfs_fs_info *fs_info = root->fs_info;
5378 0 : struct btrfs_path *path;
5379 0 : LIST_HEAD(dir_list);
5380 0 : struct btrfs_dir_list *dir_elem;
5381 0 : u64 ino = btrfs_ino(start_inode);
5382 0 : struct btrfs_inode *curr_inode = start_inode;
5383 0 : int ret = 0;
5384 :
5385 : /*
5386 : * If we are logging a new name, as part of a link or rename operation,
5387 : * don't bother logging new dentries, as we just want to log the names
5388 : * of an inode and that any new parents exist.
5389 : */
5390 0 : if (ctx->logging_new_name)
5391 : return 0;
5392 :
5393 0 : path = btrfs_alloc_path();
5394 0 : if (!path)
5395 : return -ENOMEM;
5396 :
5397 : /* Pairs with btrfs_add_delayed_iput below. */
5398 0 : ihold(&curr_inode->vfs_inode);
5399 :
5400 0 : while (true) {
5401 0 : struct inode *vfs_inode;
5402 0 : struct btrfs_key key;
5403 0 : struct btrfs_key found_key;
5404 0 : u64 next_index;
5405 0 : bool continue_curr_inode = true;
5406 0 : int iter_ret;
5407 :
5408 0 : key.objectid = ino;
5409 0 : key.type = BTRFS_DIR_INDEX_KEY;
5410 0 : key.offset = btrfs_get_first_dir_index_to_log(curr_inode);
5411 0 : next_index = key.offset;
5412 0 : again:
5413 0 : btrfs_for_each_slot(root->log_root, &key, &found_key, path, iter_ret) {
5414 0 : struct extent_buffer *leaf = path->nodes[0];
5415 0 : struct btrfs_dir_item *di;
5416 0 : struct btrfs_key di_key;
5417 0 : struct inode *di_inode;
5418 0 : int log_mode = LOG_INODE_EXISTS;
5419 0 : int type;
5420 :
5421 0 : if (found_key.objectid != ino ||
5422 0 : found_key.type != BTRFS_DIR_INDEX_KEY) {
5423 : continue_curr_inode = false;
5424 0 : break;
5425 : }
5426 :
5427 0 : next_index = found_key.offset + 1;
5428 :
5429 0 : di = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dir_item);
5430 0 : type = btrfs_dir_ftype(leaf, di);
5431 0 : if (btrfs_dir_transid(leaf, di) < trans->transid)
5432 0 : continue;
5433 0 : btrfs_dir_item_key_to_cpu(leaf, di, &di_key);
5434 0 : if (di_key.type == BTRFS_ROOT_ITEM_KEY)
5435 0 : continue;
5436 :
5437 0 : btrfs_release_path(path);
5438 0 : di_inode = btrfs_iget(fs_info->sb, di_key.objectid, root);
5439 0 : if (IS_ERR(di_inode)) {
5440 0 : ret = PTR_ERR(di_inode);
5441 0 : goto out;
5442 : }
5443 :
5444 0 : if (!need_log_inode(trans, BTRFS_I(di_inode))) {
5445 0 : btrfs_add_delayed_iput(BTRFS_I(di_inode));
5446 0 : break;
5447 : }
5448 :
5449 0 : ctx->log_new_dentries = false;
5450 0 : if (type == BTRFS_FT_DIR)
5451 0 : log_mode = LOG_INODE_ALL;
5452 0 : ret = btrfs_log_inode(trans, BTRFS_I(di_inode),
5453 : log_mode, ctx);
5454 0 : btrfs_add_delayed_iput(BTRFS_I(di_inode));
5455 0 : if (ret)
5456 0 : goto out;
5457 0 : if (ctx->log_new_dentries) {
5458 0 : dir_elem = kmalloc(sizeof(*dir_elem), GFP_NOFS);
5459 0 : if (!dir_elem) {
5460 0 : ret = -ENOMEM;
5461 0 : goto out;
5462 : }
5463 0 : dir_elem->ino = di_key.objectid;
5464 0 : list_add_tail(&dir_elem->list, &dir_list);
5465 : }
5466 : break;
5467 : }
5468 :
5469 0 : btrfs_release_path(path);
5470 :
5471 0 : if (iter_ret < 0) {
5472 0 : ret = iter_ret;
5473 0 : goto out;
5474 0 : } else if (iter_ret > 0) {
5475 : continue_curr_inode = false;
5476 : } else {
5477 0 : key = found_key;
5478 : }
5479 :
5480 0 : if (continue_curr_inode && key.offset < (u64)-1) {
5481 0 : key.offset++;
5482 0 : goto again;
5483 : }
5484 :
5485 0 : btrfs_set_first_dir_index_to_log(curr_inode, next_index);
5486 :
5487 0 : if (list_empty(&dir_list))
5488 : break;
5489 :
5490 0 : dir_elem = list_first_entry(&dir_list, struct btrfs_dir_list, list);
5491 0 : ino = dir_elem->ino;
5492 0 : list_del(&dir_elem->list);
5493 0 : kfree(dir_elem);
5494 :
5495 0 : btrfs_add_delayed_iput(curr_inode);
5496 0 : curr_inode = NULL;
5497 :
5498 0 : vfs_inode = btrfs_iget(fs_info->sb, ino, root);
5499 0 : if (IS_ERR(vfs_inode)) {
5500 0 : ret = PTR_ERR(vfs_inode);
5501 0 : break;
5502 : }
5503 0 : curr_inode = BTRFS_I(vfs_inode);
5504 : }
5505 0 : out:
5506 0 : btrfs_free_path(path);
5507 0 : if (curr_inode)
5508 0 : btrfs_add_delayed_iput(curr_inode);
5509 :
5510 0 : if (ret) {
5511 0 : struct btrfs_dir_list *next;
5512 :
5513 0 : list_for_each_entry_safe(dir_elem, next, &dir_list, list)
5514 0 : kfree(dir_elem);
5515 : }
5516 :
5517 : return ret;
5518 : }
5519 :
5520 : struct btrfs_ino_list {
5521 : u64 ino;
5522 : u64 parent;
5523 : struct list_head list;
5524 : };
5525 :
5526 0 : static void free_conflicting_inodes(struct btrfs_log_ctx *ctx)
5527 : {
5528 0 : struct btrfs_ino_list *curr;
5529 0 : struct btrfs_ino_list *next;
5530 :
5531 0 : list_for_each_entry_safe(curr, next, &ctx->conflict_inodes, list) {
5532 0 : list_del(&curr->list);
5533 0 : kfree(curr);
5534 : }
5535 0 : }
5536 :
5537 0 : static int conflicting_inode_is_dir(struct btrfs_root *root, u64 ino,
5538 : struct btrfs_path *path)
5539 : {
5540 0 : struct btrfs_key key;
5541 0 : int ret;
5542 :
5543 0 : key.objectid = ino;
5544 0 : key.type = BTRFS_INODE_ITEM_KEY;
5545 0 : key.offset = 0;
5546 :
5547 0 : path->search_commit_root = 1;
5548 0 : path->skip_locking = 1;
5549 :
5550 0 : ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
5551 0 : if (WARN_ON_ONCE(ret > 0)) {
5552 : /*
5553 : * We have previously found the inode through the commit root
5554 : * so this should not happen. If it does, just error out and
5555 : * fallback to a transaction commit.
5556 : */
5557 : ret = -ENOENT;
5558 0 : } else if (ret == 0) {
5559 0 : struct btrfs_inode_item *item;
5560 :
5561 0 : item = btrfs_item_ptr(path->nodes[0], path->slots[0],
5562 : struct btrfs_inode_item);
5563 0 : if (S_ISDIR(btrfs_inode_mode(path->nodes[0], item)))
5564 0 : ret = 1;
5565 : }
5566 :
5567 0 : btrfs_release_path(path);
5568 0 : path->search_commit_root = 0;
5569 0 : path->skip_locking = 0;
5570 :
5571 0 : return ret;
5572 : }
5573 :
5574 0 : static int add_conflicting_inode(struct btrfs_trans_handle *trans,
5575 : struct btrfs_root *root,
5576 : struct btrfs_path *path,
5577 : u64 ino, u64 parent,
5578 : struct btrfs_log_ctx *ctx)
5579 : {
5580 0 : struct btrfs_ino_list *ino_elem;
5581 0 : struct inode *inode;
5582 :
5583 : /*
5584 : * It's rare to have a lot of conflicting inodes, in practice it is not
5585 : * common to have more than 1 or 2. We don't want to collect too many,
5586 : * as we could end up logging too many inodes (even if only in
5587 : * LOG_INODE_EXISTS mode) and slow down other fsyncs or transaction
5588 : * commits.
5589 : */
5590 0 : if (ctx->num_conflict_inodes >= MAX_CONFLICT_INODES)
5591 : return BTRFS_LOG_FORCE_COMMIT;
5592 :
5593 0 : inode = btrfs_iget(root->fs_info->sb, ino, root);
5594 : /*
5595 : * If the other inode that had a conflicting dir entry was deleted in
5596 : * the current transaction then we either:
5597 : *
5598 : * 1) Log the parent directory (later after adding it to the list) if
5599 : * the inode is a directory. This is because it may be a deleted
5600 : * subvolume/snapshot or it may be a regular directory that had
5601 : * deleted subvolumes/snapshots (or subdirectories that had them),
5602 : * and at the moment we can't deal with dropping subvolumes/snapshots
5603 : * during log replay. So we just log the parent, which will result in
5604 : * a fallback to a transaction commit if we are dealing with those
5605 : * cases (last_unlink_trans will match the current transaction);
5606 : *
5607 : * 2) Do nothing if it's not a directory. During log replay we simply
5608 : * unlink the conflicting dentry from the parent directory and then
5609 : * add the dentry for our inode. Like this we can avoid logging the
5610 : * parent directory (and maybe fallback to a transaction commit in
5611 : * case it has a last_unlink_trans == trans->transid, due to moving
5612 : * some inode from it to some other directory).
5613 : */
5614 0 : if (IS_ERR(inode)) {
5615 0 : int ret = PTR_ERR(inode);
5616 :
5617 0 : if (ret != -ENOENT)
5618 : return ret;
5619 :
5620 0 : ret = conflicting_inode_is_dir(root, ino, path);
5621 : /* Not a directory or we got an error. */
5622 0 : if (ret <= 0)
5623 : return ret;
5624 :
5625 : /* Conflicting inode is a directory, so we'll log its parent. */
5626 0 : ino_elem = kmalloc(sizeof(*ino_elem), GFP_NOFS);
5627 0 : if (!ino_elem)
5628 : return -ENOMEM;
5629 0 : ino_elem->ino = ino;
5630 0 : ino_elem->parent = parent;
5631 0 : list_add_tail(&ino_elem->list, &ctx->conflict_inodes);
5632 0 : ctx->num_conflict_inodes++;
5633 :
5634 0 : return 0;
5635 : }
5636 :
5637 : /*
5638 : * If the inode was already logged skip it - otherwise we can hit an
5639 : * infinite loop. Example:
5640 : *
5641 : * From the commit root (previous transaction) we have the following
5642 : * inodes:
5643 : *
5644 : * inode 257 a directory
5645 : * inode 258 with references "zz" and "zz_link" on inode 257
5646 : * inode 259 with reference "a" on inode 257
5647 : *
5648 : * And in the current (uncommitted) transaction we have:
5649 : *
5650 : * inode 257 a directory, unchanged
5651 : * inode 258 with references "a" and "a2" on inode 257
5652 : * inode 259 with reference "zz_link" on inode 257
5653 : * inode 261 with reference "zz" on inode 257
5654 : *
5655 : * When logging inode 261 the following infinite loop could
5656 : * happen if we don't skip already logged inodes:
5657 : *
5658 : * - we detect inode 258 as a conflicting inode, with inode 261
5659 : * on reference "zz", and log it;
5660 : *
5661 : * - we detect inode 259 as a conflicting inode, with inode 258
5662 : * on reference "a", and log it;
5663 : *
5664 : * - we detect inode 258 as a conflicting inode, with inode 259
5665 : * on reference "zz_link", and log it - again! After this we
5666 : * repeat the above steps forever.
5667 : *
5668 : * Here we can use need_log_inode() because we only need to log the
5669 : * inode in LOG_INODE_EXISTS mode and rename operations update the log,
5670 : * so that the log ends up with the new name and without the old name.
5671 : */
5672 0 : if (!need_log_inode(trans, BTRFS_I(inode))) {
5673 0 : btrfs_add_delayed_iput(BTRFS_I(inode));
5674 0 : return 0;
5675 : }
5676 :
5677 0 : btrfs_add_delayed_iput(BTRFS_I(inode));
5678 :
5679 0 : ino_elem = kmalloc(sizeof(*ino_elem), GFP_NOFS);
5680 0 : if (!ino_elem)
5681 : return -ENOMEM;
5682 0 : ino_elem->ino = ino;
5683 0 : ino_elem->parent = parent;
5684 0 : list_add_tail(&ino_elem->list, &ctx->conflict_inodes);
5685 0 : ctx->num_conflict_inodes++;
5686 :
5687 0 : return 0;
5688 : }
5689 :
5690 0 : static int log_conflicting_inodes(struct btrfs_trans_handle *trans,
5691 : struct btrfs_root *root,
5692 : struct btrfs_log_ctx *ctx)
5693 : {
5694 0 : struct btrfs_fs_info *fs_info = root->fs_info;
5695 0 : int ret = 0;
5696 :
5697 : /*
5698 : * Conflicting inodes are logged by the first call to btrfs_log_inode(),
5699 : * otherwise we could have unbounded recursion of btrfs_log_inode()
5700 : * calls. This check guarantees we can have only 1 level of recursion.
5701 : */
5702 0 : if (ctx->logging_conflict_inodes)
5703 : return 0;
5704 :
5705 0 : ctx->logging_conflict_inodes = true;
5706 :
5707 : /*
5708 : * New conflicting inodes may be found and added to the list while we
5709 : * are logging a conflicting inode, so keep iterating while the list is
5710 : * not empty.
5711 : */
5712 0 : while (!list_empty(&ctx->conflict_inodes)) {
5713 0 : struct btrfs_ino_list *curr;
5714 0 : struct inode *inode;
5715 0 : u64 ino;
5716 0 : u64 parent;
5717 :
5718 0 : curr = list_first_entry(&ctx->conflict_inodes,
5719 : struct btrfs_ino_list, list);
5720 0 : ino = curr->ino;
5721 0 : parent = curr->parent;
5722 0 : list_del(&curr->list);
5723 0 : kfree(curr);
5724 :
5725 0 : inode = btrfs_iget(fs_info->sb, ino, root);
5726 : /*
5727 : * If the other inode that had a conflicting dir entry was
5728 : * deleted in the current transaction, we need to log its parent
5729 : * directory. See the comment at add_conflicting_inode().
5730 : */
5731 0 : if (IS_ERR(inode)) {
5732 0 : ret = PTR_ERR(inode);
5733 0 : if (ret != -ENOENT)
5734 : break;
5735 :
5736 0 : inode = btrfs_iget(fs_info->sb, parent, root);
5737 0 : if (IS_ERR(inode)) {
5738 0 : ret = PTR_ERR(inode);
5739 0 : break;
5740 : }
5741 :
5742 : /*
5743 : * Always log the directory, we cannot make this
5744 : * conditional on need_log_inode() because the directory
5745 : * might have been logged in LOG_INODE_EXISTS mode or
5746 : * the dir index of the conflicting inode is not in a
5747 : * dir index key range logged for the directory. So we
5748 : * must make sure the deletion is recorded.
5749 : */
5750 0 : ret = btrfs_log_inode(trans, BTRFS_I(inode),
5751 : LOG_INODE_ALL, ctx);
5752 0 : btrfs_add_delayed_iput(BTRFS_I(inode));
5753 0 : if (ret)
5754 : break;
5755 0 : continue;
5756 : }
5757 :
5758 : /*
5759 : * Here we can use need_log_inode() because we only need to log
5760 : * the inode in LOG_INODE_EXISTS mode and rename operations
5761 : * update the log, so that the log ends up with the new name and
5762 : * without the old name.
5763 : *
5764 : * We did this check at add_conflicting_inode(), but here we do
5765 : * it again because if some other task logged the inode after
5766 : * that, we can avoid doing it again.
5767 : */
5768 0 : if (!need_log_inode(trans, BTRFS_I(inode))) {
5769 0 : btrfs_add_delayed_iput(BTRFS_I(inode));
5770 0 : continue;
5771 : }
5772 :
5773 : /*
5774 : * We are safe logging the other inode without acquiring its
5775 : * lock as long as we log with the LOG_INODE_EXISTS mode. We
5776 : * are safe against concurrent renames of the other inode as
5777 : * well because during a rename we pin the log and update the
5778 : * log with the new name before we unpin it.
5779 : */
5780 0 : ret = btrfs_log_inode(trans, BTRFS_I(inode), LOG_INODE_EXISTS, ctx);
5781 0 : btrfs_add_delayed_iput(BTRFS_I(inode));
5782 0 : if (ret)
5783 : break;
5784 : }
5785 :
5786 0 : ctx->logging_conflict_inodes = false;
5787 0 : if (ret)
5788 0 : free_conflicting_inodes(ctx);
5789 :
5790 : return ret;
5791 : }
5792 :
5793 0 : static int copy_inode_items_to_log(struct btrfs_trans_handle *trans,
5794 : struct btrfs_inode *inode,
5795 : struct btrfs_key *min_key,
5796 : const struct btrfs_key *max_key,
5797 : struct btrfs_path *path,
5798 : struct btrfs_path *dst_path,
5799 : const u64 logged_isize,
5800 : const int inode_only,
5801 : struct btrfs_log_ctx *ctx,
5802 : bool *need_log_inode_item)
5803 : {
5804 0 : const u64 i_size = i_size_read(&inode->vfs_inode);
5805 0 : struct btrfs_root *root = inode->root;
5806 0 : int ins_start_slot = 0;
5807 0 : int ins_nr = 0;
5808 0 : int ret;
5809 :
5810 0 : while (1) {
5811 0 : ret = btrfs_search_forward(root, min_key, path, trans->transid);
5812 0 : if (ret < 0)
5813 0 : return ret;
5814 0 : if (ret > 0) {
5815 : ret = 0;
5816 : break;
5817 : }
5818 0 : again:
5819 : /* Note, ins_nr might be > 0 here, cleanup outside the loop */
5820 0 : if (min_key->objectid != max_key->objectid)
5821 : break;
5822 0 : if (min_key->type > max_key->type)
5823 : break;
5824 :
5825 0 : if (min_key->type == BTRFS_INODE_ITEM_KEY) {
5826 0 : *need_log_inode_item = false;
5827 0 : } else if (min_key->type == BTRFS_EXTENT_DATA_KEY &&
5828 0 : min_key->offset >= i_size) {
5829 : /*
5830 : * Extents at and beyond eof are logged with
5831 : * btrfs_log_prealloc_extents().
5832 : * Only regular files have BTRFS_EXTENT_DATA_KEY keys,
5833 : * and no keys greater than that, so bail out.
5834 : */
5835 : break;
5836 0 : } else if ((min_key->type == BTRFS_INODE_REF_KEY ||
5837 0 : min_key->type == BTRFS_INODE_EXTREF_KEY) &&
5838 0 : (inode->generation == trans->transid ||
5839 0 : ctx->logging_conflict_inodes)) {
5840 0 : u64 other_ino = 0;
5841 0 : u64 other_parent = 0;
5842 :
5843 0 : ret = btrfs_check_ref_name_override(path->nodes[0],
5844 : path->slots[0], min_key, inode,
5845 : &other_ino, &other_parent);
5846 0 : if (ret < 0) {
5847 0 : return ret;
5848 0 : } else if (ret > 0 &&
5849 0 : other_ino != btrfs_ino(BTRFS_I(ctx->inode))) {
5850 0 : if (ins_nr > 0) {
5851 0 : ins_nr++;
5852 : } else {
5853 0 : ins_nr = 1;
5854 0 : ins_start_slot = path->slots[0];
5855 : }
5856 0 : ret = copy_items(trans, inode, dst_path, path,
5857 : ins_start_slot, ins_nr,
5858 : inode_only, logged_isize);
5859 0 : if (ret < 0)
5860 0 : return ret;
5861 0 : ins_nr = 0;
5862 :
5863 0 : btrfs_release_path(path);
5864 0 : ret = add_conflicting_inode(trans, root, path,
5865 : other_ino,
5866 : other_parent, ctx);
5867 0 : if (ret)
5868 0 : return ret;
5869 0 : goto next_key;
5870 : }
5871 0 : } else if (min_key->type == BTRFS_XATTR_ITEM_KEY) {
5872 : /* Skip xattrs, logged later with btrfs_log_all_xattrs() */
5873 0 : if (ins_nr == 0)
5874 0 : goto next_slot;
5875 0 : ret = copy_items(trans, inode, dst_path, path,
5876 : ins_start_slot,
5877 : ins_nr, inode_only, logged_isize);
5878 0 : if (ret < 0)
5879 0 : return ret;
5880 0 : ins_nr = 0;
5881 0 : goto next_slot;
5882 : }
5883 :
5884 0 : if (ins_nr && ins_start_slot + ins_nr == path->slots[0]) {
5885 0 : ins_nr++;
5886 0 : goto next_slot;
5887 0 : } else if (!ins_nr) {
5888 0 : ins_start_slot = path->slots[0];
5889 0 : ins_nr = 1;
5890 0 : goto next_slot;
5891 : }
5892 :
5893 0 : ret = copy_items(trans, inode, dst_path, path, ins_start_slot,
5894 : ins_nr, inode_only, logged_isize);
5895 0 : if (ret < 0)
5896 0 : return ret;
5897 0 : ins_nr = 1;
5898 0 : ins_start_slot = path->slots[0];
5899 0 : next_slot:
5900 0 : path->slots[0]++;
5901 0 : if (path->slots[0] < btrfs_header_nritems(path->nodes[0])) {
5902 0 : btrfs_item_key_to_cpu(path->nodes[0], min_key,
5903 : path->slots[0]);
5904 0 : goto again;
5905 : }
5906 0 : if (ins_nr) {
5907 0 : ret = copy_items(trans, inode, dst_path, path,
5908 : ins_start_slot, ins_nr, inode_only,
5909 : logged_isize);
5910 0 : if (ret < 0)
5911 0 : return ret;
5912 : ins_nr = 0;
5913 : }
5914 0 : btrfs_release_path(path);
5915 0 : next_key:
5916 0 : if (min_key->offset < (u64)-1) {
5917 0 : min_key->offset++;
5918 0 : } else if (min_key->type < max_key->type) {
5919 0 : min_key->type++;
5920 0 : min_key->offset = 0;
5921 : } else {
5922 : break;
5923 : }
5924 :
5925 : /*
5926 : * We may process many leaves full of items for our inode, so
5927 : * avoid monopolizing a cpu for too long by rescheduling while
5928 : * not holding locks on any tree.
5929 : */
5930 0 : cond_resched();
5931 : }
5932 0 : if (ins_nr) {
5933 0 : ret = copy_items(trans, inode, dst_path, path, ins_start_slot,
5934 : ins_nr, inode_only, logged_isize);
5935 0 : if (ret)
5936 : return ret;
5937 : }
5938 :
5939 0 : if (inode_only == LOG_INODE_ALL && S_ISREG(inode->vfs_inode.i_mode)) {
5940 : /*
5941 : * Release the path because otherwise we might attempt to double
5942 : * lock the same leaf with btrfs_log_prealloc_extents() below.
5943 : */
5944 0 : btrfs_release_path(path);
5945 0 : ret = btrfs_log_prealloc_extents(trans, inode, dst_path);
5946 : }
5947 :
5948 : return ret;
5949 : }
5950 :
5951 0 : static int insert_delayed_items_batch(struct btrfs_trans_handle *trans,
5952 : struct btrfs_root *log,
5953 : struct btrfs_path *path,
5954 : const struct btrfs_item_batch *batch,
5955 : const struct btrfs_delayed_item *first_item)
5956 : {
5957 0 : const struct btrfs_delayed_item *curr = first_item;
5958 0 : int ret;
5959 :
5960 0 : ret = btrfs_insert_empty_items(trans, log, path, batch);
5961 0 : if (ret)
5962 : return ret;
5963 :
5964 0 : for (int i = 0; i < batch->nr; i++) {
5965 0 : char *data_ptr;
5966 :
5967 0 : data_ptr = btrfs_item_ptr(path->nodes[0], path->slots[0], char);
5968 0 : write_extent_buffer(path->nodes[0], &curr->data,
5969 0 : (unsigned long)data_ptr, curr->data_len);
5970 0 : curr = list_next_entry(curr, log_list);
5971 0 : path->slots[0]++;
5972 : }
5973 :
5974 0 : btrfs_release_path(path);
5975 :
5976 0 : return 0;
5977 : }
5978 :
5979 0 : static int log_delayed_insertion_items(struct btrfs_trans_handle *trans,
5980 : struct btrfs_inode *inode,
5981 : struct btrfs_path *path,
5982 : const struct list_head *delayed_ins_list,
5983 : struct btrfs_log_ctx *ctx)
5984 : {
5985 : /* 195 (4095 bytes of keys and sizes) fits in a single 4K page. */
5986 0 : const int max_batch_size = 195;
5987 0 : const int leaf_data_size = BTRFS_LEAF_DATA_SIZE(trans->fs_info);
5988 0 : const u64 ino = btrfs_ino(inode);
5989 0 : struct btrfs_root *log = inode->root->log_root;
5990 0 : struct btrfs_item_batch batch = {
5991 : .nr = 0,
5992 : .total_data_size = 0,
5993 : };
5994 0 : const struct btrfs_delayed_item *first = NULL;
5995 0 : const struct btrfs_delayed_item *curr;
5996 0 : char *ins_data;
5997 0 : struct btrfs_key *ins_keys;
5998 0 : u32 *ins_sizes;
5999 0 : u64 curr_batch_size = 0;
6000 0 : int batch_idx = 0;
6001 0 : int ret;
6002 :
6003 : /* We are adding dir index items to the log tree. */
6004 0 : lockdep_assert_held(&inode->log_mutex);
6005 :
6006 : /*
6007 : * We collect delayed items before copying index keys from the subvolume
6008 : * to the log tree. However just after we collected them, they may have
6009 : * been flushed (all of them or just some of them), and therefore we
6010 : * could have copied them from the subvolume tree to the log tree.
6011 : * So find the first delayed item that was not yet logged (they are
6012 : * sorted by index number).
6013 : */
6014 0 : list_for_each_entry(curr, delayed_ins_list, log_list) {
6015 0 : if (curr->index > inode->last_dir_index_offset) {
6016 : first = curr;
6017 : break;
6018 : }
6019 : }
6020 :
6021 : /* Empty list or all delayed items were already logged. */
6022 0 : if (!first)
6023 : return 0;
6024 :
6025 0 : ins_data = kmalloc(max_batch_size * sizeof(u32) +
6026 : max_batch_size * sizeof(struct btrfs_key), GFP_NOFS);
6027 0 : if (!ins_data)
6028 : return -ENOMEM;
6029 0 : ins_sizes = (u32 *)ins_data;
6030 0 : batch.data_sizes = ins_sizes;
6031 0 : ins_keys = (struct btrfs_key *)(ins_data + max_batch_size * sizeof(u32));
6032 0 : batch.keys = ins_keys;
6033 :
6034 0 : curr = first;
6035 0 : while (!list_entry_is_head(curr, delayed_ins_list, log_list)) {
6036 0 : const u32 curr_size = curr->data_len + sizeof(struct btrfs_item);
6037 :
6038 0 : if (curr_batch_size + curr_size > leaf_data_size ||
6039 0 : batch.nr == max_batch_size) {
6040 0 : ret = insert_delayed_items_batch(trans, log, path,
6041 : &batch, first);
6042 0 : if (ret)
6043 0 : goto out;
6044 0 : batch_idx = 0;
6045 0 : batch.nr = 0;
6046 0 : batch.total_data_size = 0;
6047 0 : curr_batch_size = 0;
6048 0 : first = curr;
6049 : }
6050 :
6051 0 : ins_sizes[batch_idx] = curr->data_len;
6052 0 : ins_keys[batch_idx].objectid = ino;
6053 0 : ins_keys[batch_idx].type = BTRFS_DIR_INDEX_KEY;
6054 0 : ins_keys[batch_idx].offset = curr->index;
6055 0 : curr_batch_size += curr_size;
6056 0 : batch.total_data_size += curr->data_len;
6057 0 : batch.nr++;
6058 0 : batch_idx++;
6059 0 : curr = list_next_entry(curr, log_list);
6060 : }
6061 :
6062 0 : ASSERT(batch.nr >= 1);
6063 0 : ret = insert_delayed_items_batch(trans, log, path, &batch, first);
6064 :
6065 0 : curr = list_last_entry(delayed_ins_list, struct btrfs_delayed_item,
6066 : log_list);
6067 0 : inode->last_dir_index_offset = curr->index;
6068 0 : out:
6069 0 : kfree(ins_data);
6070 :
6071 0 : return ret;
6072 : }
6073 :
6074 0 : static int log_delayed_deletions_full(struct btrfs_trans_handle *trans,
6075 : struct btrfs_inode *inode,
6076 : struct btrfs_path *path,
6077 : const struct list_head *delayed_del_list,
6078 : struct btrfs_log_ctx *ctx)
6079 : {
6080 0 : const u64 ino = btrfs_ino(inode);
6081 0 : const struct btrfs_delayed_item *curr;
6082 :
6083 0 : curr = list_first_entry(delayed_del_list, struct btrfs_delayed_item,
6084 : log_list);
6085 :
6086 0 : while (!list_entry_is_head(curr, delayed_del_list, log_list)) {
6087 0 : u64 first_dir_index = curr->index;
6088 0 : u64 last_dir_index;
6089 0 : const struct btrfs_delayed_item *next;
6090 0 : int ret;
6091 :
6092 : /*
6093 : * Find a range of consecutive dir index items to delete. Like
6094 : * this we log a single dir range item spanning several contiguous
6095 : * dir items instead of logging one range item per dir index item.
6096 : */
6097 0 : next = list_next_entry(curr, log_list);
6098 0 : while (!list_entry_is_head(next, delayed_del_list, log_list)) {
6099 0 : if (next->index != curr->index + 1)
6100 : break;
6101 0 : curr = next;
6102 0 : next = list_next_entry(next, log_list);
6103 : }
6104 :
6105 0 : last_dir_index = curr->index;
6106 0 : ASSERT(last_dir_index >= first_dir_index);
6107 :
6108 0 : ret = insert_dir_log_key(trans, inode->root->log_root, path,
6109 : ino, first_dir_index, last_dir_index);
6110 0 : if (ret)
6111 0 : return ret;
6112 0 : curr = list_next_entry(curr, log_list);
6113 : }
6114 :
6115 : return 0;
6116 : }
6117 :
6118 0 : static int batch_delete_dir_index_items(struct btrfs_trans_handle *trans,
6119 : struct btrfs_inode *inode,
6120 : struct btrfs_path *path,
6121 : struct btrfs_log_ctx *ctx,
6122 : const struct list_head *delayed_del_list,
6123 : const struct btrfs_delayed_item *first,
6124 : const struct btrfs_delayed_item **last_ret)
6125 : {
6126 0 : const struct btrfs_delayed_item *next;
6127 0 : struct extent_buffer *leaf = path->nodes[0];
6128 0 : const int last_slot = btrfs_header_nritems(leaf) - 1;
6129 0 : int slot = path->slots[0] + 1;
6130 0 : const u64 ino = btrfs_ino(inode);
6131 :
6132 0 : next = list_next_entry(first, log_list);
6133 :
6134 0 : while (slot < last_slot &&
6135 0 : !list_entry_is_head(next, delayed_del_list, log_list)) {
6136 0 : struct btrfs_key key;
6137 :
6138 0 : btrfs_item_key_to_cpu(leaf, &key, slot);
6139 0 : if (key.objectid != ino ||
6140 0 : key.type != BTRFS_DIR_INDEX_KEY ||
6141 0 : key.offset != next->index)
6142 : break;
6143 :
6144 0 : slot++;
6145 0 : *last_ret = next;
6146 0 : next = list_next_entry(next, log_list);
6147 : }
6148 :
6149 0 : return btrfs_del_items(trans, inode->root->log_root, path,
6150 : path->slots[0], slot - path->slots[0]);
6151 : }
6152 :
6153 0 : static int log_delayed_deletions_incremental(struct btrfs_trans_handle *trans,
6154 : struct btrfs_inode *inode,
6155 : struct btrfs_path *path,
6156 : const struct list_head *delayed_del_list,
6157 : struct btrfs_log_ctx *ctx)
6158 : {
6159 0 : struct btrfs_root *log = inode->root->log_root;
6160 0 : const struct btrfs_delayed_item *curr;
6161 0 : u64 last_range_start = 0;
6162 0 : u64 last_range_end = 0;
6163 0 : struct btrfs_key key;
6164 :
6165 0 : key.objectid = btrfs_ino(inode);
6166 0 : key.type = BTRFS_DIR_INDEX_KEY;
6167 0 : curr = list_first_entry(delayed_del_list, struct btrfs_delayed_item,
6168 : log_list);
6169 :
6170 0 : while (!list_entry_is_head(curr, delayed_del_list, log_list)) {
6171 0 : const struct btrfs_delayed_item *last = curr;
6172 0 : u64 first_dir_index = curr->index;
6173 0 : u64 last_dir_index;
6174 0 : bool deleted_items = false;
6175 0 : int ret;
6176 :
6177 0 : key.offset = curr->index;
6178 0 : ret = btrfs_search_slot(trans, log, &key, path, -1, 1);
6179 0 : if (ret < 0) {
6180 0 : return ret;
6181 0 : } else if (ret == 0) {
6182 0 : ret = batch_delete_dir_index_items(trans, inode, path, ctx,
6183 : delayed_del_list, curr,
6184 : &last);
6185 0 : if (ret)
6186 0 : return ret;
6187 : deleted_items = true;
6188 : }
6189 :
6190 0 : btrfs_release_path(path);
6191 :
6192 : /*
6193 : * If we deleted items from the leaf, it means we have a range
6194 : * item logging their range, so no need to add one or update an
6195 : * existing one. Otherwise we have to log a dir range item.
6196 : */
6197 0 : if (deleted_items)
6198 0 : goto next_batch;
6199 :
6200 0 : last_dir_index = last->index;
6201 0 : ASSERT(last_dir_index >= first_dir_index);
6202 : /*
6203 : * If this range starts right after where the previous one ends,
6204 : * then we want to reuse the previous range item and change its
6205 : * end offset to the end of this range. This is just to minimize
6206 : * leaf space usage, by avoiding adding a new range item.
6207 : */
6208 0 : if (last_range_end != 0 && first_dir_index == last_range_end + 1)
6209 0 : first_dir_index = last_range_start;
6210 :
6211 0 : ret = insert_dir_log_key(trans, log, path, key.objectid,
6212 : first_dir_index, last_dir_index);
6213 0 : if (ret)
6214 0 : return ret;
6215 :
6216 : last_range_start = first_dir_index;
6217 : last_range_end = last_dir_index;
6218 0 : next_batch:
6219 0 : curr = list_next_entry(last, log_list);
6220 : }
6221 :
6222 : return 0;
6223 : }
6224 :
6225 0 : static int log_delayed_deletion_items(struct btrfs_trans_handle *trans,
6226 : struct btrfs_inode *inode,
6227 : struct btrfs_path *path,
6228 : const struct list_head *delayed_del_list,
6229 : struct btrfs_log_ctx *ctx)
6230 : {
6231 : /*
6232 : * We are deleting dir index items from the log tree or adding range
6233 : * items to it.
6234 : */
6235 0 : lockdep_assert_held(&inode->log_mutex);
6236 :
6237 0 : if (list_empty(delayed_del_list))
6238 : return 0;
6239 :
6240 0 : if (ctx->logged_before)
6241 0 : return log_delayed_deletions_incremental(trans, inode, path,
6242 : delayed_del_list, ctx);
6243 :
6244 0 : return log_delayed_deletions_full(trans, inode, path, delayed_del_list,
6245 : ctx);
6246 : }
6247 :
6248 : /*
6249 : * Similar logic as for log_new_dir_dentries(), but it iterates over the delayed
6250 : * items instead of the subvolume tree.
6251 : */
6252 0 : static int log_new_delayed_dentries(struct btrfs_trans_handle *trans,
6253 : struct btrfs_inode *inode,
6254 : const struct list_head *delayed_ins_list,
6255 : struct btrfs_log_ctx *ctx)
6256 : {
6257 0 : const bool orig_log_new_dentries = ctx->log_new_dentries;
6258 0 : struct btrfs_fs_info *fs_info = trans->fs_info;
6259 0 : struct btrfs_delayed_item *item;
6260 0 : int ret = 0;
6261 :
6262 : /*
6263 : * No need for the log mutex, plus to avoid potential deadlocks or
6264 : * lockdep annotations due to nesting of delayed inode mutexes and log
6265 : * mutexes.
6266 : */
6267 0 : lockdep_assert_not_held(&inode->log_mutex);
6268 :
6269 0 : ASSERT(!ctx->logging_new_delayed_dentries);
6270 0 : ctx->logging_new_delayed_dentries = true;
6271 :
6272 0 : list_for_each_entry(item, delayed_ins_list, log_list) {
6273 0 : struct btrfs_dir_item *dir_item;
6274 0 : struct inode *di_inode;
6275 0 : struct btrfs_key key;
6276 0 : int log_mode = LOG_INODE_EXISTS;
6277 :
6278 0 : dir_item = (struct btrfs_dir_item *)item->data;
6279 0 : btrfs_disk_key_to_cpu(&key, &dir_item->location);
6280 :
6281 0 : if (key.type == BTRFS_ROOT_ITEM_KEY)
6282 0 : continue;
6283 :
6284 0 : di_inode = btrfs_iget(fs_info->sb, key.objectid, inode->root);
6285 0 : if (IS_ERR(di_inode)) {
6286 0 : ret = PTR_ERR(di_inode);
6287 0 : break;
6288 : }
6289 :
6290 0 : if (!need_log_inode(trans, BTRFS_I(di_inode))) {
6291 0 : btrfs_add_delayed_iput(BTRFS_I(di_inode));
6292 0 : continue;
6293 : }
6294 :
6295 0 : if (btrfs_stack_dir_ftype(dir_item) == BTRFS_FT_DIR)
6296 0 : log_mode = LOG_INODE_ALL;
6297 :
6298 0 : ctx->log_new_dentries = false;
6299 0 : ret = btrfs_log_inode(trans, BTRFS_I(di_inode), log_mode, ctx);
6300 :
6301 0 : if (!ret && ctx->log_new_dentries)
6302 0 : ret = log_new_dir_dentries(trans, BTRFS_I(di_inode), ctx);
6303 :
6304 0 : btrfs_add_delayed_iput(BTRFS_I(di_inode));
6305 :
6306 0 : if (ret)
6307 : break;
6308 : }
6309 :
6310 0 : ctx->log_new_dentries = orig_log_new_dentries;
6311 0 : ctx->logging_new_delayed_dentries = false;
6312 :
6313 0 : return ret;
6314 : }
6315 :
6316 : /* log a single inode in the tree log.
6317 : * At least one parent directory for this inode must exist in the tree
6318 : * or be logged already.
6319 : *
6320 : * Any items from this inode changed by the current transaction are copied
6321 : * to the log tree. An extra reference is taken on any extents in this
6322 : * file, allowing us to avoid a whole pile of corner cases around logging
6323 : * blocks that have been removed from the tree.
6324 : *
6325 : * See LOG_INODE_ALL and related defines for a description of what inode_only
6326 : * does.
6327 : *
6328 : * This handles both files and directories.
6329 : */
6330 0 : static int btrfs_log_inode(struct btrfs_trans_handle *trans,
6331 : struct btrfs_inode *inode,
6332 : int inode_only,
6333 : struct btrfs_log_ctx *ctx)
6334 : {
6335 0 : struct btrfs_path *path;
6336 0 : struct btrfs_path *dst_path;
6337 0 : struct btrfs_key min_key;
6338 0 : struct btrfs_key max_key;
6339 0 : struct btrfs_root *log = inode->root->log_root;
6340 0 : int ret;
6341 0 : bool fast_search = false;
6342 0 : u64 ino = btrfs_ino(inode);
6343 0 : struct extent_map_tree *em_tree = &inode->extent_tree;
6344 0 : u64 logged_isize = 0;
6345 0 : bool need_log_inode_item = true;
6346 0 : bool xattrs_logged = false;
6347 0 : bool inode_item_dropped = true;
6348 0 : bool full_dir_logging = false;
6349 0 : LIST_HEAD(delayed_ins_list);
6350 0 : LIST_HEAD(delayed_del_list);
6351 :
6352 0 : path = btrfs_alloc_path();
6353 0 : if (!path)
6354 : return -ENOMEM;
6355 0 : dst_path = btrfs_alloc_path();
6356 0 : if (!dst_path) {
6357 0 : btrfs_free_path(path);
6358 0 : return -ENOMEM;
6359 : }
6360 :
6361 0 : min_key.objectid = ino;
6362 0 : min_key.type = BTRFS_INODE_ITEM_KEY;
6363 0 : min_key.offset = 0;
6364 :
6365 0 : max_key.objectid = ino;
6366 :
6367 :
6368 : /* today the code can only do partial logging of directories */
6369 0 : if (S_ISDIR(inode->vfs_inode.i_mode) ||
6370 0 : (!test_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
6371 0 : &inode->runtime_flags) &&
6372 : inode_only >= LOG_INODE_EXISTS))
6373 0 : max_key.type = BTRFS_XATTR_ITEM_KEY;
6374 : else
6375 0 : max_key.type = (u8)-1;
6376 0 : max_key.offset = (u64)-1;
6377 :
6378 0 : if (S_ISDIR(inode->vfs_inode.i_mode) && inode_only == LOG_INODE_ALL)
6379 0 : full_dir_logging = true;
6380 :
6381 : /*
6382 : * If we are logging a directory while we are logging dentries of the
6383 : * delayed items of some other inode, then we need to flush the delayed
6384 : * items of this directory and not log the delayed items directly. This
6385 : * is to prevent more than one level of recursion into btrfs_log_inode()
6386 : * by having something like this:
6387 : *
6388 : * $ mkdir -p a/b/c/d/e/f/g/h/...
6389 : * $ xfs_io -c "fsync" a
6390 : *
6391 : * Where all directories in the path did not exist before and are
6392 : * created in the current transaction.
6393 : * So in such a case we directly log the delayed items of the main
6394 : * directory ("a") without flushing them first, while for each of its
6395 : * subdirectories we flush their delayed items before logging them.
6396 : * This prevents a potential unbounded recursion like this:
6397 : *
6398 : * btrfs_log_inode()
6399 : * log_new_delayed_dentries()
6400 : * btrfs_log_inode()
6401 : * log_new_delayed_dentries()
6402 : * btrfs_log_inode()
6403 : * log_new_delayed_dentries()
6404 : * (...)
6405 : *
6406 : * We have thresholds for the maximum number of delayed items to have in
6407 : * memory, and once they are hit, the items are flushed asynchronously.
6408 : * However the limit is quite high, so lets prevent deep levels of
6409 : * recursion to happen by limiting the maximum depth to be 1.
6410 : */
6411 0 : if (full_dir_logging && ctx->logging_new_delayed_dentries) {
6412 0 : ret = btrfs_commit_inode_delayed_items(trans, inode);
6413 0 : if (ret)
6414 0 : goto out;
6415 : }
6416 :
6417 0 : mutex_lock(&inode->log_mutex);
6418 :
6419 : /*
6420 : * For symlinks, we must always log their content, which is stored in an
6421 : * inline extent, otherwise we could end up with an empty symlink after
6422 : * log replay, which is invalid on linux (symlink(2) returns -ENOENT if
6423 : * one attempts to create an empty symlink).
6424 : * We don't need to worry about flushing delalloc, because when we create
6425 : * the inline extent when the symlink is created (we never have delalloc
6426 : * for symlinks).
6427 : */
6428 0 : if (S_ISLNK(inode->vfs_inode.i_mode))
6429 0 : inode_only = LOG_INODE_ALL;
6430 :
6431 : /*
6432 : * Before logging the inode item, cache the value returned by
6433 : * inode_logged(), because after that we have the need to figure out if
6434 : * the inode was previously logged in this transaction.
6435 : */
6436 0 : ret = inode_logged(trans, inode, path);
6437 0 : if (ret < 0)
6438 0 : goto out_unlock;
6439 0 : ctx->logged_before = (ret == 1);
6440 0 : ret = 0;
6441 :
6442 : /*
6443 : * This is for cases where logging a directory could result in losing a
6444 : * a file after replaying the log. For example, if we move a file from a
6445 : * directory A to a directory B, then fsync directory A, we have no way
6446 : * to known the file was moved from A to B, so logging just A would
6447 : * result in losing the file after a log replay.
6448 : */
6449 0 : if (full_dir_logging && inode->last_unlink_trans >= trans->transid) {
6450 0 : ret = BTRFS_LOG_FORCE_COMMIT;
6451 0 : goto out_unlock;
6452 : }
6453 :
6454 : /*
6455 : * a brute force approach to making sure we get the most uptodate
6456 : * copies of everything.
6457 : */
6458 0 : if (S_ISDIR(inode->vfs_inode.i_mode)) {
6459 0 : clear_bit(BTRFS_INODE_COPY_EVERYTHING, &inode->runtime_flags);
6460 0 : if (ctx->logged_before)
6461 0 : ret = drop_inode_items(trans, log, path, inode,
6462 : BTRFS_XATTR_ITEM_KEY);
6463 : } else {
6464 0 : if (inode_only == LOG_INODE_EXISTS && ctx->logged_before) {
6465 : /*
6466 : * Make sure the new inode item we write to the log has
6467 : * the same isize as the current one (if it exists).
6468 : * This is necessary to prevent data loss after log
6469 : * replay, and also to prevent doing a wrong expanding
6470 : * truncate - for e.g. create file, write 4K into offset
6471 : * 0, fsync, write 4K into offset 4096, add hard link,
6472 : * fsync some other file (to sync log), power fail - if
6473 : * we use the inode's current i_size, after log replay
6474 : * we get a 8Kb file, with the last 4Kb extent as a hole
6475 : * (zeroes), as if an expanding truncate happened,
6476 : * instead of getting a file of 4Kb only.
6477 : */
6478 0 : ret = logged_inode_size(log, inode, path, &logged_isize);
6479 0 : if (ret)
6480 0 : goto out_unlock;
6481 : }
6482 0 : if (test_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
6483 : &inode->runtime_flags)) {
6484 0 : if (inode_only == LOG_INODE_EXISTS) {
6485 0 : max_key.type = BTRFS_XATTR_ITEM_KEY;
6486 0 : if (ctx->logged_before)
6487 0 : ret = drop_inode_items(trans, log, path,
6488 : inode, max_key.type);
6489 : } else {
6490 0 : clear_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
6491 : &inode->runtime_flags);
6492 0 : clear_bit(BTRFS_INODE_COPY_EVERYTHING,
6493 : &inode->runtime_flags);
6494 0 : if (ctx->logged_before)
6495 0 : ret = truncate_inode_items(trans, log,
6496 : inode, 0, 0);
6497 : }
6498 0 : } else if (test_and_clear_bit(BTRFS_INODE_COPY_EVERYTHING,
6499 0 : &inode->runtime_flags) ||
6500 : inode_only == LOG_INODE_EXISTS) {
6501 0 : if (inode_only == LOG_INODE_ALL)
6502 0 : fast_search = true;
6503 0 : max_key.type = BTRFS_XATTR_ITEM_KEY;
6504 0 : if (ctx->logged_before)
6505 0 : ret = drop_inode_items(trans, log, path, inode,
6506 : max_key.type);
6507 : } else {
6508 0 : if (inode_only == LOG_INODE_ALL)
6509 0 : fast_search = true;
6510 0 : inode_item_dropped = false;
6511 0 : goto log_extents;
6512 : }
6513 :
6514 : }
6515 0 : if (ret)
6516 0 : goto out_unlock;
6517 :
6518 : /*
6519 : * If we are logging a directory in full mode, collect the delayed items
6520 : * before iterating the subvolume tree, so that we don't miss any new
6521 : * dir index items in case they get flushed while or right after we are
6522 : * iterating the subvolume tree.
6523 : */
6524 0 : if (full_dir_logging && !ctx->logging_new_delayed_dentries)
6525 0 : btrfs_log_get_delayed_items(inode, &delayed_ins_list,
6526 : &delayed_del_list);
6527 :
6528 0 : ret = copy_inode_items_to_log(trans, inode, &min_key, &max_key,
6529 : path, dst_path, logged_isize,
6530 : inode_only, ctx,
6531 : &need_log_inode_item);
6532 0 : if (ret)
6533 0 : goto out_unlock;
6534 :
6535 0 : btrfs_release_path(path);
6536 0 : btrfs_release_path(dst_path);
6537 0 : ret = btrfs_log_all_xattrs(trans, inode, path, dst_path);
6538 0 : if (ret)
6539 0 : goto out_unlock;
6540 0 : xattrs_logged = true;
6541 0 : if (max_key.type >= BTRFS_EXTENT_DATA_KEY && !fast_search) {
6542 0 : btrfs_release_path(path);
6543 0 : btrfs_release_path(dst_path);
6544 0 : ret = btrfs_log_holes(trans, inode, path);
6545 0 : if (ret)
6546 0 : goto out_unlock;
6547 : }
6548 0 : log_extents:
6549 0 : btrfs_release_path(path);
6550 0 : btrfs_release_path(dst_path);
6551 0 : if (need_log_inode_item) {
6552 0 : ret = log_inode_item(trans, log, dst_path, inode, inode_item_dropped);
6553 0 : if (ret)
6554 0 : goto out_unlock;
6555 : /*
6556 : * If we are doing a fast fsync and the inode was logged before
6557 : * in this transaction, we don't need to log the xattrs because
6558 : * they were logged before. If xattrs were added, changed or
6559 : * deleted since the last time we logged the inode, then we have
6560 : * already logged them because the inode had the runtime flag
6561 : * BTRFS_INODE_COPY_EVERYTHING set.
6562 : */
6563 0 : if (!xattrs_logged && inode->logged_trans < trans->transid) {
6564 0 : ret = btrfs_log_all_xattrs(trans, inode, path, dst_path);
6565 0 : if (ret)
6566 0 : goto out_unlock;
6567 0 : btrfs_release_path(path);
6568 : }
6569 : }
6570 0 : if (fast_search) {
6571 0 : ret = btrfs_log_changed_extents(trans, inode, dst_path, ctx);
6572 0 : if (ret)
6573 0 : goto out_unlock;
6574 0 : } else if (inode_only == LOG_INODE_ALL) {
6575 0 : struct extent_map *em, *n;
6576 :
6577 0 : write_lock(&em_tree->lock);
6578 0 : list_for_each_entry_safe(em, n, &em_tree->modified_extents, list)
6579 0 : list_del_init(&em->list);
6580 0 : write_unlock(&em_tree->lock);
6581 : }
6582 :
6583 0 : if (full_dir_logging) {
6584 0 : ret = log_directory_changes(trans, inode, path, dst_path, ctx);
6585 0 : if (ret)
6586 0 : goto out_unlock;
6587 0 : ret = log_delayed_insertion_items(trans, inode, path,
6588 : &delayed_ins_list, ctx);
6589 0 : if (ret)
6590 0 : goto out_unlock;
6591 0 : ret = log_delayed_deletion_items(trans, inode, path,
6592 : &delayed_del_list, ctx);
6593 0 : if (ret)
6594 0 : goto out_unlock;
6595 : }
6596 :
6597 0 : spin_lock(&inode->lock);
6598 0 : inode->logged_trans = trans->transid;
6599 : /*
6600 : * Don't update last_log_commit if we logged that an inode exists.
6601 : * We do this for three reasons:
6602 : *
6603 : * 1) We might have had buffered writes to this inode that were
6604 : * flushed and had their ordered extents completed in this
6605 : * transaction, but we did not previously log the inode with
6606 : * LOG_INODE_ALL. Later the inode was evicted and after that
6607 : * it was loaded again and this LOG_INODE_EXISTS log operation
6608 : * happened. We must make sure that if an explicit fsync against
6609 : * the inode is performed later, it logs the new extents, an
6610 : * updated inode item, etc, and syncs the log. The same logic
6611 : * applies to direct IO writes instead of buffered writes.
6612 : *
6613 : * 2) When we log the inode with LOG_INODE_EXISTS, its inode item
6614 : * is logged with an i_size of 0 or whatever value was logged
6615 : * before. If later the i_size of the inode is increased by a
6616 : * truncate operation, the log is synced through an fsync of
6617 : * some other inode and then finally an explicit fsync against
6618 : * this inode is made, we must make sure this fsync logs the
6619 : * inode with the new i_size, the hole between old i_size and
6620 : * the new i_size, and syncs the log.
6621 : *
6622 : * 3) If we are logging that an ancestor inode exists as part of
6623 : * logging a new name from a link or rename operation, don't update
6624 : * its last_log_commit - otherwise if an explicit fsync is made
6625 : * against an ancestor, the fsync considers the inode in the log
6626 : * and doesn't sync the log, resulting in the ancestor missing after
6627 : * a power failure unless the log was synced as part of an fsync
6628 : * against any other unrelated inode.
6629 : */
6630 0 : if (inode_only != LOG_INODE_EXISTS)
6631 0 : inode->last_log_commit = inode->last_sub_trans;
6632 0 : spin_unlock(&inode->lock);
6633 :
6634 : /*
6635 : * Reset the last_reflink_trans so that the next fsync does not need to
6636 : * go through the slower path when logging extents and their checksums.
6637 : */
6638 0 : if (inode_only == LOG_INODE_ALL)
6639 0 : inode->last_reflink_trans = 0;
6640 :
6641 0 : out_unlock:
6642 0 : mutex_unlock(&inode->log_mutex);
6643 0 : out:
6644 0 : btrfs_free_path(path);
6645 0 : btrfs_free_path(dst_path);
6646 :
6647 0 : if (ret)
6648 0 : free_conflicting_inodes(ctx);
6649 : else
6650 0 : ret = log_conflicting_inodes(trans, inode->root, ctx);
6651 :
6652 0 : if (full_dir_logging && !ctx->logging_new_delayed_dentries) {
6653 0 : if (!ret)
6654 0 : ret = log_new_delayed_dentries(trans, inode,
6655 : &delayed_ins_list, ctx);
6656 :
6657 0 : btrfs_log_put_delayed_items(inode, &delayed_ins_list,
6658 : &delayed_del_list);
6659 : }
6660 :
6661 : return ret;
6662 : }
6663 :
6664 0 : static int btrfs_log_all_parents(struct btrfs_trans_handle *trans,
6665 : struct btrfs_inode *inode,
6666 : struct btrfs_log_ctx *ctx)
6667 : {
6668 0 : struct btrfs_fs_info *fs_info = trans->fs_info;
6669 0 : int ret;
6670 0 : struct btrfs_path *path;
6671 0 : struct btrfs_key key;
6672 0 : struct btrfs_root *root = inode->root;
6673 0 : const u64 ino = btrfs_ino(inode);
6674 :
6675 0 : path = btrfs_alloc_path();
6676 0 : if (!path)
6677 : return -ENOMEM;
6678 0 : path->skip_locking = 1;
6679 0 : path->search_commit_root = 1;
6680 :
6681 0 : key.objectid = ino;
6682 0 : key.type = BTRFS_INODE_REF_KEY;
6683 0 : key.offset = 0;
6684 0 : ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
6685 0 : if (ret < 0)
6686 0 : goto out;
6687 :
6688 0 : while (true) {
6689 0 : struct extent_buffer *leaf = path->nodes[0];
6690 0 : int slot = path->slots[0];
6691 0 : u32 cur_offset = 0;
6692 0 : u32 item_size;
6693 0 : unsigned long ptr;
6694 :
6695 0 : if (slot >= btrfs_header_nritems(leaf)) {
6696 0 : ret = btrfs_next_leaf(root, path);
6697 0 : if (ret < 0)
6698 0 : goto out;
6699 0 : else if (ret > 0)
6700 : break;
6701 0 : continue;
6702 : }
6703 :
6704 0 : btrfs_item_key_to_cpu(leaf, &key, slot);
6705 : /* BTRFS_INODE_EXTREF_KEY is BTRFS_INODE_REF_KEY + 1 */
6706 0 : if (key.objectid != ino || key.type > BTRFS_INODE_EXTREF_KEY)
6707 : break;
6708 :
6709 0 : item_size = btrfs_item_size(leaf, slot);
6710 0 : ptr = btrfs_item_ptr_offset(leaf, slot);
6711 0 : while (cur_offset < item_size) {
6712 0 : struct btrfs_key inode_key;
6713 0 : struct inode *dir_inode;
6714 :
6715 0 : inode_key.type = BTRFS_INODE_ITEM_KEY;
6716 0 : inode_key.offset = 0;
6717 :
6718 0 : if (key.type == BTRFS_INODE_EXTREF_KEY) {
6719 0 : struct btrfs_inode_extref *extref;
6720 :
6721 0 : extref = (struct btrfs_inode_extref *)
6722 0 : (ptr + cur_offset);
6723 0 : inode_key.objectid = btrfs_inode_extref_parent(
6724 : leaf, extref);
6725 0 : cur_offset += sizeof(*extref);
6726 0 : cur_offset += btrfs_inode_extref_name_len(leaf,
6727 : extref);
6728 : } else {
6729 0 : inode_key.objectid = key.offset;
6730 0 : cur_offset = item_size;
6731 : }
6732 :
6733 0 : dir_inode = btrfs_iget(fs_info->sb, inode_key.objectid,
6734 : root);
6735 : /*
6736 : * If the parent inode was deleted, return an error to
6737 : * fallback to a transaction commit. This is to prevent
6738 : * getting an inode that was moved from one parent A to
6739 : * a parent B, got its former parent A deleted and then
6740 : * it got fsync'ed, from existing at both parents after
6741 : * a log replay (and the old parent still existing).
6742 : * Example:
6743 : *
6744 : * mkdir /mnt/A
6745 : * mkdir /mnt/B
6746 : * touch /mnt/B/bar
6747 : * sync
6748 : * mv /mnt/B/bar /mnt/A/bar
6749 : * mv -T /mnt/A /mnt/B
6750 : * fsync /mnt/B/bar
6751 : * <power fail>
6752 : *
6753 : * If we ignore the old parent B which got deleted,
6754 : * after a log replay we would have file bar linked
6755 : * at both parents and the old parent B would still
6756 : * exist.
6757 : */
6758 0 : if (IS_ERR(dir_inode)) {
6759 0 : ret = PTR_ERR(dir_inode);
6760 0 : goto out;
6761 : }
6762 :
6763 0 : if (!need_log_inode(trans, BTRFS_I(dir_inode))) {
6764 0 : btrfs_add_delayed_iput(BTRFS_I(dir_inode));
6765 0 : continue;
6766 : }
6767 :
6768 0 : ctx->log_new_dentries = false;
6769 0 : ret = btrfs_log_inode(trans, BTRFS_I(dir_inode),
6770 : LOG_INODE_ALL, ctx);
6771 0 : if (!ret && ctx->log_new_dentries)
6772 0 : ret = log_new_dir_dentries(trans,
6773 : BTRFS_I(dir_inode), ctx);
6774 0 : btrfs_add_delayed_iput(BTRFS_I(dir_inode));
6775 0 : if (ret)
6776 0 : goto out;
6777 : }
6778 0 : path->slots[0]++;
6779 : }
6780 : ret = 0;
6781 0 : out:
6782 0 : btrfs_free_path(path);
6783 0 : return ret;
6784 : }
6785 :
6786 0 : static int log_new_ancestors(struct btrfs_trans_handle *trans,
6787 : struct btrfs_root *root,
6788 : struct btrfs_path *path,
6789 : struct btrfs_log_ctx *ctx)
6790 : {
6791 0 : struct btrfs_key found_key;
6792 :
6793 0 : btrfs_item_key_to_cpu(path->nodes[0], &found_key, path->slots[0]);
6794 :
6795 0 : while (true) {
6796 0 : struct btrfs_fs_info *fs_info = root->fs_info;
6797 0 : struct extent_buffer *leaf = path->nodes[0];
6798 0 : int slot = path->slots[0];
6799 0 : struct btrfs_key search_key;
6800 0 : struct inode *inode;
6801 0 : u64 ino;
6802 0 : int ret = 0;
6803 :
6804 0 : btrfs_release_path(path);
6805 :
6806 0 : ino = found_key.offset;
6807 :
6808 0 : search_key.objectid = found_key.offset;
6809 0 : search_key.type = BTRFS_INODE_ITEM_KEY;
6810 0 : search_key.offset = 0;
6811 0 : inode = btrfs_iget(fs_info->sb, ino, root);
6812 0 : if (IS_ERR(inode))
6813 0 : return PTR_ERR(inode);
6814 :
6815 0 : if (BTRFS_I(inode)->generation >= trans->transid &&
6816 0 : need_log_inode(trans, BTRFS_I(inode)))
6817 0 : ret = btrfs_log_inode(trans, BTRFS_I(inode),
6818 : LOG_INODE_EXISTS, ctx);
6819 0 : btrfs_add_delayed_iput(BTRFS_I(inode));
6820 0 : if (ret)
6821 0 : return ret;
6822 :
6823 0 : if (search_key.objectid == BTRFS_FIRST_FREE_OBJECTID)
6824 : break;
6825 :
6826 0 : search_key.type = BTRFS_INODE_REF_KEY;
6827 0 : ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0);
6828 0 : if (ret < 0)
6829 0 : return ret;
6830 :
6831 0 : leaf = path->nodes[0];
6832 0 : slot = path->slots[0];
6833 0 : if (slot >= btrfs_header_nritems(leaf)) {
6834 0 : ret = btrfs_next_leaf(root, path);
6835 0 : if (ret < 0)
6836 0 : return ret;
6837 0 : else if (ret > 0)
6838 : return -ENOENT;
6839 0 : leaf = path->nodes[0];
6840 0 : slot = path->slots[0];
6841 : }
6842 :
6843 0 : btrfs_item_key_to_cpu(leaf, &found_key, slot);
6844 0 : if (found_key.objectid != search_key.objectid ||
6845 0 : found_key.type != BTRFS_INODE_REF_KEY)
6846 : return -ENOENT;
6847 : }
6848 0 : return 0;
6849 : }
6850 :
6851 0 : static int log_new_ancestors_fast(struct btrfs_trans_handle *trans,
6852 : struct btrfs_inode *inode,
6853 : struct dentry *parent,
6854 : struct btrfs_log_ctx *ctx)
6855 : {
6856 0 : struct btrfs_root *root = inode->root;
6857 0 : struct dentry *old_parent = NULL;
6858 0 : struct super_block *sb = inode->vfs_inode.i_sb;
6859 0 : int ret = 0;
6860 :
6861 0 : while (true) {
6862 0 : if (!parent || d_really_is_negative(parent) ||
6863 0 : sb != parent->d_sb)
6864 : break;
6865 :
6866 0 : inode = BTRFS_I(d_inode(parent));
6867 0 : if (root != inode->root)
6868 : break;
6869 :
6870 0 : if (inode->generation >= trans->transid &&
6871 0 : need_log_inode(trans, inode)) {
6872 0 : ret = btrfs_log_inode(trans, inode,
6873 : LOG_INODE_EXISTS, ctx);
6874 0 : if (ret)
6875 : break;
6876 : }
6877 0 : if (IS_ROOT(parent))
6878 : break;
6879 :
6880 0 : parent = dget_parent(parent);
6881 0 : dput(old_parent);
6882 0 : old_parent = parent;
6883 : }
6884 0 : dput(old_parent);
6885 :
6886 0 : return ret;
6887 : }
6888 :
6889 0 : static int log_all_new_ancestors(struct btrfs_trans_handle *trans,
6890 : struct btrfs_inode *inode,
6891 : struct dentry *parent,
6892 : struct btrfs_log_ctx *ctx)
6893 : {
6894 0 : struct btrfs_root *root = inode->root;
6895 0 : const u64 ino = btrfs_ino(inode);
6896 0 : struct btrfs_path *path;
6897 0 : struct btrfs_key search_key;
6898 0 : int ret;
6899 :
6900 : /*
6901 : * For a single hard link case, go through a fast path that does not
6902 : * need to iterate the fs/subvolume tree.
6903 : */
6904 0 : if (inode->vfs_inode.i_nlink < 2)
6905 0 : return log_new_ancestors_fast(trans, inode, parent, ctx);
6906 :
6907 0 : path = btrfs_alloc_path();
6908 0 : if (!path)
6909 : return -ENOMEM;
6910 :
6911 0 : search_key.objectid = ino;
6912 0 : search_key.type = BTRFS_INODE_REF_KEY;
6913 0 : search_key.offset = 0;
6914 0 : again:
6915 0 : ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0);
6916 0 : if (ret < 0)
6917 0 : goto out;
6918 0 : if (ret == 0)
6919 0 : path->slots[0]++;
6920 :
6921 0 : while (true) {
6922 0 : struct extent_buffer *leaf = path->nodes[0];
6923 0 : int slot = path->slots[0];
6924 0 : struct btrfs_key found_key;
6925 :
6926 0 : if (slot >= btrfs_header_nritems(leaf)) {
6927 0 : ret = btrfs_next_leaf(root, path);
6928 0 : if (ret < 0)
6929 0 : goto out;
6930 0 : else if (ret > 0)
6931 : break;
6932 0 : continue;
6933 : }
6934 :
6935 0 : btrfs_item_key_to_cpu(leaf, &found_key, slot);
6936 0 : if (found_key.objectid != ino ||
6937 0 : found_key.type > BTRFS_INODE_EXTREF_KEY)
6938 : break;
6939 :
6940 : /*
6941 : * Don't deal with extended references because they are rare
6942 : * cases and too complex to deal with (we would need to keep
6943 : * track of which subitem we are processing for each item in
6944 : * this loop, etc). So just return some error to fallback to
6945 : * a transaction commit.
6946 : */
6947 0 : if (found_key.type == BTRFS_INODE_EXTREF_KEY) {
6948 0 : ret = -EMLINK;
6949 0 : goto out;
6950 : }
6951 :
6952 : /*
6953 : * Logging ancestors needs to do more searches on the fs/subvol
6954 : * tree, so it releases the path as needed to avoid deadlocks.
6955 : * Keep track of the last inode ref key and resume from that key
6956 : * after logging all new ancestors for the current hard link.
6957 : */
6958 0 : memcpy(&search_key, &found_key, sizeof(search_key));
6959 :
6960 0 : ret = log_new_ancestors(trans, root, path, ctx);
6961 0 : if (ret)
6962 0 : goto out;
6963 0 : btrfs_release_path(path);
6964 0 : goto again;
6965 : }
6966 0 : ret = 0;
6967 0 : out:
6968 0 : btrfs_free_path(path);
6969 0 : return ret;
6970 : }
6971 :
6972 : /*
6973 : * helper function around btrfs_log_inode to make sure newly created
6974 : * parent directories also end up in the log. A minimal inode and backref
6975 : * only logging is done of any parent directories that are older than
6976 : * the last committed transaction
6977 : */
6978 0 : static int btrfs_log_inode_parent(struct btrfs_trans_handle *trans,
6979 : struct btrfs_inode *inode,
6980 : struct dentry *parent,
6981 : int inode_only,
6982 : struct btrfs_log_ctx *ctx)
6983 : {
6984 0 : struct btrfs_root *root = inode->root;
6985 0 : struct btrfs_fs_info *fs_info = root->fs_info;
6986 0 : int ret = 0;
6987 0 : bool log_dentries = false;
6988 :
6989 0 : if (btrfs_test_opt(fs_info, NOTREELOG)) {
6990 0 : ret = BTRFS_LOG_FORCE_COMMIT;
6991 0 : goto end_no_trans;
6992 : }
6993 :
6994 0 : if (btrfs_root_refs(&root->root_item) == 0) {
6995 0 : ret = BTRFS_LOG_FORCE_COMMIT;
6996 0 : goto end_no_trans;
6997 : }
6998 :
6999 : /*
7000 : * Skip already logged inodes or inodes corresponding to tmpfiles
7001 : * (since logging them is pointless, a link count of 0 means they
7002 : * will never be accessible).
7003 : */
7004 0 : if ((btrfs_inode_in_log(inode, trans->transid) &&
7005 0 : list_empty(&ctx->ordered_extents)) ||
7006 0 : inode->vfs_inode.i_nlink == 0) {
7007 0 : ret = BTRFS_NO_LOG_SYNC;
7008 0 : goto end_no_trans;
7009 : }
7010 :
7011 0 : ret = start_log_trans(trans, root, ctx);
7012 0 : if (ret)
7013 0 : goto end_no_trans;
7014 :
7015 0 : ret = btrfs_log_inode(trans, inode, inode_only, ctx);
7016 0 : if (ret)
7017 0 : goto end_trans;
7018 :
7019 : /*
7020 : * for regular files, if its inode is already on disk, we don't
7021 : * have to worry about the parents at all. This is because
7022 : * we can use the last_unlink_trans field to record renames
7023 : * and other fun in this file.
7024 : */
7025 0 : if (S_ISREG(inode->vfs_inode.i_mode) &&
7026 0 : inode->generation < trans->transid &&
7027 0 : inode->last_unlink_trans < trans->transid) {
7028 0 : ret = 0;
7029 0 : goto end_trans;
7030 : }
7031 :
7032 0 : if (S_ISDIR(inode->vfs_inode.i_mode) && ctx->log_new_dentries)
7033 0 : log_dentries = true;
7034 :
7035 : /*
7036 : * On unlink we must make sure all our current and old parent directory
7037 : * inodes are fully logged. This is to prevent leaving dangling
7038 : * directory index entries in directories that were our parents but are
7039 : * not anymore. Not doing this results in old parent directory being
7040 : * impossible to delete after log replay (rmdir will always fail with
7041 : * error -ENOTEMPTY).
7042 : *
7043 : * Example 1:
7044 : *
7045 : * mkdir testdir
7046 : * touch testdir/foo
7047 : * ln testdir/foo testdir/bar
7048 : * sync
7049 : * unlink testdir/bar
7050 : * xfs_io -c fsync testdir/foo
7051 : * <power failure>
7052 : * mount fs, triggers log replay
7053 : *
7054 : * If we don't log the parent directory (testdir), after log replay the
7055 : * directory still has an entry pointing to the file inode using the bar
7056 : * name, but a matching BTRFS_INODE_[REF|EXTREF]_KEY does not exist and
7057 : * the file inode has a link count of 1.
7058 : *
7059 : * Example 2:
7060 : *
7061 : * mkdir testdir
7062 : * touch foo
7063 : * ln foo testdir/foo2
7064 : * ln foo testdir/foo3
7065 : * sync
7066 : * unlink testdir/foo3
7067 : * xfs_io -c fsync foo
7068 : * <power failure>
7069 : * mount fs, triggers log replay
7070 : *
7071 : * Similar as the first example, after log replay the parent directory
7072 : * testdir still has an entry pointing to the inode file with name foo3
7073 : * but the file inode does not have a matching BTRFS_INODE_REF_KEY item
7074 : * and has a link count of 2.
7075 : */
7076 0 : if (inode->last_unlink_trans >= trans->transid) {
7077 0 : ret = btrfs_log_all_parents(trans, inode, ctx);
7078 0 : if (ret)
7079 0 : goto end_trans;
7080 : }
7081 :
7082 0 : ret = log_all_new_ancestors(trans, inode, parent, ctx);
7083 0 : if (ret)
7084 0 : goto end_trans;
7085 :
7086 0 : if (log_dentries)
7087 0 : ret = log_new_dir_dentries(trans, inode, ctx);
7088 : else
7089 : ret = 0;
7090 0 : end_trans:
7091 0 : if (ret < 0) {
7092 0 : btrfs_set_log_full_commit(trans);
7093 0 : ret = BTRFS_LOG_FORCE_COMMIT;
7094 : }
7095 :
7096 0 : if (ret)
7097 0 : btrfs_remove_log_ctx(root, ctx);
7098 0 : btrfs_end_log_trans(root);
7099 0 : end_no_trans:
7100 0 : return ret;
7101 : }
7102 :
7103 : /*
7104 : * it is not safe to log dentry if the chunk root has added new
7105 : * chunks. This returns 0 if the dentry was logged, and 1 otherwise.
7106 : * If this returns 1, you must commit the transaction to safely get your
7107 : * data on disk.
7108 : */
7109 0 : int btrfs_log_dentry_safe(struct btrfs_trans_handle *trans,
7110 : struct dentry *dentry,
7111 : struct btrfs_log_ctx *ctx)
7112 : {
7113 0 : struct dentry *parent = dget_parent(dentry);
7114 0 : int ret;
7115 :
7116 0 : ret = btrfs_log_inode_parent(trans, BTRFS_I(d_inode(dentry)), parent,
7117 : LOG_INODE_ALL, ctx);
7118 0 : dput(parent);
7119 :
7120 0 : return ret;
7121 : }
7122 :
7123 : /*
7124 : * should be called during mount to recover any replay any log trees
7125 : * from the FS
7126 : */
7127 0 : int btrfs_recover_log_trees(struct btrfs_root *log_root_tree)
7128 : {
7129 0 : int ret;
7130 0 : struct btrfs_path *path;
7131 0 : struct btrfs_trans_handle *trans;
7132 0 : struct btrfs_key key;
7133 0 : struct btrfs_key found_key;
7134 0 : struct btrfs_root *log;
7135 0 : struct btrfs_fs_info *fs_info = log_root_tree->fs_info;
7136 0 : struct walk_control wc = {
7137 : .process_func = process_one_buffer,
7138 : .stage = LOG_WALK_PIN_ONLY,
7139 : };
7140 :
7141 0 : path = btrfs_alloc_path();
7142 0 : if (!path)
7143 : return -ENOMEM;
7144 :
7145 0 : set_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags);
7146 :
7147 0 : trans = btrfs_start_transaction(fs_info->tree_root, 0);
7148 0 : if (IS_ERR(trans)) {
7149 0 : ret = PTR_ERR(trans);
7150 0 : goto error;
7151 : }
7152 :
7153 0 : wc.trans = trans;
7154 0 : wc.pin = 1;
7155 :
7156 0 : ret = walk_log_tree(trans, log_root_tree, &wc);
7157 0 : if (ret) {
7158 0 : btrfs_abort_transaction(trans, ret);
7159 0 : goto error;
7160 : }
7161 :
7162 0 : again:
7163 0 : key.objectid = BTRFS_TREE_LOG_OBJECTID;
7164 0 : key.offset = (u64)-1;
7165 0 : key.type = BTRFS_ROOT_ITEM_KEY;
7166 :
7167 0 : while (1) {
7168 0 : ret = btrfs_search_slot(NULL, log_root_tree, &key, path, 0, 0);
7169 :
7170 0 : if (ret < 0) {
7171 0 : btrfs_abort_transaction(trans, ret);
7172 0 : goto error;
7173 : }
7174 0 : if (ret > 0) {
7175 0 : if (path->slots[0] == 0)
7176 : break;
7177 0 : path->slots[0]--;
7178 : }
7179 0 : btrfs_item_key_to_cpu(path->nodes[0], &found_key,
7180 : path->slots[0]);
7181 0 : btrfs_release_path(path);
7182 0 : if (found_key.objectid != BTRFS_TREE_LOG_OBJECTID)
7183 : break;
7184 :
7185 0 : log = btrfs_read_tree_root(log_root_tree, &found_key);
7186 0 : if (IS_ERR(log)) {
7187 0 : ret = PTR_ERR(log);
7188 0 : btrfs_abort_transaction(trans, ret);
7189 0 : goto error;
7190 : }
7191 :
7192 0 : wc.replay_dest = btrfs_get_fs_root(fs_info, found_key.offset,
7193 : true);
7194 0 : if (IS_ERR(wc.replay_dest)) {
7195 0 : ret = PTR_ERR(wc.replay_dest);
7196 :
7197 : /*
7198 : * We didn't find the subvol, likely because it was
7199 : * deleted. This is ok, simply skip this log and go to
7200 : * the next one.
7201 : *
7202 : * We need to exclude the root because we can't have
7203 : * other log replays overwriting this log as we'll read
7204 : * it back in a few more times. This will keep our
7205 : * block from being modified, and we'll just bail for
7206 : * each subsequent pass.
7207 : */
7208 0 : if (ret == -ENOENT)
7209 0 : ret = btrfs_pin_extent_for_log_replay(trans,
7210 0 : log->node->start,
7211 0 : log->node->len);
7212 0 : btrfs_put_root(log);
7213 :
7214 0 : if (!ret)
7215 0 : goto next;
7216 0 : btrfs_abort_transaction(trans, ret);
7217 0 : goto error;
7218 : }
7219 :
7220 0 : wc.replay_dest->log_root = log;
7221 0 : ret = btrfs_record_root_in_trans(trans, wc.replay_dest);
7222 0 : if (ret)
7223 : /* The loop needs to continue due to the root refs */
7224 0 : btrfs_abort_transaction(trans, ret);
7225 : else
7226 0 : ret = walk_log_tree(trans, log, &wc);
7227 :
7228 0 : if (!ret && wc.stage == LOG_WALK_REPLAY_ALL) {
7229 0 : ret = fixup_inode_link_counts(trans, wc.replay_dest,
7230 : path);
7231 0 : if (ret)
7232 0 : btrfs_abort_transaction(trans, ret);
7233 : }
7234 :
7235 0 : if (!ret && wc.stage == LOG_WALK_REPLAY_ALL) {
7236 0 : struct btrfs_root *root = wc.replay_dest;
7237 :
7238 0 : btrfs_release_path(path);
7239 :
7240 : /*
7241 : * We have just replayed everything, and the highest
7242 : * objectid of fs roots probably has changed in case
7243 : * some inode_item's got replayed.
7244 : *
7245 : * root->objectid_mutex is not acquired as log replay
7246 : * could only happen during mount.
7247 : */
7248 0 : ret = btrfs_init_root_free_objectid(root);
7249 0 : if (ret)
7250 0 : btrfs_abort_transaction(trans, ret);
7251 : }
7252 :
7253 0 : wc.replay_dest->log_root = NULL;
7254 0 : btrfs_put_root(wc.replay_dest);
7255 0 : btrfs_put_root(log);
7256 :
7257 0 : if (ret)
7258 0 : goto error;
7259 0 : next:
7260 0 : if (found_key.offset == 0)
7261 : break;
7262 0 : key.offset = found_key.offset - 1;
7263 : }
7264 0 : btrfs_release_path(path);
7265 :
7266 : /* step one is to pin it all, step two is to replay just inodes */
7267 0 : if (wc.pin) {
7268 0 : wc.pin = 0;
7269 0 : wc.process_func = replay_one_buffer;
7270 0 : wc.stage = LOG_WALK_REPLAY_INODES;
7271 0 : goto again;
7272 : }
7273 : /* step three is to replay everything */
7274 0 : if (wc.stage < LOG_WALK_REPLAY_ALL) {
7275 0 : wc.stage++;
7276 0 : goto again;
7277 : }
7278 :
7279 0 : btrfs_free_path(path);
7280 :
7281 : /* step 4: commit the transaction, which also unpins the blocks */
7282 0 : ret = btrfs_commit_transaction(trans);
7283 0 : if (ret)
7284 : return ret;
7285 :
7286 0 : log_root_tree->log_root = NULL;
7287 0 : clear_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags);
7288 0 : btrfs_put_root(log_root_tree);
7289 :
7290 0 : return 0;
7291 0 : error:
7292 0 : if (wc.trans)
7293 0 : btrfs_end_transaction(wc.trans);
7294 0 : clear_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags);
7295 0 : btrfs_free_path(path);
7296 0 : return ret;
7297 : }
7298 :
7299 : /*
7300 : * there are some corner cases where we want to force a full
7301 : * commit instead of allowing a directory to be logged.
7302 : *
7303 : * They revolve around files there were unlinked from the directory, and
7304 : * this function updates the parent directory so that a full commit is
7305 : * properly done if it is fsync'd later after the unlinks are done.
7306 : *
7307 : * Must be called before the unlink operations (updates to the subvolume tree,
7308 : * inodes, etc) are done.
7309 : */
7310 0 : void btrfs_record_unlink_dir(struct btrfs_trans_handle *trans,
7311 : struct btrfs_inode *dir, struct btrfs_inode *inode,
7312 : bool for_rename)
7313 : {
7314 : /*
7315 : * when we're logging a file, if it hasn't been renamed
7316 : * or unlinked, and its inode is fully committed on disk,
7317 : * we don't have to worry about walking up the directory chain
7318 : * to log its parents.
7319 : *
7320 : * So, we use the last_unlink_trans field to put this transid
7321 : * into the file. When the file is logged we check it and
7322 : * don't log the parents if the file is fully on disk.
7323 : */
7324 0 : mutex_lock(&inode->log_mutex);
7325 0 : inode->last_unlink_trans = trans->transid;
7326 0 : mutex_unlock(&inode->log_mutex);
7327 :
7328 0 : if (!for_rename)
7329 : return;
7330 :
7331 : /*
7332 : * If this directory was already logged, any new names will be logged
7333 : * with btrfs_log_new_name() and old names will be deleted from the log
7334 : * tree with btrfs_del_dir_entries_in_log() or with
7335 : * btrfs_del_inode_ref_in_log().
7336 : */
7337 0 : if (inode_logged(trans, dir, NULL) == 1)
7338 : return;
7339 :
7340 : /*
7341 : * If the inode we're about to unlink was logged before, the log will be
7342 : * properly updated with the new name with btrfs_log_new_name() and the
7343 : * old name removed with btrfs_del_dir_entries_in_log() or with
7344 : * btrfs_del_inode_ref_in_log().
7345 : */
7346 0 : if (inode_logged(trans, inode, NULL) == 1)
7347 : return;
7348 :
7349 : /*
7350 : * when renaming files across directories, if the directory
7351 : * there we're unlinking from gets fsync'd later on, there's
7352 : * no way to find the destination directory later and fsync it
7353 : * properly. So, we have to be conservative and force commits
7354 : * so the new name gets discovered.
7355 : */
7356 0 : mutex_lock(&dir->log_mutex);
7357 0 : dir->last_unlink_trans = trans->transid;
7358 0 : mutex_unlock(&dir->log_mutex);
7359 : }
7360 :
7361 : /*
7362 : * Make sure that if someone attempts to fsync the parent directory of a deleted
7363 : * snapshot, it ends up triggering a transaction commit. This is to guarantee
7364 : * that after replaying the log tree of the parent directory's root we will not
7365 : * see the snapshot anymore and at log replay time we will not see any log tree
7366 : * corresponding to the deleted snapshot's root, which could lead to replaying
7367 : * it after replaying the log tree of the parent directory (which would replay
7368 : * the snapshot delete operation).
7369 : *
7370 : * Must be called before the actual snapshot destroy operation (updates to the
7371 : * parent root and tree of tree roots trees, etc) are done.
7372 : */
7373 0 : void btrfs_record_snapshot_destroy(struct btrfs_trans_handle *trans,
7374 : struct btrfs_inode *dir)
7375 : {
7376 0 : mutex_lock(&dir->log_mutex);
7377 0 : dir->last_unlink_trans = trans->transid;
7378 0 : mutex_unlock(&dir->log_mutex);
7379 0 : }
7380 :
7381 : /*
7382 : * Update the log after adding a new name for an inode.
7383 : *
7384 : * @trans: Transaction handle.
7385 : * @old_dentry: The dentry associated with the old name and the old
7386 : * parent directory.
7387 : * @old_dir: The inode of the previous parent directory for the case
7388 : * of a rename. For a link operation, it must be NULL.
7389 : * @old_dir_index: The index number associated with the old name, meaningful
7390 : * only for rename operations (when @old_dir is not NULL).
7391 : * Ignored for link operations.
7392 : * @parent: The dentry associated with the directory under which the
7393 : * new name is located.
7394 : *
7395 : * Call this after adding a new name for an inode, as a result of a link or
7396 : * rename operation, and it will properly update the log to reflect the new name.
7397 : */
7398 0 : void btrfs_log_new_name(struct btrfs_trans_handle *trans,
7399 : struct dentry *old_dentry, struct btrfs_inode *old_dir,
7400 : u64 old_dir_index, struct dentry *parent)
7401 : {
7402 0 : struct btrfs_inode *inode = BTRFS_I(d_inode(old_dentry));
7403 0 : struct btrfs_root *root = inode->root;
7404 0 : struct btrfs_log_ctx ctx;
7405 0 : bool log_pinned = false;
7406 0 : int ret;
7407 :
7408 : /*
7409 : * this will force the logging code to walk the dentry chain
7410 : * up for the file
7411 : */
7412 0 : if (!S_ISDIR(inode->vfs_inode.i_mode))
7413 0 : inode->last_unlink_trans = trans->transid;
7414 :
7415 : /*
7416 : * if this inode hasn't been logged and directory we're renaming it
7417 : * from hasn't been logged, we don't need to log it
7418 : */
7419 0 : ret = inode_logged(trans, inode, NULL);
7420 0 : if (ret < 0) {
7421 0 : goto out;
7422 0 : } else if (ret == 0) {
7423 0 : if (!old_dir)
7424 0 : return;
7425 : /*
7426 : * If the inode was not logged and we are doing a rename (old_dir is not
7427 : * NULL), check if old_dir was logged - if it was not we can return and
7428 : * do nothing.
7429 : */
7430 0 : ret = inode_logged(trans, old_dir, NULL);
7431 0 : if (ret < 0)
7432 0 : goto out;
7433 0 : else if (ret == 0)
7434 : return;
7435 : }
7436 0 : ret = 0;
7437 :
7438 : /*
7439 : * If we are doing a rename (old_dir is not NULL) from a directory that
7440 : * was previously logged, make sure that on log replay we get the old
7441 : * dir entry deleted. This is needed because we will also log the new
7442 : * name of the renamed inode, so we need to make sure that after log
7443 : * replay we don't end up with both the new and old dir entries existing.
7444 : */
7445 0 : if (old_dir && old_dir->logged_trans == trans->transid) {
7446 0 : struct btrfs_root *log = old_dir->root->log_root;
7447 0 : struct btrfs_path *path;
7448 0 : struct fscrypt_name fname;
7449 :
7450 0 : ASSERT(old_dir_index >= BTRFS_DIR_START_INDEX);
7451 :
7452 0 : ret = fscrypt_setup_filename(&old_dir->vfs_inode,
7453 0 : &old_dentry->d_name, 0, &fname);
7454 0 : if (ret)
7455 0 : goto out;
7456 : /*
7457 : * We have two inodes to update in the log, the old directory and
7458 : * the inode that got renamed, so we must pin the log to prevent
7459 : * anyone from syncing the log until we have updated both inodes
7460 : * in the log.
7461 : */
7462 0 : ret = join_running_log_trans(root);
7463 : /*
7464 : * At least one of the inodes was logged before, so this should
7465 : * not fail, but if it does, it's not serious, just bail out and
7466 : * mark the log for a full commit.
7467 : */
7468 0 : if (WARN_ON_ONCE(ret < 0)) {
7469 0 : fscrypt_free_filename(&fname);
7470 0 : goto out;
7471 : }
7472 :
7473 0 : log_pinned = true;
7474 :
7475 0 : path = btrfs_alloc_path();
7476 0 : if (!path) {
7477 0 : ret = -ENOMEM;
7478 0 : fscrypt_free_filename(&fname);
7479 0 : goto out;
7480 : }
7481 :
7482 : /*
7483 : * Other concurrent task might be logging the old directory,
7484 : * as it can be triggered when logging other inode that had or
7485 : * still has a dentry in the old directory. We lock the old
7486 : * directory's log_mutex to ensure the deletion of the old
7487 : * name is persisted, because during directory logging we
7488 : * delete all BTRFS_DIR_LOG_INDEX_KEY keys and the deletion of
7489 : * the old name's dir index item is in the delayed items, so
7490 : * it could be missed by an in progress directory logging.
7491 : */
7492 0 : mutex_lock(&old_dir->log_mutex);
7493 0 : ret = del_logged_dentry(trans, log, path, btrfs_ino(old_dir),
7494 : &fname.disk_name, old_dir_index);
7495 0 : if (ret > 0) {
7496 : /*
7497 : * The dentry does not exist in the log, so record its
7498 : * deletion.
7499 : */
7500 0 : btrfs_release_path(path);
7501 0 : ret = insert_dir_log_key(trans, log, path,
7502 : btrfs_ino(old_dir),
7503 : old_dir_index, old_dir_index);
7504 : }
7505 0 : mutex_unlock(&old_dir->log_mutex);
7506 :
7507 0 : btrfs_free_path(path);
7508 0 : fscrypt_free_filename(&fname);
7509 0 : if (ret < 0)
7510 0 : goto out;
7511 : }
7512 :
7513 0 : btrfs_init_log_ctx(&ctx, &inode->vfs_inode);
7514 0 : ctx.logging_new_name = true;
7515 : /*
7516 : * We don't care about the return value. If we fail to log the new name
7517 : * then we know the next attempt to sync the log will fallback to a full
7518 : * transaction commit (due to a call to btrfs_set_log_full_commit()), so
7519 : * we don't need to worry about getting a log committed that has an
7520 : * inconsistent state after a rename operation.
7521 : */
7522 0 : btrfs_log_inode_parent(trans, inode, parent, LOG_INODE_EXISTS, &ctx);
7523 0 : ASSERT(list_empty(&ctx.conflict_inodes));
7524 0 : out:
7525 : /*
7526 : * If an error happened mark the log for a full commit because it's not
7527 : * consistent and up to date or we couldn't find out if one of the
7528 : * inodes was logged before in this transaction. Do it before unpinning
7529 : * the log, to avoid any races with someone else trying to commit it.
7530 : */
7531 0 : if (ret < 0)
7532 0 : btrfs_set_log_full_commit(trans);
7533 0 : if (log_pinned)
7534 0 : btrfs_end_log_trans(root);
7535 : }
7536 :
|