Line data Source code
1 : // SPDX-License-Identifier: GPL-2.0-or-later
2 : /*
3 : * Copyright (C) 2020-2023 Oracle. All Rights Reserved.
4 : * Author: Darrick J. Wong <djwong@kernel.org>
5 : *
6 : * The xfs_swap_extent_* functions are:
7 : * Copyright (c) 2000-2006 Silicon Graphics, Inc.
8 : * Copyright (c) 2012 Red Hat, Inc.
9 : * All Rights Reserved.
10 : */
11 : #include "xfs.h"
12 : #include "xfs_shared.h"
13 : #include "xfs_format.h"
14 : #include "xfs_log_format.h"
15 : #include "xfs_trans_resv.h"
16 : #include "xfs_mount.h"
17 : #include "xfs_defer.h"
18 : #include "xfs_inode.h"
19 : #include "xfs_trans.h"
20 : #include "xfs_quota.h"
21 : #include "xfs_bmap_util.h"
22 : #include "xfs_bmap_btree.h"
23 : #include "xfs_reflink.h"
24 : #include "xfs_trace.h"
25 : #include "xfs_swapext.h"
26 : #include "xfs_xchgrange.h"
27 : #include "xfs_sb.h"
28 : #include "xfs_icache.h"
29 : #include "xfs_log.h"
30 : #include "xfs_rtalloc.h"
31 : #include "xfs_rtbitmap.h"
32 : #include <linux/fsnotify.h>
33 :
34 : /*
35 : * Generic code for exchanging ranges of two files via XFS_IOC_EXCHANGE_RANGE.
36 : * This part does not deal with XFS-specific data structures, and may some day
37 : * be ported to the VFS.
38 : *
39 : * The goal is to exchange fxr.length bytes starting at fxr.file1_offset in
40 : * file1 with the same number of bytes starting at fxr.file2_offset in file2.
41 : * Implementations must call xfs_exch_range_prep to prepare the two files
42 : * prior to taking locks; they must call xfs_exch_range_check_fresh once
43 : * the inode is locked to abort the call if file2 has changed; and they must
44 : * update the inode change and mod times of both files as part of the metadata
45 : * update. The timestamp updates must be done atomically as part of the data
46 : * exchange operation to ensure correctness of the freshness check.
47 : */
48 :
49 : /*
50 : * Check that both files' metadata agree with the snapshot that we took for
51 : * the range exchange request.
52 : *
53 : * This should be called after the filesystem has locked /all/ inode metadata
54 : * against modification.
55 : */
56 : STATIC int
57 529290 : xfs_exch_range_check_fresh(
58 : struct inode *inode2,
59 : const struct xfs_exch_range *fxr)
60 : {
61 : /* Check that file2 hasn't otherwise been modified. */
62 529290 : if ((fxr->flags & XFS_EXCH_RANGE_FILE2_FRESH) &&
63 13054 : (fxr->file2_ino != inode2->i_ino ||
64 13054 : fxr->file2_ctime != inode2->i_ctime.tv_sec ||
65 13054 : fxr->file2_ctime_nsec != inode2->i_ctime.tv_nsec ||
66 13048 : fxr->file2_mtime != inode2->i_mtime.tv_sec ||
67 13048 : fxr->file2_mtime_nsec != inode2->i_mtime.tv_nsec))
68 6 : return -EBUSY;
69 :
70 : return 0;
71 : }
72 :
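/*
 * A minimal userspace sketch (illustrative only; the helper name and the
 * use of fstat(2) snapshots are assumptions, not part of this file) of the
 * freshness comparison above: file2 is considered stale, and the exchange
 * is rejected with -EBUSY, if its identity or timestamps changed between
 * the caller's snapshot and the locked re-check.
 */
#include <stdbool.h>
#include <sys/stat.h>

static bool file2_is_stale(const struct stat *before, const struct stat *now)
{
	return before->st_ino != now->st_ino ||
	       before->st_ctim.tv_sec != now->st_ctim.tv_sec ||
	       before->st_ctim.tv_nsec != now->st_ctim.tv_nsec ||
	       before->st_mtim.tv_sec != now->st_mtim.tv_sec ||
	       before->st_mtim.tv_nsec != now->st_mtim.tv_nsec;
}
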
73 : /* Performs necessary checks before doing a range exchange. */
74 : STATIC int
75 264682 : xfs_exch_range_checks(
76 : struct file *file1,
77 : struct file *file2,
78 : struct xfs_exch_range *fxr,
79 : unsigned int blocksize)
80 : {
81 264682 : struct inode *inode1 = file1->f_mapping->host;
82 264682 : struct inode *inode2 = file2->f_mapping->host;
83 264682 : uint64_t blkmask = blocksize - 1;
84 264682 : int64_t test_len;
85 264682 : uint64_t blen;
86 264682 : loff_t size1, size2;
87 264682 : int error;
88 :
89 : /* Don't touch certain kinds of inodes */
90 264682 : if (IS_IMMUTABLE(inode1) || IS_IMMUTABLE(inode2))
91 : return -EPERM;
92 264680 : if (IS_SWAPFILE(inode1) || IS_SWAPFILE(inode2))
93 : return -ETXTBSY;
94 :
95 264678 : size1 = i_size_read(inode1);
96 264678 : size2 = i_size_read(inode2);
97 :
98 : /* Ranges cannot start after EOF. */
99 264678 : if (fxr->file1_offset > size1 || fxr->file2_offset > size2)
100 : return -EINVAL;
101 :
102 : /*
103 : * If the caller asked for full files, check that the offset/length
104 : * values cover all of both files.
105 : */
106 264670 : if ((fxr->flags & XFS_EXCH_RANGE_FULL_FILES) &&
107 6438 : (fxr->file1_offset != 0 || fxr->file2_offset != 0 ||
108 6438 : fxr->length != size1 || fxr->length != size2))
109 : return -EDOM;
110 :
111 : /*
112 : * If the caller said to exchange to EOF, we set the length of the
113 : * request large enough to cover everything to the end of both files.
114 : */
115 264666 : if (fxr->flags & XFS_EXCH_RANGE_TO_EOF)
116 74 : fxr->length = max_t(int64_t, size1 - fxr->file1_offset,
117 : size2 - fxr->file2_offset);
118 :
119 : /* The start of both ranges must be aligned to an fs block. */
120 264666 : if (!IS_ALIGNED(fxr->file1_offset, blocksize) ||
121 264664 : !IS_ALIGNED(fxr->file2_offset, blocksize))
122 : return -EINVAL;
123 :
124 : /* Ensure offsets don't wrap. */
125 264664 : if (fxr->file1_offset + fxr->length < fxr->file1_offset ||
126 264664 : fxr->file2_offset + fxr->length < fxr->file2_offset)
127 : return -EINVAL;
128 :
129 : /*
130 : * We require both ranges to be within EOF, unless we're exchanging
131 : * to EOF. We already checked above that both fxr->file1_offset and
132 : * fxr->file2_offset are within EOF.
133 : */
134 264664 : if (!(fxr->flags & XFS_EXCH_RANGE_TO_EOF) &&
135 264590 : (fxr->file1_offset + fxr->length > size1 ||
136 264586 : fxr->file2_offset + fxr->length > size2))
137 : return -EINVAL;
138 :
139 : /*
140 : * Make sure we don't hit any file size limits. If we hit any size
141 : * limits such that test_len was adjusted, we abort the whole
142 : * operation.
143 : */
144 264658 : test_len = fxr->length;
145 264658 : error = generic_write_check_limits(file2, fxr->file2_offset, &test_len);
146 264658 : if (error)
147 : return error;
148 264658 : error = generic_write_check_limits(file1, fxr->file1_offset, &test_len);
149 264658 : if (error)
150 : return error;
151 264658 : if (test_len != fxr->length)
152 : return -EINVAL;
153 :
154 : /*
155 : * If the user wanted us to exchange up to file1's EOF, round up
156 : * to the next block boundary for this check. Do the same for
157 : * file2.
158 : *
159 : * Otherwise, reject the range length if it's not block aligned. We
160 : * already confirmed the starting offsets' block alignment.
161 : */
162 264656 : if (fxr->file1_offset + fxr->length == size1)
163 6608 : blen = ALIGN(size1, blocksize) - fxr->file1_offset;
164 258048 : else if (fxr->file2_offset + fxr->length == size2)
165 7793 : blen = ALIGN(size2, blocksize) - fxr->file2_offset;
166 250255 : else if (!IS_ALIGNED(fxr->length, blocksize))
167 : return -EINVAL;
168 : else
169 : blen = fxr->length;
170 :
171 : /* Don't allow overlapped exchanges within the same file. */
172 264656 : if (inode1 == inode2 &&
173 258114 : fxr->file2_offset + blen > fxr->file1_offset &&
174 135467 : fxr->file1_offset + blen > fxr->file2_offset)
175 : return -EINVAL;
176 :
177 : /* If we already failed the freshness check, we're done. */
178 264650 : error = xfs_exch_range_check_fresh(inode2, fxr);
179 264650 : if (error)
180 : return error;
181 :
182 : /*
183 : * Ensure that we don't exchange a partial EOF block into the middle of
184 : * another file.
185 : */
186 264644 : if ((fxr->length & blkmask) == 0)
187 : return 0;
188 :
189 1606 : blen = fxr->length;
190 1606 : if (fxr->file2_offset + blen < size2)
191 2 : blen &= ~blkmask;
192 :
193 1606 : if (fxr->file1_offset + blen < size1)
194 4 : blen &= ~blkmask;
195 :
196 1606 : return blen == fxr->length ? 0 : -EINVAL;
197 : }
198 :
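/*
 * A standalone sketch (hypothetical helper, for illustration) of the
 * same-file overlap test applied above: the byte ranges
 * [off1, off1 + blen) and [off2, off2 + blen) within one file must not
 * intersect. For example, ranges_overlap(0, 4096, 8192) is true because
 * [0, 8192) and [4096, 12288) share bytes, so that request is rejected
 * with -EINVAL.
 */
#include <stdbool.h>
#include <stdint.h>

static bool ranges_overlap(uint64_t off1, uint64_t off2, uint64_t blen)
{
	return off2 + blen > off1 && off1 + blen > off2;
}
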
199 : /*
200 : * Check that the two inodes are eligible for range exchanges, the ranges make
201 : * sense, and then flush all dirty data. Caller must ensure that the inodes
202 : * have been locked against any other modifications.
203 : */
204 : int
205 264682 : xfs_exch_range_prep(
206 : struct file *file1,
207 : struct file *file2,
208 : struct xfs_exch_range *fxr,
209 : unsigned int blocksize)
210 : {
211 264682 : struct inode *inode1 = file_inode(file1);
212 264682 : struct inode *inode2 = file_inode(file2);
213 264682 : bool same_inode = (inode1 == inode2);
214 264682 : int error;
215 :
216 : /* Check that we don't violate system file offset limits. */
217 264682 : error = xfs_exch_range_checks(file1, file2, fxr, blocksize);
218 264682 : if (error || fxr->length == 0)
219 : return error;
220 :
221 : /* Wait for the completion of any pending IOs on both files */
222 264526 : inode_dio_wait(inode1);
223 264526 : if (!same_inode)
224 6532 : inode_dio_wait(inode2);
225 :
226 264526 : error = filemap_write_and_wait_range(inode1->i_mapping,
227 : fxr->file1_offset,
228 264526 : fxr->file1_offset + fxr->length - 1);
229 264526 : if (error)
230 : return error;
231 :
232 264526 : error = filemap_write_and_wait_range(inode2->i_mapping,
233 : fxr->file2_offset,
234 264526 : fxr->file2_offset + fxr->length - 1);
235 264526 : if (error)
236 : return error;
237 :
238 : /*
239 : * If the files or inodes involved require synchronous writes, amend
240 : * the request to force the filesystem to flush all data and metadata
241 : * to disk after the operation completes.
242 : */
243 264526 : if (((file1->f_flags | file2->f_flags) & (__O_SYNC | O_DSYNC)) ||
244 258650 : IS_SYNC(inode1) || IS_SYNC(inode2))
245 5876 : fxr->flags |= XFS_EXCH_RANGE_FSYNC;
246 :
247 : return 0;
248 : }
249 :
250 : /*
251 : * Finish a range exchange operation, if it was successful. Caller must ensure
252 : * that the inodes are still locked against any other modifications.
253 : */
254 : int
255 264624 : xfs_exch_range_finish(
256 : struct file *file1,
257 : struct file *file2)
258 : {
259 264624 : int error;
260 :
261 264624 : error = file_remove_privs(file1);
262 264624 : if (error)
263 : return error;
264 264624 : if (file_inode(file1) == file_inode(file2))
265 : return 0;
266 :
267 6516 : return file_remove_privs(file2);
268 : }
269 :
270 : /* Decide if it's ok to remap the selected range of a given file. */
271 : STATIC int
272 529364 : xfs_exch_range_verify_area(
273 : struct file *file,
274 : loff_t pos,
275 : struct xfs_exch_range *fxr)
276 : {
277 529364 : int64_t len = fxr->length;
278 :
279 529364 : if (pos < 0)
280 : return -EINVAL;
281 :
282 529364 : if (fxr->flags & XFS_EXCH_RANGE_TO_EOF)
283 148 : len = min_t(int64_t, len, i_size_read(file_inode(file)) - pos);
284 529364 : return remap_verify_area(file, pos, len, true);
285 : }
286 :
287 : /* Prepare for and exchange parts of two files. */
288 : static inline int
289 2313611 : __xfs_exch_range(
290 : struct file *file1,
291 : struct file *file2,
292 : struct xfs_exch_range *fxr)
293 : {
294 2313611 : struct inode *inode1 = file_inode(file1);
295 2313611 : struct inode *inode2 = file_inode(file2);
296 2313611 : int ret;
297 :
298 4627224 : if ((fxr->flags & ~XFS_EXCH_RANGE_ALL_FLAGS) ||
299 2313611 : memchr_inv(&fxr->pad, 0, sizeof(fxr->pad)))
300 0 : return -EINVAL;
301 :
302 2313613 : if ((fxr->flags & XFS_EXCH_RANGE_FULL_FILES) &&
303 : (fxr->flags & XFS_EXCH_RANGE_TO_EOF))
304 : return -EINVAL;
305 :
306 : /*
307 : * The ioctl enforces that src and dest files are on the same mount.
308 : * However, they only need to be on the same file system.
309 : */
310 2313613 : if (inode1->i_sb != inode2->i_sb)
311 : return -EXDEV;
312 :
313 : /* This only works for regular files. */
314 2313613 : if (S_ISDIR(inode1->i_mode) || S_ISDIR(inode2->i_mode))
315 : return -EISDIR;
316 2313611 : if (!S_ISREG(inode1->i_mode) || !S_ISREG(inode2->i_mode))
317 : return -EINVAL;
318 :
319 2313611 : ret = generic_file_rw_checks(file1, file2);
320 2313618 : if (ret < 0)
321 : return ret;
322 :
323 2313616 : ret = generic_file_rw_checks(file2, file1);
324 2313611 : if (ret < 0)
325 : return ret;
326 :
327 264682 : ret = xfs_exch_range_verify_area(file1, fxr->file1_offset, fxr);
328 264682 : if (ret)
329 : return ret;
330 :
331 264682 : ret = xfs_exch_range_verify_area(file2, fxr->file2_offset, fxr);
332 264682 : if (ret)
333 : return ret;
334 :
335 264682 : ret = xfs_file_xchg_range(file1, file2, fxr);
336 264682 : if (ret)
337 : return ret;
338 :
339 264624 : fsnotify_modify(file1);
340 264624 : if (file2 != file1)
341 6520 : fsnotify_modify(file2);
342 : return 0;
343 : }
344 :
345 : /* Exchange parts of two files. */
346 : int
347 2313616 : xfs_exch_range(
348 : struct file *file1,
349 : struct file *file2,
350 : struct xfs_exch_range *fxr)
351 : {
352 2313616 : int error;
353 :
354 2313616 : file_start_write(file2);
355 2313616 : error = __xfs_exch_range(file1, file2, fxr);
356 2313609 : file_end_write(file2);
357 2313620 : return error;
358 : }
359 :
360 : /* XFS-specific parts of XFS_IOC_EXCHANGE_RANGE */
361 :
362 : /*
363 : * Exchanging ranges as a file operation. This is the binding between the
364 : * VFS-level concepts and the XFS-specific implementation.
365 : */
366 : int
367 264682 : xfs_file_xchg_range(
368 : struct file *file1,
369 : struct file *file2,
370 : struct xfs_exch_range *fxr)
371 : {
372 264682 : struct inode *inode1 = file_inode(file1);
373 264682 : struct inode *inode2 = file_inode(file2);
374 264682 : struct xfs_inode *ip1 = XFS_I(inode1);
375 264682 : struct xfs_inode *ip2 = XFS_I(inode2);
376 264682 : struct xfs_mount *mp = ip1->i_mount;
377 264682 : unsigned int priv_flags = 0;
378 264682 : bool use_logging = false;
379 264682 : int error;
380 :
381 529364 : if (xfs_is_shutdown(mp))
382 : return -EIO;
383 :
384 : /* Update cmtime if the fd/inode don't forbid it. */
385 264682 : if (likely(!(file1->f_mode & FMODE_NOCMTIME) && !IS_NOCMTIME(inode1)))
386 264682 : priv_flags |= XFS_XCHG_RANGE_UPD_CMTIME1;
387 264682 : if (likely(!(file2->f_mode & FMODE_NOCMTIME) && !IS_NOCMTIME(inode2)))
388 258724 : priv_flags |= XFS_XCHG_RANGE_UPD_CMTIME2;
389 :
390 : /* Lock both files against IO */
391 264682 : error = xfs_ilock2_io_mmap(ip1, ip2);
392 264682 : if (error)
393 0 : goto out_err;
394 :
395 : /* Prepare and then exchange file contents. */
396 264682 : error = xfs_xchg_range_prep(file1, file2, fxr, priv_flags);
397 264682 : if (error)
398 42 : goto out_unlock;
399 :
400 : /* Get permission to use log-assisted file content swaps. */
401 264640 : error = xfs_xchg_range_grab_log_assist(mp,
402 264640 : !(fxr->flags & XFS_EXCH_RANGE_NONATOMIC),
403 : &use_logging);
404 264640 : if (error)
405 0 : goto out_unlock;
406 264640 : if (use_logging)
407 258200 : priv_flags |= XFS_XCHG_RANGE_LOGGED;
408 :
409 264640 : error = xfs_xchg_range(ip1, ip2, fxr, priv_flags);
410 264640 : if (error)
411 16 : goto out_drop_feat;
412 :
413 : /*
414 : * Finish the exchange by removing special file privileges like any
415 : * other file write would do. This may involve turning on support for
416 : * logged xattrs if either file has security capabilities, which means
417 : * calling xfs_xchg_range_grab_log_assist before xfs_attr_grab_log_assist.
418 : */
419 264624 : error = xfs_exch_range_finish(file1, file2);
420 264624 : if (error)
421 0 : goto out_drop_feat;
422 :
423 264624 : out_drop_feat:
424 264640 : if (use_logging)
425 258200 : xfs_xchg_range_rele_log_assist(mp);
426 6440 : out_unlock:
427 264682 : xfs_iunlock2_io_mmap(ip1, ip2);
428 264682 : out_err:
429 264682 : if (error)
430 58 : trace_xfs_file_xchg_range_error(ip2, error, _RET_IP_);
431 : return error;
432 : }
433 :
434 : /* Lock (and optionally join) two inodes for a file range exchange. */
435 : void
436 922166 : xfs_xchg_range_ilock(
437 : struct xfs_trans *tp,
438 : struct xfs_inode *ip1,
439 : struct xfs_inode *ip2)
440 : {
441 922166 : if (ip1 != ip2)
442 405950 : xfs_lock_two_inodes(ip1, XFS_ILOCK_EXCL,
443 : ip2, XFS_ILOCK_EXCL);
444 : else
445 516216 : xfs_ilock(ip1, XFS_ILOCK_EXCL);
446 922172 : if (tp) {
447 613989 : xfs_trans_ijoin(tp, ip1, 0);
448 613994 : if (ip2 != ip1)
449 355886 : xfs_trans_ijoin(tp, ip2, 0);
450 : }
451 :
452 922177 : }
453 :
454 : /* Unlock two inodes after a file range exchange operation. */
455 : void
456 572829 : xfs_xchg_range_iunlock(
457 : struct xfs_inode *ip1,
458 : struct xfs_inode *ip2)
459 : {
460 572829 : if (ip2 != ip1)
461 56613 : xfs_iunlock(ip2, XFS_ILOCK_EXCL);
462 572829 : xfs_iunlock(ip1, XFS_ILOCK_EXCL);
463 572829 : }
464 :
465 : /*
466 : * Estimate the resource requirements to exchange file contents between the two
467 : * files. The caller is required to hold the IOLOCK and the MMAPLOCK and to
468 : * have flushed both inodes' pagecache and active direct-ios.
469 : */
470 : int
471 308177 : xfs_xchg_range_estimate(
472 : struct xfs_swapext_req *req)
473 : {
474 308177 : int error;
475 :
476 308177 : xfs_xchg_range_ilock(NULL, req->ip1, req->ip2);
477 308177 : error = xfs_swapext_estimate(req);
478 308177 : xfs_xchg_range_iunlock(req->ip1, req->ip2);
479 308177 : return error;
480 : }
481 :
482 : /*
483 : * We need to check that the format of the data fork in the temporary inode is
484 : * valid for the target inode before doing the swap. This is not a problem with
485 : * attr1 because of the fixed fork offset, but attr2 has a dynamically sized
486 : * data fork depending on the space the attribute fork is taking so we can get
487 : * invalid formats on the target inode.
488 : *
489 : * E.g. target has space for 7 extents in extent format, temp inode only has
490 : * space for 6. If we defragment down to 7 extents, then the tmp format is a
491 : * btree, but when swapped it needs to be in extent format. Hence we can't just
492 : * blindly swap data forks on attr2 filesystems.
493 : *
494 : * Note that we check the swap in both directions so that we don't end up with
495 : * a corrupt temporary inode, either.
496 : *
497 : * Note that fixing the way xfs_fsr sets up the attribute fork in the source
498 : * inode will prevent this situation from occurring, so all we do here is
499 : * reject and log the attempt. Basically, we are putting the responsibility on
500 : * userspace to get this right.
501 : */
502 : STATIC int
503 0 : xfs_swap_extents_check_format(
504 : struct xfs_inode *ip, /* target inode */
505 : struct xfs_inode *tip) /* tmp inode */
506 : {
507 0 : struct xfs_ifork *ifp = &ip->i_df;
508 0 : struct xfs_ifork *tifp = &tip->i_df;
509 :
510 : /* User/group/project quota ids must match if quotas are enforced. */
511 0 : if (XFS_IS_QUOTA_ON(ip->i_mount) &&
512 0 : (!uid_eq(VFS_I(ip)->i_uid, VFS_I(tip)->i_uid) ||
513 0 : !gid_eq(VFS_I(ip)->i_gid, VFS_I(tip)->i_gid) ||
514 0 : ip->i_projid != tip->i_projid))
515 : return -EINVAL;
516 :
517 : /* Should never get a local format */
518 0 : if (ifp->if_format == XFS_DINODE_FMT_LOCAL ||
519 0 : tifp->if_format == XFS_DINODE_FMT_LOCAL)
520 : return -EINVAL;
521 :
522 : /*
523 : * If the target inode has fewer extents than the temporary inode, then
524 : * why did userspace call us?
525 : */
526 0 : if (ifp->if_nextents < tifp->if_nextents)
527 : return -EINVAL;
528 :
529 : /*
530 : * If we have to use the (expensive) rmap swap method, we can
531 : * handle any number of extents and any format.
532 : */
533 0 : if (xfs_has_rmapbt(ip->i_mount))
534 : return 0;
535 :
536 : /*
537 : * If the target inode is in extent format and the temp inode is in btree
538 : * format, then we will end up with the target inode in the wrong format,
539 : * as we already know there are fewer extents in the temp inode.
540 : */
541 0 : if (ifp->if_format == XFS_DINODE_FMT_EXTENTS &&
542 : tifp->if_format == XFS_DINODE_FMT_BTREE)
543 : return -EINVAL;
544 :
545 : /* Check temp in extent form to max in target */
546 0 : if (tifp->if_format == XFS_DINODE_FMT_EXTENTS &&
547 0 : tifp->if_nextents > XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK))
548 : return -EINVAL;
549 :
550 : /* Check target in extent form to max in temp */
551 0 : if (ifp->if_format == XFS_DINODE_FMT_EXTENTS &&
552 0 : ifp->if_nextents > XFS_IFORK_MAXEXT(tip, XFS_DATA_FORK))
553 : return -EINVAL;
554 :
555 : /*
556 : * If we are in a btree format, check that the temp root block will fit
557 : * in the target and that it has enough extents to be in btree format
558 : * in the target.
559 : *
560 : * Note that we have to be careful to allow btree->extent conversions
561 : * (a common defrag case) which will occur when the temp inode is in
562 : * extent format...
563 : */
564 0 : if (tifp->if_format == XFS_DINODE_FMT_BTREE) {
565 0 : if (xfs_inode_has_attr_fork(ip) &&
566 0 : xfs_bmap_bmdr_space(tifp->if_broot) > xfs_inode_fork_boff(ip))
567 : return -EINVAL;
568 0 : if (tifp->if_nextents <= XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK))
569 : return -EINVAL;
570 : }
571 :
572 : /* Reciprocal target->temp btree format checks */
573 0 : if (ifp->if_format == XFS_DINODE_FMT_BTREE) {
574 0 : if (xfs_inode_has_attr_fork(tip) &&
575 0 : xfs_bmap_bmdr_space(ip->i_df.if_broot) > xfs_inode_fork_boff(tip))
576 : return -EINVAL;
577 0 : if (ifp->if_nextents <= XFS_IFORK_MAXEXT(tip, XFS_DATA_FORK))
578 0 : return -EINVAL;
579 : }
580 :
581 : return 0;
582 : }
583 :
584 : /*
585 : * Fix up the owners of the bmbt blocks to refer to the current inode. The
586 : * change owner scan attempts to order all modified buffers in the current
587 : * transaction. In the event of ordered buffer failure, the offending buffer is
588 : * physically logged as a fallback and the scan returns -EAGAIN. We must roll
589 : * the transaction in this case to replenish the fallback log reservation and
590 : * restart the scan. This process repeats until the scan completes.
591 : */
592 : static int
593 0 : xfs_swap_change_owner(
594 : struct xfs_trans **tpp,
595 : struct xfs_inode *ip,
596 : struct xfs_inode *tmpip)
597 : {
598 0 : int error;
599 0 : struct xfs_trans *tp = *tpp;
600 :
601 0 : do {
602 0 : error = xfs_bmbt_change_owner(tp, ip, XFS_DATA_FORK, ip->i_ino,
603 : NULL);
604 : /* success or fatal error */
605 0 : if (error != -EAGAIN)
606 : break;
607 :
608 0 : error = xfs_trans_roll(tpp);
609 0 : if (error)
610 : break;
611 0 : tp = *tpp;
612 :
613 : /*
614 : * Redirty both inodes so they can relog and keep the log tail
615 : * moving forward.
616 : */
617 0 : xfs_trans_ijoin(tp, ip, 0);
618 0 : xfs_trans_ijoin(tp, tmpip, 0);
619 0 : xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
620 0 : xfs_trans_log_inode(tp, tmpip, XFS_ILOG_CORE);
621 : } while (true);
622 :
623 0 : return error;
624 : }
625 :
626 : /* Swap the extents of two files by swapping data forks. */
627 : STATIC int
628 0 : xfs_swap_extent_forks(
629 : struct xfs_trans **tpp,
630 : struct xfs_swapext_req *req)
631 : {
632 0 : struct xfs_inode *ip = req->ip2;
633 0 : struct xfs_inode *tip = req->ip1;
634 0 : xfs_filblks_t aforkblks = 0;
635 0 : xfs_filblks_t taforkblks = 0;
636 0 : xfs_extnum_t junk;
637 0 : uint64_t tmp;
638 0 : int src_log_flags = XFS_ILOG_CORE;
639 0 : int target_log_flags = XFS_ILOG_CORE;
640 0 : int error;
641 :
642 : /*
643 : * Count the number of extended attribute blocks
644 : */
645 0 : if (xfs_inode_has_attr_fork(ip) && ip->i_af.if_nextents > 0 &&
646 0 : ip->i_af.if_format != XFS_DINODE_FMT_LOCAL) {
647 0 : error = xfs_bmap_count_blocks(*tpp, ip, XFS_ATTR_FORK, &junk,
648 : &aforkblks);
649 0 : if (error)
650 : return error;
651 : }
652 0 : if (xfs_inode_has_attr_fork(tip) && tip->i_af.if_nextents > 0 &&
653 0 : tip->i_af.if_format != XFS_DINODE_FMT_LOCAL) {
654 0 : error = xfs_bmap_count_blocks(*tpp, tip, XFS_ATTR_FORK, &junk,
655 : &taforkblks);
656 0 : if (error)
657 : return error;
658 : }
659 :
660 : /*
661 : * Btree format (v3) inodes have the inode number stamped in the bmbt
662 : * block headers. We can't start changing the bmbt blocks until the
663 : * inode owner change is logged so recovery does the right thing in the
664 : * event of a crash. Set the owner change log flags now and leave the
665 : * bmbt scan as the last step.
666 : */
667 0 : if (xfs_has_v3inodes(ip->i_mount)) {
668 0 : if (ip->i_df.if_format == XFS_DINODE_FMT_BTREE)
669 0 : target_log_flags |= XFS_ILOG_DOWNER;
670 0 : if (tip->i_df.if_format == XFS_DINODE_FMT_BTREE)
671 0 : src_log_flags |= XFS_ILOG_DOWNER;
672 : }
673 :
674 : /*
675 : * Swap the data forks of the inodes
676 : */
677 0 : swap(ip->i_df, tip->i_df);
678 :
679 : /*
680 : * Fix the on-disk inode values
681 : */
682 0 : tmp = (uint64_t)ip->i_nblocks;
683 0 : ip->i_nblocks = tip->i_nblocks - taforkblks + aforkblks;
684 0 : tip->i_nblocks = tmp + taforkblks - aforkblks;
685 :
686 : /*
687 : * The extents in the source inode could still contain speculative
688 : * preallocation beyond EOF (e.g. the file is open but not modified
689 : * while defrag is in progress). In that case, we need to copy over the
690 : * number of delalloc blocks the data fork in the source inode is
691 : * tracking beyond EOF so that when the fork is truncated away when the
692 : * temporary inode is unlinked we don't underrun the i_delayed_blks
693 : * counter on that inode.
694 : */
695 0 : ASSERT(tip->i_delayed_blks == 0);
696 0 : tip->i_delayed_blks = ip->i_delayed_blks;
697 0 : ip->i_delayed_blks = 0;
698 :
699 0 : switch (ip->i_df.if_format) {
700 0 : case XFS_DINODE_FMT_EXTENTS:
701 0 : src_log_flags |= XFS_ILOG_DEXT;
702 0 : break;
703 0 : case XFS_DINODE_FMT_BTREE:
704 0 : ASSERT(!xfs_has_v3inodes(ip->i_mount) ||
705 : (src_log_flags & XFS_ILOG_DOWNER));
706 0 : src_log_flags |= XFS_ILOG_DBROOT;
707 0 : break;
708 : }
709 :
710 0 : switch (tip->i_df.if_format) {
711 0 : case XFS_DINODE_FMT_EXTENTS:
712 0 : target_log_flags |= XFS_ILOG_DEXT;
713 0 : break;
714 0 : case XFS_DINODE_FMT_BTREE:
715 0 : target_log_flags |= XFS_ILOG_DBROOT;
716 0 : ASSERT(!xfs_has_v3inodes(ip->i_mount) ||
717 : (target_log_flags & XFS_ILOG_DOWNER));
718 : break;
719 : }
720 :
721 : /* Do we have to swap reflink flags? */
722 0 : if ((ip->i_diflags2 & XFS_DIFLAG2_REFLINK) ^
723 0 : (tip->i_diflags2 & XFS_DIFLAG2_REFLINK)) {
724 0 : uint64_t f;
725 :
726 0 : f = ip->i_diflags2 & XFS_DIFLAG2_REFLINK;
727 0 : ip->i_diflags2 &= ~XFS_DIFLAG2_REFLINK;
728 0 : ip->i_diflags2 |= tip->i_diflags2 & XFS_DIFLAG2_REFLINK;
729 0 : tip->i_diflags2 &= ~XFS_DIFLAG2_REFLINK;
730 0 : tip->i_diflags2 |= f & XFS_DIFLAG2_REFLINK;
731 : }
732 :
733 : /* Swap the cow forks. */
734 0 : if (xfs_has_reflink(ip->i_mount)) {
735 0 : ASSERT(!ip->i_cowfp ||
736 : ip->i_cowfp->if_format == XFS_DINODE_FMT_EXTENTS);
737 0 : ASSERT(!tip->i_cowfp ||
738 : tip->i_cowfp->if_format == XFS_DINODE_FMT_EXTENTS);
739 :
740 0 : swap(ip->i_cowfp, tip->i_cowfp);
741 :
742 0 : if (ip->i_cowfp && ip->i_cowfp->if_bytes)
743 0 : xfs_inode_set_cowblocks_tag(ip);
744 : else
745 0 : xfs_inode_clear_cowblocks_tag(ip);
746 0 : if (tip->i_cowfp && tip->i_cowfp->if_bytes)
747 0 : xfs_inode_set_cowblocks_tag(tip);
748 : else
749 0 : xfs_inode_clear_cowblocks_tag(tip);
750 : }
751 :
752 0 : xfs_trans_log_inode(*tpp, ip, src_log_flags);
753 0 : xfs_trans_log_inode(*tpp, tip, target_log_flags);
754 :
755 : /*
756 : * The extent forks have been swapped, but crc=1,rmapbt=0 filesystems
757 : * have inode number owner values in the bmbt blocks that still refer to
758 : * the old inode. Scan each bmbt to fix up the owner values with the
759 : * inode number of the current inode.
760 : */
761 0 : if (src_log_flags & XFS_ILOG_DOWNER) {
762 0 : error = xfs_swap_change_owner(tpp, ip, tip);
763 0 : if (error)
764 : return error;
765 : }
766 0 : if (target_log_flags & XFS_ILOG_DOWNER) {
767 0 : error = xfs_swap_change_owner(tpp, tip, ip);
768 0 : if (error)
769 0 : return error;
770 : }
771 :
772 : return 0;
773 : }
774 :
775 : /*
776 : * There may be partially written rt extents lurking in the ranges to be
777 : * swapped. According to the rules for realtime files with big rt extents, we
778 : * must guarantee that an outside observer (an IO thread, realistically) never
779 : * can see multiple physical rt extents mapped to the same logical file rt
780 : * extent. The deferred bmap log intent items that we use under the hood
781 : * operate on single block mappings and not rt extents, which means we must
782 : * have a strategy to ensure that log recovery after a failure won't stop in
783 : * the middle of an rt extent.
784 : *
785 : * The preferred strategy is to use deferred extent swap log intent items to
786 : * track the status of the overall swap operation so that we can complete the
787 : * work during crash recovery. If that isn't possible, we fall back to
788 : * requiring the selected mappings in both forks to be aligned to rt extent
789 : * boundaries. As an aside, the old fork swap routine didn't have this
790 : * requirement, but at an extreme cost in flexibility (full files only, and no
791 : * support if rmapbt is enabled).
792 : */
793 : static bool
794 264526 : xfs_xchg_range_need_rt_conversion(
795 : struct xfs_inode *ip,
796 : unsigned int xchg_flags)
797 : {
798 264526 : struct xfs_mount *mp = ip->i_mount;
799 :
800 : /*
801 : * Caller got permission to use logged swapext, so log recovery will
802 : * finish the swap and not leave us with partially swapped rt extents
803 : * exposed to userspace.
804 : */
805 264526 : if (xchg_flags & XFS_XCHG_RANGE_LOGGED)
806 : return false;
807 :
808 : /*
809 : * If we can't use log intent items at all, the only supported
810 : * operation is full fork swaps, so no conversions are needed.
811 : * The range requirements are enforced by the swapext code itself.
812 : */
813 264526 : if (!xfs_swapext_supported(mp))
814 : return false;
815 :
816 : /* Conversion is only needed for realtime files with big rt extents */
817 264526 : return xfs_inode_has_bigrtextents(ip);
818 : }
819 :
820 : /*
821 : * Check the alignment of an exchange request when the allocation unit size
822 : * isn't a power of two. The VFS helpers use (fast) bitmask-based alignment
823 : * checks, but here we have to use slow long division.
824 : */
825 : static int
826 0 : xfs_xchg_range_check_rtalign(
827 : struct xfs_inode *ip1,
828 : struct xfs_inode *ip2,
829 : const struct xfs_exch_range *fxr)
830 : {
831 0 : struct xfs_mount *mp = ip1->i_mount;
832 0 : uint32_t rextbytes;
833 0 : uint64_t length = fxr->length;
834 0 : uint64_t blen;
835 0 : loff_t size1, size2;
836 :
837 0 : rextbytes = XFS_FSB_TO_B(mp, mp->m_sb.sb_rextsize);
838 0 : size1 = i_size_read(VFS_I(ip1));
839 0 : size2 = i_size_read(VFS_I(ip2));
840 :
841 : /* The start of both ranges must be aligned to a rt extent. */
842 0 : if (!isaligned_64(fxr->file1_offset, rextbytes) ||
843 0 : !isaligned_64(fxr->file2_offset, rextbytes))
844 : return -EINVAL;
845 :
846 : /*
847 : * If the caller asked for full files, check that the offset/length
848 : * values cover all of both files.
849 : */
850 0 : if ((fxr->flags & XFS_EXCH_RANGE_FULL_FILES) &&
851 0 : (fxr->file1_offset != 0 || fxr->file2_offset != 0 ||
852 0 : fxr->length != size1 || fxr->length != size2))
853 : return -EDOM;
854 :
855 0 : if (fxr->flags & XFS_EXCH_RANGE_TO_EOF)
856 0 : length = max_t(int64_t, size1 - fxr->file1_offset,
857 : size2 - fxr->file2_offset);
858 :
859 : /*
860 : * If the user wanted us to exchange up to file1's EOF, round up
861 : * to the next rt extent boundary for this check. Do the same for
862 : * file2.
863 : *
864 : * Otherwise, reject the range length if it's not rt extent aligned.
865 : * We already confirmed the starting offsets' rt extent block
866 : * alignment.
867 : */
868 0 : if (fxr->file1_offset + length == size1)
869 0 : blen = roundup_64(size1, rextbytes) - fxr->file1_offset;
870 0 : else if (fxr->file2_offset + length == size2)
871 0 : blen = roundup_64(size2, rextbytes) - fxr->file2_offset;
872 0 : else if (!isaligned_64(length, rextbytes))
873 : return -EINVAL;
874 : else
875 : blen = length;
876 :
877 : /* Don't allow overlapped exchanges within the same file. */
878 0 : if (ip1 == ip2 &&
879 0 : fxr->file2_offset + blen > fxr->file1_offset &&
880 0 : fxr->file1_offset + blen > fxr->file2_offset)
881 : return -EINVAL;
882 :
883 : /*
884 : * Ensure that we don't exchange a partial EOF rt extent into the
885 : * middle of another file.
886 : */
887 0 : if (isaligned_64(length, rextbytes))
888 : return 0;
889 :
890 0 : blen = length;
891 0 : if (fxr->file2_offset + length < size2)
892 0 : blen = rounddown_64(blen, rextbytes);
893 :
894 0 : if (fxr->file1_offset + blen < size1)
895 0 : blen = rounddown_64(blen, rextbytes);
896 :
897 0 : return blen == length ? 0 : -EINVAL;
898 : }
899 :
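/*
 * A minimal sketch (hypothetical helpers with assumed semantics) of why
 * the routine above must divide: a bitmask alignment test is only correct
 * when the allocation unit is a power of two. With a 12288-byte realtime
 * extent (three 4096-byte blocks), aligned_mask() wrongly accepts offset
 * 16384 while aligned_div() correctly rejects it.
 */
#include <stdbool.h>
#include <stdint.h>

static bool aligned_div(uint64_t value, uint32_t unit)	/* any unit size */
{
	return value % unit == 0;
}

static bool aligned_mask(uint64_t value, uint32_t unit)	/* power of two only */
{
	return (value & ((uint64_t)unit - 1)) == 0;
}
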
900 : /* Prepare two files to have their data exchanged. */
901 : int
902 264682 : xfs_xchg_range_prep(
903 : struct file *file1,
904 : struct file *file2,
905 : struct xfs_exch_range *fxr,
906 : unsigned int xchg_flags)
907 : {
908 264682 : struct xfs_inode *ip1 = XFS_I(file_inode(file1));
909 264682 : struct xfs_inode *ip2 = XFS_I(file_inode(file2));
910 264682 : unsigned int alloc_unit = xfs_inode_alloc_unitsize(ip2);
911 264682 : int error;
912 :
913 264682 : trace_xfs_xchg_range_prep(ip1, fxr, ip2, 0);
914 :
915 : /* Verify both files are either real-time or non-realtime */
916 649078 : if (XFS_IS_REALTIME_INODE(ip1) != XFS_IS_REALTIME_INODE(ip2))
917 : return -EINVAL;
918 :
919 : /* Check non-power of two alignment issues, if necessary. */
920 337166 : if (XFS_IS_REALTIME_INODE(ip2) && !is_power_of_2(alloc_unit)) {
921 0 : error = xfs_xchg_range_check_rtalign(ip1, ip2, fxr);
922 0 : if (error)
923 : return error;
924 :
925 : /* Do the VFS checks with the regular block alignment. */
926 0 : alloc_unit = ip1->i_mount->m_sb.sb_blocksize;
927 : }
928 :
929 264682 : error = xfs_exch_range_prep(file1, file2, fxr, alloc_unit);
930 264682 : if (error || fxr->length == 0)
931 : return error;
932 :
933 : /* Attach dquots to both inodes before changing block maps. */
934 264526 : error = xfs_qm_dqattach(ip2);
935 264526 : if (error)
936 : return error;
937 264526 : error = xfs_qm_dqattach(ip1);
938 264526 : if (error)
939 : return error;
940 :
941 264526 : trace_xfs_xchg_range_flush(ip1, fxr, ip2, 0);
942 :
943 : /* Flush the relevant ranges of both files. */
944 264526 : error = xfs_flush_unmap_range(ip2, fxr->file2_offset, fxr->length);
945 264526 : if (error)
946 : return error;
947 264526 : error = xfs_flush_unmap_range(ip1, fxr->file1_offset, fxr->length);
948 264526 : if (error)
949 : return error;
950 :
951 : /*
952 : * Cancel CoW fork preallocations for the ranges of both files. The
953 : * prep function should have flushed all the dirty data, so the only
954 : * extents remaining should be speculative.
955 : */
956 529052 : if (xfs_inode_has_cow_data(ip1)) {
957 44217 : error = xfs_reflink_cancel_cow_range(ip1, fxr->file1_offset,
958 44217 : fxr->length, true);
959 44217 : if (error)
960 : return error;
961 : }
962 :
963 529052 : if (xfs_inode_has_cow_data(ip2)) {
964 42630 : error = xfs_reflink_cancel_cow_range(ip2, fxr->file2_offset,
965 42630 : fxr->length, true);
966 42630 : if (error)
967 : return error;
968 : }
969 :
970 : /* Convert unwritten sub-extent mappings if required. */
971 264526 : if (xfs_xchg_range_need_rt_conversion(ip2, xchg_flags)) {
972 0 : error = xfs_rtfile_convert_unwritten(ip2, fxr->file2_offset,
973 : fxr->length);
974 0 : if (error)
975 : return error;
976 :
977 0 : error = xfs_rtfile_convert_unwritten(ip1, fxr->file1_offset,
978 : fxr->length);
979 0 : if (error)
980 0 : return error;
981 : }
982 :
983 : return 0;
984 : }
985 :
986 : #define QRETRY_IP1 (0x1)
987 : #define QRETRY_IP2 (0x2)
988 :
989 : /*
990 : * Obtain a quota reservation to make sure we don't hit EDQUOT. We can skip
991 : * this if quota enforcement is disabled or if both inodes' dquots are the
992 : * same. The qretry bitmask must be initialized to zero before the first
993 : * call to this function.
994 : */
995 : STATIC int
996 264640 : xfs_xchg_range_reserve_quota(
997 : struct xfs_trans *tp,
998 : const struct xfs_swapext_req *req,
999 : unsigned int *qretry)
1000 : {
1001 264640 : int64_t ddelta, rdelta;
1002 264640 : int ip1_error = 0;
1003 264640 : int error;
1004 :
1005 : /*
1006 : * Don't bother with a quota reservation if we're not enforcing them
1007 : * or the two inodes have the same dquots.
1008 : */
1009 264640 : if (!XFS_IS_QUOTA_ON(tp->t_mountp) || req->ip1 == req->ip2 ||
1010 6528 : (req->ip1->i_udquot == req->ip2->i_udquot &&
1011 6512 : req->ip1->i_gdquot == req->ip2->i_gdquot &&
1012 6506 : req->ip1->i_pdquot == req->ip2->i_pdquot))
1013 : return 0;
1014 :
1015 22 : *qretry = 0;
1016 :
1017 : /*
1018 : * For each file, compute the net gain in the number of regular blocks
1019 : * that will be mapped into that file and reserve that much quota. The
1020 : * quota counts must be able to absorb at least that much space.
1021 : */
1022 22 : ddelta = req->ip2_bcount - req->ip1_bcount;
1023 22 : rdelta = req->ip2_rtbcount - req->ip1_rtbcount;
1024 22 : if (ddelta > 0 || rdelta > 0) {
1025 8 : error = xfs_trans_reserve_quota_nblks(tp, req->ip1,
1026 : ddelta > 0 ? ddelta : 0,
1027 : rdelta > 0 ? rdelta : 0,
1028 : false);
1029 8 : if (error == -EDQUOT || error == -ENOSPC) {
1030 : /*
1031 : * Save this error and see what happens if we try to
1032 : * reserve quota for ip2. Then report both.
1033 : */
1034 4 : *qretry |= QRETRY_IP1;
1035 4 : ip1_error = error;
1036 4 : error = 0;
1037 : }
1038 8 : if (error)
1039 : return error;
1040 : }
1041 22 : if (ddelta < 0 || rdelta < 0) {
1042 2 : error = xfs_trans_reserve_quota_nblks(tp, req->ip2,
1043 : ddelta < 0 ? -ddelta : 0,
1044 : rdelta < 0 ? -rdelta : 0,
1045 : false);
1046 2 : if (error == -EDQUOT || error == -ENOSPC)
1047 0 : *qretry |= QRETRY_IP2;
1048 2 : if (error)
1049 : return error;
1050 : }
1051 22 : if (ip1_error)
1052 : return ip1_error;
1053 :
1054 : /*
1055 : * For each file, forcibly reserve the gross gain in mapped blocks so
1056 : * that we don't trip over any quota block reservation assertions.
1057 : * We must reserve the gross gain because the quota code subtracts from
1058 : * bcount the number of blocks that we unmap; it does not add that
1059 : * quantity back to the quota block reservation.
1060 : */
1061 18 : error = xfs_trans_reserve_quota_nblks(tp, req->ip1, req->ip1_bcount,
1062 18 : req->ip1_rtbcount, true);
1063 18 : if (error)
1064 : return error;
1065 :
1066 18 : return xfs_trans_reserve_quota_nblks(tp, req->ip2, req->ip2_bcount,
1067 18 : req->ip2_rtbcount, true);
1068 : }
1069 :
1070 : /*
1071 : * Get permission to use log-assisted atomic exchange of file extents.
1072 : *
1073 : * Callers must hold the IOLOCK and MMAPLOCK of both files. They must not be
1074 : * running any transactions or hold any ILOCKS. If @use_logging is set after a
1075 : * successful return, callers must call xfs_xchg_range_rele_log_assist after
1076 : * the exchange is completed.
1077 : */
1078 : int
1079 672766 : xfs_xchg_range_grab_log_assist(
1080 : struct xfs_mount *mp,
1081 : bool force,
1082 : bool *use_logging)
1083 : {
1084 672766 : int error = 0;
1085 :
1086 : /*
1087 : * Protect ourselves from an idle log clearing the atomic swapext
1088 : * log incompat feature bit.
1089 : */
1090 672766 : xlog_use_incompat_feat(mp->m_log, XLOG_INCOMPAT_FEAT_SWAPEXT);
1091 672768 : *use_logging = true;
1092 :
1093 : /*
1094 : * If log-assisted swapping is already enabled, the caller can use the
1095 : * log assisted swap functions with the log-incompat reference we got.
1096 : */
1097 1345536 : if (xfs_sb_version_haslogswapext(&mp->m_sb))
1098 : return 0;
1099 :
1100 : /*
1101 : * If the caller doesn't /require/ log-assisted swapping, drop the
1102 : * log-incompat feature protection and exit. The caller cannot use
1103 : * log assisted swapping.
1104 : */
1105 18223 : if (!force)
1106 6440 : goto drop_incompat;
1107 :
1108 : /*
1109 : * Caller requires log-assisted swapping but the fs feature set isn't
1110 : * rich enough to support it. Bail out.
1111 : */
1112 11783 : if (!xfs_swapext_supported(mp)) {
1113 0 : error = -EOPNOTSUPP;
1114 0 : goto drop_incompat;
1115 : }
1116 :
1117 11783 : error = xfs_add_incompat_log_feature(mp,
1118 : XFS_SB_FEAT_INCOMPAT_LOG_SWAPEXT);
1119 11782 : if (error)
1120 0 : goto drop_incompat;
1121 :
1122 11782 : xfs_warn_mount(mp, XFS_OPSTATE_WARNED_SWAPEXT,
1123 : "EXPERIMENTAL atomic file range swap feature in use. Use at your own risk!");
1124 :
1125 : return 0;
1126 6440 : drop_incompat:
1127 6440 : xlog_drop_incompat_feat(mp->m_log, XLOG_INCOMPAT_FEAT_SWAPEXT);
1128 6440 : *use_logging = false;
1129 6440 : return error;
1130 : }
1131 :
1132 : /* Release permission to use log-assisted extent swapping. */
1133 : void
1134 408136 : xfs_xchg_range_rele_log_assist(
1135 : struct xfs_mount *mp)
1136 : {
1137 666336 : xlog_drop_incompat_feat(mp->m_log, XLOG_INCOMPAT_FEAT_SWAPEXT);
1138 258200 : }
1139 :
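/*
 * An illustrative caller sketch (hypothetical function, mirroring the use
 * in xfs_file_xchg_range above) of the pairing contract for the two
 * routines above: a grab that returns success with *use_logging set must
 * be matched by a release once the exchange work is finished.
 */
static int example_logged_swap(struct xfs_mount *mp, bool require_atomic)
{
	bool use_logging = false;
	int error;

	error = xfs_xchg_range_grab_log_assist(mp, require_atomic, &use_logging);
	if (error)
		return error;

	/* ... perform the (possibly log-assisted) exchange here ... */

	if (use_logging)
		xfs_xchg_range_rele_log_assist(mp);
	return 0;
}
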
1140 : /* Decide if we can use the old data fork exchange code. */
1141 : static inline bool
1142 0 : xfs_xchg_use_forkswap(
1143 : const struct xfs_exch_range *fxr,
1144 : struct xfs_inode *ip1,
1145 : struct xfs_inode *ip2)
1146 : {
1147 0 : if (!(fxr->flags & XFS_EXCH_RANGE_NONATOMIC))
1148 : return false;
1149 0 : if (!(fxr->flags & XFS_EXCH_RANGE_FULL_FILES))
1150 : return false;
1151 0 : if (fxr->flags & XFS_EXCH_RANGE_TO_EOF)
1152 : return false;
1153 0 : if (fxr->file1_offset != 0 || fxr->file2_offset != 0)
1154 : return false;
1155 0 : if (fxr->length != ip1->i_disk_size)
1156 : return false;
1157 0 : if (fxr->length != ip2->i_disk_size)
1158 0 : return false;
1159 : return true;
1160 : }
1161 :
1162 : enum xchg_strategy {
1163 : SWAPEXT = 1, /* xfs_swapext() */
1164 : FORKSWAP = 2, /* exchange forks */
1165 : };
1166 :
1167 : /* Exchange the contents of two files. */
1168 : int
1169 264640 : xfs_xchg_range(
1170 : struct xfs_inode *ip1,
1171 : struct xfs_inode *ip2,
1172 : const struct xfs_exch_range *fxr,
1173 : unsigned int xchg_flags)
1174 : {
1175 264640 : struct xfs_mount *mp = ip1->i_mount;
1176 264640 : struct xfs_swapext_req req = {
1177 : .ip1 = ip1,
1178 : .ip2 = ip2,
1179 : .whichfork = XFS_DATA_FORK,
1180 264640 : .startoff1 = XFS_B_TO_FSBT(mp, fxr->file1_offset),
1181 264640 : .startoff2 = XFS_B_TO_FSBT(mp, fxr->file2_offset),
1182 264640 : .blockcount = XFS_B_TO_FSB(mp, fxr->length),
1183 : };
1184 264640 : struct xfs_trans *tp;
1185 264640 : unsigned int qretry;
1186 264640 : unsigned int flags = 0;
1187 264640 : bool retried = false;
1188 264640 : enum xchg_strategy strategy;
1189 264640 : int error;
1190 :
1191 264640 : trace_xfs_xchg_range(ip1, fxr, ip2, xchg_flags);
1192 :
1193 264640 : if (fxr->flags & XFS_EXCH_RANGE_TO_EOF)
1194 68 : req.req_flags |= XFS_SWAP_REQ_SET_SIZES;
1195 264640 : if (fxr->flags & XFS_EXCH_RANGE_FILE1_WRITTEN)
1196 12 : req.req_flags |= XFS_SWAP_REQ_INO1_WRITTEN;
1197 264640 : if (xchg_flags & XFS_XCHG_RANGE_LOGGED)
1198 258200 : req.req_flags |= XFS_SWAP_REQ_LOGGED;
1199 :
1200 : /*
1201 : * Round the request length up to the nearest fundamental unit of
1202 : * allocation. The prep function already checked that the request
1203 : * offsets and length in @fxr are safe to round up.
1204 : */
1205 264640 : if (XFS_IS_REALTIME_INODE(ip2))
1206 72484 : req.blockcount = xfs_rtb_roundup_rtx(mp, req.blockcount);
1207 :
1208 264640 : error = xfs_xchg_range_estimate(&req);
1209 264640 : if (error)
1210 : return error;
1211 :
1212 : /*
1213 : * We haven't decided which exchange strategy we want to use yet, but
1214 : * here we must choose if we want freed blocks during the swap to be
1215 : * added to the transaction block reservation (RES_FDBLKS) or freed
1216 : * into the global fdblocks. The legacy fork swap mechanism doesn't
1217 : * free any blocks, so it doesn't require it. It is also the only
1218 : * option that works for older filesystems.
1219 : *
1220 : * The bmap log intent items that were added with rmap and reflink can
1221 : * change the bmbt shape, so the intent-based swap strategies require
1222 : * us to set RES_FDBLKS.
1223 : */
1224 264638 : if (xfs_has_lazysbcount(mp))
1225 264638 : flags |= XFS_TRANS_RES_FDBLKS;
1226 :
1227 264638 : retry:
1228 : /* Allocate the transaction, lock the inodes, and join them. */
1229 264640 : error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, req.resblks, 0,
1230 : flags, &tp);
1231 264640 : if (error)
1232 0 : return error;
1233 :
1234 264640 : xfs_xchg_range_ilock(tp, ip1, ip2);
1235 :
1236 264640 : trace_xfs_swap_extent_before(ip2, 0);
1237 264640 : trace_xfs_swap_extent_before(ip1, 1);
1238 :
1239 264640 : if (fxr->flags & XFS_EXCH_RANGE_FILE2_FRESH)
1240 6522 : trace_xfs_xchg_range_freshness(ip2, fxr);
1241 :
1242 : /*
1243 : * Now that we've excluded all other inode metadata changes by taking
1244 : * the ILOCK, repeat the freshness check.
1245 : */
1246 264640 : error = xfs_exch_range_check_fresh(VFS_I(ip2), fxr);
1247 264640 : if (error)
1248 0 : goto out_trans_cancel;
1249 :
1250 264640 : error = xfs_swapext_check_extents(mp, &req);
1251 264640 : if (error)
1252 0 : goto out_trans_cancel;
1253 :
1254 : /*
1255 : * Reserve ourselves some quota if any of them are in enforcing mode.
1256 : * In theory we only need enough to satisfy the change in the number
1257 : * of blocks between the two ranges being remapped.
1258 : */
1259 264640 : error = xfs_xchg_range_reserve_quota(tp, &req, &qretry);
1260 264640 : if ((error == -EDQUOT || error == -ENOSPC) && !retried) {
1261 2 : xfs_trans_cancel(tp);
1262 2 : xfs_xchg_range_iunlock(ip1, ip2);
1263 2 : if (qretry & QRETRY_IP1)
1264 2 : xfs_blockgc_free_quota(ip1, 0);
1265 2 : if (qretry & QRETRY_IP2)
1266 0 : xfs_blockgc_free_quota(ip2, 0);
1267 2 : retried = true;
1268 2 : goto retry;
1269 : }
1270 264638 : if (error)
1271 2 : goto out_trans_cancel;
1272 :
1273 271072 : if ((xchg_flags & XFS_XCHG_RANGE_LOGGED) || xfs_swapext_supported(mp)) {
1274 : /*
1275 : * xfs_swapext() uses deferred bmap log intent items to swap
1276 : * extents between file forks. If the atomic log swap feature
1277 : * is enabled, it will also use swapext log intent items to
1278 : * restart the operation in case of failure.
1279 : *
1280 : * This means that we can use it if we previously obtained
1281 : * permission from the log to use log-assisted atomic extent
1282 : * swapping; or if the fs supports rmap or reflink and the
1283 : * user said NONATOMIC.
1284 : */
1285 : strategy = SWAPEXT;
1286 0 : } else if (xfs_xchg_use_forkswap(fxr, ip1, ip2)) {
1287 : /*
1288 : * Exchange the file contents by using the old bmap fork
1289 : * exchange code, if we're a defrag tool doing a full file
1290 : * swap.
1291 : */
1292 0 : strategy = FORKSWAP;
1293 :
1294 0 : error = xfs_swap_extents_check_format(ip2, ip1);
1295 0 : if (error) {
1296 0 : xfs_notice(mp,
1297 : "%s: inode 0x%llx format is incompatible for exchanging.",
1298 : __func__, ip2->i_ino);
1299 0 : goto out_trans_cancel;
1300 : }
1301 : } else {
1302 : /* We cannot exchange the file contents. */
1303 0 : error = -EOPNOTSUPP;
1304 0 : goto out_trans_cancel;
1305 : }
1306 :
1307 : /* If we got this far on a dry run, all parameters are ok. */
1308 264636 : if (fxr->flags & XFS_EXCH_RANGE_DRY_RUN)
1309 116 : goto out_trans_cancel;
1310 :
1311 : /* Update the mtime and ctime of both files. */
1312 264520 : if (xchg_flags & XFS_XCHG_RANGE_UPD_CMTIME1)
1313 264520 : xfs_trans_ichgtime(tp, ip1,
1314 : XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
1315 264520 : if (xchg_flags & XFS_XCHG_RANGE_UPD_CMTIME2)
1316 258562 : xfs_trans_ichgtime(tp, ip2,
1317 : XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
1318 :
1319 264520 : if (strategy == SWAPEXT) {
1320 264520 : xfs_swapext(tp, &req);
1321 : } else {
1322 0 : error = xfs_swap_extent_forks(&tp, &req);
1323 0 : if (error)
1324 0 : goto out_trans_cancel;
1325 : }
1326 :
1327 : /*
1328 : * Force the log to persist metadata updates if the caller or the
1329 : * administrator requires this. The VFS prep function already flushed
1330 : * the relevant parts of the page cache.
1331 : */
1332 264520 : if (xfs_has_wsync(mp) || (fxr->flags & XFS_EXCH_RANGE_FSYNC))
1333 5948 : xfs_trans_set_sync(tp);
1334 :
1335 264520 : error = xfs_trans_commit(tp);
1336 :
1337 264520 : trace_xfs_swap_extent_after(ip2, 0);
1338 264520 : trace_xfs_swap_extent_after(ip1, 1);
1339 :
1340 264520 : if (error)
1341 12 : goto out_unlock;
1342 :
1343 : /*
1344 : * If the caller wanted us to exchange the contents of two complete
1345 : * files of unequal length, exchange the incore sizes now. This should
1346 : * be safe because we flushed both files' page caches, moved all the
1347 : * extents, and updated the ondisk sizes.
1348 : */
1349 264508 : if (fxr->flags & XFS_EXCH_RANGE_TO_EOF) {
1350 66 : loff_t temp;
1351 :
1352 66 : temp = i_size_read(VFS_I(ip2));
1353 66 : i_size_write(VFS_I(ip2), i_size_read(VFS_I(ip1)));
1354 66 : i_size_write(VFS_I(ip1), temp);
1355 : }
1356 :
1357 264442 : out_unlock:
1358 264638 : xfs_xchg_range_iunlock(ip1, ip2);
1359 264638 : return error;
1360 :
1361 118 : out_trans_cancel:
1362 118 : xfs_trans_cancel(tp);
1363 118 : goto out_unlock;
1364 : }