LCOV - code coverage report
Current view: top level - fs/ext4 - extents.c (source / functions)
Test: fstests of 6.5.0-rc4-xfsx @ Mon Jul 31 20:08:34 PDT 2023
Date: 2023-07-31 20:08:34
                   Hit      Total    Coverage
Lines:            2196       2873      76.4 %
Functions:          77         87      88.5 %

          Line data    Source code
       1             : // SPDX-License-Identifier: GPL-2.0
       2             : /*
       3             :  * Copyright (c) 2003-2006, Cluster File Systems, Inc, info@clusterfs.com
       4             :  * Written by Alex Tomas <alex@clusterfs.com>
       5             :  *
       6             :  * Architecture independence:
       7             :  *   Copyright (c) 2005, Bull S.A.
       8             :  *   Written by Pierre Peiffer <pierre.peiffer@bull.net>
       9             :  */
      10             : 
      11             : /*
      12             :  * Extents support for EXT4
      13             :  *
      14             :  * TODO:
      15             :  *   - ext4*_error() should be used in some situations
      16             :  *   - analyze all BUG()/BUG_ON(), use -EIO where appropriate
      17             :  *   - smart tree reduction
      18             :  */
      19             : 
      20             : #include <linux/fs.h>
      21             : #include <linux/time.h>
      22             : #include <linux/jbd2.h>
      23             : #include <linux/highuid.h>
      24             : #include <linux/pagemap.h>
      25             : #include <linux/quotaops.h>
      26             : #include <linux/string.h>
      27             : #include <linux/slab.h>
      28             : #include <linux/uaccess.h>
      29             : #include <linux/fiemap.h>
      30             : #include <linux/iomap.h>
      31             : #include <linux/sched/mm.h>
      32             : #include "ext4_jbd2.h"
      33             : #include "ext4_extents.h"
      34             : #include "xattr.h"
      35             : 
      36             : #include <trace/events/ext4.h>
      37             : 
      38             : /*
      39             :  * used by extent splitting.
      40             :  */
      41             : #define EXT4_EXT_MAY_ZEROOUT    0x1  /* safe to zeroout if split fails \
      42             :                                         due to ENOSPC */
      43             : #define EXT4_EXT_MARK_UNWRIT1   0x2  /* mark first half unwritten */
      44             : #define EXT4_EXT_MARK_UNWRIT2   0x4  /* mark second half unwritten */
      45             : 
      46             : #define EXT4_EXT_DATA_VALID1    0x8  /* first half contains valid data */
      47             : #define EXT4_EXT_DATA_VALID2    0x10 /* second half contains valid data */
      48             : 
      49    18650114 : static __le32 ext4_extent_block_csum(struct inode *inode,
      50             :                                      struct ext4_extent_header *eh)
      51             : {
      52    18650114 :         struct ext4_inode_info *ei = EXT4_I(inode);
      53    18650114 :         struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
      54    18650114 :         __u32 csum;
      55             : 
      56    18650114 :         csum = ext4_chksum(sbi, ei->i_csum_seed, (__u8 *)eh,
      57    18650114 :                            EXT4_EXTENT_TAIL_OFFSET(eh));
      58    18650582 :         return cpu_to_le32(csum);
      59             : }
      60             : 
      61       12445 : static int ext4_extent_block_csum_verify(struct inode *inode,
      62             :                                          struct ext4_extent_header *eh)
      63             : {
      64       12445 :         struct ext4_extent_tail *et;
      65             : 
      66       12445 :         if (!ext4_has_metadata_csum(inode->i_sb))
      67             :                 return 1;
      68             : 
      69       12445 :         et = find_ext4_extent_tail(eh);
      70       12445 :         if (et->et_checksum != ext4_extent_block_csum(inode, eh))
      71           0 :                 return 0;
      72             :         return 1;
      73             : }
      74             : 
      75    18638328 : static void ext4_extent_block_csum_set(struct inode *inode,
      76             :                                        struct ext4_extent_header *eh)
      77             : {
      78    18638328 :         struct ext4_extent_tail *et;
      79             : 
      80    18638328 :         if (!ext4_has_metadata_csum(inode->i_sb))
      81             :                 return;
      82             : 
      83    18637638 :         et = find_ext4_extent_tail(eh);
      84    18637638 :         et->et_checksum = ext4_extent_block_csum(inode, eh);
      85             : }
      86             : 
      87             : static int ext4_split_extent_at(handle_t *handle,
      88             :                              struct inode *inode,
      89             :                              struct ext4_ext_path **ppath,
      90             :                              ext4_lblk_t split,
      91             :                              int split_flag,
      92             :                              int flags);
      93             : 
      94          74 : static int ext4_ext_trunc_restart_fn(struct inode *inode, int *dropped)
      95             : {
      96             :         /*
      97             :          * Drop i_data_sem to avoid deadlock with ext4_map_blocks.  At this
      98             :          * moment, get_block can be called only for blocks inside i_size since
       99             :          * page cache has already been dropped and writes are blocked by
     100             :          * i_rwsem. So we can safely drop the i_data_sem here.
     101             :          */
     102          74 :         BUG_ON(EXT4_JOURNAL(inode) == NULL);
     103          74 :         ext4_discard_preallocations(inode, 0);
     104          74 :         up_write(&EXT4_I(inode)->i_data_sem);
     105          74 :         *dropped = 1;
     106          74 :         return 0;
     107             : }
     108             : 
     109    35654416 : static void ext4_ext_drop_refs(struct ext4_ext_path *path)
     110             : {
     111    35654416 :         int depth, i;
     112             : 
     113    35654416 :         if (!path)
     114             :                 return;
     115    27725282 :         depth = path->p_depth;
     116    87398444 :         for (i = 0; i <= depth; i++, path++) {
     117    59680900 :                 brelse(path->p_bh);
     118    59673162 :                 path->p_bh = NULL;
     119             :         }
     120             : }
     121             : 
     122     3775446 : void ext4_free_ext_path(struct ext4_ext_path *path)
     123             : {
     124     3775446 :         ext4_ext_drop_refs(path);
     125    24516567 :         kfree(path);
     126        1045 : }
     127             : 
     128             : /*
      129             :  * Make sure 'handle' has at least 'check_cred' credits. If not, restart the
      130             :  * transaction with 'restart_cred' credits. The function drops i_data_sem
      131             :  * when restarting the transaction and re-acquires it once it is restarted.
     132             :  *
     133             :  * The function returns 0 on success, 1 if transaction had to be restarted,
     134             :  * and < 0 in case of fatal error.
     135             :  */
     136     2887927 : int ext4_datasem_ensure_credits(handle_t *handle, struct inode *inode,
     137             :                                 int check_cred, int restart_cred,
     138             :                                 int revoke_cred)
     139             : {
     140     2887927 :         int ret;
     141     2887927 :         int dropped = 0;
     142             : 
     143     2888001 :         ret = ext4_journal_ensure_credits_fn(handle, check_cred, restart_cred,
     144             :                 revoke_cred, ext4_ext_trunc_restart_fn(inode, &dropped));
     145     2887880 :         if (dropped)
     146          74 :                 down_write(&EXT4_I(inode)->i_data_sem);
     147     2887880 :         return ret;
     148             : }
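
The comment and helper above define a small contract: ensure the handle has at
least check_cred credits, restart the transaction (temporarily dropping
i_data_sem via ext4_ext_trunc_restart_fn) when it does not, and report 0, 1, or
a negative error. A minimal user-space sketch of that pattern, using
hypothetical stand-in names (toy_handle, toy_restart, toy_ensure_credits)
rather than the real kernel API:

    #include <stdio.h>

    struct toy_handle { int credits_left; };

    /* Stand-in for restarting a journal transaction with fresh credits. */
    static int toy_restart(struct toy_handle *h, int restart_cred)
    {
            h->credits_left = restart_cred;
            return 0;
    }

    /* Returns 0 if enough credits remain, 1 if the transaction had to be
     * restarted; a real implementation would return < 0 on fatal error. */
    static int toy_ensure_credits(struct toy_handle *h, int check_cred,
                                  int restart_cred, int *dropped)
    {
            if (h->credits_left >= check_cred)
                    return 0;
            *dropped = 1;               /* the caller dropped its lock here */
            return toy_restart(h, restart_cred) ? -1 : 1;
    }

    int main(void)
    {
            struct toy_handle h = { .credits_left = 2 };
            int dropped = 0;
            int ret = toy_ensure_credits(&h, 8, 32, &dropped);

            /* Like the kernel helper, re-take the lock only if it was dropped. */
            printf("ret=%d dropped=%d credits=%d\n", ret, dropped, h.credits_left);
            return 0;
    }
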
     149             : 
     150             : /*
     151             :  * could return:
     152             :  *  - EROFS
     153             :  *  - ENOMEM
     154             :  */
     155    21847894 : static int ext4_ext_get_access(handle_t *handle, struct inode *inode,
     156             :                                 struct ext4_ext_path *path)
     157             : {
     158    21847894 :         int err = 0;
     159             : 
     160    21847894 :         if (path->p_bh) {
     161             :                 /* path points to block */
     162    18605051 :                 BUFFER_TRACE(path->p_bh, "get_write_access");
     163    18605051 :                 err = ext4_journal_get_write_access(handle, inode->i_sb,
     164             :                                                     path->p_bh, EXT4_JTR_NONE);
     165             :                 /*
     166             :                  * The extent buffer's verified bit will be set again in
     167             :                  * __ext4_ext_dirty(). We could leave an inconsistent
      168             :                  * buffer if the extents updating procedure breaks off due
      169             :                  * to some error, so force it to be checked again.
     170             :                  */
     171    18605406 :                 if (!err)
     172    18605405 :                         clear_buffer_verified(path->p_bh);
     173             :         }
     174             :         /* path points to leaf/index in inode body */
     175             :         /* we use in-core data, no need to protect them */
     176    21848477 :         return err;
     177             : }
     178             : 
     179             : /*
     180             :  * could return:
     181             :  *  - EROFS
     182             :  *  - ENOMEM
     183             :  *  - EIO
     184             :  */
     185    21847624 : static int __ext4_ext_dirty(const char *where, unsigned int line,
     186             :                             handle_t *handle, struct inode *inode,
     187             :                             struct ext4_ext_path *path)
     188             : {
     189    21847624 :         int err;
     190             : 
     191    21847624 :         WARN_ON(!rwsem_is_locked(&EXT4_I(inode)->i_data_sem));
     192    21847624 :         if (path->p_bh) {
     193    18564182 :                 ext4_extent_block_csum_set(inode, ext_block_hdr(path->p_bh));
     194             :                 /* path points to block */
     195    18564312 :                 err = __ext4_handle_dirty_metadata(where, line, handle,
     196             :                                                    inode, path->p_bh);
     197             :                 /* Extents updating done, re-set verified flag */
     198    18564508 :                 if (!err)
     199    18564443 :                         set_buffer_verified(path->p_bh);
     200             :         } else {
     201             :                 /* path points to leaf/index in inode body */
     202     3283442 :                 err = ext4_mark_inode_dirty(handle, inode);
     203             :         }
     204    21849315 :         return err;
     205             : }
     206             : 
     207             : #define ext4_ext_dirty(handle, inode, path) \
     208             :                 __ext4_ext_dirty(__func__, __LINE__, (handle), (inode), (path))
     209             : 
     210     3926847 : static ext4_fsblk_t ext4_ext_find_goal(struct inode *inode,
     211             :                               struct ext4_ext_path *path,
     212             :                               ext4_lblk_t block)
     213             : {
     214     3926847 :         if (path) {
     215     3926847 :                 int depth = path->p_depth;
     216     3926847 :                 struct ext4_extent *ex;
     217             : 
     218             :                 /*
     219             :                  * Try to predict block placement assuming that we are
     220             :                  * filling in a file which will eventually be
     221             :                  * non-sparse --- i.e., in the case of libbfd writing
      222             :                  * an ELF object's sections out-of-order but in a way
      223             :                  * that eventually results in a contiguous object or
     224             :                  * executable file, or some database extending a table
     225             :                  * space file.  However, this is actually somewhat
     226             :                  * non-ideal if we are writing a sparse file such as
     227             :                  * qemu or KVM writing a raw image file that is going
     228             :                  * to stay fairly sparse, since it will end up
     229             :                  * fragmenting the file system's free space.  Maybe we
      230             :                  * should have some heuristics or some way to allow
      231             :                  * userspace to pass a hint to the file system,
     232             :                  * especially if the latter case turns out to be
     233             :                  * common.
     234             :                  */
     235     3926847 :                 ex = path[depth].p_ext;
     236     3926847 :                 if (ex) {
     237     2997339 :                         ext4_fsblk_t ext_pblk = ext4_ext_pblock(ex);
     238     2997339 :                         ext4_lblk_t ext_block = le32_to_cpu(ex->ee_block);
     239             : 
     240     2997339 :                         if (block > ext_block)
     241     2913925 :                                 return ext_pblk + (block - ext_block);
     242             :                         else
     243       83414 :                                 return ext_pblk - (ext_block - block);
     244             :                 }
     245             : 
      246             :                 /* it looks like the index is empty;
      247             :                  * try to find the starting block from the index itself */
     248      929508 :                 if (path[depth].p_bh)
     249           0 :                         return path[depth].p_bh->b_blocknr;
     250             :         }
     251             : 
     252             :         /* OK. use inode's group */
     253      929508 :         return ext4_inode_to_goal_block(inode);
     254             : }
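
The prediction above is simple arithmetic on the nearest extent: offset the
extent's physical start by the distance between the requested logical block and
the extent's logical start. A small, self-contained illustration (find_goal is
a hypothetical helper, not part of ext4):

    #include <stdio.h>

    static unsigned long long find_goal(unsigned int ext_block,
                                        unsigned long long ext_pblk,
                                        unsigned int block)
    {
            /* Mirror the two branches above: past the extent or before it. */
            if (block > ext_block)
                    return ext_pblk + (block - ext_block);
            return ext_pblk - (ext_block - block);
    }

    int main(void)
    {
            /* Nearest extent maps logical block 100 to physical block 5000. */
            printf("%llu\n", find_goal(100, 5000, 110));   /* 5010 */
            printf("%llu\n", find_goal(100, 5000, 90));    /* 4990 */
            return 0;
    }
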
     255             : 
     256             : /*
     257             :  * Allocation for a meta data block
     258             :  */
     259             : static ext4_fsblk_t
     260        7830 : ext4_ext_new_meta_block(handle_t *handle, struct inode *inode,
     261             :                         struct ext4_ext_path *path,
     262             :                         struct ext4_extent *ex, int *err, unsigned int flags)
     263             : {
     264        7830 :         ext4_fsblk_t goal, newblock;
     265             : 
     266        7830 :         goal = ext4_ext_find_goal(inode, path, le32_to_cpu(ex->ee_block));
     267        7830 :         newblock = ext4_new_meta_blocks(handle, inode, goal, flags,
     268             :                                         NULL, err);
     269        7830 :         return newblock;
     270             : }
     271             : 
     272             : static inline int ext4_ext_space_block(struct inode *inode, int check)
     273             : {
     274       86543 :         int size;
     275             : 
     276       86543 :         size = (inode->i_sb->s_blocksize - sizeof(struct ext4_extent_header))
     277       86543 :                         / sizeof(struct ext4_extent);
     278             : #ifdef AGGRESSIVE_TEST
     279             :         if (!check && size > 6)
     280             :                 size = 6;
     281             : #endif
     282       86543 :         return size;
     283             : }
     284             : 
     285             : static inline int ext4_ext_space_block_idx(struct inode *inode, int check)
     286             : {
     287         115 :         int size;
     288             : 
     289         115 :         size = (inode->i_sb->s_blocksize - sizeof(struct ext4_extent_header))
     290         115 :                         / sizeof(struct ext4_extent_idx);
     291             : #ifdef AGGRESSIVE_TEST
     292             :         if (!check && size > 5)
     293             :                 size = 5;
     294             : #endif
     295         115 :         return size;
     296             : }
     297             : 
     298             : static inline int ext4_ext_space_root(struct inode *inode, int check)
     299             : {
     300             :         int size;
     301             : 
     302             :         size = sizeof(EXT4_I(inode)->i_data);
     303             :         size -= sizeof(struct ext4_extent_header);
     304             :         size /= sizeof(struct ext4_extent);
     305             : #ifdef AGGRESSIVE_TEST
     306             :         if (!check && size > 3)
     307             :                 size = 3;
     308             : #endif
     309             :         return size;
     310             : }
     311             : 
     312             : static inline int ext4_ext_space_root_idx(struct inode *inode, int check)
     313             : {
     314             :         int size;
     315             : 
     316             :         size = sizeof(EXT4_I(inode)->i_data);
     317             :         size -= sizeof(struct ext4_extent_header);
     318             :         size /= sizeof(struct ext4_extent_idx);
     319             : #ifdef AGGRESSIVE_TEST
     320             :         if (!check && size > 4)
     321             :                 size = 4;
     322             : #endif
     323             :         return size;
     324             : }
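
The four helpers above all compute the same thing, capacity = (space - header) /
entry size, differing only in whether the node lives in a full disk block or in
the inode's i_data area. A quick check of the arithmetic, assuming the standard
on-disk sizes (12-byte header, 12-byte extent and index entries, 60-byte i_data);
these constants are stated here for illustration, not taken from this file:

    #include <stdio.h>

    #define HDR_SIZE    12   /* struct ext4_extent_header */
    #define ENTRY_SIZE  12   /* struct ext4_extent / struct ext4_extent_idx */
    #define I_DATA_SIZE 60   /* inode i_data array, 15 * 4 bytes */

    int main(void)
    {
            unsigned int blocksize = 4096;

            /* Entries per interior/leaf block and per inode root. */
            printf("per block: %u\n", (blocksize - HDR_SIZE) / ENTRY_SIZE);   /* 340 */
            printf("in root:   %u\n", (I_DATA_SIZE - HDR_SIZE) / ENTRY_SIZE); /* 4 */
            return 0;
    }
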
     325             : 
     326             : static inline int
     327     3846082 : ext4_force_split_extent_at(handle_t *handle, struct inode *inode,
     328             :                            struct ext4_ext_path **ppath, ext4_lblk_t lblk,
     329             :                            int nofail)
     330             : {
     331     3846082 :         struct ext4_ext_path *path = *ppath;
     332     3846082 :         int unwritten = ext4_ext_is_unwritten(path[path->p_depth].p_ext);
     333     3846082 :         int flags = EXT4_EX_NOCACHE | EXT4_GET_BLOCKS_PRE_IO;
     334             : 
     335     3846082 :         if (nofail)
     336      165295 :                 flags |= EXT4_GET_BLOCKS_METADATA_NOFAIL | EXT4_EX_NOFAIL;
     337             : 
     338     4041152 :         return ext4_split_extent_at(handle, inode, ppath, lblk, unwritten ?
     339             :                         EXT4_EXT_MARK_UNWRIT1|EXT4_EXT_MARK_UNWRIT2 : 0,
     340             :                         flags);
     341             : }
     342             : 
     343             : static int
     344     1029488 : ext4_ext_max_entries(struct inode *inode, int depth)
     345             : {
     346     1029488 :         int max;
     347             : 
     348     1029488 :         if (depth == ext_depth(inode)) {
     349             :                 if (depth == 0)
     350             :                         max = ext4_ext_space_root(inode, 1);
     351             :                 else
     352             :                         max = ext4_ext_space_root_idx(inode, 1);
     353             :         } else {
     354       12447 :                 if (depth == 0)
     355       12420 :                         max = ext4_ext_space_block(inode, 1);
     356             :                 else
     357          27 :                         max = ext4_ext_space_block_idx(inode, 1);
     358             :         }
     359             : 
     360     1029488 :         return max;
     361             : }
     362             : 
     363     1955847 : static int ext4_valid_extent(struct inode *inode, struct ext4_extent *ext)
     364             : {
     365     1955847 :         ext4_fsblk_t block = ext4_ext_pblock(ext);
     366     1955847 :         int len = ext4_ext_get_actual_len(ext);
     367     1955847 :         ext4_lblk_t lblock = le32_to_cpu(ext->ee_block);
     368             : 
     369             :         /*
     370             :          * We allow neither:
     371             :          *  - zero length
     372             :          *  - overflow/wrap-around
     373             :          */
     374     1955847 :         if (lblock + len <= lblock)
     375             :                 return 0;
     376     1955821 :         return ext4_inode_block_valid(inode, block, len);
     377             : }
     378             : 
     379      177452 : static int ext4_valid_extent_idx(struct inode *inode,
     380             :                                 struct ext4_extent_idx *ext_idx)
     381             : {
     382      177452 :         ext4_fsblk_t block = ext4_idx_pblock(ext_idx);
     383             : 
     384      177452 :         return ext4_inode_block_valid(inode, block, 1);
     385             : }
     386             : 
     387     1029518 : static int ext4_valid_extent_entries(struct inode *inode,
     388             :                                      struct ext4_extent_header *eh,
     389             :                                      ext4_lblk_t lblk, ext4_fsblk_t *pblk,
     390             :                                      int depth)
     391             : {
     392     1029518 :         unsigned short entries;
     393     1029518 :         ext4_lblk_t lblock = 0;
     394     1029518 :         ext4_lblk_t cur = 0;
     395             : 
     396     1029518 :         if (eh->eh_entries == 0)
     397             :                 return 1;
     398             : 
     399      695059 :         entries = le16_to_cpu(eh->eh_entries);
     400             : 
     401      695059 :         if (depth == 0) {
     402             :                 /* leaf entries */
     403      520998 :                 struct ext4_extent *ext = EXT_FIRST_EXTENT(eh);
     404             : 
     405             :                 /*
      406             :                  * The logical block in the first entry should be equal to
     407             :                  * the number in the index block.
     408             :                  */
     409      520998 :                 if (depth != ext_depth(inode) &&
     410       12106 :                     lblk != le32_to_cpu(ext->ee_block))
     411             :                         return 0;
     412     2476899 :                 while (entries) {
     413     1955833 :                         if (!ext4_valid_extent(inode, ext))
     414             :                                 return 0;
     415             : 
     416             :                         /* Check for overlapping extents */
     417     1955903 :                         lblock = le32_to_cpu(ext->ee_block);
     418     1955903 :                         if (lblock < cur) {
     419           0 :                                 *pblk = ext4_ext_pblock(ext);
     420           0 :                                 return 0;
     421             :                         }
     422     1955903 :                         cur = lblock + ext4_ext_get_actual_len(ext);
     423     1955903 :                         ext++;
     424     1955903 :                         entries--;
     425             :                 }
     426             :         } else {
     427      174061 :                 struct ext4_extent_idx *ext_idx = EXT_FIRST_INDEX(eh);
     428             : 
     429             :                 /*
      430             :                  * The logical block in the first entry should be equal to
     431             :                  * the number in the parent index block.
     432             :                  */
     433      174061 :                 if (depth != ext_depth(inode) &&
     434          27 :                     lblk != le32_to_cpu(ext_idx->ei_block))
     435             :                         return 0;
     436      351514 :                 while (entries) {
     437      177449 :                         if (!ext4_valid_extent_idx(inode, ext_idx))
     438             :                                 return 0;
     439             : 
     440             :                         /* Check for overlapping index extents */
     441      177453 :                         lblock = le32_to_cpu(ext_idx->ei_block);
     442      177453 :                         if (lblock < cur) {
     443           0 :                                 *pblk = ext4_idx_pblock(ext_idx);
     444           0 :                                 return 0;
     445             :                         }
     446      177453 :                         ext_idx++;
     447      177453 :                         entries--;
     448      177453 :                         cur = lblock + 1;
     449             :                 }
     450             :         }
     451             :         return 1;
     452             : }
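
Both loops above enforce the same invariant on a node's entries: each entry is
non-empty, does not wrap around, and starts at or after the point where the
previous entry ended. The same check on a plain array of (start, len) ranges, as
a standalone sketch:

    #include <stdio.h>

    struct range { unsigned int start, len; };

    static int ranges_valid(const struct range *r, int n)
    {
            unsigned int cur = 0;   /* first block not covered so far */
            int i;

            for (i = 0; i < n; i++) {
                    if (r[i].len == 0 || r[i].start + r[i].len <= r[i].start)
                            return 0;       /* zero length or wrap-around */
                    if (r[i].start < cur)
                            return 0;       /* overlaps the previous entry */
                    cur = r[i].start + r[i].len;
            }
            return 1;
    }

    int main(void)
    {
            struct range ok[]  = { {0, 10}, {20, 5}, {25, 1} };
            struct range bad[] = { {0, 10}, {5, 5} };

            printf("%d %d\n", ranges_valid(ok, 3), ranges_valid(bad, 2)); /* 1 0 */
            return 0;
    }
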
     453             : 
     454     1029367 : static int __ext4_ext_check(const char *function, unsigned int line,
     455             :                             struct inode *inode, struct ext4_extent_header *eh,
     456             :                             int depth, ext4_fsblk_t pblk, ext4_lblk_t lblk)
     457             : {
     458     1029367 :         const char *error_msg;
     459     1029367 :         int max = 0, err = -EFSCORRUPTED;
     460             : 
     461     1029367 :         if (unlikely(eh->eh_magic != EXT4_EXT_MAGIC)) {
     462           0 :                 error_msg = "invalid magic";
     463           0 :                 goto corrupted;
     464             :         }
     465     1029367 :         if (unlikely(le16_to_cpu(eh->eh_depth) != depth)) {
     466           0 :                 error_msg = "unexpected eh_depth";
     467           0 :                 goto corrupted;
     468             :         }
     469     1029367 :         if (unlikely(eh->eh_max == 0)) {
     470           0 :                 error_msg = "invalid eh_max";
     471           0 :                 goto corrupted;
     472             :         }
     473     1029367 :         max = ext4_ext_max_entries(inode, depth);
     474     1029367 :         if (unlikely(le16_to_cpu(eh->eh_max) > max)) {
     475           0 :                 error_msg = "too large eh_max";
     476           0 :                 goto corrupted;
     477             :         }
     478     1029367 :         if (unlikely(le16_to_cpu(eh->eh_entries) > le16_to_cpu(eh->eh_max))) {
     479           0 :                 error_msg = "invalid eh_entries";
     480           0 :                 goto corrupted;
     481             :         }
     482     1029367 :         if (unlikely((eh->eh_entries == 0) && (depth > 0))) {
     483           0 :                 error_msg = "eh_entries is 0 but eh_depth is > 0";
     484           0 :                 goto corrupted;
     485             :         }
     486     1029367 :         if (!ext4_valid_extent_entries(inode, eh, lblk, &pblk, depth)) {
     487           2 :                 error_msg = "invalid extent entries";
     488           2 :                 goto corrupted;
     489             :         }
     490     1029560 :         if (unlikely(depth > 32)) {
     491           0 :                 error_msg = "too large eh_depth";
     492           0 :                 goto corrupted;
     493             :         }
     494             :         /* Verify checksum on non-root extent tree nodes */
     495     1042005 :         if (ext_depth(inode) != depth &&
     496       12445 :             !ext4_extent_block_csum_verify(inode, eh)) {
     497           0 :                 error_msg = "extent tree corrupted";
     498           0 :                 err = -EFSBADCRC;
     499           0 :                 goto corrupted;
     500             :         }
     501             :         return 0;
     502             : 
     503           2 : corrupted:
     504           2 :         ext4_error_inode_err(inode, function, line, 0, -err,
     505             :                              "pblk %llu bad header/extent: %s - magic %x, "
     506             :                              "entries %u, max %u(%u), depth %u(%u)",
     507             :                              (unsigned long long) pblk, error_msg,
     508             :                              le16_to_cpu(eh->eh_magic),
     509             :                              le16_to_cpu(eh->eh_entries),
     510             :                              le16_to_cpu(eh->eh_max),
     511             :                              max, le16_to_cpu(eh->eh_depth), depth);
     512           2 :         return err;
     513             : }
     514             : 
     515             : #define ext4_ext_check(inode, eh, depth, pblk)                  \
     516             :         __ext4_ext_check(__func__, __LINE__, (inode), (eh), (depth), (pblk), 0)
     517             : 
     518      217373 : int ext4_ext_check_inode(struct inode *inode)
     519             : {
     520      217373 :         return ext4_ext_check(inode, ext_inode_hdr(inode), ext_depth(inode), 0);
     521             : }
     522             : 
     523     4086762 : static void ext4_cache_extents(struct inode *inode,
     524             :                                struct ext4_extent_header *eh)
     525             : {
     526     4086762 :         struct ext4_extent *ex = EXT_FIRST_EXTENT(eh);
     527     4086762 :         ext4_lblk_t prev = 0;
     528     4086762 :         int i;
     529             : 
     530     7498333 :         for (i = le16_to_cpu(eh->eh_entries); i > 0; i--, ex++) {
     531     3410014 :                 unsigned int status = EXTENT_STATUS_WRITTEN;
     532     3410014 :                 ext4_lblk_t lblk = le32_to_cpu(ex->ee_block);
     533     3410014 :                 int len = ext4_ext_get_actual_len(ex);
     534             : 
     535     3410014 :                 if (prev && (prev != lblk))
     536     1343679 :                         ext4_es_cache_extent(inode, prev, lblk - prev, ~0,
     537             :                                              EXTENT_STATUS_HOLE);
     538             : 
     539     3410016 :                 if (ext4_ext_is_unwritten(ex))
     540     1457351 :                         status = EXTENT_STATUS_UNWRITTEN;
     541     3410016 :                 ext4_es_cache_extent(inode, lblk, len,
     542             :                                      ext4_ext_pblock(ex), status);
     543     3411571 :                 prev = lblk + len;
     544             :         }
     545     4088319 : }
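
Besides caching each written or unwritten extent, the loop above also derives
the holes implied by gaps between consecutive extents (the "prev != lblk" case).
A toy version of that gap detection over a sorted extent list:

    #include <stdio.h>

    struct extent { unsigned int lblk, len; };

    int main(void)
    {
            /* Two mapped extents with a gap between them. */
            struct extent ex[] = { {0, 10}, {20, 5} };
            unsigned int prev = 0;
            int i;

            for (i = 0; i < 2; i++) {
                    if (prev && prev != ex[i].lblk)
                            printf("hole   %u..%u\n", prev, ex[i].lblk - 1);
                    printf("extent %u..%u\n", ex[i].lblk,
                           ex[i].lblk + ex[i].len - 1);
                    prev = ex[i].lblk + ex[i].len;
            }
            return 0;
    }
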
     546             : 
     547             : static struct buffer_head *
     548    32083189 : __read_extent_tree_block(const char *function, unsigned int line,
     549             :                          struct inode *inode, struct ext4_extent_idx *idx,
     550             :                          int depth, int flags)
     551             : {
     552    32083189 :         struct buffer_head              *bh;
     553    32083189 :         int                             err;
     554    32083189 :         gfp_t                           gfp_flags = __GFP_MOVABLE | GFP_NOFS;
     555    32083189 :         ext4_fsblk_t                    pblk;
     556             : 
     557    32083189 :         if (flags & EXT4_EX_NOFAIL)
     558      393203 :                 gfp_flags |= __GFP_NOFAIL;
     559             : 
     560    32083189 :         pblk = ext4_idx_pblock(idx);
     561    32083189 :         bh = sb_getblk_gfp(inode->i_sb, pblk, gfp_flags);
     562    32086290 :         if (unlikely(!bh))
     563             :                 return ERR_PTR(-ENOMEM);
     564             : 
     565    32086290 :         if (!bh_uptodate_or_lock(bh)) {
     566        4810 :                 trace_ext4_ext_load_extent(inode, pblk, _RET_IP_);
     567        4810 :                 err = ext4_read_bh(bh, 0, NULL);
     568        4810 :                 if (err < 0)
     569           0 :                         goto errout;
     570             :         }
     571    64169798 :         if (buffer_verified(bh) && !(flags & EXT4_EX_FORCE_CACHE))
     572             :                 return bh;
     573       12400 :         err = __ext4_ext_check(function, line, inode, ext_block_hdr(bh),
     574       12400 :                                depth, pblk, le32_to_cpu(idx->ei_block));
     575       12447 :         if (err)
     576           2 :                 goto errout;
     577       12445 :         set_buffer_verified(bh);
     578             :         /*
     579             :          * If this is a leaf block, cache all of its entries
     580             :          */
     581       12445 :         if (!(flags & EXT4_EX_NOCACHE) && depth == 0) {
     582        6595 :                 struct ext4_extent_header *eh = ext_block_hdr(bh);
     583        6595 :                 ext4_cache_extents(inode, eh);
     584             :         }
     585             :         return bh;
     586           2 : errout:
     587           2 :         put_bh(bh);
     588           2 :         return ERR_PTR(err);
     589             : 
     590             : }
     591             : 
     592             : #define read_extent_tree_block(inode, idx, depth, flags)                \
     593             :         __read_extent_tree_block(__func__, __LINE__, (inode), (idx),    \
     594             :                                  (depth), (flags))
     595             : 
     596             : /*
     597             :  * This function is called to cache a file's extent information in the
     598             :  * extent status tree
     599             :  */
     600           0 : int ext4_ext_precache(struct inode *inode)
     601             : {
     602           0 :         struct ext4_inode_info *ei = EXT4_I(inode);
     603           0 :         struct ext4_ext_path *path = NULL;
     604           0 :         struct buffer_head *bh;
     605           0 :         int i = 0, depth, ret = 0;
     606             : 
     607           0 :         if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
     608             :                 return 0;       /* not an extent-mapped inode */
     609             : 
     610           0 :         down_read(&ei->i_data_sem);
     611           0 :         depth = ext_depth(inode);
     612             : 
     613             :         /* Don't cache anything if there are no external extent blocks */
     614           0 :         if (!depth) {
     615           0 :                 up_read(&ei->i_data_sem);
     616           0 :                 return ret;
     617             :         }
     618             : 
     619           0 :         path = kcalloc(depth + 1, sizeof(struct ext4_ext_path),
     620             :                        GFP_NOFS);
     621           0 :         if (path == NULL) {
     622           0 :                 up_read(&ei->i_data_sem);
     623           0 :                 return -ENOMEM;
     624             :         }
     625             : 
     626           0 :         path[0].p_hdr = ext_inode_hdr(inode);
     627           0 :         ret = ext4_ext_check(inode, path[0].p_hdr, depth, 0);
     628           0 :         if (ret)
     629           0 :                 goto out;
     630           0 :         path[0].p_idx = EXT_FIRST_INDEX(path[0].p_hdr);
     631           0 :         while (i >= 0) {
     632             :                 /*
     633             :                  * If this is a leaf block or we've reached the end of
     634             :                  * the index block, go up
     635             :                  */
     636           0 :                 if ((i == depth) ||
     637           0 :                     path[i].p_idx > EXT_LAST_INDEX(path[i].p_hdr)) {
     638           0 :                         brelse(path[i].p_bh);
     639           0 :                         path[i].p_bh = NULL;
     640           0 :                         i--;
     641           0 :                         continue;
     642             :                 }
     643           0 :                 bh = read_extent_tree_block(inode, path[i].p_idx++,
     644             :                                             depth - i - 1,
     645             :                                             EXT4_EX_FORCE_CACHE);
     646           0 :                 if (IS_ERR(bh)) {
     647           0 :                         ret = PTR_ERR(bh);
     648           0 :                         break;
     649             :                 }
     650           0 :                 i++;
     651           0 :                 path[i].p_bh = bh;
     652           0 :                 path[i].p_hdr = ext_block_hdr(bh);
     653           0 :                 path[i].p_idx = EXT_FIRST_INDEX(path[i].p_hdr);
     654             :         }
     655           0 :         ext4_set_inode_state(inode, EXT4_STATE_EXT_PRECACHED);
     656           0 : out:
     657           0 :         up_read(&ei->i_data_sem);
     658           0 :         ext4_free_ext_path(path);
     659           0 :         return ret;
     660             : }
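
ext4_ext_precache() walks the whole tree without recursion: path[] acts as an
explicit stack, each level keeps its own cursor, and the loop either descends
(i++) into the next child or pops back up (i--) when a level is exhausted. The
same control structure on a toy one-index-block tree, with made-up leaf numbers:

    #include <stdio.h>

    #define FANOUT 3

    int main(void)
    {
            int root[FANOUT] = { 10, 20, 30 };  /* toy "leaf block numbers" */
            int depth = 1;                      /* root = level 0, leaves = level 1 */
            int pos[2] = { 0, 0 };              /* per-level cursor, like p_idx */
            int i = 0;

            while (i >= 0) {
                    /* At a leaf, or this index level is exhausted: go up. */
                    if (i == depth || pos[i] == FANOUT) {
                            i--;
                            continue;
                    }
                    /* Otherwise descend, "reading" the child on the way down. */
                    printf("caching leaf block %d\n", root[pos[i]++]);
                    i++;
                    pos[i] = 0;
            }
            return 0;
    }
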
     661             : 
     662             : #ifdef EXT_DEBUG
     663             : static void ext4_ext_show_path(struct inode *inode, struct ext4_ext_path *path)
     664             : {
     665             :         int k, l = path->p_depth;
     666             : 
     667             :         ext_debug(inode, "path:");
     668             :         for (k = 0; k <= l; k++, path++) {
     669             :                 if (path->p_idx) {
     670             :                         ext_debug(inode, "  %d->%llu",
     671             :                                   le32_to_cpu(path->p_idx->ei_block),
     672             :                                   ext4_idx_pblock(path->p_idx));
     673             :                 } else if (path->p_ext) {
     674             :                         ext_debug(inode, "  %d:[%d]%d:%llu ",
     675             :                                   le32_to_cpu(path->p_ext->ee_block),
     676             :                                   ext4_ext_is_unwritten(path->p_ext),
     677             :                                   ext4_ext_get_actual_len(path->p_ext),
     678             :                                   ext4_ext_pblock(path->p_ext));
     679             :                 } else
     680             :                         ext_debug(inode, "  []");
     681             :         }
     682             :         ext_debug(inode, "\n");
     683             : }
     684             : 
     685             : static void ext4_ext_show_leaf(struct inode *inode, struct ext4_ext_path *path)
     686             : {
     687             :         int depth = ext_depth(inode);
     688             :         struct ext4_extent_header *eh;
     689             :         struct ext4_extent *ex;
     690             :         int i;
     691             : 
     692             :         if (!path)
     693             :                 return;
     694             : 
     695             :         eh = path[depth].p_hdr;
     696             :         ex = EXT_FIRST_EXTENT(eh);
     697             : 
     698             :         ext_debug(inode, "Displaying leaf extents\n");
     699             : 
     700             :         for (i = 0; i < le16_to_cpu(eh->eh_entries); i++, ex++) {
     701             :                 ext_debug(inode, "%d:[%d]%d:%llu ", le32_to_cpu(ex->ee_block),
     702             :                           ext4_ext_is_unwritten(ex),
     703             :                           ext4_ext_get_actual_len(ex), ext4_ext_pblock(ex));
     704             :         }
     705             :         ext_debug(inode, "\n");
     706             : }
     707             : 
     708             : static void ext4_ext_show_move(struct inode *inode, struct ext4_ext_path *path,
     709             :                         ext4_fsblk_t newblock, int level)
     710             : {
     711             :         int depth = ext_depth(inode);
     712             :         struct ext4_extent *ex;
     713             : 
     714             :         if (depth != level) {
     715             :                 struct ext4_extent_idx *idx;
     716             :                 idx = path[level].p_idx;
     717             :                 while (idx <= EXT_MAX_INDEX(path[level].p_hdr)) {
     718             :                         ext_debug(inode, "%d: move %d:%llu in new index %llu\n",
     719             :                                   level, le32_to_cpu(idx->ei_block),
     720             :                                   ext4_idx_pblock(idx), newblock);
     721             :                         idx++;
     722             :                 }
     723             : 
     724             :                 return;
     725             :         }
     726             : 
     727             :         ex = path[depth].p_ext;
     728             :         while (ex <= EXT_MAX_EXTENT(path[depth].p_hdr)) {
     729             :                 ext_debug(inode, "move %d:%llu:[%d]%d in new leaf %llu\n",
     730             :                                 le32_to_cpu(ex->ee_block),
     731             :                                 ext4_ext_pblock(ex),
     732             :                                 ext4_ext_is_unwritten(ex),
     733             :                                 ext4_ext_get_actual_len(ex),
     734             :                                 newblock);
     735             :                 ex++;
     736             :         }
     737             : }
     738             : 
     739             : #else
     740             : #define ext4_ext_show_path(inode, path)
     741             : #define ext4_ext_show_leaf(inode, path)
     742             : #define ext4_ext_show_move(inode, path, newblock, level)
     743             : #endif
     744             : 
     745             : /*
     746             :  * ext4_ext_binsearch_idx:
      747             :  * binary search for the closest index of the given block;
      748             :  * the header must be checked before calling this.
     749             :  */
     750             : static void
     751    31832691 : ext4_ext_binsearch_idx(struct inode *inode,
     752             :                         struct ext4_ext_path *path, ext4_lblk_t block)
     753             : {
     754    31832691 :         struct ext4_extent_header *eh = path->p_hdr;
     755    31832691 :         struct ext4_extent_idx *r, *l, *m;
     756             : 
     757             : 
     758    31832691 :         ext_debug(inode, "binsearch for %u(idx):  ", block);
     759             : 
     760    31832691 :         l = EXT_FIRST_INDEX(eh) + 1;
     761    31832691 :         r = EXT_LAST_INDEX(eh);
     762    91417656 :         while (l <= r) {
     763    59584965 :                 m = l + (r - l) / 2;
     764    59584965 :                 ext_debug(inode, "%p(%u):%p(%u):%p(%u) ", l,
     765             :                           le32_to_cpu(l->ei_block), m, le32_to_cpu(m->ei_block),
     766             :                           r, le32_to_cpu(r->ei_block));
     767             : 
     768    59584965 :                 if (block < le32_to_cpu(m->ei_block))
     769    32812112 :                         r = m - 1;
     770             :                 else
     771    26772853 :                         l = m + 1;
     772             :         }
     773             : 
     774    31832691 :         path->p_idx = l - 1;
     775    31832691 :         ext_debug(inode, "  -> %u->%lld ", le32_to_cpu(path->p_idx->ei_block),
     776             :                   ext4_idx_pblock(path->p_idx));
     777             : 
     778             : #ifdef CHECK_BINSEARCH
     779             :         {
     780             :                 struct ext4_extent_idx *chix, *ix;
     781             :                 int k;
     782             : 
     783             :                 chix = ix = EXT_FIRST_INDEX(eh);
     784             :                 for (k = 0; k < le16_to_cpu(eh->eh_entries); k++, ix++) {
     785             :                         if (k != 0 && le32_to_cpu(ix->ei_block) <=
     786             :                             le32_to_cpu(ix[-1].ei_block)) {
     787             :                                 printk(KERN_DEBUG "k=%d, ix=0x%p, "
     788             :                                        "first=0x%p\n", k,
     789             :                                        ix, EXT_FIRST_INDEX(eh));
     790             :                                 printk(KERN_DEBUG "%u <= %u\n",
     791             :                                        le32_to_cpu(ix->ei_block),
     792             :                                        le32_to_cpu(ix[-1].ei_block));
     793             :                         }
     794             :                         BUG_ON(k && le32_to_cpu(ix->ei_block)
     795             :                                            <= le32_to_cpu(ix[-1].ei_block));
     796             :                         if (block < le32_to_cpu(ix->ei_block))
     797             :                                 break;
     798             :                         chix = ix;
     799             :                 }
     800             :                 BUG_ON(chix != path->p_idx);
     801             :         }
     802             : #endif
     803             : 
     804    31832691 : }
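
The search above returns the rightmost index entry whose starting block is less
than or equal to the target: it scans [first + 1, last] and the answer is l - 1,
relying on the first entry always covering everything below it. Stripped of the
extent types, the same search looks like this (find_le is a hypothetical name):

    #include <stdio.h>

    static int find_le(const unsigned int *keys, int n, unsigned int block)
    {
            int l = 1, r = n - 1;   /* keys[0] is the implicit fallback */

            while (l <= r) {
                    int m = l + (r - l) / 2;

                    if (block < keys[m])
                            r = m - 1;
                    else
                            l = m + 1;
            }
            return l - 1;           /* rightmost entry with keys[i] <= block */
    }

    int main(void)
    {
            unsigned int keys[] = { 0, 100, 200, 300 };

            printf("%d\n", find_le(keys, 4, 150));  /* 1 */
            printf("%d\n", find_le(keys, 4, 300));  /* 3 */
            printf("%d\n", find_le(keys, 4, 50));   /* 0 */
            return 0;
    }
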
     805             : 
     806             : /*
     807             :  * ext4_ext_binsearch:
      808             :  * binary search for the closest extent of the given block;
      809             :  * the header must be checked before calling this.
     810             :  */
     811             : static void
     812    26919928 : ext4_ext_binsearch(struct inode *inode,
     813             :                 struct ext4_ext_path *path, ext4_lblk_t block)
     814             : {
     815    26919928 :         struct ext4_extent_header *eh = path->p_hdr;
     816    26919928 :         struct ext4_extent *r, *l, *m;
     817             : 
     818    26919928 :         if (eh->eh_entries == 0) {
     819             :                 /*
     820             :                  * this leaf is empty:
      821             :                  * we get such a leaf in the split/add case
     822             :                  */
     823             :                 return;
     824             :         }
     825             : 
     826    23998129 :         ext_debug(inode, "binsearch for %u:  ", block);
     827             : 
     828    23998129 :         l = EXT_FIRST_EXTENT(eh) + 1;
     829    23998129 :         r = EXT_LAST_EXTENT(eh);
     830             : 
     831   162168055 :         while (l <= r) {
     832   138169926 :                 m = l + (r - l) / 2;
     833   138169926 :                 ext_debug(inode, "%p(%u):%p(%u):%p(%u) ", l,
     834             :                           le32_to_cpu(l->ee_block), m, le32_to_cpu(m->ee_block),
     835             :                           r, le32_to_cpu(r->ee_block));
     836             : 
     837   138169926 :                 if (block < le32_to_cpu(m->ee_block))
     838    65626109 :                         r = m - 1;
     839             :                 else
     840    72543817 :                         l = m + 1;
     841             :         }
     842             : 
     843    23998129 :         path->p_ext = l - 1;
     844    23998129 :         ext_debug(inode, "  -> %d:%llu:[%d]%d ",
     845             :                         le32_to_cpu(path->p_ext->ee_block),
     846             :                         ext4_ext_pblock(path->p_ext),
     847             :                         ext4_ext_is_unwritten(path->p_ext),
     848             :                         ext4_ext_get_actual_len(path->p_ext));
     849             : 
     850             : #ifdef CHECK_BINSEARCH
     851             :         {
     852             :                 struct ext4_extent *chex, *ex;
     853             :                 int k;
     854             : 
     855             :                 chex = ex = EXT_FIRST_EXTENT(eh);
     856             :                 for (k = 0; k < le16_to_cpu(eh->eh_entries); k++, ex++) {
     857             :                         BUG_ON(k && le32_to_cpu(ex->ee_block)
     858             :                                           <= le32_to_cpu(ex[-1].ee_block));
     859             :                         if (block < le32_to_cpu(ex->ee_block))
     860             :                                 break;
     861             :                         chex = ex;
     862             :                 }
     863             :                 BUG_ON(chex != path->p_ext);
     864             :         }
     865             : #endif
     866             : 
     867             : }
     868             : 
     869     2651324 : void ext4_ext_tree_init(handle_t *handle, struct inode *inode)
     870             : {
     871     2651324 :         struct ext4_extent_header *eh;
     872             : 
     873     2651324 :         eh = ext_inode_hdr(inode);
     874     2651324 :         eh->eh_depth = 0;
     875     2651324 :         eh->eh_entries = 0;
     876     2651324 :         eh->eh_magic = EXT4_EXT_MAGIC;
     877     2651324 :         eh->eh_max = cpu_to_le16(ext4_ext_space_root(inode, 0));
     878     2651324 :         eh->eh_generation = 0;
     879     2651324 :         ext4_mark_inode_dirty(handle, inode);
     880     2654036 : }
     881             : 
     882             : struct ext4_ext_path *
     883    26919949 : ext4_find_extent(struct inode *inode, ext4_lblk_t block,
     884             :                  struct ext4_ext_path **orig_path, int flags)
     885             : {
     886    26919949 :         struct ext4_extent_header *eh;
     887    26919949 :         struct buffer_head *bh;
     888    26919949 :         struct ext4_ext_path *path = orig_path ? *orig_path : NULL;
     889    26919949 :         short int depth, i, ppos = 0;
     890    26919949 :         int ret;
     891    26919949 :         gfp_t gfp_flags = GFP_NOFS;
     892             : 
     893    26919949 :         if (flags & EXT4_EX_NOFAIL)
     894      427874 :                 gfp_flags |= __GFP_NOFAIL;
     895             : 
     896    26919949 :         eh = ext_inode_hdr(inode);
     897    26919949 :         depth = ext_depth(inode);
     898    26919949 :         if (depth < 0 || depth > EXT4_MAX_EXTENT_DEPTH) {
     899           0 :                 EXT4_ERROR_INODE(inode, "inode has invalid extent depth: %d",
     900             :                                  depth);
     901           0 :                 ret = -EFSCORRUPTED;
     902           0 :                 goto err;
     903             :         }
     904             : 
     905    26919949 :         if (path) {
     906     2889783 :                 ext4_ext_drop_refs(path);
     907     2889793 :                 if (depth > path[0].p_maxdepth) {
     908           0 :                         kfree(path);
     909           0 :                         *orig_path = path = NULL;
     910             :                 }
     911             :         }
     912    26919959 :         if (!path) {
     913             :                 /* account possible depth increase */
     914    24030760 :                 path = kcalloc(depth + 2, sizeof(struct ext4_ext_path),
     915             :                                 gfp_flags);
     916    24022309 :                 if (unlikely(!path))
     917             :                         return ERR_PTR(-ENOMEM);
     918    24022309 :                 path[0].p_maxdepth = depth + 1;
     919             :         }
     920    26911508 :         path[0].p_hdr = eh;
     921    26911508 :         path[0].p_bh = NULL;
     922             : 
     923    26911508 :         i = depth;
     924    26911508 :         if (!(flags & EXT4_EX_NOCACHE) && depth == 0)
     925     4080135 :                 ext4_cache_extents(inode, eh);
     926             :         /* walk through the tree */
     927    58749870 :         while (i) {
     928    31831142 :                 ext_debug(inode, "depth %d: num %d, max %d\n",
     929             :                           ppos, le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max));
     930             : 
     931    31831142 :                 ext4_ext_binsearch_idx(inode, path + ppos, block);
     932    31833387 :                 path[ppos].p_block = ext4_idx_pblock(path[ppos].p_idx);
     933    31833387 :                 path[ppos].p_depth = i;
     934    31833387 :                 path[ppos].p_ext = NULL;
     935             : 
     936    31833387 :                 bh = read_extent_tree_block(inode, path[ppos].p_idx, --i, flags);
     937    31835349 :                 if (IS_ERR(bh)) {
     938           1 :                         ret = PTR_ERR(bh);
     939           1 :                         goto err;
     940             :                 }
     941             : 
     942    31835348 :                 eh = ext_block_hdr(bh);
     943    31835348 :                 ppos++;
     944    31835348 :                 path[ppos].p_bh = bh;
     945    31835348 :                 path[ppos].p_hdr = eh;
     946             :         }
     947             : 
     948    26918728 :         path[ppos].p_depth = i;
     949    26918728 :         path[ppos].p_ext = NULL;
     950    26918728 :         path[ppos].p_idx = NULL;
     951             : 
     952             :         /* find extent */
     953    26918728 :         ext4_ext_binsearch(inode, path + ppos, block);
     954             :         /* if not an empty leaf */
     955    26917766 :         if (path[ppos].p_ext)
     956    23998297 :                 path[ppos].p_block = ext4_ext_pblock(path[ppos].p_ext);
     957             : 
     958             :         ext4_ext_show_path(inode, path);
     959             : 
     960             :         return path;
     961             : 
     962           1 : err:
     963           1 :         ext4_free_ext_path(path);
     964           1 :         if (orig_path)
     965           0 :                 *orig_path = NULL;
     966           1 :         return ERR_PTR(ret);
     967             : }
     968             : 
     969             : /*
     970             :  * ext4_ext_insert_index:
     971             :  * insert new index [@logical;@ptr] into the block at @curp;
     972             :  * check where to insert: before @curp or after @curp
     973             :  */
     974        7816 : static int ext4_ext_insert_index(handle_t *handle, struct inode *inode,
     975             :                                  struct ext4_ext_path *curp,
     976             :                                  int logical, ext4_fsblk_t ptr)
     977             : {
     978        7816 :         struct ext4_extent_idx *ix;
     979        7816 :         int len, err;
     980             : 
     981        7816 :         err = ext4_ext_get_access(handle, inode, curp);
     982        7816 :         if (err)
     983             :                 return err;
     984             : 
     985        7816 :         if (unlikely(logical == le32_to_cpu(curp->p_idx->ei_block))) {
     986           0 :                 EXT4_ERROR_INODE(inode,
     987             :                                  "logical %d == ei_block %d!",
     988             :                                  logical, le32_to_cpu(curp->p_idx->ei_block));
     989           0 :                 return -EFSCORRUPTED;
     990             :         }
     991             : 
     992        7816 :         if (unlikely(le16_to_cpu(curp->p_hdr->eh_entries)
     993             :                              >= le16_to_cpu(curp->p_hdr->eh_max))) {
     994           0 :                 EXT4_ERROR_INODE(inode,
     995             :                                  "eh_entries %d >= eh_max %d!",
     996             :                                  le16_to_cpu(curp->p_hdr->eh_entries),
     997             :                                  le16_to_cpu(curp->p_hdr->eh_max));
     998           0 :                 return -EFSCORRUPTED;
     999             :         }
    1000             : 
    1001        7816 :         if (logical > le32_to_cpu(curp->p_idx->ei_block)) {
    1002             :                 /* insert after */
    1003        7816 :                 ext_debug(inode, "insert new index %d after: %llu\n",
    1004             :                           logical, ptr);
    1005        7816 :                 ix = curp->p_idx + 1;
    1006             :         } else {
    1007             :                 /* insert before */
    1008             :                 ext_debug(inode, "insert new index %d before: %llu\n",
    1009             :                           logical, ptr);
    1010             :                 ix = curp->p_idx;
    1011             :         }
    1012             : 
    1013        7816 :         len = EXT_LAST_INDEX(curp->p_hdr) - ix + 1;
    1014        7816 :         BUG_ON(len < 0);
    1015        7816 :         if (len > 0) {
    1016        5793 :                 ext_debug(inode, "insert new index %d: "
    1017             :                                 "move %d indices from 0x%p to 0x%p\n",
    1018             :                                 logical, len, ix, ix + 1);
    1019       11586 :                 memmove(ix + 1, ix, len * sizeof(struct ext4_extent_idx));
    1020             :         }
    1021             : 
    1022        7816 :         if (unlikely(ix > EXT_MAX_INDEX(curp->p_hdr))) {
    1023           0 :                 EXT4_ERROR_INODE(inode, "ix > EXT_MAX_INDEX!");
    1024           0 :                 return -EFSCORRUPTED;
    1025             :         }
    1026             : 
    1027        7816 :         ix->ei_block = cpu_to_le32(logical);
    1028        7816 :         ext4_idx_store_pblock(ix, ptr);
    1029        7816 :         le16_add_cpu(&curp->p_hdr->eh_entries, 1);
    1030             : 
    1031        7816 :         if (unlikely(ix > EXT_LAST_INDEX(curp->p_hdr))) {
    1032           0 :                 EXT4_ERROR_INODE(inode, "ix > EXT_LAST_INDEX!");
    1033           0 :                 return -EFSCORRUPTED;
    1034             :         }
    1035             : 
    1036        7816 :         err = ext4_ext_dirty(handle, inode, curp);
    1037        7816 :         ext4_std_error(inode->i_sb, err);
    1038             : 
    1039             :         return err;
    1040             : }
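
The insertion above is, at its core, opening a gap in a sorted fixed-capacity array and writing the new entry into it. A minimal userspace sketch of that pattern follows, using simplified stand-in types rather than the on-disk structures (struct idx_entry, idx_insert and their fields are illustrative assumptions, not kernel APIs):

#include <stdint.h>
#include <string.h>
#include <errno.h>

struct idx_entry {                 /* stand-in for struct ext4_extent_idx */
        uint32_t first_block;      /* first logical block covered */
        uint64_t child_pblock;     /* physical block of the child node */
};

/*
 * Insert (first_block, child_pblock) into a sorted array of *nentries
 * entries with room for max entries.  Mirrors the "insert before/after
 * curp->p_idx, memmove the tail, bump eh_entries" sequence above.
 */
static int idx_insert(struct idx_entry *idx, int *nentries, int max,
                      uint32_t first_block, uint64_t child_pblock)
{
        int pos = 0;

        if (*nentries >= max)
                return -ENOSPC;            /* node is full, caller must split */

        while (pos < *nentries && idx[pos].first_block < first_block)
                pos++;                     /* find the insertion point */

        /* open a gap: move the tail one slot to the right */
        memmove(&idx[pos + 1], &idx[pos],
                (*nentries - pos) * sizeof(*idx));

        idx[pos].first_block = first_block;
        idx[pos].child_pblock = child_pblock;
        (*nentries)++;
        return 0;
}

In the kernel the caller has already located the insertion point through the path, so the linear search here merely stands in for curp->p_idx.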
    1041             : 
    1042             : /*
    1043             :  * ext4_ext_split:
      1044             :  * inserts a new subtree into the path, using the free index entry
      1045             :  * at depth @at:
      1046             :  * - allocates all needed blocks (new leaf and all intermediate index blocks)
      1047             :  * - decides where to split
      1048             :  * - moves the remaining extents and index entries (to the right of the
      1049             :  *   split point) into the newly allocated blocks
      1050             :  * - initializes the subtree
    1051             :  */
    1052        7816 : static int ext4_ext_split(handle_t *handle, struct inode *inode,
    1053             :                           unsigned int flags,
    1054             :                           struct ext4_ext_path *path,
    1055             :                           struct ext4_extent *newext, int at)
    1056             : {
    1057        7816 :         struct buffer_head *bh = NULL;
    1058        7816 :         int depth = ext_depth(inode);
    1059        7816 :         struct ext4_extent_header *neh;
    1060        7816 :         struct ext4_extent_idx *fidx;
    1061        7816 :         int i = at, k, m, a;
    1062        7816 :         ext4_fsblk_t newblock, oldblock;
    1063        7816 :         __le32 border;
    1064        7816 :         ext4_fsblk_t *ablocks = NULL; /* array of allocated blocks */
    1065        7816 :         gfp_t gfp_flags = GFP_NOFS;
    1066        7816 :         int err = 0;
    1067        7816 :         size_t ext_size = 0;
    1068             : 
    1069        7816 :         if (flags & EXT4_EX_NOFAIL)
    1070           0 :                 gfp_flags |= __GFP_NOFAIL;
    1071             : 
    1072             :         /* make decision: where to split? */
     1073             :         /* FIXME: for now the decision is the simplest one: split at the current extent */
    1074             : 
     1075             :         /* if the current leaf will be split, then we should use
     1076             :          * the border from the split point */
    1077        7816 :         if (unlikely(path[depth].p_ext > EXT_MAX_EXTENT(path[depth].p_hdr))) {
    1078           0 :                 EXT4_ERROR_INODE(inode, "p_ext > EXT_MAX_EXTENT!");
    1079           0 :                 return -EFSCORRUPTED;
    1080             :         }
    1081        7816 :         if (path[depth].p_ext != EXT_MAX_EXTENT(path[depth].p_hdr)) {
    1082        7468 :                 border = path[depth].p_ext[1].ee_block;
    1083        7468 :                 ext_debug(inode, "leaf will be split."
    1084             :                                 " next leaf starts at %d\n",
    1085             :                                   le32_to_cpu(border));
    1086             :         } else {
    1087         348 :                 border = newext->ee_block;
    1088         348 :                 ext_debug(inode, "leaf will be added."
    1089             :                                 " next leaf starts at %d\n",
    1090             :                                 le32_to_cpu(border));
    1091             :         }
    1092             : 
    1093             :         /*
     1094             :          * If an error occurs, we break processing and
     1095             :          * mark the filesystem read-only. The index won't
     1096             :          * be inserted and the tree will stay consistent.
     1097             :          * The next mount will repair the buffers too.
    1098             :          */
    1099             : 
    1100             :         /*
     1101             :          * Get an array to track all allocated blocks.
     1102             :          * We need it to handle errors and free the
     1103             :          * allocated blocks on failure.
    1104             :          */
    1105        7816 :         ablocks = kcalloc(depth, sizeof(ext4_fsblk_t), gfp_flags);
    1106        7816 :         if (!ablocks)
    1107             :                 return -ENOMEM;
    1108             : 
    1109             :         /* allocate all needed blocks */
    1110             :         ext_debug(inode, "allocate %d blocks for indexes/leaf\n", depth - at);
    1111       15646 :         for (a = 0; a < depth - at; a++) {
    1112        7830 :                 newblock = ext4_ext_new_meta_block(handle, inode, path,
    1113             :                                                    newext, &err, flags);
    1114        7830 :                 if (newblock == 0)
    1115           0 :                         goto cleanup;
    1116        7830 :                 ablocks[a] = newblock;
    1117             :         }
    1118             : 
    1119             :         /* initialize new leaf */
    1120        7816 :         newblock = ablocks[--a];
    1121        7816 :         if (unlikely(newblock == 0)) {
    1122           0 :                 EXT4_ERROR_INODE(inode, "newblock == 0!");
    1123           0 :                 err = -EFSCORRUPTED;
    1124           0 :                 goto cleanup;
    1125             :         }
    1126        7816 :         bh = sb_getblk_gfp(inode->i_sb, newblock, __GFP_MOVABLE | GFP_NOFS);
    1127        7816 :         if (unlikely(!bh)) {
    1128           0 :                 err = -ENOMEM;
    1129           0 :                 goto cleanup;
    1130             :         }
    1131        7816 :         lock_buffer(bh);
    1132             : 
    1133        7816 :         err = ext4_journal_get_create_access(handle, inode->i_sb, bh,
    1134             :                                              EXT4_JTR_NONE);
    1135        7816 :         if (err)
    1136           0 :                 goto cleanup;
    1137             : 
    1138        7816 :         neh = ext_block_hdr(bh);
    1139        7816 :         neh->eh_entries = 0;
    1140        7816 :         neh->eh_max = cpu_to_le16(ext4_ext_space_block(inode, 0));
    1141        7816 :         neh->eh_magic = EXT4_EXT_MAGIC;
    1142        7816 :         neh->eh_depth = 0;
    1143        7816 :         neh->eh_generation = 0;
    1144             : 
    1145             :         /* move remainder of path[depth] to the new leaf */
    1146        7816 :         if (unlikely(path[depth].p_hdr->eh_entries !=
    1147             :                      path[depth].p_hdr->eh_max)) {
    1148           0 :                 EXT4_ERROR_INODE(inode, "eh_entries %d != eh_max %d!",
    1149             :                                  path[depth].p_hdr->eh_entries,
    1150             :                                  path[depth].p_hdr->eh_max);
    1151           0 :                 err = -EFSCORRUPTED;
    1152           0 :                 goto cleanup;
    1153             :         }
     1154             :         /* start copying from the next extent */
    1155        7816 :         m = EXT_MAX_EXTENT(path[depth].p_hdr) - path[depth].p_ext++;
    1156        7816 :         ext4_ext_show_move(inode, path, newblock, depth);
    1157        7816 :         if (m) {
    1158        7468 :                 struct ext4_extent *ex;
    1159        7468 :                 ex = EXT_FIRST_EXTENT(neh);
    1160       14936 :                 memmove(ex, path[depth].p_ext, sizeof(struct ext4_extent) * m);
    1161        7468 :                 le16_add_cpu(&neh->eh_entries, m);
    1162             :         }
    1163             : 
    1164             :         /* zero out unused area in the extent block */
    1165        7816 :         ext_size = sizeof(struct ext4_extent_header) +
    1166        7816 :                 sizeof(struct ext4_extent) * le16_to_cpu(neh->eh_entries);
    1167        7816 :         memset(bh->b_data + ext_size, 0, inode->i_sb->s_blocksize - ext_size);
    1168        7816 :         ext4_extent_block_csum_set(inode, neh);
    1169        7816 :         set_buffer_uptodate(bh);
    1170        7816 :         unlock_buffer(bh);
    1171             : 
    1172        7816 :         err = ext4_handle_dirty_metadata(handle, inode, bh);
    1173        7816 :         if (err)
    1174           0 :                 goto cleanup;
    1175        7816 :         brelse(bh);
    1176        7816 :         bh = NULL;
    1177             : 
    1178             :         /* correct old leaf */
    1179        7816 :         if (m) {
    1180        7468 :                 err = ext4_ext_get_access(handle, inode, path + depth);
    1181        7468 :                 if (err)
    1182           0 :                         goto cleanup;
    1183        7468 :                 le16_add_cpu(&path[depth].p_hdr->eh_entries, -m);
    1184        7468 :                 err = ext4_ext_dirty(handle, inode, path + depth);
    1185        7468 :                 if (err)
    1186           0 :                         goto cleanup;
    1187             : 
    1188             :         }
    1189             : 
    1190             :         /* create intermediate indexes */
    1191        7816 :         k = depth - at - 1;
    1192        7816 :         if (unlikely(k < 0)) {
    1193           0 :                 EXT4_ERROR_INODE(inode, "k %d < 0!", k);
    1194           0 :                 err = -EFSCORRUPTED;
    1195           0 :                 goto cleanup;
    1196             :         }
    1197        7816 :         if (k)
    1198             :                 ext_debug(inode, "create %d intermediate indices\n", k);
    1199             :         /* insert new index into current index block */
    1200             :         /* current depth stored in i var */
    1201        7816 :         i = depth - 1;
    1202        7830 :         while (k--) {
    1203          14 :                 oldblock = newblock;
    1204          14 :                 newblock = ablocks[--a];
    1205          14 :                 bh = sb_getblk(inode->i_sb, newblock);
    1206          14 :                 if (unlikely(!bh)) {
    1207           0 :                         err = -ENOMEM;
    1208           0 :                         goto cleanup;
    1209             :                 }
    1210          14 :                 lock_buffer(bh);
    1211             : 
    1212          14 :                 err = ext4_journal_get_create_access(handle, inode->i_sb, bh,
    1213             :                                                      EXT4_JTR_NONE);
    1214          14 :                 if (err)
    1215           0 :                         goto cleanup;
    1216             : 
    1217          14 :                 neh = ext_block_hdr(bh);
    1218          14 :                 neh->eh_entries = cpu_to_le16(1);
    1219          14 :                 neh->eh_magic = EXT4_EXT_MAGIC;
    1220          14 :                 neh->eh_max = cpu_to_le16(ext4_ext_space_block_idx(inode, 0));
    1221          14 :                 neh->eh_depth = cpu_to_le16(depth - i);
    1222          14 :                 neh->eh_generation = 0;
    1223          14 :                 fidx = EXT_FIRST_INDEX(neh);
    1224          14 :                 fidx->ei_block = border;
    1225          14 :                 ext4_idx_store_pblock(fidx, oldblock);
    1226             : 
    1227          14 :                 ext_debug(inode, "int.index at %d (block %llu): %u -> %llu\n",
    1228             :                                 i, newblock, le32_to_cpu(border), oldblock);
    1229             : 
    1230             :                 /* move remainder of path[i] to the new index block */
    1231          14 :                 if (unlikely(EXT_MAX_INDEX(path[i].p_hdr) !=
    1232             :                                         EXT_LAST_INDEX(path[i].p_hdr))) {
    1233           0 :                         EXT4_ERROR_INODE(inode,
    1234             :                                          "EXT_MAX_INDEX != EXT_LAST_INDEX ee_block %d!",
    1235             :                                          le32_to_cpu(path[i].p_ext->ee_block));
    1236           0 :                         err = -EFSCORRUPTED;
    1237           0 :                         goto cleanup;
    1238             :                 }
     1239             :                 /* start copying indexes */
    1240          14 :                 m = EXT_MAX_INDEX(path[i].p_hdr) - path[i].p_idx++;
    1241          14 :                 ext_debug(inode, "cur 0x%p, last 0x%p\n", path[i].p_idx,
    1242             :                                 EXT_MAX_INDEX(path[i].p_hdr));
    1243          14 :                 ext4_ext_show_move(inode, path, newblock, i);
    1244          14 :                 if (m) {
    1245          14 :                         memmove(++fidx, path[i].p_idx,
    1246             :                                 sizeof(struct ext4_extent_idx) * m);
    1247          14 :                         le16_add_cpu(&neh->eh_entries, m);
    1248             :                 }
    1249             :                 /* zero out unused area in the extent block */
    1250          14 :                 ext_size = sizeof(struct ext4_extent_header) +
    1251          14 :                    (sizeof(struct ext4_extent) * le16_to_cpu(neh->eh_entries));
    1252          14 :                 memset(bh->b_data + ext_size, 0,
    1253             :                         inode->i_sb->s_blocksize - ext_size);
    1254          14 :                 ext4_extent_block_csum_set(inode, neh);
    1255          14 :                 set_buffer_uptodate(bh);
    1256          14 :                 unlock_buffer(bh);
    1257             : 
    1258          14 :                 err = ext4_handle_dirty_metadata(handle, inode, bh);
    1259          14 :                 if (err)
    1260           0 :                         goto cleanup;
    1261          14 :                 brelse(bh);
    1262          14 :                 bh = NULL;
    1263             : 
    1264             :                 /* correct old index */
    1265          14 :                 if (m) {
    1266          14 :                         err = ext4_ext_get_access(handle, inode, path + i);
    1267          14 :                         if (err)
    1268           0 :                                 goto cleanup;
    1269          14 :                         le16_add_cpu(&path[i].p_hdr->eh_entries, -m);
    1270          14 :                         err = ext4_ext_dirty(handle, inode, path + i);
    1271          14 :                         if (err)
    1272           0 :                                 goto cleanup;
    1273             :                 }
    1274             : 
    1275          14 :                 i--;
    1276             :         }
    1277             : 
    1278             :         /* insert new index */
    1279        7816 :         err = ext4_ext_insert_index(handle, inode, path + at,
    1280             :                                     le32_to_cpu(border), newblock);
    1281             : 
    1282           0 : cleanup:
    1283        7816 :         if (bh) {
    1284           0 :                 if (buffer_locked(bh))
    1285           0 :                         unlock_buffer(bh);
    1286           0 :                 brelse(bh);
    1287             :         }
    1288             : 
    1289        7816 :         if (err) {
    1290             :                 /* free all allocated blocks in error case */
    1291           0 :                 for (i = 0; i < depth; i++) {
    1292           0 :                         if (!ablocks[i])
    1293           0 :                                 continue;
    1294           0 :                         ext4_free_blocks(handle, inode, NULL, ablocks[i], 1,
    1295             :                                          EXT4_FREE_BLOCKS_METADATA);
    1296             :                 }
    1297             :         }
    1298        7816 :         kfree(ablocks);
    1299             : 
    1300        7816 :         return err;
    1301             : }
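
The central data movement in the split is copying everything to the right of the split point from the old node into a freshly initialized node and shrinking the old node's entry count. A compact sketch of that step on plain arrays, outside the journalling and block-allocation machinery (struct ext_entry and split_leaf are illustrative assumptions):

#include <string.h>

struct ext_entry {                 /* stand-in for struct ext4_extent */
        unsigned int first_block;
        unsigned short len;
        unsigned long long pblock;
};

/*
 * Move entries [split .. *old_n - 1] from the old leaf into the new
 * (empty) leaf and shrink the old leaf; return how many entries moved.
 */
static int split_leaf(struct ext_entry *old_leaf, int *old_n, int split,
                      struct ext_entry *new_leaf, int *new_n)
{
        int m = *old_n - split;    /* entries to the right of the split point */

        if (m > 0) {
                memmove(new_leaf, &old_leaf[split], m * sizeof(*old_leaf));
                *old_n -= m;       /* old leaf keeps the left half only */
        }
        *new_n = m;
        return m;
}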
    1302             : 
    1303             : /*
    1304             :  * ext4_ext_grow_indepth:
      1305             :  * implements the tree-growing procedure:
      1306             :  * - allocates a new block
      1307             :  * - moves the top-level data (index block or leaf) into the new block
      1308             :  * - initializes the new top level, creating an index that points to
      1309             :  *   the just-created block
    1310             :  */
    1311       66798 : static int ext4_ext_grow_indepth(handle_t *handle, struct inode *inode,
    1312             :                                  unsigned int flags)
    1313             : {
    1314       66798 :         struct ext4_extent_header *neh;
    1315       66798 :         struct buffer_head *bh;
    1316       66798 :         ext4_fsblk_t newblock, goal = 0;
    1317       66798 :         struct ext4_super_block *es = EXT4_SB(inode->i_sb)->s_es;
    1318       66798 :         int err = 0;
    1319       66798 :         size_t ext_size = 0;
    1320             : 
    1321             :         /* Try to prepend new index to old one */
    1322       66798 :         if (ext_depth(inode))
    1323          74 :                 goal = ext4_idx_pblock(EXT_FIRST_INDEX(ext_inode_hdr(inode)));
    1324       66798 :         if (goal > le32_to_cpu(es->s_first_data_block)) {
    1325          74 :                 flags |= EXT4_MB_HINT_TRY_GOAL;
    1326          74 :                 goal--;
    1327             :         } else
    1328       66724 :                 goal = ext4_inode_to_goal_block(inode);
    1329       66797 :         newblock = ext4_new_meta_blocks(handle, inode, goal, flags,
    1330             :                                         NULL, &err);
    1331       66798 :         if (newblock == 0)
    1332         416 :                 return err;
    1333             : 
    1334       66382 :         bh = sb_getblk_gfp(inode->i_sb, newblock, __GFP_MOVABLE | GFP_NOFS);
    1335       66382 :         if (unlikely(!bh))
    1336             :                 return -ENOMEM;
    1337       66382 :         lock_buffer(bh);
    1338             : 
    1339       66381 :         err = ext4_journal_get_create_access(handle, inode->i_sb, bh,
    1340             :                                              EXT4_JTR_NONE);
    1341       66381 :         if (err) {
    1342           0 :                 unlock_buffer(bh);
    1343           0 :                 goto out;
    1344             :         }
    1345             : 
    1346       66381 :         ext_size = sizeof(EXT4_I(inode)->i_data);
    1347             :         /* move top-level index/leaf into new block */
    1348      132762 :         memmove(bh->b_data, EXT4_I(inode)->i_data, ext_size);
    1349             :         /* zero out unused area in the extent block */
    1350       66381 :         memset(bh->b_data + ext_size, 0, inode->i_sb->s_blocksize - ext_size);
    1351             : 
    1352             :         /* set size of new block */
    1353       66381 :         neh = ext_block_hdr(bh);
     1354             :         /* the old root could hold indexes or leaves,
     1355             :          * so calculate eh_max the right way */
    1356       66381 :         if (ext_depth(inode))
    1357          74 :                 neh->eh_max = cpu_to_le16(ext4_ext_space_block_idx(inode, 0));
    1358             :         else
    1359       66307 :                 neh->eh_max = cpu_to_le16(ext4_ext_space_block(inode, 0));
    1360       66381 :         neh->eh_magic = EXT4_EXT_MAGIC;
    1361       66381 :         ext4_extent_block_csum_set(inode, neh);
    1362       66379 :         set_buffer_uptodate(bh);
    1363       66382 :         set_buffer_verified(bh);
    1364       66381 :         unlock_buffer(bh);
    1365             : 
    1366       66380 :         err = ext4_handle_dirty_metadata(handle, inode, bh);
    1367       66382 :         if (err)
    1368           0 :                 goto out;
    1369             : 
    1370             :         /* Update top-level index: num,max,pointer */
    1371       66382 :         neh = ext_inode_hdr(inode);
    1372       66382 :         neh->eh_entries = cpu_to_le16(1);
    1373       66382 :         ext4_idx_store_pblock(EXT_FIRST_INDEX(neh), newblock);
    1374       66382 :         if (neh->eh_depth == 0) {
    1375             :                 /* Root extent block becomes index block */
    1376       66308 :                 neh->eh_max = cpu_to_le16(ext4_ext_space_root_idx(inode, 0));
    1377       66308 :                 EXT_FIRST_INDEX(neh)->ei_block =
    1378             :                         EXT_FIRST_EXTENT(neh)->ee_block;
    1379             :         }
    1380       66382 :         ext_debug(inode, "new root: num %d(%d), lblock %d, ptr %llu\n",
    1381             :                   le16_to_cpu(neh->eh_entries), le16_to_cpu(neh->eh_max),
    1382             :                   le32_to_cpu(EXT_FIRST_INDEX(neh)->ei_block),
    1383             :                   ext4_idx_pblock(EXT_FIRST_INDEX(neh)));
    1384             : 
    1385       66382 :         le16_add_cpu(&neh->eh_depth, 1);
    1386       66382 :         err = ext4_mark_inode_dirty(handle, inode);
    1387       66382 : out:
    1388       66382 :         brelse(bh);
    1389             : 
    1390       66382 :         return err;
    1391             : }
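
Growing in depth amounts to: copy whatever the root currently holds into a newly allocated node, rewrite the root as a single index entry pointing at that node, and bump the depth. A minimal in-memory sketch of that shape, with deliberately simplified node and tree types (struct node, struct tree and grow_indepth are illustrative assumptions, not the kernel structures):

#include <stdlib.h>
#include <string.h>
#include <errno.h>

struct node {
        int depth;                  /* 0 = leaf */
        int nentries;
        struct node *child[4];      /* used when depth > 0 */
};

struct tree {
        struct node root;           /* root lives "inline", like i_data */
};

/* Push the current root contents one level down and make the root an index. */
static int grow_indepth(struct tree *t)
{
        struct node *new_child = malloc(sizeof(*new_child));

        if (!new_child)
                return -ENOMEM;

        *new_child = t->root;               /* move old root data into the new node */

        memset(&t->root, 0, sizeof(t->root));
        t->root.depth = new_child->depth + 1;
        t->root.nentries = 1;               /* single index entry ... */
        t->root.child[0] = new_child;       /* ... pointing at the old contents */
        return 0;
}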
    1392             : 
    1393             : /*
    1394             :  * ext4_ext_create_new_leaf:
      1395             :  * finds an empty index and adds a new leaf.
      1396             :  * if no free index is found, it requests in-depth growing.
    1397             :  */
    1398       74540 : static int ext4_ext_create_new_leaf(handle_t *handle, struct inode *inode,
    1399             :                                     unsigned int mb_flags,
    1400             :                                     unsigned int gb_flags,
    1401             :                                     struct ext4_ext_path **ppath,
    1402             :                                     struct ext4_extent *newext)
    1403             : {
    1404       74540 :         struct ext4_ext_path *path = *ppath;
    1405       74540 :         struct ext4_ext_path *curp;
    1406       74540 :         int depth, i, err = 0;
    1407             : 
    1408       74614 : repeat:
    1409       74614 :         i = depth = ext_depth(inode);
    1410             : 
     1411             :         /* walk up the tree and look for a free index entry */
    1412       74614 :         curp = path + depth;
    1413       82520 :         while (i > 0 && !EXT_HAS_FREE_INDEX(curp)) {
    1414        7906 :                 i--;
    1415        7906 :                 curp--;
    1416             :         }
    1417             : 
     1418             :         /* we use an already allocated block for the index block,
     1419             :          * so subsequent data blocks should be contiguous */
    1420       74614 :         if (EXT_HAS_FREE_INDEX(curp)) {
     1421             :                 /* if we found an index with a free entry, then use that
     1422             :                  * entry: create all the needed subtree and add the new leaf */
    1423        7816 :                 err = ext4_ext_split(handle, inode, mb_flags, path, newext, i);
    1424        7816 :                 if (err)
    1425           0 :                         goto out;
    1426             : 
    1427             :                 /* refill path */
    1428        7816 :                 path = ext4_find_extent(inode,
    1429        7816 :                                     (ext4_lblk_t)le32_to_cpu(newext->ee_block),
    1430             :                                     ppath, gb_flags);
    1431        7816 :                 if (IS_ERR(path))
    1432           0 :                         err = PTR_ERR(path);
    1433             :         } else {
    1434             :                 /* tree is full, time to grow in depth */
    1435       66798 :                 err = ext4_ext_grow_indepth(handle, inode, mb_flags);
    1436       66797 :                 if (err)
    1437         416 :                         goto out;
    1438             : 
    1439             :                 /* refill path */
    1440       66381 :                 path = ext4_find_extent(inode,
    1441       66381 :                                    (ext4_lblk_t)le32_to_cpu(newext->ee_block),
    1442             :                                     ppath, gb_flags);
    1443       66377 :                 if (IS_ERR(path)) {
    1444           0 :                         err = PTR_ERR(path);
    1445           0 :                         goto out;
    1446             :                 }
    1447             : 
    1448             :                 /*
     1449             :                  * only the first grow (depth 0 -> 1) produces free space;
    1450             :                  * in all other cases we have to split the grown tree
    1451             :                  */
    1452       66377 :                 depth = ext_depth(inode);
    1453       66377 :                 if (path[depth].p_hdr->eh_entries == path[depth].p_hdr->eh_max) {
    1454             :                         /* now we need to split */
    1455          74 :                         goto repeat;
    1456             :                 }
    1457             :         }
    1458             : 
    1459       66303 : out:
    1460       74535 :         return err;
    1461             : }
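
The walk at the top of the function looks for the deepest node on the path that still has a free slot; only when every level is full does the tree grow in depth. A small sketch of that walk over a flattened path representation (struct level_fill and find_free_level are illustrative assumptions):

#include <stdint.h>

struct level_fill {
        int nentries;              /* entries currently in this node */
        int max;                   /* capacity of this node */
};

/*
 * Walk from the leaf toward the root and return the deepest level that
 * still has a free slot, or -1 when every node on the path is full and
 * the tree has to grow in depth instead.
 */
static int find_free_level(const struct level_fill *path, int depth)
{
        int d;

        for (d = depth; d >= 0; d--)
                if (path[d].nentries < path[d].max)
                        return d;
        return -1;
}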
    1462             : 
    1463             : /*
      1464             :  * Search for the closest allocated block to the left of *logical;
      1465             :  * return it at @logical and its physical address at @phys.
      1466             :  * If *logical is the smallest allocated block, the function
      1467             :  * returns 0 at @phys.
      1468             :  * The return value is 0 (success) or an error code.
    1469             :  */
    1470     3920418 : static int ext4_ext_search_left(struct inode *inode,
    1471             :                                 struct ext4_ext_path *path,
    1472             :                                 ext4_lblk_t *logical, ext4_fsblk_t *phys)
    1473             : {
    1474     3920418 :         struct ext4_extent_idx *ix;
    1475     3920418 :         struct ext4_extent *ex;
    1476     3920418 :         int depth, ee_len;
    1477             : 
    1478     3920418 :         if (unlikely(path == NULL)) {
    1479           0 :                 EXT4_ERROR_INODE(inode, "path == NULL *logical %d!", *logical);
    1480           0 :                 return -EFSCORRUPTED;
    1481             :         }
    1482     3920418 :         depth = path->p_depth;
    1483     3920418 :         *phys = 0;
    1484             : 
    1485     3920418 :         if (depth == 0 && path->p_ext == NULL)
    1486             :                 return 0;
    1487             : 
     1488             :         /* usually the extent in the path covers blocks smaller
     1489             :          * than *logical, but it can be that the extent is the
     1490             :          * first one in the file */
    1491             : 
    1492     2990539 :         ex = path[depth].p_ext;
    1493     2990539 :         ee_len = ext4_ext_get_actual_len(ex);
    1494     2990539 :         if (*logical < le32_to_cpu(ex->ee_block)) {
    1495       83412 :                 if (unlikely(EXT_FIRST_EXTENT(path[depth].p_hdr) != ex)) {
    1496           0 :                         EXT4_ERROR_INODE(inode,
    1497             :                                          "EXT_FIRST_EXTENT != ex *logical %d ee_block %d!",
    1498             :                                          *logical, le32_to_cpu(ex->ee_block));
    1499           0 :                         return -EFSCORRUPTED;
    1500             :                 }
    1501      105192 :                 while (--depth >= 0) {
    1502       21780 :                         ix = path[depth].p_idx;
    1503       21780 :                         if (unlikely(ix != EXT_FIRST_INDEX(path[depth].p_hdr))) {
    1504           0 :                                 EXT4_ERROR_INODE(inode,
    1505             :                                   "ix (%d) != EXT_FIRST_INDEX (%d) (depth %d)!",
    1506             :                                   ix != NULL ? le32_to_cpu(ix->ei_block) : 0,
    1507             :                                   le32_to_cpu(EXT_FIRST_INDEX(path[depth].p_hdr)->ei_block),
    1508             :                                   depth);
    1509           0 :                                 return -EFSCORRUPTED;
    1510             :                         }
    1511             :                 }
    1512             :                 return 0;
    1513             :         }
    1514             : 
    1515     2907127 :         if (unlikely(*logical < (le32_to_cpu(ex->ee_block) + ee_len))) {
    1516           0 :                 EXT4_ERROR_INODE(inode,
    1517             :                                  "logical %d < ee_block %d + ee_len %d!",
    1518             :                                  *logical, le32_to_cpu(ex->ee_block), ee_len);
    1519           0 :                 return -EFSCORRUPTED;
    1520             :         }
    1521             : 
    1522     2907127 :         *logical = le32_to_cpu(ex->ee_block) + ee_len - 1;
    1523     2907127 :         *phys = ext4_ext_pblock(ex) + ee_len - 1;
    1524     2907127 :         return 0;
    1525             : }
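
Stripped of the tree walk and the corruption checks, the left search reduces to: if the target sits at or beyond the extent the lookup landed on, the closest allocated block to the left is the last block of that extent. A short sketch under that assumption (struct ext_range and search_left are illustrative; unlike the kernel helper, this sketch returns 1 when a block is found and 0 otherwise, rather than 0/-errno):

#include <stdint.h>

struct ext_range {
        uint32_t lblock;            /* first logical block */
        uint16_t len;               /* number of blocks */
        uint64_t pblock;            /* first physical block */
};

/*
 * Given the extent the lookup landed on, report the closest allocated
 * block at or to the left of *logical.  Returns 1 and fills *phys when
 * one exists, 0 when the target lies before the first allocated block.
 */
static int search_left(const struct ext_range *ex, uint32_t *logical,
                       uint64_t *phys)
{
        if (!ex || *logical < ex->lblock) {
                *phys = 0;          /* nothing allocated to the left */
                return 0;
        }
        *logical = ex->lblock + ex->len - 1;
        *phys = ex->pblock + ex->len - 1;
        return 1;
}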
    1526             : 
    1527             : /*
      1528             :  * Search for the closest allocated block to the right of *logical;
      1529             :  * return it at @logical and its physical address at @phys.
      1530             :  * If none exists, return 0 with @phys set to 0. Return 1 when an
      1531             :  * allocated block is found, in which case ret_ex is valid.
      1532             :  * Otherwise return a negative error code.
    1533             :  */
    1534     3920195 : static int ext4_ext_search_right(struct inode *inode,
    1535             :                                  struct ext4_ext_path *path,
    1536             :                                  ext4_lblk_t *logical, ext4_fsblk_t *phys,
    1537             :                                  struct ext4_extent *ret_ex)
    1538             : {
    1539     3920195 :         struct buffer_head *bh = NULL;
    1540     3920195 :         struct ext4_extent_header *eh;
    1541     3920195 :         struct ext4_extent_idx *ix;
    1542     3920195 :         struct ext4_extent *ex;
    1543     3920195 :         int depth;      /* Note, NOT eh_depth; depth from top of tree */
    1544     3920195 :         int ee_len;
    1545             : 
    1546     3920195 :         if (unlikely(path == NULL)) {
    1547           0 :                 EXT4_ERROR_INODE(inode, "path == NULL *logical %d!", *logical);
    1548           0 :                 return -EFSCORRUPTED;
    1549             :         }
    1550     3920195 :         depth = path->p_depth;
    1551     3920195 :         *phys = 0;
    1552             : 
    1553     3920195 :         if (depth == 0 && path->p_ext == NULL)
    1554             :                 return 0;
    1555             : 
     1556             :         /* usually the extent in the path covers blocks smaller
     1557             :          * than *logical, but it can be that the extent is the
     1558             :          * first one in the file */
    1559             : 
    1560     2990425 :         ex = path[depth].p_ext;
    1561     2990425 :         ee_len = ext4_ext_get_actual_len(ex);
    1562     2990425 :         if (*logical < le32_to_cpu(ex->ee_block)) {
    1563       83411 :                 if (unlikely(EXT_FIRST_EXTENT(path[depth].p_hdr) != ex)) {
    1564           0 :                         EXT4_ERROR_INODE(inode,
    1565             :                                          "first_extent(path[%d].p_hdr) != ex",
    1566             :                                          depth);
    1567           0 :                         return -EFSCORRUPTED;
    1568             :                 }
    1569      105191 :                 while (--depth >= 0) {
    1570       21780 :                         ix = path[depth].p_idx;
    1571       21780 :                         if (unlikely(ix != EXT_FIRST_INDEX(path[depth].p_hdr))) {
    1572           0 :                                 EXT4_ERROR_INODE(inode,
    1573             :                                                  "ix != EXT_FIRST_INDEX *logical %d!",
    1574             :                                                  *logical);
    1575           0 :                                 return -EFSCORRUPTED;
    1576             :                         }
    1577             :                 }
    1578       83411 :                 goto found_extent;
    1579             :         }
    1580             : 
    1581     2907014 :         if (unlikely(*logical < (le32_to_cpu(ex->ee_block) + ee_len))) {
    1582           0 :                 EXT4_ERROR_INODE(inode,
    1583             :                                  "logical %d < ee_block %d + ee_len %d!",
    1584             :                                  *logical, le32_to_cpu(ex->ee_block), ee_len);
    1585           0 :                 return -EFSCORRUPTED;
    1586             :         }
    1587             : 
    1588     2907014 :         if (ex != EXT_LAST_EXTENT(path[depth].p_hdr)) {
    1589             :                 /* next allocated block in this leaf */
    1590     1781881 :                 ex++;
    1591     1781881 :                 goto found_extent;
    1592             :         }
    1593             : 
    1594             :         /* go up and search for index to the right */
    1595     2079326 :         while (--depth >= 0) {
    1596      997996 :                 ix = path[depth].p_idx;
    1597      997996 :                 if (ix != EXT_LAST_INDEX(path[depth].p_hdr))
    1598       43803 :                         goto got_index;
    1599             :         }
    1600             : 
    1601             :         /* we've gone up to the root and found no index to the right */
    1602             :         return 0;
    1603             : 
    1604             : got_index:
     1605             :         /* we've found an index to the right, let's
    1606             :          * follow it and find the closest allocated
    1607             :          * block to the right */
    1608       43803 :         ix++;
    1609       43816 :         while (++depth < path->p_depth) {
    1610             :                 /* subtract from p_depth to get proper eh_depth */
    1611          13 :                 bh = read_extent_tree_block(inode, ix, path->p_depth - depth, 0);
    1612          13 :                 if (IS_ERR(bh))
    1613           0 :                         return PTR_ERR(bh);
    1614          13 :                 eh = ext_block_hdr(bh);
    1615          13 :                 ix = EXT_FIRST_INDEX(eh);
    1616          13 :                 put_bh(bh);
    1617             :         }
    1618             : 
    1619       43803 :         bh = read_extent_tree_block(inode, ix, path->p_depth - depth, 0);
    1620       43803 :         if (IS_ERR(bh))
    1621           0 :                 return PTR_ERR(bh);
    1622       43803 :         eh = ext_block_hdr(bh);
    1623       43803 :         ex = EXT_FIRST_EXTENT(eh);
    1624     1909095 : found_extent:
    1625     1909095 :         *logical = le32_to_cpu(ex->ee_block);
    1626     1909095 :         *phys = ext4_ext_pblock(ex);
    1627     1909095 :         if (ret_ex)
    1628     1909095 :                 *ret_ex = *ex;
    1629     1909095 :         if (bh)
    1630       43824 :                 put_bh(bh);
    1631             :         return 1;
    1632             : }
    1633             : 
    1634             : /*
    1635             :  * ext4_ext_next_allocated_block:
      1636             :  * returns the allocated block in the subsequent extent, or EXT_MAX_BLOCKS.
      1637             :  * NOTE: it treats the block number from an index entry as an
      1638             :  * allocated block. Thus, index entries have to be consistent
      1639             :  * with the leaves.
    1640             :  */
    1641             : ext4_lblk_t
    1642     4533070 : ext4_ext_next_allocated_block(struct ext4_ext_path *path)
    1643             : {
    1644     4533070 :         int depth;
    1645             : 
    1646     4533070 :         BUG_ON(path == NULL);
    1647     4533070 :         depth = path->p_depth;
    1648             : 
    1649     4533070 :         if (depth == 0 && path->p_ext == NULL)
    1650             :                 return EXT_MAX_BLOCKS;
    1651             : 
    1652     7705488 :         while (depth >= 0) {
    1653     6012902 :                 struct ext4_ext_path *p = &path[depth];
    1654             : 
    1655     6012902 :                 if (depth == path->p_depth) {
    1656             :                         /* leaf */
    1657     4533066 :                         if (p->p_ext && p->p_ext != EXT_LAST_EXTENT(p->p_hdr))
    1658     2796520 :                                 return le32_to_cpu(p->p_ext[1].ee_block);
    1659             :                 } else {
    1660             :                         /* index */
    1661     1479836 :                         if (p->p_idx != EXT_LAST_INDEX(p->p_hdr))
    1662       43964 :                                 return le32_to_cpu(p->p_idx[1].ei_block);
    1663             :                 }
    1664     3172418 :                 depth--;
    1665             :         }
    1666             : 
    1667             :         return EXT_MAX_BLOCKS;
    1668             : }
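
The loop above starts at the leaf and climbs toward the root, returning the starting block of the first entry that lies to the right of the current position at any level. A flattened userspace sketch of that walk (struct level, next_allocated and the 16-entry nodes are illustrative assumptions):

#include <stdint.h>

#define NO_NEXT_BLOCK UINT32_MAX    /* plays the role of EXT_MAX_BLOCKS */

struct level {
        int nentries;               /* entries in this node */
        int pos;                    /* entry the path goes through (< 16) */
        uint32_t first_block[16];   /* first logical block of each entry */
};

/* Return the first block of the next allocated entry, walking leaf upwards. */
static uint32_t next_allocated(const struct level *path, int depth)
{
        int d;

        for (d = depth; d >= 0; d--) {
                const struct level *p = &path[d];

                if (p->pos + 1 < p->nentries)
                        return p->first_block[p->pos + 1];
        }
        return NO_NEXT_BLOCK;
}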
    1669             : 
    1670             : /*
    1671             :  * ext4_ext_next_leaf_block:
      1672             :  * returns the first allocated block from the next leaf, or EXT_MAX_BLOCKS
    1673             :  */
    1674       29476 : static ext4_lblk_t ext4_ext_next_leaf_block(struct ext4_ext_path *path)
    1675             : {
    1676       29476 :         int depth;
    1677             : 
    1678       29476 :         BUG_ON(path == NULL);
    1679       29476 :         depth = path->p_depth;
    1680             : 
    1681             :         /* zero-tree has no leaf blocks at all */
    1682       29476 :         if (depth == 0)
    1683             :                 return EXT_MAX_BLOCKS;
    1684             : 
    1685             :         /* go to index block */
    1686        1662 :         depth--;
    1687             : 
    1688        2326 :         while (depth >= 0) {
    1689        1978 :                 if (path[depth].p_idx !=
    1690        1978 :                                 EXT_LAST_INDEX(path[depth].p_hdr))
    1691        1314 :                         return (ext4_lblk_t)
    1692             :                                 le32_to_cpu(path[depth].p_idx[1].ei_block);
    1693         664 :                 depth--;
    1694             :         }
    1695             : 
    1696             :         return EXT_MAX_BLOCKS;
    1697             : }
    1698             : 
    1699             : /*
    1700             :  * ext4_ext_correct_indexes:
      1701             :  * if a leaf gets modified and the modified extent is the first in the
      1702             :  * leaf, then we have to correct all indexes above it.
      1703             :  * TODO: do we need to correct the tree in all cases?
    1704             :  */
    1705     7926804 : static int ext4_ext_correct_indexes(handle_t *handle, struct inode *inode,
    1706             :                                 struct ext4_ext_path *path)
    1707             : {
    1708     7926804 :         struct ext4_extent_header *eh;
    1709     7926804 :         int depth = ext_depth(inode);
    1710     7926804 :         struct ext4_extent *ex;
    1711     7926804 :         __le32 border;
    1712     7926804 :         int k, err = 0;
    1713             : 
    1714     7926804 :         eh = path[depth].p_hdr;
    1715     7926804 :         ex = path[depth].p_ext;
    1716             : 
    1717     7926804 :         if (unlikely(ex == NULL || eh == NULL)) {
    1718           0 :                 EXT4_ERROR_INODE(inode,
    1719             :                                  "ex %p == NULL or eh %p == NULL", ex, eh);
    1720           0 :                 return -EFSCORRUPTED;
    1721             :         }
    1722             : 
    1723     7926804 :         if (depth == 0) {
    1724             :                 /* there is no tree at all */
    1725             :                 return 0;
    1726             :         }
    1727             : 
    1728     6658106 :         if (ex != EXT_FIRST_EXTENT(eh)) {
     1729             :                 /* we only correct the tree if the first extent in the leaf was modified */
    1730             :                 return 0;
    1731             :         }
    1732             : 
    1733             :         /*
    1734             :          * TODO: we need correction if border is smaller than current one
    1735             :          */
    1736       40514 :         k = depth - 1;
    1737       40514 :         border = path[depth].p_ext->ee_block;
    1738       40514 :         err = ext4_ext_get_access(handle, inode, path + k);
    1739       40514 :         if (err)
    1740             :                 return err;
    1741       40514 :         path[k].p_idx->ei_block = border;
    1742       40514 :         err = ext4_ext_dirty(handle, inode, path + k);
    1743       40514 :         if (err)
    1744             :                 return err;
    1745             : 
    1746       40570 :         while (k--) {
    1747             :                 /* change all left-side indexes */
    1748        1831 :                 if (path[k+1].p_idx != EXT_FIRST_INDEX(path[k+1].p_hdr))
    1749             :                         break;
    1750          56 :                 err = ext4_ext_get_access(handle, inode, path + k);
    1751          56 :                 if (err)
    1752             :                         break;
    1753          56 :                 path[k].p_idx->ei_block = border;
    1754          56 :                 err = ext4_ext_dirty(handle, inode, path + k);
    1755          56 :                 if (err)
    1756             :                         break;
    1757             :         }
    1758             : 
    1759             :         return err;
    1760             : }
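
When the first extent of a leaf changes its starting block, the key stored in the parent index has to be rewritten, and if that parent entry is itself the first in its node, the change keeps propagating upward. A minimal sketch of that propagation over a flattened path (struct key_level and correct_indexes here are illustrative assumptions):

#include <stdint.h>

struct key_level {
        int pos;                    /* position of this path entry in its node */
        uint32_t key[16];           /* first-block keys stored in the node */
};

/*
 * Propagate the new first block of the leaf at level depth into the
 * index levels above it, stopping as soon as the updated entry is not
 * the leftmost one in its node.
 */
static void correct_indexes(struct key_level *path, int depth,
                            uint32_t new_first_block)
{
        int d = depth - 1;          /* start at the leaf's parent */

        while (d >= 0) {
                path[d].key[path[d].pos] = new_first_block;
                if (path[d].pos != 0)
                        break;      /* not the leftmost entry: nothing above changes */
                d--;
        }
}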
    1761             : 
    1762    25380677 : static int ext4_can_extents_be_merged(struct inode *inode,
    1763             :                                       struct ext4_extent *ex1,
    1764             :                                       struct ext4_extent *ex2)
    1765             : {
    1766    25380677 :         unsigned short ext1_ee_len, ext2_ee_len;
    1767             : 
    1768    25380677 :         if (ext4_ext_is_unwritten(ex1) != ext4_ext_is_unwritten(ex2))
    1769             :                 return 0;
    1770             : 
    1771    22191055 :         ext1_ee_len = ext4_ext_get_actual_len(ex1);
    1772    22191055 :         ext2_ee_len = ext4_ext_get_actual_len(ex2);
    1773             : 
    1774    22191055 :         if (le32_to_cpu(ex1->ee_block) + ext1_ee_len !=
    1775    22191055 :                         le32_to_cpu(ex2->ee_block))
    1776             :                 return 0;
    1777             : 
    1778    19528712 :         if (ext1_ee_len + ext2_ee_len > EXT_INIT_MAX_LEN)
    1779             :                 return 0;
    1780             : 
    1781    19451004 :         if (ext4_ext_is_unwritten(ex1) &&
    1782             :             ext1_ee_len + ext2_ee_len > EXT_UNWRITTEN_MAX_LEN)
    1783             :                 return 0;
    1784             : #ifdef AGGRESSIVE_TEST
    1785             :         if (ext1_ee_len >= 4)
    1786             :                 return 0;
    1787             : #endif
    1788             : 
    1789    19442714 :         if (ext4_ext_pblock(ex1) + ext1_ee_len == ext4_ext_pblock(ex2))
    1790     4314977 :                 return 1;
    1791             :         return 0;
    1792             : }
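
The checks above reduce to: same written/unwritten state, logical contiguity, a cap on the combined length, and physical contiguity. A self-contained sketch of that predicate on a simplified extent type (struct simple_ext, can_merge and the length caps are illustrative stand-ins for the kernel's extent layout and EXT_*_MAX_LEN limits):

#include <stdbool.h>
#include <stdint.h>

#define INIT_MAX_LEN    32768u      /* stand-in for EXT_INIT_MAX_LEN */
#define UNWRIT_MAX_LEN  32767u      /* stand-in for EXT_UNWRITTEN_MAX_LEN */

struct simple_ext {
        uint32_t lblock;            /* first logical block */
        uint32_t len;               /* length in blocks */
        uint64_t pblock;            /* first physical block */
        bool     unwritten;
};

/* Can ex2 be folded into ex1 so that one extent covers both ranges? */
static bool can_merge(const struct simple_ext *ex1,
                      const struct simple_ext *ex2)
{
        uint32_t max_len = ex1->unwritten ? UNWRIT_MAX_LEN : INIT_MAX_LEN;

        if (ex1->unwritten != ex2->unwritten)
                return false;                           /* mixed state never merges */
        if (ex1->lblock + ex1->len != ex2->lblock)
                return false;                           /* not logically adjacent */
        if (ex1->len + ex2->len > max_len)
                return false;                           /* merged extent would be too long */
        return ex1->pblock + ex1->len == ex2->pblock;   /* physically contiguous? */
}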
    1793             : 
    1794             : /*
      1795             :  * This function tries to merge the "ex" extent with the next extent in the tree.
      1796             :  * It always tries to merge towards the right. If you want to merge towards
      1797             :  * the left, pass "ex - 1" as the argument instead of "ex".
      1798             :  * Returns 0 if the extents (ex and ex+1) were _not_ merged and
      1799             :  * 1 if they were.
    1800             :  */
    1801    20957731 : static int ext4_ext_try_to_merge_right(struct inode *inode,
    1802             :                                  struct ext4_ext_path *path,
    1803             :                                  struct ext4_extent *ex)
    1804             : {
    1805    20957731 :         struct ext4_extent_header *eh;
    1806    20957731 :         unsigned int depth, len;
    1807    20957731 :         int merge_done = 0, unwritten;
    1808             : 
    1809    20957731 :         depth = ext_depth(inode);
    1810    20957731 :         BUG_ON(path[depth].p_hdr == NULL);
    1811             :         eh = path[depth].p_hdr;
    1812             : 
    1813    24814332 :         while (ex < EXT_LAST_EXTENT(eh)) {
    1814    22724390 :                 if (!ext4_can_extents_be_merged(inode, ex, ex + 1))
    1815             :                         break;
    1816             :                 /* merge with next extent! */
    1817     3856426 :                 unwritten = ext4_ext_is_unwritten(ex);
    1818     7712852 :                 ex->ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex)
    1819             :                                 + ext4_ext_get_actual_len(ex + 1));
    1820     3856426 :                 if (unwritten)
    1821     3448605 :                         ext4_ext_mark_unwritten(ex);
    1822             : 
    1823     3856426 :                 if (ex + 1 < EXT_LAST_EXTENT(eh)) {
    1824     3749115 :                         len = (EXT_LAST_EXTENT(eh) - ex - 1)
    1825     3749115 :                                 * sizeof(struct ext4_extent);
    1826     7498230 :                         memmove(ex + 1, ex + 2, len);
    1827             :                 }
    1828     3856426 :                 le16_add_cpu(&eh->eh_entries, -1);
    1829     3856426 :                 merge_done = 1;
    1830     3856426 :                 WARN_ON(eh->eh_entries == 0);
    1831     3856601 :                 if (!eh->eh_entries)
    1832           0 :                         EXT4_ERROR_INODE(inode, "eh->eh_entries = 0!");
    1833             :         }
    1834             : 
    1835    20957906 :         return merge_done;
    1836             : }
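
Performing the merge is the mirror image of inserting: extend the left extent by its neighbour's length, then close the gap with memmove and drop the entry count. A compact sketch over a plain array, with a minimal mergeability test standing in for ext4_can_extents_be_merged (struct flat_ext, mergeable and merge_right are illustrative assumptions):

#include <stdbool.h>
#include <stdint.h>
#include <string.h>

struct flat_ext {
        uint32_t lblock;
        uint32_t len;
        uint64_t pblock;
        bool     unwritten;
};

/* minimal mergeability test: same state, logically and physically adjacent */
static bool mergeable(const struct flat_ext *a, const struct flat_ext *b)
{
        return a->unwritten == b->unwritten &&
               a->lblock + a->len == b->lblock &&
               a->pblock + a->len == b->pblock;
}

/*
 * Repeatedly fold leaf[pos + 1] into leaf[pos] while the two remain
 * mergeable, shrinking the leaf.  Returns true if anything was merged.
 */
static bool merge_right(struct flat_ext *leaf, int *nentries, int pos)
{
        bool merged = false;

        while (pos + 1 < *nentries && mergeable(&leaf[pos], &leaf[pos + 1])) {
                leaf[pos].len += leaf[pos + 1].len;     /* absorb the neighbour */
                memmove(&leaf[pos + 1], &leaf[pos + 2],
                        (*nentries - pos - 2) * sizeof(*leaf));
                (*nentries)--;
                merged = true;
        }
        return merged;
}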
    1837             : 
    1838             : /*
    1839             :  * This function does a very simple check to see if we can collapse
    1840             :  * an extent tree with a single extent tree leaf block into the inode.
    1841             :  */
    1842     7789123 : static void ext4_ext_try_to_merge_up(handle_t *handle,
    1843             :                                      struct inode *inode,
    1844             :                                      struct ext4_ext_path *path)
    1845             : {
    1846     7789123 :         size_t s;
    1847     7789123 :         unsigned max_root = ext4_ext_space_root(inode, 0);
    1848     7789123 :         ext4_fsblk_t blk;
    1849             : 
    1850     7789123 :         if ((path[0].p_depth != 1) ||
    1851     3487878 :             (le16_to_cpu(path[0].p_hdr->eh_entries) != 1) ||
    1852     3304084 :             (le16_to_cpu(path[1].p_hdr->eh_entries) > max_root))
    1853             :                 return;
    1854             : 
    1855             :         /*
    1856             :          * We need to modify the block allocation bitmap and the block
    1857             :          * group descriptor to release the extent tree block.  If we
    1858             :          * can't get the journal credits, give up.
    1859             :          */
    1860       81534 :         if (ext4_journal_extend(handle, 2,
    1861             :                         ext4_free_metadata_revoke_credits(inode->i_sb, 1)))
    1862             :                 return;
    1863             : 
    1864             :         /*
    1865             :          * Copy the extent data up to the inode
    1866             :          */
    1867       40717 :         blk = ext4_idx_pblock(path[0].p_idx);
    1868       40717 :         s = le16_to_cpu(path[1].p_hdr->eh_entries) *
    1869             :                 sizeof(struct ext4_extent_idx);
    1870       40717 :         s += sizeof(struct ext4_extent_header);
    1871             : 
    1872       40717 :         path[1].p_maxdepth = path[0].p_maxdepth;
    1873       81434 :         memcpy(path[0].p_hdr, path[1].p_hdr, s);
    1874       40717 :         path[0].p_depth = 0;
    1875       40717 :         path[0].p_ext = EXT_FIRST_EXTENT(path[0].p_hdr) +
    1876       40717 :                 (path[1].p_ext - EXT_FIRST_EXTENT(path[1].p_hdr));
    1877       40717 :         path[0].p_hdr->eh_max = cpu_to_le16(max_root);
    1878             : 
    1879       40717 :         brelse(path[1].p_bh);
    1880       40717 :         ext4_free_blocks(handle, inode, NULL, blk, 1,
    1881             :                          EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET);
    1882             : }
    1883             : 
    1884             : /*
      1885             :  * This function tries to merge the @ex extent with its neighbours in the tree, then
    1886             :  * tries to collapse the extent tree into the inode.
    1887             :  */
    1888     7789352 : static void ext4_ext_try_to_merge(handle_t *handle,
    1889             :                                   struct inode *inode,
    1890             :                                   struct ext4_ext_path *path,
    1891             :                                   struct ext4_extent *ex)
    1892             : {
    1893     7789352 :         struct ext4_extent_header *eh;
    1894     7789352 :         unsigned int depth;
    1895     7789352 :         int merge_done = 0;
    1896             : 
    1897     7789352 :         depth = ext_depth(inode);
    1898     7789352 :         BUG_ON(path[depth].p_hdr == NULL);
    1899     7789352 :         eh = path[depth].p_hdr;
    1900             : 
    1901     7789352 :         if (ex > EXT_FIRST_EXTENT(eh))
    1902     6780543 :                 merge_done = ext4_ext_try_to_merge_right(inode, path, ex - 1);
    1903             : 
    1904     6780528 :         if (!merge_done)
    1905     4061824 :                 (void) ext4_ext_try_to_merge_right(inode, path, ex);
    1906             : 
    1907     7789180 :         ext4_ext_try_to_merge_up(handle, inode, path);
    1908     7789150 : }
    1909             : 
    1910             : /*
    1911             :  * check if a portion of the "newext" extent overlaps with an
    1912             :  * existing extent.
    1913             :  *
      1914             :  * If an overlap is discovered, it updates the length of the newext
    1915             :  * such that there will be no overlap, and then returns 1.
    1916             :  * If there is no overlap found, it returns 0.
    1917             :  */
    1918     3919573 : static unsigned int ext4_ext_check_overlap(struct ext4_sb_info *sbi,
    1919             :                                            struct inode *inode,
    1920             :                                            struct ext4_extent *newext,
    1921             :                                            struct ext4_ext_path *path)
    1922             : {
    1923     3919573 :         ext4_lblk_t b1, b2;
    1924     3919573 :         unsigned int depth, len1;
    1925     3919573 :         unsigned int ret = 0;
    1926             : 
    1927     3919573 :         b1 = le32_to_cpu(newext->ee_block);
    1928     3919573 :         len1 = ext4_ext_get_actual_len(newext);
    1929     3919573 :         depth = ext_depth(inode);
    1930     3919573 :         if (!path[depth].p_ext)
    1931      929679 :                 goto out;
    1932     2989894 :         b2 = EXT4_LBLK_CMASK(sbi, le32_to_cpu(path[depth].p_ext->ee_block));
    1933             : 
    1934             :         /*
    1935             :          * get the next allocated block if the extent in the path
    1936             :          * is before the requested block(s)
    1937             :          */
    1938     2989894 :         if (b2 < b1) {
    1939     2906436 :                 b2 = ext4_ext_next_allocated_block(path);
    1940     2906149 :                 if (b2 == EXT_MAX_BLOCKS)
    1941     1080672 :                         goto out;
    1942     1825477 :                 b2 = EXT4_LBLK_CMASK(sbi, b2);
    1943             :         }
    1944             : 
    1945             :         /* check for wrap through zero on extent logical start block*/
    1946     1908935 :         if (b1 + len1 < b1) {
    1947           0 :                 len1 = EXT_MAX_BLOCKS - b1;
    1948           0 :                 newext->ee_len = cpu_to_le16(len1);
    1949           0 :                 ret = 1;
    1950             :         }
    1951             : 
    1952             :         /* check for overlap */
    1953     1908935 :         if (b1 + len1 > b2) {
    1954           0 :                 newext->ee_len = cpu_to_le16(b2 - b1);
    1955           0 :                 ret = 1;
    1956             :         }
    1957     1908935 : out:
    1958     3919286 :         return ret;
    1959             : }
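                      : 
                      : /*
                      :  * Editorial worked example (not part of extents.c): assume a non-bigalloc
                      :  * filesystem, so EXT4_LBLK_CMASK() leaves block numbers unchanged.  If
                      :  * newext starts at b1 = 100 with len1 = 50 and the next allocated block in
                      :  * the tree is b2 = 120, then b1 + len1 = 150 > b2, so newext->ee_len is
                      :  * clamped to b2 - b1 = 20 and the function returns 1; the caller then
                      :  * inserts a shorter, non-overlapping extent.
                      :  */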
    1960             : 
    1961             : /*
    1962             :  * ext4_ext_insert_extent:
    1963             :  * tries to merge requested extent into the existing extent or
    1964             :  * inserts requested extent as new one into the tree,
    1965             :  * creating new leaf in the no-space case.
    1966             :  */
    1967     7904445 : int ext4_ext_insert_extent(handle_t *handle, struct inode *inode,
    1968             :                                 struct ext4_ext_path **ppath,
    1969             :                                 struct ext4_extent *newext, int gb_flags)
    1970             : {
    1971     7904445 :         struct ext4_ext_path *path = *ppath;
    1972     7904445 :         struct ext4_extent_header *eh;
    1973     7904445 :         struct ext4_extent *ex, *fex;
    1974     7904445 :         struct ext4_extent *nearex; /* nearest extent */
    1975     7904445 :         struct ext4_ext_path *npath = NULL;
    1976     7904445 :         int depth, len, err;
    1977     7904445 :         ext4_lblk_t next;
    1978     7904445 :         int mb_flags = 0, unwritten;
    1979             : 
    1980     7904445 :         if (gb_flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE)
    1981      874240 :                 mb_flags |= EXT4_MB_DELALLOC_RESERVED;
    1982    15808890 :         if (unlikely(ext4_ext_get_actual_len(newext) == 0)) {
    1983           0 :                 EXT4_ERROR_INODE(inode, "ext4_ext_get_actual_len(newext) == 0");
    1984           0 :                 return -EFSCORRUPTED;
    1985             :         }
    1986     7904445 :         depth = ext_depth(inode);
    1987     7904445 :         ex = path[depth].p_ext;
    1988     7904445 :         eh = path[depth].p_hdr;
    1989     7904445 :         if (unlikely(path[depth].p_hdr == NULL)) {
    1990           0 :                 EXT4_ERROR_INODE(inode, "path[%d].p_hdr == NULL", depth);
    1991           0 :                 return -EFSCORRUPTED;
    1992             :         }
    1993             : 
    1994             :         /* try to insert block into found extent and return */
    1995     7904445 :         if (ex && !(gb_flags & EXT4_GET_BLOCKS_PRE_IO)) {
    1996             : 
    1997             :                 /*
     1998             :                  * Try to see whether we should test the extent to the
     1999             :                  * right of ex or the one to the left of ex. This is because
     2000             :                  * ext4_find_extent() can return either the extent on the
     2001             :                  * left or the one on the right of the searched position.
     2002             :                  * This will make merging more effective.
    2003             :                  */
    2004     1626526 :                 if (ex < EXT_LAST_EXTENT(eh) &&
    2005      747607 :                     (le32_to_cpu(ex->ee_block) +
    2006      747607 :                     ext4_ext_get_actual_len(ex) <
    2007      747607 :                     le32_to_cpu(newext->ee_block))) {
    2008      139465 :                         ex += 1;
    2009      139465 :                         goto prepend;
    2010     1487061 :                 } else if ((ex > EXT_FIRST_EXTENT(eh)) &&
    2011     1383175 :                            (le32_to_cpu(newext->ee_block) +
    2012     1383175 :                            ext4_ext_get_actual_len(newext) <
    2013     1383175 :                            le32_to_cpu(ex->ee_block)))
    2014           0 :                         ex -= 1;
    2015             : 
     2016             :                 /* Try to append newext to ex */
    2017     1487061 :                 if (ext4_can_extents_be_merged(inode, ex, newext)) {
    2018      457282 :                         ext_debug(inode, "append [%d]%d block to %u:[%d]%d"
    2019             :                                   "(from %llu)\n",
    2020             :                                   ext4_ext_is_unwritten(newext),
    2021             :                                   ext4_ext_get_actual_len(newext),
    2022             :                                   le32_to_cpu(ex->ee_block),
    2023             :                                   ext4_ext_is_unwritten(ex),
    2024             :                                   ext4_ext_get_actual_len(ex),
    2025             :                                   ext4_ext_pblock(ex));
    2026      457282 :                         err = ext4_ext_get_access(handle, inode,
    2027             :                                                   path + depth);
    2028      457862 :                         if (err)
    2029             :                                 return err;
    2030      457862 :                         unwritten = ext4_ext_is_unwritten(ex);
    2031      915724 :                         ex->ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex)
    2032             :                                         + ext4_ext_get_actual_len(newext));
    2033      457862 :                         if (unwritten)
    2034      319409 :                                 ext4_ext_mark_unwritten(ex);
    2035      457862 :                         nearex = ex;
    2036      457862 :                         goto merge;
    2037             :                 }
    2038             : 
    2039     1029779 : prepend:
     2040             :                 /* Try to prepend newext to ex */
    2041     1169244 :                 if (ext4_can_extents_be_merged(inode, newext, ex)) {
    2042        1177 :                         ext_debug(inode, "prepend %u[%d]%d block to %u:[%d]%d"
    2043             :                                   "(from %llu)\n",
    2044             :                                   le32_to_cpu(newext->ee_block),
    2045             :                                   ext4_ext_is_unwritten(newext),
    2046             :                                   ext4_ext_get_actual_len(newext),
    2047             :                                   le32_to_cpu(ex->ee_block),
    2048             :                                   ext4_ext_is_unwritten(ex),
    2049             :                                   ext4_ext_get_actual_len(ex),
    2050             :                                   ext4_ext_pblock(ex));
    2051        1177 :                         err = ext4_ext_get_access(handle, inode,
    2052             :                                                   path + depth);
    2053        1177 :                         if (err)
    2054             :                                 return err;
    2055             : 
    2056        1177 :                         unwritten = ext4_ext_is_unwritten(ex);
    2057        1177 :                         ex->ee_block = newext->ee_block;
    2058        1177 :                         ext4_ext_store_pblock(ex, ext4_ext_pblock(newext));
    2059        2354 :                         ex->ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex)
    2060             :                                         + ext4_ext_get_actual_len(newext));
    2061        1177 :                         if (unwritten)
    2062        1177 :                                 ext4_ext_mark_unwritten(ex);
    2063        1177 :                         nearex = ex;
    2064        1177 :                         goto merge;
    2065             :                 }
    2066             :         }
    2067             : 
    2068     7445986 :         depth = ext_depth(inode);
    2069     7445986 :         eh = path[depth].p_hdr;
    2070     7445986 :         if (le16_to_cpu(eh->eh_entries) < le16_to_cpu(eh->eh_max))
    2071     7370132 :                 goto has_space;
    2072             : 
    2073             :         /* probably next leaf has space for us? */
    2074       75854 :         fex = EXT_LAST_EXTENT(eh);
    2075       75854 :         next = EXT_MAX_BLOCKS;
    2076       75854 :         if (le32_to_cpu(newext->ee_block) > le32_to_cpu(fex->ee_block))
    2077       29476 :                 next = ext4_ext_next_leaf_block(path);
    2078       29476 :         if (next != EXT_MAX_BLOCKS) {
    2079        1314 :                 ext_debug(inode, "next leaf block - %u\n", next);
    2080        1314 :                 BUG_ON(npath != NULL);
    2081        1314 :                 npath = ext4_find_extent(inode, next, NULL, gb_flags);
    2082        1314 :                 if (IS_ERR(npath))
    2083           0 :                         return PTR_ERR(npath);
    2084        1314 :                 BUG_ON(npath->p_depth != path->p_depth);
    2085        1314 :                 eh = npath[depth].p_hdr;
    2086        1314 :                 if (le16_to_cpu(eh->eh_entries) < le16_to_cpu(eh->eh_max)) {
    2087        1314 :                         ext_debug(inode, "next leaf isn't full(%d)\n",
    2088             :                                   le16_to_cpu(eh->eh_entries));
    2089        1314 :                         path = npath;
    2090        1314 :                         goto has_space;
    2091             :                 }
    2092             :                 ext_debug(inode, "next leaf has no free space(%d,%d)\n",
    2093             :                           le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max));
    2094             :         }
    2095             : 
    2096             :         /*
    2097             :          * There is no free space in the found leaf.
    2098             :          * We're gonna add a new leaf in the tree.
    2099             :          */
    2100       74540 :         if (gb_flags & EXT4_GET_BLOCKS_METADATA_NOFAIL)
    2101       36667 :                 mb_flags |= EXT4_MB_USE_RESERVED;
    2102       74540 :         err = ext4_ext_create_new_leaf(handle, inode, mb_flags, gb_flags,
    2103             :                                        ppath, newext);
    2104       74533 :         if (err)
    2105         416 :                 goto cleanup;
    2106       74117 :         depth = ext_depth(inode);
    2107       74117 :         eh = path[depth].p_hdr;
    2108             : 
    2109     7445563 : has_space:
    2110     7445563 :         nearex = path[depth].p_ext;
    2111             : 
    2112     7445563 :         err = ext4_ext_get_access(handle, inode, path + depth);
    2113     7445737 :         if (err)
    2114           0 :                 goto cleanup;
    2115             : 
    2116     7445737 :         if (!nearex) {
    2117             :                 /* there is no extent in this leaf, create first one */
    2118      837512 :                 ext_debug(inode, "first extent in the leaf: %u:%llu:[%d]%d\n",
    2119             :                                 le32_to_cpu(newext->ee_block),
    2120             :                                 ext4_ext_pblock(newext),
    2121             :                                 ext4_ext_is_unwritten(newext),
    2122             :                                 ext4_ext_get_actual_len(newext));
    2123      837512 :                 nearex = EXT_FIRST_EXTENT(eh);
    2124             :         } else {
    2125     6608225 :                 if (le32_to_cpu(newext->ee_block)
    2126     6608225 :                            > le32_to_cpu(nearex->ee_block)) {
    2127             :                         /* Insert after */
    2128     6532920 :                         ext_debug(inode, "insert %u:%llu:[%d]%d before: "
    2129             :                                         "nearest %p\n",
    2130             :                                         le32_to_cpu(newext->ee_block),
    2131             :                                         ext4_ext_pblock(newext),
    2132             :                                         ext4_ext_is_unwritten(newext),
    2133             :                                         ext4_ext_get_actual_len(newext),
    2134             :                                         nearex);
    2135     6532920 :                         nearex++;
    2136             :                 } else {
    2137             :                         /* Insert before */
    2138       75305 :                         BUG_ON(newext->ee_block == nearex->ee_block);
    2139             :                         ext_debug(inode, "insert %u:%llu:[%d]%d after: "
    2140             :                                         "nearest %p\n",
    2141             :                                         le32_to_cpu(newext->ee_block),
    2142             :                                         ext4_ext_pblock(newext),
    2143             :                                         ext4_ext_is_unwritten(newext),
    2144             :                                         ext4_ext_get_actual_len(newext),
    2145             :                                         nearex);
    2146             :                 }
    2147     6608225 :                 len = EXT_LAST_EXTENT(eh) - nearex + 1;
    2148     6608225 :                 if (len > 0) {
    2149     5688228 :                         ext_debug(inode, "insert %u:%llu:[%d]%d: "
    2150             :                                         "move %d extents from 0x%p to 0x%p\n",
    2151             :                                         le32_to_cpu(newext->ee_block),
    2152             :                                         ext4_ext_pblock(newext),
    2153             :                                         ext4_ext_is_unwritten(newext),
    2154             :                                         ext4_ext_get_actual_len(newext),
    2155             :                                         len, nearex, nearex + 1);
    2156    11376456 :                         memmove(nearex + 1, nearex,
    2157             :                                 len * sizeof(struct ext4_extent));
    2158             :                 }
    2159             :         }
    2160             : 
    2161     7445737 :         le16_add_cpu(&eh->eh_entries, 1);
    2162     7445737 :         path[depth].p_ext = nearex;
    2163     7445737 :         nearex->ee_block = newext->ee_block;
    2164     7445737 :         ext4_ext_store_pblock(nearex, ext4_ext_pblock(newext));
    2165     7445737 :         nearex->ee_len = newext->ee_len;
    2166             : 
    2167     7904776 : merge:
    2168             :         /* try to merge extents */
    2169     7904776 :         if (!(gb_flags & EXT4_GET_BLOCKS_PRE_IO))
    2170     2151548 :                 ext4_ext_try_to_merge(handle, inode, path, nearex);
    2171             : 
    2172             : 
    2173             :         /* time to correct all indexes above */
    2174     7904638 :         err = ext4_ext_correct_indexes(handle, inode, path);
    2175     7904113 :         if (err)
    2176           0 :                 goto cleanup;
    2177             : 
    2178     7904113 :         err = ext4_ext_dirty(handle, inode, path + path->p_depth);
    2179             : 
    2180     7905396 : cleanup:
    2181     7905396 :         ext4_free_ext_path(npath);
    2182     7905121 :         return err;
    2183             : }
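                      : 
                      : /*
                      :  * Editorial worked example (not part of extents.c): if newext maps logical
                      :  * blocks [120, 130) and ex maps [100, 120) with contiguous physical blocks
                      :  * and the same written/unwritten state, ext4_can_extents_be_merged() lets
                      :  * the append path simply grow ex to cover [100, 130).  Otherwise newext
                      :  * gets its own slot: later extents in the leaf are shifted right with
                      :  * memmove(), eh_entries is incremented, and, if the leaf is full and the
                      :  * next leaf has no room either, ext4_ext_create_new_leaf() grows the tree
                      :  * first.
                      :  */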
    2184             : 
    2185           0 : static int ext4_fill_es_cache_info(struct inode *inode,
    2186             :                                    ext4_lblk_t block, ext4_lblk_t num,
    2187             :                                    struct fiemap_extent_info *fieinfo)
    2188             : {
    2189           0 :         ext4_lblk_t next, end = block + num - 1;
    2190           0 :         struct extent_status es;
    2191           0 :         unsigned char blksize_bits = inode->i_sb->s_blocksize_bits;
    2192           0 :         unsigned int flags;
    2193           0 :         int err;
    2194             : 
    2195           0 :         while (block <= end) {
    2196           0 :                 next = 0;
    2197           0 :                 flags = 0;
    2198           0 :                 if (!ext4_es_lookup_extent(inode, block, &next, &es))
    2199             :                         break;
    2200           0 :                 if (ext4_es_is_unwritten(&es))
    2201           0 :                         flags |= FIEMAP_EXTENT_UNWRITTEN;
    2202           0 :                 if (ext4_es_is_delayed(&es))
    2203           0 :                         flags |= (FIEMAP_EXTENT_DELALLOC |
    2204             :                                   FIEMAP_EXTENT_UNKNOWN);
    2205           0 :                 if (ext4_es_is_hole(&es))
    2206           0 :                         flags |= EXT4_FIEMAP_EXTENT_HOLE;
    2207           0 :                 if (next == 0)
    2208           0 :                         flags |= FIEMAP_EXTENT_LAST;
    2209           0 :                 if (flags & (FIEMAP_EXTENT_DELALLOC|
    2210             :                              EXT4_FIEMAP_EXTENT_HOLE))
    2211           0 :                         es.es_pblk = 0;
    2212             :                 else
    2213           0 :                         es.es_pblk = ext4_es_pblock(&es);
    2214           0 :                 err = fiemap_fill_next_extent(fieinfo,
    2215           0 :                                 (__u64)es.es_lblk << blksize_bits,
    2216           0 :                                 (__u64)es.es_pblk << blksize_bits,
    2217           0 :                                 (__u64)es.es_len << blksize_bits,
    2218             :                                 flags);
    2219           0 :                 if (next == 0)
    2220             :                         break;
    2221           0 :                 block = next;
    2222           0 :                 if (err < 0)
    2223           0 :                         return err;
    2224           0 :                 if (err == 1)
    2225             :                         return 0;
    2226             :         }
    2227             :         return 0;
    2228             : }
    2229             : 
    2230             : 
    2231             : /*
    2232             :  * ext4_ext_determine_hole - determine hole around given block
    2233             :  * @inode:      inode we lookup in
    2234             :  * @path:       path in extent tree to @lblk
    2235             :  * @lblk:       pointer to logical block around which we want to determine hole
    2236             :  *
    2237             :  * Determine hole length (and start if easily possible) around given logical
     2238             :  * block. We don't try too hard to find the beginning of the hole, but if
     2239             :  * @path happens to point to the extent just before @lblk, we provide it.
    2240             :  *
    2241             :  * The function returns the length of a hole starting at @lblk. We update @lblk
    2242             :  * to the beginning of the hole if we managed to find it.
    2243             :  */
    2244     3599036 : static ext4_lblk_t ext4_ext_determine_hole(struct inode *inode,
    2245             :                                            struct ext4_ext_path *path,
    2246             :                                            ext4_lblk_t *lblk)
    2247             : {
    2248     3599036 :         int depth = ext_depth(inode);
    2249     3599036 :         struct ext4_extent *ex;
    2250     3599036 :         ext4_lblk_t len;
    2251             : 
    2252     3599036 :         ex = path[depth].p_ext;
    2253     3599036 :         if (ex == NULL) {
    2254             :                 /* there is no extent yet, so gap is [0;-] */
    2255     1965335 :                 *lblk = 0;
    2256     1965335 :                 len = EXT_MAX_BLOCKS;
    2257     1633701 :         } else if (*lblk < le32_to_cpu(ex->ee_block)) {
    2258       20943 :                 len = le32_to_cpu(ex->ee_block) - *lblk;
    2259     1612758 :         } else if (*lblk >= le32_to_cpu(ex->ee_block)
    2260     1612758 :                         + ext4_ext_get_actual_len(ex)) {
    2261     1612758 :                 ext4_lblk_t next;
    2262             : 
    2263     1612758 :                 *lblk = le32_to_cpu(ex->ee_block) + ext4_ext_get_actual_len(ex);
    2264     1612758 :                 next = ext4_ext_next_allocated_block(path);
    2265     1612732 :                 BUG_ON(next == *lblk);
    2266     1612732 :                 len = next - *lblk;
    2267             :         } else {
    2268           0 :                 BUG();
    2269             :         }
    2270     3599010 :         return len;
    2271             : }
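                      : 
                      : /*
                      :  * Editorial worked example (not part of extents.c): if the leaf holds a
                      :  * single extent covering logical blocks [100, 110) and the caller passes
                      :  * *lblk = 150, the third branch runs: *lblk is moved up to 110 (the first
                      :  * block past the extent) and the returned hole length is
                      :  * ext4_ext_next_allocated_block(path) - 110, i.e. the gap up to the next
                      :  * extent, or up to EXT_MAX_BLOCKS when nothing follows.
                      :  */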
    2272             : 
    2273             : /*
    2274             :  * ext4_ext_put_gap_in_cache:
    2275             :  * calculate boundaries of the gap that the requested block fits into
    2276             :  * and cache this gap
    2277             :  */
    2278             : static void
    2279     3597874 : ext4_ext_put_gap_in_cache(struct inode *inode, ext4_lblk_t hole_start,
    2280             :                           ext4_lblk_t hole_len)
    2281             : {
    2282     3597874 :         struct extent_status es;
    2283             : 
    2284     3597874 :         ext4_es_find_extent_range(inode, &ext4_es_is_delayed, hole_start,
    2285     3597874 :                                   hole_start + hole_len - 1, &es);
    2286     3606918 :         if (es.es_len) {
     2287             :                 /* Is there a delayed extent containing lblock? */
    2288     1032129 :                 if (es.es_lblk <= hole_start)
    2289      335204 :                         return;
    2290      696925 :                 hole_len = min(es.es_lblk - hole_start, hole_len);
    2291             :         }
    2292     3271714 :         ext_debug(inode, " -> %u:%u\n", hole_start, hole_len);
    2293     3271714 :         ext4_es_insert_extent(inode, hole_start, hole_len, ~0,
    2294             :                               EXTENT_STATUS_HOLE);
    2295             : }
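                      : 
                      : /*
                      :  * Editorial worked example (not part of extents.c): with hole_start = 110
                      :  * and hole_len = 90, the range [110, 199] is scanned for delayed extents.
                      :  * If one starts at es_lblk = 150, the hole is clamped to
                      :  * min(150 - 110, 90) = 40 blocks and [110, 149] is cached as a hole; if a
                      :  * delayed extent already covers hole_start itself, nothing is cached.
                      :  */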
    2296             : 
    2297             : /*
    2298             :  * ext4_ext_rm_idx:
    2299             :  * removes index from the index block.
    2300             :  */
    2301       16190 : static int ext4_ext_rm_idx(handle_t *handle, struct inode *inode,
    2302             :                         struct ext4_ext_path *path, int depth)
    2303             : {
    2304       16190 :         int err;
    2305       16190 :         ext4_fsblk_t leaf;
    2306             : 
    2307             :         /* free index block */
    2308       16190 :         depth--;
    2309       16190 :         path = path + depth;
    2310       16190 :         leaf = ext4_idx_pblock(path->p_idx);
    2311       16190 :         if (unlikely(path->p_hdr->eh_entries == 0)) {
    2312           0 :                 EXT4_ERROR_INODE(inode, "path->p_hdr->eh_entries == 0");
    2313           0 :                 return -EFSCORRUPTED;
    2314             :         }
    2315       16190 :         err = ext4_ext_get_access(handle, inode, path);
    2316       16190 :         if (err)
    2317             :                 return err;
    2318             : 
    2319       16190 :         if (path->p_idx != EXT_LAST_INDEX(path->p_hdr)) {
    2320           0 :                 int len = EXT_LAST_INDEX(path->p_hdr) - path->p_idx;
    2321           0 :                 len *= sizeof(struct ext4_extent_idx);
    2322           0 :                 memmove(path->p_idx, path->p_idx + 1, len);
    2323             :         }
    2324             : 
    2325       16190 :         le16_add_cpu(&path->p_hdr->eh_entries, -1);
    2326       16190 :         err = ext4_ext_dirty(handle, inode, path);
    2327       16190 :         if (err)
    2328             :                 return err;
    2329       16190 :         ext_debug(inode, "index is empty, remove it, free block %llu\n", leaf);
    2330       16190 :         trace_ext4_ext_rm_idx(inode, leaf);
    2331             : 
    2332       16190 :         ext4_free_blocks(handle, inode, NULL, leaf, 1,
    2333             :                          EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET);
    2334             : 
    2335       16249 :         while (--depth >= 0) {
    2336        2803 :                 if (path->p_idx != EXT_FIRST_INDEX(path->p_hdr))
    2337             :                         break;
    2338          59 :                 path--;
    2339          59 :                 err = ext4_ext_get_access(handle, inode, path);
    2340          59 :                 if (err)
    2341             :                         break;
    2342          59 :                 path->p_idx->ei_block = (path+1)->p_idx->ei_block;
    2343          59 :                 err = ext4_ext_dirty(handle, inode, path);
    2344          59 :                 if (err)
    2345             :                         break;
    2346             :         }
    2347             :         return err;
    2348             : }
    2349             : 
    2350             : /*
    2351             :  * ext4_ext_calc_credits_for_single_extent:
     2352             :  * This routine returns the maximum credits needed to insert an extent
     2353             :  * into the extent tree.
     2354             :  * When passing the actual path, the caller should calculate credits
    2355             :  * under i_data_sem.
    2356             :  */
    2357           0 : int ext4_ext_calc_credits_for_single_extent(struct inode *inode, int nrblocks,
    2358             :                                                 struct ext4_ext_path *path)
    2359             : {
    2360           0 :         if (path) {
    2361           0 :                 int depth = ext_depth(inode);
    2362           0 :                 int ret = 0;
    2363             : 
    2364             :                 /* probably there is space in leaf? */
    2365           0 :                 if (le16_to_cpu(path[depth].p_hdr->eh_entries)
    2366           0 :                                 < le16_to_cpu(path[depth].p_hdr->eh_max)) {
    2367             : 
    2368             :                         /*
     2369             :                          *  There is some space in the leaf, so no
     2370             :                          *  need to account for a leaf block credit.
    2371             :                          *
    2372             :                          *  bitmaps and block group descriptor blocks
    2373             :                          *  and other metadata blocks still need to be
    2374             :                          *  accounted.
    2375             :                          */
    2376             :                         /* 1 bitmap, 1 block group descriptor */
    2377           0 :                         ret = 2 + EXT4_META_TRANS_BLOCKS(inode->i_sb);
    2378           0 :                         return ret;
    2379             :                 }
    2380             :         }
    2381             : 
    2382           0 :         return ext4_chunk_trans_blocks(inode, nrblocks);
    2383             : }
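                      : 
                      : /*
                      :  * Editorial note (not part of extents.c): when the caller supplies a path
                      :  * and the leaf still has a free slot, the estimate is simply
                      :  * 2 + EXT4_META_TRANS_BLOCKS(sb): one bitmap block, one group descriptor
                      :  * block, plus the usual per-transaction metadata overhead.  Otherwise the
                      :  * estimate falls back to ext4_chunk_trans_blocks(inode, nrblocks).
                      :  */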
    2384             : 
    2385             : /*
    2386             :  * How many index/leaf blocks need to change/allocate to add @extents extents?
    2387             :  *
     2388             :  * If we add a single extent, then in the worst case each tree level's
     2389             :  * index/leaf needs to be changed in case of a tree split.
     2390             :  *
     2391             :  * If more extents are inserted, they could cause the whole tree to split more
    2392             :  * than once, but this is really rare.
    2393             :  */
    2394     9383919 : int ext4_ext_index_trans_blocks(struct inode *inode, int extents)
    2395             : {
    2396     9583268 :         int index;
    2397     9583268 :         int depth;
    2398             : 
    2399             :         /* If we are converting the inline data, only one is needed here. */
    2400     9383919 :         if (ext4_has_inline_data(inode))
    2401             :                 return 1;
    2402             : 
    2403     9583268 :         depth = ext_depth(inode);
    2404             : 
    2405     9383919 :         if (extents <= 1)
    2406     9383911 :                 index = depth * 2;
    2407             :         else
    2408      199357 :                 index = depth * 3;
    2409             : 
    2410             :         return index;
    2411             : }
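                      : 
                      : /*
                      :  * Editorial worked example (not part of extents.c): for a tree of depth 2,
                      :  * inserting a single extent is budgeted at 2 * 2 = 4 index/leaf blocks
                      :  * (each level may be modified once if the path splits), while inserting
                      :  * several extents is budgeted at 3 * 2 = 6 blocks to allow for the rare
                      :  * case of the tree splitting more than once.
                      :  */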
    2412             : 
    2413     2538420 : static inline int get_default_free_blocks_flags(struct inode *inode)
    2414             : {
    2415     2538420 :         if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode) ||
    2416             :             ext4_test_inode_flag(inode, EXT4_INODE_EA_INODE))
    2417             :                 return EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET;
    2418     2337428 :         else if (ext4_should_journal_data(inode))
    2419           0 :                 return EXT4_FREE_BLOCKS_FORGET;
    2420             :         return 0;
    2421             : }
    2422             : 
    2423             : /*
    2424             :  * ext4_rereserve_cluster - increment the reserved cluster count when
    2425             :  *                          freeing a cluster with a pending reservation
    2426             :  *
    2427             :  * @inode - file containing the cluster
    2428             :  * @lblk - logical block in cluster to be reserved
    2429             :  *
    2430             :  * Increments the reserved cluster count and adjusts quota in a bigalloc
    2431             :  * file system when freeing a partial cluster containing at least one
    2432             :  * delayed and unwritten block.  A partial cluster meeting that
    2433             :  * requirement will have a pending reservation.  If so, the
    2434             :  * RERESERVE_CLUSTER flag is used when calling ext4_free_blocks() to
    2435             :  * defer reserved and allocated space accounting to a subsequent call
    2436             :  * to this function.
    2437             :  */
    2438           0 : static void ext4_rereserve_cluster(struct inode *inode, ext4_lblk_t lblk)
    2439             : {
    2440           0 :         struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
    2441           0 :         struct ext4_inode_info *ei = EXT4_I(inode);
    2442             : 
    2443           0 :         dquot_reclaim_block(inode, EXT4_C2B(sbi, 1));
    2444             : 
    2445           0 :         spin_lock(&ei->i_block_reservation_lock);
    2446           0 :         ei->i_reserved_data_blocks++;
    2447           0 :         percpu_counter_add(&sbi->s_dirtyclusters_counter, 1);
    2448           0 :         spin_unlock(&ei->i_block_reservation_lock);
    2449             : 
    2450           0 :         percpu_counter_add(&sbi->s_freeclusters_counter, 1);
    2451           0 :         ext4_remove_pending(inode, lblk);
    2452           0 : }
    2453             : 
    2454     2538412 : static int ext4_remove_blocks(handle_t *handle, struct inode *inode,
    2455             :                               struct ext4_extent *ex,
    2456             :                               struct partial_cluster *partial,
    2457             :                               ext4_lblk_t from, ext4_lblk_t to)
    2458             : {
    2459     2538412 :         struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
    2460     2538412 :         unsigned short ee_len = ext4_ext_get_actual_len(ex);
    2461     2538412 :         ext4_fsblk_t last_pblk, pblk;
    2462     2538412 :         ext4_lblk_t num;
    2463     2538412 :         int flags;
    2464             : 
    2465             :         /* only extent tail removal is allowed */
    2466     2538412 :         if (from < le32_to_cpu(ex->ee_block) ||
    2467     2538412 :             to != le32_to_cpu(ex->ee_block) + ee_len - 1) {
    2468           0 :                 ext4_error(sbi->s_sb,
    2469             :                            "strange request: removal(2) %u-%u from %u:%u",
    2470             :                            from, to, le32_to_cpu(ex->ee_block), ee_len);
    2471           0 :                 return 0;
    2472             :         }
    2473             : 
    2474             : #ifdef EXTENTS_STATS
    2475             :         spin_lock(&sbi->s_ext_stats_lock);
    2476             :         sbi->s_ext_blocks += ee_len;
    2477             :         sbi->s_ext_extents++;
    2478             :         if (ee_len < sbi->s_ext_min)
    2479             :                 sbi->s_ext_min = ee_len;
    2480             :         if (ee_len > sbi->s_ext_max)
    2481             :                 sbi->s_ext_max = ee_len;
    2482             :         if (ext_depth(inode) > sbi->s_depth_max)
    2483             :                 sbi->s_depth_max = ext_depth(inode);
    2484             :         spin_unlock(&sbi->s_ext_stats_lock);
    2485             : #endif
    2486             : 
    2487     2538426 :         trace_ext4_remove_blocks(inode, ex, from, to, partial);
    2488             : 
    2489             :         /*
    2490             :          * if we have a partial cluster, and it's different from the
    2491             :          * cluster of the last block in the extent, we free it
    2492             :          */
    2493     2538432 :         last_pblk = ext4_ext_pblock(ex) + ee_len - 1;
    2494             : 
    2495     2538432 :         if (partial->state != initial &&
    2496           0 :             partial->pclu != EXT4_B2C(sbi, last_pblk)) {
    2497           0 :                 if (partial->state == tofree) {
    2498           0 :                         flags = get_default_free_blocks_flags(inode);
    2499           0 :                         if (ext4_is_pending(inode, partial->lblk))
    2500           0 :                                 flags |= EXT4_FREE_BLOCKS_RERESERVE_CLUSTER;
    2501           0 :                         ext4_free_blocks(handle, inode, NULL,
    2502           0 :                                          EXT4_C2B(sbi, partial->pclu),
    2503           0 :                                          sbi->s_cluster_ratio, flags);
    2504           0 :                         if (flags & EXT4_FREE_BLOCKS_RERESERVE_CLUSTER)
    2505           0 :                                 ext4_rereserve_cluster(inode, partial->lblk);
    2506             :                 }
    2507           0 :                 partial->state = initial;
    2508             :         }
    2509             : 
    2510     2538432 :         num = le32_to_cpu(ex->ee_block) + ee_len - from;
    2511     2538432 :         pblk = ext4_ext_pblock(ex) + ee_len - num;
    2512             : 
    2513             :         /*
    2514             :          * We free the partial cluster at the end of the extent (if any),
    2515             :          * unless the cluster is used by another extent (partial_cluster
    2516             :          * state is nofree).  If a partial cluster exists here, it must be
    2517             :          * shared with the last block in the extent.
    2518             :          */
    2519     2538432 :         flags = get_default_free_blocks_flags(inode);
    2520             : 
    2521             :         /* partial, left end cluster aligned, right end unaligned */
    2522     2538409 :         if ((EXT4_LBLK_COFF(sbi, to) != sbi->s_cluster_ratio - 1) &&
    2523           0 :             (EXT4_LBLK_CMASK(sbi, to) >= from) &&
    2524           0 :             (partial->state != nofree)) {
    2525           0 :                 if (ext4_is_pending(inode, to))
    2526           0 :                         flags |= EXT4_FREE_BLOCKS_RERESERVE_CLUSTER;
    2527           0 :                 ext4_free_blocks(handle, inode, NULL,
    2528           0 :                                  EXT4_PBLK_CMASK(sbi, last_pblk),
    2529           0 :                                  sbi->s_cluster_ratio, flags);
    2530           0 :                 if (flags & EXT4_FREE_BLOCKS_RERESERVE_CLUSTER)
    2531           0 :                         ext4_rereserve_cluster(inode, to);
    2532           0 :                 partial->state = initial;
    2533           0 :                 flags = get_default_free_blocks_flags(inode);
    2534             :         }
    2535             : 
    2536     2538409 :         flags |= EXT4_FREE_BLOCKS_NOFREE_LAST_CLUSTER;
    2537             : 
    2538             :         /*
    2539             :          * For bigalloc file systems, we never free a partial cluster
    2540             :          * at the beginning of the extent.  Instead, we check to see if we
    2541             :          * need to free it on a subsequent call to ext4_remove_blocks,
    2542             :          * or at the end of ext4_ext_rm_leaf or ext4_ext_remove_space.
    2543             :          */
    2544     2538409 :         flags |= EXT4_FREE_BLOCKS_NOFREE_FIRST_CLUSTER;
    2545     2538409 :         ext4_free_blocks(handle, inode, NULL, pblk, num, flags);
    2546             : 
    2547             :         /* reset the partial cluster if we've freed past it */
    2548     2538460 :         if (partial->state != initial && partial->pclu != EXT4_B2C(sbi, pblk))
    2549           0 :                 partial->state = initial;
    2550             : 
    2551             :         /*
    2552             :          * If we've freed the entire extent but the beginning is not left
    2553             :          * cluster aligned and is not marked as ineligible for freeing we
    2554             :          * record the partial cluster at the beginning of the extent.  It
    2555             :          * wasn't freed by the preceding ext4_free_blocks() call, and we
    2556             :          * need to look farther to the left to determine if it's to be freed
    2557             :          * (not shared with another extent). Else, reset the partial
     2558             :          * cluster - we're either done freeing or the beginning of the
    2559             :          * extent is left cluster aligned.
    2560             :          */
    2561     2538460 :         if (EXT4_LBLK_COFF(sbi, from) && num == ee_len) {
    2562           0 :                 if (partial->state == initial) {
    2563           0 :                         partial->pclu = EXT4_B2C(sbi, pblk);
    2564           0 :                         partial->lblk = from;
    2565           0 :                         partial->state = tofree;
    2566             :                 }
    2567             :         } else {
    2568     2538460 :                 partial->state = initial;
    2569             :         }
    2570             : 
    2571             :         return 0;
    2572             : }
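                      : 
                      : /*
                      :  * Editorial worked example (not part of extents.c): for a tail removal
                      :  * from an extent mapping logical blocks [200, 300) at physical block P,
                      :  * with from = 260 and to = 299, we get num = 200 + 100 - 260 = 40 and
                      :  * pblk = P + 100 - 40 = P + 60, so the 40 physical blocks backing
                      :  * [260, 299] are freed and ext4_ext_rm_leaf() then shrinks ex->ee_len to
                      :  * the remaining 60 blocks.
                      :  */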
    2573             : 
    2574             : /*
    2575             :  * ext4_ext_rm_leaf() Removes the extents associated with the
    2576             :  * blocks appearing between "start" and "end".  Both "start"
    2577             :  * and "end" must appear in the same extent or EIO is returned.
    2578             :  *
    2579             :  * @handle: The journal handle
     2580             :  * @inode:  The file's inode
    2581             :  * @path:   The path to the leaf
    2582             :  * @partial_cluster: The cluster which we'll have to free if all extents
     2583             :  *                   have been released from it.  However, if this value is
    2584             :  *                   negative, it's a cluster just to the right of the
    2585             :  *                   punched region and it must not be freed.
    2586             :  * @start:  The first block to remove
    2587             :  * @end:   The last block to remove
    2588             :  */
    2589             : static int
    2590     1249346 : ext4_ext_rm_leaf(handle_t *handle, struct inode *inode,
    2591             :                  struct ext4_ext_path *path,
    2592             :                  struct partial_cluster *partial,
    2593             :                  ext4_lblk_t start, ext4_lblk_t end)
    2594             : {
    2595     1249346 :         struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
    2596     1249346 :         int err = 0, correct_index = 0;
    2597     1249346 :         int depth = ext_depth(inode), credits, revoke_credits;
    2598     1249346 :         struct ext4_extent_header *eh;
    2599     1249346 :         ext4_lblk_t a, b;
    2600     1249346 :         unsigned num;
    2601     1249346 :         ext4_lblk_t ex_ee_block;
    2602     1249346 :         unsigned short ex_ee_len;
    2603     1249346 :         unsigned unwritten = 0;
    2604     1249346 :         struct ext4_extent *ex;
    2605     1249346 :         ext4_fsblk_t pblk;
    2606             : 
    2607             :         /* the header must be checked already in ext4_ext_remove_space() */
    2608     1249346 :         ext_debug(inode, "truncate since %u in leaf to %u\n", start, end);
    2609     1249346 :         if (!path[depth].p_hdr)
    2610      205692 :                 path[depth].p_hdr = ext_block_hdr(path[depth].p_bh);
    2611     1249346 :         eh = path[depth].p_hdr;
    2612     1249346 :         if (unlikely(path[depth].p_hdr == NULL)) {
    2613           0 :                 EXT4_ERROR_INODE(inode, "path[%d].p_hdr == NULL", depth);
    2614           0 :                 return -EFSCORRUPTED;
    2615             :         }
    2616             :         /* find where to start removing */
    2617     1249346 :         ex = path[depth].p_ext;
    2618     1249346 :         if (!ex)
    2619      836707 :                 ex = EXT_LAST_EXTENT(eh);
    2620             : 
    2621     1249346 :         ex_ee_block = le32_to_cpu(ex->ee_block);
    2622     1249346 :         ex_ee_len = ext4_ext_get_actual_len(ex);
    2623             : 
    2624     1249346 :         trace_ext4_ext_rm_leaf(inode, start, ex, partial);
    2625             : 
    2626     3959877 :         while (ex >= EXT_FIRST_EXTENT(eh) &&
    2627     3320217 :                         ex_ee_block + ex_ee_len > start) {
    2628             : 
    2629     2710696 :                 if (ext4_ext_is_unwritten(ex))
    2630             :                         unwritten = 1;
    2631             :                 else
    2632     1414412 :                         unwritten = 0;
    2633             : 
    2634     2710696 :                 ext_debug(inode, "remove ext %u:[%d]%d\n", ex_ee_block,
    2635             :                           unwritten, ex_ee_len);
    2636     2710696 :                 path[depth].p_ext = ex;
    2637             : 
    2638     2710696 :                 a = max(ex_ee_block, start);
    2639     2710696 :                 b = min(ex_ee_block + ex_ee_len - 1, end);
    2640             : 
    2641     2710696 :                 ext_debug(inode, "  border %u:%u\n", a, b);
    2642             : 
    2643             :                 /* If this extent is beyond the end of the hole, skip it */
    2644     2710696 :                 if (end < ex_ee_block) {
    2645             :                         /*
    2646             :                          * We're going to skip this extent and move to another,
    2647             :                          * so note that its first cluster is in use to avoid
    2648             :                          * freeing it when removing blocks.  Eventually, the
    2649             :                          * right edge of the truncated/punched region will
    2650             :                          * be just to the left.
    2651             :                          */
    2652      172040 :                         if (sbi->s_cluster_ratio > 1) {
    2653           0 :                                 pblk = ext4_ext_pblock(ex);
    2654           0 :                                 partial->pclu = EXT4_B2C(sbi, pblk);
    2655           0 :                                 partial->state = nofree;
    2656             :                         }
    2657      172040 :                         ex--;
    2658      172040 :                         ex_ee_block = le32_to_cpu(ex->ee_block);
    2659      172040 :                         ex_ee_len = ext4_ext_get_actual_len(ex);
    2660      172040 :                         continue;
    2661     2538656 :                 } else if (b != ex_ee_block + ex_ee_len - 1) {
    2662           0 :                         EXT4_ERROR_INODE(inode,
    2663             :                                          "can not handle truncate %u:%u "
    2664             :                                          "on extent %u:%u",
    2665             :                                          start, end, ex_ee_block,
    2666             :                                          ex_ee_block + ex_ee_len - 1);
    2667           0 :                         err = -EFSCORRUPTED;
    2668           0 :                         goto out;
    2669     2538656 :                 } else if (a != ex_ee_block) {
    2670             :                         /* remove tail of the extent */
    2671      216684 :                         num = a - ex_ee_block;
    2672             :                 } else {
    2673             :                         /* remove whole extent: excellent! */
    2674             :                         num = 0;
    2675             :                 }
    2676             :                 /*
    2677             :                  * 3 for leaf, sb, and inode plus 2 (bmap and group
    2678             :                  * descriptor) for each block group; assume two block
    2679             :                  * groups plus ex_ee_len/blocks_per_block_group for
    2680             :                  * the worst case
    2681             :                  */
    2682     2538656 :                 credits = 7 + 2*(ex_ee_len/EXT4_BLOCKS_PER_GROUP(inode->i_sb));
    2683     2538656 :                 if (ex == EXT_FIRST_EXTENT(eh)) {
    2684      438860 :                         correct_index = 1;
    2685      438860 :                         credits += (ext_depth(inode)) + 1;
    2686             :                 }
    2687     2538656 :                 credits += EXT4_MAXQUOTAS_TRANS_BLOCKS(inode->i_sb);
    2688             :                 /*
    2689             :                  * We may end up freeing some index blocks and data from the
    2690             :                  * punched range. Note that partial clusters are accounted for
    2691             :                  * by ext4_free_data_revoke_credits().
    2692             :                  */
    2693     2538656 :                 revoke_credits =
    2694             :                         ext4_free_metadata_revoke_credits(inode->i_sb,
    2695             :                                                           ext_depth(inode)) +
    2696     2538656 :                         ext4_free_data_revoke_credits(inode, b - a + 1);
    2697             : 
    2698     2538498 :                 err = ext4_datasem_ensure_credits(handle, inode, credits,
    2699             :                                                   credits, revoke_credits);
    2700     2538491 :                 if (err) {
    2701          74 :                         if (err > 0)
    2702          74 :                                 err = -EAGAIN;
    2703          74 :                         goto out;
    2704             :                 }
    2705             : 
    2706     2538417 :                 err = ext4_ext_get_access(handle, inode, path + depth);
    2707     2538423 :                 if (err)
    2708           0 :                         goto out;
    2709             : 
    2710     2538423 :                 err = ext4_remove_blocks(handle, inode, ex, partial, a, b);
    2711     2538444 :                 if (err)
    2712           0 :                         goto out;
    2713             : 
    2714     2538444 :                 if (num == 0)
    2715             :                         /* this extent is removed; mark slot entirely unused */
    2716     2321771 :                         ext4_ext_store_pblock(ex, 0);
    2717             : 
    2718     2538444 :                 ex->ee_len = cpu_to_le16(num);
    2719             :                 /*
    2720             :                  * Do not mark unwritten if all the blocks in the
    2721             :                  * extent have been removed.
    2722             :                  */
    2723     2538444 :                 if (unwritten && num)
    2724       79090 :                         ext4_ext_mark_unwritten(ex);
    2725             :                 /*
    2726             :                  * If the extent was completely released,
    2727             :                  * we need to remove it from the leaf
    2728             :                  */
    2729     2538444 :                 if (num == 0) {
    2730     2321772 :                         if (end != EXT_MAX_BLOCKS - 1) {
    2731             :                                 /*
    2732             :                                  * For hole punching, we need to scoot all the
    2733             :                                  * extents up when an extent is removed so that
     2734             :                                  * we don't have blank extents in the middle
    2735             :                                  */
    2736      784070 :                                 memmove(ex, ex+1, (EXT_LAST_EXTENT(eh) - ex) *
    2737             :                                         sizeof(struct ext4_extent));
    2738             : 
    2739             :                                 /* Now get rid of the one at the end */
    2740      784070 :                                 memset(EXT_LAST_EXTENT(eh), 0,
    2741             :                                         sizeof(struct ext4_extent));
    2742             :                         }
    2743     2321772 :                         le16_add_cpu(&eh->eh_entries, -1);
    2744             :                 }
    2745             : 
    2746     2538444 :                 err = ext4_ext_dirty(handle, inode, path + depth);
    2747     2538491 :                 if (err)
    2748           0 :                         goto out;
    2749             : 
    2750     2538491 :                 ext_debug(inode, "new extent: %u:%u:%llu\n", ex_ee_block, num,
    2751             :                                 ext4_ext_pblock(ex));
    2752     2538491 :                 ex--;
    2753     2538491 :                 ex_ee_block = le32_to_cpu(ex->ee_block);
    2754     5076982 :                 ex_ee_len = ext4_ext_get_actual_len(ex);
    2755             :         }
    2756             : 
    2757     1249116 :         if (correct_index && eh->eh_entries)
    2758       22386 :                 err = ext4_ext_correct_indexes(handle, inode, path);
    2759             : 
    2760             :         /*
    2761             :          * If there's a partial cluster and at least one extent remains in
    2762             :          * the leaf, free the partial cluster if it isn't shared with the
    2763             :          * current extent.  If it is shared with the current extent
    2764             :          * we reset the partial cluster because we've reached the start of the
    2765             :          * truncated/punched region and we're done removing blocks.
    2766             :          */
    2767     1249116 :         if (partial->state == tofree && ex >= EXT_FIRST_EXTENT(eh)) {
    2768           0 :                 pblk = ext4_ext_pblock(ex) + ex_ee_len - 1;
    2769           0 :                 if (partial->pclu != EXT4_B2C(sbi, pblk)) {
    2770           0 :                         int flags = get_default_free_blocks_flags(inode);
    2771             : 
    2772           0 :                         if (ext4_is_pending(inode, partial->lblk))
    2773           0 :                                 flags |= EXT4_FREE_BLOCKS_RERESERVE_CLUSTER;
    2774           0 :                         ext4_free_blocks(handle, inode, NULL,
    2775           0 :                                          EXT4_C2B(sbi, partial->pclu),
    2776           0 :                                          sbi->s_cluster_ratio, flags);
    2777           0 :                         if (flags & EXT4_FREE_BLOCKS_RERESERVE_CLUSTER)
    2778           0 :                                 ext4_rereserve_cluster(inode, partial->lblk);
    2779             :                 }
    2780           0 :                 partial->state = initial;
    2781             :         }
    2782             : 
    2783             :         /* if this leaf is free, then we should
    2784             :          * remove it from index block above */
    2785     1249116 :         if (err == 0 && eh->eh_entries == 0 && path[depth].p_bh != NULL)
    2786       16131 :                 err = ext4_ext_rm_idx(handle, inode, path, depth);
    2787             : 
    2788     1232985 : out:
    2789             :         return err;
    2790             : }
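                      : 
                      : /*
                      :  * Editorial worked example (not part of extents.c): the per-extent credit
                      :  * estimate above, for a 1000-block extent on a filesystem with 32768
                      :  * blocks per group and a depth-1 tree, is 7 + 2 * (1000 / 32768) = 7
                      :  * blocks, plus depth + 1 = 2 more if ex is the first extent in the leaf
                      :  * (the indexes above may need correcting), plus
                      :  * EXT4_MAXQUOTAS_TRANS_BLOCKS(sb) for quota updates.
                      :  */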
    2791             : 
    2792             : /*
    2793             :  * ext4_ext_more_to_rm:
     2794             :  * returns 1 if the current index has to be freed (even partially)
    2795             :  */
    2796             : static int
    2797      767802 : ext4_ext_more_to_rm(struct ext4_ext_path *path)
    2798             : {
    2799      767802 :         BUG_ON(path->p_idx == NULL);
    2800             : 
    2801      767802 :         if (path->p_idx < EXT_FIRST_INDEX(path->p_hdr))
    2802             :                 return 0;
    2803             : 
    2804             :         /*
     2805             :          * if a truncate on a deeper level happened, it wasn't partial,
     2806             :          * so we have to consider the current index for truncation
    2807             :          */
    2808      238320 :         if (le16_to_cpu(path->p_hdr->eh_entries) == path->p_block)
    2809       32405 :                 return 0;
    2810             :         return 1;
    2811             : }
    2812             : 
    2813     1222833 : int ext4_ext_remove_space(struct inode *inode, ext4_lblk_t start,
    2814             :                           ext4_lblk_t end)
    2815             : {
    2816     1222833 :         struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
    2817     1222833 :         int depth = ext_depth(inode);
    2818     1222833 :         struct ext4_ext_path *path = NULL;
    2819     1222833 :         struct partial_cluster partial;
    2820     1222833 :         handle_t *handle;
    2821     1222833 :         int i = 0, err = 0;
    2822             : 
    2823     1222833 :         partial.pclu = 0;
    2824     1222833 :         partial.lblk = 0;
    2825     1222833 :         partial.state = initial;
    2826             : 
    2827     1222833 :         ext_debug(inode, "truncate since %u to %u\n", start, end);
    2828             : 
    2829             :         /* probably first extent we're gonna free will be last in block */
    2830     1222833 :         handle = ext4_journal_start_with_revoke(inode, EXT4_HT_TRUNCATE,
    2831             :                         depth + 1,
    2832             :                         ext4_free_metadata_revoke_credits(inode->i_sb, depth));
    2833     1222798 :         if (IS_ERR(handle))
    2834           0 :                 return PTR_ERR(handle);
    2835             : 
    2836     1222798 : again:
    2837     1222872 :         trace_ext4_ext_remove_space(inode, start, end, depth);
    2838             : 
    2839             :         /*
     2840             :          * Check if we are removing extents inside the extent tree. If that
     2841             :          * is the case, we are going to punch a hole inside the extent tree,
     2842             :          * so we have to check whether we need to split the extent covering
     2843             :          * the last block to remove, so that the relevant part of it can
     2844             :          * easily be removed in ext4_ext_rm_leaf().
    2845             :          */
    2846     1222891 :         if (end < EXT_MAX_BLOCKS - 1) {
    2847      423136 :                 struct ext4_extent *ex;
    2848      423136 :                 ext4_lblk_t ee_block, ex_end, lblk;
    2849      423136 :                 ext4_fsblk_t pblk;
    2850             : 
    2851             :                 /* find extent for or closest extent to this block */
    2852      423136 :                 path = ext4_find_extent(inode, end, NULL,
    2853             :                                         EXT4_EX_NOCACHE | EXT4_EX_NOFAIL);
    2854      423134 :                 if (IS_ERR(path)) {
    2855           0 :                         ext4_journal_stop(handle);
    2856           0 :                         return PTR_ERR(path);
    2857             :                 }
    2858      423134 :                 depth = ext_depth(inode);
     2859             :                 /* The leaf can be missing only if the inode has no blocks at all */
    2860      423134 :                 ex = path[depth].p_ext;
    2861      423134 :                 if (!ex) {
    2862       10562 :                         if (depth) {
    2863           0 :                                 EXT4_ERROR_INODE(inode,
    2864             :                                                  "path[%d].p_hdr == NULL",
    2865             :                                                  depth);
    2866           0 :                                 err = -EFSCORRUPTED;
    2867             :                         }
    2868       10562 :                         goto out;
    2869             :                 }
    2870             : 
    2871      412572 :                 ee_block = le32_to_cpu(ex->ee_block);
    2872      412572 :                 ex_end = ee_block + ext4_ext_get_actual_len(ex) - 1;
    2873             : 
    2874             :                 /*
    2875             :                  * See if the last block is inside the extent, if so split
    2876             :                  * the extent at 'end' block so we can easily remove the
    2877             :                  * tail of the first part of the split extent in
    2878             :                  * ext4_ext_rm_leaf().
    2879             :                  */
    2880      412572 :                 if (end >= ee_block && end < ex_end) {
    2881             : 
    2882             :                         /*
    2883             :                          * If we're going to split the extent, note that
    2884             :                          * the cluster containing the block after 'end' is
    2885             :                          * in use to avoid freeing it when removing blocks.
    2886             :                          */
    2887      165295 :                         if (sbi->s_cluster_ratio > 1) {
    2888           0 :                                 pblk = ext4_ext_pblock(ex) + end - ee_block + 1;
    2889           0 :                                 partial.pclu = EXT4_B2C(sbi, pblk);
    2890           0 :                                 partial.state = nofree;
    2891             :                         }
    2892             : 
    2893             :                         /*
    2894             :                          * Split the extent in two so that 'end' is the last
    2895             :                          * block in the first new extent. Also we should not
    2896             :                          * fail removing space due to ENOSPC so try to use
    2897             :                          * reserved block if that happens.
    2898             :                          */
    2899      165295 :                         err = ext4_force_split_extent_at(handle, inode, &path,
    2900             :                                                          end + 1, 1);
    2901      165295 :                         if (err < 0)
    2902           0 :                                 goto out;
    2903             : 
    2904      247277 :                 } else if (sbi->s_cluster_ratio > 1 && end >= ex_end &&
    2905           0 :                            partial.state == initial) {
    2906             :                         /*
    2907             :                          * If we're punching, there's an extent to the right.
    2908             :                          * If the partial cluster hasn't been set, set it to
    2909             :                          * that extent's first cluster and its state to nofree
    2910             :                          * so it won't be freed should it contain blocks to be
    2911             :                          * removed. If it's already set (tofree/nofree), we're
    2912             :                          * retrying and keep the original partial cluster info
    2913             :                          * so a cluster marked tofree as a result of earlier
    2914             :                          * extent removal is not lost.
    2915             :                          */
    2916           0 :                         lblk = ex_end + 1;
    2917           0 :                         err = ext4_ext_search_right(inode, path, &lblk, &pblk,
    2918             :                                                     NULL);
    2919           0 :                         if (err < 0)
    2920           0 :                                 goto out;
    2921           0 :                         if (pblk) {
    2922           0 :                                 partial.pclu = EXT4_B2C(sbi, pblk);
    2923           0 :                                 partial.state = nofree;
    2924             :                         }
    2925             :                 }
    2926             :         }
    2927             :         /*
    2928             :          * We start scanning from right side, freeing all the blocks
    2929             :          * after i_size and walking into the tree depth-wise.
    2930             :          */
    2931     1212327 :         depth = ext_depth(inode);
    2932     1212327 :         if (path) {
    2933             :                 int k = i = depth;
    2934      445168 :                 while (--k > 0)
    2935       32583 :                         path[k].p_block =
    2936       32583 :                                 le16_to_cpu(path[k].p_hdr->eh_entries)+1;
    2937             :         } else {
    2938      799742 :                 path = kcalloc(depth + 1, sizeof(struct ext4_ext_path),
    2939             :                                GFP_NOFS | __GFP_NOFAIL);
    2940      799683 :                 if (path == NULL) {
    2941           0 :                         ext4_journal_stop(handle);
    2942           0 :                         return -ENOMEM;
    2943             :                 }
    2944      799683 :                 path[0].p_maxdepth = path[0].p_depth = depth;
    2945      799683 :                 path[0].p_hdr = ext_inode_hdr(inode);
    2946      799683 :                 i = 0;
    2947             : 
    2948      799683 :                 if (ext4_ext_check(inode, path[0].p_hdr, depth, 0)) {
    2949           0 :                         err = -EFSCORRUPTED;
    2950           0 :                         goto out;
    2951             :                 }
    2952             :         }
    2953             :         err = 0;
    2954             : 
    2955     3229273 :         while (i >= 0 && err == 0) {
    2956     2017004 :                 if (i == depth) {
    2957             :                         /* this is leaf block */
    2958     1249299 :                         err = ext4_ext_rm_leaf(handle, inode, path,
    2959             :                                                &partial, start, end);
    2960             :                         /* root level has p_bh == NULL, brelse() eats this */
    2961     1249108 :                         brelse(path[i].p_bh);
    2962     1249216 :                         path[i].p_bh = NULL;
    2963     1249216 :                         i--;
    2964     1249216 :                         continue;
    2965             :                 }
    2966             : 
    2967             :                 /* this is index block */
    2968      767705 :                 if (!path[i].p_hdr) {
    2969         230 :                         ext_debug(inode, "initialize header\n");
    2970         230 :                         path[i].p_hdr = ext_block_hdr(path[i].p_bh);
    2971             :                 }
    2972             : 
    2973      767705 :                 if (!path[i].p_idx) {
    2974             :                         /* this level hasn't been touched yet */
    2975      168876 :                         path[i].p_idx = EXT_LAST_INDEX(path[i].p_hdr);
    2976      168876 :                         path[i].p_block = le16_to_cpu(path[i].p_hdr->eh_entries)+1;
    2977      168876 :                         ext_debug(inode, "init index ptr: hdr 0x%p, num %d\n",
    2978             :                                   path[i].p_hdr,
    2979             :                                   le16_to_cpu(path[i].p_hdr->eh_entries));
    2980             :                 } else {
    2981             :                         /* we were already here, see at next index */
    2982      598829 :                         path[i].p_idx--;
    2983             :                 }
    2984             : 
    2985      767705 :                 ext_debug(inode, "level %d - index, first 0x%p, cur 0x%p\n",
    2986             :                                 i, EXT_FIRST_INDEX(path[i].p_hdr),
    2987             :                                 path[i].p_idx);
    2988      767705 :                 if (ext4_ext_more_to_rm(path + i)) {
    2989      205914 :                         struct buffer_head *bh;
    2990             :                         /* go to the next level */
    2991      205914 :                         ext_debug(inode, "move to level %d (block %llu)\n",
    2992             :                                   i + 1, ext4_idx_pblock(path[i].p_idx));
    2993      205914 :                         memset(path + i + 1, 0, sizeof(*path));
    2994      205914 :                         bh = read_extent_tree_block(inode, path[i].p_idx,
    2995             :                                                     depth - i - 1,
    2996             :                                                     EXT4_EX_NOCACHE);
    2997      205924 :                         if (IS_ERR(bh)) {
    2998             :                                 /* should we reset i_size? */
    2999           1 :                                 err = PTR_ERR(bh);
    3000           1 :                                 break;
    3001             :                         }
    3002             :                         /* Yield here to deal with large extent trees.
    3003             :                          * Should be a no-op if we did IO above. */
    3004      205923 :                         cond_resched();
    3005      205923 :                         if (WARN_ON(i + 1 > depth)) {
    3006             :                                 err = -EFSCORRUPTED;
    3007             :                                 break;
    3008             :                         }
    3009      205923 :                         path[i + 1].p_bh = bh;
    3010             : 
    3011             :                         /* save actual number of indexes since this
    3012             :                          * number is changed at the next iteration */
    3013      205923 :                         path[i].p_block = le16_to_cpu(path[i].p_hdr->eh_entries);
    3014      205923 :                         i++;
    3015             :                 } else {
    3016             :                         /* we finished processing this index, go up */
    3017      561887 :                         if (path[i].p_hdr->eh_entries == 0 && i > 0) {
     3018             :                                 /* index is empty, remove it;
     3019             :                                  * the handle has already been prepared when
     3020             :                                  * the leaf below was removed (ext4_ext_rm_leaf()) */
    3021          59 :                                 err = ext4_ext_rm_idx(handle, inode, path, i);
    3022             :                         }
    3023             :                         /* root level has p_bh == NULL, brelse() eats this */
    3024      561887 :                         brelse(path[i].p_bh);
    3025      561885 :                         path[i].p_bh = NULL;
    3026      561885 :                         i--;
    3027      561885 :                         ext_debug(inode, "return to level %d\n", i);
    3028             :                 }
    3029             :         }
    3030             : 
    3031     1212270 :         trace_ext4_ext_remove_space_done(inode, start, end, depth, &partial,
    3032     1212270 :                                          path->p_hdr->eh_entries);
    3033             : 
    3034             :         /*
     3035             :          * if there's a partial cluster and we have removed the first extent
     3036             :          * in the file, then we also free that partial cluster
    3037             :          */
    3038     1212012 :         if (partial.state == tofree && err == 0) {
    3039           0 :                 int flags = get_default_free_blocks_flags(inode);
    3040             : 
    3041           0 :                 if (ext4_is_pending(inode, partial.lblk))
    3042           0 :                         flags |= EXT4_FREE_BLOCKS_RERESERVE_CLUSTER;
    3043           0 :                 ext4_free_blocks(handle, inode, NULL,
    3044           0 :                                  EXT4_C2B(sbi, partial.pclu),
    3045           0 :                                  sbi->s_cluster_ratio, flags);
    3046           0 :                 if (flags & EXT4_FREE_BLOCKS_RERESERVE_CLUSTER)
    3047           0 :                         ext4_rereserve_cluster(inode, partial.lblk);
    3048           0 :                 partial.state = initial;
    3049             :         }
    3050             : 
    3051             :         /* TODO: flexible tree reduction should be here */
    3052     1212012 :         if (path->p_hdr->eh_entries == 0) {
    3053             :                 /*
    3054             :                  * truncate to zero freed all the tree,
    3055             :                  * so we need to correct eh_depth
    3056             :                  */
    3057      607525 :                 err = ext4_ext_get_access(handle, inode, path);
    3058      607514 :                 if (err == 0) {
    3059      607514 :                         ext_inode_hdr(inode)->eh_depth = 0;
    3060      607514 :                         ext_inode_hdr(inode)->eh_max =
    3061             :                                 cpu_to_le16(ext4_ext_space_root(inode, 0));
    3062      607514 :                         err = ext4_ext_dirty(handle, inode, path);
    3063             :                 }
    3064             :         }
    3065      604487 : out:
    3066     1223096 :         ext4_free_ext_path(path);
    3067     1222783 :         path = NULL;
    3068     1222783 :         if (err == -EAGAIN)
    3069          74 :                 goto again;
    3070     1222709 :         ext4_journal_stop(handle);
    3071             : 
    3072     1222709 :         return err;
    3073             : }
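
Illustrative note (not part of the kernel listing above): the removal loop in ext4_ext_remove_space() is an iterative, right-to-left depth-first walk driven by the path[] array, descending through index blocks and freeing each leaf before the index above it becomes removable. The following minimal userspace C sketch mirrors only that loop shape; the node/cursor types, MAX_DEPTH, and remove_space() name are simplified stand-ins invented for the example, not kernel structures.

    #include <stdio.h>

    #define MAX_DEPTH 8

    struct node {
            int is_leaf;
            int nr;                      /* number of children in an index node */
            struct node *child[4];
    };

    struct cursor {
            struct node *hdr;            /* analogous to path[i].p_hdr */
            int idx;                     /* analogous to path[i].p_idx; -1 = untouched */
    };

    static void remove_space(struct node *root)
    {
            struct cursor path[MAX_DEPTH];
            int i = 0;

            path[0].hdr = root;
            path[0].idx = -1;

            while (i >= 0) {
                    if (path[i].hdr->is_leaf) {
                            /* leaf level: free its extents, then go back up */
                            printf("free extents in leaf %p\n", (void *)path[i].hdr);
                            i--;
                            continue;
                    }
                    if (path[i].idx < 0)
                            path[i].idx = path[i].hdr->nr - 1;   /* start at the last index */
                    else
                            path[i].idx--;                       /* move one index to the left */

                    if (path[i].idx >= 0) {
                            /* descend into the child, as read_extent_tree_block() does above */
                            path[i + 1].hdr = path[i].hdr->child[path[i].idx];
                            path[i + 1].idx = -1;
                            i++;
                    } else {
                            /* all children handled; this index block is now removable */
                            printf("index %p is empty, remove it\n", (void *)path[i].hdr);
                            i--;
                    }
            }
    }

    int main(void)
    {
            struct node leaf1 = { 1, 0, { NULL } };
            struct node leaf2 = { 1, 0, { NULL } };
            struct node root  = { 0, 2, { &leaf1, &leaf2 } };

            remove_space(&root);
            return 0;
    }

Running the sketch frees leaf2 before leaf1 (rightmost first) and removes the index last, which is the same bottom-up order the real loop relies on so that an index block is only removed once all leaves beneath it are gone.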
    3074             : 
    3075             : /*
    3076             :  * called at mount time
    3077             :  */
    3078        2536 : void ext4_ext_init(struct super_block *sb)
    3079             : {
    3080             :         /*
    3081             :          * possible initialization would be here
    3082             :          */
    3083             : 
    3084        2536 :         if (ext4_has_feature_extents(sb)) {
    3085             : #if defined(AGGRESSIVE_TEST) || defined(CHECK_BINSEARCH) || defined(EXTENTS_STATS)
    3086             :                 printk(KERN_INFO "EXT4-fs: file extents enabled"
    3087             : #ifdef AGGRESSIVE_TEST
    3088             :                        ", aggressive tests"
    3089             : #endif
    3090             : #ifdef CHECK_BINSEARCH
    3091             :                        ", check binsearch"
    3092             : #endif
    3093             : #ifdef EXTENTS_STATS
    3094             :                        ", stats"
    3095             : #endif
    3096             :                        "\n");
    3097             : #endif
    3098             : #ifdef EXTENTS_STATS
    3099             :                 spin_lock_init(&EXT4_SB(sb)->s_ext_stats_lock);
    3100             :                 EXT4_SB(sb)->s_ext_min = 1 << 30;
    3101             :                 EXT4_SB(sb)->s_ext_max = 0;
    3102             : #endif
    3103        2536 :         }
    3104        2536 : }
    3105             : 
    3106             : /*
    3107             :  * called at umount time
    3108             :  */
    3109        2536 : void ext4_ext_release(struct super_block *sb)
    3110             : {
    3111        2536 :         if (!ext4_has_feature_extents(sb))
    3112             :                 return;
    3113             : 
    3114             : #ifdef EXTENTS_STATS
    3115             :         if (EXT4_SB(sb)->s_ext_blocks && EXT4_SB(sb)->s_ext_extents) {
    3116             :                 struct ext4_sb_info *sbi = EXT4_SB(sb);
    3117             :                 printk(KERN_ERR "EXT4-fs: %lu blocks in %lu extents (%lu ave)\n",
    3118             :                         sbi->s_ext_blocks, sbi->s_ext_extents,
    3119             :                         sbi->s_ext_blocks / sbi->s_ext_extents);
    3120             :                 printk(KERN_ERR "EXT4-fs: extents: %lu min, %lu max, max depth %lu\n",
    3121             :                         sbi->s_ext_min, sbi->s_ext_max, sbi->s_depth_max);
    3122             :         }
    3123             : #endif
    3124             : }
    3125             : 
    3126       10447 : static void ext4_zeroout_es(struct inode *inode, struct ext4_extent *ex)
    3127             : {
    3128       10447 :         ext4_lblk_t  ee_block;
    3129       10447 :         ext4_fsblk_t ee_pblock;
    3130       10447 :         unsigned int ee_len;
    3131             : 
    3132       10447 :         ee_block  = le32_to_cpu(ex->ee_block);
    3133       10447 :         ee_len    = ext4_ext_get_actual_len(ex);
    3134       10447 :         ee_pblock = ext4_ext_pblock(ex);
    3135             : 
    3136       10447 :         if (ee_len == 0)
    3137             :                 return;
    3138             : 
    3139           1 :         ext4_es_insert_extent(inode, ee_block, ee_len, ee_pblock,
    3140             :                               EXTENT_STATUS_WRITTEN);
    3141             : }
    3142             : 
    3143             : /* FIXME!! we need to try to merge to left or right after zero-out  */
    3144           1 : static int ext4_ext_zeroout(struct inode *inode, struct ext4_extent *ex)
    3145             : {
    3146           1 :         ext4_fsblk_t ee_pblock;
    3147           1 :         unsigned int ee_len;
    3148             : 
    3149           1 :         ee_len    = ext4_ext_get_actual_len(ex);
    3150           1 :         ee_pblock = ext4_ext_pblock(ex);
    3151           1 :         return ext4_issue_zeroout(inode, le32_to_cpu(ex->ee_block), ee_pblock,
    3152             :                                   ee_len);
    3153             : }
    3154             : 
    3155             : /*
    3156             :  * ext4_split_extent_at() splits an extent at given block.
    3157             :  *
    3158             :  * @handle: the journal handle
    3159             :  * @inode: the file inode
     3160             :  * @ppath: the path to the extent
     3161             :  * @split: the logical block where the extent is split.
     3162             :  * @split_flag: indicates whether the extent may be zeroed out if the split
     3163             :  *              fails, and the states (initialized or unwritten) of the new extents.
     3164             :  * @flags: flags used to insert the new extent into the extent tree.
    3165             :  *
    3166             :  *
    3167             :  * Splits extent [a, b] into two extents [a, @split) and [@split, b], states
    3168             :  * of which are determined by split_flag.
    3169             :  *
    3170             :  * There are two cases:
     3171             :  *  a> the extent is split into two extents.
     3172             :  *  b> no split is needed, and the extent is just marked.
    3173             :  *
    3174             :  * return 0 on success.
    3175             :  */
    3176     4688784 : static int ext4_split_extent_at(handle_t *handle,
    3177             :                              struct inode *inode,
    3178             :                              struct ext4_ext_path **ppath,
    3179             :                              ext4_lblk_t split,
    3180             :                              int split_flag,
    3181             :                              int flags)
    3182             : {
    3183     4688784 :         struct ext4_ext_path *path = *ppath;
    3184     4688784 :         ext4_fsblk_t newblock;
    3185     4688784 :         ext4_lblk_t ee_block;
    3186     4688784 :         struct ext4_extent *ex, newex, orig_ex, zero_ex;
    3187     4688784 :         struct ext4_extent *ex2 = NULL;
    3188     4688784 :         unsigned int ee_len, depth;
    3189     4688784 :         int err = 0;
    3190             : 
    3191     4688784 :         BUG_ON((split_flag & (EXT4_EXT_DATA_VALID1 | EXT4_EXT_DATA_VALID2)) ==
    3192             :                (EXT4_EXT_DATA_VALID1 | EXT4_EXT_DATA_VALID2));
    3193             : 
    3194     4688784 :         ext_debug(inode, "logical block %llu\n", (unsigned long long)split);
    3195             : 
    3196     4688784 :         ext4_ext_show_leaf(inode, path);
    3197             : 
    3198     4688784 :         depth = ext_depth(inode);
    3199     4688784 :         ex = path[depth].p_ext;
    3200     4688784 :         ee_block = le32_to_cpu(ex->ee_block);
    3201     4688784 :         ee_len = ext4_ext_get_actual_len(ex);
    3202     4688784 :         newblock = split - ee_block + ext4_ext_pblock(ex);
    3203             : 
    3204     4688784 :         BUG_ON(split < ee_block || split >= (ee_block + ee_len));
    3205     4688784 :         BUG_ON(!ext4_ext_is_unwritten(ex) &&
    3206             :                split_flag & (EXT4_EXT_MAY_ZEROOUT |
    3207             :                              EXT4_EXT_MARK_UNWRIT1 |
    3208             :                              EXT4_EXT_MARK_UNWRIT2));
    3209             : 
    3210     4688784 :         err = ext4_ext_get_access(handle, inode, path + depth);
    3211     4688789 :         if (err)
    3212           0 :                 goto out;
    3213             : 
    3214     4688789 :         if (split == ee_block) {
    3215             :                 /*
    3216             :                  * case b: block @split is the block that the extent begins with
    3217             :                  * then we just change the state of the extent, and splitting
    3218             :                  * is not needed.
    3219             :                  */
    3220      275935 :                 if (split_flag & EXT4_EXT_MARK_UNWRIT2)
    3221      251834 :                         ext4_ext_mark_unwritten(ex);
    3222             :                 else
    3223       24101 :                         ext4_ext_mark_initialized(ex);
    3224             : 
    3225      275935 :                 if (!(flags & EXT4_GET_BLOCKS_PRE_IO))
    3226        2281 :                         ext4_ext_try_to_merge(handle, inode, path, ex);
    3227             : 
    3228      275935 :                 err = ext4_ext_dirty(handle, inode, path + path->p_depth);
    3229      275939 :                 goto out;
    3230             :         }
    3231             : 
    3232             :         /* case a */
    3233     4412854 :         memcpy(&orig_ex, ex, sizeof(orig_ex));
    3234     4412854 :         ex->ee_len = cpu_to_le16(split - ee_block);
    3235     4412854 :         if (split_flag & EXT4_EXT_MARK_UNWRIT1)
    3236     4126817 :                 ext4_ext_mark_unwritten(ex);
    3237             : 
    3238             :         /*
    3239             :          * path may lead to new leaf, not to original leaf any more
    3240             :          * after ext4_ext_insert_extent() returns,
    3241             :          */
    3242     4412854 :         err = ext4_ext_dirty(handle, inode, path + depth);
    3243     4412854 :         if (err)
    3244           0 :                 goto fix_extent_len;
    3245             : 
    3246     4412854 :         ex2 = &newex;
    3247     4412854 :         ex2->ee_block = cpu_to_le32(split);
    3248     4412854 :         ex2->ee_len   = cpu_to_le16(ee_len - (split - ee_block));
    3249     4412854 :         ext4_ext_store_pblock(ex2, newblock);
    3250     4412854 :         if (split_flag & EXT4_EXT_MARK_UNWRIT2)
    3251     4124240 :                 ext4_ext_mark_unwritten(ex2);
    3252             : 
    3253     4412854 :         err = ext4_ext_insert_extent(handle, inode, ppath, &newex, flags);
    3254     4412849 :         if (err != -ENOSPC && err != -EDQUOT && err != -ENOMEM)
    3255     4412844 :                 goto out;
    3256             : 
    3257           5 :         if (EXT4_EXT_MAY_ZEROOUT & split_flag) {
    3258           1 :                 if (split_flag & (EXT4_EXT_DATA_VALID1|EXT4_EXT_DATA_VALID2)) {
    3259           1 :                         if (split_flag & EXT4_EXT_DATA_VALID1) {
    3260           1 :                                 err = ext4_ext_zeroout(inode, ex2);
    3261           1 :                                 zero_ex.ee_block = ex2->ee_block;
    3262           1 :                                 zero_ex.ee_len = cpu_to_le16(
    3263             :                                                 ext4_ext_get_actual_len(ex2));
    3264           1 :                                 ext4_ext_store_pblock(&zero_ex,
    3265             :                                                       ext4_ext_pblock(ex2));
    3266             :                         } else {
    3267           0 :                                 err = ext4_ext_zeroout(inode, ex);
    3268           0 :                                 zero_ex.ee_block = ex->ee_block;
    3269           0 :                                 zero_ex.ee_len = cpu_to_le16(
    3270             :                                                 ext4_ext_get_actual_len(ex));
    3271           0 :                                 ext4_ext_store_pblock(&zero_ex,
    3272             :                                                       ext4_ext_pblock(ex));
    3273             :                         }
    3274             :                 } else {
    3275           0 :                         err = ext4_ext_zeroout(inode, &orig_ex);
    3276           0 :                         zero_ex.ee_block = orig_ex.ee_block;
    3277           0 :                         zero_ex.ee_len = cpu_to_le16(
    3278             :                                                 ext4_ext_get_actual_len(&orig_ex));
    3279           0 :                         ext4_ext_store_pblock(&zero_ex,
    3280             :                                               ext4_ext_pblock(&orig_ex));
    3281             :                 }
    3282             : 
    3283           1 :                 if (!err) {
    3284             :                         /* update the extent length and mark as initialized */
    3285           1 :                         ex->ee_len = cpu_to_le16(ee_len);
    3286           1 :                         ext4_ext_try_to_merge(handle, inode, path, ex);
    3287           1 :                         err = ext4_ext_dirty(handle, inode, path + path->p_depth);
    3288           1 :                         if (!err)
    3289             :                                 /* update extent status tree */
    3290           1 :                                 ext4_zeroout_es(inode, &zero_ex);
    3291             :                         /* If we failed at this point, we don't know in which
    3292             :                          * state the extent tree exactly is so don't try to fix
    3293             :                          * length of the original extent as it may do even more
    3294             :                          * damage.
    3295             :                          */
    3296           1 :                         goto out;
    3297             :                 }
    3298             :         }
    3299             : 
    3300           4 : fix_extent_len:
    3301           4 :         ex->ee_len = orig_ex.ee_len;
    3302             :         /*
    3303             :          * Ignore ext4_ext_dirty return value since we are already in error path
    3304             :          * and err is a non-zero error code.
    3305             :          */
    3306           4 :         ext4_ext_dirty(handle, inode, path + path->p_depth);
    3307           4 :         return err;
    3308             : out:
    3309             :         ext4_ext_show_leaf(inode, path);
    3310             :         return err;
    3311             : }
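
Illustrative note (not part of the listing): in case (a) above, ext4_split_extent_at() shortens the original extent to 'split - ee_block' blocks and inserts a new extent that starts at 'split' and maps to 'newblock = split - ee_block + pblk'. The userspace C sketch below reproduces only that arithmetic with a made-up simple_extent type and split_extent_at() helper; it is an assumption-laden illustration, not the kernel routine.

    #include <assert.h>
    #include <stdio.h>

    struct simple_extent {
            unsigned int lblk;           /* first logical block (ee_block) */
            unsigned int len;            /* number of blocks    (ee_len)   */
            unsigned long long pblk;     /* first physical block           */
    };

    static void split_extent_at(const struct simple_extent *ex, unsigned int split,
                                struct simple_extent *first, struct simple_extent *second)
    {
            /* mirrors the BUG_ON(split < ee_block || split >= ee_block + ee_len) check */
            assert(split > ex->lblk && split < ex->lblk + ex->len);

            first->lblk  = ex->lblk;
            first->len   = split - ex->lblk;              /* ex->ee_len in the kernel  */
            first->pblk  = ex->pblk;

            second->lblk = split;                         /* ex2->ee_block             */
            second->len  = ex->len - (split - ex->lblk);  /* ex2->ee_len               */
            second->pblk = ex->pblk + (split - ex->lblk); /* 'newblock' in the kernel  */
    }

    int main(void)
    {
            struct simple_extent ex = { 100, 50, 5000 }, a, b;

            split_extent_at(&ex, 120, &a, &b);
            printf("first:  lblk=%u len=%u pblk=%llu\n", a.lblk, a.len, a.pblk);
            printf("second: lblk=%u len=%u pblk=%llu\n", b.lblk, b.len, b.pblk);
            return 0;
    }

For the example values this prints [100,+20)@5000 and [120,+30)@5020, i.e. the two halves stay physically contiguous and only the bookkeeping is divided.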
    3312             : 
    3313             : /*
     3314             :  * ext4_split_extent() splits an extent and marks the extent covered
     3315             :  * by @map as @split_flag indicates
    3316             :  *
    3317             :  * It may result in splitting the extent into multiple extents (up to three)
    3318             :  * There are three possibilities:
    3319             :  *   a> There is no split required
    3320             :  *   b> Splits in two extents: Split is happening at either end of the extent
     3321             :  *   c> Splits in three extents: Someone is splitting in the middle of the extent
    3322             :  *
    3323             :  */
    3324      520454 : static int ext4_split_extent(handle_t *handle,
    3325             :                               struct inode *inode,
    3326             :                               struct ext4_ext_path **ppath,
    3327             :                               struct ext4_map_blocks *map,
    3328             :                               int split_flag,
    3329             :                               int flags)
    3330             : {
    3331      520454 :         struct ext4_ext_path *path = *ppath;
    3332      520454 :         ext4_lblk_t ee_block;
    3333      520454 :         struct ext4_extent *ex;
    3334      520454 :         unsigned int ee_len, depth;
    3335      520454 :         int err = 0;
    3336      520454 :         int unwritten;
    3337      520454 :         int split_flag1, flags1;
    3338      520454 :         int allocated = map->m_len;
    3339             : 
    3340      520454 :         depth = ext_depth(inode);
    3341      520454 :         ex = path[depth].p_ext;
    3342      520454 :         ee_block = le32_to_cpu(ex->ee_block);
    3343      520454 :         ee_len = ext4_ext_get_actual_len(ex);
    3344      520454 :         unwritten = ext4_ext_is_unwritten(ex);
    3345             : 
    3346      520454 :         if (map->m_lblk + map->m_len < ee_block + ee_len) {
    3347      274216 :                 split_flag1 = split_flag & EXT4_EXT_MAY_ZEROOUT;
    3348      274216 :                 flags1 = flags | EXT4_GET_BLOCKS_PRE_IO;
    3349      274216 :                 if (unwritten)
    3350      245444 :                         split_flag1 |= EXT4_EXT_MARK_UNWRIT1 |
    3351             :                                        EXT4_EXT_MARK_UNWRIT2;
    3352      274216 :                 if (split_flag & EXT4_EXT_DATA_VALID2)
    3353      245444 :                         split_flag1 |= EXT4_EXT_DATA_VALID1;
    3354      274216 :                 err = ext4_split_extent_at(handle, inode, ppath,
    3355             :                                 map->m_lblk + map->m_len, split_flag1, flags1);
    3356      274215 :                 if (err)
    3357           1 :                         goto out;
    3358             :         } else {
    3359      246238 :                 allocated = ee_len - (map->m_lblk - ee_block);
    3360             :         }
    3361             :         /*
    3362             :          * Update path is required because previous ext4_split_extent_at() may
    3363             :          * result in split of original leaf or extent zeroout.
    3364             :          */
    3365      520452 :         path = ext4_find_extent(inode, map->m_lblk, ppath, flags);
    3366      520450 :         if (IS_ERR(path))
    3367           0 :                 return PTR_ERR(path);
    3368      520450 :         depth = ext_depth(inode);
    3369      520450 :         ex = path[depth].p_ext;
    3370      520450 :         if (!ex) {
    3371           0 :                 EXT4_ERROR_INODE(inode, "unexpected hole at %lu",
    3372             :                                  (unsigned long) map->m_lblk);
    3373           0 :                 return -EFSCORRUPTED;
    3374             :         }
    3375      520450 :         unwritten = ext4_ext_is_unwritten(ex);
    3376             : 
    3377      520450 :         if (map->m_lblk >= ee_block) {
    3378      520450 :                 split_flag1 = split_flag & EXT4_EXT_DATA_VALID2;
    3379      520450 :                 if (unwritten) {
    3380      466781 :                         split_flag1 |= EXT4_EXT_MARK_UNWRIT1;
    3381      466781 :                         split_flag1 |= split_flag & (EXT4_EXT_MAY_ZEROOUT |
    3382             :                                                      EXT4_EXT_MARK_UNWRIT2);
    3383             :                 }
    3384      520450 :                 err = ext4_split_extent_at(handle, inode, ppath,
    3385             :                                 map->m_lblk, split_flag1, flags);
    3386      520455 :                 if (err)
    3387           3 :                         goto out;
    3388             :         }
    3389             : 
    3390      520456 :         ext4_ext_show_leaf(inode, path);
    3391      520452 : out:
    3392      520456 :         return err ? err : allocated;
    3393             : }
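
Illustrative note (not part of the listing): the function above performs at most two calls to ext4_split_extent_at(), first at map->m_lblk + map->m_len when that point falls strictly inside the extent, then at map->m_lblk when it lies past the extent's start, which is what yields one, two, or three resulting extents. The small userspace sketch below only counts the resulting pieces under those two tests; count_pieces() and its plain integer parameters are invented stand-ins for the kernel types.

    #include <stdio.h>

    static int count_pieces(unsigned int ee_block, unsigned int ee_len,
                            unsigned int m_lblk, unsigned int m_len)
    {
            unsigned int ee_end = ee_block + ee_len;   /* one past the extent */
            unsigned int m_end  = m_lblk + m_len;      /* one past the mapped range */
            int pieces = 1;

            if (m_end < ee_end)        /* first split point: m_lblk + m_len */
                    pieces++;
            if (m_lblk > ee_block)     /* second split point: m_lblk */
                    pieces++;
            return pieces;
    }

    int main(void)
    {
            /* whole extent covered -> 1; at either end -> 2; in the middle -> 3 */
            printf("%d %d %d\n",
                   count_pieces(0, 100, 0, 100),
                   count_pieces(0, 100, 0, 10),
                   count_pieces(0, 100, 40, 10));
            return 0;
    }

This prints "1 2 3", matching cases a>, b>, and c> in the comment above the function.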
    3394             : 
    3395             : /*
    3396             :  * This function is called by ext4_ext_map_blocks() if someone tries to write
    3397             :  * to an unwritten extent. It may result in splitting the unwritten
    3398             :  * extent into multiple extents (up to three - one initialized and two
    3399             :  * unwritten).
    3400             :  * There are three possibilities:
    3401             :  *   a> There is no split required: Entire extent should be initialized
    3402             :  *   b> Splits in two extents: Write is happening at either end of the extent
     3403             :  *   c> Splits in three extents: Someone is writing in the middle of the extent
    3404             :  *
    3405             :  * Pre-conditions:
    3406             :  *  - The extent pointed to by 'path' is unwritten.
    3407             :  *  - The extent pointed to by 'path' contains a superset
    3408             :  *    of the logical span [map->m_lblk, map->m_lblk + map->m_len).
    3409             :  *
    3410             :  * Post-conditions on success:
     3411             :  *  - the returned value is the number of blocks beyond map->m_lblk
    3412             :  *    that are allocated and initialized.
    3413             :  *    It is guaranteed to be >= map->m_len.
    3414             :  */
    3415        5223 : static int ext4_ext_convert_to_initialized(handle_t *handle,
    3416             :                                            struct inode *inode,
    3417             :                                            struct ext4_map_blocks *map,
    3418             :                                            struct ext4_ext_path **ppath,
    3419             :                                            int flags)
    3420             : {
    3421        5223 :         struct ext4_ext_path *path = *ppath;
    3422        5223 :         struct ext4_sb_info *sbi;
    3423        5223 :         struct ext4_extent_header *eh;
    3424        5223 :         struct ext4_map_blocks split_map;
    3425        5223 :         struct ext4_extent zero_ex1, zero_ex2;
    3426        5223 :         struct ext4_extent *ex, *abut_ex;
    3427        5223 :         ext4_lblk_t ee_block, eof_block;
    3428        5223 :         unsigned int ee_len, depth, map_len = map->m_len;
    3429        5223 :         int allocated = 0, max_zeroout = 0;
    3430        5223 :         int err = 0;
    3431        5223 :         int split_flag = EXT4_EXT_DATA_VALID2;
    3432             : 
    3433        5223 :         ext_debug(inode, "logical block %llu, max_blocks %u\n",
    3434             :                   (unsigned long long)map->m_lblk, map_len);
    3435             : 
    3436        5223 :         sbi = EXT4_SB(inode->i_sb);
    3437       10446 :         eof_block = (EXT4_I(inode)->i_disksize + inode->i_sb->s_blocksize - 1)
    3438        5223 :                         >> inode->i_sb->s_blocksize_bits;
    3439        5223 :         if (eof_block < map->m_lblk + map_len)
    3440             :                 eof_block = map->m_lblk + map_len;
    3441             : 
    3442        5223 :         depth = ext_depth(inode);
    3443        5223 :         eh = path[depth].p_hdr;
    3444        5223 :         ex = path[depth].p_ext;
    3445        5223 :         ee_block = le32_to_cpu(ex->ee_block);
    3446        5223 :         ee_len = ext4_ext_get_actual_len(ex);
    3447        5223 :         zero_ex1.ee_len = 0;
    3448        5223 :         zero_ex2.ee_len = 0;
    3449             : 
    3450        5223 :         trace_ext4_ext_convert_to_initialized_enter(inode, map, ex);
    3451             : 
    3452             :         /* Pre-conditions */
    3453        5223 :         BUG_ON(!ext4_ext_is_unwritten(ex));
    3454        5223 :         BUG_ON(!in_range(map->m_lblk, ee_block, ee_len));
    3455             : 
    3456             :         /*
    3457             :          * Attempt to transfer newly initialized blocks from the currently
    3458             :          * unwritten extent to its neighbor. This is much cheaper
    3459             :          * than an insertion followed by a merge as those involve costly
    3460             :          * memmove() calls. Transferring to the left is the common case in
    3461             :          * steady state for workloads doing fallocate(FALLOC_FL_KEEP_SIZE)
    3462             :          * followed by append writes.
    3463             :          *
    3464             :          * Limitations of the current logic:
    3465             :          *  - L1: we do not deal with writes covering the whole extent.
    3466             :          *    This would require removing the extent if the transfer
    3467             :          *    is possible.
    3468             :          *  - L2: we only attempt to merge with an extent stored in the
    3469             :          *    same extent tree node.
    3470             :          */
    3471        5223 :         if ((map->m_lblk == ee_block) &&
    3472             :                 /* See if we can merge left */
    3473        1667 :                 (map_len < ee_len) &&                /*L1*/
    3474        1667 :                 (ex > EXT_FIRST_EXTENT(eh))) {       /*L2*/
    3475        1663 :                 ext4_lblk_t prev_lblk;
    3476        1663 :                 ext4_fsblk_t prev_pblk, ee_pblk;
    3477        1663 :                 unsigned int prev_len;
    3478             : 
    3479        1663 :                 abut_ex = ex - 1;
    3480        1663 :                 prev_lblk = le32_to_cpu(abut_ex->ee_block);
    3481        1663 :                 prev_len = ext4_ext_get_actual_len(abut_ex);
    3482        1663 :                 prev_pblk = ext4_ext_pblock(abut_ex);
    3483        1663 :                 ee_pblk = ext4_ext_pblock(ex);
    3484             : 
    3485             :                 /*
    3486             :                  * A transfer of blocks from 'ex' to 'abut_ex' is allowed
    3487             :                  * upon those conditions:
    3488             :                  * - C1: abut_ex is initialized,
    3489             :                  * - C2: abut_ex is logically abutting ex,
    3490             :                  * - C3: abut_ex is physically abutting ex,
    3491             :                  * - C4: abut_ex can receive the additional blocks without
    3492             :                  *   overflowing the (initialized) length limit.
    3493             :                  */
    3494        1663 :                 if ((!ext4_ext_is_unwritten(abut_ex)) &&                /*C1*/
    3495        1436 :                         ((prev_lblk + prev_len) == ee_block) &&         /*C2*/
    3496        1394 :                         ((prev_pblk + prev_len) == ee_pblk) &&          /*C3*/
    3497         365 :                         (prev_len < (EXT_INIT_MAX_LEN - map_len))) { /*C4*/
    3498         365 :                         err = ext4_ext_get_access(handle, inode, path + depth);
    3499         365 :                         if (err)
    3500           0 :                                 goto out;
    3501             : 
    3502         365 :                         trace_ext4_ext_convert_to_initialized_fastpath(inode,
    3503             :                                 map, ex, abut_ex);
    3504             : 
    3505             :                         /* Shift the start of ex by 'map_len' blocks */
    3506         365 :                         ex->ee_block = cpu_to_le32(ee_block + map_len);
    3507         365 :                         ext4_ext_store_pblock(ex, ee_pblk + map_len);
    3508         365 :                         ex->ee_len = cpu_to_le16(ee_len - map_len);
    3509         365 :                         ext4_ext_mark_unwritten(ex); /* Restore the flag */
    3510             : 
    3511             :                         /* Extend abut_ex by 'map_len' blocks */
    3512         365 :                         abut_ex->ee_len = cpu_to_le16(prev_len + map_len);
    3513             : 
    3514             :                         /* Result: number of initialized blocks past m_lblk */
    3515         365 :                         allocated = map_len;
    3516             :                 }
    3517        3560 :         } else if (((map->m_lblk + map_len) == (ee_block + ee_len)) &&
    3518        1010 :                    (map_len < ee_len) &&     /*L1*/
    3519        1010 :                    ex < EXT_LAST_EXTENT(eh)) {       /*L2*/
    3520             :                 /* See if we can merge right */
    3521         200 :                 ext4_lblk_t next_lblk;
    3522         200 :                 ext4_fsblk_t next_pblk, ee_pblk;
    3523         200 :                 unsigned int next_len;
    3524             : 
    3525         200 :                 abut_ex = ex + 1;
    3526         200 :                 next_lblk = le32_to_cpu(abut_ex->ee_block);
    3527         200 :                 next_len = ext4_ext_get_actual_len(abut_ex);
    3528         200 :                 next_pblk = ext4_ext_pblock(abut_ex);
    3529         200 :                 ee_pblk = ext4_ext_pblock(ex);
    3530             : 
    3531             :                 /*
    3532             :                  * A transfer of blocks from 'ex' to 'abut_ex' is allowed
    3533             :                  * upon those conditions:
    3534             :                  * - C1: abut_ex is initialized,
    3535             :                  * - C2: abut_ex is logically abutting ex,
    3536             :                  * - C3: abut_ex is physically abutting ex,
    3537             :                  * - C4: abut_ex can receive the additional blocks without
    3538             :                  *   overflowing the (initialized) length limit.
    3539             :                  */
    3540         200 :                 if ((!ext4_ext_is_unwritten(abut_ex)) &&                /*C1*/
    3541           0 :                     ((map->m_lblk + map_len) == next_lblk) &&                /*C2*/
    3542           0 :                     ((ee_pblk + ee_len) == next_pblk) &&                /*C3*/
    3543           0 :                     (next_len < (EXT_INIT_MAX_LEN - map_len))) {     /*C4*/
    3544           0 :                         err = ext4_ext_get_access(handle, inode, path + depth);
    3545           0 :                         if (err)
    3546           0 :                                 goto out;
    3547             : 
    3548           0 :                         trace_ext4_ext_convert_to_initialized_fastpath(inode,
    3549             :                                 map, ex, abut_ex);
    3550             : 
    3551             :                         /* Shift the start of abut_ex by 'map_len' blocks */
    3552           0 :                         abut_ex->ee_block = cpu_to_le32(next_lblk - map_len);
    3553           0 :                         ext4_ext_store_pblock(abut_ex, next_pblk - map_len);
    3554           0 :                         ex->ee_len = cpu_to_le16(ee_len - map_len);
    3555           0 :                         ext4_ext_mark_unwritten(ex); /* Restore the flag */
    3556             : 
    3557             :                         /* Extend abut_ex by 'map_len' blocks */
    3558           0 :                         abut_ex->ee_len = cpu_to_le16(next_len + map_len);
    3559             : 
    3560             :                         /* Result: number of initialized blocks past m_lblk */
    3561           0 :                         allocated = map_len;
    3562             :                 }
    3563             :         }
    3564         365 :         if (allocated) {
    3565             :                 /* Mark the block containing both extents as dirty */
    3566         365 :                 err = ext4_ext_dirty(handle, inode, path + depth);
    3567             : 
    3568             :                 /* Update path to point to the right extent */
    3569         365 :                 path[depth].p_ext = abut_ex;
    3570         365 :                 goto out;
    3571             :         } else
    3572        4858 :                 allocated = ee_len - (map->m_lblk - ee_block);
    3573             : 
    3574        4858 :         WARN_ON(map->m_lblk < ee_block);
    3575             :         /*
    3576             :          * It is safe to convert extent to initialized via explicit
    3577             :          * zeroout only if extent is fully inside i_size or new_size.
    3578             :          */
    3579        4858 :         split_flag |= ee_block + ee_len <= eof_block ? EXT4_EXT_MAY_ZEROOUT : 0;
    3580             : 
    3581        4858 :         if (EXT4_EXT_MAY_ZEROOUT & split_flag)
    3582        1989 :                 max_zeroout = sbi->s_extent_max_zeroout_kb >>
    3583        1989 :                         (inode->i_sb->s_blocksize_bits - 10);
    3584             : 
    3585             :         /*
    3586             :          * five cases:
    3587             :          * 1. split the extent into three extents.
    3588             :          * 2. split the extent into two extents, zeroout the head of the first
    3589             :          *    extent.
    3590             :          * 3. split the extent into two extents, zeroout the tail of the second
    3591             :          *    extent.
     3592             :          * 4. split the extent into two extents without zeroout.
     3593             :          * 5. no splitting needed, just possibly zeroout the head and/or the
     3594             :          *    tail of the extent.
    3595             :          */
    3596        4858 :         split_map.m_lblk = map->m_lblk;
    3597        4858 :         split_map.m_len = map->m_len;
    3598             : 
    3599        4858 :         if (max_zeroout && (allocated > split_map.m_len)) {
    3600           0 :                 if (allocated <= max_zeroout) {
    3601             :                         /* case 3 or 5 */
    3602           0 :                         zero_ex1.ee_block =
    3603           0 :                                  cpu_to_le32(split_map.m_lblk +
    3604             :                                              split_map.m_len);
    3605           0 :                         zero_ex1.ee_len =
    3606           0 :                                 cpu_to_le16(allocated - split_map.m_len);
    3607           0 :                         ext4_ext_store_pblock(&zero_ex1,
    3608           0 :                                 ext4_ext_pblock(ex) + split_map.m_lblk +
    3609           0 :                                 split_map.m_len - ee_block);
    3610           0 :                         err = ext4_ext_zeroout(inode, &zero_ex1);
    3611           0 :                         if (err)
    3612           0 :                                 goto fallback;
    3613           0 :                         split_map.m_len = allocated;
    3614             :                 }
    3615           0 :                 if (split_map.m_lblk - ee_block + split_map.m_len <
    3616             :                                                                 max_zeroout) {
    3617             :                         /* case 2 or 5 */
    3618           0 :                         if (split_map.m_lblk != ee_block) {
    3619           0 :                                 zero_ex2.ee_block = ex->ee_block;
    3620           0 :                                 zero_ex2.ee_len = cpu_to_le16(split_map.m_lblk -
    3621             :                                                         ee_block);
    3622           0 :                                 ext4_ext_store_pblock(&zero_ex2,
    3623             :                                                       ext4_ext_pblock(ex));
    3624           0 :                                 err = ext4_ext_zeroout(inode, &zero_ex2);
    3625           0 :                                 if (err)
    3626           0 :                                         goto fallback;
    3627             :                         }
    3628             : 
    3629           0 :                         split_map.m_len += split_map.m_lblk - ee_block;
    3630           0 :                         split_map.m_lblk = ee_block;
    3631           0 :                         allocated = map->m_len;
    3632             :                 }
    3633             :         }
    3634             : 
    3635        4858 : fallback:
    3636        4858 :         err = ext4_split_extent(handle, inode, ppath, &split_map, split_flag,
    3637             :                                 flags);
    3638        4858 :         if (err > 0)
    3639             :                 err = 0;
    3640           0 : out:
    3641             :         /* If we have gotten a failure, don't zero out status tree */
    3642         365 :         if (!err) {
    3643        5223 :                 ext4_zeroout_es(inode, &zero_ex1);
    3644        5223 :                 ext4_zeroout_es(inode, &zero_ex2);
    3645             :         }
    3646        5223 :         return err ? err : allocated;
    3647             : }
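
Illustrative note (not part of the listing): the "merge left" fast path above hands the first map_len newly written blocks of the unwritten extent 'ex' to its initialized left neighbour when conditions C1-C4 hold, instead of inserting a new extent. The userspace sketch below reproduces just that check-and-transfer arithmetic; the simple_extent type, merge_left() helper, and the INIT_MAX_LEN stand-in for EXT_INIT_MAX_LEN are assumptions made for the example, not kernel definitions.

    #include <stdio.h>

    #define INIT_MAX_LEN 32768          /* stand-in for EXT_INIT_MAX_LEN */

    struct simple_extent {
            unsigned int lblk, len;
            unsigned long long pblk;
            int unwritten;
    };

    /* Returns 1 if the transfer was possible (conditions C1-C4), 0 otherwise. */
    static int merge_left(struct simple_extent *prev, struct simple_extent *ex,
                          unsigned int map_len)
    {
            if (prev->unwritten)                              /* C1 */
                    return 0;
            if (prev->lblk + prev->len != ex->lblk)           /* C2 */
                    return 0;
            if (prev->pblk + prev->len != ex->pblk)           /* C3 */
                    return 0;
            if (prev->len >= INIT_MAX_LEN - map_len)          /* C4 */
                    return 0;

            /* shift the start of ex by map_len blocks ... */
            ex->lblk += map_len;
            ex->pblk += map_len;
            ex->len  -= map_len;
            /* ... and extend prev by the same amount */
            prev->len += map_len;
            return 1;
    }

    int main(void)
    {
            struct simple_extent prev = { 0, 10, 1000, 0 };
            struct simple_extent ex   = { 10, 20, 1010, 1 };

            if (merge_left(&prev, &ex, 4))
                    printf("prev: [%u,+%u)  ex: [%u,+%u)\n",
                           prev.lblk, prev.len, ex.lblk, ex.len);
            return 0;
    }

With the example values the neighbour grows to [0,+14) and the unwritten extent shrinks to [14,+16), avoiding the insert-then-merge memmove() cost the comment describes.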
    3648             : 
    3649             : /*
    3650             :  * This function is called by ext4_ext_map_blocks() from
     3651             :  * ext4_get_blocks_dio_write() when DIO needs to write
     3652             :  * to an unwritten extent.
    3653             :  *
    3654             :  * Writing to an unwritten extent may result in splitting the unwritten
    3655             :  * extent into multiple initialized/unwritten extents (up to three)
    3656             :  * There are three possibilities:
    3657             :  *   a> There is no split required: Entire extent should be unwritten
    3658             :  *   b> Splits in two extents: Write is happening at either end of the extent
     3659             :  *   c> Splits in three extents: Someone is writing in the middle of the extent
    3660             :  *
    3661             :  * This works the same way in the case of initialized -> unwritten conversion.
    3662             :  *
     3663             :  * One or more index blocks may be needed if the extent tree grows after
     3664             :  * the unwritten extent is split. To prevent ENOSPC from occurring at I/O
     3665             :  * completion, we need to split the unwritten extent before the DIO
     3666             :  * submits the I/O. The unwritten extent handled here will be split
     3667             :  * into (at most) three unwritten extents. After the I/O completes, the part
     3668             :  * that was filled will be converted to initialized by the end_io callback
     3669             :  * via ext4_convert_unwritten_extents().
    3670             :  *
    3671             :  * Returns the size of unwritten extent to be written on success.
    3672             :  */
    3673      515593 : static int ext4_split_convert_extents(handle_t *handle,
    3674             :                                         struct inode *inode,
    3675             :                                         struct ext4_map_blocks *map,
    3676             :                                         struct ext4_ext_path **ppath,
    3677             :                                         int flags)
    3678             : {
    3679      515593 :         struct ext4_ext_path *path = *ppath;
    3680      515593 :         ext4_lblk_t eof_block;
    3681      515593 :         ext4_lblk_t ee_block;
    3682      515593 :         struct ext4_extent *ex;
    3683      515593 :         unsigned int ee_len;
    3684      515593 :         int split_flag = 0, depth;
    3685             : 
    3686      515593 :         ext_debug(inode, "logical block %llu, max_blocks %u\n",
    3687             :                   (unsigned long long)map->m_lblk, map->m_len);
    3688             : 
    3689     1031186 :         eof_block = (EXT4_I(inode)->i_disksize + inode->i_sb->s_blocksize - 1)
    3690      515593 :                         >> inode->i_sb->s_blocksize_bits;
    3691      515593 :         if (eof_block < map->m_lblk + map->m_len)
    3692             :                 eof_block = map->m_lblk + map->m_len;
    3693             :         /*
    3694             :          * It is safe to convert extent to initialized via explicit
    3695             :          * zeroout only if extent is fully inside i_size or new_size.
    3696             :          */
    3697      515593 :         depth = ext_depth(inode);
    3698      515593 :         ex = path[depth].p_ext;
    3699      515593 :         ee_block = le32_to_cpu(ex->ee_block);
    3700      515593 :         ee_len = ext4_ext_get_actual_len(ex);
    3701             : 
    3702             :         /* Convert to unwritten */
    3703      515593 :         if (flags & EXT4_GET_BLOCKS_CONVERT_UNWRITTEN) {
    3704             :                 split_flag |= EXT4_EXT_DATA_VALID1;
    3705             :         /* Convert to initialized */
    3706      461924 :         } else if (flags & EXT4_GET_BLOCKS_CONVERT) {
    3707      461924 :                 split_flag |= ee_block + ee_len <= eof_block ?
    3708      461924 :                               EXT4_EXT_MAY_ZEROOUT : 0;
    3709      461924 :                 split_flag |= (EXT4_EXT_MARK_UNWRIT2 | EXT4_EXT_DATA_VALID2);
    3710             :         }
    3711      515593 :         flags |= EXT4_GET_BLOCKS_PRE_IO;
    3712      515593 :         return ext4_split_extent(handle, inode, ppath, map, split_flag, flags);
    3713             : }
    3714             : 
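The a>/b>/c> cases above reduce to a simple count of how many pieces a split of an unwritten extent leaves behind, depending on where the write region falls inside it. The following is a standalone userspace sketch of that counting, not kernel code; the helper name extent_piece_count() is invented for the example and does not exist in ext4.

#include <assert.h>

/*
 * Given an unwritten extent [ee_block, ee_block + ee_len) and a write
 * region [lblk, lblk + len) fully inside it, return how many extents
 * remain after the split (1, 2, or 3).  Illustrative only.
 */
static int extent_piece_count(unsigned int ee_block, unsigned int ee_len,
                              unsigned int lblk, unsigned int len)
{
        int pieces = 1;                         /* the written middle part */

        if (lblk > ee_block)                    /* an unwritten head remains */
                pieces++;
        if (lblk + len < ee_block + ee_len)     /* an unwritten tail remains */
                pieces++;
        return pieces;
}

int main(void)
{
        assert(extent_piece_count(100, 8, 100, 8) == 1); /* a> whole extent */
        assert(extent_piece_count(100, 8, 100, 4) == 2); /* b> write at one end */
        assert(extent_piece_count(100, 8, 103, 2) == 3); /* c> write in the middle */
        return 0;
}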
    3715     1801893 : static int ext4_convert_unwritten_extents_endio(handle_t *handle,
    3716             :                                                 struct inode *inode,
    3717             :                                                 struct ext4_map_blocks *map,
    3718             :                                                 struct ext4_ext_path **ppath)
    3719             : {
    3720     1801893 :         struct ext4_ext_path *path = *ppath;
    3721     1801893 :         struct ext4_extent *ex;
    3722     1801893 :         ext4_lblk_t ee_block;
    3723     1801893 :         unsigned int ee_len;
    3724     1801893 :         int depth;
    3725     1801893 :         int err = 0;
    3726             : 
    3727     1801893 :         depth = ext_depth(inode);
    3728     1801893 :         ex = path[depth].p_ext;
    3729     1801893 :         ee_block = le32_to_cpu(ex->ee_block);
    3730     1801893 :         ee_len = ext4_ext_get_actual_len(ex);
    3731             : 
    3732     1801893 :         ext_debug(inode, "logical block %llu, max_blocks %u\n",
    3733             :                   (unsigned long long)ee_block, ee_len);
    3734             : 
    3735             :         /* If the extent is larger than requested, it is a clear sign that we
    3736             :          * still have some extent state machine issues left, so extent_split is
    3737             :          * still required.
    3738             :          * TODO: Once all related issues are fixed, this situation should be
    3739             :          * illegal.
    3740             :          */
    3741     1801893 :         if (ee_block != map->m_lblk || ee_len > map->m_len) {
    3742             : #ifdef CONFIG_EXT4_DEBUG
    3743          34 :                 ext4_warning(inode->i_sb, "Inode (%ld) finished: extent logical block %llu,"
    3744             :                              " len %u; IO logical block %llu, len %u",
    3745             :                              inode->i_ino, (unsigned long long)ee_block, ee_len,
    3746             :                              (unsigned long long)map->m_lblk, map->m_len);
    3747             : #endif
    3748          32 :                 err = ext4_split_convert_extents(handle, inode, map, ppath,
    3749             :                                                  EXT4_GET_BLOCKS_CONVERT);
    3750          32 :                 if (err < 0)
    3751             :                         return err;
    3752          32 :                 path = ext4_find_extent(inode, map->m_lblk, ppath, 0);
    3753          32 :                 if (IS_ERR(path))
    3754           0 :                         return PTR_ERR(path);
    3755          32 :                 depth = ext_depth(inode);
    3756          32 :                 ex = path[depth].p_ext;
    3757             :         }
    3758             : 
    3759     1801891 :         err = ext4_ext_get_access(handle, inode, path + depth);
    3760     1801914 :         if (err)
    3761           0 :                 goto out;
    3762             :         /* first mark the extent as initialized */
    3763     1801914 :         ext4_ext_mark_initialized(ex);
    3764             : 
    3765             :         /* note: ext4_ext_correct_indexes() isn't needed here because
    3766             :          * borders are not changed
    3767             :          */
    3768     1801914 :         ext4_ext_try_to_merge(handle, inode, path, ex);
    3769             : 
    3770             :         /* Mark modified extent as dirty */
    3771     1801875 :         err = ext4_ext_dirty(handle, inode, path + path->p_depth);
    3772             : out:
    3773             :         ext4_ext_show_leaf(inode, path);
    3774             :         return err;
    3775             : }
    3776             : 
    3777             : static int
    3778      122551 : convert_initialized_extent(handle_t *handle, struct inode *inode,
    3779             :                            struct ext4_map_blocks *map,
    3780             :                            struct ext4_ext_path **ppath,
    3781             :                            unsigned int *allocated)
    3782             : {
    3783      122551 :         struct ext4_ext_path *path = *ppath;
    3784      122551 :         struct ext4_extent *ex;
    3785      122551 :         ext4_lblk_t ee_block;
    3786      122551 :         unsigned int ee_len;
    3787      122551 :         int depth;
    3788      122551 :         int err = 0;
    3789             : 
    3790             :         /*
    3791             :          * Make sure that the extent is no bigger than we support with
    3792             :          * unwritten extents
    3793             :          */
    3794      122551 :         if (map->m_len > EXT_UNWRITTEN_MAX_LEN)
    3795           0 :                 map->m_len = EXT_UNWRITTEN_MAX_LEN / 2;
    3796             : 
    3797      122551 :         depth = ext_depth(inode);
    3798      122551 :         ex = path[depth].p_ext;
    3799      122551 :         ee_block = le32_to_cpu(ex->ee_block);
    3800      122551 :         ee_len = ext4_ext_get_actual_len(ex);
    3801             : 
    3802      122551 :         ext_debug(inode, "logical block %llu, max_blocks %u\n",
    3803             :                   (unsigned long long)ee_block, ee_len);
    3804             : 
    3805      122551 :         if (ee_block != map->m_lblk || ee_len > map->m_len) {
    3806       53669 :                 err = ext4_split_convert_extents(handle, inode, map, ppath,
    3807             :                                 EXT4_GET_BLOCKS_CONVERT_UNWRITTEN);
    3808       53669 :                 if (err < 0)
    3809             :                         return err;
    3810       53665 :                 path = ext4_find_extent(inode, map->m_lblk, ppath, 0);
    3811       53665 :                 if (IS_ERR(path))
    3812           0 :                         return PTR_ERR(path);
    3813       53665 :                 depth = ext_depth(inode);
    3814       53665 :                 ex = path[depth].p_ext;
    3815       53665 :                 if (!ex) {
    3816           0 :                         EXT4_ERROR_INODE(inode, "unexpected hole at %lu",
    3817             :                                          (unsigned long) map->m_lblk);
    3818           0 :                         return -EFSCORRUPTED;
    3819             :                 }
    3820             :         }
    3821             : 
    3822      122547 :         err = ext4_ext_get_access(handle, inode, path + depth);
    3823      122547 :         if (err)
    3824             :                 return err;
    3825             :         /* first mark the extent as unwritten */
    3826      122547 :         ext4_ext_mark_unwritten(ex);
    3827             : 
    3828             :         /* note: ext4_ext_correct_indexes() isn't needed here because
    3829             :          * borders are not changed
    3830             :          */
    3831      122547 :         ext4_ext_try_to_merge(handle, inode, path, ex);
    3832             : 
    3833             :         /* Mark modified extent as dirty */
    3834      122547 :         err = ext4_ext_dirty(handle, inode, path + path->p_depth);
    3835      122547 :         if (err)
    3836             :                 return err;
    3837      122547 :         ext4_ext_show_leaf(inode, path);
    3838             : 
    3839      122547 :         ext4_update_inode_fsync_trans(handle, inode, 1);
    3840             : 
    3841      122547 :         map->m_flags |= EXT4_MAP_UNWRITTEN;
    3842      122547 :         if (*allocated > map->m_len)
    3843       28770 :                 *allocated = map->m_len;
    3844      122547 :         map->m_len = *allocated;
    3845      122547 :         return 0;
    3846             : }
    3847             : 
    3848             : static int
    3849     3244191 : ext4_ext_handle_unwritten_extents(handle_t *handle, struct inode *inode,
    3850             :                         struct ext4_map_blocks *map,
    3851             :                         struct ext4_ext_path **ppath, int flags,
    3852             :                         unsigned int allocated, ext4_fsblk_t newblock)
    3853             : {
    3854     3244191 :         struct ext4_ext_path __maybe_unused *path = *ppath;
    3855     3244191 :         int ret = 0;
    3856     3244191 :         int err = 0;
    3857             : 
    3858     3244191 :         ext_debug(inode, "logical block %llu, max_blocks %u, flags 0x%x, allocated %u\n",
    3859             :                   (unsigned long long)map->m_lblk, map->m_len, flags,
    3860             :                   allocated);
    3861     3244191 :         ext4_ext_show_leaf(inode, path);
    3862             : 
    3863             :         /*
    3864             :          * When writing into unwritten space, we should not fail to
    3865             :          * allocate metadata blocks for the new extent block if needed.
    3866             :          */
    3867     3244191 :         flags |= EXT4_GET_BLOCKS_METADATA_NOFAIL;
    3868             : 
    3869     3244191 :         trace_ext4_ext_handle_unwritten_extents(inode, map, flags,
    3870             :                                                     allocated, newblock);
    3871             : 
    3872             :         /* get_block() before submitting IO, split the extent */
    3873     3244232 :         if (flags & EXT4_GET_BLOCKS_PRE_IO) {
    3874      461893 :                 ret = ext4_split_convert_extents(handle, inode, map, ppath,
    3875             :                                          flags | EXT4_GET_BLOCKS_CONVERT);
    3876      461898 :                 if (ret < 0) {
    3877           0 :                         err = ret;
    3878           0 :                         goto out2;
    3879             :                 }
    3880             :                 /*
    3881             :                  * shouldn't get a 0 return when splitting an extent unless
    3882             :                  * m_len is 0 (bug) or the extent has been corrupted
    3883             :                  */
    3884      461898 :                 if (unlikely(ret == 0)) {
    3885           0 :                         EXT4_ERROR_INODE(inode,
    3886             :                                          "unexpected ret == 0, m_len = %u",
    3887             :                                          map->m_len);
    3888           0 :                         err = -EFSCORRUPTED;
    3889           0 :                         goto out2;
    3890             :                 }
    3891      461898 :                 map->m_flags |= EXT4_MAP_UNWRITTEN;
    3892      461898 :                 goto out;
    3893             :         }
    3894             :         /* IO end_io complete, convert the filled extent to written */
    3895     2782339 :         if (flags & EXT4_GET_BLOCKS_CONVERT) {
    3896     1801899 :                 err = ext4_convert_unwritten_extents_endio(handle, inode, map,
    3897             :                                                            ppath);
    3898     1801915 :                 if (err < 0)
    3899           0 :                         goto out2;
    3900     1801915 :                 ext4_update_inode_fsync_trans(handle, inode, 1);
    3901     1801906 :                 goto map_out;
    3902             :         }
    3903             :         /* buffered IO cases */
    3904             :         /*
    3905             :          * A repeated fallocate creation request:
    3906             :          * we already have an unwritten extent
    3907             :          */
    3908      980440 :         if (flags & EXT4_GET_BLOCKS_UNWRIT_EXT) {
    3909      555863 :                 map->m_flags |= EXT4_MAP_UNWRITTEN;
    3910      555863 :                 goto map_out;
    3911             :         }
    3912             : 
    3913             :         /* buffered READ or buffered write_begin() lookup */
    3914      424577 :         if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) {
    3915             :                 /*
    3916             :                  * We have blocks reserved already.  We
    3917             :                  * return allocated blocks so that delalloc
    3918             :                  * won't do block reservation for us.  But
    3919             :                  * the buffer head will be unmapped so that
    3920             :                  * a read from the block returns 0s.
    3921             :                  */
    3922      419354 :                 map->m_flags |= EXT4_MAP_UNWRITTEN;
    3923      419354 :                 goto out1;
    3924             :         }
    3925             : 
    3926             :         /*
    3927             :          * Default case when (flags & EXT4_GET_BLOCKS_CREATE) == 1.
    3928             :          * For buffered writes, at writepage time, etc.  Convert a
    3929             :          * discovered unwritten extent to written.
    3930             :          */
    3931        5223 :         ret = ext4_ext_convert_to_initialized(handle, inode, map, ppath, flags);
    3932        5223 :         if (ret < 0) {
    3933           0 :                 err = ret;
    3934           0 :                 goto out2;
    3935             :         }
    3936        5223 :         ext4_update_inode_fsync_trans(handle, inode, 1);
    3937             :         /*
    3938             :          * shouldn't get a 0 return when converting an unwritten extent
    3939             :          * unless m_len is 0 (bug) or the extent has been corrupted
    3940             :          */
    3941        5223 :         if (unlikely(ret == 0)) {
    3942           0 :                 EXT4_ERROR_INODE(inode, "unexpected ret == 0, m_len = %u",
    3943             :                                  map->m_len);
    3944           0 :                 err = -EFSCORRUPTED;
    3945           0 :                 goto out2;
    3946             :         }
    3947             : 
    3948        5223 : out:
    3949      467121 :         allocated = ret;
    3950      467121 :         map->m_flags |= EXT4_MAP_NEW;
    3951     2824890 : map_out:
    3952     2824890 :         map->m_flags |= EXT4_MAP_MAPPED;
    3953     3244244 : out1:
    3954     3244244 :         map->m_pblk = newblock;
    3955     3244244 :         if (allocated > map->m_len)
    3956             :                 allocated = map->m_len;
    3957     3244244 :         map->m_len = allocated;
    3958     3244244 :         ext4_ext_show_leaf(inode, path);
    3959     3244244 : out2:
    3960     3244244 :         return err ? err : allocated;
    3961             : }
    3962             : 
    3963             : /*
    3964             :  * get_implied_cluster_alloc - check to see if the requested
    3965             :  * allocation (in the map structure) overlaps with a cluster already
    3966             :  * allocated in an extent.
    3967             :  *      @sb     The filesystem superblock structure
    3968             :  *      @map    The requested lblk->pblk mapping
    3969             :  *      @ex     The extent structure which might contain an implied
    3970             :  *                      cluster allocation
    3971             :  *
    3972             :  * This function is called by ext4_ext_map_blocks() after we failed to
    3973             :  * find blocks that were already in the inode's extent tree.  Hence,
    3974             :  * we know that the beginning of the requested region cannot overlap
    3975             :  * the extent from the inode's extent tree.  There are three cases we
    3976             :  * want to catch.  The first is this case:
    3977             :  *
    3978             :  *               |--- cluster # N--|
    3979             :  *    |--- extent ---|  |---- requested region ---|
    3980             :  *                      |==========|
    3981             :  *
    3982             :  * The second case that we need to test for is this one:
    3983             :  *
    3984             :  *   |--------- cluster # N ----------------|
    3985             :  *         |--- requested region --|   |------- extent ----|
    3986             :  *         |=======================|
    3987             :  *
    3988             :  * The third case is when the requested region lies between two extents
    3989             :  * within the same cluster:
    3990             :  *          |------------- cluster # N-------------|
    3991             :  * |----- ex -----|                  |---- ex_right ----|
    3992             :  *                  |------ requested region ------|
    3993             :  *                  |================|
    3994             :  *
    3995             :  * In each of the above cases, we need to set the map->m_pblk and
    3996             :  * map->m_len so they correspond to the extent labelled as
    3997             :  * "|====|" from cluster #N, since it is already in use for data in
    3998             :  * cluster EXT4_B2C(sbi, map->m_lblk).       We will then return 1 to
    3999             :  * signal to ext4_ext_map_blocks() that map->m_pblk should be treated
    4000             :  * as a new "allocated" block region.  Otherwise, we will return 0 and
    4001             :  * ext4_ext_map_blocks() will then allocate one or more new clusters
    4002             :  * by calling ext4_mb_new_blocks().
    4003             :  */
    4004           1 : static int get_implied_cluster_alloc(struct super_block *sb,
    4005             :                                      struct ext4_map_blocks *map,
    4006             :                                      struct ext4_extent *ex,
    4007             :                                      struct ext4_ext_path *path)
    4008             : {
    4009           1 :         struct ext4_sb_info *sbi = EXT4_SB(sb);
    4010           1 :         ext4_lblk_t c_offset = EXT4_LBLK_COFF(sbi, map->m_lblk);
    4011           1 :         ext4_lblk_t ex_cluster_start, ex_cluster_end;
    4012           1 :         ext4_lblk_t rr_cluster_start;
    4013           1 :         ext4_lblk_t ee_block = le32_to_cpu(ex->ee_block);
    4014           1 :         ext4_fsblk_t ee_start = ext4_ext_pblock(ex);
    4015           1 :         unsigned short ee_len = ext4_ext_get_actual_len(ex);
    4016             : 
    4017             :         /* The extent passed in that we are trying to match */
    4018           1 :         ex_cluster_start = EXT4_B2C(sbi, ee_block);
    4019           1 :         ex_cluster_end = EXT4_B2C(sbi, ee_block + ee_len - 1);
    4020             : 
    4021             :         /* The requested region passed into ext4_map_blocks() */
    4022           1 :         rr_cluster_start = EXT4_B2C(sbi, map->m_lblk);
    4023             : 
    4024           1 :         if ((rr_cluster_start == ex_cluster_end) ||
    4025           1 :             (rr_cluster_start == ex_cluster_start)) {
    4026           1 :                 if (rr_cluster_start == ex_cluster_end)
    4027           1 :                         ee_start += ee_len - 1;
    4028           1 :                 map->m_pblk = EXT4_PBLK_CMASK(sbi, ee_start) + c_offset;
    4029           1 :                 map->m_len = min(map->m_len,
    4030             :                                  (unsigned) sbi->s_cluster_ratio - c_offset);
    4031             :                 /*
    4032             :                  * Check for and handle this case:
    4033             :                  *
    4034             :                  *   |--------- cluster # N-------------|
    4035             :                  *                     |------- extent ----|
    4036             :                  *         |--- requested region ---|
    4037             :                  *         |===========|
    4038             :                  */
    4039             : 
    4040           1 :                 if (map->m_lblk < ee_block)
    4041           0 :                         map->m_len = min(map->m_len, ee_block - map->m_lblk);
    4042             : 
    4043             :                 /*
    4044             :                  * Check for the case where there is already another allocated
    4045             :                  * block to the right of 'ex' but before the end of the cluster.
    4046             :                  *
    4047             :                  *          |------------- cluster # N-------------|
    4048             :                  * |----- ex -----|                  |---- ex_right ----|
    4049             :                  *                  |------ requested region ------|
    4050             :                  *                  |================|
    4051             :                  */
    4052           1 :                 if (map->m_lblk > ee_block) {
    4053           1 :                         ext4_lblk_t next = ext4_ext_next_allocated_block(path);
    4054           1 :                         map->m_len = min(map->m_len, next - map->m_lblk);
    4055             :                 }
    4056             : 
    4057           1 :                 trace_ext4_get_implied_cluster_alloc_exit(sb, map, 1);
    4058           1 :                 return 1;
    4059             :         }
    4060             : 
    4061           0 :         trace_ext4_get_implied_cluster_alloc_exit(sb, map, 0);
    4062           0 :         return 0;
    4063             : }
    4064             : 
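The three overlap cases described above come down to shift-and-mask cluster arithmetic in the style of the EXT4_B2C()/EXT4_LBLK_COFF() macros. Below is a minimal userspace sketch of that arithmetic, assuming a power-of-two cluster ratio; the macro names B2C/LBLK_COFF and the sample numbers are invented for the example.

#include <stdio.h>

#define CLUSTER_BITS   4                       /* 16 blocks per cluster (example value) */
#define B2C(blk)       ((blk) >> CLUSTER_BITS)               /* block -> cluster */
#define LBLK_COFF(blk) ((blk) & ((1u << CLUSTER_BITS) - 1))  /* offset within cluster */

int main(void)
{
        unsigned int ee_block = 30, ee_len = 10;   /* extent covers blocks 30..39 */
        unsigned int m_lblk = 42;                  /* requested region starts here */

        /* first case from the comment: the request starts in the extent's last cluster */
        if (B2C(m_lblk) == B2C(ee_block + ee_len - 1))
                printf("block %u shares cluster %u with the extent end "
                       "(offset %u within the cluster)\n",
                       m_lblk, B2C(m_lblk), LBLK_COFF(m_lblk));
        return 0;
}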
    4065             : 
    4066             : /*
    4067             :  * Block allocation/map/preallocation routine for extent-based files
    4068             :  *
    4069             :  *
    4070             :  * Needs to be called with
    4071             :  * down_read(&EXT4_I(inode)->i_data_sem) if not allocating file system blocks
    4072             :  * (i.e., create is zero). Otherwise down_write(&EXT4_I(inode)->i_data_sem).
    4073             :  *
    4074             :  * return > 0, number of blocks already mapped/allocated
    4075             :  *          if create == 0 and these are pre-allocated blocks
    4076             :  *              buffer head is unmapped
    4077             :  *          otherwise blocks are mapped
    4078             :  *
    4079             :  * return = 0, if plain lookup failed (blocks have not been allocated)
    4080             :  *          buffer head is unmapped
    4081             :  *
    4082             :  * return < 0, error case.
    4083             :  */
    4084    11753099 : int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
    4085             :                         struct ext4_map_blocks *map, int flags)
    4086             : {
    4087    11753099 :         struct ext4_ext_path *path = NULL;
    4088    11753099 :         struct ext4_extent newex, *ex, ex2;
    4089    11753099 :         struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
    4090    11753099 :         ext4_fsblk_t newblock = 0, pblk;
    4091    11753099 :         int err = 0, depth, ret;
    4092    11753099 :         unsigned int allocated = 0, offset = 0;
    4093    11753099 :         unsigned int allocated_clusters = 0;
    4094    11753099 :         struct ext4_allocation_request ar;
    4095    11753099 :         ext4_lblk_t cluster_offset;
    4096             : 
    4097    11753099 :         ext_debug(inode, "blocks %u/%u requested\n", map->m_lblk, map->m_len);
    4098    11753099 :         trace_ext4_ext_map_blocks_enter(inode, map->m_lblk, map->m_len, flags);
    4099             : 
    4100             :         /* find extent for this block */
    4101    11743649 :         path = ext4_find_extent(inode, map->m_lblk, NULL, 0);
    4102    11744498 :         if (IS_ERR(path)) {
    4103           1 :                 err = PTR_ERR(path);
    4104           1 :                 path = NULL;
    4105           1 :                 goto out;
    4106             :         }
    4107             : 
    4108    11744497 :         depth = ext_depth(inode);
    4109             : 
    4110             :         /*
    4111             :          * a consistent leaf must not be empty;
    4112             :          * this situation is possible, though, _during_ tree modification;
    4113             :          * this is why the assert can't be put in ext4_find_extent()
    4114             :          */
    4115    11744497 :         if (unlikely(path[depth].p_ext == NULL && depth != 0)) {
    4116           0 :                 EXT4_ERROR_INODE(inode, "bad extent address "
    4117             :                                  "lblock: %lu, depth: %d pblock %lld",
    4118             :                                  (unsigned long) map->m_lblk, depth,
    4119             :                                  path[depth].p_block);
    4120           0 :                 err = -EFSCORRUPTED;
    4121           0 :                 goto out;
    4122             :         }
    4123             : 
    4124    11744497 :         ex = path[depth].p_ext;
    4125    11744497 :         if (ex) {
    4126     8850909 :                 ext4_lblk_t ee_block = le32_to_cpu(ex->ee_block);
    4127     8850909 :                 ext4_fsblk_t ee_start = ext4_ext_pblock(ex);
    4128     8850909 :                 unsigned short ee_len;
    4129             : 
    4130             : 
    4131             :                 /*
    4132             :                  * unwritten extents are treated as holes, except that
    4133             :                  * we split out initialized portions during a write.
    4134             :                  */
    4135     8850909 :                 ee_len = ext4_ext_get_actual_len(ex);
    4136             : 
    4137     8850909 :                 trace_ext4_ext_show_extent(inode, ee_block, ee_start, ee_len);
    4138             : 
    4139             :                 /* if found extent covers block, simply return it */
    4140     8852363 :                 if (in_range(map->m_lblk, ee_block, ee_len)) {
    4141     4226085 :                         newblock = map->m_lblk - ee_block + ee_start;
    4142             :                         /* number of remaining blocks in the extent */
    4143     4226085 :                         allocated = ee_len - (map->m_lblk - ee_block);
    4144     4226085 :                         ext_debug(inode, "%u fit into %u:%d -> %llu\n",
    4145             :                                   map->m_lblk, ee_block, ee_len, newblock);
    4146             : 
    4147             :                         /*
    4148             :                          * If the extent is initialized check whether the
    4149             :                          * caller wants to convert it to unwritten.
    4150             :                          */
    4151     4226085 :                         if ((!ext4_ext_is_unwritten(ex)) &&
    4152      981881 :                             (flags & EXT4_GET_BLOCKS_CONVERT_UNWRITTEN)) {
    4153      122551 :                                 err = convert_initialized_extent(handle,
    4154             :                                         inode, map, &path, &allocated);
    4155      122551 :                                 goto out;
    4156     4103534 :                         } else if (!ext4_ext_is_unwritten(ex)) {
    4157      859331 :                                 map->m_flags |= EXT4_MAP_MAPPED;
    4158      859331 :                                 map->m_pblk = newblock;
    4159      859331 :                                 if (allocated > map->m_len)
    4160      193612 :                                         allocated = map->m_len;
    4161      859331 :                                 map->m_len = allocated;
    4162      859331 :                                 ext4_ext_show_leaf(inode, path);
    4163      859331 :                                 goto out;
    4164             :                         }
    4165             : 
    4166     3244203 :                         ret = ext4_ext_handle_unwritten_extents(
    4167             :                                 handle, inode, map, &path, flags,
    4168             :                                 allocated, newblock);
    4169     3244196 :                         if (ret < 0)
    4170           0 :                                 err = ret;
    4171             :                         else
    4172     3244196 :                                 allocated = ret;
    4173     3244196 :                         goto out;
    4174             :                 }
    4175             :         }
    4176             : 
    4177             :         /*
    4178             :          * requested block isn't allocated yet;
    4179             :          * we cannot try to create blocks if the create flag is zero
    4180             :          */
    4181     7519866 :         if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) {
    4182     3598995 :                 ext4_lblk_t hole_start, hole_len;
    4183             : 
    4184     3598995 :                 hole_start = map->m_lblk;
    4185     3598995 :                 hole_len = ext4_ext_determine_hole(inode, path, &hole_start);
    4186             :                 /*
    4187             :                  * put the just-found gap into the cache to speed up
    4188             :                  * subsequent requests
    4189             :                  */
    4190     3595931 :                 ext4_ext_put_gap_in_cache(inode, hole_start, hole_len);
    4191             : 
    4192             :                 /* Update hole_len to reflect hole size after map->m_lblk */
    4193     3604404 :                 if (hole_start != map->m_lblk)
    4194     1591776 :                         hole_len -= map->m_lblk - hole_start;
    4195     3604404 :                 map->m_pblk = 0;
    4196     3604404 :                 map->m_len = min_t(unsigned int, map->m_len, hole_len);
    4197             : 
    4198     3604404 :                 goto out;
    4199             :         }
    4200             : 
    4201             :         /*
    4202             :          * Okay, we need to do block allocation.
    4203             :          */
    4204     3920871 :         newex.ee_block = cpu_to_le32(map->m_lblk);
    4205     3920871 :         cluster_offset = EXT4_LBLK_COFF(sbi, map->m_lblk);
    4206             : 
    4207             :         /*
    4208             :          * If we are doing bigalloc, check to see if the extent returned
    4209             :          * by ext4_find_extent() implies a cluster we can use.
    4210             :          */
    4211     3920872 :         if (cluster_offset && ex &&
    4212           1 :             get_implied_cluster_alloc(inode->i_sb, map, ex, path)) {
    4213           1 :                 ar.len = allocated = map->m_len;
    4214           1 :                 newblock = map->m_pblk;
    4215           1 :                 goto got_allocated_blocks;
    4216             :         }
    4217             : 
    4218             :         /* find neighbour allocated blocks */
    4219     3920870 :         ar.lleft = map->m_lblk;
    4220     3920870 :         err = ext4_ext_search_left(inode, path, &ar.lleft, &ar.pleft);
    4221     3919839 :         if (err)
    4222           0 :                 goto out;
    4223     3919839 :         ar.lright = map->m_lblk;
    4224     3919839 :         err = ext4_ext_search_right(inode, path, &ar.lright, &ar.pright, &ex2);
    4225     3919870 :         if (err < 0)
    4226           0 :                 goto out;
    4227             : 
    4228             :         /* Check if the extent after searching to the right implies a
    4229             :          * cluster we can use. */
    4230     3919870 :         if ((sbi->s_cluster_ratio > 1) && err &&
    4231           0 :             get_implied_cluster_alloc(inode->i_sb, map, &ex2, path)) {
    4232           0 :                 ar.len = allocated = map->m_len;
    4233           0 :                 newblock = map->m_pblk;
    4234           0 :                 goto got_allocated_blocks;
    4235             :         }
    4236             : 
    4237             :         /*
    4238             :          * See if request is beyond maximum number of blocks we can have in
    4239             :          * a single extent. For an initialized extent this limit is
    4240             :          * EXT_INIT_MAX_LEN and for an unwritten extent this limit is
    4241             :          * EXT_UNWRITTEN_MAX_LEN.
    4242             :          */
    4243     3919870 :         if (map->m_len > EXT_INIT_MAX_LEN &&
    4244      509366 :             !(flags & EXT4_GET_BLOCKS_UNWRIT_EXT))
    4245           0 :                 map->m_len = EXT_INIT_MAX_LEN;
    4246     3919870 :         else if (map->m_len > EXT_UNWRITTEN_MAX_LEN &&
    4247      509409 :                  (flags & EXT4_GET_BLOCKS_UNWRIT_EXT))
    4248      509409 :                 map->m_len = EXT_UNWRITTEN_MAX_LEN;
    4249             : 
    4250             :         /* Check if we can really insert (m_lblk)::(m_lblk + m_len) extent */
    4251     3919870 :         newex.ee_len = cpu_to_le16(map->m_len);
    4252     3919870 :         err = ext4_ext_check_overlap(sbi, inode, &newex, path);
    4253     3918670 :         if (err)
    4254           0 :                 allocated = ext4_ext_get_actual_len(&newex);
    4255             :         else
    4256     3918670 :                 allocated = map->m_len;
    4257             : 
    4258             :         /* allocate new block */
    4259     3918670 :         ar.inode = inode;
    4260     3918670 :         ar.goal = ext4_ext_find_goal(inode, path, map->m_lblk);
    4261     3918988 :         ar.logical = map->m_lblk;
    4262             :         /*
    4263             :          * We calculate the offset from the beginning of the cluster
    4264             :          * for the logical block number, since when we allocate a
    4265             :          * physical cluster, the physical block should start at the
    4266             :          * same offset from the beginning of the cluster.  This is
    4267             :          * needed so that future calls to get_implied_cluster_alloc()
    4268             :          * work correctly.
    4269             :          */
    4270     3918988 :         offset = EXT4_LBLK_COFF(sbi, map->m_lblk);
    4271     3918988 :         ar.len = EXT4_NUM_B2C(sbi, offset+allocated);
    4272     3918988 :         ar.goal -= offset;
    4273     3918988 :         ar.logical -= offset;
    4274     3918988 :         if (S_ISREG(inode->i_mode))
    4275     3466834 :                 ar.flags = EXT4_MB_HINT_DATA;
    4276             :         else
    4277             :                 /* disable in-core preallocation for non-regular files */
    4278      452154 :                 ar.flags = 0;
    4279     3918988 :         if (flags & EXT4_GET_BLOCKS_NO_NORMALIZE)
    4280      690442 :                 ar.flags |= EXT4_MB_HINT_NOPREALLOC;
    4281     3918988 :         if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE)
    4282      863064 :                 ar.flags |= EXT4_MB_DELALLOC_RESERVED;
    4283     3918988 :         if (flags & EXT4_GET_BLOCKS_METADATA_NOFAIL)
    4284      865818 :                 ar.flags |= EXT4_MB_USE_RESERVED;
    4285     3918988 :         newblock = ext4_mb_new_blocks(handle, &ar, &err);
    4286     3920977 :         if (!newblock)
    4287      429263 :                 goto out;
    4288     3491714 :         allocated_clusters = ar.len;
    4289     3491714 :         ar.len = EXT4_C2B(sbi, ar.len) - offset;
    4290     3491714 :         ext_debug(inode, "allocate new block: goal %llu, found %llu/%u, requested %u\n",
    4291             :                   ar.goal, newblock, ar.len, allocated);
    4292     3491714 :         if (ar.len > allocated)
    4293           1 :                 ar.len = allocated;
    4294             : 
    4295     3491713 : got_allocated_blocks:
    4296             :         /* try to insert new extent into found leaf and return */
    4297     3491715 :         pblk = newblock + offset;
    4298     3491715 :         ext4_ext_store_pblock(&newex, pblk);
    4299     3491715 :         newex.ee_len = cpu_to_le16(ar.len);
    4300             :         /* Mark unwritten */
    4301     3491715 :         if (flags & EXT4_GET_BLOCKS_UNWRIT_EXT) {
    4302     2827747 :                 ext4_ext_mark_unwritten(&newex);
    4303     2827747 :                 map->m_flags |= EXT4_MAP_UNWRITTEN;
    4304             :         }
    4305             : 
    4306     3491715 :         err = ext4_ext_insert_extent(handle, inode, &path, &newex, flags);
    4307     3491697 :         if (err) {
    4308         411 :                 if (allocated_clusters) {
    4309         411 :                         int fb_flags = 0;
    4310             : 
    4311             :                         /*
    4312             :                          * Free the data blocks we just allocated.
    4313             :                          * It's not a good idea to call discard here directly,
    4314             :                          * but otherwise we'd need to call it on every free().
    4315             :                          */
    4316         411 :                         ext4_discard_preallocations(inode, 0);
    4317         411 :                         if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE)
    4318           4 :                                 fb_flags = EXT4_FREE_BLOCKS_NO_QUOT_UPDATE;
    4319         822 :                         ext4_free_blocks(handle, inode, NULL, newblock,
    4320         411 :                                          EXT4_C2B(sbi, allocated_clusters),
    4321             :                                          fb_flags);
    4322             :                 }
    4323         411 :                 goto out;
    4324             :         }
    4325             : 
    4326             :         /*
    4327             :          * Reduce the reserved cluster count to reflect successful deferred
    4328             :          * allocation of delayed allocated clusters or direct allocation of
    4329             :          * clusters discovered to be delayed allocated.  Once allocated, a
    4330             :          * cluster is not included in the reserved count.
    4331             :          */
    4332     3491286 :         if (test_opt(inode->i_sb, DELALLOC) && allocated_clusters) {
    4333     3486072 :                 if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) {
    4334             :                         /*
    4335             :                          * When allocating delayed allocated clusters, simply
    4336             :                          * reduce the reserved cluster count and claim quota
    4337             :                          */
    4338      862961 :                         ext4_da_update_reserve_space(inode, allocated_clusters,
    4339             :                                                         1);
    4340             :                 } else {
    4341     2623111 :                         ext4_lblk_t lblk, len;
    4342     2623111 :                         unsigned int n;
    4343             : 
    4344             :                         /*
    4345             :                          * When allocating non-delayed allocated clusters
    4346             :                          * (from fallocate, filemap, DIO, or clusters
    4347             :                          * allocated when delalloc has been disabled by
    4348             :                          * ext4_nonda_switch), reduce the reserved cluster
    4349             :                          * count by the number of allocated clusters that
    4350             :                          * have previously been delayed allocated.  Quota
    4351             :                          * has been claimed by ext4_mb_new_blocks() above,
    4352             :                          * so release the quota reservations made for any
    4353             :                          * previously delayed allocated clusters.
    4354             :                          */
    4355     2623111 :                         lblk = EXT4_LBLK_CMASK(sbi, map->m_lblk);
    4356     2623111 :                         len = allocated_clusters << sbi->s_cluster_bits;
    4357     2623111 :                         n = ext4_es_delayed_clu(inode, lblk, len);
    4358     2623246 :                         if (n > 0)
    4359       37984 :                                 ext4_da_update_reserve_space(inode, (int) n, 0);
    4360             :                 }
    4361             :         }
    4362             : 
    4363             :         /*
    4364             :          * Cache the extent and update transaction to commit on fdatasync only
    4365             :          * when it is _not_ an unwritten extent.
    4366             :          */
    4367     3491447 :         if ((flags & EXT4_GET_BLOCKS_UNWRIT_EXT) == 0)
    4368      663645 :                 ext4_update_inode_fsync_trans(handle, inode, 1);
    4369             :         else
    4370     2827802 :                 ext4_update_inode_fsync_trans(handle, inode, 0);
    4371             : 
    4372     3490955 :         map->m_flags |= (EXT4_MAP_NEW | EXT4_MAP_MAPPED);
    4373     3490955 :         map->m_pblk = pblk;
    4374     3490955 :         map->m_len = ar.len;
    4375     3490955 :         allocated = map->m_len;
    4376    11751112 :         ext4_ext_show_leaf(inode, path);
    4377    11751112 : out:
    4378    11751112 :         ext4_free_ext_path(path);
    4379             : 
    4380    11745813 :         trace_ext4_ext_map_blocks_exit(inode, flags, map,
    4381    11745813 :                                        err ? err : allocated);
    4382    11747571 :         return err ? err : allocated;
    4383             : }
    4384             : 
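The return convention documented above ext4_ext_map_blocks() (> 0 for the number of blocks mapped or allocated, 0 for a hole with m_len trimmed to the hole length, < 0 on error) is what lets callers walk a logical range in one loop. The sketch below is a self-contained userspace model of such a caller; struct map_blocks and toy_map_blocks() are invented stand-ins, not the real ext4 structures or functions.

#include <stdio.h>

struct map_blocks {               /* cut-down stand-in for struct ext4_map_blocks */
        unsigned int m_lblk;      /* first logical block requested */
        unsigned int m_len;       /* in: blocks wanted, out: blocks described */
};

/*
 * Fake lookup mimicking the return convention: ret > 0 means that many
 * blocks are mapped starting at m_lblk, ret == 0 means a hole (m_len is
 * trimmed to the hole length), ret < 0 would be an error.
 */
static int toy_map_blocks(struct map_blocks *map)
{
        if (map->m_lblk < 8) {                  /* pretend blocks 0..7 are mapped */
                unsigned int avail = 8 - map->m_lblk;
                if (map->m_len > avail)
                        map->m_len = avail;
                return (int)map->m_len;
        }
        map->m_len = 4;                         /* pretend a 4-block hole follows */
        return 0;
}

int main(void)
{
        struct map_blocks map = { .m_lblk = 0, .m_len = 16 };
        unsigned int end = 16;

        while (map.m_lblk < end) {
                map.m_len = end - map.m_lblk;
                int ret = toy_map_blocks(&map);

                if (ret < 0)
                        return 1;
                printf("lblk %u: %s %u blocks\n", map.m_lblk,
                       ret > 0 ? "mapped" : "hole of", map.m_len);
                map.m_lblk += map.m_len;        /* advance past what was described */
        }
        return 0;
}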
    4385      799813 : int ext4_ext_truncate(handle_t *handle, struct inode *inode)
    4386             : {
    4387      799813 :         struct super_block *sb = inode->i_sb;
    4388      799813 :         ext4_lblk_t last_block;
    4389      799813 :         int err = 0;
    4390             : 
    4391             :         /*
    4392             :          * TODO: optimization is possible here.
    4393             :          * Probably we need not scan at all,
    4394             :          * because page truncation is enough.
    4395             :          */
    4396             : 
    4397             :         /* we have to know where to truncate from in the crash case */
    4398      799813 :         EXT4_I(inode)->i_disksize = inode->i_size;
    4399      799813 :         err = ext4_mark_inode_dirty(handle, inode);
    4400      799957 :         if (err)
    4401             :                 return err;
    4402             : 
    4403     1599914 :         last_block = (inode->i_size + sb->s_blocksize - 1)
    4404      799957 :                         >> EXT4_BLOCK_SIZE_BITS(sb);
    4405      799957 :         ext4_es_remove_extent(inode, last_block, EXT_MAX_BLOCKS - last_block);
    4406             : 
    4407      799695 : retry_remove_space:
    4408      799695 :         err = ext4_ext_remove_space(inode, last_block, EXT_MAX_BLOCKS - 1);
    4409      799565 :         if (err == -ENOMEM) {
    4410           0 :                 memalloc_retry_wait(GFP_ATOMIC);
    4411           0 :                 goto retry_remove_space;
    4412             :         }
    4413             :         return err;
    4414             : }
    4415             : 
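The last_block computation in ext4_ext_truncate() is a plain round-up division: the first logical block entirely beyond i_size. A minimal sketch of that arithmetic, assuming 4096-byte blocks (the values here are examples only):

#include <assert.h>

int main(void)
{
        unsigned long long i_size = 10000;           /* example file size in bytes */
        unsigned int blkbits = 12;                   /* 4096-byte blocks (assumption) */
        unsigned long long blocksize = 1ULL << blkbits;

        /* first block at or beyond i_size, i.e. ceil(i_size / blocksize) */
        unsigned long long last_block = (i_size + blocksize - 1) >> blkbits;

        assert(last_block == 3);                     /* blocks 0..2 hold the 10000 bytes */
        return 0;
}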
    4416     1066143 : static int ext4_alloc_file_blocks(struct file *file, ext4_lblk_t offset,
    4417             :                                   ext4_lblk_t len, loff_t new_size,
    4418             :                                   int flags)
    4419             : {
    4420     1066143 :         struct inode *inode = file_inode(file);
    4421     1066143 :         handle_t *handle;
    4422     1066143 :         int ret = 0, ret2 = 0, ret3 = 0;
    4423     1066143 :         int retries = 0;
    4424     1066143 :         int depth = 0;
    4425     1066143 :         struct ext4_map_blocks map;
    4426     1066143 :         unsigned int credits;
    4427     1066143 :         loff_t epos;
    4428             : 
    4429     1066143 :         BUG_ON(!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS));
    4430     1066143 :         map.m_lblk = offset;
    4431     1066143 :         map.m_len = len;
    4432             :         /*
    4433             :          * Don't normalize the request if it can fit in one extent so
    4434             :          * that it doesn't get unnecessarily split into multiple
    4435             :          * extents.
    4436             :          */
    4437     1066143 :         if (len <= EXT_UNWRITTEN_MAX_LEN)
    4438     1065975 :                 flags |= EXT4_GET_BLOCKS_NO_NORMALIZE;
    4439             : 
    4440             :         /*
    4441             :          * credits to insert 1 extent into the extent tree
    4442             :          */
    4443     1066143 :         credits = ext4_chunk_trans_blocks(inode, len);
    4444     1066136 :         depth = ext_depth(inode);
    4445             : 
    4446             : retry:
    4447     3445754 :         while (len) {
    4448             :                 /*
    4449             :                  * Recalculate credits when extent tree depth changes.
    4450             :                  */
    4451     2398314 :                 if (depth != ext_depth(inode)) {
    4452        8720 :                         credits = ext4_chunk_trans_blocks(inode, len);
    4453        8720 :                         depth = ext_depth(inode);
    4454             :                 }
    4455             : 
    4456     2398314 :                 handle = ext4_journal_start(inode, EXT4_HT_MAP_BLOCKS,
    4457             :                                             credits);
    4458     2398318 :                 if (IS_ERR(handle)) {
    4459           0 :                         ret = PTR_ERR(handle);
    4460           0 :                         break;
    4461             :                 }
    4462     2398318 :                 ret = ext4_map_blocks(handle, inode, &map, flags);
    4463     2398327 :                 if (ret <= 0) {
    4464       45132 :                         ext4_debug("inode #%lu: block %u: len %u: "
    4465             :                                    "ext4_ext_map_blocks returned %d",
    4466             :                                    inode->i_ino, map.m_lblk,
    4467             :                                    map.m_len, ret);
    4468       45132 :                         ext4_mark_inode_dirty(handle, inode);
    4469       45135 :                         ext4_journal_stop(handle);
    4470       45135 :                         break;
    4471             :                 }
    4472             :                 /*
    4473             :                  * allow a full retry cycle for any remaining allocations
    4474             :                  */
    4475     2353195 :                 retries = 0;
    4476     2353195 :                 map.m_lblk += ret;
    4477     2353195 :                 map.m_len = len = len - ret;
    4478     2353195 :                 epos = (loff_t)map.m_lblk << inode->i_blkbits;
    4479     2353195 :                 inode->i_ctime = current_time(inode);
    4480     2353188 :                 if (new_size) {
    4481     1182984 :                         if (epos > new_size)
    4482             :                                 epos = new_size;
    4483     1182984 :                         if (ext4_update_inode_size(inode, epos) & 0x1)
    4484      626126 :                                 inode->i_mtime = inode->i_ctime;
    4485             :                 }
    4486     2353188 :                 ret2 = ext4_mark_inode_dirty(handle, inode);
    4487     2353215 :                 ext4_update_inode_fsync_trans(handle, inode, 1);
    4488     2353212 :                 ret3 = ext4_journal_stop(handle);
    4489     2353209 :                 ret2 = ret3 ? ret3 : ret2;
    4490     2353209 :                 if (unlikely(ret2))
    4491             :                         break;
    4492             :         }
    4493     1092571 :         if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
    4494       26409 :                 goto retry;
    4495             : 
    4496     1066143 :         return ret > 0 ? ret2 : ret;
    4497             : }
    4498             : 
    4499             : static int ext4_collapse_range(struct file *file, loff_t offset, loff_t len);
    4500             : 
    4501             : static int ext4_insert_range(struct file *file, loff_t offset, loff_t len);
    4502             : 
    4503      208160 : static long ext4_zero_range(struct file *file, loff_t offset,
    4504             :                             loff_t len, int mode)
    4505             : {
    4506      208160 :         struct inode *inode = file_inode(file);
    4507      208160 :         struct address_space *mapping = file->f_mapping;
    4508      208160 :         handle_t *handle = NULL;
    4509      208160 :         unsigned int max_blocks;
    4510      208160 :         loff_t new_size = 0;
    4511      208160 :         int ret = 0;
    4512      208160 :         int flags;
    4513      208160 :         int credits;
    4514      208160 :         int partial_begin, partial_end;
    4515      208160 :         loff_t start, end;
    4516      208160 :         ext4_lblk_t lblk;
    4517      208160 :         unsigned int blkbits = inode->i_blkbits;
    4518             : 
    4519      208160 :         trace_ext4_zero_range(inode, offset, len, mode);
    4520             : 
    4521             :         /*
    4522             :          * Round up offset. This is not fallocate; we need to zero out
    4523             :          * blocks, so convert the interior block-aligned part of the range to
    4524             :          * unwritten and possibly manually zero out the unaligned parts of the
    4525             :          * range.
    4526             :          */
    4527      208159 :         start = round_up(offset, 1 << blkbits);
    4528      208159 :         end = round_down((offset + len), 1 << blkbits);
    4529             : 
    4530      208159 :         if (start < offset || end > offset + len)
    4531             :                 return -EINVAL;
    4532      208159 :         partial_begin = offset & ((1 << blkbits) - 1);
    4533      208159 :         partial_end = (offset + len) & ((1 << blkbits) - 1);
    4534             : 
    4535      208159 :         lblk = start >> blkbits;
    4536      208159 :         max_blocks = (end >> blkbits);
    4537      208159 :         if (max_blocks < lblk)
    4538             :                 max_blocks = 0;
    4539             :         else
    4540      203511 :                 max_blocks -= lblk;
    4541             : 
    4542      208159 :         inode_lock(inode);
    4543             : 
    4544             :         /*
    4545             :          * Indirect files do not support unwritten extents
    4546             :          */
    4547      208157 :         if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) {
    4548          17 :                 ret = -EOPNOTSUPP;
    4549          17 :                 goto out_mutex;
    4550             :         }
    4551             : 
    4552      208140 :         if (!(mode & FALLOC_FL_KEEP_SIZE) &&
    4553      102355 :             (offset + len > inode->i_size ||
    4554       67036 :              offset + len > EXT4_I(inode)->i_disksize)) {
    4555       38001 :                 new_size = offset + len;
    4556       38001 :                 ret = inode_newsize_ok(inode, new_size);
    4557       38001 :                 if (ret)
    4558           0 :                         goto out_mutex;
    4559             :         }
    4560             : 
    4561      208140 :         flags = EXT4_GET_BLOCKS_CREATE_UNWRIT_EXT;
    4562             : 
    4563             :         /* Wait for all existing DIO workers; newcomers will block on i_rwsem */
    4564      208140 :         inode_dio_wait(inode);
    4565             : 
    4566      208140 :         ret = file_modified(file);
    4567      208141 :         if (ret)
    4568           0 :                 goto out_mutex;
    4569             : 
    4570             :         /* Preallocate the range including the unaligned edges */
    4571      208141 :         if (partial_begin || partial_end) {
    4572      204500 :                 ret = ext4_alloc_file_blocks(file,
    4573      204500 :                                 round_down(offset, 1 << blkbits) >> blkbits,
    4574      204500 :                                 (round_up((offset + len), 1 << blkbits) -
    4575      204500 :                                  round_down(offset, 1 << blkbits)) >> blkbits,
    4576             :                                 new_size, flags);
    4577      204501 :                 if (ret)
    4578        5151 :                         goto out_mutex;
    4579             : 
    4580             :         }
    4581             : 
    4582             :         /* Zero range excluding the unaligned edges */
    4583      202991 :         if (max_blocks > 0) {
    4584      188501 :                 flags |= (EXT4_GET_BLOCKS_CONVERT_UNWRITTEN |
    4585             :                           EXT4_EX_NOCACHE);
    4586             : 
    4587             :                 /*
    4588             :                  * Prevent page faults from reinstantiating pages we have
    4589             :                  * released from page cache.
    4590             :                  */
    4591      188501 :                 filemap_invalidate_lock(mapping);
    4592             : 
    4593      188501 :                 ret = ext4_break_layouts(inode);
    4594      188498 :                 if (ret) {
    4595           0 :                         filemap_invalidate_unlock(mapping);
    4596           0 :                         goto out_mutex;
    4597             :                 }
    4598             : 
    4599      188498 :                 ret = ext4_update_disksize_before_punch(inode, offset, len);
    4600      188499 :                 if (ret) {
    4601           0 :                         filemap_invalidate_unlock(mapping);
    4602           0 :                         goto out_mutex;
    4603             :                 }
    4604             : 
    4605             :                 /*
    4606             :                  * For journalled data we need to write (and checkpoint) pages
    4607             :                  * before discarding the page cache to avoid inconsistent data on
    4608             :                  * disk in case of a crash before the zeroing transaction commits.
    4609             :                  */
    4610      188499 :                 if (ext4_should_journal_data(inode)) {
    4611           0 :                         ret = filemap_write_and_wait_range(mapping, start, end);
    4612           0 :                         if (ret) {
    4613           0 :                                 filemap_invalidate_unlock(mapping);
    4614           0 :                                 goto out_mutex;
    4615             :                         }
    4616             :                 }
    4617             : 
    4618             :                 /* Now release the pages and zero block aligned part of pages */
    4619      188499 :                 truncate_pagecache_range(inode, start, end - 1);
    4620      188496 :                 inode->i_mtime = inode->i_ctime = current_time(inode);
    4621             : 
    4622      188500 :                 ret = ext4_alloc_file_blocks(file, lblk, max_blocks, new_size,
    4623             :                                              flags);
    4624      188502 :                 filemap_invalidate_unlock(mapping);
    4625      188502 :                 if (ret)
    4626           2 :                         goto out_mutex;
    4627             :         }
    4628      202990 :         if (!partial_begin && !partial_end)
    4629        3641 :                 goto out_mutex;
    4630             : 
    4631             :         /*
    4632             :          * In the worst case we have to write out two nonadjacent unwritten
    4633             :          * blocks and update the inode.
    4634             :          */
    4635      199349 :         credits = (2 * ext4_ext_index_trans_blocks(inode, 2)) + 1;
    4636      199349 :         if (ext4_should_journal_data(inode))
    4637           0 :                 credits += 2;
    4638      199349 :         handle = ext4_journal_start(inode, EXT4_HT_MISC, credits);
    4639      199348 :         if (IS_ERR(handle)) {
    4640           0 :                 ret = PTR_ERR(handle);
    4641           0 :                 ext4_std_error(inode->i_sb, ret);
    4642           0 :                 goto out_mutex;
    4643             :         }
    4644             : 
    4645      199348 :         inode->i_mtime = inode->i_ctime = current_time(inode);
    4646      199349 :         if (new_size)
    4647       35843 :                 ext4_update_inode_size(inode, new_size);
    4648      199349 :         ret = ext4_mark_inode_dirty(handle, inode);
    4649      199349 :         if (unlikely(ret))
    4650           0 :                 goto out_handle;
    4651             :         /* Zero out partial block at the edges of the range */
    4652      199349 :         ret = ext4_zero_partial_blocks(handle, inode, offset, len);
    4653      199348 :         if (ret >= 0)
    4654      199349 :                 ext4_update_inode_fsync_trans(handle, inode, 1);
    4655             : 
    4656      199348 :         if (file->f_flags & O_SYNC)
    4657           0 :                 ext4_handle_sync(handle);
    4658             : 
    4659      199348 : out_handle:
    4660      199348 :         ext4_journal_stop(handle);
    4661      208160 : out_mutex:
    4662      208160 :         inode_unlock(inode);
    4663      208160 :         return ret;
    4664             : }
    4665             : 
    4666             : /*
    4667             :  * Preallocate space for a file. This implements ext4's fallocate file
    4668             :  * operation, which gets called from the sys_fallocate system call.
    4669             :  * For block-mapped files, posix_fallocate should fall back to the method
    4670             :  * of writing zeroes to the required new blocks (the same behavior that is
    4671             :  * expected for file systems which do not support the fallocate() system call).
    4672             :  */
    4673     1490439 : long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
    4674             : {
    4675     1490439 :         struct inode *inode = file_inode(file);
    4676     1490439 :         loff_t new_size = 0;
    4677     1490439 :         unsigned int max_blocks;
    4678     1490439 :         int ret = 0;
    4679     1490439 :         int flags;
    4680     1490439 :         ext4_lblk_t lblk;
    4681     1490439 :         unsigned int blkbits = inode->i_blkbits;
    4682             : 
    4683             :         /*
    4684             :          * Encrypted inodes can't handle collapse range or insert
    4685             :          * range since we would need to re-encrypt blocks with a
    4686             :          * different IV or XTS tweak (which are based on the logical
    4687             :          * block number).
    4688             :          */
    4689     1490439 :         if (IS_ENCRYPTED(inode) &&
    4690           0 :             (mode & (FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_INSERT_RANGE)))
    4691             :                 return -EOPNOTSUPP;
    4692             : 
    4693             :         /* Return error if mode is not supported */
    4694     1490439 :         if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |
    4695             :                      FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_ZERO_RANGE |
    4696             :                      FALLOC_FL_INSERT_RANGE))
    4697             :                 return -EOPNOTSUPP;
    4698             : 
    4699     1490439 :         inode_lock(inode);
    4700     1490450 :         ret = ext4_convert_inline_data(inode);
    4701     1490450 :         inode_unlock(inode);
    4702     1490454 :         if (ret)
    4703           0 :                 goto exit;
    4704             : 
    4705     1490454 :         if (mode & FALLOC_FL_PUNCH_HOLE) {
    4706      255395 :                 ret = ext4_punch_hole(file, offset, len);
    4707      255404 :                 goto exit;
    4708             :         }
    4709             : 
    4710     1235059 :         if (mode & FALLOC_FL_COLLAPSE_RANGE) {
    4711      204807 :                 ret = ext4_collapse_range(file, offset, len);
    4712      204807 :                 goto exit;
    4713             :         }
    4714             : 
    4715     1030252 :         if (mode & FALLOC_FL_INSERT_RANGE) {
    4716      148930 :                 ret = ext4_insert_range(file, offset, len);
    4717      148931 :                 goto exit;
    4718             :         }
    4719             : 
    4720      881322 :         if (mode & FALLOC_FL_ZERO_RANGE) {
    4721      208160 :                 ret = ext4_zero_range(file, offset, len, mode);
    4722      208160 :                 goto exit;
    4723             :         }
    4724      673162 :         trace_ext4_fallocate_enter(inode, offset, len, mode);
    4725      673143 :         lblk = offset >> blkbits;
    4726             : 
    4727             :          * We only support preallocation for extent-based files
    4728      673143 :         flags = EXT4_GET_BLOCKS_CREATE_UNWRIT_EXT;
    4729             : 
    4730      673143 :         inode_lock(inode);
    4731             : 
    4732             :         /*
    4733             :          * We only support preallocation for extent-based files only
    4734             :          */
    4735      673172 :         if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) {
    4736          29 :                 ret = -EOPNOTSUPP;
    4737          29 :                 goto out;
    4738             :         }
    4739             : 
    4740      673143 :         if (!(mode & FALLOC_FL_KEEP_SIZE) &&
    4741      553160 :             (offset + len > inode->i_size ||
    4742       98020 :              offset + len > EXT4_I(inode)->i_disksize)) {
    4743      457729 :                 new_size = offset + len;
    4744      457729 :                 ret = inode_newsize_ok(inode, new_size);
    4745      457729 :                 if (ret)
    4746           1 :                         goto out;
    4747             :         }
    4748             : 
    4749             :         /* Wait for all existing dio workers; newcomers will block on i_rwsem */
    4750      673142 :         inode_dio_wait(inode);
    4751             : 
    4752      673135 :         ret = file_modified(file);
    4753      673143 :         if (ret)
    4754           0 :                 goto out;
    4755             : 
    4756      673143 :         ret = ext4_alloc_file_blocks(file, lblk, max_blocks, new_size, flags);
    4757      673142 :         if (ret)
    4758       13558 :                 goto out;
    4759             : 
    4760      659584 :         if (file->f_flags & O_SYNC && EXT4_SB(inode->i_sb)->s_journal) {
    4761           0 :                 ret = ext4_fc_commit(EXT4_SB(inode->i_sb)->s_journal,
    4762           0 :                                         EXT4_I(inode)->i_sync_tid);
    4763             :         }
    4764      659584 : out:
    4765      673172 :         inode_unlock(inode);
    4766      673171 :         trace_ext4_fallocate_exit(inode, offset, max_blocks, ret);
    4767     1490470 : exit:
    4768     1490470 :         return ret;
    4769             : }
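
The mode flags dispatched above correspond to the userspace fallocate(2) interface. A minimal, illustrative userspace sketch (not part of extents.c; the file name and sizes are arbitrary assumptions) of how each mode might be exercised:

#define _GNU_SOURCE
#include <fcntl.h>              /* open(), fallocate() */
#include <linux/falloc.h>       /* FALLOC_FL_* flags */
#include <stdio.h>              /* perror() */
#include <unistd.h>             /* close() */

int main(void)
{
        int fd = open("testfile", O_RDWR);

        if (fd < 0)
                return 1;

        /* Plain preallocation: allocates unwritten extents, may extend i_size. */
        if (fallocate(fd, 0, 0, 1 << 20))
                perror("preallocate");

        /* Zero a sub-range without changing i_size. */
        if (fallocate(fd, FALLOC_FL_ZERO_RANGE | FALLOC_FL_KEEP_SIZE, 4096, 8192))
                perror("zero range");

        /* Punch a hole; PUNCH_HOLE must be combined with KEEP_SIZE. */
        if (fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE, 16384, 4096))
                perror("punch hole");

        /* Collapse requires offset/len aligned to the fs cluster size on ext4. */
        if (fallocate(fd, FALLOC_FL_COLLAPSE_RANGE, 0, 4096))
                perror("collapse range");

        close(fd);
        return 0;
}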
    4770             : 
    4771             : /*
    4772             :  * This function converts a range of blocks to written extents.
    4773             :  * The caller of this function will pass the start offset and the size.
    4774             :  * All unwritten extents within this range will be converted to
    4775             :  * written extents.
    4776             :  *
    4777             :  * This function is called from the direct I/O end_io callback
    4778             :  * function, to convert the fallocated extents after I/O is completed.
    4779             :  * Returns 0 on success.
    4780             :  */
    4781     1663136 : int ext4_convert_unwritten_extents(handle_t *handle, struct inode *inode,
    4782             :                                    loff_t offset, ssize_t len)
    4783             : {
    4784     1663136 :         unsigned int max_blocks;
    4785     1663136 :         int ret = 0, ret2 = 0, ret3 = 0;
    4786     1663136 :         struct ext4_map_blocks map;
    4787     1663136 :         unsigned int blkbits = inode->i_blkbits;
    4788     1663136 :         unsigned int credits = 0;
    4789             : 
    4790     1663136 :         map.m_lblk = offset >> blkbits;
    4791     1663136 :         max_blocks = EXT4_MAX_BLOCKS(len, offset, blkbits);
    4792             : 
    4793     1663136 :         if (!handle) {
    4794             :                 /*
    4795             :                  * credits to insert 1 extent into extent tree
    4796             :                  */
    4797      545816 :                 credits = ext4_chunk_trans_blocks(inode, max_blocks);
    4798             :         }
    4799     3564747 :         while (ret >= 0 && ret < max_blocks) {
    4800     1901611 :                 map.m_lblk += ret;
    4801     1901611 :                 map.m_len = (max_blocks -= ret);
    4802     1901611 :                 if (credits) {
    4803      784273 :                         handle = ext4_journal_start(inode, EXT4_HT_MAP_BLOCKS,
    4804             :                                                     credits);
    4805      784269 :                         if (IS_ERR(handle)) {
    4806           1 :                                 ret = PTR_ERR(handle);
    4807           1 :                                 break;
    4808             :                         }
    4809             :                 }
    4810     1901606 :                 ret = ext4_map_blocks(handle, inode, &map,
    4811             :                                       EXT4_GET_BLOCKS_IO_CONVERT_EXT);
    4812     1901560 :                 if (ret <= 0)
    4813           0 :                         ext4_warning(inode->i_sb,
    4814             :                                      "inode #%lu: block %u: len %u: "
    4815             :                                      "ext4_ext_map_blocks returned %d",
    4816             :                                      inode->i_ino, map.m_lblk,
    4817             :                                      map.m_len, ret);
    4818     1901560 :                 ret2 = ext4_mark_inode_dirty(handle, inode);
    4819     1901605 :                 if (credits) {
    4820      784270 :                         ret3 = ext4_journal_stop(handle);
    4821      784272 :                         if (unlikely(ret3))
    4822           0 :                                 ret2 = ret3;
    4823             :                 }
    4824             : 
    4825     1901607 :                 if (ret <= 0 || ret2)
    4826             :                         break;
    4827             :         }
    4828     1663137 :         return ret > 0 ? ret2 : ret;
    4829             : }
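
To make the range arithmetic above concrete, a worked example (illustrative numbers; the exact definition of EXT4_MAX_BLOCKS lives in the ext4 headers): with 4 KiB blocks (blkbits = 12), offset = 6144 and len = 10240, the range covers bytes 6144..16383, so map.m_lblk = 6144 >> 12 = 1 and max_blocks = 3, i.e. logical blocks 1..3 are converted from unwritten to written.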
    4830             : 
    4831     1117362 : int ext4_convert_unwritten_io_end_vec(handle_t *handle, ext4_io_end_t *io_end)
    4832             : {
    4833     1117362 :         int ret = 0, err = 0;
    4834     1117362 :         struct ext4_io_end_vec *io_end_vec;
    4835             : 
    4836             :         /*
    4837             :          * This is somewhat ugly but the idea is clear: when a transaction is
    4838             :          * reserved, everything goes into it. Otherwise we start several
    4839             :          * smaller transactions, one for the conversion of each extent.
    4840             :          */
    4841     1117362 :         if (handle) {
    4842     1117341 :                 handle = ext4_journal_start_reserved(handle,
    4843             :                                                      EXT4_HT_EXT_CONVERT);
    4844     1117341 :                 if (IS_ERR(handle))
    4845           0 :                         return PTR_ERR(handle);
    4846             :         }
    4847             : 
    4848     2234724 :         list_for_each_entry(io_end_vec, &io_end->list_vec, list) {
    4849     1117362 :                 ret = ext4_convert_unwritten_extents(handle, io_end->inode,
    4850             :                                                      io_end_vec->offset,
    4851             :                                                      io_end_vec->size);
    4852     1117362 :                 if (ret)
    4853             :                         break;
    4854             :         }
    4855             : 
    4856     1117362 :         if (handle)
    4857     1117341 :                 err = ext4_journal_stop(handle);
    4858             : 
    4859     1117362 :         return ret < 0 ? ret : err;
    4860             : }
    4861             : 
    4862        4409 : static int ext4_iomap_xattr_fiemap(struct inode *inode, struct iomap *iomap)
    4863             : {
    4864        4409 :         __u64 physical = 0;
    4865        4409 :         __u64 length = 0;
    4866        4409 :         int blockbits = inode->i_sb->s_blocksize_bits;
    4867        4409 :         int error = 0;
    4868        4409 :         u16 iomap_type;
    4869             : 
    4870             :         /* in-inode? */
    4871        4409 :         if (ext4_test_inode_state(inode, EXT4_STATE_XATTR)) {
    4872         783 :                 struct ext4_iloc iloc;
    4873         783 :                 int offset;     /* offset of xattr in inode */
    4874             : 
    4875         783 :                 error = ext4_get_inode_loc(inode, &iloc);
    4876         783 :                 if (error)
    4877           0 :                         return error;
    4878         783 :                 physical = (__u64)iloc.bh->b_blocknr << blockbits;
    4879         783 :                 offset = EXT4_GOOD_OLD_INODE_SIZE +
    4880         783 :                                 EXT4_I(inode)->i_extra_isize;
    4881         783 :                 physical += offset;
    4882         783 :                 length = EXT4_SB(inode->i_sb)->s_inode_size - offset;
    4883         783 :                 brelse(iloc.bh);
    4884         783 :                 iomap_type = IOMAP_INLINE;
    4885        3626 :         } else if (EXT4_I(inode)->i_file_acl) { /* external block */
    4886         222 :                 physical = (__u64)EXT4_I(inode)->i_file_acl << blockbits;
    4887         222 :                 length = inode->i_sb->s_blocksize;
    4888         222 :                 iomap_type = IOMAP_MAPPED;
    4889             :         } else {
    4890             :                 /* no in-inode or external block for xattr, so return -ENOENT */
    4891        3404 :                 error = -ENOENT;
    4892        3404 :                 goto out;
    4893             :         }
    4894             : 
    4895        1005 :         iomap->addr = physical;
    4896        1005 :         iomap->offset = 0;
    4897        1005 :         iomap->length = length;
    4898        1005 :         iomap->type = iomap_type;
    4899        1005 :         iomap->flags = 0;
    4900             : out:
    4901             :         return error;
    4902             : }
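
A worked example of the in-inode xattr case above (illustrative numbers): with 4 KiB blocks (blockbits = 12), the inode stored in inode-table block 1057, 256-byte on-disk inodes and i_extra_isize = 32, the offset is 128 + 32 = 160, physical = (1057 << 12) + 160 = 4329632, and length = 256 - 160 = 96 bytes of in-inode xattr space reported via iomap.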
    4903             : 
    4904        4409 : static int ext4_iomap_xattr_begin(struct inode *inode, loff_t offset,
    4905             :                                   loff_t length, unsigned flags,
    4906             :                                   struct iomap *iomap, struct iomap *srcmap)
    4907             : {
    4908        4409 :         int error;
    4909             : 
    4910        4409 :         error = ext4_iomap_xattr_fiemap(inode, iomap);
    4911        4409 :         if (error == 0 && (offset >= iomap->length))
    4912        1001 :                 error = -ENOENT;
    4913        4409 :         return error;
    4914             : }
    4915             : 
    4916             : static const struct iomap_ops ext4_iomap_xattr_ops = {
    4917             :         .iomap_begin            = ext4_iomap_xattr_begin,
    4918             : };
    4919             : 
    4920       46872 : static int ext4_fiemap_check_ranges(struct inode *inode, u64 start, u64 *len)
    4921             : {
    4922       46872 :         u64 maxbytes;
    4923             : 
    4924       46872 :         if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
    4925       46860 :                 maxbytes = inode->i_sb->s_maxbytes;
    4926             :         else
    4927          12 :                 maxbytes = EXT4_SB(inode->i_sb)->s_bitmap_maxbytes;
    4928             : 
    4929       46872 :         if (*len == 0)
    4930             :                 return -EINVAL;
    4931       46872 :         if (start > maxbytes)
    4932             :                 return -EFBIG;
    4933             : 
    4934             :         /*
    4935             :          * Shrink request scope to what the fs can actually handle.
    4936             :          */
    4937       46872 :         if (*len > maxbytes || (maxbytes - *len) < start)
    4938       30682 :                 *len = maxbytes - start;
    4939             :         return 0;
    4940             : }
    4941             : 
    4942       46872 : int ext4_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
    4943             :                 u64 start, u64 len)
    4944             : {
    4945       46872 :         int error = 0;
    4946             : 
    4947       46872 :         if (fieinfo->fi_flags & FIEMAP_FLAG_CACHE) {
    4948           0 :                 error = ext4_ext_precache(inode);
    4949           0 :                 if (error)
    4950             :                         return error;
    4951           0 :                 fieinfo->fi_flags &= ~FIEMAP_FLAG_CACHE;
    4952             :         }
    4953             : 
    4954             :         /*
    4955             :          * For bitmap files the maximum size limit could be smaller than
    4956             :          * s_maxbytes, so check len here manually instead of just relying on the
    4957             :          * generic check.
    4958             :          */
    4959       46872 :         error = ext4_fiemap_check_ranges(inode, start, &len);
    4960       46871 :         if (error)
    4961             :                 return error;
    4962             : 
    4963       46871 :         if (fieinfo->fi_flags & FIEMAP_FLAG_XATTR) {
    4964        8843 :                 fieinfo->fi_flags &= ~FIEMAP_FLAG_XATTR;
    4965        8843 :                 return iomap_fiemap(inode, fieinfo, start, len,
    4966             :                                     &ext4_iomap_xattr_ops);
    4967             :         }
    4968             : 
    4969       38028 :         return iomap_fiemap(inode, fieinfo, start, len, &ext4_iomap_report_ops);
    4970             : }
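
ext4_fiemap() above is reached through the FS_IOC_FIEMAP ioctl. A minimal, illustrative userspace sketch (not part of this file; the buffer size and error handling are arbitrary assumptions) that walks a file's extent map, optionally requesting the xattr block via FIEMAP_FLAG_XATTR:

#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/fs.h>           /* FS_IOC_FIEMAP */
#include <linux/fiemap.h>       /* struct fiemap, FIEMAP_* */

int main(int argc, char **argv)
{
        unsigned int nr = 32;
        struct fiemap *fm;
        int fd;

        if (argc < 2 || (fd = open(argv[1], O_RDONLY)) < 0)
                return 1;

        fm = calloc(1, sizeof(*fm) + nr * sizeof(struct fiemap_extent));
        if (!fm)
                return 1;

        fm->fm_start = 0;
        fm->fm_length = FIEMAP_MAX_OFFSET;      /* whole file */
        fm->fm_flags = 0;                       /* or FIEMAP_FLAG_XATTR */
        fm->fm_extent_count = nr;

        if (ioctl(fd, FS_IOC_FIEMAP, fm) == 0) {
                for (unsigned int i = 0; i < fm->fm_mapped_extents; i++)
                        printf("logical %llu physical %llu len %llu flags 0x%x\n",
                               (unsigned long long)fm->fm_extents[i].fe_logical,
                               (unsigned long long)fm->fm_extents[i].fe_physical,
                               (unsigned long long)fm->fm_extents[i].fe_length,
                               fm->fm_extents[i].fe_flags);
        }

        free(fm);
        close(fd);
        return 0;
}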
    4971             : 
    4972           0 : int ext4_get_es_cache(struct inode *inode, struct fiemap_extent_info *fieinfo,
    4973             :                       __u64 start, __u64 len)
    4974             : {
    4975           0 :         ext4_lblk_t start_blk, len_blks;
    4976           0 :         __u64 last_blk;
    4977           0 :         int error = 0;
    4978             : 
    4979           0 :         if (ext4_has_inline_data(inode)) {
    4980           0 :                 int has_inline;
    4981             : 
    4982           0 :                 down_read(&EXT4_I(inode)->xattr_sem);
    4983           0 :                 has_inline = ext4_has_inline_data(inode);
    4984           0 :                 up_read(&EXT4_I(inode)->xattr_sem);
    4985           0 :                 if (has_inline)
    4986             :                         return 0;
    4987             :         }
    4988             : 
    4989           0 :         if (fieinfo->fi_flags & FIEMAP_FLAG_CACHE) {
    4990           0 :                 error = ext4_ext_precache(inode);
    4991           0 :                 if (error)
    4992             :                         return error;
    4993           0 :                 fieinfo->fi_flags &= ~FIEMAP_FLAG_CACHE;
    4994             :         }
    4995             : 
    4996           0 :         error = fiemap_prep(inode, fieinfo, start, &len, 0);
    4997           0 :         if (error)
    4998             :                 return error;
    4999             : 
    5000           0 :         error = ext4_fiemap_check_ranges(inode, start, &len);
    5001           0 :         if (error)
    5002             :                 return error;
    5003             : 
    5004           0 :         start_blk = start >> inode->i_sb->s_blocksize_bits;
    5005           0 :         last_blk = (start + len - 1) >> inode->i_sb->s_blocksize_bits;
    5006           0 :         if (last_blk >= EXT_MAX_BLOCKS)
    5007             :                 last_blk = EXT_MAX_BLOCKS-1;
    5008           0 :         len_blks = ((ext4_lblk_t) last_blk) - start_blk + 1;
    5009             : 
    5010             :         /*
    5011             :          * Walk the extent tree gathering extent information
    5012             :          * and pushing extents back to the user.
    5013             :          */
    5014           0 :         return ext4_fill_es_cache_info(inode, start_blk, len_blks, fieinfo);
    5015             : }
    5016             : 
    5017             : /*
    5018             :  * ext4_ext_shift_path_extents:
    5019             :  * Shift the extents of a path structure lying between path[depth].p_ext
    5020             :  * and EXT_LAST_EXTENT(path[depth].p_hdr), by @shift blocks. @SHIFT tells
    5021             :  * whether it is a right shift or a left shift operation.
    5022             :  */
    5023             : static int
    5024      349412 : ext4_ext_shift_path_extents(struct ext4_ext_path *path, ext4_lblk_t shift,
    5025             :                             struct inode *inode, handle_t *handle,
    5026             :                             enum SHIFT_DIRECTION SHIFT)
    5027             : {
    5028      349412 :         int depth, err = 0;
    5029      349412 :         struct ext4_extent *ex_start, *ex_last;
    5030      349412 :         bool update = false;
    5031      349412 :         int credits, restart_credits;
    5032      349412 :         depth = path->p_depth;
    5033             : 
    5034      360608 :         while (depth >= 0) {
    5035      349418 :                 if (depth == path->p_depth) {
    5036      349409 :                         ex_start = path[depth].p_ext;
    5037      349409 :                         if (!ex_start)
    5038             :                                 return -EFSCORRUPTED;
    5039             : 
    5040      349409 :                         ex_last = EXT_LAST_EXTENT(path[depth].p_hdr);
    5041             :                         /* leaf + sb + inode */
    5042      349409 :                         credits = 3;
    5043      349409 :                         if (ex_start == EXT_FIRST_EXTENT(path[depth].p_hdr)) {
    5044       59083 :                                 update = true;
    5045             :                                 /* extent tree + sb + inode */
    5046       59083 :                                 credits = depth + 2;
    5047             :                         }
    5048             : 
    5049      349409 :                         restart_credits = ext4_writepage_trans_blocks(inode);
    5050      349408 :                         err = ext4_datasem_ensure_credits(handle, inode, credits,
    5051             :                                         restart_credits, 0);
    5052      349404 :                         if (err) {
    5053           0 :                                 if (err > 0)
    5054           0 :                                         err = -EAGAIN;
    5055           0 :                                 goto out;
    5056             :                         }
    5057             : 
    5058      349404 :                         err = ext4_ext_get_access(handle, inode, path + depth);
    5059      349411 :                         if (err)
    5060           0 :                                 goto out;
    5061             : 
    5062    10515193 :                         while (ex_start <= ex_last) {
    5063    10165781 :                                 if (SHIFT == SHIFT_LEFT) {
    5064     9420737 :                                         le32_add_cpu(&ex_start->ee_block,
    5065             :                                                 -shift);
    5066             :                                         /* Try to merge to the left. */
    5067     9420737 :                                         if ((ex_start >
    5068     9420737 :                                             EXT_FIRST_EXTENT(path[depth].p_hdr))
    5069     9370727 :                                             &&
    5070     9370730 :                                             ext4_ext_try_to_merge_right(inode,
    5071             :                                             path, ex_start - 1))
    5072          84 :                                                 ex_last--;
    5073             :                                         else
    5074     9420650 :                                                 ex_start++;
    5075             :                                 } else {
    5076      745044 :                                         le32_add_cpu(&ex_last->ee_block, shift);
    5077      745044 :                                         ext4_ext_try_to_merge_right(inode, path,
    5078             :                                                 ex_last);
    5079      745048 :                                         ex_last--;
    5080             :                                 }
    5081             :                         }
    5082      349412 :                         err = ext4_ext_dirty(handle, inode, path + depth);
    5083      349413 :                         if (err)
    5084           0 :                                 goto out;
    5085             : 
    5086      349413 :                         if (--depth < 0 || !update)
    5087             :                                 break;
    5088             :                 }
    5089             : 
    5090             :                 /* Update index too */
    5091       51602 :                 err = ext4_ext_get_access(handle, inode, path + depth);
    5092       51598 :                 if (err)
    5093           0 :                         goto out;
    5094             : 
    5095       51598 :                 if (SHIFT == SHIFT_LEFT)
    5096       46211 :                         le32_add_cpu(&path[depth].p_idx->ei_block, -shift);
    5097             :                 else
    5098        5387 :                         le32_add_cpu(&path[depth].p_idx->ei_block, shift);
    5099       51598 :                 err = ext4_ext_dirty(handle, inode, path + depth);
    5100       51598 :                 if (err)
    5101           0 :                         goto out;
    5102             : 
    5103             :                 /* we are done if current index is not a starting index */
    5104       51598 :                 if (path[depth].p_idx != EXT_FIRST_INDEX(path[depth].p_hdr))
    5105             :                         break;
    5106             : 
    5107       11196 :                 depth--;
    5108             :         }
    5109             : 
    5110      349412 : out:
    5111             :         return err;
    5112             : }
    5113             : 
    5114             : /*
    5115             :  * ext4_ext_shift_extents:
    5116             :  * All the extents which lie in the range from @start to the last allocated
    5117             :  * block for the @inode are shifted either towards the left or the right
    5118             :  * (depending upon @SHIFT) by @shift blocks.
    5119             :  * On success, 0 is returned; otherwise, an error.
    5120             :  */
    5121             : static int
    5122      339979 : ext4_ext_shift_extents(struct inode *inode, handle_t *handle,
    5123             :                        ext4_lblk_t start, ext4_lblk_t shift,
    5124             :                        enum SHIFT_DIRECTION SHIFT)
    5125             : {
    5126      339979 :         struct ext4_ext_path *path;
    5127      339979 :         int ret = 0, depth;
    5128      339979 :         struct ext4_extent *extent;
    5129      339979 :         ext4_lblk_t stop, *iterator, ex_start, ex_end;
    5130      339979 :         ext4_lblk_t tmp = EXT_MAX_BLOCKS;
    5131             : 
    5132             :         /* Let path point to the last extent */
    5133      339979 :         path = ext4_find_extent(inode, EXT_MAX_BLOCKS - 1, NULL,
    5134             :                                 EXT4_EX_NOCACHE);
    5135      339976 :         if (IS_ERR(path))
    5136           0 :                 return PTR_ERR(path);
    5137             : 
    5138      339976 :         depth = path->p_depth;
    5139      339976 :         extent = path[depth].p_ext;
    5140      339976 :         if (!extent)
    5141       10132 :                 goto out;
    5142             : 
    5143      329844 :         stop = le32_to_cpu(extent->ee_block);
    5144             : 
    5145             :        /*
    5146             :         * For left shifts, make sure the hole on the left is big enough to
    5147             :         * accommodate the shift.  For right shifts, make sure the last extent
    5148             :         * won't be shifted beyond EXT_MAX_BLOCKS.
    5149             :         */
    5150      329844 :         if (SHIFT == SHIFT_LEFT) {
    5151      188551 :                 path = ext4_find_extent(inode, start - 1, &path,
    5152             :                                         EXT4_EX_NOCACHE);
    5153      188552 :                 if (IS_ERR(path))
    5154           0 :                         return PTR_ERR(path);
    5155      188552 :                 depth = path->p_depth;
    5156      188552 :                 extent =  path[depth].p_ext;
    5157      188552 :                 if (extent) {
    5158      188552 :                         ex_start = le32_to_cpu(extent->ee_block);
    5159      188552 :                         ex_end = le32_to_cpu(extent->ee_block) +
    5160      188552 :                                 ext4_ext_get_actual_len(extent);
    5161             :                 } else {
    5162             :                         ex_start = 0;
    5163             :                         ex_end = 0;
    5164             :                 }
    5165             : 
    5166      188552 :                 if ((start == ex_start && shift > ex_start) ||
    5167      188552 :                     (shift > start - ex_end)) {
    5168           0 :                         ret = -EINVAL;
    5169           0 :                         goto out;
    5170             :                 }
    5171             :         } else {
    5172      141293 :                 if (shift > EXT_MAX_BLOCKS -
    5173      141293 :                     (stop + ext4_ext_get_actual_len(extent))) {
    5174           1 :                         ret = -EINVAL;
    5175           1 :                         goto out;
    5176             :                 }
    5177             :         }
    5178             : 
    5179             :         /*
    5180             :          * In case of left shift, iterator points to start and it is increased
    5181             :          * till we reach stop. In case of right shift, iterator points to stop
    5182             :          * and it is decreased till we reach start.
    5183             :          */
    5184      141292 : again:
    5185      329844 :         ret = 0;
    5186      329844 :         if (SHIFT == SHIFT_LEFT)
    5187             :                 iterator = &start;
    5188             :         else
    5189      141292 :                 iterator = &stop;
    5190             : 
    5191      329844 :         if (tmp != EXT_MAX_BLOCKS)
    5192           0 :                 *iterator = tmp;
    5193             : 
    5194             :         /*
    5195             :          * It's safe to start updating extents.  Start and stop are unsigned, so
    5196             :          * for a right shift, if an extent starting at block 0 is reached, the iterator
    5197             :          * becomes NULL to indicate the end of the loop.
    5198             :          */
    5199      679256 :         while (iterator && start <= stop) {
    5200      349411 :                 path = ext4_find_extent(inode, *iterator, &path,
    5201             :                                         EXT4_EX_NOCACHE);
    5202      349412 :                 if (IS_ERR(path))
    5203           0 :                         return PTR_ERR(path);
    5204      349412 :                 depth = path->p_depth;
    5205      349412 :                 extent = path[depth].p_ext;
    5206      349412 :                 if (!extent) {
    5207           0 :                         EXT4_ERROR_INODE(inode, "unexpected hole at %lu",
    5208             :                                          (unsigned long) *iterator);
    5209           0 :                         return -EFSCORRUPTED;
    5210             :                 }
    5211      349412 :                 if (SHIFT == SHIFT_LEFT && *iterator >
    5212      215970 :                     le32_to_cpu(extent->ee_block)) {
    5213             :                         /* Hole, move to the next extent */
    5214       63925 :                         if (extent < EXT_LAST_EXTENT(path[depth].p_hdr)) {
    5215       63925 :                                 path[depth].p_ext++;
    5216             :                         } else {
    5217           0 :                                 *iterator = ext4_ext_next_allocated_block(path);
    5218           0 :                                 continue;
    5219             :                         }
    5220             :                 }
    5221             : 
    5222      349412 :                 tmp = *iterator;
    5223      349412 :                 if (SHIFT == SHIFT_LEFT) {
    5224      215969 :                         extent = EXT_LAST_EXTENT(path[depth].p_hdr);
    5225      215969 :                         *iterator = le32_to_cpu(extent->ee_block) +
    5226      215969 :                                         ext4_ext_get_actual_len(extent);
    5227             :                 } else {
    5228      133443 :                         extent = EXT_FIRST_EXTENT(path[depth].p_hdr);
    5229      133443 :                         if (le32_to_cpu(extent->ee_block) > start)
    5230         159 :                                 *iterator = le32_to_cpu(extent->ee_block) - 1;
    5231      133284 :                         else if (le32_to_cpu(extent->ee_block) == start)
    5232             :                                 iterator = NULL;
    5233             :                         else {
    5234      124365 :                                 extent = EXT_LAST_EXTENT(path[depth].p_hdr);
    5235      768042 :                                 while (le32_to_cpu(extent->ee_block) >= start)
    5236      643677 :                                         extent--;
    5237             : 
    5238      124365 :                                 if (extent == EXT_LAST_EXTENT(path[depth].p_hdr))
    5239             :                                         break;
    5240             : 
    5241      124365 :                                 extent++;
    5242      124365 :                                 iterator = NULL;
    5243             :                         }
    5244      133443 :                         path[depth].p_ext = extent;
    5245             :                 }
    5246      349412 :                 ret = ext4_ext_shift_path_extents(path, shift, inode,
    5247             :                                 handle, SHIFT);
    5248             :                 /* iterator can be NULL which means we should break */
    5249      349412 :                 if (ret == -EAGAIN)
    5250           0 :                         goto again;
    5251      349412 :                 if (ret)
    5252             :                         break;
    5253             :         }
    5254      329845 : out:
    5255      339978 :         ext4_free_ext_path(path);
    5256      339976 :         return ret;
    5257             : }
    5258             : 
    5259             : /*
    5260             :  * ext4_collapse_range:
    5261             :  * This implements fallocate's collapse-range functionality for ext4.
    5262             :  * Returns 0 on success and a negative error code on failure.
    5263             :  */
    5264      204807 : static int ext4_collapse_range(struct file *file, loff_t offset, loff_t len)
    5265             : {
    5266      204807 :         struct inode *inode = file_inode(file);
    5267      204807 :         struct super_block *sb = inode->i_sb;
    5268      204807 :         struct address_space *mapping = inode->i_mapping;
    5269      204807 :         ext4_lblk_t punch_start, punch_stop;
    5270      204807 :         handle_t *handle;
    5271      204807 :         unsigned int credits;
    5272      204807 :         loff_t new_size, ioffset;
    5273      204807 :         int ret;
    5274             : 
    5275             :         /*
    5276             :          * We need to test this early because xfstests assumes that a
    5277             :          * collapse range of (0, 1) will return EOPNOTSUPP if the file
    5278             :          * system does not support collapse range.
    5279             :          */
    5280      204807 :         if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
    5281             :                 return -EOPNOTSUPP;
    5282             : 
    5283             :         /* Collapse range works only on fs cluster size aligned regions. */
    5284      204804 :         if (!IS_ALIGNED(offset | len, EXT4_CLUSTER_SIZE(sb)))
    5285             :                 return -EINVAL;
    5286             : 
    5287      200556 :         trace_ext4_collapse_range(inode, offset, len);
    5288             : 
    5289      200555 :         punch_start = offset >> EXT4_BLOCK_SIZE_BITS(sb);
    5290      200555 :         punch_stop = (offset + len) >> EXT4_BLOCK_SIZE_BITS(sb);
    5291             : 
    5292      200555 :         inode_lock(inode);
    5293             :         /*
    5294             :          * A collapse range must not overlap or extend beyond EOF; in that case
    5295             :          * it would effectively be a truncate operation, so reject it.
    5296             :          */
    5297      200554 :         if (offset + len >= inode->i_size) {
    5298        2917 :                 ret = -EINVAL;
    5299        2917 :                 goto out_mutex;
    5300             :         }
    5301             : 
    5302             :         /* Currently just for extent based files */
    5303      197637 :         if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
    5304           0 :                 ret = -EOPNOTSUPP;
    5305           0 :                 goto out_mutex;
    5306             :         }
    5307             : 
    5308             :         /* Wait for existing dio to complete */
    5309      197637 :         inode_dio_wait(inode);
    5310             : 
    5311      197637 :         ret = file_modified(file);
    5312      197639 :         if (ret)
    5313           0 :                 goto out_mutex;
    5314             : 
    5315             :         /*
    5316             :          * Prevent page faults from reinstantiating pages we have released from
    5317             :          * page cache.
    5318             :          */
    5319      197639 :         filemap_invalidate_lock(mapping);
    5320             : 
    5321      197640 :         ret = ext4_break_layouts(inode);
    5322      197640 :         if (ret)
    5323           0 :                 goto out_mmap;
    5324             : 
    5325             :         /*
    5326             :          * Need to round down the offset so it is aligned to a page size
    5327             :          * boundary when page size > block size.
    5328             :          */
    5329      197640 :         ioffset = round_down(offset, PAGE_SIZE);
    5330             :         /*
    5331             :          * Write tail of the last page before removed range since it will get
    5332             :          * removed from the page cache below.
    5333             :          */
    5334      197640 :         ret = filemap_write_and_wait_range(mapping, ioffset, offset);
    5335      197637 :         if (ret)
    5336           0 :                 goto out_mmap;
    5337             :         /*
    5338             :          * Write out the data that will be shifted, to preserve it when discarding
    5339             :          * the page cache below. We are also protected from pages becoming dirty
    5340             :          * by i_rwsem and invalidate_lock.
    5341             :          */
    5342      197637 :         ret = filemap_write_and_wait_range(mapping, offset + len,
    5343             :                                            LLONG_MAX);
    5344      197639 :         if (ret)
    5345           0 :                 goto out_mmap;
    5346      197639 :         truncate_pagecache(inode, ioffset);
    5347             : 
    5348      197639 :         credits = ext4_writepage_trans_blocks(inode);
    5349      197640 :         handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE, credits);
    5350      197640 :         if (IS_ERR(handle)) {
    5351           0 :                 ret = PTR_ERR(handle);
    5352           0 :                 goto out_mmap;
    5353             :         }
    5354      197640 :         ext4_fc_mark_ineligible(sb, EXT4_FC_REASON_FALLOC_RANGE, handle);
    5355             : 
    5356      197639 :         down_write(&EXT4_I(inode)->i_data_sem);
    5357      197640 :         ext4_discard_preallocations(inode, 0);
    5358      197640 :         ext4_es_remove_extent(inode, punch_start, EXT_MAX_BLOCKS - punch_start);
    5359             : 
    5360      197640 :         ret = ext4_ext_remove_space(inode, punch_start, punch_stop - 1);
    5361      197638 :         if (ret) {
    5362           0 :                 up_write(&EXT4_I(inode)->i_data_sem);
    5363           0 :                 goto out_stop;
    5364             :         }
    5365      197638 :         ext4_discard_preallocations(inode, 0);
    5366             : 
    5367      197638 :         ret = ext4_ext_shift_extents(inode, handle, punch_stop,
    5368             :                                      punch_stop - punch_start, SHIFT_LEFT);
    5369      197639 :         if (ret) {
    5370           0 :                 up_write(&EXT4_I(inode)->i_data_sem);
    5371           0 :                 goto out_stop;
    5372             :         }
    5373             : 
    5374      197639 :         new_size = inode->i_size - len;
    5375      197639 :         i_size_write(inode, new_size);
    5376      197639 :         EXT4_I(inode)->i_disksize = new_size;
    5377             : 
    5378      197639 :         up_write(&EXT4_I(inode)->i_data_sem);
    5379      197639 :         if (IS_SYNC(inode))
    5380           0 :                 ext4_handle_sync(handle);
    5381      197639 :         inode->i_mtime = inode->i_ctime = current_time(inode);
    5382      197637 :         ret = ext4_mark_inode_dirty(handle, inode);
    5383      197640 :         ext4_update_inode_fsync_trans(handle, inode, 1);
    5384             : 
    5385      197640 : out_stop:
    5386      197640 :         ext4_journal_stop(handle);
    5387      197640 : out_mmap:
    5388      197640 :         filemap_invalidate_unlock(mapping);
    5389      200557 : out_mutex:
    5390      200557 :         inode_unlock(inode);
    5391      200557 :         return ret;
    5392             : }
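
A worked example of the collapse arithmetic above (illustrative numbers): with 4 KiB blocks, EXT4_BLOCK_SIZE_BITS(sb) == 12, so fallocate(fd, FALLOC_FL_COLLAPSE_RANGE, 8192, 12288) yields punch_start = 8192 >> 12 = 2 and punch_stop = 20480 >> 12 = 5. Logical blocks 2..4 are removed, every extent at logical block >= 5 is shifted left by punch_stop - punch_start = 3 blocks, and i_size shrinks by 12288 bytes.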
    5393             : 
    5394             : /*
    5395             :  * ext4_insert_range:
    5396             :  * This function implements the FALLOC_FL_INSERT_RANGE flag of fallocate.
    5397             :  * The data blocks starting from @offset to the EOF are shifted by @len
    5398             :  * towards the right to create a hole in the @inode. The inode size is increased
    5399             :  * by @len bytes.
    5400             :  * Returns 0 on success, error otherwise.
    5401             :  */
    5402      148931 : static int ext4_insert_range(struct file *file, loff_t offset, loff_t len)
    5403             : {
    5404      148931 :         struct inode *inode = file_inode(file);
    5405      148931 :         struct super_block *sb = inode->i_sb;
    5406      148931 :         struct address_space *mapping = inode->i_mapping;
    5407      148931 :         handle_t *handle;
    5408      148931 :         struct ext4_ext_path *path;
    5409      148931 :         struct ext4_extent *extent;
    5410      148931 :         ext4_lblk_t offset_lblk, len_lblk, ee_start_lblk = 0;
    5411      148931 :         unsigned int credits, ee_len;
    5412      148931 :         int ret = 0, depth, split_flag = 0;
    5413      148931 :         loff_t ioffset;
    5414             : 
    5415             :         /*
    5416             :          * We need to test this early because xfstests assumes that an
    5417             :          * insert range of (0, 1) will return EOPNOTSUPP if the file
    5418             :          * system does not support insert range.
    5419             :          */
    5420      148931 :         if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
    5421             :                 return -EOPNOTSUPP;
    5422             : 
    5423             :         /* Insert range works only on fs cluster size aligned regions. */
    5424      148924 :         if (!IS_ALIGNED(offset | len, EXT4_CLUSTER_SIZE(sb)))
    5425             :                 return -EINVAL;
    5426             : 
    5427      144622 :         trace_ext4_insert_range(inode, offset, len);
    5428             : 
    5429      144619 :         offset_lblk = offset >> EXT4_BLOCK_SIZE_BITS(sb);
    5430      144619 :         len_lblk = len >> EXT4_BLOCK_SIZE_BITS(sb);
    5431             : 
    5432      144619 :         inode_lock(inode);
    5433             :         /* Currently just for extent based files */
    5434      144621 :         if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
    5435           0 :                 ret = -EOPNOTSUPP;
    5436           0 :                 goto out_mutex;
    5437             :         }
    5438             : 
    5439             :         /* Check whether the maximum file size would be exceeded */
    5440      144621 :         if (len > inode->i_sb->s_maxbytes - inode->i_size) {
    5441           1 :                 ret = -EFBIG;
    5442           1 :                 goto out_mutex;
    5443             :         }
    5444             : 
    5445             :         /* Offset must be less than i_size */
    5446      144620 :         if (offset >= inode->i_size) {
    5447        2282 :                 ret = -EINVAL;
    5448        2282 :                 goto out_mutex;
    5449             :         }
    5450             : 
    5451             :         /* Wait for existing dio to complete */
    5452      142338 :         inode_dio_wait(inode);
    5453             : 
    5454      142338 :         ret = file_modified(file);
    5455      142337 :         if (ret)
    5456           0 :                 goto out_mutex;
    5457             : 
    5458             :         /*
    5459             :          * Prevent page faults from reinstantiating pages we have released from
    5460             :          * page cache.
    5461             :          */
    5462      142337 :         filemap_invalidate_lock(mapping);
    5463             : 
    5464      142339 :         ret = ext4_break_layouts(inode);
    5465      142337 :         if (ret)
    5466           0 :                 goto out_mmap;
    5467             : 
    5468             :         /*
    5469             :          * Need to round down the start offset so it is aligned to a page size
    5470             :          * boundary when page size > block size.
    5471             :          */
    5472      142337 :         ioffset = round_down(offset, PAGE_SIZE);
    5473             :         /* Write out all dirty pages */
    5474      142337 :         ret = filemap_write_and_wait_range(inode->i_mapping, ioffset,
    5475             :                         LLONG_MAX);
    5476      142336 :         if (ret)
    5477           0 :                 goto out_mmap;
    5478      142336 :         truncate_pagecache(inode, ioffset);
    5479             : 
    5480      142339 :         credits = ext4_writepage_trans_blocks(inode);
    5481      142338 :         handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE, credits);
    5482      142339 :         if (IS_ERR(handle)) {
    5483           0 :                 ret = PTR_ERR(handle);
    5484           0 :                 goto out_mmap;
    5485             :         }
    5486      142339 :         ext4_fc_mark_ineligible(sb, EXT4_FC_REASON_FALLOC_RANGE, handle);
    5487             : 
    5488             :         /* Expand the file to avoid data loss if there is an error while shifting */
    5489      142337 :         inode->i_size += len;
    5490      142337 :         EXT4_I(inode)->i_disksize += len;
    5491      142337 :         inode->i_mtime = inode->i_ctime = current_time(inode);
    5492      142337 :         ret = ext4_mark_inode_dirty(handle, inode);
    5493      142339 :         if (ret)
    5494           0 :                 goto out_stop;
    5495             : 
    5496      142339 :         down_write(&EXT4_I(inode)->i_data_sem);
    5497      142338 :         ext4_discard_preallocations(inode, 0);
    5498             : 
    5499      142339 :         path = ext4_find_extent(inode, offset_lblk, NULL, 0);
    5500      142339 :         if (IS_ERR(path)) {
    5501           0 :                 up_write(&EXT4_I(inode)->i_data_sem);
    5502           0 :                 goto out_stop;
    5503             :         }
    5504             : 
    5505      142339 :         depth = ext_depth(inode);
    5506      142339 :         extent = path[depth].p_ext;
    5507      142339 :         if (extent) {
    5508      141294 :                 ee_start_lblk = le32_to_cpu(extent->ee_block);
    5509      141294 :                 ee_len = ext4_ext_get_actual_len(extent);
    5510             : 
    5511             :                 /*
    5512             :                  * If offset_lblk is not the starting block of the extent, split
    5513             :                  * the extent at @offset_lblk.
    5514             :                  */
    5515      141294 :                 if ((offset_lblk > ee_start_lblk) &&
    5516      112377 :                                 (offset_lblk < (ee_start_lblk + ee_len))) {
    5517       48039 :                         if (ext4_ext_is_unwritten(extent))
    5518       17693 :                                 split_flag = EXT4_EXT_MARK_UNWRIT1 |
    5519             :                                         EXT4_EXT_MARK_UNWRIT2;
    5520       48039 :                         ret = ext4_split_extent_at(handle, inode, &path,
    5521             :                                         offset_lblk, split_flag,
    5522             :                                         EXT4_EX_NOCACHE |
    5523             :                                         EXT4_GET_BLOCKS_PRE_IO |
    5524             :                                         EXT4_GET_BLOCKS_METADATA_NOFAIL);
    5525             :                 }
    5526             : 
    5527      141294 :                 ext4_free_ext_path(path);
    5528      141294 :                 if (ret < 0) {
    5529           0 :                         up_write(&EXT4_I(inode)->i_data_sem);
    5530           0 :                         goto out_stop;
    5531             :                 }
    5532             :         } else {
    5533        1045 :                 ext4_free_ext_path(path);
    5534             :         }
    5535             : 
    5536      142339 :         ext4_es_remove_extent(inode, offset_lblk, EXT_MAX_BLOCKS - offset_lblk);
    5537             : 
    5538             :         /*
    5539             :          * If offset_lblk lies in a hole which is at the start of the file, use
    5540             :          * ee_start_lblk to shift extents.
    5541             :          */
    5542      142339 :         ret = ext4_ext_shift_extents(inode, handle,
    5543      142339 :                 max(ee_start_lblk, offset_lblk), len_lblk, SHIFT_RIGHT);
    5544             : 
    5545      142339 :         up_write(&EXT4_I(inode)->i_data_sem);
    5546      142337 :         if (IS_SYNC(inode))
    5547           0 :                 ext4_handle_sync(handle);
    5548      142337 :         if (ret >= 0)
    5549      142336 :                 ext4_update_inode_fsync_trans(handle, inode, 1);
    5550             : 
    5551           1 : out_stop:
    5552      142337 :         ext4_journal_stop(handle);
    5553      142339 : out_mmap:
    5554      142339 :         filemap_invalidate_unlock(mapping);
    5555      144622 : out_mutex:
    5556      144622 :         inode_unlock(inode);
    5557      144622 :         return ret;
    5558             : }
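
A worked example of the insert-range path above (illustrative numbers): with 4 KiB blocks, fallocate(fd, FALLOC_FL_INSERT_RANGE, 8192, 4096) gives offset_lblk = 2 and len_lblk = 1. An extent straddling block 2 is first split at block 2, then every extent at logical block >= 2 is shifted right by one block, leaving a one-block hole at logical block 2 while i_size and i_disksize grow by 4096 bytes.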
    5559             : 
    5560             : /**
    5561             :  * ext4_swap_extents() - Swap extents between two inodes
    5562             :  * @handle: handle for this transaction
    5563             :  * @inode1:     First inode
    5564             :  * @inode2:     Second inode
    5565             :  * @lblk1:      Start block for first inode
    5566             :  * @lblk2:      Start block for second inode
    5567             :  * @count:      Number of blocks to swap
    5568             :  * @unwritten: Mark second inode's extents as unwritten after swap
    5569             :  * @erp:        Pointer to save error value
    5570             :  *
    5571             :  * This helper routine does exactly what its name promises: "swap extents". All
    5572             :  * other concerns such as page-cache locking consistency, bh mapping
    5573             :  * consistency or copying of extent data must be performed by the caller.
    5574             :  * Locking:
    5575             :  *              i_rwsem is held for both inodes
    5576             :  *              i_data_sem is locked for write for both inodes
    5577             :  * Assumptions:
    5578             :  *              All pages from requested range are locked for both inodes
    5579             :  */
    5580             : int
    5581     1862118 : ext4_swap_extents(handle_t *handle, struct inode *inode1,
    5582             :                   struct inode *inode2, ext4_lblk_t lblk1, ext4_lblk_t lblk2,
    5583             :                   ext4_lblk_t count, int unwritten, int *erp)
    5584             : {
    5585     1862118 :         struct ext4_ext_path *path1 = NULL;
    5586     1862118 :         struct ext4_ext_path *path2 = NULL;
    5587     1862118 :         int replaced_count = 0;
    5588             : 
    5589     1862118 :         BUG_ON(!rwsem_is_locked(&EXT4_I(inode1)->i_data_sem));
    5590     1862118 :         BUG_ON(!rwsem_is_locked(&EXT4_I(inode2)->i_data_sem));
    5591     1862118 :         BUG_ON(!inode_is_locked(inode1));
    5592     1862118 :         BUG_ON(!inode_is_locked(inode2));
    5593             : 
    5594     1862118 :         ext4_es_remove_extent(inode1, lblk1, count);
    5595     1862118 :         ext4_es_remove_extent(inode2, lblk2, count);
    5596             : 
    5597     5673423 :         while (count) {
    5598     3811305 :                 struct ext4_extent *ex1, *ex2, tmp_ex;
    5599     3811305 :                 ext4_lblk_t e1_blk, e2_blk;
    5600     3811305 :                 int e1_len, e2_len, len;
    5601     3811305 :                 int split = 0;
    5602             : 
    5603     3811305 :                 path1 = ext4_find_extent(inode1, lblk1, NULL, EXT4_EX_NOCACHE);
    5604     3811305 :                 if (IS_ERR(path1)) {
    5605           0 :                         *erp = PTR_ERR(path1);
    5606           0 :                         path1 = NULL;
    5607           0 :                 finish:
    5608           0 :                         count = 0;
    5609           0 :                         goto repeat;
    5610             :                 }
    5611     3811305 :                 path2 = ext4_find_extent(inode2, lblk2, NULL, EXT4_EX_NOCACHE);
    5612     3811305 :                 if (IS_ERR(path2)) {
    5613           0 :                         *erp = PTR_ERR(path2);
    5614           0 :                         path2 = NULL;
    5615           0 :                         goto finish;
    5616             :                 }
    5617     3811305 :                 ex1 = path1[path1->p_depth].p_ext;
    5618     3811305 :                 ex2 = path2[path2->p_depth].p_ext;
    5619             :                 /* Do we have something to swap ? */
    5620     3811305 :                 if (unlikely(!ex2 || !ex1))
    5621           0 :                         goto finish;
    5622             : 
    5623     3811305 :                 e1_blk = le32_to_cpu(ex1->ee_block);
    5624     3811305 :                 e2_blk = le32_to_cpu(ex2->ee_block);
    5625     3811305 :                 e1_len = ext4_ext_get_actual_len(ex1);
    5626     3811305 :                 e2_len = ext4_ext_get_actual_len(ex2);
    5627             : 
    5628             :                 /* Hole handling */
    5629     3811305 :                 if (!in_range(lblk1, e1_blk, e1_len) ||
    5630     3806189 :                     !in_range(lblk2, e2_blk, e2_len)) {
    5631        6564 :                         ext4_lblk_t next1, next2;
    5632             : 
    5633             :                         /* if hole after extent, then go to next extent */
    5634        6564 :                         next1 = ext4_ext_next_allocated_block(path1);
    5635        6564 :                         next2 = ext4_ext_next_allocated_block(path2);
    5636             :                         /* If hole before extent, then shift to that extent */
    5637        6564 :                         if (e1_blk > lblk1)
    5638           0 :                                 next1 = e1_blk;
    5639        6564 :                         if (e2_blk > lblk2)
    5640        5116 :                                 next2 = e2_blk;
    5641             :                         /* Do we have something to swap */
    5642             :                         /* Do we have something to swap? */
    5643           0 :                                 goto finish;
    5644             :                         /* Move to the rightmost boundary */
    5645        6564 :                         len = next1 - lblk1;
    5646        6564 :                         if (len < next2 - lblk2)
    5647             :                                 len = next2 - lblk2;
    5648        6564 :                         if (len > count)
    5649        6564 :                                 len = count;
    5650        6564 :                         lblk1 += len;
    5651        6564 :                         lblk2 += len;
    5652        6564 :                         count -= len;
    5653        6564 :                         goto repeat;
    5654             :                 }
    5655             : 
    5656             :                 /* Prepare left boundary */
    5657     3804741 :                 if (e1_blk < lblk1) {
    5658      102006 :                         split = 1;
    5659      102006 :                         *erp = ext4_force_split_extent_at(handle, inode1,
    5660             :                                                 &path1, lblk1, 0);
    5661      102006 :                         if (unlikely(*erp))
    5662           0 :                                 goto finish;
    5663             :                 }
    5664     3804741 :                 if (e2_blk < lblk2) {
    5665        1960 :                         split = 1;
    5666        1960 :                         *erp = ext4_force_split_extent_at(handle, inode2,
    5667             :                                                 &path2,  lblk2, 0);
    5668        1960 :                         if (unlikely(*erp))
    5669           0 :                                 goto finish;
    5670             :                 }
    5671             :                 /* ext4_split_extent_at() may result in a leaf extent split,
    5672             :                  * so the path must be revalidated. */
    5673     3802781 :                 if (split)
    5674      102592 :                         goto repeat;
    5675             : 
    5676             :                 /* Prepare right boundary */
    5677     3702149 :                 len = count;
    5678     3702149 :                 if (len > e1_blk + e1_len - lblk1)
    5679             :                         len = e1_blk + e1_len - lblk1;
    5680     3702149 :                 if (len > e2_blk + e2_len - lblk2)
    5681           0 :                         len = e2_blk + e2_len - lblk2;
    5682             : 
    5683     3702149 :                 if (len != e1_len) {
    5684     1834501 :                         split = 1;
    5685     1834501 :                         *erp = ext4_force_split_extent_at(handle, inode1,
    5686             :                                                 &path1, lblk1 + len, 0);
    5687     1834501 :                         if (unlikely(*erp))
    5688           0 :                                 goto finish;
    5689             :                 }
    5690     3702149 :                 if (len != e2_len) {
    5691     1742320 :                         split = 1;
    5692     1742320 :                         *erp = ext4_force_split_extent_at(handle, inode2,
    5693             :                                                 &path2, lblk2 + len, 0);
    5694     1742320 :                         if (*erp)
    5695           0 :                                 goto finish;
    5696             :                 }
    5697             :                 /* ext4_split_extent_at() may result in a leaf extent split,
    5698             :                  * so the path must be revalidated. */
    5699     1959829 :                 if (split)
    5700     1846595 :                         goto repeat;
    5701             : 
    5702     1855554 :                 BUG_ON(e2_len != e1_len);
    5703     1855554 :                 *erp = ext4_ext_get_access(handle, inode1, path1 + path1->p_depth);
    5704     1855554 :                 if (unlikely(*erp))
    5705           0 :                         goto finish;
    5706     1855554 :                 *erp = ext4_ext_get_access(handle, inode2, path2 + path2->p_depth);
    5707     1855554 :                 if (unlikely(*erp))
    5708           0 :                         goto finish;
    5709             : 
    5710             :                 /* Both extents are fully inside boundaries. Swap it now */
    5711     1855554 :                 tmp_ex = *ex1;
    5712     1855554 :                 ext4_ext_store_pblock(ex1, ext4_ext_pblock(ex2));
    5713     1855554 :                 ext4_ext_store_pblock(ex2, ext4_ext_pblock(&tmp_ex));
    5714     1855554 :                 ex1->ee_len = cpu_to_le16(e2_len);
    5715     1855554 :                 ex2->ee_len = cpu_to_le16(e1_len);
    5716     1855554 :                 if (unwritten)
    5717     1855554 :                         ext4_ext_mark_unwritten(ex2);
    5718     1855554 :                 if (ext4_ext_is_unwritten(&tmp_ex))
    5719     1786246 :                         ext4_ext_mark_unwritten(ex1);
    5720             : 
    5721     1855554 :                 ext4_ext_try_to_merge(handle, inode2, path2, ex2);
    5722     1855554 :                 ext4_ext_try_to_merge(handle, inode1, path1, ex1);
    5723     1855554 :                 *erp = ext4_ext_dirty(handle, inode2, path2 +
    5724             :                                       path2->p_depth);
    5725     1855554 :                 if (unlikely(*erp))
    5726           0 :                         goto finish;
    5727     1855554 :                 *erp = ext4_ext_dirty(handle, inode1, path1 +
    5728             :                                       path1->p_depth);
    5729             :                 /*
    5730             :                  * Looks scary, eh? The second inode already points to the new
    5731             :                  * blocks and was successfully dirtied. Luckily, an error here
    5732             :                  * can only be a journal error, in which case the whole
    5733             :                  * transaction will be aborted anyway.
    5734             :                  */
    5735     1855554 :                 if (unlikely(*erp))
    5736           0 :                         goto finish;
    5737     1855554 :                 lblk1 += len;
    5738     1855554 :                 lblk2 += len;
    5739     1855554 :                 replaced_count += len;
    5740     1855554 :                 count -= len;
    5741             : 
    5742     3811305 :         repeat:
    5743     3811305 :                 ext4_free_ext_path(path1);
    5744     3811305 :                 ext4_free_ext_path(path2);
    5745     3811305 :                 path1 = path2 = NULL;
    5746             :         }
    5747     1862118 :         return replaced_count;
    5748             : }
    5749             : 
    5750             : /*
    5751             :  * ext4_clu_mapped - determine whether any block in a logical cluster has
    5752             :  *                   been mapped to a physical cluster
    5753             :  *
    5754             :  * @inode - file containing the logical cluster
    5755             :  * @lclu - logical cluster of interest
    5756             :  *
    5757             :  * Returns 1 if any block in the logical cluster is mapped, signifying
    5758             :  * that a physical cluster has been allocated for it.  Otherwise,
    5759             :  * returns 0.  Can also return negative error codes.  Derived from
    5760             :  * ext4_ext_map_blocks().
    5761             :  */
    5762        4096 : int ext4_clu_mapped(struct inode *inode, ext4_lblk_t lclu)
    5763             : {
    5764        4096 :         struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
    5765        4096 :         struct ext4_ext_path *path;
    5766        4096 :         int depth, mapped = 0, err = 0;
    5767        4096 :         struct ext4_extent *extent;
    5768        4096 :         ext4_lblk_t first_lblk, first_lclu, last_lclu;
    5769             : 
    5770             :         /*
    5771             :          * if data can be stored inline, the logical cluster isn't
    5772             :          * mapped - no physical clusters have been allocated, and the
    5773             :          * file has no extents
    5774             :          */
    5775        4096 :         if (ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA) ||
    5776             :             ext4_has_inline_data(inode))
    5777             :                 return 0;
    5778             : 
    5779             :         /* search for the extent closest to the first block in the cluster */
    5780        4096 :         path = ext4_find_extent(inode, EXT4_C2B(sbi, lclu), NULL, 0);
    5781        4096 :         if (IS_ERR(path)) {
    5782           0 :                 err = PTR_ERR(path);
    5783           0 :                 path = NULL;
    5784           0 :                 goto out;
    5785             :         }
    5786             : 
    5787        4096 :         depth = ext_depth(inode);
    5788             : 
    5789             :         /*
    5790             :          * A consistent leaf must not be empty.  This situation is possible,
    5791             :          * though, _during_ tree modification, and it's why an assert can't
    5792             :          * be put in ext4_find_extent().
    5793             :          */
    5794        4096 :         if (unlikely(path[depth].p_ext == NULL && depth != 0)) {
    5795           0 :                 EXT4_ERROR_INODE(inode,
    5796             :                     "bad extent address - lblock: %lu, depth: %d, pblock: %lld",
    5797             :                                  (unsigned long) EXT4_C2B(sbi, lclu),
    5798             :                                  depth, path[depth].p_block);
    5799           0 :                 err = -EFSCORRUPTED;
    5800           0 :                 goto out;
    5801             :         }
    5802             : 
    5803        4096 :         extent = path[depth].p_ext;
    5804             : 
    5805             :         /* can't be mapped if the extent tree is empty */
    5806        4096 :         if (extent == NULL)
    5807        3840 :                 goto out;
    5808             : 
    5809         256 :         first_lblk = le32_to_cpu(extent->ee_block);
    5810         256 :         first_lclu = EXT4_B2C(sbi, first_lblk);
    5811             : 
    5812             :         /*
    5813             :          * Three possible outcomes at this point - found extent spanning
    5814             :          * the target cluster, to the left of the target cluster, or to the
    5815             :          * right of the target cluster.  The first two cases are handled here.
    5816             :          * The last case indicates the target cluster is not mapped.
    5817             :          */
    5818         256 :         if (lclu >= first_lclu) {
    5819         512 :                 last_lclu = EXT4_B2C(sbi, first_lblk +
    5820             :                                      ext4_ext_get_actual_len(extent) - 1);
    5821         256 :                 if (lclu <= last_lclu) {
    5822             :                         mapped = 1;
    5823             :                 } else {
    5824         256 :                         first_lblk = ext4_ext_next_allocated_block(path);
    5825         256 :                         first_lclu = EXT4_B2C(sbi, first_lblk);
    5826         256 :                         if (lclu == first_lclu)
    5827           0 :                                 mapped = 1;
    5828             :                 }
    5829             :         }
    5830             : 
    5831         256 : out:
    5832        4096 :         ext4_free_ext_path(path);
    5833             : 
    5834        4096 :         return err ? err : mapped;
    5835             : }
    5836             : 
    5837             : /*
    5838             :  * Updates the physical block address and unwritten status of the extent
    5839             :  * starting at logical block 'start' and of length 'len'. If such an extent
    5840             :  * doesn't exist, this function splits the extent tree appropriately to
    5841             :  * create one.  This function is called in the fast commit replay path.
    5842             :  * Returns 0 on success and an error code on failure.
    5843             :  */
    5844           0 : int ext4_ext_replay_update_ex(struct inode *inode, ext4_lblk_t start,
    5845             :                               int len, int unwritten, ext4_fsblk_t pblk)
    5846             : {
    5847           0 :         struct ext4_ext_path *path = NULL, *ppath;
    5848           0 :         struct ext4_extent *ex;
    5849           0 :         int ret;
    5850             : 
    5851           0 :         path = ext4_find_extent(inode, start, NULL, 0);
    5852           0 :         if (IS_ERR(path))
    5853           0 :                 return PTR_ERR(path);
    5854           0 :         ex = path[path->p_depth].p_ext;
    5855           0 :         if (!ex) {
    5856           0 :                 ret = -EFSCORRUPTED;
    5857           0 :                 goto out;
    5858             :         }
    5859             : 
    5860           0 :         if (le32_to_cpu(ex->ee_block) != start ||
    5861             :                 ext4_ext_get_actual_len(ex) != len) {
    5862             :                 /* We need to split this extent to match our extent first */
    5863           0 :                 ppath = path;
    5864           0 :                 down_write(&EXT4_I(inode)->i_data_sem);
    5865           0 :                 ret = ext4_force_split_extent_at(NULL, inode, &ppath, start, 1);
    5866           0 :                 up_write(&EXT4_I(inode)->i_data_sem);
    5867           0 :                 if (ret)
    5868           0 :                         goto out;
    5869           0 :                 kfree(path);
    5870           0 :                 path = ext4_find_extent(inode, start, NULL, 0);
    5871           0 :                 if (IS_ERR(path))
    5872             :                         return -1;
    5873           0 :                 ppath = path;
    5874           0 :                 ex = path[path->p_depth].p_ext;
    5875           0 :                 WARN_ON(le32_to_cpu(ex->ee_block) != start);
    5876           0 :                 if (ext4_ext_get_actual_len(ex) != len) {
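                     :                         /*
                     :                          * The extent found after the first split still does
                     :                          * not have the requested length; split again at
                     :                          * start + len so that [start, start + len) becomes an
                     :                          * extent of its own.
                     :                          */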
    5877           0 :                         down_write(&EXT4_I(inode)->i_data_sem);
    5878           0 :                         ret = ext4_force_split_extent_at(NULL, inode, &ppath,
    5879             :                                                          start + len, 1);
    5880           0 :                         up_write(&EXT4_I(inode)->i_data_sem);
    5881           0 :                         if (ret)
    5882           0 :                                 goto out;
    5883           0 :                         kfree(path);
    5884           0 :                         path = ext4_find_extent(inode, start, NULL, 0);
    5885           0 :                         if (IS_ERR(path))
    5886             :                                 return -EINVAL;
    5887           0 :                         ex = path[path->p_depth].p_ext;
    5888             :                 }
    5889             :         }
    5890           0 :         if (unwritten)
    5891           0 :                 ext4_ext_mark_unwritten(ex);
    5892             :         else
    5893           0 :                 ext4_ext_mark_initialized(ex);
    5894           0 :         ext4_ext_store_pblock(ex, pblk);
    5895           0 :         down_write(&EXT4_I(inode)->i_data_sem);
    5896           0 :         ret = ext4_ext_dirty(NULL, inode, &path[path->p_depth]);
    5897           0 :         up_write(&EXT4_I(inode)->i_data_sem);
    5898           0 : out:
    5899           0 :         ext4_free_ext_path(path);
    5900           0 :         ext4_mark_inode_dirty(NULL, inode);
    5901           0 :         return ret;
    5902             : }
    5903             : 
    5904             : /* Try to shrink the extent tree */
    5905           0 : void ext4_ext_replay_shrink_inode(struct inode *inode, ext4_lblk_t end)
    5906             : {
    5907           0 :         struct ext4_ext_path *path = NULL;
    5908           0 :         struct ext4_extent *ex;
    5909           0 :         ext4_lblk_t old_cur, cur = 0;
    5910             : 
    5911           0 :         while (cur < end) {
    5912           0 :                 path = ext4_find_extent(inode, cur, NULL, 0);
    5913           0 :                 if (IS_ERR(path))
    5914             :                         return;
    5915           0 :                 ex = path[path->p_depth].p_ext;
    5916           0 :                 if (!ex) {
    5917           0 :                         ext4_free_ext_path(path);
    5918           0 :                         ext4_mark_inode_dirty(NULL, inode);
    5919           0 :                         return;
    5920             :                 }
    5921           0 :                 old_cur = cur;
    5922           0 :                 cur = le32_to_cpu(ex->ee_block) + ext4_ext_get_actual_len(ex);
    5923           0 :                 if (cur <= old_cur)
    5924           0 :                         cur = old_cur + 1;
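                     :                 /*
                     :                  * Merge this extent with its neighbours where possible,
                     :                  * then dirty the modified leaf.
                     :                  */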
    5925           0 :                 ext4_ext_try_to_merge(NULL, inode, path, ex);
    5926           0 :                 down_write(&EXT4_I(inode)->i_data_sem);
    5927           0 :                 ext4_ext_dirty(NULL, inode, &path[path->p_depth]);
    5928           0 :                 up_write(&EXT4_I(inode)->i_data_sem);
    5929           0 :                 ext4_mark_inode_dirty(NULL, inode);
    5930           0 :                 ext4_free_ext_path(path);
    5931             :         }
    5932             : }
    5933             : 
    5934             : /* Check if *cur is a hole and if it is, skip it */
    5935           0 : static int skip_hole(struct inode *inode, ext4_lblk_t *cur)
    5936             : {
    5937           0 :         int ret;
    5938           0 :         struct ext4_map_blocks map;
    5939             : 
    5940           0 :         map.m_lblk = *cur;
    5941           0 :         map.m_len = ((inode->i_size) >> inode->i_sb->s_blocksize_bits) - *cur;
    5942             : 
    5943           0 :         ret = ext4_map_blocks(NULL, inode, &map, 0);
    5944           0 :         if (ret < 0)
    5945             :                 return ret;
    5946           0 :         if (ret != 0)
    5947             :                 return 0;
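                     :         /*
                     :          * ret == 0 means *cur falls in a hole; m_len has been trimmed to
                     :          * the hole's length, so advance past it.
                     :          */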
    5948           0 :         *cur = *cur + map.m_len;
    5949           0 :         return 0;
    5950             : }
    5951             : 
    5952             : /* Count number of blocks used by this inode and update i_blocks */
    5953           0 : int ext4_ext_replay_set_iblocks(struct inode *inode)
    5954             : {
    5955           0 :         struct ext4_ext_path *path = NULL, *path2 = NULL;
    5956           0 :         struct ext4_extent *ex;
    5957           0 :         ext4_lblk_t cur = 0, end;
    5958           0 :         int numblks = 0, i, ret = 0;
    5959           0 :         ext4_fsblk_t cmp1, cmp2;
    5960           0 :         struct ext4_map_blocks map;
    5961             : 
    5962             :         /* Determine the size of the file first */
    5963           0 :         path = ext4_find_extent(inode, EXT_MAX_BLOCKS - 1, NULL,
    5964             :                                         EXT4_EX_NOCACHE);
    5965           0 :         if (IS_ERR(path))
    5966           0 :                 return PTR_ERR(path);
    5967           0 :         ex = path[path->p_depth].p_ext;
    5968           0 :         if (!ex) {
    5969           0 :                 ext4_free_ext_path(path);
    5970           0 :                 goto out;
    5971             :         }
    5972           0 :         end = le32_to_cpu(ex->ee_block) + ext4_ext_get_actual_len(ex);
    5973           0 :         ext4_free_ext_path(path);
    5974             : 
    5975             :         /* Count the number of data blocks */
    5976           0 :         cur = 0;
    5977           0 :         while (cur < end) {
    5978           0 :                 map.m_lblk = cur;
    5979           0 :                 map.m_len = end - cur;
    5980           0 :                 ret = ext4_map_blocks(NULL, inode, &map, 0);
    5981           0 :                 if (ret < 0)
    5982             :                         break;
    5983           0 :                 if (ret > 0)
    5984           0 :                         numblks += ret;
    5985           0 :                 cur = cur + map.m_len;
    5986             :         }
    5987             : 
    5988             :         /*
    5989             :          * Count the number of extent tree blocks. We do it by looking up
    5990             :          * two successive extents and determining the difference between
    5991             :          * their paths. When the paths of two successive extents differ,
    5992             :          * we compare the blocks in the path at each level and increment
    5993             :          * iblocks by the total number of differences found.
    5994             :          */
    5995           0 :         cur = 0;
    5996           0 :         ret = skip_hole(inode, &cur);
    5997           0 :         if (ret < 0)
    5998           0 :                 goto out;
    5999           0 :         path = ext4_find_extent(inode, cur, NULL, 0);
    6000           0 :         if (IS_ERR(path))
    6001           0 :                 goto out;
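                     :         /*
                     :          * The extent tree root lives in the inode itself, so the path to
                     :          * the first extent uses p_depth on-disk tree blocks.
                     :          */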
    6002           0 :         numblks += path->p_depth;
    6003           0 :         ext4_free_ext_path(path);
    6004           0 :         while (cur < end) {
    6005           0 :                 path = ext4_find_extent(inode, cur, NULL, 0);
    6006           0 :                 if (IS_ERR(path))
    6007             :                         break;
    6008           0 :                 ex = path[path->p_depth].p_ext;
    6009           0 :                 if (!ex) {
    6010           0 :                         ext4_free_ext_path(path);
    6011           0 :                         return 0;
    6012             :                 }
    6013           0 :                 cur = max(cur + 1, le32_to_cpu(ex->ee_block) +
    6014             :                                         ext4_ext_get_actual_len(ex));
    6015           0 :                 ret = skip_hole(inode, &cur);
    6016           0 :                 if (ret < 0) {
    6017           0 :                         ext4_free_ext_path(path);
    6018             :                         break;
    6019             :                 }
    6020           0 :                 path2 = ext4_find_extent(inode, cur, NULL, 0);
    6021           0 :                 if (IS_ERR(path2)) {
    6022           0 :                         ext4_free_ext_path(path);
    6023             :                         break;
    6024             :                 }
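                     :                 /*
                     :                  * Each level where the two paths point at different
                     :                  * on-disk blocks contributes one additional tree block.
                     :                  */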
    6025           0 :                 for (i = 0; i <= max(path->p_depth, path2->p_depth); i++) {
    6026           0 :                         cmp1 = cmp2 = 0;
    6027           0 :                         if (i <= path->p_depth)
    6028           0 :                                 cmp1 = path[i].p_bh ?
    6029           0 :                                         path[i].p_bh->b_blocknr : 0;
    6030           0 :                         if (i <= path2->p_depth)
    6031           0 :                                 cmp2 = path2[i].p_bh ?
    6032           0 :                                         path2[i].p_bh->b_blocknr : 0;
    6033           0 :                         if (cmp1 != cmp2 && cmp2 != 0)
    6034           0 :                                 numblks++;
    6035             :                 }
    6036           0 :                 ext4_free_ext_path(path);
    6037           0 :                 ext4_free_ext_path(path2);
    6038             :         }
    6039             : 
    6040           0 : out:
    6041           0 :         inode->i_blocks = numblks << (inode->i_sb->s_blocksize_bits - 9);
    6042           0 :         ext4_mark_inode_dirty(NULL, inode);
    6043           0 :         return 0;
    6044             : }
    6045             : 
    6046           0 : int ext4_ext_clear_bb(struct inode *inode)
    6047             : {
    6048           0 :         struct ext4_ext_path *path = NULL;
    6049           0 :         struct ext4_extent *ex;
    6050           0 :         ext4_lblk_t cur = 0, end;
    6051           0 :         int j, ret = 0;
    6052           0 :         struct ext4_map_blocks map;
    6053             : 
    6054           0 :         if (ext4_test_inode_flag(inode, EXT4_INODE_INLINE_DATA))
    6055             :                 return 0;
    6056             : 
    6057             :         /* Determine the size of the file first */
    6058           0 :         path = ext4_find_extent(inode, EXT_MAX_BLOCKS - 1, NULL,
    6059             :                                         EXT4_EX_NOCACHE);
    6060           0 :         if (IS_ERR(path))
    6061           0 :                 return PTR_ERR(path);
    6062           0 :         ex = path[path->p_depth].p_ext;
    6063           0 :         if (!ex) {
    6064           0 :                 ext4_free_ext_path(path);
    6065           0 :                 return 0;
    6066             :         }
    6067           0 :         end = le32_to_cpu(ex->ee_block) + ext4_ext_get_actual_len(ex);
    6068           0 :         ext4_free_ext_path(path);
    6069             : 
    6070           0 :         cur = 0;
    6071           0 :         while (cur < end) {
    6072           0 :                 map.m_lblk = cur;
    6073           0 :                 map.m_len = end - cur;
    6074           0 :                 ret = ext4_map_blocks(NULL, inode, &map, 0);
    6075           0 :                 if (ret < 0)
    6076             :                         break;
    6077           0 :                 if (ret > 0) {
    6078           0 :                         path = ext4_find_extent(inode, map.m_lblk, NULL, 0);
    6079           0 :                         if (!IS_ERR_OR_NULL(path)) {
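                     :                                 /*
                     :                                  * Clear the block bitmap bits for the extent
                     :                                  * tree blocks on this path and record the
                     :                                  * regions for fast commit replay.
                     :                                  */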
    6080           0 :                                 for (j = 0; j < path->p_depth; j++) {
    6081             : 
    6082           0 :                                         ext4_mb_mark_bb(inode->i_sb,
    6083           0 :                                                         path[j].p_block, 1, 0);
    6084           0 :                                         ext4_fc_record_regions(inode->i_sb, inode->i_ino,
    6085             :                                                         0, path[j].p_block, 1, 1);
    6086             :                                 }
    6087           0 :                                 ext4_free_ext_path(path);
    6088             :                         }
    6089           0 :                         ext4_mb_mark_bb(inode->i_sb, map.m_pblk, map.m_len, 0);
    6090           0 :                         ext4_fc_record_regions(inode->i_sb, inode->i_ino,
    6091           0 :                                         map.m_lblk, map.m_pblk, map.m_len, 1);
    6092             :                 }
    6093           0 :                 cur = cur + map.m_len;
    6094             :         }
    6095             : 
    6096             :         return 0;
    6097             : }

Generated by: LCOV version 1.14