Line data Source code
1 : /*
2 : * Ext4 orphan inode handling
3 : */
4 : #include <linux/fs.h>
5 : #include <linux/quotaops.h>
6 : #include <linux/buffer_head.h>
7 :
8 : #include "ext4.h"
9 : #include "ext4_jbd2.h"
10 :
11 0 : static int ext4_orphan_file_add(handle_t *handle, struct inode *inode)
12 : {
13 0 : int i, j, start;
14 0 : struct ext4_orphan_info *oi = &EXT4_SB(inode->i_sb)->s_orphan_info;
15 0 : int ret = 0;
16 0 : bool found = false;
17 0 : __le32 *bdata;
18 0 : int inodes_per_ob = ext4_inodes_per_orphan_block(inode->i_sb);
19 0 : int looped = 0;
20 :
21 : /*
22 : * Find a block with a free orphan entry. Use the CPU number as a naive
23 : * hash to pick the search start in the orphan file.
24 : */
25 0 : start = raw_smp_processor_id()*13 % oi->of_blocks;
26 0 : i = start;
27 0 : do {
28 0 : if (atomic_dec_if_positive(&oi->of_binfo[i].ob_free_entries)
29 : >= 0) {
30 : found = true;
31 : break;
32 : }
33 0 : if (++i >= oi->of_blocks)
34 0 : i = 0;
35 0 : } while (i != start);
36 :
37 0 : if (!found) {
38 : /*
39 : * For now we don't grow or shrink orphan file. We just use
40 : * whatever was allocated at mke2fs time. The additional
41 : * credits we would have to reserve for each orphan inode
42 : * operation just don't seem worth it.
43 : */
44 : return -ENOSPC;
45 : }
46 :
47 0 : ret = ext4_journal_get_write_access(handle, inode->i_sb,
48 : oi->of_binfo[i].ob_bh, EXT4_JTR_ORPHAN_FILE);
49 0 : if (ret) {
50 0 : atomic_inc(&oi->of_binfo[i].ob_free_entries);
51 0 : return ret;
52 : }
53 :
54 0 : bdata = (__le32 *)(oi->of_binfo[i].ob_bh->b_data);
55 : /* Find empty slot in a block */
56 0 : j = 0;
57 0 : do {
58 0 : if (looped) {
59 : /*
60 : * Did we walk through the block several times without
61 : * finding a free entry? It is theoretically possible
62 : * if entries get constantly allocated and freed, or
63 : * if the block is corrupted. Avoid indefinite looping
64 : * and bail. We'll use the orphan list instead.
65 : */
66 0 : if (looped > 3) {
67 0 : atomic_inc(&oi->of_binfo[i].ob_free_entries);
68 0 : return -ENOSPC;
69 : }
70 0 : cond_resched();
71 : }
72 0 : while (bdata[j]) {
73 0 : if (++j >= inodes_per_ob) {
74 0 : j = 0;
75 0 : looped++;
76 : }
77 : }
78 0 : } while (cmpxchg(&bdata[j], (__le32)0, cpu_to_le32(inode->i_ino)) !=
79 : (__le32)0);
80 :
81 0 : EXT4_I(inode)->i_orphan_idx = i * inodes_per_ob + j;
82 0 : ext4_set_inode_state(inode, EXT4_STATE_ORPHAN_FILE);
83 :
84 0 : return ext4_handle_dirty_metadata(handle, NULL, oi->of_binfo[i].ob_bh);
85 : }
86 :
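The free-slot search above starts at a block derived from the current CPU number and scans circularly, so concurrent orphan additions tend to start in different blocks. The standalone sketch below (userspace, with assumed per-block free counts and an assumed CPU number) only demonstrates that wrap-around scan; it is an illustration, not kernel code.

#include <stdio.h>

int main(void)
{
	int free_entries[] = { 0, 0, 5, 0 };	/* assumed per-block free counts */
	int of_blocks = 4;			/* assumed number of orphan blocks */
	int cpu = 7;				/* stand-in for raw_smp_processor_id() */
	int start = cpu * 13 % of_blocks;	/* same naive hash as above */
	int i = start;

	do {
		if (free_entries[i] > 0) {
			printf("cpu %d starts at block %d, picks block %d\n",
			       cpu, start, i);
			return 0;
		}
		if (++i >= of_blocks)
			i = 0;
	} while (i != start);
	printf("orphan file full, fall back to the orphan list\n");
	return 0;
}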
87 : /*
88 : * ext4_orphan_add() links an unlinked or truncated inode into a list of
89 : * such inodes, starting at the superblock, in case we crash before the
90 : * file is closed/deleted, or in case the inode truncate spans multiple
91 : * transactions and the last transaction is not recovered after a crash.
92 : *
93 : * At filesystem recovery time, we walk this list deleting unlinked
94 : * inodes and truncating linked inodes in ext4_orphan_cleanup().
95 : *
96 : * Orphan list manipulation functions must be called under i_rwsem unless
97 : * we are just creating the inode or deleting it.
98 : */
99 0 : int ext4_orphan_add(handle_t *handle, struct inode *inode)
100 : {
101 0 : struct super_block *sb = inode->i_sb;
102 0 : struct ext4_sb_info *sbi = EXT4_SB(sb);
103 0 : struct ext4_iloc iloc;
104 0 : int err = 0, rc;
105 0 : bool dirty = false;
106 :
107 0 : if (!sbi->s_journal || is_bad_inode(inode))
108 0 : return 0;
109 :
110 0 : WARN_ON_ONCE(!(inode->i_state & (I_NEW | I_FREEING)) &&
111 : !inode_is_locked(inode));
112 : /*
113 : * Inode orphaned in orphan file or in orphan list?
114 : */
115 0 : if (ext4_test_inode_state(inode, EXT4_STATE_ORPHAN_FILE) ||
116 0 : !list_empty(&EXT4_I(inode)->i_orphan))
117 : return 0;
118 :
119 : /*
120 : * Orphan handling is only valid for files with data blocks
121 : * being truncated, or files being unlinked. Note that we either
122 : * hold i_rwsem, or the inode cannot be referenced from outside,
123 : * so i_nlink should not be bumped due to races.
124 : */
125 0 : ASSERT((S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
126 : S_ISLNK(inode->i_mode)) || inode->i_nlink == 0);
127 :
128 0 : if (sbi->s_orphan_info.of_blocks) {
129 0 : err = ext4_orphan_file_add(handle, inode);
130 : /*
131 : * Fall back to the normal orphan list if the orphan
132 : * file is out of space.
133 : */
134 0 : if (err != -ENOSPC)
135 : return err;
136 : }
137 :
138 0 : BUFFER_TRACE(sbi->s_sbh, "get_write_access");
139 0 : err = ext4_journal_get_write_access(handle, sb, sbi->s_sbh,
140 : EXT4_JTR_NONE);
141 0 : if (err)
142 0 : goto out;
143 :
144 0 : err = ext4_reserve_inode_write(handle, inode, &iloc);
145 0 : if (err)
146 0 : goto out;
147 :
148 0 : mutex_lock(&sbi->s_orphan_lock);
149 : /*
150 : * Due to previous errors the inode may already be part of the
151 : * on-disk orphan list. If so, skip the on-disk list modification.
152 : */
153 0 : if (!NEXT_ORPHAN(inode) || NEXT_ORPHAN(inode) >
154 0 : (le32_to_cpu(sbi->s_es->s_inodes_count))) {
155 : /* Insert this inode at the head of the on-disk orphan list */
156 0 : NEXT_ORPHAN(inode) = le32_to_cpu(sbi->s_es->s_last_orphan);
157 0 : lock_buffer(sbi->s_sbh);
158 0 : sbi->s_es->s_last_orphan = cpu_to_le32(inode->i_ino);
159 0 : ext4_superblock_csum_set(sb);
160 0 : unlock_buffer(sbi->s_sbh);
161 0 : dirty = true;
162 : }
163 0 : list_add(&EXT4_I(inode)->i_orphan, &sbi->s_orphan);
164 0 : mutex_unlock(&sbi->s_orphan_lock);
165 :
166 0 : if (dirty) {
167 0 : err = ext4_handle_dirty_metadata(handle, NULL, sbi->s_sbh);
168 0 : rc = ext4_mark_iloc_dirty(handle, inode, &iloc);
169 0 : if (!err)
170 0 : err = rc;
171 0 : if (err) {
172 : /*
173 : * We have to remove the inode from the in-memory list if
174 : * the addition to the on-disk orphan list failed. Stray orphan
175 : * list entries can cause panics at unmount time.
176 : */
177 0 : mutex_lock(&sbi->s_orphan_lock);
178 0 : list_del_init(&EXT4_I(inode)->i_orphan);
179 0 : mutex_unlock(&sbi->s_orphan_lock);
180 : }
181 : } else
182 0 : brelse(iloc.bh);
183 :
184 : ext4_debug("superblock will point to %lu\n", inode->i_ino);
185 : ext4_debug("orphan inode %lu will point to %d\n",
186 : inode->i_ino, NEXT_ORPHAN(inode));
187 0 : out:
188 0 : ext4_std_error(sb, err);
189 : return err;
190 : }
191 :
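For orientation, the sketch below shows the usual shape of a caller: the inode is added to the orphan list (or orphan file) before a truncate that may span several transactions, and removed once the work has been journalled, so a crash in between is repaired by ext4_orphan_cleanup() at the next mount. The function name, credit counts, and the minimal error handling are illustrative placeholders, not copied from the kernel.

static int example_shrink_inode(struct inode *inode)
{
	handle_t *handle;
	int err;

	handle = ext4_journal_start(inode, EXT4_HT_INODE, 3);
	if (IS_ERR(handle))
		return PTR_ERR(handle);
	err = ext4_orphan_add(handle, inode);	/* crash protection on */
	ext4_journal_stop(handle);
	if (err)
		return err;

	err = ext4_truncate(inode);		/* may span many transactions */

	handle = ext4_journal_start(inode, EXT4_HT_INODE, 3);
	if (IS_ERR(handle)) {
		ext4_orphan_del(NULL, inode);	/* at least drop the in-memory entry */
		return err ? err : PTR_ERR(handle);
	}
	ext4_orphan_del(handle, inode);		/* crash protection off */
	ext4_journal_stop(handle);
	return err;
}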
192 0 : static int ext4_orphan_file_del(handle_t *handle, struct inode *inode)
193 : {
194 0 : struct ext4_orphan_info *oi = &EXT4_SB(inode->i_sb)->s_orphan_info;
195 0 : __le32 *bdata;
196 0 : int blk, off;
197 0 : int inodes_per_ob = ext4_inodes_per_orphan_block(inode->i_sb);
198 0 : int ret = 0;
199 :
200 0 : if (!handle)
201 0 : goto out;
202 0 : blk = EXT4_I(inode)->i_orphan_idx / inodes_per_ob;
203 0 : off = EXT4_I(inode)->i_orphan_idx % inodes_per_ob;
204 0 : if (WARN_ON_ONCE(blk >= oi->of_blocks))
205 0 : goto out;
206 :
207 0 : ret = ext4_journal_get_write_access(handle, inode->i_sb,
208 : oi->of_binfo[blk].ob_bh, EXT4_JTR_ORPHAN_FILE);
209 0 : if (ret)
210 0 : goto out;
211 :
212 0 : bdata = (__le32 *)(oi->of_binfo[blk].ob_bh->b_data);
213 0 : bdata[off] = 0;
214 0 : atomic_inc(&oi->of_binfo[blk].ob_free_entries);
215 0 : ret = ext4_handle_dirty_metadata(handle, NULL, oi->of_binfo[blk].ob_bh);
216 0 : out:
217 0 : ext4_clear_inode_state(inode, EXT4_STATE_ORPHAN_FILE);
218 0 : INIT_LIST_HEAD(&EXT4_I(inode)->i_orphan);
219 :
220 0 : return ret;
221 : }
222 :
223 : /*
224 : * ext4_orphan_del() removes an unlinked or truncated inode from the list
225 : * of such inodes stored on disk, because it is finally being cleaned up.
226 : */
227 0 : int ext4_orphan_del(handle_t *handle, struct inode *inode)
228 : {
229 0 : struct list_head *prev;
230 0 : struct ext4_inode_info *ei = EXT4_I(inode);
231 0 : struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
232 0 : __u32 ino_next;
233 0 : struct ext4_iloc iloc;
234 0 : int err = 0;
235 :
236 0 : if (!sbi->s_journal && !(sbi->s_mount_state & EXT4_ORPHAN_FS))
237 : return 0;
238 :
239 0 : WARN_ON_ONCE(!(inode->i_state & (I_NEW | I_FREEING)) &&
240 : !inode_is_locked(inode));
241 0 : if (ext4_test_inode_state(inode, EXT4_STATE_ORPHAN_FILE))
242 0 : return ext4_orphan_file_del(handle, inode);
243 :
244 : /* Do this quick check before taking global s_orphan_lock. */
245 0 : if (list_empty(&ei->i_orphan))
246 : return 0;
247 :
248 0 : if (handle) {
249 : /* Grab inode buffer early before taking global s_orphan_lock */
250 0 : err = ext4_reserve_inode_write(handle, inode, &iloc);
251 : }
252 :
253 0 : mutex_lock(&sbi->s_orphan_lock);
254 0 : ext4_debug("remove inode %lu from orphan list\n", inode->i_ino);
255 :
256 0 : prev = ei->i_orphan.prev;
257 0 : list_del_init(&ei->i_orphan);
258 :
259 : /* If we're on an error path, we may not have a valid
260 : * transaction handle with which to update the orphan list on
261 : * disk, but we still need to remove the inode from the linked
262 : * list in memory. */
263 0 : if (!handle || err) {
264 0 : mutex_unlock(&sbi->s_orphan_lock);
265 0 : goto out_err;
266 : }
267 :
268 0 : ino_next = NEXT_ORPHAN(inode);
269 0 : if (prev == &sbi->s_orphan) {
270 0 : ext4_debug("superblock will point to %u\n", ino_next);
271 0 : BUFFER_TRACE(sbi->s_sbh, "get_write_access");
272 0 : err = ext4_journal_get_write_access(handle, inode->i_sb,
273 : sbi->s_sbh, EXT4_JTR_NONE);
274 0 : if (err) {
275 0 : mutex_unlock(&sbi->s_orphan_lock);
276 0 : goto out_brelse;
277 : }
278 0 : lock_buffer(sbi->s_sbh);
279 0 : sbi->s_es->s_last_orphan = cpu_to_le32(ino_next);
280 0 : ext4_superblock_csum_set(inode->i_sb);
281 0 : unlock_buffer(sbi->s_sbh);
282 0 : mutex_unlock(&sbi->s_orphan_lock);
283 0 : err = ext4_handle_dirty_metadata(handle, NULL, sbi->s_sbh);
284 : } else {
285 0 : struct ext4_iloc iloc2;
286 0 : struct inode *i_prev =
287 0 : &list_entry(prev, struct ext4_inode_info, i_orphan)->vfs_inode;
288 :
289 0 : ext4_debug("orphan inode %lu will point to %u\n",
290 : i_prev->i_ino, ino_next);
291 0 : err = ext4_reserve_inode_write(handle, i_prev, &iloc2);
292 0 : if (err) {
293 0 : mutex_unlock(&sbi->s_orphan_lock);
294 0 : goto out_brelse;
295 : }
296 0 : NEXT_ORPHAN(i_prev) = ino_next;
297 0 : err = ext4_mark_iloc_dirty(handle, i_prev, &iloc2);
298 0 : mutex_unlock(&sbi->s_orphan_lock);
299 : }
300 0 : if (err)
301 0 : goto out_brelse;
302 0 : NEXT_ORPHAN(inode) = 0;
303 0 : err = ext4_mark_iloc_dirty(handle, inode, &iloc);
304 0 : out_err:
305 0 : ext4_std_error(inode->i_sb, err);
306 : return err;
307 :
308 0 : out_brelse:
309 0 : brelse(iloc.bh);
310 0 : goto out_err;
311 : }
312 :
313 : #ifdef CONFIG_QUOTA
314 0 : static int ext4_quota_on_mount(struct super_block *sb, int type)
315 : {
316 0 : return dquot_quota_on_mount(sb,
317 0 : rcu_dereference_protected(EXT4_SB(sb)->s_qf_names[type],
318 : lockdep_is_held(&sb->s_umount)),
319 : EXT4_SB(sb)->s_jquota_fmt, type);
320 : }
321 : #endif
322 :
323 0 : static void ext4_process_orphan(struct inode *inode,
324 : int *nr_truncates, int *nr_orphans)
325 : {
326 0 : struct super_block *sb = inode->i_sb;
327 0 : int ret;
328 :
329 0 : dquot_initialize(inode);
330 0 : if (inode->i_nlink) {
331 0 : if (test_opt(sb, DEBUG))
332 0 : ext4_msg(sb, KERN_DEBUG,
333 : "%s: truncating inode %lu to %lld bytes",
334 : __func__, inode->i_ino, inode->i_size);
335 0 : ext4_debug("truncating inode %lu to %lld bytes\n",
336 : inode->i_ino, inode->i_size);
337 0 : inode_lock(inode);
338 0 : truncate_inode_pages(inode->i_mapping, inode->i_size);
339 0 : ret = ext4_truncate(inode);
340 0 : if (ret) {
341 : /*
342 : * We need to clean up the in-core orphan list
343 : * manually if ext4_truncate() failed to get a
344 : * transaction handle.
345 : */
346 0 : ext4_orphan_del(NULL, inode);
347 0 : ext4_std_error(inode->i_sb, ret);
348 : }
349 0 : inode_unlock(inode);
350 0 : (*nr_truncates)++;
351 : } else {
352 0 : if (test_opt(sb, DEBUG))
353 0 : ext4_msg(sb, KERN_DEBUG,
354 : "%s: deleting unreferenced inode %lu",
355 : __func__, inode->i_ino);
356 0 : ext4_debug("deleting unreferenced inode %lu\n",
357 : inode->i_ino);
358 0 : (*nr_orphans)++;
359 : }
360 0 : iput(inode); /* The delete magic happens here! */
361 0 : }
362 :
363 : /* ext4_orphan_cleanup() walks a singly-linked list of inodes (starting at
364 : * the superblock) which were deleted from all directories, but held open by
365 : * a process at the time of a crash. We walk the list and try to delete these
366 : * inodes at recovery time (only with a read-write filesystem).
367 : *
368 : * In order to keep the orphan inode chain consistent during traversal (in
369 : * case of crash during recovery), we link each inode into the superblock
370 : * orphan list_head and handle it the same way as an inode deletion during
371 : * normal operation (which journals the operations for us).
372 : *
373 : * We only do an iget() and an iput() on each inode, which is very safe if we
374 : * accidentally point at an in-use or already deleted inode. The worst that
375 : * can happen in this case is that we get a "bit already cleared" message from
376 : * ext4_free_inode(). The only reason we would point at a wrong inode is if
377 : * e2fsck was run on this filesystem, and it must have already done the orphan
378 : * inode cleanup for us, so we can safely abort without any further action.
379 : */
380 0 : void ext4_orphan_cleanup(struct super_block *sb, struct ext4_super_block *es)
381 : {
382 0 : unsigned int s_flags = sb->s_flags;
383 0 : int nr_orphans = 0, nr_truncates = 0;
384 0 : struct inode *inode;
385 0 : int i, j;
386 : #ifdef CONFIG_QUOTA
387 0 : int quota_update = 0;
388 : #endif
389 0 : __le32 *bdata;
390 0 : struct ext4_orphan_info *oi = &EXT4_SB(sb)->s_orphan_info;
391 0 : int inodes_per_ob = ext4_inodes_per_orphan_block(sb);
392 :
393 0 : if (!es->s_last_orphan && !oi->of_blocks) {
394 : ext4_debug("no orphan inodes to clean up\n");
395 0 : return;
396 : }
397 :
398 0 : if (bdev_read_only(sb->s_bdev)) {
399 0 : ext4_msg(sb, KERN_ERR, "write access "
400 : "unavailable, skipping orphan cleanup");
401 0 : return;
402 : }
403 :
404 : /* Check if the feature set would not allow an r/w mount */
405 0 : if (!ext4_feature_set_ok(sb, 0)) {
406 0 : ext4_msg(sb, KERN_INFO, "Skipping orphan cleanup due to "
407 : "unknown ROCOMPAT features");
408 0 : return;
409 : }
410 :
411 0 : if (EXT4_SB(sb)->s_mount_state & EXT4_ERROR_FS) {
412 : /* don't clear list on RO mount w/ errors */
413 0 : if (es->s_last_orphan && !(s_flags & SB_RDONLY)) {
414 0 : ext4_msg(sb, KERN_INFO, "Errors on filesystem, "
415 : "clearing orphan list.");
416 0 : es->s_last_orphan = 0;
417 : }
418 0 : ext4_debug("Skipping orphan recovery on fs with errors.\n");
419 0 : return;
420 : }
421 :
422 0 : if (s_flags & SB_RDONLY) {
423 0 : ext4_msg(sb, KERN_INFO, "orphan cleanup on readonly fs");
424 0 : sb->s_flags &= ~SB_RDONLY;
425 : }
426 : #ifdef CONFIG_QUOTA
427 : /*
428 : * Turn on quotas which were not enabled for read-only mounts if
429 : * filesystem has quota feature, so that they are updated correctly.
430 : */
431 0 : if (ext4_has_feature_quota(sb) && (s_flags & SB_RDONLY)) {
432 0 : int ret = ext4_enable_quotas(sb);
433 :
434 0 : if (!ret)
435 : quota_update = 1;
436 : else
437 0 : ext4_msg(sb, KERN_ERR,
438 : "Cannot turn on quotas: error %d", ret);
439 : }
440 :
441 : /* Turn on journaled quotas used for old-style quota files */
442 0 : for (i = 0; i < EXT4_MAXQUOTAS; i++) {
443 0 : if (EXT4_SB(sb)->s_qf_names[i]) {
444 0 : int ret = ext4_quota_on_mount(sb, i);
445 :
446 0 : if (!ret)
447 : quota_update = 1;
448 : else
449 0 : ext4_msg(sb, KERN_ERR,
450 : "Cannot turn on journaled "
451 : "quota: type %d: error %d", i, ret);
452 : }
453 : }
454 : #endif
455 :
456 0 : while (es->s_last_orphan) {
457 : /*
458 : * We may have encountered an error during cleanup; if
459 : * so, skip the rest.
460 : */
461 0 : if (EXT4_SB(sb)->s_mount_state & EXT4_ERROR_FS) {
462 0 : ext4_debug("Skipping orphan recovery on fs with errors.\n");
463 0 : es->s_last_orphan = 0;
464 0 : break;
465 : }
466 :
467 0 : inode = ext4_orphan_get(sb, le32_to_cpu(es->s_last_orphan));
468 0 : if (IS_ERR(inode)) {
469 0 : es->s_last_orphan = 0;
470 0 : break;
471 : }
472 :
473 0 : list_add(&EXT4_I(inode)->i_orphan, &EXT4_SB(sb)->s_orphan);
474 0 : ext4_process_orphan(inode, &nr_truncates, &nr_orphans);
475 : }
476 :
477 0 : for (i = 0; i < oi->of_blocks; i++) {
478 0 : bdata = (__le32 *)(oi->of_binfo[i].ob_bh->b_data);
479 0 : for (j = 0; j < inodes_per_ob; j++) {
480 0 : if (!bdata[j])
481 0 : continue;
482 0 : inode = ext4_orphan_get(sb, le32_to_cpu(bdata[j]));
483 0 : if (IS_ERR(inode))
484 0 : continue;
485 0 : ext4_set_inode_state(inode, EXT4_STATE_ORPHAN_FILE);
486 0 : EXT4_I(inode)->i_orphan_idx = i * inodes_per_ob + j;
487 0 : ext4_process_orphan(inode, &nr_truncates, &nr_orphans);
488 : }
489 : }
490 :
491 : #define PLURAL(x) (x), ((x) == 1) ? "" : "s"
492 :
493 0 : if (nr_orphans)
494 0 : ext4_msg(sb, KERN_INFO, "%d orphan inode%s deleted",
495 : PLURAL(nr_orphans));
496 0 : if (nr_truncates)
497 0 : ext4_msg(sb, KERN_INFO, "%d truncate%s cleaned up",
498 : PLURAL(nr_truncates));
499 : #ifdef CONFIG_QUOTA
500 : /* Turn off quotas if they were enabled for orphan cleanup */
501 0 : if (quota_update) {
502 0 : for (i = 0; i < EXT4_MAXQUOTAS; i++) {
503 0 : if (sb_dqopt(sb)->files[i])
504 0 : dquot_quota_off(sb, i);
505 : }
506 : }
507 : #endif
508 0 : sb->s_flags = s_flags; /* Restore SB_RDONLY status */
509 : }
510 :
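The on-disk structure that the cleanup loop above walks is a simple singly-linked chain: the superblock's s_last_orphan holds the head inode number and each orphan inode stores the number of the next one, with zero terminating the chain. The userspace toy below (assumed inode numbers, plain arrays instead of on-disk inodes) only illustrates that shape and the head insertion done by ext4_orphan_add().

#include <stdio.h>

#define MAX_INO 16

int main(void)
{
	unsigned int next[MAX_INO] = { 0 };	/* per-inode "next orphan" field */
	unsigned int s_last_orphan = 0;		/* superblock head of the chain */
	unsigned int ino;

	/* orphan-add inserts at the head, as ext4_orphan_add() does */
	next[12] = s_last_orphan; s_last_orphan = 12;
	next[7]  = s_last_orphan; s_last_orphan = 7;
	next[3]  = s_last_orphan; s_last_orphan = 3;

	/* recovery walks the chain until it reaches the terminating zero */
	for (ino = s_last_orphan; ino != 0; ino = next[ino])
		printf("would process orphan inode %u\n", ino);
	return 0;
}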
511 0 : void ext4_release_orphan_info(struct super_block *sb)
512 : {
513 0 : int i;
514 0 : struct ext4_orphan_info *oi = &EXT4_SB(sb)->s_orphan_info;
515 :
516 0 : if (!oi->of_blocks)
517 : return;
518 0 : for (i = 0; i < oi->of_blocks; i++)
519 0 : brelse(oi->of_binfo[i].ob_bh);
520 0 : kfree(oi->of_binfo);
521 : }
522 :
523 : static struct ext4_orphan_block_tail *ext4_orphan_block_tail(
524 : struct super_block *sb,
525 : struct buffer_head *bh)
526 : {
527 0 : return (struct ext4_orphan_block_tail *)(bh->b_data + sb->s_blocksize -
528 : sizeof(struct ext4_orphan_block_tail));
529 : }
530 :
531 0 : static int ext4_orphan_file_block_csum_verify(struct super_block *sb,
532 : struct buffer_head *bh)
533 : {
534 0 : __u32 calculated;
535 0 : int inodes_per_ob = ext4_inodes_per_orphan_block(sb);
536 0 : struct ext4_orphan_info *oi = &EXT4_SB(sb)->s_orphan_info;
537 0 : struct ext4_orphan_block_tail *ot;
538 0 : __le64 dsk_block_nr = cpu_to_le64(bh->b_blocknr);
539 :
540 0 : if (!ext4_has_metadata_csum(sb))
541 : return 1;
542 :
543 0 : ot = ext4_orphan_block_tail(sb, bh);
544 0 : calculated = ext4_chksum(EXT4_SB(sb), oi->of_csum_seed,
545 : (__u8 *)&dsk_block_nr, sizeof(dsk_block_nr));
546 0 : calculated = ext4_chksum(EXT4_SB(sb), calculated, (__u8 *)bh->b_data,
547 : inodes_per_ob * sizeof(__u32));
548 0 : return le32_to_cpu(ot->ob_checksum) == calculated;
549 : }
550 :
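For reference, an orphan file block is laid out as an array of __le32 inode-number slots followed by a small tail holding a magic number and the checksum; the checksum computed above covers the on-disk block number and the slot array, not the tail itself. The standalone snippet below (assumed 4 KiB block size and a tail struct assumed to hold just the two fields used above) only illustrates the resulting capacity arithmetic.

#include <stdio.h>
#include <stdint.h>

struct orphan_block_tail {		/* assumed layout: just the two fields used above */
	uint32_t ob_magic;
	uint32_t ob_checksum;
};

int main(void)
{
	unsigned int blocksize = 4096;	/* assumed block size */
	unsigned int entries = (blocksize - sizeof(struct orphan_block_tail)) /
			       sizeof(uint32_t);

	/* the tail sits at the very end of the block, as located by
	 * ext4_orphan_block_tail() above */
	printf("slots per orphan block: %u, tail offset: %u\n",
	       entries, (unsigned int)(blocksize - sizeof(struct orphan_block_tail)));
	return 0;
}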
551 : /* This gets called only when checksumming is enabled */
552 0 : void ext4_orphan_file_block_trigger(struct jbd2_buffer_trigger_type *triggers,
553 : struct buffer_head *bh,
554 : void *data, size_t size)
555 : {
556 0 : struct super_block *sb = EXT4_TRIGGER(triggers)->sb;
557 0 : __u32 csum;
558 0 : int inodes_per_ob = ext4_inodes_per_orphan_block(sb);
559 0 : struct ext4_orphan_info *oi = &EXT4_SB(sb)->s_orphan_info;
560 0 : struct ext4_orphan_block_tail *ot;
561 0 : __le64 dsk_block_nr = cpu_to_le64(bh->b_blocknr);
562 :
563 0 : csum = ext4_chksum(EXT4_SB(sb), oi->of_csum_seed,
564 : (__u8 *)&dsk_block_nr, sizeof(dsk_block_nr));
565 0 : csum = ext4_chksum(EXT4_SB(sb), csum, (__u8 *)data,
566 : inodes_per_ob * sizeof(__u32));
567 0 : ot = ext4_orphan_block_tail(sb, bh);
568 0 : ot->ob_checksum = cpu_to_le32(csum);
569 0 : }
570 :
571 0 : int ext4_init_orphan_info(struct super_block *sb)
572 : {
573 0 : struct ext4_orphan_info *oi = &EXT4_SB(sb)->s_orphan_info;
574 0 : struct inode *inode;
575 0 : int i, j;
576 0 : int ret;
577 0 : int free;
578 0 : __le32 *bdata;
579 0 : int inodes_per_ob = ext4_inodes_per_orphan_block(sb);
580 0 : struct ext4_orphan_block_tail *ot;
581 0 : ino_t orphan_ino = le32_to_cpu(EXT4_SB(sb)->s_es->s_orphan_file_inum);
582 :
583 0 : if (!ext4_has_feature_orphan_file(sb))
584 : return 0;
585 :
586 0 : inode = ext4_iget(sb, orphan_ino, EXT4_IGET_SPECIAL);
587 0 : if (IS_ERR(inode)) {
588 0 : ext4_msg(sb, KERN_ERR, "get orphan inode failed");
589 0 : return PTR_ERR(inode);
590 : }
591 0 : oi->of_blocks = inode->i_size >> sb->s_blocksize_bits;
592 0 : oi->of_csum_seed = EXT4_I(inode)->i_csum_seed;
593 0 : oi->of_binfo = kmalloc(oi->of_blocks*sizeof(struct ext4_orphan_block),
594 : GFP_KERNEL);
595 0 : if (!oi->of_binfo) {
596 0 : ret = -ENOMEM;
597 0 : goto out_put;
598 : }
599 0 : for (i = 0; i < oi->of_blocks; i++) {
600 0 : oi->of_binfo[i].ob_bh = ext4_bread(NULL, inode, i, 0);
601 0 : if (IS_ERR(oi->of_binfo[i].ob_bh)) {
602 0 : ret = PTR_ERR(oi->of_binfo[i].ob_bh);
603 0 : goto out_free;
604 : }
605 0 : if (!oi->of_binfo[i].ob_bh) {
606 0 : ret = -EIO;
607 0 : goto out_free;
608 : }
609 0 : ot = ext4_orphan_block_tail(sb, oi->of_binfo[i].ob_bh);
610 0 : if (le32_to_cpu(ot->ob_magic) != EXT4_ORPHAN_BLOCK_MAGIC) {
611 0 : ext4_error(sb, "orphan file block %d: bad magic", i);
612 0 : ret = -EIO;
613 0 : goto out_free;
614 : }
615 0 : if (!ext4_orphan_file_block_csum_verify(sb,
616 : oi->of_binfo[i].ob_bh)) {
617 0 : ext4_error(sb, "orphan file block %d: bad checksum", i);
618 0 : ret = -EIO;
619 0 : goto out_free;
620 : }
621 0 : bdata = (__le32 *)(oi->of_binfo[i].ob_bh->b_data);
622 0 : free = 0;
623 0 : for (j = 0; j < inodes_per_ob; j++)
624 0 : if (bdata[j] == 0)
625 0 : free++;
626 0 : atomic_set(&oi->of_binfo[i].ob_free_entries, free);
627 : }
628 0 : iput(inode);
629 0 : return 0;
630 0 : out_free:
631 0 : for (i--; i >= 0; i--)
632 0 : brelse(oi->of_binfo[i].ob_bh);
633 0 : kfree(oi->of_binfo);
634 0 : out_put:
635 0 : iput(inode);
636 0 : return ret;
637 : }
638 :
639 0 : int ext4_orphan_file_empty(struct super_block *sb)
640 : {
641 0 : struct ext4_orphan_info *oi = &EXT4_SB(sb)->s_orphan_info;
642 0 : int i;
643 0 : int inodes_per_ob = ext4_inodes_per_orphan_block(sb);
644 :
645 0 : if (!ext4_has_feature_orphan_file(sb))
646 : return 1;
647 0 : for (i = 0; i < oi->of_blocks; i++)
648 0 : if (atomic_read(&oi->of_binfo[i].ob_free_entries) !=
649 : inodes_per_ob)
650 : return 0;
651 : return 1;
652 : }