Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

Merge tag 'fs_for_v7.1-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/jack/linux-fs

Pull ext2, udf, quota updates from Jan Kara:

- A fix for a race in quota code that can expose ocfs2 to
use-after-free issues

- UDF fix to avoid memory corruption in the face of a corrupted format

- A couple of ext2 fixes for better handling of fs corruption

- Various other code cleanups in UDF & ext2

* tag 'fs_for_v7.1-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/jack/linux-fs:
ext2: reject inodes with zero i_nlink and valid mode in ext2_iget()
ext2: use get_random_u32() where appropriate
quota: Fix race of dquot_scan_active() with quota deactivation
udf: fix partition descriptor append bookkeeping
ext2: avoid drop_nlink() during unlink of zero-nlink inode in ext2_unlink()
ext2: guard reservation window dump with EXT2FS_DEBUG
ext2: replace BUG_ON with WARN_ON_ONCE in ext2_get_blocks
ext2: remove stale TODO about kmap
fs: udf: avoid assignment in condition when selecting allocation goal

+59 -33
+2 -2
fs/ext2/balloc.c
··· 201 201 * windows(start, end). Otherwise, it will only print out the "bad" windows, 202 202 * those windows that overlap with their immediate neighbors. 203 203 */ 204 - #if 1 204 + #ifdef EXT2FS_DEBUG 205 205 static void __rsv_window_dump(struct rb_root *root, int verbose, 206 206 const char *fn) 207 207 { ··· 248 248 __rsv_window_dump((root), (verbose), __func__) 249 249 #else 250 250 #define rsv_window_dump(root, verbose) do {} while (0) 251 - #endif 251 + #endif /* EXT2FS_DEBUG */ 252 252 253 253 /** 254 254 * goal_in_my_reservation()
+13 -4
fs/ext2/inode.c
··· 639 639 int count = 0; 640 640 ext2_fsblk_t first_block = 0; 641 641 642 - BUG_ON(maxblocks == 0); 642 + if (WARN_ON_ONCE(maxblocks == 0)) 643 + return -EINVAL; 643 644 644 645 depth = ext2_block_to_path(inode,iblock,offsets,&blocks_to_boundary); 645 646 ··· 1434 1433 * the test is that same one that e2fsck uses 1435 1434 * NeilBrown 1999oct15 1436 1435 */ 1437 - if (inode->i_nlink == 0 && (inode->i_mode == 0 || ei->i_dtime)) { 1438 - /* this inode is deleted */ 1439 - ret = -ESTALE; 1436 + if (inode->i_nlink == 0) { 1437 + if (inode->i_mode == 0 || ei->i_dtime) { 1438 + /* this inode is deleted */ 1439 + ret = -ESTALE; 1440 + } else { 1441 + ext2_error(sb, __func__, 1442 + "inode %lu has zero i_nlink with mode 0%o and no dtime, " 1443 + "filesystem may be corrupt", 1444 + ino, inode->i_mode); 1445 + ret = -EFSCORRUPTED; 1446 + } 1440 1447 goto bad_inode; 1441 1448 } 1442 1449 inode->i_blocks = le32_to_cpu(raw_inode->i_blocks);
+4 -3
fs/ext2/namei.c
··· 14 14 * 15 15 * The only non-static object here is ext2_dir_inode_operations. 16 16 * 17 - * TODO: get rid of kmap() use, add readahead. 18 - * 19 17 * Copyright (C) 1992, 1993, 1994, 1995 20 18 * Remy Card (card@masi.ibp.fr) 21 19 * Laboratoire MASI - Institut Blaise Pascal ··· 291 293 goto out; 292 294 293 295 inode_set_ctime_to_ts(inode, inode_get_ctime(dir)); 294 - inode_dec_link_count(inode); 296 + 297 + if (inode->i_nlink) 298 + inode_dec_link_count(inode); 299 + 295 300 err = 0; 296 301 out: 297 302 return err;
+1 -1
fs/ext2/super.c
··· 1152 1152 goto failed_mount2; 1153 1153 } 1154 1154 sbi->s_gdb_count = db_count; 1155 - get_random_bytes(&sbi->s_next_generation, sizeof(u32)); 1155 + sbi->s_next_generation = get_random_u32(); 1156 1156 spin_lock_init(&sbi->s_next_gen_lock); 1157 1157 1158 1158 /* per filesystem reservation list head & lock */
+30 -8
fs/quota/dquot.c
··· 363 363 return test_bit(DQ_ACTIVE_B, &dquot->dq_flags); 364 364 } 365 365 366 + static struct dquot *__dqgrab(struct dquot *dquot) 367 + { 368 + lockdep_assert_held(&dq_list_lock); 369 + if (!atomic_read(&dquot->dq_count)) 370 + remove_free_dquot(dquot); 371 + atomic_inc(&dquot->dq_count); 372 + return dquot; 373 + } 374 + 375 + /* 376 + * Get reference to dquot when we got pointer to it by some other means. The 377 + * dquot has to be active and the caller has to make sure it cannot get 378 + * deactivated under our hands. 379 + */ 380 + struct dquot *dqgrab(struct dquot *dquot) 381 + { 382 + spin_lock(&dq_list_lock); 383 + WARN_ON_ONCE(!dquot_active(dquot)); 384 + dquot = __dqgrab(dquot); 385 + spin_unlock(&dq_list_lock); 386 + 387 + return dquot; 388 + } 389 + EXPORT_SYMBOL_GPL(dqgrab); 390 + 366 391 static inline int dquot_dirty(struct dquot *dquot) 367 392 { 368 393 return test_bit(DQ_MOD_B, &dquot->dq_flags); ··· 666 641 continue; 667 642 if (dquot->dq_sb != sb) 668 643 continue; 669 - /* Now we have active dquot so we can just increase use count */ 670 - atomic_inc(&dquot->dq_count); 644 + __dqgrab(dquot); 671 645 spin_unlock(&dq_list_lock); 672 646 dqput(old_dquot); 673 647 old_dquot = dquot; 674 648 /* 675 649 * ->release_dquot() can be racing with us. Our reference 676 - * protects us from new calls to it so just wait for any 677 - * outstanding call and recheck the DQ_ACTIVE_B after that. 650 + * protects us from dquot_release() proceeding so just wait for 651 + * any outstanding call and recheck the DQ_ACTIVE_B after that. 
678 652 */ 679 653 wait_on_dquot(dquot); 680 654 if (dquot_active(dquot)) { ··· 741 717 /* Now we have active dquot from which someone is 742 718 * holding reference so we can safely just increase 743 719 * use count */ 744 - dqgrab(dquot); 720 + __dqgrab(dquot); 745 721 spin_unlock(&dq_list_lock); 746 722 err = dquot_write_dquot(dquot); 747 723 if (err && !ret) ··· 987 963 spin_unlock(&dq_list_lock); 988 964 dqstats_inc(DQST_LOOKUPS); 989 965 } else { 990 - if (!atomic_read(&dquot->dq_count)) 991 - remove_free_dquot(dquot); 992 - atomic_inc(&dquot->dq_count); 966 + __dqgrab(dquot); 993 967 spin_unlock(&dq_list_lock); 994 968 dqstats_inc(DQST_CACHE_HITS); 995 969 dqstats_inc(DQST_LOOKUPS);
+5 -6
fs/udf/inode.c
··· 733 733 sector_t offset = 0; 734 734 int8_t etype, tmpetype; 735 735 struct udf_inode_info *iinfo = UDF_I(inode); 736 - udf_pblk_t goal = 0, pgoal = iinfo->i_location.logicalBlockNum; 736 + udf_pblk_t goal = 0, pgoal = 0; 737 737 int lastblock = 0; 738 738 bool isBeyondEOF = false; 739 739 int ret = 0; ··· 892 892 else { /* otherwise, allocate a new block */ 893 893 if (iinfo->i_next_alloc_block == map->lblk) 894 894 goal = iinfo->i_next_alloc_goal; 895 - 896 - if (!goal) { 897 - if (!(goal = pgoal)) /* XXX: what was intended here? */ 898 - goal = iinfo->i_location.logicalBlockNum + 1; 899 - } 895 + if (!goal) 896 + goal = pgoal; 897 + if (!goal) 898 + goal = iinfo->i_location.logicalBlockNum + 1; 900 899 901 900 newblocknum = udf_new_block(inode->i_sb, inode, 902 901 iinfo->i_location.partitionReferenceNum,
+3 -1
fs/udf/super.c
··· 1695 1695 return &(data->part_descs_loc[i].rec); 1696 1696 if (data->num_part_descs >= data->size_part_descs) { 1697 1697 struct part_desc_seq_scan_data *new_loc; 1698 - unsigned int new_size = ALIGN(partnum, PART_DESC_ALLOC_STEP); 1698 + unsigned int new_size; 1699 1699 1700 + new_size = data->num_part_descs + PART_DESC_ALLOC_STEP; 1700 1701 new_loc = kzalloc_objs(*new_loc, new_size); 1701 1702 if (!new_loc) 1702 1703 return ERR_PTR(-ENOMEM); ··· 1707 1706 data->part_descs_loc = new_loc; 1708 1707 data->size_part_descs = new_size; 1709 1708 } 1709 + data->part_descs_loc[data->num_part_descs].partnum = partnum; 1710 1710 return &(data->part_descs_loc[data->num_part_descs++].rec); 1711 1711 } 1712 1712
+1 -8
include/linux/quotaops.h
··· 44 44 bool dquot_initialize_needed(struct inode *inode); 45 45 void dquot_drop(struct inode *inode); 46 46 struct dquot *dqget(struct super_block *sb, struct kqid qid); 47 - static inline struct dquot *dqgrab(struct dquot *dquot) 48 - { 49 - /* Make sure someone else has active reference to dquot */ 50 - WARN_ON_ONCE(!atomic_read(&dquot->dq_count)); 51 - WARN_ON_ONCE(!test_bit(DQ_ACTIVE_B, &dquot->dq_flags)); 52 - atomic_inc(&dquot->dq_count); 53 - return dquot; 54 - } 47 + struct dquot *dqgrab(struct dquot *dquot); 55 48 56 49 static inline bool dquot_is_busy(struct dquot *dquot) 57 50 {