Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

Merge tag 'for-5.3-rc4-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/kdave/linux

Pull btrfs fixes from David Sterba:
 "Two fixes that popped up during testing:

  - fix for sysfs-related code that adds/removes block groups, warnings
    appear during several fstests in connection with sysfs updates in
    5.3, the fix essentially replaces a workaround with scope NOFS and
    applies to 5.2-based branch too

  - add sanity check of trim range"

* tag 'for-5.3-rc4-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/kdave/linux:
  btrfs: trim: Check the range passed into to prevent overflow
  Btrfs: fix sysfs warning and missing raid sysfs directories
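
For readers unfamiliar with the scope NOFS API the sysfs fix switches to:
memalloc_nofs_save() and memalloc_nofs_restore() (<linux/sched/mm.h>) mark a
region in which every allocation is implicitly treated as GFP_NOFS, including
GFP_KERNEL allocations made deep inside callees such as kobject_add() that
cannot take a gfp_t argument. A minimal sketch of the pattern follows; the
helper example_publish_kobject() is a hypothetical name for illustration, not
btrfs code:

	#include <linux/kobject.h>
	#include <linux/sched/mm.h>	/* memalloc_nofs_save()/restore() */

	/*
	 * Illustrative sketch: publish a kobject from a context that may
	 * hold filesystem locks or a transaction handle. Inside the NOFS
	 * scope, direct reclaim will not re-enter the filesystem, so the
	 * GFP_KERNEL allocations buried in kobject_add() cannot deadlock.
	 */
	static int example_publish_kobject(struct kobject *kobj,
					   struct kobject *parent,
					   const char *name)
	{
		unsigned int nofs_flag;
		int ret;

		nofs_flag = memalloc_nofs_save();
		ret = kobject_add(kobj, parent, "%s", name);
		memalloc_nofs_restore(nofs_flag);
		if (ret)
			kobject_put(kobj);	/* drop ref from kobject_init() */
		return ret;
	}

This is what lets the patch delete the deferred pending_raid_kobjs machinery:
the kobject can be added on the spot instead of being queued until a
reclaim-safe moment.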

4 files changed: +35 -55

fs/btrfs/ctree.h (-4)
···
 struct raid_kobject {
 	u64 flags;
 	struct kobject kobj;
-	struct list_head list;
 };
 
 /*
···
 	u32 thread_pool_size;
 
 	struct kobject *space_info_kobj;
-	struct list_head pending_raid_kobjs;
-	spinlock_t pending_raid_kobjs_lock; /* uncontended */
 
 	u64 total_pinned;
···
 int btrfs_make_block_group(struct btrfs_trans_handle *trans,
 			   u64 bytes_used, u64 type, u64 chunk_offset,
 			   u64 size);
-void btrfs_add_raid_kobjects(struct btrfs_fs_info *fs_info);
 struct btrfs_trans_handle *btrfs_start_trans_remove_block_group(
 				struct btrfs_fs_info *fs_info,
 				const u64 chunk_offset);

fs/btrfs/disk-io.c (-2)
···
 	INIT_LIST_HEAD(&fs_info->delayed_iputs);
 	INIT_LIST_HEAD(&fs_info->delalloc_roots);
 	INIT_LIST_HEAD(&fs_info->caching_block_groups);
-	INIT_LIST_HEAD(&fs_info->pending_raid_kobjs);
-	spin_lock_init(&fs_info->pending_raid_kobjs_lock);
 	spin_lock_init(&fs_info->delalloc_root_lock);
 	spin_lock_init(&fs_info->trans_lock);
 	spin_lock_init(&fs_info->fs_roots_radix_lock);

fs/btrfs/extent-tree.c (+35 -36)
···
  */
 
 #include <linux/sched.h>
+#include <linux/sched/mm.h>
 #include <linux/sched/signal.h>
 #include <linux/pagemap.h>
 #include <linux/writeback.h>
···
 	return 0;
 }
 
-/* link_block_group will queue up kobjects to add when we're reclaim-safe */
-void btrfs_add_raid_kobjects(struct btrfs_fs_info *fs_info)
-{
-	struct btrfs_space_info *space_info;
-	struct raid_kobject *rkobj;
-	LIST_HEAD(list);
-	int ret = 0;
-
-	spin_lock(&fs_info->pending_raid_kobjs_lock);
-	list_splice_init(&fs_info->pending_raid_kobjs, &list);
-	spin_unlock(&fs_info->pending_raid_kobjs_lock);
-
-	list_for_each_entry(rkobj, &list, list) {
-		space_info = btrfs_find_space_info(fs_info, rkobj->flags);
-
-		ret = kobject_add(&rkobj->kobj, &space_info->kobj,
-				  "%s", btrfs_bg_type_to_raid_name(rkobj->flags));
-		if (ret) {
-			kobject_put(&rkobj->kobj);
-			break;
-		}
-	}
-	if (ret)
-		btrfs_warn(fs_info,
-			   "failed to add kobject for block cache, ignoring");
-}
-
 static void link_block_group(struct btrfs_block_group_cache *cache)
 {
 	struct btrfs_space_info *space_info = cache->space_info;
···
 	up_write(&space_info->groups_sem);
 
 	if (first) {
-		struct raid_kobject *rkobj = kzalloc(sizeof(*rkobj), GFP_NOFS);
+		struct raid_kobject *rkobj;
+		unsigned int nofs_flag;
+		int ret;
+
+		/*
+		 * Setup a NOFS context because kobject_add(), deep in its call
+		 * chain, does GFP_KERNEL allocations, and we are often called
+		 * in a context where if reclaim is triggered we can deadlock
+		 * (we are either holding a transaction handle or some lock
+		 * required for a transaction commit).
+		 */
+		nofs_flag = memalloc_nofs_save();
+		rkobj = kzalloc(sizeof(*rkobj), GFP_KERNEL);
 		if (!rkobj) {
+			memalloc_nofs_restore(nofs_flag);
 			btrfs_warn(cache->fs_info,
 				   "couldn't alloc memory for raid level kobject");
 			return;
 		}
 		rkobj->flags = cache->flags;
 		kobject_init(&rkobj->kobj, &btrfs_raid_ktype);
-
-		spin_lock(&fs_info->pending_raid_kobjs_lock);
-		list_add_tail(&rkobj->list, &fs_info->pending_raid_kobjs);
-		spin_unlock(&fs_info->pending_raid_kobjs_lock);
+		ret = kobject_add(&rkobj->kobj, &space_info->kobj, "%s",
+				  btrfs_bg_type_to_raid_name(rkobj->flags));
+		memalloc_nofs_restore(nofs_flag);
+		if (ret) {
+			kobject_put(&rkobj->kobj);
+			btrfs_warn(fs_info,
+				   "failed to add kobject for block cache, ignoring");
+			return;
+		}
 		space_info->block_group_kobjs[index] = &rkobj->kobj;
 	}
 }
···
 		inc_block_group_ro(cache, 1);
 	}
 
-	btrfs_add_raid_kobjects(info);
 	btrfs_init_global_block_rsv(info);
 	ret = check_chunk_block_group_mappings(info);
 error:
···
 	struct btrfs_device *device;
 	struct list_head *devices;
 	u64 group_trimmed;
+	u64 range_end = U64_MAX;
 	u64 start;
 	u64 end;
 	u64 trimmed = 0;
···
 	int dev_ret = 0;
 	int ret = 0;
 
+	/*
+	 * Check range overflow if range->len is set.
+	 * The default range->len is U64_MAX.
+	 */
+	if (range->len != U64_MAX &&
+	    check_add_overflow(range->start, range->len, &range_end))
+		return -EINVAL;
+
 	cache = btrfs_lookup_first_block_group(fs_info, range->start);
 	for (; cache; cache = next_block_group(cache)) {
-		if (cache->key.objectid >= (range->start + range->len)) {
+		if (cache->key.objectid >= range_end) {
 			btrfs_put_block_group(cache);
 			break;
 		}
 
 		start = max(range->start, cache->key.objectid);
-		end = min(range->start + range->len,
-			  cache->key.objectid + cache->key.offset);
+		end = min(range_end, cache->key.objectid + cache->key.offset);
 
 		if (end - start >= range->minlen) {
 			if (!block_group_cache_done(cache)) {
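
The trim fix above is built on check_add_overflow() from <linux/overflow.h>,
which evaluates to true when the addition wraps and stores the (wrapped) sum
through its third argument. A sketch of the same validation in isolation;
example_clamp_range() is a hypothetical name, not a btrfs function:

	#include <linux/errno.h>
	#include <linux/kernel.h>	/* U64_MAX */
	#include <linux/overflow.h>	/* check_add_overflow() */
	#include <linux/types.h>

	/*
	 * Illustrative sketch: validate an untrusted [start, start + len)
	 * range before "start + len" is used in comparisons. On overflow
	 * we reject the range instead of letting it wrap past zero.
	 */
	static int example_clamp_range(u64 start, u64 len, u64 *range_end)
	{
		/* len == U64_MAX conventionally means "to the end of the device" */
		*range_end = U64_MAX;
		if (len != U64_MAX && check_add_overflow(start, len, range_end))
			return -EINVAL;
		return 0;
	}

With range_end clamped this way, a comparison such as
"cache->key.objectid >= range_end" can no longer be bypassed by a
user-supplied start + len that wraps around u64.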

fs/btrfs/volumes.c (-13)
···
 	if (ret)
 		return ret;
 
-	/*
-	 * We add the kobjects here (and after forcing data chunk creation)
-	 * since relocation is the only place we'll create chunks of a new
-	 * type at runtime. The only place where we'll remove the last
-	 * chunk of a type is the call immediately below this one. Even
-	 * so, we're protected against races with the cleaner thread since
-	 * we're covered by the delete_unused_bgs_mutex.
-	 */
-	btrfs_add_raid_kobjects(fs_info);
-
 	trans = btrfs_start_trans_remove_block_group(root->fs_info,
 						     chunk_offset);
 	if (IS_ERR(trans)) {
···
 	btrfs_end_transaction(trans);
 	if (ret < 0)
 		return ret;
-
-	btrfs_add_raid_kobjects(fs_info);
-
 	return 1;
 }