Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

Merge tag 'for-7.0-rc1-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/kdave/linux

Pull btrfs fixes from David Sterba:

- multiple fixes to error handling of unexpected conditions

- reset a block group's size class once it becomes empty so that
its class can be changed

- error message level adjustments

- fixes of returned error values

- use correct block reserve for delayed refs

* tag 'for-7.0-rc1-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/kdave/linux:
btrfs: fix invalid leaf access in btrfs_quota_enable() if ref key not found
btrfs: fix lost error return in btrfs_find_orphan_roots()
btrfs: fix lost return value on error in finish_verity()
btrfs: change unaligned root messages to error level in btrfs_validate_super()
btrfs: use the correct type to initialize block reserve for delayed refs
btrfs: do not ASSERT() when the fs flips RO inside btrfs_repair_io_failure()
btrfs: reset block group size class when it becomes empty
btrfs: replace BUG() with error handling in __btrfs_balance()
btrfs: handle unexpected exact match in btrfs_set_inode_index_count()

+56 -21
+7 -1
fs/btrfs/bio.c
··· 934 934 struct bio *bio = NULL; 935 935 int ret = 0; 936 936 937 - ASSERT(!(fs_info->sb->s_flags & SB_RDONLY)); 938 937 BUG_ON(!mirror_num); 939 938 940 939 /* Basic alignment checks. */ ··· 944 945 ASSERT(length <= BTRFS_MAX_BLOCKSIZE); 945 946 ASSERT(step <= length); 946 947 ASSERT(is_power_of_2(step)); 948 + 949 + /* 950 + * The fs either mounted RO or hit critical errors, no need 951 + * to continue repairing. 952 + */ 953 + if (unlikely(sb_rdonly(fs_info->sb))) 954 + return 0; 947 955 948 956 if (btrfs_repair_one_zone(fs_info, logical)) 949 957 return 0;
+10
fs/btrfs/block-group.c
··· 3760 3760 return ret; 3761 3761 } 3762 3762 3763 + static void btrfs_maybe_reset_size_class(struct btrfs_block_group *bg) 3764 + { 3765 + lockdep_assert_held(&bg->lock); 3766 + if (btrfs_block_group_should_use_size_class(bg) && 3767 + bg->used == 0 && bg->reserved == 0) 3768 + bg->size_class = BTRFS_BG_SZ_NONE; 3769 + } 3770 + 3763 3771 int btrfs_update_block_group(struct btrfs_trans_handle *trans, 3764 3772 u64 bytenr, u64 num_bytes, bool alloc) 3765 3773 { ··· 3832 3824 old_val -= num_bytes; 3833 3825 cache->used = old_val; 3834 3826 cache->pinned += num_bytes; 3827 + btrfs_maybe_reset_size_class(cache); 3835 3828 btrfs_space_info_update_bytes_pinned(space_info, num_bytes); 3836 3829 space_info->bytes_used -= num_bytes; 3837 3830 space_info->disk_used -= num_bytes * factor; ··· 3961 3952 spin_lock(&cache->lock); 3962 3953 bg_ro = cache->ro; 3963 3954 cache->reserved -= num_bytes; 3955 + btrfs_maybe_reset_size_class(cache); 3964 3956 if (is_delalloc) 3965 3957 cache->delalloc_bytes -= num_bytes; 3966 3958 spin_unlock(&cache->lock);
+4 -3
fs/btrfs/block-rsv.c
··· 276 276 struct btrfs_block_rsv *target = NULL; 277 277 278 278 /* 279 - * If we are a delayed block reserve then push to the global rsv, 280 - * otherwise dump into the global delayed reserve if it is not full. 279 + * If we are a delayed refs block reserve then push to the global 280 + * reserve, otherwise dump into the global delayed refs reserve if it is 281 + * not full. 281 282 */ 282 - if (block_rsv->type == BTRFS_BLOCK_RSV_DELOPS) 283 + if (block_rsv->type == BTRFS_BLOCK_RSV_DELREFS) 283 284 target = global_rsv; 284 285 else if (block_rsv != global_rsv && !btrfs_block_rsv_full(delayed_rsv)) 285 286 target = delayed_rsv;
+5 -5
fs/btrfs/disk-io.c
··· 2416 2416 2417 2417 /* Root alignment check */ 2418 2418 if (!IS_ALIGNED(btrfs_super_root(sb), sectorsize)) { 2419 - btrfs_warn(fs_info, "tree_root block unaligned: %llu", 2420 - btrfs_super_root(sb)); 2419 + btrfs_err(fs_info, "tree_root block unaligned: %llu", 2420 + btrfs_super_root(sb)); 2421 2421 ret = -EINVAL; 2422 2422 } 2423 2423 if (!IS_ALIGNED(btrfs_super_chunk_root(sb), sectorsize)) { 2424 - btrfs_warn(fs_info, "chunk_root block unaligned: %llu", 2424 + btrfs_err(fs_info, "chunk_root block unaligned: %llu", 2425 2425 btrfs_super_chunk_root(sb)); 2426 2426 ret = -EINVAL; 2427 2427 } 2428 2428 if (!IS_ALIGNED(btrfs_super_log_root(sb), sectorsize)) { 2429 - btrfs_warn(fs_info, "log_root block unaligned: %llu", 2430 - btrfs_super_log_root(sb)); 2429 + btrfs_err(fs_info, "log_root block unaligned: %llu", 2430 + btrfs_super_log_root(sb)); 2431 2431 ret = -EINVAL; 2432 2432 } 2433 2433
+12 -3
fs/btrfs/inode.c
··· 6146 6146 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 6147 6147 if (ret < 0) 6148 6148 return ret; 6149 - /* FIXME: we should be able to handle this */ 6150 - if (ret == 0) 6151 - return ret; 6149 + 6150 + if (unlikely(ret == 0)) { 6151 + /* 6152 + * Key with offset -1 found, there would have to exist a dir 6153 + * index item with such offset, but this is out of the valid 6154 + * range. 6155 + */ 6156 + btrfs_err(root->fs_info, 6157 + "unexpected exact match for DIR_INDEX key, inode %llu", 6158 + btrfs_ino(inode)); 6159 + return -EUCLEAN; 6160 + } 6152 6161 6153 6162 if (path->slots[0] == 0) { 6154 6163 inode->index_cnt = BTRFS_DIR_START_INDEX;
+7 -4
fs/btrfs/qgroup.c
··· 1169 1169 } 1170 1170 if (ret > 0) { 1171 1171 /* 1172 - * Shouldn't happen, but in case it does we 1173 - * don't need to do the btrfs_next_item, just 1174 - * continue. 1172 + * Shouldn't happen because the key should still 1173 + * be there (return 0), but in case it does it 1174 + * means we have reached the end of the tree - 1175 + * there are no more leaves with items that have 1176 + * a key greater than or equals to @found_key, 1177 + * so just stop the search loop. 1175 1178 */ 1176 - continue; 1179 + break; 1177 1180 } 1178 1181 } 1179 1182 ret = btrfs_next_item(tree_root, path);
+1 -1
fs/btrfs/root-tree.c
··· 257 257 root = btrfs_get_fs_root(fs_info, root_objectid, false); 258 258 ret = PTR_ERR_OR_ZERO(root); 259 259 if (ret && ret != -ENOENT) { 260 - break; 260 + return ret; 261 261 } else if (ret == -ENOENT) { 262 262 struct btrfs_trans_handle *trans; 263 263
+1 -1
fs/btrfs/transaction.c
··· 726 726 727 727 h->type = type; 728 728 INIT_LIST_HEAD(&h->new_bgs); 729 - btrfs_init_metadata_block_rsv(fs_info, &h->delayed_rsv, BTRFS_BLOCK_RSV_DELOPS); 729 + btrfs_init_metadata_block_rsv(fs_info, &h->delayed_rsv, BTRFS_BLOCK_RSV_DELREFS); 730 730 731 731 smp_mb(); 732 732 if (cur_trans->state >= TRANS_STATE_COMMIT_START &&
+1 -1
fs/btrfs/verity.c
··· 552 552 btrfs_set_fs_compat_ro(root->fs_info, VERITY); 553 553 end_trans: 554 554 btrfs_end_transaction(trans); 555 - return 0; 555 + return ret; 556 556 557 557 } 558 558
+8 -2
fs/btrfs/volumes.c
··· 4367 4367 * this shouldn't happen, it means the last relocate 4368 4368 * failed 4369 4369 */ 4370 - if (ret == 0) 4371 - BUG(); /* FIXME break ? */ 4370 + if (unlikely(ret == 0)) { 4371 + btrfs_err(fs_info, 4372 + "unexpected exact match of CHUNK_ITEM in chunk tree, offset 0x%llx", 4373 + key.offset); 4374 + mutex_unlock(&fs_info->reclaim_bgs_lock); 4375 + ret = -EUCLEAN; 4376 + goto error; 4377 + } 4372 4378 4373 4379 ret = btrfs_previous_item(chunk_root, path, 0, 4374 4380 BTRFS_CHUNK_ITEM_KEY);