Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

Merge tag 'for-6.2-rc7-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/kdave/linux

Pull btrfs fixes from David Sterba:

- one more fix for a tree-log 'write time corruption' report: update
the last dir index directly and don't keep it in the log context

- do VFS-level inode lock around FIEMAP to prevent a deadlock with
concurrent fsync, the extent-level lock is not sufficient

- don't cache a single-device filesystem device to avoid cases when a
loop device is reformatted and the entry gets stale

* tag 'for-6.2-rc7-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/kdave/linux:
btrfs: free device in btrfs_close_devices for a single device filesystem
btrfs: lock the inode in shared mode before starting fiemap
btrfs: simplify update of last_dir_index_offset when logging a directory

+34 -9
+2
fs/btrfs/extent_io.c
··· 3826 3826 lockend = round_up(start + len, inode->root->fs_info->sectorsize); 3827 3827 prev_extent_end = lockstart; 3828 3828 3829 + btrfs_inode_lock(inode, BTRFS_ILOCK_SHARED); 3829 3830 lock_extent(&inode->io_tree, lockstart, lockend, &cached_state); 3830 3831 3831 3832 ret = fiemap_find_last_extent_offset(inode, path, &last_extent_end); ··· 4020 4019 4021 4020 out_unlock: 4022 4021 unlock_extent(&inode->io_tree, lockstart, lockend, &cached_state); 4022 + btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED); 4023 4023 out: 4024 4024 free_extent_state(delalloc_cached_state); 4025 4025 btrfs_free_backref_share_ctx(backref_ctx);
+17 -6
fs/btrfs/tree-log.c
··· 3576 3576 } 3577 3577 3578 3578 static int flush_dir_items_batch(struct btrfs_trans_handle *trans, 3579 - struct btrfs_root *log, 3579 + struct btrfs_inode *inode, 3580 3580 struct extent_buffer *src, 3581 3581 struct btrfs_path *dst_path, 3582 3582 int start_slot, 3583 3583 int count) 3584 3584 { 3585 + struct btrfs_root *log = inode->root->log_root; 3585 3586 char *ins_data = NULL; 3586 3587 struct btrfs_item_batch batch; 3587 3588 struct extent_buffer *dst; 3588 3589 unsigned long src_offset; 3589 3590 unsigned long dst_offset; 3591 + u64 last_index; 3590 3592 struct btrfs_key key; 3591 3593 u32 item_size; 3592 3594 int ret; ··· 3646 3644 src_offset = btrfs_item_ptr_offset(src, start_slot + count - 1); 3647 3645 copy_extent_buffer(dst, src, dst_offset, src_offset, batch.total_data_size); 3648 3646 btrfs_release_path(dst_path); 3647 + 3648 + last_index = batch.keys[count - 1].offset; 3649 + ASSERT(last_index > inode->last_dir_index_offset); 3650 + 3651 + /* 3652 + * If for some unexpected reason the last item's index is not greater 3653 + * than the last index we logged, warn and return an error to fallback 3654 + * to a transaction commit. 
3655 + */ 3656 + if (WARN_ON(last_index <= inode->last_dir_index_offset)) 3657 + ret = -EUCLEAN; 3658 + else 3659 + inode->last_dir_index_offset = last_index; 3649 3660 out: 3650 3661 kfree(ins_data); 3651 3662 ··· 3708 3693 } 3709 3694 3710 3695 di = btrfs_item_ptr(src, i, struct btrfs_dir_item); 3711 - ctx->last_dir_item_offset = key.offset; 3712 3696 3713 3697 /* 3714 3698 * Skip ranges of items that consist only of dir item keys created ··· 3770 3756 if (batch_size > 0) { 3771 3757 int ret; 3772 3758 3773 - ret = flush_dir_items_batch(trans, log, src, dst_path, 3759 + ret = flush_dir_items_batch(trans, inode, src, dst_path, 3774 3760 batch_start, batch_size); 3775 3761 if (ret < 0) 3776 3762 return ret; ··· 4058 4044 4059 4045 min_key = BTRFS_DIR_START_INDEX; 4060 4046 max_key = 0; 4061 - ctx->last_dir_item_offset = inode->last_dir_index_offset; 4062 4047 4063 4048 while (1) { 4064 4049 ret = log_dir_items(trans, inode, path, dst_path, ··· 4068 4055 break; 4069 4056 min_key = max_key + 1; 4070 4057 } 4071 - 4072 - inode->last_dir_index_offset = ctx->last_dir_item_offset; 4073 4058 4074 4059 return 0; 4075 4060 }
-2
fs/btrfs/tree-log.h
··· 24 24 bool logging_new_delayed_dentries; 25 25 /* Indicate if the inode being logged was logged before. */ 26 26 bool logged_before; 27 - /* Tracks the last logged dir item/index key offset. */ 28 - u64 last_dir_item_offset; 29 27 struct inode *inode; 30 28 struct list_head list; 31 29 /* Only used for fast fsyncs. */
+15 -1
fs/btrfs/volumes.c
··· 403 403 static void free_fs_devices(struct btrfs_fs_devices *fs_devices) 404 404 { 405 405 struct btrfs_device *device; 406 + 406 407 WARN_ON(fs_devices->opened); 407 408 while (!list_empty(&fs_devices->devices)) { 408 409 device = list_entry(fs_devices->devices.next, ··· 1182 1181 1183 1182 mutex_lock(&uuid_mutex); 1184 1183 close_fs_devices(fs_devices); 1185 - if (!fs_devices->opened) 1184 + if (!fs_devices->opened) { 1186 1185 list_splice_init(&fs_devices->seed_list, &list); 1186 + 1187 + /* 1188 + * If the struct btrfs_fs_devices is not assembled with any 1189 + * other device, it can be re-initialized during the next mount 1190 + * without the needing device-scan step. Therefore, it can be 1191 + * fully freed. 1192 + */ 1193 + if (fs_devices->num_devices == 1) { 1194 + list_del(&fs_devices->fs_list); 1195 + free_fs_devices(fs_devices); 1196 + } 1197 + } 1198 + 1187 1199 1188 1200 list_for_each_entry_safe(fs_devices, tmp, &list, seed_list) { 1189 1201 close_fs_devices(fs_devices);