Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

Merge tag 'for-6.4-rc7-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/kdave/linux

Pull btrfs fix from David Sterba:
"Unfortunately the recent u32 overflow fix was not complete, there was
one conversion left, assertion not triggered by my tests but caught by
Qu's fstests case.

The "cleanup for later" has been promoted to a proper fix and wraps
all uses of the stripe left shift so the diffstat has grown but leaves
no potentially problematic uses.

We should have done it that way before, sorry"

* tag 'for-6.4-rc7-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/kdave/linux:
btrfs: fix remaining u32 overflows when left shifting stripe_nr
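
For context, the failure mode is the classic 32-bit left-shift overflow: assuming the btrfs stripe length of 64 KiB (so BTRFS_STRIPE_LEN_SHIFT is 16), any u32 stripe number of 0x10000 or more, i.e. an offset of 4 GiB or more into the chunk, wraps when the shift is evaluated in 32-bit arithmetic. Below is a minimal userspace sketch of the bug and the fix, not kernel code; it uses stdint types in place of the kernel's, and only the helper name is taken from this pull.

/* Demonstration: why the stripe_nr shift must happen in 64 bits. */
#include <stdio.h>
#include <stdint.h>

#define BTRFS_STRIPE_LEN_SHIFT	16	/* assumes BTRFS_STRIPE_LEN == 64 KiB */

/* The helper this pull adds to fs/btrfs/volumes.h, in stdint terms. */
static inline uint64_t btrfs_stripe_nr_to_offset(uint32_t stripe_nr)
{
	return (uint64_t)stripe_nr << BTRFS_STRIPE_LEN_SHIFT;
}

int main(void)
{
	/* Stripe 65536 starts exactly 4 GiB into the chunk. */
	uint32_t stripe_nr = 0x10000;

	/* Buggy pattern: the shift is evaluated in 32 bits and wraps to 0. */
	uint64_t bad = stripe_nr << BTRFS_STRIPE_LEN_SHIFT;

	/* Fixed pattern: promote to 64 bits first, then shift. */
	uint64_t good = btrfs_stripe_nr_to_offset(stripe_nr);

	printf("wrapped: %llu\n", (unsigned long long)bad);   /* prints 0 */
	printf("correct: %llu\n", (unsigned long long)good);  /* 4294967296 */
	return 0;
}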

+40 -28
+1 -1
fs/btrfs/block-group.c
@@ -1973,7 +1973,7 @@

 	/* For RAID5/6 adjust to a full IO stripe length */
 	if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK)
-		io_stripe_size = nr_data_stripes(map) << BTRFS_STRIPE_LEN_SHIFT;
+		io_stripe_size = btrfs_stripe_nr_to_offset(nr_data_stripes(map));

 	buf = kcalloc(map->num_stripes, sizeof(u64), GFP_NOFS);
 	if (!buf) {
+11 -11
fs/btrfs/scrub.c
@@ -1304,7 +1304,7 @@
 	u32 stripe_index;
 	u32 rot;

-	*offset = last_offset + (i << BTRFS_STRIPE_LEN_SHIFT);
+	*offset = last_offset + btrfs_stripe_nr_to_offset(i);

 	stripe_nr = (u32)(*offset >> BTRFS_STRIPE_LEN_SHIFT) / data_stripes;

@@ -1319,7 +1319,7 @@
 		if (stripe_index < num)
 			j++;
 	}
-	*offset = last_offset + (j << BTRFS_STRIPE_LEN_SHIFT);
+	*offset = last_offset + btrfs_stripe_nr_to_offset(j);
 	return 1;
 }

@@ -1715,7 +1715,7 @@
 	ASSERT(test_bit(SCRUB_STRIPE_FLAG_INITIALIZED, &sctx->stripes[0].state));

 	scrub_throttle_dev_io(sctx, sctx->stripes[0].dev,
-			      nr_stripes << BTRFS_STRIPE_LEN_SHIFT);
+			      btrfs_stripe_nr_to_offset(nr_stripes));
 	for (int i = 0; i < nr_stripes; i++) {
 		stripe = &sctx->stripes[i];
 		scrub_submit_initial_read(sctx, stripe);

@@ -1838,7 +1838,7 @@
 	bool all_empty = true;
 	const int data_stripes = nr_data_stripes(map);
 	unsigned long extent_bitmap = 0;
-	u64 length = data_stripes << BTRFS_STRIPE_LEN_SHIFT;
+	u64 length = btrfs_stripe_nr_to_offset(data_stripes);
 	int ret;

 	ASSERT(sctx->raid56_data_stripes);

@@ -1853,13 +1853,13 @@
 			    data_stripes) >> BTRFS_STRIPE_LEN_SHIFT;
 		stripe_index = (i + rot) % map->num_stripes;
 		physical = map->stripes[stripe_index].physical +
-			   (rot << BTRFS_STRIPE_LEN_SHIFT);
+			   btrfs_stripe_nr_to_offset(rot);

 		scrub_reset_stripe(stripe);
 		set_bit(SCRUB_STRIPE_FLAG_NO_REPORT, &stripe->state);
 		ret = scrub_find_fill_first_stripe(bg,
 				map->stripes[stripe_index].dev, physical, 1,
-				full_stripe_start + (i << BTRFS_STRIPE_LEN_SHIFT),
+				full_stripe_start + btrfs_stripe_nr_to_offset(i),
 				BTRFS_STRIPE_LEN, stripe);
 		if (ret < 0)
 			goto out;

@@ -1869,7 +1869,7 @@
 		 */
 		if (ret > 0) {
 			stripe->logical = full_stripe_start +
-					  (i << BTRFS_STRIPE_LEN_SHIFT);
+					  btrfs_stripe_nr_to_offset(i);
 			stripe->dev = map->stripes[stripe_index].dev;
 			stripe->mirror_num = 1;
 			set_bit(SCRUB_STRIPE_FLAG_INITIALIZED, &stripe->state);

@@ -2062,7 +2062,7 @@
 	ASSERT(map->type & (BTRFS_BLOCK_GROUP_RAID0 |
 			    BTRFS_BLOCK_GROUP_RAID10));

-	return (map->num_stripes / map->sub_stripes) << BTRFS_STRIPE_LEN_SHIFT;
+	return btrfs_stripe_nr_to_offset(map->num_stripes / map->sub_stripes);
 }

 /* Get the logical bytenr for the stripe */

@@ -2078,7 +2078,7 @@
 	 * (stripe_index / sub_stripes) gives how many data stripes we need to
 	 * skip.
 	 */
-	return ((stripe_index / map->sub_stripes) << BTRFS_STRIPE_LEN_SHIFT) +
+	return btrfs_stripe_nr_to_offset(stripe_index / map->sub_stripes) +
 		bg->start;
 }

@@ -2204,7 +2204,7 @@
 	}
 	if (profile & (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID10)) {
 		ret = scrub_simple_stripe(sctx, bg, map, scrub_dev, stripe_index);
-		offset = (stripe_index / map->sub_stripes) << BTRFS_STRIPE_LEN_SHIFT;
+		offset = btrfs_stripe_nr_to_offset(stripe_index / map->sub_stripes);
 		goto out;
 	}

@@ -2219,7 +2219,7 @@

 	/* Initialize @offset in case we need to go to out: label */
 	get_raid56_logic_offset(physical, stripe_index, map, &offset, NULL);
-	increment = nr_data_stripes(map) << BTRFS_STRIPE_LEN_SHIFT;
+	increment = btrfs_stripe_nr_to_offset(nr_data_stripes(map));

 	/*
 	 * Due to the rotation, for RAID56 it's better to iterate each stripe
+2 -2
fs/btrfs/tree-checker.c
@@ -857,10 +857,10 @@
 	 *
 	 * Thus it should be a good way to catch obvious bitflips.
	 */
-	if (unlikely(length >= ((u64)U32_MAX << BTRFS_STRIPE_LEN_SHIFT))) {
+	if (unlikely(length >= btrfs_stripe_nr_to_offset(U32_MAX))) {
 		chunk_err(leaf, chunk, logical,
 			  "chunk length too large: have %llu limit %llu",
-			  length, (u64)U32_MAX << BTRFS_STRIPE_LEN_SHIFT);
+			  length, btrfs_stripe_nr_to_offset(U32_MAX));
 		return -EUCLEAN;
 	}
 	if (unlikely(type & ~(BTRFS_BLOCK_GROUP_TYPE_MASK |
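
For reference, assuming the 64 KiB stripe length (a shift of 16), this limit works out to U32_MAX << 16 = 2^48 - 2^16 bytes, i.e. just under 256 TiB; any chunk length at or above that is rejected as a likely bitflip, and routing the comparison through the helper keeps it in 64-bit arithmetic.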
+15 -14
fs/btrfs/volumes.c
@@ -5125,7 +5125,7 @@
 	/* We don't want a chunk larger than 10% of writable space */
 	ctl->max_chunk_size = min(mult_perc(fs_devices->total_rw_bytes, 10),
 				  ctl->max_chunk_size);
-	ctl->dev_extent_min = ctl->dev_stripes << BTRFS_STRIPE_LEN_SHIFT;
+	ctl->dev_extent_min = btrfs_stripe_nr_to_offset(ctl->dev_stripes);
 }

 static void init_alloc_chunk_ctl_policy_zoned(

@@ -5801,7 +5801,7 @@
 	if (!WARN_ON(IS_ERR(em))) {
 		map = em->map_lookup;
 		if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK)
-			len = nr_data_stripes(map) << BTRFS_STRIPE_LEN_SHIFT;
+			len = btrfs_stripe_nr_to_offset(nr_data_stripes(map));
 		free_extent_map(em);
 	}
 	return len;

@@ -5975,12 +5975,12 @@
 	stripe_nr = offset >> BTRFS_STRIPE_LEN_SHIFT;

 	/* stripe_offset is the offset of this block in its stripe */
-	stripe_offset = offset - ((u64)stripe_nr << BTRFS_STRIPE_LEN_SHIFT);
+	stripe_offset = offset - btrfs_stripe_nr_to_offset(stripe_nr);

 	stripe_nr_end = round_up(offset + length, BTRFS_STRIPE_LEN) >>
 			BTRFS_STRIPE_LEN_SHIFT;
 	stripe_cnt = stripe_nr_end - stripe_nr;
-	stripe_end_offset = ((u64)stripe_nr_end << BTRFS_STRIPE_LEN_SHIFT) -
+	stripe_end_offset = btrfs_stripe_nr_to_offset(stripe_nr_end) -
 			    (offset + length);
 	/*
 	 * after this, stripe_nr is the number of stripes on this

@@ -6023,12 +6023,12 @@
 	for (i = 0; i < *num_stripes; i++) {
 		stripes[i].physical =
 			map->stripes[stripe_index].physical +
-			stripe_offset + ((u64)stripe_nr << BTRFS_STRIPE_LEN_SHIFT);
+			stripe_offset + btrfs_stripe_nr_to_offset(stripe_nr);
 		stripes[i].dev = map->stripes[stripe_index].dev;

 		if (map->type & (BTRFS_BLOCK_GROUP_RAID0 |
 				 BTRFS_BLOCK_GROUP_RAID10)) {
-			stripes[i].length = stripes_per_dev << BTRFS_STRIPE_LEN_SHIFT;
+			stripes[i].length = btrfs_stripe_nr_to_offset(stripes_per_dev);

 			if (i / sub_stripes < remaining_stripes)
 				stripes[i].length += BTRFS_STRIPE_LEN;

@@ -6183,8 +6183,8 @@
 	ASSERT(*stripe_offset < U32_MAX);

 	if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
-		unsigned long full_stripe_len = nr_data_stripes(map) <<
-			BTRFS_STRIPE_LEN_SHIFT;
+		unsigned long full_stripe_len =
+			btrfs_stripe_nr_to_offset(nr_data_stripes(map));

 		/*
 		 * For full stripe start, we use previously calculated

@@ -6196,8 +6196,8 @@
 		 * not ensured to be power of 2.
 		 */
 		*full_stripe_start =
-			(u64)rounddown(*stripe_nr, nr_data_stripes(map)) <<
-			BTRFS_STRIPE_LEN_SHIFT;
+			btrfs_stripe_nr_to_offset(
+				rounddown(*stripe_nr, nr_data_stripes(map)));

 		ASSERT(*full_stripe_start + full_stripe_len > offset);
 		ASSERT(*full_stripe_start <= offset);

@@ -6223,7 +6223,7 @@
 {
 	dst->dev = map->stripes[stripe_index].dev;
 	dst->physical = map->stripes[stripe_index].physical +
-			stripe_offset + ((u64)stripe_nr << BTRFS_STRIPE_LEN_SHIFT);
+			stripe_offset + btrfs_stripe_nr_to_offset(stripe_nr);
 }

 int __btrfs_map_block(struct btrfs_fs_info *fs_info, enum btrfs_map_op op,

@@ -6345,7 +6345,8 @@
 		/* Return the length to the full stripe end */
 		*length = min(logical + *length,
 			      raid56_full_stripe_start + em->start +
-			      (data_stripes << BTRFS_STRIPE_LEN_SHIFT)) - logical;
+			      btrfs_stripe_nr_to_offset(data_stripes)) -
+			logical;
 		stripe_index = 0;
 		stripe_offset = 0;
 	} else {

@@ -6436,7 +6435,7 @@
 		 * modulo, to reduce one modulo call.
 		 */
 		bioc->full_stripe_logical = em->start +
-			((stripe_nr * data_stripes) << BTRFS_STRIPE_LEN_SHIFT);
+			btrfs_stripe_nr_to_offset(stripe_nr * data_stripes);
 		for (i = 0; i < num_stripes; i++)
 			set_io_stripe(&bioc->stripes[i], map,
 				      (i + stripe_nr) % num_stripes,

@@ -8033,7 +8032,7 @@

 	for (i = 0; i < data_stripes; i++) {
 		u64 stripe_start = bioc->full_stripe_logical +
-				   (i << BTRFS_STRIPE_LEN_SHIFT);
+				   btrfs_stripe_nr_to_offset(i);

 		if (logical >= stripe_start &&
 		    logical < stripe_start + BTRFS_STRIPE_LEN)
+11
fs/btrfs/volumes.h
@@ -574,6 +574,17 @@
 		sizeof(struct btrfs_stripe) * (num_stripes - 1);
 }

+/*
+ * Do the type-safe conversion from stripe_nr to offset inside the chunk.
+ *
+ * @stripe_nr is u32, with left shift it can overflow u32 for chunks larger
+ * than 4G. This does the proper type cast to avoid overflow.
+ */
+static inline u64 btrfs_stripe_nr_to_offset(u32 stripe_nr)
+{
+	return (u64)stripe_nr << BTRFS_STRIPE_LEN_SHIFT;
+}
+
 void btrfs_get_bioc(struct btrfs_io_context *bioc);
 void btrfs_put_bioc(struct btrfs_io_context *bioc);
 int btrfs_map_block(struct btrfs_fs_info *fs_info, enum btrfs_map_op op,
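
Centralizing the cast in this one inline helper is the point of promoting the "cleanup for later" to a proper fix: every call site gets the u64 promotion for free, and a new stripe_nr shift written against the helper cannot reintroduce the truncation.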