Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

Merge tag 'ntfs-for-7.1-rc1-part2' of git://git.kernel.org/pub/scm/linux/kernel/git/linkinjeon/ntfs

Pull ntfs updates from Namjae Jeon:

- Fix potential data leakage by zeroing the portion of the straddle
block beyond initialized_size when reading non-resident attributes

- Remove unnecessary zeroing in ntfs_punch_hole() for ranges beyond
initialized_size, as they are already returned as zeros on read

- Fix writable check in ntfs_file_mmap_prepare() to correctly handle
shared mappings by testing both VMA_SHARED_BIT and VMA_MAYWRITE_BIT

- Use page allocation instead of kmemdup() for IOMAP_INLINE data to
ensure page-aligned address and avoid BUG trap in
iomap_inline_data_valid() caused by the page boundary check

- Add a size check before memory allocation in ntfs_attr_readall() and
reject overly large attributes

- Remove unneeded noop_direct_IO from ntfs_aops as it is no longer
required after the introduction of the FMODE_CAN_ODIRECT flag

- Fix seven static analysis warnings reported by Smatch

* tag 'ntfs-for-7.1-rc1-part2' of git://git.kernel.org/pub/scm/linux/kernel/git/linkinjeon/ntfs:
ntfs: use page allocation for resident attribute inline data
ntfs: fix mmap_prepare writable check for shared mappings
ntfs: fix potential 32-bit truncation in ntfs_write_cb()
ntfs: fix uninitialized variable in ntfs_map_runlist_nolock
ntfs: delete dead code
ntfs: add missing error code in ntfs_mft_record_alloc()
ntfs: fix uninitialized variables in ntfs_ea_set_wsl_inode()
ntfs: fix uninitialized pointer in ntfs_write_mft_block
ntfs: fix uninitialized variable in ntfs_write_simple_iomap_begin_non_resident
ntfs: remove noop_direct_IO from address_space_operations
ntfs: limit memory allocation in ntfs_attr_readall
ntfs: not zero out range beyond init in punch_hole
ntfs: zero out stale data in straddle block beyond initialized_size

+113 -41
+45 -3
fs/ntfs/aops.c
··· 15 15 #include "debug.h" 16 16 #include "iomap.h" 17 17 18 + static void ntfs_iomap_read_end_io(struct bio *bio) 19 + { 20 + int error = blk_status_to_errno(bio->bi_status); 21 + struct folio_iter iter; 22 + 23 + bio_for_each_folio_all(iter, bio) { 24 + struct folio *folio = iter.folio; 25 + struct ntfs_inode *ni = NTFS_I(folio->mapping->host); 26 + s64 init_size; 27 + loff_t pos = folio_pos(folio); 28 + 29 + init_size = ni->initialized_size; 30 + if (pos + iter.offset < init_size && 31 + pos + iter.offset + iter.length > init_size) 32 + folio_zero_segment(folio, offset_in_folio(folio, init_size), 33 + iter.offset + iter.length); 34 + 35 + iomap_finish_folio_read(folio, iter.offset, iter.length, error); 36 + } 37 + bio_put(bio); 38 + } 39 + 40 + static void ntfs_iomap_bio_submit_read(const struct iomap_iter *iter, 41 + struct iomap_read_folio_ctx *ctx) 42 + { 43 + struct bio *bio = ctx->read_ctx; 44 + bio->bi_end_io = ntfs_iomap_read_end_io; 45 + submit_bio(bio); 46 + } 47 + 48 + static const struct iomap_read_ops ntfs_iomap_bio_read_ops = { 49 + .read_folio_range = iomap_bio_read_folio_range, 50 + .submit_read = ntfs_iomap_bio_submit_read, 51 + }; 52 + 18 53 /* 19 54 * ntfs_read_folio - Read data for a folio from the device 20 55 * @file: open file to which the folio @folio belongs or NULL ··· 70 35 static int ntfs_read_folio(struct file *file, struct folio *folio) 71 36 { 72 37 struct ntfs_inode *ni = NTFS_I(folio->mapping->host); 38 + struct iomap_read_folio_ctx ctx = { 39 + .cur_folio = folio, 40 + .ops = &ntfs_iomap_bio_read_ops, 41 + }; 73 42 74 43 /* 75 44 * Only $DATA attributes can be encrypted and only unnamed $DATA ··· 97 58 return ntfs_read_compressed_block(folio); 98 59 } 99 60 100 - iomap_bio_read_folio(folio, &ntfs_read_iomap_ops); 61 + iomap_read_folio(&ntfs_read_iomap_ops, &ctx, NULL); 101 62 return 0; 102 63 } 103 64 ··· 227 188 struct address_space *mapping = rac->mapping; 228 189 struct inode *inode = mapping->host; 229 190 struct ntfs_inode 
*ni = NTFS_I(inode); 191 + struct iomap_read_folio_ctx ctx = { 192 + .ops = &ntfs_iomap_bio_read_ops, 193 + .rac = rac, 194 + }; 230 195 231 196 /* 232 197 * Resident files are not cached in the page cache, ··· 238 195 */ 239 196 if (!NInoNonResident(ni) || NInoCompressed(ni)) 240 197 return; 241 - iomap_bio_readahead(rac, &ntfs_read_iomap_ops); 198 + iomap_readahead(&ntfs_read_iomap_ops, &ctx, NULL); 242 199 } 243 200 244 201 static int ntfs_writepages(struct address_space *mapping, ··· 281 238 .read_folio = ntfs_read_folio, 282 239 .readahead = ntfs_readahead, 283 240 .writepages = ntfs_writepages, 284 - .direct_IO = noop_direct_IO, 285 241 .dirty_folio = iomap_dirty_folio, 286 242 .bmap = ntfs_bmap, 287 243 .migrate_folio = filemap_migrate_folio,
+15 -1
fs/ntfs/attrib.c
··· 30 30 __le16 AT_UNNAMED[] = { cpu_to_le16('\0') }; 31 31 32 32 /* 33 + * Maximum size allowed for reading attributes by ntfs_attr_readall(). 34 + * Extended attribute, reparse point are not expected to be larger than this size. 35 + */ 36 + 37 + #define NTFS_ATTR_READALL_MAX_SIZE (64 * 1024) 38 + 39 + /* 33 40 * ntfs_map_runlist_nolock - map (a part of) a runlist of an ntfs inode 34 41 * @ni: ntfs inode for which to map (part of) a runlist 35 42 * @vcn: map runlist part containing this vcn ··· 92 85 struct runlist_element *rl; 93 86 struct folio *put_this_folio = NULL; 94 87 int err = 0; 95 - bool ctx_is_temporary = false, ctx_needs_reset; 88 + bool ctx_is_temporary = false, ctx_needs_reset = false; 96 89 struct ntfs_attr_search_ctx old_ctx = { NULL, }; 97 90 size_t new_rl_count; 98 91 ··· 5123 5116 goto err_exit; 5124 5117 } 5125 5118 bmp_ni = NTFS_I(bmp_vi); 5119 + 5120 + if (bmp_ni->data_size > NTFS_ATTR_READALL_MAX_SIZE && 5121 + (bmp_ni->type != AT_BITMAP || 5122 + bmp_ni->data_size > ((ni->vol->nr_clusters + 7) >> 3))) { 5123 + ntfs_error(sb, "Invalid attribute data size"); 5124 + goto out; 5125 + } 5126 5126 5127 5127 data = kvmalloc(bmp_ni->data_size, GFP_NOFS); 5128 5128 if (!data)
+2 -1
fs/ntfs/compress.c
··· 1374 1374 bio_size = insz; 1375 1375 } 1376 1376 1377 - new_vcn = ntfs_bytes_to_cluster(vol, pos & ~(ni->itype.compressed.block_size - 1)); 1377 + new_vcn = ntfs_bytes_to_cluster(vol, 1378 + pos & ~((loff_t)ni->itype.compressed.block_size - 1)); 1378 1379 new_length = ntfs_bytes_to_cluster(vol, round_up(bio_size, vol->cluster_size)); 1379 1380 1380 1381 err = ntfs_non_resident_attr_punch_hole(ni, new_vcn, ni->itype.compressed.block_clusters);
+4 -1
fs/ntfs/ea.c
··· 406 406 unsigned int flags) 407 407 { 408 408 __le32 v; 409 - int err; 409 + int err = 0; 410 + 411 + if (ea_size) 412 + *ea_size = 0; 410 413 411 414 if (flags & NTFS_EA_UID) { 412 415 /* Store uid to lxuid EA */
+24 -25
fs/ntfs/file.c
··· 267 267 return err; 268 268 269 269 inode_dio_wait(vi); 270 - /* Serialize against page faults */ 271 - if (NInoNonResident(NTFS_I(vi)) && attr->ia_size < old_size) { 272 - err = iomap_truncate_page(vi, attr->ia_size, NULL, 273 - &ntfs_read_iomap_ops, 274 - &ntfs_iomap_folio_ops, NULL); 275 - if (err) 276 - return err; 277 - } 278 - 279 270 truncate_setsize(vi, attr->ia_size); 280 271 err = ntfs_truncate_vfs(vi, attr->ia_size, old_size); 281 272 if (err) { ··· 525 534 ret = -EIO; 526 535 goto out; 527 536 } 528 - if (!ret2) 529 - invalidate_mapping_pages(iocb->ki_filp->f_mapping, 530 - offset >> PAGE_SHIFT, 531 - end >> PAGE_SHIFT); 537 + invalidate_mapping_pages(iocb->ki_filp->f_mapping, 538 + offset >> PAGE_SHIFT, 539 + end >> PAGE_SHIFT); 532 540 } 533 541 534 542 out: ··· 644 654 if (NInoCompressed(NTFS_I(inode))) 645 655 return -EOPNOTSUPP; 646 656 647 - if (vma_desc_test(desc, VMA_WRITE_BIT)) { 657 + if (vma_desc_test_all(desc, VMA_SHARED_BIT, VMA_MAYWRITE_BIT)) { 648 658 struct inode *inode = file_inode(file); 649 659 loff_t from, to; 650 660 int err; ··· 875 885 end_vcn = ntfs_bytes_to_cluster(vol, end_offset - 1) + 1; 876 886 877 887 if (offset & vol->cluster_size_mask) { 878 - loff_t to; 888 + if (offset < ni->initialized_size) { 889 + loff_t to; 879 890 880 - to = min_t(loff_t, ntfs_cluster_to_bytes(vol, start_vcn + 1), 881 - end_offset); 882 - err = iomap_zero_range(vi, offset, to - offset, NULL, 883 - &ntfs_seek_iomap_ops, 884 - &ntfs_iomap_folio_ops, NULL); 885 - if (err < 0 || (end_vcn - start_vcn) == 1) 891 + to = min_t(loff_t, 892 + ntfs_cluster_to_bytes(vol, start_vcn + 1), 893 + end_offset); 894 + err = iomap_zero_range(vi, offset, to - offset, 895 + NULL, &ntfs_seek_iomap_ops, 896 + &ntfs_iomap_folio_ops, NULL); 897 + if (err < 0) 898 + goto out; 899 + } 900 + if (end_vcn - start_vcn == 1) 886 901 goto out; 887 902 start_vcn++; 888 903 } ··· 896 901 loff_t from; 897 902 898 903 from = ntfs_cluster_to_bytes(vol, end_vcn - 1); 899 - err = 
iomap_zero_range(vi, from, end_offset - from, NULL, 900 - &ntfs_seek_iomap_ops, 901 - &ntfs_iomap_folio_ops, NULL); 902 - if (err < 0 || (end_vcn - start_vcn) == 1) 904 + if (from < ni->initialized_size) { 905 + err = iomap_zero_range(vi, from, end_offset - from, 906 + NULL, &ntfs_seek_iomap_ops, 907 + &ntfs_iomap_folio_ops, NULL); 908 + if (err < 0) 909 + goto out; 910 + } 911 + if (end_vcn - start_vcn == 1) 903 912 goto out; 904 913 end_vcn--; 905 914 }
+20 -8
fs/ntfs/iomap.c
··· 89 89 u32 attr_len; 90 90 int err = 0; 91 91 char *kattr; 92 + struct page *ipage; 92 93 93 94 if (NInoAttr(ni)) 94 95 base_ni = ni->ext.base_ntfs_ino; ··· 130 129 131 130 kattr = (u8 *)ctx->attr + le16_to_cpu(ctx->attr->data.resident.value_offset); 132 131 133 - iomap->inline_data = kmemdup(kattr, attr_len, GFP_KERNEL); 134 - if (!iomap->inline_data) { 132 + ipage = alloc_page(GFP_NOFS | __GFP_ZERO); 133 + if (!ipage) { 135 134 err = -ENOMEM; 136 135 goto out; 137 136 } 138 137 138 + memcpy(page_address(ipage), kattr, attr_len); 139 139 iomap->type = IOMAP_INLINE; 140 + iomap->inline_data = page_address(ipage); 140 141 iomap->offset = 0; 141 142 iomap->length = attr_len; 143 + iomap->private = ipage; 142 144 143 145 out: 144 146 if (ctx) ··· 289 285 static int ntfs_read_iomap_end(struct inode *inode, loff_t pos, loff_t length, 290 286 ssize_t written, unsigned int flags, struct iomap *iomap) 291 287 { 292 - if (iomap->type == IOMAP_INLINE) 293 - kfree(iomap->inline_data); 288 + if (iomap->type == IOMAP_INLINE) { 289 + struct page *ipage = iomap->private; 290 + 291 + put_page(ipage); 292 + } 294 293 295 294 return written; 296 295 } ··· 391 384 loff_t vcn_ofs, rl_length; 392 385 struct runlist_element *rl, *rlc; 393 386 bool is_retry = false; 394 - int err; 387 + int err = 0; 395 388 s64 vcn, lcn; 396 389 s64 max_clu_count = 397 390 ntfs_bytes_to_cluster(vol, round_up(length, vol->cluster_size)); ··· 659 652 u32 attr_len; 660 653 int err = 0; 661 654 char *kattr; 655 + struct page *ipage; 662 656 663 657 ctx = ntfs_attr_get_search_ctx(ni, NULL); 664 658 if (!ctx) { ··· 680 672 attr_len = le32_to_cpu(a->data.resident.value_length); 681 673 kattr = (u8 *)a + le16_to_cpu(a->data.resident.value_offset); 682 674 683 - iomap->inline_data = kmemdup(kattr, attr_len, GFP_KERNEL); 684 - if (!iomap->inline_data) { 675 + ipage = alloc_page(GFP_NOFS | __GFP_ZERO); 676 + if (!ipage) { 685 677 err = -ENOMEM; 686 678 goto out; 687 679 } 688 680 681 + 
memcpy(page_address(ipage), kattr, attr_len); 689 682 iomap->type = IOMAP_INLINE; 683 + iomap->inline_data = page_address(ipage); 690 684 iomap->offset = 0; 691 685 /* iomap requires there is only one INLINE_DATA extent */ 692 686 iomap->length = attr_len; 687 + iomap->private = ipage; 693 688 694 689 out: 695 690 if (ctx) ··· 782 771 u32 attr_len; 783 772 int err; 784 773 char *kattr; 774 + struct page *ipage = iomap->private; 785 775 786 776 mutex_lock(&ni->mrec_lock); 787 777 ctx = ntfs_attr_get_search_ctx(ni, NULL); ··· 811 799 mark_mft_record_dirty(ctx->ntfs_ino); 812 800 err_out: 813 801 ntfs_attr_put_search_ctx(ctx); 814 - kfree(iomap->inline_data); 802 + put_page(ipage); 815 803 mutex_unlock(&ni->mrec_lock); 816 804 return written; 817 805
+3 -2
fs/ntfs/mft.c
··· 2503 2503 folio_unlock(folio); 2504 2504 kunmap_local(m); 2505 2505 folio_put(folio); 2506 + err = -ENOMEM; 2506 2507 goto undo_mftbmp_alloc; 2507 2508 } 2508 2509 ··· 2715 2714 s64 vcn = ntfs_pidx_to_cluster(vol, folio->index); 2716 2715 s64 end_vcn = ntfs_bytes_to_cluster(vol, ni->allocated_size); 2717 2716 unsigned int folio_sz; 2718 - struct runlist_element *rl; 2717 + struct runlist_element *rl = NULL; 2719 2718 loff_t i_size = i_size_read(vi); 2720 2719 2721 2720 ntfs_debug("Entering for inode 0x%llx, attribute type 0x%x, folio index 0x%lx.", ··· 2821 2820 2822 2821 if (vol->cluster_size == NTFS_BLOCK_SIZE && 2823 2822 (mft_record_off || 2824 - rl->length - (vcn_off - rl->vcn) == 1 || 2823 + (rl && rl->length - (vcn_off - rl->vcn) == 1) || 2825 2824 mft_ofs + NTFS_BLOCK_SIZE >= PAGE_SIZE)) 2826 2825 folio_sz = NTFS_BLOCK_SIZE; 2827 2826 else