Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

Merge tag 'ntfs3_for_7.1' of https://github.com/Paragon-Software-Group/linux-ntfs3

Pull ntfs3 updates from Konstantin Komarov:
"New:
- reject inodes with zero non-DOS link count
- return folios from ntfs_lock_new_page()
- subset of W=1 warnings for stricter checks
- work around -Wmaybe-uninitialized warnings
- buffer boundary checks to run_unpack()
- terminate the cached volume label after UTF-8 conversion

Fixes:
- check return value of indx_find to avoid infinite loop
- prevent uninitialized lcn caused by zero len
- increase CLIENT_REC name field size to prevent buffer overflow
- missing run load for vcn0 in attr_data_get_block_locked()
- memory leak in indx_create_allocate()
- OOB write in attr_wof_frame_info()
- mount failure on volumes with fragmented MFT bitmap
- integer overflow in run_unpack() volume boundary check
- validate rec->used in journal-replay file record check

Updates:
- resolve compare function in public index APIs
- $LXDEV xattr lookup
- potential double iput on d_make_root() failure
- initialize err in ni_allocate_da_blocks_locked()
- correct the pre_alloc condition in attr_allocate_clusters()"

* tag 'ntfs3_for_7.1' of https://github.com/Paragon-Software-Group/linux-ntfs3:
fs/ntfs3: fix Smatch warnings
fs/ntfs3: validate rec->used in journal-replay file record check
fs/ntfs3: terminate the cached volume label after UTF-8 conversion
fs/ntfs3: fix potential double iput on d_make_root() failure
ntfs3: fix integer overflow in run_unpack() volume boundary check
ntfs3: add buffer boundary checks to run_unpack()
ntfs3: fix mount failure on volumes with fragmented MFT bitmap
fs/ntfs3: fix $LXDEV xattr lookup
ntfs3: fix OOB write in attr_wof_frame_info()
ntfs3: fix memory leak in indx_create_allocate()
ntfs3: work around false-positive -Wmaybe-uninitialized warnings
fs/ntfs3: fix missing run load for vcn0 in attr_data_get_block_locked()
fs/ntfs3: increase CLIENT_REC name field size
fs/ntfs3: prevent uninitialized lcn caused by zero len
fs/ntfs3: add a subset of W=1 warnings for stricter checks
fs/ntfs3: return folios from ntfs_lock_new_page()
fs/ntfs3: resolve compare function in public index APIs
ntfs3: reject inodes with zero non-DOS link count

+202 -73
+20
fs/ntfs3/Makefile
··· 3 3 # Makefile for the ntfs3 filesystem support. 4 4 # 5 5 6 + # Subset of W=1 warnings 7 + subdir-ccflags-y += -Wextra -Wunused -Wno-unused-parameter 8 + subdir-ccflags-y += -Wmissing-declarations 9 + subdir-ccflags-y += -Wmissing-format-attribute 10 + subdir-ccflags-y += -Wmissing-prototypes 11 + subdir-ccflags-y += -Wold-style-definition 12 + subdir-ccflags-y += -Wmissing-include-dirs 13 + condflags := \ 14 + $(call cc-option, -Wunused-but-set-variable) \ 15 + $(call cc-option, -Wunused-const-variable) \ 16 + $(call cc-option, -Wpacked-not-aligned) \ 17 + $(call cc-option, -Wstringop-truncation) \ 18 + $(call cc-option, -Wmaybe-uninitialized) 19 + subdir-ccflags-y += $(condflags) 20 + # The following turn off the warnings enabled by -Wextra 21 + subdir-ccflags-y += -Wno-missing-field-initializers 22 + subdir-ccflags-y += -Wno-sign-compare 23 + subdir-ccflags-y += -Wno-type-limits 24 + subdir-ccflags-y += -Wno-shift-negative-value 25 + 6 26 # to check robot warnings 7 27 ccflags-y += -Wint-to-pointer-cast \ 8 28 $(call cc-option,-Wunused-but-set-variable,-Wunused-const-variable) \
+22 -1
fs/ntfs3/attrib.c
··· 173 173 174 174 if (err == -ENOSPC && pre) { 175 175 pre = 0; 176 - if (*pre_alloc) 176 + if (pre_alloc) 177 177 *pre_alloc = 0; 178 178 continue; 179 179 } ··· 1152 1152 if (err) 1153 1153 goto out; 1154 1154 } 1155 + 1156 + if (vcn0 < svcn || evcn1 <= vcn0) { 1157 + struct ATTRIB *attr2; 1158 + 1159 + attr2 = ni_find_attr(ni, attr_b, &le_b, ATTR_DATA, NULL, 1160 + 0, &vcn0, &mi); 1161 + if (!attr2) { 1162 + err = -EINVAL; 1163 + goto out; 1164 + } 1165 + err = attr_load_runs(attr2, ni, run, NULL); 1166 + if (err) 1167 + goto out; 1168 + } 1169 + 1155 1170 da = false; /* no delalloc for compressed file. */ 1156 1171 } 1157 1172 ··· 1590 1575 if (index != folio->index) { 1591 1576 u64 from = vbo[i] & ~(u64)(PAGE_SIZE - 1); 1592 1577 u64 to = min(from + PAGE_SIZE, wof_size); 1578 + 1579 + if (from >= wof_size) { 1580 + _ntfs_bad_inode(&ni->vfs_inode); 1581 + err = -EINVAL; 1582 + goto out1; 1583 + } 1593 1584 1594 1585 err = attr_load_runs_range(ni, ATTR_DATA, WOF_NAME, 1595 1586 ARRAY_SIZE(WOF_NAME), run,
+27 -22
fs/ntfs3/frecord.c
··· 1852 1852 return REPARSE_LINK; 1853 1853 } 1854 1854 1855 - static struct page *ntfs_lock_new_page(struct address_space *mapping, 1856 - pgoff_t index, gfp_t gfp) 1855 + static struct folio *ntfs_lock_new_page(struct address_space *mapping, 1856 + pgoff_t index, gfp_t gfp) 1857 1857 { 1858 - struct folio *folio = __filemap_get_folio( 1859 - mapping, index, FGP_LOCK | FGP_ACCESSED | FGP_CREAT, gfp); 1860 - struct page *page; 1858 + struct folio *folio = __filemap_get_folio(mapping, index, 1859 + FGP_LOCK | FGP_ACCESSED | FGP_CREAT, gfp); 1861 1860 1862 1861 if (IS_ERR(folio)) 1863 - return ERR_CAST(folio); 1862 + return folio; 1864 1863 1865 - if (!folio_test_uptodate(folio)) 1866 - return folio_file_page(folio, index); 1864 + if (!folio_test_uptodate(folio)) { 1865 + struct page *page = folio_file_page(folio, index); 1866 + 1867 + if (IS_ERR(page)) 1868 + return ERR_CAST(page); 1869 + return page_folio(page); 1870 + } 1867 1871 1868 1872 /* Use a temporary page to avoid data corruption */ 1869 1873 folio_unlock(folio); 1870 1874 folio_put(folio); 1871 - page = alloc_page(gfp); 1872 - if (!page) 1875 + folio = folio_alloc(gfp, 0); 1876 + if (!folio) 1873 1877 return ERR_PTR(-ENOMEM); 1874 - __SetPageLocked(page); 1875 - return page; 1878 + __folio_set_locked(folio); 1879 + return folio; 1876 1880 } 1877 1881 1878 1882 /* ··· 1898 1894 u32 i, idx, frame_size, pages_per_frame; 1899 1895 gfp_t gfp_mask; 1900 1896 struct page *pg; 1897 + struct folio *f; 1901 1898 1902 1899 if (vbo >= i_size_read(&ni->vfs_inode)) { 1903 1900 folio_zero_range(folio, 0, folio_size(folio)); ··· 1934 1929 if (i == idx) 1935 1930 continue; 1936 1931 1937 - pg = ntfs_lock_new_page(mapping, index, gfp_mask); 1938 - if (IS_ERR(pg)) { 1939 - err = PTR_ERR(pg); 1932 + f = ntfs_lock_new_page(mapping, index, gfp_mask); 1933 + if (IS_ERR(f)) { 1934 + err = PTR_ERR(f); 1940 1935 goto out1; 1941 1936 } 1942 - pages[i] = pg; 1937 + pages[i] = &f->page; 1943 1938 } 1944 1939 1945 1940 ni_lock(ni); 
··· 2028 2023 } 2029 2024 2030 2025 for (i = 0; i < pages_per_frame; i++, index++) { 2031 - struct page *pg; 2026 + struct folio *f; 2032 2027 2033 - pg = ntfs_lock_new_page(mapping, index, gfp_mask); 2034 - if (IS_ERR(pg)) { 2028 + f = ntfs_lock_new_page(mapping, index, gfp_mask); 2029 + if (IS_ERR(f)) { 2035 2030 while (i--) { 2036 2031 unlock_page(pages[i]); 2037 2032 put_page(pages[i]); 2038 2033 } 2039 - err = PTR_ERR(pg); 2034 + err = PTR_ERR(f); 2040 2035 goto out; 2041 2036 } 2042 - pages[i] = pg; 2037 + pages[i] = &f->page; 2043 2038 } 2044 2039 2045 2040 err = ni_read_frame(ni, vbo, pages, pages_per_frame, 1); ··· 3267 3262 */ 3268 3263 int ni_allocate_da_blocks_locked(struct ntfs_inode *ni) 3269 3264 { 3270 - int err; 3265 + int err = 0; 3271 3266 3272 3267 if (!ni->file.run_da.count) 3273 3268 return 0;
+13 -3
fs/ntfs3/fslog.c
··· 45 45 __le16 seq_num; // 0x14: 46 46 u8 align[6]; // 0x16: 47 47 __le32 name_bytes; // 0x1C: In bytes. 48 - __le16 name[32]; // 0x20: Name of client. 48 + __le16 name[64]; // 0x20: Name of client. 49 49 }; 50 50 51 - static_assert(sizeof(struct CLIENT_REC) == 0x60); 51 + static_assert(sizeof(struct CLIENT_REC) == 0xa0); 52 52 53 53 /* Two copies of these will exist at the beginning of the log file */ 54 54 struct RESTART_AREA { ··· 2791 2791 u16 fn = le16_to_cpu(rec->rhdr.fix_num); 2792 2792 u16 ao = le16_to_cpu(rec->attr_off); 2793 2793 u32 rs = sbi->record_size; 2794 + u32 used = le32_to_cpu(rec->used); 2794 2795 2795 2796 /* Check the file record header for consistency. */ 2796 2797 if (rec->rhdr.sign != NTFS_FILE_SIGNATURE || 2797 2798 fo > (SECTOR_SIZE - ((rs >> SECTOR_SHIFT) + 1) * sizeof(short)) || 2798 2799 (fn - 1) * SECTOR_SIZE != rs || ao < MFTRECORD_FIXUP_OFFSET_1 || 2799 2800 ao > sbi->record_size - SIZEOF_RESIDENT || !is_rec_inuse(rec) || 2800 - le32_to_cpu(rec->total) != rs) { 2801 + le32_to_cpu(rec->total) != rs || used > rs || used < ao) { 2801 2802 return false; 2802 2803 } 2803 2804 ··· 2809 2808 continue; 2810 2809 return false; 2811 2810 } 2811 + 2812 + /* 2813 + * The do_action() handlers compute memmove lengths as 2814 + * "rec->used - <offset of validated attr>", which underflows when 2815 + * rec->used is smaller than the attribute walk reached. At this 2816 + * point attr is the ATTR_END marker; rec->used must cover it. 2817 + */ 2818 + if (used < PtrOffset(rec, attr) + sizeof(attr->type)) 2819 + return false; 2812 2820 2813 2821 return true; 2814 2822 }
+2 -2
fs/ntfs3/fsntfs.c
··· 1440 1440 u16 fo = le16_to_cpu(rhdr->fix_off); 1441 1441 u16 fn = le16_to_cpu(rhdr->fix_num); 1442 1442 u32 idx; 1443 - __le16 *fixup; 1444 - __le16 sample; 1443 + __le16 *fixup = NULL; 1444 + __le16 sample = cpu_to_le16(-1u); 1445 1445 1446 1446 if ((fo & 1) || fo + fn * sizeof(short) > SECTOR_SIZE || !fn-- || 1447 1447 fn * SECTOR_SIZE > bytes) {
+50 -27
fs/ntfs3/index.c
··· 714 714 */ 715 715 static struct NTFS_DE *hdr_find_e(const struct ntfs_index *indx, 716 716 const struct INDEX_HDR *hdr, const void *key, 717 - size_t key_len, const void *ctx, int *diff) 717 + size_t key_len, const void *ctx, int *diff, 718 + NTFS_CMP_FUNC cmp) 718 719 { 719 720 struct NTFS_DE *e, *found = NULL; 720 - NTFS_CMP_FUNC cmp = indx->cmp; 721 721 int min_idx = 0, mid_idx, max_idx = 0; 722 722 int diff2; 723 723 int table_size = 8; ··· 726 726 u32 off = le32_to_cpu(hdr->de_off); 727 727 u32 total = le32_to_cpu(hdr->total); 728 728 u16 offs[128]; 729 - 730 - if (unlikely(!cmp)) 731 - return NULL; 732 729 733 730 fill_table: 734 731 if (end > total) ··· 797 800 static struct NTFS_DE *hdr_insert_de(const struct ntfs_index *indx, 798 801 struct INDEX_HDR *hdr, 799 802 const struct NTFS_DE *de, 800 - struct NTFS_DE *before, const void *ctx) 803 + struct NTFS_DE *before, const void *ctx, 804 + NTFS_CMP_FUNC cmp) 801 805 { 802 806 int diff; 803 807 size_t off = PtrOffset(hdr, before); ··· 821 823 } 822 824 /* No insert point is applied. Get it manually. */ 823 825 before = hdr_find_e(indx, hdr, de + 1, le16_to_cpu(de->key_size), ctx, 824 - &diff); 826 + &diff, cmp); 825 827 if (!before) 826 828 return NULL; 827 829 off = PtrOffset(hdr, before); ··· 912 914 } 913 915 914 916 init_rwsem(&indx->run_lock); 915 - 916 - indx->cmp = get_cmp_func(root); 917 - if (!indx->cmp) 918 - goto out; 919 917 920 918 return 0; 921 919 ··· 1135 1141 int err; 1136 1142 struct NTFS_DE *e; 1137 1143 struct indx_node *node; 1144 + NTFS_CMP_FUNC cmp; 1138 1145 1139 1146 if (!root) 1140 1147 root = indx_get_root(&ni->dir, ni, NULL, NULL); ··· 1145 1150 return -EINVAL; 1146 1151 } 1147 1152 1153 + cmp = get_cmp_func(root); 1154 + if (unlikely(!cmp)) { 1155 + WARN_ON_ONCE(1); 1156 + return -EINVAL; 1157 + } 1158 + 1148 1159 /* Check cache. */ 1149 1160 e = fnd->level ? 
fnd->de[fnd->level - 1] : fnd->root_de; 1150 1161 if (e && !de_is_last(e) && 1151 - !(*indx->cmp)(key, key_len, e + 1, le16_to_cpu(e->key_size), ctx)) { 1162 + !(*cmp)(key, key_len, e + 1, le16_to_cpu(e->key_size), ctx)) { 1152 1163 *entry = e; 1153 1164 *diff = 0; 1154 1165 return 0; ··· 1164 1163 fnd_clear(fnd); 1165 1164 1166 1165 /* Lookup entry that is <= to the search value. */ 1167 - e = hdr_find_e(indx, &root->ihdr, key, key_len, ctx, diff); 1166 + e = hdr_find_e(indx, &root->ihdr, key, key_len, ctx, diff, cmp); 1168 1167 if (!e) 1169 1168 return -EINVAL; 1170 1169 ··· 1184 1183 1185 1184 /* Lookup entry that is <= to the search value. */ 1186 1185 e = hdr_find_e(indx, &node->index->ihdr, key, key_len, ctx, 1187 - diff); 1186 + diff, cmp); 1188 1187 if (!e) { 1189 1188 put_indx_node(node); 1190 1189 return -EINVAL; ··· 1482 1481 run_deallocate(sbi, &run, false); 1483 1482 1484 1483 out: 1484 + run_close(&run); 1485 1485 return err; 1486 1486 } 1487 1487 ··· 1587 1585 static int indx_insert_into_root(struct ntfs_index *indx, struct ntfs_inode *ni, 1588 1586 const struct NTFS_DE *new_de, 1589 1587 struct NTFS_DE *root_de, const void *ctx, 1590 - struct ntfs_fnd *fnd, bool undo) 1588 + struct ntfs_fnd *fnd, bool undo, NTFS_CMP_FUNC cmp) 1591 1589 { 1592 1590 int err = 0; 1593 1591 struct NTFS_DE *e, *e0, *re; ··· 1628 1626 if ((undo || asize + ds_root < sbi->max_bytes_per_attr) && 1629 1627 mi_resize_attr(mi, attr, ds_root)) { 1630 1628 hdr->total = cpu_to_le32(hdr_total + ds_root); 1631 - e = hdr_insert_de(indx, hdr, new_de, root_de, ctx); 1629 + e = hdr_insert_de(indx, hdr, new_de, root_de, ctx, cmp); 1632 1630 WARN_ON(!e); 1633 1631 fnd_clear(fnd); 1634 1632 fnd->root_de = e; ··· 1769 1767 * Now root is a parent for new index buffer. 1770 1768 * Insert NewEntry a new buffer. 
1771 1769 */ 1772 - e = hdr_insert_de(indx, hdr, new_de, NULL, ctx); 1770 + e = hdr_insert_de(indx, hdr, new_de, NULL, ctx, cmp); 1773 1771 if (!e) { 1774 1772 err = -EINVAL; 1775 1773 goto out_put_n; ··· 1799 1797 static int 1800 1798 indx_insert_into_buffer(struct ntfs_index *indx, struct ntfs_inode *ni, 1801 1799 struct INDEX_ROOT *root, const struct NTFS_DE *new_de, 1802 - const void *ctx, int level, struct ntfs_fnd *fnd) 1800 + const void *ctx, int level, struct ntfs_fnd *fnd, NTFS_CMP_FUNC cmp) 1803 1801 { 1804 1802 int err; 1805 1803 const struct NTFS_DE *sp; ··· 1816 1814 1817 1815 /* Try the most easy case. */ 1818 1816 e = fnd->level - 1 == level ? fnd->de[level] : NULL; 1819 - e = hdr_insert_de(indx, hdr1, new_de, e, ctx); 1817 + e = hdr_insert_de(indx, hdr1, new_de, e, ctx, cmp); 1820 1818 fnd->de[level] = e; 1821 1819 if (e) { 1822 1820 /* Just write updated index into disk. */ ··· 1893 1891 * (depending on sp <=> new_de). 1894 1892 */ 1895 1893 hdr_insert_de(indx, 1896 - (*indx->cmp)(new_de + 1, le16_to_cpu(new_de->key_size), 1894 + (*cmp)(new_de + 1, le16_to_cpu(new_de->key_size), 1897 1895 up_e + 1, le16_to_cpu(up_e->key_size), 1898 1896 ctx) < 0 ? 1899 1897 hdr2 : 1900 1898 hdr1, 1901 - new_de, NULL, ctx); 1899 + new_de, NULL, ctx, cmp); 1902 1900 1903 1901 indx_mark_used(indx, ni, new_vbn >> indx->idx2vbn_bits); 1904 1902 ··· 1913 1911 */ 1914 1912 if (!level) { 1915 1913 /* Insert in root. */ 1916 - err = indx_insert_into_root(indx, ni, up_e, NULL, ctx, fnd, 0); 1914 + err = indx_insert_into_root(indx, ni, up_e, NULL, ctx, fnd, 0, cmp); 1917 1915 } else { 1918 1916 /* 1919 1917 * The target buffer's parent is another index buffer. 1920 1918 * TODO: Remove recursion. 
1921 1919 */ 1922 1920 err = indx_insert_into_buffer(indx, ni, root, up_e, ctx, 1923 - level - 1, fnd); 1921 + level - 1, fnd, cmp); 1924 1922 } 1925 1923 1926 1924 if (err) { ··· 1954 1952 struct NTFS_DE *e; 1955 1953 struct ntfs_fnd *fnd_a = NULL; 1956 1954 struct INDEX_ROOT *root; 1955 + NTFS_CMP_FUNC cmp; 1957 1956 1958 1957 if (!fnd) { 1959 1958 fnd_a = fnd_get(); ··· 1969 1966 if (!root) { 1970 1967 err = -EINVAL; 1971 1968 goto out; 1969 + } 1970 + 1971 + cmp = get_cmp_func(root); 1972 + if (unlikely(!cmp)) { 1973 + WARN_ON_ONCE(1); 1974 + return -EINVAL; 1972 1975 } 1973 1976 1974 1977 if (fnd_is_empty(fnd)) { ··· 2000 1991 * new entry into it. 2001 1992 */ 2002 1993 err = indx_insert_into_root(indx, ni, new_de, fnd->root_de, ctx, 2003 - fnd, undo); 1994 + fnd, undo, cmp); 2004 1995 } else { 2005 1996 /* 2006 1997 * Found a leaf buffer, so we'll insert the new entry into it. 2007 1998 */ 2008 1999 err = indx_insert_into_buffer(indx, ni, root, new_de, ctx, 2009 - fnd->level - 1, fnd); 2000 + fnd->level - 1, fnd, cmp); 2010 2001 } 2011 2002 2012 2003 indx->version += 1; ··· 2300 2291 u32 e_size, root_size, new_root_size; 2301 2292 size_t trim_bit; 2302 2293 const struct INDEX_NAMES *in; 2294 + NTFS_CMP_FUNC cmp; 2303 2295 2304 2296 fnd = fnd_get(); 2305 2297 if (!fnd) { ··· 2318 2308 if (!root) { 2319 2309 err = -EINVAL; 2320 2310 goto out; 2311 + } 2312 + 2313 + cmp = get_cmp_func(root); 2314 + if (unlikely(!cmp)) { 2315 + WARN_ON_ONCE(1); 2316 + return -EINVAL; 2321 2317 } 2322 2318 2323 2319 /* Locate the entry to remove. */ ··· 2392 2376 err = level ? 
indx_insert_into_buffer(indx, ni, root, 2393 2377 re, ctx, 2394 2378 fnd->level - 1, 2395 - fnd) : 2379 + fnd, cmp) : 2396 2380 indx_insert_into_root(indx, ni, re, e, 2397 - ctx, fnd, 0); 2381 + ctx, fnd, 0, cmp); 2398 2382 kfree(re); 2399 2383 2400 2384 if (err) ··· 2689 2673 struct INDEX_ROOT *root; 2690 2674 struct mft_inode *mi; 2691 2675 struct ntfs_index *indx = &ni->dir; 2676 + NTFS_CMP_FUNC cmp; 2692 2677 2693 2678 fnd = fnd_get(); 2694 2679 if (!fnd) ··· 2699 2682 if (!root) { 2700 2683 err = -EINVAL; 2701 2684 goto out; 2685 + } 2686 + 2687 + cmp = get_cmp_func(root); 2688 + if (unlikely(!cmp)) { 2689 + WARN_ON_ONCE(1); 2690 + return -EINVAL; 2702 2691 } 2703 2692 2704 2693 /* Find entry in directory. */
+10 -5
fs/ntfs3/inode.c
··· 432 432 ni->mi.dirty = true; 433 433 } 434 434 435 + if (!links) { 436 + err = -EINVAL; 437 + goto out; 438 + } 439 + 435 440 set_nlink(inode, links); 436 441 437 442 if (S_ISDIR(mode)) { ··· 778 773 return err; 779 774 } 780 775 776 + if (!clen) { 777 + /* broken file? */ 778 + return -EINVAL; 779 + } 780 + 781 781 if (lcn == EOF_LCN) { 782 782 /* request out of file. */ 783 783 if (flags & IOMAP_REPORT) { ··· 814 804 iomap->offset = 0; 815 805 iomap->length = clen; /* resident size in bytes. */ 816 806 return 0; 817 - } 818 - 819 - if (!clen) { 820 - /* broken file? */ 821 - return -EINVAL; 822 807 } 823 808 824 809 iomap->bdev = inode->i_sb->s_bdev;
-3
fs/ntfs3/ntfs_fs.h
··· 196 196 struct rw_semaphore run_lock; 197 197 size_t version; /* increment each change */ 198 198 199 - /*TODO: Remove 'cmp'. */ 200 - NTFS_CMP_FUNC cmp; 201 - 202 199 u8 index_bits; // log2(root->index_block_size) 203 200 u8 idx2vbn_bits; // log2(root->index_block_clst) 204 201 u8 vbn2vbo_bits; // index_block_size < cluster? 9 : cluster_bits
+15 -3
fs/ntfs3/run.c
··· 1008 1008 if (size_size > sizeof(len)) 1009 1009 return -EINVAL; 1010 1010 1011 + if (run_buf + size_size > run_last) 1012 + return -EINVAL; 1013 + 1011 1014 len = run_unpack_s64(run_buf, size_size, 0); 1012 1015 /* Skip size_size. */ 1013 1016 run_buf += size_size; ··· 1022 1019 lcn = SPARSE_LCN64; 1023 1020 else if (offset_size <= sizeof(s64)) { 1024 1021 s64 dlcn; 1022 + 1023 + if (run_buf + offset_size > run_last) 1024 + return -EINVAL; 1025 1025 1026 1026 /* Initial value of dlcn is -1 or 0. */ 1027 1027 dlcn = (run_buf[offset_size - 1] & 0x80) ? (s64)-1 : 0; ··· 1065 1059 return -EOPNOTSUPP; 1066 1060 } 1067 1061 #endif 1068 - if (lcn != SPARSE_LCN64 && lcn + len > sbi->used.bitmap.nbits) { 1069 - /* LCN range is out of volume. */ 1070 - return -EINVAL; 1062 + if (lcn != SPARSE_LCN64) { 1063 + u64 lcn_end; 1064 + 1065 + if (check_add_overflow(lcn, len, &lcn_end)) 1066 + return -EINVAL; 1067 + if (lcn_end > sbi->used.bitmap.nbits) { 1068 + /* LCN range is out of volume. */ 1069 + return -EINVAL; 1070 + } 1071 1071 } 1072 1072 1073 1073 if (!run)
+42 -6
fs/ntfs3/super.c
··· 1332 1332 le32_to_cpu(attr->res.data_size) >> 1, 1333 1333 UTF16_LITTLE_ENDIAN, sbi->volume.label, 1334 1334 sizeof(sbi->volume.label)); 1335 - if (err < 0) 1335 + if (err < 0) { 1336 1336 sbi->volume.label[0] = 0; 1337 + } else if (err >= sizeof(sbi->volume.label)) { 1338 + sbi->volume.label[sizeof(sbi->volume.label) - 1] = 0; 1339 + } else { 1340 + sbi->volume.label[err] = 0; 1341 + } 1337 1342 } else { 1338 1343 /* Should we break mounting here? */ 1339 1344 //err = -EINVAL; ··· 1424 1419 tt = inode->i_size >> sbi->record_bits; 1425 1420 sbi->mft.next_free = MFT_REC_USER; 1426 1421 1427 - err = wnd_init(&sbi->mft.bitmap, sb, tt); 1428 - if (err) 1429 - goto put_inode_out; 1430 - 1431 1422 err = ni_load_all_mi(ni); 1432 1423 if (err) { 1433 1424 ntfs_err(sb, "Failed to load $MFT's subrecords (%d).", err); 1434 1425 goto put_inode_out; 1435 1426 } 1427 + 1428 + /* Merge MFT bitmap runs from extent records loaded by ni_load_all_mi. */ 1429 + { 1430 + struct ATTRIB *a = NULL; 1431 + struct ATTR_LIST_ENTRY *le = NULL; 1432 + 1433 + while ((a = ni_enum_attr_ex(ni, a, &le, NULL))) { 1434 + CLST svcn, evcn; 1435 + u16 roff; 1436 + 1437 + if (a->type != ATTR_BITMAP || !a->non_res) 1438 + continue; 1439 + 1440 + svcn = le64_to_cpu(a->nres.svcn); 1441 + if (!svcn) 1442 + continue; /* Base record runs already loaded. 
*/ 1443 + 1444 + evcn = le64_to_cpu(a->nres.evcn); 1445 + roff = le16_to_cpu(a->nres.run_off); 1446 + 1447 + err = run_unpack_ex(&sbi->mft.bitmap.run, sbi, 1448 + MFT_REC_MFT, svcn, evcn, svcn, 1449 + Add2Ptr(a, roff), 1450 + le32_to_cpu(a->size) - roff); 1451 + if (err < 0) { 1452 + ntfs_err(sb, "Failed to unpack $MFT bitmap extent (%d).", err); 1453 + goto put_inode_out; 1454 + } 1455 + err = 0; 1456 + } 1457 + } 1458 + 1459 + err = wnd_init(&sbi->mft.bitmap, sb, tt); 1460 + if (err) 1461 + goto put_inode_out; 1436 1462 1437 1463 sbi->mft.ni = ni; 1438 1464 ··· 1702 1666 sb->s_root = d_make_root(inode); 1703 1667 if (!sb->s_root) { 1704 1668 err = -ENOMEM; 1705 - goto put_inode_out; 1669 + goto out; 1706 1670 } 1707 1671 1708 1672 if (boot2) {
+1 -1
fs/ntfs3/xattr.c
··· 1031 1031 i_gid_write(inode, (gid_t)le32_to_cpu(value[1])); 1032 1032 inode->i_mode = le32_to_cpu(value[2]); 1033 1033 1034 - if (ntfs_get_ea(inode, "$LXDEV", sizeof("$$LXDEV") - 1, 1034 + if (ntfs_get_ea(inode, "$LXDEV", sizeof("$LXDEV") - 1, 1035 1035 &value[0], sizeof(value), 1036 1036 &sz) == sizeof(value[0])) { 1037 1037 inode->i_rdev = le32_to_cpu(value[0]);