Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

fs/ntfs3: implement iomap-based file operations

Replace the buffer_head-based operations in the ntfs3 driver with
iomap-based operations.

Implementation details:
- Implements core iomap operations (ntfs_iomap_begin/end) for block mapping:
Proper handling of resident attributes via IOMAP_INLINE.
Support for sparse files through IOMAP_HOLE semantics.
Correct unwritten extent handling for zeroing operations.
- Replaces custom implementations with standardized iomap helpers:
Converts buffered reads to use iomap_read_folio and iomap_readahead.
Implements iomap_file_buffered_write for write operations.
Uses iomap_dio_rw for direct I/O paths.
Migrates zero range operations to iomap_zero_range.
- Preserves special handling paths for compressed files.
- Implements proper EOF/valid data size management during writes.

Signed-off-by: Konstantin Komarov <almaz.alexandrovich@paragon-software.com>

+617 -806
+33 -45
fs/ntfs3/attrib.c
··· 166 166 continue; 167 167 } 168 168 169 + if (err == -ENOSPC && new_len && vcn - vcn0) { 170 + /* Keep already allocated clusters. */ 171 + *alen = vcn - vcn0; 172 + return 0; 173 + } 174 + 169 175 if (err) 170 176 goto out; 171 177 ··· 892 886 * - new allocated clusters are zeroed via blkdev_issue_zeroout. 893 887 */ 894 888 int attr_data_get_block(struct ntfs_inode *ni, CLST vcn, CLST clen, CLST *lcn, 895 - CLST *len, bool *new, bool zero) 889 + CLST *len, bool *new, bool zero, void **res) 896 890 { 897 891 int err = 0; 898 892 struct runs_tree *run = &ni->file.run; ··· 909 903 910 904 if (new) 911 905 *new = false; 906 + if (res) 907 + *res = NULL; 912 908 913 909 /* Try to find in cache. */ 914 910 down_read(&ni->file.run_lock); ··· 947 939 } 948 940 949 941 if (!attr_b->non_res) { 942 + u32 data_size = le32_to_cpu(attr_b->res.data_size); 950 943 *lcn = RESIDENT_LCN; 951 - *len = le32_to_cpu(attr_b->res.data_size); 944 + *len = data_size; 945 + if (res && data_size) { 946 + *res = kmemdup(resident_data(attr_b), data_size, 947 + GFP_KERNEL); 948 + if (!*res) 949 + err = -ENOMEM; 950 + } 952 951 goto out; 953 952 } 954 953 ··· 1043 1028 to_alloc = ((vcn0 + clen + clst_per_frame - 1) & cmask) - vcn; 1044 1029 if (fr < clst_per_frame) 1045 1030 fr = clst_per_frame; 1046 - zero = true; 1031 + if (vcn != vcn0) 1032 + zero = true; 1047 1033 1048 1034 /* Check if 'vcn' and 'vcn0' in different attribute segments. 
*/ 1049 1035 if (vcn < svcn || evcn1 <= vcn) { ··· 1260 1244 goto out; 1261 1245 } 1262 1246 1263 - int attr_data_read_resident(struct ntfs_inode *ni, struct folio *folio) 1264 - { 1265 - u64 vbo; 1266 - struct ATTRIB *attr; 1267 - u32 data_size; 1268 - size_t len; 1269 - 1270 - attr = ni_find_attr(ni, NULL, NULL, ATTR_DATA, NULL, 0, NULL, NULL); 1271 - if (!attr) 1272 - return -EINVAL; 1273 - 1274 - if (attr->non_res) 1275 - return E_NTFS_NONRESIDENT; 1276 - 1277 - vbo = folio->index << PAGE_SHIFT; 1278 - data_size = le32_to_cpu(attr->res.data_size); 1279 - if (vbo > data_size) 1280 - len = 0; 1281 - else 1282 - len = min(data_size - vbo, folio_size(folio)); 1283 - 1284 - folio_fill_tail(folio, 0, resident_data(attr) + vbo, len); 1285 - folio_mark_uptodate(folio); 1286 - 1287 - return 0; 1288 - } 1289 - 1290 1247 int attr_data_write_resident(struct ntfs_inode *ni, struct folio *folio) 1291 1248 { 1292 1249 u64 vbo; ··· 1276 1287 return E_NTFS_NONRESIDENT; 1277 1288 } 1278 1289 1279 - vbo = folio->index << PAGE_SHIFT; 1290 + vbo = folio_pos(folio); 1280 1291 data_size = le32_to_cpu(attr->res.data_size); 1281 1292 if (vbo < data_size) { 1282 1293 char *data = resident_data(attr); ··· 1349 1360 int retry = 0; 1350 1361 1351 1362 for (vcn = from >> cluster_bits; vcn <= vcn_last; vcn += clen) { 1352 - if (!run_lookup_entry(run, vcn, &lcn, &clen, NULL)) { 1353 - if (retry != 0) { /* Next run_lookup_entry(vcn) also failed. */ 1354 - err = -EINVAL; 1355 - break; 1356 - } 1357 - err = attr_load_runs_vcn(ni, type, name, name_len, run, 1358 - vcn); 1359 - if (err) 1360 - break; 1361 - 1362 - clen = 0; /* Next run_lookup_entry(vcn) must be success. 
*/ 1363 - retry++; 1364 - } 1365 - else 1363 + if (run_lookup_entry(run, vcn, &lcn, &clen, NULL)) { 1366 1364 retry = 0; 1365 + continue; 1366 + } 1367 + if (retry) { 1368 + err = -EINVAL; 1369 + break; 1370 + } 1371 + err = attr_load_runs_vcn(ni, type, name, name_len, run, vcn); 1372 + if (err) 1373 + break; 1374 + 1375 + clen = 0; /* Next run_lookup_entry(vcn) must be success. */ 1376 + retry++; 1367 1377 } 1368 1378 1369 1379 return err;
+192 -176
fs/ntfs3/file.c
··· 14 14 #include <linux/falloc.h> 15 15 #include <linux/fiemap.h> 16 16 #include <linux/fileattr.h> 17 + #include <linux/iomap.h> 17 18 18 19 #include "debug.h" 19 20 #include "ntfs.h" ··· 190 189 const loff_t new_valid) 191 190 { 192 191 struct inode *inode = &ni->vfs_inode; 193 - struct address_space *mapping = inode->i_mapping; 194 - struct ntfs_sb_info *sbi = inode->i_sb->s_fs_info; 195 - loff_t pos = valid; 196 192 int err; 197 193 198 194 if (valid >= new_valid) ··· 202 204 203 205 WARN_ON(is_compressed(ni)); 204 206 205 - for (;;) { 206 - u32 zerofrom, len; 207 - struct folio *folio; 208 - u8 bits; 209 - CLST vcn, lcn, clen; 210 - 211 - if (is_sparsed(ni)) { 212 - bits = sbi->cluster_bits; 213 - vcn = pos >> bits; 214 - 215 - err = attr_data_get_block(ni, vcn, 1, &lcn, &clen, NULL, 216 - false); 217 - if (err) 218 - goto out; 219 - 220 - if (lcn == SPARSE_LCN) { 221 - pos = ((loff_t)clen + vcn) << bits; 222 - ni->i_valid = pos; 223 - goto next; 224 - } 225 - } 226 - 227 - zerofrom = pos & (PAGE_SIZE - 1); 228 - len = PAGE_SIZE - zerofrom; 229 - 230 - if (pos + len > new_valid) 231 - len = new_valid - pos; 232 - 233 - err = ntfs_write_begin(NULL, mapping, pos, len, &folio, NULL); 234 - if (err) 235 - goto out; 236 - 237 - folio_zero_range(folio, zerofrom, folio_size(folio) - zerofrom); 238 - 239 - err = ntfs_write_end(NULL, mapping, pos, len, len, folio, NULL); 240 - if (err < 0) 241 - goto out; 242 - pos += len; 243 - 244 - next: 245 - if (pos >= new_valid) 246 - break; 247 - 248 - balance_dirty_pages_ratelimited(mapping); 249 - cond_resched(); 207 + err = iomap_zero_range(inode, valid, new_valid - valid, NULL, 208 + &ntfs_iomap_ops, &ntfs_iomap_folio_ops, NULL); 209 + if (err) { 210 + ni->i_valid = valid; 211 + ntfs_inode_warn(inode, 212 + "failed to extend initialized size to %llx.", 213 + new_valid); 214 + return err; 250 215 } 251 216 252 217 return 0; 253 - 254 - out: 255 - ni->i_valid = valid; 256 - ntfs_inode_warn(inode, "failed to extend 
initialized size to %llx.", 257 - new_valid); 258 - return err; 259 218 } 260 219 261 - /* 262 - * ntfs_zero_range - Helper function for punch_hole. 263 - * 264 - * It zeroes a range [vbo, vbo_to). 265 - */ 266 - static int ntfs_zero_range(struct inode *inode, u64 vbo, u64 vbo_to) 220 + static void ntfs_filemap_close(struct vm_area_struct *vma) 267 221 { 268 - int err = 0; 269 - struct address_space *mapping = inode->i_mapping; 270 - u32 blocksize = i_blocksize(inode); 271 - pgoff_t idx = vbo >> PAGE_SHIFT; 272 - u32 from = vbo & (PAGE_SIZE - 1); 273 - pgoff_t idx_end = (vbo_to + PAGE_SIZE - 1) >> PAGE_SHIFT; 274 - loff_t page_off; 275 - struct buffer_head *head, *bh; 276 - u32 bh_next, bh_off, to; 277 - sector_t iblock; 278 - struct folio *folio; 279 - bool dirty = false; 222 + struct inode *inode = file_inode(vma->vm_file); 223 + struct ntfs_inode *ni = ntfs_i(inode); 224 + u64 from = (u64)vma->vm_pgoff << PAGE_SHIFT; 225 + u64 to = min_t(u64, i_size_read(inode), 226 + from + vma->vm_end - vma->vm_start); 280 227 281 - for (; idx < idx_end; idx += 1, from = 0) { 282 - page_off = (loff_t)idx << PAGE_SHIFT; 283 - to = (page_off + PAGE_SIZE) > vbo_to ? (vbo_to - page_off) : 284 - PAGE_SIZE; 285 - iblock = page_off >> inode->i_blkbits; 286 - 287 - folio = __filemap_get_folio( 288 - mapping, idx, FGP_LOCK | FGP_ACCESSED | FGP_CREAT, 289 - mapping_gfp_constraint(mapping, ~__GFP_FS)); 290 - if (IS_ERR(folio)) 291 - return PTR_ERR(folio); 292 - 293 - head = folio_buffers(folio); 294 - if (!head) 295 - head = create_empty_buffers(folio, blocksize, 0); 296 - 297 - bh = head; 298 - bh_off = 0; 299 - do { 300 - bh_next = bh_off + blocksize; 301 - 302 - if (bh_next <= from || bh_off >= to) 303 - continue; 304 - 305 - if (!buffer_mapped(bh)) { 306 - ntfs_get_block(inode, iblock, bh, 0); 307 - /* Unmapped? It's a hole - nothing to do. */ 308 - if (!buffer_mapped(bh)) 309 - continue; 310 - } 311 - 312 - /* Ok, it's mapped. Make sure it's up-to-date. 
*/ 313 - if (folio_test_uptodate(folio)) 314 - set_buffer_uptodate(bh); 315 - else if (bh_read(bh, 0) < 0) { 316 - err = -EIO; 317 - folio_unlock(folio); 318 - folio_put(folio); 319 - goto out; 320 - } 321 - 322 - mark_buffer_dirty(bh); 323 - } while (bh_off = bh_next, iblock += 1, 324 - head != (bh = bh->b_this_page)); 325 - 326 - folio_zero_segment(folio, from, to); 327 - dirty = true; 328 - 329 - folio_unlock(folio); 330 - folio_put(folio); 331 - cond_resched(); 332 - } 333 - out: 334 - if (dirty) 228 + if (ni->i_valid < to) { 229 + ni->i_valid = to; 335 230 mark_inode_dirty(inode); 336 - return err; 231 + } 337 232 } 233 + 234 + /* Copy of generic_file_vm_ops. */ 235 + static const struct vm_operations_struct ntfs_file_vm_ops = { 236 + .close = ntfs_filemap_close, 237 + .fault = filemap_fault, 238 + .map_pages = filemap_map_pages, 239 + .page_mkwrite = filemap_page_mkwrite, 240 + }; 338 241 339 242 /* 340 243 * ntfs_file_mmap_prepare - file_operations::mmap_prepare ··· 245 346 struct file *file = desc->file; 246 347 struct inode *inode = file_inode(file); 247 348 struct ntfs_inode *ni = ntfs_i(inode); 248 - u64 from = ((u64)desc->pgoff << PAGE_SHIFT); 249 349 bool rw = desc->vm_flags & VM_WRITE; 250 350 int err; 251 351 ··· 276 378 } 277 379 278 380 if (rw) { 279 - u64 to = min_t(loff_t, i_size_read(inode), 381 + u64 from = (u64)desc->pgoff << PAGE_SHIFT; 382 + u64 to = min_t(u64, i_size_read(inode), 280 383 from + vma_desc_size(desc)); 281 384 282 385 if (is_sparsed(ni)) { ··· 290 391 291 392 for (; vcn < end; vcn += len) { 292 393 err = attr_data_get_block(ni, vcn, 1, &lcn, 293 - &len, &new, true); 394 + &len, &new, true, 395 + NULL); 294 396 if (err) 295 397 goto out; 296 398 } ··· 311 411 } 312 412 313 413 err = generic_file_mmap_prepare(desc); 414 + if (!err && rw) 415 + desc->vm_ops = &ntfs_file_vm_ops; 314 416 out: 315 417 return err; 316 418 } ··· 367 465 */ 368 466 for (; vcn < cend_v; vcn += clen) { 369 467 err = attr_data_get_block(ni, vcn, cend_v - 
vcn, &lcn, 370 - &clen, &new, true); 468 + &clen, &new, true, NULL); 371 469 if (err) 372 470 goto out; 373 471 } ··· 376 474 */ 377 475 for (; vcn < cend; vcn += clen) { 378 476 err = attr_data_get_block(ni, vcn, cend - vcn, &lcn, 379 - &clen, &new, false); 477 + &clen, &new, false, NULL); 380 478 if (err) 381 479 goto out; 382 480 } ··· 405 503 406 504 static int ntfs_truncate(struct inode *inode, loff_t new_size) 407 505 { 408 - struct super_block *sb = inode->i_sb; 409 - struct ntfs_inode *ni = ntfs_i(inode); 410 - u64 new_valid; 411 506 int err; 507 + struct ntfs_inode *ni = ntfs_i(inode); 508 + u64 new_valid = min_t(u64, ni->i_valid, new_size); 412 509 413 - if (!S_ISREG(inode->i_mode)) 414 - return 0; 415 - 416 - if (is_compressed(ni)) { 417 - if (ni->i_valid > new_size) 418 - ni->i_valid = new_size; 419 - } else { 420 - err = block_truncate_page(inode->i_mapping, new_size, 421 - ntfs_get_block); 422 - if (err) 423 - return err; 424 - } 425 - 426 - new_valid = ntfs_up_block(sb, min_t(u64, ni->i_valid, new_size)); 427 510 truncate_setsize(inode, new_size); 428 511 429 512 ni_lock(ni); ··· 418 531 &new_valid, ni->mi.sbi->options->prealloc, NULL); 419 532 up_write(&ni->file.run_lock); 420 533 421 - if (new_valid < ni->i_valid) 422 - ni->i_valid = new_valid; 534 + ni->i_valid = new_valid; 423 535 424 536 ni_unlock(ni); 425 - if (unlikely(err)) 537 + 538 + if (err) 426 539 return err; 427 540 428 541 ni->std_fa |= FILE_ATTRIBUTE_ARCHIVE; ··· 533 646 534 647 tmp = min(vbo_a, end); 535 648 if (tmp > vbo) { 536 - err = ntfs_zero_range(inode, vbo, tmp); 649 + err = iomap_zero_range(inode, vbo, tmp - vbo, NULL, 650 + &ntfs_iomap_ops, 651 + &ntfs_iomap_folio_ops, NULL); 537 652 if (err) 538 653 goto out; 539 654 } 540 655 541 656 if (vbo < end_a && end_a < end) { 542 - err = ntfs_zero_range(inode, end_a, end); 657 + err = iomap_zero_range(inode, end_a, end - end_a, NULL, 658 + &ntfs_iomap_ops, 659 + &ntfs_iomap_folio_ops, NULL); 543 660 if (err) 544 661 goto out; 545 
662 } ··· 653 762 for (; vcn < cend_v; vcn += clen) { 654 763 err = attr_data_get_block(ni, vcn, cend_v - vcn, 655 764 &lcn, &clen, &new, 656 - true); 765 + true, NULL); 657 766 if (err) 658 767 goto out; 659 768 } ··· 663 772 for (; vcn < cend; vcn += clen) { 664 773 err = attr_data_get_block(ni, vcn, cend - vcn, 665 774 &lcn, &clen, &new, 666 - false); 775 + false, NULL); 667 776 if (err) 668 777 goto out; 669 778 } ··· 678 787 ni_unlock(ni); 679 788 if (err) 680 789 goto out; 790 + i_size_write(inode, i_size); 681 791 } else if (new_size > i_size) { 682 792 i_size_write(inode, new_size); 683 793 } ··· 815 923 struct file *file = iocb->ki_filp; 816 924 struct inode *inode = file_inode(file); 817 925 struct ntfs_inode *ni = ntfs_i(inode); 926 + size_t bytes = iov_iter_count(iter); 818 927 ssize_t err; 819 928 820 929 err = check_read_restriction(inode); 821 930 if (err) 822 931 return err; 932 + 933 + if (!bytes) 934 + return 0; /* skip atime */ 823 935 824 936 if (is_compressed(ni)) { 825 937 if (iocb->ki_flags & IOCB_DIRECT) { ··· 836 940 } 837 941 838 942 /* Check minimum alignment for dio. 
*/ 943 + if ((iocb->ki_flags & IOCB_DIRECT) && 944 + (is_resident(ni) || ((iocb->ki_pos | iov_iter_alignment(iter)) & 945 + ni->mi.sbi->bdev_blocksize_mask))) { 946 + /* Fallback to buffered I/O */ 947 + iocb->ki_flags &= ~IOCB_DIRECT; 948 + } 949 + 839 950 if (iocb->ki_flags & IOCB_DIRECT) { 840 - struct super_block *sb = inode->i_sb; 841 - struct ntfs_sb_info *sbi = sb->s_fs_info; 842 - if ((iocb->ki_pos | iov_iter_alignment(iter)) & 843 - sbi->bdev_blocksize_mask) { 844 - iocb->ki_flags &= ~IOCB_DIRECT; 951 + loff_t valid, i_size; 952 + loff_t vbo = iocb->ki_pos; 953 + loff_t end = vbo + bytes; 954 + unsigned int dio_flags = IOMAP_DIO_PARTIAL; 955 + 956 + if (iocb->ki_flags & IOCB_NOWAIT) { 957 + if (!inode_trylock_shared(inode)) 958 + return -EAGAIN; 959 + } else { 960 + inode_lock_shared(inode); 845 961 } 962 + 963 + valid = ni->i_valid; 964 + i_size = inode->i_size; 965 + 966 + if (vbo < valid) { 967 + if (valid < end) { 968 + /* read cross 'valid' size. */ 969 + dio_flags |= IOMAP_DIO_FORCE_WAIT; 970 + } 971 + 972 + err = iomap_dio_rw(iocb, iter, &ntfs_iomap_ops, NULL, 973 + dio_flags, NULL, 0); 974 + 975 + if (err > 0) { 976 + end = vbo + err; 977 + if (valid < end) { 978 + size_t to_zero = end - valid; 979 + /* Fix iter. 
*/ 980 + iov_iter_revert(iter, to_zero); 981 + iov_iter_zero(to_zero, iter); 982 + } 983 + } 984 + } else if (vbo < i_size) { 985 + if (end > i_size) 986 + bytes = i_size - vbo; 987 + iov_iter_zero(bytes, iter); 988 + iocb->ki_pos += bytes; 989 + err = bytes; 990 + } 991 + 992 + inode_unlock_shared(inode); 993 + file_accessed(iocb->ki_filp); 994 + return err; 846 995 } 847 996 848 997 return generic_file_read_iter(iocb, iter); ··· 1011 1070 off = valid & (frame_size - 1); 1012 1071 1013 1072 err = attr_data_get_block(ni, frame << NTFS_LZNT_CUNIT, 1, &lcn, 1014 - &clen, NULL, false); 1073 + &clen, NULL, false, NULL); 1015 1074 if (err) 1016 1075 goto out; 1017 1076 ··· 1214 1273 struct file *file = iocb->ki_filp; 1215 1274 struct inode *inode = file_inode(file); 1216 1275 struct ntfs_inode *ni = ntfs_i(inode); 1217 - ssize_t ret; 1218 - int err; 1276 + struct super_block *sb = inode->i_sb; 1277 + struct ntfs_sb_info *sbi = sb->s_fs_info; 1278 + ssize_t ret, err; 1219 1279 1220 1280 if (!inode_trylock(inode)) { 1221 1281 if (iocb->ki_flags & IOCB_NOWAIT) ··· 1254 1312 if (ret) 1255 1313 goto out; 1256 1314 1257 - ret = is_compressed(ni) ? ntfs_compress_write(iocb, from) : 1258 - __generic_file_write_iter(iocb, from); 1315 + if (is_compressed(ni)) { 1316 + ret = ntfs_compress_write(iocb, from); 1317 + goto out; 1318 + } 1319 + 1320 + /* Check minimum alignment for dio. 
*/ 1321 + if ((iocb->ki_flags & IOCB_DIRECT) && 1322 + (is_resident(ni) || ((iocb->ki_pos | iov_iter_alignment(from)) & 1323 + sbi->bdev_blocksize_mask))) { 1324 + /* Fallback to buffered I/O */ 1325 + iocb->ki_flags &= ~IOCB_DIRECT; 1326 + } 1327 + 1328 + if (!(iocb->ki_flags & IOCB_DIRECT)) { 1329 + ret = iomap_file_buffered_write(iocb, from, &ntfs_iomap_ops, 1330 + &ntfs_iomap_folio_ops, NULL); 1331 + inode_unlock(inode); 1332 + 1333 + if (likely(ret > 0)) 1334 + ret = generic_write_sync(iocb, ret); 1335 + 1336 + return ret; 1337 + } 1338 + 1339 + ret = iomap_dio_rw(iocb, from, &ntfs_iomap_ops, NULL, IOMAP_DIO_PARTIAL, 1340 + NULL, 0); 1341 + 1342 + if (ret == -ENOTBLK) { 1343 + /* Returns -ENOTBLK in case of a page invalidation failure for writes.*/ 1344 + /* The callers needs to fall back to buffered I/O in this case. */ 1345 + ret = 0; 1346 + } 1347 + 1348 + if (ret >= 0 && iov_iter_count(from)) { 1349 + loff_t offset = iocb->ki_pos, endbyte; 1350 + 1351 + iocb->ki_flags &= ~IOCB_DIRECT; 1352 + err = iomap_file_buffered_write(iocb, from, &ntfs_iomap_ops, 1353 + &ntfs_iomap_folio_ops, NULL); 1354 + if (err < 0) { 1355 + ret = err; 1356 + goto out; 1357 + } 1358 + 1359 + /* 1360 + * We need to ensure that the pages within the page cache for 1361 + * the range covered by this I/O are written to disk and 1362 + * invalidated. This is in attempt to preserve the expected 1363 + * direct I/O semantics in the case we fallback to buffered I/O 1364 + * to complete off the I/O request. 
1365 + */ 1366 + ret += err; 1367 + endbyte = offset + err - 1; 1368 + err = filemap_write_and_wait_range(inode->i_mapping, offset, 1369 + endbyte); 1370 + if (err) { 1371 + ret = err; 1372 + goto out; 1373 + } 1374 + 1375 + invalidate_mapping_pages(inode->i_mapping, offset >> PAGE_SHIFT, 1376 + endbyte >> PAGE_SHIFT); 1377 + } 1259 1378 1260 1379 out: 1261 1380 inode_unlock(inode); 1262 - 1263 - if (ret > 0) 1264 - ret = generic_write_sync(iocb, ret); 1265 1381 1266 1382 return ret; 1267 1383 } ··· 1358 1358 return -EOPNOTSUPP; 1359 1359 #endif 1360 1360 } 1361 + 1362 + file->f_mode |= FMODE_NOWAIT | FMODE_CAN_ODIRECT; 1361 1363 1362 1364 return generic_file_open(inode, file); 1363 1365 } ··· 1410 1408 if (unlikely(is_bad_ni(ni))) 1411 1409 return -EINVAL; 1412 1410 1413 - err = fiemap_prep(inode, fieinfo, start, &len, ~FIEMAP_FLAG_XATTR); 1414 - if (err) 1415 - return err; 1411 + if (is_compressed(ni)) { 1412 + /* Unfortunately cp -r incorrectly treats compressed clusters. */ 1413 + ntfs_inode_warn(inode, 1414 + "fiemap is not supported for compressed file"); 1415 + return -EOPNOTSUPP; 1416 + } 1416 1417 1417 - ni_lock(ni); 1418 + if (S_ISDIR(inode->i_mode)) { 1419 + /* TODO: add support for dirs (ATTR_ALLOC). 
*/ 1420 + ntfs_inode_warn(inode, 1421 + "fiemap is not supported for directories"); 1422 + return -EOPNOTSUPP; 1423 + } 1418 1424 1419 - err = ni_fiemap(ni, fieinfo, start, len); 1425 + if (fieinfo->fi_flags & FIEMAP_FLAG_XATTR) { 1426 + ntfs_inode_warn(inode, "fiemap(xattr) is not supported"); 1427 + return -EOPNOTSUPP; 1428 + } 1420 1429 1421 - ni_unlock(ni); 1430 + inode_lock_shared(inode); 1422 1431 1432 + err = iomap_fiemap(inode, fieinfo, start, len, &ntfs_iomap_ops); 1433 + 1434 + inode_unlock_shared(inode); 1423 1435 return err; 1424 1436 } 1425 1437 ··· 1479 1463 1480 1464 if (!ret) { 1481 1465 ntfs_set_state(sbi, NTFS_DIRTY_CLEAR); 1482 - ntfs_update_mftmirr(sbi, false); 1466 + ntfs_update_mftmirr(sbi); 1483 1467 } 1484 1468 1485 1469 err = sync_blockdev(sb->s_bdev);
+6 -177
fs/ntfs3/frecord.c
··· 1850 1850 return REPARSE_LINK; 1851 1851 } 1852 1852 1853 - /* 1854 - * ni_fiemap - Helper for file_fiemap(). 1855 - * 1856 - * Assumed ni_lock. 1857 - * TODO: Less aggressive locks. 1858 - */ 1859 - int ni_fiemap(struct ntfs_inode *ni, struct fiemap_extent_info *fieinfo, 1860 - __u64 vbo, __u64 len) 1861 - { 1862 - int err = 0; 1863 - struct ntfs_sb_info *sbi = ni->mi.sbi; 1864 - u8 cluster_bits = sbi->cluster_bits; 1865 - struct runs_tree run; 1866 - struct ATTRIB *attr; 1867 - CLST vcn = vbo >> cluster_bits; 1868 - CLST lcn, clen; 1869 - u64 valid = ni->i_valid; 1870 - u64 lbo, bytes; 1871 - u64 end, alloc_size; 1872 - size_t idx = -1; 1873 - u32 flags; 1874 - bool ok; 1875 - 1876 - run_init(&run); 1877 - if (S_ISDIR(ni->vfs_inode.i_mode)) { 1878 - attr = ni_find_attr(ni, NULL, NULL, ATTR_ALLOC, I30_NAME, 1879 - ARRAY_SIZE(I30_NAME), NULL, NULL); 1880 - } else { 1881 - attr = ni_find_attr(ni, NULL, NULL, ATTR_DATA, NULL, 0, NULL, 1882 - NULL); 1883 - if (!attr) { 1884 - err = -EINVAL; 1885 - goto out; 1886 - } 1887 - if (is_attr_compressed(attr)) { 1888 - /* Unfortunately cp -r incorrectly treats compressed clusters. */ 1889 - err = -EOPNOTSUPP; 1890 - ntfs_inode_warn( 1891 - &ni->vfs_inode, 1892 - "fiemap is not supported for compressed file (cp -r)"); 1893 - goto out; 1894 - } 1895 - } 1896 - 1897 - if (!attr || !attr->non_res) { 1898 - err = fiemap_fill_next_extent( 1899 - fieinfo, 0, 0, 1900 - attr ? 
le32_to_cpu(attr->res.data_size) : 0, 1901 - FIEMAP_EXTENT_DATA_INLINE | FIEMAP_EXTENT_LAST | 1902 - FIEMAP_EXTENT_MERGED); 1903 - goto out; 1904 - } 1905 - 1906 - end = vbo + len; 1907 - alloc_size = le64_to_cpu(attr->nres.alloc_size); 1908 - if (end > alloc_size) 1909 - end = alloc_size; 1910 - 1911 - while (vbo < end) { 1912 - if (idx == -1) { 1913 - ok = run_lookup_entry(&run, vcn, &lcn, &clen, &idx); 1914 - } else { 1915 - CLST vcn_next = vcn; 1916 - 1917 - ok = run_get_entry(&run, ++idx, &vcn, &lcn, &clen) && 1918 - vcn == vcn_next; 1919 - if (!ok) 1920 - vcn = vcn_next; 1921 - } 1922 - 1923 - if (!ok) { 1924 - err = attr_load_runs_vcn(ni, attr->type, 1925 - attr_name(attr), 1926 - attr->name_len, &run, vcn); 1927 - 1928 - if (err) 1929 - break; 1930 - 1931 - ok = run_lookup_entry(&run, vcn, &lcn, &clen, &idx); 1932 - 1933 - if (!ok) { 1934 - err = -EINVAL; 1935 - break; 1936 - } 1937 - } 1938 - 1939 - if (!clen) { 1940 - err = -EINVAL; // ? 1941 - break; 1942 - } 1943 - 1944 - if (lcn == SPARSE_LCN) { 1945 - vcn += clen; 1946 - vbo = (u64)vcn << cluster_bits; 1947 - continue; 1948 - } 1949 - 1950 - flags = FIEMAP_EXTENT_MERGED; 1951 - if (S_ISDIR(ni->vfs_inode.i_mode)) { 1952 - ; 1953 - } else if (is_attr_compressed(attr)) { 1954 - CLST clst_data; 1955 - 1956 - err = attr_is_frame_compressed(ni, attr, 1957 - vcn >> attr->nres.c_unit, 1958 - &clst_data, &run); 1959 - if (err) 1960 - break; 1961 - if (clst_data < NTFS_LZNT_CLUSTERS) 1962 - flags |= FIEMAP_EXTENT_ENCODED; 1963 - } else if (is_attr_encrypted(attr)) { 1964 - flags |= FIEMAP_EXTENT_DATA_ENCRYPTED; 1965 - } 1966 - 1967 - vbo = (u64)vcn << cluster_bits; 1968 - bytes = (u64)clen << cluster_bits; 1969 - lbo = (u64)lcn << cluster_bits; 1970 - 1971 - vcn += clen; 1972 - 1973 - if (vbo + bytes >= end) 1974 - bytes = end - vbo; 1975 - 1976 - if (vbo + bytes <= valid) { 1977 - ; 1978 - } else if (vbo >= valid) { 1979 - flags |= FIEMAP_EXTENT_UNWRITTEN; 1980 - } else { 1981 - /* vbo < valid && valid < vbo + 
bytes */ 1982 - u64 dlen = valid - vbo; 1983 - 1984 - if (vbo + dlen >= end) 1985 - flags |= FIEMAP_EXTENT_LAST; 1986 - 1987 - err = fiemap_fill_next_extent(fieinfo, vbo, lbo, dlen, 1988 - flags); 1989 - 1990 - if (err < 0) 1991 - break; 1992 - if (err == 1) { 1993 - err = 0; 1994 - break; 1995 - } 1996 - 1997 - vbo = valid; 1998 - bytes -= dlen; 1999 - if (!bytes) 2000 - continue; 2001 - 2002 - lbo += dlen; 2003 - flags |= FIEMAP_EXTENT_UNWRITTEN; 2004 - } 2005 - 2006 - if (vbo + bytes >= end) 2007 - flags |= FIEMAP_EXTENT_LAST; 2008 - 2009 - err = fiemap_fill_next_extent(fieinfo, vbo, lbo, bytes, flags); 2010 - if (err < 0) 2011 - break; 2012 - if (err == 1) { 2013 - err = 0; 2014 - break; 2015 - } 2016 - 2017 - vbo += bytes; 2018 - } 2019 - 2020 - out: 2021 - run_close(&run); 2022 - return err; 2023 - } 2024 - 2025 1853 static struct page *ntfs_lock_new_page(struct address_space *mapping, 2026 - pgoff_t index, gfp_t gfp) 1854 + pgoff_t index, gfp_t gfp) 2027 1855 { 2028 - struct folio *folio = __filemap_get_folio(mapping, index, 2029 - FGP_LOCK | FGP_ACCESSED | FGP_CREAT, gfp); 1856 + struct folio *folio = __filemap_get_folio( 1857 + mapping, index, FGP_LOCK | FGP_ACCESSED | FGP_CREAT, gfp); 2030 1858 struct page *page; 2031 1859 2032 1860 if (IS_ERR(folio)) ··· 2014 2186 2015 2187 for (vcn = vbo >> sbi->cluster_bits; vcn < end; vcn += clen) { 2016 2188 err = attr_data_get_block(ni, vcn, cend - vcn, &lcn, 2017 - &clen, &new, false); 2189 + &clen, &new, false, NULL); 2018 2190 if (err) 2019 2191 goto out; 2020 2192 } ··· 2845 3017 2846 3018 /* Enumerate all fragments. */ 2847 3019 for (vcn = offset >> cluster_bits;; vcn += clen) { 2848 - err = attr_data_get_block(ni, vcn, 1, &lcn, &clen, NULL, false); 3020 + err = attr_data_get_block(ni, vcn, 1, &lcn, &clen, NULL, false, 3021 + NULL); 2849 3022 if (err) { 2850 3023 return err; 2851 3024 }
+1 -1
fs/ntfs3/fslog.c
··· 5130 5130 5131 5131 undo_action_done: 5132 5132 5133 - ntfs_update_mftmirr(sbi, 0); 5133 + ntfs_update_mftmirr(sbi); 5134 5134 5135 5135 sbi->flags &= ~NTFS_FLAGS_NEED_REPLAY; 5136 5136
+1 -9
fs/ntfs3/fsntfs.c
··· 843 843 /* 844 844 * ntfs_update_mftmirr - Update $MFTMirr data. 845 845 */ 846 - void ntfs_update_mftmirr(struct ntfs_sb_info *sbi, int wait) 846 + void ntfs_update_mftmirr(struct ntfs_sb_info *sbi) 847 847 { 848 - int err; 849 848 struct super_block *sb = sbi->sb; 850 849 u32 blocksize, bytes; 851 850 sector_t block1, block2; ··· 883 884 884 885 put_bh(bh1); 885 886 bh1 = NULL; 886 - 887 - err = wait ? sync_dirty_buffer(bh2) : 0; 888 - 889 887 put_bh(bh2); 890 - if (err) 891 - return; 892 888 } 893 889 894 890 sbi->flags &= ~NTFS_FLAGS_MFTMIRR; ··· 1351 1357 err = -ENOMEM; 1352 1358 goto out; 1353 1359 } 1354 - 1355 1360 wait_on_buffer(bh); 1356 - 1357 1361 lock_buffer(bh); 1358 1362 if (!buffer_uptodate(bh)) { 1359 1363 memset(bh->b_data, 0, blocksize);
+372 -383
fs/ntfs3/inode.c
··· 12 12 #include <linux/nls.h> 13 13 #include <linux/uio.h> 14 14 #include <linux/writeback.h> 15 + #include <linux/iomap.h> 15 16 16 17 #include "debug.h" 17 18 #include "ntfs.h" ··· 167 166 168 167 std5 = Add2Ptr(attr, roff); 169 168 170 - #ifdef STATX_BTIME 171 169 nt2kernel(std5->cr_time, &ni->i_crtime); 172 - #endif 173 170 nt2kernel(std5->a_time, &ts); 174 171 inode_set_atime_to_ts(inode, ts); 175 172 nt2kernel(std5->c_time, &ts); ··· 554 555 return inode; 555 556 } 556 557 557 - enum get_block_ctx { 558 - GET_BLOCK_GENERAL = 0, 559 - GET_BLOCK_WRITE_BEGIN = 1, 560 - GET_BLOCK_DIRECT_IO_R = 2, 561 - GET_BLOCK_DIRECT_IO_W = 3, 562 - GET_BLOCK_BMAP = 4, 563 - }; 564 - 565 - static noinline int ntfs_get_block_vbo(struct inode *inode, u64 vbo, 566 - struct buffer_head *bh, int create, 567 - enum get_block_ctx ctx) 568 - { 569 - struct super_block *sb = inode->i_sb; 570 - struct ntfs_sb_info *sbi = sb->s_fs_info; 571 - struct ntfs_inode *ni = ntfs_i(inode); 572 - struct folio *folio = bh->b_folio; 573 - u8 cluster_bits = sbi->cluster_bits; 574 - u32 block_size = sb->s_blocksize; 575 - u64 bytes, lbo, valid; 576 - u32 off; 577 - int err; 578 - CLST vcn, lcn, len; 579 - bool new; 580 - 581 - /* Clear previous state. */ 582 - clear_buffer_new(bh); 583 - clear_buffer_uptodate(bh); 584 - 585 - if (is_resident(ni)) { 586 - bh->b_blocknr = RESIDENT_LCN; 587 - bh->b_size = block_size; 588 - if (!folio) { 589 - /* direct io (read) or bmap call */ 590 - err = 0; 591 - } else { 592 - ni_lock(ni); 593 - err = attr_data_read_resident(ni, folio); 594 - ni_unlock(ni); 595 - 596 - if (!err) 597 - set_buffer_uptodate(bh); 598 - } 599 - return err; 600 - } 601 - 602 - vcn = vbo >> cluster_bits; 603 - off = vbo & sbi->cluster_mask; 604 - new = false; 605 - 606 - err = attr_data_get_block(ni, vcn, 1, &lcn, &len, create ? 
&new : NULL, 607 - create && sbi->cluster_size > PAGE_SIZE); 608 - if (err) 609 - goto out; 610 - 611 - if (!len) 612 - return 0; 613 - 614 - bytes = ((u64)len << cluster_bits) - off; 615 - 616 - if (lcn >= sbi->used.bitmap.nbits) { 617 - /* This case includes resident/compressed/sparse. */ 618 - if (!create) { 619 - if (bh->b_size > bytes) 620 - bh->b_size = bytes; 621 - return 0; 622 - } 623 - WARN_ON(1); 624 - } 625 - 626 - if (new) 627 - set_buffer_new(bh); 628 - 629 - lbo = ((u64)lcn << cluster_bits) + off; 630 - 631 - set_buffer_mapped(bh); 632 - bh->b_bdev = sb->s_bdev; 633 - bh->b_blocknr = lbo >> sb->s_blocksize_bits; 634 - 635 - valid = ni->i_valid; 636 - 637 - if (ctx == GET_BLOCK_DIRECT_IO_W) { 638 - /* ntfs_direct_IO will update ni->i_valid. */ 639 - if (vbo >= valid) 640 - set_buffer_new(bh); 641 - } else if (create) { 642 - /* Normal write. */ 643 - if (bytes > bh->b_size) 644 - bytes = bh->b_size; 645 - 646 - if (vbo >= valid) 647 - set_buffer_new(bh); 648 - 649 - if (vbo + bytes > valid) { 650 - ni->i_valid = vbo + bytes; 651 - mark_inode_dirty(inode); 652 - } 653 - } else if (vbo >= valid) { 654 - /* Read out of valid data. */ 655 - clear_buffer_mapped(bh); 656 - } else if (vbo + bytes <= valid) { 657 - /* Normal read. */ 658 - } else if (vbo + block_size <= valid) { 659 - /* Normal short read. 
*/ 660 - bytes = block_size; 661 - } else { 662 - /* 663 - * Read across valid size: vbo < valid && valid < vbo + block_size 664 - */ 665 - bytes = block_size; 666 - 667 - if (folio) { 668 - u32 voff = valid - vbo; 669 - 670 - bh->b_size = block_size; 671 - off = vbo & (PAGE_SIZE - 1); 672 - folio_set_bh(bh, folio, off); 673 - 674 - if (bh_read(bh, 0) < 0) { 675 - err = -EIO; 676 - goto out; 677 - } 678 - folio_zero_segment(folio, off + voff, off + block_size); 679 - } 680 - } 681 - 682 - if (bh->b_size > bytes) 683 - bh->b_size = bytes; 684 - 685 - #ifndef __LP64__ 686 - if (ctx == GET_BLOCK_DIRECT_IO_W || ctx == GET_BLOCK_DIRECT_IO_R) { 687 - static_assert(sizeof(size_t) < sizeof(loff_t)); 688 - if (bytes > 0x40000000u) 689 - bh->b_size = 0x40000000u; 690 - } 691 - #endif 692 - 693 - return 0; 694 - 695 - out: 696 - return err; 697 - } 698 - 699 - int ntfs_get_block(struct inode *inode, sector_t vbn, 700 - struct buffer_head *bh_result, int create) 701 - { 702 - return ntfs_get_block_vbo(inode, (u64)vbn << inode->i_blkbits, 703 - bh_result, create, GET_BLOCK_GENERAL); 704 - } 705 - 706 - static int ntfs_get_block_bmap(struct inode *inode, sector_t vsn, 707 - struct buffer_head *bh_result, int create) 708 - { 709 - return ntfs_get_block_vbo(inode, 710 - (u64)vsn << inode->i_sb->s_blocksize_bits, 711 - bh_result, create, GET_BLOCK_BMAP); 712 - } 713 - 714 558 static sector_t ntfs_bmap(struct address_space *mapping, sector_t block) 715 559 { 716 - return generic_block_bmap(mapping, block, ntfs_get_block_bmap); 560 + return iomap_bmap(mapping, block, &ntfs_iomap_ops); 717 561 } 562 + 563 + static void ntfs_iomap_read_end_io(struct bio *bio) 564 + { 565 + int error = blk_status_to_errno(bio->bi_status); 566 + struct folio_iter fi; 567 + 568 + bio_for_each_folio_all(fi, bio) { 569 + struct folio *folio = fi.folio; 570 + struct inode *inode = folio->mapping->host; 571 + struct ntfs_inode *ni = ntfs_i(inode); 572 + u64 valid = ni->i_valid; 573 + u32 f_size = 
folio_size(folio); 574 + loff_t f_pos = folio_pos(folio); 575 + 576 + 577 + if (valid < f_pos + f_size) { 578 + u32 z_from = valid <= f_pos ? 579 + 0 : 580 + offset_in_folio(folio, valid); 581 + /* The only thing ntfs_iomap_read_end_io used for. */ 582 + folio_zero_segment(folio, z_from, f_size); 583 + } 584 + 585 + iomap_finish_folio_read(folio, fi.offset, fi.length, error); 586 + } 587 + bio_put(bio); 588 + } 589 + 590 + /* 591 + * Copied from iomap/bio.c. 592 + */ 593 + static int ntfs_iomap_bio_read_folio_range(const struct iomap_iter *iter, 594 + struct iomap_read_folio_ctx *ctx, 595 + size_t plen) 596 + { 597 + struct folio *folio = ctx->cur_folio; 598 + const struct iomap *iomap = &iter->iomap; 599 + loff_t pos = iter->pos; 600 + size_t poff = offset_in_folio(folio, pos); 601 + loff_t length = iomap_length(iter); 602 + sector_t sector; 603 + struct bio *bio = ctx->read_ctx; 604 + 605 + sector = iomap_sector(iomap, pos); 606 + if (!bio || bio_end_sector(bio) != sector || 607 + !bio_add_folio(bio, folio, plen, poff)) { 608 + gfp_t gfp = mapping_gfp_constraint(folio->mapping, GFP_KERNEL); 609 + gfp_t orig_gfp = gfp; 610 + unsigned int nr_vecs = DIV_ROUND_UP(length, PAGE_SIZE); 611 + 612 + if (bio) 613 + submit_bio(bio); 614 + 615 + if (ctx->rac) /* same as readahead_gfp_mask */ 616 + gfp |= __GFP_NORETRY | __GFP_NOWARN; 617 + bio = bio_alloc(iomap->bdev, bio_max_segs(nr_vecs), REQ_OP_READ, 618 + gfp); 619 + /* 620 + * If the bio_alloc fails, try it again for a single page to 621 + * avoid having to deal with partial page reads. This emulates 622 + * what do_mpage_read_folio does. 
623 + */ 624 + if (!bio) 625 + bio = bio_alloc(iomap->bdev, 1, REQ_OP_READ, orig_gfp); 626 + if (ctx->rac) 627 + bio->bi_opf |= REQ_RAHEAD; 628 + bio->bi_iter.bi_sector = sector; 629 + bio->bi_end_io = ntfs_iomap_read_end_io; 630 + bio_add_folio_nofail(bio, folio, plen, poff); 631 + ctx->read_ctx = bio; 632 + } 633 + return 0; 634 + } 635 + 636 + static void ntfs_iomap_bio_submit_read(struct iomap_read_folio_ctx *ctx) 637 + { 638 + struct bio *bio = ctx->read_ctx; 639 + 640 + if (bio) 641 + submit_bio(bio); 642 + } 643 + 644 + static const struct iomap_read_ops ntfs_iomap_bio_read_ops = { 645 + .read_folio_range = ntfs_iomap_bio_read_folio_range, 646 + .submit_read = ntfs_iomap_bio_submit_read, 647 + }; 718 648 719 649 static int ntfs_read_folio(struct file *file, struct folio *folio) 720 650 { ··· 652 724 struct inode *inode = mapping->host; 653 725 struct ntfs_inode *ni = ntfs_i(inode); 654 726 loff_t vbo = folio_pos(folio); 727 + struct iomap_read_folio_ctx ctx = { 728 + .cur_folio = folio, 729 + .ops = &ntfs_iomap_bio_read_ops, 730 + }; 655 731 656 732 if (unlikely(is_bad_ni(ni))) { 657 733 folio_unlock(folio); ··· 669 737 return 0; 670 738 } 671 739 672 - if (is_resident(ni)) { 673 - ni_lock(ni); 674 - err = attr_data_read_resident(ni, folio); 675 - ni_unlock(ni); 676 - if (err != E_NTFS_NONRESIDENT) { 677 - folio_unlock(folio); 678 - return err; 679 - } 680 - } 681 - 682 740 if (is_compressed(ni)) { 683 741 /* ni_lock is taken inside ni_read_folio_cmpr after page locks */ 684 742 err = ni_read_folio_cmpr(ni, folio); 685 743 return err; 686 744 } 687 745 688 - /* Normal + sparse files. 
*/ 689 - return mpage_read_folio(folio, ntfs_get_block); 746 + iomap_read_folio(&ntfs_iomap_ops, &ctx); 747 + return 0; 690 748 } 691 749 692 750 static void ntfs_readahead(struct readahead_control *rac) ··· 684 762 struct address_space *mapping = rac->mapping; 685 763 struct inode *inode = mapping->host; 686 764 struct ntfs_inode *ni = ntfs_i(inode); 687 - u64 valid; 688 - loff_t pos; 765 + struct iomap_read_folio_ctx ctx = { 766 + .ops = &ntfs_iomap_bio_read_ops, 767 + .rac = rac, 768 + }; 689 769 690 770 if (is_resident(ni)) { 691 771 /* No readahead for resident. */ ··· 699 775 return; 700 776 } 701 777 702 - valid = ni->i_valid; 703 - pos = readahead_pos(rac); 704 - 705 - if (valid < i_size_read(inode) && pos <= valid && 706 - valid < pos + readahead_length(rac)) { 707 - /* Range cross 'valid'. Read it page by page. */ 708 - return; 709 - } 710 - 711 - mpage_readahead(rac, ntfs_get_block); 712 - } 713 - 714 - static int ntfs_get_block_direct_IO_R(struct inode *inode, sector_t iblock, 715 - struct buffer_head *bh_result, int create) 716 - { 717 - return ntfs_get_block_vbo(inode, (u64)iblock << inode->i_blkbits, 718 - bh_result, create, GET_BLOCK_DIRECT_IO_R); 719 - } 720 - 721 - static int ntfs_get_block_direct_IO_W(struct inode *inode, sector_t iblock, 722 - struct buffer_head *bh_result, int create) 723 - { 724 - return ntfs_get_block_vbo(inode, (u64)iblock << inode->i_blkbits, 725 - bh_result, create, GET_BLOCK_DIRECT_IO_W); 726 - } 727 - 728 - static ssize_t ntfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter) 729 - { 730 - struct file *file = iocb->ki_filp; 731 - struct address_space *mapping = file->f_mapping; 732 - struct inode *inode = mapping->host; 733 - struct ntfs_inode *ni = ntfs_i(inode); 734 - loff_t vbo = iocb->ki_pos; 735 - loff_t end; 736 - int wr = iov_iter_rw(iter) & WRITE; 737 - size_t iter_count = iov_iter_count(iter); 738 - loff_t valid; 739 - ssize_t ret; 740 - 741 - if (is_resident(ni)) { 742 - /* Switch to buffered write. 
*/ 743 - ret = 0; 744 - goto out; 745 - } 746 - if (is_compressed(ni)) { 747 - ret = 0; 748 - goto out; 749 - } 750 - 751 - ret = blockdev_direct_IO(iocb, inode, iter, 752 - wr ? ntfs_get_block_direct_IO_W : 753 - ntfs_get_block_direct_IO_R); 754 - 755 - if (ret > 0) 756 - end = vbo + ret; 757 - else if (wr && ret == -EIOCBQUEUED) 758 - end = vbo + iter_count; 759 - else 760 - goto out; 761 - 762 - valid = ni->i_valid; 763 - if (wr) { 764 - if (end > valid && !S_ISBLK(inode->i_mode)) { 765 - ni->i_valid = end; 766 - mark_inode_dirty(inode); 767 - } 768 - } else if (vbo < valid && valid < end) { 769 - /* Fix page. */ 770 - iov_iter_revert(iter, end - valid); 771 - iov_iter_zero(end - valid, iter); 772 - } 773 - 774 - out: 775 - return ret; 778 + iomap_readahead(&ntfs_iomap_ops, &ctx); 776 779 } 777 780 778 781 int ntfs_set_size(struct inode *inode, u64 new_size) ··· 712 861 /* Check for maximum file size. */ 713 862 if (is_sparsed(ni) || is_compressed(ni)) { 714 863 if (new_size > sbi->maxbytes_sparse) { 715 - err = -EFBIG; 716 - goto out; 864 + return -EFBIG; 717 865 } 718 866 } else if (new_size > sbi->maxbytes) { 719 - err = -EFBIG; 720 - goto out; 867 + return -EFBIG; 721 868 } 722 869 723 870 ni_lock(ni); ··· 724 875 err = attr_set_size(ni, ATTR_DATA, NULL, 0, &ni->file.run, new_size, 725 876 &ni->i_valid, true, NULL); 726 877 878 + if (!err) { 879 + i_size_write(inode, new_size); 880 + mark_inode_dirty(inode); 881 + } 882 + 727 883 up_write(&ni->file.run_lock); 728 884 ni_unlock(ni); 729 885 730 - mark_inode_dirty(inode); 731 - 732 - out: 733 886 return err; 734 887 } 888 + 889 + /* 890 + * Function to get mapping vbo -> lbo. 
891 + * used with: 892 + * - iomap_zero_range 893 + * - iomap_truncate_page 894 + * - iomap_dio_rw 895 + * - iomap_file_buffered_write 896 + * - iomap_bmap 897 + * - iomap_fiemap 898 + * - iomap_bio_read_folio 899 + * - iomap_bio_readahead 900 + */ 901 + static int ntfs_iomap_begin(struct inode *inode, loff_t offset, loff_t length, 902 + unsigned int flags, struct iomap *iomap, 903 + struct iomap *srcmap) 904 + { 905 + struct ntfs_inode *ni = ntfs_i(inode); 906 + struct ntfs_sb_info *sbi = ni->mi.sbi; 907 + u8 cluster_bits = sbi->cluster_bits; 908 + CLST vcn = offset >> cluster_bits; 909 + u32 off = offset & sbi->cluster_mask; 910 + bool rw = flags & IOMAP_WRITE; 911 + loff_t endbyte = offset + length; 912 + void *res = NULL; 913 + int err; 914 + CLST lcn, clen, clen_max; 915 + bool new_clst = false; 916 + if (unlikely(ntfs3_forced_shutdown(sbi->sb))) 917 + return -EIO; 918 + 919 + if ((flags & IOMAP_REPORT) && offset > ntfs_get_maxbytes(ni)) { 920 + /* called from fiemap/bmap. */ 921 + return -EINVAL; 922 + } 923 + 924 + clen_max = rw ? (bytes_to_cluster(sbi, endbyte) - vcn) : 1; 925 + 926 + err = attr_data_get_block( 927 + ni, vcn, clen_max, &lcn, &clen, rw ? &new_clst : NULL, 928 + flags == IOMAP_WRITE && (off || (endbyte & sbi->cluster_mask)), 929 + &res); 930 + 931 + if (err) { 932 + return err; 933 + } 934 + 935 + if (lcn == EOF_LCN) { 936 + /* request out of file. */ 937 + if (flags & IOMAP_REPORT) { 938 + /* special code for report. */ 939 + return -ENOENT; 940 + } 941 + 942 + if (rw) { 943 + /* should never be here. */ 944 + return -EINVAL; 945 + } 946 + lcn = SPARSE_LCN; 947 + } 948 + 949 + if (lcn == RESIDENT_LCN) { 950 + if (offset >= clen) { 951 + kfree(res); 952 + if (flags & IOMAP_REPORT) { 953 + /* special code for report. 
*/ 954 + return -ENOENT; 955 + } 956 + return -EFAULT; 957 + } 958 + 959 + iomap->private = iomap->inline_data = res; 960 + iomap->type = IOMAP_INLINE; 961 + iomap->offset = 0; 962 + iomap->length = clen; /* resident size in bytes. */ 963 + iomap->flags = 0; 964 + return 0; 965 + } 966 + 967 + if (!clen) { 968 + /* broken file? */ 969 + return -EINVAL; 970 + } 971 + 972 + if (lcn == COMPRESSED_LCN) { 973 + /* should never be here. */ 974 + return -EOPNOTSUPP; 975 + } 976 + 977 + iomap->flags = new_clst ? IOMAP_F_NEW : 0; 978 + iomap->bdev = inode->i_sb->s_bdev; 979 + 980 + /* Translate clusters into bytes. */ 981 + iomap->offset = offset; 982 + iomap->addr = ((loff_t)lcn << cluster_bits) + off; 983 + iomap->length = ((loff_t)clen << cluster_bits) - off; 984 + if (length && iomap->length > length) 985 + iomap->length = length; 986 + else 987 + endbyte = offset + iomap->length; 988 + 989 + if (lcn == SPARSE_LCN) { 990 + iomap->addr = IOMAP_NULL_ADDR; 991 + iomap->type = IOMAP_HOLE; 992 + } else if (endbyte <= ni->i_valid) { 993 + iomap->type = IOMAP_MAPPED; 994 + } else if (offset < ni->i_valid) { 995 + iomap->type = IOMAP_MAPPED; 996 + if (flags & IOMAP_REPORT) 997 + iomap->length = ni->i_valid - offset; 998 + } else if (rw || (flags & IOMAP_ZERO)) { 999 + iomap->type = IOMAP_MAPPED; 1000 + } else { 1001 + iomap->type = IOMAP_UNWRITTEN; 1002 + } 1003 + 1004 + if ((flags & IOMAP_ZERO) && iomap->type == IOMAP_MAPPED) { 1005 + /* Avoid too large requests. 
*/ 1006 + u32 tail; 1007 + u32 off_a = iomap->addr & (PAGE_SIZE - 1); 1008 + if (off_a) 1009 + tail = PAGE_SIZE - off_a; 1010 + else 1011 + tail = PAGE_SIZE; 1012 + 1013 + if (iomap->length > tail) 1014 + iomap->length = tail; 1015 + } 1016 + 1017 + return 0; 1018 + } 1019 + 1020 + static int ntfs_iomap_end(struct inode *inode, loff_t pos, loff_t length, 1021 + ssize_t written, unsigned int flags, 1022 + struct iomap *iomap) 1023 + { 1024 + int err = 0; 1025 + struct ntfs_inode *ni = ntfs_i(inode); 1026 + loff_t endbyte = pos + written; 1027 + 1028 + if ((flags & IOMAP_WRITE) || (flags & IOMAP_ZERO)) { 1029 + if (iomap->type == IOMAP_INLINE) { 1030 + u32 data_size; 1031 + struct ATTRIB *attr; 1032 + struct mft_inode *mi; 1033 + 1034 + attr = ni_find_attr(ni, NULL, NULL, ATTR_DATA, NULL, 0, 1035 + NULL, &mi); 1036 + if (!attr || attr->non_res) { 1037 + err = -EINVAL; 1038 + goto out; 1039 + } 1040 + 1041 + data_size = le32_to_cpu(attr->res.data_size); 1042 + if (!(pos < data_size && endbyte <= data_size)) { 1043 + err = -EINVAL; 1044 + goto out; 1045 + } 1046 + 1047 + /* Update resident data. */ 1048 + memcpy(resident_data(attr) + pos, 1049 + iomap_inline_data(iomap, pos), written); 1050 + mi->dirty = true; 1051 + ni->i_valid = data_size; 1052 + } else if (ni->i_valid < endbyte) { 1053 + ni->i_valid = endbyte; 1054 + mark_inode_dirty(inode); 1055 + } 1056 + } 1057 + 1058 + if ((flags & IOMAP_ZERO) && iomap->type == IOMAP_MAPPED) { 1059 + balance_dirty_pages_ratelimited(inode->i_mapping); 1060 + cond_resched(); 1061 + } 1062 + 1063 + out: 1064 + if (iomap->type == IOMAP_INLINE) { 1065 + kfree(iomap->private); 1066 + iomap->private = NULL; 1067 + } 1068 + 1069 + return err; 1070 + } 1071 + 1072 + /* 1073 + * write_begin + put_folio + write_end. 
1074 + * iomap_zero_range 1075 + * iomap_truncate_page 1076 + * iomap_file_buffered_write 1077 + */ 1078 + static void ntfs_iomap_put_folio(struct inode *inode, loff_t pos, 1079 + unsigned int len, struct folio *folio) 1080 + { 1081 + struct ntfs_inode *ni = ntfs_i(inode); 1082 + loff_t end = pos + len; 1083 + u32 f_size = folio_size(folio); 1084 + loff_t f_pos = folio_pos(folio); 1085 + loff_t f_end = f_pos + f_size; 1086 + 1087 + if (ni->i_valid < end && end < f_end) { 1088 + /* zero range [end - f_end). */ 1089 + /* The only thing ntfs_iomap_put_folio used for. */ 1090 + folio_zero_segment(folio, offset_in_folio(folio, end), f_size); 1091 + } 1092 + folio_unlock(folio); 1093 + folio_put(folio); 1094 + } 1095 + 1096 + static ssize_t ntfs_writeback_range(struct iomap_writepage_ctx *wpc, 1097 + struct folio *folio, u64 offset, 1098 + unsigned int len, u64 end_pos) 1099 + { 1100 + struct iomap *iomap = &wpc->iomap; 1101 + struct inode *inode = wpc->inode; 1102 + 1103 + /* Check iomap position. 
*/ 1104 + if (!(iomap->offset <= offset && 1105 + offset < iomap->offset + iomap->length)) { 1106 + int err; 1107 + struct ntfs_sb_info *sbi = ntfs_sb(inode->i_sb); 1108 + loff_t i_size_up = ntfs_up_cluster(sbi, inode->i_size); 1109 + loff_t len_max = i_size_up - offset; 1110 + 1111 + err = ntfs_iomap_begin(inode, offset, len_max, IOMAP_WRITE, 1112 + iomap, NULL); 1113 + if (err) { 1114 + ntfs_set_state(sbi, NTFS_DIRTY_DIRTY); 1115 + return err; 1116 + } 1117 + } 1118 + 1119 + return iomap_add_to_ioend(wpc, folio, offset, end_pos, len); 1120 + } 1121 + 1122 + 1123 + const struct iomap_writeback_ops ntfs_writeback_ops = { 1124 + .writeback_range = ntfs_writeback_range, 1125 + .writeback_submit = iomap_ioend_writeback_submit, 1126 + }; 735 1127 736 1128 static int ntfs_resident_writepage(struct folio *folio, 737 1129 struct writeback_control *wbc) ··· 1002 912 static int ntfs_writepages(struct address_space *mapping, 1003 913 struct writeback_control *wbc) 1004 914 { 1005 - struct inode *inode = mapping->host; 1006 - 1007 - /* Avoid any operation if inode is bad. 
*/ 1008 - if (unlikely(is_bad_ni(ntfs_i(inode)))) 1009 - return -EINVAL; 1010 - 1011 - if (unlikely(ntfs3_forced_shutdown(inode->i_sb))) 1012 - return -EIO; 1013 - 1014 - if (is_resident(ntfs_i(inode))) { 1015 - struct folio *folio = NULL; 1016 - int error; 1017 - 1018 - while ((folio = writeback_iter(mapping, wbc, folio, &error))) 1019 - error = ntfs_resident_writepage(folio, wbc); 1020 - return error; 1021 - } 1022 - return mpage_writepages(mapping, wbc, ntfs_get_block); 1023 - } 1024 - 1025 - static int ntfs_get_block_write_begin(struct inode *inode, sector_t vbn, 1026 - struct buffer_head *bh_result, int create) 1027 - { 1028 - return ntfs_get_block_vbo(inode, (u64)vbn << inode->i_blkbits, 1029 - bh_result, create, GET_BLOCK_WRITE_BEGIN); 1030 - } 1031 - 1032 - int ntfs_write_begin(const struct kiocb *iocb, struct address_space *mapping, 1033 - loff_t pos, u32 len, struct folio **foliop, void **fsdata) 1034 - { 1035 915 int err; 1036 916 struct inode *inode = mapping->host; 1037 917 struct ntfs_inode *ni = ntfs_i(inode); 918 + struct iomap_writepage_ctx wpc = { 919 + .inode = mapping->host, 920 + .wbc = wbc, 921 + .ops = &ntfs_writeback_ops, 922 + }; 1038 923 1039 924 /* Avoid any operation if inode is bad. 
*/ 1040 925 if (unlikely(is_bad_ni(ni))) ··· 1019 954 return -EIO; 1020 955 1021 956 if (is_resident(ni)) { 1022 - struct folio *folio = __filemap_get_folio( 1023 - mapping, pos >> PAGE_SHIFT, FGP_WRITEBEGIN, 1024 - mapping_gfp_mask(mapping)); 957 + struct folio *folio; 1025 958 1026 - if (IS_ERR(folio)) { 1027 - err = PTR_ERR(folio); 1028 - goto out; 1029 - } 959 + while ((folio = writeback_iter(mapping, wbc, folio, &err))) 960 + err = ntfs_resident_writepage(folio, wbc); 1030 961 1031 - ni_lock(ni); 1032 - err = attr_data_read_resident(ni, folio); 1033 - ni_unlock(ni); 1034 - 1035 - if (!err) { 1036 - *foliop = folio; 1037 - goto out; 1038 - } 1039 - folio_unlock(folio); 1040 - folio_put(folio); 1041 - 1042 - if (err != E_NTFS_NONRESIDENT) 1043 - goto out; 962 + return err; 1044 963 } 1045 964 1046 - err = block_write_begin(mapping, pos, len, foliop, 1047 - ntfs_get_block_write_begin); 1048 - 1049 - out: 1050 - return err; 1051 - } 1052 - 1053 - /* 1054 - * ntfs_write_end - Address_space_operations::write_end. 1055 - */ 1056 - int ntfs_write_end(const struct kiocb *iocb, struct address_space *mapping, 1057 - loff_t pos, u32 len, u32 copied, struct folio *folio, 1058 - void *fsdata) 1059 - { 1060 - struct inode *inode = mapping->host; 1061 - struct ntfs_inode *ni = ntfs_i(inode); 1062 - u64 valid = ni->i_valid; 1063 - bool dirty = false; 1064 - int err; 1065 - 1066 - if (is_resident(ni)) { 1067 - ni_lock(ni); 1068 - err = attr_data_write_resident(ni, folio); 1069 - ni_unlock(ni); 1070 - if (!err) { 1071 - struct buffer_head *head = folio_buffers(folio); 1072 - dirty = true; 1073 - /* Clear any buffers in folio. 
*/ 1074 - if (head) { 1075 - struct buffer_head *bh = head; 1076 - 1077 - do { 1078 - clear_buffer_dirty(bh); 1079 - clear_buffer_mapped(bh); 1080 - set_buffer_uptodate(bh); 1081 - } while (head != (bh = bh->b_this_page)); 1082 - } 1083 - folio_mark_uptodate(folio); 1084 - err = copied; 1085 - } 1086 - folio_unlock(folio); 1087 - folio_put(folio); 1088 - } else { 1089 - err = generic_write_end(iocb, mapping, pos, len, copied, folio, 1090 - fsdata); 1091 - } 1092 - 1093 - if (err >= 0) { 1094 - if (!(ni->std_fa & FILE_ATTRIBUTE_ARCHIVE)) { 1095 - inode_set_mtime_to_ts(inode, 1096 - inode_set_ctime_current(inode)); 1097 - ni->std_fa |= FILE_ATTRIBUTE_ARCHIVE; 1098 - dirty = true; 1099 - } 1100 - 1101 - if (valid != ni->i_valid) { 1102 - /* ni->i_valid is changed in ntfs_get_block_vbo. */ 1103 - dirty = true; 1104 - } 1105 - 1106 - if (pos + err > inode->i_size) { 1107 - i_size_write(inode, pos + err); 1108 - dirty = true; 1109 - } 1110 - 1111 - if (dirty) 1112 - mark_inode_dirty(inode); 1113 - } 1114 - 1115 - return err; 965 + return iomap_writepages(&wpc); 1116 966 } 1117 967 1118 968 int ntfs3_write_inode(struct inode *inode, struct writeback_control *wbc) ··· 1042 1062 1043 1063 /* 1044 1064 * Helper function to read file. 
1065 + * Used to read $AttrDef and $UpCase 1045 1066 */ 1046 1067 int inode_read_data(struct inode *inode, void *data, size_t bytes) 1047 1068 { ··· 2088 2107 .read_folio = ntfs_read_folio, 2089 2108 .readahead = ntfs_readahead, 2090 2109 .writepages = ntfs_writepages, 2091 - .write_begin = ntfs_write_begin, 2092 - .write_end = ntfs_write_end, 2093 - .direct_IO = ntfs_direct_IO, 2094 2110 .bmap = ntfs_bmap, 2095 - .dirty_folio = block_dirty_folio, 2096 - .migrate_folio = buffer_migrate_folio, 2097 - .invalidate_folio = block_invalidate_folio, 2111 + .dirty_folio = iomap_dirty_folio, 2112 + .migrate_folio = filemap_migrate_folio, 2113 + .release_folio = iomap_release_folio, 2114 + .invalidate_folio = iomap_invalidate_folio, 2098 2115 }; 2099 2116 2100 2117 const struct address_space_operations ntfs_aops_cmpr = { 2101 2118 .read_folio = ntfs_read_folio, 2102 - .dirty_folio = block_dirty_folio, 2103 - .direct_IO = ntfs_direct_IO, 2119 + .dirty_folio = iomap_dirty_folio, 2120 + .release_folio = iomap_release_folio, 2121 + .invalidate_folio = iomap_invalidate_folio, 2122 + }; 2123 + 2124 + const struct iomap_ops ntfs_iomap_ops = { 2125 + .iomap_begin = ntfs_iomap_begin, 2126 + .iomap_end = ntfs_iomap_end, 2127 + }; 2128 + 2129 + const struct iomap_write_ops ntfs_iomap_folio_ops = { 2130 + .put_folio = ntfs_iomap_put_folio, 2104 2131 }; 2105 2132 // clang-format on
+4 -12
fs/ntfs3/ntfs_fs.h
··· 442 442 u64 new_size, const u64 *new_valid, bool keep_prealloc, 443 443 struct ATTRIB **ret); 444 444 int attr_data_get_block(struct ntfs_inode *ni, CLST vcn, CLST clen, CLST *lcn, 445 - CLST *len, bool *new, bool zero); 446 - int attr_data_read_resident(struct ntfs_inode *ni, struct folio *folio); 445 + CLST *len, bool *new, bool zero, void **res); 447 446 int attr_data_write_resident(struct ntfs_inode *ni, struct folio *folio); 448 447 int attr_load_runs_vcn(struct ntfs_inode *ni, enum ATTR_TYPE type, 449 448 const __le16 *name, u8 name_len, struct runs_tree *run, ··· 567 568 struct REPARSE_DATA_BUFFER *buffer); 568 569 int ni_write_inode(struct inode *inode, int sync, const char *hint); 569 570 #define _ni_write_inode(i, w) ni_write_inode(i, w, __func__) 570 - int ni_fiemap(struct ntfs_inode *ni, struct fiemap_extent_info *fieinfo, 571 - __u64 vbo, __u64 len); 572 571 int ni_read_folio_cmpr(struct ntfs_inode *ni, struct folio *folio); 573 572 int ni_decompress_file(struct ntfs_inode *ni); 574 573 int ni_read_frame(struct ntfs_inode *ni, u64 frame_vbo, struct page **pages, ··· 611 614 void ntfs_mark_rec_free(struct ntfs_sb_info *sbi, CLST rno, bool is_mft); 612 615 int ntfs_clear_mft_tail(struct ntfs_sb_info *sbi, size_t from, size_t to); 613 616 int ntfs_refresh_zone(struct ntfs_sb_info *sbi); 614 - void ntfs_update_mftmirr(struct ntfs_sb_info *sbi, int wait); 617 + void ntfs_update_mftmirr(struct ntfs_sb_info *sbi); 615 618 void ntfs_bad_inode(struct inode *inode, const char *hint); 616 619 #define _ntfs_bad_inode(i) ntfs_bad_inode(i, __func__) 617 620 enum NTFS_DIRTY_FLAGS { ··· 742 745 struct inode *ntfs_iget5(struct super_block *sb, const struct MFT_REF *ref, 743 746 const struct cpu_str *name); 744 747 int ntfs_set_size(struct inode *inode, u64 new_size); 745 - int ntfs_get_block(struct inode *inode, sector_t vbn, 746 - struct buffer_head *bh_result, int create); 747 - int ntfs_write_begin(const struct kiocb *iocb, struct address_space *mapping, 748 - 
loff_t pos, u32 len, struct folio **foliop, void **fsdata); 749 - int ntfs_write_end(const struct kiocb *iocb, struct address_space *mapping, 750 - loff_t pos, u32 len, u32 copied, struct folio *folio, 751 - void *fsdata); 752 748 int ntfs3_write_inode(struct inode *inode, struct writeback_control *wbc); 753 749 int ntfs_sync_inode(struct inode *inode); 754 750 int inode_read_data(struct inode *inode, void *data, size_t bytes); ··· 752 762 int ntfs_link_inode(struct inode *inode, struct dentry *dentry); 753 763 int ntfs_unlink_inode(struct inode *dir, const struct dentry *dentry); 754 764 void ntfs_evict_inode(struct inode *inode); 765 + extern const struct iomap_ops ntfs_iomap_ops; 766 + extern const struct iomap_write_ops ntfs_iomap_folio_ops; 755 767 extern const struct inode_operations ntfs_link_inode_operations; 756 768 extern const struct address_space_operations ntfs_aops; 757 769 extern const struct address_space_operations ntfs_aops_cmpr;
+8 -3
fs/ntfs3/super.c
··· 58 58 #include <linux/buffer_head.h> 59 59 #include <linux/exportfs.h> 60 60 #include <linux/fs.h> 61 - #include <linux/fs_struct.h> 62 61 #include <linux/fs_context.h> 63 62 #include <linux/fs_parser.h> 63 + #include <linux/fs_struct.h> 64 64 #include <linux/log2.h> 65 65 #include <linux/minmax.h> 66 66 #include <linux/module.h> ··· 674 674 sbi->volume.ni = NULL; 675 675 } 676 676 677 - ntfs_update_mftmirr(sbi, 0); 677 + ntfs_update_mftmirr(sbi); 678 678 679 679 indx_clear(&sbi->security.index_sii); 680 680 indx_clear(&sbi->security.index_sdh); ··· 821 821 if (!err) 822 822 ntfs_set_state(sbi, NTFS_DIRTY_CLEAR); 823 823 824 - ntfs_update_mftmirr(sbi, wait); 824 + ntfs_update_mftmirr(sbi); 825 + 826 + if (wait) { 827 + sync_blockdev(sb->s_bdev); 828 + blkdev_issue_flush(sb->s_bdev); 829 + } 825 830 826 831 return err; 827 832 }