Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

Merge tag 'xfs-5.10-fixes-3' of git://git.kernel.org/pub/scm/fs/xfs/xfs-linux

Pull xfs fixes from Darrick Wong:

- Fix an uninitialized struct field problem (the xefi_skip_discard flag
was left unset when creating a deferred AGFL free log intent item)

- Fix an iomap problem zeroing unwritten EOF blocks

- Fix some clumsy error handling when writeback fails on filesystems
with blocksize < pagesize

- Fix a retry loop not resetting loop variables properly

- Fix scrub flagging rtinherit inodes on a non-rt fs, since the kernel
actually does permit that combination

- Fix excessive page cache flushing when unsharing part of a file

* tag 'xfs-5.10-fixes-3' of git://git.kernel.org/pub/scm/fs/xfs/xfs-linux:
xfs: only flush the unshared range in xfs_reflink_unshare
xfs: fix scrub flagging rtinherit even if there is no rt device
xfs: fix missing CoW blocks writeback conversion retry
iomap: clean up writeback state logic on writepage error
iomap: support partial page discard on writeback block mapping failure
xfs: flush new eof page on truncate to avoid post-eof corruption
xfs: set xefi_discard when creating a deferred agfl free log intent item

+38 -33
+10 -20
fs/iomap/buffered-io.c
··· 1374 1374 WARN_ON_ONCE(!wpc->ioend && !list_empty(&submit_list)); 1375 1375 WARN_ON_ONCE(!PageLocked(page)); 1376 1376 WARN_ON_ONCE(PageWriteback(page)); 1377 + WARN_ON_ONCE(PageDirty(page)); 1377 1378 1378 1379 /* 1379 1380 * We cannot cancel the ioend directly here on error. We may have ··· 1383 1382 * appropriately. 1384 1383 */ 1385 1384 if (unlikely(error)) { 1385 + /* 1386 + * Let the filesystem know what portion of the current page 1387 + * failed to map. If the page wasn't been added to ioend, it 1388 + * won't be affected by I/O completion and we must unlock it 1389 + * now. 1390 + */ 1391 + if (wpc->ops->discard_page) 1392 + wpc->ops->discard_page(page, file_offset); 1386 1393 if (!count) { 1387 - /* 1388 - * If the current page hasn't been added to ioend, it 1389 - * won't be affected by I/O completions and we must 1390 - * discard and unlock it right here. 1391 - */ 1392 - if (wpc->ops->discard_page) 1393 - wpc->ops->discard_page(page); 1394 1394 ClearPageUptodate(page); 1395 1395 unlock_page(page); 1396 1396 goto done; 1397 1397 } 1398 - 1399 - /* 1400 - * If the page was not fully cleaned, we need to ensure that the 1401 - * higher layers come back to it correctly. That means we need 1402 - * to keep the page dirty, and for WB_SYNC_ALL writeback we need 1403 - * to ensure the PAGECACHE_TAG_TOWRITE index mark is not removed 1404 - * so another attempt to write this page in this writeback sweep 1405 - * will be made. 1406 - */ 1407 - set_page_writeback_keepwrite(page); 1408 - } else { 1409 - clear_page_dirty_for_io(page); 1410 - set_page_writeback(page); 1411 1398 } 1412 1399 1400 + set_page_writeback(page); 1413 1401 unlock_page(page); 1414 1402 1415 1403 /*
+1
fs/xfs/libxfs/xfs_alloc.c
··· 2467 2467 new->xefi_startblock = XFS_AGB_TO_FSB(mp, agno, agbno); 2468 2468 new->xefi_blockcount = 1; 2469 2469 new->xefi_oinfo = *oinfo; 2470 + new->xefi_skip_discard = false; 2470 2471 2471 2472 trace_xfs_agfl_free_defer(mp, agno, 0, agbno, 1); 2472 2473
+1 -1
fs/xfs/libxfs/xfs_bmap.h
··· 52 52 { 53 53 xfs_fsblock_t xefi_startblock;/* starting fs block number */ 54 54 xfs_extlen_t xefi_blockcount;/* number of blocks in extent */ 55 + bool xefi_skip_discard; 55 56 struct list_head xefi_list; 56 57 struct xfs_owner_info xefi_oinfo; /* extent owner */ 57 - bool xefi_skip_discard; 58 58 }; 59 59 60 60 #define XFS_BMAP_MAX_NMAP 4
+1 -2
fs/xfs/scrub/inode.c
··· 121 121 goto bad; 122 122 123 123 /* rt flags require rt device */ 124 - if ((flags & (XFS_DIFLAG_REALTIME | XFS_DIFLAG_RTINHERIT)) && 125 - !mp->m_rtdev_targp) 124 + if ((flags & XFS_DIFLAG_REALTIME) && !mp->m_rtdev_targp) 126 125 goto bad; 127 126 128 127 /* new rt bitmap flag only valid for rbmino */
+12 -8
fs/xfs/xfs_aops.c
··· 346 346 ssize_t count = i_blocksize(inode); 347 347 xfs_fileoff_t offset_fsb = XFS_B_TO_FSBT(mp, offset); 348 348 xfs_fileoff_t end_fsb = XFS_B_TO_FSB(mp, offset + count); 349 - xfs_fileoff_t cow_fsb = NULLFILEOFF; 350 - int whichfork = XFS_DATA_FORK; 349 + xfs_fileoff_t cow_fsb; 350 + int whichfork; 351 351 struct xfs_bmbt_irec imap; 352 352 struct xfs_iext_cursor icur; 353 353 int retries = 0; ··· 381 381 * landed in a hole and we skip the block. 382 382 */ 383 383 retry: 384 + cow_fsb = NULLFILEOFF; 385 + whichfork = XFS_DATA_FORK; 384 386 xfs_ilock(ip, XFS_ILOCK_SHARED); 385 387 ASSERT(ip->i_df.if_format != XFS_DINODE_FMT_BTREE || 386 388 (ip->i_df.if_flags & XFS_IFEXTENTS)); ··· 529 527 */ 530 528 static void 531 529 xfs_discard_page( 532 - struct page *page) 530 + struct page *page, 531 + loff_t fileoff) 533 532 { 534 533 struct inode *inode = page->mapping->host; 535 534 struct xfs_inode *ip = XFS_I(inode); 536 535 struct xfs_mount *mp = ip->i_mount; 537 - loff_t offset = page_offset(page); 538 - xfs_fileoff_t start_fsb = XFS_B_TO_FSBT(mp, offset); 536 + unsigned int pageoff = offset_in_page(fileoff); 537 + xfs_fileoff_t start_fsb = XFS_B_TO_FSBT(mp, fileoff); 538 + xfs_fileoff_t pageoff_fsb = XFS_B_TO_FSBT(mp, pageoff); 539 539 int error; 540 540 541 541 if (XFS_FORCED_SHUTDOWN(mp)) ··· 545 541 546 542 xfs_alert_ratelimited(mp, 547 543 "page discard on page "PTR_FMT", inode 0x%llx, offset %llu.", 548 - page, ip->i_ino, offset); 544 + page, ip->i_ino, fileoff); 549 545 550 546 error = xfs_bmap_punch_delalloc_range(ip, start_fsb, 551 - i_blocks_per_page(inode, page)); 547 + i_blocks_per_page(inode, page) - pageoff_fsb); 552 548 if (error && !XFS_FORCED_SHUTDOWN(mp)) 553 549 xfs_alert(mp, "page discard unable to remove delalloc mapping."); 554 550 out_invalidate: 555 - iomap_invalidatepage(page, 0, PAGE_SIZE); 551 + iomap_invalidatepage(page, pageoff, PAGE_SIZE - pageoff); 556 552 } 557 553 558 554 static const struct iomap_writeback_ops xfs_writeback_ops 
= {
+10
fs/xfs/xfs_iops.c
··· 911 911 error = iomap_zero_range(inode, oldsize, newsize - oldsize, 912 912 &did_zeroing, &xfs_buffered_write_iomap_ops); 913 913 } else { 914 + /* 915 + * iomap won't detect a dirty page over an unwritten block (or a 916 + * cow block over a hole) and subsequently skips zeroing the 917 + * newly post-EOF portion of the page. Flush the new EOF to 918 + * convert the block before the pagecache truncate. 919 + */ 920 + error = filemap_write_and_wait_range(inode->i_mapping, newsize, 921 + newsize); 922 + if (error) 923 + return error; 914 924 error = iomap_truncate_page(inode, newsize, &did_zeroing, 915 925 &xfs_buffered_write_iomap_ops); 916 926 }
+2 -1
fs/xfs/xfs_reflink.c
··· 1502 1502 &xfs_buffered_write_iomap_ops); 1503 1503 if (error) 1504 1504 goto out; 1505 - error = filemap_write_and_wait(inode->i_mapping); 1505 + 1506 + error = filemap_write_and_wait_range(inode->i_mapping, offset, len); 1506 1507 if (error) 1507 1508 goto out; 1508 1509
+1 -1
include/linux/iomap.h
··· 221 221 * Optional, allows the file system to discard state on a page where 222 222 * we failed to submit any I/O. 223 223 */ 224 - void (*discard_page)(struct page *page); 224 + void (*discard_page)(struct page *page, loff_t fileoff); 225 225 }; 226 226 227 227 struct iomap_writepage_ctx {