Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

Merge tag 'xfs-4.15-fixes-4' of git://git.kernel.org/pub/scm/fs/xfs/xfs-linux

Pull xfs fixes from Darrick Wong:
"Here are some bug fixes for 4.15-rc2.

- fix memory leaks that appeared after removing ifork inline data
buffer

- recover deferred rmap update log items in correct order

- fix memory leaks when buffer construction fails

- fix memory leaks when bmbt is corrupt

- fix some uninitialized variables and math problems in the quota
scrubber

- add some omitted attribution tags on the log replay commit

- fix some UBSAN complaints about integer overflows with large sparse
files

- implement an effective inode mode check in online fsck

- fix log's inability to retry quota item writeout due to transient
errors"

* tag 'xfs-4.15-fixes-4' of git://git.kernel.org/pub/scm/fs/xfs/xfs-linux:
xfs: Properly retry failed dquot items in case of error during buffer writeback
xfs: scrub inode mode properly
xfs: remove unused parameter from xfs_writepage_map
xfs: ubsan fixes
xfs: calculate correct offset in xfs_scrub_quota_item
xfs: fix uninitialized variable in xfs_scrub_quota
xfs: fix leaks on corruption errors in xfs_bmap.c
xfs: fortify xfs_alloc_buftarg error handling
xfs: log recovery should replay deferred ops in order
xfs: always free inline data before resetting inode fork during ifree

+190 -61
+4 -2
fs/xfs/libxfs/xfs_bmap.c
··· 5662 5662 *done = true; 5663 5663 goto del_cursor; 5664 5664 } 5665 - XFS_WANT_CORRUPTED_RETURN(mp, !isnullstartblock(got.br_startblock)); 5665 + XFS_WANT_CORRUPTED_GOTO(mp, !isnullstartblock(got.br_startblock), 5666 + del_cursor); 5666 5667 5667 5668 new_startoff = got.br_startoff - offset_shift_fsb; 5668 5669 if (xfs_iext_peek_prev_extent(ifp, &icur, &prev)) { ··· 5768 5767 goto del_cursor; 5769 5768 } 5770 5769 } 5771 - XFS_WANT_CORRUPTED_RETURN(mp, !isnullstartblock(got.br_startblock)); 5770 + XFS_WANT_CORRUPTED_GOTO(mp, !isnullstartblock(got.br_startblock), 5771 + del_cursor); 5772 5772 5773 5773 if (stop_fsb >= got.br_startoff + got.br_blockcount) { 5774 5774 error = -EIO;
+13 -1
fs/xfs/scrub/inode.c
··· 318 318 319 319 /* di_mode */ 320 320 mode = be16_to_cpu(dip->di_mode); 321 - if (mode & ~(S_IALLUGO | S_IFMT)) 321 + switch (mode & S_IFMT) { 322 + case S_IFLNK: 323 + case S_IFREG: 324 + case S_IFDIR: 325 + case S_IFCHR: 326 + case S_IFBLK: 327 + case S_IFIFO: 328 + case S_IFSOCK: 329 + /* mode is recognized */ 330 + break; 331 + default: 322 332 xfs_scrub_ino_set_corrupt(sc, ino, bp); 333 + break; 334 + } 323 335 324 336 /* v1/v2 fields */ 325 337 switch (dip->di_version) {
+2 -2
fs/xfs/scrub/quota.c
··· 107 107 unsigned long long rcount; 108 108 xfs_ino_t fs_icount; 109 109 110 - offset = id * qi->qi_dqperchunk; 110 + offset = id / qi->qi_dqperchunk; 111 111 112 112 /* 113 113 * We fed $id and DQNEXT into the xfs_qm_dqget call, which means ··· 207 207 xfs_dqid_t id = 0; 208 208 uint dqtype; 209 209 int nimaps; 210 - int error; 210 + int error = 0; 211 211 212 212 if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp)) 213 213 return -ENOENT;
+6 -6
fs/xfs/xfs_aops.c
··· 399 399 (ip->i_df.if_flags & XFS_IFEXTENTS)); 400 400 ASSERT(offset <= mp->m_super->s_maxbytes); 401 401 402 - if (offset + count > mp->m_super->s_maxbytes) 402 + if ((xfs_ufsize_t)offset + count > mp->m_super->s_maxbytes) 403 403 count = mp->m_super->s_maxbytes - offset; 404 404 end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + count); 405 405 offset_fsb = XFS_B_TO_FSBT(mp, offset); ··· 896 896 struct writeback_control *wbc, 897 897 struct inode *inode, 898 898 struct page *page, 899 - loff_t offset, 900 - uint64_t end_offset) 899 + uint64_t end_offset) 901 900 { 902 901 LIST_HEAD(submit_list); 903 902 struct xfs_ioend *ioend, *next; 904 903 struct buffer_head *bh, *head; 905 904 ssize_t len = i_blocksize(inode); 905 + uint64_t offset; 906 906 int error = 0; 907 907 int count = 0; 908 908 int uptodate = 1; ··· 1146 1146 end_offset = offset; 1147 1147 } 1148 1148 1149 - return xfs_writepage_map(wpc, wbc, inode, page, offset, end_offset); 1149 + return xfs_writepage_map(wpc, wbc, inode, page, end_offset); 1150 1150 1151 1151 redirty: 1152 1152 redirty_page_for_writepage(wbc, page); ··· 1265 1265 if (mapping_size > size) 1266 1266 mapping_size = size; 1267 1267 if (offset < i_size_read(inode) && 1268 - offset + mapping_size >= i_size_read(inode)) { 1268 + (xfs_ufsize_t)offset + mapping_size >= i_size_read(inode)) { 1269 1269 /* limit mapping to block that spans EOF */ 1270 1270 mapping_size = roundup_64(i_size_read(inode) - offset, 1271 1271 i_blocksize(inode)); ··· 1312 1312 lockmode = xfs_ilock_data_map_shared(ip); 1313 1313 1314 1314 ASSERT(offset <= mp->m_super->s_maxbytes); 1315 - if (offset + size > mp->m_super->s_maxbytes) 1315 + if ((xfs_ufsize_t)offset + size > mp->m_super->s_maxbytes) 1316 1316 size = mp->m_super->s_maxbytes - offset; 1317 1317 end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + size); 1318 1318 offset_fsb = XFS_B_TO_FSBT(mp, offset);
+7 -16
fs/xfs/xfs_bmap_item.c
··· 389 389 int 390 390 xfs_bui_recover( 391 391 struct xfs_mount *mp, 392 - struct xfs_bui_log_item *buip) 392 + struct xfs_bui_log_item *buip, 393 + struct xfs_defer_ops *dfops) 393 394 { 394 395 int error = 0; 395 396 unsigned int bui_type; ··· 405 404 xfs_exntst_t state; 406 405 struct xfs_trans *tp; 407 406 struct xfs_inode *ip = NULL; 408 - struct xfs_defer_ops dfops; 409 407 struct xfs_bmbt_irec irec; 410 - xfs_fsblock_t firstfsb; 411 408 412 409 ASSERT(!test_bit(XFS_BUI_RECOVERED, &buip->bui_flags)); 413 410 ··· 463 464 464 465 if (VFS_I(ip)->i_nlink == 0) 465 466 xfs_iflags_set(ip, XFS_IRECOVERY); 466 - xfs_defer_init(&dfops, &firstfsb); 467 467 468 468 /* Process deferred bmap item. */ 469 469 state = (bmap->me_flags & XFS_BMAP_EXTENT_UNWRITTEN) ? ··· 477 479 break; 478 480 default: 479 481 error = -EFSCORRUPTED; 480 - goto err_dfops; 482 + goto err_inode; 481 483 } 482 484 xfs_trans_ijoin(tp, ip, 0); 483 485 484 486 count = bmap->me_len; 485 - error = xfs_trans_log_finish_bmap_update(tp, budp, &dfops, type, 487 + error = xfs_trans_log_finish_bmap_update(tp, budp, dfops, type, 486 488 ip, whichfork, bmap->me_startoff, 487 489 bmap->me_startblock, &count, state); 488 490 if (error) 489 - goto err_dfops; 491 + goto err_inode; 490 492 491 493 if (count > 0) { 492 494 ASSERT(type == XFS_BMAP_UNMAP); ··· 494 496 irec.br_blockcount = count; 495 497 irec.br_startoff = bmap->me_startoff; 496 498 irec.br_state = state; 497 - error = xfs_bmap_unmap_extent(tp->t_mountp, &dfops, ip, &irec); 499 + error = xfs_bmap_unmap_extent(tp->t_mountp, dfops, ip, &irec); 498 500 if (error) 499 - goto err_dfops; 501 + goto err_inode; 500 502 } 501 - 502 - /* Finish transaction, free inodes. */ 503 - error = xfs_defer_finish(&tp, &dfops); 504 - if (error) 505 - goto err_dfops; 506 503 507 504 set_bit(XFS_BUI_RECOVERED, &buip->bui_flags); 508 505 error = xfs_trans_commit(tp); ··· 506 513 507 514 return error; 508 515 509 - err_dfops: 510 - xfs_defer_cancel(&dfops); 511 516 err_inode: 512 517 xfs_trans_cancel(tp); 513 518 if (ip) {
+2 -1
fs/xfs/xfs_bmap_item.h
··· 93 93 struct xfs_bui_log_item *); 94 94 void xfs_bui_item_free(struct xfs_bui_log_item *); 95 95 void xfs_bui_release(struct xfs_bui_log_item *); 96 - int xfs_bui_recover(struct xfs_mount *mp, struct xfs_bui_log_item *buip); 96 + int xfs_bui_recover(struct xfs_mount *mp, struct xfs_bui_log_item *buip, 97 + struct xfs_defer_ops *dfops); 97 98 98 99 #endif /* __XFS_BMAP_ITEM_H__ */
+10 -5
fs/xfs/xfs_buf.c
··· 1815 1815 btp->bt_daxdev = dax_dev; 1816 1816 1817 1817 if (xfs_setsize_buftarg_early(btp, bdev)) 1818 - goto error; 1818 + goto error_free; 1819 1819 1820 1820 if (list_lru_init(&btp->bt_lru)) 1821 - goto error; 1821 + goto error_free; 1822 1822 1823 1823 if (percpu_counter_init(&btp->bt_io_count, 0, GFP_KERNEL)) 1824 - goto error; 1824 + goto error_lru; 1825 1825 1826 1826 btp->bt_shrinker.count_objects = xfs_buftarg_shrink_count; 1827 1827 btp->bt_shrinker.scan_objects = xfs_buftarg_shrink_scan; 1828 1828 btp->bt_shrinker.seeks = DEFAULT_SEEKS; 1829 1829 btp->bt_shrinker.flags = SHRINKER_NUMA_AWARE; 1830 - register_shrinker(&btp->bt_shrinker); 1830 + if (register_shrinker(&btp->bt_shrinker)) 1831 + goto error_pcpu; 1831 1832 return btp; 1832 1833 1833 - error: 1834 + error_pcpu: 1835 + percpu_counter_destroy(&btp->bt_io_count); 1836 + error_lru: 1837 + list_lru_destroy(&btp->bt_lru); 1838 + error_free: 1834 1839 kmem_free(btp); 1835 1840 return NULL; 1836 1841 }
+11 -3
fs/xfs/xfs_dquot.c
··· 970 970 * holding the lock before removing the dquot from the AIL. 971 971 */ 972 972 if ((lip->li_flags & XFS_LI_IN_AIL) && 973 - lip->li_lsn == qip->qli_flush_lsn) { 973 + ((lip->li_lsn == qip->qli_flush_lsn) || 974 + (lip->li_flags & XFS_LI_FAILED))) { 974 975 975 976 /* xfs_trans_ail_delete() drops the AIL lock. */ 976 977 spin_lock(&ailp->xa_lock); 977 - if (lip->li_lsn == qip->qli_flush_lsn) 978 + if (lip->li_lsn == qip->qli_flush_lsn) { 978 979 xfs_trans_ail_delete(ailp, lip, SHUTDOWN_CORRUPT_INCORE); 979 - else 980 + } else { 981 + /* 982 + * Clear the failed state since we are about to drop the 983 + * flush lock 984 + */ 985 + if (lip->li_flags & XFS_LI_FAILED) 986 + xfs_clear_li_failed(lip); 980 987 spin_unlock(&ailp->xa_lock); 988 + } 981 989 } 982 990 983 991 /*
+38 -2
fs/xfs/xfs_dquot_item.c
··· 137 137 wait_event(dqp->q_pinwait, (atomic_read(&dqp->q_pincount) == 0)); 138 138 } 139 139 140 + /* 141 + * Callback used to mark a buffer with XFS_LI_FAILED when items in the buffer 142 + * have been failed during writeback 143 + * 144 + * this informs the AIL that the dquot is already flush locked on the next push, 145 + * and acquires a hold on the buffer to ensure that it isn't reclaimed before 146 + * dirty data makes it to disk. 147 + */ 148 + STATIC void 149 + xfs_dquot_item_error( 150 + struct xfs_log_item *lip, 151 + struct xfs_buf *bp) 152 + { 153 + struct xfs_dquot *dqp; 154 + 155 + dqp = DQUOT_ITEM(lip)->qli_dquot; 156 + ASSERT(!completion_done(&dqp->q_flush)); 157 + xfs_set_li_failed(lip, bp); 158 + } 159 + 140 160 STATIC uint 141 161 xfs_qm_dquot_logitem_push( 142 162 struct xfs_log_item *lip, ··· 164 144 __acquires(&lip->li_ailp->xa_lock) 165 145 { 166 146 struct xfs_dquot *dqp = DQUOT_ITEM(lip)->qli_dquot; 167 - struct xfs_buf *bp = NULL; 147 + struct xfs_buf *bp = lip->li_buf; 168 148 uint rval = XFS_ITEM_SUCCESS; 169 149 int error; 170 150 171 151 if (atomic_read(&dqp->q_pincount) > 0) 172 152 return XFS_ITEM_PINNED; 153 + 154 + /* 155 + * The buffer containing this item failed to be written back 156 + * previously. Resubmit the buffer for IO 157 + */ 158 + if (lip->li_flags & XFS_LI_FAILED) { 159 + if (!xfs_buf_trylock(bp)) 160 + return XFS_ITEM_LOCKED; 161 + 162 + if (!xfs_buf_resubmit_failed_buffers(bp, lip, buffer_list)) 163 + rval = XFS_ITEM_FLUSHING; 164 + 165 + xfs_buf_unlock(bp); 166 + return rval; 167 + } 173 168 174 169 if (!xfs_dqlock_nowait(dqp)) 175 170 return XFS_ITEM_LOCKED; ··· 277 242 .iop_unlock = xfs_qm_dquot_logitem_unlock, 278 243 .iop_committed = xfs_qm_dquot_logitem_committed, 279 244 .iop_push = xfs_qm_dquot_logitem_push, 280 - .iop_committing = xfs_qm_dquot_logitem_committing 245 + .iop_committing = xfs_qm_dquot_logitem_committing, 246 + .iop_error = xfs_dquot_item_error 281 247 }; 282 248 283 249 /*
+21
fs/xfs/xfs_inode.c
··· 2401 2401 } 2402 2402 2403 2403 /* 2404 + * Free any local-format buffers sitting around before we reset to 2405 + * extents format. 2406 + */ 2407 + static inline void 2408 + xfs_ifree_local_data( 2409 + struct xfs_inode *ip, 2410 + int whichfork) 2411 + { 2412 + struct xfs_ifork *ifp; 2413 + 2414 + if (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_LOCAL) 2415 + return; 2416 + 2417 + ifp = XFS_IFORK_PTR(ip, whichfork); 2418 + xfs_idata_realloc(ip, -ifp->if_bytes, whichfork); 2419 + } 2420 + 2421 + /* 2404 2422 * This is called to return an inode to the inode free list. 2405 2423 * The inode should already be truncated to 0 length and have 2406 2424 * no pages associated with it. This routine also assumes that ··· 2454 2436 error = xfs_difree(tp, ip->i_ino, dfops, &xic); 2455 2437 if (error) 2456 2438 return error; 2439 + 2440 + xfs_ifree_local_data(ip, XFS_DATA_FORK); 2441 + xfs_ifree_local_data(ip, XFS_ATTR_FORK); 2457 2442 2458 2443 VFS_I(ip)->i_mode = 0; /* mark incore inode as free */ 2459 2444 ip->i_d.di_flags = 0;
+67 -8
fs/xfs/xfs_log_recover.c
··· 24 24 #include "xfs_bit.h" 25 25 #include "xfs_sb.h" 26 26 #include "xfs_mount.h" 27 + #include "xfs_defer.h" 27 28 #include "xfs_da_format.h" 28 29 #include "xfs_da_btree.h" 29 30 #include "xfs_inode.h" ··· 4717 4716 xlog_recover_process_cui( 4718 4717 struct xfs_mount *mp, 4719 4718 struct xfs_ail *ailp, 4720 - struct xfs_log_item *lip) 4719 + struct xfs_log_item *lip, 4720 + struct xfs_defer_ops *dfops) 4721 4721 { 4722 4722 struct xfs_cui_log_item *cuip; 4723 4723 int error; ··· 4731 4729 return 0; 4732 4730 4733 4731 spin_unlock(&ailp->xa_lock); 4734 - error = xfs_cui_recover(mp, cuip); 4732 + error = xfs_cui_recover(mp, cuip, dfops); 4735 4733 spin_lock(&ailp->xa_lock); 4736 4734 4737 4735 return error; ··· 4758 4756 xlog_recover_process_bui( 4759 4757 struct xfs_mount *mp, 4760 4758 struct xfs_ail *ailp, 4761 - struct xfs_log_item *lip) 4759 + struct xfs_log_item *lip, 4760 + struct xfs_defer_ops *dfops) 4762 4761 { 4763 4762 struct xfs_bui_log_item *buip; 4764 4763 int error; ··· 4772 4769 return 0; 4773 4770 4774 4771 spin_unlock(&ailp->xa_lock); 4775 - error = xfs_bui_recover(mp, buip); 4772 + error = xfs_bui_recover(mp, buip, dfops); 4776 4773 spin_lock(&ailp->xa_lock); 4777 4774 4778 4775 return error; ··· 4808 4805 } 4809 4806 } 4810 4807 4808 + /* Take all the collected deferred ops and finish them in order. */ 4809 + static int 4810 + xlog_finish_defer_ops( 4811 + struct xfs_mount *mp, 4812 + struct xfs_defer_ops *dfops) 4813 + { 4814 + struct xfs_trans *tp; 4815 + int64_t freeblks; 4816 + uint resblks; 4817 + int error; 4818 + 4819 + /* 4820 + * We're finishing the defer_ops that accumulated as a result of 4821 + * recovering unfinished intent items during log recovery. We 4822 + * reserve an itruncate transaction because it is the largest 4823 + * permanent transaction type. Since we're the only user of the fs 4824 + * right now, take 93% (15/16) of the available free blocks. Use 4825 + * weird math to avoid a 64-bit division. 4826 + */ 4827 + freeblks = percpu_counter_sum(&mp->m_fdblocks); 4828 + if (freeblks <= 0) 4829 + return -ENOSPC; 4830 + resblks = min_t(int64_t, UINT_MAX, freeblks); 4831 + resblks = (resblks * 15) >> 4; 4832 + error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate, resblks, 4833 + 0, XFS_TRANS_RESERVE, &tp); 4834 + if (error) 4835 + return error; 4836 + 4837 + error = xfs_defer_finish(&tp, dfops); 4838 + if (error) 4839 + goto out_cancel; 4840 + 4841 + return xfs_trans_commit(tp); 4842 + 4843 + out_cancel: 4844 + xfs_trans_cancel(tp); 4845 + return error; 4846 + } 4847 + 4811 4848 /* 4812 4849 * When this is called, all of the log intent items which did not have 4813 4850 * corresponding log done items should be in the AIL. What we do now ··· 4868 4825 xlog_recover_process_intents( 4869 4826 struct xlog *log) 4870 4827 { 4871 - struct xfs_log_item *lip; 4872 - int error = 0; 4828 + struct xfs_defer_ops dfops; 4873 4829 struct xfs_ail_cursor cur; 4830 + struct xfs_log_item *lip; 4874 4831 struct xfs_ail *ailp; 4832 + xfs_fsblock_t firstfsb; 4875 4833 int error = 0; 4876 4835 #if defined(DEBUG) || defined(XFS_WARN) 4877 4835 xfs_lsn_t last_lsn; 4878 4836 #endif ··· 4884 4839 #if defined(DEBUG) || defined(XFS_WARN) 4885 4840 last_lsn = xlog_assign_lsn(log->l_curr_cycle, log->l_curr_block); 4886 4841 #endif 4842 + xfs_defer_init(&dfops, &firstfsb); 4887 4843 while (lip != NULL) { 4888 4844 /* 4889 4845 * We're done when we see something other than an intent. ··· 4905 4859 */ 4906 4860 ASSERT(XFS_LSN_CMP(last_lsn, lip->li_lsn) >= 0); 4907 4861 4862 + /* 4863 + * NOTE: If your intent processing routine can create more 4864 + * deferred ops, you /must/ attach them to the dfops in this 4865 + * routine or else those subsequent intents will get 4866 + * replayed in the wrong order! 4867 + */ 4908 4868 switch (lip->li_type) { 4909 4869 case XFS_LI_EFI: 4910 4870 error = xlog_recover_process_efi(log->l_mp, ailp, lip); ··· 4919 4867 error = xlog_recover_process_rui(log->l_mp, ailp, lip); 4920 4868 break; 4921 4869 case XFS_LI_CUI: 4922 - error = xlog_recover_process_cui(log->l_mp, ailp, lip); 4870 + error = xlog_recover_process_cui(log->l_mp, ailp, lip, 4871 + &dfops); 4923 4872 break; 4924 4873 case XFS_LI_BUI: 4925 - error = xlog_recover_process_bui(log->l_mp, ailp, lip); 4874 + error = xlog_recover_process_bui(log->l_mp, ailp, lip, 4875 + &dfops); 4926 4876 break; 4927 4877 } 4928 4878 if (error) ··· 4934 4880 out: 4935 4881 xfs_trans_ail_cursor_done(&cur); 4936 4882 spin_unlock(&ailp->xa_lock); 4883 + if (error) 4884 + xfs_defer_cancel(&dfops); 4885 + else 4886 + error = xlog_finish_defer_ops(log->l_mp, &dfops); 4887 + 4937 4888 return error; 4938 4889 4939 4890
+7 -14
fs/xfs/xfs_refcount_item.c
··· 393 393 int 394 394 xfs_cui_recover( 395 395 struct xfs_mount *mp, 396 - struct xfs_cui_log_item *cuip) 396 + struct xfs_cui_log_item *cuip, 397 + struct xfs_defer_ops *dfops) 397 398 { 398 399 int i; 399 400 int error = 0; ··· 406 405 struct xfs_trans *tp; 407 406 struct xfs_btree_cur *rcur = NULL; 408 407 enum xfs_refcount_intent_type type; 409 - xfs_fsblock_t firstfsb; 410 408 xfs_fsblock_t new_fsb; 411 409 xfs_extlen_t new_len; 412 410 struct xfs_bmbt_irec irec; 413 - struct xfs_defer_ops dfops; 414 411 bool requeue_only = false; 415 412 416 413 ASSERT(!test_bit(XFS_CUI_RECOVERED, &cuip->cui_flags)); ··· 464 465 return error; 465 466 cudp = xfs_trans_get_cud(tp, cuip); 466 467 467 - xfs_defer_init(&dfops, &firstfsb); 468 468 for (i = 0; i < cuip->cui_format.cui_nextents; i++) { 469 469 refc = &cuip->cui_format.cui_extents[i]; 470 470 refc_type = refc->pe_flags & XFS_REFCOUNT_EXTENT_TYPE_MASK; ··· 483 485 new_len = refc->pe_len; 484 486 } else 485 487 error = xfs_trans_log_finish_refcount_update(tp, cudp, 486 - &dfops, type, refc->pe_startblock, refc->pe_len, 488 + dfops, type, refc->pe_startblock, refc->pe_len, 487 489 &new_fsb, &new_len, &rcur); 488 490 if (error) 489 491 goto abort_error; ··· 495 497 switch (type) { 496 498 case XFS_REFCOUNT_INCREASE: 497 499 error = xfs_refcount_increase_extent( 498 - tp->t_mountp, &dfops, &irec); 500 + tp->t_mountp, dfops, &irec); 499 501 break; 500 502 case XFS_REFCOUNT_DECREASE: 501 503 error = xfs_refcount_decrease_extent( 502 - tp->t_mountp, &dfops, &irec); 504 + tp->t_mountp, dfops, &irec); 503 505 break; 504 506 case XFS_REFCOUNT_ALLOC_COW: 505 507 error = xfs_refcount_alloc_cow_extent( 506 - tp->t_mountp, &dfops, 508 + tp->t_mountp, dfops, 507 509 irec.br_startblock, 508 510 irec.br_blockcount); 509 511 break; 510 512 case XFS_REFCOUNT_FREE_COW: 511 513 error = xfs_refcount_free_cow_extent( 512 - tp->t_mountp, &dfops, 514 + tp->t_mountp, dfops, 513 515 irec.br_startblock, 514 516 irec.br_blockcount); 515 517 break; ··· 523 525 } 524 526 525 527 xfs_refcount_finish_one_cleanup(tp, rcur, error); 526 - error = xfs_defer_finish(&tp, &dfops); 527 - if (error) 528 - goto abort_defer; 529 528 set_bit(XFS_CUI_RECOVERED, &cuip->cui_flags); 530 529 error = xfs_trans_commit(tp); 531 530 return error; 532 531 533 532 abort_error: 534 533 xfs_refcount_finish_one_cleanup(tp, rcur, error); 535 - abort_defer: 536 - xfs_defer_cancel(&dfops); 537 534 xfs_trans_cancel(tp); 538 535 return error; 539 536 }
+2 -1
fs/xfs/xfs_refcount_item.h
··· 96 96 struct xfs_cui_log_item *); 97 97 void xfs_cui_item_free(struct xfs_cui_log_item *); 98 98 void xfs_cui_release(struct xfs_cui_log_item *); 99 - int xfs_cui_recover(struct xfs_mount *mp, struct xfs_cui_log_item *cuip); 99 + int xfs_cui_recover(struct xfs_mount *mp, struct xfs_cui_log_item *cuip, 100 + struct xfs_defer_ops *dfops); 100 101 101 102 #endif /* __XFS_REFCOUNT_ITEM_H__ */