Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

Merge tag 'xfs-5.19-fixes-4' of git://git.kernel.org/pub/scm/fs/xfs/xfs-linux

Pull xfs fixes from Darrick Wong:
"This fixes some stalling problems and corrects the last of the
problems (I hope) observed during testing of the new atomic xattr
update feature.

- Fix statfs blocking on background inode gc workers

- Fix some broken inode lock assertion code

- Fix xattr leaf buffer leaks when cancelling a deferred xattr update
operation

- Clean up xattr recovery to make it easier to understand

- Fix xattr leaf block verifiers tripping over empty blocks

- Remove complicated and error prone xattr leaf block bholding mess

- Fix a bug where an rt extent crossing EOF was treated as "posteof"
blocks and cleaned unnecessarily.

- Fix a UAF when log shutdown races with unmount"

* tag 'xfs-5.19-fixes-4' of git://git.kernel.org/pub/scm/fs/xfs/xfs-linux:
xfs: prevent a UAF when log IO errors race with unmount
xfs: don't treat rt extents beyond EOF as eofblocks to be cleared
xfs: don't hold xattr leaf buffers across transaction rolls
xfs: empty xattr leaf header blocks are not corruption
xfs: clean up the end of xfs_attri_item_recover
xfs: always free xattri_leaf_bp when cancelling a deferred op
xfs: use invalidate_lock to check the state of mmap_lock
xfs: factor out the common lock flags assert
xfs: introduce xfs_inodegc_push()
xfs: bound maximum wait time for inodegc work

+130 -131
+9 -29
fs/xfs/libxfs/xfs_attr.c
··· 50 50 STATIC int xfs_attr_leaf_get(xfs_da_args_t *args); 51 51 STATIC int xfs_attr_leaf_removename(xfs_da_args_t *args); 52 52 STATIC int xfs_attr_leaf_hasname(struct xfs_da_args *args, struct xfs_buf **bp); 53 - STATIC int xfs_attr_leaf_try_add(struct xfs_da_args *args, struct xfs_buf *bp); 53 + STATIC int xfs_attr_leaf_try_add(struct xfs_da_args *args); 54 54 55 55 /* 56 56 * Internal routines when attribute list is more than one block. ··· 393 393 * It won't fit in the shortform, transform to a leaf block. GROT: 394 394 * another possible req'mt for a double-split btree op. 395 395 */ 396 - error = xfs_attr_shortform_to_leaf(args, &attr->xattri_leaf_bp); 396 + error = xfs_attr_shortform_to_leaf(args); 397 397 if (error) 398 398 return error; 399 399 400 - /* 401 - * Prevent the leaf buffer from being unlocked so that a concurrent AIL 402 - * push cannot grab the half-baked leaf buffer and run into problems 403 - * with the write verifier. 404 - */ 405 - xfs_trans_bhold(args->trans, attr->xattri_leaf_bp); 406 400 attr->xattri_dela_state = XFS_DAS_LEAF_ADD; 407 401 out: 408 402 trace_xfs_attr_sf_addname_return(attr->xattri_dela_state, args->dp); ··· 441 447 442 448 /* 443 449 * Use the leaf buffer we may already hold locked as a result of 444 - * a sf-to-leaf conversion. The held buffer is no longer valid 445 - * after this call, regardless of the result. 450 + * a sf-to-leaf conversion. 
446 451 */ 447 - error = xfs_attr_leaf_try_add(args, attr->xattri_leaf_bp); 448 - attr->xattri_leaf_bp = NULL; 452 + error = xfs_attr_leaf_try_add(args); 449 453 450 454 if (error == -ENOSPC) { 451 455 error = xfs_attr3_leaf_to_node(args); ··· 488 496 { 489 497 struct xfs_da_args *args = attr->xattri_da_args; 490 498 int error; 491 - 492 - ASSERT(!attr->xattri_leaf_bp); 493 499 494 500 error = xfs_attr_node_addname_find_attr(attr); 495 501 if (error) ··· 1205 1215 */ 1206 1216 STATIC int 1207 1217 xfs_attr_leaf_try_add( 1208 - struct xfs_da_args *args, 1209 - struct xfs_buf *bp) 1218 + struct xfs_da_args *args) 1210 1219 { 1220 + struct xfs_buf *bp; 1211 1221 int error; 1212 1222 1213 - /* 1214 - * If the caller provided a buffer to us, it is locked and held in 1215 - * the transaction because it just did a shortform to leaf conversion. 1216 - * Hence we don't need to read it again. Otherwise read in the leaf 1217 - * buffer. 1218 - */ 1219 - if (bp) { 1220 - xfs_trans_bhold_release(args->trans, bp); 1221 - } else { 1222 - error = xfs_attr3_leaf_read(args->trans, args->dp, 0, &bp); 1223 - if (error) 1224 - return error; 1225 - } 1223 + error = xfs_attr3_leaf_read(args->trans, args->dp, 0, &bp); 1224 + if (error) 1225 + return error; 1226 1226 1227 1227 /* 1228 1228 * Look up the xattr name to set the insertion point for the new xattr.
-5
fs/xfs/libxfs/xfs_attr.h
··· 515 515 */ 516 516 struct xfs_attri_log_nameval *xattri_nameval; 517 517 518 - /* 519 - * Used by xfs_attr_set to hold a leaf buffer across a transaction roll 520 - */ 521 - struct xfs_buf *xattri_leaf_bp; 522 - 523 518 /* Used to keep track of current state of delayed operation */ 524 519 enum xfs_delattr_state xattri_dela_state; 525 520
+19 -16
fs/xfs/libxfs/xfs_attr_leaf.c
··· 289 289 return NULL; 290 290 } 291 291 292 + /* 293 + * Validate an attribute leaf block. 294 + * 295 + * Empty leaf blocks can occur under the following circumstances: 296 + * 297 + * 1. setxattr adds a new extended attribute to a file; 298 + * 2. The file has zero existing attributes; 299 + * 3. The attribute is too large to fit in the attribute fork; 300 + * 4. The attribute is small enough to fit in a leaf block; 301 + * 5. A log flush occurs after committing the transaction that creates 302 + * the (empty) leaf block; and 303 + * 6. The filesystem goes down after the log flush but before the new 304 + * attribute can be committed to the leaf block. 305 + * 306 + * Hence we need to ensure that we don't fail the validation purely 307 + * because the leaf is empty. 308 + */ 292 309 static xfs_failaddr_t 293 310 xfs_attr3_leaf_verify( 294 311 struct xfs_buf *bp) ··· 326 309 fa = xfs_da3_blkinfo_verify(bp, bp->b_addr); 327 310 if (fa) 328 311 return fa; 329 - 330 - /* 331 - * Empty leaf blocks should never occur; they imply the existence of a 332 - * software bug that needs fixing. xfs_repair also flags them as a 333 - * corruption that needs fixing, so we should never let these go to 334 - * disk. 335 - */ 336 - if (ichdr.count == 0) 337 - return __this_address; 338 312 339 313 /* 340 314 * firstused is the block offset of the first name info structure. ··· 930 922 return -ENOATTR; 931 923 } 932 924 933 - /* 934 - * Convert from using the shortform to the leaf. On success, return the 935 - * buffer so that we can keep it locked until we're totally done with it. 936 - */ 925 + /* Convert from using the shortform to the leaf format. 
*/ 937 926 int 938 927 xfs_attr_shortform_to_leaf( 939 - struct xfs_da_args *args, 940 - struct xfs_buf **leaf_bp) 928 + struct xfs_da_args *args) 941 929 { 942 930 struct xfs_inode *dp; 943 931 struct xfs_attr_shortform *sf; ··· 995 991 sfe = xfs_attr_sf_nextentry(sfe); 996 992 } 997 993 error = 0; 998 - *leaf_bp = bp; 999 994 out: 1000 995 kmem_free(tmpbuffer); 1001 996 return error;
+1 -2
fs/xfs/libxfs/xfs_attr_leaf.h
··· 49 49 void xfs_attr_shortform_add(struct xfs_da_args *args, int forkoff); 50 50 int xfs_attr_shortform_lookup(struct xfs_da_args *args); 51 51 int xfs_attr_shortform_getvalue(struct xfs_da_args *args); 52 - int xfs_attr_shortform_to_leaf(struct xfs_da_args *args, 53 - struct xfs_buf **leaf_bp); 52 + int xfs_attr_shortform_to_leaf(struct xfs_da_args *args); 54 53 int xfs_attr_sf_removename(struct xfs_da_args *args); 55 54 int xfs_attr_sf_findname(struct xfs_da_args *args, 56 55 struct xfs_attr_sf_entry **sfep,
+15 -12
fs/xfs/xfs_attr_item.c
··· 576 576 struct xfs_trans_res tres; 577 577 struct xfs_attri_log_format *attrp; 578 578 struct xfs_attri_log_nameval *nv = attrip->attri_nameval; 579 - int error, ret = 0; 579 + int error; 580 580 int total; 581 581 int local; 582 582 struct xfs_attrd_log_item *done_item = NULL; ··· 655 655 xfs_ilock(ip, XFS_ILOCK_EXCL); 656 656 xfs_trans_ijoin(tp, ip, 0); 657 657 658 - ret = xfs_xattri_finish_update(attr, done_item); 659 - if (ret == -EAGAIN) { 660 - /* There's more work to do, so add it to this transaction */ 658 + error = xfs_xattri_finish_update(attr, done_item); 659 + if (error == -EAGAIN) { 660 + /* 661 + * There's more work to do, so add the intent item to this 662 + * transaction so that we can continue it later. 663 + */ 661 664 xfs_defer_add(tp, XFS_DEFER_OPS_TYPE_ATTR, &attr->xattri_list); 662 - } else 663 - error = ret; 665 + error = xfs_defer_ops_capture_and_commit(tp, capture_list); 666 + if (error) 667 + goto out_unlock; 664 668 669 + xfs_iunlock(ip, XFS_ILOCK_EXCL); 670 + xfs_irele(ip); 671 + return 0; 672 + } 665 673 if (error) { 666 674 xfs_trans_cancel(tp); 667 675 goto out_unlock; 668 676 } 669 677 670 678 error = xfs_defer_ops_capture_and_commit(tp, capture_list); 671 - 672 679 out_unlock: 673 - if (attr->xattri_leaf_bp) 674 - xfs_buf_relse(attr->xattri_leaf_bp); 675 - 676 680 xfs_iunlock(ip, XFS_ILOCK_EXCL); 677 681 xfs_irele(ip); 678 682 out: 679 - if (ret != -EAGAIN) 680 - xfs_attr_free_item(attr); 683 + xfs_attr_free_item(attr); 681 684 return error; 682 685 } 683 686
+2
fs/xfs/xfs_bmap_util.c
··· 686 686 * forever. 687 687 */ 688 688 end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)XFS_ISIZE(ip)); 689 + if (XFS_IS_REALTIME_INODE(ip) && mp->m_sb.sb_rextsize > 1) 690 + end_fsb = roundup_64(end_fsb, mp->m_sb.sb_rextsize); 689 691 last_fsb = XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes); 690 692 if (last_fsb <= end_fsb) 691 693 return false;
+37 -19
fs/xfs/xfs_icache.c
··· 440 440 for_each_online_cpu(cpu) { 441 441 gc = per_cpu_ptr(mp->m_inodegc, cpu); 442 442 if (!llist_empty(&gc->list)) 443 - queue_work_on(cpu, mp->m_inodegc_wq, &gc->work); 443 + mod_delayed_work_on(cpu, mp->m_inodegc_wq, &gc->work, 0); 444 444 } 445 445 } 446 446 ··· 1841 1841 xfs_inodegc_worker( 1842 1842 struct work_struct *work) 1843 1843 { 1844 - struct xfs_inodegc *gc = container_of(work, struct xfs_inodegc, 1845 - work); 1844 + struct xfs_inodegc *gc = container_of(to_delayed_work(work), 1845 + struct xfs_inodegc, work); 1846 1846 struct llist_node *node = llist_del_all(&gc->list); 1847 1847 struct xfs_inode *ip, *n; 1848 1848 ··· 1862 1862 } 1863 1863 1864 1864 /* 1865 + * Expedite all pending inodegc work to run immediately. This does not wait for 1866 + * completion of the work. 1867 + */ 1868 + void 1869 + xfs_inodegc_push( 1870 + struct xfs_mount *mp) 1871 + { 1872 + if (!xfs_is_inodegc_enabled(mp)) 1873 + return; 1874 + trace_xfs_inodegc_push(mp, __return_address); 1875 + xfs_inodegc_queue_all(mp); 1876 + } 1877 + 1878 + /* 1865 1879 * Force all currently queued inode inactivation work to run immediately and 1866 1880 * wait for the work to finish. 
1867 1881 */ ··· 1883 1869 xfs_inodegc_flush( 1884 1870 struct xfs_mount *mp) 1885 1871 { 1886 - if (!xfs_is_inodegc_enabled(mp)) 1887 - return; 1888 - 1872 + xfs_inodegc_push(mp); 1889 1873 trace_xfs_inodegc_flush(mp, __return_address); 1890 - 1891 - xfs_inodegc_queue_all(mp); 1892 1874 flush_workqueue(mp->m_inodegc_wq); 1893 1875 } 1894 1876 ··· 2024 2014 struct xfs_inodegc *gc; 2025 2015 int items; 2026 2016 unsigned int shrinker_hits; 2017 + unsigned long queue_delay = 1; 2027 2018 2028 2019 trace_xfs_inode_set_need_inactive(ip); 2029 2020 spin_lock(&ip->i_flags_lock); ··· 2036 2025 items = READ_ONCE(gc->items); 2037 2026 WRITE_ONCE(gc->items, items + 1); 2038 2027 shrinker_hits = READ_ONCE(gc->shrinker_hits); 2039 - put_cpu_ptr(gc); 2040 2028 2041 - if (!xfs_is_inodegc_enabled(mp)) 2029 + /* 2030 + * We queue the work while holding the current CPU so that the work 2031 + * is scheduled to run on this CPU. 2032 + */ 2033 + if (!xfs_is_inodegc_enabled(mp)) { 2034 + put_cpu_ptr(gc); 2042 2035 return; 2043 - 2044 - if (xfs_inodegc_want_queue_work(ip, items)) { 2045 - trace_xfs_inodegc_queue(mp, __return_address); 2046 - queue_work(mp->m_inodegc_wq, &gc->work); 2047 2036 } 2037 + 2038 + if (xfs_inodegc_want_queue_work(ip, items)) 2039 + queue_delay = 0; 2040 + 2041 + trace_xfs_inodegc_queue(mp, __return_address); 2042 + mod_delayed_work(mp->m_inodegc_wq, &gc->work, queue_delay); 2043 + put_cpu_ptr(gc); 2048 2044 2049 2045 if (xfs_inodegc_want_flush_work(ip, items, shrinker_hits)) { 2050 2046 trace_xfs_inodegc_throttle(mp, __return_address); 2051 - flush_work(&gc->work); 2047 + flush_delayed_work(&gc->work); 2052 2048 } 2053 2049 } 2054 2050 ··· 2072 2054 unsigned int count = 0; 2073 2055 2074 2056 dead_gc = per_cpu_ptr(mp->m_inodegc, dead_cpu); 2075 - cancel_work_sync(&dead_gc->work); 2057 + cancel_delayed_work_sync(&dead_gc->work); 2076 2058 2077 2059 if (llist_empty(&dead_gc->list)) 2078 2060 return; ··· 2091 2073 llist_add_batch(first, last, &gc->list); 2092 
2074 count += READ_ONCE(gc->items); 2093 2075 WRITE_ONCE(gc->items, count); 2094 - put_cpu_ptr(gc); 2095 2076 2096 2077 if (xfs_is_inodegc_enabled(mp)) { 2097 2078 trace_xfs_inodegc_queue(mp, __return_address); 2098 - queue_work(mp->m_inodegc_wq, &gc->work); 2079 + mod_delayed_work(mp->m_inodegc_wq, &gc->work, 0); 2099 2080 } 2081 + put_cpu_ptr(gc); 2100 2082 } 2101 2083 2102 2084 /* ··· 2191 2173 unsigned int h = READ_ONCE(gc->shrinker_hits); 2192 2174 2193 2175 WRITE_ONCE(gc->shrinker_hits, h + 1); 2194 - queue_work_on(cpu, mp->m_inodegc_wq, &gc->work); 2176 + mod_delayed_work_on(cpu, mp->m_inodegc_wq, &gc->work, 0); 2195 2177 no_items = false; 2196 2178 } 2197 2179 }
+1
fs/xfs/xfs_icache.h
··· 76 76 void xfs_blockgc_start(struct xfs_mount *mp); 77 77 78 78 void xfs_inodegc_worker(struct work_struct *work); 79 + void xfs_inodegc_push(struct xfs_mount *mp); 79 80 void xfs_inodegc_flush(struct xfs_mount *mp); 80 81 void xfs_inodegc_stop(struct xfs_mount *mp); 81 82 void xfs_inodegc_start(struct xfs_mount *mp);
+25 -39
fs/xfs/xfs_inode.c
··· 132 132 } 133 133 134 134 /* 135 + * You can't set both SHARED and EXCL for the same lock, 136 + * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_MMAPLOCK_SHARED, 137 + * XFS_MMAPLOCK_EXCL, XFS_ILOCK_SHARED, XFS_ILOCK_EXCL are valid values 138 + * to set in lock_flags. 139 + */ 140 + static inline void 141 + xfs_lock_flags_assert( 142 + uint lock_flags) 143 + { 144 + ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) != 145 + (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)); 146 + ASSERT((lock_flags & (XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL)) != 147 + (XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL)); 148 + ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) != 149 + (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)); 150 + ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_SUBCLASS_MASK)) == 0); 151 + ASSERT(lock_flags != 0); 152 + } 153 + 154 + /* 135 155 * In addition to i_rwsem in the VFS inode, the xfs inode contains 2 136 156 * multi-reader locks: invalidate_lock and the i_lock. This routine allows 137 157 * various combinations of the locks to be obtained. ··· 188 168 { 189 169 trace_xfs_ilock(ip, lock_flags, _RET_IP_); 190 170 191 - /* 192 - * You can't set both SHARED and EXCL for the same lock, 193 - * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_ILOCK_SHARED, 194 - * and XFS_ILOCK_EXCL are valid values to set in lock_flags. 
195 - */ 196 - ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) != 197 - (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)); 198 - ASSERT((lock_flags & (XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL)) != 199 - (XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL)); 200 - ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) != 201 - (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)); 202 - ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_SUBCLASS_MASK)) == 0); 171 + xfs_lock_flags_assert(lock_flags); 203 172 204 173 if (lock_flags & XFS_IOLOCK_EXCL) { 205 174 down_write_nested(&VFS_I(ip)->i_rwsem, ··· 231 222 { 232 223 trace_xfs_ilock_nowait(ip, lock_flags, _RET_IP_); 233 224 234 - /* 235 - * You can't set both SHARED and EXCL for the same lock, 236 - * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_ILOCK_SHARED, 237 - * and XFS_ILOCK_EXCL are valid values to set in lock_flags. 238 - */ 239 - ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) != 240 - (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)); 241 - ASSERT((lock_flags & (XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL)) != 242 - (XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL)); 243 - ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) != 244 - (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)); 245 - ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_SUBCLASS_MASK)) == 0); 225 + xfs_lock_flags_assert(lock_flags); 246 226 247 227 if (lock_flags & XFS_IOLOCK_EXCL) { 248 228 if (!down_write_trylock(&VFS_I(ip)->i_rwsem)) ··· 289 291 xfs_inode_t *ip, 290 292 uint lock_flags) 291 293 { 292 - /* 293 - * You can't set both SHARED and EXCL for the same lock, 294 - * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_ILOCK_SHARED, 295 - * and XFS_ILOCK_EXCL are valid values to set in lock_flags. 
296 - */ 297 - ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) != 298 - (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)); 299 - ASSERT((lock_flags & (XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL)) != 300 - (XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL)); 301 - ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) != 302 - (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)); 303 - ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_SUBCLASS_MASK)) == 0); 304 - ASSERT(lock_flags != 0); 294 + xfs_lock_flags_assert(lock_flags); 305 295 306 296 if (lock_flags & XFS_IOLOCK_EXCL) 307 297 up_write(&VFS_I(ip)->i_rwsem); ··· 365 379 } 366 380 367 381 if (lock_flags & (XFS_MMAPLOCK_EXCL|XFS_MMAPLOCK_SHARED)) { 368 - return __xfs_rwsem_islocked(&VFS_I(ip)->i_rwsem, 369 - (lock_flags & XFS_IOLOCK_SHARED)); 382 + return __xfs_rwsem_islocked(&VFS_I(ip)->i_mapping->invalidate_lock, 383 + (lock_flags & XFS_MMAPLOCK_SHARED)); 370 384 } 371 385 372 386 if (lock_flags & (XFS_IOLOCK_EXCL | XFS_IOLOCK_SHARED)) {
+7 -2
fs/xfs/xfs_log.c
··· 2092 2092 xlog_in_core_t *iclog, *next_iclog; 2093 2093 int i; 2094 2094 2095 - xlog_cil_destroy(log); 2096 - 2097 2095 /* 2098 2096 * Cycle all the iclogbuf locks to make sure all log IO completion 2099 2097 * is done before we tear down these buffers. ··· 2102 2104 up(&iclog->ic_sema); 2103 2105 iclog = iclog->ic_next; 2104 2106 } 2107 + 2108 + /* 2109 + * Destroy the CIL after waiting for iclog IO completion because an 2110 + * iclog EIO error will try to shut down the log, which accesses the 2111 + * CIL to wake up the waiters. 2112 + */ 2113 + xlog_cil_destroy(log); 2105 2114 2106 2115 iclog = log->l_iclog; 2107 2116 for (i = 0; i < log->l_iclog_bufs; i++) {
+1 -1
fs/xfs/xfs_mount.h
··· 61 61 */ 62 62 struct xfs_inodegc { 63 63 struct llist_head list; 64 - struct work_struct work; 64 + struct delayed_work work; 65 65 66 66 /* approximate count of inodes in the list */ 67 67 unsigned int items;
+6 -3
fs/xfs/xfs_qm_syscalls.c
··· 454 454 struct xfs_dquot *dqp; 455 455 int error; 456 456 457 - /* Flush inodegc work at the start of a quota reporting scan. */ 457 + /* 458 + * Expedite pending inodegc work at the start of a quota reporting 459 + * scan but don't block waiting for it to complete. 460 + */ 458 461 if (id == 0) 459 - xfs_inodegc_flush(mp); 462 + xfs_inodegc_push(mp); 460 463 461 464 /* 462 465 * Try to get the dquot. We don't want it allocated on disk, so don't ··· 501 498 502 499 /* Flush inodegc work at the start of a quota reporting scan. */ 503 500 if (*id == 0) 504 - xfs_inodegc_flush(mp); 501 + xfs_inodegc_push(mp); 505 502 506 503 error = xfs_qm_dqget_next(mp, *id, type, &dqp); 507 504 if (error)
+6 -3
fs/xfs/xfs_super.c
··· 797 797 xfs_extlen_t lsize; 798 798 int64_t ffree; 799 799 800 - /* Wait for whatever inactivations are in progress. */ 801 - xfs_inodegc_flush(mp); 800 + /* 801 + * Expedite background inodegc but don't wait. We do not want to block 802 + * here waiting hours for a billion extent file to be truncated. 803 + */ 804 + xfs_inodegc_push(mp); 802 805 803 806 statp->f_type = XFS_SUPER_MAGIC; 804 807 statp->f_namelen = MAXNAMELEN - 1; ··· 1077 1074 gc = per_cpu_ptr(mp->m_inodegc, cpu); 1078 1075 init_llist_head(&gc->list); 1079 1076 gc->items = 0; 1080 - INIT_WORK(&gc->work, xfs_inodegc_worker); 1077 + INIT_DELAYED_WORK(&gc->work, xfs_inodegc_worker); 1081 1078 } 1082 1079 return 0; 1083 1080 }
+1
fs/xfs/xfs_trace.h
··· 240 240 TP_PROTO(struct xfs_mount *mp, void *caller_ip), \ 241 241 TP_ARGS(mp, caller_ip)) 242 242 DEFINE_FS_EVENT(xfs_inodegc_flush); 243 + DEFINE_FS_EVENT(xfs_inodegc_push); 243 244 DEFINE_FS_EVENT(xfs_inodegc_start); 244 245 DEFINE_FS_EVENT(xfs_inodegc_stop); 245 246 DEFINE_FS_EVENT(xfs_inodegc_queue);