Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

Merge tag 'xfs-fixes-7.0-rc6' of git://git.kernel.org/pub/scm/fs/xfs/xfs-linux

Pull xfs fixes from Carlos Maiolino:
"This includes a few important bug fixes, and some code refactoring
that was necessary for one of the fixes"

* tag 'xfs-fixes-7.0-rc6' of git://git.kernel.org/pub/scm/fs/xfs/xfs-linux:
xfs: remove file_path tracepoint data
xfs: don't irele after failing to iget in xfs_attri_recover_work
xfs: remove redundant validation in xlog_recover_attri_commit_pass2
xfs: fix ri_total validation in xlog_recover_attri_commit_pass2
xfs: close crash window in attr dabtree inactivation
xfs: factor out xfs_attr3_leaf_init
xfs: factor out xfs_attr3_node_entry_remove
xfs: only assert new size for datafork during truncate extents
xfs: annotate struct xfs_attr_list_context with __counted_by_ptr
xfs: cleanup buftarg handling in XFS_IOC_VERIFY_MEDIA
xfs: scrub: unlock dquot before early return in quota scrub
xfs: refactor xfsaild_push loop into helper
xfs: save ailp before dropping the AIL lock in push callbacks
xfs: avoid dereferencing log items after push callbacks
xfs: stop reclaim before pushing AIL during unmount

+276 -197
+2 -1
fs/xfs/libxfs/xfs_attr.h
···
 	struct xfs_trans		*tp;
 	struct xfs_inode		*dp;		/* inode */
 	struct xfs_attrlist_cursor_kern	cursor;		/* position in list */
-	void				*buffer;	/* output buffer */
+	/* output buffer */
+	void				*buffer __counted_by_ptr(bufsize);
 
 	/*
 	 * Abort attribute list iteration if non-zero. Can be used to pass
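The annotation ties `buffer` to the `bufsize` member so fortified builds can bounds-check accesses through the pointer. A minimal standalone sketch of the counted-by idea using the flexible-array form of the attribute, which is the widely supported variant; the fallback macro, struct, and field names here are illustrative, not from this patch (`__counted_by_ptr()` in the patch applies the same count relationship to a pointer member, which needs newer compiler support):

/* Provide a no-op fallback when the compiler lacks the attribute. */
#ifdef __has_attribute
# if __has_attribute(counted_by)
#  define counted_by(m)	__attribute__((counted_by(m)))
# endif
#endif
#ifndef counted_by
# define counted_by(m)
#endif

/*
 * 'count' names the member holding the number of valid elements in
 * 'entries', letting fortified accesses to 'entries' be bounds-checked.
 */
struct ent_list {
	unsigned int	count;
	unsigned int	entries[] counted_by(count);
};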
+22
fs/xfs/libxfs/xfs_attr_leaf.c
···
 }
 
 /*
+ * Reinitialize an existing attr fork block as an empty leaf, and attach
+ * the buffer to tp.
+ */
+int
+xfs_attr3_leaf_init(
+	struct xfs_trans	*tp,
+	struct xfs_inode	*dp,
+	xfs_dablk_t		blkno)
+{
+	struct xfs_buf		*bp = NULL;
+	struct xfs_da_args	args = {
+		.trans	= tp,
+		.dp	= dp,
+		.owner	= dp->i_ino,
+		.geo	= dp->i_mount->m_attr_geo,
+	};
+
+	ASSERT(tp != NULL);
+
+	return xfs_attr3_leaf_create(&args, blkno, &bp);
+}
+/*
  * Split the leaf node, rebalance, then add the new entry.
  *
  * Returns 0 if the entry was added, 1 if a further split is needed or a
+3
fs/xfs/libxfs/xfs_attr_leaf.h
···
 /*
  * Routines used for shrinking the Btree.
  */
+
+int	xfs_attr3_leaf_init(struct xfs_trans *tp, struct xfs_inode *dp,
+		xfs_dablk_t blkno);
 int	xfs_attr3_leaf_toosmall(struct xfs_da_state *state, int *retval);
 void	xfs_attr3_leaf_unbalance(struct xfs_da_state *state,
 		struct xfs_da_state_blk *drop_blk,
+42 -11
fs/xfs/libxfs/xfs_da_btree.c
···
 }
 
 /*
- * Remove an entry from an intermediate node.
+ * Internal implementation to remove an entry from an intermediate node.
  */
 STATIC void
-xfs_da3_node_remove(
-	struct xfs_da_state	*state,
-	struct xfs_da_state_blk	*drop_blk)
+__xfs_da3_node_remove(
+	struct xfs_trans	*tp,
+	struct xfs_inode	*dp,
+	struct xfs_da_geometry	*geo,
+	struct xfs_da_state_blk	*drop_blk)
 {
 	struct xfs_da_intnode	*node;
 	struct xfs_da3_icnode_hdr nodehdr;
 	struct xfs_da_node_entry *btree;
 	int			index;
 	int			tmp;
-	struct xfs_inode	*dp = state->args->dp;
-
-	trace_xfs_da_node_remove(state->args);
 
 	node = drop_blk->bp->b_addr;
 	xfs_da3_node_hdr_from_disk(dp->i_mount, &nodehdr, node);
···
 		tmp  = nodehdr.count - index - 1;
 		tmp *= (uint)sizeof(xfs_da_node_entry_t);
 		memmove(&btree[index], &btree[index + 1], tmp);
-		xfs_trans_log_buf(state->args->trans, drop_blk->bp,
+		xfs_trans_log_buf(tp, drop_blk->bp,
 		    XFS_DA_LOGRANGE(node, &btree[index], tmp));
 		index = nodehdr.count - 1;
 	}
 	memset(&btree[index], 0, sizeof(xfs_da_node_entry_t));
-	xfs_trans_log_buf(state->args->trans, drop_blk->bp,
+	xfs_trans_log_buf(tp, drop_blk->bp,
 	    XFS_DA_LOGRANGE(node, &btree[index], sizeof(btree[index])));
 	nodehdr.count -= 1;
 	xfs_da3_node_hdr_to_disk(dp->i_mount, node, &nodehdr);
-	xfs_trans_log_buf(state->args->trans, drop_blk->bp,
-	    XFS_DA_LOGRANGE(node, &node->hdr, state->args->geo->node_hdr_size));
+	xfs_trans_log_buf(tp, drop_blk->bp,
+	    XFS_DA_LOGRANGE(node, &node->hdr, geo->node_hdr_size));
 
 	/*
 	 * Copy the last hash value from the block to propagate upwards.
 	 */
 	drop_blk->hashval = be32_to_cpu(btree[index - 1].hashval);
+}
+
+/*
+ * Remove an entry from an intermediate node.
+ */
+STATIC void
+xfs_da3_node_remove(
+	struct xfs_da_state	*state,
+	struct xfs_da_state_blk	*drop_blk)
+{
+	trace_xfs_da_node_remove(state->args);
+
+	__xfs_da3_node_remove(state->args->trans, state->args->dp,
+			state->args->geo, drop_blk);
+}
+
+/*
+ * Remove an entry from an intermediate attr node at the specified index.
+ */
+void
+xfs_attr3_node_entry_remove(
+	struct xfs_trans	*tp,
+	struct xfs_inode	*dp,
+	struct xfs_buf		*bp,
+	int			index)
+{
+	struct xfs_da_state_blk	blk = {
+		.index	= index,
+		.bp	= bp,
+	};
+
+	__xfs_da3_node_remove(tp, dp, dp->i_mount->m_attr_geo, &blk);
 }
 
 /*
+2
fs/xfs/libxfs/xfs_da_btree.h
···
 int	xfs_da3_join(xfs_da_state_t *state);
 void	xfs_da3_fixhashpath(struct xfs_da_state *state,
 		struct xfs_da_state_path *path_to_to_fix);
+void	xfs_attr3_node_entry_remove(struct xfs_trans *tp, struct xfs_inode *dp,
+		struct xfs_buf *bp, int index);
 
 /*
  * Routines used for finding things in the Btree.
+3 -1
fs/xfs/scrub/quota.c
···
 
 	error = xchk_quota_item_bmap(sc, dq, offset);
 	xchk_iunlock(sc, XFS_ILOCK_SHARED);
-	if (!xchk_fblock_process_error(sc, XFS_DATA_FORK, offset, &error))
+	if (!xchk_fblock_process_error(sc, XFS_DATA_FORK, offset, &error)) {
+		mutex_unlock(&dq->q_qlock);
 		return error;
+	}
 
 	/*
 	 * Warn if the hard limits are larger than the fs.
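The bug class here, returning early with `dq->q_qlock` still held, is commonly avoided with the goto-unwind idiom, where every exit funnels through one unlock. A minimal, self-contained sketch of that idiom with a userspace mutex; all names are invented for illustration:

#include <pthread.h>

static pthread_mutex_t	q_lock = PTHREAD_MUTEX_INITIALIZER;

/* All exits funnel through the unlock, so no early return can leak it. */
static int check_item(int (*validate_bmap)(void), int (*validate_limits)(void))
{
	int		error;

	pthread_mutex_lock(&q_lock);
	error = validate_bmap();
	if (error)
		goto out_unlock;
	error = validate_limits();
out_unlock:
	pthread_mutex_unlock(&q_lock);
	return error;
}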
+2 -10
fs/xfs/scrub/trace.h
···
 	TP_STRUCT__entry(
 		__field(dev_t, dev)
 		__field(unsigned long, ino)
-		__array(char, pathname, MAXNAMELEN)
 	),
 	TP_fast_assign(
-		char		*path;
-
 		__entry->ino = file_inode(xf->file)->i_ino;
-		path = file_path(xf->file, __entry->pathname, MAXNAMELEN);
-		if (IS_ERR(path))
-			strncpy(__entry->pathname, "(unknown)",
-					sizeof(__entry->pathname));
 	),
-	TP_printk("xfino 0x%lx path '%s'",
-		  __entry->ino,
-		  __entry->pathname)
+	TP_printk("xfino 0x%lx",
+		  __entry->ino)
 );
 
 TRACE_EVENT(xfile_destroy,
+59 -40
fs/xfs/xfs_attr_inactive.c
···
 	xfs_daddr_t		parent_blkno, child_blkno;
 	struct xfs_buf		*child_bp;
 	struct xfs_da3_icnode_hdr ichdr;
-	int			error, i;
+	int			error;
 
 	/*
 	 * Since this code is recursive (gasp!) we must protect ourselves.
···
 		return -EFSCORRUPTED;
 	}
 
-	xfs_da3_node_hdr_from_disk(dp->i_mount, &ichdr, bp->b_addr);
+	xfs_da3_node_hdr_from_disk(mp, &ichdr, bp->b_addr);
 	parent_blkno = xfs_buf_daddr(bp);
 	if (!ichdr.count) {
 		xfs_trans_brelse(*trans, bp);
···
 	 * over the leaves removing all of them. If this is higher up
 	 * in the tree, recurse downward.
 	 */
-	for (i = 0; i < ichdr.count; i++) {
+	while (ichdr.count > 0) {
 		/*
 		 * Read the subsidiary block to see what we have to work with.
 		 * Don't do this in a transaction.  This is a depth-first
···
 		xfs_trans_binval(*trans, child_bp);
 		child_bp = NULL;
 
-		/*
-		 * If we're not done, re-read the parent to get the next
-		 * child block number.
-		 */
-		if (i + 1 < ichdr.count) {
-			struct xfs_da3_icnode_hdr phdr;
+		error = xfs_da3_node_read_mapped(*trans, dp,
+				parent_blkno, &bp, XFS_ATTR_FORK);
+		if (error)
+			return error;
 
-			error = xfs_da3_node_read_mapped(*trans, dp,
-					parent_blkno, &bp, XFS_ATTR_FORK);
+		/*
+		 * Remove entry from parent node, prevents being indexed to.
+		 */
+		xfs_attr3_node_entry_remove(*trans, dp, bp, 0);
+
+		xfs_da3_node_hdr_from_disk(mp, &ichdr, bp->b_addr);
+		bp = NULL;
+
+		if (ichdr.count > 0) {
+			/*
+			 * If we're not done, get the next child block number.
+			 */
+			child_fsb = be32_to_cpu(ichdr.btree[0].before);
+
+			/*
+			 * Atomically commit the whole invalidate stuff.
+			 */
+			error = xfs_trans_roll_inode(trans, dp);
 			if (error)
 				return error;
-			xfs_da3_node_hdr_from_disk(dp->i_mount, &phdr,
-					bp->b_addr);
-			child_fsb = be32_to_cpu(phdr.btree[i + 1].before);
-			xfs_trans_brelse(*trans, bp);
-			bp = NULL;
 		}
-		/*
-		 * Atomically commit the whole invalidate stuff.
-		 */
-		error = xfs_trans_roll_inode(trans, dp);
-		if (error)
-			return error;
 	}
 
 	return 0;
···
 	struct xfs_trans	**trans,
 	struct xfs_inode	*dp)
 {
-	struct xfs_mount	*mp = dp->i_mount;
 	struct xfs_da_blkinfo	*info;
 	struct xfs_buf		*bp;
-	xfs_daddr_t		blkno;
 	int			error;
 
 	/*
···
 	error = xfs_da3_node_read(*trans, dp, 0, &bp, XFS_ATTR_FORK);
 	if (error)
 		return error;
-	blkno = xfs_buf_daddr(bp);
 
 	/*
 	 * Invalidate the tree, even if the "tree" is only a single leaf block.
···
 	case cpu_to_be16(XFS_DA_NODE_MAGIC):
 	case cpu_to_be16(XFS_DA3_NODE_MAGIC):
 		error = xfs_attr3_node_inactive(trans, dp, bp, 1);
+		/*
+		 * Empty root node blocks are not allowed, so convert it
+		 * back to a leaf.
+		 */
+		if (!error)
+			error = xfs_attr3_leaf_init(*trans, dp, 0);
+		if (!error)
+			error = xfs_trans_roll_inode(trans, dp);
 		break;
 	case cpu_to_be16(XFS_ATTR_LEAF_MAGIC):
 	case cpu_to_be16(XFS_ATTR3_LEAF_MAGIC):
 		error = xfs_attr3_leaf_inactive(trans, dp, bp);
+		/*
+		 * Reinit the leaf before truncating extents so that a crash
+		 * mid-truncation leaves an empty leaf rather than one with
+		 * entries that may reference freed remote value blocks.
+		 */
+		if (!error)
+			error = xfs_attr3_leaf_init(*trans, dp, 0);
+		if (!error)
+			error = xfs_trans_roll_inode(trans, dp);
 		break;
 	default:
 		xfs_dirattr_mark_sick(dp, XFS_ATTR_FORK);
···
 		xfs_trans_brelse(*trans, bp);
 		break;
 	}
-	if (error)
-		return error;
-
-	/*
-	 * Invalidate the incore copy of the root block.
-	 */
-	error = xfs_trans_get_buf(*trans, mp->m_ddev_targp, blkno,
-			XFS_FSB_TO_BB(mp, mp->m_attr_geo->fsbcount), 0, &bp);
-	if (error)
-		return error;
-	xfs_trans_binval(*trans, bp);	/* remove from cache */
-	/*
-	 * Commit the invalidate and start the next transaction.
-	 */
-	error = xfs_trans_roll_inode(trans, dp);
 
 	return error;
 }
···
 {
 	struct xfs_trans	*trans;
 	struct xfs_mount	*mp;
+	struct xfs_buf		*bp;
 	int			lock_mode = XFS_ILOCK_SHARED;
 	int			error = 0;
 
···
 	 * removal below.
 	 */
 	if (dp->i_af.if_nextents > 0) {
+		/*
+		 * Invalidate and truncate all blocks but leave the root block.
+		 */
 		error = xfs_attr3_root_inactive(&trans, dp);
 		if (error)
 			goto out_cancel;
 
+		error = xfs_itruncate_extents(&trans, dp, XFS_ATTR_FORK,
+				XFS_FSB_TO_B(mp, mp->m_attr_geo->fsbcount));
+		if (error)
+			goto out_cancel;
+
+		/*
+		 * Invalidate and truncate the root block and ensure that the
+		 * operation is completed within a single transaction.
+		 */
+		error = xfs_da_get_buf(trans, dp, 0, &bp, XFS_ATTR_FORK);
+		if (error)
+			goto out_cancel;
+
+		xfs_trans_binval(trans, bp);
 		error = xfs_itruncate_extents(&trans, dp, XFS_ATTR_FORK, 0);
 		if (error)
 			goto out_cancel;
+2 -49
fs/xfs/xfs_attr_item.c
···
 		break;
 	}
 	if (error) {
-		xfs_irele(ip);
 		XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp, attrp,
 				sizeof(*attrp));
 		return ERR_PTR(-EFSCORRUPTED);
···
 		break;
 	case XFS_ATTRI_OP_FLAGS_SET:
 	case XFS_ATTRI_OP_FLAGS_REPLACE:
-		/* Log item, attr name, attr value */
-		if (item->ri_total != 3) {
+		/* Log item, attr name, optional attr value */
+		if (item->ri_total != 2 + !!attri_formatp->alfi_value_len) {
 			XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp,
 					attri_formatp, len);
 			return -EFSCORRUPTED;
···
 		XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp,
 				attri_formatp, len);
 		return -EFSCORRUPTED;
-	}
-
-	switch (op) {
-	case XFS_ATTRI_OP_FLAGS_REMOVE:
-		/* Regular remove operations operate only on names. */
-		if (attr_value != NULL || value_len != 0) {
-			XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp,
-					attri_formatp, len);
-			return -EFSCORRUPTED;
-		}
-		fallthrough;
-	case XFS_ATTRI_OP_FLAGS_PPTR_REMOVE:
-	case XFS_ATTRI_OP_FLAGS_PPTR_SET:
-	case XFS_ATTRI_OP_FLAGS_SET:
-	case XFS_ATTRI_OP_FLAGS_REPLACE:
-		/*
-		 * Regular xattr set/remove/replace operations require a name
-		 * and do not take a newname.  Values are optional for set and
-		 * replace.
-		 *
-		 * Name-value set/remove operations must have a name, do not
-		 * take a newname, and can take a value.
-		 */
-		if (attr_name == NULL || name_len == 0) {
-			XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp,
-					attri_formatp, len);
-			return -EFSCORRUPTED;
-		}
-		break;
-	case XFS_ATTRI_OP_FLAGS_PPTR_REPLACE:
-		/*
-		 * Name-value replace operations require the caller to
-		 * specify the old and new names and values explicitly.
-		 * Values are optional.
-		 */
-		if (attr_name == NULL || name_len == 0) {
-			XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp,
-					attri_formatp, len);
-			return -EFSCORRUPTED;
-		}
-		if (attr_new_name == NULL || new_name_len == 0) {
-			XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp,
-					attri_formatp, len);
-			return -EFSCORRUPTED;
-		}
-		break;
 	}
 
 	/*
+7 -2
fs/xfs/xfs_dquot_item.c
···
 	struct xfs_dq_logitem	*qlip = DQUOT_ITEM(lip);
 	struct xfs_dquot	*dqp = qlip->qli_dquot;
 	struct xfs_buf		*bp;
+	struct xfs_ail		*ailp = lip->li_ailp;
 	uint			rval = XFS_ITEM_SUCCESS;
 	int			error;
 
···
 		goto out_unlock;
 	}
 
-	spin_unlock(&lip->li_ailp->ail_lock);
+	spin_unlock(&ailp->ail_lock);
 
 	error = xfs_dquot_use_attached_buf(dqp, &bp);
 	if (error == -EAGAIN) {
···
 		rval = XFS_ITEM_FLUSHING;
 	}
 	xfs_buf_relse(bp);
+	/*
+	 * The buffer no longer protects the log item from reclaim, so
+	 * do not reference lip after this point.
+	 */
 
 out_relock_ail:
-	spin_lock(&lip->li_ailp->ail_lock);
+	spin_lock(&ailp->ail_lock);
 out_unlock:
 	mutex_unlock(&dqp->q_qlock);
 	return rval;
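The fix caches `lip->li_ailp` up front because the log item can be reclaimed once its buffer is released, so retaking the lock must not go through `lip`. A standalone sketch of that discipline, with invented names and a userspace mutex standing in for the AIL lock:

#include <pthread.h>

struct item {
	pthread_mutex_t	*list_lock;	/* lock protecting the item's list */
	void		(*push)(struct item *);
};

/*
 * The push may leave 'it' freed once list_lock is dropped, so cache the
 * lock pointer first; relocking must not dereference 'it' again.
 */
static void push_one(struct item *it)
{
	pthread_mutex_t	*lock = it->list_lock;

	pthread_mutex_unlock(lock);
	it->push(it);			/* 'it' may be freed in here */
	pthread_mutex_lock(lock);	/* safe: uses the cached pointer */
}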
+1 -1
fs/xfs/xfs_handle.c
···
 	context.dp = dp;
 	context.resynch = 1;
 	context.attr_filter = xfs_attr_filter(flags);
-	context.buffer = buffer;
 	context.bufsize = round_down(bufsize, sizeof(uint32_t));
+	context.buffer = buffer;
 	context.firstu = context.bufsize;
 	context.put_listent = xfs_ioc_attr_put_listent;
 
+2 -1
fs/xfs/xfs_inode.c
···
 	xfs_assert_ilocked(ip, XFS_ILOCK_EXCL);
 	if (icount_read(VFS_I(ip)))
 		xfs_assert_ilocked(ip, XFS_IOLOCK_EXCL);
-	ASSERT(new_size <= XFS_ISIZE(ip));
+	if (whichfork == XFS_DATA_FORK)
+		ASSERT(new_size <= XFS_ISIZE(ip));
 	ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES);
 	ASSERT(ip->i_itemp != NULL);
 	ASSERT(ip->i_itemp->ili_lock_flags == 0);
+7 -2
fs/xfs/xfs_inode_item.c
···
 	struct xfs_inode_log_item *iip = INODE_ITEM(lip);
 	struct xfs_inode	*ip = iip->ili_inode;
 	struct xfs_buf		*bp = lip->li_buf;
+	struct xfs_ail		*ailp = lip->li_ailp;
 	uint			rval = XFS_ITEM_SUCCESS;
 	int			error;
 
···
 	if (!xfs_buf_trylock(bp))
 		return XFS_ITEM_LOCKED;
 
-	spin_unlock(&lip->li_ailp->ail_lock);
+	spin_unlock(&ailp->ail_lock);
 
 	/*
 	 * We need to hold a reference for flushing the cluster buffer as it may
···
 		rval = XFS_ITEM_LOCKED;
 	}
 
-	spin_lock(&lip->li_ailp->ail_lock);
+	/*
+	 * The buffer no longer protects the log item from reclaim, so
+	 * do not reference lip after this point.
+	 */
+	spin_lock(&ailp->ail_lock);
 	return rval;
 }
 
+4 -3
fs/xfs/xfs_mount.c
···
  * have been retrying in the background. This will prevent never-ending
  * retries in AIL pushing from hanging the unmount.
  *
- * Finally, we can push the AIL to clean all the remaining dirty objects, then
- * reclaim the remaining inodes that are still in memory at this point in time.
+ * Stop inodegc and background reclaim before pushing the AIL so that they
+ * are not running while the AIL is being flushed. Then push the AIL to
+ * clean all the remaining dirty objects and reclaim the remaining inodes.
  */
 static void
 xfs_unmount_flush_inodes(
···
 
 	xfs_set_unmounting(mp);
 
-	xfs_ail_push_all_sync(mp->m_ail);
 	xfs_inodegc_stop(mp);
 	cancel_delayed_work_sync(&mp->m_reclaim_work);
+	xfs_ail_push_all_sync(mp->m_ail);
 	xfs_reclaim_inodes(mp);
 	xfs_health_unmount(mp);
 	xfs_healthmon_unmount(mp);
+34 -13
fs/xfs/xfs_trace.h
···
 #include <linux/tracepoint.h>
 
 struct xfs_agf;
+struct xfs_ail;
 struct xfs_alloc_arg;
 struct xfs_attr_list_context;
 struct xfs_buf_log_item;
···
 DEFINE_EVENT(xfs_log_item_class, name, \
 	TP_PROTO(struct xfs_log_item *lip), \
 	TP_ARGS(lip))
-DEFINE_LOG_ITEM_EVENT(xfs_ail_push);
-DEFINE_LOG_ITEM_EVENT(xfs_ail_pinned);
-DEFINE_LOG_ITEM_EVENT(xfs_ail_locked);
-DEFINE_LOG_ITEM_EVENT(xfs_ail_flushing);
 DEFINE_LOG_ITEM_EVENT(xfs_cil_whiteout_mark);
 DEFINE_LOG_ITEM_EVENT(xfs_cil_whiteout_skip);
 DEFINE_LOG_ITEM_EVENT(xfs_cil_whiteout_unpin);
 DEFINE_LOG_ITEM_EVENT(xlog_ail_insert_abort);
 DEFINE_LOG_ITEM_EVENT(xfs_trans_free_abort);
+
+DECLARE_EVENT_CLASS(xfs_ail_push_class,
+	TP_PROTO(struct xfs_ail *ailp, uint type, unsigned long flags, xfs_lsn_t lsn),
+	TP_ARGS(ailp, type, flags, lsn),
+	TP_STRUCT__entry(
+		__field(dev_t, dev)
+		__field(uint, type)
+		__field(unsigned long, flags)
+		__field(xfs_lsn_t, lsn)
+	),
+	TP_fast_assign(
+		__entry->dev = ailp->ail_log->l_mp->m_super->s_dev;
+		__entry->type = type;
+		__entry->flags = flags;
+		__entry->lsn = lsn;
+	),
+	TP_printk("dev %d:%d lsn %d/%d type %s flags %s",
+		  MAJOR(__entry->dev), MINOR(__entry->dev),
+		  CYCLE_LSN(__entry->lsn), BLOCK_LSN(__entry->lsn),
+		  __print_symbolic(__entry->type, XFS_LI_TYPE_DESC),
+		  __print_flags(__entry->flags, "|", XFS_LI_FLAGS))
+)
+
+#define DEFINE_AIL_PUSH_EVENT(name) \
+DEFINE_EVENT(xfs_ail_push_class, name, \
+	TP_PROTO(struct xfs_ail *ailp, uint type, unsigned long flags, xfs_lsn_t lsn), \
+	TP_ARGS(ailp, type, flags, lsn))
+DEFINE_AIL_PUSH_EVENT(xfs_ail_push);
+DEFINE_AIL_PUSH_EVENT(xfs_ail_pinned);
+DEFINE_AIL_PUSH_EVENT(xfs_ail_locked);
+DEFINE_AIL_PUSH_EVENT(xfs_ail_flushing);
 
 DECLARE_EVENT_CLASS(xfs_ail_class,
 	TP_PROTO(struct xfs_log_item *lip, xfs_lsn_t old_lsn, xfs_lsn_t new_lsn),
···
 	TP_STRUCT__entry(
 		__field(dev_t, dev)
 		__field(unsigned long, ino)
-		__array(char, pathname, MAXNAMELEN)
 	),
 	TP_fast_assign(
-		char		*path;
 		struct file	*file = btp->bt_file;
 
 		__entry->dev = btp->bt_mount->m_super->s_dev;
 		__entry->ino = file_inode(file)->i_ino;
-		path = file_path(file, __entry->pathname, MAXNAMELEN);
-		if (IS_ERR(path))
-			strncpy(__entry->pathname, "(unknown)",
-					sizeof(__entry->pathname));
 	),
-	TP_printk("dev %d:%d xmino 0x%lx path '%s'",
+	TP_printk("dev %d:%d xmino 0x%lx",
 		  MAJOR(__entry->dev), MINOR(__entry->dev),
-		  __entry->ino,
-		  __entry->pathname)
+		  __entry->ino)
 );
 
 TRACE_EVENT(xmbuf_free,
+75 -52
fs/xfs/xfs_trans_ail.c
···
 	return XFS_ITEM_SUCCESS;
 }
 
+/*
+ * Push a single log item from the AIL.
+ *
+ * @lip may have been released and freed by the time this function returns,
+ * so callers must not dereference the log item afterwards.
+ */
 static inline uint
 xfsaild_push_item(
 	struct xfs_ail		*ailp,
···
 	return target_lsn;
 }
 
+static void
+xfsaild_process_logitem(
+	struct xfs_ail		*ailp,
+	struct xfs_log_item	*lip,
+	int			*stuck,
+	int			*flushing)
+{
+	struct xfs_mount	*mp = ailp->ail_log->l_mp;
+	uint			type = lip->li_type;
+	unsigned long		flags = lip->li_flags;
+	xfs_lsn_t		item_lsn = lip->li_lsn;
+	int			lock_result;
+
+	/*
+	 * Note that iop_push may unlock and reacquire the AIL lock.  We
+	 * rely on the AIL cursor implementation to be able to deal with
+	 * the dropped lock.
+	 *
+	 * The log item may have been freed by the push, so it must not
+	 * be accessed or dereferenced below this line.
+	 */
+	lock_result = xfsaild_push_item(ailp, lip);
+	switch (lock_result) {
+	case XFS_ITEM_SUCCESS:
+		XFS_STATS_INC(mp, xs_push_ail_success);
+		trace_xfs_ail_push(ailp, type, flags, item_lsn);
+
+		ailp->ail_last_pushed_lsn = item_lsn;
+		break;
+
+	case XFS_ITEM_FLUSHING:
+		/*
+		 * The item or its backing buffer is already being
+		 * flushed.  The typical reason for that is that an
+		 * inode buffer is locked because we already pushed the
+		 * updates to it as part of inode clustering.
+		 *
+		 * We do not want to stop flushing just because lots
+		 * of items are already being flushed, but we need to
+		 * re-try the flushing relatively soon if most of the
+		 * AIL is being flushed.
+		 */
+		XFS_STATS_INC(mp, xs_push_ail_flushing);
+		trace_xfs_ail_flushing(ailp, type, flags, item_lsn);
+
+		(*flushing)++;
+		ailp->ail_last_pushed_lsn = item_lsn;
+		break;
+
+	case XFS_ITEM_PINNED:
+		XFS_STATS_INC(mp, xs_push_ail_pinned);
+		trace_xfs_ail_pinned(ailp, type, flags, item_lsn);
+
+		(*stuck)++;
+		ailp->ail_log_flush++;
+		break;
+	case XFS_ITEM_LOCKED:
+		XFS_STATS_INC(mp, xs_push_ail_locked);
+		trace_xfs_ail_locked(ailp, type, flags, item_lsn);
+
+		(*stuck)++;
+		break;
+	default:
+		ASSERT(0);
+		break;
+	}
+}
+
 static long
 xfsaild_push(
 	struct xfs_ail		*ailp)
···
 
 	lsn = lip->li_lsn;
 	while ((XFS_LSN_CMP(lip->li_lsn, ailp->ail_target) <= 0)) {
-		int	lock_result;
 
 		if (test_bit(XFS_LI_FLUSHING, &lip->li_flags))
 			goto next_item;
 
-		/*
-		 * Note that iop_push may unlock and reacquire the AIL lock.  We
-		 * rely on the AIL cursor implementation to be able to deal with
-		 * the dropped lock.
-		 */
-		lock_result = xfsaild_push_item(ailp, lip);
-		switch (lock_result) {
-		case XFS_ITEM_SUCCESS:
-			XFS_STATS_INC(mp, xs_push_ail_success);
-			trace_xfs_ail_push(lip);
-
-			ailp->ail_last_pushed_lsn = lsn;
-			break;
-
-		case XFS_ITEM_FLUSHING:
-			/*
-			 * The item or its backing buffer is already being
-			 * flushed.  The typical reason for that is that an
-			 * inode buffer is locked because we already pushed the
-			 * updates to it as part of inode clustering.
-			 *
-			 * We do not want to stop flushing just because lots
-			 * of items are already being flushed, but we need to
-			 * re-try the flushing relatively soon if most of the
-			 * AIL is being flushed.
-			 */
-			XFS_STATS_INC(mp, xs_push_ail_flushing);
-			trace_xfs_ail_flushing(lip);
-
-			flushing++;
-			ailp->ail_last_pushed_lsn = lsn;
-			break;
-
-		case XFS_ITEM_PINNED:
-			XFS_STATS_INC(mp, xs_push_ail_pinned);
-			trace_xfs_ail_pinned(lip);
-
-			stuck++;
-			ailp->ail_log_flush++;
-			break;
-		case XFS_ITEM_LOCKED:
-			XFS_STATS_INC(mp, xs_push_ail_locked);
-			trace_xfs_ail_locked(lip);
-
-			stuck++;
-			break;
-		default:
-			ASSERT(0);
-			break;
-		}
-
+		xfsaild_process_logitem(ailp, lip, &stuck, &flushing);
 		count++;
 
 		/*
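The same theme again: `li_type`, `li_flags`, and `li_lsn` are snapshotted before `xfsaild_push_item()` so the tracepoints afterwards never touch an item the push may have freed. A trimmed standalone illustration of that ordering; the types and names are invented:

#include <stdio.h>

struct log_item {
	unsigned int	type;
	unsigned long	flags;
	long long	lsn;
};

/* push() may free 'item': snapshot what the trace needs first. */
static void process_item(struct log_item *item,
		void (*push)(struct log_item *))
{
	unsigned int	type = item->type;
	unsigned long	flags = item->flags;
	long long	lsn = item->lsn;

	push(item);	/* 'item' must not be dereferenced below this line */

	/* Only the snapshots are used after the push. */
	printf("pushed: type=%u flags=%#lx lsn=%lld\n", type, flags, lsn);
}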
+8 -10
fs/xfs/xfs_verify_media.c
···
 			min_not_zero(SZ_1M, me->me_max_io_size);
 
 	BUILD_BUG_ON(BBSHIFT != SECTOR_SHIFT);
-	ASSERT(BBTOB(bbcount) >= bdev_logical_block_size(btp->bt_bdev));
+	ASSERT(BBTOB(bbcount) >= btp->bt_logical_sectorsize);
 
-	return clamp(iosize, bdev_logical_block_size(btp->bt_bdev),
-			BBTOB(bbcount));
+	return clamp(iosize, btp->bt_logical_sectorsize, BBTOB(bbcount));
 }
 
 /* Allocate as much memory as we can get for verification buffer. */
···
 	unsigned int		bio_bbcount,
 	blk_status_t		bio_status)
 {
-	trace_xfs_verify_media_error(mp, me, btp->bt_bdev->bd_dev, daddr,
-			bio_bbcount, bio_status);
+	trace_xfs_verify_media_error(mp, me, btp->bt_dev, daddr, bio_bbcount,
+			bio_status);
 
 	/*
 	 * Pass any error, I/O or otherwise, up to the caller if we didn't
···
 		btp = mp->m_ddev_targp;
 		break;
 	case XFS_DEV_LOG:
-		if (mp->m_logdev_targp->bt_bdev != mp->m_ddev_targp->bt_bdev)
+		if (mp->m_logdev_targp != mp->m_ddev_targp)
 			btp = mp->m_logdev_targp;
 		break;
 	case XFS_DEV_RT:
···
 
 	/* start and end have to be aligned to the lba size */
 	if (!IS_ALIGNED(BBTOB(me->me_start_daddr | me->me_end_daddr),
-			bdev_logical_block_size(btp->bt_bdev)))
+			btp->bt_logical_sectorsize))
 		return -EINVAL;
 
 	/*
···
 	if (!folio)
 		return -ENOMEM;
 
-	trace_xfs_verify_media(mp, me, btp->bt_bdev->bd_dev, daddr, bbcount,
-			folio);
+	trace_xfs_verify_media(mp, me, btp->bt_dev, daddr, bbcount, folio);
 
 	bio = bio_alloc(btp->bt_bdev, 1, REQ_OP_READ, GFP_KERNEL);
 	if (!bio) {
···
 	 * an operational error.
 	 */
 	me->me_start_daddr = daddr;
-	trace_xfs_verify_media_end(mp, me, btp->bt_bdev->bd_dev);
+	trace_xfs_verify_media_end(mp, me, btp->bt_dev);
 	return 0;
 }
 
+1 -1
fs/xfs/xfs_xattr.c
···
 	memset(&context, 0, sizeof(context));
 	context.dp = XFS_I(inode);
 	context.resynch = 1;
-	context.buffer = size ? data : NULL;
 	context.bufsize = size;
+	context.buffer = size ? data : NULL;
 	context.firstu = context.bufsize;
 	context.put_listent = xfs_xattr_put_listent;
 