Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

Merge tag 'xfs-for-linus-v3.13-rc5' of git://oss.sgi.com/xfs/xfs

Pull xfs bugfixes from Ben Myers:
"This contains fixes for some asserts related to project quotas, a memory
leak, a hang when disabling group or project quotas before disabling user
quotas, Dave's email address, several fixes for the alignment of file
allocation to stripe unit/width geometry, a fix for an assertion with
xfs_zero_remaining_bytes, and the behavior of metadata writeback in the face
of IO errors.

Details:
- fix memory leak in xfs_dir2_node_removename
- fix quota assertion in xfs_setattr_size
- fix quota assertions in xfs_qm_vop_create_dqattach
- fix for hang when disabling group and project quotas before disabling user quotas
- fix Dave Chinner's email address in MAINTAINERS
- fix for file allocation alignment
- fix for assertion in xfs_buf_stale by removing xfsbdstrat
- fix for alignment with swalloc mount option
- fix for "retry forever" semantics on IO errors"

* tag 'xfs-for-linus-v3.13-rc5' of git://oss.sgi.com/xfs/xfs:
xfs: abort metadata writeback on permanent errors
xfs: swalloc doesn't align allocations properly
xfs: remove xfsbdstrat error
xfs: align initial file allocations correctly
MAINTAINERS: fix incorrect mail address of XFS maintainer
xfs: fix infinite loop by detaching the group/project hints from user dquot
xfs: fix assertion failure at xfs_setattr_nonsize
xfs: fix false assertion at xfs_qm_vop_create_dqattach
xfs: fix memory leak in xfs_dir2_node_removename
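
The writeback items above replace "retry forever" semantics with "retry once,
then flag the buffer": a failed async write sets XBF_WRITE_FAIL and is
resubmitted, and a buffer that keeps failing is reported as a permanent
failure instead of being retried indefinitely. Below is a deliberately
simplified userspace model of that state machine; the buf struct and
submit_write() are invented for illustration, and the real series re-drives
retries from the AIL (and only reports a permanent failure at unmount or
shutdown) rather than recursing like this sketch does.

#include <stdbool.h>
#include <stdio.h>

#define XBF_WRITE_FAIL (1 << 0)	/* mirrors the new buffer flag */

struct buf {
	unsigned int	flags;
	long long	blockno;
};

/* stand-in for the block layer; fails every submission in this demo */
static bool submit_write(struct buf *bp)
{
	return false;
}

static void write_done(struct buf *bp, bool ok)
{
	if (ok) {
		bp->flags &= ~XBF_WRITE_FAIL;	/* success clears the state */
		return;
	}
	if (!(bp->flags & XBF_WRITE_FAIL)) {
		/* first failure: flag the buffer and resubmit once */
		bp->flags |= XBF_WRITE_FAIL;
		printf("block %lld: async write failed, retrying\n",
		       bp->blockno);
		write_done(bp, submit_write(bp));
	} else {
		/* repeat failure: stop resubmitting, report it as permanent */
		printf("block %lld: permanent write failure\n", bp->blockno);
	}
}

int main(void)
{
	struct buf bp = { .flags = 0, .blockno = 42 };

	write_done(&bp, submit_write(&bp));
	return 0;
}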

11 files changed: +168 -84
+1 -1
MAINTAINERS
···
 
 XFS FILESYSTEM
 P:      Silicon Graphics Inc
-M:      Dave Chinner <dchinner@fromorbit.com>
+M:      Dave Chinner <david@fromorbit.com>
 M:      Ben Myers <bpm@sgi.com>
 M:      xfs@oss.sgi.com
 L:      xfs@oss.sgi.com
+24 -8
fs/xfs/xfs_bmap.c
···
  * blocks at the end of the file which do not start at the previous data block,
  * we will try to align the new blocks at stripe unit boundaries.
  *
- * Returns 0 in bma->aeof if the file (fork) is empty as any new write will be
+ * Returns 1 in bma->aeof if the file (fork) is empty as any new write will be
  * at, or past the EOF.
  */
 STATIC int
···
        bma->aeof = 0;
        error = xfs_bmap_last_extent(NULL, bma->ip, whichfork, &rec,
                                     &is_empty);
-       if (error || is_empty)
+       if (error)
                return error;
+
+       if (is_empty) {
+               bma->aeof = 1;
+               return 0;
+       }
 
        /*
         * Check if we are allocation or past the last extent, or at least into
···
        int             isaligned;
        int             tryagain;
        int             error;
+       int             stripe_align;
 
        ASSERT(ap->length);
 
        mp = ap->ip->i_mount;
+
+       /* stripe alignment for allocation is determined by mount parameters */
+       stripe_align = 0;
+       if (mp->m_swidth && (mp->m_flags & XFS_MOUNT_SWALLOC))
+               stripe_align = mp->m_swidth;
+       else if (mp->m_dalign)
+               stripe_align = mp->m_dalign;
+
        align = ap->userdata ? xfs_get_extsz_hint(ap->ip) : 0;
        if (unlikely(align)) {
                error = xfs_bmap_extsize_align(mp, &ap->got, &ap->prev,
···
                ASSERT(!error);
                ASSERT(ap->length);
        }
+
+
        nullfb = *ap->firstblock == NULLFSBLOCK;
        fb_agno = nullfb ? NULLAGNUMBER : XFS_FSB_TO_AGNO(mp, *ap->firstblock);
        if (nullfb) {
···
                 */
                if (!ap->flist->xbf_low && ap->aeof) {
                        if (!ap->offset) {
-                               args.alignment = mp->m_dalign;
+                               args.alignment = stripe_align;
                                atype = args.type;
                                isaligned = 1;
                                /*
···
                                 * of minlen+alignment+slop doesn't go up
                                 * between the calls.
                                 */
-                               if (blen > mp->m_dalign && blen <= args.maxlen)
-                                       nextminlen = blen - mp->m_dalign;
+                               if (blen > stripe_align && blen <= args.maxlen)
+                                       nextminlen = blen - stripe_align;
                                else
                                        nextminlen = args.minlen;
-                               if (nextminlen + mp->m_dalign > args.minlen + 1)
+                               if (nextminlen + stripe_align > args.minlen + 1)
                                        args.minalignslop =
-                                               nextminlen + mp->m_dalign -
+                                               nextminlen + stripe_align -
                                                args.minlen - 1;
                                else
                                        args.minalignslop = 0;
···
                         */
                        args.type = atype;
                        args.fsbno = ap->blkno;
-                       args.alignment = mp->m_dalign;
+                       args.alignment = stripe_align;
                        args.minlen = nextminlen;
                        args.minalignslop = 0;
                        isaligned = 1;
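
The new stripe_align selection at the top of xfs_bmap_btalloc() reduces to a
small decision: prefer the full stripe width when the swalloc mount option is
active, otherwise fall back to the stripe unit. Before this change the
allocator aligned to m_dalign even with swalloc set, which is the misalignment
the commit fixes. A standalone sketch of just that decision follows; the
mount struct here is a stand-in, not the kernel's xfs_mount.

#include <stdio.h>

#define XFS_MOUNT_SWALLOC (1 << 0)	/* models the "swalloc" mount option */

struct mount {
	unsigned int	flags;
	int		m_dalign;	/* stripe unit, in fs blocks */
	int		m_swidth;	/* stripe width, in fs blocks */
};

static int stripe_align(const struct mount *mp)
{
	if (mp->m_swidth && (mp->flags & XFS_MOUNT_SWALLOC))
		return mp->m_swidth;	/* swalloc: align to full stripe width */
	if (mp->m_dalign)
		return mp->m_dalign;	/* default: align to stripe unit */
	return 0;			/* no geometry, no alignment */
}

int main(void)
{
	struct mount mp = { .flags = XFS_MOUNT_SWALLOC,
			    .m_dalign = 16, .m_swidth = 64 };

	printf("swalloc: align to %d blocks\n", stripe_align(&mp));
	mp.flags = 0;
	printf("default: align to %d blocks\n", stripe_align(&mp));
	return 0;
}
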
+12 -2
fs/xfs/xfs_bmap_util.c
···
                XFS_BUF_UNWRITE(bp);
                XFS_BUF_READ(bp);
                XFS_BUF_SET_ADDR(bp, xfs_fsb_to_db(ip, imap.br_startblock));
-               xfsbdstrat(mp, bp);
+
+               if (XFS_FORCED_SHUTDOWN(mp)) {
+                       error = XFS_ERROR(EIO);
+                       break;
+               }
+               xfs_buf_iorequest(bp);
                error = xfs_buf_iowait(bp);
                if (error) {
                        xfs_buf_ioerror_alert(bp,
···
                XFS_BUF_UNDONE(bp);
                XFS_BUF_UNREAD(bp);
                XFS_BUF_WRITE(bp);
-               xfsbdstrat(mp, bp);
+
+               if (XFS_FORCED_SHUTDOWN(mp)) {
+                       error = XFS_ERROR(EIO);
+                       break;
+               }
+               xfs_buf_iorequest(bp);
                error = xfs_buf_iowait(bp);
                if (error) {
                        xfs_buf_ioerror_alert(bp,
+14 -23
fs/xfs/xfs_buf.c
···
        bp->b_flags |= XBF_READ;
        bp->b_ops = ops;
 
-       xfsbdstrat(target->bt_mount, bp);
+       if (XFS_FORCED_SHUTDOWN(target->bt_mount)) {
+               xfs_buf_relse(bp);
+               return NULL;
+       }
+       xfs_buf_iorequest(bp);
        xfs_buf_iowait(bp);
        return bp;
 }
···
  * This is meant for userdata errors; metadata bufs come with
  * iodone functions attached, so that we can track down errors.
  */
-STATIC int
+int
 xfs_bioerror_relse(
        struct xfs_buf  *bp)
 {
···
        ASSERT(xfs_buf_islocked(bp));
 
        bp->b_flags |= XBF_WRITE;
-       bp->b_flags &= ~(XBF_ASYNC | XBF_READ | _XBF_DELWRI_Q);
+       bp->b_flags &= ~(XBF_ASYNC | XBF_READ | _XBF_DELWRI_Q | XBF_WRITE_FAIL);
 
        xfs_bdstrat_cb(bp);
 
···
                                 SHUTDOWN_META_IO_ERROR);
        }
        return error;
-}
-
-/*
- * Wrapper around bdstrat so that we can stop data from going to disk in case
- * we are shutting down the filesystem.  Typically user data goes thru this
- * path; one of the exceptions is the superblock.
- */
-void
-xfsbdstrat(
-       struct xfs_mount        *mp,
-       struct xfs_buf          *bp)
-{
-       if (XFS_FORCED_SHUTDOWN(mp)) {
-               trace_xfs_bdstrat_shut(bp, _RET_IP_);
-               xfs_bioerror_relse(bp);
-               return;
-       }
-
-       xfs_buf_iorequest(bp);
 }
···
                        struct xfs_buf *bp;
                        bp = list_first_entry(&dispose, struct xfs_buf, b_lru);
                        list_del_init(&bp->b_lru);
+                       if (bp->b_flags & XBF_WRITE_FAIL) {
+                               xfs_alert(btp->bt_mount,
+"Corruption Alert: Buffer at block 0x%llx had permanent write failures!\n"
+"Please run xfs_repair to determine the extent of the problem.",
+                                       (long long)bp->b_bn);
+                       }
                        xfs_buf_rele(bp);
                }
                if (loop++ != 0)
···
 
        blk_start_plug(&plug);
        list_for_each_entry_safe(bp, n, io_list, b_list) {
-               bp->b_flags &= ~(_XBF_DELWRI_Q | XBF_ASYNC);
+               bp->b_flags &= ~(_XBF_DELWRI_Q | XBF_ASYNC | XBF_WRITE_FAIL);
                bp->b_flags |= XBF_WRITE;
 
                if (!wait) {
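
Removing xfsbdstrat() moves the forced-shutdown test out of a shared wrapper
and into each call site, so every caller can pick its own recovery action
(return EIO, release the buffer, or error out the I/O) instead of getting the
wrapper's one-size-fits-all behavior. A toy model of the before/after control
flow; all names other than the XFS ones are invented for this sketch.

#include <stdbool.h>
#include <stdio.h>

static bool forced_shutdown;	/* models XFS_FORCED_SHUTDOWN(mp) */

static void buf_iorequest(const char *who)
{
	printf("%s: submitting I/O\n", who);
}

/* old style: the wrapper silently errors out the buffer on shutdown */
static void old_xfsbdstrat(const char *who)
{
	if (forced_shutdown) {
		printf("%s: shutdown, buffer errored inside wrapper\n", who);
		return;
	}
	buf_iorequest(who);
}

/* new style: the caller checks and chooses its own error path */
static int new_caller(const char *who)
{
	if (forced_shutdown) {
		printf("%s: shutdown, returning EIO directly\n", who);
		return -5;	/* stands in for -EIO */
	}
	buf_iorequest(who);
	return 0;
}

int main(void)
{
	forced_shutdown = true;
	old_xfsbdstrat("old");
	return new_caller("new") ? 1 : 0;
}
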
+7 -4
fs/xfs/xfs_buf.h
···
 #define XBF_ASYNC       (1 << 4) /* initiator will not wait for completion */
 #define XBF_DONE        (1 << 5) /* all pages in the buffer uptodate */
 #define XBF_STALE       (1 << 6) /* buffer has been staled, do not find it */
+#define XBF_WRITE_FAIL  (1 << 24)/* async writes have failed on this buffer */
 
 /* I/O hints for the BIO layer */
 #define XBF_SYNCIO      (1 << 10)/* treat this buffer as synchronous I/O */
···
        { XBF_ASYNC,            "ASYNC" }, \
        { XBF_DONE,             "DONE" }, \
        { XBF_STALE,            "STALE" }, \
+       { XBF_WRITE_FAIL,       "WRITE_FAIL" }, \
        { XBF_SYNCIO,           "SYNCIO" }, \
        { XBF_FUA,              "FUA" }, \
        { XBF_FLUSH,            "FLUSH" }, \
···
        { _XBF_KMEM,            "KMEM" }, \
        { _XBF_DELWRI_Q,        "DELWRI_Q" }, \
        { _XBF_COMPOUND,        "COMPOUND" }
+
 
 /*
  * Internal state flags.
···
 
 /* Buffer Read and Write Routines */
 extern int xfs_bwrite(struct xfs_buf *bp);
-
-extern void xfsbdstrat(struct xfs_mount *, struct xfs_buf *);
-
 extern void xfs_buf_ioend(xfs_buf_t *, int);
 extern void xfs_buf_ioerror(xfs_buf_t *, int);
 extern void xfs_buf_ioerror_alert(struct xfs_buf *, const char *func);
···
                        xfs_buf_rw_t);
 #define xfs_buf_zero(bp, off, len) \
        xfs_buf_iomove((bp), (off), (len), NULL, XBRW_ZERO)
+
+extern int xfs_bioerror_relse(struct xfs_buf *);
 
 static inline int xfs_buf_geterror(xfs_buf_t *bp)
 {
···
 
 #define XFS_BUF_ZEROFLAGS(bp) \
        ((bp)->b_flags &= ~(XBF_READ|XBF_WRITE|XBF_ASYNC| \
-                           XBF_SYNCIO|XBF_FUA|XBF_FLUSH))
+                           XBF_SYNCIO|XBF_FUA|XBF_FLUSH| \
+                           XBF_WRITE_FAIL))
 
 void xfs_buf_stale(struct xfs_buf *bp);
 #define XFS_BUF_UNSTALE(bp)     ((bp)->b_flags &= ~XBF_STALE)
+19 -2
fs/xfs/xfs_buf_item.c
···
        }
 }
 
+/*
+ * Buffer IO error rate limiting. Limit it to no more than 10 messages per 30
+ * seconds so as to not spam logs too much on repeated detection of the same
+ * buffer being bad..
+ */
+
+DEFINE_RATELIMIT_STATE(xfs_buf_write_fail_rl_state, 30 * HZ, 10);
+
 STATIC uint
 xfs_buf_item_push(
        struct xfs_log_item     *lip,
···
        ASSERT(!(bip->bli_flags & XFS_BLI_STALE));
 
        trace_xfs_buf_item_push(bip);
+
+       /* has a previous flush failed due to IO errors? */
+       if ((bp->b_flags & XBF_WRITE_FAIL) &&
+           ___ratelimit(&xfs_buf_write_fail_rl_state, "XFS:")) {
+               xfs_warn(bp->b_target->bt_mount,
+"Detected failing async write on buffer block 0x%llx. Retrying async write.\n",
+                       (long long)bp->b_bn);
+       }
 
        if (!xfs_buf_delwri_queue(bp, buffer_list))
                rval = XFS_ITEM_FLUSHING;
···
 
        xfs_buf_ioerror(bp, 0); /* errno of 0 unsets the flag */
 
-       if (!XFS_BUF_ISSTALE(bp)) {
-               bp->b_flags |= XBF_WRITE | XBF_ASYNC | XBF_DONE;
+       if (!(bp->b_flags & (XBF_STALE|XBF_WRITE_FAIL))) {
+               bp->b_flags |= XBF_WRITE | XBF_ASYNC |
+                              XBF_DONE | XBF_WRITE_FAIL;
                xfs_buf_iorequest(bp);
        } else {
                xfs_buf_relse(bp);
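
The retry warning is throttled with the kernel's ratelimit helpers: an
interval of 30 * HZ and a burst of 10 allows at most 10 messages per 30
seconds for repeated failures on the same buffers. A userspace approximation
of that interval/burst bookkeeping is sketched below; the struct and function
here are stand-ins for the real ratelimit API, not copies of it.

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

struct ratelimit_state {
	time_t	begin;		/* start of the current interval */
	int	interval;	/* interval length, seconds */
	int	burst;		/* messages allowed per interval */
	int	printed;	/* messages emitted this interval */
};

static bool ratelimit_ok(struct ratelimit_state *rs)
{
	time_t now = time(NULL);

	if (now - rs->begin >= rs->interval) {	/* new interval: reset */
		rs->begin = now;
		rs->printed = 0;
	}
	if (rs->printed < rs->burst) {
		rs->printed++;
		return true;			/* message allowed */
	}
	return false;				/* message suppressed */
}

int main(void)
{
	struct ratelimit_state rs = { .interval = 30, .burst = 10 };
	int suppressed = 0;

	for (int i = 0; i < 100; i++)
		if (!ratelimit_ok(&rs))
			suppressed++;
	printf("printed %d, suppressed %d\n", rs.printed, suppressed);
	return 0;
}
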
+13 -13
fs/xfs/xfs_dir2_node.c
···
  */
 int                                            /* error */
 xfs_dir2_node_removename(
-       xfs_da_args_t           *args)          /* operation arguments */
+       struct xfs_da_args      *args)          /* operation arguments */
 {
-       xfs_da_state_blk_t      *blk;           /* leaf block */
+       struct xfs_da_state_blk *blk;           /* leaf block */
        int                     error;          /* error return value */
        int                     rval;           /* operation return value */
-       xfs_da_state_t          *state;         /* btree cursor */
+       struct xfs_da_state     *state;         /* btree cursor */
 
        trace_xfs_dir2_node_removename(args);
 
···
        state->mp = args->dp->i_mount;
        state->blocksize = state->mp->m_dirblksize;
        state->node_ents = state->mp->m_dir_node_ents;
-       /*
-        * Look up the entry we're deleting, set up the cursor.
-        */
+
+       /* Look up the entry we're deleting, set up the cursor. */
        error = xfs_da3_node_lookup_int(state, &rval);
        if (error)
-               rval = error;
-       /*
-        * Didn't find it, upper layer screwed up.
-        */
+               goto out_free;
+
+       /* Didn't find it, upper layer screwed up. */
        if (rval != EEXIST) {
-               xfs_da_state_free(state);
-               return rval;
+               error = rval;
+               goto out_free;
        }
+
        blk = &state->path.blk[state->path.active - 1];
        ASSERT(blk->magic == XFS_DIR2_LEAFN_MAGIC);
        ASSERT(state->extravalid);
···
        error = xfs_dir2_leafn_remove(args, blk->bp, blk->index,
                                      &state->extrablk, &rval);
        if (error)
-               return error;
+               goto out_free;
        /*
         * Fix the hash values up the btree.
         */
···
         */
        if (!error)
                error = xfs_dir2_node_to_leaf(state);
+out_free:
        xfs_da_state_free(state);
        return error;
 }
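
The leak fix is a textbook single-exit conversion: every early return between
the state allocation and the end of the function is redirected through an
out_free label so xfs_da_state_free() always runs. A minimal model of the
pattern follows; lookup() and remove_entry() are placeholders invented for
this sketch.

#include <stdio.h>
#include <stdlib.h>

static int lookup(void)       { return -1; }	/* fails in this demo */
static int remove_entry(void) { return 0; }

static int removename(void)
{
	int  error;
	int *state = malloc(sizeof(*state));	/* models xfs_da_state_alloc() */

	if (!state)
		return -1;

	error = lookup();
	if (error)
		goto out_free;	/* was "return error", which leaked state */

	error = remove_entry();
	if (error)
		goto out_free;

out_free:
	free(state);		/* models xfs_da_state_free(): runs on every path */
	return error;
}

int main(void)
{
	printf("removename() = %d (state freed on every path)\n", removename());
	return 0;
}
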
+2 -1
fs/xfs/xfs_iops.c
···
        }
        if (!gid_eq(igid, gid)) {
                if (XFS_IS_QUOTA_RUNNING(mp) && XFS_IS_GQUOTA_ON(mp)) {
-                       ASSERT(!XFS_IS_PQUOTA_ON(mp));
+                       ASSERT(xfs_sb_version_has_pquotino(&mp->m_sb) ||
+                              !XFS_IS_PQUOTA_ON(mp));
                        ASSERT(mask & ATTR_GID);
                        ASSERT(gdqp);
                        olddquot2 = xfs_qm_vop_chown(tp, ip,
+11 -2
fs/xfs/xfs_log_recover.c
···
        bp->b_io_length = nbblks;
        bp->b_error = 0;
 
-       xfsbdstrat(log->l_mp, bp);
+       if (XFS_FORCED_SHUTDOWN(log->l_mp))
+               return XFS_ERROR(EIO);
+
+       xfs_buf_iorequest(bp);
        error = xfs_buf_iowait(bp);
        if (error)
                xfs_buf_ioerror_alert(bp, __func__);
···
        XFS_BUF_READ(bp);
        XFS_BUF_UNASYNC(bp);
        bp->b_ops = &xfs_sb_buf_ops;
-       xfsbdstrat(log->l_mp, bp);
+
+       if (XFS_FORCED_SHUTDOWN(log->l_mp)) {
+               xfs_buf_relse(bp);
+               return XFS_ERROR(EIO);
+       }
+
+       xfs_buf_iorequest(bp);
        error = xfs_buf_iowait(bp);
        if (error) {
                xfs_buf_ioerror_alert(bp, __func__);
+53 -27
fs/xfs/xfs_qm.c
···
 {
        struct xfs_mount        *mp = dqp->q_mount;
        struct xfs_quotainfo    *qi = mp->m_quotainfo;
-       struct xfs_dquot        *gdqp = NULL;
-       struct xfs_dquot        *pdqp = NULL;
 
        xfs_dqlock(dqp);
        if ((dqp->dq_flags & XFS_DQ_FREEING) || dqp->q_nrefs != 0) {
                xfs_dqunlock(dqp);
                return EAGAIN;
-       }
-
-       /*
-        * If this quota has a hint attached, prepare for releasing it now.
-        */
-       gdqp = dqp->q_gdquot;
-       if (gdqp) {
-               xfs_dqlock(gdqp);
-               dqp->q_gdquot = NULL;
-       }
-
-       pdqp = dqp->q_pdquot;
-       if (pdqp) {
-               xfs_dqlock(pdqp);
-               dqp->q_pdquot = NULL;
        }
 
        dqp->dq_flags |= XFS_DQ_FREEING;
···
        XFS_STATS_DEC(xs_qm_dquot_unused);
 
        xfs_qm_dqdestroy(dqp);
+       return 0;
+}
+
+/*
+ * Release the group or project dquot pointers the user dquots maybe carrying
+ * around as a hint, and proceed to purge the user dquot cache if requested.
+ */
+STATIC int
+xfs_qm_dqpurge_hints(
+       struct xfs_dquot        *dqp,
+       void                    *data)
+{
+       struct xfs_dquot        *gdqp = NULL;
+       struct xfs_dquot        *pdqp = NULL;
+       uint                    flags = *((uint *)data);
+
+       xfs_dqlock(dqp);
+       if (dqp->dq_flags & XFS_DQ_FREEING) {
+               xfs_dqunlock(dqp);
+               return EAGAIN;
+       }
+
+       /* If this quota has a hint attached, prepare for releasing it now */
+       gdqp = dqp->q_gdquot;
+       if (gdqp)
+               dqp->q_gdquot = NULL;
+
+       pdqp = dqp->q_pdquot;
+       if (pdqp)
+               dqp->q_pdquot = NULL;
+
+       xfs_dqunlock(dqp);
 
        if (gdqp)
-               xfs_qm_dqput(gdqp);
+               xfs_qm_dqrele(gdqp);
        if (pdqp)
-               xfs_qm_dqput(pdqp);
+               xfs_qm_dqrele(pdqp);
+
+       if (flags & XFS_QMOPT_UQUOTA)
+               return xfs_qm_dqpurge(dqp, NULL);
+
        return 0;
 }
···
        struct xfs_mount        *mp,
        uint                    flags)
 {
-       if (flags & XFS_QMOPT_UQUOTA)
-               xfs_qm_dquot_walk(mp, XFS_DQ_USER, xfs_qm_dqpurge, NULL);
+       /*
+        * We have to release group/project dquot hint(s) from the user dquot
+        * at first if they are there, otherwise we would run into an infinite
+        * loop while walking through radix tree to purge other type of dquots
+        * since their refcount is not zero if the user dquot refers to them
+        * as hint.
+        *
+        * Call the special xfs_qm_dqpurge_hints() will end up go through the
+        * general xfs_qm_dqpurge() against user dquot cache if requested.
+        */
+       xfs_qm_dquot_walk(mp, XFS_DQ_USER, xfs_qm_dqpurge_hints, &flags);
+
        if (flags & XFS_QMOPT_GQUOTA)
                xfs_qm_dquot_walk(mp, XFS_DQ_GROUP, xfs_qm_dqpurge, NULL);
        if (flags & XFS_QMOPT_PQUOTA)
···
        ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
        ASSERT(XFS_IS_QUOTA_RUNNING(mp));
 
-       if (udqp) {
+       if (udqp && XFS_IS_UQUOTA_ON(mp)) {
                ASSERT(ip->i_udquot == NULL);
-               ASSERT(XFS_IS_UQUOTA_ON(mp));
                ASSERT(ip->i_d.di_uid == be32_to_cpu(udqp->q_core.d_id));
 
                ip->i_udquot = xfs_qm_dqhold(udqp);
                xfs_trans_mod_dquot(tp, udqp, XFS_TRANS_DQ_ICOUNT, 1);
        }
-       if (gdqp) {
+       if (gdqp && XFS_IS_GQUOTA_ON(mp)) {
                ASSERT(ip->i_gdquot == NULL);
-               ASSERT(XFS_IS_GQUOTA_ON(mp));
                ASSERT(ip->i_d.di_gid == be32_to_cpu(gdqp->q_core.d_id));
                ip->i_gdquot = xfs_qm_dqhold(gdqp);
                xfs_trans_mod_dquot(tp, gdqp, XFS_TRANS_DQ_ICOUNT, 1);
        }
-       if (pdqp) {
+       if (pdqp && XFS_IS_PQUOTA_ON(mp)) {
                ASSERT(ip->i_pdquot == NULL);
-               ASSERT(XFS_IS_PQUOTA_ON(mp));
                ASSERT(xfs_get_projid(ip) == be32_to_cpu(pdqp->q_core.d_id));
 
                ip->i_pdquot = xfs_qm_dqhold(pdqp);
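
The infinite loop existed because each user dquot holds a reference on the
group/project dquots it caches as hints, so purging the group/project trees
first waits forever for refcounts that can never reach zero; detaching the
hints from the user dquots first breaks the cycle. A toy illustration of the
refcount dependency follows; all structures here are invented for the sketch.

#include <stdio.h>

struct dquot {
	int		refs;
	struct dquot	*hint;		/* models dqp->q_gdquot */
};

static void detach_hint(struct dquot *udq)
{
	if (udq->hint) {
		udq->hint->refs--;	/* models xfs_qm_dqrele() */
		udq->hint = NULL;
	}
}

int main(void)
{
	struct dquot gdq = { .refs = 1 };	/* pinned only by the hint */
	struct dquot udq = { .refs = 0, .hint = &gdq };

	/* purging the group tree first would spin: refs never drops to 0 */
	printf("before detach: gdq.refs = %d (purge would loop)\n", gdq.refs);

	detach_hint(&udq);			/* the fix: drop hints first */
	printf("after detach:  gdq.refs = %d (purge can proceed)\n", gdq.refs);
	return 0;
}
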
+12 -1
fs/xfs/xfs_trans_buf.c
···
        ASSERT(bp->b_iodone == NULL);
        XFS_BUF_READ(bp);
        bp->b_ops = ops;
-       xfsbdstrat(tp->t_mountp, bp);
+
+       /*
+        * XXX(hch): clean up the error handling here to be less
+        * of a mess..
+        */
+       if (XFS_FORCED_SHUTDOWN(mp)) {
+               trace_xfs_bdstrat_shut(bp, _RET_IP_);
+               xfs_bioerror_relse(bp);
+       } else {
+               xfs_buf_iorequest(bp);
+       }
+
        error = xfs_buf_iowait(bp);
        if (error) {
                xfs_buf_ioerror_alert(bp, __func__);