Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

Merge tag 'nfs-for-6.16-1' of git://git.linux-nfs.org/projects/anna/linux-nfs

Pull NFS client updates from Anna Schumaker:
"New Features:

- Implement the Sunrpc rfc2203 rpcsec_gss sequence number cache

- Add support for FALLOC_FL_ZERO_RANGE on NFS v4.2

- Add a localio sysfs attribute

Stable Fixes:

- Fix double-unlock bug in nfs_return_empty_folio()

- Don't check for OPEN feature support in v4.1

- Always probe for LOCALIO support asynchronously

- Prevent hang on NFS mounts with xprtsec=[m]tls

Other Bugfixes:

- xattr handlers should check for absent nfs filehandles

- Fix setattr caching of TIME_[MODIFY|ACCESS]_SET when timestamps are
delegated

- Fix listxattr to return selinux security labels

- Connect to NFSv3 DS using TLS if MDS connection uses TLS

- Clear SB_RDONLY before getting a superblock, and ignore when
remounting

- Fix incorrect handling of NFS error codes in nfs4_do_mkdir()

- Various nfs_localio fixes from Neil Brown that include fixing an
rcu compilation error found by older gcc versions.

- Update stats on flexfiles pNFS DSes when receiving NFS4ERR_DELAY

Cleanups:

- Add a refcount tracker for struct net in the nfs_client

- Allow FREE_STATEID to clean up delegations

- Always set NLINK even if the server doesn't support it

- Cleanups to the NFS folio writeback code

- Remove dead code from xs_tcp_tls_setup_socket()"

* tag 'nfs-for-6.16-1' of git://git.linux-nfs.org/projects/anna/linux-nfs: (30 commits)
flexfiles/pNFS: update stats on NFS4ERR_DELAY for v4.1 DSes
nfs_localio: change nfsd_file_put_local() to take a pointer to __rcu pointer
nfs_localio: protect race between nfs_uuid_put() and nfs_close_local_fh()
nfs_localio: duplicate nfs_close_local_fh()
nfs_localio: simplify interface to nfsd for getting nfsd_file
nfs_localio: always hold nfsd net ref with nfsd_file ref
nfs_localio: use cmpxchg() to install new nfs_file_localio
SUNRPC: Remove dead code from xs_tcp_tls_setup_socket()
SUNRPC: Prevent hang on NFS mount with xprtsec=[m]tls
nfs: fix incorrect handling of large-number NFS errors in nfs4_do_mkdir()
nfs: ignore SB_RDONLY when remounting nfs
nfs: clear SB_RDONLY before getting superblock
NFS: always probe for LOCALIO support asynchronously
pnfs/flexfiles: connect to NFSv3 DS using TLS if MDS connection uses TLS
NFS: add localio to sysfs
nfs: use writeback_iter directly
nfs: refactor nfs_do_writepage
nfs: don't return AOP_WRITEPAGE_ACTIVATE from nfs_do_writepage
nfs: fold nfs_page_async_flush into nfs_do_writepage
NFSv4: Always set NLINK even if the server doesn't support it
...

+555 -227
+3 -3
fs/nfs/client.c
··· 180 180 clp->cl_proto = cl_init->proto; 181 181 clp->cl_nconnect = cl_init->nconnect; 182 182 clp->cl_max_connect = cl_init->max_connect ? cl_init->max_connect : 1; 183 - clp->cl_net = get_net(cl_init->net); 183 + clp->cl_net = get_net_track(cl_init->net, &clp->cl_ns_tracker, GFP_KERNEL); 184 184 185 185 #if IS_ENABLED(CONFIG_NFS_LOCALIO) 186 186 seqlock_init(&clp->cl_boot_lock); ··· 250 250 if (!IS_ERR(clp->cl_rpcclient)) 251 251 rpc_shutdown_client(clp->cl_rpcclient); 252 252 253 - put_net(clp->cl_net); 253 + put_net_track(clp->cl_net, &clp->cl_ns_tracker); 254 254 put_nfs_version(clp->cl_nfs_mod); 255 255 kfree(clp->cl_hostname); 256 256 kfree(clp->cl_acceptor); ··· 439 439 spin_unlock(&nn->nfs_client_lock); 440 440 new = rpc_ops->init_client(new, cl_init); 441 441 if (!IS_ERR(new)) 442 - nfs_local_probe(new); 442 + nfs_local_probe_async(new); 443 443 return new; 444 444 } 445 445
+18 -7
fs/nfs/delegation.c
··· 1021 1021 nfs_inode_find_state_and_recover(inode, stateid); 1022 1022 } 1023 1023 1024 - void nfs_remove_bad_delegation(struct inode *inode, 1025 - const nfs4_stateid *stateid) 1026 - { 1027 - nfs_revoke_delegation(inode, stateid); 1028 - } 1029 - EXPORT_SYMBOL_GPL(nfs_remove_bad_delegation); 1030 - 1031 1024 void nfs_delegation_mark_returned(struct inode *inode, 1032 1025 const nfs4_stateid *stateid) 1033 1026 { ··· 1061 1068 1062 1069 nfs_inode_find_state_and_recover(inode, stateid); 1063 1070 } 1071 + 1072 + /** 1073 + * nfs_remove_bad_delegation - handle delegations that are unusable 1074 + * @inode: inode to process 1075 + * @stateid: the delegation's stateid 1076 + * 1077 + * If the server ACK-ed our FREE_STATEID then clean 1078 + * up the delegation, else mark and keep the revoked state. 1079 + */ 1080 + void nfs_remove_bad_delegation(struct inode *inode, 1081 + const nfs4_stateid *stateid) 1082 + { 1083 + if (stateid && stateid->type == NFS4_FREED_STATEID_TYPE) 1084 + nfs_delegation_mark_returned(inode, stateid); 1085 + else 1086 + nfs_revoke_delegation(inode, stateid); 1087 + } 1088 + EXPORT_SYMBOL_GPL(nfs_remove_bad_delegation); 1064 1089 1065 1090 /** 1066 1091 * nfs_expire_unused_delegation_types
+2
fs/nfs/flexfilelayout/flexfilelayout.c
··· 1129 1129 nfs4_schedule_session_recovery(clp->cl_session, task->tk_status); 1130 1130 break; 1131 1131 case -NFS4ERR_DELAY: 1132 + nfs_inc_stats(lseg->pls_layout->plh_inode, NFSIOS_DELAY); 1133 + fallthrough; 1132 1134 case -NFS4ERR_GRACE: 1133 1135 rpc_delay(task, FF_LAYOUT_POLL_RETRY_MAX); 1134 1136 break;
+1 -1
fs/nfs/flexfilelayout/flexfilelayoutdev.c
··· 400 400 * keep ds_clp even if DS is local, so that if local IO cannot 401 401 * proceed somehow, we can fall back to NFS whenever we want. 402 402 */ 403 - nfs_local_probe(ds->ds_clp); 403 + nfs_local_probe_async(ds->ds_clp); 404 404 max_payload = 405 405 nfs_block_size(rpc_max_payload(ds->ds_clp->cl_rpcclient), 406 406 NULL);
+47 -4
fs/nfs/inode.c
··· 557 557 set_nlink(inode, fattr->nlink); 558 558 else if (fattr_supported & NFS_ATTR_FATTR_NLINK) 559 559 nfs_set_cache_invalid(inode, NFS_INO_INVALID_NLINK); 560 + else 561 + set_nlink(inode, 1); 560 562 if (fattr->valid & NFS_ATTR_FATTR_OWNER) 561 563 inode->i_uid = fattr->uid; 562 564 else if (fattr_supported & NFS_ATTR_FATTR_OWNER) ··· 635 633 } 636 634 } 637 635 636 + static void nfs_set_timestamps_to_ts(struct inode *inode, struct iattr *attr) 637 + { 638 + unsigned int cache_flags = 0; 639 + 640 + if (attr->ia_valid & ATTR_MTIME_SET) { 641 + struct timespec64 ctime = inode_get_ctime(inode); 642 + struct timespec64 mtime = inode_get_mtime(inode); 643 + struct timespec64 now; 644 + int updated = 0; 645 + 646 + now = inode_set_ctime_current(inode); 647 + if (!timespec64_equal(&now, &ctime)) 648 + updated |= S_CTIME; 649 + 650 + inode_set_mtime_to_ts(inode, attr->ia_mtime); 651 + if (!timespec64_equal(&now, &mtime)) 652 + updated |= S_MTIME; 653 + 654 + inode_maybe_inc_iversion(inode, updated); 655 + cache_flags |= NFS_INO_INVALID_CTIME | NFS_INO_INVALID_MTIME; 656 + } 657 + if (attr->ia_valid & ATTR_ATIME_SET) { 658 + inode_set_atime_to_ts(inode, attr->ia_atime); 659 + cache_flags |= NFS_INO_INVALID_ATIME; 660 + } 661 + NFS_I(inode)->cache_validity &= ~cache_flags; 662 + } 663 + 638 664 static void nfs_update_timestamps(struct inode *inode, unsigned int ia_valid) 639 665 { 640 666 enum file_time_flags time_flags = 0; ··· 731 701 732 702 if (nfs_have_delegated_mtime(inode) && attr->ia_valid & ATTR_MTIME) { 733 703 spin_lock(&inode->i_lock); 734 - nfs_update_timestamps(inode, attr->ia_valid); 704 + if (attr->ia_valid & ATTR_MTIME_SET) { 705 + nfs_set_timestamps_to_ts(inode, attr); 706 + attr->ia_valid &= ~(ATTR_MTIME|ATTR_MTIME_SET| 707 + ATTR_ATIME|ATTR_ATIME_SET); 708 + } else { 709 + nfs_update_timestamps(inode, attr->ia_valid); 710 + attr->ia_valid &= ~(ATTR_MTIME|ATTR_ATIME); 711 + } 735 712 spin_unlock(&inode->i_lock); 736 - attr->ia_valid &= 
~(ATTR_MTIME | ATTR_ATIME); 737 713 } else if (nfs_have_delegated_atime(inode) && 738 714 attr->ia_valid & ATTR_ATIME && 739 715 !(attr->ia_valid & ATTR_MTIME)) { 740 - nfs_update_delegated_atime(inode); 741 - attr->ia_valid &= ~ATTR_ATIME; 716 + if (attr->ia_valid & ATTR_ATIME_SET) { 717 + spin_lock(&inode->i_lock); 718 + nfs_set_timestamps_to_ts(inode, attr); 719 + spin_unlock(&inode->i_lock); 720 + attr->ia_valid &= ~(ATTR_ATIME|ATTR_ATIME_SET); 721 + } else { 722 + nfs_update_delegated_atime(inode); 723 + attr->ia_valid &= ~ATTR_ATIME; 724 + } 742 725 } 743 726 744 727 /* Optimization: if the end result is no change, don't RPC */
-1
fs/nfs/internal.h
··· 455 455 456 456 #if IS_ENABLED(CONFIG_NFS_LOCALIO) 457 457 /* localio.c */ 458 - extern void nfs_local_probe(struct nfs_client *); 459 458 extern void nfs_local_probe_async(struct nfs_client *); 460 459 extern void nfs_local_probe_async_work(struct work_struct *); 461 460 extern struct nfsd_file *nfs_local_open_fh(struct nfs_client *,
+18 -33
fs/nfs/localio.c
··· 171 171 * - called after alloc_client and init_client (so cl_rpcclient exists) 172 172 * - this function is idempotent, it can be called for old or new clients 173 173 */ 174 - void nfs_local_probe(struct nfs_client *clp) 174 + static void nfs_local_probe(struct nfs_client *clp) 175 175 { 176 176 /* Disallow localio if disabled via sysfs or AUTH_SYS isn't used */ 177 177 if (!localio_enabled || ··· 191 191 nfs_localio_enable_client(clp); 192 192 nfs_uuid_end(&clp->cl_uuid); 193 193 } 194 - EXPORT_SYMBOL_GPL(nfs_local_probe); 195 194 196 195 void nfs_local_probe_async_work(struct work_struct *work) 197 196 { 198 197 struct nfs_client *clp = 199 198 container_of(work, struct nfs_client, cl_local_probe_work); 200 199 200 + if (!refcount_inc_not_zero(&clp->cl_count)) 201 + return; 201 202 nfs_local_probe(clp); 203 + nfs_put_client(clp); 202 204 } 203 205 204 206 void nfs_local_probe_async(struct nfs_client *clp) ··· 209 207 } 210 208 EXPORT_SYMBOL_GPL(nfs_local_probe_async); 211 209 212 - static inline struct nfsd_file *nfs_local_file_get(struct nfsd_file *nf) 210 + static inline void nfs_local_file_put(struct nfsd_file *localio) 213 211 { 214 - return nfs_to->nfsd_file_get(nf); 215 - } 212 + /* nfs_to_nfsd_file_put_local() expects an __rcu pointer 213 + * but we have a __kernel pointer. It is always safe 214 + * to cast a __kernel pointer to an __rcu pointer 215 + * because the cast only weakens what is known about the pointer. 
216 + */ 217 + struct nfsd_file __rcu *nf = (struct nfsd_file __rcu*) localio; 216 218 217 - static inline void nfs_local_file_put(struct nfsd_file *nf) 218 - { 219 - nfs_to->nfsd_file_put(nf); 219 + nfs_to_nfsd_file_put_local(&nf); 220 220 } 221 221 222 222 /* ··· 230 226 static struct nfsd_file * 231 227 __nfs_local_open_fh(struct nfs_client *clp, const struct cred *cred, 232 228 struct nfs_fh *fh, struct nfs_file_localio *nfl, 229 + struct nfsd_file __rcu **pnf, 233 230 const fmode_t mode) 234 231 { 235 232 struct nfsd_file *localio; 236 233 237 234 localio = nfs_open_local_fh(&clp->cl_uuid, clp->cl_rpcclient, 238 - cred, fh, nfl, mode); 235 + cred, fh, nfl, pnf, mode); 239 236 if (IS_ERR(localio)) { 240 237 int status = PTR_ERR(localio); 241 238 trace_nfs_local_open_fh(fh, mode, status); ··· 263 258 struct nfs_fh *fh, struct nfs_file_localio *nfl, 264 259 const fmode_t mode) 265 260 { 266 - struct nfsd_file *nf, *new, __rcu **pnf; 261 + struct nfsd_file *nf, __rcu **pnf; 267 262 268 263 if (!nfs_server_is_local(clp)) 269 264 return NULL; ··· 275 270 else 276 271 pnf = &nfl->ro_file; 277 272 278 - new = NULL; 279 - rcu_read_lock(); 280 - nf = rcu_dereference(*pnf); 281 - if (!nf) { 282 - rcu_read_unlock(); 283 - new = __nfs_local_open_fh(clp, cred, fh, nfl, mode); 284 - if (IS_ERR(new)) 285 - return NULL; 286 - rcu_read_lock(); 287 - /* try to swap in the pointer */ 288 - spin_lock(&clp->cl_uuid.lock); 289 - nf = rcu_dereference_protected(*pnf, 1); 290 - if (!nf) { 291 - nf = new; 292 - new = NULL; 293 - rcu_assign_pointer(*pnf, nf); 294 - } 295 - spin_unlock(&clp->cl_uuid.lock); 296 - } 297 - nf = nfs_local_file_get(nf); 298 - rcu_read_unlock(); 299 - if (new) 300 - nfs_to_nfsd_file_put_local(new); 273 + nf = __nfs_local_open_fh(clp, cred, fh, nfl, pnf, mode); 274 + if (IS_ERR(nf)) 275 + return NULL; 301 276 return nf; 302 277 } 303 278 EXPORT_SYMBOL_GPL(nfs_local_open_fh);
+1
fs/nfs/nfs42.h
··· 21 21 ssize_t nfs42_proc_copy(struct file *, loff_t, struct file *, loff_t, size_t, 22 22 struct nl4_server *, nfs4_stateid *, bool); 23 23 int nfs42_proc_deallocate(struct file *, loff_t, loff_t); 24 + int nfs42_proc_zero_range(struct file *, loff_t, loff_t); 24 25 loff_t nfs42_proc_llseek(struct file *, loff_t, int); 25 26 int nfs42_proc_layoutstats_generic(struct nfs_server *, 26 27 struct nfs42_layoutstat_data *);
+27 -2
fs/nfs/nfs42proc.c
··· 146 146 147 147 err = nfs42_proc_fallocate(&msg, filep, offset, len); 148 148 if (err == -EOPNOTSUPP) 149 - NFS_SERVER(inode)->caps &= ~NFS_CAP_ALLOCATE; 149 + NFS_SERVER(inode)->caps &= ~(NFS_CAP_ALLOCATE | 150 + NFS_CAP_ZERO_RANGE); 150 151 151 152 inode_unlock(inode); 152 153 return err; ··· 170 169 if (err == 0) 171 170 truncate_pagecache_range(inode, offset, (offset + len) -1); 172 171 if (err == -EOPNOTSUPP) 173 - NFS_SERVER(inode)->caps &= ~NFS_CAP_DEALLOCATE; 172 + NFS_SERVER(inode)->caps &= ~(NFS_CAP_DEALLOCATE | 173 + NFS_CAP_ZERO_RANGE); 174 + 175 + inode_unlock(inode); 176 + return err; 177 + } 178 + 179 + int nfs42_proc_zero_range(struct file *filep, loff_t offset, loff_t len) 180 + { 181 + struct rpc_message msg = { 182 + .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_ZERO_RANGE], 183 + }; 184 + struct inode *inode = file_inode(filep); 185 + int err; 186 + 187 + if (!nfs_server_capable(inode, NFS_CAP_ZERO_RANGE)) 188 + return -EOPNOTSUPP; 189 + 190 + inode_lock(inode); 191 + 192 + err = nfs42_proc_fallocate(&msg, filep, offset, len); 193 + if (err == 0) 194 + truncate_pagecache_range(inode, offset, (offset + len) -1); 195 + if (err == -EOPNOTSUPP) 196 + NFS_SERVER(inode)->caps &= ~NFS_CAP_ZERO_RANGE; 174 197 175 198 inode_unlock(inode); 176 199 return err;
+64
fs/nfs/nfs42xdr.c
··· 174 174 decode_putfh_maxsz + \ 175 175 decode_deallocate_maxsz + \ 176 176 decode_getattr_maxsz) 177 + #define NFS4_enc_zero_range_sz (compound_encode_hdr_maxsz + \ 178 + encode_sequence_maxsz + \ 179 + encode_putfh_maxsz + \ 180 + encode_deallocate_maxsz + \ 181 + encode_allocate_maxsz + \ 182 + encode_getattr_maxsz) 183 + #define NFS4_dec_zero_range_sz (compound_decode_hdr_maxsz + \ 184 + decode_sequence_maxsz + \ 185 + decode_putfh_maxsz + \ 186 + decode_deallocate_maxsz + \ 187 + decode_allocate_maxsz + \ 188 + decode_getattr_maxsz) 177 189 #define NFS4_enc_read_plus_sz (compound_encode_hdr_maxsz + \ 178 190 encode_sequence_maxsz + \ 179 191 encode_putfh_maxsz + \ ··· 656 644 encode_sequence(xdr, &args->seq_args, &hdr); 657 645 encode_putfh(xdr, args->falloc_fh, &hdr); 658 646 encode_deallocate(xdr, args, &hdr); 647 + encode_getfattr(xdr, args->falloc_bitmask, &hdr); 648 + encode_nops(&hdr); 649 + } 650 + 651 + /* 652 + * Encode ZERO_RANGE request 653 + */ 654 + static void nfs4_xdr_enc_zero_range(struct rpc_rqst *req, 655 + struct xdr_stream *xdr, 656 + const void *data) 657 + { 658 + const struct nfs42_falloc_args *args = data; 659 + struct compound_hdr hdr = { 660 + .minorversion = nfs4_xdr_minorversion(&args->seq_args), 661 + }; 662 + 663 + encode_compound_hdr(xdr, req, &hdr); 664 + encode_sequence(xdr, &args->seq_args, &hdr); 665 + encode_putfh(xdr, args->falloc_fh, &hdr); 666 + encode_deallocate(xdr, args, &hdr); 667 + encode_allocate(xdr, args, &hdr); 659 668 encode_getfattr(xdr, args->falloc_bitmask, &hdr); 660 669 encode_nops(&hdr); 661 670 } ··· 1536 1503 if (status) 1537 1504 goto out; 1538 1505 status = decode_deallocate(xdr, res); 1506 + if (status) 1507 + goto out; 1508 + decode_getfattr(xdr, res->falloc_fattr, res->falloc_server); 1509 + out: 1510 + return status; 1511 + } 1512 + 1513 + /* 1514 + * Decode ZERO_RANGE request 1515 + */ 1516 + static int nfs4_xdr_dec_zero_range(struct rpc_rqst *rqstp, 1517 + struct xdr_stream *xdr, 1518 + void 
*data) 1519 + { 1520 + struct nfs42_falloc_res *res = data; 1521 + struct compound_hdr hdr; 1522 + int status; 1523 + 1524 + status = decode_compound_hdr(xdr, &hdr); 1525 + if (status) 1526 + goto out; 1527 + status = decode_sequence(xdr, &res->seq_res, rqstp); 1528 + if (status) 1529 + goto out; 1530 + status = decode_putfh(xdr); 1531 + if (status) 1532 + goto out; 1533 + status = decode_deallocate(xdr, res); 1534 + if (status) 1535 + goto out; 1536 + status = decode_allocate(xdr, res); 1539 1537 if (status) 1540 1538 goto out; 1541 1539 decode_getfattr(xdr, res->falloc_fattr, res->falloc_server);
+1 -2
fs/nfs/nfs4_fs.h
··· 67 67 void (*free_lock_state)(struct nfs_server *, 68 68 struct nfs4_lock_state *); 69 69 int (*test_and_free_expired)(struct nfs_server *, 70 - const nfs4_stateid *, 71 - const struct cred *); 70 + nfs4_stateid *, const struct cred *); 72 71 struct nfs_seqid * 73 72 (*alloc_seqid)(struct nfs_seqid_counter *, gfp_t); 74 73 void (*session_trunk)(struct rpc_clnt *clnt,
+9 -1
fs/nfs/nfs4file.c
··· 225 225 if (!S_ISREG(inode->i_mode)) 226 226 return -EOPNOTSUPP; 227 227 228 - if ((mode != 0) && (mode != (FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE))) 228 + switch (mode) { 229 + case 0: 230 + case FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE: 231 + case FALLOC_FL_ZERO_RANGE: 232 + break; 233 + default: 229 234 return -EOPNOTSUPP; 235 + } 230 236 231 237 ret = inode_newsize_ok(inode, offset + len); 232 238 if (ret < 0) ··· 240 234 241 235 if (mode & FALLOC_FL_PUNCH_HOLE) 242 236 return nfs42_proc_deallocate(filep, offset, len); 237 + else if (mode & FALLOC_FL_ZERO_RANGE) 238 + return nfs42_proc_zero_range(filep, offset ,len); 243 239 return nfs42_proc_allocate(filep, offset, len); 244 240 } 245 241
+49 -26
fs/nfs/nfs4proc.c
··· 105 105 bool is_privileged); 106 106 static int nfs41_test_stateid(struct nfs_server *, const nfs4_stateid *, 107 107 const struct cred *); 108 - static int nfs41_free_stateid(struct nfs_server *, const nfs4_stateid *, 108 + static int nfs41_free_stateid(struct nfs_server *, nfs4_stateid *, 109 109 const struct cred *, bool); 110 110 #endif 111 111 ··· 325 325 326 326 if (nfs_have_delegated_mtime(inode)) { 327 327 if (!(cache_validity & NFS_INO_INVALID_ATIME)) 328 - dst[1] &= ~FATTR4_WORD1_TIME_ACCESS; 328 + dst[1] &= ~(FATTR4_WORD1_TIME_ACCESS|FATTR4_WORD1_TIME_ACCESS_SET); 329 329 if (!(cache_validity & NFS_INO_INVALID_MTIME)) 330 - dst[1] &= ~FATTR4_WORD1_TIME_MODIFY; 330 + dst[1] &= ~(FATTR4_WORD1_TIME_MODIFY|FATTR4_WORD1_TIME_MODIFY_SET); 331 331 if (!(cache_validity & NFS_INO_INVALID_CTIME)) 332 - dst[1] &= ~FATTR4_WORD1_TIME_METADATA; 332 + dst[1] &= ~(FATTR4_WORD1_TIME_METADATA|FATTR4_WORD1_TIME_MODIFY_SET); 333 333 } else if (nfs_have_delegated_atime(inode)) { 334 334 if (!(cache_validity & NFS_INO_INVALID_ATIME)) 335 - dst[1] &= ~FATTR4_WORD1_TIME_ACCESS; 335 + dst[1] &= ~(FATTR4_WORD1_TIME_ACCESS|FATTR4_WORD1_TIME_ACCESS_SET); 336 336 } 337 337 } 338 338 ··· 2903 2903 } 2904 2904 2905 2905 static int nfs40_test_and_free_expired_stateid(struct nfs_server *server, 2906 - const nfs4_stateid *stateid, 2907 - const struct cred *cred) 2906 + nfs4_stateid *stateid, const struct cred *cred) 2908 2907 { 2909 2908 return -NFS4ERR_BAD_STATEID; 2910 2909 } 2911 2910 2912 2911 #if defined(CONFIG_NFS_V4_1) 2913 2912 static int nfs41_test_and_free_expired_stateid(struct nfs_server *server, 2914 - const nfs4_stateid *stateid, 2915 - const struct cred *cred) 2913 + nfs4_stateid *stateid, const struct cred *cred) 2916 2914 { 2917 2915 int status; 2918 2916 ··· 2919 2921 break; 2920 2922 case NFS4_INVALID_STATEID_TYPE: 2921 2923 case NFS4_SPECIAL_STATEID_TYPE: 2924 + case NFS4_FREED_STATEID_TYPE: 2922 2925 return -NFS4ERR_BAD_STATEID; 2923 2926 case 
NFS4_REVOKED_STATEID_TYPE: 2924 2927 goto out_free; ··· 3975 3976 FATTR4_WORD0_CASE_INSENSITIVE | 3976 3977 FATTR4_WORD0_CASE_PRESERVING; 3977 3978 if (minorversion) 3978 - bitmask[2] = FATTR4_WORD2_SUPPATTR_EXCLCREAT | 3979 - FATTR4_WORD2_OPEN_ARGUMENTS; 3979 + bitmask[2] = FATTR4_WORD2_SUPPATTR_EXCLCREAT; 3980 + if (minorversion > 1) 3981 + bitmask[2] |= FATTR4_WORD2_OPEN_ARGUMENTS; 3980 3982 3981 3983 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0); 3982 3984 if (status == 0) { ··· 5164 5164 } 5165 5165 5166 5166 static struct dentry *nfs4_do_mkdir(struct inode *dir, struct dentry *dentry, 5167 - struct nfs4_createdata *data) 5167 + struct nfs4_createdata *data, int *statusp) 5168 5168 { 5169 - int status = nfs4_call_sync(NFS_SERVER(dir)->client, NFS_SERVER(dir), &data->msg, 5169 + struct dentry *ret; 5170 + 5171 + *statusp = nfs4_call_sync(NFS_SERVER(dir)->client, NFS_SERVER(dir), &data->msg, 5170 5172 &data->arg.seq_args, &data->res.seq_res, 1); 5171 5173 5172 - if (status) 5173 - return ERR_PTR(status); 5174 + if (*statusp) 5175 + return NULL; 5174 5176 5175 5177 spin_lock(&dir->i_lock); 5176 5178 /* Creating a directory bumps nlink in the parent */ ··· 5181 5179 data->res.fattr->time_start, 5182 5180 NFS_INO_INVALID_DATA); 5183 5181 spin_unlock(&dir->i_lock); 5184 - return nfs_add_or_obtain(dentry, data->res.fh, data->res.fattr); 5182 + ret = nfs_add_or_obtain(dentry, data->res.fh, data->res.fattr); 5183 + if (!IS_ERR(ret)) 5184 + return ret; 5185 + *statusp = PTR_ERR(ret); 5186 + return NULL; 5185 5187 } 5186 5188 5187 5189 static void nfs4_free_createdata(struct nfs4_createdata *data) ··· 5246 5240 5247 5241 static struct dentry *_nfs4_proc_mkdir(struct inode *dir, struct dentry *dentry, 5248 5242 struct iattr *sattr, 5249 - struct nfs4_label *label) 5243 + struct nfs4_label *label, int *statusp) 5250 5244 { 5251 5245 struct nfs4_createdata *data; 5252 - struct dentry *ret = ERR_PTR(-ENOMEM); 5246 + struct dentry *ret 
= NULL; 5253 5247 5248 + *statusp = -ENOMEM; 5254 5249 data = nfs4_alloc_createdata(dir, &dentry->d_name, sattr, NF4DIR); 5255 5250 if (data == NULL) 5256 5251 goto out; 5257 5252 5258 5253 data->arg.label = label; 5259 - ret = nfs4_do_mkdir(dir, dentry, data); 5254 + ret = nfs4_do_mkdir(dir, dentry, data, statusp); 5260 5255 5261 5256 nfs4_free_createdata(data); 5262 5257 out: ··· 5280 5273 if (!(server->attr_bitmask[2] & FATTR4_WORD2_MODE_UMASK)) 5281 5274 sattr->ia_mode &= ~current_umask(); 5282 5275 do { 5283 - alias = _nfs4_proc_mkdir(dir, dentry, sattr, label); 5284 - err = PTR_ERR_OR_ZERO(alias); 5276 + alias = _nfs4_proc_mkdir(dir, dentry, sattr, label, &err); 5285 5277 trace_nfs4_mkdir(dir, &dentry->d_name, err); 5286 - err = nfs4_handle_exception(NFS_SERVER(dir), err, 5287 - &exception); 5278 + if (err) 5279 + alias = ERR_PTR(nfs4_handle_exception(NFS_SERVER(dir), 5280 + err, 5281 + &exception)); 5288 5282 } while (exception.retry); 5289 5283 nfs4_label_release_security(label); 5290 5284 ··· 6219 6211 struct nfs_server *server = NFS_SERVER(inode); 6220 6212 int ret; 6221 6213 6214 + if (unlikely(NFS_FH(inode)->size == 0)) 6215 + return -ENODATA; 6222 6216 if (!nfs4_server_supports_acls(server, type)) 6223 6217 return -EOPNOTSUPP; 6224 6218 ret = nfs_revalidate_inode(inode, NFS_INO_INVALID_CHANGE); ··· 6295 6285 { 6296 6286 struct nfs4_exception exception = { }; 6297 6287 int err; 6288 + 6289 + if (unlikely(NFS_FH(inode)->size == 0)) 6290 + return -ENODATA; 6298 6291 do { 6299 6292 err = __nfs4_proc_set_acl(inode, buf, buflen, type); 6300 6293 trace_nfs4_set_acl(inode, err); ··· 10624 10611 * Note: this function is always asynchronous. 
10625 10612 */ 10626 10613 static int nfs41_free_stateid(struct nfs_server *server, 10627 - const nfs4_stateid *stateid, 10614 + nfs4_stateid *stateid, 10628 10615 const struct cred *cred, 10629 10616 bool privileged) 10630 10617 { ··· 10664 10651 if (IS_ERR(task)) 10665 10652 return PTR_ERR(task); 10666 10653 rpc_put_task(task); 10654 + stateid->type = NFS4_FREED_STATEID_TYPE; 10667 10655 return 0; 10668 10656 } 10669 10657 ··· 10831 10817 | NFS_CAP_OFFLOAD_CANCEL 10832 10818 | NFS_CAP_COPY_NOTIFY 10833 10819 | NFS_CAP_DEALLOCATE 10820 + | NFS_CAP_ZERO_RANGE 10834 10821 | NFS_CAP_SEEK 10835 10822 | NFS_CAP_LAYOUTSTATS 10836 10823 | NFS_CAP_CLONE ··· 10867 10852 10868 10853 static ssize_t nfs4_listxattr(struct dentry *dentry, char *list, size_t size) 10869 10854 { 10870 - ssize_t error, error2, error3; 10855 + ssize_t error, error2, error3, error4; 10871 10856 size_t left = size; 10872 10857 10873 10858 error = generic_listxattr(dentry, list, left); ··· 10890 10875 error3 = nfs4_listxattr_nfs4_user(d_inode(dentry), list, left); 10891 10876 if (error3 < 0) 10892 10877 return error3; 10878 + if (list) { 10879 + list += error3; 10880 + left -= error3; 10881 + } 10893 10882 10894 - error += error2 + error3; 10883 + error4 = security_inode_listsecurity(d_inode(dentry), list, left); 10884 + if (error4 < 0) 10885 + return error4; 10886 + 10887 + error += error2 + error3 + error4; 10895 10888 if (size && error > size) 10896 10889 return -ERANGE; 10897 10890 return error;
+1
fs/nfs/nfs4xdr.c
··· 7711 7711 PROC42(LISTXATTRS, enc_listxattrs, dec_listxattrs), 7712 7712 PROC42(REMOVEXATTR, enc_removexattr, dec_removexattr), 7713 7713 PROC42(READ_PLUS, enc_read_plus, dec_read_plus), 7714 + PROC42(ZERO_RANGE, enc_zero_range, dec_zero_range), 7714 7715 }; 7715 7716 7716 7717 static unsigned int nfs_version4_counts[ARRAY_SIZE(nfs4_procedures)];
+10 -1
fs/nfs/pnfs_nfs.c
··· 830 830 .servername = clp->cl_hostname, 831 831 .connect_timeout = connect_timeout, 832 832 .reconnect_timeout = connect_timeout, 833 + .xprtsec = clp->cl_xprtsec, 833 834 }; 834 835 835 - if (da->da_transport != clp->cl_proto) 836 + if (da->da_transport != clp->cl_proto && 837 + clp->cl_proto != XPRT_TRANSPORT_TCP_TLS) 836 838 continue; 839 + if (da->da_transport == XPRT_TRANSPORT_TCP && 840 + mds_srv->nfs_client->cl_proto == XPRT_TRANSPORT_TCP_TLS) 841 + xprt_args.ident = XPRT_TRANSPORT_TCP_TLS; 842 + 837 843 if (da->da_addr.ss_family != clp->cl_addr.ss_family) 838 844 continue; 839 845 /* Add this address as an alias */ ··· 847 841 rpc_clnt_test_and_add_xprt, NULL); 848 842 continue; 849 843 } 844 + if (da->da_transport == XPRT_TRANSPORT_TCP && 845 + mds_srv->nfs_client->cl_proto == XPRT_TRANSPORT_TCP_TLS) 846 + da->da_transport = XPRT_TRANSPORT_TCP_TLS; 850 847 clp = get_v3_ds_connect(mds_srv, 851 848 &da->da_addr, 852 849 da->da_addrlen, da->da_transport,
+2 -1
fs/nfs/read.c
··· 56 56 { 57 57 folio_zero_segment(folio, 0, folio_size(folio)); 58 58 folio_mark_uptodate(folio); 59 - folio_unlock(folio); 59 + if (nfs_netfs_folio_unlock(folio)) 60 + folio_unlock(folio); 60 61 return 0; 61 62 } 62 63
+19
fs/nfs/super.c
··· 1052 1052 sync_filesystem(sb); 1053 1053 1054 1054 /* 1055 + * The SB_RDONLY flag has been removed from the superblock during 1056 + * mounts to prevent interference between different filesystems. 1057 + * Similarly, it is also necessary to ignore the SB_RDONLY flag 1058 + * during reconfiguration; otherwise, it may also result in the 1059 + * creation of redundant superblocks when mounting a directory with 1060 + * different rw and ro flags multiple times. 1061 + */ 1062 + fc->sb_flags_mask &= ~SB_RDONLY; 1063 + 1064 + /* 1055 1065 * Userspace mount programs that send binary options generally send 1056 1066 * them populated with default values. We have no way to know which 1057 1067 * ones were explicitly specified. Fall back to legacy behavior and ··· 1318 1308 if (IS_ERR(server)) 1319 1309 return PTR_ERR(server); 1320 1310 1311 + /* 1312 + * When NFS_MOUNT_UNSHARED is not set, NFS forces the sharing of a 1313 + * superblock among each filesystem that mounts sub-directories 1314 + * belonging to a single exported root path. 1315 + * To prevent interference between different filesystems, the 1316 + * SB_RDONLY flag should be removed from the superblock. 1317 + */ 1321 1318 if (server->flags & NFS_MOUNT_UNSHARED) 1322 1319 compare_super = NULL; 1320 + else 1321 + fc->sb_flags &= ~SB_RDONLY; 1323 1322 1324 1323 /* -o noac implies -o sync */ 1325 1324 if (server->flags & NFS_MOUNT_NOAC)
+28
fs/nfs/sysfs.c
··· 387 387 } 388 388 #endif /* CONFIG_NFS_V4_1 */ 389 389 390 + #if IS_ENABLED(CONFIG_NFS_LOCALIO) 391 + 392 + static ssize_t 393 + localio_show(struct kobject *kobj, struct kobj_attribute *attr, 394 + char *buf) 395 + { 396 + struct nfs_server *server = container_of(kobj, struct nfs_server, kobj); 397 + bool localio = nfs_server_is_local(server->nfs_client); 398 + return sysfs_emit(buf, "%d\n", localio); 399 + } 400 + 401 + static struct kobj_attribute nfs_sysfs_attr_localio = __ATTR_RO(localio); 402 + 403 + static void nfs_sysfs_add_nfs_localio_server(struct nfs_server *server) 404 + { 405 + int ret = sysfs_create_file_ns(&server->kobj, &nfs_sysfs_attr_localio.attr, 406 + nfs_netns_server_namespace(&server->kobj)); 407 + if (ret < 0) 408 + pr_warn("NFS: sysfs_create_file_ns for server-%d failed (%d)\n", 409 + server->s_sysfs_id, ret); 410 + } 411 + #else 412 + static inline void nfs_sysfs_add_nfs_localio_server(struct nfs_server *server) 413 + { 414 + } 415 + #endif /* IS_ENABLED(CONFIG_NFS_LOCALIO) */ 416 + 390 417 void nfs_sysfs_add_server(struct nfs_server *server) 391 418 { 392 419 int ret; ··· 432 405 server->s_sysfs_id, ret); 433 406 434 407 nfs_sysfs_add_nfsv41_server(server); 408 + nfs_sysfs_add_nfs_localio_server(server); 435 409 } 436 410 EXPORT_SYMBOL_GPL(nfs_sysfs_add_server); 437 411
+19 -35
fs/nfs/write.c
··· 632 632 * Find an associated nfs write request, and prepare to flush it out 633 633 * May return an error if the user signalled nfs_wait_on_request(). 634 634 */ 635 - static int nfs_page_async_flush(struct folio *folio, 636 - struct writeback_control *wbc, 637 - struct nfs_pageio_descriptor *pgio) 635 + static int nfs_do_writepage(struct folio *folio, struct writeback_control *wbc, 636 + struct nfs_pageio_descriptor *pgio) 638 637 { 639 638 struct nfs_page *req; 640 - int ret = 0; 639 + int ret; 640 + 641 + nfs_pageio_cond_complete(pgio, folio->index); 641 642 642 643 req = nfs_lock_and_join_requests(folio); 643 644 if (!req) 644 - goto out; 645 - ret = PTR_ERR(req); 645 + return 0; 646 646 if (IS_ERR(req)) 647 - goto out; 647 + return PTR_ERR(req); 648 648 649 649 nfs_folio_set_writeback(folio); 650 650 WARN_ON_ONCE(test_bit(PG_CLEAN, &req->wb_flags)); ··· 654 654 if (nfs_error_is_fatal_on_server(ret)) 655 655 goto out_launder; 656 656 657 - ret = 0; 658 657 if (!nfs_pageio_add_request(pgio, req)) { 659 658 ret = pgio->pg_error; 660 659 /* ··· 661 662 */ 662 663 if (nfs_error_is_fatal_on_server(ret)) 663 664 goto out_launder; 664 - if (wbc->sync_mode == WB_SYNC_NONE) 665 - ret = AOP_WRITEPAGE_ACTIVATE; 666 665 folio_redirty_for_writepage(wbc, folio); 667 666 nfs_redirty_request(req); 668 667 pgio->pg_error = 0; 669 - } else 670 - nfs_add_stats(folio->mapping->host, 671 - NFSIOS_WRITEPAGES, 1); 672 - out: 673 - return ret; 668 + return ret; 669 + } 670 + 671 + nfs_add_stats(folio->mapping->host, NFSIOS_WRITEPAGES, 1); 672 + return 0; 673 + 674 674 out_launder: 675 675 nfs_write_error(req, ret); 676 676 return 0; 677 - } 678 - 679 - static int nfs_do_writepage(struct folio *folio, struct writeback_control *wbc, 680 - struct nfs_pageio_descriptor *pgio) 681 - { 682 - nfs_pageio_cond_complete(pgio, folio->index); 683 - return nfs_page_async_flush(folio, wbc, pgio); 684 677 } 685 678 686 679 /* ··· 692 701 pgio.pg_error = 0; 693 702 nfs_pageio_complete(&pgio); 694 
703 return err; 695 - } 696 - 697 - static int nfs_writepages_callback(struct folio *folio, 698 - struct writeback_control *wbc, void *data) 699 - { 700 - int ret; 701 - 702 - ret = nfs_do_writepage(folio, wbc, data); 703 - if (ret != AOP_WRITEPAGE_ACTIVATE) 704 - folio_unlock(folio); 705 - return ret; 706 704 } 707 705 708 706 static void nfs_io_completion_commit(void *inode) ··· 729 749 } 730 750 731 751 do { 752 + struct folio *folio = NULL; 753 + 732 754 nfs_pageio_init_write(&pgio, inode, priority, false, 733 755 &nfs_async_write_completion_ops); 734 756 pgio.pg_io_completion = ioc; 735 - err = write_cache_pages(mapping, wbc, nfs_writepages_callback, 736 - &pgio); 757 + while ((folio = writeback_iter(mapping, wbc, folio, &err))) { 758 + err = nfs_do_writepage(folio, wbc, &pgio); 759 + folio_unlock(folio); 760 + } 737 761 pgio.pg_error = 0; 738 762 nfs_pageio_complete(&pgio); 739 763 if (err == -EAGAIN && mntflags & NFS_MOUNT_SOFTERR)
+67 -34
fs/nfs_common/nfslocalio.c
··· 151 151 */ 152 152 static bool nfs_uuid_put(nfs_uuid_t *nfs_uuid) 153 153 { 154 - LIST_HEAD(local_files); 155 - struct nfs_file_localio *nfl, *tmp; 154 + struct nfs_file_localio *nfl; 156 155 157 156 spin_lock(&nfs_uuid->lock); 158 157 if (unlikely(!rcu_access_pointer(nfs_uuid->net))) { ··· 165 166 nfs_uuid->dom = NULL; 166 167 } 167 168 168 - list_splice_init(&nfs_uuid->files, &local_files); 169 - spin_unlock(&nfs_uuid->lock); 170 - 171 169 /* Walk list of files and ensure their last references dropped */ 172 - list_for_each_entry_safe(nfl, tmp, &local_files, list) { 173 - nfs_close_local_fh(nfl); 174 - cond_resched(); 175 - } 176 170 177 - spin_lock(&nfs_uuid->lock); 178 - BUG_ON(!list_empty(&nfs_uuid->files)); 171 + while ((nfl = list_first_entry_or_null(&nfs_uuid->files, 172 + struct nfs_file_localio, 173 + list)) != NULL) { 174 + /* If nfs_uuid is already NULL, nfs_close_local_fh is 175 + * closing and we must wait, else we unlink and close. 176 + */ 177 + if (rcu_access_pointer(nfl->nfs_uuid) == NULL) { 178 + /* nfs_close_local_fh() is doing the 179 + * close and we must wait. until it unlinks 180 + */ 181 + wait_var_event_spinlock(nfl, 182 + list_first_entry_or_null( 183 + &nfs_uuid->files, 184 + struct nfs_file_localio, 185 + list) != nfl, 186 + &nfs_uuid->lock); 187 + continue; 188 + } 189 + 190 + /* Remove nfl from nfs_uuid->files list */ 191 + list_del_init(&nfl->list); 192 + spin_unlock(&nfs_uuid->lock); 193 + 194 + nfs_to_nfsd_file_put_local(&nfl->ro_file); 195 + nfs_to_nfsd_file_put_local(&nfl->rw_file); 196 + cond_resched(); 197 + 198 + spin_lock(&nfs_uuid->lock); 199 + /* Now we can allow racing nfs_close_local_fh() to 200 + * skip the locking. 
201 + */ 202 + RCU_INIT_POINTER(nfl->nfs_uuid, NULL); 203 + wake_up_var_locked(&nfl->nfs_uuid, &nfs_uuid->lock); 204 + } 179 205 180 206 /* Remove client from nn->local_clients */ 181 207 if (nfs_uuid->list_lock) { ··· 261 237 struct nfsd_file *nfs_open_local_fh(nfs_uuid_t *uuid, 262 238 struct rpc_clnt *rpc_clnt, const struct cred *cred, 263 239 const struct nfs_fh *nfs_fh, struct nfs_file_localio *nfl, 240 + struct nfsd_file __rcu **pnf, 264 241 const fmode_t fmode) 265 242 { 266 243 struct net *net; ··· 286 261 rcu_read_unlock(); 287 262 /* We have an implied reference to net thanks to nfsd_net_try_get */ 288 263 localio = nfs_to->nfsd_open_local_fh(net, uuid->dom, rpc_clnt, 289 - cred, nfs_fh, fmode); 290 - if (IS_ERR(localio)) 291 - nfs_to_nfsd_net_put(net); 292 - else 264 + cred, nfs_fh, pnf, fmode); 265 + nfs_to_nfsd_net_put(net); 266 + if (!IS_ERR(localio)) 293 267 nfs_uuid_add_file(uuid, nfl); 294 268 295 269 return localio; ··· 297 273 298 274 void nfs_close_local_fh(struct nfs_file_localio *nfl) 299 275 { 300 - struct nfsd_file *ro_nf = NULL; 301 - struct nfsd_file *rw_nf = NULL; 302 276 nfs_uuid_t *nfs_uuid; 303 277 304 278 rcu_read_lock(); ··· 307 285 return; 308 286 } 309 287 310 - ro_nf = rcu_access_pointer(nfl->ro_file); 311 - rw_nf = rcu_access_pointer(nfl->rw_file); 312 - if (ro_nf || rw_nf) { 313 - spin_lock(&nfs_uuid->lock); 314 - if (ro_nf) 315 - ro_nf = rcu_dereference_protected(xchg(&nfl->ro_file, NULL), 1); 316 - if (rw_nf) 317 - rw_nf = rcu_dereference_protected(xchg(&nfl->rw_file, NULL), 1); 318 - 319 - /* Remove nfl from nfs_uuid->files list */ 320 - RCU_INIT_POINTER(nfl->nfs_uuid, NULL); 321 - list_del_init(&nfl->list); 288 + spin_lock(&nfs_uuid->lock); 289 + if (!rcu_access_pointer(nfl->nfs_uuid)) { 290 + /* nfs_uuid_put has finished here */ 322 291 spin_unlock(&nfs_uuid->lock); 323 292 rcu_read_unlock(); 324 - 325 - if (ro_nf) 326 - nfs_to_nfsd_file_put_local(ro_nf); 327 - if (rw_nf) 328 - nfs_to_nfsd_file_put_local(rw_nf); 329 293 
return; 330 294 } 295 + if (list_empty(&nfs_uuid->files)) { 296 + /* nfs_uuid_put() has started closing files, wait for it 297 + * to finished 298 + */ 299 + spin_unlock(&nfs_uuid->lock); 300 + rcu_read_unlock(); 301 + wait_var_event(&nfl->nfs_uuid, 302 + rcu_access_pointer(nfl->nfs_uuid) == NULL); 303 + return; 304 + } 305 + /* tell nfs_uuid_put() to wait for us */ 306 + RCU_INIT_POINTER(nfl->nfs_uuid, NULL); 307 + spin_unlock(&nfs_uuid->lock); 331 308 rcu_read_unlock(); 309 + 310 + nfs_to_nfsd_file_put_local(&nfl->ro_file); 311 + nfs_to_nfsd_file_put_local(&nfl->rw_file); 312 + 313 + /* Remove nfl from nfs_uuid->files list and signal nfs_uuid_put() 314 + * that we are done. The moment we drop the spinlock the 315 + * nfs_uuid could be freed. 316 + */ 317 + spin_lock(&nfs_uuid->lock); 318 + list_del_init(&nfl->list); 319 + wake_up_var_locked(&nfl->nfs_uuid, &nfs_uuid->lock); 320 + spin_unlock(&nfs_uuid->lock); 332 321 } 333 322 EXPORT_SYMBOL_GPL(nfs_close_local_fh); 334 323
+29 -3
fs/nfsd/filecache.c
··· 378 378 * the reference of the nfsd_file. 379 379 */ 380 380 struct net * 381 - nfsd_file_put_local(struct nfsd_file *nf) 381 + nfsd_file_put_local(struct nfsd_file __rcu **pnf) 382 + { 383 + struct nfsd_file *nf; 384 + struct net *net = NULL; 385 + 386 + nf = unrcu_pointer(xchg(pnf, NULL)); 387 + if (nf) { 388 + net = nf->nf_net; 389 + nfsd_file_put(nf); 390 + } 391 + return net; 392 + } 393 + 394 + /** 395 + * nfsd_file_get_local - get nfsd_file reference and reference to net 396 + * @nf: nfsd_file of which to put the reference 397 + * 398 + * Get reference to both the nfsd_file and nf->nf_net. 399 + */ 400 + struct nfsd_file * 401 + nfsd_file_get_local(struct nfsd_file *nf) 382 402 { 383 403 struct net *net = nf->nf_net; 384 404 385 - nfsd_file_put(nf); 386 - return net; 405 + if (nfsd_net_try_get(net)) { 406 + nf = nfsd_file_get(nf); 407 + if (!nf) 408 + nfsd_net_put(net); 409 + } else { 410 + nf = NULL; 411 + } 412 + return nf; 387 413 } 388 414 389 415 /**
+2 -1
fs/nfsd/filecache.h
··· 62 62 int nfsd_file_cache_start_net(struct net *net); 63 63 void nfsd_file_cache_shutdown_net(struct net *net); 64 64 void nfsd_file_put(struct nfsd_file *nf); 65 - struct net *nfsd_file_put_local(struct nfsd_file *nf); 65 + struct net *nfsd_file_put_local(struct nfsd_file __rcu **nf); 66 + struct nfsd_file *nfsd_file_get_local(struct nfsd_file *nf); 66 67 struct nfsd_file *nfsd_file_get(struct nfsd_file *nf); 67 68 struct file *nfsd_file_file(struct nfsd_file *nf); 68 69 void nfsd_file_close_inode_sync(struct inode *inode);
+52 -18
fs/nfsd/localio.c
··· 24 24 #include "filecache.h" 25 25 #include "cache.h" 26 26 27 - static const struct nfsd_localio_operations nfsd_localio_ops = { 28 - .nfsd_net_try_get = nfsd_net_try_get, 29 - .nfsd_net_put = nfsd_net_put, 30 - .nfsd_open_local_fh = nfsd_open_local_fh, 31 - .nfsd_file_put_local = nfsd_file_put_local, 32 - .nfsd_file_get = nfsd_file_get, 33 - .nfsd_file_put = nfsd_file_put, 34 - .nfsd_file_file = nfsd_file_file, 35 - }; 36 - 37 - void nfsd_localio_ops_init(void) 38 - { 39 - nfs_to = &nfsd_localio_ops; 40 - } 41 - 42 27 /** 43 28 * nfsd_open_local_fh - lookup a local filehandle @nfs_fh and map to nfsd_file 44 29 * ··· 32 47 * @rpc_clnt: rpc_clnt that the client established 33 48 * @cred: cred that the client established 34 49 * @nfs_fh: filehandle to lookup 50 + * @nfp: place to find the nfsd_file, or store it if it was non-NULL 35 51 * @fmode: fmode_t to use for open 36 52 * 37 53 * This function maps a local fh to a path on a local filesystem. ··· 43 57 * set. Caller (NFS client) is responsible for calling nfsd_net_put and 44 58 * nfsd_file_put (via nfs_to_nfsd_file_put_local). 
45 59 */ 46 - struct nfsd_file * 60 + static struct nfsd_file * 47 61 nfsd_open_local_fh(struct net *net, struct auth_domain *dom, 48 62 struct rpc_clnt *rpc_clnt, const struct cred *cred, 49 - const struct nfs_fh *nfs_fh, const fmode_t fmode) 63 + const struct nfs_fh *nfs_fh, struct nfsd_file __rcu **pnf, 64 + const fmode_t fmode) 50 65 { 51 66 int mayflags = NFSD_MAY_LOCALIO; 52 67 struct svc_cred rq_cred; ··· 57 70 58 71 if (nfs_fh->size > NFS4_FHSIZE) 59 72 return ERR_PTR(-EINVAL); 73 + 74 + if (!nfsd_net_try_get(net)) 75 + return ERR_PTR(-ENXIO); 76 + 77 + rcu_read_lock(); 78 + localio = nfsd_file_get(rcu_dereference(*pnf)); 79 + rcu_read_unlock(); 80 + if (localio) 81 + return localio; 60 82 61 83 /* nfs_fh -> svc_fh */ 62 84 fh_init(&fh, NFS4_FHSIZE); ··· 88 92 if (rq_cred.cr_group_info) 89 93 put_group_info(rq_cred.cr_group_info); 90 94 95 + if (!IS_ERR(localio)) { 96 + struct nfsd_file *new; 97 + if (!nfsd_net_try_get(net)) { 98 + nfsd_file_put(localio); 99 + nfsd_net_put(net); 100 + return ERR_PTR(-ENXIO); 101 + } 102 + nfsd_file_get(localio); 103 + again: 104 + new = unrcu_pointer(cmpxchg(pnf, NULL, RCU_INITIALIZER(localio))); 105 + if (new) { 106 + /* Some other thread installed an nfsd_file */ 107 + if (nfsd_file_get(new) == NULL) 108 + goto again; 109 + /* 110 + * Drop the ref we were going to install and the 111 + * one we were going to return. 
112 + */ 113 + nfsd_file_put(localio); 114 + nfsd_file_put(localio); 115 + localio = new; 116 + } 117 + } else 118 + nfsd_net_put(net); 119 + 91 120 return localio; 92 121 } 93 - EXPORT_SYMBOL_GPL(nfsd_open_local_fh); 122 + 123 + static const struct nfsd_localio_operations nfsd_localio_ops = { 124 + .nfsd_net_try_get = nfsd_net_try_get, 125 + .nfsd_net_put = nfsd_net_put, 126 + .nfsd_open_local_fh = nfsd_open_local_fh, 127 + .nfsd_file_put_local = nfsd_file_put_local, 128 + .nfsd_file_get_local = nfsd_file_get_local, 129 + .nfsd_file_file = nfsd_file_file, 130 + }; 131 + 132 + void nfsd_localio_ops_init(void) 133 + { 134 + nfs_to = &nfsd_localio_ops; 135 + } 94 136 95 137 /* 96 138 * UUID_IS_LOCAL XDR functions
+2
include/linux/nfs4.h
··· 72 72 NFS4_LAYOUT_STATEID_TYPE, 73 73 NFS4_PNFS_DS_STATEID_TYPE, 74 74 NFS4_REVOKED_STATEID_TYPE, 75 + NFS4_FREED_STATEID_TYPE, 75 76 } type; 76 77 }; 77 78 ··· 679 678 NFSPROC4_CLNT_SEEK, 680 679 NFSPROC4_CLNT_ALLOCATE, 681 680 NFSPROC4_CLNT_DEALLOCATE, 681 + NFSPROC4_CLNT_ZERO_RANGE, 682 682 NFSPROC4_CLNT_LAYOUTSTATS, 683 683 NFSPROC4_CLNT_CLONE, 684 684 NFSPROC4_CLNT_COPY,
+2
include/linux/nfs_fs_sb.h
··· 125 125 */ 126 126 char cl_ipaddr[48]; 127 127 struct net *cl_net; 128 + netns_tracker cl_ns_tracker; 128 129 struct list_head pending_cb_stateids; 129 130 struct rcu_head rcu; 130 131 ··· 304 303 #define NFS_CAP_CASE_PRESERVING (1U << 7) 305 304 #define NFS_CAP_REBOOT_LAYOUTRETURN (1U << 8) 306 305 #define NFS_CAP_OFFLOAD_STATUS (1U << 9) 306 + #define NFS_CAP_ZERO_RANGE (1U << 10) 307 307 #define NFS_CAP_OPEN_XOR (1U << 12) 308 308 #define NFS_CAP_DELEGTIME (1U << 13) 309 309 #define NFS_CAP_POSIX_LOCK (1U << 14)
+13 -13
include/linux/nfslocalio.h
··· 50 50 spinlock_t *nn_local_clients_lock); 51 51 52 52 /* localio needs to map filehandle -> struct nfsd_file */ 53 - extern struct nfsd_file * 54 - nfsd_open_local_fh(struct net *, struct auth_domain *, struct rpc_clnt *, 55 - const struct cred *, const struct nfs_fh *, 56 - const fmode_t) __must_hold(rcu); 57 53 void nfs_close_local_fh(struct nfs_file_localio *); 58 54 59 55 struct nfsd_localio_operations { ··· 60 64 struct rpc_clnt *, 61 65 const struct cred *, 62 66 const struct nfs_fh *, 67 + struct nfsd_file __rcu **pnf, 63 68 const fmode_t); 64 - struct net *(*nfsd_file_put_local)(struct nfsd_file *); 65 - struct nfsd_file *(*nfsd_file_get)(struct nfsd_file *); 66 - void (*nfsd_file_put)(struct nfsd_file *); 69 + struct net *(*nfsd_file_put_local)(struct nfsd_file __rcu **); 70 + struct nfsd_file *(*nfsd_file_get_local)(struct nfsd_file *); 67 71 struct file *(*nfsd_file_file)(struct nfsd_file *); 68 72 } ____cacheline_aligned; 69 73 ··· 73 77 struct nfsd_file *nfs_open_local_fh(nfs_uuid_t *, 74 78 struct rpc_clnt *, const struct cred *, 75 79 const struct nfs_fh *, struct nfs_file_localio *, 80 + struct nfsd_file __rcu **pnf, 76 81 const fmode_t); 77 82 78 83 static inline void nfs_to_nfsd_net_put(struct net *net) ··· 88 91 rcu_read_unlock(); 89 92 } 90 93 91 - static inline void nfs_to_nfsd_file_put_local(struct nfsd_file *localio) 94 + static inline void nfs_to_nfsd_file_put_local(struct nfsd_file __rcu **localio) 92 95 { 93 96 /* 94 - * Must not hold RCU otherwise nfsd_file_put() can easily trigger: 95 - * "Voluntary context switch within RCU read-side critical section!" 96 - * by scheduling deep in underlying filesystem (e.g. XFS). 97 + * Either *localio must be guaranteed to be non-NULL, or caller 98 + * must prevent nfsd shutdown from completing as nfs_close_local_fh() 99 + * does by blocking the nfs_uuid from being finally put. 
97 100 */ 98 - struct net *net = nfs_to->nfsd_file_put_local(localio); 101 + struct net *net; 99 102 100 - nfs_to_nfsd_net_put(net); 103 + net = nfs_to->nfsd_file_put_local(localio); 104 + 105 + if (net) 106 + nfs_to_nfsd_net_put(net); 101 107 } 102 108 103 109 #else /* CONFIG_NFS_LOCALIO */
+16 -1
include/linux/sunrpc/xprt.h
··· 30 30 #define RPC_MAXCWND(xprt) ((xprt)->max_reqs << RPC_CWNDSHIFT) 31 31 #define RPCXPRT_CONGESTED(xprt) ((xprt)->cong >= (xprt)->cwnd) 32 32 33 + #define RPC_GSS_SEQNO_ARRAY_SIZE 3U 34 + 33 35 enum rpc_display_format_t { 34 36 RPC_DISPLAY_ADDR = 0, 35 37 RPC_DISPLAY_PORT, ··· 68 66 struct rpc_cred * rq_cred; /* Bound cred */ 69 67 __be32 rq_xid; /* request XID */ 70 68 int rq_cong; /* has incremented xprt->cong */ 71 - u32 rq_seqno; /* gss seq no. used on req. */ 69 + u32 rq_seqnos[RPC_GSS_SEQNO_ARRAY_SIZE]; /* past gss req seq nos. */ 70 + unsigned int rq_seqno_count; /* number of entries in rq_seqnos */ 72 71 int rq_enc_pages_num; 73 72 struct page **rq_enc_pages; /* scratch pages for use by 74 73 gss privacy code */ ··· 121 118 }; 122 119 #define rq_svec rq_snd_buf.head 123 120 #define rq_slen rq_snd_buf.len 121 + 122 + static inline int xprt_rqst_add_seqno(struct rpc_rqst *req, u32 seqno) 123 + { 124 + if (likely(req->rq_seqno_count < RPC_GSS_SEQNO_ARRAY_SIZE)) 125 + req->rq_seqno_count++; 126 + 127 + /* Shift array to make room for the newest element at the beginning */ 128 + memmove(&req->rq_seqnos[1], &req->rq_seqnos[0], 129 + (RPC_GSS_SEQNO_ARRAY_SIZE - 1) * sizeof(req->rq_seqnos[0])); 130 + req->rq_seqnos[0] = seqno; 131 + return 0; 132 + } 124 133 125 134 /* RPC transport layer security policies */ 126 135 enum xprtsec_policies {
+2 -2
include/trace/events/rpcgss.h
··· 409 409 __entry->task_id = task->tk_pid; 410 410 __entry->client_id = task->tk_client->cl_clid; 411 411 __entry->xid = be32_to_cpu(rqst->rq_xid); 412 - __entry->seqno = rqst->rq_seqno; 412 + __entry->seqno = *rqst->rq_seqnos; 413 413 ), 414 414 415 415 TP_printk(SUNRPC_TRACE_TASK_SPECIFIER " xid=0x%08x seqno=%u", ··· 440 440 __entry->client_id = task->tk_client->cl_clid; 441 441 __entry->xid = be32_to_cpu(task->tk_rqstp->rq_xid); 442 442 __entry->seq_xmit = seq_xmit; 443 - __entry->seqno = task->tk_rqstp->rq_seqno; 443 + __entry->seqno = *task->tk_rqstp->rq_seqnos; 444 444 __entry->ret = ret; 445 445 ), 446 446
+1 -1
include/trace/events/sunrpc.h
··· 1098 1098 __entry->client_id = rqst->rq_task->tk_client ? 1099 1099 rqst->rq_task->tk_client->cl_clid : -1; 1100 1100 __entry->xid = be32_to_cpu(rqst->rq_xid); 1101 - __entry->seqno = rqst->rq_seqno; 1101 + __entry->seqno = *rqst->rq_seqnos; 1102 1102 __entry->status = status; 1103 1103 ), 1104 1104
+36 -23
net/sunrpc/auth_gss/auth_gss.c
··· 1545 1545 struct kvec iov; 1546 1546 struct xdr_buf verf_buf; 1547 1547 int status; 1548 + u32 seqno; 1548 1549 1549 1550 /* Credential */ 1550 1551 ··· 1557 1556 cred_len = p++; 1558 1557 1559 1558 spin_lock(&ctx->gc_seq_lock); 1560 - req->rq_seqno = (ctx->gc_seq < MAXSEQ) ? ctx->gc_seq++ : MAXSEQ; 1559 + seqno = (ctx->gc_seq < MAXSEQ) ? ctx->gc_seq++ : MAXSEQ; 1560 + xprt_rqst_add_seqno(req, seqno); 1561 1561 spin_unlock(&ctx->gc_seq_lock); 1562 - if (req->rq_seqno == MAXSEQ) 1562 + if (*req->rq_seqnos == MAXSEQ) 1563 1563 goto expired; 1564 1564 trace_rpcgss_seqno(task); 1565 1565 1566 1566 *p++ = cpu_to_be32(RPC_GSS_VERSION); 1567 1567 *p++ = cpu_to_be32(ctx->gc_proc); 1568 - *p++ = cpu_to_be32(req->rq_seqno); 1568 + *p++ = cpu_to_be32(*req->rq_seqnos); 1569 1569 *p++ = cpu_to_be32(gss_cred->gc_service); 1570 1570 p = xdr_encode_netobj(p, &ctx->gc_wire_ctx); 1571 1571 *cred_len = cpu_to_be32((p - (cred_len + 1)) << 2); ··· 1680 1678 return 0; 1681 1679 } 1682 1680 1681 + static u32 1682 + gss_validate_seqno_mic(struct gss_cl_ctx *ctx, u32 seqno, __be32 *seq, __be32 *p, u32 len) 1683 + { 1684 + struct kvec iov; 1685 + struct xdr_buf verf_buf; 1686 + struct xdr_netobj mic; 1687 + 1688 + *seq = cpu_to_be32(seqno); 1689 + iov.iov_base = seq; 1690 + iov.iov_len = 4; 1691 + xdr_buf_from_iov(&iov, &verf_buf); 1692 + mic.data = (u8 *)p; 1693 + mic.len = len; 1694 + return gss_verify_mic(ctx->gc_gss_ctx, &verf_buf, &mic); 1695 + } 1696 + 1683 1697 static int 1684 1698 gss_validate(struct rpc_task *task, struct xdr_stream *xdr) 1685 1699 { 1686 1700 struct rpc_cred *cred = task->tk_rqstp->rq_cred; 1687 1701 struct gss_cl_ctx *ctx = gss_cred_get_ctx(cred); 1688 1702 __be32 *p, *seq = NULL; 1689 - struct kvec iov; 1690 - struct xdr_buf verf_buf; 1691 - struct xdr_netobj mic; 1692 1703 u32 len, maj_stat; 1693 1704 int status; 1705 + int i = 1; /* don't recheck the first item */ 1694 1706 1695 1707 p = xdr_inline_decode(xdr, 2 * sizeof(*p)); 1696 1708 if (!p) ··· 1721 
1705 seq = kmalloc(4, GFP_KERNEL); 1722 1706 if (!seq) 1723 1707 goto validate_failed; 1724 - *seq = cpu_to_be32(task->tk_rqstp->rq_seqno); 1725 - iov.iov_base = seq; 1726 - iov.iov_len = 4; 1727 - xdr_buf_from_iov(&iov, &verf_buf); 1728 - mic.data = (u8 *)p; 1729 - mic.len = len; 1730 - maj_stat = gss_verify_mic(ctx->gc_gss_ctx, &verf_buf, &mic); 1708 + maj_stat = gss_validate_seqno_mic(ctx, task->tk_rqstp->rq_seqnos[0], seq, p, len); 1709 + /* RFC 2203 5.3.3.1 - compute the checksum of each sequence number in the cache */ 1710 + while (unlikely(maj_stat == GSS_S_BAD_SIG && i < task->tk_rqstp->rq_seqno_count)) 1711 + maj_stat = gss_validate_seqno_mic(ctx, task->tk_rqstp->rq_seqnos[i], seq, p, len); 1731 1712 if (maj_stat == GSS_S_CONTEXT_EXPIRED) 1732 1713 clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags); 1733 1714 if (maj_stat) ··· 1763 1750 if (!p) 1764 1751 goto wrap_failed; 1765 1752 integ_len = p++; 1766 - *p = cpu_to_be32(rqstp->rq_seqno); 1753 + *p = cpu_to_be32(*rqstp->rq_seqnos); 1767 1754 1768 1755 if (rpcauth_wrap_req_encode(task, xdr)) 1769 1756 goto wrap_failed; ··· 1860 1847 if (!p) 1861 1848 goto wrap_failed; 1862 1849 opaque_len = p++; 1863 - *p = cpu_to_be32(rqstp->rq_seqno); 1850 + *p = cpu_to_be32(*rqstp->rq_seqnos); 1864 1851 1865 1852 if (rpcauth_wrap_req_encode(task, xdr)) 1866 1853 goto wrap_failed; ··· 2014 2001 offset = rcv_buf->len - xdr_stream_remaining(xdr); 2015 2002 if (xdr_stream_decode_u32(xdr, &seqno)) 2016 2003 goto unwrap_failed; 2017 - if (seqno != rqstp->rq_seqno) 2004 + if (seqno != *rqstp->rq_seqnos) 2018 2005 goto bad_seqno; 2019 2006 if (xdr_buf_subsegment(rcv_buf, &gss_data, offset, len)) 2020 2007 goto unwrap_failed; ··· 2058 2045 trace_rpcgss_unwrap_failed(task); 2059 2046 goto out; 2060 2047 bad_seqno: 2061 - trace_rpcgss_bad_seqno(task, rqstp->rq_seqno, seqno); 2048 + trace_rpcgss_bad_seqno(task, *rqstp->rq_seqnos, seqno); 2062 2049 goto out; 2063 2050 bad_mic: 2064 2051 trace_rpcgss_verify_mic(task, maj_stat); ··· 
2090 2077 if (maj_stat != GSS_S_COMPLETE) 2091 2078 goto bad_unwrap; 2092 2079 /* gss_unwrap decrypted the sequence number */ 2093 - if (be32_to_cpup(p++) != rqstp->rq_seqno) 2080 + if (be32_to_cpup(p++) != *rqstp->rq_seqnos) 2094 2081 goto bad_seqno; 2095 2082 2096 2083 /* gss_unwrap redacts the opaque blob from the head iovec. ··· 2106 2093 trace_rpcgss_unwrap_failed(task); 2107 2094 return -EIO; 2108 2095 bad_seqno: 2109 - trace_rpcgss_bad_seqno(task, rqstp->rq_seqno, be32_to_cpup(--p)); 2096 + trace_rpcgss_bad_seqno(task, *rqstp->rq_seqnos, be32_to_cpup(--p)); 2110 2097 return -EIO; 2111 2098 bad_unwrap: 2112 2099 trace_rpcgss_unwrap(task, maj_stat); ··· 2131 2118 if (!ctx) 2132 2119 goto out; 2133 2120 2134 - if (gss_seq_is_newer(req->rq_seqno, READ_ONCE(ctx->gc_seq))) 2121 + if (gss_seq_is_newer(*req->rq_seqnos, READ_ONCE(ctx->gc_seq))) 2135 2122 goto out_ctx; 2136 2123 2137 2124 seq_xmit = READ_ONCE(ctx->gc_seq_xmit); 2138 - while (gss_seq_is_newer(req->rq_seqno, seq_xmit)) { 2125 + while (gss_seq_is_newer(*req->rq_seqnos, seq_xmit)) { 2139 2126 u32 tmp = seq_xmit; 2140 2127 2141 - seq_xmit = cmpxchg(&ctx->gc_seq_xmit, tmp, req->rq_seqno); 2128 + seq_xmit = cmpxchg(&ctx->gc_seq_xmit, tmp, *req->rq_seqnos); 2142 2129 if (seq_xmit == tmp) { 2143 2130 ret = false; 2144 2131 goto out_ctx; ··· 2147 2134 2148 2135 win = ctx->gc_win; 2149 2136 if (win > 0) 2150 - ret = !gss_seq_is_newer(req->rq_seqno, seq_xmit - win); 2137 + ret = !gss_seq_is_newer(*req->rq_seqnos, seq_xmit - win); 2151 2138 2152 2139 out_ctx: 2153 2140 gss_put_ctx(ctx);
+7 -2
net/sunrpc/clnt.c
··· 2771 2771 case -EPROTONOSUPPORT: 2772 2772 goto out_err; 2773 2773 case -EACCES: 2774 - /* Re-encode with a fresh cred */ 2775 - fallthrough; 2774 + /* possible RPCSEC_GSS out-of-sequence event (RFC2203), 2775 + * reset recv state and keep waiting, don't retransmit 2776 + */ 2777 + task->tk_rqstp->rq_reply_bytes_recvd = 0; 2778 + task->tk_status = xprt_request_enqueue_receive(task); 2779 + task->tk_action = call_transmit_status; 2780 + return -EBADMSG; 2776 2781 default: 2777 2782 goto out_garbage; 2778 2783 }
+2 -1
net/sunrpc/xprt.c
··· 1365 1365 INIT_LIST_HEAD(&req->rq_xmit2); 1366 1366 goto out; 1367 1367 } 1368 - } else if (!req->rq_seqno) { 1368 + } else if (req->rq_seqno_count == 0) { 1369 1369 list_for_each_entry(pos, &xprt->xmit_queue, rq_xmit) { 1370 1370 if (pos->rq_task->tk_owner != task->tk_owner) 1371 1371 continue; ··· 1898 1898 req->rq_snd_buf.bvec = NULL; 1899 1899 req->rq_rcv_buf.bvec = NULL; 1900 1900 req->rq_release_snd_buf = NULL; 1901 + req->rq_seqno_count = 0; 1901 1902 xprt_init_majortimeo(task, req, task->tk_client->cl_timeout); 1902 1903 1903 1904 trace_xprt_reserve(req);
+5 -11
net/sunrpc/xprtsock.c
··· 2726 2726 if (status) 2727 2727 goto out_close; 2728 2728 xprt_release_write(lower_xprt, NULL); 2729 - 2730 2729 trace_rpc_socket_connect(upper_xprt, upper_transport->sock, 0); 2731 - if (!xprt_test_and_set_connected(upper_xprt)) { 2732 - upper_xprt->connect_cookie++; 2733 - clear_bit(XPRT_SOCK_CONNECTING, &upper_transport->sock_state); 2734 - xprt_clear_connecting(upper_xprt); 2735 - 2736 - upper_xprt->stat.connect_count++; 2737 - upper_xprt->stat.connect_time += (long)jiffies - 2738 - upper_xprt->stat.connect_start; 2739 - xs_run_error_worker(upper_transport, XPRT_SOCK_WAKE_PENDING); 2740 - } 2741 2730 rpc_shutdown_client(lower_clnt); 2731 + 2732 + /* Check for ingress data that arrived before the socket's 2733 + * ->data_ready callback was set up. 2734 + */ 2735 + xs_poll_check_readable(upper_transport); 2742 2736 2743 2737 out_unlock: 2744 2738 current_restore_flags(pflags, PF_MEMALLOC);