Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

Merge tag 'nfs-for-4.20-2' of git://git.linux-nfs.org/projects/trondmy/linux-nfs

Pull NFS client bugfixes from Trond Myklebust:
"Highlights include:

Bugfix:
- Fix build issues on architectures that don't provide 64-bit cmpxchg

Cleanups:
- Fix a spelling mistake"

* tag 'nfs-for-4.20-2' of git://git.linux-nfs.org/projects/trondmy/linux-nfs:
NFS: fix spelling mistake, EACCESS -> EACCES
SUNRPC: Use atomic(64)_t for seq_send(64)

+17 -40
+1 -1
fs/nfs/nfs4proc.c
@@ -3788,7 +3788,7 @@
 	}
 
 	/*
-	 * -EACCESS could mean that the user doesn't have correct permissions
+	 * -EACCES could mean that the user doesn't have correct permissions
 	 * to access the mount. It could also mean that we tried to mount
 	 * with a gss auth flavor, but rpc.gssd isn't running. Either way,
 	 * existing mount programs don't handle -EACCES very well so it should
+2 -5
include/linux/sunrpc/gss_krb5.h
@@ -107,8 +107,8 @@
 	u8 Ksess[GSS_KRB5_MAX_KEYLEN]; /* session key */
 	u8 cksum[GSS_KRB5_MAX_KEYLEN];
 	s32 endtime;
-	u32 seq_send;
-	u64 seq_send64;
+	atomic_t seq_send;
+	atomic64_t seq_send64;
 	struct xdr_netobj mech_used;
 	u8 initiator_sign[GSS_KRB5_MAX_KEYLEN];
 	u8 acceptor_sign[GSS_KRB5_MAX_KEYLEN];
@@ -117,9 +117,6 @@
 	u8 initiator_integ[GSS_KRB5_MAX_KEYLEN];
 	u8 acceptor_integ[GSS_KRB5_MAX_KEYLEN];
 };
-
-extern u32 gss_seq_send_fetch_and_inc(struct krb5_ctx *ctx);
-extern u64 gss_seq_send64_fetch_and_inc(struct krb5_ctx *ctx);
 
 /* The length of the Kerberos GSS token header */
 #define GSS_KRB5_TOK_HDR_LEN (16)
+10 -6
net/sunrpc/auth_gss/gss_krb5_mech.c
@@ -274,6 +274,7 @@
 static int
 gss_import_v1_context(const void *p, const void *end, struct krb5_ctx *ctx)
 {
+	u32 seq_send;
 	int tmp;
 
 	p = simple_get_bytes(p, end, &ctx->initiate, sizeof(ctx->initiate));
@@ -316,9 +315,10 @@
 	p = simple_get_bytes(p, end, &ctx->endtime, sizeof(ctx->endtime));
 	if (IS_ERR(p))
 		goto out_err;
-	p = simple_get_bytes(p, end, &ctx->seq_send, sizeof(ctx->seq_send));
+	p = simple_get_bytes(p, end, &seq_send, sizeof(seq_send));
 	if (IS_ERR(p))
 		goto out_err;
+	atomic_set(&ctx->seq_send, seq_send);
 	p = simple_get_netobj(p, end, &ctx->mech_used);
 	if (IS_ERR(p))
 		goto out_err;
@@ -609,6 +607,7 @@
 gss_import_v2_context(const void *p, const void *end, struct krb5_ctx *ctx,
 		gfp_t gfp_mask)
 {
+	u64 seq_send64;
 	int keylen;
 
 	p = simple_get_bytes(p, end, &ctx->flags, sizeof(ctx->flags));
@@ -620,13 +617,13 @@
 	p = simple_get_bytes(p, end, &ctx->endtime, sizeof(ctx->endtime));
 	if (IS_ERR(p))
 		goto out_err;
-	p = simple_get_bytes(p, end, &ctx->seq_send64, sizeof(ctx->seq_send64));
+	p = simple_get_bytes(p, end, &seq_send64, sizeof(seq_send64));
 	if (IS_ERR(p))
 		goto out_err;
+	atomic64_set(&ctx->seq_send64, seq_send64);
 	/* set seq_send for use by "older" enctypes */
-	ctx->seq_send = ctx->seq_send64;
-	if (ctx->seq_send64 != ctx->seq_send) {
-		dprintk("%s: seq_send64 %lx, seq_send %x overflow?\n", __func__,
-			(unsigned long)ctx->seq_send64, ctx->seq_send);
+	atomic_set(&ctx->seq_send, seq_send64);
+	if (seq_send64 != atomic_read(&ctx->seq_send)) {
+		dprintk("%s: seq_send64 %llx, seq_send %x overflow?\n", __func__,
+			seq_send64, atomic_read(&ctx->seq_send));
 		p = ERR_PTR(-EINVAL);
 		goto out_err;
 	}
+2 -26
net/sunrpc/auth_gss/gss_krb5_seal.c
@@ -123,30 +123,6 @@
 	return krb5_hdr;
 }
 
-u32
-gss_seq_send_fetch_and_inc(struct krb5_ctx *ctx)
-{
-	u32 old, seq_send = READ_ONCE(ctx->seq_send);
-
-	do {
-		old = seq_send;
-		seq_send = cmpxchg(&ctx->seq_send, old, old + 1);
-	} while (old != seq_send);
-	return seq_send;
-}
-
-u64
-gss_seq_send64_fetch_and_inc(struct krb5_ctx *ctx)
-{
-	u64 old, seq_send = READ_ONCE(ctx->seq_send);
-
-	do {
-		old = seq_send;
-		seq_send = cmpxchg64(&ctx->seq_send64, old, old + 1);
-	} while (old != seq_send);
-	return seq_send;
-}
-
 static u32
 gss_get_mic_v1(struct krb5_ctx *ctx, struct xdr_buf *text,
 		struct xdr_netobj *token)
@@ -177,7 +153,7 @@
 
 	memcpy(ptr + GSS_KRB5_TOK_HDR_LEN, md5cksum.data, md5cksum.len);
 
-	seq_send = gss_seq_send_fetch_and_inc(ctx);
+	seq_send = atomic_fetch_inc(&ctx->seq_send);
 
 	if (krb5_make_seq_num(ctx, ctx->seq, ctx->initiate ? 0 : 0xff,
 			      seq_send, ptr + GSS_KRB5_TOK_HDR_LEN, ptr + 8))
@@ -205,7 +181,7 @@
 
 	/* Set up the sequence number. Now 64-bits in clear
 	 * text and w/o direction indicator */
-	seq_send_be64 = cpu_to_be64(gss_seq_send64_fetch_and_inc(ctx));
+	seq_send_be64 = cpu_to_be64(atomic64_fetch_inc(&ctx->seq_send64));
 	memcpy(krb5_hdr + 8, (char *) &seq_send_be64, 8);
 
 	if (ctx->initiate) {
+2 -2
net/sunrpc/auth_gss/gss_krb5_wrap.c
··· 228 228 229 229 memcpy(ptr + GSS_KRB5_TOK_HDR_LEN, md5cksum.data, md5cksum.len); 230 230 231 - seq_send = gss_seq_send_fetch_and_inc(kctx); 231 + seq_send = atomic_fetch_inc(&kctx->seq_send); 232 232 233 233 /* XXX would probably be more efficient to compute checksum 234 234 * and encrypt at the same time: */ ··· 475 475 *be16ptr++ = 0; 476 476 477 477 be64ptr = (__be64 *)be16ptr; 478 - *be64ptr = cpu_to_be64(gss_seq_send64_fetch_and_inc(kctx)); 478 + *be64ptr = cpu_to_be64(atomic64_fetch_inc(&kctx->seq_send64)); 479 479 480 480 err = (*kctx->gk5e->encrypt_v2)(kctx, offset, buf, pages); 481 481 if (err)