author    Paul Burton <paul.burton@mips.com>                 2018-11-01 13:51:34 -0400
committer Trond Myklebust <trond.myklebust@hammerspace.com>  2018-11-01 13:55:24 -0400
commit    c3be6577d82a9f0163eb1e2c37a477414d12a209 (patch)
tree      d6cee7e555354f3f81f10b35b284b93fecc68ade
parent    86bbd7422ae6a33735df6846fd685e46686da714 (diff)
SUNRPC: Use atomic(64)_t for seq_send(64)
The seq_send & seq_send64 fields in struct krb5_ctx are used as
atomically incrementing counters. This is implemented using cmpxchg() &
cmpxchg64() to implement what amount to custom versions of
atomic_fetch_inc() & atomic64_fetch_inc().

Besides the duplication, using cmpxchg64() has another major drawback
in that some 32 bit architectures don't provide it. As such commit
571ed1fd2390 ("SUNRPC: Replace krb5_seq_lock with a lockless scheme")
resulted in build failures for some architectures.

Change seq_send to be an atomic_t and seq_send64 to be an atomic64_t,
then use atomic(64)_* functions to manipulate the values. The
atomic64_t type & associated functions are provided even on
architectures which lack real 64 bit atomic memory access via
CONFIG_GENERIC_ATOMIC64 which uses spinlocks to serialize access. This
fixes the build failures for architectures lacking cmpxchg64().

A potential alternative that was raised would be to provide cmpxchg64()
on the 32 bit architectures that currently lack it, using spinlocks.
However this would provide a version of cmpxchg64() with semantics a
little different to the implementations on architectures with real 64
bit atomics - the spinlock-based implementation would only work if all
access to the memory used with cmpxchg64() is *always* performed using
cmpxchg64(). That is not currently a requirement for users of
cmpxchg64(), and making it one seems questionable. As such avoiding
cmpxchg64() outside of architecture-specific code seems best,
particularly in cases where atomic64_t seems like a better fit anyway.

The CONFIG_GENERIC_ATOMIC64 implementation of atomic64_* functions will
use spinlocks & so faces the same issue, but with the key difference
that the memory backing an atomic64_t ought to always be accessed via
the atomic64_* functions anyway, making the issue moot.

Signed-off-by: Paul Burton <paul.burton@mips.com>
Fixes: 571ed1fd2390 ("SUNRPC: Replace krb5_seq_lock with a lockless scheme")
Cc: Trond Myklebust <trond.myklebust@hammerspace.com>
Cc: Anna Schumaker <anna.schumaker@netapp.com>
Cc: J. Bruce Fields <bfields@fieldses.org>
Cc: Jeff Layton <jlayton@kernel.org>
Cc: David S. Miller <davem@davemloft.net>
Cc: linux-nfs@vger.kernel.org
Cc: netdev@vger.kernel.org
Signed-off-by: Trond Myklebust <trond.myklebust@hammerspace.com>
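[Editor's aside, not part of the patch: the sketch below contrasts the two
fetch-and-increment approaches the message describes, as a standalone
user-space C program. C11 <stdatomic.h> stands in for the kernel's
atomic_t/atomic64_t API, and the helper names are hypothetical. Both
helpers return the pre-increment value, which is the semantic the deleted
gss_seq_send(64)_fetch_and_inc() helpers open-coded and which
atomic(64)_fetch_inc() provides directly.]

	/* Illustrative user-space sketch only -- not kernel code. */
	#include <stdatomic.h>
	#include <stdint.h>
	#include <stdio.h>

	/* Open-coded fetch-and-increment, as the removed helpers did with
	 * cmpxchg()/cmpxchg64(): retry until no other thread raced with us. */
	static uint32_t fetch_inc_cmpxchg(_Atomic uint32_t *seq)
	{
		uint32_t old = atomic_load(seq);

		/* On failure, compare_exchange writes the current value back
		 * into 'old', so each retry starts from a fresh snapshot. */
		while (!atomic_compare_exchange_weak(seq, &old, old + 1))
			;
		return old;	/* value before the increment */
	}

	/* The replacement pattern: one call with the same semantics. Where
	 * the target lacks native 64-bit atomics, the toolchain falls back
	 * to a lock-based implementation, playing the same role as the
	 * spinlock-backed CONFIG_GENERIC_ATOMIC64 noted above. */
	static uint64_t fetch_inc_atomic64(_Atomic uint64_t *seq64)
	{
		return atomic_fetch_add(seq64, 1);
	}

	int main(void)
	{
		_Atomic uint32_t seq = 41;
		_Atomic uint64_t seq64 = 41;

		printf("cmpxchg loop: returns %u, counter now %u\n",
		       (unsigned)fetch_inc_cmpxchg(&seq),
		       (unsigned)atomic_load(&seq));
		printf("fetch_add:    returns %llu, counter now %llu\n",
		       (unsigned long long)fetch_inc_atomic64(&seq64),
		       (unsigned long long)atomic_load(&seq64));
		return 0;
	}

[Both printf lines report "returns 41, counter now 42". The lock-based
fallback is safe here for the reason the message gives: memory backing an
atomic object is only ever accessed through the atomic API, unlike memory
passed to a hypothetical spinlock-emulated cmpxchg64().]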
-rw-r--r--  include/linux/sunrpc/gss_krb5.h      |  7
-rw-r--r--  net/sunrpc/auth_gss/gss_krb5_mech.c  | 16
-rw-r--r--  net/sunrpc/auth_gss/gss_krb5_seal.c  | 28
-rw-r--r--  net/sunrpc/auth_gss/gss_krb5_wrap.c  |  4
4 files changed, 16 insertions(+), 39 deletions(-)
diff --git a/include/linux/sunrpc/gss_krb5.h b/include/linux/sunrpc/gss_krb5.h
index 69f749afa617..4162de72e95c 100644
--- a/include/linux/sunrpc/gss_krb5.h
+++ b/include/linux/sunrpc/gss_krb5.h
@@ -107,8 +107,8 @@ struct krb5_ctx {
 	u8			Ksess[GSS_KRB5_MAX_KEYLEN]; /* session key */
 	u8			cksum[GSS_KRB5_MAX_KEYLEN];
 	s32			endtime;
-	u32			seq_send;
-	u64			seq_send64;
+	atomic_t		seq_send;
+	atomic64_t		seq_send64;
 	struct xdr_netobj	mech_used;
 	u8			initiator_sign[GSS_KRB5_MAX_KEYLEN];
 	u8			acceptor_sign[GSS_KRB5_MAX_KEYLEN];
@@ -118,9 +118,6 @@ struct krb5_ctx {
 	u8			acceptor_integ[GSS_KRB5_MAX_KEYLEN];
 };
 
-extern u32 gss_seq_send_fetch_and_inc(struct krb5_ctx *ctx);
-extern u64 gss_seq_send64_fetch_and_inc(struct krb5_ctx *ctx);
-
 /* The length of the Kerberos GSS token header */
 #define GSS_KRB5_TOK_HDR_LEN	(16)
 
diff --git a/net/sunrpc/auth_gss/gss_krb5_mech.c b/net/sunrpc/auth_gss/gss_krb5_mech.c
index 7bb2514aadd9..71cb29dc86c2 100644
--- a/net/sunrpc/auth_gss/gss_krb5_mech.c
+++ b/net/sunrpc/auth_gss/gss_krb5_mech.c
@@ -275,6 +275,7 @@ out_err:
 static int
 gss_import_v1_context(const void *p, const void *end, struct krb5_ctx *ctx)
 {
+	u32 seq_send;
 	int tmp;
 
 	p = simple_get_bytes(p, end, &ctx->initiate, sizeof(ctx->initiate));
@@ -316,9 +317,10 @@ gss_import_v1_context(const void *p, const void *end, struct krb5_ctx *ctx)
 	p = simple_get_bytes(p, end, &ctx->endtime, sizeof(ctx->endtime));
 	if (IS_ERR(p))
 		goto out_err;
-	p = simple_get_bytes(p, end, &ctx->seq_send, sizeof(ctx->seq_send));
+	p = simple_get_bytes(p, end, &seq_send, sizeof(seq_send));
 	if (IS_ERR(p))
 		goto out_err;
+	atomic_set(&ctx->seq_send, seq_send);
 	p = simple_get_netobj(p, end, &ctx->mech_used);
 	if (IS_ERR(p))
 		goto out_err;
@@ -610,6 +612,7 @@ static int
 gss_import_v2_context(const void *p, const void *end, struct krb5_ctx *ctx,
 		gfp_t gfp_mask)
 {
+	u64 seq_send64;
 	int keylen;
 
 	p = simple_get_bytes(p, end, &ctx->flags, sizeof(ctx->flags));
@@ -620,14 +623,15 @@ gss_import_v2_context(const void *p, const void *end, struct krb5_ctx *ctx,
 	p = simple_get_bytes(p, end, &ctx->endtime, sizeof(ctx->endtime));
 	if (IS_ERR(p))
 		goto out_err;
-	p = simple_get_bytes(p, end, &ctx->seq_send64, sizeof(ctx->seq_send64));
+	p = simple_get_bytes(p, end, &seq_send64, sizeof(seq_send64));
 	if (IS_ERR(p))
 		goto out_err;
+	atomic64_set(&ctx->seq_send64, seq_send64);
 	/* set seq_send for use by "older" enctypes */
-	ctx->seq_send = ctx->seq_send64;
-	if (ctx->seq_send64 != ctx->seq_send) {
-		dprintk("%s: seq_send64 %lx, seq_send %x overflow?\n", __func__,
-			(unsigned long)ctx->seq_send64, ctx->seq_send);
+	atomic_set(&ctx->seq_send, seq_send64);
+	if (seq_send64 != atomic_read(&ctx->seq_send)) {
+		dprintk("%s: seq_send64 %llx, seq_send %x overflow?\n", __func__,
+			seq_send64, atomic_read(&ctx->seq_send));
 		p = ERR_PTR(-EINVAL);
 		goto out_err;
 	}
diff --git a/net/sunrpc/auth_gss/gss_krb5_seal.c b/net/sunrpc/auth_gss/gss_krb5_seal.c
index b4adeb06660b..48fe4a591b54 100644
--- a/net/sunrpc/auth_gss/gss_krb5_seal.c
+++ b/net/sunrpc/auth_gss/gss_krb5_seal.c
@@ -123,30 +123,6 @@ setup_token_v2(struct krb5_ctx *ctx, struct xdr_netobj *token)
 	return krb5_hdr;
 }
 
-u32
-gss_seq_send_fetch_and_inc(struct krb5_ctx *ctx)
-{
-	u32 old, seq_send = READ_ONCE(ctx->seq_send);
-
-	do {
-		old = seq_send;
-		seq_send = cmpxchg(&ctx->seq_send, old, old + 1);
-	} while (old != seq_send);
-	return seq_send;
-}
-
-u64
-gss_seq_send64_fetch_and_inc(struct krb5_ctx *ctx)
-{
-	u64 old, seq_send = READ_ONCE(ctx->seq_send);
-
-	do {
-		old = seq_send;
-		seq_send = cmpxchg64(&ctx->seq_send64, old, old + 1);
-	} while (old != seq_send);
-	return seq_send;
-}
-
 static u32
 gss_get_mic_v1(struct krb5_ctx *ctx, struct xdr_buf *text,
 		struct xdr_netobj *token)
@@ -177,7 +153,7 @@ gss_get_mic_v1(struct krb5_ctx *ctx, struct xdr_buf *text,
 
 	memcpy(ptr + GSS_KRB5_TOK_HDR_LEN, md5cksum.data, md5cksum.len);
 
-	seq_send = gss_seq_send_fetch_and_inc(ctx);
+	seq_send = atomic_fetch_inc(&ctx->seq_send);
 
 	if (krb5_make_seq_num(ctx, ctx->seq, ctx->initiate ? 0 : 0xff,
 			      seq_send, ptr + GSS_KRB5_TOK_HDR_LEN, ptr + 8))
@@ -205,7 +181,7 @@ gss_get_mic_v2(struct krb5_ctx *ctx, struct xdr_buf *text,
 
 	/* Set up the sequence number. Now 64-bits in clear
 	 * text and w/o direction indicator */
-	seq_send_be64 = cpu_to_be64(gss_seq_send64_fetch_and_inc(ctx));
+	seq_send_be64 = cpu_to_be64(atomic64_fetch_inc(&ctx->seq_send64));
 	memcpy(krb5_hdr + 8, (char *) &seq_send_be64, 8);
 
 	if (ctx->initiate) {
diff --git a/net/sunrpc/auth_gss/gss_krb5_wrap.c b/net/sunrpc/auth_gss/gss_krb5_wrap.c
index 41cb294cd071..6af6f119d9c1 100644
--- a/net/sunrpc/auth_gss/gss_krb5_wrap.c
+++ b/net/sunrpc/auth_gss/gss_krb5_wrap.c
@@ -228,7 +228,7 @@ gss_wrap_kerberos_v1(struct krb5_ctx *kctx, int offset,
 
 	memcpy(ptr + GSS_KRB5_TOK_HDR_LEN, md5cksum.data, md5cksum.len);
 
-	seq_send = gss_seq_send_fetch_and_inc(kctx);
+	seq_send = atomic_fetch_inc(&kctx->seq_send);
 
 	/* XXX would probably be more efficient to compute checksum
 	 * and encrypt at the same time: */
@@ -475,7 +475,7 @@ gss_wrap_kerberos_v2(struct krb5_ctx *kctx, u32 offset,
 	*be16ptr++ = 0;
 
 	be64ptr = (__be64 *)be16ptr;
-	*be64ptr = cpu_to_be64(gss_seq_send64_fetch_and_inc(kctx));
+	*be64ptr = cpu_to_be64(atomic64_fetch_inc(&kctx->seq_send64));
 
 	err = (*kctx->gk5e->encrypt_v2)(kctx, offset, buf, pages);
 	if (err)