commit 318abdfbe708aaaa652c79fb500e9bd60521f9dc (patch)
author:    Eric Biggers <ebiggers@google.com>        2018-07-23 13:54:58 -0400
committer: Herbert Xu <herbert@gondor.apana.org.au>  2018-08-03 06:06:04 -0400
tree:      37e2e89d56b6d5e057ee515c50ade51e4406b3b2
parent:    0868def3e4100591e7a1fdbf3eed1439cc8f7ca3 (diff)
crypto: ablkcipher - fix crash flushing dcache in error path
Like the skcipher_walk and blkcipher_walk cases:
scatterwalk_done() is only meant to be called after a nonzero number of
bytes have been processed, since scatterwalk_pagedone() will flush the
dcache of the *previous* page. But in the error case of
ablkcipher_walk_done(), e.g. if the input wasn't an integer number of
blocks, scatterwalk_done() was actually called after advancing 0 bytes.
This caused a crash ("BUG: unable to handle kernel paging request")
during '!PageSlab(page)' on architectures like arm and arm64 that define
ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE, provided that the input was
page-aligned as in that case walk->offset == 0.
Fix it by reorganizing ablkcipher_walk_done() to skip the
scatterwalk_advance() and scatterwalk_done() if an error has occurred.
Reported-by: Liu Chao <liuchao741@huawei.com>
Fixes: bf06099db18a ("crypto: skcipher - Add ablkcipher_walk interfaces")
Cc: <stable@vger.kernel.org> # v2.6.35+
Signed-off-by: Eric Biggers <ebiggers@google.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
 crypto/ablkcipher.c | 57 +++++++++++++++++++++++++++----------------------------
 1 file changed, 26 insertions(+), 31 deletions(-)
diff --git a/crypto/ablkcipher.c b/crypto/ablkcipher.c
index 1edb5000d783..8882e90e868e 100644
--- a/crypto/ablkcipher.c
+++ b/crypto/ablkcipher.c
@@ -71,11 +71,9 @@ static inline u8 *ablkcipher_get_spot(u8 *start, unsigned int len)
 	return max(start, end_page);
 }
 
-static inline unsigned int ablkcipher_done_slow(struct ablkcipher_walk *walk,
-						unsigned int bsize)
+static inline void ablkcipher_done_slow(struct ablkcipher_walk *walk,
+					unsigned int n)
 {
-	unsigned int n = bsize;
-
 	for (;;) {
 		unsigned int len_this_page = scatterwalk_pagelen(&walk->out);
 
@@ -87,17 +85,13 @@ static inline unsigned int ablkcipher_done_slow(struct ablkcipher_walk *walk,
 		n -= len_this_page;
 		scatterwalk_start(&walk->out, sg_next(walk->out.sg));
 	}
-
-	return bsize;
 }
 
-static inline unsigned int ablkcipher_done_fast(struct ablkcipher_walk *walk,
-						unsigned int n)
+static inline void ablkcipher_done_fast(struct ablkcipher_walk *walk,
+					unsigned int n)
 {
 	scatterwalk_advance(&walk->in, n);
 	scatterwalk_advance(&walk->out, n);
-
-	return n;
 }
 
 static int ablkcipher_walk_next(struct ablkcipher_request *req,
@@ -107,39 +101,40 @@ int ablkcipher_walk_done(struct ablkcipher_request *req,
 			 struct ablkcipher_walk *walk, int err)
 {
 	struct crypto_tfm *tfm = req->base.tfm;
-	unsigned int nbytes = 0;
+	unsigned int n; /* bytes processed */
+	bool more;
 
-	if (likely(err >= 0)) {
-		unsigned int n = walk->nbytes - err;
+	if (unlikely(err < 0))
+		goto finish;
 
-		if (likely(!(walk->flags & ABLKCIPHER_WALK_SLOW)))
-			n = ablkcipher_done_fast(walk, n);
-		else if (WARN_ON(err)) {
-			err = -EINVAL;
-			goto err;
-		} else
-			n = ablkcipher_done_slow(walk, n);
+	n = walk->nbytes - err;
+	walk->total -= n;
+	more = (walk->total != 0);
 
-		nbytes = walk->total - n;
-		err = 0;
+	if (likely(!(walk->flags & ABLKCIPHER_WALK_SLOW))) {
+		ablkcipher_done_fast(walk, n);
+	} else {
+		if (WARN_ON(err)) {
+			/* unexpected case; didn't process all bytes */
+			err = -EINVAL;
+			goto finish;
+		}
+		ablkcipher_done_slow(walk, n);
 	}
 
-	scatterwalk_done(&walk->in, 0, nbytes);
-	scatterwalk_done(&walk->out, 1, nbytes);
-
-err:
-	walk->total = nbytes;
-	walk->nbytes = nbytes;
+	scatterwalk_done(&walk->in, 0, more);
+	scatterwalk_done(&walk->out, 1, more);
 
-	if (nbytes) {
+	if (more) {
 		crypto_yield(req->base.flags);
 		return ablkcipher_walk_next(req, walk);
 	}
-
+	err = 0;
+finish:
+	walk->nbytes = 0;
 	if (walk->iv != req->info)
 		memcpy(req->info, walk->iv, tfm->crt_ablkcipher.ivsize);
 	kfree(walk->iv_buffer);
-
 	return err;
 }
 EXPORT_SYMBOL_GPL(ablkcipher_walk_done);