author	Eric Biggers <ebiggers@google.com>	2019-01-31 23:51:41 -0800
committer	Greg Kroah-Hartman <gregkh@linuxfoundation.org>	2019-03-23 08:44:32 +0100
commit	82351c83b1c5a4c4052207cffaf2423a4c5e7871 (patch)
tree	0b0db2d4b855e4aa5d829a1319542cb5e25ab408 /crypto
parent	ef3e805c9476e17743b8eeaa9c8fa5b0c7cbf9c0 (diff)
crypto: ahash - fix another early termination in hash walk
commit 77568e535af7c4f97eaef1e555bf0af83772456c upstream.

Hash algorithms with an alignmask set, e.g. "xcbc(aes-aesni)" and
"michael_mic", fail the improved hash tests because they sometimes
produce the wrong digest. The bug is that in the case where a
scatterlist element crosses pages, not all the data is actually hashed
because the scatterlist walk terminates too early. This happens because
the 'nbytes' variable in crypto_hash_walk_done() is assigned the number
of bytes remaining in the page, then later interpreted as the number of
bytes remaining in the scatterlist element. Fix it.

Fixes: 900a081f6912 ("crypto: ahash - Fix early termination in hash walk")
Cc: stable@vger.kernel.org
Signed-off-by: Eric Biggers <ebiggers@google.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
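To make the accounting error concrete, here is a minimal standalone sketch of the pre-fix logic. It is not the kernel function: PAGE_SIZE, ALIGN_UP, min_uint and the example values are illustrative assumptions chosen so that a scatterlist element continues on the next page. Reusing the page remainder as the element remainder ends the walk early.

#include <stdio.h>

#define PAGE_SIZE	4096u
#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((a) - 1))

static unsigned int min_uint(unsigned int a, unsigned int b)
{
	return a < b ? a : b;
}

int main(void)
{
	/* Hypothetical walk state: the element continues on the next page. */
	unsigned int entrylen  = 2000;	/* bytes left in the scatterlist element */
	unsigned int offset    = 4093;	/* current position within the page      */
	unsigned int alignmask = 3;	/* algorithm wants 4-byte alignment      */

	/* Pre-fix accounting: 'nbytes' starts as the element remainder ... */
	unsigned int nbytes = entrylen;

	if (nbytes && (offset & alignmask)) {
		offset = ALIGN_UP(offset, alignmask + 1);	/* -> 4096 */
		nbytes = min_uint(nbytes, PAGE_SIZE - offset);	/* -> 0    */
		entrylen -= nbytes;				/* unchanged */
	}

	/*
	 * ... but is then read as "bytes left in the element".  Here it is
	 * 0, so the walk never advances to the next page even though
	 * 'entrylen' bytes of the element are still unhashed.
	 */
	if (nbytes)
		printf("keep walking, %u bytes of the element remain\n", entrylen);
	else
		printf("walk ends early, %u bytes never hashed\n", entrylen);
	return 0;
}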
Diffstat (limited to 'crypto')
-rw-r--r--	crypto/ahash.c	14
1 file changed, 7 insertions, 7 deletions
diff --git a/crypto/ahash.c b/crypto/ahash.c
index 6978ad86e516..595c4f3657ff 100644
--- a/crypto/ahash.c
+++ b/crypto/ahash.c
@@ -85,17 +85,17 @@ static int hash_walk_new_entry(struct crypto_hash_walk *walk)
 int crypto_hash_walk_done(struct crypto_hash_walk *walk, int err)
 {
 	unsigned int alignmask = walk->alignmask;
-	unsigned int nbytes = walk->entrylen;
 
 	walk->data -= walk->offset;
 
-	if (nbytes && walk->offset & alignmask && !err) {
-		walk->offset = ALIGN(walk->offset, alignmask + 1);
-		nbytes = min(nbytes,
-			     ((unsigned int)(PAGE_SIZE)) - walk->offset);
-		walk->entrylen -= nbytes;
+	if (walk->entrylen && (walk->offset & alignmask) && !err) {
+		unsigned int nbytes;
+		walk->offset = ALIGN(walk->offset, alignmask + 1);
+		nbytes = min(walk->entrylen,
+			     ((unsigned int)(PAGE_SIZE)) - walk->offset);
 
 		if (nbytes) {
+			walk->entrylen -= nbytes;
 			walk->data += walk->offset;
 			return nbytes;
 		}
@@ -115,7 +115,7 @@ int crypto_hash_walk_done(struct crypto_hash_walk *walk, int err)
 	if (err)
 		return err;
 
-	if (nbytes) {
+	if (walk->entrylen) {
 		walk->offset = 0;
 		walk->pg++;
 		return hash_walk_next(walk);
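For comparison, a corresponding sketch of the patched accounting in the same hypothetical scenario, again with illustrative names and values rather than kernel code: 'nbytes' is scoped to the alignment branch and only charged against the element length when data is actually consumed, so the end-of-walk decision is driven by the element remainder alone.

#include <stdio.h>

#define PAGE_SIZE	4096u
#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((a) - 1))

static unsigned int min_uint(unsigned int a, unsigned int b)
{
	return a < b ? a : b;
}

int main(void)
{
	unsigned int entrylen  = 2000;	/* same scenario as the sketch above */
	unsigned int offset    = 4093;
	unsigned int alignmask = 3;

	if (entrylen && (offset & alignmask)) {
		/* 'nbytes' now only ever means "bytes usable in this page". */
		unsigned int nbytes;

		offset = ALIGN_UP(offset, alignmask + 1);		/* -> 4096 */
		nbytes = min_uint(entrylen, PAGE_SIZE - offset);	/* -> 0    */

		if (nbytes) {
			entrylen -= nbytes;	/* charged only when data is consumed */
			printf("hash %u more bytes in this page\n", nbytes);
			return 0;
		}
	}

	/* The end-of-element decision keys off 'entrylen', so the walk goes on. */
	if (entrylen)
		printf("advance to the next page, %u bytes still to hash\n", entrylen);
	return 0;
}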