[Ocf-linux-users] [PATCH] Fix crypto_hash functions on kernels 4.6.0 and newer
From: James H. <jam...@gm...> - 2022-10-08 09:14:02
The legacy crypto_hash interface was removed in kernel version 4.6.0.
Adapt cryptosoft to the replacement crypto_shash interface so that it
continues to build and work on kernels 4.6.0 and newer.
Signed-off-by: James Hilliard <jam...@gm...>
---
ocf/cryptosoft.c | 185 +++++++++++++++++++++++++++++++++++++++++++++++
1 file changed, 185 insertions(+)
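For reference, here is a minimal sketch (not part of the patch) of how a
one-shot digest is computed through the replacement crypto_shash interface
on kernels 4.6.0 and newer; the compatibility macros and walk helpers added
in the hunk below map cryptosoft's old crypto_hash calls onto these
primitives. The algorithm name "sha256", the function name
example_shash_digest and the kzalloc'd descriptor are illustrative
assumptions, not code taken from cryptosoft.c.

#include <crypto/hash.h>
#include <linux/err.h>
#include <linux/slab.h>

/*
 * Illustrative only: compute a SHA-256 digest of a linear buffer with the
 * crypto_shash API.  "out" must be at least crypto_shash_digestsize(tfm)
 * bytes (32 for sha256).
 */
static int example_shash_digest(const u8 *data, unsigned int len, u8 *out)
{
	struct crypto_shash *tfm;
	struct shash_desc *desc;
	int err;

	/* Allocate a synchronous (shash) transform by algorithm name. */
	tfm = crypto_alloc_shash("sha256", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	/*
	 * The descriptor is followed by per-transform state, so size it with
	 * crypto_shash_descsize(); kzalloc also clears the flags field that
	 * still exists on 4.6-era kernels.
	 */
	desc = kzalloc(sizeof(*desc) + crypto_shash_descsize(tfm), GFP_KERNEL);
	if (!desc) {
		crypto_free_shash(tfm);
		return -ENOMEM;
	}
	desc->tfm = tfm;

	/* One-shot init + update + final over a contiguous buffer. */
	err = crypto_shash_digest(desc, data, len, out);

	kfree(desc);
	crypto_free_shash(tfm);
	return err;
}

Scatter/gather data still has to be walked page by page and fed to
crypto_shash_update(), which is what the crypto_hash_walk helpers added
below take care of.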
diff --git a/ocf/cryptosoft.c b/ocf/cryptosoft.c
index caf9c06..d903b6e 100644
--- a/ocf/cryptosoft.c
+++ b/ocf/cryptosoft.c
@@ -56,6 +56,9 @@
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,29)
#include <crypto/hash.h>
#endif
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,6,0)
+#include <linux/highmem.h>
+#endif
#include <cryptodev.h>
#include <uio.h>
@@ -184,6 +187,188 @@ static struct kmem_cache *swcr_req_cache;
#define crypto_alloc_comp(X, Y, Z) crypto_alloc_tfm(X, mode)
#define plain(X) #X , 0
#else
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,6,0)
+ #define hash_desc shash_desc
+ #define crypto_free_hash crypto_free_shash
+ #define crypto_hash_tfm crypto_shash_tfm
+ #define crypto_alloc_hash crypto_alloc_shash
+ #define crypto_hash_digestsize crypto_shash_digestsize
+ #define crypto_hash_setkey crypto_shash_setkey
+ #define crypto_has_hash crypto_has_ahash
+ #define crypto_hash_cast(X) container_of(X, struct crypto_shash, base)
+
+struct crypto_hash_walk {
+ char *data;
+
+ unsigned int offset;
+ unsigned int alignmask;
+
+ struct page *pg;
+ unsigned int entrylen;
+
+ unsigned int total;
+ struct scatterlist *sg;
+
+ unsigned int flags;
+};
+
+static int hash_walk_next(struct crypto_hash_walk *walk)
+{
+ unsigned int alignmask = walk->alignmask;
+ unsigned int offset = walk->offset;
+ unsigned int nbytes = min(walk->entrylen,
+ ((unsigned int)(PAGE_SIZE)) - offset);
+
+ if (walk->flags & CRYPTO_ALG_ASYNC)
+ walk->data = kmap(walk->pg);
+ else
+ walk->data = kmap_atomic(walk->pg);
+ walk->data += offset;
+
+ if (offset & alignmask) {
+ unsigned int unaligned = alignmask + 1 - (offset & alignmask);
+
+ if (nbytes > unaligned)
+ nbytes = unaligned;
+ }
+
+ walk->entrylen -= nbytes;
+ return nbytes;
+}
+
+static int hash_walk_new_entry(struct crypto_hash_walk *walk)
+{
+ struct scatterlist *sg;
+
+ sg = walk->sg;
+ walk->offset = sg->offset;
+ walk->pg = sg_page(walk->sg) + (walk->offset >> PAGE_SHIFT);
+ walk->offset = offset_in_page(walk->offset);
+ walk->entrylen = sg->length;
+
+ if (walk->entrylen > walk->total)
+ walk->entrylen = walk->total;
+ walk->total -= walk->entrylen;
+
+ return hash_walk_next(walk);
+}
+
+static int crypto_hash_walk_first_compat(struct shash_desc *sdesc,
+ struct crypto_hash_walk *walk,
+ struct scatterlist *sg, unsigned int len)
+{
+ walk->total = len;
+
+ if (!walk->total) {
+ walk->entrylen = 0;
+ return 0;
+ }
+
+ walk->alignmask = crypto_shash_alignmask(sdesc->tfm);
+ walk->sg = sg;
+ walk->flags = sdesc->flags & CRYPTO_TFM_REQ_MASK;
+
+ return hash_walk_new_entry(walk);
+}
+
+static inline void crypto_yield(u32 flags)
+{
+ if (flags & CRYPTO_TFM_REQ_MAY_SLEEP)
+ cond_resched();
+}
+
+static int crypto_hash_walk_done(struct crypto_hash_walk *walk, int err)
+{
+ unsigned int alignmask = walk->alignmask;
+ unsigned int nbytes = walk->entrylen;
+
+ walk->data -= walk->offset;
+
+ if (nbytes && walk->offset & alignmask && !err) {
+ walk->offset = ALIGN(walk->offset, alignmask + 1);
+ walk->data += walk->offset;
+
+ nbytes = min(nbytes,
+ ((unsigned int)(PAGE_SIZE)) - walk->offset);
+ walk->entrylen -= nbytes;
+
+ return nbytes;
+ }
+
+ if (walk->flags & CRYPTO_ALG_ASYNC)
+ kunmap(walk->pg);
+ else {
+ kunmap_atomic(walk->data);
+ /*
+ * The may sleep test only makes sense for sync users.
+ * Async users don't need to sleep here anyway.
+ */
+ crypto_yield(walk->flags);
+ }
+
+ if (err)
+ return err;
+
+ if (nbytes) {
+ walk->offset = 0;
+ walk->pg++;
+ return hash_walk_next(walk);
+ }
+
+ if (!walk->total)
+ return 0;
+
+ walk->sg = sg_next(walk->sg);
+
+ return hash_walk_new_entry(walk);
+}
+
+static int shash_compat_update(struct shash_desc *desc, struct scatterlist *sg,
+ unsigned int len)
+{
+ struct crypto_hash_walk walk;
+ int nbytes;
+
+ for (nbytes = crypto_hash_walk_first_compat(desc, &walk, sg, len);
+ nbytes > 0; nbytes = crypto_hash_walk_done(&walk, nbytes))
+ nbytes = crypto_shash_update(desc, walk.data, nbytes);
+
+ return nbytes;
+}
+
+static int crypto_hash_digest(struct shash_desc *desc, struct scatterlist *sg,
+ unsigned int nbytes, u8 *out)
+{
+ unsigned int offset = sg->offset;
+ int err;
+
+ if (nbytes < min(sg->length, ((unsigned int)(PAGE_SIZE)) - offset)) {
+ void *data;
+
+ /* desc->flags already holds the caller's flags; the legacy copy from hash_desc is a no-op here */
+
+ data = kmap_atomic(sg_page(sg));
+ err = crypto_shash_digest(desc, data + offset, nbytes, out);
+ kunmap_atomic(data);
+ crypto_yield(desc->flags);
+ goto out;
+ }
+
+ err = crypto_shash_init(desc);
+ if (err)
+ goto out;
+
+ err = shash_compat_update(desc, sg, nbytes);
+ if (err)
+ goto out;
+
+ err = crypto_shash_final(desc, out);
+
+out:
+ return err;
+}
+
+#endif /* if LINUX_VERSION_CODE >= KERNEL_VERSION(4,6,0) */
#define ecb(X) "ecb(" #X ")" , 0
#define cbc(X) "cbc(" #X ")" , 0
#define hmac(X) "hmac(" #X ")" , 0
--
2.37.3