From: Gilad Ben-Y. <gi...@be...> - 2017-08-14 15:23:37
|
DRBG is starting an async. crypto op and waiting for it complete. Move it over to generic code doing the same. The code now also passes CRYPTO_TFM_REQ_MAY_SLEEP flag indicating crypto request memory allocation may use GFP_KERNEL which should be perfectly fine as the code is obviously sleeping for the completion of the request any way. Signed-off-by: Gilad Ben-Yossef <gi...@be...> --- crypto/drbg.c | 36 +++++++++--------------------------- include/crypto/drbg.h | 3 +-- 2 files changed, 10 insertions(+), 29 deletions(-) diff --git a/crypto/drbg.c b/crypto/drbg.c index 633a88e..c522251 100644 --- a/crypto/drbg.c +++ b/crypto/drbg.c @@ -1651,16 +1651,6 @@ static int drbg_fini_sym_kernel(struct drbg_state *drbg) return 0; } -static void drbg_skcipher_cb(struct crypto_async_request *req, int error) -{ - struct drbg_state *drbg = req->data; - - if (error == -EINPROGRESS) - return; - drbg->ctr_async_err = error; - complete(&drbg->ctr_completion); -} - static int drbg_init_sym_kernel(struct drbg_state *drbg) { struct crypto_cipher *tfm; @@ -1691,7 +1681,7 @@ static int drbg_init_sym_kernel(struct drbg_state *drbg) return PTR_ERR(sk_tfm); } drbg->ctr_handle = sk_tfm; - init_completion(&drbg->ctr_completion); + crypto_init_wait(&drbg->ctr_wait); req = skcipher_request_alloc(sk_tfm, GFP_KERNEL); if (!req) { @@ -1700,8 +1690,9 @@ static int drbg_init_sym_kernel(struct drbg_state *drbg) return -ENOMEM; } drbg->ctr_req = req; - skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, - drbg_skcipher_cb, drbg); + skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG | + CRYPTO_TFM_REQ_MAY_SLEEP, + crypto_req_done, &drbg->ctr_wait); alignmask = crypto_skcipher_alignmask(sk_tfm); drbg->ctr_null_value_buf = kzalloc(DRBG_CTR_NULL_LEN + alignmask, @@ -1762,21 +1753,12 @@ static int drbg_kcapi_sym_ctr(struct drbg_state *drbg, /* Output buffer may not be valid for SGL, use scratchpad */ skcipher_request_set_crypt(drbg->ctr_req, &sg_in, &sg_out, cryptlen, drbg->V); - ret = crypto_skcipher_encrypt(drbg->ctr_req); - switch (ret) { - case 0: - break; - case -EINPROGRESS: - case -EBUSY: - wait_for_completion(&drbg->ctr_completion); - if (!drbg->ctr_async_err) { - reinit_completion(&drbg->ctr_completion); - break; - } - default: + ret = crypto_wait_req(crypto_skcipher_encrypt(drbg->ctr_req), + &drbg->ctr_wait); + if (ret) goto out; - } - init_completion(&drbg->ctr_completion); + + crypto_init_wait(&drbg->ctr_wait); memcpy(outbuf, drbg->outscratchpad, cryptlen); diff --git a/include/crypto/drbg.h b/include/crypto/drbg.h index 22f884c..8f94110 100644 --- a/include/crypto/drbg.h +++ b/include/crypto/drbg.h @@ -126,8 +126,7 @@ struct drbg_state { __u8 *ctr_null_value; /* CTR mode aligned zero buf */ __u8 *outscratchpadbuf; /* CTR mode output scratchpad */ __u8 *outscratchpad; /* CTR mode aligned outbuf */ - struct completion ctr_completion; /* CTR mode async handler */ - int ctr_async_err; /* CTR mode async error */ + struct crypto_wait ctr_wait; /* CTR mode async wait obj */ bool seeded; /* DRBG fully seeded? */ bool pr; /* Prediction resistance enabled? */ -- 2.1.4 |
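[Editorial sketch, not part of the posted patch.] The conversion above reuses a single wait object embedded in drbg_state for every request issued on the pre-allocated ctr_req. A rough illustration of that long-lived-wait pattern follows; the my_state/my_setup/my_ctr_encrypt names are hypothetical, while crypto_init_wait(), crypto_req_done() and crypto_wait_req() are the helpers this series introduces:

#include <crypto/skcipher.h>

struct my_state {
	struct crypto_skcipher *tfm;
	struct skcipher_request *req;	/* pre-allocated, like drbg->ctr_req */
	struct crypto_wait wait;	/* long-lived, like drbg->ctr_wait */
};

static int my_setup(struct my_state *st, struct crypto_skcipher *tfm)
{
	crypto_init_wait(&st->wait);
	st->tfm = tfm;
	st->req = skcipher_request_alloc(tfm, GFP_KERNEL);
	if (!st->req)
		return -ENOMEM;
	skcipher_request_set_callback(st->req, CRYPTO_TFM_REQ_MAY_BACKLOG |
				      CRYPTO_TFM_REQ_MAY_SLEEP,
				      crypto_req_done, &st->wait);
	return 0;
}

static int my_ctr_encrypt(struct my_state *st, struct scatterlist *src,
			  struct scatterlist *dst, unsigned int len, u8 *iv)
{
	skcipher_request_set_crypt(st->req, src, dst, len, iv);
	/* Blocks on -EINPROGRESS/-EBUSY, returns the op's final status. */
	return crypto_wait_req(crypto_skcipher_encrypt(st->req), &st->wait);
}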
From: Gilad Ben-Y. <gi...@be...> - 2017-08-14 15:23:25
|
public_key_verify_signature() is starting an async crypto op and waiting for it to complete. Move it over to generic code doing the same. Signed-off-by: Gilad Ben-Yossef <gi...@be...> --- crypto/asymmetric_keys/public_key.c | 28 ++++------------------------ 1 file changed, 4 insertions(+), 24 deletions(-) diff --git a/crypto/asymmetric_keys/public_key.c b/crypto/asymmetric_keys/public_key.c index 3cd6e12..d916235 100644 --- a/crypto/asymmetric_keys/public_key.c +++ b/crypto/asymmetric_keys/public_key.c @@ -57,29 +57,13 @@ static void public_key_destroy(void *payload0, void *payload3) public_key_signature_free(payload3); } -struct public_key_completion { - struct completion completion; - int err; -}; - -static void public_key_verify_done(struct crypto_async_request *req, int err) -{ - struct public_key_completion *compl = req->data; - - if (err == -EINPROGRESS) - return; - - compl->err = err; - complete(&compl->completion); -} - /* * Verify a signature using a public key. */ int public_key_verify_signature(const struct public_key *pkey, const struct public_key_signature *sig) { - struct public_key_completion compl; + struct crypto_wait cwait; struct crypto_akcipher *tfm; struct akcipher_request *req; struct scatterlist sig_sg, digest_sg; @@ -131,20 +115,16 @@ int public_key_verify_signature(const struct public_key *pkey, sg_init_one(&digest_sg, output, outlen); akcipher_request_set_crypt(req, &sig_sg, &digest_sg, sig->s_size, outlen); - init_completion(&compl.completion); + crypto_init_wait(&cwait); akcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP, - public_key_verify_done, &compl); + crypto_req_done, &cwait); /* Perform the verification calculation. This doesn't actually do the * verification, but rather calculates the hash expected by the * signature and returns that to us. */ - ret = crypto_akcipher_verify(req); - if ((ret == -EINPROGRESS) || (ret == -EBUSY)) { - wait_for_completion(&compl.completion); - ret = compl.err; - } + ret = crypto_wait_req(crypto_akcipher_verify(req), &cwait); if (ret < 0) goto out_free_output; -- 2.1.4 |
From: Gilad Ben-Y. <gi...@be...> - 2017-08-14 15:23:13
|
algif starts several async crypto ops and waits for their completion. Move it over to generic code doing the same. Signed-off-by: Gilad Ben-Yossef <gi...@be...> --- crypto/af_alg.c | 27 --------------------------- crypto/algif_aead.c | 8 ++++---- crypto/algif_hash.c | 30 ++++++++++++++---------------- crypto/algif_skcipher.c | 9 ++++----- include/crypto/if_alg.h | 15 +-------------- 5 files changed, 23 insertions(+), 66 deletions(-) diff --git a/crypto/af_alg.c b/crypto/af_alg.c index d6936c0..f8917e7 100644 --- a/crypto/af_alg.c +++ b/crypto/af_alg.c @@ -481,33 +481,6 @@ int af_alg_cmsg_send(struct msghdr *msg, struct af_alg_control *con) } EXPORT_SYMBOL_GPL(af_alg_cmsg_send); -int af_alg_wait_for_completion(int err, struct af_alg_completion *completion) -{ - switch (err) { - case -EINPROGRESS: - case -EBUSY: - wait_for_completion(&completion->completion); - reinit_completion(&completion->completion); - err = completion->err; - break; - }; - - return err; -} -EXPORT_SYMBOL_GPL(af_alg_wait_for_completion); - -void af_alg_complete(struct crypto_async_request *req, int err) -{ - struct af_alg_completion *completion = req->data; - - if (err == -EINPROGRESS) - return; - - completion->err = err; - complete(&completion->completion); -} -EXPORT_SYMBOL_GPL(af_alg_complete); - /** * af_alg_alloc_tsgl - allocate the TX SGL * diff --git a/crypto/algif_aead.c b/crypto/algif_aead.c index 48d46e7..abbac8a 100644 --- a/crypto/algif_aead.c +++ b/crypto/algif_aead.c @@ -278,11 +278,11 @@ static int _aead_recvmsg(struct socket *sock, struct msghdr *msg, /* Synchronous operation */ aead_request_set_callback(&areq->cra_u.aead_req, CRYPTO_TFM_REQ_MAY_BACKLOG, - af_alg_complete, &ctx->completion); - err = af_alg_wait_for_completion(ctx->enc ? + crypto_req_done, &ctx->wait); + err = crypto_wait_req(ctx->enc ? 
crypto_aead_encrypt(&areq->cra_u.aead_req) : crypto_aead_decrypt(&areq->cra_u.aead_req), - &ctx->completion); + &ctx->wait); } /* AIO operation in progress */ @@ -554,7 +554,7 @@ static int aead_accept_parent_nokey(void *private, struct sock *sk) ctx->merge = 0; ctx->enc = 0; ctx->aead_assoclen = 0; - af_alg_init_completion(&ctx->completion); + crypto_init_wait(&ctx->wait); ask->private = ctx; diff --git a/crypto/algif_hash.c b/crypto/algif_hash.c index 3b3c154..d2ab8de 100644 --- a/crypto/algif_hash.c +++ b/crypto/algif_hash.c @@ -26,7 +26,7 @@ struct hash_ctx { u8 *result; - struct af_alg_completion completion; + struct crypto_wait wait; unsigned int len; bool more; @@ -102,8 +102,7 @@ static int hash_sendmsg(struct socket *sock, struct msghdr *msg, if ((msg->msg_flags & MSG_MORE)) hash_free_result(sk, ctx); - err = af_alg_wait_for_completion(crypto_ahash_init(&ctx->req), - &ctx->completion); + err = crypto_wait_req(crypto_ahash_init(&ctx->req), &ctx->wait); if (err) goto unlock; } @@ -124,8 +123,8 @@ static int hash_sendmsg(struct socket *sock, struct msghdr *msg, ahash_request_set_crypt(&ctx->req, ctx->sgl.sg, NULL, len); - err = af_alg_wait_for_completion(crypto_ahash_update(&ctx->req), - &ctx->completion); + err = crypto_wait_req(crypto_ahash_update(&ctx->req), + &ctx->wait); af_alg_free_sg(&ctx->sgl); if (err) goto unlock; @@ -143,8 +142,8 @@ static int hash_sendmsg(struct socket *sock, struct msghdr *msg, goto unlock; ahash_request_set_crypt(&ctx->req, NULL, ctx->result, 0); - err = af_alg_wait_for_completion(crypto_ahash_final(&ctx->req), - &ctx->completion); + err = crypto_wait_req(crypto_ahash_final(&ctx->req), + &ctx->wait); } unlock: @@ -185,7 +184,7 @@ static ssize_t hash_sendpage(struct socket *sock, struct page *page, } else { if (!ctx->more) { err = crypto_ahash_init(&ctx->req); - err = af_alg_wait_for_completion(err, &ctx->completion); + err = crypto_wait_req(err, &ctx->wait); if (err) goto unlock; } @@ -193,7 +192,7 @@ static ssize_t hash_sendpage(struct socket *sock, struct page *page, err = crypto_ahash_update(&ctx->req); } - err = af_alg_wait_for_completion(err, &ctx->completion); + err = crypto_wait_req(err, &ctx->wait); if (err) goto unlock; @@ -229,17 +228,16 @@ static int hash_recvmsg(struct socket *sock, struct msghdr *msg, size_t len, ahash_request_set_crypt(&ctx->req, NULL, ctx->result, 0); if (!result && !ctx->more) { - err = af_alg_wait_for_completion( - crypto_ahash_init(&ctx->req), - &ctx->completion); + err = crypto_wait_req(crypto_ahash_init(&ctx->req), + &ctx->wait); if (err) goto unlock; } if (!result || ctx->more) { ctx->more = 0; - err = af_alg_wait_for_completion(crypto_ahash_final(&ctx->req), - &ctx->completion); + err = crypto_wait_req(crypto_ahash_final(&ctx->req), + &ctx->wait); if (err) goto unlock; } @@ -490,13 +488,13 @@ static int hash_accept_parent_nokey(void *private, struct sock *sk) ctx->result = NULL; ctx->len = len; ctx->more = 0; - af_alg_init_completion(&ctx->completion); + crypto_init_wait(&ctx->wait); ask->private = ctx; ahash_request_set_tfm(&ctx->req, hash); ahash_request_set_callback(&ctx->req, CRYPTO_TFM_REQ_MAY_BACKLOG, - af_alg_complete, &ctx->completion); + crypto_req_done, &ctx->wait); sk->sk_destruct = hash_sock_destruct; diff --git a/crypto/algif_skcipher.c b/crypto/algif_skcipher.c index 8ae4170..9954b07 100644 --- a/crypto/algif_skcipher.c +++ b/crypto/algif_skcipher.c @@ -129,12 +129,11 @@ static int _skcipher_recvmsg(struct socket *sock, struct msghdr *msg, skcipher_request_set_callback(&areq->cra_u.skcipher_req, 
CRYPTO_TFM_REQ_MAY_SLEEP | CRYPTO_TFM_REQ_MAY_BACKLOG, - af_alg_complete, - &ctx->completion); - err = af_alg_wait_for_completion(ctx->enc ? + crypto_req_done, &ctx->wait); + err = crypto_wait_req(ctx->enc ? crypto_skcipher_encrypt(&areq->cra_u.skcipher_req) : crypto_skcipher_decrypt(&areq->cra_u.skcipher_req), - &ctx->completion); + &ctx->wait); } /* AIO operation in progress */ @@ -388,7 +387,7 @@ static int skcipher_accept_parent_nokey(void *private, struct sock *sk) ctx->more = 0; ctx->merge = 0; ctx->enc = 0; - af_alg_init_completion(&ctx->completion); + crypto_init_wait(&ctx->wait); ask->private = ctx; diff --git a/include/crypto/if_alg.h b/include/crypto/if_alg.h index 75ec9c6..6abf0a3 100644 --- a/include/crypto/if_alg.h +++ b/include/crypto/if_alg.h @@ -40,11 +40,6 @@ struct alg_sock { void *private; }; -struct af_alg_completion { - struct completion completion; - int err; -}; - struct af_alg_control { struct af_alg_iv *iv; int op; @@ -152,7 +147,7 @@ struct af_alg_ctx { void *iv; size_t aead_assoclen; - struct af_alg_completion completion; + struct crypto_wait wait; size_t used; size_t rcvused; @@ -177,19 +172,11 @@ void af_alg_link_sg(struct af_alg_sgl *sgl_prev, struct af_alg_sgl *sgl_new); int af_alg_cmsg_send(struct msghdr *msg, struct af_alg_control *con); -int af_alg_wait_for_completion(int err, struct af_alg_completion *completion); -void af_alg_complete(struct crypto_async_request *req, int err); - static inline struct alg_sock *alg_sk(struct sock *sk) { return (struct alg_sock *)sk; } -static inline void af_alg_init_completion(struct af_alg_completion *completion) -{ - init_completion(&completion->completion); -} - /** * Size of available buffer for sending data from user space to kernel. * -- 2.1.4 |
From: Gilad Ben-Y. <gi...@be...> - 2017-08-14 15:22:59
Invoking a possibly async. crypto op and waiting for completion while correctly
handling backlog processing is a common task in the crypto API implementation
and outside users of it.

This patch adds a generic implementation for doing so in preparation for using
it across the board instead of hand rolled versions.

Signed-off-by: Gilad Ben-Yossef <gi...@be...>
CC: Eric Biggers <ebi...@gm...>
---
 crypto/api.c           | 13 +++++++++++++
 include/linux/crypto.h | 41 +++++++++++++++++++++++++++++++++++++++++
 2 files changed, 54 insertions(+)

diff --git a/crypto/api.c b/crypto/api.c
index 941cd4c..2a2479d 100644
--- a/crypto/api.c
+++ b/crypto/api.c
@@ -24,6 +24,7 @@
 #include <linux/sched/signal.h>
 #include <linux/slab.h>
 #include <linux/string.h>
+#include <linux/completion.h>
 #include "internal.h"
 
 LIST_HEAD(crypto_alg_list);
@@ -595,5 +596,17 @@ int crypto_has_alg(const char *name, u32 type, u32 mask)
 }
 EXPORT_SYMBOL_GPL(crypto_has_alg);
 
+void crypto_req_done(struct crypto_async_request *req, int err)
+{
+	struct crypto_wait *wait = req->data;
+
+	if (err == -EINPROGRESS)
+		return;
+
+	wait->err = err;
+	complete(&wait->completion);
+}
+EXPORT_SYMBOL_GPL(crypto_req_done);
+
 MODULE_DESCRIPTION("Cryptographic core API");
 MODULE_LICENSE("GPL");
diff --git a/include/linux/crypto.h b/include/linux/crypto.h
index 84da997..bb00186 100644
--- a/include/linux/crypto.h
+++ b/include/linux/crypto.h
@@ -24,6 +24,7 @@
 #include <linux/slab.h>
 #include <linux/string.h>
 #include <linux/uaccess.h>
+#include <linux/completion.h>
 
 /*
  * Autoloaded crypto modules should only use a prefixed name to avoid allowing
@@ -468,6 +469,45 @@ struct crypto_alg {
 } CRYPTO_MINALIGN_ATTR;
 
 /*
+ * A helper struct for waiting for completion of async crypto ops
+ */
+struct crypto_wait {
+	struct completion completion;
+	int err;
+};
+
+/*
+ * Macro for declaring a crypto op async wait object on stack
+ */
+#define DECLARE_CRYPTO_WAIT(_wait) \
+	struct crypto_wait _wait = { \
+		COMPLETION_INITIALIZER_ONSTACK((_wait).completion), 0 }
+
+/*
+ * Async ops completion helper functioons
+ */
+void crypto_req_done(struct crypto_async_request *req, int err);
+
+static inline int crypto_wait_req(int err, struct crypto_wait *wait)
+{
+	switch (err) {
+	case -EINPROGRESS:
+	case -EBUSY:
+		wait_for_completion(&wait->completion);
+		reinit_completion(&wait->completion);
+		err = wait->err;
+		break;
+	};
+
+	return err;
+}
+
+static inline void crypto_init_wait(struct crypto_wait *wait)
+{
+	init_completion(&wait->completion);
+}
+
+/*
  * Algorithm registration interface.
  */
 int crypto_register_alg(struct crypto_alg *alg);
@@ -1604,5 +1644,6 @@ static inline int crypto_comp_decompress(struct crypto_comp *tfm,
 					 src, slen, dst, dlen);
 }
 
+
 #endif /* _LINUX_CRYPTO_H */
-- 
2.1.4
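[Editorial sketch, not part of the posted patch.] Taken together with the conversions elsewhere in the series, the calling convention this patch enables is compact enough to show in one place. In the sketch below, my_encrypt_sync() and its parameters are made up; DECLARE_CRYPTO_WAIT(), crypto_req_done() and crypto_wait_req() are the helpers added above:

#include <crypto/skcipher.h>

/* Illustrative only: submit one skcipher op and wait for it synchronously. */
static int my_encrypt_sync(struct crypto_skcipher *tfm,
			   struct scatterlist *src, struct scatterlist *dst,
			   unsigned int len, u8 *iv)
{
	DECLARE_CRYPTO_WAIT(wait);	/* on-stack wait object */
	struct skcipher_request *req;
	int err;

	req = skcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	/* Route the async completion into the wait object. */
	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
				      CRYPTO_TFM_REQ_MAY_SLEEP,
				      crypto_req_done, &wait);
	skcipher_request_set_crypt(req, src, dst, len, iv);

	/* 0, -EINPROGRESS and -EBUSY (backlogged) are all handled here. */
	err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);

	skcipher_request_free(req);
	return err;
}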
From: Gilad Ben-Y. <gi...@be...> - 2017-08-14 15:22:46
|
Now that -EBUSY return code only indicates backlog queueing we can safely remove the now redundant check for the CRYPTO_TFM_REQ_MAY_BACKLOG flag when -EBUSY is returned. Signed-off-by: Gilad Ben-Yossef <gi...@be...> --- drivers/crypto/marvell/cesa.c | 3 +-- drivers/crypto/marvell/cesa.h | 2 +- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/drivers/crypto/marvell/cesa.c b/drivers/crypto/marvell/cesa.c index 6e7a5c7..269737f 100644 --- a/drivers/crypto/marvell/cesa.c +++ b/drivers/crypto/marvell/cesa.c @@ -183,8 +183,7 @@ int mv_cesa_queue_req(struct crypto_async_request *req, spin_lock_bh(&engine->lock); ret = crypto_enqueue_request(&engine->queue, req); if ((mv_cesa_req_get_type(creq) == CESA_DMA_REQ) && - (ret == -EINPROGRESS || - (ret == -EBUSY && req->flags & CRYPTO_TFM_REQ_MAY_BACKLOG))) + (ret == -EINPROGRESS || ret == -EBUSY) mv_cesa_tdma_chain(engine, creq); spin_unlock_bh(&engine->lock); diff --git a/drivers/crypto/marvell/cesa.h b/drivers/crypto/marvell/cesa.h index b7872f6..63c8457 100644 --- a/drivers/crypto/marvell/cesa.h +++ b/drivers/crypto/marvell/cesa.h @@ -763,7 +763,7 @@ static inline int mv_cesa_req_needs_cleanup(struct crypto_async_request *req, * the backlog and will be processed later. There's no need to * clean it up. */ - if (ret == -EBUSY && req->flags & CRYPTO_TFM_REQ_MAY_BACKLOG) + if (ret == -EBUSY) return false; /* Request wasn't queued, we need to clean it up */ -- 2.1.4 |
From: Gilad Ben-Y. <gi...@be...> - 2017-08-14 15:22:33
|
Now that -EBUSY return code only indicates backlog queueing we can safely remove the now redundant check for the CRYPTO_TFM_REQ_MAY_BACKLOG flag when -EBUSY is returned. Signed-off-by: Gilad Ben-Yossef <gi...@be...> --- crypto/ahash.c | 12 +++--------- crypto/cts.c | 6 ++---- crypto/lrw.c | 8 ++------ crypto/rsa-pkcs1pad.c | 16 ++++------------ crypto/xts.c | 8 ++------ 5 files changed, 13 insertions(+), 37 deletions(-) diff --git a/crypto/ahash.c b/crypto/ahash.c index 826cd7a..d63eeef 100644 --- a/crypto/ahash.c +++ b/crypto/ahash.c @@ -334,9 +334,7 @@ static int ahash_op_unaligned(struct ahash_request *req, return err; err = op(req); - if (err == -EINPROGRESS || - (err == -EBUSY && (ahash_request_flags(req) & - CRYPTO_TFM_REQ_MAY_BACKLOG))) + if (err == -EINPROGRESS || err == -EBUSY) return err; ahash_restore_req(req, err); @@ -394,9 +392,7 @@ static int ahash_def_finup_finish1(struct ahash_request *req, int err) req->base.complete = ahash_def_finup_done2; err = crypto_ahash_reqtfm(req)->final(req); - if (err == -EINPROGRESS || - (err == -EBUSY && (ahash_request_flags(req) & - CRYPTO_TFM_REQ_MAY_BACKLOG))) + if (err == -EINPROGRESS || err == -EBUSY) return err; out: @@ -432,9 +428,7 @@ static int ahash_def_finup(struct ahash_request *req) return err; err = tfm->update(req); - if (err == -EINPROGRESS || - (err == -EBUSY && (ahash_request_flags(req) & - CRYPTO_TFM_REQ_MAY_BACKLOG))) + if (err == -EINPROGRESS || err == -EBUSY) return err; return ahash_def_finup_finish1(req, err); diff --git a/crypto/cts.c b/crypto/cts.c index 243f591..4773c18 100644 --- a/crypto/cts.c +++ b/crypto/cts.c @@ -136,8 +136,7 @@ static void crypto_cts_encrypt_done(struct crypto_async_request *areq, int err) goto out; err = cts_cbc_encrypt(req); - if (err == -EINPROGRESS || - (err == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) + if (err == -EINPROGRESS || err == -EBUSY) return; out: @@ -229,8 +228,7 @@ static void crypto_cts_decrypt_done(struct crypto_async_request *areq, int err) goto out; err = cts_cbc_decrypt(req); - if (err == -EINPROGRESS || - (err == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) + if (err == -EINPROGRESS || err == -EBUSY) return; out: diff --git a/crypto/lrw.c b/crypto/lrw.c index a8bfae4..695cea9 100644 --- a/crypto/lrw.c +++ b/crypto/lrw.c @@ -328,9 +328,7 @@ static int do_encrypt(struct skcipher_request *req, int err) crypto_skcipher_encrypt(subreq) ?: post_crypt(req); - if (err == -EINPROGRESS || - (err == -EBUSY && - req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) + if (err == -EINPROGRESS || err == -EBUSY) return err; } @@ -380,9 +378,7 @@ static int do_decrypt(struct skcipher_request *req, int err) crypto_skcipher_decrypt(subreq) ?: post_crypt(req); - if (err == -EINPROGRESS || - (err == -EBUSY && - req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) + if (err == -EINPROGRESS || err == -EBUSY) return err; } diff --git a/crypto/rsa-pkcs1pad.c b/crypto/rsa-pkcs1pad.c index 407c64b..2908f93 100644 --- a/crypto/rsa-pkcs1pad.c +++ b/crypto/rsa-pkcs1pad.c @@ -279,9 +279,7 @@ static int pkcs1pad_encrypt(struct akcipher_request *req) req->dst, ctx->key_size - 1, req->dst_len); err = crypto_akcipher_encrypt(&req_ctx->child_req); - if (err != -EINPROGRESS && - (err != -EBUSY || - !(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))) + if (err != -EINPROGRESS && err != -EBUSY) return pkcs1pad_encrypt_sign_complete(req, err); return err; @@ -383,9 +381,7 @@ static int pkcs1pad_decrypt(struct akcipher_request *req) ctx->key_size); err = crypto_akcipher_decrypt(&req_ctx->child_req); 
- if (err != -EINPROGRESS && - (err != -EBUSY || - !(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))) + if (err != -EINPROGRESS && err != -EBUSY) return pkcs1pad_decrypt_complete(req, err); return err; @@ -440,9 +436,7 @@ static int pkcs1pad_sign(struct akcipher_request *req) req->dst, ctx->key_size - 1, req->dst_len); err = crypto_akcipher_sign(&req_ctx->child_req); - if (err != -EINPROGRESS && - (err != -EBUSY || - !(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))) + if (err != -EINPROGRESS && err != -EBUSY) return pkcs1pad_encrypt_sign_complete(req, err); return err; @@ -561,9 +555,7 @@ static int pkcs1pad_verify(struct akcipher_request *req) ctx->key_size); err = crypto_akcipher_verify(&req_ctx->child_req); - if (err != -EINPROGRESS && - (err != -EBUSY || - !(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))) + if (err != -EINPROGRESS && err != -EBUSY) return pkcs1pad_verify_complete(req, err); return err; diff --git a/crypto/xts.c b/crypto/xts.c index d86c11a..af68012 100644 --- a/crypto/xts.c +++ b/crypto/xts.c @@ -269,9 +269,7 @@ static int do_encrypt(struct skcipher_request *req, int err) crypto_skcipher_encrypt(subreq) ?: post_crypt(req); - if (err == -EINPROGRESS || - (err == -EBUSY && - req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) + if (err == -EINPROGRESS || err == -EBUSY) return err; } @@ -321,9 +319,7 @@ static int do_decrypt(struct skcipher_request *req, int err) crypto_skcipher_decrypt(subreq) ?: post_crypt(req); - if (err == -EINPROGRESS || - (err == -EBUSY && - req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) + if (err == -EINPROGRESS || err == -EBUSY) return err; } -- 2.1.4 |
From: Gilad Ben-Y. <gi...@be...> - 2017-08-14 15:22:20
|
Replace -EBUSY with -EAGAIN when reporting transient busy indication in the absence of backlog. Signed-off-by: Gilad Ben-Yossef <gi...@be...> --- drivers/crypto/ccp/ccp-crypto-main.c | 8 +++----- drivers/crypto/ccp/ccp-dev.c | 7 +++++-- 2 files changed, 8 insertions(+), 7 deletions(-) diff --git a/drivers/crypto/ccp/ccp-crypto-main.c b/drivers/crypto/ccp/ccp-crypto-main.c index 35a9de7..403ff0a 100644 --- a/drivers/crypto/ccp/ccp-crypto-main.c +++ b/drivers/crypto/ccp/ccp-crypto-main.c @@ -222,9 +222,10 @@ static int ccp_crypto_enqueue_cmd(struct ccp_crypto_cmd *crypto_cmd) /* Check if the cmd can/should be queued */ if (req_queue.cmd_count >= CCP_CRYPTO_MAX_QLEN) { - ret = -EBUSY; - if (!(crypto_cmd->cmd->flags & CCP_CMD_MAY_BACKLOG)) + if (!(crypto_cmd->cmd->flags & CCP_CMD_MAY_BACKLOG)) { + ret = -EAGAIN; goto e_lock; + } } /* Look for an entry with the same tfm. If there is a cmd @@ -243,9 +244,6 @@ static int ccp_crypto_enqueue_cmd(struct ccp_crypto_cmd *crypto_cmd) ret = ccp_enqueue_cmd(crypto_cmd->cmd); if (!ccp_crypto_success(ret)) goto e_lock; /* Error, don't queue it */ - if ((ret == -EBUSY) && - !(crypto_cmd->cmd->flags & CCP_CMD_MAY_BACKLOG)) - goto e_lock; /* Not backlogging, don't queue it */ } if (req_queue.cmd_count >= CCP_CRYPTO_MAX_QLEN) { diff --git a/drivers/crypto/ccp/ccp-dev.c b/drivers/crypto/ccp/ccp-dev.c index 4e029b1..3d637e3 100644 --- a/drivers/crypto/ccp/ccp-dev.c +++ b/drivers/crypto/ccp/ccp-dev.c @@ -292,9 +292,12 @@ int ccp_enqueue_cmd(struct ccp_cmd *cmd) i = ccp->cmd_q_count; if (ccp->cmd_count >= MAX_CMD_QLEN) { - ret = -EBUSY; - if (cmd->flags & CCP_CMD_MAY_BACKLOG) + if (cmd->flags & CCP_CMD_MAY_BACKLOG) { + ret = -EBUSY; list_add_tail(&cmd->entry, &ccp->backlog); + } else { + ret = -EAGAIN; + } } else { ret = -EINPROGRESS; ccp->cmd_count++; -- 2.1.4 |
From: Gilad Ben-Y. <gi...@be...> - 2017-08-14 15:22:08
|
The crypto API was using the -EBUSY return value to indicate both a hard failure to submit a crypto operation into a transformation provider when the latter was busy and the backlog mechanism was not enabled as well as a notification that the operation was queued into the backlog when the backlog mechanism was enabled. Having the same return code indicate two very different conditions depending on a flag is both error prone and requires extra runtime check like the following to discern between the cases: if (err == -EINPROGRESS || (err == -EBUSY && (ahash_request_flags(req) & CRYPTO_TFM_REQ_MAY_BACKLOG))) This patch changes the return code used to indicate a crypto op failed due to the transformation provider being transiently busy to -EAGAIN. Signed-off-by: Gilad Ben-Yossef <gi...@be...> --- crypto/algapi.c | 6 ++++-- crypto/algif_hash.c | 20 +++++++++++++++++--- crypto/cryptd.c | 4 +--- 3 files changed, 22 insertions(+), 8 deletions(-) diff --git a/crypto/algapi.c b/crypto/algapi.c index aa699ff..916bee3 100644 --- a/crypto/algapi.c +++ b/crypto/algapi.c @@ -897,9 +897,11 @@ int crypto_enqueue_request(struct crypto_queue *queue, int err = -EINPROGRESS; if (unlikely(queue->qlen >= queue->max_qlen)) { - err = -EBUSY; - if (!(request->flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) + if (!(request->flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) { + err = -EAGAIN; goto out; + } + err = -EBUSY; if (queue->backlog == &queue->list) queue->backlog = &request->list; } diff --git a/crypto/algif_hash.c b/crypto/algif_hash.c index 5e92bd2..3b3c154 100644 --- a/crypto/algif_hash.c +++ b/crypto/algif_hash.c @@ -39,6 +39,20 @@ struct algif_hash_tfm { bool has_key; }; +/* Previous versions of crypto_* ops used to return -EBUSY + * rather than -EAGAIN to indicate being tied up. The in + * kernel API changed but we don't want to break the user + * space API. As only the hash user interface exposed this + * error ever to the user, do the translation here. + */ +static inline int crypto_user_err(int err) +{ + if (err == -EAGAIN) + return -EBUSY; + + return err; +} + static int hash_alloc_result(struct sock *sk, struct hash_ctx *ctx) { unsigned ds; @@ -136,7 +150,7 @@ static int hash_sendmsg(struct socket *sock, struct msghdr *msg, unlock: release_sock(sk); - return err ?: copied; + return err ? crypto_user_err(err) : copied; } static ssize_t hash_sendpage(struct socket *sock, struct page *page, @@ -188,7 +202,7 @@ static ssize_t hash_sendpage(struct socket *sock, struct page *page, unlock: release_sock(sk); - return err ?: size; + return err ? crypto_user_err(err) : size; } static int hash_recvmsg(struct socket *sock, struct msghdr *msg, size_t len, @@ -236,7 +250,7 @@ static int hash_recvmsg(struct socket *sock, struct msghdr *msg, size_t len, hash_free_result(sk, ctx); release_sock(sk); - return err ?: len; + return err ? 
crypto_user_err(err) : len; } static int hash_accept(struct socket *sock, struct socket *newsock, int flags, diff --git a/crypto/cryptd.c b/crypto/cryptd.c index 0508c48..d1dbdce 100644 --- a/crypto/cryptd.c +++ b/crypto/cryptd.c @@ -137,16 +137,14 @@ static int cryptd_enqueue_request(struct cryptd_queue *queue, int cpu, err; struct cryptd_cpu_queue *cpu_queue; atomic_t *refcnt; - bool may_backlog; cpu = get_cpu(); cpu_queue = this_cpu_ptr(queue->cpu_queue); err = crypto_enqueue_request(&cpu_queue->queue, request); refcnt = crypto_tfm_ctx(request->tfm); - may_backlog = request->flags & CRYPTO_TFM_REQ_MAY_BACKLOG; - if (err == -EBUSY && !may_backlog) + if (err == -EAGAIN) goto out_put_cpu; queue_work_on(cpu, kcrypto_wq, &cpu_queue->work); -- 2.1.4 |
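[Editorial sketch, not part of the posted patch.] The practical upshot for callers, once the series is applied, is that submit-and-wait handling collapses to a single call plus one new error code to think about. In the sketch below, submit_and_wait() is hypothetical; crypto_wait_req() and crypto_req_done() are added by the "introduce crypto wait for async op" patch in this series:

#include <crypto/skcipher.h>

/* req must have been set up with crypto_req_done/&wait as its callback. */
static int submit_and_wait(struct skcipher_request *req,
			   struct crypto_wait *wait)
{
	int err = crypto_wait_req(crypto_skcipher_encrypt(req), wait);

	/*
	 * 0        - the operation completed (synchronously, or after
	 *            crypto_wait_req() slept until crypto_req_done() fired).
	 * -EAGAIN  - the provider was transiently busy and MAY_BACKLOG was
	 *            not set; the request was never queued, caller may retry.
	 * other<0  - the operation itself failed.
	 *
	 * -EINPROGRESS and -EBUSY (backlogged) are consumed internally by
	 * crypto_wait_req() and are not seen by the caller.
	 */
	return err;
}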
From: Gilad Ben-Y. <gi...@be...> - 2017-08-14 15:21:55
Many users of kernel async. crypto services have a pattern of starting an
async. crypto op and then using a completion to wait for it to end.

This patch set simplifies this common use case in two ways:

First, by separating the return codes of the case where a request is queued to
a backlog due to the provider being busy (-EBUSY) from the case where the
request has failed due to the provider being busy and backlogging is not
enabled (-EAGAIN).

Next, this change is then built on to create a generic API to wait for an
async. crypto operation to complete.

The end result is a smaller code base and an API that is easier to use and
more difficult to get wrong.

The patch set was boot tested on x86_64 and arm64, which at the very least
tests the crypto users via testmgr and tcrypt, but I do note that I do not
have access to some of the HW whose drivers are modified nor do I claim I was
able to test all of the corner cases.

The patch set is based upon linux-next release tagged next-20170811.

Changes from v4:
- Rebase on top of latest algif changes from Stephan Mueller.
- Fix typo in ccp patch title.

Changes from v3:
- Instead of changing the return code to indicate backlog queueing, change the
  return code to indicate transient busy state, as suggested by Herbert Xu.

Changes from v2:
- Patch title changed from "introduce crypto wait for async op" to better
  reflect the current state.
- Rebase on top of latest linux-next.
- Add a new return code of -EIOCBQUEUED for backlog queueing, as suggested by
  Herbert Xu.
- Transform more users to the new API.
- Update the drbg change to account for new init as indicated by
  Stephan Mueller.

Changes from v1:
- Address review comments from Eric Biggers.
- Separated out bug fixes of existing code and rebase on top of that patch set.
- Rename 'ecr' to 'wait' in fscrypto code.
- Split patch introducing the new API from the change moving over the algif
  code which it originated from to the new API.
- Inline crypto_wait_req().
- Some code indentation fixes.

Gilad Ben-Yossef (19):
  crypto: change transient busy return code to -EAGAIN
  crypto: ccp: use -EAGAIN for transient busy indication
  crypto: remove redundant backlog checks on EBUSY
  crypto: marvell/cesa: remove redundant backlog checks on EBUSY
  crypto: introduce crypto wait for async op
  crypto: move algif to generic async completion
  crypto: move pub key to generic async completion
  crypto: move drbg to generic async completion
  crypto: move gcm to generic async completion
  crypto: move testmgr to generic async completion
  fscrypt: move to generic async completion
  dm: move dm-verity to generic async completion
  cifs: move to generic async completion
  ima: move to generic async completion
  crypto: tcrypt: move to generic async completion
  crypto: talitos: move to generic async completion
  crypto: qce: move to generic async completion
  crypto: mediatek: move to generic async completion
  crypto: adapt api sample to use async. op wait

 Documentation/crypto/api-samples.rst |  52 ++-------
 crypto/af_alg.c                      |  27 -----
 crypto/ahash.c                       |  12 +--
 crypto/algapi.c                      |   6 +-
 crypto/algif_aead.c                  |   8 +-
 crypto/algif_hash.c                  |  50 +++++----
 crypto/algif_skcipher.c              |   9 +-
 crypto/api.c                         |  13 +++
 crypto/asymmetric_keys/public_key.c  |  28 +----
 crypto/cryptd.c                      |   4 +-
 crypto/cts.c                         |   6 +-
 crypto/drbg.c                        |  36 ++-----
 crypto/gcm.c                         |  32 ++----
 crypto/lrw.c                         |   8 +-
 crypto/rsa-pkcs1pad.c                |  16 +--
 crypto/tcrypt.c                      |  84 +++++----------
 crypto/testmgr.c                     | 204 ++++++++++++-----------------------
 crypto/xts.c                         |   8 +-
 drivers/crypto/ccp/ccp-crypto-main.c |   8 +-
 drivers/crypto/ccp/ccp-dev.c         |   7 +-
 drivers/crypto/marvell/cesa.c        |   3 +-
 drivers/crypto/marvell/cesa.h        |   2 +-
 drivers/crypto/mediatek/mtk-aes.c    |  31 +-----
 drivers/crypto/qce/sha.c             |  30 +-----
 drivers/crypto/talitos.c             |  38 +------
 drivers/md/dm-verity-target.c        |  81 ++++---------
 drivers/md/dm-verity.h               |   5 -
 fs/cifs/smb2ops.c                    |  30 +-----
 fs/crypto/crypto.c                   |  28 +----
 fs/crypto/fname.c                    |  36 ++-----
 fs/crypto/fscrypt_private.h          |  10 --
 fs/crypto/keyinfo.c                  |  21 +---
 include/crypto/drbg.h                |   3 +-
 include/crypto/if_alg.h              |  15 +--
 include/linux/crypto.h               |  41 +++++++
 security/integrity/ima/ima_crypto.c  |  56 +++------
 36 files changed, 311 insertions(+), 737 deletions(-)

-- 
2.1.4
From: Magalhaes, G. (B. R&D-CL) <gui...@hp...> - 2017-08-10 14:17:24
|
Hi Mimi, > The namespacing design will need to support different types of > environments. For example, depending on the environment, namespaces > can come and go frequently. In such an environment do we really want > all the namespace measurements to be included in the system > measurement list, or should each namespace have its own measurement > list? Should the namespace measurement policy be the same as the > system measurement policy? Should the namespace (container) owner be > allowed to modify their own policy? If a file matches a rule in both > the namespace and system policies, should the file measurement be in > both the namespace and the system measurement lists? Based on our previous discussions, the IMA policy should be set independently for different namespaces and each namespace will have a measurement list. Based on that and considering now the issues related to namespacing the IMA policy interface, this interface to set policy in the host and inside namespaces could be solved entirely by leveraging the existent policy file. IMA then would detect the current namespace and assume the rules are added to that namespace. The root user, even when mapped to a non-root user outside the namespace, is able to set the namespace policy just like it is done in the host policy. Once the namespace policy is set, the same policy rules should be read back from the policy file, with the same uids that were set in the rule and that make sense inside the current user namespace, not the mapped uid to the 'kernel' uid. This point is important because currently the uids in the IMA policy rules are mapped to kernel uids at parsing time. Another required change for this proposed interface to set the policy is to allow read/write access to the policy file to all users, since a root user in a given user namespace may not be a root user outside that user namespace. The kernel would still block access to the policy file based on the SYS_ADMIN capability and any changes to the policy would be effective just for the current namespace. The securityfs access inside user namespaces is possible once the user id mapping is done correctly, the root user, as seen inside the namespace will be able to mount the securityfs. Again, the read/write access to all users must be granted for the policy file. The same changes could be made to the measurement list interface in securityfs when each namespace has its own measurement list. A management tool in user space might be very helpful to set or read the policy or measurement list files in the current namespace or its children namespaces and to map the uid considering the current and target namespaces. This tool could also relate containers in user space to its IMA state for policy and appraise mode. Does this design solve properly the basic architectural issues only related to namespacing the interface to set the IMA policy? I have a patch with these changes ready to be posted. -- Guilherme > -----Original Message----- > From: Mimi Zohar [mailto:zo...@li...] > Sent: segunda-feira, 29 de maio de 2017 14:33 > To: Magalhaes, Guilherme (Brazil R&D-CL) <gui...@hp...>; > John Johansen <joh...@ca...>; dmi...@gm... 
> Cc: vi...@ze...; jam...@or...; se...@ha...; > lin...@vg...; lin...@vg...; linux-ima- > de...@li...; lin...@li...; linux- > sec...@vg...; ty...@do...; Souza, Joaquim (Brazil > R&D-ECL) <joa...@hp...>; Edwards, Nigel <nig...@hp...> > Subject: Re: [RFC 04/11] ima: add support to namespace securityfs file > > Hi Guilherme, > > (Wow, you should did Cc a lot of people.) > > On Thu, 2017-05-25 at 19:04 +0000, Magalhaes, Guilherme (Brazil R&D- > CL) wrote: > > Mimi, > > With the securityfs symlink we would address the case of setting > > policy inside containers, but we still would need a way to set the > > IMA policy per namespace outside containers. So, the current > > proposed interface would address the latter case. > > As an alternative to symlinks, taking this patch set as base, and > > still considering setting policy inside containers (or inside > > namespaces in general), it is possible to bind mount the securityfs > > files into the containers, but it would be needed to prevent > > read/write access to the namespaced IMA policy files for processes > > not running on the same namespace. > > > > These mechanisms would not require a change in the proposed design. > > Do you think these mechanisms are enough for the flexibility you > > asked? > > I'm really sorry Guileherme, but as I previously explained, IMA has > many aspects to it - the original file measurements (IMA-measurement), > file hash/signature appraisal (IMA-appraisal), and file audit messages > (IMA-audit) used for security analytics/forensics, not the file system > auditing. To namespace IMA properly requires addressing some > underlying problems - securityfs, root privilege required for writing > security xattrs, per namespace IMA keyring - to name a few. > > I understand wanting to namespace the appraisal aspect first, but when > you asked me if anyone else is working on namespacing IMA at the > moment, I suggested starting with the IMA-audit aspect for a > reason. By beginning with the IMA-audit aspect, we could ignore, at > least for the time being, some of these underlying problems, but > define the basic namespacing architecture. By jumping to namespacing > the appraisal aspect, you've simply avoided addressing the IMA > namespacing architecture issues, which need to be addressed. > > Here are some, not by any means all, of the issues with namespacing > IMA. > > > - IMA-measurements: > > The namespacing design will need to support different types of > environments. For example, depending on the environment, namespaces > can come and go frequently. In such an environment do we really want > all the namespace measurements to be included in the system > measurement list, or should each namespace have its own measurement > list? Should the namespace measurement policy be the same as the > system measurement policy? Should the namespace (container) owner be > allowed to modify their own policy? If a file matches a rule in both > the namespace and system policies, should the file measurement be in > both the namespace and the system measurement lists? > > > - IMA-appraisal > > Assuming the IMA appraisal policy requires file signatures, signatures > are verified based on the keys on the IMA keyring. When discussing > namespacing IMA-appraisal, we might want different sets of keys in > different namespaces. This requires separate keyrings for each > namespace. In some use cases, we might want to include the keys on > the system IMA keyring, while in other use cases we might not. 
> > Even if real root initially installs the files with their file > signatures, stored as extended attributes, how will software be > updated, as writing file signatures requires root > privilege?Capabilities has recently added support to permit root in > the namespace to write security.capability. Similarly when > namespacing IMA, root in the namespace needs the ability to write > security.ima. > > > - IMA-audit: > > The IMA-audit messages can augment other file system security > information used in security analytics/forensics. This information > should be on a per namespace basis, meaning that each time a new file > is accessed/executed, there needs to be a separate audit message, even > if a message already exists in another namespace. Maintaining and > cleaning up this per namespace cache information, allows development > of the IMA namespace architecture independently of other issues. > > My original suggestion stands. Start with namespacing IMA- > audit. Afterwards resolve the securityfs issues needed for > namespacing IMA-measurement, and subsequently resolve the keyring and > xattr issues for namespacing IMA-appraisal. Although other subsystems > have already addressed some of the issues listed here, the advice to > start with IMA-audit is still valid. > > Small incremental change does not imply without an overall design, but > an overall design which is broken up in such a way (to ease review) > that builds upon the previous change. > > As both Apparmor and IMA use securityfs for policy, it would be nice > if their method for loading namespace policies would be similar too. > > Mimi |
From: Gilad Ben-Y. <gi...@be...> - 2017-08-09 05:18:10
On Tue, Aug 8, 2017 at 6:24 PM, Gary R Hook <gar...@am...> wrote:
> On 08/08/2017 07:03 AM, Gilad Ben-Yossef wrote:
>>
>> Replace -EBUSY with -EAGAIN when reporting transient busy
>> indication in the absence of backlog.
>>
>> Signed-off-by: Gilad Ben-Yossef <gi...@be...>
>
> Could we use "ccp" in the subject line, please?

Of course, sorry about the typo.

Gilad

-- 
Gilad Ben-Yossef
Chief Coffee Drinker

"If you take a class in large-scale robotics, can you end up in a
situation where the homework eats your dog?"
 -- Jean-Baptiste Queru
From: Gary R H. <gar...@am...> - 2017-08-08 15:25:48
|
On 08/08/2017 07:03 AM, Gilad Ben-Yossef wrote: > Replace -EBUSY with -EAGAIN when reporting transient busy > indication in the absence of backlog. > > Signed-off-by: Gilad Ben-Yossef <gi...@be...> Could we use "ccp" in the subject line, please? > --- > drivers/crypto/ccp/ccp-crypto-main.c | 8 +++----- > drivers/crypto/ccp/ccp-dev.c | 7 +++++-- > 2 files changed, 8 insertions(+), 7 deletions(-) > > diff --git a/drivers/crypto/ccp/ccp-crypto-main.c b/drivers/crypto/ccp/ccp-crypto-main.c > index 35a9de7..403ff0a 100644 > --- a/drivers/crypto/ccp/ccp-crypto-main.c > +++ b/drivers/crypto/ccp/ccp-crypto-main.c > @@ -222,9 +222,10 @@ static int ccp_crypto_enqueue_cmd(struct ccp_crypto_cmd *crypto_cmd) > > /* Check if the cmd can/should be queued */ > if (req_queue.cmd_count >= CCP_CRYPTO_MAX_QLEN) { > - ret = -EBUSY; > - if (!(crypto_cmd->cmd->flags & CCP_CMD_MAY_BACKLOG)) > + if (!(crypto_cmd->cmd->flags & CCP_CMD_MAY_BACKLOG)) { > + ret = -EAGAIN; > goto e_lock; > + } > } > > /* Look for an entry with the same tfm. If there is a cmd > @@ -243,9 +244,6 @@ static int ccp_crypto_enqueue_cmd(struct ccp_crypto_cmd *crypto_cmd) > ret = ccp_enqueue_cmd(crypto_cmd->cmd); > if (!ccp_crypto_success(ret)) > goto e_lock; /* Error, don't queue it */ > - if ((ret == -EBUSY) && > - !(crypto_cmd->cmd->flags & CCP_CMD_MAY_BACKLOG)) > - goto e_lock; /* Not backlogging, don't queue it */ > } > > if (req_queue.cmd_count >= CCP_CRYPTO_MAX_QLEN) { > diff --git a/drivers/crypto/ccp/ccp-dev.c b/drivers/crypto/ccp/ccp-dev.c > index 4e029b1..3d637e3 100644 > --- a/drivers/crypto/ccp/ccp-dev.c > +++ b/drivers/crypto/ccp/ccp-dev.c > @@ -292,9 +292,12 @@ int ccp_enqueue_cmd(struct ccp_cmd *cmd) > i = ccp->cmd_q_count; > > if (ccp->cmd_count >= MAX_CMD_QLEN) { > - ret = -EBUSY; > - if (cmd->flags & CCP_CMD_MAY_BACKLOG) > + if (cmd->flags & CCP_CMD_MAY_BACKLOG) { > + ret = -EBUSY; > list_add_tail(&cmd->entry, &ccp->backlog); > + } else { > + ret = -EAGAIN; > + } > } else { > ret = -EINPROGRESS; > ccp->cmd_count++; > |
From: Gilad Ben-Y. <gi...@be...> - 2017-08-08 14:09:21
On Tue, Aug 8, 2017 at 4:10 PM, Stephan Mueller <smu...@ch...> wrote:
> On Tuesday, 8 August 2017, 14:03:37 CEST, Gilad Ben-Yossef wrote:
>
> Hi Gilad,
>
>> algif starts several async crypto ops and waits for their completion.
>> Move it over to generic code doing the same.
>
> Just FYI: your patch set and my patch [1] will clash.
>
> [1] https://patchwork.kernel.org/patch/9875959/

I'll try to rebase on linux-next + your patch then.

Thanks for letting me know.

Gilad

-- 
Gilad Ben-Yossef
Chief Coffee Drinker

"If you take a class in large-scale robotics, can you end up in a
situation where the homework eats your dog?"
 -- Jean-Baptiste Queru
From: Stephan M. <smu...@ch...> - 2017-08-08 13:10:11
On Tuesday, 8 August 2017, 14:03:37 CEST, Gilad Ben-Yossef wrote:

Hi Gilad,

> algif starts several async crypto ops and waits for their completion.
> Move it over to generic code doing the same.

Just FYI: your patch set and my patch [1] will clash.

[1] https://patchwork.kernel.org/patch/9875959/

Ciao
Stephan
From: Gilad Ben-Y. <gi...@be...> - 2017-08-08 12:22:32
|
fscrypt starts several async. crypto ops and waiting for them to complete. Move it over to generic code doing the same. Signed-off-by: Gilad Ben-Yossef <gi...@be...> --- fs/crypto/crypto.c | 28 ++++------------------------ fs/crypto/fname.c | 36 ++++++------------------------------ fs/crypto/fscrypt_private.h | 10 ---------- fs/crypto/keyinfo.c | 21 +++------------------ 4 files changed, 13 insertions(+), 82 deletions(-) diff --git a/fs/crypto/crypto.c b/fs/crypto/crypto.c index c7835df..80a3cad 100644 --- a/fs/crypto/crypto.c +++ b/fs/crypto/crypto.c @@ -126,21 +126,6 @@ struct fscrypt_ctx *fscrypt_get_ctx(const struct inode *inode, gfp_t gfp_flags) } EXPORT_SYMBOL(fscrypt_get_ctx); -/** - * page_crypt_complete() - completion callback for page crypto - * @req: The asynchronous cipher request context - * @res: The result of the cipher operation - */ -static void page_crypt_complete(struct crypto_async_request *req, int res) -{ - struct fscrypt_completion_result *ecr = req->data; - - if (res == -EINPROGRESS) - return; - ecr->res = res; - complete(&ecr->completion); -} - int fscrypt_do_page_crypto(const struct inode *inode, fscrypt_direction_t rw, u64 lblk_num, struct page *src_page, struct page *dest_page, unsigned int len, @@ -151,7 +136,7 @@ int fscrypt_do_page_crypto(const struct inode *inode, fscrypt_direction_t rw, u8 padding[FS_IV_SIZE - sizeof(__le64)]; } iv; struct skcipher_request *req = NULL; - DECLARE_FS_COMPLETION_RESULT(ecr); + DECLARE_CRYPTO_WAIT(wait); struct scatterlist dst, src; struct fscrypt_info *ci = inode->i_crypt_info; struct crypto_skcipher *tfm = ci->ci_ctfm; @@ -179,7 +164,7 @@ int fscrypt_do_page_crypto(const struct inode *inode, fscrypt_direction_t rw, skcipher_request_set_callback( req, CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP, - page_crypt_complete, &ecr); + crypto_req_done, &wait); sg_init_table(&dst, 1); sg_set_page(&dst, dest_page, len, offs); @@ -187,14 +172,9 @@ int fscrypt_do_page_crypto(const struct inode *inode, fscrypt_direction_t rw, sg_set_page(&src, src_page, len, offs); skcipher_request_set_crypt(req, &src, &dst, len, &iv); if (rw == FS_DECRYPT) - res = crypto_skcipher_decrypt(req); + res = crypto_wait_req(crypto_skcipher_decrypt(req), &wait); else - res = crypto_skcipher_encrypt(req); - if (res == -EINPROGRESS || res == -EBUSY) { - BUG_ON(req->base.data != &ecr); - wait_for_completion(&ecr.completion); - res = ecr.res; - } + res = crypto_wait_req(crypto_skcipher_encrypt(req), &wait); skcipher_request_free(req); if (res) { printk_ratelimited(KERN_ERR diff --git a/fs/crypto/fname.c b/fs/crypto/fname.c index ad9f814..a80a0d3 100644 --- a/fs/crypto/fname.c +++ b/fs/crypto/fname.c @@ -15,21 +15,6 @@ #include "fscrypt_private.h" /** - * fname_crypt_complete() - completion callback for filename crypto - * @req: The asynchronous cipher request context - * @res: The result of the cipher operation - */ -static void fname_crypt_complete(struct crypto_async_request *req, int res) -{ - struct fscrypt_completion_result *ecr = req->data; - - if (res == -EINPROGRESS) - return; - ecr->res = res; - complete(&ecr->completion); -} - -/** * fname_encrypt() - encrypt a filename * * The caller must have allocated sufficient memory for the @oname string. 
@@ -40,7 +25,7 @@ static int fname_encrypt(struct inode *inode, const struct qstr *iname, struct fscrypt_str *oname) { struct skcipher_request *req = NULL; - DECLARE_FS_COMPLETION_RESULT(ecr); + DECLARE_CRYPTO_WAIT(wait); struct fscrypt_info *ci = inode->i_crypt_info; struct crypto_skcipher *tfm = ci->ci_ctfm; int res = 0; @@ -76,17 +61,12 @@ static int fname_encrypt(struct inode *inode, } skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP, - fname_crypt_complete, &ecr); + crypto_req_done, &wait); sg_init_one(&sg, oname->name, cryptlen); skcipher_request_set_crypt(req, &sg, &sg, cryptlen, iv); /* Do the encryption */ - res = crypto_skcipher_encrypt(req); - if (res == -EINPROGRESS || res == -EBUSY) { - /* Request is being completed asynchronously; wait for it */ - wait_for_completion(&ecr.completion); - res = ecr.res; - } + res = crypto_wait_req(crypto_skcipher_encrypt(req), &wait); skcipher_request_free(req); if (res < 0) { printk_ratelimited(KERN_ERR @@ -110,7 +90,7 @@ static int fname_decrypt(struct inode *inode, struct fscrypt_str *oname) { struct skcipher_request *req = NULL; - DECLARE_FS_COMPLETION_RESULT(ecr); + DECLARE_CRYPTO_WAIT(wait); struct scatterlist src_sg, dst_sg; struct fscrypt_info *ci = inode->i_crypt_info; struct crypto_skcipher *tfm = ci->ci_ctfm; @@ -131,7 +111,7 @@ static int fname_decrypt(struct inode *inode, } skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP, - fname_crypt_complete, &ecr); + crypto_req_done, &wait); /* Initialize IV */ memset(iv, 0, FS_CRYPTO_BLOCK_SIZE); @@ -140,11 +120,7 @@ static int fname_decrypt(struct inode *inode, sg_init_one(&src_sg, iname->name, iname->len); sg_init_one(&dst_sg, oname->name, oname->len); skcipher_request_set_crypt(req, &src_sg, &dst_sg, iname->len, iv); - res = crypto_skcipher_decrypt(req); - if (res == -EINPROGRESS || res == -EBUSY) { - wait_for_completion(&ecr.completion); - res = ecr.res; - } + res = crypto_wait_req(crypto_skcipher_decrypt(req), &wait); skcipher_request_free(req); if (res < 0) { printk_ratelimited(KERN_ERR diff --git a/fs/crypto/fscrypt_private.h b/fs/crypto/fscrypt_private.h index a1d5021..c0f1881 100644 --- a/fs/crypto/fscrypt_private.h +++ b/fs/crypto/fscrypt_private.h @@ -69,16 +69,6 @@ typedef enum { #define FS_CTX_REQUIRES_FREE_ENCRYPT_FL 0x00000001 #define FS_CTX_HAS_BOUNCE_BUFFER_FL 0x00000002 -struct fscrypt_completion_result { - struct completion completion; - int res; -}; - -#define DECLARE_FS_COMPLETION_RESULT(ecr) \ - struct fscrypt_completion_result ecr = { \ - COMPLETION_INITIALIZER_ONSTACK((ecr).completion), 0 } - - /* crypto.c */ extern int fscrypt_initialize(unsigned int cop_flags); extern struct workqueue_struct *fscrypt_read_workqueue; diff --git a/fs/crypto/keyinfo.c b/fs/crypto/keyinfo.c index 018c588..3c84cac 100644 --- a/fs/crypto/keyinfo.c +++ b/fs/crypto/keyinfo.c @@ -17,17 +17,6 @@ static struct crypto_shash *essiv_hash_tfm; -static void derive_crypt_complete(struct crypto_async_request *req, int rc) -{ - struct fscrypt_completion_result *ecr = req->data; - - if (rc == -EINPROGRESS) - return; - - ecr->res = rc; - complete(&ecr->completion); -} - /** * derive_key_aes() - Derive a key using AES-128-ECB * @deriving_key: Encryption key used for derivation. 
@@ -42,7 +31,7 @@ static int derive_key_aes(u8 deriving_key[FS_AES_128_ECB_KEY_SIZE], { int res = 0; struct skcipher_request *req = NULL; - DECLARE_FS_COMPLETION_RESULT(ecr); + DECLARE_CRYPTO_WAIT(wait); struct scatterlist src_sg, dst_sg; struct crypto_skcipher *tfm = crypto_alloc_skcipher("ecb(aes)", 0, 0); @@ -59,7 +48,7 @@ static int derive_key_aes(u8 deriving_key[FS_AES_128_ECB_KEY_SIZE], } skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP, - derive_crypt_complete, &ecr); + crypto_req_done, &wait); res = crypto_skcipher_setkey(tfm, deriving_key, FS_AES_128_ECB_KEY_SIZE); if (res < 0) @@ -69,11 +58,7 @@ static int derive_key_aes(u8 deriving_key[FS_AES_128_ECB_KEY_SIZE], sg_init_one(&dst_sg, derived_raw_key, source_key->size); skcipher_request_set_crypt(req, &src_sg, &dst_sg, source_key->size, NULL); - res = crypto_skcipher_encrypt(req); - if (res == -EINPROGRESS || res == -EBUSY) { - wait_for_completion(&ecr.completion); - res = ecr.res; - } + res = crypto_wait_req(crypto_skcipher_encrypt(req), &wait); out: skcipher_request_free(req); crypto_free_skcipher(tfm); -- 2.1.4 |
From: Gilad Ben-Y. <gi...@be...> - 2017-08-08 12:22:32
|
DRBG is starting an async. crypto op and waiting for it complete. Move it over to generic code doing the same. The code now also passes CRYPTO_TFM_REQ_MAY_SLEEP flag indicating crypto request memory allocation may use GFP_KERNEL which should be perfectly fine as the code is obviously sleeping for the completion of the request any way. Signed-off-by: Gilad Ben-Yossef <gi...@be...> --- crypto/drbg.c | 36 +++++++++--------------------------- include/crypto/drbg.h | 3 +-- 2 files changed, 10 insertions(+), 29 deletions(-) diff --git a/crypto/drbg.c b/crypto/drbg.c index 633a88e..c522251 100644 --- a/crypto/drbg.c +++ b/crypto/drbg.c @@ -1651,16 +1651,6 @@ static int drbg_fini_sym_kernel(struct drbg_state *drbg) return 0; } -static void drbg_skcipher_cb(struct crypto_async_request *req, int error) -{ - struct drbg_state *drbg = req->data; - - if (error == -EINPROGRESS) - return; - drbg->ctr_async_err = error; - complete(&drbg->ctr_completion); -} - static int drbg_init_sym_kernel(struct drbg_state *drbg) { struct crypto_cipher *tfm; @@ -1691,7 +1681,7 @@ static int drbg_init_sym_kernel(struct drbg_state *drbg) return PTR_ERR(sk_tfm); } drbg->ctr_handle = sk_tfm; - init_completion(&drbg->ctr_completion); + crypto_init_wait(&drbg->ctr_wait); req = skcipher_request_alloc(sk_tfm, GFP_KERNEL); if (!req) { @@ -1700,8 +1690,9 @@ static int drbg_init_sym_kernel(struct drbg_state *drbg) return -ENOMEM; } drbg->ctr_req = req; - skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, - drbg_skcipher_cb, drbg); + skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG | + CRYPTO_TFM_REQ_MAY_SLEEP, + crypto_req_done, &drbg->ctr_wait); alignmask = crypto_skcipher_alignmask(sk_tfm); drbg->ctr_null_value_buf = kzalloc(DRBG_CTR_NULL_LEN + alignmask, @@ -1762,21 +1753,12 @@ static int drbg_kcapi_sym_ctr(struct drbg_state *drbg, /* Output buffer may not be valid for SGL, use scratchpad */ skcipher_request_set_crypt(drbg->ctr_req, &sg_in, &sg_out, cryptlen, drbg->V); - ret = crypto_skcipher_encrypt(drbg->ctr_req); - switch (ret) { - case 0: - break; - case -EINPROGRESS: - case -EBUSY: - wait_for_completion(&drbg->ctr_completion); - if (!drbg->ctr_async_err) { - reinit_completion(&drbg->ctr_completion); - break; - } - default: + ret = crypto_wait_req(crypto_skcipher_encrypt(drbg->ctr_req), + &drbg->ctr_wait); + if (ret) goto out; - } - init_completion(&drbg->ctr_completion); + + crypto_init_wait(&drbg->ctr_wait); memcpy(outbuf, drbg->outscratchpad, cryptlen); diff --git a/include/crypto/drbg.h b/include/crypto/drbg.h index 22f884c..8f94110 100644 --- a/include/crypto/drbg.h +++ b/include/crypto/drbg.h @@ -126,8 +126,7 @@ struct drbg_state { __u8 *ctr_null_value; /* CTR mode aligned zero buf */ __u8 *outscratchpadbuf; /* CTR mode output scratchpad */ __u8 *outscratchpad; /* CTR mode aligned outbuf */ - struct completion ctr_completion; /* CTR mode async handler */ - int ctr_async_err; /* CTR mode async error */ + struct crypto_wait ctr_wait; /* CTR mode async wait obj */ bool seeded; /* DRBG fully seeded? */ bool pr; /* Prediction resistance enabled? */ -- 2.1.4 |
From: Gilad Ben-Y. <gi...@be...> - 2017-08-08 12:22:32
|
Now that -EBUSY return code only indicates backlog queueing we can safely remove the now redundant check for the CRYPTO_TFM_REQ_MAY_BACKLOG flag when -EBUSY is returned. Signed-off-by: Gilad Ben-Yossef <gi...@be...> --- drivers/crypto/marvell/cesa.c | 3 +-- drivers/crypto/marvell/cesa.h | 2 +- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/drivers/crypto/marvell/cesa.c b/drivers/crypto/marvell/cesa.c index 6e7a5c7..269737f 100644 --- a/drivers/crypto/marvell/cesa.c +++ b/drivers/crypto/marvell/cesa.c @@ -183,8 +183,7 @@ int mv_cesa_queue_req(struct crypto_async_request *req, spin_lock_bh(&engine->lock); ret = crypto_enqueue_request(&engine->queue, req); if ((mv_cesa_req_get_type(creq) == CESA_DMA_REQ) && - (ret == -EINPROGRESS || - (ret == -EBUSY && req->flags & CRYPTO_TFM_REQ_MAY_BACKLOG))) + (ret == -EINPROGRESS || ret == -EBUSY) mv_cesa_tdma_chain(engine, creq); spin_unlock_bh(&engine->lock); diff --git a/drivers/crypto/marvell/cesa.h b/drivers/crypto/marvell/cesa.h index b7872f6..63c8457 100644 --- a/drivers/crypto/marvell/cesa.h +++ b/drivers/crypto/marvell/cesa.h @@ -763,7 +763,7 @@ static inline int mv_cesa_req_needs_cleanup(struct crypto_async_request *req, * the backlog and will be processed later. There's no need to * clean it up. */ - if (ret == -EBUSY && req->flags & CRYPTO_TFM_REQ_MAY_BACKLOG) + if (ret == -EBUSY) return false; /* Request wasn't queued, we need to clean it up */ -- 2.1.4 |
From: Gilad Ben-Y. <gi...@be...> - 2017-08-08 12:22:32
|
public_key_verify_signature() is starting an async crypto op and waiting for it to complete. Move it over to generic code doing the same. Signed-off-by: Gilad Ben-Yossef <gi...@be...> --- crypto/asymmetric_keys/public_key.c | 28 ++++------------------------ 1 file changed, 4 insertions(+), 24 deletions(-) diff --git a/crypto/asymmetric_keys/public_key.c b/crypto/asymmetric_keys/public_key.c index 3cd6e12..d916235 100644 --- a/crypto/asymmetric_keys/public_key.c +++ b/crypto/asymmetric_keys/public_key.c @@ -57,29 +57,13 @@ static void public_key_destroy(void *payload0, void *payload3) public_key_signature_free(payload3); } -struct public_key_completion { - struct completion completion; - int err; -}; - -static void public_key_verify_done(struct crypto_async_request *req, int err) -{ - struct public_key_completion *compl = req->data; - - if (err == -EINPROGRESS) - return; - - compl->err = err; - complete(&compl->completion); -} - /* * Verify a signature using a public key. */ int public_key_verify_signature(const struct public_key *pkey, const struct public_key_signature *sig) { - struct public_key_completion compl; + struct crypto_wait cwait; struct crypto_akcipher *tfm; struct akcipher_request *req; struct scatterlist sig_sg, digest_sg; @@ -131,20 +115,16 @@ int public_key_verify_signature(const struct public_key *pkey, sg_init_one(&digest_sg, output, outlen); akcipher_request_set_crypt(req, &sig_sg, &digest_sg, sig->s_size, outlen); - init_completion(&compl.completion); + crypto_init_wait(&cwait); akcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP, - public_key_verify_done, &compl); + crypto_req_done, &cwait); /* Perform the verification calculation. This doesn't actually do the * verification, but rather calculates the hash expected by the * signature and returns that to us. */ - ret = crypto_akcipher_verify(req); - if ((ret == -EINPROGRESS) || (ret == -EBUSY)) { - wait_for_completion(&compl.completion); - ret = compl.err; - } + ret = crypto_wait_req(crypto_akcipher_verify(req), &cwait); if (ret < 0) goto out_free_output; -- 2.1.4 |
From: Gilad Ben-Y. <gi...@be...> - 2017-08-08 12:22:32
|
gcm is starting an async. crypto op and waiting for it complete. Move it over to generic code doing the same. Signed-off-by: Gilad Ben-Yossef <gi...@be...> --- crypto/gcm.c | 32 ++++++-------------------------- 1 file changed, 6 insertions(+), 26 deletions(-) diff --git a/crypto/gcm.c b/crypto/gcm.c index 3841b5e..fb923a5 100644 --- a/crypto/gcm.c +++ b/crypto/gcm.c @@ -16,7 +16,6 @@ #include <crypto/scatterwalk.h> #include <crypto/hash.h> #include "internal.h" -#include <linux/completion.h> #include <linux/err.h> #include <linux/init.h> #include <linux/kernel.h> @@ -78,11 +77,6 @@ struct crypto_gcm_req_priv_ctx { } u; }; -struct crypto_gcm_setkey_result { - int err; - struct completion completion; -}; - static struct { u8 buf[16]; struct scatterlist sg; @@ -98,17 +92,6 @@ static inline struct crypto_gcm_req_priv_ctx *crypto_gcm_reqctx( return (void *)PTR_ALIGN((u8 *)aead_request_ctx(req), align + 1); } -static void crypto_gcm_setkey_done(struct crypto_async_request *req, int err) -{ - struct crypto_gcm_setkey_result *result = req->data; - - if (err == -EINPROGRESS) - return; - - result->err = err; - complete(&result->completion); -} - static int crypto_gcm_setkey(struct crypto_aead *aead, const u8 *key, unsigned int keylen) { @@ -119,7 +102,7 @@ static int crypto_gcm_setkey(struct crypto_aead *aead, const u8 *key, be128 hash; u8 iv[16]; - struct crypto_gcm_setkey_result result; + struct crypto_wait wait; struct scatterlist sg[1]; struct skcipher_request req; @@ -140,21 +123,18 @@ static int crypto_gcm_setkey(struct crypto_aead *aead, const u8 *key, if (!data) return -ENOMEM; - init_completion(&data->result.completion); + crypto_init_wait(&data->wait); sg_init_one(data->sg, &data->hash, sizeof(data->hash)); skcipher_request_set_tfm(&data->req, ctr); skcipher_request_set_callback(&data->req, CRYPTO_TFM_REQ_MAY_SLEEP | CRYPTO_TFM_REQ_MAY_BACKLOG, - crypto_gcm_setkey_done, - &data->result); + crypto_req_done, + &data->wait); skcipher_request_set_crypt(&data->req, data->sg, data->sg, sizeof(data->hash), data->iv); - err = crypto_skcipher_encrypt(&data->req); - if (err == -EINPROGRESS || err == -EBUSY) { - wait_for_completion(&data->result.completion); - err = data->result.err; - } + err = crypto_wait_req(crypto_skcipher_encrypt(&data->req), + &data->wait); if (err) goto out; -- 2.1.4 |
From: Gilad Ben-Y. <gi...@be...> - 2017-08-08 12:22:32
|
Invoking a possibly async. crypto op and waiting for completion while correctly handling backlog processing is a common task in the crypto API implementation and outside users of it. This patch adds a generic implementation for doing so in preparation for using it across the board instead of hand rolled versions. Signed-off-by: Gilad Ben-Yossef <gi...@be...> CC: Eric Biggers <ebi...@gm...> --- crypto/api.c | 13 +++++++++++++ include/linux/crypto.h | 41 +++++++++++++++++++++++++++++++++++++++++ 2 files changed, 54 insertions(+) diff --git a/crypto/api.c b/crypto/api.c index 941cd4c..2a2479d 100644 --- a/crypto/api.c +++ b/crypto/api.c @@ -24,6 +24,7 @@ #include <linux/sched/signal.h> #include <linux/slab.h> #include <linux/string.h> +#include <linux/completion.h> #include "internal.h" LIST_HEAD(crypto_alg_list); @@ -595,5 +596,17 @@ int crypto_has_alg(const char *name, u32 type, u32 mask) } EXPORT_SYMBOL_GPL(crypto_has_alg); +void crypto_req_done(struct crypto_async_request *req, int err) +{ + struct crypto_wait *wait = req->data; + + if (err == -EINPROGRESS) + return; + + wait->err = err; + complete(&wait->completion); +} +EXPORT_SYMBOL_GPL(crypto_req_done); + MODULE_DESCRIPTION("Cryptographic core API"); MODULE_LICENSE("GPL"); diff --git a/include/linux/crypto.h b/include/linux/crypto.h index 84da997..bb00186 100644 --- a/include/linux/crypto.h +++ b/include/linux/crypto.h @@ -24,6 +24,7 @@ #include <linux/slab.h> #include <linux/string.h> #include <linux/uaccess.h> +#include <linux/completion.h> /* * Autoloaded crypto modules should only use a prefixed name to avoid allowing @@ -468,6 +469,45 @@ struct crypto_alg { } CRYPTO_MINALIGN_ATTR; /* + * A helper struct for waiting for completion of async crypto ops + */ +struct crypto_wait { + struct completion completion; + int err; +}; + +/* + * Macro for declaring a crypto op async wait object on stack + */ +#define DECLARE_CRYPTO_WAIT(_wait) \ + struct crypto_wait _wait = { \ + COMPLETION_INITIALIZER_ONSTACK((_wait).completion), 0 } + +/* + * Async ops completion helper functioons + */ +void crypto_req_done(struct crypto_async_request *req, int err); + +static inline int crypto_wait_req(int err, struct crypto_wait *wait) +{ + switch (err) { + case -EINPROGRESS: + case -EBUSY: + wait_for_completion(&wait->completion); + reinit_completion(&wait->completion); + err = wait->err; + break; + }; + + return err; +} + +static inline void crypto_init_wait(struct crypto_wait *wait) +{ + init_completion(&wait->completion); +} + +/* * Algorithm registration interface. */ int crypto_register_alg(struct crypto_alg *alg); @@ -1604,5 +1644,6 @@ static inline int crypto_comp_decompress(struct crypto_comp *tfm, src, slen, dst, dlen); } + #endif /* _LINUX_CRYPTO_H */ -- 2.1.4 |
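A minimal usage sketch of the helpers this patch introduces, using an ahash transform. It is not taken from any converted user; tfm, sg, result and nbytes are assumed to exist in the caller.

	DECLARE_CRYPTO_WAIT(wait);	/* on-stack wait object, no crypto_init_wait() needed */
	struct ahash_request *req;
	int err;

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req)
		return -ENOMEM;
	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   crypto_req_done, &wait);
	ahash_request_set_crypt(req, sg, result, nbytes);
	/* waits on -EINPROGRESS/-EBUSY, passes any other return code straight through */
	err = crypto_wait_req(crypto_ahash_digest(req), &wait);
	ahash_request_free(req);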
From: Gilad Ben-Y. <gi...@be...> - 2017-08-08 12:22:26
|
Many users of kernel async. crypto services have a pattern of starting an async. crypto op and then using a completion to wait for it to end. This patch set simplifies this common use case in two ways: First, by separating the return codes of the case where a request is queued to a backlog due to the provider being busy (-EBUSY) from the case where the request has failed due to the provider being busy and backlogging is not enabled (-EAGAIN). Next, this change is then built on to create a generic API to wait for an async. crypto operation to complete. The end result is a smaller code base and an API that is easier to use and more difficult to get wrong. The patch set was boot tested on x86_64 and arm64, which at the very least tests the crypto users via testmgr and tcrypt, but I do note that I do not have access to some of the HW whose drivers are modified, nor do I claim I was able to test all of the corner cases. The patch set is based upon the linux-next release tagged next-20170808. Changes from v3: - Instead of changing the return code to indicate backlog queueing, change the return code to indicate transient busy state, as suggested by Herbert Xu. Changes from v2: - Patch title changed from "introduce crypto wait for async op" to better reflect the current state. - Rebase on top of latest linux-next. - Add a new return code of -EIOCBQUEUED for backlog queueing, as suggested by Herbert Xu. - Transform more users to the new API. - Update the drbg change to account for new init as indicated by Stephan Muller. Changes from v1: - Address review comments from Eric Biggers. - Separated out bug fixes of existing code and rebase on top of that patch set. - Rename 'ecr' to 'wait' in fscrypto code. - Split patch introducing the new API from the change moving over the algif code which it originated from to the new API. - Inline crypto_wait_req(). - Some code indentation fixes. Gilad Ben-Yossef (19): crypto: change transient busy return code to -EAGAIN crypto: ccm: use -EAGAIN for transient busy indication crypto: remove redundant backlog checks on EBUSY crypto: marvell/cesa: remove redundant backlog checks on EBUSY crypto: introduce crypto wait for async op crypto: move algif to generic async completion crypto: move pub key to generic async completion crypto: move drbg to generic async completion crypto: move gcm to generic async completion crypto: move testmgr to generic async completion fscrypt: move to generic async completion dm: move dm-verity to generic async completion cifs: move to generic async completion ima: move to generic async completion crypto: tcrypt: move to generic async completion crypto: talitos: move to generic async completion crypto: qce: move to generic async completion crypto: mediatek: move to generic async completion crypto: adapt api sample to use async. 
op wait Documentation/crypto/api-samples.rst | 52 ++------- crypto/af_alg.c | 27 ----- crypto/ahash.c | 12 +-- crypto/algapi.c | 6 +- crypto/algif_aead.c | 14 +-- crypto/algif_hash.c | 50 +++++---- crypto/algif_skcipher.c | 15 ++- crypto/api.c | 13 +++ crypto/asymmetric_keys/public_key.c | 28 +---- crypto/cryptd.c | 4 +- crypto/cts.c | 6 +- crypto/drbg.c | 36 ++----- crypto/gcm.c | 32 ++---- crypto/lrw.c | 8 +- crypto/rsa-pkcs1pad.c | 16 +-- crypto/tcrypt.c | 84 +++++---------- crypto/testmgr.c | 204 ++++++++++++----------------------- crypto/xts.c | 8 +- drivers/crypto/ccp/ccp-crypto-main.c | 8 +- drivers/crypto/ccp/ccp-dev.c | 7 +- drivers/crypto/marvell/cesa.c | 3 +- drivers/crypto/marvell/cesa.h | 2 +- drivers/crypto/mediatek/mtk-aes.c | 31 +----- drivers/crypto/qce/sha.c | 30 +----- drivers/crypto/talitos.c | 38 +------ drivers/md/dm-verity-target.c | 81 ++++---------- drivers/md/dm-verity.h | 5 - fs/cifs/smb2ops.c | 30 +----- fs/crypto/crypto.c | 28 +---- fs/crypto/fname.c | 36 ++----- fs/crypto/fscrypt_private.h | 10 -- fs/crypto/keyinfo.c | 21 +--- include/crypto/drbg.h | 3 +- include/crypto/if_alg.h | 13 --- include/linux/crypto.h | 41 +++++++ security/integrity/ima/ima_crypto.c | 56 +++------- 36 files changed, 316 insertions(+), 742 deletions(-) -- 2.1.4 |
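In caller terms, the transformation the cover letter describes boils down to the following before/after comparison. This is an illustrative sketch only; "priv" stands for whatever hand-rolled completion-plus-error struct a given user carried before the conversion.

	/* before: open-coded wait */
	err = crypto_ahash_update(req);
	if (err == -EINPROGRESS || err == -EBUSY) {
		wait_for_completion(&priv.completion);
		err = priv.err;
	}

	/* after: generic helper */
	err = crypto_wait_req(crypto_ahash_update(req), &wait);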
From: Gilad Ben-Y. <gi...@be...> - 2017-08-08 12:22:26
|
The crypto API was using the -EBUSY return value to indicate both a hard failure to submit a crypto operation into a transformation provider when the latter was busy and the backlog mechanism was not enabled as well as a notification that the operation was queued into the backlog when the backlog mechanism was enabled. Having the same return code indicate two very different conditions depending on a flag is both error prone and requires extra runtime check like the following to discern between the cases: if (err == -EINPROGRESS || (err == -EBUSY && (ahash_request_flags(req) & CRYPTO_TFM_REQ_MAY_BACKLOG))) This patch changes the return code used to indicate a crypto op failed due to the transformation provider being transiently busy to -EAGAIN. Signed-off-by: Gilad Ben-Yossef <gi...@be...> --- crypto/algapi.c | 6 ++++-- crypto/algif_hash.c | 20 +++++++++++++++++--- crypto/cryptd.c | 4 +--- 3 files changed, 22 insertions(+), 8 deletions(-) diff --git a/crypto/algapi.c b/crypto/algapi.c index aa699ff..916bee3 100644 --- a/crypto/algapi.c +++ b/crypto/algapi.c @@ -897,9 +897,11 @@ int crypto_enqueue_request(struct crypto_queue *queue, int err = -EINPROGRESS; if (unlikely(queue->qlen >= queue->max_qlen)) { - err = -EBUSY; - if (!(request->flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) + if (!(request->flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) { + err = -EAGAIN; goto out; + } + err = -EBUSY; if (queue->backlog == &queue->list) queue->backlog = &request->list; } diff --git a/crypto/algif_hash.c b/crypto/algif_hash.c index 5e92bd2..3b3c154 100644 --- a/crypto/algif_hash.c +++ b/crypto/algif_hash.c @@ -39,6 +39,20 @@ struct algif_hash_tfm { bool has_key; }; +/* Previous versions of crypto_* ops used to return -EBUSY + * rather than -EAGAIN to indicate being tied up. The in + * kernel API changed but we don't want to break the user + * space API. As only the hash user interface exposed this + * error ever to the user, do the translation here. + */ +static inline int crypto_user_err(int err) +{ + if (err == -EAGAIN) + return -EBUSY; + + return err; +} + static int hash_alloc_result(struct sock *sk, struct hash_ctx *ctx) { unsigned ds; @@ -136,7 +150,7 @@ static int hash_sendmsg(struct socket *sock, struct msghdr *msg, unlock: release_sock(sk); - return err ?: copied; + return err ? crypto_user_err(err) : copied; } static ssize_t hash_sendpage(struct socket *sock, struct page *page, @@ -188,7 +202,7 @@ static ssize_t hash_sendpage(struct socket *sock, struct page *page, unlock: release_sock(sk); - return err ?: size; + return err ? crypto_user_err(err) : size; } static int hash_recvmsg(struct socket *sock, struct msghdr *msg, size_t len, @@ -236,7 +250,7 @@ static int hash_recvmsg(struct socket *sock, struct msghdr *msg, size_t len, hash_free_result(sk, ctx); release_sock(sk); - return err ?: len; + return err ? 
crypto_user_err(err) : len; } static int hash_accept(struct socket *sock, struct socket *newsock, int flags, diff --git a/crypto/cryptd.c b/crypto/cryptd.c index 0508c48..d1dbdce 100644 --- a/crypto/cryptd.c +++ b/crypto/cryptd.c @@ -137,16 +137,14 @@ static int cryptd_enqueue_request(struct cryptd_queue *queue, int cpu, err; struct cryptd_cpu_queue *cpu_queue; atomic_t *refcnt; - bool may_backlog; cpu = get_cpu(); cpu_queue = this_cpu_ptr(queue->cpu_queue); err = crypto_enqueue_request(&cpu_queue->queue, request); refcnt = crypto_tfm_ctx(request->tfm); - may_backlog = request->flags & CRYPTO_TFM_REQ_MAY_BACKLOG; - if (err == -EBUSY && !may_backlog) + if (err == -EAGAIN) goto out_put_cpu; queue_work_on(cpu, kcrypto_wq, &cpu_queue->work); -- 2.1.4 |
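What the new convention means for callers is sketched below (not part of the patch; req and wait are assumed to be set up as usual): a caller that did not ask for backlogging can now key off -EAGAIN alone, while -EBUSY unambiguously means the request was accepted onto the backlog.

	err = crypto_ahash_update(req);
	switch (err) {
	case -EAGAIN:
		/* provider transiently busy, nothing was queued: back off and retry */
		break;
	case -EINPROGRESS:
	case -EBUSY:
		/* request is in flight; -EBUSY now always means it was backlogged */
		err = crypto_wait_req(err, &wait);
		break;
	default:
		/* synchronous completion or hard error */
		break;
	}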
From: Gilad Ben-Y. <gi...@be...> - 2017-08-08 12:22:26
|
Replace -EBUSY with -EAGAIN when reporting transient busy indication in the absence of backlog. Signed-off-by: Gilad Ben-Yossef <gi...@be...> --- drivers/crypto/ccp/ccp-crypto-main.c | 8 +++----- drivers/crypto/ccp/ccp-dev.c | 7 +++++-- 2 files changed, 8 insertions(+), 7 deletions(-) diff --git a/drivers/crypto/ccp/ccp-crypto-main.c b/drivers/crypto/ccp/ccp-crypto-main.c index 35a9de7..403ff0a 100644 --- a/drivers/crypto/ccp/ccp-crypto-main.c +++ b/drivers/crypto/ccp/ccp-crypto-main.c @@ -222,9 +222,10 @@ static int ccp_crypto_enqueue_cmd(struct ccp_crypto_cmd *crypto_cmd) /* Check if the cmd can/should be queued */ if (req_queue.cmd_count >= CCP_CRYPTO_MAX_QLEN) { - ret = -EBUSY; - if (!(crypto_cmd->cmd->flags & CCP_CMD_MAY_BACKLOG)) + if (!(crypto_cmd->cmd->flags & CCP_CMD_MAY_BACKLOG)) { + ret = -EAGAIN; goto e_lock; + } } /* Look for an entry with the same tfm. If there is a cmd @@ -243,9 +244,6 @@ static int ccp_crypto_enqueue_cmd(struct ccp_crypto_cmd *crypto_cmd) ret = ccp_enqueue_cmd(crypto_cmd->cmd); if (!ccp_crypto_success(ret)) goto e_lock; /* Error, don't queue it */ - if ((ret == -EBUSY) && - !(crypto_cmd->cmd->flags & CCP_CMD_MAY_BACKLOG)) - goto e_lock; /* Not backlogging, don't queue it */ } if (req_queue.cmd_count >= CCP_CRYPTO_MAX_QLEN) { diff --git a/drivers/crypto/ccp/ccp-dev.c b/drivers/crypto/ccp/ccp-dev.c index 4e029b1..3d637e3 100644 --- a/drivers/crypto/ccp/ccp-dev.c +++ b/drivers/crypto/ccp/ccp-dev.c @@ -292,9 +292,12 @@ int ccp_enqueue_cmd(struct ccp_cmd *cmd) i = ccp->cmd_q_count; if (ccp->cmd_count >= MAX_CMD_QLEN) { - ret = -EBUSY; - if (cmd->flags & CCP_CMD_MAY_BACKLOG) + if (cmd->flags & CCP_CMD_MAY_BACKLOG) { + ret = -EBUSY; list_add_tail(&cmd->entry, &ccp->backlog); + } else { + ret = -EAGAIN; + } } else { ret = -EINPROGRESS; ccp->cmd_count++; -- 2.1.4 |
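On the provider side the convention looks like the sketch below. The helpers my_queue_full(), my_backlog_add() and my_queue_add() are made-up names used purely for illustration, not ccp functions.

	static int my_drv_enqueue(struct crypto_async_request *req)
	{
		if (my_queue_full()) {
			if (!(req->flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
				return -EAGAIN;	/* transiently busy, nothing queued */
			my_backlog_add(req);
			return -EBUSY;		/* accepted onto the backlog */
		}
		my_queue_add(req);
		return -EINPROGRESS;		/* accepted for asynchronous processing */
	}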
From: Gilad Ben-Y. <gi...@be...> - 2017-08-08 12:22:26
|
Now that -EBUSY return code only indicates backlog queueing we can safely remove the now redundant check for the CRYPTO_TFM_REQ_MAY_BACKLOG flag when -EBUSY is returned. Signed-off-by: Gilad Ben-Yossef <gi...@be...> --- crypto/ahash.c | 12 +++--------- crypto/cts.c | 6 ++---- crypto/lrw.c | 8 ++------ crypto/rsa-pkcs1pad.c | 16 ++++------------ crypto/xts.c | 8 ++------ 5 files changed, 13 insertions(+), 37 deletions(-) diff --git a/crypto/ahash.c b/crypto/ahash.c index 826cd7a..d63eeef 100644 --- a/crypto/ahash.c +++ b/crypto/ahash.c @@ -334,9 +334,7 @@ static int ahash_op_unaligned(struct ahash_request *req, return err; err = op(req); - if (err == -EINPROGRESS || - (err == -EBUSY && (ahash_request_flags(req) & - CRYPTO_TFM_REQ_MAY_BACKLOG))) + if (err == -EINPROGRESS || err == -EBUSY) return err; ahash_restore_req(req, err); @@ -394,9 +392,7 @@ static int ahash_def_finup_finish1(struct ahash_request *req, int err) req->base.complete = ahash_def_finup_done2; err = crypto_ahash_reqtfm(req)->final(req); - if (err == -EINPROGRESS || - (err == -EBUSY && (ahash_request_flags(req) & - CRYPTO_TFM_REQ_MAY_BACKLOG))) + if (err == -EINPROGRESS || err == -EBUSY) return err; out: @@ -432,9 +428,7 @@ static int ahash_def_finup(struct ahash_request *req) return err; err = tfm->update(req); - if (err == -EINPROGRESS || - (err == -EBUSY && (ahash_request_flags(req) & - CRYPTO_TFM_REQ_MAY_BACKLOG))) + if (err == -EINPROGRESS || err == -EBUSY) return err; return ahash_def_finup_finish1(req, err); diff --git a/crypto/cts.c b/crypto/cts.c index 243f591..4773c18 100644 --- a/crypto/cts.c +++ b/crypto/cts.c @@ -136,8 +136,7 @@ static void crypto_cts_encrypt_done(struct crypto_async_request *areq, int err) goto out; err = cts_cbc_encrypt(req); - if (err == -EINPROGRESS || - (err == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) + if (err == -EINPROGRESS || err == -EBUSY) return; out: @@ -229,8 +228,7 @@ static void crypto_cts_decrypt_done(struct crypto_async_request *areq, int err) goto out; err = cts_cbc_decrypt(req); - if (err == -EINPROGRESS || - (err == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) + if (err == -EINPROGRESS || err == -EBUSY) return; out: diff --git a/crypto/lrw.c b/crypto/lrw.c index a8bfae4..695cea9 100644 --- a/crypto/lrw.c +++ b/crypto/lrw.c @@ -328,9 +328,7 @@ static int do_encrypt(struct skcipher_request *req, int err) crypto_skcipher_encrypt(subreq) ?: post_crypt(req); - if (err == -EINPROGRESS || - (err == -EBUSY && - req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) + if (err == -EINPROGRESS || err == -EBUSY) return err; } @@ -380,9 +378,7 @@ static int do_decrypt(struct skcipher_request *req, int err) crypto_skcipher_decrypt(subreq) ?: post_crypt(req); - if (err == -EINPROGRESS || - (err == -EBUSY && - req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) + if (err == -EINPROGRESS || err == -EBUSY) return err; } diff --git a/crypto/rsa-pkcs1pad.c b/crypto/rsa-pkcs1pad.c index 407c64b..2908f93 100644 --- a/crypto/rsa-pkcs1pad.c +++ b/crypto/rsa-pkcs1pad.c @@ -279,9 +279,7 @@ static int pkcs1pad_encrypt(struct akcipher_request *req) req->dst, ctx->key_size - 1, req->dst_len); err = crypto_akcipher_encrypt(&req_ctx->child_req); - if (err != -EINPROGRESS && - (err != -EBUSY || - !(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))) + if (err != -EINPROGRESS && err != -EBUSY) return pkcs1pad_encrypt_sign_complete(req, err); return err; @@ -383,9 +381,7 @@ static int pkcs1pad_decrypt(struct akcipher_request *req) ctx->key_size); err = crypto_akcipher_decrypt(&req_ctx->child_req); 
- if (err != -EINPROGRESS && - (err != -EBUSY || - !(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))) + if (err != -EINPROGRESS && err != -EBUSY) return pkcs1pad_decrypt_complete(req, err); return err; @@ -440,9 +436,7 @@ static int pkcs1pad_sign(struct akcipher_request *req) req->dst, ctx->key_size - 1, req->dst_len); err = crypto_akcipher_sign(&req_ctx->child_req); - if (err != -EINPROGRESS && - (err != -EBUSY || - !(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))) + if (err != -EINPROGRESS && err != -EBUSY) return pkcs1pad_encrypt_sign_complete(req, err); return err; @@ -561,9 +555,7 @@ static int pkcs1pad_verify(struct akcipher_request *req) ctx->key_size); err = crypto_akcipher_verify(&req_ctx->child_req); - if (err != -EINPROGRESS && - (err != -EBUSY || - !(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))) + if (err != -EINPROGRESS && err != -EBUSY) return pkcs1pad_verify_complete(req, err); return err; diff --git a/crypto/xts.c b/crypto/xts.c index d86c11a..af68012 100644 --- a/crypto/xts.c +++ b/crypto/xts.c @@ -269,9 +269,7 @@ static int do_encrypt(struct skcipher_request *req, int err) crypto_skcipher_encrypt(subreq) ?: post_crypt(req); - if (err == -EINPROGRESS || - (err == -EBUSY && - req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) + if (err == -EINPROGRESS || err == -EBUSY) return err; } @@ -321,9 +319,7 @@ static int do_decrypt(struct skcipher_request *req, int err) crypto_skcipher_decrypt(subreq) ?: post_crypt(req); - if (err == -EINPROGRESS || - (err == -EBUSY && - req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) + if (err == -EINPROGRESS || err == -EBUSY) return err; } -- 2.1.4 |
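The resulting idiom in template code, sketched for illustration only (my_finish() is a placeholder for whatever post-processing a given template does):

	err = crypto_skcipher_encrypt(subreq);
	if (err == -EINPROGRESS || err == -EBUSY)
		return err;	/* completion callback will run later, backlogged or not */

	return my_finish(req, err);	/* result is already available synchronously */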
From: Gilad Ben-Y. <gi...@be...> - 2017-08-08 12:22:26
|
algif starts several async crypto ops and waits for their completion. Move it over to generic code doing the same. Signed-off-by: Gilad Ben-Yossef <gi...@be...> --- crypto/af_alg.c | 27 --------------------------- crypto/algif_aead.c | 14 +++++++------- crypto/algif_hash.c | 30 ++++++++++++++---------------- crypto/algif_skcipher.c | 15 +++++++-------- include/crypto/if_alg.h | 13 ------------- 5 files changed, 28 insertions(+), 71 deletions(-) diff --git a/crypto/af_alg.c b/crypto/af_alg.c index 92a3d54..887f75c 100644 --- a/crypto/af_alg.c +++ b/crypto/af_alg.c @@ -480,33 +480,6 @@ int af_alg_cmsg_send(struct msghdr *msg, struct af_alg_control *con) } EXPORT_SYMBOL_GPL(af_alg_cmsg_send); -int af_alg_wait_for_completion(int err, struct af_alg_completion *completion) -{ - switch (err) { - case -EINPROGRESS: - case -EBUSY: - wait_for_completion(&completion->completion); - reinit_completion(&completion->completion); - err = completion->err; - break; - }; - - return err; -} -EXPORT_SYMBOL_GPL(af_alg_wait_for_completion); - -void af_alg_complete(struct crypto_async_request *req, int err) -{ - struct af_alg_completion *completion = req->data; - - if (err == -EINPROGRESS) - return; - - completion->err = err; - complete(&completion->completion); -} -EXPORT_SYMBOL_GPL(af_alg_complete); - static int __init af_alg_init(void) { int err = proto_register(&alg_proto, 0); diff --git a/crypto/algif_aead.c b/crypto/algif_aead.c index 9755aac..36be369 100644 --- a/crypto/algif_aead.c +++ b/crypto/algif_aead.c @@ -78,7 +78,7 @@ struct aead_ctx { void *iv; size_t aead_assoclen; - struct af_alg_completion completion; /* sync work queue */ + struct crypto_wait wait; size_t used; /* TX bytes sent to kernel */ size_t rcvused; /* total RX bytes to be processed by kernel */ @@ -751,11 +751,11 @@ static int _aead_recvmsg(struct socket *sock, struct msghdr *msg, /* Synchronous operation */ aead_request_set_callback(&areq->aead_req, CRYPTO_TFM_REQ_MAY_BACKLOG, - af_alg_complete, &ctx->completion); - err = af_alg_wait_for_completion(ctx->enc ? - crypto_aead_encrypt(&areq->aead_req) : - crypto_aead_decrypt(&areq->aead_req), - &ctx->completion); + crypto_req_done, &ctx->wait); + err = crypto_wait_req(ctx->enc ? 
+ crypto_aead_encrypt(&areq->aead_req) : + crypto_aead_decrypt(&areq->aead_req), + &ctx->wait); } /* AIO operation in progress */ @@ -1036,7 +1036,7 @@ static int aead_accept_parent_nokey(void *private, struct sock *sk) ctx->merge = 0; ctx->enc = 0; ctx->aead_assoclen = 0; - af_alg_init_completion(&ctx->completion); + crypto_init_wait(&ctx->wait); ask->private = ctx; diff --git a/crypto/algif_hash.c b/crypto/algif_hash.c index 3b3c154..d2ab8de 100644 --- a/crypto/algif_hash.c +++ b/crypto/algif_hash.c @@ -26,7 +26,7 @@ struct hash_ctx { u8 *result; - struct af_alg_completion completion; + struct crypto_wait wait; unsigned int len; bool more; @@ -102,8 +102,7 @@ static int hash_sendmsg(struct socket *sock, struct msghdr *msg, if ((msg->msg_flags & MSG_MORE)) hash_free_result(sk, ctx); - err = af_alg_wait_for_completion(crypto_ahash_init(&ctx->req), - &ctx->completion); + err = crypto_wait_req(crypto_ahash_init(&ctx->req), &ctx->wait); if (err) goto unlock; } @@ -124,8 +123,8 @@ static int hash_sendmsg(struct socket *sock, struct msghdr *msg, ahash_request_set_crypt(&ctx->req, ctx->sgl.sg, NULL, len); - err = af_alg_wait_for_completion(crypto_ahash_update(&ctx->req), - &ctx->completion); + err = crypto_wait_req(crypto_ahash_update(&ctx->req), + &ctx->wait); af_alg_free_sg(&ctx->sgl); if (err) goto unlock; @@ -143,8 +142,8 @@ static int hash_sendmsg(struct socket *sock, struct msghdr *msg, goto unlock; ahash_request_set_crypt(&ctx->req, NULL, ctx->result, 0); - err = af_alg_wait_for_completion(crypto_ahash_final(&ctx->req), - &ctx->completion); + err = crypto_wait_req(crypto_ahash_final(&ctx->req), + &ctx->wait); } unlock: @@ -185,7 +184,7 @@ static ssize_t hash_sendpage(struct socket *sock, struct page *page, } else { if (!ctx->more) { err = crypto_ahash_init(&ctx->req); - err = af_alg_wait_for_completion(err, &ctx->completion); + err = crypto_wait_req(err, &ctx->wait); if (err) goto unlock; } @@ -193,7 +192,7 @@ static ssize_t hash_sendpage(struct socket *sock, struct page *page, err = crypto_ahash_update(&ctx->req); } - err = af_alg_wait_for_completion(err, &ctx->completion); + err = crypto_wait_req(err, &ctx->wait); if (err) goto unlock; @@ -229,17 +228,16 @@ static int hash_recvmsg(struct socket *sock, struct msghdr *msg, size_t len, ahash_request_set_crypt(&ctx->req, NULL, ctx->result, 0); if (!result && !ctx->more) { - err = af_alg_wait_for_completion( - crypto_ahash_init(&ctx->req), - &ctx->completion); + err = crypto_wait_req(crypto_ahash_init(&ctx->req), + &ctx->wait); if (err) goto unlock; } if (!result || ctx->more) { ctx->more = 0; - err = af_alg_wait_for_completion(crypto_ahash_final(&ctx->req), - &ctx->completion); + err = crypto_wait_req(crypto_ahash_final(&ctx->req), + &ctx->wait); if (err) goto unlock; } @@ -490,13 +488,13 @@ static int hash_accept_parent_nokey(void *private, struct sock *sk) ctx->result = NULL; ctx->len = len; ctx->more = 0; - af_alg_init_completion(&ctx->completion); + crypto_init_wait(&ctx->wait); ask->private = ctx; ahash_request_set_tfm(&ctx->req, hash); ahash_request_set_callback(&ctx->req, CRYPTO_TFM_REQ_MAY_BACKLOG, - af_alg_complete, &ctx->completion); + crypto_req_done, &ctx->wait); sk->sk_destruct = hash_sock_destruct; diff --git a/crypto/algif_skcipher.c b/crypto/algif_skcipher.c index 968d094..ebbd433 100644 --- a/crypto/algif_skcipher.c +++ b/crypto/algif_skcipher.c @@ -75,7 +75,7 @@ struct skcipher_ctx { void *iv; - struct af_alg_completion completion; + struct crypto_wait wait; size_t used; size_t rcvused; @@ -677,12 +677,11 @@ static int 
_skcipher_recvmsg(struct socket *sock, struct msghdr *msg, skcipher_request_set_callback(&areq->req, CRYPTO_TFM_REQ_MAY_SLEEP | CRYPTO_TFM_REQ_MAY_BACKLOG, - af_alg_complete, - &ctx->completion); - err = af_alg_wait_for_completion(ctx->enc ? - crypto_skcipher_encrypt(&areq->req) : - crypto_skcipher_decrypt(&areq->req), - &ctx->completion); + crypto_req_done, &ctx->wait); + err = crypto_wait_req(ctx->enc ? + crypto_skcipher_encrypt(&areq->req) : + crypto_skcipher_decrypt(&areq->req), + &ctx->wait); } /* AIO operation in progress */ @@ -950,7 +949,7 @@ static int skcipher_accept_parent_nokey(void *private, struct sock *sk) ctx->more = 0; ctx->merge = 0; ctx->enc = 0; - af_alg_init_completion(&ctx->completion); + crypto_init_wait(&ctx->wait); ask->private = ctx; diff --git a/include/crypto/if_alg.h b/include/crypto/if_alg.h index e2b9c6f..14b79e5 100644 --- a/include/crypto/if_alg.h +++ b/include/crypto/if_alg.h @@ -37,11 +37,6 @@ struct alg_sock { void *private; }; -struct af_alg_completion { - struct completion completion; - int err; -}; - struct af_alg_control { struct af_alg_iv *iv; int op; @@ -81,17 +76,9 @@ void af_alg_link_sg(struct af_alg_sgl *sgl_prev, struct af_alg_sgl *sgl_new); int af_alg_cmsg_send(struct msghdr *msg, struct af_alg_control *con); -int af_alg_wait_for_completion(int err, struct af_alg_completion *completion); -void af_alg_complete(struct crypto_async_request *req, int err); - static inline struct alg_sock *alg_sk(struct sock *sk) { return (struct alg_sock *)sk; } -static inline void af_alg_init_completion(struct af_alg_completion *completion) -{ - init_completion(&completion->completion); -} - #endif /* _CRYPTO_IF_ALG_H */ -- 2.1.4 |
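One property the algif_hash conversion relies on, shown as a sketch (tfm, req, sg and the output buffer are assumed set up; error paths trimmed): crypto_wait_req() re-arms the completion after each wait, so a single crypto_wait object can carry a whole init/update/final sequence.

	struct crypto_wait wait;
	int err;

	crypto_init_wait(&wait);
	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   crypto_req_done, &wait);
	err = crypto_wait_req(crypto_ahash_init(req), &wait);
	if (!err)
		err = crypto_wait_req(crypto_ahash_update(req), &wait);
	if (!err)
		err = crypto_wait_req(crypto_ahash_final(req), &wait);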