This list is closed, nobody may subscribe to it.

| Year | Jan | Feb | Mar | Apr | May | Jun | Jul | Aug | Sep | Oct | Nov | Dec |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
| 2007 |  | (1) |  |  |  |  |  |  |  |  |  | (1) |
| 2009 |  |  |  | (1) | (1) | (2) |  |  |  |  |  |  |
| 2011 |  |  | (1) |  |  |  |  |  |  |  |  |  |
| 2013 |  |  | (7) |  | (7) | (7) | (26) |  | (7) | (1) | (35) | (18) |
| 2014 | (1) | (2) | (3) |  | (16) | (35) | (103) | (45) | (226) | (200) | (66) | (42) |
| 2015 | (47) | (3) | (6) | (14) | (38) | (10) | (10) | (15) | (23) | (78) | (56) | (70) |
| 2016 | (9) | (8) | (15) | (18) | (78) | (39) | (3) | (136) | (134) | (19) | (48) | (30) |
| 2017 | (33) | (35) | (100) | (87) | (169) | (119) | (165) | (241) | (128) | (42) |  |  |
From: Gilad Ben-Y. <gi...@be...> - 2017-08-14 15:24:03
|
testmgr is starting async. crypto ops and waiting for them to complete.
Move it over to generic code doing the same.
This also provides a test of the generic crypto async. wait code.
Signed-off-by: Gilad Ben-Yossef <gi...@be...>
---
crypto/testmgr.c | 204 ++++++++++++++++++-------------------------------------
1 file changed, 66 insertions(+), 138 deletions(-)
diff --git a/crypto/testmgr.c b/crypto/testmgr.c
index 7125ba3..a65b4d5 100644
--- a/crypto/testmgr.c
+++ b/crypto/testmgr.c
@@ -76,11 +76,6 @@ int alg_test(const char *driver, const char *alg, u32 type, u32 mask)
#define ENCRYPT 1
#define DECRYPT 0
-struct tcrypt_result {
- struct completion completion;
- int err;
-};
-
struct aead_test_suite {
struct {
const struct aead_testvec *vecs;
@@ -155,17 +150,6 @@ static void hexdump(unsigned char *buf, unsigned int len)
buf, len, false);
}
-static void tcrypt_complete(struct crypto_async_request *req, int err)
-{
- struct tcrypt_result *res = req->data;
-
- if (err == -EINPROGRESS)
- return;
-
- res->err = err;
- complete(&res->completion);
-}
-
static int testmgr_alloc_buf(char *buf[XBUFSIZE])
{
int i;
@@ -193,20 +177,10 @@ static void testmgr_free_buf(char *buf[XBUFSIZE])
free_page((unsigned long)buf[i]);
}
-static int wait_async_op(struct tcrypt_result *tr, int ret)
-{
- if (ret == -EINPROGRESS || ret == -EBUSY) {
- wait_for_completion(&tr->completion);
- reinit_completion(&tr->completion);
- ret = tr->err;
- }
- return ret;
-}
-
static int ahash_partial_update(struct ahash_request **preq,
struct crypto_ahash *tfm, const struct hash_testvec *template,
void *hash_buff, int k, int temp, struct scatterlist *sg,
- const char *algo, char *result, struct tcrypt_result *tresult)
+ const char *algo, char *result, struct crypto_wait *wait)
{
char *state;
struct ahash_request *req;
@@ -236,7 +210,7 @@ static int ahash_partial_update(struct ahash_request **preq,
}
ahash_request_set_callback(req,
CRYPTO_TFM_REQ_MAY_BACKLOG,
- tcrypt_complete, tresult);
+ crypto_req_done, wait);
memcpy(hash_buff, template->plaintext + temp,
template->tap[k]);
@@ -247,7 +221,7 @@ static int ahash_partial_update(struct ahash_request **preq,
pr_err("alg: hash: Failed to import() for %s\n", algo);
goto out;
}
- ret = wait_async_op(tresult, crypto_ahash_update(req));
+ ret = crypto_wait_req(crypto_ahash_update(req), wait);
if (ret)
goto out;
*preq = req;
@@ -272,7 +246,7 @@ static int __test_hash(struct crypto_ahash *tfm,
char *result;
char *key;
struct ahash_request *req;
- struct tcrypt_result tresult;
+ struct crypto_wait wait;
void *hash_buff;
char *xbuf[XBUFSIZE];
int ret = -ENOMEM;
@@ -286,7 +260,7 @@ static int __test_hash(struct crypto_ahash *tfm,
if (testmgr_alloc_buf(xbuf))
goto out_nobuf;
- init_completion(&tresult.completion);
+ crypto_init_wait(&wait);
req = ahash_request_alloc(tfm, GFP_KERNEL);
if (!req) {
@@ -295,7 +269,7 @@ static int __test_hash(struct crypto_ahash *tfm,
goto out_noreq;
}
ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
- tcrypt_complete, &tresult);
+ crypto_req_done, &wait);
j = 0;
for (i = 0; i < tcount; i++) {
@@ -335,26 +309,26 @@ static int __test_hash(struct crypto_ahash *tfm,
ahash_request_set_crypt(req, sg, result, template[i].psize);
if (use_digest) {
- ret = wait_async_op(&tresult, crypto_ahash_digest(req));
+ ret = crypto_wait_req(crypto_ahash_digest(req), &wait);
if (ret) {
pr_err("alg: hash: digest failed on test %d "
"for %s: ret=%d\n", j, algo, -ret);
goto out;
}
} else {
- ret = wait_async_op(&tresult, crypto_ahash_init(req));
+ ret = crypto_wait_req(crypto_ahash_init(req), &wait);
if (ret) {
pr_err("alg: hash: init failed on test %d "
"for %s: ret=%d\n", j, algo, -ret);
goto out;
}
- ret = wait_async_op(&tresult, crypto_ahash_update(req));
+ ret = crypto_wait_req(crypto_ahash_update(req), &wait);
if (ret) {
pr_err("alg: hash: update failed on test %d "
"for %s: ret=%d\n", j, algo, -ret);
goto out;
}
- ret = wait_async_op(&tresult, crypto_ahash_final(req));
+ ret = crypto_wait_req(crypto_ahash_final(req), &wait);
if (ret) {
pr_err("alg: hash: final failed on test %d "
"for %s: ret=%d\n", j, algo, -ret);
@@ -420,22 +394,10 @@ static int __test_hash(struct crypto_ahash *tfm,
}
ahash_request_set_crypt(req, sg, result, template[i].psize);
- ret = crypto_ahash_digest(req);
- switch (ret) {
- case 0:
- break;
- case -EINPROGRESS:
- case -EBUSY:
- wait_for_completion(&tresult.completion);
- reinit_completion(&tresult.completion);
- ret = tresult.err;
- if (!ret)
- break;
- /* fall through */
- default:
- printk(KERN_ERR "alg: hash: digest failed "
- "on chunking test %d for %s: "
- "ret=%d\n", j, algo, -ret);
+ ret = crypto_wait_req(crypto_ahash_digest(req), &wait);
+ if (ret) {
+ pr_err("alg: hash: digest failed on chunking test %d for %s: ret=%d\n",
+ j, algo, -ret);
goto out;
}
@@ -486,13 +448,13 @@ static int __test_hash(struct crypto_ahash *tfm,
}
ahash_request_set_crypt(req, sg, result, template[i].tap[0]);
- ret = wait_async_op(&tresult, crypto_ahash_init(req));
+ ret = crypto_wait_req(crypto_ahash_init(req), &wait);
if (ret) {
pr_err("alg: hash: init failed on test %d for %s: ret=%d\n",
j, algo, -ret);
goto out;
}
- ret = wait_async_op(&tresult, crypto_ahash_update(req));
+ ret = crypto_wait_req(crypto_ahash_update(req), &wait);
if (ret) {
pr_err("alg: hash: update failed on test %d for %s: ret=%d\n",
j, algo, -ret);
@@ -503,7 +465,7 @@ static int __test_hash(struct crypto_ahash *tfm,
for (k = 1; k < template[i].np; k++) {
ret = ahash_partial_update(&req, tfm, &template[i],
hash_buff, k, temp, &sg[0], algo, result,
- &tresult);
+ &wait);
if (ret) {
pr_err("alg: hash: partial update failed on test %d for %s: ret=%d\n",
j, algo, -ret);
@@ -511,7 +473,7 @@ static int __test_hash(struct crypto_ahash *tfm,
}
temp += template[i].tap[k];
}
- ret = wait_async_op(&tresult, crypto_ahash_final(req));
+ ret = crypto_wait_req(crypto_ahash_final(req), &wait);
if (ret) {
pr_err("alg: hash: final failed on test %d for %s: ret=%d\n",
j, algo, -ret);
@@ -580,7 +542,7 @@ static int __test_aead(struct crypto_aead *tfm, int enc,
struct scatterlist *sg;
struct scatterlist *sgout;
const char *e, *d;
- struct tcrypt_result result;
+ struct crypto_wait wait;
unsigned int authsize, iv_len;
void *input;
void *output;
@@ -619,7 +581,7 @@ static int __test_aead(struct crypto_aead *tfm, int enc,
else
e = "decryption";
- init_completion(&result.completion);
+ crypto_init_wait(&wait);
req = aead_request_alloc(tfm, GFP_KERNEL);
if (!req) {
@@ -629,7 +591,7 @@ static int __test_aead(struct crypto_aead *tfm, int enc,
}
aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
- tcrypt_complete, &result);
+ crypto_req_done, &wait);
iv_len = crypto_aead_ivsize(tfm);
@@ -709,7 +671,8 @@ static int __test_aead(struct crypto_aead *tfm, int enc,
aead_request_set_ad(req, template[i].alen);
- ret = enc ? crypto_aead_encrypt(req) : crypto_aead_decrypt(req);
+ ret = crypto_wait_req(enc ? crypto_aead_encrypt(req)
+ : crypto_aead_decrypt(req), &wait);
switch (ret) {
case 0:
@@ -722,13 +685,6 @@ static int __test_aead(struct crypto_aead *tfm, int enc,
goto out;
}
break;
- case -EINPROGRESS:
- case -EBUSY:
- wait_for_completion(&result.completion);
- reinit_completion(&result.completion);
- ret = result.err;
- if (!ret)
- break;
case -EBADMSG:
if (template[i].novrfy)
/* verification failure was expected */
@@ -866,7 +822,8 @@ static int __test_aead(struct crypto_aead *tfm, int enc,
aead_request_set_ad(req, template[i].alen);
- ret = enc ? crypto_aead_encrypt(req) : crypto_aead_decrypt(req);
+ ret = crypto_wait_req(enc ? crypto_aead_encrypt(req)
+ : crypto_aead_decrypt(req), &wait);
switch (ret) {
case 0:
@@ -879,13 +836,6 @@ static int __test_aead(struct crypto_aead *tfm, int enc,
goto out;
}
break;
- case -EINPROGRESS:
- case -EBUSY:
- wait_for_completion(&result.completion);
- reinit_completion(&result.completion);
- ret = result.err;
- if (!ret)
- break;
case -EBADMSG:
if (template[i].novrfy)
/* verification failure was expected */
@@ -1083,7 +1033,7 @@ static int __test_skcipher(struct crypto_skcipher *tfm, int enc,
struct scatterlist sg[8];
struct scatterlist sgout[8];
const char *e, *d;
- struct tcrypt_result result;
+ struct crypto_wait wait;
void *data;
char iv[MAX_IVLEN];
char *xbuf[XBUFSIZE];
@@ -1107,7 +1057,7 @@ static int __test_skcipher(struct crypto_skcipher *tfm, int enc,
else
e = "decryption";
- init_completion(&result.completion);
+ crypto_init_wait(&wait);
req = skcipher_request_alloc(tfm, GFP_KERNEL);
if (!req) {
@@ -1117,7 +1067,7 @@ static int __test_skcipher(struct crypto_skcipher *tfm, int enc,
}
skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
- tcrypt_complete, &result);
+ crypto_req_done, &wait);
j = 0;
for (i = 0; i < tcount; i++) {
@@ -1164,21 +1114,10 @@ static int __test_skcipher(struct crypto_skcipher *tfm, int enc,
skcipher_request_set_crypt(req, sg, (diff_dst) ? sgout : sg,
template[i].ilen, iv);
- ret = enc ? crypto_skcipher_encrypt(req) :
- crypto_skcipher_decrypt(req);
+ ret = crypto_wait_req(enc ? crypto_skcipher_encrypt(req) :
+ crypto_skcipher_decrypt(req), &wait);
- switch (ret) {
- case 0:
- break;
- case -EINPROGRESS:
- case -EBUSY:
- wait_for_completion(&result.completion);
- reinit_completion(&result.completion);
- ret = result.err;
- if (!ret)
- break;
- /* fall through */
- default:
+ if (ret) {
pr_err("alg: skcipher%s: %s failed on test %d for %s: ret=%d\n",
d, e, j, algo, -ret);
goto out;
@@ -1272,21 +1211,10 @@ static int __test_skcipher(struct crypto_skcipher *tfm, int enc,
skcipher_request_set_crypt(req, sg, (diff_dst) ? sgout : sg,
template[i].ilen, iv);
- ret = enc ? crypto_skcipher_encrypt(req) :
- crypto_skcipher_decrypt(req);
+ ret = crypto_wait_req(enc ? crypto_skcipher_encrypt(req) :
+ crypto_skcipher_decrypt(req), &wait);
- switch (ret) {
- case 0:
- break;
- case -EINPROGRESS:
- case -EBUSY:
- wait_for_completion(&result.completion);
- reinit_completion(&result.completion);
- ret = result.err;
- if (!ret)
- break;
- /* fall through */
- default:
+ if (ret) {
pr_err("alg: skcipher%s: %s failed on chunk test %d for %s: ret=%d\n",
d, e, j, algo, -ret);
goto out;
@@ -1462,7 +1390,7 @@ static int test_acomp(struct crypto_acomp *tfm,
int ret;
struct scatterlist src, dst;
struct acomp_req *req;
- struct tcrypt_result result;
+ struct crypto_wait wait;
output = kmalloc(COMP_BUF_SIZE, GFP_KERNEL);
if (!output)
@@ -1486,7 +1414,7 @@ static int test_acomp(struct crypto_acomp *tfm,
}
memset(output, 0, dlen);
- init_completion(&result.completion);
+ crypto_init_wait(&wait);
sg_init_one(&src, input_vec, ilen);
sg_init_one(&dst, output, dlen);
@@ -1501,9 +1429,9 @@ static int test_acomp(struct crypto_acomp *tfm,
acomp_request_set_params(req, &src, &dst, ilen, dlen);
acomp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
- tcrypt_complete, &result);
+ crypto_req_done, &wait);
- ret = wait_async_op(&result, crypto_acomp_compress(req));
+ ret = crypto_wait_req(crypto_acomp_compress(req), &wait);
if (ret) {
pr_err("alg: acomp: compression failed on test %d for %s: ret=%d\n",
i + 1, algo, -ret);
@@ -1516,10 +1444,10 @@ static int test_acomp(struct crypto_acomp *tfm,
dlen = COMP_BUF_SIZE;
sg_init_one(&src, output, ilen);
sg_init_one(&dst, decomp_out, dlen);
- init_completion(&result.completion);
+ crypto_init_wait(&wait);
acomp_request_set_params(req, &src, &dst, ilen, dlen);
- ret = wait_async_op(&result, crypto_acomp_decompress(req));
+ ret = crypto_wait_req(crypto_acomp_decompress(req), &wait);
if (ret) {
pr_err("alg: acomp: compression failed on test %d for %s: ret=%d\n",
i + 1, algo, -ret);
@@ -1563,7 +1491,7 @@ static int test_acomp(struct crypto_acomp *tfm,
}
memset(output, 0, dlen);
- init_completion(&result.completion);
+ crypto_init_wait(&wait);
sg_init_one(&src, input_vec, ilen);
sg_init_one(&dst, output, dlen);
@@ -1578,9 +1506,9 @@ static int test_acomp(struct crypto_acomp *tfm,
acomp_request_set_params(req, &src, &dst, ilen, dlen);
acomp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
- tcrypt_complete, &result);
+ crypto_req_done, &wait);
- ret = wait_async_op(&result, crypto_acomp_decompress(req));
+ ret = crypto_wait_req(crypto_acomp_decompress(req), &wait);
if (ret) {
pr_err("alg: acomp: decompression failed on test %d for %s: ret=%d\n",
i + 1, algo, -ret);
@@ -2000,7 +1928,7 @@ static int do_test_kpp(struct crypto_kpp *tfm, const struct kpp_testvec *vec,
void *a_public = NULL;
void *a_ss = NULL;
void *shared_secret = NULL;
- struct tcrypt_result result;
+ struct crypto_wait wait;
unsigned int out_len_max;
int err = -ENOMEM;
struct scatterlist src, dst;
@@ -2009,7 +1937,7 @@ static int do_test_kpp(struct crypto_kpp *tfm, const struct kpp_testvec *vec,
if (!req)
return err;
- init_completion(&result.completion);
+ crypto_init_wait(&wait);
err = crypto_kpp_set_secret(tfm, vec->secret, vec->secret_size);
if (err < 0)
@@ -2027,10 +1955,10 @@ static int do_test_kpp(struct crypto_kpp *tfm, const struct kpp_testvec *vec,
sg_init_one(&dst, output_buf, out_len_max);
kpp_request_set_output(req, &dst, out_len_max);
kpp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
- tcrypt_complete, &result);
+ crypto_req_done, &wait);
/* Compute party A's public key */
- err = wait_async_op(&result, crypto_kpp_generate_public_key(req));
+ err = crypto_wait_req(crypto_kpp_generate_public_key(req), &wait);
if (err) {
pr_err("alg: %s: Party A: generate public key test failed. err %d\n",
alg, err);
@@ -2069,8 +1997,8 @@ static int do_test_kpp(struct crypto_kpp *tfm, const struct kpp_testvec *vec,
kpp_request_set_input(req, &src, vec->b_public_size);
kpp_request_set_output(req, &dst, out_len_max);
kpp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
- tcrypt_complete, &result);
- err = wait_async_op(&result, crypto_kpp_compute_shared_secret(req));
+ crypto_req_done, &wait);
+ err = crypto_wait_req(crypto_kpp_compute_shared_secret(req), &wait);
if (err) {
pr_err("alg: %s: Party A: compute shared secret test failed. err %d\n",
alg, err);
@@ -2100,9 +2028,9 @@ static int do_test_kpp(struct crypto_kpp *tfm, const struct kpp_testvec *vec,
kpp_request_set_input(req, &src, vec->expected_a_public_size);
kpp_request_set_output(req, &dst, out_len_max);
kpp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
- tcrypt_complete, &result);
- err = wait_async_op(&result,
- crypto_kpp_compute_shared_secret(req));
+ crypto_req_done, &wait);
+ err = crypto_wait_req(crypto_kpp_compute_shared_secret(req),
+ &wait);
if (err) {
pr_err("alg: %s: Party B: compute shared secret failed. err %d\n",
alg, err);
@@ -2179,7 +2107,7 @@ static int test_akcipher_one(struct crypto_akcipher *tfm,
struct akcipher_request *req;
void *outbuf_enc = NULL;
void *outbuf_dec = NULL;
- struct tcrypt_result result;
+ struct crypto_wait wait;
unsigned int out_len_max, out_len = 0;
int err = -ENOMEM;
struct scatterlist src, dst, src_tab[2];
@@ -2191,7 +2119,7 @@ static int test_akcipher_one(struct crypto_akcipher *tfm,
if (!req)
goto free_xbuf;
- init_completion(&result.completion);
+ crypto_init_wait(&wait);
if (vecs->public_key_vec)
err = crypto_akcipher_set_pub_key(tfm, vecs->key,
@@ -2220,13 +2148,13 @@ static int test_akcipher_one(struct crypto_akcipher *tfm,
akcipher_request_set_crypt(req, src_tab, &dst, vecs->m_size,
out_len_max);
akcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
- tcrypt_complete, &result);
+ crypto_req_done, &wait);
- err = wait_async_op(&result, vecs->siggen_sigver_test ?
- /* Run asymmetric signature generation */
- crypto_akcipher_sign(req) :
- /* Run asymmetric encrypt */
- crypto_akcipher_encrypt(req));
+ err = crypto_wait_req(vecs->siggen_sigver_test ?
+ /* Run asymmetric signature generation */
+ crypto_akcipher_sign(req) :
+ /* Run asymmetric encrypt */
+ crypto_akcipher_encrypt(req), &wait);
if (err) {
pr_err("alg: akcipher: encrypt test failed. err %d\n", err);
goto free_all;
@@ -2261,14 +2189,14 @@ static int test_akcipher_one(struct crypto_akcipher *tfm,
sg_init_one(&src, xbuf[0], vecs->c_size);
sg_init_one(&dst, outbuf_dec, out_len_max);
- init_completion(&result.completion);
+ crypto_init_wait(&wait);
akcipher_request_set_crypt(req, &src, &dst, vecs->c_size, out_len_max);
- err = wait_async_op(&result, vecs->siggen_sigver_test ?
- /* Run asymmetric signature verification */
- crypto_akcipher_verify(req) :
- /* Run asymmetric decrypt */
- crypto_akcipher_decrypt(req));
+ err = crypto_wait_req(vecs->siggen_sigver_test ?
+ /* Run asymmetric signature verification */
+ crypto_akcipher_verify(req) :
+ /* Run asymmetric decrypt */
+ crypto_akcipher_decrypt(req), &wait);
if (err) {
pr_err("alg: akcipher: decrypt test failed. err %d\n", err);
goto free_all;
--
2.1.4
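
The conversion above reduces every wait in testmgr to one calling pattern. The following is a minimal illustrative sketch of that pattern for a single ahash digest, using the crypto_init_wait()/crypto_req_done()/crypto_wait_req() helpers introduced elsewhere in this series; the wrapper function and variable names (do_one_digest, sg, len, out) are hypothetical and not part of the patch.

```c
/*
 * Sketch only: one crypto_wait object per request, crypto_req_done()
 * as the completion callback, and crypto_wait_req() turning the async
 * return code into a synchronous result.
 */
#include <linux/crypto.h>
#include <crypto/hash.h>

static int do_one_digest(struct crypto_ahash *tfm, struct scatterlist *sg,
			 unsigned int len, u8 *out)
{
	struct ahash_request *req;
	struct crypto_wait wait;
	int ret;

	crypto_init_wait(&wait);

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   crypto_req_done, &wait);
	ahash_request_set_crypt(req, sg, out, len);

	/* Sleeps on -EINPROGRESS/-EBUSY and returns the real result. */
	ret = crypto_wait_req(crypto_ahash_digest(req), &wait);

	ahash_request_free(req);
	return ret;
}
```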
|
|
From: Gilad Ben-Y. <gi...@be...> - 2017-08-14 15:23:49
|
gcm is starting an async. crypto op and waiting for it to complete.
Move it over to generic code doing the same.
Signed-off-by: Gilad Ben-Yossef <gi...@be...>
---
crypto/gcm.c | 32 ++++++--------------------------
1 file changed, 6 insertions(+), 26 deletions(-)
diff --git a/crypto/gcm.c b/crypto/gcm.c
index 3841b5e..fb923a5 100644
--- a/crypto/gcm.c
+++ b/crypto/gcm.c
@@ -16,7 +16,6 @@
#include <crypto/scatterwalk.h>
#include <crypto/hash.h>
#include "internal.h"
-#include <linux/completion.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
@@ -78,11 +77,6 @@ struct crypto_gcm_req_priv_ctx {
} u;
};
-struct crypto_gcm_setkey_result {
- int err;
- struct completion completion;
-};
-
static struct {
u8 buf[16];
struct scatterlist sg;
@@ -98,17 +92,6 @@ static inline struct crypto_gcm_req_priv_ctx *crypto_gcm_reqctx(
return (void *)PTR_ALIGN((u8 *)aead_request_ctx(req), align + 1);
}
-static void crypto_gcm_setkey_done(struct crypto_async_request *req, int err)
-{
- struct crypto_gcm_setkey_result *result = req->data;
-
- if (err == -EINPROGRESS)
- return;
-
- result->err = err;
- complete(&result->completion);
-}
-
static int crypto_gcm_setkey(struct crypto_aead *aead, const u8 *key,
unsigned int keylen)
{
@@ -119,7 +102,7 @@ static int crypto_gcm_setkey(struct crypto_aead *aead, const u8 *key,
be128 hash;
u8 iv[16];
- struct crypto_gcm_setkey_result result;
+ struct crypto_wait wait;
struct scatterlist sg[1];
struct skcipher_request req;
@@ -140,21 +123,18 @@ static int crypto_gcm_setkey(struct crypto_aead *aead, const u8 *key,
if (!data)
return -ENOMEM;
- init_completion(&data->result.completion);
+ crypto_init_wait(&data->wait);
sg_init_one(data->sg, &data->hash, sizeof(data->hash));
skcipher_request_set_tfm(&data->req, ctr);
skcipher_request_set_callback(&data->req, CRYPTO_TFM_REQ_MAY_SLEEP |
CRYPTO_TFM_REQ_MAY_BACKLOG,
- crypto_gcm_setkey_done,
- &data->result);
+ crypto_req_done,
+ &data->wait);
skcipher_request_set_crypt(&data->req, data->sg, data->sg,
sizeof(data->hash), data->iv);
- err = crypto_skcipher_encrypt(&data->req);
- if (err == -EINPROGRESS || err == -EBUSY) {
- wait_for_completion(&data->result.completion);
- err = data->result.err;
- }
+ err = crypto_wait_req(crypto_skcipher_encrypt(&data->req),
+ &data->wait);
if (err)
goto out;
--
2.1.4
|
|
From: Gilad Ben-Y. <gi...@be...> - 2017-08-14 15:23:37
|
DRBG is starting an async. crypto op and waiting for it to complete.
Move it over to generic code doing the same.
The code now also passes the CRYPTO_TFM_REQ_MAY_SLEEP flag, indicating
that crypto request memory allocation may use GFP_KERNEL, which should
be perfectly fine as the code is obviously sleeping for the
completion of the request anyway.
Signed-off-by: Gilad Ben-Yossef <gi...@be...>
---
crypto/drbg.c | 36 +++++++++---------------------------
include/crypto/drbg.h | 3 +--
2 files changed, 10 insertions(+), 29 deletions(-)
diff --git a/crypto/drbg.c b/crypto/drbg.c
index 633a88e..c522251 100644
--- a/crypto/drbg.c
+++ b/crypto/drbg.c
@@ -1651,16 +1651,6 @@ static int drbg_fini_sym_kernel(struct drbg_state *drbg)
return 0;
}
-static void drbg_skcipher_cb(struct crypto_async_request *req, int error)
-{
- struct drbg_state *drbg = req->data;
-
- if (error == -EINPROGRESS)
- return;
- drbg->ctr_async_err = error;
- complete(&drbg->ctr_completion);
-}
-
static int drbg_init_sym_kernel(struct drbg_state *drbg)
{
struct crypto_cipher *tfm;
@@ -1691,7 +1681,7 @@ static int drbg_init_sym_kernel(struct drbg_state *drbg)
return PTR_ERR(sk_tfm);
}
drbg->ctr_handle = sk_tfm;
- init_completion(&drbg->ctr_completion);
+ crypto_init_wait(&drbg->ctr_wait);
req = skcipher_request_alloc(sk_tfm, GFP_KERNEL);
if (!req) {
@@ -1700,8 +1690,9 @@ static int drbg_init_sym_kernel(struct drbg_state *drbg)
return -ENOMEM;
}
drbg->ctr_req = req;
- skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
- drbg_skcipher_cb, drbg);
+ skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
+ CRYPTO_TFM_REQ_MAY_SLEEP,
+ crypto_req_done, &drbg->ctr_wait);
alignmask = crypto_skcipher_alignmask(sk_tfm);
drbg->ctr_null_value_buf = kzalloc(DRBG_CTR_NULL_LEN + alignmask,
@@ -1762,21 +1753,12 @@ static int drbg_kcapi_sym_ctr(struct drbg_state *drbg,
/* Output buffer may not be valid for SGL, use scratchpad */
skcipher_request_set_crypt(drbg->ctr_req, &sg_in, &sg_out,
cryptlen, drbg->V);
- ret = crypto_skcipher_encrypt(drbg->ctr_req);
- switch (ret) {
- case 0:
- break;
- case -EINPROGRESS:
- case -EBUSY:
- wait_for_completion(&drbg->ctr_completion);
- if (!drbg->ctr_async_err) {
- reinit_completion(&drbg->ctr_completion);
- break;
- }
- default:
+ ret = crypto_wait_req(crypto_skcipher_encrypt(drbg->ctr_req),
+ &drbg->ctr_wait);
+ if (ret)
goto out;
- }
- init_completion(&drbg->ctr_completion);
+
+ crypto_init_wait(&drbg->ctr_wait);
memcpy(outbuf, drbg->outscratchpad, cryptlen);
diff --git a/include/crypto/drbg.h b/include/crypto/drbg.h
index 22f884c..8f94110 100644
--- a/include/crypto/drbg.h
+++ b/include/crypto/drbg.h
@@ -126,8 +126,7 @@ struct drbg_state {
__u8 *ctr_null_value; /* CTR mode aligned zero buf */
__u8 *outscratchpadbuf; /* CTR mode output scratchpad */
__u8 *outscratchpad; /* CTR mode aligned outbuf */
- struct completion ctr_completion; /* CTR mode async handler */
- int ctr_async_err; /* CTR mode async error */
+ struct crypto_wait ctr_wait; /* CTR mode async wait obj */
bool seeded; /* DRBG fully seeded? */
bool pr; /* Prediction resistance enabled? */
--
2.1.4
|
|
From: Gilad Ben-Y. <gi...@be...> - 2017-08-14 15:23:25
|
public_key_verify_signature() is starting an async crypto op and
waiting for it to complete. Move it over to generic code doing
the same.
Signed-off-by: Gilad Ben-Yossef <gi...@be...>
---
crypto/asymmetric_keys/public_key.c | 28 ++++------------------------
1 file changed, 4 insertions(+), 24 deletions(-)
diff --git a/crypto/asymmetric_keys/public_key.c b/crypto/asymmetric_keys/public_key.c
index 3cd6e12..d916235 100644
--- a/crypto/asymmetric_keys/public_key.c
+++ b/crypto/asymmetric_keys/public_key.c
@@ -57,29 +57,13 @@ static void public_key_destroy(void *payload0, void *payload3)
public_key_signature_free(payload3);
}
-struct public_key_completion {
- struct completion completion;
- int err;
-};
-
-static void public_key_verify_done(struct crypto_async_request *req, int err)
-{
- struct public_key_completion *compl = req->data;
-
- if (err == -EINPROGRESS)
- return;
-
- compl->err = err;
- complete(&compl->completion);
-}
-
/*
* Verify a signature using a public key.
*/
int public_key_verify_signature(const struct public_key *pkey,
const struct public_key_signature *sig)
{
- struct public_key_completion compl;
+ struct crypto_wait cwait;
struct crypto_akcipher *tfm;
struct akcipher_request *req;
struct scatterlist sig_sg, digest_sg;
@@ -131,20 +115,16 @@ int public_key_verify_signature(const struct public_key *pkey,
sg_init_one(&digest_sg, output, outlen);
akcipher_request_set_crypt(req, &sig_sg, &digest_sg, sig->s_size,
outlen);
- init_completion(&compl.completion);
+ crypto_init_wait(&cwait);
akcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
CRYPTO_TFM_REQ_MAY_SLEEP,
- public_key_verify_done, &compl);
+ crypto_req_done, &cwait);
/* Perform the verification calculation. This doesn't actually do the
* verification, but rather calculates the hash expected by the
* signature and returns that to us.
*/
- ret = crypto_akcipher_verify(req);
- if ((ret == -EINPROGRESS) || (ret == -EBUSY)) {
- wait_for_completion(&compl.completion);
- ret = compl.err;
- }
+ ret = crypto_wait_req(crypto_akcipher_verify(req), &cwait);
if (ret < 0)
goto out_free_output;
--
2.1.4
|
|
From: Gilad Ben-Y. <gi...@be...> - 2017-08-14 15:23:13
|
algif starts several async crypto ops and waits for their completion.
Move it over to generic code doing the same.
Signed-off-by: Gilad Ben-Yossef <gi...@be...>
---
crypto/af_alg.c | 27 ---------------------------
crypto/algif_aead.c | 8 ++++----
crypto/algif_hash.c | 30 ++++++++++++++----------------
crypto/algif_skcipher.c | 9 ++++-----
include/crypto/if_alg.h | 15 +--------------
5 files changed, 23 insertions(+), 66 deletions(-)
diff --git a/crypto/af_alg.c b/crypto/af_alg.c
index d6936c0..f8917e7 100644
--- a/crypto/af_alg.c
+++ b/crypto/af_alg.c
@@ -481,33 +481,6 @@ int af_alg_cmsg_send(struct msghdr *msg, struct af_alg_control *con)
}
EXPORT_SYMBOL_GPL(af_alg_cmsg_send);
-int af_alg_wait_for_completion(int err, struct af_alg_completion *completion)
-{
- switch (err) {
- case -EINPROGRESS:
- case -EBUSY:
- wait_for_completion(&completion->completion);
- reinit_completion(&completion->completion);
- err = completion->err;
- break;
- };
-
- return err;
-}
-EXPORT_SYMBOL_GPL(af_alg_wait_for_completion);
-
-void af_alg_complete(struct crypto_async_request *req, int err)
-{
- struct af_alg_completion *completion = req->data;
-
- if (err == -EINPROGRESS)
- return;
-
- completion->err = err;
- complete(&completion->completion);
-}
-EXPORT_SYMBOL_GPL(af_alg_complete);
-
/**
* af_alg_alloc_tsgl - allocate the TX SGL
*
diff --git a/crypto/algif_aead.c b/crypto/algif_aead.c
index 48d46e7..abbac8a 100644
--- a/crypto/algif_aead.c
+++ b/crypto/algif_aead.c
@@ -278,11 +278,11 @@ static int _aead_recvmsg(struct socket *sock, struct msghdr *msg,
/* Synchronous operation */
aead_request_set_callback(&areq->cra_u.aead_req,
CRYPTO_TFM_REQ_MAY_BACKLOG,
- af_alg_complete, &ctx->completion);
- err = af_alg_wait_for_completion(ctx->enc ?
+ crypto_req_done, &ctx->wait);
+ err = crypto_wait_req(ctx->enc ?
crypto_aead_encrypt(&areq->cra_u.aead_req) :
crypto_aead_decrypt(&areq->cra_u.aead_req),
- &ctx->completion);
+ &ctx->wait);
}
/* AIO operation in progress */
@@ -554,7 +554,7 @@ static int aead_accept_parent_nokey(void *private, struct sock *sk)
ctx->merge = 0;
ctx->enc = 0;
ctx->aead_assoclen = 0;
- af_alg_init_completion(&ctx->completion);
+ crypto_init_wait(&ctx->wait);
ask->private = ctx;
diff --git a/crypto/algif_hash.c b/crypto/algif_hash.c
index 3b3c154..d2ab8de 100644
--- a/crypto/algif_hash.c
+++ b/crypto/algif_hash.c
@@ -26,7 +26,7 @@ struct hash_ctx {
u8 *result;
- struct af_alg_completion completion;
+ struct crypto_wait wait;
unsigned int len;
bool more;
@@ -102,8 +102,7 @@ static int hash_sendmsg(struct socket *sock, struct msghdr *msg,
if ((msg->msg_flags & MSG_MORE))
hash_free_result(sk, ctx);
- err = af_alg_wait_for_completion(crypto_ahash_init(&ctx->req),
- &ctx->completion);
+ err = crypto_wait_req(crypto_ahash_init(&ctx->req), &ctx->wait);
if (err)
goto unlock;
}
@@ -124,8 +123,8 @@ static int hash_sendmsg(struct socket *sock, struct msghdr *msg,
ahash_request_set_crypt(&ctx->req, ctx->sgl.sg, NULL, len);
- err = af_alg_wait_for_completion(crypto_ahash_update(&ctx->req),
- &ctx->completion);
+ err = crypto_wait_req(crypto_ahash_update(&ctx->req),
+ &ctx->wait);
af_alg_free_sg(&ctx->sgl);
if (err)
goto unlock;
@@ -143,8 +142,8 @@ static int hash_sendmsg(struct socket *sock, struct msghdr *msg,
goto unlock;
ahash_request_set_crypt(&ctx->req, NULL, ctx->result, 0);
- err = af_alg_wait_for_completion(crypto_ahash_final(&ctx->req),
- &ctx->completion);
+ err = crypto_wait_req(crypto_ahash_final(&ctx->req),
+ &ctx->wait);
}
unlock:
@@ -185,7 +184,7 @@ static ssize_t hash_sendpage(struct socket *sock, struct page *page,
} else {
if (!ctx->more) {
err = crypto_ahash_init(&ctx->req);
- err = af_alg_wait_for_completion(err, &ctx->completion);
+ err = crypto_wait_req(err, &ctx->wait);
if (err)
goto unlock;
}
@@ -193,7 +192,7 @@ static ssize_t hash_sendpage(struct socket *sock, struct page *page,
err = crypto_ahash_update(&ctx->req);
}
- err = af_alg_wait_for_completion(err, &ctx->completion);
+ err = crypto_wait_req(err, &ctx->wait);
if (err)
goto unlock;
@@ -229,17 +228,16 @@ static int hash_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
ahash_request_set_crypt(&ctx->req, NULL, ctx->result, 0);
if (!result && !ctx->more) {
- err = af_alg_wait_for_completion(
- crypto_ahash_init(&ctx->req),
- &ctx->completion);
+ err = crypto_wait_req(crypto_ahash_init(&ctx->req),
+ &ctx->wait);
if (err)
goto unlock;
}
if (!result || ctx->more) {
ctx->more = 0;
- err = af_alg_wait_for_completion(crypto_ahash_final(&ctx->req),
- &ctx->completion);
+ err = crypto_wait_req(crypto_ahash_final(&ctx->req),
+ &ctx->wait);
if (err)
goto unlock;
}
@@ -490,13 +488,13 @@ static int hash_accept_parent_nokey(void *private, struct sock *sk)
ctx->result = NULL;
ctx->len = len;
ctx->more = 0;
- af_alg_init_completion(&ctx->completion);
+ crypto_init_wait(&ctx->wait);
ask->private = ctx;
ahash_request_set_tfm(&ctx->req, hash);
ahash_request_set_callback(&ctx->req, CRYPTO_TFM_REQ_MAY_BACKLOG,
- af_alg_complete, &ctx->completion);
+ crypto_req_done, &ctx->wait);
sk->sk_destruct = hash_sock_destruct;
diff --git a/crypto/algif_skcipher.c b/crypto/algif_skcipher.c
index 8ae4170..9954b07 100644
--- a/crypto/algif_skcipher.c
+++ b/crypto/algif_skcipher.c
@@ -129,12 +129,11 @@ static int _skcipher_recvmsg(struct socket *sock, struct msghdr *msg,
skcipher_request_set_callback(&areq->cra_u.skcipher_req,
CRYPTO_TFM_REQ_MAY_SLEEP |
CRYPTO_TFM_REQ_MAY_BACKLOG,
- af_alg_complete,
- &ctx->completion);
- err = af_alg_wait_for_completion(ctx->enc ?
+ crypto_req_done, &ctx->wait);
+ err = crypto_wait_req(ctx->enc ?
crypto_skcipher_encrypt(&areq->cra_u.skcipher_req) :
crypto_skcipher_decrypt(&areq->cra_u.skcipher_req),
- &ctx->completion);
+ &ctx->wait);
}
/* AIO operation in progress */
@@ -388,7 +387,7 @@ static int skcipher_accept_parent_nokey(void *private, struct sock *sk)
ctx->more = 0;
ctx->merge = 0;
ctx->enc = 0;
- af_alg_init_completion(&ctx->completion);
+ crypto_init_wait(&ctx->wait);
ask->private = ctx;
diff --git a/include/crypto/if_alg.h b/include/crypto/if_alg.h
index 75ec9c6..6abf0a3 100644
--- a/include/crypto/if_alg.h
+++ b/include/crypto/if_alg.h
@@ -40,11 +40,6 @@ struct alg_sock {
void *private;
};
-struct af_alg_completion {
- struct completion completion;
- int err;
-};
-
struct af_alg_control {
struct af_alg_iv *iv;
int op;
@@ -152,7 +147,7 @@ struct af_alg_ctx {
void *iv;
size_t aead_assoclen;
- struct af_alg_completion completion;
+ struct crypto_wait wait;
size_t used;
size_t rcvused;
@@ -177,19 +172,11 @@ void af_alg_link_sg(struct af_alg_sgl *sgl_prev, struct af_alg_sgl *sgl_new);
int af_alg_cmsg_send(struct msghdr *msg, struct af_alg_control *con);
-int af_alg_wait_for_completion(int err, struct af_alg_completion *completion);
-void af_alg_complete(struct crypto_async_request *req, int err);
-
static inline struct alg_sock *alg_sk(struct sock *sk)
{
return (struct alg_sock *)sk;
}
-static inline void af_alg_init_completion(struct af_alg_completion *completion)
-{
- init_completion(&completion->completion);
-}
-
/**
* Size of available buffer for sending data from user space to kernel.
*
--
2.1.4
|
|
From: Gilad Ben-Y. <gi...@be...> - 2017-08-14 15:22:59
|
Invoking a possibly async. crypto op and waiting for its completion
while correctly handling backlog processing is a common task both
inside the crypto API implementation and for outside users of it.
This patch adds a generic implementation for doing so in
preparation for using it across the board instead of hand-rolled
versions.
Signed-off-by: Gilad Ben-Yossef <gi...@be...>
CC: Eric Biggers <ebi...@gm...>
---
crypto/api.c | 13 +++++++++++++
include/linux/crypto.h | 41 +++++++++++++++++++++++++++++++++++++++++
2 files changed, 54 insertions(+)
diff --git a/crypto/api.c b/crypto/api.c
index 941cd4c..2a2479d 100644
--- a/crypto/api.c
+++ b/crypto/api.c
@@ -24,6 +24,7 @@
#include <linux/sched/signal.h>
#include <linux/slab.h>
#include <linux/string.h>
+#include <linux/completion.h>
#include "internal.h"
LIST_HEAD(crypto_alg_list);
@@ -595,5 +596,17 @@ int crypto_has_alg(const char *name, u32 type, u32 mask)
}
EXPORT_SYMBOL_GPL(crypto_has_alg);
+void crypto_req_done(struct crypto_async_request *req, int err)
+{
+ struct crypto_wait *wait = req->data;
+
+ if (err == -EINPROGRESS)
+ return;
+
+ wait->err = err;
+ complete(&wait->completion);
+}
+EXPORT_SYMBOL_GPL(crypto_req_done);
+
MODULE_DESCRIPTION("Cryptographic core API");
MODULE_LICENSE("GPL");
diff --git a/include/linux/crypto.h b/include/linux/crypto.h
index 84da997..bb00186 100644
--- a/include/linux/crypto.h
+++ b/include/linux/crypto.h
@@ -24,6 +24,7 @@
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/uaccess.h>
+#include <linux/completion.h>
/*
* Autoloaded crypto modules should only use a prefixed name to avoid allowing
@@ -468,6 +469,45 @@ struct crypto_alg {
} CRYPTO_MINALIGN_ATTR;
/*
+ * A helper struct for waiting for completion of async crypto ops
+ */
+struct crypto_wait {
+ struct completion completion;
+ int err;
+};
+
+/*
+ * Macro for declaring a crypto op async wait object on stack
+ */
+#define DECLARE_CRYPTO_WAIT(_wait) \
+ struct crypto_wait _wait = { \
+ COMPLETION_INITIALIZER_ONSTACK((_wait).completion), 0 }
+
+/*
+ * Async ops completion helper functions
+ */
+void crypto_req_done(struct crypto_async_request *req, int err);
+
+static inline int crypto_wait_req(int err, struct crypto_wait *wait)
+{
+ switch (err) {
+ case -EINPROGRESS:
+ case -EBUSY:
+ wait_for_completion(&wait->completion);
+ reinit_completion(&wait->completion);
+ err = wait->err;
+ break;
+ };
+
+ return err;
+}
+
+static inline void crypto_init_wait(struct crypto_wait *wait)
+{
+ init_completion(&wait->completion);
+}
+
+/*
* Algorithm registration interface.
*/
int crypto_register_alg(struct crypto_alg *alg);
@@ -1604,5 +1644,6 @@ static inline int crypto_comp_decompress(struct crypto_comp *tfm,
src, slen, dst, dlen);
}
+
#endif /* _LINUX_CRYPTO_H */
--
2.1.4
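
As a usage illustration of the API added above (not taken from the patch itself), a caller that already holds a configured skcipher request could wrap an encryption as in the sketch below; DECLARE_CRYPTO_WAIT, crypto_req_done and crypto_wait_req come from the patch, while the wrapper function is hypothetical.

```c
/*
 * Hypothetical caller of the new helpers; only the crypto_wait,
 * DECLARE_CRYPTO_WAIT, crypto_req_done and crypto_wait_req names
 * come from the patch above.
 */
#include <linux/crypto.h>
#include <crypto/skcipher.h>

static int encrypt_and_wait(struct skcipher_request *req)
{
	DECLARE_CRYPTO_WAIT(wait);	/* on-stack wait object */

	skcipher_request_set_callback(req,
				      CRYPTO_TFM_REQ_MAY_BACKLOG |
				      CRYPTO_TFM_REQ_MAY_SLEEP,
				      crypto_req_done, &wait);

	/*
	 * 0 or a real error comes straight back; -EINPROGRESS and
	 * -EBUSY make crypto_wait_req() sleep until crypto_req_done()
	 * fires and then return wait.err instead.
	 */
	return crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
}
```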
|
|
From: Gilad Ben-Y. <gi...@be...> - 2017-08-14 15:22:46
|
Now that -EBUSY return code only indicates backlog queueing we can
safely remove the now redundant check for the CRYPTO_TFM_REQ_MAY_BACKLOG
flag when -EBUSY is returned.
Signed-off-by: Gilad Ben-Yossef <gi...@be...>
---
 drivers/crypto/marvell/cesa.c | 3 +--
 drivers/crypto/marvell/cesa.h | 2 +-
 2 files changed, 2 insertions(+), 3 deletions(-)
diff --git a/drivers/crypto/marvell/cesa.c b/drivers/crypto/marvell/cesa.c
index 6e7a5c7..269737f 100644
--- a/drivers/crypto/marvell/cesa.c
+++ b/drivers/crypto/marvell/cesa.c
@@ -183,8 +183,7 @@ int mv_cesa_queue_req(struct crypto_async_request *req,
 	spin_lock_bh(&engine->lock);
 	ret = crypto_enqueue_request(&engine->queue, req);
 	if ((mv_cesa_req_get_type(creq) == CESA_DMA_REQ) &&
-	    (ret == -EINPROGRESS ||
-	    (ret == -EBUSY && req->flags & CRYPTO_TFM_REQ_MAY_BACKLOG)))
+	    (ret == -EINPROGRESS || ret == -EBUSY)
 		mv_cesa_tdma_chain(engine, creq);
 	spin_unlock_bh(&engine->lock);
diff --git a/drivers/crypto/marvell/cesa.h b/drivers/crypto/marvell/cesa.h
index b7872f6..63c8457 100644
--- a/drivers/crypto/marvell/cesa.h
+++ b/drivers/crypto/marvell/cesa.h
@@ -763,7 +763,7 @@ static inline int mv_cesa_req_needs_cleanup(struct crypto_async_request *req,
 	 * the backlog and will be processed later. There's no need to
 	 * clean it up.
 	 */
-	if (ret == -EBUSY && req->flags & CRYPTO_TFM_REQ_MAY_BACKLOG)
+	if (ret == -EBUSY)
 		return false;
 	/* Request wasn't queued, we need to clean it up */
--
2.1.4
|
|
From: Gilad Ben-Y. <gi...@be...> - 2017-08-14 15:22:33
|
Now that -EBUSY return code only indicates backlog queueing we can
safely remove the now redundant check for the CRYPTO_TFM_REQ_MAY_BACKLOG
flag when -EBUSY is returned.
Signed-off-by: Gilad Ben-Yossef <gi...@be...>
---
 crypto/ahash.c        | 12 +++---------
 crypto/cts.c          | 6 ++----
 crypto/lrw.c          | 8 +++-----
 crypto/rsa-pkcs1pad.c | 16 ++++------------
 crypto/xts.c          | 8 ++------
 5 files changed, 13 insertions(+), 37 deletions(-)
diff --git a/crypto/ahash.c b/crypto/ahash.c
index 826cd7a..d63eeef 100644
--- a/crypto/ahash.c
+++ b/crypto/ahash.c
@@ -334,9 +334,7 @@ static int ahash_op_unaligned(struct ahash_request *req,
 		return err;
 	err = op(req);
-	if (err == -EINPROGRESS ||
-	    (err == -EBUSY && (ahash_request_flags(req) &
-			       CRYPTO_TFM_REQ_MAY_BACKLOG)))
+	if (err == -EINPROGRESS || err == -EBUSY)
 		return err;
 	ahash_restore_req(req, err);
@@ -394,9 +392,7 @@ static int ahash_def_finup_finish1(struct ahash_request *req, int err)
 	req->base.complete = ahash_def_finup_done2;
 	err = crypto_ahash_reqtfm(req)->final(req);
-	if (err == -EINPROGRESS ||
-	    (err == -EBUSY && (ahash_request_flags(req) &
-			       CRYPTO_TFM_REQ_MAY_BACKLOG)))
+	if (err == -EINPROGRESS || err == -EBUSY)
 		return err;
 out:
@@ -432,9 +428,7 @@ static int ahash_def_finup(struct ahash_request *req)
 		return err;
 	err = tfm->update(req);
-	if (err == -EINPROGRESS ||
-	    (err == -EBUSY && (ahash_request_flags(req) &
-			       CRYPTO_TFM_REQ_MAY_BACKLOG)))
+	if (err == -EINPROGRESS || err == -EBUSY)
 		return err;
 	return ahash_def_finup_finish1(req, err);
diff --git a/crypto/cts.c b/crypto/cts.c
index 243f591..4773c18 100644
--- a/crypto/cts.c
+++ b/crypto/cts.c
@@ -136,8 +136,7 @@ static void crypto_cts_encrypt_done(struct crypto_async_request *areq, int err)
 		goto out;
 	err = cts_cbc_encrypt(req);
-	if (err == -EINPROGRESS ||
-	    (err == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
+	if (err == -EINPROGRESS || err == -EBUSY)
 		return;
 out:
@@ -229,8 +228,7 @@ static void crypto_cts_decrypt_done(struct crypto_async_request *areq, int err)
 		goto out;
 	err = cts_cbc_decrypt(req);
-	if (err == -EINPROGRESS ||
-	    (err == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
+	if (err == -EINPROGRESS || err == -EBUSY)
 		return;
 out:
diff --git a/crypto/lrw.c b/crypto/lrw.c
index a8bfae4..695cea9 100644
--- a/crypto/lrw.c
+++ b/crypto/lrw.c
@@ -328,9 +328,7 @@ static int do_encrypt(struct skcipher_request *req, int err)
 		      crypto_skcipher_encrypt(subreq) ?:
 		      post_crypt(req);
-		if (err == -EINPROGRESS ||
-		    (err == -EBUSY &&
-		     req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
+		if (err == -EINPROGRESS || err == -EBUSY)
 			return err;
 	}
@@ -380,9 +378,7 @@ static int do_decrypt(struct skcipher_request *req, int err)
 		      crypto_skcipher_decrypt(subreq) ?:
 		      post_crypt(req);
-		if (err == -EINPROGRESS ||
-		    (err == -EBUSY &&
-		     req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
+		if (err == -EINPROGRESS || err == -EBUSY)
 			return err;
 	}
diff --git a/crypto/rsa-pkcs1pad.c b/crypto/rsa-pkcs1pad.c
index 407c64b..2908f93 100644
--- a/crypto/rsa-pkcs1pad.c
+++ b/crypto/rsa-pkcs1pad.c
@@ -279,9 +279,7 @@ static int pkcs1pad_encrypt(struct akcipher_request *req)
 				   req->dst, ctx->key_size - 1, req->dst_len);
 	err = crypto_akcipher_encrypt(&req_ctx->child_req);
-	if (err != -EINPROGRESS &&
-	    (err != -EBUSY ||
-	     !(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)))
+	if (err != -EINPROGRESS && err != -EBUSY)
 		return pkcs1pad_encrypt_sign_complete(req, err);
 	return err;
@@ -383,9 +381,7 @@ static int pkcs1pad_decrypt(struct akcipher_request *req)
 				   ctx->key_size);
 	err = crypto_akcipher_decrypt(&req_ctx->child_req);
-	if (err != -EINPROGRESS &&
-	    (err != -EBUSY ||
-	     !(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)))
+	if (err != -EINPROGRESS && err != -EBUSY)
 		return pkcs1pad_decrypt_complete(req, err);
 	return err;
@@ -440,9 +436,7 @@ static int pkcs1pad_sign(struct akcipher_request *req)
 				   req->dst, ctx->key_size - 1, req->dst_len);
 	err = crypto_akcipher_sign(&req_ctx->child_req);
-	if (err != -EINPROGRESS &&
-	    (err != -EBUSY ||
-	     !(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)))
+	if (err != -EINPROGRESS && err != -EBUSY)
 		return pkcs1pad_encrypt_sign_complete(req, err);
 	return err;
@@ -561,9 +555,7 @@ static int pkcs1pad_verify(struct akcipher_request *req)
 				   ctx->key_size);
 	err = crypto_akcipher_verify(&req_ctx->child_req);
-	if (err != -EINPROGRESS &&
-	    (err != -EBUSY ||
-	     !(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)))
+	if (err != -EINPROGRESS && err != -EBUSY)
 		return pkcs1pad_verify_complete(req, err);
 	return err;
diff --git a/crypto/xts.c b/crypto/xts.c
index d86c11a..af68012 100644
--- a/crypto/xts.c
+++ b/crypto/xts.c
@@ -269,9 +269,7 @@ static int do_encrypt(struct skcipher_request *req, int err)
 		      crypto_skcipher_encrypt(subreq) ?:
 		      post_crypt(req);
-		if (err == -EINPROGRESS ||
-		    (err == -EBUSY &&
-		     req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
+		if (err == -EINPROGRESS || err == -EBUSY)
 			return err;
 	}
@@ -321,9 +319,7 @@ static int do_decrypt(struct skcipher_request *req, int err)
 		      crypto_skcipher_decrypt(subreq) ?:
 		      post_crypt(req);
-		if (err == -EINPROGRESS ||
-		    (err == -EBUSY &&
-		     req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
+		if (err == -EINPROGRESS || err == -EBUSY)
 			return err;
 	}
--
2.1.4
|
|
From: Gilad Ben-Y. <gi...@be...> - 2017-08-14 15:22:20
|
Replace -EBUSY with -EAGAIN when reporting transient busy
indication in the absence of backlog.
Signed-off-by: Gilad Ben-Yossef <gi...@be...>
---
drivers/crypto/ccp/ccp-crypto-main.c | 8 +++-----
drivers/crypto/ccp/ccp-dev.c | 7 +++++--
2 files changed, 8 insertions(+), 7 deletions(-)
diff --git a/drivers/crypto/ccp/ccp-crypto-main.c b/drivers/crypto/ccp/ccp-crypto-main.c
index 35a9de7..403ff0a 100644
--- a/drivers/crypto/ccp/ccp-crypto-main.c
+++ b/drivers/crypto/ccp/ccp-crypto-main.c
@@ -222,9 +222,10 @@ static int ccp_crypto_enqueue_cmd(struct ccp_crypto_cmd *crypto_cmd)
/* Check if the cmd can/should be queued */
if (req_queue.cmd_count >= CCP_CRYPTO_MAX_QLEN) {
- ret = -EBUSY;
- if (!(crypto_cmd->cmd->flags & CCP_CMD_MAY_BACKLOG))
+ if (!(crypto_cmd->cmd->flags & CCP_CMD_MAY_BACKLOG)) {
+ ret = -EAGAIN;
goto e_lock;
+ }
}
/* Look for an entry with the same tfm. If there is a cmd
@@ -243,9 +244,6 @@ static int ccp_crypto_enqueue_cmd(struct ccp_crypto_cmd *crypto_cmd)
ret = ccp_enqueue_cmd(crypto_cmd->cmd);
if (!ccp_crypto_success(ret))
goto e_lock; /* Error, don't queue it */
- if ((ret == -EBUSY) &&
- !(crypto_cmd->cmd->flags & CCP_CMD_MAY_BACKLOG))
- goto e_lock; /* Not backlogging, don't queue it */
}
if (req_queue.cmd_count >= CCP_CRYPTO_MAX_QLEN) {
diff --git a/drivers/crypto/ccp/ccp-dev.c b/drivers/crypto/ccp/ccp-dev.c
index 4e029b1..3d637e3 100644
--- a/drivers/crypto/ccp/ccp-dev.c
+++ b/drivers/crypto/ccp/ccp-dev.c
@@ -292,9 +292,12 @@ int ccp_enqueue_cmd(struct ccp_cmd *cmd)
i = ccp->cmd_q_count;
if (ccp->cmd_count >= MAX_CMD_QLEN) {
- ret = -EBUSY;
- if (cmd->flags & CCP_CMD_MAY_BACKLOG)
+ if (cmd->flags & CCP_CMD_MAY_BACKLOG) {
+ ret = -EBUSY;
list_add_tail(&cmd->entry, &ccp->backlog);
+ } else {
+ ret = -EAGAIN;
+ }
} else {
ret = -EINPROGRESS;
ccp->cmd_count++;
--
2.1.4
|
|
From: Gilad Ben-Y. <gi...@be...> - 2017-08-14 15:22:08
|
The crypto API was using the -EBUSY return value to indicate
both a hard failure to submit a crypto operation into a
transformation provider when the latter was busy and the backlog
mechanism was not enabled as well as a notification that the
operation was queued into the backlog when the backlog mechanism
was enabled.
Having the same return code indicate two very different conditions
depending on a flag is both error-prone and requires an extra runtime
check like the following to discern between the cases:
if (err == -EINPROGRESS ||
(err == -EBUSY && (ahash_request_flags(req) &
CRYPTO_TFM_REQ_MAY_BACKLOG)))
This patch changes the return code used to indicate a crypto op
failed due to the transformation provider being transiently busy
to -EAGAIN.
Signed-off-by: Gilad Ben-Yossef <gi...@be...>
---
crypto/algapi.c | 6 ++++--
crypto/algif_hash.c | 20 +++++++++++++++++---
crypto/cryptd.c | 4 +---
3 files changed, 22 insertions(+), 8 deletions(-)
diff --git a/crypto/algapi.c b/crypto/algapi.c
index aa699ff..916bee3 100644
--- a/crypto/algapi.c
+++ b/crypto/algapi.c
@@ -897,9 +897,11 @@ int crypto_enqueue_request(struct crypto_queue *queue,
int err = -EINPROGRESS;
if (unlikely(queue->qlen >= queue->max_qlen)) {
- err = -EBUSY;
- if (!(request->flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
+ if (!(request->flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
+ err = -EAGAIN;
goto out;
+ }
+ err = -EBUSY;
if (queue->backlog == &queue->list)
queue->backlog = &request->list;
}
diff --git a/crypto/algif_hash.c b/crypto/algif_hash.c
index 5e92bd2..3b3c154 100644
--- a/crypto/algif_hash.c
+++ b/crypto/algif_hash.c
@@ -39,6 +39,20 @@ struct algif_hash_tfm {
bool has_key;
};
+/* Previous versions of crypto_* ops used to return -EBUSY
+ * rather than -EAGAIN to indicate being tied up. The in
+ * kernel API changed but we don't want to break the user
+ * space API. As only the hash user interface exposed this
+ * error ever to the user, do the translation here.
+ */
+static inline int crypto_user_err(int err)
+{
+ if (err == -EAGAIN)
+ return -EBUSY;
+
+ return err;
+}
+
static int hash_alloc_result(struct sock *sk, struct hash_ctx *ctx)
{
unsigned ds;
@@ -136,7 +150,7 @@ static int hash_sendmsg(struct socket *sock, struct msghdr *msg,
unlock:
release_sock(sk);
- return err ?: copied;
+ return err ? crypto_user_err(err) : copied;
}
static ssize_t hash_sendpage(struct socket *sock, struct page *page,
@@ -188,7 +202,7 @@ static ssize_t hash_sendpage(struct socket *sock, struct page *page,
unlock:
release_sock(sk);
- return err ?: size;
+ return err ? crypto_user_err(err) : size;
}
static int hash_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
@@ -236,7 +250,7 @@ static int hash_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
hash_free_result(sk, ctx);
release_sock(sk);
- return err ?: len;
+ return err ? crypto_user_err(err) : len;
}
static int hash_accept(struct socket *sock, struct socket *newsock, int flags,
diff --git a/crypto/cryptd.c b/crypto/cryptd.c
index 0508c48..d1dbdce 100644
--- a/crypto/cryptd.c
+++ b/crypto/cryptd.c
@@ -137,16 +137,14 @@ static int cryptd_enqueue_request(struct cryptd_queue *queue,
int cpu, err;
struct cryptd_cpu_queue *cpu_queue;
atomic_t *refcnt;
- bool may_backlog;
cpu = get_cpu();
cpu_queue = this_cpu_ptr(queue->cpu_queue);
err = crypto_enqueue_request(&cpu_queue->queue, request);
refcnt = crypto_tfm_ctx(request->tfm);
- may_backlog = request->flags & CRYPTO_TFM_REQ_MAY_BACKLOG;
- if (err == -EBUSY && !may_backlog)
+ if (err == -EAGAIN)
goto out_put_cpu;
queue_work_on(cpu, kcrypto_wq, &cpu_queue->work);
--
2.1.4
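
For providers that queue requests themselves, the practical effect of this change can be sketched as below; crypto_enqueue_request() is the real API, while the wrapper function and its retry policy are illustrative assumptions only.

```c
/*
 * Illustrative provider-side sketch of the return-code split after
 * this patch:
 *   -EINPROGRESS  accepted, completes asynchronously
 *   -EBUSY        queue full, request parked on the backlog
 *   -EAGAIN       queue full and MAY_BACKLOG not set, request dropped
 */
#include <linux/printk.h>
#include <crypto/algapi.h>

static int example_enqueue(struct crypto_queue *queue,
			   struct crypto_async_request *req)
{
	int ret = crypto_enqueue_request(queue, req);

	if (ret == -EINPROGRESS || ret == -EBUSY)
		return ret;	/* running, or parked on the backlog */

	if (ret == -EAGAIN)
		pr_debug("provider transiently busy, request not queued\n");

	return ret;		/* caller may retry -EAGAIN later */
}
```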
|
|
From: Gilad Ben-Y. <gi...@be...> - 2017-08-14 15:21:55
|
Many users of kernel async. crypto services have a pattern of starting an async. crypto op and then using a completion to wait for it to end.
This patch set simplifies this common use case in two ways:
First, by separating the return codes of the case where a request is queued to a backlog due to the provider being busy (-EBUSY) from the case where the request has failed due to the provider being busy and backlogging is not enabled (-EAGAIN).
Next, this change is then built on to create a generic API to wait for an async. crypto operation to complete.
The end result is a smaller code base and an API that is easier to use and more difficult to get wrong.
The patch set was boot tested on x86_64 and arm64, which at the very least tests the crypto users via testmgr and tcrypt, but I do note that I do not have access to some of the HW whose drivers are modified, nor do I claim I was able to test all of the corner cases.
The patch set is based upon linux-next release tagged next-20170811.
Changes from v4:
- Rebase on top of latest algif changes from Stephan Mueller.
- Fix typo in ccp patch title.
Changes from v3:
- Instead of changing the return code to indicate backlog queueing, change the return code to indicate transient busy state, as suggested by Herbert Xu.
Changes from v2:
- Patch title changed from "introduce crypto wait for async op" to better reflect the current state.
- Rebase on top of latest linux-next.
- Add a new return code of -EIOCBQUEUED for backlog queueing, as suggested by Herbert Xu.
- Transform more users to the new API.
- Update the drbg change to account for new init as indicated by Stephan Muller.
Changes from v1:
- Address review comments from Eric Biggers.
- Separated out bug fixes of existing code and rebase on top of that patch set.
- Rename 'ecr' to 'wait' in fscrypto code.
- Split patch introducing the new API from the change moving over the algif code which it originated from to the new API.
- Inline crypto_wait_req().
- Some code indentation fixes.
Gilad Ben-Yossef (19):
  crypto: change transient busy return code to -EAGAIN
  crypto: ccp: use -EAGAIN for transient busy indication
  crypto: remove redundant backlog checks on EBUSY
  crypto: marvell/cesa: remove redundant backlog checks on EBUSY
  crypto: introduce crypto wait for async op
  crypto: move algif to generic async completion
  crypto: move pub key to generic async completion
  crypto: move drbg to generic async completion
  crypto: move gcm to generic async completion
  crypto: move testmgr to generic async completion
  fscrypt: move to generic async completion
  dm: move dm-verity to generic async completion
  cifs: move to generic async completion
  ima: move to generic async completion
  crypto: tcrypt: move to generic async completion
  crypto: talitos: move to generic async completion
  crypto: qce: move to generic async completion
  crypto: mediatek: move to generic async completion
  crypto: adapt api sample to use async. op wait
 Documentation/crypto/api-samples.rst | 52 ++------
 crypto/af_alg.c | 27 -----
 crypto/ahash.c | 12 +--
 crypto/algapi.c | 6 +-
 crypto/algif_aead.c | 8 +-
 crypto/algif_hash.c | 50 +++++----
 crypto/algif_skcipher.c | 9 +-
 crypto/api.c | 13 +++
 crypto/asymmetric_keys/public_key.c | 28 +----
 crypto/cryptd.c | 4 +-
 crypto/cts.c | 6 +-
 crypto/drbg.c | 36 ++-----
 crypto/gcm.c | 32 ++----
 crypto/lrw.c | 8 +-
 crypto/rsa-pkcs1pad.c | 16 +--
 crypto/tcrypt.c | 84 +++++----------
 crypto/testmgr.c | 204 ++++++++++++-----------------
 crypto/xts.c | 8 +-
 drivers/crypto/ccp/ccp-crypto-main.c | 8 +-
 drivers/crypto/ccp/ccp-dev.c | 7 +-
 drivers/crypto/marvell/cesa.c | 3 +-
 drivers/crypto/marvell/cesa.h | 2 +-
 drivers/crypto/mediatek/mtk-aes.c | 31 +-----
 drivers/crypto/qce/sha.c | 30 +-----
 drivers/crypto/talitos.c | 38 +------
 drivers/md/dm-verity-target.c | 81 ++++----------
 drivers/md/dm-verity.h | 5 -
 fs/cifs/smb2ops.c | 30 +-----
 fs/crypto/crypto.c | 28 +----
 fs/crypto/fname.c | 36 ++-----
 fs/crypto/fscrypt_private.h | 10 --
 fs/crypto/keyinfo.c | 21 +---
 include/crypto/drbg.h | 3 +-
 include/crypto/if_alg.h | 15 +--
 include/linux/crypto.h | 41 +++++++
 security/integrity/ima/ima_crypto.c | 56 +++-------
 36 files changed, 311 insertions(+), 737 deletions(-)
--
2.1.4
|
|
From: Mimi Z. <zo...@li...> - 2017-08-14 12:13:15
|
On Mon, 2017-08-14 at 13:56 +0300, Jarkko Sakkinen wrote:
> > > Since the main concern about this change is breaking old systems that
> > > might potentially have other peripherals hanging off the LPC bus, can
> > > we define a new Kconfig option, with the default as 'N'?
> > >
> > > Mimi
> >
> > I guess that could make sense but I would like to hear feedback first.
> >
> > /Jarkko
>
> And I'm worried would that it'd be left for many years to come as an
> option. I do not have any metrics what portion of hardware in the field
> would break if this is turned on.
>
> It would slow down kernel testing as I would have to run tests for the
> driver with that option turned on and off because it is a major shift
> from how driver functions. And I have zero idea how long I would go on
> doing this.
>
> One maybe a little bit better option would be to have a sysfs attribute
> for this functionality (disable_burst_count). What do you think about
> that?

That works! So we'll define a module_param named disable_burst_count,
which can be specified on the boot command line.

Mimi
|
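
The parameter Mimi proposes would presumably look something like the sketch below; only the name disable_burst_count comes from the thread, while the file placement, permissions and description text are assumptions.

```c
/* Hypothetical sketch only; the parameter name comes from the thread above. */
#include <linux/moduleparam.h>

static bool disable_burst_count;
module_param(disable_burst_count, bool, 0444);
MODULE_PARM_DESC(disable_burst_count,
		 "Ignore the TPM burst count and accept wait states instead");
```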
|
From: Mimi Z. <zo...@li...> - 2017-08-14 12:04:39
|
On Mon, 2017-08-14 at 13:56 +0300, Jarkko Sakkinen wrote:
> > > > I would like to see tpm_msleep() wrapper to replace current msleep()
> > > > usage across the subsystem before considering this. I.e. wrapper that
> > > > internally uses usleep_range(). This way we can mechanically convert
> > > > everything to a more low latency option.
> > >
> > > Fine. I assume you meant tpm_sleep(), not tpm_msleep().
> >
> > I think it would sense to have a function that takes msecs because msecs
> > are mostly used everywhere in the subsystem. This way we don't have to
> > change any of the existing constants.

For now converting from msleep() to tpm_msleep() will be straight
forward. Internally we would just use usleep_range().

Going forward, my concern is that even 1 msec might be too long for some
of these sleeps.

Mimi
|
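
A wrapper along the lines being discussed might look like the sketch below; the name tpm_msleep() and the use of usleep_range() come from the thread, while the 1 ms slack window is an assumption for illustration.

```c
/*
 * Sketch of the discussed tpm_msleep() helper: same millisecond
 * interface as msleep(), but backed by usleep_range() so short delays
 * are not rounded up by the timer wheel. The extra 1000 us of slack
 * is an assumed value, not something fixed in the thread.
 */
#include <linux/delay.h>

static inline void tpm_msleep(unsigned int delay_msec)
{
	usleep_range(delay_msec * 1000,
		     (delay_msec * 1000) + 1000);
}
```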
|
From: Jarkko S. <jar...@li...> - 2017-08-14 10:57:04
|
On Mon, Aug 14, 2017 at 01:51:30PM +0300, Jarkko Sakkinen wrote:
> On Fri, Aug 11, 2017 at 11:30:19AM -0400, Mimi Zohar wrote:
> > On Fri, 2017-08-11 at 14:14 +0300, Jarkko Sakkinen wrote:
> > > On Wed, Aug 09, 2017 at 11:00:36PM +0200, Peter Huewe wrote:
> > > > Hi Ken,
> > > > (again speaking only on my behalf, not my employer)
> > > >
> > > > > Does anyone know of platforms where this occurs?
> > > > > I suspect (but not sure) that the days of SuperIO connecting floppy
> > > > > drives, printer ports, and PS/2 mouse ports on the LPC bus are over, and
> > > > > such legacy systems will not have a TPM. Would SuperIO even support the
> > > > > special TPM LPC bus cycles?
> > > >
> > > > Since we are the linux kernel, we do have to care for legacy devices.
> > > > And a system with LPC, PS2Mouse on SuperIO and a TPM is not that uncommon.
> > > >
> > > > And heck, we even have support for 1.1b TPM devices....
> > > >
> > > > >> One more viewpoint: TCG must have added the burst count for a reason (might
> > > > >> be very well related to what Peter said). Is ignoring it something that TCG
> > > > >> recommends? Not following the standard exactly in the driver code sometimes
> > > > >> makes sense on *small details* but I would not say that this is a small
> > > > >> detail...
> > > >
> > > > > I checked with the TCG's device driver work group (DDWG). Both the spec
> > > > > editor and 3 TPM vendors - Infineon, Nuvoton, and ST Micro - agreed that
> > > > > ignoring burst count may incur wait states but nothing more. Operations
> > > > > will still be successful.
> > > >
> > > > Interesting - let me check with Georg tomorrow.
> > > > Unfortunately I do not have access to my tcg mails from home (since I'm not working :),
> > > > but did you _explicitly_ talk about LPC and the system?
> > > > I'm sure the TPM does not care about the waitstates...
> > > >
> > > > If my memory does not betray me,
> > > > it is actually possible to "freeze up" a system completely by flooding the lpc bus.
> > > > Let me double check tomorrow...
> > > >
> > > > In any case - I really would like to see a much more performant tpm subsystem -
> > > > however it will be quite an effort with a lot of legacy testing.
> > > > (which I unfortunately cannot spend on my private time ... and also of course lacking test systems).
> > > >
> > > > Thanks,
> > > > Peter
> > >
> > > I would like to see a tpm_msleep() wrapper to replace the current msleep()
> > > usage across the subsystem before considering this, i.e. a wrapper that
> > > internally uses usleep_range(). This way we can mechanically convert
> > > everything to a more low-latency option.
> >
> > Fine. I assume you meant tpm_sleep(), not tpm_msleep().
>
> I think it would make sense to have a function that takes msecs because msecs
> are mostly used everywhere in the subsystem. This way we don't have to
> change any of the existing constants.
>
> > > This should have been done already for the patch that Mimi and Nayna
> > > provided instead of open coding stuff.
> >
> > At that time, we had no idea what caused the major change in TPM
> > performance. We only knew that the change occurred somewhere between
> > linux-4.7 and linux-4.8. Even after figuring out it was the change to
> > msleep(), we were hoping that msleep() would be fixed. So your
> > comment, that we should have done it differently back then, is
> > unwarranted.
>
> I wasn't trying to point the blame at you at all. I didn't bring this to
> the table back then myself. I agree with what you are saying.
>
> I was mainly trying to explain why I think it should be done this way
> now while I didn't suggest it back then :-)
>
> > > That change is something that can be applied right now. On the other
> > > hand, this is a very controversial change.
> >
> > Since the main concern about this change is breaking old systems that
> > might potentially have other peripherals hanging off the LPC bus, can
> > we define a new Kconfig option, with the default as 'N'?
> >
> > Mimi
>
> I guess that could make sense but I would like to hear feedback first.
>
> /Jarkko

And I'm worried that it'd be left for many years to come as an option. I do
not have any metrics on what portion of hardware in the field would break if
this is turned on. It would slow down kernel testing, as I would have to run
tests for the driver with that option turned on and off, because it is a
major shift from how the driver functions. And I have zero idea how long I
would go on doing this.

One maybe slightly better option would be to have a sysfs attribute for this
functionality (disable_burst_count). What do you think about that?

/Jarkko
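Purely as an illustration of what such an opt-in knob could look like, a sketch
follows. It uses a module parameter rather than the sysfs attribute mentioned
above, and the name ignore_burst_count, the permissions, and the helper it
gates are all hypothetical; it also assumes the driver-internal struct tpm_chip
and get_burstcount() are in scope.

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>

/* Hypothetical opt-in switch, default off so existing behaviour is kept. */
static bool ignore_burst_count;
module_param(ignore_burst_count, bool, 0444);
MODULE_PARM_DESC(ignore_burst_count,
		 "Write to the TPM FIFO without polling burstCount (may incur wait states)");

/* The FIFO write path would then gate the burstCount poll on the knob: */
static size_t tpm_writable_bytes(struct tpm_chip *chip, size_t remaining)
{
	int burstcnt;

	if (ignore_burst_count)
		return remaining;	/* push everything, accept wait states */

	burstcnt = get_burstcount(chip);
	if (burstcnt < 0)
		return 0;		/* timed out waiting for burstCount */

	return min_t(size_t, burstcnt, remaining);
}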
|
From: Jarkko S. <jar...@li...> - 2017-08-14 10:51:48
|
On Fri, Aug 11, 2017 at 11:30:19AM -0400, Mimi Zohar wrote:
> On Fri, 2017-08-11 at 14:14 +0300, Jarkko Sakkinen wrote:
> > On Wed, Aug 09, 2017 at 11:00:36PM +0200, Peter Huewe wrote:
> > > Hi Ken,
> > > (again speaking only on my behalf, not my employer)
> > >
> > > > Does anyone know of platforms where this occurs?
> > > > I suspect (but not sure) that the days of SuperIO connecting floppy
> > > > drives, printer ports, and PS/2 mouse ports on the LPC bus are over, and
> > > > such legacy systems will not have a TPM. Would SuperIO even support the
> > > > special TPM LPC bus cycles?
> > >
> > > Since we are the linux kernel, we do have to care for legacy devices.
> > > And a system with LPC, PS2Mouse on SuperIO and a TPM is not that uncommon.
> > >
> > > And heck, we even have support for 1.1b TPM devices....
> > >
> > > >> One more viewpoint: TCG must have added the burst count for a reason (might
> > > >> be very well related to what Peter said). Is ignoring it something that TCG
> > > >> recommends? Not following the standard exactly in the driver code sometimes
> > > >> makes sense on *small details* but I would not say that this is a small
> > > >> detail...
> > >
> > > > I checked with the TCG's device driver work group (DDWG). Both the spec
> > > > editor and 3 TPM vendors - Infineon, Nuvoton, and ST Micro - agreed that
> > > > ignoring burst count may incur wait states but nothing more. Operations
> > > > will still be successful.
> > >
> > > Interesting - let me check with Georg tomorrow.
> > > Unfortunately I do not have access to my tcg mails from home (since I'm not working :),
> > > but did you _explicitly_ talk about LPC and the system?
> > > I'm sure the TPM does not care about the waitstates...
> > >
> > > If my memory does not betray me,
> > > it is actually possible to "freeze up" a system completely by flooding the lpc bus.
> > > Let me double check tomorrow...
> > >
> > > In any case - I really would like to see a much more performant tpm subsystem -
> > > however it will be quite an effort with a lot of legacy testing.
> > > (which I unfortunately cannot spend on my private time ... and also of course lacking test systems).
> > >
> > > Thanks,
> > > Peter
> >
> > I would like to see a tpm_msleep() wrapper to replace the current msleep()
> > usage across the subsystem before considering this, i.e. a wrapper that
> > internally uses usleep_range(). This way we can mechanically convert
> > everything to a more low-latency option.
>
> Fine. I assume you meant tpm_sleep(), not tpm_msleep().

I think it would make sense to have a function that takes msecs because msecs
are mostly used everywhere in the subsystem. This way we don't have to
change any of the existing constants.

> > This should have been done already for the patch that Mimi and Nayna
> > provided instead of open coding stuff.
>
> At that time, we had no idea what caused the major change in TPM
> performance. We only knew that the change occurred somewhere between
> linux-4.7 and linux-4.8. Even after figuring out it was the change to
> msleep(), we were hoping that msleep() would be fixed. So your
> comment, that we should have done it differently back then, is
> unwarranted.

I wasn't trying to point the blame at you at all. I didn't bring this to
the table back then myself. I agree with what you are saying.

I was mainly trying to explain why I think it should be done this way
now while I didn't suggest it back then :-)

> > That change is something that can be applied right now. On the other
> > hand, this is a very controversial change.
>
> Since the main concern about this change is breaking old systems that
> might potentially have other peripherals hanging off the LPC bus, can
> we define a new Kconfig option, with the default as 'N'?
>
> Mimi

I guess that could make sense but I would like to hear feedback first.

/Jarkko
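As a rough sketch of what the proposed wrapper could look like, callers would
keep passing milliseconds while the implementation switches to usleep_range();
the slack constant below is an assumption for illustration, and an actual patch
would have to pick the upper bound.

#include <linux/delay.h>

/* Assumed slack for the upper bound of the sleep range. */
#define TPM_SLEEP_SLACK_US	300

static inline void tpm_msleep(unsigned int delay_msec)
{
	/* Convert to microseconds so usleep_range() can be used underneath. */
	usleep_range(delay_msec * 1000,
		     (delay_msec * 1000) + TPM_SLEEP_SLACK_US);
}

Existing call sites such as msleep(TPM_TIMEOUT) could then be converted
mechanically to tpm_msleep(TPM_TIMEOUT) without touching the timeout constants.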
|
From: Jarkko S. <jar...@li...> - 2017-08-14 10:10:57
|
On Fri, Aug 11, 2017 at 05:54:19PM -0400, Ken Goldman wrote:
> On 8/9/2017 4:43 PM, Peter Huewe wrote:
> >
> > Yes that's bad, especially since the current msleep(5) is actually
> > msleep(20). However, please also keep in mind SPI tpms show a much
> > higher burst count value (255); our I2C TPM SLB9645 even shows
> > something in the range of 1k. :)
>
> One of our platforms has a TPM 1.2 with an 8 byte FIFO and a static burst
> count. This is where we're debugging.
>
> > > Would another solution be to reduce the burst count poll and sleep
> > > to, e.g., 100 usec or even 10 usec? This would probably help
> > > greatly, but still not incur the wait states that triggered the
> > > NACK.
> >
> > If you use sleep it's not guaranteed that you wake up after exactly xx
> > specified amount of time. Just that you sleep at least xx amount of
> > time. Otherwise you would have to do delay/busywaiting.
>
> Understood. However, if the DD maintainers will not accept ignoring
> burstCount, would a short sleep (e.g., 10 usec) be acceptable?
>
> If so, we can benchmark it.
>
> > Imho the best option is to figure out whether any vendor can
> > determine the "FIFO flush time", i.e. how much time does it take to
> > empty the fifo and fill up the burstcount, and use this as the lower
> > bound of an usleep range. I don't think tpms are in the range of < 10
> > us...
> >
> > @Ken: Maybe you can check in DDWG?
>
> I asked this week.
>
> Nuvoton, ST Micro, and Infineon confirmed that the TPM empties a byte from
> the FIFO in under 1 usec. So, even with a static burst count, the entire
> FIFO would empty in under 10 usec.

Does this break anything, let's say, in a decade time frame? If it does, I
will not even consider this. Can you give a definitive answer for that?

/Jarkko
|
From: msuchanek <msu...@su...> - 2017-08-13 23:54:07
|
Hello,

On Fri, 11 Aug 2017 17:32:12 -0400 Ken Goldman <kg...@li...> wrote:
> On 8/9/2017 5:00 PM, Peter Huewe wrote:
> >
> > Since we are the linux kernel, we do have to care for legacy
> > devices. And a system with LPC, PS2Mouse on SuperIO and a TPM is not
> > that uncommon.
> >
> > And heck, we even have support for 1.1b TPM devices....
>
> Understood. However, remember that SuperIO is a 1980's device that
> predates the TPM. Since the TPM requires special LPC bus cycles, it's
> even less likely that an old chipset has an attached TPM.

Where are the PS/2 ports attached today?

About 500 out of 700 mainboards sold today have a PS/2 port, which is
probably due to the prevalence of legacy devices and usbhid limitations.
Similarly, many boards have serial and parallel hardware ports. In all
diagrams detailed enough to show these ports I have seen them attached to
the LPC bus.

Thanks

Michal
|
From: SF M. E. <el...@us...> - 2017-08-13 17:36:21
|
From: Markus Elfring <el...@us...>
Date: Sun, 13 Aug 2017 19:29:11 +0200
Omit an extra message for a memory allocation failure in this function.
This issue was detected by using the Coccinelle software.
Signed-off-by: Markus Elfring <el...@us...>
---
security/integrity/ima/ima_queue.c | 5 ++---
1 file changed, 2 insertions(+), 3 deletions(-)
diff --git a/security/integrity/ima/ima_queue.c b/security/integrity/ima/ima_queue.c
index a02a86d51102..3b60e72aecb2 100644
--- a/security/integrity/ima/ima_queue.c
+++ b/security/integrity/ima/ima_queue.c
@@ -100,10 +100,9 @@ static int ima_add_digest_entry(struct ima_template_entry *entry,
unsigned int key;
qe = kmalloc(sizeof(*qe), GFP_KERNEL);
- if (qe == NULL) {
- pr_err("OUT OF MEMORY ERROR creating queue entry\n");
+ if (!qe)
return -ENOMEM;
- }
+
qe->entry = entry;
INIT_LIST_HEAD(&qe->later);
--
2.14.0
|
|
From: Ken G. <kg...@li...> - 2017-08-11 21:53:52
|
On 8/9/2017 4:43 PM, Peter Huewe wrote:
>
> Yes that's bad, especially since the current msleep(5) is actually
> msleep(20). However, please also keep in mind SPI tpms show a much
> higher burst count value (255); our I2C TPM SLB9645 even shows
> something in the range of 1k. :)

One of our platforms has a TPM 1.2 with an 8 byte FIFO and a static burst
count. This is where we're debugging.

>> Would another solution be to reduce the burst count poll and sleep
>> to, e.g., 100 usec or even 10 usec? This would probably help
>> greatly, but still not incur the wait states that triggered the
>> NACK.
>
> If you use sleep it's not guaranteed that you wake up after exactly xx
> specified amount of time. Just that you sleep at least xx amount of
> time. Otherwise you would have to do delay/busywaiting.

Understood. However, if the DD maintainers will not accept ignoring
burstCount, would a short sleep (e.g., 10 usec) be acceptable?

If so, we can benchmark it.

> Imho the best option is to figure out whether any vendor can
> determine the "FIFO flush time", i.e. how much time does it take to
> empty the fifo and fill up the burstcount, and use this as the lower
> bound of an usleep range. I don't think tpms are in the range of < 10
> us...
>
> @Ken: Maybe you can check in DDWG?

I asked this week.

Nuvoton, ST Micro, and Infineon confirmed that the TPM empties a byte from
the FIFO in under 1 usec. So, even with a static burst count, the entire
FIFO would empty in under 10 usec.
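A minimal sketch of that "short sleep" variant follows. The function name, the
use of chip->timeout_d as the overall deadline, and the 10-50 usec range are
illustrative assumptions, as is the read_burstcount() helper (a hypothetical
single read of the TPM_STS burstCount field); none of this is from an actual
patch.

#include <linux/delay.h>
#include <linux/jiffies.h>

/* Hypothetical: one read of TPM_STS.burstCount, no internal polling. */
static int read_burstcount(struct tpm_chip *chip);

static int wait_for_burstcount(struct tpm_chip *chip)
{
	unsigned long stop = jiffies + chip->timeout_d;
	int burstcnt;

	do {
		burstcnt = read_burstcount(chip);
		if (burstcnt > 0)
			return burstcnt;
		/* FIFO reportedly drains in < 10 usec, so sleep only briefly. */
		usleep_range(10, 50);
	} while (time_before(jiffies, stop));

	return -EBUSY;
}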
|
From: Ken G. <kg...@li...> - 2017-08-11 21:42:15
|
Following up on this thread based on this week's TCG call ...

1 - burstCount can safely be ignored on writes. This is explicit in most
places in the TCG spec. In the places where it is not explicit, it was simply
an editorial omission. We are going through the spec and adding "without
incurring wait states." TCG is willing to publish an errata if that makes
developers more comfortable.

2 - These are multi-MHz buses. The TPM vendors confirmed that wait states,
even if incurred, will be sub-usec, i.e., less than a microsecond.
Essentially, the DD is loading the FIFO, and the TPM is unloading the FIFO at
processor speeds. Thus, even if one were worried about an odd system new
enough to have a TPM, but old enough to have an LPC attached printer,
keyboard, mouse or floppy, the delay in printing or typing will be
insignificant.

3 - I asked several platform vendors with long TCG experience, and they said
that they know of no motherboards that share the LPC bus with a TPM plus
another device.
|
From: Ken G. <kg...@li...> - 2017-08-11 21:31:44
|
On 8/9/2017 5:00 PM, Peter Huewe wrote:
>
> Since we are the linux kernel, we do have to care for legacy
> devices. And a system with LPC, PS2Mouse on SuperIO and a TPM is not
> that uncommon.
>
> And heck, we even have support for 1.1b TPM devices....

Understood. However, remember that SuperIO is a 1980's device that
predates the TPM. Since the TPM requires special LPC bus cycles, it's
even less likely that an old chipset has an attached TPM.

> Interesting - let me check with Georg tomorrow. Unfortunately I do
> not have access to my tcg mails from home (since I'm not working :),
> but did you _explicitly_ talk about LPC and the system? I'm sure the
> TPM does not care about the waitstates...

Yes, Infineon was one of the 3 TPM vendors who confirmed this.
|
From: Christoph H. <hc...@ls...> - 2017-08-11 17:16:00
|
On Fri, Aug 11, 2017 at 01:11:44PM -0400, Mimi Zohar wrote:
> After moving the libfs and efivarfs code to separate patches, the
> patch hasn't changed all that much, just additional file systems. If
> you don't object, I'll leave you as the author.

Sure - feel free to handle it whichever way you want.
|
From: Mimi Z. <zo...@li...> - 2017-08-11 17:12:08
|
On Fri, 2017-08-11 at 09:20 -0400, Mimi Zohar wrote:
> On Fri, 2017-08-11 at 12:21 +0200, Christoph Hellwig wrote:
> > On Thu, Aug 10, 2017 at 07:41:45PM -0400, Mimi Zohar wrote:
> > > From: Christoph Hellwig <hc...@ls...>
> > >
> > > Add a new ->integrity_read file operation to read data for integrity
> > > hash collection. This is defined to be equivalent to ->read_iter,
> > > except that it will be called with the i_rwsem held exclusively.
> > >
> > > Signed-off-by: Christoph Hellwig <hc...@ls...>
> >
> > Btw, most of this is yours now, feel free to take over the authorship
> > with a little credit to me for the initial patch if you want.
>
> Thank you so much for the initial design and patch!

After moving the libfs and efivarfs code to separate patches, the patch
hasn't changed all that much, just additional file systems. If you don't
object, I'll leave you as the author.

Mimi
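For readers skimming the thread, the interface under discussion can be
pictured roughly as below. The prototype is inferred from the description
above ("equivalent to ->read_iter, called with i_rwsem held exclusively") and
is not necessarily the final form of the patch.

#include <linux/fs.h>
#include <linux/uio.h>

/*
 * Sketch only: the real change would add the member to struct
 * file_operations in include/linux/fs.h rather than define a new struct.
 */
struct file_operations_sketch {
	ssize_t (*read_iter)(struct kiocb *iocb, struct iov_iter *iter);

	/*
	 * Like ->read_iter, but the caller (e.g. IMA hash collection) is
	 * expected to already hold inode->i_rwsem exclusively, so the
	 * implementation must not take it again.
	 */
	ssize_t (*integrity_read)(struct kiocb *iocb, struct iov_iter *iter);
};

For many disk filesystems the new hook could presumably point at the same
helper as ->read_iter, which would explain why only a handful of filesystems
need dedicated changes.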
|