From: Gilad Ben-Y. <gi...@be...> - 2017-08-21 14:09:30
The qce driver starts several async crypto ops and waits for their completions. Move it over to generic code doing the same. Signed-off-by: Gilad Ben-Yossef <gi...@be...> --- drivers/crypto/qce/sha.c | 30 ++++-------------------------- 1 file changed, 4 insertions(+), 26 deletions(-) diff --git a/drivers/crypto/qce/sha.c b/drivers/crypto/qce/sha.c index 47e114a..53227d7 100644 --- a/drivers/crypto/qce/sha.c +++ b/drivers/crypto/qce/sha.c @@ -349,28 +349,12 @@ static int qce_ahash_digest(struct ahash_request *req) return qce->async_req_enqueue(tmpl->qce, &req->base); } -struct qce_ahash_result { - struct completion completion; - int error; -}; - -static void qce_digest_complete(struct crypto_async_request *req, int error) -{ - struct qce_ahash_result *result = req->data; - - if (error == -EINPROGRESS) - return; - - result->error = error; - complete(&result->completion); -} - static int qce_ahash_hmac_setkey(struct crypto_ahash *tfm, const u8 *key, unsigned int keylen) { unsigned int digestsize = crypto_ahash_digestsize(tfm); struct qce_sha_ctx *ctx = crypto_tfm_ctx(&tfm->base); - struct qce_ahash_result result; + struct crypto_wait wait; struct ahash_request *req; struct scatterlist sg; unsigned int blocksize; @@ -405,9 +389,9 @@ static int qce_ahash_hmac_setkey(struct crypto_ahash *tfm, const u8 *key, goto err_free_ahash; } - init_completion(&result.completion); + crypto_init_wait(&wait); ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, - qce_digest_complete, &result); + crypto_req_done, &wait); crypto_ahash_clear_flags(ahash_tfm, ~0); buf = kzalloc(keylen + QCE_MAX_ALIGN_SIZE, GFP_KERNEL); @@ -420,13 +404,7 @@ static int qce_ahash_hmac_setkey(struct crypto_ahash *tfm, const u8 *key, sg_init_one(&sg, buf, keylen); ahash_request_set_crypt(req, &sg, ctx->authkey, keylen); - ret = crypto_ahash_digest(req); - if (ret == -EINPROGRESS || ret == -EBUSY) { - ret = wait_for_completion_interruptible(&result.completion); - if (!ret) - ret = result.error; - } - + ret = crypto_wait_req(crypto_ahash_digest(req), &wait); if (ret) crypto_ahash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); -- 2.1.4 |
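For reference, the generic helpers this series converts everything to — crypto_init_wait(), crypto_req_done() and crypto_wait_req(), declared in <linux/crypto.h> — amount to roughly the following. This is a sketch of the pattern, not a verbatim copy of the header:

struct crypto_wait {
	struct completion completion;
	int err;
};

#define DECLARE_CRYPTO_WAIT(_wait) \
	struct crypto_wait _wait = { \
		COMPLETION_INITIALIZER_ONSTACK((_wait).completion), 0 }

static inline void crypto_init_wait(struct crypto_wait *wait)
{
	init_completion(&wait->completion);
}

/* Async completion callback: record the error and wake the waiter. */
void crypto_req_done(struct crypto_async_request *req, int err)
{
	struct crypto_wait *wait = req->data;

	if (err == -EINPROGRESS)
		return;	/* request only moved off the backlog, keep waiting */

	wait->err = err;
	complete(&wait->completion);
}

/* Fold the submit-then-wait dance into a single call. */
static inline int crypto_wait_req(int err, struct crypto_wait *wait)
{
	switch (err) {
	case -EINPROGRESS:
	case -EBUSY:
		wait_for_completion(&wait->completion);
		reinit_completion(&wait->completion);
		err = wait->err;
		break;
	}

	return err;
}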
From: Gilad Ben-Y. <gi...@be...> - 2017-08-21 14:09:15
The talitos driver starts several async crypto ops and waits for their completions. Move it over to generic code doing the same. Signed-off-by: Gilad Ben-Yossef <gi...@be...> --- drivers/crypto/talitos.c | 38 +++++--------------------------------- 1 file changed, 5 insertions(+), 33 deletions(-) diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c index 79791c6..194a307 100644 --- a/drivers/crypto/talitos.c +++ b/drivers/crypto/talitos.c @@ -2037,22 +2037,6 @@ static int ahash_import(struct ahash_request *areq, const void *in) return 0; } -struct keyhash_result { - struct completion completion; - int err; -}; - -static void keyhash_complete(struct crypto_async_request *req, int err) -{ - struct keyhash_result *res = req->data; - - if (err == -EINPROGRESS) - return; - - res->err = err; - complete(&res->completion); -} - static int keyhash(struct crypto_ahash *tfm, const u8 *key, unsigned int keylen, u8 *hash) { @@ -2060,10 +2044,10 @@ static int keyhash(struct crypto_ahash *tfm, const u8 *key, unsigned int keylen, struct scatterlist sg[1]; struct ahash_request *req; - struct keyhash_result hresult; + struct crypto_wait wait; int ret; - init_completion(&hresult.completion); + crypto_init_wait(&wait); req = ahash_request_alloc(tfm, GFP_KERNEL); if (!req) @@ -2072,25 +2056,13 @@ static int keyhash(struct crypto_ahash *tfm, const u8 *key, unsigned int keylen, /* Keep tfm keylen == 0 during hash of the long key */ ctx->keylen = 0; ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, - keyhash_complete, &hresult); + crypto_req_done, &wait); sg_init_one(&sg[0], key, keylen); ahash_request_set_crypt(req, sg, hash, keylen); - ret = crypto_ahash_digest(req); - switch (ret) { - case 0: - break; - case -EINPROGRESS: - case -EBUSY: - ret = wait_for_completion_interruptible( - &hresult.completion); - if (!ret) - ret = hresult.err; - break; - default: - break; - } + ret = crypto_wait_req(crypto_ahash_digest(req), &wait); + ahash_request_free(req); return ret; -- 2.1.4 |
From: Gilad Ben-Y. <gi...@be...> - 2017-08-21 14:09:03
tcrypt starts several async crypto ops and waits for their completions. Move it over to generic code doing the same. Signed-off-by: Gilad Ben-Yossef <gi...@be...> --- crypto/tcrypt.c | 84 +++++++++++++++++---------------------------------------- 1 file changed, 25 insertions(+), 59 deletions(-) diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c index 0022a18..802aa81 100644 --- a/crypto/tcrypt.c +++ b/crypto/tcrypt.c @@ -79,34 +79,11 @@ static char *check[] = { NULL }; -struct tcrypt_result { - struct completion completion; - int err; -}; - -static void tcrypt_complete(struct crypto_async_request *req, int err) -{ - struct tcrypt_result *res = req->data; - - if (err == -EINPROGRESS) - return; - - res->err = err; - complete(&res->completion); -} - static inline int do_one_aead_op(struct aead_request *req, int ret) { - if (ret == -EINPROGRESS || ret == -EBUSY) { - struct tcrypt_result *tr = req->base.data; + struct crypto_wait *wait = req->base.data; - ret = wait_for_completion_interruptible(&tr->completion); - if (!ret) - ret = tr->err; - reinit_completion(&tr->completion); - } - - return ret; + return crypto_wait_req(ret, wait); } static int test_aead_jiffies(struct aead_request *req, int enc, @@ -248,7 +225,7 @@ static void test_aead_speed(const char *algo, int enc, unsigned int secs, char *axbuf[XBUFSIZE]; unsigned int *b_size; unsigned int iv_len; - struct tcrypt_result result; + struct crypto_wait wait; iv = kzalloc(MAX_IVLEN, GFP_KERNEL); if (!iv) @@ -284,7 +261,7 @@ static void test_aead_speed(const char *algo, int enc, unsigned int secs, goto out_notfm; } - init_completion(&result.completion); + crypto_init_wait(&wait); printk(KERN_INFO "\ntesting speed of %s (%s) %s\n", algo, get_driver_name(crypto_aead, tfm), e); @@ -296,7 +273,7 @@ static void test_aead_speed(const char *algo, int enc, unsigned int secs, } aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, - tcrypt_complete, &result); + crypto_req_done, &wait); i = 0; do { @@ -397,21 +374,16 @@ static void test_hash_sg_init(struct scatterlist *sg) static inline int do_one_ahash_op(struct ahash_request *req, int ret) { - if (ret == -EINPROGRESS || ret == -EBUSY) { - struct tcrypt_result *tr = req->base.data; + struct crypto_wait *wait = req->base.data; - wait_for_completion(&tr->completion); - reinit_completion(&tr->completion); - ret = tr->err; - } - return ret; + return crypto_wait_req(ret, wait); } struct test_mb_ahash_data { struct scatterlist sg[TVMEMSIZE]; char result[64]; struct ahash_request *req; - struct tcrypt_result tresult; + struct crypto_wait wait; char *xbuf[XBUFSIZE]; }; @@ -440,7 +412,7 @@ static void test_mb_ahash_speed(const char *algo, unsigned int sec, if (testmgr_alloc_buf(data[i].xbuf)) goto out; - init_completion(&data[i].tresult.completion); + crypto_init_wait(&data[i].wait); data[i].req = ahash_request_alloc(tfm, GFP_KERNEL); if (!data[i].req) { @@ -449,8 +421,8 @@ static void test_mb_ahash_speed(const char *algo, unsigned int sec, goto out; } - ahash_request_set_callback(data[i].req, 0, - tcrypt_complete, &data[i].tresult); + ahash_request_set_callback(data[i].req, 0, crypto_req_done, + &data[i].wait); test_hash_sg_init(data[i].sg); } @@ -492,16 +464,16 @@ static void test_mb_ahash_speed(const char *algo, unsigned int sec, if (ret) break; - complete(&data[k].tresult.completion); - data[k].tresult.err = 0; + crypto_req_done(&data[k].req->base, 0); } for (j = 0; j < k; j++) { - struct tcrypt_result *tr = &data[j].tresult; + struct crypto_wait *wait = &data[j].wait; + int wait_ret; - 
wait_for_completion(&tr->completion); - if (tr->err) - ret = tr->err; + wait_ret = crypto_wait_req(-EINPROGRESS, wait); + if (wait_ret) + ret = wait_ret; } end = get_cycles(); @@ -679,7 +651,7 @@ static void test_ahash_speed_common(const char *algo, unsigned int secs, struct hash_speed *speed, unsigned mask) { struct scatterlist sg[TVMEMSIZE]; - struct tcrypt_result tresult; + struct crypto_wait wait; struct ahash_request *req; struct crypto_ahash *tfm; char *output; @@ -708,9 +680,9 @@ static void test_ahash_speed_common(const char *algo, unsigned int secs, goto out; } - init_completion(&tresult.completion); + crypto_init_wait(&wait); ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, - tcrypt_complete, &tresult); + crypto_req_done, &wait); output = kmalloc(MAX_DIGEST_SIZE, GFP_KERNEL); if (!output) @@ -765,15 +737,9 @@ static void test_hash_speed(const char *algo, unsigned int secs, static inline int do_one_acipher_op(struct skcipher_request *req, int ret) { - if (ret == -EINPROGRESS || ret == -EBUSY) { - struct tcrypt_result *tr = req->base.data; - - wait_for_completion(&tr->completion); - reinit_completion(&tr->completion); - ret = tr->err; - } + struct crypto_wait *wait = req->base.data; - return ret; + return crypto_wait_req(ret, wait); } static int test_acipher_jiffies(struct skcipher_request *req, int enc, @@ -853,7 +819,7 @@ static void test_skcipher_speed(const char *algo, int enc, unsigned int secs, unsigned int tcount, u8 *keysize, bool async) { unsigned int ret, i, j, k, iv_len; - struct tcrypt_result tresult; + struct crypto_wait wait; const char *key; char iv[128]; struct skcipher_request *req; @@ -866,7 +832,7 @@ static void test_skcipher_speed(const char *algo, int enc, unsigned int secs, else e = "decryption"; - init_completion(&tresult.completion); + crypto_init_wait(&wait); tfm = crypto_alloc_skcipher(algo, 0, async ? 0 : CRYPTO_ALG_ASYNC); @@ -887,7 +853,7 @@ static void test_skcipher_speed(const char *algo, int enc, unsigned int secs, } skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, - tcrypt_complete, &tresult); + crypto_req_done, &wait); i = 0; do { -- 2.1.4 |
From: Gilad Ben-Y. <gi...@be...> - 2017-08-21 14:08:47
ima starts several async crypto ops and waits for their completions. Move it over to generic code doing the same. Signed-off-by: Gilad Ben-Yossef <gi...@be...> Acked-by: Mimi Zohar <zo...@li...> --- security/integrity/ima/ima_crypto.c | 56 +++++++++++-------------------------- 1 file changed, 17 insertions(+), 39 deletions(-) diff --git a/security/integrity/ima/ima_crypto.c b/security/integrity/ima/ima_crypto.c index a856d8c..9057b16 100644 --- a/security/integrity/ima/ima_crypto.c +++ b/security/integrity/ima/ima_crypto.c @@ -27,11 +27,6 @@ #include "ima.h" -struct ahash_completion { - struct completion completion; - int err; -}; - /* minimum file size for ahash use */ static unsigned long ima_ahash_minsize; module_param_named(ahash_minsize, ima_ahash_minsize, ulong, 0644); @@ -196,30 +191,13 @@ static void ima_free_atfm(struct crypto_ahash *tfm) crypto_free_ahash(tfm); } -static void ahash_complete(struct crypto_async_request *req, int err) +static inline int ahash_wait(int err, struct crypto_wait *wait) { - struct ahash_completion *res = req->data; - if (err == -EINPROGRESS) - return; - res->err = err; - complete(&res->completion); -} + err = crypto_wait_req(err, wait); -static int ahash_wait(int err, struct ahash_completion *res) -{ - switch (err) { - case 0: - break; - case -EINPROGRESS: - case -EBUSY: - wait_for_completion(&res->completion); - reinit_completion(&res->completion); - err = res->err; - /* fall through */ - default: + if (err) pr_crit_ratelimited("ahash calculation failed: err: %d\n", err); - } return err; } @@ -233,7 +211,7 @@ static int ima_calc_file_hash_atfm(struct file *file, int rc, read = 0, rbuf_len, active = 0, ahash_rc = 0; struct ahash_request *req; struct scatterlist sg[1]; - struct ahash_completion res; + struct crypto_wait wait; size_t rbuf_size[2]; hash->length = crypto_ahash_digestsize(tfm); @@ -242,12 +220,12 @@ static int ima_calc_file_hash_atfm(struct file *file, if (!req) return -ENOMEM; - init_completion(&res.completion); + crypto_init_wait(&wait); ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP, - ahash_complete, &res); + crypto_req_done, &wait); - rc = ahash_wait(crypto_ahash_init(req), &res); + rc = ahash_wait(crypto_ahash_init(req), &wait); if (rc) goto out1; @@ -288,7 +266,7 @@ static int ima_calc_file_hash_atfm(struct file *file, * read/request, wait for the completion of the * previous ahash_update() request. */ - rc = ahash_wait(ahash_rc, &res); + rc = ahash_wait(ahash_rc, &wait); if (rc) goto out3; } @@ -304,7 +282,7 @@ static int ima_calc_file_hash_atfm(struct file *file, * read/request, wait for the completion of the * previous ahash_update() request. 
*/ - rc = ahash_wait(ahash_rc, &res); + rc = ahash_wait(ahash_rc, &wait); if (rc) goto out3; } @@ -318,7 +296,7 @@ static int ima_calc_file_hash_atfm(struct file *file, active = !active; /* swap buffers, if we use two */ } /* wait for the last update request to complete */ - rc = ahash_wait(ahash_rc, &res); + rc = ahash_wait(ahash_rc, &wait); out3: if (read) file->f_mode &= ~FMODE_READ; @@ -327,7 +305,7 @@ static int ima_calc_file_hash_atfm(struct file *file, out2: if (!rc) { ahash_request_set_crypt(req, NULL, hash->digest, 0); - rc = ahash_wait(crypto_ahash_final(req), &res); + rc = ahash_wait(crypto_ahash_final(req), &wait); } out1: ahash_request_free(req); @@ -537,7 +515,7 @@ static int calc_buffer_ahash_atfm(const void *buf, loff_t len, { struct ahash_request *req; struct scatterlist sg; - struct ahash_completion res; + struct crypto_wait wait; int rc, ahash_rc = 0; hash->length = crypto_ahash_digestsize(tfm); @@ -546,12 +524,12 @@ static int calc_buffer_ahash_atfm(const void *buf, loff_t len, if (!req) return -ENOMEM; - init_completion(&res.completion); + crypto_init_wait(&wait); ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP, - ahash_complete, &res); + crypto_req_done, &wait); - rc = ahash_wait(crypto_ahash_init(req), &res); + rc = ahash_wait(crypto_ahash_init(req), &wait); if (rc) goto out; @@ -561,10 +539,10 @@ static int calc_buffer_ahash_atfm(const void *buf, loff_t len, ahash_rc = crypto_ahash_update(req); /* wait for the update request to complete */ - rc = ahash_wait(ahash_rc, &res); + rc = ahash_wait(ahash_rc, &wait); if (!rc) { ahash_request_set_crypt(req, NULL, hash->digest, 0); - rc = ahash_wait(crypto_ahash_final(req), &res); + rc = ahash_wait(crypto_ahash_final(req), &wait); } out: ahash_request_free(req); -- 2.1.4 |
From: Gilad Ben-Y. <gi...@be...> - 2017-08-21 14:08:32
cifs starts an async. crypto op and waits for their completion. Move it over to generic code doing the same. Signed-off-by: Gilad Ben-Yossef <gi...@be...> Acked-by: Pavel Shilovsky <ps...@mi...> --- fs/cifs/smb2ops.c | 30 ++++-------------------------- 1 file changed, 4 insertions(+), 26 deletions(-) diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c index cfacf2c..16fb041 100644 --- a/fs/cifs/smb2ops.c +++ b/fs/cifs/smb2ops.c @@ -1878,22 +1878,6 @@ init_sg(struct smb_rqst *rqst, u8 *sign) return sg; } -struct cifs_crypt_result { - int err; - struct completion completion; -}; - -static void cifs_crypt_complete(struct crypto_async_request *req, int err) -{ - struct cifs_crypt_result *res = req->data; - - if (err == -EINPROGRESS) - return; - - res->err = err; - complete(&res->completion); -} - static int smb2_get_enc_key(struct TCP_Server_Info *server, __u64 ses_id, int enc, u8 *key) { @@ -1934,12 +1918,10 @@ crypt_message(struct TCP_Server_Info *server, struct smb_rqst *rqst, int enc) struct aead_request *req; char *iv; unsigned int iv_len; - struct cifs_crypt_result result = {0, }; + DECLARE_CRYPTO_WAIT(wait); struct crypto_aead *tfm; unsigned int crypt_len = le32_to_cpu(tr_hdr->OriginalMessageSize); - init_completion(&result.completion); - rc = smb2_get_enc_key(server, tr_hdr->SessionId, enc, key); if (rc) { cifs_dbg(VFS, "%s: Could not get %scryption key\n", __func__, @@ -1999,14 +1981,10 @@ crypt_message(struct TCP_Server_Info *server, struct smb_rqst *rqst, int enc) aead_request_set_ad(req, assoc_data_len); aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, - cifs_crypt_complete, &result); + crypto_req_done, &wait); - rc = enc ? crypto_aead_encrypt(req) : crypto_aead_decrypt(req); - - if (rc == -EINPROGRESS || rc == -EBUSY) { - wait_for_completion(&result.completion); - rc = result.err; - } + rc = crypto_wait_req(enc ? crypto_aead_encrypt(req) + : crypto_aead_decrypt(req), &wait); if (!rc && enc) memcpy(&tr_hdr->Signature, sign, SMB2_SIGNATURE_SIZE); -- 2.1.4 |
From: Gilad Ben-Y. <gi...@be...> - 2017-08-21 14:08:17
dm-verity is starting async. crypto ops and waiting for them to complete. Move it over to generic code doing the same. This also avoids a future potential data coruption bug created by the use of wait_for_completion_interruptible() without dealing correctly with an interrupt aborting the wait prior to the async op finishing, should this code ever move to a context where signals are not masked. Signed-off-by: Gilad Ben-Yossef <gi...@be...> CC: Mikulas Patocka <mpa...@re...> --- drivers/md/dm-verity-target.c | 81 +++++++++++-------------------------------- drivers/md/dm-verity.h | 5 --- 2 files changed, 20 insertions(+), 66 deletions(-) diff --git a/drivers/md/dm-verity-target.c b/drivers/md/dm-verity-target.c index 79f18d4..8df08a8 100644 --- a/drivers/md/dm-verity-target.c +++ b/drivers/md/dm-verity-target.c @@ -92,74 +92,33 @@ static sector_t verity_position_at_level(struct dm_verity *v, sector_t block, return block >> (level * v->hash_per_block_bits); } -/* - * Callback function for asynchrnous crypto API completion notification - */ -static void verity_op_done(struct crypto_async_request *base, int err) -{ - struct verity_result *res = (struct verity_result *)base->data; - - if (err == -EINPROGRESS) - return; - - res->err = err; - complete(&res->completion); -} - -/* - * Wait for async crypto API callback - */ -static inline int verity_complete_op(struct verity_result *res, int ret) -{ - switch (ret) { - case 0: - break; - - case -EINPROGRESS: - case -EBUSY: - ret = wait_for_completion_interruptible(&res->completion); - if (!ret) - ret = res->err; - reinit_completion(&res->completion); - break; - - default: - DMERR("verity_wait_hash: crypto op submission failed: %d", ret); - } - - if (unlikely(ret < 0)) - DMERR("verity_wait_hash: crypto op failed: %d", ret); - - return ret; -} - static int verity_hash_update(struct dm_verity *v, struct ahash_request *req, const u8 *data, size_t len, - struct verity_result *res) + struct crypto_wait *wait) { struct scatterlist sg; sg_init_one(&sg, data, len); ahash_request_set_crypt(req, &sg, NULL, len); - return verity_complete_op(res, crypto_ahash_update(req)); + return crypto_wait_req(crypto_ahash_update(req), wait); } /* * Wrapper for crypto_ahash_init, which handles verity salting. 
*/ static int verity_hash_init(struct dm_verity *v, struct ahash_request *req, - struct verity_result *res) + struct crypto_wait *wait) { int r; ahash_request_set_tfm(req, v->tfm); ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP | CRYPTO_TFM_REQ_MAY_BACKLOG, - verity_op_done, (void *)res); - init_completion(&res->completion); + crypto_req_done, (void *)wait); + crypto_init_wait(wait); - r = verity_complete_op(res, crypto_ahash_init(req)); + r = crypto_wait_req(crypto_ahash_init(req), wait); if (unlikely(r < 0)) { DMERR("crypto_ahash_init failed: %d", r); @@ -167,18 +126,18 @@ static int verity_hash_init(struct dm_verity *v, struct ahash_request *req, } if (likely(v->salt_size && (v->version >= 1))) - r = verity_hash_update(v, req, v->salt, v->salt_size, res); + r = verity_hash_update(v, req, v->salt, v->salt_size, wait); return r; } static int verity_hash_final(struct dm_verity *v, struct ahash_request *req, - u8 *digest, struct verity_result *res) + u8 *digest, struct crypto_wait *wait) { int r; if (unlikely(v->salt_size && (!v->version))) { - r = verity_hash_update(v, req, v->salt, v->salt_size, res); + r = verity_hash_update(v, req, v->salt, v->salt_size, wait); if (r < 0) { DMERR("verity_hash_final failed updating salt: %d", r); @@ -187,7 +146,7 @@ static int verity_hash_final(struct dm_verity *v, struct ahash_request *req, } ahash_request_set_crypt(req, NULL, digest, 0); - r = verity_complete_op(res, crypto_ahash_final(req)); + r = crypto_wait_req(crypto_ahash_final(req), wait); out: return r; } @@ -196,17 +155,17 @@ int verity_hash(struct dm_verity *v, struct ahash_request *req, const u8 *data, size_t len, u8 *digest) { int r; - struct verity_result res; + struct crypto_wait wait; - r = verity_hash_init(v, req, &res); + r = verity_hash_init(v, req, &wait); if (unlikely(r < 0)) goto out; - r = verity_hash_update(v, req, data, len, &res); + r = verity_hash_update(v, req, data, len, &wait); if (unlikely(r < 0)) goto out; - r = verity_hash_final(v, req, digest, &res); + r = verity_hash_final(v, req, digest, &wait); out: return r; @@ -389,7 +348,7 @@ int verity_hash_for_block(struct dm_verity *v, struct dm_verity_io *io, * Calculates the digest for the given bio */ int verity_for_io_block(struct dm_verity *v, struct dm_verity_io *io, - struct bvec_iter *iter, struct verity_result *res) + struct bvec_iter *iter, struct crypto_wait *wait) { unsigned int todo = 1 << v->data_dev_block_bits; struct bio *bio = dm_bio_from_per_bio_data(io, v->ti->per_io_data_size); @@ -414,7 +373,7 @@ int verity_for_io_block(struct dm_verity *v, struct dm_verity_io *io, */ sg_set_page(&sg, bv.bv_page, len, bv.bv_offset); ahash_request_set_crypt(req, &sg, NULL, len); - r = verity_complete_op(res, crypto_ahash_update(req)); + r = crypto_wait_req(crypto_ahash_update(req), wait); if (unlikely(r < 0)) { DMERR("verity_for_io_block crypto op failed: %d", r); @@ -482,7 +441,7 @@ static int verity_verify_io(struct dm_verity_io *io) struct dm_verity *v = io->v; struct bvec_iter start; unsigned b; - struct verity_result res; + struct crypto_wait wait; for (b = 0; b < io->n_blocks; b++) { int r; @@ -507,17 +466,17 @@ static int verity_verify_io(struct dm_verity_io *io) continue; } - r = verity_hash_init(v, req, &res); + r = verity_hash_init(v, req, &wait); if (unlikely(r < 0)) return r; start = io->iter; - r = verity_for_io_block(v, io, &io->iter, &res); + r = verity_for_io_block(v, io, &io->iter, &wait); if (unlikely(r < 0)) return r; r = verity_hash_final(v, req, verity_io_real_digest(v, io), - &res); + &wait); 
if (unlikely(r < 0)) return r; diff --git a/drivers/md/dm-verity.h b/drivers/md/dm-verity.h index a59e0ad..b675bc0 100644 --- a/drivers/md/dm-verity.h +++ b/drivers/md/dm-verity.h @@ -90,11 +90,6 @@ struct dm_verity_io { */ }; -struct verity_result { - struct completion completion; - int err; -}; - static inline struct ahash_request *verity_io_hash_req(struct dm_verity *v, struct dm_verity_io *io) { -- 2.1.4 |
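To make the wait_for_completion_interruptible() hazard mentioned in the dm-verity changelog concrete, the removed pattern looked roughly like the hypothetical fragment below (the names are illustrative, not taken from the driver): if a signal cuts the wait short, the caller unwinds while the driver can still invoke the completion callback and write into a result object that has gone out of scope.

/* Hypothetical illustration of the pattern being removed. */
struct demo_result {
	struct completion completion;
	int err;
};

static int demo_wait_interruptible(struct ahash_request *req,
				   struct demo_result *res)
{
	int ret = crypto_ahash_update(req);

	if (ret == -EINPROGRESS || ret == -EBUSY) {
		ret = wait_for_completion_interruptible(&res->completion);
		/*
		 * A signal makes this return -ERESTARTSYS before the
		 * operation is done; the async callback may then run and
		 * write *res after its stack frame no longer exists.
		 */
		if (!ret)
			ret = res->err;
	}

	return ret;
}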
From: Gilad Ben-Y. <gi...@be...> - 2017-08-21 14:08:02
fscrypt starts several async. crypto ops and waiting for them to complete. Move it over to generic code doing the same. Signed-off-by: Gilad Ben-Yossef <gi...@be...> --- fs/crypto/crypto.c | 28 ++++------------------------ fs/crypto/fname.c | 36 ++++++------------------------------ fs/crypto/fscrypt_private.h | 10 ---------- fs/crypto/keyinfo.c | 21 +++------------------ 4 files changed, 13 insertions(+), 82 deletions(-) diff --git a/fs/crypto/crypto.c b/fs/crypto/crypto.c index c7835df..80a3cad 100644 --- a/fs/crypto/crypto.c +++ b/fs/crypto/crypto.c @@ -126,21 +126,6 @@ struct fscrypt_ctx *fscrypt_get_ctx(const struct inode *inode, gfp_t gfp_flags) } EXPORT_SYMBOL(fscrypt_get_ctx); -/** - * page_crypt_complete() - completion callback for page crypto - * @req: The asynchronous cipher request context - * @res: The result of the cipher operation - */ -static void page_crypt_complete(struct crypto_async_request *req, int res) -{ - struct fscrypt_completion_result *ecr = req->data; - - if (res == -EINPROGRESS) - return; - ecr->res = res; - complete(&ecr->completion); -} - int fscrypt_do_page_crypto(const struct inode *inode, fscrypt_direction_t rw, u64 lblk_num, struct page *src_page, struct page *dest_page, unsigned int len, @@ -151,7 +136,7 @@ int fscrypt_do_page_crypto(const struct inode *inode, fscrypt_direction_t rw, u8 padding[FS_IV_SIZE - sizeof(__le64)]; } iv; struct skcipher_request *req = NULL; - DECLARE_FS_COMPLETION_RESULT(ecr); + DECLARE_CRYPTO_WAIT(wait); struct scatterlist dst, src; struct fscrypt_info *ci = inode->i_crypt_info; struct crypto_skcipher *tfm = ci->ci_ctfm; @@ -179,7 +164,7 @@ int fscrypt_do_page_crypto(const struct inode *inode, fscrypt_direction_t rw, skcipher_request_set_callback( req, CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP, - page_crypt_complete, &ecr); + crypto_req_done, &wait); sg_init_table(&dst, 1); sg_set_page(&dst, dest_page, len, offs); @@ -187,14 +172,9 @@ int fscrypt_do_page_crypto(const struct inode *inode, fscrypt_direction_t rw, sg_set_page(&src, src_page, len, offs); skcipher_request_set_crypt(req, &src, &dst, len, &iv); if (rw == FS_DECRYPT) - res = crypto_skcipher_decrypt(req); + res = crypto_wait_req(crypto_skcipher_decrypt(req), &wait); else - res = crypto_skcipher_encrypt(req); - if (res == -EINPROGRESS || res == -EBUSY) { - BUG_ON(req->base.data != &ecr); - wait_for_completion(&ecr.completion); - res = ecr.res; - } + res = crypto_wait_req(crypto_skcipher_encrypt(req), &wait); skcipher_request_free(req); if (res) { printk_ratelimited(KERN_ERR diff --git a/fs/crypto/fname.c b/fs/crypto/fname.c index ad9f814..a80a0d3 100644 --- a/fs/crypto/fname.c +++ b/fs/crypto/fname.c @@ -15,21 +15,6 @@ #include "fscrypt_private.h" /** - * fname_crypt_complete() - completion callback for filename crypto - * @req: The asynchronous cipher request context - * @res: The result of the cipher operation - */ -static void fname_crypt_complete(struct crypto_async_request *req, int res) -{ - struct fscrypt_completion_result *ecr = req->data; - - if (res == -EINPROGRESS) - return; - ecr->res = res; - complete(&ecr->completion); -} - -/** * fname_encrypt() - encrypt a filename * * The caller must have allocated sufficient memory for the @oname string. 
@@ -40,7 +25,7 @@ static int fname_encrypt(struct inode *inode, const struct qstr *iname, struct fscrypt_str *oname) { struct skcipher_request *req = NULL; - DECLARE_FS_COMPLETION_RESULT(ecr); + DECLARE_CRYPTO_WAIT(wait); struct fscrypt_info *ci = inode->i_crypt_info; struct crypto_skcipher *tfm = ci->ci_ctfm; int res = 0; @@ -76,17 +61,12 @@ static int fname_encrypt(struct inode *inode, } skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP, - fname_crypt_complete, &ecr); + crypto_req_done, &wait); sg_init_one(&sg, oname->name, cryptlen); skcipher_request_set_crypt(req, &sg, &sg, cryptlen, iv); /* Do the encryption */ - res = crypto_skcipher_encrypt(req); - if (res == -EINPROGRESS || res == -EBUSY) { - /* Request is being completed asynchronously; wait for it */ - wait_for_completion(&ecr.completion); - res = ecr.res; - } + res = crypto_wait_req(crypto_skcipher_encrypt(req), &wait); skcipher_request_free(req); if (res < 0) { printk_ratelimited(KERN_ERR @@ -110,7 +90,7 @@ static int fname_decrypt(struct inode *inode, struct fscrypt_str *oname) { struct skcipher_request *req = NULL; - DECLARE_FS_COMPLETION_RESULT(ecr); + DECLARE_CRYPTO_WAIT(wait); struct scatterlist src_sg, dst_sg; struct fscrypt_info *ci = inode->i_crypt_info; struct crypto_skcipher *tfm = ci->ci_ctfm; @@ -131,7 +111,7 @@ static int fname_decrypt(struct inode *inode, } skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP, - fname_crypt_complete, &ecr); + crypto_req_done, &wait); /* Initialize IV */ memset(iv, 0, FS_CRYPTO_BLOCK_SIZE); @@ -140,11 +120,7 @@ static int fname_decrypt(struct inode *inode, sg_init_one(&src_sg, iname->name, iname->len); sg_init_one(&dst_sg, oname->name, oname->len); skcipher_request_set_crypt(req, &src_sg, &dst_sg, iname->len, iv); - res = crypto_skcipher_decrypt(req); - if (res == -EINPROGRESS || res == -EBUSY) { - wait_for_completion(&ecr.completion); - res = ecr.res; - } + res = crypto_wait_req(crypto_skcipher_decrypt(req), &wait); skcipher_request_free(req); if (res < 0) { printk_ratelimited(KERN_ERR diff --git a/fs/crypto/fscrypt_private.h b/fs/crypto/fscrypt_private.h index a1d5021..c0f1881 100644 --- a/fs/crypto/fscrypt_private.h +++ b/fs/crypto/fscrypt_private.h @@ -69,16 +69,6 @@ typedef enum { #define FS_CTX_REQUIRES_FREE_ENCRYPT_FL 0x00000001 #define FS_CTX_HAS_BOUNCE_BUFFER_FL 0x00000002 -struct fscrypt_completion_result { - struct completion completion; - int res; -}; - -#define DECLARE_FS_COMPLETION_RESULT(ecr) \ - struct fscrypt_completion_result ecr = { \ - COMPLETION_INITIALIZER_ONSTACK((ecr).completion), 0 } - - /* crypto.c */ extern int fscrypt_initialize(unsigned int cop_flags); extern struct workqueue_struct *fscrypt_read_workqueue; diff --git a/fs/crypto/keyinfo.c b/fs/crypto/keyinfo.c index 018c588..3c84cac 100644 --- a/fs/crypto/keyinfo.c +++ b/fs/crypto/keyinfo.c @@ -17,17 +17,6 @@ static struct crypto_shash *essiv_hash_tfm; -static void derive_crypt_complete(struct crypto_async_request *req, int rc) -{ - struct fscrypt_completion_result *ecr = req->data; - - if (rc == -EINPROGRESS) - return; - - ecr->res = rc; - complete(&ecr->completion); -} - /** * derive_key_aes() - Derive a key using AES-128-ECB * @deriving_key: Encryption key used for derivation. 
@@ -42,7 +31,7 @@ static int derive_key_aes(u8 deriving_key[FS_AES_128_ECB_KEY_SIZE], { int res = 0; struct skcipher_request *req = NULL; - DECLARE_FS_COMPLETION_RESULT(ecr); + DECLARE_CRYPTO_WAIT(wait); struct scatterlist src_sg, dst_sg; struct crypto_skcipher *tfm = crypto_alloc_skcipher("ecb(aes)", 0, 0); @@ -59,7 +48,7 @@ static int derive_key_aes(u8 deriving_key[FS_AES_128_ECB_KEY_SIZE], } skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP, - derive_crypt_complete, &ecr); + crypto_req_done, &wait); res = crypto_skcipher_setkey(tfm, deriving_key, FS_AES_128_ECB_KEY_SIZE); if (res < 0) @@ -69,11 +58,7 @@ static int derive_key_aes(u8 deriving_key[FS_AES_128_ECB_KEY_SIZE], sg_init_one(&dst_sg, derived_raw_key, source_key->size); skcipher_request_set_crypt(req, &src_sg, &dst_sg, source_key->size, NULL); - res = crypto_skcipher_encrypt(req); - if (res == -EINPROGRESS || res == -EBUSY) { - wait_for_completion(&ecr.completion); - res = ecr.res; - } + res = crypto_wait_req(crypto_skcipher_encrypt(req), &wait); out: skcipher_request_free(req); crypto_free_skcipher(tfm); -- 2.1.4 |
From: Gilad Ben-Y. <gi...@be...> - 2017-08-21 14:07:53
testmgr is starting async. crypto ops and waiting for them to complete. Move it over to generic code doing the same. This also provides a test of the generic crypto async. wait code. Signed-off-by: Gilad Ben-Yossef <gi...@be...> --- crypto/testmgr.c | 204 ++++++++++++++++++------------------------------------- 1 file changed, 66 insertions(+), 138 deletions(-) diff --git a/crypto/testmgr.c b/crypto/testmgr.c index 7125ba3..a65b4d5 100644 --- a/crypto/testmgr.c +++ b/crypto/testmgr.c @@ -76,11 +76,6 @@ int alg_test(const char *driver, const char *alg, u32 type, u32 mask) #define ENCRYPT 1 #define DECRYPT 0 -struct tcrypt_result { - struct completion completion; - int err; -}; - struct aead_test_suite { struct { const struct aead_testvec *vecs; @@ -155,17 +150,6 @@ static void hexdump(unsigned char *buf, unsigned int len) buf, len, false); } -static void tcrypt_complete(struct crypto_async_request *req, int err) -{ - struct tcrypt_result *res = req->data; - - if (err == -EINPROGRESS) - return; - - res->err = err; - complete(&res->completion); -} - static int testmgr_alloc_buf(char *buf[XBUFSIZE]) { int i; @@ -193,20 +177,10 @@ static void testmgr_free_buf(char *buf[XBUFSIZE]) free_page((unsigned long)buf[i]); } -static int wait_async_op(struct tcrypt_result *tr, int ret) -{ - if (ret == -EINPROGRESS || ret == -EBUSY) { - wait_for_completion(&tr->completion); - reinit_completion(&tr->completion); - ret = tr->err; - } - return ret; -} - static int ahash_partial_update(struct ahash_request **preq, struct crypto_ahash *tfm, const struct hash_testvec *template, void *hash_buff, int k, int temp, struct scatterlist *sg, - const char *algo, char *result, struct tcrypt_result *tresult) + const char *algo, char *result, struct crypto_wait *wait) { char *state; struct ahash_request *req; @@ -236,7 +210,7 @@ static int ahash_partial_update(struct ahash_request **preq, } ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, - tcrypt_complete, tresult); + crypto_req_done, wait); memcpy(hash_buff, template->plaintext + temp, template->tap[k]); @@ -247,7 +221,7 @@ static int ahash_partial_update(struct ahash_request **preq, pr_err("alg: hash: Failed to import() for %s\n", algo); goto out; } - ret = wait_async_op(tresult, crypto_ahash_update(req)); + ret = crypto_wait_req(crypto_ahash_update(req), wait); if (ret) goto out; *preq = req; @@ -272,7 +246,7 @@ static int __test_hash(struct crypto_ahash *tfm, char *result; char *key; struct ahash_request *req; - struct tcrypt_result tresult; + struct crypto_wait wait; void *hash_buff; char *xbuf[XBUFSIZE]; int ret = -ENOMEM; @@ -286,7 +260,7 @@ static int __test_hash(struct crypto_ahash *tfm, if (testmgr_alloc_buf(xbuf)) goto out_nobuf; - init_completion(&tresult.completion); + crypto_init_wait(&wait); req = ahash_request_alloc(tfm, GFP_KERNEL); if (!req) { @@ -295,7 +269,7 @@ static int __test_hash(struct crypto_ahash *tfm, goto out_noreq; } ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, - tcrypt_complete, &tresult); + crypto_req_done, &wait); j = 0; for (i = 0; i < tcount; i++) { @@ -335,26 +309,26 @@ static int __test_hash(struct crypto_ahash *tfm, ahash_request_set_crypt(req, sg, result, template[i].psize); if (use_digest) { - ret = wait_async_op(&tresult, crypto_ahash_digest(req)); + ret = crypto_wait_req(crypto_ahash_digest(req), &wait); if (ret) { pr_err("alg: hash: digest failed on test %d " "for %s: ret=%d\n", j, algo, -ret); goto out; } } else { - ret = wait_async_op(&tresult, crypto_ahash_init(req)); + ret = 
crypto_wait_req(crypto_ahash_init(req), &wait); if (ret) { pr_err("alg: hash: init failed on test %d " "for %s: ret=%d\n", j, algo, -ret); goto out; } - ret = wait_async_op(&tresult, crypto_ahash_update(req)); + ret = crypto_wait_req(crypto_ahash_update(req), &wait); if (ret) { pr_err("alg: hash: update failed on test %d " "for %s: ret=%d\n", j, algo, -ret); goto out; } - ret = wait_async_op(&tresult, crypto_ahash_final(req)); + ret = crypto_wait_req(crypto_ahash_final(req), &wait); if (ret) { pr_err("alg: hash: final failed on test %d " "for %s: ret=%d\n", j, algo, -ret); @@ -420,22 +394,10 @@ static int __test_hash(struct crypto_ahash *tfm, } ahash_request_set_crypt(req, sg, result, template[i].psize); - ret = crypto_ahash_digest(req); - switch (ret) { - case 0: - break; - case -EINPROGRESS: - case -EBUSY: - wait_for_completion(&tresult.completion); - reinit_completion(&tresult.completion); - ret = tresult.err; - if (!ret) - break; - /* fall through */ - default: - printk(KERN_ERR "alg: hash: digest failed " - "on chunking test %d for %s: " - "ret=%d\n", j, algo, -ret); + ret = crypto_wait_req(crypto_ahash_digest(req), &wait); + if (ret) { + pr_err("alg: hash: digest failed on chunking test %d for %s: ret=%d\n", + j, algo, -ret); goto out; } @@ -486,13 +448,13 @@ static int __test_hash(struct crypto_ahash *tfm, } ahash_request_set_crypt(req, sg, result, template[i].tap[0]); - ret = wait_async_op(&tresult, crypto_ahash_init(req)); + ret = crypto_wait_req(crypto_ahash_init(req), &wait); if (ret) { pr_err("alg: hash: init failed on test %d for %s: ret=%d\n", j, algo, -ret); goto out; } - ret = wait_async_op(&tresult, crypto_ahash_update(req)); + ret = crypto_wait_req(crypto_ahash_update(req), &wait); if (ret) { pr_err("alg: hash: update failed on test %d for %s: ret=%d\n", j, algo, -ret); @@ -503,7 +465,7 @@ static int __test_hash(struct crypto_ahash *tfm, for (k = 1; k < template[i].np; k++) { ret = ahash_partial_update(&req, tfm, &template[i], hash_buff, k, temp, &sg[0], algo, result, - &tresult); + &wait); if (ret) { pr_err("alg: hash: partial update failed on test %d for %s: ret=%d\n", j, algo, -ret); @@ -511,7 +473,7 @@ static int __test_hash(struct crypto_ahash *tfm, } temp += template[i].tap[k]; } - ret = wait_async_op(&tresult, crypto_ahash_final(req)); + ret = crypto_wait_req(crypto_ahash_final(req), &wait); if (ret) { pr_err("alg: hash: final failed on test %d for %s: ret=%d\n", j, algo, -ret); @@ -580,7 +542,7 @@ static int __test_aead(struct crypto_aead *tfm, int enc, struct scatterlist *sg; struct scatterlist *sgout; const char *e, *d; - struct tcrypt_result result; + struct crypto_wait wait; unsigned int authsize, iv_len; void *input; void *output; @@ -619,7 +581,7 @@ static int __test_aead(struct crypto_aead *tfm, int enc, else e = "decryption"; - init_completion(&result.completion); + crypto_init_wait(&wait); req = aead_request_alloc(tfm, GFP_KERNEL); if (!req) { @@ -629,7 +591,7 @@ static int __test_aead(struct crypto_aead *tfm, int enc, } aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, - tcrypt_complete, &result); + crypto_req_done, &wait); iv_len = crypto_aead_ivsize(tfm); @@ -709,7 +671,8 @@ static int __test_aead(struct crypto_aead *tfm, int enc, aead_request_set_ad(req, template[i].alen); - ret = enc ? crypto_aead_encrypt(req) : crypto_aead_decrypt(req); + ret = crypto_wait_req(enc ? 
crypto_aead_encrypt(req) + : crypto_aead_decrypt(req), &wait); switch (ret) { case 0: @@ -722,13 +685,6 @@ static int __test_aead(struct crypto_aead *tfm, int enc, goto out; } break; - case -EINPROGRESS: - case -EBUSY: - wait_for_completion(&result.completion); - reinit_completion(&result.completion); - ret = result.err; - if (!ret) - break; case -EBADMSG: if (template[i].novrfy) /* verification failure was expected */ @@ -866,7 +822,8 @@ static int __test_aead(struct crypto_aead *tfm, int enc, aead_request_set_ad(req, template[i].alen); - ret = enc ? crypto_aead_encrypt(req) : crypto_aead_decrypt(req); + ret = crypto_wait_req(enc ? crypto_aead_encrypt(req) + : crypto_aead_decrypt(req), &wait); switch (ret) { case 0: @@ -879,13 +836,6 @@ static int __test_aead(struct crypto_aead *tfm, int enc, goto out; } break; - case -EINPROGRESS: - case -EBUSY: - wait_for_completion(&result.completion); - reinit_completion(&result.completion); - ret = result.err; - if (!ret) - break; case -EBADMSG: if (template[i].novrfy) /* verification failure was expected */ @@ -1083,7 +1033,7 @@ static int __test_skcipher(struct crypto_skcipher *tfm, int enc, struct scatterlist sg[8]; struct scatterlist sgout[8]; const char *e, *d; - struct tcrypt_result result; + struct crypto_wait wait; void *data; char iv[MAX_IVLEN]; char *xbuf[XBUFSIZE]; @@ -1107,7 +1057,7 @@ static int __test_skcipher(struct crypto_skcipher *tfm, int enc, else e = "decryption"; - init_completion(&result.completion); + crypto_init_wait(&wait); req = skcipher_request_alloc(tfm, GFP_KERNEL); if (!req) { @@ -1117,7 +1067,7 @@ static int __test_skcipher(struct crypto_skcipher *tfm, int enc, } skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, - tcrypt_complete, &result); + crypto_req_done, &wait); j = 0; for (i = 0; i < tcount; i++) { @@ -1164,21 +1114,10 @@ static int __test_skcipher(struct crypto_skcipher *tfm, int enc, skcipher_request_set_crypt(req, sg, (diff_dst) ? sgout : sg, template[i].ilen, iv); - ret = enc ? crypto_skcipher_encrypt(req) : - crypto_skcipher_decrypt(req); + ret = crypto_wait_req(enc ? crypto_skcipher_encrypt(req) : + crypto_skcipher_decrypt(req), &wait); - switch (ret) { - case 0: - break; - case -EINPROGRESS: - case -EBUSY: - wait_for_completion(&result.completion); - reinit_completion(&result.completion); - ret = result.err; - if (!ret) - break; - /* fall through */ - default: + if (ret) { pr_err("alg: skcipher%s: %s failed on test %d for %s: ret=%d\n", d, e, j, algo, -ret); goto out; @@ -1272,21 +1211,10 @@ static int __test_skcipher(struct crypto_skcipher *tfm, int enc, skcipher_request_set_crypt(req, sg, (diff_dst) ? sgout : sg, template[i].ilen, iv); - ret = enc ? crypto_skcipher_encrypt(req) : - crypto_skcipher_decrypt(req); + ret = crypto_wait_req(enc ? 
crypto_skcipher_encrypt(req) : + crypto_skcipher_decrypt(req), &wait); - switch (ret) { - case 0: - break; - case -EINPROGRESS: - case -EBUSY: - wait_for_completion(&result.completion); - reinit_completion(&result.completion); - ret = result.err; - if (!ret) - break; - /* fall through */ - default: + if (ret) { pr_err("alg: skcipher%s: %s failed on chunk test %d for %s: ret=%d\n", d, e, j, algo, -ret); goto out; @@ -1462,7 +1390,7 @@ static int test_acomp(struct crypto_acomp *tfm, int ret; struct scatterlist src, dst; struct acomp_req *req; - struct tcrypt_result result; + struct crypto_wait wait; output = kmalloc(COMP_BUF_SIZE, GFP_KERNEL); if (!output) @@ -1486,7 +1414,7 @@ static int test_acomp(struct crypto_acomp *tfm, } memset(output, 0, dlen); - init_completion(&result.completion); + crypto_init_wait(&wait); sg_init_one(&src, input_vec, ilen); sg_init_one(&dst, output, dlen); @@ -1501,9 +1429,9 @@ static int test_acomp(struct crypto_acomp *tfm, acomp_request_set_params(req, &src, &dst, ilen, dlen); acomp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, - tcrypt_complete, &result); + crypto_req_done, &wait); - ret = wait_async_op(&result, crypto_acomp_compress(req)); + ret = crypto_wait_req(crypto_acomp_compress(req), &wait); if (ret) { pr_err("alg: acomp: compression failed on test %d for %s: ret=%d\n", i + 1, algo, -ret); @@ -1516,10 +1444,10 @@ static int test_acomp(struct crypto_acomp *tfm, dlen = COMP_BUF_SIZE; sg_init_one(&src, output, ilen); sg_init_one(&dst, decomp_out, dlen); - init_completion(&result.completion); + crypto_init_wait(&wait); acomp_request_set_params(req, &src, &dst, ilen, dlen); - ret = wait_async_op(&result, crypto_acomp_decompress(req)); + ret = crypto_wait_req(crypto_acomp_decompress(req), &wait); if (ret) { pr_err("alg: acomp: compression failed on test %d for %s: ret=%d\n", i + 1, algo, -ret); @@ -1563,7 +1491,7 @@ static int test_acomp(struct crypto_acomp *tfm, } memset(output, 0, dlen); - init_completion(&result.completion); + crypto_init_wait(&wait); sg_init_one(&src, input_vec, ilen); sg_init_one(&dst, output, dlen); @@ -1578,9 +1506,9 @@ static int test_acomp(struct crypto_acomp *tfm, acomp_request_set_params(req, &src, &dst, ilen, dlen); acomp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, - tcrypt_complete, &result); + crypto_req_done, &wait); - ret = wait_async_op(&result, crypto_acomp_decompress(req)); + ret = crypto_wait_req(crypto_acomp_decompress(req), &wait); if (ret) { pr_err("alg: acomp: decompression failed on test %d for %s: ret=%d\n", i + 1, algo, -ret); @@ -2000,7 +1928,7 @@ static int do_test_kpp(struct crypto_kpp *tfm, const struct kpp_testvec *vec, void *a_public = NULL; void *a_ss = NULL; void *shared_secret = NULL; - struct tcrypt_result result; + struct crypto_wait wait; unsigned int out_len_max; int err = -ENOMEM; struct scatterlist src, dst; @@ -2009,7 +1937,7 @@ static int do_test_kpp(struct crypto_kpp *tfm, const struct kpp_testvec *vec, if (!req) return err; - init_completion(&result.completion); + crypto_init_wait(&wait); err = crypto_kpp_set_secret(tfm, vec->secret, vec->secret_size); if (err < 0) @@ -2027,10 +1955,10 @@ static int do_test_kpp(struct crypto_kpp *tfm, const struct kpp_testvec *vec, sg_init_one(&dst, output_buf, out_len_max); kpp_request_set_output(req, &dst, out_len_max); kpp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, - tcrypt_complete, &result); + crypto_req_done, &wait); /* Compute party A's public key */ - err = wait_async_op(&result, crypto_kpp_generate_public_key(req)); + err = 
crypto_wait_req(crypto_kpp_generate_public_key(req), &wait); if (err) { pr_err("alg: %s: Party A: generate public key test failed. err %d\n", alg, err); @@ -2069,8 +1997,8 @@ static int do_test_kpp(struct crypto_kpp *tfm, const struct kpp_testvec *vec, kpp_request_set_input(req, &src, vec->b_public_size); kpp_request_set_output(req, &dst, out_len_max); kpp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, - tcrypt_complete, &result); - err = wait_async_op(&result, crypto_kpp_compute_shared_secret(req)); + crypto_req_done, &wait); + err = crypto_wait_req(crypto_kpp_compute_shared_secret(req), &wait); if (err) { pr_err("alg: %s: Party A: compute shared secret test failed. err %d\n", alg, err); @@ -2100,9 +2028,9 @@ static int do_test_kpp(struct crypto_kpp *tfm, const struct kpp_testvec *vec, kpp_request_set_input(req, &src, vec->expected_a_public_size); kpp_request_set_output(req, &dst, out_len_max); kpp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, - tcrypt_complete, &result); - err = wait_async_op(&result, - crypto_kpp_compute_shared_secret(req)); + crypto_req_done, &wait); + err = crypto_wait_req(crypto_kpp_compute_shared_secret(req), + &wait); if (err) { pr_err("alg: %s: Party B: compute shared secret failed. err %d\n", alg, err); @@ -2179,7 +2107,7 @@ static int test_akcipher_one(struct crypto_akcipher *tfm, struct akcipher_request *req; void *outbuf_enc = NULL; void *outbuf_dec = NULL; - struct tcrypt_result result; + struct crypto_wait wait; unsigned int out_len_max, out_len = 0; int err = -ENOMEM; struct scatterlist src, dst, src_tab[2]; @@ -2191,7 +2119,7 @@ static int test_akcipher_one(struct crypto_akcipher *tfm, if (!req) goto free_xbuf; - init_completion(&result.completion); + crypto_init_wait(&wait); if (vecs->public_key_vec) err = crypto_akcipher_set_pub_key(tfm, vecs->key, @@ -2220,13 +2148,13 @@ static int test_akcipher_one(struct crypto_akcipher *tfm, akcipher_request_set_crypt(req, src_tab, &dst, vecs->m_size, out_len_max); akcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, - tcrypt_complete, &result); + crypto_req_done, &wait); - err = wait_async_op(&result, vecs->siggen_sigver_test ? - /* Run asymmetric signature generation */ - crypto_akcipher_sign(req) : - /* Run asymmetric encrypt */ - crypto_akcipher_encrypt(req)); + err = crypto_wait_req(vecs->siggen_sigver_test ? + /* Run asymmetric signature generation */ + crypto_akcipher_sign(req) : + /* Run asymmetric encrypt */ + crypto_akcipher_encrypt(req), &wait); if (err) { pr_err("alg: akcipher: encrypt test failed. err %d\n", err); goto free_all; @@ -2261,14 +2189,14 @@ static int test_akcipher_one(struct crypto_akcipher *tfm, sg_init_one(&src, xbuf[0], vecs->c_size); sg_init_one(&dst, outbuf_dec, out_len_max); - init_completion(&result.completion); + crypto_init_wait(&wait); akcipher_request_set_crypt(req, &src, &dst, vecs->c_size, out_len_max); - err = wait_async_op(&result, vecs->siggen_sigver_test ? - /* Run asymmetric signature verification */ - crypto_akcipher_verify(req) : - /* Run asymmetric decrypt */ - crypto_akcipher_decrypt(req)); + err = crypto_wait_req(vecs->siggen_sigver_test ? + /* Run asymmetric signature verification */ + crypto_akcipher_verify(req) : + /* Run asymmetric decrypt */ + crypto_akcipher_decrypt(req), &wait); if (err) { pr_err("alg: akcipher: decrypt test failed. err %d\n", err); goto free_all; -- 2.1.4 |
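Putting the pieces together, a converted caller ends up looking roughly like the sketch below; demo_digest() and its arguments are illustrative, not taken from any of the patched files. Allocate the request, register crypto_req_done() against an on-stack crypto_wait, then let crypto_wait_req() absorb the -EINPROGRESS/-EBUSY handling.

#include <crypto/hash.h>
#include <linux/crypto.h>
#include <linux/scatterlist.h>

/* Illustrative helper: hash a buffer with a possibly-async ahash tfm. */
static int demo_digest(struct crypto_ahash *tfm, const u8 *data,
		       unsigned int len, u8 *out)
{
	DECLARE_CRYPTO_WAIT(wait);	/* on-stack completion + error slot */
	struct ahash_request *req;
	struct scatterlist sg;
	int ret;

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   crypto_req_done, &wait);
	sg_init_one(&sg, data, len);	/* data must not live on the stack */
	ahash_request_set_crypt(req, &sg, out, len);

	/* Submit; if the driver went async, sleep until it signals us. */
	ret = crypto_wait_req(crypto_ahash_digest(req), &wait);

	ahash_request_free(req);
	return ret;
}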
From: Gilad Ben-Y. <gi...@be...> - 2017-08-21 14:07:34
gcm is starting an async. crypto op and waiting for it complete. Move it over to generic code doing the same. Signed-off-by: Gilad Ben-Yossef <gi...@be...> --- crypto/gcm.c | 32 ++++++-------------------------- 1 file changed, 6 insertions(+), 26 deletions(-) diff --git a/crypto/gcm.c b/crypto/gcm.c index 3841b5e..fb923a5 100644 --- a/crypto/gcm.c +++ b/crypto/gcm.c @@ -16,7 +16,6 @@ #include <crypto/scatterwalk.h> #include <crypto/hash.h> #include "internal.h" -#include <linux/completion.h> #include <linux/err.h> #include <linux/init.h> #include <linux/kernel.h> @@ -78,11 +77,6 @@ struct crypto_gcm_req_priv_ctx { } u; }; -struct crypto_gcm_setkey_result { - int err; - struct completion completion; -}; - static struct { u8 buf[16]; struct scatterlist sg; @@ -98,17 +92,6 @@ static inline struct crypto_gcm_req_priv_ctx *crypto_gcm_reqctx( return (void *)PTR_ALIGN((u8 *)aead_request_ctx(req), align + 1); } -static void crypto_gcm_setkey_done(struct crypto_async_request *req, int err) -{ - struct crypto_gcm_setkey_result *result = req->data; - - if (err == -EINPROGRESS) - return; - - result->err = err; - complete(&result->completion); -} - static int crypto_gcm_setkey(struct crypto_aead *aead, const u8 *key, unsigned int keylen) { @@ -119,7 +102,7 @@ static int crypto_gcm_setkey(struct crypto_aead *aead, const u8 *key, be128 hash; u8 iv[16]; - struct crypto_gcm_setkey_result result; + struct crypto_wait wait; struct scatterlist sg[1]; struct skcipher_request req; @@ -140,21 +123,18 @@ static int crypto_gcm_setkey(struct crypto_aead *aead, const u8 *key, if (!data) return -ENOMEM; - init_completion(&data->result.completion); + crypto_init_wait(&data->wait); sg_init_one(data->sg, &data->hash, sizeof(data->hash)); skcipher_request_set_tfm(&data->req, ctr); skcipher_request_set_callback(&data->req, CRYPTO_TFM_REQ_MAY_SLEEP | CRYPTO_TFM_REQ_MAY_BACKLOG, - crypto_gcm_setkey_done, - &data->result); + crypto_req_done, + &data->wait); skcipher_request_set_crypt(&data->req, data->sg, data->sg, sizeof(data->hash), data->iv); - err = crypto_skcipher_encrypt(&data->req); - if (err == -EINPROGRESS || err == -EBUSY) { - wait_for_completion(&data->result.completion); - err = data->result.err; - } + err = crypto_wait_req(crypto_skcipher_encrypt(&data->req), + &data->wait); if (err) goto out; -- 2.1.4 |
From: Gilad Ben-Y. <gi...@be...> - 2017-08-21 14:07:21
DRBG is starting an async. crypto op and waiting for it complete. Move it over to generic code doing the same. The code now also passes CRYPTO_TFM_REQ_MAY_SLEEP flag indicating crypto request memory allocation may use GFP_KERNEL which should be perfectly fine as the code is obviously sleeping for the completion of the request any way. Signed-off-by: Gilad Ben-Yossef <gi...@be...> --- crypto/drbg.c | 36 +++++++++--------------------------- include/crypto/drbg.h | 3 +-- 2 files changed, 10 insertions(+), 29 deletions(-) diff --git a/crypto/drbg.c b/crypto/drbg.c index 633a88e..c522251 100644 --- a/crypto/drbg.c +++ b/crypto/drbg.c @@ -1651,16 +1651,6 @@ static int drbg_fini_sym_kernel(struct drbg_state *drbg) return 0; } -static void drbg_skcipher_cb(struct crypto_async_request *req, int error) -{ - struct drbg_state *drbg = req->data; - - if (error == -EINPROGRESS) - return; - drbg->ctr_async_err = error; - complete(&drbg->ctr_completion); -} - static int drbg_init_sym_kernel(struct drbg_state *drbg) { struct crypto_cipher *tfm; @@ -1691,7 +1681,7 @@ static int drbg_init_sym_kernel(struct drbg_state *drbg) return PTR_ERR(sk_tfm); } drbg->ctr_handle = sk_tfm; - init_completion(&drbg->ctr_completion); + crypto_init_wait(&drbg->ctr_wait); req = skcipher_request_alloc(sk_tfm, GFP_KERNEL); if (!req) { @@ -1700,8 +1690,9 @@ static int drbg_init_sym_kernel(struct drbg_state *drbg) return -ENOMEM; } drbg->ctr_req = req; - skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, - drbg_skcipher_cb, drbg); + skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG | + CRYPTO_TFM_REQ_MAY_SLEEP, + crypto_req_done, &drbg->ctr_wait); alignmask = crypto_skcipher_alignmask(sk_tfm); drbg->ctr_null_value_buf = kzalloc(DRBG_CTR_NULL_LEN + alignmask, @@ -1762,21 +1753,12 @@ static int drbg_kcapi_sym_ctr(struct drbg_state *drbg, /* Output buffer may not be valid for SGL, use scratchpad */ skcipher_request_set_crypt(drbg->ctr_req, &sg_in, &sg_out, cryptlen, drbg->V); - ret = crypto_skcipher_encrypt(drbg->ctr_req); - switch (ret) { - case 0: - break; - case -EINPROGRESS: - case -EBUSY: - wait_for_completion(&drbg->ctr_completion); - if (!drbg->ctr_async_err) { - reinit_completion(&drbg->ctr_completion); - break; - } - default: + ret = crypto_wait_req(crypto_skcipher_encrypt(drbg->ctr_req), + &drbg->ctr_wait); + if (ret) goto out; - } - init_completion(&drbg->ctr_completion); + + crypto_init_wait(&drbg->ctr_wait); memcpy(outbuf, drbg->outscratchpad, cryptlen); diff --git a/include/crypto/drbg.h b/include/crypto/drbg.h index 22f884c..8f94110 100644 --- a/include/crypto/drbg.h +++ b/include/crypto/drbg.h @@ -126,8 +126,7 @@ struct drbg_state { __u8 *ctr_null_value; /* CTR mode aligned zero buf */ __u8 *outscratchpadbuf; /* CTR mode output scratchpad */ __u8 *outscratchpad; /* CTR mode aligned outbuf */ - struct completion ctr_completion; /* CTR mode async handler */ - int ctr_async_err; /* CTR mode async error */ + struct crypto_wait ctr_wait; /* CTR mode async wait obj */ bool seeded; /* DRBG fully seeded? */ bool pr; /* Prediction resistance enabled? */ -- 2.1.4 |
From: Gilad Ben-Y. <gi...@be...> - 2017-08-21 14:07:08
public_key_verify_signature() is starting an async crypto op and waiting for it to complete. Move it over to generic code doing the same. Signed-off-by: Gilad Ben-Yossef <gi...@be...> --- crypto/asymmetric_keys/public_key.c | 28 ++++------------------------ 1 file changed, 4 insertions(+), 24 deletions(-) diff --git a/crypto/asymmetric_keys/public_key.c b/crypto/asymmetric_keys/public_key.c index 3cd6e12..d916235 100644 --- a/crypto/asymmetric_keys/public_key.c +++ b/crypto/asymmetric_keys/public_key.c @@ -57,29 +57,13 @@ static void public_key_destroy(void *payload0, void *payload3) public_key_signature_free(payload3); } -struct public_key_completion { - struct completion completion; - int err; -}; - -static void public_key_verify_done(struct crypto_async_request *req, int err) -{ - struct public_key_completion *compl = req->data; - - if (err == -EINPROGRESS) - return; - - compl->err = err; - complete(&compl->completion); -} - /* * Verify a signature using a public key. */ int public_key_verify_signature(const struct public_key *pkey, const struct public_key_signature *sig) { - struct public_key_completion compl; + struct crypto_wait cwait; struct crypto_akcipher *tfm; struct akcipher_request *req; struct scatterlist sig_sg, digest_sg; @@ -131,20 +115,16 @@ int public_key_verify_signature(const struct public_key *pkey, sg_init_one(&digest_sg, output, outlen); akcipher_request_set_crypt(req, &sig_sg, &digest_sg, sig->s_size, outlen); - init_completion(&compl.completion); + crypto_init_wait(&cwait); akcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP, - public_key_verify_done, &compl); + crypto_req_done, &cwait); /* Perform the verification calculation. This doesn't actually do the * verification, but rather calculates the hash expected by the * signature and returns that to us. */ - ret = crypto_akcipher_verify(req); - if ((ret == -EINPROGRESS) || (ret == -EBUSY)) { - wait_for_completion(&compl.completion); - ret = compl.err; - } + ret = crypto_wait_req(crypto_akcipher_verify(req), &cwait); if (ret < 0) goto out_free_output; -- 2.1.4 |
From: Gilad Ben-Y. <gi...@be...> - 2017-08-21 14:06:59
algif starts several async crypto ops and waits for their completion. Move it over to generic code doing the same. Signed-off-by: Gilad Ben-Yossef <gi...@be...> --- crypto/af_alg.c | 27 --------------------------- crypto/algif_aead.c | 8 ++++---- crypto/algif_hash.c | 30 ++++++++++++++---------------- crypto/algif_skcipher.c | 9 ++++----- include/crypto/if_alg.h | 15 +-------------- 5 files changed, 23 insertions(+), 66 deletions(-) diff --git a/crypto/af_alg.c b/crypto/af_alg.c index d6936c0..f8917e7 100644 --- a/crypto/af_alg.c +++ b/crypto/af_alg.c @@ -481,33 +481,6 @@ int af_alg_cmsg_send(struct msghdr *msg, struct af_alg_control *con) } EXPORT_SYMBOL_GPL(af_alg_cmsg_send); -int af_alg_wait_for_completion(int err, struct af_alg_completion *completion) -{ - switch (err) { - case -EINPROGRESS: - case -EBUSY: - wait_for_completion(&completion->completion); - reinit_completion(&completion->completion); - err = completion->err; - break; - }; - - return err; -} -EXPORT_SYMBOL_GPL(af_alg_wait_for_completion); - -void af_alg_complete(struct crypto_async_request *req, int err) -{ - struct af_alg_completion *completion = req->data; - - if (err == -EINPROGRESS) - return; - - completion->err = err; - complete(&completion->completion); -} -EXPORT_SYMBOL_GPL(af_alg_complete); - /** * af_alg_alloc_tsgl - allocate the TX SGL * diff --git a/crypto/algif_aead.c b/crypto/algif_aead.c index 48d46e7..abbac8a 100644 --- a/crypto/algif_aead.c +++ b/crypto/algif_aead.c @@ -278,11 +278,11 @@ static int _aead_recvmsg(struct socket *sock, struct msghdr *msg, /* Synchronous operation */ aead_request_set_callback(&areq->cra_u.aead_req, CRYPTO_TFM_REQ_MAY_BACKLOG, - af_alg_complete, &ctx->completion); - err = af_alg_wait_for_completion(ctx->enc ? + crypto_req_done, &ctx->wait); + err = crypto_wait_req(ctx->enc ? 
crypto_aead_encrypt(&areq->cra_u.aead_req) : crypto_aead_decrypt(&areq->cra_u.aead_req), - &ctx->completion); + &ctx->wait); } /* AIO operation in progress */ @@ -554,7 +554,7 @@ static int aead_accept_parent_nokey(void *private, struct sock *sk) ctx->merge = 0; ctx->enc = 0; ctx->aead_assoclen = 0; - af_alg_init_completion(&ctx->completion); + crypto_init_wait(&ctx->wait); ask->private = ctx; diff --git a/crypto/algif_hash.c b/crypto/algif_hash.c index 3b3c154..d2ab8de 100644 --- a/crypto/algif_hash.c +++ b/crypto/algif_hash.c @@ -26,7 +26,7 @@ struct hash_ctx { u8 *result; - struct af_alg_completion completion; + struct crypto_wait wait; unsigned int len; bool more; @@ -102,8 +102,7 @@ static int hash_sendmsg(struct socket *sock, struct msghdr *msg, if ((msg->msg_flags & MSG_MORE)) hash_free_result(sk, ctx); - err = af_alg_wait_for_completion(crypto_ahash_init(&ctx->req), - &ctx->completion); + err = crypto_wait_req(crypto_ahash_init(&ctx->req), &ctx->wait); if (err) goto unlock; } @@ -124,8 +123,8 @@ static int hash_sendmsg(struct socket *sock, struct msghdr *msg, ahash_request_set_crypt(&ctx->req, ctx->sgl.sg, NULL, len); - err = af_alg_wait_for_completion(crypto_ahash_update(&ctx->req), - &ctx->completion); + err = crypto_wait_req(crypto_ahash_update(&ctx->req), + &ctx->wait); af_alg_free_sg(&ctx->sgl); if (err) goto unlock; @@ -143,8 +142,8 @@ static int hash_sendmsg(struct socket *sock, struct msghdr *msg, goto unlock; ahash_request_set_crypt(&ctx->req, NULL, ctx->result, 0); - err = af_alg_wait_for_completion(crypto_ahash_final(&ctx->req), - &ctx->completion); + err = crypto_wait_req(crypto_ahash_final(&ctx->req), + &ctx->wait); } unlock: @@ -185,7 +184,7 @@ static ssize_t hash_sendpage(struct socket *sock, struct page *page, } else { if (!ctx->more) { err = crypto_ahash_init(&ctx->req); - err = af_alg_wait_for_completion(err, &ctx->completion); + err = crypto_wait_req(err, &ctx->wait); if (err) goto unlock; } @@ -193,7 +192,7 @@ static ssize_t hash_sendpage(struct socket *sock, struct page *page, err = crypto_ahash_update(&ctx->req); } - err = af_alg_wait_for_completion(err, &ctx->completion); + err = crypto_wait_req(err, &ctx->wait); if (err) goto unlock; @@ -229,17 +228,16 @@ static int hash_recvmsg(struct socket *sock, struct msghdr *msg, size_t len, ahash_request_set_crypt(&ctx->req, NULL, ctx->result, 0); if (!result && !ctx->more) { - err = af_alg_wait_for_completion( - crypto_ahash_init(&ctx->req), - &ctx->completion); + err = crypto_wait_req(crypto_ahash_init(&ctx->req), + &ctx->wait); if (err) goto unlock; } if (!result || ctx->more) { ctx->more = 0; - err = af_alg_wait_for_completion(crypto_ahash_final(&ctx->req), - &ctx->completion); + err = crypto_wait_req(crypto_ahash_final(&ctx->req), + &ctx->wait); if (err) goto unlock; } @@ -490,13 +488,13 @@ static int hash_accept_parent_nokey(void *private, struct sock *sk) ctx->result = NULL; ctx->len = len; ctx->more = 0; - af_alg_init_completion(&ctx->completion); + crypto_init_wait(&ctx->wait); ask->private = ctx; ahash_request_set_tfm(&ctx->req, hash); ahash_request_set_callback(&ctx->req, CRYPTO_TFM_REQ_MAY_BACKLOG, - af_alg_complete, &ctx->completion); + crypto_req_done, &ctx->wait); sk->sk_destruct = hash_sock_destruct; diff --git a/crypto/algif_skcipher.c b/crypto/algif_skcipher.c index 8ae4170..9954b07 100644 --- a/crypto/algif_skcipher.c +++ b/crypto/algif_skcipher.c @@ -129,12 +129,11 @@ static int _skcipher_recvmsg(struct socket *sock, struct msghdr *msg, skcipher_request_set_callback(&areq->cra_u.skcipher_req, 
CRYPTO_TFM_REQ_MAY_SLEEP | CRYPTO_TFM_REQ_MAY_BACKLOG, - af_alg_complete, - &ctx->completion); - err = af_alg_wait_for_completion(ctx->enc ? + crypto_req_done, &ctx->wait); + err = crypto_wait_req(ctx->enc ? crypto_skcipher_encrypt(&areq->cra_u.skcipher_req) : crypto_skcipher_decrypt(&areq->cra_u.skcipher_req), - &ctx->completion); + &ctx->wait); } /* AIO operation in progress */ @@ -388,7 +387,7 @@ static int skcipher_accept_parent_nokey(void *private, struct sock *sk) ctx->more = 0; ctx->merge = 0; ctx->enc = 0; - af_alg_init_completion(&ctx->completion); + crypto_init_wait(&ctx->wait); ask->private = ctx; diff --git a/include/crypto/if_alg.h b/include/crypto/if_alg.h index 75ec9c6..6abf0a3 100644 --- a/include/crypto/if_alg.h +++ b/include/crypto/if_alg.h @@ -40,11 +40,6 @@ struct alg_sock { void *private; }; -struct af_alg_completion { - struct completion completion; - int err; -}; - struct af_alg_control { struct af_alg_iv *iv; int op; @@ -152,7 +147,7 @@ struct af_alg_ctx { void *iv; size_t aead_assoclen; - struct af_alg_completion completion; + struct crypto_wait wait; size_t used; size_t rcvused; @@ -177,19 +172,11 @@ void af_alg_link_sg(struct af_alg_sgl *sgl_prev, struct af_alg_sgl *sgl_new); int af_alg_cmsg_send(struct msghdr *msg, struct af_alg_control *con); -int af_alg_wait_for_completion(int err, struct af_alg_completion *completion); -void af_alg_complete(struct crypto_async_request *req, int err); - static inline struct alg_sock *alg_sk(struct sock *sk) { return (struct alg_sock *)sk; } -static inline void af_alg_init_completion(struct af_alg_completion *completion) -{ - init_completion(&completion->completion); -} - /** * Size of available buffer for sending data from user space to kernel. * -- 2.1.4 |
From: Gilad Ben-Y. <gi...@be...> - 2017-08-21 14:06:45
|
Invoking a possibly async. crypto op and waiting for completion while correctly handling backlog processing is a common task in the crypto API implementation and outside users of it. This patch adds a generic implementation for doing so in preparation for using it across the board instead of hand rolled versions. Signed-off-by: Gilad Ben-Yossef <gi...@be...> CC: Eric Biggers <ebi...@gm...> CC: Jonathan Cameron <Jon...@hu...> --- crypto/api.c | 13 +++++++++++++ include/linux/crypto.h | 40 ++++++++++++++++++++++++++++++++++++++++ 2 files changed, 53 insertions(+) diff --git a/crypto/api.c b/crypto/api.c index 941cd4c..2a2479d 100644 --- a/crypto/api.c +++ b/crypto/api.c @@ -24,6 +24,7 @@ #include <linux/sched/signal.h> #include <linux/slab.h> #include <linux/string.h> +#include <linux/completion.h> #include "internal.h" LIST_HEAD(crypto_alg_list); @@ -595,5 +596,17 @@ int crypto_has_alg(const char *name, u32 type, u32 mask) } EXPORT_SYMBOL_GPL(crypto_has_alg); +void crypto_req_done(struct crypto_async_request *req, int err) +{ + struct crypto_wait *wait = req->data; + + if (err == -EINPROGRESS) + return; + + wait->err = err; + complete(&wait->completion); +} +EXPORT_SYMBOL_GPL(crypto_req_done); + MODULE_DESCRIPTION("Cryptographic core API"); MODULE_LICENSE("GPL"); diff --git a/include/linux/crypto.h b/include/linux/crypto.h index 84da997..78508ca 100644 --- a/include/linux/crypto.h +++ b/include/linux/crypto.h @@ -24,6 +24,7 @@ #include <linux/slab.h> #include <linux/string.h> #include <linux/uaccess.h> +#include <linux/completion.h> /* * Autoloaded crypto modules should only use a prefixed name to avoid allowing @@ -468,6 +469,45 @@ struct crypto_alg { } CRYPTO_MINALIGN_ATTR; /* + * A helper struct for waiting for completion of async crypto ops + */ +struct crypto_wait { + struct completion completion; + int err; +}; + +/* + * Macro for declaring a crypto op async wait object on stack + */ +#define DECLARE_CRYPTO_WAIT(_wait) \ + struct crypto_wait _wait = { \ + COMPLETION_INITIALIZER_ONSTACK((_wait).completion), 0 } + +/* + * Async ops completion helper functioons + */ +void crypto_req_done(struct crypto_async_request *req, int err); + +static inline int crypto_wait_req(int err, struct crypto_wait *wait) +{ + switch (err) { + case -EINPROGRESS: + case -EBUSY: + wait_for_completion(&wait->completion); + reinit_completion(&wait->completion); + err = wait->err; + break; + }; + + return err; +} + +static inline void crypto_init_wait(struct crypto_wait *wait) +{ + init_completion(&wait->completion); +} + +/* * Algorithm registration interface. */ int crypto_register_alg(struct crypto_alg *alg); -- 2.1.4 |
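A minimal usage sketch of the helpers added by this patch, assuming a possibly asynchronous skcipher transform. Only crypto_wait, DECLARE_CRYPTO_WAIT, crypto_init_wait, crypto_req_done and crypto_wait_req come from the patch; encrypt_one_block() and its parameters are illustrative.

#include <crypto/skcipher.h>
#include <linux/crypto.h>
#include <linux/scatterlist.h>

static int encrypt_one_block(struct crypto_skcipher *tfm,
                             struct scatterlist *src,
                             struct scatterlist *dst,
                             unsigned int len, u8 *iv)
{
        DECLARE_CRYPTO_WAIT(wait);      /* on-stack completion + error slot */
        struct skcipher_request *req;
        int err;

        req = skcipher_request_alloc(tfm, GFP_KERNEL);
        if (!req)
                return -ENOMEM;

        /* crypto_req_done() ignores the -EINPROGRESS notification that a
         * backlogged request generates when it enters the provider's
         * queue, and completes the wait only on the final result. */
        skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
                                      CRYPTO_TFM_REQ_MAY_SLEEP,
                                      crypto_req_done, &wait);
        skcipher_request_set_crypt(req, src, dst, len, iv);

        err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);

        skcipher_request_free(req);
        return err;
}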
From: Gilad Ben-Y. <gi...@be...> - 2017-08-21 14:06:26
|
Now that -EBUSY return code only indicates backlog queueing we can safely remove the now redundant check for the CRYPTO_TFM_REQ_MAY_BACKLOG flag when -EBUSY is returned. Signed-off-by: Gilad Ben-Yossef <gi...@be...> Acked-by: Boris Brezillon <bor...@fr...> --- drivers/crypto/marvell/cesa.c | 3 +-- drivers/crypto/marvell/cesa.h | 2 +- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/drivers/crypto/marvell/cesa.c b/drivers/crypto/marvell/cesa.c index 6e7a5c7..269737f 100644 --- a/drivers/crypto/marvell/cesa.c +++ b/drivers/crypto/marvell/cesa.c @@ -183,8 +183,7 @@ int mv_cesa_queue_req(struct crypto_async_request *req, spin_lock_bh(&engine->lock); ret = crypto_enqueue_request(&engine->queue, req); if ((mv_cesa_req_get_type(creq) == CESA_DMA_REQ) && - (ret == -EINPROGRESS || - (ret == -EBUSY && req->flags & CRYPTO_TFM_REQ_MAY_BACKLOG))) + (ret == -EINPROGRESS || ret == -EBUSY) mv_cesa_tdma_chain(engine, creq); spin_unlock_bh(&engine->lock); diff --git a/drivers/crypto/marvell/cesa.h b/drivers/crypto/marvell/cesa.h index b7872f6..63c8457 100644 --- a/drivers/crypto/marvell/cesa.h +++ b/drivers/crypto/marvell/cesa.h @@ -763,7 +763,7 @@ static inline int mv_cesa_req_needs_cleanup(struct crypto_async_request *req, * the backlog and will be processed later. There's no need to * clean it up. */ - if (ret == -EBUSY && req->flags & CRYPTO_TFM_REQ_MAY_BACKLOG) + if (ret == -EBUSY) return false; /* Request wasn't queued, we need to clean it up */ -- 2.1.4 |
From: Gilad Ben-Y. <gi...@be...> - 2017-08-21 14:06:13
|
Now that -EBUSY return code only indicates backlog queueing we can safely remove the now redundant check for the CRYPTO_TFM_REQ_MAY_BACKLOG flag when -EBUSY is returned. Signed-off-by: Gilad Ben-Yossef <gi...@be...> --- crypto/ahash.c | 12 +++--------- crypto/cts.c | 6 ++---- crypto/lrw.c | 8 ++------ crypto/rsa-pkcs1pad.c | 16 ++++------------ crypto/xts.c | 8 ++------ 5 files changed, 13 insertions(+), 37 deletions(-) diff --git a/crypto/ahash.c b/crypto/ahash.c index 826cd7a..d63eeef 100644 --- a/crypto/ahash.c +++ b/crypto/ahash.c @@ -334,9 +334,7 @@ static int ahash_op_unaligned(struct ahash_request *req, return err; err = op(req); - if (err == -EINPROGRESS || - (err == -EBUSY && (ahash_request_flags(req) & - CRYPTO_TFM_REQ_MAY_BACKLOG))) + if (err == -EINPROGRESS || err == -EBUSY) return err; ahash_restore_req(req, err); @@ -394,9 +392,7 @@ static int ahash_def_finup_finish1(struct ahash_request *req, int err) req->base.complete = ahash_def_finup_done2; err = crypto_ahash_reqtfm(req)->final(req); - if (err == -EINPROGRESS || - (err == -EBUSY && (ahash_request_flags(req) & - CRYPTO_TFM_REQ_MAY_BACKLOG))) + if (err == -EINPROGRESS || err == -EBUSY) return err; out: @@ -432,9 +428,7 @@ static int ahash_def_finup(struct ahash_request *req) return err; err = tfm->update(req); - if (err == -EINPROGRESS || - (err == -EBUSY && (ahash_request_flags(req) & - CRYPTO_TFM_REQ_MAY_BACKLOG))) + if (err == -EINPROGRESS || err == -EBUSY) return err; return ahash_def_finup_finish1(req, err); diff --git a/crypto/cts.c b/crypto/cts.c index 243f591..4773c18 100644 --- a/crypto/cts.c +++ b/crypto/cts.c @@ -136,8 +136,7 @@ static void crypto_cts_encrypt_done(struct crypto_async_request *areq, int err) goto out; err = cts_cbc_encrypt(req); - if (err == -EINPROGRESS || - (err == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) + if (err == -EINPROGRESS || err == -EBUSY) return; out: @@ -229,8 +228,7 @@ static void crypto_cts_decrypt_done(struct crypto_async_request *areq, int err) goto out; err = cts_cbc_decrypt(req); - if (err == -EINPROGRESS || - (err == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) + if (err == -EINPROGRESS || err == -EBUSY) return; out: diff --git a/crypto/lrw.c b/crypto/lrw.c index a8bfae4..695cea9 100644 --- a/crypto/lrw.c +++ b/crypto/lrw.c @@ -328,9 +328,7 @@ static int do_encrypt(struct skcipher_request *req, int err) crypto_skcipher_encrypt(subreq) ?: post_crypt(req); - if (err == -EINPROGRESS || - (err == -EBUSY && - req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) + if (err == -EINPROGRESS || err == -EBUSY) return err; } @@ -380,9 +378,7 @@ static int do_decrypt(struct skcipher_request *req, int err) crypto_skcipher_decrypt(subreq) ?: post_crypt(req); - if (err == -EINPROGRESS || - (err == -EBUSY && - req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) + if (err == -EINPROGRESS || err == -EBUSY) return err; } diff --git a/crypto/rsa-pkcs1pad.c b/crypto/rsa-pkcs1pad.c index 407c64b..2908f93 100644 --- a/crypto/rsa-pkcs1pad.c +++ b/crypto/rsa-pkcs1pad.c @@ -279,9 +279,7 @@ static int pkcs1pad_encrypt(struct akcipher_request *req) req->dst, ctx->key_size - 1, req->dst_len); err = crypto_akcipher_encrypt(&req_ctx->child_req); - if (err != -EINPROGRESS && - (err != -EBUSY || - !(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))) + if (err != -EINPROGRESS && err != -EBUSY) return pkcs1pad_encrypt_sign_complete(req, err); return err; @@ -383,9 +381,7 @@ static int pkcs1pad_decrypt(struct akcipher_request *req) ctx->key_size); err = crypto_akcipher_decrypt(&req_ctx->child_req); 
- if (err != -EINPROGRESS && - (err != -EBUSY || - !(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))) + if (err != -EINPROGRESS && err != -EBUSY) return pkcs1pad_decrypt_complete(req, err); return err; @@ -440,9 +436,7 @@ static int pkcs1pad_sign(struct akcipher_request *req) req->dst, ctx->key_size - 1, req->dst_len); err = crypto_akcipher_sign(&req_ctx->child_req); - if (err != -EINPROGRESS && - (err != -EBUSY || - !(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))) + if (err != -EINPROGRESS && err != -EBUSY) return pkcs1pad_encrypt_sign_complete(req, err); return err; @@ -561,9 +555,7 @@ static int pkcs1pad_verify(struct akcipher_request *req) ctx->key_size); err = crypto_akcipher_verify(&req_ctx->child_req); - if (err != -EINPROGRESS && - (err != -EBUSY || - !(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))) + if (err != -EINPROGRESS && err != -EBUSY) return pkcs1pad_verify_complete(req, err); return err; diff --git a/crypto/xts.c b/crypto/xts.c index d86c11a..af68012 100644 --- a/crypto/xts.c +++ b/crypto/xts.c @@ -269,9 +269,7 @@ static int do_encrypt(struct skcipher_request *req, int err) crypto_skcipher_encrypt(subreq) ?: post_crypt(req); - if (err == -EINPROGRESS || - (err == -EBUSY && - req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) + if (err == -EINPROGRESS || err == -EBUSY) return err; } @@ -321,9 +319,7 @@ static int do_decrypt(struct skcipher_request *req, int err) crypto_skcipher_decrypt(subreq) ?: post_crypt(req); - if (err == -EINPROGRESS || - (err == -EBUSY && - req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) + if (err == -EINPROGRESS || err == -EBUSY) return err; } -- 2.1.4 |
From: Gilad Ben-Y. <gi...@be...> - 2017-08-21 14:05:59
|
Replace -EBUSY with -EAGAIN when reporting transient busy indication in the absence of backlog. Signed-off-by: Gilad Ben-Yossef <gi...@be...> Reviewed-by: Gary R Hook <gar...@am...> --- drivers/crypto/ccp/ccp-crypto-main.c | 8 +++----- drivers/crypto/ccp/ccp-dev.c | 7 +++++-- 2 files changed, 8 insertions(+), 7 deletions(-) diff --git a/drivers/crypto/ccp/ccp-crypto-main.c b/drivers/crypto/ccp/ccp-crypto-main.c index 35a9de7..403ff0a 100644 --- a/drivers/crypto/ccp/ccp-crypto-main.c +++ b/drivers/crypto/ccp/ccp-crypto-main.c @@ -222,9 +222,10 @@ static int ccp_crypto_enqueue_cmd(struct ccp_crypto_cmd *crypto_cmd) /* Check if the cmd can/should be queued */ if (req_queue.cmd_count >= CCP_CRYPTO_MAX_QLEN) { - ret = -EBUSY; - if (!(crypto_cmd->cmd->flags & CCP_CMD_MAY_BACKLOG)) + if (!(crypto_cmd->cmd->flags & CCP_CMD_MAY_BACKLOG)) { + ret = -EAGAIN; goto e_lock; + } } /* Look for an entry with the same tfm. If there is a cmd @@ -243,9 +244,6 @@ static int ccp_crypto_enqueue_cmd(struct ccp_crypto_cmd *crypto_cmd) ret = ccp_enqueue_cmd(crypto_cmd->cmd); if (!ccp_crypto_success(ret)) goto e_lock; /* Error, don't queue it */ - if ((ret == -EBUSY) && - !(crypto_cmd->cmd->flags & CCP_CMD_MAY_BACKLOG)) - goto e_lock; /* Not backlogging, don't queue it */ } if (req_queue.cmd_count >= CCP_CRYPTO_MAX_QLEN) { diff --git a/drivers/crypto/ccp/ccp-dev.c b/drivers/crypto/ccp/ccp-dev.c index 4e029b1..3d637e3 100644 --- a/drivers/crypto/ccp/ccp-dev.c +++ b/drivers/crypto/ccp/ccp-dev.c @@ -292,9 +292,12 @@ int ccp_enqueue_cmd(struct ccp_cmd *cmd) i = ccp->cmd_q_count; if (ccp->cmd_count >= MAX_CMD_QLEN) { - ret = -EBUSY; - if (cmd->flags & CCP_CMD_MAY_BACKLOG) + if (cmd->flags & CCP_CMD_MAY_BACKLOG) { + ret = -EBUSY; list_add_tail(&cmd->entry, &ccp->backlog); + } else { + ret = -EAGAIN; + } } else { ret = -EINPROGRESS; ccp->cmd_count++; -- 2.1.4 |
From: Gilad Ben-Y. <gi...@be...> - 2017-08-21 14:05:45
|
The crypto API was using the -EBUSY return value to indicate both a hard failure to submit a crypto operation into a transformation provider when the latter was busy and the backlog mechanism was not enabled as well as a notification that the operation was queued into the backlog when the backlog mechanism was enabled. Having the same return code indicate two very different conditions depending on a flag is both error prone and requires extra runtime check like the following to discern between the cases: if (err == -EINPROGRESS || (err == -EBUSY && (ahash_request_flags(req) & CRYPTO_TFM_REQ_MAY_BACKLOG))) This patch changes the return code used to indicate a crypto op failed due to the transformation provider being transiently busy to -EAGAIN. Signed-off-by: Gilad Ben-Yossef <gi...@be...> --- crypto/algapi.c | 6 ++++-- crypto/algif_hash.c | 20 +++++++++++++++++--- crypto/cryptd.c | 4 +--- 3 files changed, 22 insertions(+), 8 deletions(-) diff --git a/crypto/algapi.c b/crypto/algapi.c index aa699ff..916bee3 100644 --- a/crypto/algapi.c +++ b/crypto/algapi.c @@ -897,9 +897,11 @@ int crypto_enqueue_request(struct crypto_queue *queue, int err = -EINPROGRESS; if (unlikely(queue->qlen >= queue->max_qlen)) { - err = -EBUSY; - if (!(request->flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) + if (!(request->flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) { + err = -EAGAIN; goto out; + } + err = -EBUSY; if (queue->backlog == &queue->list) queue->backlog = &request->list; } diff --git a/crypto/algif_hash.c b/crypto/algif_hash.c index 5e92bd2..3b3c154 100644 --- a/crypto/algif_hash.c +++ b/crypto/algif_hash.c @@ -39,6 +39,20 @@ struct algif_hash_tfm { bool has_key; }; +/* Previous versions of crypto_* ops used to return -EBUSY + * rather than -EAGAIN to indicate being tied up. The in + * kernel API changed but we don't want to break the user + * space API. As only the hash user interface exposed this + * error ever to the user, do the translation here. + */ +static inline int crypto_user_err(int err) +{ + if (err == -EAGAIN) + return -EBUSY; + + return err; +} + static int hash_alloc_result(struct sock *sk, struct hash_ctx *ctx) { unsigned ds; @@ -136,7 +150,7 @@ static int hash_sendmsg(struct socket *sock, struct msghdr *msg, unlock: release_sock(sk); - return err ?: copied; + return err ? crypto_user_err(err) : copied; } static ssize_t hash_sendpage(struct socket *sock, struct page *page, @@ -188,7 +202,7 @@ static ssize_t hash_sendpage(struct socket *sock, struct page *page, unlock: release_sock(sk); - return err ?: size; + return err ? crypto_user_err(err) : size; } static int hash_recvmsg(struct socket *sock, struct msghdr *msg, size_t len, @@ -236,7 +250,7 @@ static int hash_recvmsg(struct socket *sock, struct msghdr *msg, size_t len, hash_free_result(sk, ctx); release_sock(sk); - return err ?: len; + return err ? 
crypto_user_err(err) : len; } static int hash_accept(struct socket *sock, struct socket *newsock, int flags, diff --git a/crypto/cryptd.c b/crypto/cryptd.c index 0508c48..d1dbdce 100644 --- a/crypto/cryptd.c +++ b/crypto/cryptd.c @@ -137,16 +137,14 @@ static int cryptd_enqueue_request(struct cryptd_queue *queue, int cpu, err; struct cryptd_cpu_queue *cpu_queue; atomic_t *refcnt; - bool may_backlog; cpu = get_cpu(); cpu_queue = this_cpu_ptr(queue->cpu_queue); err = crypto_enqueue_request(&cpu_queue->queue, request); refcnt = crypto_tfm_ctx(request->tfm); - may_backlog = request->flags & CRYPTO_TFM_REQ_MAY_BACKLOG; - if (err == -EBUSY && !may_backlog) + if (err == -EAGAIN) goto out_put_cpu; queue_work_on(cpu, kcrypto_wq, &cpu_queue->work); -- 2.1.4 |
From: Gilad Ben-Y. <gi...@be...> - 2017-08-21 14:05:33
|
Many users of kernel async. crypto services have a pattern of starting an async. crypto op and than using a completion to wait for it to end. This patch set simplifies this common use case in two ways: First, by separating the return codes of the case where a request is queued to a backlog due to the provider being busy (-EBUSY) from the case the request has failed due to the provider being busy and backlogging is not enabled (-EAGAIN). Next, this change is than built on to create a generic API to wait for a async. crypto operation to complete. The end result is a smaller code base and an API that is easier to use and more difficult to get wrong. The patch set was boot tested on x86_64 and arm64 which at the very least tests the crypto users via testmgr and tcrypt but I do note that I do not have access to some of the HW whose drivers are modified nor do I claim I was able to test all of the corner cases. The patch set is based upon linux-next release tagged next-20170817. Changes from v5: - Remove redundant new line as spotted by Jonathan Cameron. - Reworded dm-verity change commit message to better clarify potential issue averted by change as pointed out by Mikulas Patocka. Changes from v4: - Rebase on top of latest algif changes from Stephan Mueller. - Fix typo in ccp patch title. Changes from v3: - Instead of changing the return code to indicate backlog queueing, change the return code to indicate transient busy state, as suggested by Herbert Xu. Changes from v2: - Patch title changed from "introduce crypto wait for async op" to better reflect the current state. - Rebase on top of latest linux-next. - Add a new return code of -EIOCBQUEUED for backlog queueing, as suggested by Herbert Xu. - Transform more users to the new API. - Update the drbg change to account for new init as indicated by Stephan Muller. Changes from v1: - Address review comments from Eric Biggers. - Separated out bug fixes of existing code and rebase on top of that patch set. - Rename 'ecr' to 'wait' in fscrypto code. - Split patch introducing the new API from the change moving over the algif code which it originated from to the new API. - Inline crypto_wait_req(). - Some code indentation fixes. Gilad Ben-Yossef (19): crypto: change transient busy return code to -EAGAIN crypto: ccp: use -EAGAIN for transient busy indication crypto: remove redundant backlog checks on EBUSY crypto: marvell/cesa: remove redundant backlog checks on EBUSY crypto: introduce crypto wait for async op crypto: move algif to generic async completion crypto: move pub key to generic async completion crypto: move drbg to generic async completion crypto: move gcm to generic async completion crypto: move testmgr to generic async completion fscrypt: move to generic async completion dm: move dm-verity to generic async completion cifs: move to generic async completion ima: move to generic async completion crypto: tcrypt: move to generic async completion crypto: talitos: move to generic async completion crypto: qce: move to generic async completion crypto: mediatek: move to generic async completion crypto: adapt api sample to use async. 
op wait Documentation/crypto/api-samples.rst | 52 ++------- crypto/af_alg.c | 27 ----- crypto/ahash.c | 12 +-- crypto/algapi.c | 6 +- crypto/algif_aead.c | 8 +- crypto/algif_hash.c | 50 +++++---- crypto/algif_skcipher.c | 9 +- crypto/api.c | 13 +++ crypto/asymmetric_keys/public_key.c | 28 +---- crypto/cryptd.c | 4 +- crypto/cts.c | 6 +- crypto/drbg.c | 36 ++----- crypto/gcm.c | 32 ++---- crypto/lrw.c | 8 +- crypto/rsa-pkcs1pad.c | 16 +-- crypto/tcrypt.c | 84 +++++---------- crypto/testmgr.c | 204 ++++++++++++----------------------- crypto/xts.c | 8 +- drivers/crypto/ccp/ccp-crypto-main.c | 8 +- drivers/crypto/ccp/ccp-dev.c | 7 +- drivers/crypto/marvell/cesa.c | 3 +- drivers/crypto/marvell/cesa.h | 2 +- drivers/crypto/mediatek/mtk-aes.c | 31 +----- drivers/crypto/qce/sha.c | 30 +----- drivers/crypto/talitos.c | 38 +------ drivers/md/dm-verity-target.c | 81 ++++---------- drivers/md/dm-verity.h | 5 - fs/cifs/smb2ops.c | 30 +----- fs/crypto/crypto.c | 28 +---- fs/crypto/fname.c | 36 ++----- fs/crypto/fscrypt_private.h | 10 -- fs/crypto/keyinfo.c | 21 +--- include/crypto/drbg.h | 3 +- include/crypto/if_alg.h | 15 +-- include/linux/crypto.h | 40 +++++++ security/integrity/ima/ima_crypto.c | 56 +++------- 36 files changed, 310 insertions(+), 737 deletions(-) -- 2.1.4 |
From: Gilad Ben-Y. <gi...@be...> - 2017-08-21 12:49:48
|
On Tue, Aug 15, 2017 at 5:23 AM, Jonathan Cameron <Jon...@hu...> wrote: > On Mon, 14 Aug 2017 18:21:15 +0300 > Gilad Ben-Yossef <gi...@be...> wrote: > >> Invoking a possibly async. crypto op and waiting for completion >> while correctly handling backlog processing is a common task >> in the crypto API implementation and outside users of it. >> >> This patch adds a generic implementation for doing so in >> preparation for using it across the board instead of hand >> rolled versions. > > Trivial observation. Shouldn't have this bonus blank line inserted here. > >> #endif /* _LINUX_CRYPTO_H */ >> > Indeed. Will be fixed. Thanks, Gilad -- Gilad Ben-Yossef Chief Coffee Drinker "If you take a class in large-scale robotics, can you end up in a situation where the homework eats your dog?" -- Jean-Baptiste Queru |
From: Gilad Ben-Y. <gi...@be...> - 2017-08-21 12:48:17
|
On Sat, Aug 19, 2017 at 11:08 PM, Mikulas Patocka <mpa...@re...> wrote: > > > > On Mon, 14 Aug 2017, Gilad Ben-Yossef wrote: > > > dm-verity is starting async. crypto ops and waiting for them to complete. > > Move it over to generic code doing the same. > > > > This also fixes a possible data coruption bug created by the > > use of wait_for_completion_interruptible() without dealing > > correctly with an interrupt aborting the wait prior to the > > async op finishing. > > What is the exact problem there? The interruptible sleep is called from a > workqueue and workqueues have all signals blocked. Are signals unblocked > for some reason there? I should have used "potential" rather then "possible". My bad. My point was that the use of wait_for_completion_interruptible() is wrong because we are not really ready to handle a signal *if* it arrives. Yes, since we are being called from a workqueue this will not happen, but: a. we (or rather I, since I wrote the offending code...) shouldn't call *_interruptible() in a function that is executing with signals masked unconditionally. It makes not sense. b. someone might move the context this is executing in in the future, or make some other change that will enable signals and create a very subtle bug. > > > Should there be another patch for stable kernels that fixes the > interruptible sleep? No, for the reason you pointed yourself - signals are blocked here, so this is a potential bug only but can't be triggered. I already sent a patch set fixing similar places that had the same issue and it went into stable - e.g. https://patchwork.kernel.org/patch/9781693/ Thanks, Gilad > > > Mikulas > > > Signed-off-by: Gilad Ben-Yossef <gi...@be...> > > --- > > drivers/md/dm-verity-target.c | 81 +++++++++++-------------------------------- > > drivers/md/dm-verity.h | 5 --- > > 2 files changed, 20 insertions(+), 66 deletions(-) > > > > diff --git a/drivers/md/dm-verity-target.c b/drivers/md/dm-verity-target.c > > index 79f18d4..8df08a8 100644 > > --- a/drivers/md/dm-verity-target.c > > +++ b/drivers/md/dm-verity-target.c > > @@ -92,74 +92,33 @@ static sector_t verity_position_at_level(struct dm_verity *v, sector_t block, > > return block >> (level * v->hash_per_block_bits); > > } > > > > -/* > > - * Callback function for asynchrnous crypto API completion notification > > - */ > > -static void verity_op_done(struct crypto_async_request *base, int err) > > -{ > > - struct verity_result *res = (struct verity_result *)base->data; > > - > > - if (err == -EINPROGRESS) > > - return; > > - > > - res->err = err; > > - complete(&res->completion); > > -} > > - > > -/* > > - * Wait for async crypto API callback > > - */ > > -static inline int verity_complete_op(struct verity_result *res, int ret) > > -{ > > - switch (ret) { > > - case 0: > > - break; > > - > > - case -EINPROGRESS: > > - case -EBUSY: > > - ret = wait_for_completion_interruptible(&res->completion); > > - if (!ret) > > - ret = res->err; > > - reinit_completion(&res->completion); > > - break; > > - > > - default: > > - DMERR("verity_wait_hash: crypto op submission failed: %d", ret); > > - } > > - > > - if (unlikely(ret < 0)) > > - DMERR("verity_wait_hash: crypto op failed: %d", ret); > > - > > - return ret; > > -} > > - > > static int verity_hash_update(struct dm_verity *v, struct ahash_request *req, > > const u8 *data, size_t len, > > - struct verity_result *res) > > + struct crypto_wait *wait) > > { > > struct scatterlist sg; > > > > sg_init_one(&sg, data, len); > > ahash_request_set_crypt(req, &sg, NULL, 
len); > > > > - return verity_complete_op(res, crypto_ahash_update(req)); > > + return crypto_wait_req(crypto_ahash_update(req), wait); > > } > > > > /* > > * Wrapper for crypto_ahash_init, which handles verity salting. > > */ > > static int verity_hash_init(struct dm_verity *v, struct ahash_request *req, > > - struct verity_result *res) > > + struct crypto_wait *wait) > > { > > int r; > > > > ahash_request_set_tfm(req, v->tfm); > > ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP | > > CRYPTO_TFM_REQ_MAY_BACKLOG, > > - verity_op_done, (void *)res); > > - init_completion(&res->completion); > > + crypto_req_done, (void *)wait); > > + crypto_init_wait(wait); > > > > - r = verity_complete_op(res, crypto_ahash_init(req)); > > + r = crypto_wait_req(crypto_ahash_init(req), wait); > > > > if (unlikely(r < 0)) { > > DMERR("crypto_ahash_init failed: %d", r); > > @@ -167,18 +126,18 @@ static int verity_hash_init(struct dm_verity *v, struct ahash_request *req, > > } > > > > if (likely(v->salt_size && (v->version >= 1))) > > - r = verity_hash_update(v, req, v->salt, v->salt_size, res); > > + r = verity_hash_update(v, req, v->salt, v->salt_size, wait); > > > > return r; > > } > > > > static int verity_hash_final(struct dm_verity *v, struct ahash_request *req, > > - u8 *digest, struct verity_result *res) > > + u8 *digest, struct crypto_wait *wait) > > { > > int r; > > > > if (unlikely(v->salt_size && (!v->version))) { > > - r = verity_hash_update(v, req, v->salt, v->salt_size, res); > > + r = verity_hash_update(v, req, v->salt, v->salt_size, wait); > > > > if (r < 0) { > > DMERR("verity_hash_final failed updating salt: %d", r); > > @@ -187,7 +146,7 @@ static int verity_hash_final(struct dm_verity *v, struct ahash_request *req, > > } > > > > ahash_request_set_crypt(req, NULL, digest, 0); > > - r = verity_complete_op(res, crypto_ahash_final(req)); > > + r = crypto_wait_req(crypto_ahash_final(req), wait); > > out: > > return r; > > } > > @@ -196,17 +155,17 @@ int verity_hash(struct dm_verity *v, struct ahash_request *req, > > const u8 *data, size_t len, u8 *digest) > > { > > int r; > > - struct verity_result res; > > + struct crypto_wait wait; > > > > - r = verity_hash_init(v, req, &res); > > + r = verity_hash_init(v, req, &wait); > > if (unlikely(r < 0)) > > goto out; > > > > - r = verity_hash_update(v, req, data, len, &res); > > + r = verity_hash_update(v, req, data, len, &wait); > > if (unlikely(r < 0)) > > goto out; > > > > - r = verity_hash_final(v, req, digest, &res); > > + r = verity_hash_final(v, req, digest, &wait); > > > > out: > > return r; > > @@ -389,7 +348,7 @@ int verity_hash_for_block(struct dm_verity *v, struct dm_verity_io *io, > > * Calculates the digest for the given bio > > */ > > int verity_for_io_block(struct dm_verity *v, struct dm_verity_io *io, > > - struct bvec_iter *iter, struct verity_result *res) > > + struct bvec_iter *iter, struct crypto_wait *wait) > > { > > unsigned int todo = 1 << v->data_dev_block_bits; > > struct bio *bio = dm_bio_from_per_bio_data(io, v->ti->per_io_data_size); > > @@ -414,7 +373,7 @@ int verity_for_io_block(struct dm_verity *v, struct dm_verity_io *io, > > */ > > sg_set_page(&sg, bv.bv_page, len, bv.bv_offset); > > ahash_request_set_crypt(req, &sg, NULL, len); > > - r = verity_complete_op(res, crypto_ahash_update(req)); > > + r = crypto_wait_req(crypto_ahash_update(req), wait); > > > > if (unlikely(r < 0)) { > > DMERR("verity_for_io_block crypto op failed: %d", r); > > @@ -482,7 +441,7 @@ static int verity_verify_io(struct dm_verity_io *io) > > 
struct dm_verity *v = io->v; > > struct bvec_iter start; > > unsigned b; > > - struct verity_result res; > > + struct crypto_wait wait; > > > > for (b = 0; b < io->n_blocks; b++) { > > int r; > > @@ -507,17 +466,17 @@ static int verity_verify_io(struct dm_verity_io *io) > > continue; > > } > > > > - r = verity_hash_init(v, req, &res); > > + r = verity_hash_init(v, req, &wait); > > if (unlikely(r < 0)) > > return r; > > > > start = io->iter; > > - r = verity_for_io_block(v, io, &io->iter, &res); > > + r = verity_for_io_block(v, io, &io->iter, &wait); > > if (unlikely(r < 0)) > > return r; > > > > r = verity_hash_final(v, req, verity_io_real_digest(v, io), > > - &res); > > + &wait); > > if (unlikely(r < 0)) > > return r; > > > > diff --git a/drivers/md/dm-verity.h b/drivers/md/dm-verity.h > > index a59e0ad..b675bc0 100644 > > --- a/drivers/md/dm-verity.h > > +++ b/drivers/md/dm-verity.h > > @@ -90,11 +90,6 @@ struct dm_verity_io { > > */ > > }; > > > > -struct verity_result { > > - struct completion completion; > > - int err; > > -}; > > - > > static inline struct ahash_request *verity_io_hash_req(struct dm_verity *v, > > struct dm_verity_io *io) > > { > > -- > > 2.1.4 > > > > -- > > dm-devel mailing list > > dm-...@re... > > https://www.redhat.com/mailman/listinfo/dm-devel > > -- Gilad Ben-Yossef Chief Coffee Drinker "If you take a class in large-scale robotics, can you end up in a situation where the homework eats your dog?" -- Jean-Baptiste Queru |
From: Mikulas P. <mpa...@re...> - 2017-08-19 20:08:33
|
On Mon, 14 Aug 2017, Gilad Ben-Yossef wrote: > dm-verity is starting async. crypto ops and waiting for them to complete. > Move it over to generic code doing the same. > > This also fixes a possible data coruption bug created by the > use of wait_for_completion_interruptible() without dealing > correctly with an interrupt aborting the wait prior to the > async op finishing. What is the exact problem there? The interruptible sleep is called from a workqueue and workqueues have all signals blocked. Are signals unblocked for some reason there? Should there be another patch for stable kernels that fixes the interruptible sleep? Mikulas > Signed-off-by: Gilad Ben-Yossef <gi...@be...> > --- > drivers/md/dm-verity-target.c | 81 +++++++++++-------------------------------- > drivers/md/dm-verity.h | 5 --- > 2 files changed, 20 insertions(+), 66 deletions(-) > > diff --git a/drivers/md/dm-verity-target.c b/drivers/md/dm-verity-target.c > index 79f18d4..8df08a8 100644 > --- a/drivers/md/dm-verity-target.c > +++ b/drivers/md/dm-verity-target.c > @@ -92,74 +92,33 @@ static sector_t verity_position_at_level(struct dm_verity *v, sector_t block, > return block >> (level * v->hash_per_block_bits); > } > > -/* > - * Callback function for asynchrnous crypto API completion notification > - */ > -static void verity_op_done(struct crypto_async_request *base, int err) > -{ > - struct verity_result *res = (struct verity_result *)base->data; > - > - if (err == -EINPROGRESS) > - return; > - > - res->err = err; > - complete(&res->completion); > -} > - > -/* > - * Wait for async crypto API callback > - */ > -static inline int verity_complete_op(struct verity_result *res, int ret) > -{ > - switch (ret) { > - case 0: > - break; > - > - case -EINPROGRESS: > - case -EBUSY: > - ret = wait_for_completion_interruptible(&res->completion); > - if (!ret) > - ret = res->err; > - reinit_completion(&res->completion); > - break; > - > - default: > - DMERR("verity_wait_hash: crypto op submission failed: %d", ret); > - } > - > - if (unlikely(ret < 0)) > - DMERR("verity_wait_hash: crypto op failed: %d", ret); > - > - return ret; > -} > - > static int verity_hash_update(struct dm_verity *v, struct ahash_request *req, > const u8 *data, size_t len, > - struct verity_result *res) > + struct crypto_wait *wait) > { > struct scatterlist sg; > > sg_init_one(&sg, data, len); > ahash_request_set_crypt(req, &sg, NULL, len); > > - return verity_complete_op(res, crypto_ahash_update(req)); > + return crypto_wait_req(crypto_ahash_update(req), wait); > } > > /* > * Wrapper for crypto_ahash_init, which handles verity salting. 
> */ > static int verity_hash_init(struct dm_verity *v, struct ahash_request *req, > - struct verity_result *res) > + struct crypto_wait *wait) > { > int r; > > ahash_request_set_tfm(req, v->tfm); > ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP | > CRYPTO_TFM_REQ_MAY_BACKLOG, > - verity_op_done, (void *)res); > - init_completion(&res->completion); > + crypto_req_done, (void *)wait); > + crypto_init_wait(wait); > > - r = verity_complete_op(res, crypto_ahash_init(req)); > + r = crypto_wait_req(crypto_ahash_init(req), wait); > > if (unlikely(r < 0)) { > DMERR("crypto_ahash_init failed: %d", r); > @@ -167,18 +126,18 @@ static int verity_hash_init(struct dm_verity *v, struct ahash_request *req, > } > > if (likely(v->salt_size && (v->version >= 1))) > - r = verity_hash_update(v, req, v->salt, v->salt_size, res); > + r = verity_hash_update(v, req, v->salt, v->salt_size, wait); > > return r; > } > > static int verity_hash_final(struct dm_verity *v, struct ahash_request *req, > - u8 *digest, struct verity_result *res) > + u8 *digest, struct crypto_wait *wait) > { > int r; > > if (unlikely(v->salt_size && (!v->version))) { > - r = verity_hash_update(v, req, v->salt, v->salt_size, res); > + r = verity_hash_update(v, req, v->salt, v->salt_size, wait); > > if (r < 0) { > DMERR("verity_hash_final failed updating salt: %d", r); > @@ -187,7 +146,7 @@ static int verity_hash_final(struct dm_verity *v, struct ahash_request *req, > } > > ahash_request_set_crypt(req, NULL, digest, 0); > - r = verity_complete_op(res, crypto_ahash_final(req)); > + r = crypto_wait_req(crypto_ahash_final(req), wait); > out: > return r; > } > @@ -196,17 +155,17 @@ int verity_hash(struct dm_verity *v, struct ahash_request *req, > const u8 *data, size_t len, u8 *digest) > { > int r; > - struct verity_result res; > + struct crypto_wait wait; > > - r = verity_hash_init(v, req, &res); > + r = verity_hash_init(v, req, &wait); > if (unlikely(r < 0)) > goto out; > > - r = verity_hash_update(v, req, data, len, &res); > + r = verity_hash_update(v, req, data, len, &wait); > if (unlikely(r < 0)) > goto out; > > - r = verity_hash_final(v, req, digest, &res); > + r = verity_hash_final(v, req, digest, &wait); > > out: > return r; > @@ -389,7 +348,7 @@ int verity_hash_for_block(struct dm_verity *v, struct dm_verity_io *io, > * Calculates the digest for the given bio > */ > int verity_for_io_block(struct dm_verity *v, struct dm_verity_io *io, > - struct bvec_iter *iter, struct verity_result *res) > + struct bvec_iter *iter, struct crypto_wait *wait) > { > unsigned int todo = 1 << v->data_dev_block_bits; > struct bio *bio = dm_bio_from_per_bio_data(io, v->ti->per_io_data_size); > @@ -414,7 +373,7 @@ int verity_for_io_block(struct dm_verity *v, struct dm_verity_io *io, > */ > sg_set_page(&sg, bv.bv_page, len, bv.bv_offset); > ahash_request_set_crypt(req, &sg, NULL, len); > - r = verity_complete_op(res, crypto_ahash_update(req)); > + r = crypto_wait_req(crypto_ahash_update(req), wait); > > if (unlikely(r < 0)) { > DMERR("verity_for_io_block crypto op failed: %d", r); > @@ -482,7 +441,7 @@ static int verity_verify_io(struct dm_verity_io *io) > struct dm_verity *v = io->v; > struct bvec_iter start; > unsigned b; > - struct verity_result res; > + struct crypto_wait wait; > > for (b = 0; b < io->n_blocks; b++) { > int r; > @@ -507,17 +466,17 @@ static int verity_verify_io(struct dm_verity_io *io) > continue; > } > > - r = verity_hash_init(v, req, &res); > + r = verity_hash_init(v, req, &wait); > if (unlikely(r < 0)) > return r; > > start = 
io->iter; > - r = verity_for_io_block(v, io, &io->iter, &res); > + r = verity_for_io_block(v, io, &io->iter, &wait); > if (unlikely(r < 0)) > return r; > > r = verity_hash_final(v, req, verity_io_real_digest(v, io), > - &res); > + &wait); > if (unlikely(r < 0)) > return r; > > diff --git a/drivers/md/dm-verity.h b/drivers/md/dm-verity.h > index a59e0ad..b675bc0 100644 > --- a/drivers/md/dm-verity.h > +++ b/drivers/md/dm-verity.h > @@ -90,11 +90,6 @@ struct dm_verity_io { > */ > }; > > -struct verity_result { > - struct completion completion; > - int err; > -}; > - > static inline struct ahash_request *verity_io_hash_req(struct dm_verity *v, > struct dm_verity_io *io) > { > -- > 2.1.4 > > -- > dm-devel mailing list > dm-...@re... > https://www.redhat.com/mailman/listinfo/dm-devel > |
From: Matthew G. <mj...@sr...> - 2017-08-17 20:39:46
|
I'm pleased to say that after the success last year, there will be another TPM microconference at this year's Linux Plumbers Conference. The current schedule has this taking place on Wednesday the 13th of September, so just under 4 weeks from now. We have a list of proposals for discussion at http://wiki.linuxplumbersconf.org/2017:tpms but please feel free to add more! I intend to finalise the schedule by the end of next week, so please do so as soon as you can. For those of you who weren't there, the Linux Plumbers conference is an event dedicated to bringing together people working on various infrastructural components (the plumbing) of Linux. Microconferences are 3 hour long events dedicated to a specific topic, with the focus on identifying problems and having enough people in the room to start figuring out what the solutions should be - the format is typically some short presentations coupled with discussion. -- Matthew Garrett | mj...@sr... |
From: Mimi Z. <zo...@li...> - 2017-08-17 15:43:50
|
> diff --git a/security/integrity/ima/ima_appraise.c b/security/integrity/ima/ima_appraise.c > index 87d2b601cf8e..5a244ebc61d9 100644 > --- a/security/integrity/ima/ima_appraise.c > +++ b/security/integrity/ima/ima_appraise.c > @@ -190,6 +190,64 @@ int ima_read_xattr(struct dentry *dentry, > return ret; > } > > +static void process_xattr_error(int rc, struct integrity_iint_cache *iint, > + int opened, char const **cause, > + enum integrity_status *status) > +{ > + if (rc && rc != -ENODATA) > + return; > + > + *cause = iint->flags & IMA_DIGSIG_REQUIRED ? > + "IMA-signature-required" : "missing-hash"; > + *status = INTEGRITY_NOLABEL; > + > + if (opened & FILE_CREATED) > + iint->flags |= IMA_NEW_FILE; > + > + if ((iint->flags & IMA_NEW_FILE) && > + !(iint->flags & IMA_DIGSIG_REQUIRED)) > + *status = INTEGRITY_PASS; > +} > + > +static int appraise_modsig(struct integrity_iint_cache *iint, > + struct evm_ima_xattr_data *xattr_value, > + int xattr_len) > +{ > + enum hash_algo algo; > + const void *digest; > + void *buf; > + int rc, len; > + u8 dig_len; > + > + rc = ima_modsig_verify(INTEGRITY_KEYRING_IMA, xattr_value); > + if (rc) > + return rc; > + > + /* > + * The signature is good. Now let's put the sig hash > + * into the iint cache so that it gets stored in the > + * measurement list. > + */ > + > + rc = ima_get_modsig_hash(xattr_value, &algo, &digest, &dig_len); > + if (rc) > + return rc; > + > + len = sizeof(iint->ima_hash) + dig_len; > + buf = krealloc(iint->ima_hash, len, GFP_NOFS); > + if (!buf) > + return -ENOMEM; > + > + iint->ima_hash = buf; > + iint->flags |= IMA_DIGSIG; > + iint->ima_hash->algo = algo; > + iint->ima_hash->length = dig_len; > + > + memcpy(iint->ima_hash->digest, digest, dig_len); > + > + return 0; > +} Depending on the IMA policy, the file could already have been measured. That measurement list entry might include the file signature, as stored in the xattr, in the ima-sig template data. I think even if a measurement list entry exists, we would want an additional measurement list entry, which includes the appended signature in the ima-sig template data. Mimi |
From: Mimi Z. <zo...@li...> - 2017-08-17 15:05:00
|
On Fri, 2017-08-04 at 19:03 -0300, Thiago Jung Bauermann wrote: > This patch introduces the modsig keyword to the IMA policy syntax to > specify that a given hook should expect the file to have the IMA signature > appended to it. Here is how it can be used in a rule: > > appraise func=KEXEC_KERNEL_CHECK appraise_type=modsig|imasig > > With this rule, IMA will accept either an appended signature or a signature > stored in the extended attribute. In that case, it will first check whether > there is an appended signature, and if not it will read it from the > extended attribute. > > The format of the appended signature is the same used for signed kernel > modules. This means that the file can be signed with the scripts/sign-file > tool, with a command line such as this: > > $ sign-file sha256 privkey_ima.pem x509_ima.der vmlinux > > This code only works for files that are hashed from a memory buffer, not > for files that are read from disk at the time of hash calculation. In other > words, only hooks that use kernel_read_file can support appended > signatures. This means that only FIRMWARE_CHECK, KEXEC_KERNEL_CHECK, > KEXEC_INITRAMFS_CHECK and POLICY_CHECK can be supported. > > This feature warrants a separate config option because enabling it brings > in many other config options. > > Signed-off-by: Thiago Jung Bauermann <bau...@li...> Other than the appended signature not being properly included in the measurement list, the patch seems to be working. This patch is on the rather large size. Could you go back and break this patch up into smaller, more concise patches, with clear patch descriptions (eg. separate code cleanup from changes, new policy option, code for appraising an attached signature, storing the appended signature in the measurement list, etc)? thanks! Mimi > --- > security/integrity/ima/Kconfig | 13 +++ > security/integrity/ima/Makefile | 1 + > security/integrity/ima/ima.h | 70 +++++++++++- > security/integrity/ima/ima_appraise.c | 178 +++++++++++++++++++++++++----- > security/integrity/ima/ima_main.c | 7 +- > security/integrity/ima/ima_modsig.c | 178 ++++++++++++++++++++++++++++++ > security/integrity/ima/ima_policy.c | 26 +++-- > security/integrity/ima/ima_template_lib.c | 14 ++- > security/integrity/integrity.h | 4 +- > 9 files changed, 443 insertions(+), 48 deletions(-) > > diff --git a/security/integrity/ima/Kconfig b/security/integrity/ima/Kconfig > index 35ef69312811..55f734a6124b 100644 > --- a/security/integrity/ima/Kconfig > +++ b/security/integrity/ima/Kconfig > @@ -163,6 +163,19 @@ config IMA_APPRAISE_BOOTPARAM > This option enables the different "ima_appraise=" modes > (eg. fix, log) from the boot command line. > > +config IMA_APPRAISE_MODSIG > + bool "Support module-style signatures for appraisal" > + depends on IMA_APPRAISE > + depends on INTEGRITY_ASYMMETRIC_KEYS > + select PKCS7_MESSAGE_PARSER > + select MODULE_SIG_FORMAT > + default n > + help > + Adds support for signatures appended to files. The format of the > + appended signature is the same used for signed kernel modules. > + The modsig keyword can be used in the IMA policy to allow a hook > + to accept such signatures. 
> + > config IMA_TRUSTED_KEYRING > bool "Require all keys on the .ima keyring be signed (deprecated)" > depends on IMA_APPRAISE && SYSTEM_TRUSTED_KEYRING > diff --git a/security/integrity/ima/Makefile b/security/integrity/ima/Makefile > index 29f198bde02b..c72026acecc3 100644 > --- a/security/integrity/ima/Makefile > +++ b/security/integrity/ima/Makefile > @@ -8,5 +8,6 @@ obj-$(CONFIG_IMA) += ima.o > ima-y := ima_fs.o ima_queue.o ima_init.o ima_main.o ima_crypto.o ima_api.o \ > ima_policy.o ima_template.o ima_template_lib.o > ima-$(CONFIG_IMA_APPRAISE) += ima_appraise.o > +ima-$(CONFIG_IMA_APPRAISE_MODSIG) += ima_modsig.o > ima-$(CONFIG_HAVE_IMA_KEXEC) += ima_kexec.o > obj-$(CONFIG_IMA_BLACKLIST_KEYRING) += ima_mok.o > diff --git a/security/integrity/ima/ima.h b/security/integrity/ima/ima.h > index d52b487ad259..5492af2cd7c7 100644 > --- a/security/integrity/ima/ima.h > +++ b/security/integrity/ima/ima.h > @@ -190,6 +190,8 @@ enum ima_hooks { > __ima_hooks(__ima_hook_enumify) > }; > > +extern const char *const func_tokens[]; > + > /* LIM API function definitions */ > int ima_get_action(struct inode *inode, int mask, > enum ima_hooks func, int *pcr); > @@ -236,9 +238,10 @@ int ima_policy_show(struct seq_file *m, void *v); > #ifdef CONFIG_IMA_APPRAISE > int ima_appraise_measurement(enum ima_hooks func, > struct integrity_iint_cache *iint, > - struct file *file, const unsigned char *filename, > - struct evm_ima_xattr_data *xattr_value, > - int xattr_len, int opened); > + struct file *file, const void *buf, loff_t size, > + const unsigned char *filename, > + struct evm_ima_xattr_data **xattr_value, > + int *xattr_len, int opened); > int ima_must_appraise(struct inode *inode, int mask, enum ima_hooks func); > void ima_update_xattr(struct integrity_iint_cache *iint, struct file *file); > enum integrity_status ima_get_cache_status(struct integrity_iint_cache *iint, > @@ -248,13 +251,28 @@ enum hash_algo ima_get_hash_algo(struct evm_ima_xattr_data *xattr_value, > int ima_read_xattr(struct dentry *dentry, > struct evm_ima_xattr_data **xattr_value); > > +#ifdef CONFIG_IMA_APPRAISE_MODSIG > +bool ima_hook_supports_modsig(enum ima_hooks func); > +int ima_read_modsig(enum ima_hooks func, const void *buf, loff_t buf_len, > + struct evm_ima_xattr_data **xattr_value, > + int *xattr_len); > +int ima_get_modsig_hash(struct evm_ima_xattr_data *hdr, enum hash_algo *algo, > + void const **hash, u8 *len); > +int ima_modsig_serialize_data(struct evm_ima_xattr_data *hdr, > + struct evm_ima_xattr_data **data, int *data_len); > +int ima_modsig_verify(const unsigned int keyring_id, > + struct evm_ima_xattr_data *hdr); > +void ima_free_xattr_data(struct evm_ima_xattr_data *hdr); > +#endif > + > #else > static inline int ima_appraise_measurement(enum ima_hooks func, > struct integrity_iint_cache *iint, > - struct file *file, > + struct file *file, const void *buf, > + loff_t size, > const unsigned char *filename, > - struct evm_ima_xattr_data *xattr_value, > - int xattr_len, int opened) > + struct evm_ima_xattr_data **xattr_value, > + int *xattr_len, int opened) > { > return INTEGRITY_UNKNOWN; > } > @@ -291,6 +309,46 @@ static inline int ima_read_xattr(struct dentry *dentry, > > #endif /* CONFIG_IMA_APPRAISE */ > > +#ifndef CONFIG_IMA_APPRAISE_MODSIG > +static inline bool ima_hook_supports_modsig(enum ima_hooks func) > +{ > + return false; > +} > + > +static inline int ima_read_modsig(enum ima_hooks func, const void *buf, > + loff_t buf_len, > + struct evm_ima_xattr_data **xattr_value, > + int *xattr_len) > +{ > + 
return -ENOTSUPP; > +} > + > +static inline int ima_get_modsig_hash(struct evm_ima_xattr_data *hdr, > + enum hash_algo *algo, void const **hash, > + u8 *len) > +{ > + return -ENOTSUPP; > +} > + > +static inline int ima_modsig_serialize_data(struct evm_ima_xattr_data *hdr, > + struct evm_ima_xattr_data **data, > + int *data_len) > +{ > + return -ENOTSUPP; > +} > + > +static inline int ima_modsig_verify(const unsigned int keyring_id, > + struct evm_ima_xattr_data *hdr) > +{ > + return -ENOTSUPP; > +} > + > +static inline void ima_free_xattr_data(struct evm_ima_xattr_data *hdr) > +{ > + kfree(hdr); > +} > +#endif > + > /* LSM based policy rules require audit */ > #ifdef CONFIG_IMA_LSM_RULES > > diff --git a/security/integrity/ima/ima_appraise.c b/security/integrity/ima/ima_appraise.c > index 87d2b601cf8e..5a244ebc61d9 100644 > --- a/security/integrity/ima/ima_appraise.c > +++ b/security/integrity/ima/ima_appraise.c > @@ -190,6 +190,64 @@ int ima_read_xattr(struct dentry *dentry, > return ret; > } > > +static void process_xattr_error(int rc, struct integrity_iint_cache *iint, > + int opened, char const **cause, > + enum integrity_status *status) > +{ > + if (rc && rc != -ENODATA) > + return; > + > + *cause = iint->flags & IMA_DIGSIG_REQUIRED ? > + "IMA-signature-required" : "missing-hash"; > + *status = INTEGRITY_NOLABEL; > + > + if (opened & FILE_CREATED) > + iint->flags |= IMA_NEW_FILE; > + > + if ((iint->flags & IMA_NEW_FILE) && > + !(iint->flags & IMA_DIGSIG_REQUIRED)) > + *status = INTEGRITY_PASS; > +} > + > +static int appraise_modsig(struct integrity_iint_cache *iint, > + struct evm_ima_xattr_data *xattr_value, > + int xattr_len) > +{ > + enum hash_algo algo; > + const void *digest; > + void *buf; > + int rc, len; > + u8 dig_len; > + > + rc = ima_modsig_verify(INTEGRITY_KEYRING_IMA, xattr_value); > + if (rc) > + return rc; > + > + /* > + * The signature is good. Now let's put the sig hash > + * into the iint cache so that it gets stored in the > + * measurement list. 
> + */ > + > + rc = ima_get_modsig_hash(xattr_value, &algo, &digest, &dig_len); > + if (rc) > + return rc; > + > + len = sizeof(iint->ima_hash) + dig_len; > + buf = krealloc(iint->ima_hash, len, GFP_NOFS); > + if (!buf) > + return -ENOMEM; > + > + iint->ima_hash = buf; > + iint->flags |= IMA_DIGSIG; > + iint->ima_hash->algo = algo; > + iint->ima_hash->length = dig_len; > + > + memcpy(iint->ima_hash->digest, digest, dig_len); > + > + return 0; > +} > + > /* > * ima_appraise_measurement - appraise file measurement > * > @@ -200,44 +258,55 @@ int ima_read_xattr(struct dentry *dentry, > */ > int ima_appraise_measurement(enum ima_hooks func, > struct integrity_iint_cache *iint, > - struct file *file, const unsigned char *filename, > - struct evm_ima_xattr_data *xattr_value, > - int xattr_len, int opened) > + struct file *file, const void *buf, loff_t size, > + const unsigned char *filename, > + struct evm_ima_xattr_data **xattr_value_, > + int *xattr_len_, int opened) > { > static const char op[] = "appraise_data"; > - char *cause = "unknown"; > + const char *cause = "unknown"; > struct dentry *dentry = file_dentry(file); > struct inode *inode = d_backing_inode(dentry); > enum integrity_status status = INTEGRITY_UNKNOWN; > - int rc = xattr_len, hash_start = 0; > + struct evm_ima_xattr_data *xattr_value = *xattr_value_; > + int xattr_len = *xattr_len_, rc = xattr_len, hash_start = 0; > + bool appraising_modsig = false; > + > + if (iint->flags & IMA_MODSIG_ALLOWED && > + !ima_read_modsig(func, buf, size, &xattr_value, &xattr_len)) { > + appraising_modsig = true; > + rc = xattr_len; > + } > > - if (!(inode->i_opflags & IOP_XATTR)) > + /* If not appraising a modsig, we need an xattr. */ > + if (!appraising_modsig && !(inode->i_opflags & IOP_XATTR)) > return INTEGRITY_UNKNOWN; > > if (rc <= 0) { > - if (rc && rc != -ENODATA) > - goto out; > - > - cause = iint->flags & IMA_DIGSIG_REQUIRED ? > - "IMA-signature-required" : "missing-hash"; > - status = INTEGRITY_NOLABEL; > - if (opened & FILE_CREATED) > - iint->flags |= IMA_NEW_FILE; > - if ((iint->flags & IMA_NEW_FILE) && > - !(iint->flags & IMA_DIGSIG_REQUIRED)) > - status = INTEGRITY_PASS; > + process_xattr_error(rc, iint, opened, &cause, &status); > goto out; > } > > - status = evm_verifyxattr(dentry, XATTR_NAME_IMA, xattr_value, rc, iint); > - if ((status != INTEGRITY_PASS) && (status != INTEGRITY_UNKNOWN)) { > - if ((status == INTEGRITY_NOLABEL) > - || (status == INTEGRITY_NOXATTRS)) > - cause = "missing-HMAC"; > - else if (status == INTEGRITY_FAIL) > - cause = "invalid-HMAC"; > + status = evm_verifyxattr(dentry, XATTR_NAME_IMA, NULL, 0, iint); > + switch (status) { > + case INTEGRITY_PASS: > + case INTEGRITY_UNKNOWN: > + break; > + case INTEGRITY_NOXATTRS: > + /* No EVM protected xattrs. */ > + if (appraising_modsig) > + break; > + case INTEGRITY_NOLABEL: > + /* No security.evm xattr. */ > + cause = "missing-HMAC"; > + goto out; > + case INTEGRITY_FAIL: > + /* Invalid HMAC/signature. */ > + cause = "invalid-HMAC"; > goto out; > } > + > + retry: > switch (xattr_value->type) { > case IMA_XATTR_DIGEST_NG: > /* first byte contains algorithm id */ > @@ -281,6 +350,54 @@ int ima_appraise_measurement(enum ima_hooks func, > status = INTEGRITY_PASS; > } > break; > + case IMA_MODSIG: > + /* > + * To avoid being tricked into an infinite loop, we don't allow > + * a modsig stored in the xattr. 
> + */ > + if (!appraising_modsig) { > + status = INTEGRITY_UNKNOWN; > + cause = "unknown-ima-data"; > + break; > + } > + > + rc = appraise_modsig(iint, xattr_value, xattr_len); > + if (!rc) { > + kfree(*xattr_value_); > + *xattr_value_ = xattr_value; > + *xattr_len_ = xattr_len; > + > + status = INTEGRITY_PASS; > + break; > + } > + > + ima_free_xattr_data(xattr_value); > + > + /* > + * The appended signature failed verification. Let's see if > + * there's an extended attribute to fall back to. > + */ > + if (inode->i_opflags & IOP_XATTR && *xattr_len_ != 0) { > + xattr_value = *xattr_value_; > + xattr_len = rc = *xattr_len_; > + appraising_modsig = false; > + > + if (rc <= 0) { > + process_xattr_error(rc, iint, opened, &cause, > + &status); > + goto out; > + } > + > + goto retry; > + } > + > + if (rc == -EOPNOTSUPP) > + status = INTEGRITY_UNKNOWN; > + else if (rc) { > + cause = "invalid-signature"; > + status = INTEGRITY_FAIL; > + } > + break; > default: > status = INTEGRITY_UNKNOWN; > cause = "unknown-ima-data"; > @@ -291,13 +408,15 @@ int ima_appraise_measurement(enum ima_hooks func, > if (status != INTEGRITY_PASS) { > if ((ima_appraise & IMA_APPRAISE_FIX) && > (!xattr_value || > - xattr_value->type != EVM_IMA_XATTR_DIGSIG)) { > + (xattr_value->type != EVM_IMA_XATTR_DIGSIG && > + xattr_value->type != IMA_MODSIG))) { > if (!ima_fix_xattr(dentry, iint)) > status = INTEGRITY_PASS; > } else if ((inode->i_size == 0) && > (iint->flags & IMA_NEW_FILE) && > (xattr_value && > - xattr_value->type == EVM_IMA_XATTR_DIGSIG)) { > + (xattr_value->type == EVM_IMA_XATTR_DIGSIG || > + xattr_value->type == IMA_MODSIG))) { > status = INTEGRITY_PASS; > } > integrity_audit_msg(AUDIT_INTEGRITY_DATA, inode, filename, > @@ -398,6 +517,7 @@ int ima_inode_setxattr(struct dentry *dentry, const char *xattr_name, > const void *xattr_value, size_t xattr_value_len) > { > const struct evm_ima_xattr_data *xvalue = xattr_value; > + bool digsig; > int result; > > result = ima_protect_xattr(dentry, xattr_name, xattr_value, > @@ -405,8 +525,10 @@ int ima_inode_setxattr(struct dentry *dentry, const char *xattr_name, > if (result == 1) { > if (!xattr_value_len || (xvalue->type >= IMA_XATTR_LAST)) > return -EINVAL; > - ima_reset_appraise_flags(d_backing_inode(dentry), > - (xvalue->type == EVM_IMA_XATTR_DIGSIG) ? 
1 : 0); > + > + digsig = xvalue->type == EVM_IMA_XATTR_DIGSIG || > + xvalue->type == IMA_MODSIG; > + ima_reset_appraise_flags(d_backing_inode(dentry), digsig); > result = 0; > } > return result; > diff --git a/security/integrity/ima/ima_main.c b/security/integrity/ima/ima_main.c > index 0b4845e7248d..93fa257c71a7 100644 > --- a/security/integrity/ima/ima_main.c > +++ b/security/integrity/ima/ima_main.c > @@ -245,8 +245,9 @@ static int process_measurement(struct file *file, char *buf, loff_t size, > pathname = ima_d_path(&file->f_path, &pathbuf, filename); > > if (action & IMA_APPRAISE_SUBMASK) > - rc = ima_appraise_measurement(func, iint, file, pathname, > - xattr_value, xattr_len, opened); > + rc = ima_appraise_measurement(func, iint, file, buf, size, > + pathname, &xattr_value, > + &xattr_len, opened); > if (action & IMA_MEASURE) > ima_store_measurement(iint, file, pathname, > xattr_value, xattr_len, pcr); > @@ -257,7 +258,7 @@ static int process_measurement(struct file *file, char *buf, loff_t size, > if ((mask & MAY_WRITE) && (iint->flags & IMA_DIGSIG) && > !(iint->flags & IMA_NEW_FILE)) > rc = -EACCES; > - kfree(xattr_value); > + ima_free_xattr_data(xattr_value); > out_free: > if (pathbuf) > __putname(pathbuf); > diff --git a/security/integrity/ima/ima_modsig.c b/security/integrity/ima/ima_modsig.c > new file mode 100644 > index 000000000000..7983edf84511 > --- /dev/null > +++ b/security/integrity/ima/ima_modsig.c > @@ -0,0 +1,178 @@ > +/* > + * IMA support for appraising module-style appended signatures. > + * > + * Copyright (C) 2017 IBM Corporation > + * > + * Author: > + * Thiago Jung Bauermann <bau...@li...> > + * > + * This program is free software; you can redistribute it and/or modify > + * it under the terms of the GNU General Public License as published by > + * the Free Software Foundation (version 2 of the License). > + * > + * This program is distributed in the hope that it will be useful, > + * but WITHOUT ANY WARRANTY; without even the implied warranty of > + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the > + * GNU General Public License for more details. > + */ > + > +#include <linux/types.h> > +#include <linux/module_signature.h> > +#include <keys/asymmetric-type.h> > +#include <crypto/pkcs7.h> > + > +#include "ima.h" > + > +struct modsig_hdr { > + uint8_t type; /* Should be IMA_MODSIG. */ > + const void *data; /* Pointer to data covered by pkcs7_msg. */ > + size_t data_len; > + struct pkcs7_message *pkcs7_msg; > + int raw_pkcs7_len; > + > + /* This will be in the measurement list if required by the template. */ > + struct evm_ima_xattr_data raw_pkcs7; > +}; > + > +/** > + * ima_hook_supports_modsig - can the policy allow modsig for this hook? > + * > + * modsig is only supported by hooks using ima_post_read_file, because only they > + * preload the contents of the file in a buffer. FILE_CHECK does that in some > + * cases, but not when reached from vfs_open. POLICY_CHECK can support it, but > + * it's not useful in practice because it's a text file so deny. 
> + */
> +bool ima_hook_supports_modsig(enum ima_hooks func)
> +{
> + switch (func) {
> + case FIRMWARE_CHECK:
> + case KEXEC_KERNEL_CHECK:
> + case KEXEC_INITRAMFS_CHECK:
> + return true;
> + default:
> + return false;
> + }
> +}
> +
> +int ima_read_modsig(enum ima_hooks func, const void *buf, loff_t buf_len,
> + struct evm_ima_xattr_data **xattr_value,
> + int *xattr_len)
> +{
> + const size_t marker_len = sizeof(MODULE_SIG_STRING) - 1;
> + const struct module_signature *sig;
> + struct modsig_hdr *hdr;
> + size_t sig_len;
> + const void *p;
> + int rc;
> +
> + /*
> + * Not supposed to happen: hooks that support modsig are
> + * whitelisted when the policy is parsed, via
> + * ima_hook_supports_modsig().
> + */
> + if (!buf || !buf_len) {
> + WARN_ONCE(true, "%s doesn't support modsig\n",
> + func_tokens[func]);
> + return -ENOENT;
> + } else if (buf_len <= marker_len + sizeof(*sig))
> + return -ENOENT;
> +
> + p = buf + buf_len - marker_len;
> + if (memcmp(p, MODULE_SIG_STRING, marker_len))
> + return -ENOENT;
> +
> + buf_len -= marker_len;
> + sig = (const struct module_signature *) (p - sizeof(*sig));
> +
> + rc = validate_module_sig(sig, buf_len);
> + if (rc)
> + return rc;
> +
> + sig_len = be32_to_cpu(sig->sig_len);
> + buf_len -= sig_len + sizeof(*sig);
> +
> + hdr = kmalloc(sizeof(*hdr) + sig_len, GFP_KERNEL);
> + if (!hdr)
> + return -ENOMEM;
> +
> + hdr->pkcs7_msg = pkcs7_parse_message(buf + buf_len, sig_len);
> + if (IS_ERR(hdr->pkcs7_msg)) {
> + rc = PTR_ERR(hdr->pkcs7_msg);
> + kfree(hdr);
> + return rc;
> + }
> +
> + memcpy(hdr->raw_pkcs7.data, buf + buf_len, sig_len);
> + hdr->raw_pkcs7_len = sig_len + 1;
> + hdr->raw_pkcs7.type = IMA_MODSIG;
> +
> + hdr->type = IMA_MODSIG;
> + hdr->data = buf;
> + hdr->data_len = buf_len;
> +
> + *xattr_value = (typeof(*xattr_value)) hdr;
> + *xattr_len = sizeof(*hdr);
> +
> + return 0;
> +}
> +
> +int ima_get_modsig_hash(struct evm_ima_xattr_data *hdr, enum hash_algo *algo,
> + void const **hash, u8 *len)
> +{
> + const struct public_key_signature *pks;
> + struct modsig_hdr *modsig = (typeof(modsig)) hdr;
> + int i;
> +
> + pks = pkcs7_get_message_sig(modsig->pkcs7_msg);
> + if (!pks)
> + return -EBADMSG;
> +
> + for (i = 0; i < HASH_ALGO__LAST; i++)
> + if (!strcmp(hash_algo_name[i], pks->hash_algo))
> + break;
> +
> + *algo = i;
> + *hash = pks->digest;
> + *len = pks->digest_size;
> +
> + return 0;
> +}
> +
> +int ima_modsig_serialize_data(struct evm_ima_xattr_data *hdr,
> + struct evm_ima_xattr_data **data, int *data_len)
> +{
> + struct modsig_hdr *modsig = (struct modsig_hdr *) hdr;
> +
> + *data = &modsig->raw_pkcs7;
> + *data_len = modsig->raw_pkcs7_len;
> +
> + return 0;
> +}
> +
> +int ima_modsig_verify(const unsigned int keyring_id,
> + struct evm_ima_xattr_data *hdr)
> +{
> + struct modsig_hdr *modsig = (struct modsig_hdr *) hdr;
> + struct key *trusted_keys = integrity_keyring_from_id(keyring_id);
> +
> + if (IS_ERR(trusted_keys))
> + return -EINVAL;
> +
> + return verify_pkcs7_message_sig(modsig->data, modsig->data_len,
> + modsig->pkcs7_msg, trusted_keys,
> + VERIFYING_MODULE_SIGNATURE, NULL, NULL);
> +}
> +
> +void ima_free_xattr_data(struct evm_ima_xattr_data *hdr)
> +{
> + if (!hdr)
> + return;
> +
> + if (hdr->type == IMA_MODSIG) {
> + struct modsig_hdr *modsig = (struct modsig_hdr *) hdr;
> +
> + pkcs7_free_message(modsig->pkcs7_msg);
> + }
> +
> + kfree(hdr);
> +}
> diff --git a/security/integrity/ima/ima_policy.c b/security/integrity/ima/ima_policy.c
> index 95209a5f8595..cb056d20e6e5 100644
> ---
a/security/integrity/ima/ima_policy.c > +++ b/security/integrity/ima/ima_policy.c > @@ -851,8 +851,12 @@ static int ima_parse_rule(char *rule, struct ima_rule_entry *entry) > } > > ima_log_string(ab, "appraise_type", args[0].from); > - if ((strcmp(args[0].from, "imasig")) == 0) > + if (strcmp(args[0].from, "imasig") == 0) > entry->flags |= IMA_DIGSIG_REQUIRED; > + else if (ima_hook_supports_modsig(entry->func) && > + strcmp(args[0].from, "modsig|imasig") == 0) > + entry->flags |= IMA_DIGSIG_REQUIRED > + | IMA_MODSIG_ALLOWED; > else > result = -EINVAL; > break; > @@ -958,6 +962,12 @@ void ima_delete_rules(void) > } > } > > +#define __ima_hook_stringify(str) (#str), > + > +const char *const func_tokens[] = { > + __ima_hooks(__ima_hook_stringify) > +}; > + > #ifdef CONFIG_IMA_READ_POLICY > enum { > mask_exec = 0, mask_write, mask_read, mask_append > @@ -970,12 +980,6 @@ static const char *const mask_tokens[] = { > "MAY_APPEND" > }; > > -#define __ima_hook_stringify(str) (#str), > - > -static const char *const func_tokens[] = { > - __ima_hooks(__ima_hook_stringify) > -}; > - > void *ima_policy_start(struct seq_file *m, loff_t *pos) > { > loff_t l = *pos; > @@ -1138,8 +1142,12 @@ int ima_policy_show(struct seq_file *m, void *v) > } > } > } > - if (entry->flags & IMA_DIGSIG_REQUIRED) > - seq_puts(m, "appraise_type=imasig "); > + if (entry->flags & IMA_DIGSIG_REQUIRED) { > + if (entry->flags & IMA_MODSIG_ALLOWED) > + seq_puts(m, "appraise_type=modsig|imasig "); > + else > + seq_puts(m, "appraise_type=imasig "); > + } > if (entry->flags & IMA_PERMIT_DIRECTIO) > seq_puts(m, "permit_directio "); > rcu_read_unlock(); > diff --git a/security/integrity/ima/ima_template_lib.c b/security/integrity/ima/ima_template_lib.c > index 28af43f63572..8c7fa52604a5 100644 > --- a/security/integrity/ima/ima_template_lib.c > +++ b/security/integrity/ima/ima_template_lib.c > @@ -383,9 +383,21 @@ int ima_eventsig_init(struct ima_event_data *event_data, > int xattr_len = event_data->xattr_len; > int rc = 0; > > - if ((!xattr_value) || (xattr_value->type != EVM_IMA_XATTR_DIGSIG)) > + if (!xattr_value || (xattr_value->type != EVM_IMA_XATTR_DIGSIG && > + xattr_value->type != IMA_MODSIG)) > goto out; > > + /* > + * The xattr_value for IMA_MODSIG is a runtime structure containing > + * pointers. Get its raw data instead. > + */ > + if (xattr_value->type == IMA_MODSIG) { > + rc = ima_modsig_serialize_data(xattr_value, &xattr_value, > + &xattr_len); > + if (rc) > + goto out; > + } > + > rc = ima_write_template_field_data(xattr_value, xattr_len, fmt, > field_data); > out: > diff --git a/security/integrity/integrity.h b/security/integrity/integrity.h > index 1f8f1a31d487..0b8b9efa0d2e 100644 > --- a/security/integrity/integrity.h > +++ b/security/integrity/integrity.h > @@ -28,11 +28,12 @@ > > /* iint cache flags */ > #define IMA_ACTION_FLAGS 0xff000000 > -#define IMA_ACTION_RULE_FLAGS 0x06000000 > +#define IMA_ACTION_RULE_FLAGS 0x16000000 > #define IMA_DIGSIG 0x01000000 > #define IMA_DIGSIG_REQUIRED 0x02000000 > #define IMA_PERMIT_DIRECTIO 0x04000000 > #define IMA_NEW_FILE 0x08000000 > +#define IMA_MODSIG_ALLOWED 0x10000000 > > #define IMA_DO_MASK (IMA_MEASURE | IMA_APPRAISE | IMA_AUDIT | \ > IMA_APPRAISE_SUBMASK) > @@ -58,6 +59,7 @@ enum evm_ima_xattr_type { > EVM_XATTR_HMAC, > EVM_IMA_XATTR_DIGSIG, > IMA_XATTR_DIGEST_NG, > + IMA_MODSIG, > IMA_XATTR_LAST > }; > |
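For context on what ima_read_modsig() above is parsing: a file carrying a module-style signature ends with the raw PKCS#7 blob, then a fixed-size struct module_signature trailer, then the literal marker "~Module signature appended~\n", and the function walks that layout backwards from the end of the buffer. The stand-alone C sketch below mirrors that walk in user space. It is illustrative only: find_appended_sig() and the local struct definition are not part of the patch, and the trailer layout and marker string are assumed to mirror the kernel's module signature format.

/*
 * Illustrative user-space sketch (not from the patch): locate a module-style
 * appended signature in a file image, mirroring what ima_read_modsig() does.
 */
#include <stdint.h>
#include <stddef.h>
#include <string.h>
#include <arpa/inet.h>              /* ntohl() for the big-endian sig_len */

#define MODULE_SIG_STRING "~Module signature appended~\n"

struct module_signature {           /* assumed to mirror the kernel layout */
	uint8_t  algo;              /* public-key algorithm (0 for PKCS#7) */
	uint8_t  hash;              /* digest algorithm (0 for PKCS#7) */
	uint8_t  id_type;           /* key identifier type */
	uint8_t  signer_len;        /* unused for PKCS#7 */
	uint8_t  key_id_len;        /* unused for PKCS#7 */
	uint8_t  pad[3];
	uint32_t sig_len;           /* big-endian length of the PKCS#7 blob */
};

/*
 * On success, *sig and *sig_len describe the raw PKCS#7 blob and the return
 * value is the length of the data it covers; -1 means no appended signature.
 */
static long find_appended_sig(const uint8_t *buf, size_t len,
			      const uint8_t **sig, size_t *sig_len)
{
	const size_t marker_len = sizeof(MODULE_SIG_STRING) - 1;
	const struct module_signature *ms;
	size_t n;

	if (len <= marker_len + sizeof(*ms))
		return -1;
	if (memcmp(buf + len - marker_len, MODULE_SIG_STRING, marker_len))
		return -1;

	ms = (const struct module_signature *)(buf + len - marker_len - sizeof(*ms));
	n = ntohl(ms->sig_len);
	if (n > len - marker_len - sizeof(*ms))
		return -1;

	*sig_len = n;
	*sig = buf + len - marker_len - sizeof(*ms) - n;
	return (long)(*sig - buf);  /* length of the data the signature covers */
}

Based on the ima_parse_rule() hunk above, a policy rule along the lines of "appraise func=KEXEC_KERNEL_CHECK appraise_type=modsig|imasig" would then accept an appended signature for the kexec'ed kernel image, falling back to the usual security.ima xattr signature when the appended one does not verify.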