From: Gilad Ben-Y. <gi...@be...> - 2017-09-05 12:42:41
dm-verity is starting async. crypto ops and waiting for them to complete.
Move it over to generic code doing the same.
This also avoids a future potential data corruption bug created
by the use of wait_for_completion_interruptible() without dealing
correctly with an interrupt aborting the wait prior to the
async op finishing, should this code ever move to a context
where signals are not masked.
Signed-off-by: Gilad Ben-Yossef <gi...@be...>
CC: Mikulas Patocka <mpa...@re...>
---
drivers/md/dm-verity-target.c | 81 +++++++++++--------------------------------
drivers/md/dm-verity.h | 5 ---
2 files changed, 20 insertions(+), 66 deletions(-)
diff --git a/drivers/md/dm-verity-target.c b/drivers/md/dm-verity-target.c
index bda3cac..811ad28 100644
--- a/drivers/md/dm-verity-target.c
+++ b/drivers/md/dm-verity-target.c
@@ -92,74 +92,33 @@ static sector_t verity_position_at_level(struct dm_verity *v, sector_t block,
return block >> (level * v->hash_per_block_bits);
}
-/*
- * Callback function for asynchrnous crypto API completion notification
- */
-static void verity_op_done(struct crypto_async_request *base, int err)
-{
- struct verity_result *res = (struct verity_result *)base->data;
-
- if (err == -EINPROGRESS)
- return;
-
- res->err = err;
- complete(&res->completion);
-}
-
-/*
- * Wait for async crypto API callback
- */
-static inline int verity_complete_op(struct verity_result *res, int ret)
-{
- switch (ret) {
- case 0:
- break;
-
- case -EINPROGRESS:
- case -EBUSY:
- ret = wait_for_completion_interruptible(&res->completion);
- if (!ret)
- ret = res->err;
- reinit_completion(&res->completion);
- break;
-
- default:
- DMERR("verity_wait_hash: crypto op submission failed: %d", ret);
- }
-
- if (unlikely(ret < 0))
- DMERR("verity_wait_hash: crypto op failed: %d", ret);
-
- return ret;
-}
-
static int verity_hash_update(struct dm_verity *v, struct ahash_request *req,
const u8 *data, size_t len,
- struct verity_result *res)
+ struct crypto_wait *wait)
{
struct scatterlist sg;
sg_init_one(&sg, data, len);
ahash_request_set_crypt(req, &sg, NULL, len);
- return verity_complete_op(res, crypto_ahash_update(req));
+ return crypto_wait_req(crypto_ahash_update(req), wait);
}
/*
* Wrapper for crypto_ahash_init, which handles verity salting.
*/
static int verity_hash_init(struct dm_verity *v, struct ahash_request *req,
- struct verity_result *res)
+ struct crypto_wait *wait)
{
int r;
ahash_request_set_tfm(req, v->tfm);
ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP |
CRYPTO_TFM_REQ_MAY_BACKLOG,
- verity_op_done, (void *)res);
- init_completion(&res->completion);
+ crypto_req_done, (void *)wait);
+ crypto_init_wait(wait);
- r = verity_complete_op(res, crypto_ahash_init(req));
+ r = crypto_wait_req(crypto_ahash_init(req), wait);
if (unlikely(r < 0)) {
DMERR("crypto_ahash_init failed: %d", r);
@@ -167,18 +126,18 @@ static int verity_hash_init(struct dm_verity *v, struct ahash_request *req,
}
if (likely(v->salt_size && (v->version >= 1)))
- r = verity_hash_update(v, req, v->salt, v->salt_size, res);
+ r = verity_hash_update(v, req, v->salt, v->salt_size, wait);
return r;
}
static int verity_hash_final(struct dm_verity *v, struct ahash_request *req,
- u8 *digest, struct verity_result *res)
+ u8 *digest, struct crypto_wait *wait)
{
int r;
if (unlikely(v->salt_size && (!v->version))) {
- r = verity_hash_update(v, req, v->salt, v->salt_size, res);
+ r = verity_hash_update(v, req, v->salt, v->salt_size, wait);
if (r < 0) {
DMERR("verity_hash_final failed updating salt: %d", r);
@@ -187,7 +146,7 @@ static int verity_hash_final(struct dm_verity *v, struct ahash_request *req,
}
ahash_request_set_crypt(req, NULL, digest, 0);
- r = verity_complete_op(res, crypto_ahash_final(req));
+ r = crypto_wait_req(crypto_ahash_final(req), wait);
out:
return r;
}
@@ -196,17 +155,17 @@ int verity_hash(struct dm_verity *v, struct ahash_request *req,
const u8 *data, size_t len, u8 *digest)
{
int r;
- struct verity_result res;
+ struct crypto_wait wait;
- r = verity_hash_init(v, req, &res);
+ r = verity_hash_init(v, req, &wait);
if (unlikely(r < 0))
goto out;
- r = verity_hash_update(v, req, data, len, &res);
+ r = verity_hash_update(v, req, data, len, &wait);
if (unlikely(r < 0))
goto out;
- r = verity_hash_final(v, req, digest, &res);
+ r = verity_hash_final(v, req, digest, &wait);
out:
return r;
@@ -389,7 +348,7 @@ int verity_hash_for_block(struct dm_verity *v, struct dm_verity_io *io,
* Calculates the digest for the given bio
*/
int verity_for_io_block(struct dm_verity *v, struct dm_verity_io *io,
- struct bvec_iter *iter, struct verity_result *res)
+ struct bvec_iter *iter, struct crypto_wait *wait)
{
unsigned int todo = 1 << v->data_dev_block_bits;
struct bio *bio = dm_bio_from_per_bio_data(io, v->ti->per_io_data_size);
@@ -414,7 +373,7 @@ int verity_for_io_block(struct dm_verity *v, struct dm_verity_io *io,
*/
sg_set_page(&sg, bv.bv_page, len, bv.bv_offset);
ahash_request_set_crypt(req, &sg, NULL, len);
- r = verity_complete_op(res, crypto_ahash_update(req));
+ r = crypto_wait_req(crypto_ahash_update(req), wait);
if (unlikely(r < 0)) {
DMERR("verity_for_io_block crypto op failed: %d", r);
@@ -482,7 +441,7 @@ static int verity_verify_io(struct dm_verity_io *io)
struct dm_verity *v = io->v;
struct bvec_iter start;
unsigned b;
- struct verity_result res;
+ struct crypto_wait wait;
for (b = 0; b < io->n_blocks; b++) {
int r;
@@ -507,17 +466,17 @@ static int verity_verify_io(struct dm_verity_io *io)
continue;
}
- r = verity_hash_init(v, req, &res);
+ r = verity_hash_init(v, req, &wait);
if (unlikely(r < 0))
return r;
start = io->iter;
- r = verity_for_io_block(v, io, &io->iter, &res);
+ r = verity_for_io_block(v, io, &io->iter, &wait);
if (unlikely(r < 0))
return r;
r = verity_hash_final(v, req, verity_io_real_digest(v, io),
- &res);
+ &wait);
if (unlikely(r < 0))
return r;
diff --git a/drivers/md/dm-verity.h b/drivers/md/dm-verity.h
index a59e0ad..b675bc0 100644
--- a/drivers/md/dm-verity.h
+++ b/drivers/md/dm-verity.h
@@ -90,11 +90,6 @@ struct dm_verity_io {
*/
};
-struct verity_result {
- struct completion completion;
- int err;
-};
-
static inline struct ahash_request *verity_io_hash_req(struct dm_verity *v,
struct dm_verity_io *io)
{
--
2.1.4
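
For context, a minimal sketch (not taken from the patch; function names, includes and error handling are illustrative) contrasting the open-coded wait the patch above removes with the generic helper it moves to. The hazard described in the commit message is that an interruptible wait can be aborted by a signal while the async op is still in flight, after which the driver's completion callback would write into stack memory that no longer exists; crypto_wait_req() waits non-interruptibly and so avoids that.

#include <crypto/hash.h>
#include <linux/completion.h>
#include <linux/crypto.h>

/* Illustrative only: the old on-stack result object used by dm-verity. */
struct verity_result {
	struct completion completion;
	int err;
};

/* Old pattern: risky if this code ever runs with signals unmasked. */
static int hash_update_interruptible(struct ahash_request *req,
				     struct verity_result *res)
{
	int ret = crypto_ahash_update(req);

	if (ret == -EINPROGRESS || ret == -EBUSY) {
		ret = wait_for_completion_interruptible(&res->completion);
		if (ret) {
			/*
			 * Interrupted: the op is still in flight and will
			 * later complete() into memory the caller is about
			 * to abandon.
			 */
			return ret;
		}
		ret = res->err;
	}
	return ret;
}

/* New pattern: crypto_wait_req() waits non-interruptibly on a crypto_wait. */
static int hash_update_generic(struct ahash_request *req,
			       struct crypto_wait *wait)
{
	return crypto_wait_req(crypto_ahash_update(req), wait);
}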
From: Gilad Ben-Y. <gi...@be...> - 2017-09-05 12:42:22
fscrypt starts several async. crypto ops and waits for them to
complete. Move it over to generic code doing the same.
Signed-off-by: Gilad Ben-Yossef <gi...@be...>
---
fs/crypto/crypto.c | 28 ++++------------------------
fs/crypto/fname.c | 36 ++++++------------------------------
fs/crypto/fscrypt_private.h | 10 ----------
fs/crypto/keyinfo.c | 21 +++------------------
4 files changed, 13 insertions(+), 82 deletions(-)
diff --git a/fs/crypto/crypto.c b/fs/crypto/crypto.c
index c7835df..80a3cad 100644
--- a/fs/crypto/crypto.c
+++ b/fs/crypto/crypto.c
@@ -126,21 +126,6 @@ struct fscrypt_ctx *fscrypt_get_ctx(const struct inode *inode, gfp_t gfp_flags)
}
EXPORT_SYMBOL(fscrypt_get_ctx);
-/**
- * page_crypt_complete() - completion callback for page crypto
- * @req: The asynchronous cipher request context
- * @res: The result of the cipher operation
- */
-static void page_crypt_complete(struct crypto_async_request *req, int res)
-{
- struct fscrypt_completion_result *ecr = req->data;
-
- if (res == -EINPROGRESS)
- return;
- ecr->res = res;
- complete(&ecr->completion);
-}
-
int fscrypt_do_page_crypto(const struct inode *inode, fscrypt_direction_t rw,
u64 lblk_num, struct page *src_page,
struct page *dest_page, unsigned int len,
@@ -151,7 +136,7 @@ int fscrypt_do_page_crypto(const struct inode *inode, fscrypt_direction_t rw,
u8 padding[FS_IV_SIZE - sizeof(__le64)];
} iv;
struct skcipher_request *req = NULL;
- DECLARE_FS_COMPLETION_RESULT(ecr);
+ DECLARE_CRYPTO_WAIT(wait);
struct scatterlist dst, src;
struct fscrypt_info *ci = inode->i_crypt_info;
struct crypto_skcipher *tfm = ci->ci_ctfm;
@@ -179,7 +164,7 @@ int fscrypt_do_page_crypto(const struct inode *inode, fscrypt_direction_t rw,
skcipher_request_set_callback(
req, CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
- page_crypt_complete, &ecr);
+ crypto_req_done, &wait);
sg_init_table(&dst, 1);
sg_set_page(&dst, dest_page, len, offs);
@@ -187,14 +172,9 @@ int fscrypt_do_page_crypto(const struct inode *inode, fscrypt_direction_t rw,
sg_set_page(&src, src_page, len, offs);
skcipher_request_set_crypt(req, &src, &dst, len, &iv);
if (rw == FS_DECRYPT)
- res = crypto_skcipher_decrypt(req);
+ res = crypto_wait_req(crypto_skcipher_decrypt(req), &wait);
else
- res = crypto_skcipher_encrypt(req);
- if (res == -EINPROGRESS || res == -EBUSY) {
- BUG_ON(req->base.data != &ecr);
- wait_for_completion(&ecr.completion);
- res = ecr.res;
- }
+ res = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
skcipher_request_free(req);
if (res) {
printk_ratelimited(KERN_ERR
diff --git a/fs/crypto/fname.c b/fs/crypto/fname.c
index ad9f814..a80a0d3 100644
--- a/fs/crypto/fname.c
+++ b/fs/crypto/fname.c
@@ -15,21 +15,6 @@
#include "fscrypt_private.h"
/**
- * fname_crypt_complete() - completion callback for filename crypto
- * @req: The asynchronous cipher request context
- * @res: The result of the cipher operation
- */
-static void fname_crypt_complete(struct crypto_async_request *req, int res)
-{
- struct fscrypt_completion_result *ecr = req->data;
-
- if (res == -EINPROGRESS)
- return;
- ecr->res = res;
- complete(&ecr->completion);
-}
-
-/**
* fname_encrypt() - encrypt a filename
*
* The caller must have allocated sufficient memory for the @oname string.
@@ -40,7 +25,7 @@ static int fname_encrypt(struct inode *inode,
const struct qstr *iname, struct fscrypt_str *oname)
{
struct skcipher_request *req = NULL;
- DECLARE_FS_COMPLETION_RESULT(ecr);
+ DECLARE_CRYPTO_WAIT(wait);
struct fscrypt_info *ci = inode->i_crypt_info;
struct crypto_skcipher *tfm = ci->ci_ctfm;
int res = 0;
@@ -76,17 +61,12 @@ static int fname_encrypt(struct inode *inode,
}
skcipher_request_set_callback(req,
CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
- fname_crypt_complete, &ecr);
+ crypto_req_done, &wait);
sg_init_one(&sg, oname->name, cryptlen);
skcipher_request_set_crypt(req, &sg, &sg, cryptlen, iv);
/* Do the encryption */
- res = crypto_skcipher_encrypt(req);
- if (res == -EINPROGRESS || res == -EBUSY) {
- /* Request is being completed asynchronously; wait for it */
- wait_for_completion(&ecr.completion);
- res = ecr.res;
- }
+ res = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
skcipher_request_free(req);
if (res < 0) {
printk_ratelimited(KERN_ERR
@@ -110,7 +90,7 @@ static int fname_decrypt(struct inode *inode,
struct fscrypt_str *oname)
{
struct skcipher_request *req = NULL;
- DECLARE_FS_COMPLETION_RESULT(ecr);
+ DECLARE_CRYPTO_WAIT(wait);
struct scatterlist src_sg, dst_sg;
struct fscrypt_info *ci = inode->i_crypt_info;
struct crypto_skcipher *tfm = ci->ci_ctfm;
@@ -131,7 +111,7 @@ static int fname_decrypt(struct inode *inode,
}
skcipher_request_set_callback(req,
CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
- fname_crypt_complete, &ecr);
+ crypto_req_done, &wait);
/* Initialize IV */
memset(iv, 0, FS_CRYPTO_BLOCK_SIZE);
@@ -140,11 +120,7 @@ static int fname_decrypt(struct inode *inode,
sg_init_one(&src_sg, iname->name, iname->len);
sg_init_one(&dst_sg, oname->name, oname->len);
skcipher_request_set_crypt(req, &src_sg, &dst_sg, iname->len, iv);
- res = crypto_skcipher_decrypt(req);
- if (res == -EINPROGRESS || res == -EBUSY) {
- wait_for_completion(&ecr.completion);
- res = ecr.res;
- }
+ res = crypto_wait_req(crypto_skcipher_decrypt(req), &wait);
skcipher_request_free(req);
if (res < 0) {
printk_ratelimited(KERN_ERR
diff --git a/fs/crypto/fscrypt_private.h b/fs/crypto/fscrypt_private.h
index a1d5021..c0f1881 100644
--- a/fs/crypto/fscrypt_private.h
+++ b/fs/crypto/fscrypt_private.h
@@ -69,16 +69,6 @@ typedef enum {
#define FS_CTX_REQUIRES_FREE_ENCRYPT_FL 0x00000001
#define FS_CTX_HAS_BOUNCE_BUFFER_FL 0x00000002
-struct fscrypt_completion_result {
- struct completion completion;
- int res;
-};
-
-#define DECLARE_FS_COMPLETION_RESULT(ecr) \
- struct fscrypt_completion_result ecr = { \
- COMPLETION_INITIALIZER_ONSTACK((ecr).completion), 0 }
-
-
/* crypto.c */
extern int fscrypt_initialize(unsigned int cop_flags);
extern struct workqueue_struct *fscrypt_read_workqueue;
diff --git a/fs/crypto/keyinfo.c b/fs/crypto/keyinfo.c
index 018c588..3c84cac 100644
--- a/fs/crypto/keyinfo.c
+++ b/fs/crypto/keyinfo.c
@@ -17,17 +17,6 @@
static struct crypto_shash *essiv_hash_tfm;
-static void derive_crypt_complete(struct crypto_async_request *req, int rc)
-{
- struct fscrypt_completion_result *ecr = req->data;
-
- if (rc == -EINPROGRESS)
- return;
-
- ecr->res = rc;
- complete(&ecr->completion);
-}
-
/**
* derive_key_aes() - Derive a key using AES-128-ECB
* @deriving_key: Encryption key used for derivation.
@@ -42,7 +31,7 @@ static int derive_key_aes(u8 deriving_key[FS_AES_128_ECB_KEY_SIZE],
{
int res = 0;
struct skcipher_request *req = NULL;
- DECLARE_FS_COMPLETION_RESULT(ecr);
+ DECLARE_CRYPTO_WAIT(wait);
struct scatterlist src_sg, dst_sg;
struct crypto_skcipher *tfm = crypto_alloc_skcipher("ecb(aes)", 0, 0);
@@ -59,7 +48,7 @@ static int derive_key_aes(u8 deriving_key[FS_AES_128_ECB_KEY_SIZE],
}
skcipher_request_set_callback(req,
CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
- derive_crypt_complete, &ecr);
+ crypto_req_done, &wait);
res = crypto_skcipher_setkey(tfm, deriving_key,
FS_AES_128_ECB_KEY_SIZE);
if (res < 0)
@@ -69,11 +58,7 @@ static int derive_key_aes(u8 deriving_key[FS_AES_128_ECB_KEY_SIZE],
sg_init_one(&dst_sg, derived_raw_key, source_key->size);
skcipher_request_set_crypt(req, &src_sg, &dst_sg, source_key->size,
NULL);
- res = crypto_skcipher_encrypt(req);
- if (res == -EINPROGRESS || res == -EBUSY) {
- wait_for_completion(&ecr.completion);
- res = ecr.res;
- }
+ res = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
out:
skcipher_request_free(req);
crypto_free_skcipher(tfm);
--
2.1.4
From: Gilad Ben-Y. <gi...@be...> - 2017-09-05 12:42:08
testmgr is starting async. crypto ops and waiting for them to complete.
Move it over to generic code doing the same.
This also provides a test of the generic crypto async. wait code.
Signed-off-by: Gilad Ben-Yossef <gi...@be...>
---
crypto/testmgr.c | 204 ++++++++++++++++++-------------------------------------
1 file changed, 66 insertions(+), 138 deletions(-)
diff --git a/crypto/testmgr.c b/crypto/testmgr.c
index 8a124d3..af968f4 100644
--- a/crypto/testmgr.c
+++ b/crypto/testmgr.c
@@ -76,11 +76,6 @@ int alg_test(const char *driver, const char *alg, u32 type, u32 mask)
#define ENCRYPT 1
#define DECRYPT 0
-struct tcrypt_result {
- struct completion completion;
- int err;
-};
-
struct aead_test_suite {
struct {
const struct aead_testvec *vecs;
@@ -155,17 +150,6 @@ static void hexdump(unsigned char *buf, unsigned int len)
buf, len, false);
}
-static void tcrypt_complete(struct crypto_async_request *req, int err)
-{
- struct tcrypt_result *res = req->data;
-
- if (err == -EINPROGRESS)
- return;
-
- res->err = err;
- complete(&res->completion);
-}
-
static int testmgr_alloc_buf(char *buf[XBUFSIZE])
{
int i;
@@ -193,20 +177,10 @@ static void testmgr_free_buf(char *buf[XBUFSIZE])
free_page((unsigned long)buf[i]);
}
-static int wait_async_op(struct tcrypt_result *tr, int ret)
-{
- if (ret == -EINPROGRESS || ret == -EBUSY) {
- wait_for_completion(&tr->completion);
- reinit_completion(&tr->completion);
- ret = tr->err;
- }
- return ret;
-}
-
static int ahash_partial_update(struct ahash_request **preq,
struct crypto_ahash *tfm, const struct hash_testvec *template,
void *hash_buff, int k, int temp, struct scatterlist *sg,
- const char *algo, char *result, struct tcrypt_result *tresult)
+ const char *algo, char *result, struct crypto_wait *wait)
{
char *state;
struct ahash_request *req;
@@ -236,7 +210,7 @@ static int ahash_partial_update(struct ahash_request **preq,
}
ahash_request_set_callback(req,
CRYPTO_TFM_REQ_MAY_BACKLOG,
- tcrypt_complete, tresult);
+ crypto_req_done, wait);
memcpy(hash_buff, template->plaintext + temp,
template->tap[k]);
@@ -247,7 +221,7 @@ static int ahash_partial_update(struct ahash_request **preq,
pr_err("alg: hash: Failed to import() for %s\n", algo);
goto out;
}
- ret = wait_async_op(tresult, crypto_ahash_update(req));
+ ret = crypto_wait_req(crypto_ahash_update(req), wait);
if (ret)
goto out;
*preq = req;
@@ -272,7 +246,7 @@ static int __test_hash(struct crypto_ahash *tfm,
char *result;
char *key;
struct ahash_request *req;
- struct tcrypt_result tresult;
+ struct crypto_wait wait;
void *hash_buff;
char *xbuf[XBUFSIZE];
int ret = -ENOMEM;
@@ -286,7 +260,7 @@ static int __test_hash(struct crypto_ahash *tfm,
if (testmgr_alloc_buf(xbuf))
goto out_nobuf;
- init_completion(&tresult.completion);
+ crypto_init_wait(&wait);
req = ahash_request_alloc(tfm, GFP_KERNEL);
if (!req) {
@@ -295,7 +269,7 @@ static int __test_hash(struct crypto_ahash *tfm,
goto out_noreq;
}
ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
- tcrypt_complete, &tresult);
+ crypto_req_done, &wait);
j = 0;
for (i = 0; i < tcount; i++) {
@@ -335,26 +309,26 @@ static int __test_hash(struct crypto_ahash *tfm,
ahash_request_set_crypt(req, sg, result, template[i].psize);
if (use_digest) {
- ret = wait_async_op(&tresult, crypto_ahash_digest(req));
+ ret = crypto_wait_req(crypto_ahash_digest(req), &wait);
if (ret) {
pr_err("alg: hash: digest failed on test %d "
"for %s: ret=%d\n", j, algo, -ret);
goto out;
}
} else {
- ret = wait_async_op(&tresult, crypto_ahash_init(req));
+ ret = crypto_wait_req(crypto_ahash_init(req), &wait);
if (ret) {
pr_err("alg: hash: init failed on test %d "
"for %s: ret=%d\n", j, algo, -ret);
goto out;
}
- ret = wait_async_op(&tresult, crypto_ahash_update(req));
+ ret = crypto_wait_req(crypto_ahash_update(req), &wait);
if (ret) {
pr_err("alg: hash: update failed on test %d "
"for %s: ret=%d\n", j, algo, -ret);
goto out;
}
- ret = wait_async_op(&tresult, crypto_ahash_final(req));
+ ret = crypto_wait_req(crypto_ahash_final(req), &wait);
if (ret) {
pr_err("alg: hash: final failed on test %d "
"for %s: ret=%d\n", j, algo, -ret);
@@ -420,22 +394,10 @@ static int __test_hash(struct crypto_ahash *tfm,
}
ahash_request_set_crypt(req, sg, result, template[i].psize);
- ret = crypto_ahash_digest(req);
- switch (ret) {
- case 0:
- break;
- case -EINPROGRESS:
- case -EBUSY:
- wait_for_completion(&tresult.completion);
- reinit_completion(&tresult.completion);
- ret = tresult.err;
- if (!ret)
- break;
- /* fall through */
- default:
- printk(KERN_ERR "alg: hash: digest failed "
- "on chunking test %d for %s: "
- "ret=%d\n", j, algo, -ret);
+ ret = crypto_wait_req(crypto_ahash_digest(req), &wait);
+ if (ret) {
+ pr_err("alg: hash: digest failed on chunking test %d for %s: ret=%d\n",
+ j, algo, -ret);
goto out;
}
@@ -486,13 +448,13 @@ static int __test_hash(struct crypto_ahash *tfm,
}
ahash_request_set_crypt(req, sg, result, template[i].tap[0]);
- ret = wait_async_op(&tresult, crypto_ahash_init(req));
+ ret = crypto_wait_req(crypto_ahash_init(req), &wait);
if (ret) {
pr_err("alg: hash: init failed on test %d for %s: ret=%d\n",
j, algo, -ret);
goto out;
}
- ret = wait_async_op(&tresult, crypto_ahash_update(req));
+ ret = crypto_wait_req(crypto_ahash_update(req), &wait);
if (ret) {
pr_err("alg: hash: update failed on test %d for %s: ret=%d\n",
j, algo, -ret);
@@ -503,7 +465,7 @@ static int __test_hash(struct crypto_ahash *tfm,
for (k = 1; k < template[i].np; k++) {
ret = ahash_partial_update(&req, tfm, &template[i],
hash_buff, k, temp, &sg[0], algo, result,
- &tresult);
+ &wait);
if (ret) {
pr_err("alg: hash: partial update failed on test %d for %s: ret=%d\n",
j, algo, -ret);
@@ -511,7 +473,7 @@ static int __test_hash(struct crypto_ahash *tfm,
}
temp += template[i].tap[k];
}
- ret = wait_async_op(&tresult, crypto_ahash_final(req));
+ ret = crypto_wait_req(crypto_ahash_final(req), &wait);
if (ret) {
pr_err("alg: hash: final failed on test %d for %s: ret=%d\n",
j, algo, -ret);
@@ -580,7 +542,7 @@ static int __test_aead(struct crypto_aead *tfm, int enc,
struct scatterlist *sg;
struct scatterlist *sgout;
const char *e, *d;
- struct tcrypt_result result;
+ struct crypto_wait wait;
unsigned int authsize, iv_len;
void *input;
void *output;
@@ -619,7 +581,7 @@ static int __test_aead(struct crypto_aead *tfm, int enc,
else
e = "decryption";
- init_completion(&result.completion);
+ crypto_init_wait(&wait);
req = aead_request_alloc(tfm, GFP_KERNEL);
if (!req) {
@@ -629,7 +591,7 @@ static int __test_aead(struct crypto_aead *tfm, int enc,
}
aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
- tcrypt_complete, &result);
+ crypto_req_done, &wait);
iv_len = crypto_aead_ivsize(tfm);
@@ -709,7 +671,8 @@ static int __test_aead(struct crypto_aead *tfm, int enc,
aead_request_set_ad(req, template[i].alen);
- ret = enc ? crypto_aead_encrypt(req) : crypto_aead_decrypt(req);
+ ret = crypto_wait_req(enc ? crypto_aead_encrypt(req)
+ : crypto_aead_decrypt(req), &wait);
switch (ret) {
case 0:
@@ -722,13 +685,6 @@ static int __test_aead(struct crypto_aead *tfm, int enc,
goto out;
}
break;
- case -EINPROGRESS:
- case -EBUSY:
- wait_for_completion(&result.completion);
- reinit_completion(&result.completion);
- ret = result.err;
- if (!ret)
- break;
case -EBADMSG:
if (template[i].novrfy)
/* verification failure was expected */
@@ -866,7 +822,8 @@ static int __test_aead(struct crypto_aead *tfm, int enc,
aead_request_set_ad(req, template[i].alen);
- ret = enc ? crypto_aead_encrypt(req) : crypto_aead_decrypt(req);
+ ret = crypto_wait_req(enc ? crypto_aead_encrypt(req)
+ : crypto_aead_decrypt(req), &wait);
switch (ret) {
case 0:
@@ -879,13 +836,6 @@ static int __test_aead(struct crypto_aead *tfm, int enc,
goto out;
}
break;
- case -EINPROGRESS:
- case -EBUSY:
- wait_for_completion(&result.completion);
- reinit_completion(&result.completion);
- ret = result.err;
- if (!ret)
- break;
case -EBADMSG:
if (template[i].novrfy)
/* verification failure was expected */
@@ -1083,7 +1033,7 @@ static int __test_skcipher(struct crypto_skcipher *tfm, int enc,
struct scatterlist sg[8];
struct scatterlist sgout[8];
const char *e, *d;
- struct tcrypt_result result;
+ struct crypto_wait wait;
void *data;
char iv[MAX_IVLEN];
char *xbuf[XBUFSIZE];
@@ -1107,7 +1057,7 @@ static int __test_skcipher(struct crypto_skcipher *tfm, int enc,
else
e = "decryption";
- init_completion(&result.completion);
+ crypto_init_wait(&wait);
req = skcipher_request_alloc(tfm, GFP_KERNEL);
if (!req) {
@@ -1117,7 +1067,7 @@ static int __test_skcipher(struct crypto_skcipher *tfm, int enc,
}
skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
- tcrypt_complete, &result);
+ crypto_req_done, &wait);
j = 0;
for (i = 0; i < tcount; i++) {
@@ -1164,21 +1114,10 @@ static int __test_skcipher(struct crypto_skcipher *tfm, int enc,
skcipher_request_set_crypt(req, sg, (diff_dst) ? sgout : sg,
template[i].ilen, iv);
- ret = enc ? crypto_skcipher_encrypt(req) :
- crypto_skcipher_decrypt(req);
+ ret = crypto_wait_req(enc ? crypto_skcipher_encrypt(req) :
+ crypto_skcipher_decrypt(req), &wait);
- switch (ret) {
- case 0:
- break;
- case -EINPROGRESS:
- case -EBUSY:
- wait_for_completion(&result.completion);
- reinit_completion(&result.completion);
- ret = result.err;
- if (!ret)
- break;
- /* fall through */
- default:
+ if (ret) {
pr_err("alg: skcipher%s: %s failed on test %d for %s: ret=%d\n",
d, e, j, algo, -ret);
goto out;
@@ -1272,21 +1211,10 @@ static int __test_skcipher(struct crypto_skcipher *tfm, int enc,
skcipher_request_set_crypt(req, sg, (diff_dst) ? sgout : sg,
template[i].ilen, iv);
- ret = enc ? crypto_skcipher_encrypt(req) :
- crypto_skcipher_decrypt(req);
+ ret = crypto_wait_req(enc ? crypto_skcipher_encrypt(req) :
+ crypto_skcipher_decrypt(req), &wait);
- switch (ret) {
- case 0:
- break;
- case -EINPROGRESS:
- case -EBUSY:
- wait_for_completion(&result.completion);
- reinit_completion(&result.completion);
- ret = result.err;
- if (!ret)
- break;
- /* fall through */
- default:
+ if (ret) {
pr_err("alg: skcipher%s: %s failed on chunk test %d for %s: ret=%d\n",
d, e, j, algo, -ret);
goto out;
@@ -1462,7 +1390,7 @@ static int test_acomp(struct crypto_acomp *tfm,
int ret;
struct scatterlist src, dst;
struct acomp_req *req;
- struct tcrypt_result result;
+ struct crypto_wait wait;
output = kmalloc(COMP_BUF_SIZE, GFP_KERNEL);
if (!output)
@@ -1486,7 +1414,7 @@ static int test_acomp(struct crypto_acomp *tfm,
}
memset(output, 0, dlen);
- init_completion(&result.completion);
+ crypto_init_wait(&wait);
sg_init_one(&src, input_vec, ilen);
sg_init_one(&dst, output, dlen);
@@ -1501,9 +1429,9 @@ static int test_acomp(struct crypto_acomp *tfm,
acomp_request_set_params(req, &src, &dst, ilen, dlen);
acomp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
- tcrypt_complete, &result);
+ crypto_req_done, &wait);
- ret = wait_async_op(&result, crypto_acomp_compress(req));
+ ret = crypto_wait_req(crypto_acomp_compress(req), &wait);
if (ret) {
pr_err("alg: acomp: compression failed on test %d for %s: ret=%d\n",
i + 1, algo, -ret);
@@ -1516,10 +1444,10 @@ static int test_acomp(struct crypto_acomp *tfm,
dlen = COMP_BUF_SIZE;
sg_init_one(&src, output, ilen);
sg_init_one(&dst, decomp_out, dlen);
- init_completion(&result.completion);
+ crypto_init_wait(&wait);
acomp_request_set_params(req, &src, &dst, ilen, dlen);
- ret = wait_async_op(&result, crypto_acomp_decompress(req));
+ ret = crypto_wait_req(crypto_acomp_decompress(req), &wait);
if (ret) {
pr_err("alg: acomp: compression failed on test %d for %s: ret=%d\n",
i + 1, algo, -ret);
@@ -1563,7 +1491,7 @@ static int test_acomp(struct crypto_acomp *tfm,
}
memset(output, 0, dlen);
- init_completion(&result.completion);
+ crypto_init_wait(&wait);
sg_init_one(&src, input_vec, ilen);
sg_init_one(&dst, output, dlen);
@@ -1578,9 +1506,9 @@ static int test_acomp(struct crypto_acomp *tfm,
acomp_request_set_params(req, &src, &dst, ilen, dlen);
acomp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
- tcrypt_complete, &result);
+ crypto_req_done, &wait);
- ret = wait_async_op(&result, crypto_acomp_decompress(req));
+ ret = crypto_wait_req(crypto_acomp_decompress(req), &wait);
if (ret) {
pr_err("alg: acomp: decompression failed on test %d for %s: ret=%d\n",
i + 1, algo, -ret);
@@ -2000,7 +1928,7 @@ static int do_test_kpp(struct crypto_kpp *tfm, const struct kpp_testvec *vec,
void *a_public = NULL;
void *a_ss = NULL;
void *shared_secret = NULL;
- struct tcrypt_result result;
+ struct crypto_wait wait;
unsigned int out_len_max;
int err = -ENOMEM;
struct scatterlist src, dst;
@@ -2009,7 +1937,7 @@ static int do_test_kpp(struct crypto_kpp *tfm, const struct kpp_testvec *vec,
if (!req)
return err;
- init_completion(&result.completion);
+ crypto_init_wait(&wait);
err = crypto_kpp_set_secret(tfm, vec->secret, vec->secret_size);
if (err < 0)
@@ -2027,10 +1955,10 @@ static int do_test_kpp(struct crypto_kpp *tfm, const struct kpp_testvec *vec,
sg_init_one(&dst, output_buf, out_len_max);
kpp_request_set_output(req, &dst, out_len_max);
kpp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
- tcrypt_complete, &result);
+ crypto_req_done, &wait);
/* Compute party A's public key */
- err = wait_async_op(&result, crypto_kpp_generate_public_key(req));
+ err = crypto_wait_req(crypto_kpp_generate_public_key(req), &wait);
if (err) {
pr_err("alg: %s: Party A: generate public key test failed. err %d\n",
alg, err);
@@ -2069,8 +1997,8 @@ static int do_test_kpp(struct crypto_kpp *tfm, const struct kpp_testvec *vec,
kpp_request_set_input(req, &src, vec->b_public_size);
kpp_request_set_output(req, &dst, out_len_max);
kpp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
- tcrypt_complete, &result);
- err = wait_async_op(&result, crypto_kpp_compute_shared_secret(req));
+ crypto_req_done, &wait);
+ err = crypto_wait_req(crypto_kpp_compute_shared_secret(req), &wait);
if (err) {
pr_err("alg: %s: Party A: compute shared secret test failed. err %d\n",
alg, err);
@@ -2100,9 +2028,9 @@ static int do_test_kpp(struct crypto_kpp *tfm, const struct kpp_testvec *vec,
kpp_request_set_input(req, &src, vec->expected_a_public_size);
kpp_request_set_output(req, &dst, out_len_max);
kpp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
- tcrypt_complete, &result);
- err = wait_async_op(&result,
- crypto_kpp_compute_shared_secret(req));
+ crypto_req_done, &wait);
+ err = crypto_wait_req(crypto_kpp_compute_shared_secret(req),
+ &wait);
if (err) {
pr_err("alg: %s: Party B: compute shared secret failed. err %d\n",
alg, err);
@@ -2179,7 +2107,7 @@ static int test_akcipher_one(struct crypto_akcipher *tfm,
struct akcipher_request *req;
void *outbuf_enc = NULL;
void *outbuf_dec = NULL;
- struct tcrypt_result result;
+ struct crypto_wait wait;
unsigned int out_len_max, out_len = 0;
int err = -ENOMEM;
struct scatterlist src, dst, src_tab[2];
@@ -2191,7 +2119,7 @@ static int test_akcipher_one(struct crypto_akcipher *tfm,
if (!req)
goto free_xbuf;
- init_completion(&result.completion);
+ crypto_init_wait(&wait);
if (vecs->public_key_vec)
err = crypto_akcipher_set_pub_key(tfm, vecs->key,
@@ -2220,13 +2148,13 @@ static int test_akcipher_one(struct crypto_akcipher *tfm,
akcipher_request_set_crypt(req, src_tab, &dst, vecs->m_size,
out_len_max);
akcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
- tcrypt_complete, &result);
+ crypto_req_done, &wait);
- err = wait_async_op(&result, vecs->siggen_sigver_test ?
- /* Run asymmetric signature generation */
- crypto_akcipher_sign(req) :
- /* Run asymmetric encrypt */
- crypto_akcipher_encrypt(req));
+ err = crypto_wait_req(vecs->siggen_sigver_test ?
+ /* Run asymmetric signature generation */
+ crypto_akcipher_sign(req) :
+ /* Run asymmetric encrypt */
+ crypto_akcipher_encrypt(req), &wait);
if (err) {
pr_err("alg: akcipher: encrypt test failed. err %d\n", err);
goto free_all;
@@ -2261,14 +2189,14 @@ static int test_akcipher_one(struct crypto_akcipher *tfm,
sg_init_one(&src, xbuf[0], vecs->c_size);
sg_init_one(&dst, outbuf_dec, out_len_max);
- init_completion(&result.completion);
+ crypto_init_wait(&wait);
akcipher_request_set_crypt(req, &src, &dst, vecs->c_size, out_len_max);
- err = wait_async_op(&result, vecs->siggen_sigver_test ?
- /* Run asymmetric signature verification */
- crypto_akcipher_verify(req) :
- /* Run asymmetric decrypt */
- crypto_akcipher_decrypt(req));
+ err = crypto_wait_req(vecs->siggen_sigver_test ?
+ /* Run asymmetric signature verification */
+ crypto_akcipher_verify(req) :
+ /* Run asymmetric decrypt */
+ crypto_akcipher_decrypt(req), &wait);
if (err) {
pr_err("alg: akcipher: decrypt test failed. err %d\n", err);
goto free_all;
--
2.1.4
From: Gilad Ben-Y. <gi...@be...> - 2017-09-05 12:41:53
gcm is starting an async. crypto op and waiting for it to complete.
Move it over to generic code doing the same.
Signed-off-by: Gilad Ben-Yossef <gi...@be...>
---
crypto/gcm.c | 32 ++++++--------------------------
1 file changed, 6 insertions(+), 26 deletions(-)
diff --git a/crypto/gcm.c b/crypto/gcm.c
index 3841b5e..fb923a5 100644
--- a/crypto/gcm.c
+++ b/crypto/gcm.c
@@ -16,7 +16,6 @@
#include <crypto/scatterwalk.h>
#include <crypto/hash.h>
#include "internal.h"
-#include <linux/completion.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
@@ -78,11 +77,6 @@ struct crypto_gcm_req_priv_ctx {
} u;
};
-struct crypto_gcm_setkey_result {
- int err;
- struct completion completion;
-};
-
static struct {
u8 buf[16];
struct scatterlist sg;
@@ -98,17 +92,6 @@ static inline struct crypto_gcm_req_priv_ctx *crypto_gcm_reqctx(
return (void *)PTR_ALIGN((u8 *)aead_request_ctx(req), align + 1);
}
-static void crypto_gcm_setkey_done(struct crypto_async_request *req, int err)
-{
- struct crypto_gcm_setkey_result *result = req->data;
-
- if (err == -EINPROGRESS)
- return;
-
- result->err = err;
- complete(&result->completion);
-}
-
static int crypto_gcm_setkey(struct crypto_aead *aead, const u8 *key,
unsigned int keylen)
{
@@ -119,7 +102,7 @@ static int crypto_gcm_setkey(struct crypto_aead *aead, const u8 *key,
be128 hash;
u8 iv[16];
- struct crypto_gcm_setkey_result result;
+ struct crypto_wait wait;
struct scatterlist sg[1];
struct skcipher_request req;
@@ -140,21 +123,18 @@ static int crypto_gcm_setkey(struct crypto_aead *aead, const u8 *key,
if (!data)
return -ENOMEM;
- init_completion(&data->result.completion);
+ crypto_init_wait(&data->wait);
sg_init_one(data->sg, &data->hash, sizeof(data->hash));
skcipher_request_set_tfm(&data->req, ctr);
skcipher_request_set_callback(&data->req, CRYPTO_TFM_REQ_MAY_SLEEP |
CRYPTO_TFM_REQ_MAY_BACKLOG,
- crypto_gcm_setkey_done,
- &data->result);
+ crypto_req_done,
+ &data->wait);
skcipher_request_set_crypt(&data->req, data->sg, data->sg,
sizeof(data->hash), data->iv);
- err = crypto_skcipher_encrypt(&data->req);
- if (err == -EINPROGRESS || err == -EBUSY) {
- wait_for_completion(&data->result.completion);
- err = data->result.err;
- }
+ err = crypto_wait_req(crypto_skcipher_encrypt(&data->req),
+ &data->wait);
if (err)
goto out;
--
2.1.4
From: Gilad Ben-Y. <gi...@be...> - 2017-09-05 12:41:38
DRBG is starting an async. crypto op and waiting for it to complete.
Move it over to generic code doing the same.
The code now also passes the CRYPTO_TFM_REQ_MAY_SLEEP flag, indicating
that crypto request memory allocation may use GFP_KERNEL, which should
be perfectly fine as the code is obviously sleeping while waiting for
the request to complete anyway.
Signed-off-by: Gilad Ben-Yossef <gi...@be...>
---
crypto/drbg.c | 36 +++++++++---------------------------
include/crypto/drbg.h | 3 +--
2 files changed, 10 insertions(+), 29 deletions(-)
diff --git a/crypto/drbg.c b/crypto/drbg.c
index 633a88e..c522251 100644
--- a/crypto/drbg.c
+++ b/crypto/drbg.c
@@ -1651,16 +1651,6 @@ static int drbg_fini_sym_kernel(struct drbg_state *drbg)
return 0;
}
-static void drbg_skcipher_cb(struct crypto_async_request *req, int error)
-{
- struct drbg_state *drbg = req->data;
-
- if (error == -EINPROGRESS)
- return;
- drbg->ctr_async_err = error;
- complete(&drbg->ctr_completion);
-}
-
static int drbg_init_sym_kernel(struct drbg_state *drbg)
{
struct crypto_cipher *tfm;
@@ -1691,7 +1681,7 @@ static int drbg_init_sym_kernel(struct drbg_state *drbg)
return PTR_ERR(sk_tfm);
}
drbg->ctr_handle = sk_tfm;
- init_completion(&drbg->ctr_completion);
+ crypto_init_wait(&drbg->ctr_wait);
req = skcipher_request_alloc(sk_tfm, GFP_KERNEL);
if (!req) {
@@ -1700,8 +1690,9 @@ static int drbg_init_sym_kernel(struct drbg_state *drbg)
return -ENOMEM;
}
drbg->ctr_req = req;
- skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
- drbg_skcipher_cb, drbg);
+ skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
+ CRYPTO_TFM_REQ_MAY_SLEEP,
+ crypto_req_done, &drbg->ctr_wait);
alignmask = crypto_skcipher_alignmask(sk_tfm);
drbg->ctr_null_value_buf = kzalloc(DRBG_CTR_NULL_LEN + alignmask,
@@ -1762,21 +1753,12 @@ static int drbg_kcapi_sym_ctr(struct drbg_state *drbg,
/* Output buffer may not be valid for SGL, use scratchpad */
skcipher_request_set_crypt(drbg->ctr_req, &sg_in, &sg_out,
cryptlen, drbg->V);
- ret = crypto_skcipher_encrypt(drbg->ctr_req);
- switch (ret) {
- case 0:
- break;
- case -EINPROGRESS:
- case -EBUSY:
- wait_for_completion(&drbg->ctr_completion);
- if (!drbg->ctr_async_err) {
- reinit_completion(&drbg->ctr_completion);
- break;
- }
- default:
+ ret = crypto_wait_req(crypto_skcipher_encrypt(drbg->ctr_req),
+ &drbg->ctr_wait);
+ if (ret)
goto out;
- }
- init_completion(&drbg->ctr_completion);
+
+ crypto_init_wait(&drbg->ctr_wait);
memcpy(outbuf, drbg->outscratchpad, cryptlen);
diff --git a/include/crypto/drbg.h b/include/crypto/drbg.h
index 22f884c..8f94110 100644
--- a/include/crypto/drbg.h
+++ b/include/crypto/drbg.h
@@ -126,8 +126,7 @@ struct drbg_state {
__u8 *ctr_null_value; /* CTR mode aligned zero buf */
__u8 *outscratchpadbuf; /* CTR mode output scratchpad */
__u8 *outscratchpad; /* CTR mode aligned outbuf */
- struct completion ctr_completion; /* CTR mode async handler */
- int ctr_async_err; /* CTR mode async error */
+ struct crypto_wait ctr_wait; /* CTR mode async wait obj */
bool seeded; /* DRBG fully seeded? */
bool pr; /* Prediction resistance enabled? */
--
2.1.4
From: Gilad Ben-Y. <gi...@be...> - 2017-09-05 12:41:21
public_key_verify_signature() is starting an async crypto op and
waiting for it to complete. Move it over to generic code doing
the same.
Signed-off-by: Gilad Ben-Yossef <gi...@be...>
---
crypto/asymmetric_keys/public_key.c | 28 ++++------------------------
1 file changed, 4 insertions(+), 24 deletions(-)
diff --git a/crypto/asymmetric_keys/public_key.c b/crypto/asymmetric_keys/public_key.c
index 3cd6e12..d916235 100644
--- a/crypto/asymmetric_keys/public_key.c
+++ b/crypto/asymmetric_keys/public_key.c
@@ -57,29 +57,13 @@ static void public_key_destroy(void *payload0, void *payload3)
public_key_signature_free(payload3);
}
-struct public_key_completion {
- struct completion completion;
- int err;
-};
-
-static void public_key_verify_done(struct crypto_async_request *req, int err)
-{
- struct public_key_completion *compl = req->data;
-
- if (err == -EINPROGRESS)
- return;
-
- compl->err = err;
- complete(&compl->completion);
-}
-
/*
* Verify a signature using a public key.
*/
int public_key_verify_signature(const struct public_key *pkey,
const struct public_key_signature *sig)
{
- struct public_key_completion compl;
+ struct crypto_wait cwait;
struct crypto_akcipher *tfm;
struct akcipher_request *req;
struct scatterlist sig_sg, digest_sg;
@@ -131,20 +115,16 @@ int public_key_verify_signature(const struct public_key *pkey,
sg_init_one(&digest_sg, output, outlen);
akcipher_request_set_crypt(req, &sig_sg, &digest_sg, sig->s_size,
outlen);
- init_completion(&compl.completion);
+ crypto_init_wait(&cwait);
akcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
CRYPTO_TFM_REQ_MAY_SLEEP,
- public_key_verify_done, &compl);
+ crypto_req_done, &cwait);
/* Perform the verification calculation. This doesn't actually do the
* verification, but rather calculates the hash expected by the
* signature and returns that to us.
*/
- ret = crypto_akcipher_verify(req);
- if ((ret == -EINPROGRESS) || (ret == -EBUSY)) {
- wait_for_completion(&compl.completion);
- ret = compl.err;
- }
+ ret = crypto_wait_req(crypto_akcipher_verify(req), &cwait);
if (ret < 0)
goto out_free_output;
--
2.1.4
From: Gilad Ben-Y. <gi...@be...> - 2017-09-05 12:41:08
algif starts several async crypto ops and waits for their completion.
Move it over to generic code doing the same.
Signed-off-by: Gilad Ben-Yossef <gi...@be...>
---
crypto/af_alg.c | 27 ---------------------------
crypto/algif_aead.c | 8 ++++----
crypto/algif_hash.c | 30 ++++++++++++++----------------
crypto/algif_skcipher.c | 9 ++++-----
include/crypto/if_alg.h | 15 +--------------
5 files changed, 23 insertions(+), 66 deletions(-)
diff --git a/crypto/af_alg.c b/crypto/af_alg.c
index ffa9f4c..cf312ed 100644
--- a/crypto/af_alg.c
+++ b/crypto/af_alg.c
@@ -481,33 +481,6 @@ int af_alg_cmsg_send(struct msghdr *msg, struct af_alg_control *con)
}
EXPORT_SYMBOL_GPL(af_alg_cmsg_send);
-int af_alg_wait_for_completion(int err, struct af_alg_completion *completion)
-{
- switch (err) {
- case -EINPROGRESS:
- case -EBUSY:
- wait_for_completion(&completion->completion);
- reinit_completion(&completion->completion);
- err = completion->err;
- break;
- };
-
- return err;
-}
-EXPORT_SYMBOL_GPL(af_alg_wait_for_completion);
-
-void af_alg_complete(struct crypto_async_request *req, int err)
-{
- struct af_alg_completion *completion = req->data;
-
- if (err == -EINPROGRESS)
- return;
-
- completion->err = err;
- complete(&completion->completion);
-}
-EXPORT_SYMBOL_GPL(af_alg_complete);
-
/**
* af_alg_alloc_tsgl - allocate the TX SGL
*
diff --git a/crypto/algif_aead.c b/crypto/algif_aead.c
index 516b38c..aacae08 100644
--- a/crypto/algif_aead.c
+++ b/crypto/algif_aead.c
@@ -278,11 +278,11 @@ static int _aead_recvmsg(struct socket *sock, struct msghdr *msg,
/* Synchronous operation */
aead_request_set_callback(&areq->cra_u.aead_req,
CRYPTO_TFM_REQ_MAY_BACKLOG,
- af_alg_complete, &ctx->completion);
- err = af_alg_wait_for_completion(ctx->enc ?
+ crypto_req_done, &ctx->wait);
+ err = crypto_wait_req(ctx->enc ?
crypto_aead_encrypt(&areq->cra_u.aead_req) :
crypto_aead_decrypt(&areq->cra_u.aead_req),
- &ctx->completion);
+ &ctx->wait);
}
/* AIO operation in progress */
@@ -554,7 +554,7 @@ static int aead_accept_parent_nokey(void *private, struct sock *sk)
ctx->merge = 0;
ctx->enc = 0;
ctx->aead_assoclen = 0;
- af_alg_init_completion(&ctx->completion);
+ crypto_init_wait(&ctx->wait);
ask->private = ctx;
diff --git a/crypto/algif_hash.c b/crypto/algif_hash.c
index 3b3c154..d2ab8de 100644
--- a/crypto/algif_hash.c
+++ b/crypto/algif_hash.c
@@ -26,7 +26,7 @@ struct hash_ctx {
u8 *result;
- struct af_alg_completion completion;
+ struct crypto_wait wait;
unsigned int len;
bool more;
@@ -102,8 +102,7 @@ static int hash_sendmsg(struct socket *sock, struct msghdr *msg,
if ((msg->msg_flags & MSG_MORE))
hash_free_result(sk, ctx);
- err = af_alg_wait_for_completion(crypto_ahash_init(&ctx->req),
- &ctx->completion);
+ err = crypto_wait_req(crypto_ahash_init(&ctx->req), &ctx->wait);
if (err)
goto unlock;
}
@@ -124,8 +123,8 @@ static int hash_sendmsg(struct socket *sock, struct msghdr *msg,
ahash_request_set_crypt(&ctx->req, ctx->sgl.sg, NULL, len);
- err = af_alg_wait_for_completion(crypto_ahash_update(&ctx->req),
- &ctx->completion);
+ err = crypto_wait_req(crypto_ahash_update(&ctx->req),
+ &ctx->wait);
af_alg_free_sg(&ctx->sgl);
if (err)
goto unlock;
@@ -143,8 +142,8 @@ static int hash_sendmsg(struct socket *sock, struct msghdr *msg,
goto unlock;
ahash_request_set_crypt(&ctx->req, NULL, ctx->result, 0);
- err = af_alg_wait_for_completion(crypto_ahash_final(&ctx->req),
- &ctx->completion);
+ err = crypto_wait_req(crypto_ahash_final(&ctx->req),
+ &ctx->wait);
}
unlock:
@@ -185,7 +184,7 @@ static ssize_t hash_sendpage(struct socket *sock, struct page *page,
} else {
if (!ctx->more) {
err = crypto_ahash_init(&ctx->req);
- err = af_alg_wait_for_completion(err, &ctx->completion);
+ err = crypto_wait_req(err, &ctx->wait);
if (err)
goto unlock;
}
@@ -193,7 +192,7 @@ static ssize_t hash_sendpage(struct socket *sock, struct page *page,
err = crypto_ahash_update(&ctx->req);
}
- err = af_alg_wait_for_completion(err, &ctx->completion);
+ err = crypto_wait_req(err, &ctx->wait);
if (err)
goto unlock;
@@ -229,17 +228,16 @@ static int hash_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
ahash_request_set_crypt(&ctx->req, NULL, ctx->result, 0);
if (!result && !ctx->more) {
- err = af_alg_wait_for_completion(
- crypto_ahash_init(&ctx->req),
- &ctx->completion);
+ err = crypto_wait_req(crypto_ahash_init(&ctx->req),
+ &ctx->wait);
if (err)
goto unlock;
}
if (!result || ctx->more) {
ctx->more = 0;
- err = af_alg_wait_for_completion(crypto_ahash_final(&ctx->req),
- &ctx->completion);
+ err = crypto_wait_req(crypto_ahash_final(&ctx->req),
+ &ctx->wait);
if (err)
goto unlock;
}
@@ -490,13 +488,13 @@ static int hash_accept_parent_nokey(void *private, struct sock *sk)
ctx->result = NULL;
ctx->len = len;
ctx->more = 0;
- af_alg_init_completion(&ctx->completion);
+ crypto_init_wait(&ctx->wait);
ask->private = ctx;
ahash_request_set_tfm(&ctx->req, hash);
ahash_request_set_callback(&ctx->req, CRYPTO_TFM_REQ_MAY_BACKLOG,
- af_alg_complete, &ctx->completion);
+ crypto_req_done, &ctx->wait);
sk->sk_destruct = hash_sock_destruct;
diff --git a/crypto/algif_skcipher.c b/crypto/algif_skcipher.c
index 8ae4170..9954b07 100644
--- a/crypto/algif_skcipher.c
+++ b/crypto/algif_skcipher.c
@@ -129,12 +129,11 @@ static int _skcipher_recvmsg(struct socket *sock, struct msghdr *msg,
skcipher_request_set_callback(&areq->cra_u.skcipher_req,
CRYPTO_TFM_REQ_MAY_SLEEP |
CRYPTO_TFM_REQ_MAY_BACKLOG,
- af_alg_complete,
- &ctx->completion);
- err = af_alg_wait_for_completion(ctx->enc ?
+ crypto_req_done, &ctx->wait);
+ err = crypto_wait_req(ctx->enc ?
crypto_skcipher_encrypt(&areq->cra_u.skcipher_req) :
crypto_skcipher_decrypt(&areq->cra_u.skcipher_req),
- &ctx->completion);
+ &ctx->wait);
}
/* AIO operation in progress */
@@ -388,7 +387,7 @@ static int skcipher_accept_parent_nokey(void *private, struct sock *sk)
ctx->more = 0;
ctx->merge = 0;
ctx->enc = 0;
- af_alg_init_completion(&ctx->completion);
+ crypto_init_wait(&ctx->wait);
ask->private = ctx;
diff --git a/include/crypto/if_alg.h b/include/crypto/if_alg.h
index 75ec9c6..6abf0a3 100644
--- a/include/crypto/if_alg.h
+++ b/include/crypto/if_alg.h
@@ -40,11 +40,6 @@ struct alg_sock {
void *private;
};
-struct af_alg_completion {
- struct completion completion;
- int err;
-};
-
struct af_alg_control {
struct af_alg_iv *iv;
int op;
@@ -152,7 +147,7 @@ struct af_alg_ctx {
void *iv;
size_t aead_assoclen;
- struct af_alg_completion completion;
+ struct crypto_wait wait;
size_t used;
size_t rcvused;
@@ -177,19 +172,11 @@ void af_alg_link_sg(struct af_alg_sgl *sgl_prev, struct af_alg_sgl *sgl_new);
int af_alg_cmsg_send(struct msghdr *msg, struct af_alg_control *con);
-int af_alg_wait_for_completion(int err, struct af_alg_completion *completion);
-void af_alg_complete(struct crypto_async_request *req, int err);
-
static inline struct alg_sock *alg_sk(struct sock *sk)
{
return (struct alg_sock *)sk;
}
-static inline void af_alg_init_completion(struct af_alg_completion *completion)
-{
- init_completion(&completion->completion);
-}
-
/**
* Size of available buffer for sending data from user space to kernel.
*
--
2.1.4
From: Gilad Ben-Y. <gi...@be...> - 2017-09-05 12:40:53
Invoking a possibly async. crypto op and waiting for its completion
while correctly handling backlog processing is a common task both
inside the crypto API implementation and for outside users of it.
This patch adds a generic implementation for doing so, in preparation
for using it across the board instead of hand-rolled versions.
Signed-off-by: Gilad Ben-Yossef <gi...@be...>
CC: Eric Biggers <ebi...@gm...>
CC: Jonathan Cameron <Jon...@hu...>
---
crypto/api.c | 13 +++++++++++++
include/linux/crypto.h | 40 ++++++++++++++++++++++++++++++++++++++++
2 files changed, 53 insertions(+)
diff --git a/crypto/api.c b/crypto/api.c
index 941cd4c..2a2479d 100644
--- a/crypto/api.c
+++ b/crypto/api.c
@@ -24,6 +24,7 @@
#include <linux/sched/signal.h>
#include <linux/slab.h>
#include <linux/string.h>
+#include <linux/completion.h>
#include "internal.h"
LIST_HEAD(crypto_alg_list);
@@ -595,5 +596,17 @@ int crypto_has_alg(const char *name, u32 type, u32 mask)
}
EXPORT_SYMBOL_GPL(crypto_has_alg);
+void crypto_req_done(struct crypto_async_request *req, int err)
+{
+ struct crypto_wait *wait = req->data;
+
+ if (err == -EINPROGRESS)
+ return;
+
+ wait->err = err;
+ complete(&wait->completion);
+}
+EXPORT_SYMBOL_GPL(crypto_req_done);
+
MODULE_DESCRIPTION("Cryptographic core API");
MODULE_LICENSE("GPL");
diff --git a/include/linux/crypto.h b/include/linux/crypto.h
index 84da997..78508ca 100644
--- a/include/linux/crypto.h
+++ b/include/linux/crypto.h
@@ -24,6 +24,7 @@
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/uaccess.h>
+#include <linux/completion.h>
/*
* Autoloaded crypto modules should only use a prefixed name to avoid allowing
@@ -468,6 +469,45 @@ struct crypto_alg {
} CRYPTO_MINALIGN_ATTR;
/*
+ * A helper struct for waiting for completion of async crypto ops
+ */
+struct crypto_wait {
+ struct completion completion;
+ int err;
+};
+
+/*
+ * Macro for declaring a crypto op async wait object on stack
+ */
+#define DECLARE_CRYPTO_WAIT(_wait) \
+ struct crypto_wait _wait = { \
+ COMPLETION_INITIALIZER_ONSTACK((_wait).completion), 0 }
+
+/*
+ * Async ops completion helper functioons
+ */
+void crypto_req_done(struct crypto_async_request *req, int err);
+
+static inline int crypto_wait_req(int err, struct crypto_wait *wait)
+{
+ switch (err) {
+ case -EINPROGRESS:
+ case -EBUSY:
+ wait_for_completion(&wait->completion);
+ reinit_completion(&wait->completion);
+ err = wait->err;
+ break;
+ };
+
+ return err;
+}
+
+static inline void crypto_init_wait(struct crypto_wait *wait)
+{
+ init_completion(&wait->completion);
+}
+
+/*
* Algorithm registration interface.
*/
int crypto_register_alg(struct crypto_alg *alg);
--
2.1.4
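
To illustrate how a caller is expected to use the new helpers added above (this example is not part of the patch; the transform, buffer handling and GFP flags are assumptions made for the sketch):

#include <crypto/skcipher.h>
#include <linux/crypto.h>
#include <linux/scatterlist.h>

/* Synchronously encrypt len bytes in buf on top of a possibly-async skcipher. */
static int example_encrypt(struct crypto_skcipher *tfm, u8 *buf,
			   unsigned int len, u8 *iv)
{
	DECLARE_CRYPTO_WAIT(wait);
	struct skcipher_request *req;
	struct scatterlist sg;
	int err;

	req = skcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
				      CRYPTO_TFM_REQ_MAY_SLEEP,
				      crypto_req_done, &wait);
	sg_init_one(&sg, buf, len);
	skcipher_request_set_crypt(req, &sg, &sg, len, iv);

	/* Blocks on -EINPROGRESS/-EBUSY, passes every other code straight through. */
	err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);

	skcipher_request_free(req);
	return err;
}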
From: Gilad Ben-Y. <gi...@be...> - 2017-09-05 12:40:39
Now that the -EBUSY return code only indicates backlog queueing, we can
safely remove the now redundant check for the CRYPTO_TFM_REQ_MAY_BACKLOG
flag when -EBUSY is returned.
Signed-off-by: Gilad Ben-Yossef <gi...@be...>
Acked-by: Boris Brezillon <bor...@fr...>
---
drivers/crypto/marvell/cesa.c | 3 +--
drivers/crypto/marvell/cesa.h | 2 +-
2 files changed, 2 insertions(+), 3 deletions(-)
diff --git a/drivers/crypto/marvell/cesa.c b/drivers/crypto/marvell/cesa.c
index 6e7a5c7..0c3909d 100644
--- a/drivers/crypto/marvell/cesa.c
+++ b/drivers/crypto/marvell/cesa.c
@@ -183,8 +183,7 @@ int mv_cesa_queue_req(struct crypto_async_request *req,
spin_lock_bh(&engine->lock);
ret = crypto_enqueue_request(&engine->queue, req);
if ((mv_cesa_req_get_type(creq) == CESA_DMA_REQ) &&
- (ret == -EINPROGRESS ||
- (ret == -EBUSY && req->flags & CRYPTO_TFM_REQ_MAY_BACKLOG)))
+ (ret == -EINPROGRESS || ret == -EBUSY))
mv_cesa_tdma_chain(engine, creq);
spin_unlock_bh(&engine->lock);
diff --git a/drivers/crypto/marvell/cesa.h b/drivers/crypto/marvell/cesa.h
index b7872f6..63c8457 100644
--- a/drivers/crypto/marvell/cesa.h
+++ b/drivers/crypto/marvell/cesa.h
@@ -763,7 +763,7 @@ static inline int mv_cesa_req_needs_cleanup(struct crypto_async_request *req,
* the backlog and will be processed later. There's no need to
* clean it up.
*/
- if (ret == -EBUSY && req->flags & CRYPTO_TFM_REQ_MAY_BACKLOG)
+ if (ret == -EBUSY)
return false;
/* Request wasn't queued, we need to clean it up */
--
2.1.4
From: Gilad Ben-Y. <gi...@be...> - 2017-09-05 12:40:25
Now that the -EBUSY return code only indicates backlog queueing, we can
safely remove the now redundant check for the CRYPTO_TFM_REQ_MAY_BACKLOG
flag when -EBUSY is returned.
Signed-off-by: Gilad Ben-Yossef <gi...@be...>
---
crypto/ahash.c | 12 +++---------
crypto/cts.c | 6 ++----
crypto/lrw.c | 8 ++------
crypto/rsa-pkcs1pad.c | 16 ++++------------
crypto/xts.c | 8 ++------
5 files changed, 13 insertions(+), 37 deletions(-)
diff --git a/crypto/ahash.c b/crypto/ahash.c
index 5e8666e..3a35d67 100644
--- a/crypto/ahash.c
+++ b/crypto/ahash.c
@@ -334,9 +334,7 @@ static int ahash_op_unaligned(struct ahash_request *req,
return err;
err = op(req);
- if (err == -EINPROGRESS ||
- (err == -EBUSY && (ahash_request_flags(req) &
- CRYPTO_TFM_REQ_MAY_BACKLOG)))
+ if (err == -EINPROGRESS || err == -EBUSY)
return err;
ahash_restore_req(req, err);
@@ -394,9 +392,7 @@ static int ahash_def_finup_finish1(struct ahash_request *req, int err)
req->base.complete = ahash_def_finup_done2;
err = crypto_ahash_reqtfm(req)->final(req);
- if (err == -EINPROGRESS ||
- (err == -EBUSY && (ahash_request_flags(req) &
- CRYPTO_TFM_REQ_MAY_BACKLOG)))
+ if (err == -EINPROGRESS || err == -EBUSY)
return err;
out:
@@ -432,9 +428,7 @@ static int ahash_def_finup(struct ahash_request *req)
return err;
err = tfm->update(req);
- if (err == -EINPROGRESS ||
- (err == -EBUSY && (ahash_request_flags(req) &
- CRYPTO_TFM_REQ_MAY_BACKLOG)))
+ if (err == -EINPROGRESS || err == -EBUSY)
return err;
return ahash_def_finup_finish1(req, err);
diff --git a/crypto/cts.c b/crypto/cts.c
index 243f591..4773c18 100644
--- a/crypto/cts.c
+++ b/crypto/cts.c
@@ -136,8 +136,7 @@ static void crypto_cts_encrypt_done(struct crypto_async_request *areq, int err)
goto out;
err = cts_cbc_encrypt(req);
- if (err == -EINPROGRESS ||
- (err == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
+ if (err == -EINPROGRESS || err == -EBUSY)
return;
out:
@@ -229,8 +228,7 @@ static void crypto_cts_decrypt_done(struct crypto_async_request *areq, int err)
goto out;
err = cts_cbc_decrypt(req);
- if (err == -EINPROGRESS ||
- (err == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
+ if (err == -EINPROGRESS || err == -EBUSY)
return;
out:
diff --git a/crypto/lrw.c b/crypto/lrw.c
index a8bfae4..695cea9 100644
--- a/crypto/lrw.c
+++ b/crypto/lrw.c
@@ -328,9 +328,7 @@ static int do_encrypt(struct skcipher_request *req, int err)
crypto_skcipher_encrypt(subreq) ?:
post_crypt(req);
- if (err == -EINPROGRESS ||
- (err == -EBUSY &&
- req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
+ if (err == -EINPROGRESS || err == -EBUSY)
return err;
}
@@ -380,9 +378,7 @@ static int do_decrypt(struct skcipher_request *req, int err)
crypto_skcipher_decrypt(subreq) ?:
post_crypt(req);
- if (err == -EINPROGRESS ||
- (err == -EBUSY &&
- req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
+ if (err == -EINPROGRESS || err == -EBUSY)
return err;
}
diff --git a/crypto/rsa-pkcs1pad.c b/crypto/rsa-pkcs1pad.c
index 407c64b..2908f93 100644
--- a/crypto/rsa-pkcs1pad.c
+++ b/crypto/rsa-pkcs1pad.c
@@ -279,9 +279,7 @@ static int pkcs1pad_encrypt(struct akcipher_request *req)
req->dst, ctx->key_size - 1, req->dst_len);
err = crypto_akcipher_encrypt(&req_ctx->child_req);
- if (err != -EINPROGRESS &&
- (err != -EBUSY ||
- !(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)))
+ if (err != -EINPROGRESS && err != -EBUSY)
return pkcs1pad_encrypt_sign_complete(req, err);
return err;
@@ -383,9 +381,7 @@ static int pkcs1pad_decrypt(struct akcipher_request *req)
ctx->key_size);
err = crypto_akcipher_decrypt(&req_ctx->child_req);
- if (err != -EINPROGRESS &&
- (err != -EBUSY ||
- !(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)))
+ if (err != -EINPROGRESS && err != -EBUSY)
return pkcs1pad_decrypt_complete(req, err);
return err;
@@ -440,9 +436,7 @@ static int pkcs1pad_sign(struct akcipher_request *req)
req->dst, ctx->key_size - 1, req->dst_len);
err = crypto_akcipher_sign(&req_ctx->child_req);
- if (err != -EINPROGRESS &&
- (err != -EBUSY ||
- !(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)))
+ if (err != -EINPROGRESS && err != -EBUSY)
return pkcs1pad_encrypt_sign_complete(req, err);
return err;
@@ -561,9 +555,7 @@ static int pkcs1pad_verify(struct akcipher_request *req)
ctx->key_size);
err = crypto_akcipher_verify(&req_ctx->child_req);
- if (err != -EINPROGRESS &&
- (err != -EBUSY ||
- !(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)))
+ if (err != -EINPROGRESS && err != -EBUSY)
return pkcs1pad_verify_complete(req, err);
return err;
diff --git a/crypto/xts.c b/crypto/xts.c
index d86c11a..af68012 100644
--- a/crypto/xts.c
+++ b/crypto/xts.c
@@ -269,9 +269,7 @@ static int do_encrypt(struct skcipher_request *req, int err)
crypto_skcipher_encrypt(subreq) ?:
post_crypt(req);
- if (err == -EINPROGRESS ||
- (err == -EBUSY &&
- req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
+ if (err == -EINPROGRESS || err == -EBUSY)
return err;
}
@@ -321,9 +319,7 @@ static int do_decrypt(struct skcipher_request *req, int err)
crypto_skcipher_decrypt(subreq) ?:
post_crypt(req);
- if (err == -EINPROGRESS ||
- (err == -EBUSY &&
- req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
+ if (err == -EINPROGRESS || err == -EBUSY)
return err;
}
--
2.1.4
From: Gilad Ben-Y. <gi...@be...> - 2017-09-05 12:40:10
Replace -EBUSY with -EAGAIN when handling transient busy
indication in the absence of backlog.
Signed-off-by: Gilad Ben-Yossef <gi...@be...>
---
Please squash this patch with the previous one when merging upstream.
---
net/ipv4/ah4.c | 2 +-
net/ipv4/esp4.c | 2 +-
net/ipv6/ah6.c | 2 +-
net/ipv6/esp6.c | 2 +-
4 files changed, 4 insertions(+), 4 deletions(-)
diff --git a/net/ipv4/ah4.c b/net/ipv4/ah4.c
index 37db44f..049cb0a 100644
--- a/net/ipv4/ah4.c
+++ b/net/ipv4/ah4.c
@@ -240,7 +240,7 @@ static int ah_output(struct xfrm_state *x, struct sk_buff *skb)
if (err == -EINPROGRESS)
goto out;
- if (err == -EBUSY)
+ if (err == -EAGAIN)
err = NET_XMIT_DROP;
goto out_free;
}
diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c
index b00e4a4..ff8e088 100644
--- a/net/ipv4/esp4.c
+++ b/net/ipv4/esp4.c
@@ -432,7 +432,7 @@ int esp_output_tail(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *
case -EINPROGRESS:
goto error;
- case -EBUSY:
+ case -EAGAIN:
err = NET_XMIT_DROP;
break;
diff --git a/net/ipv6/ah6.c b/net/ipv6/ah6.c
index 7802b72..ba0c145 100644
--- a/net/ipv6/ah6.c
+++ b/net/ipv6/ah6.c
@@ -443,7 +443,7 @@ static int ah6_output(struct xfrm_state *x, struct sk_buff *skb)
if (err == -EINPROGRESS)
goto out;
- if (err == -EBUSY)
+ if (err == -EAGAIN)
err = NET_XMIT_DROP;
goto out_free;
}
diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c
index 89910e2..1a71ee5 100644
--- a/net/ipv6/esp6.c
+++ b/net/ipv6/esp6.c
@@ -396,7 +396,7 @@ int esp6_output_tail(struct xfrm_state *x, struct sk_buff *skb, struct esp_info
case -EINPROGRESS:
goto error;
- case -EBUSY:
+ case -EAGAIN:
err = NET_XMIT_DROP;
break;
--
2.1.4
|
From: Gilad Ben-Y. <gi...@be...> - 2017-09-05 12:39:55
|
Replace -EBUSY with -EAGAIN when reporting transient busy
indication in the absence of backlog.
Signed-off-by: Gilad Ben-Yossef <gi...@be...>
Reviewed-by: Gary R Hook <gar...@am...>
---
Please squash this patch with the previous one when merging upstream.
---
drivers/crypto/ccp/ccp-crypto-main.c | 8 +++-----
drivers/crypto/ccp/ccp-dev.c | 7 +++++--
2 files changed, 8 insertions(+), 7 deletions(-)
diff --git a/drivers/crypto/ccp/ccp-crypto-main.c b/drivers/crypto/ccp/ccp-crypto-main.c
index 35a9de7..403ff0a 100644
--- a/drivers/crypto/ccp/ccp-crypto-main.c
+++ b/drivers/crypto/ccp/ccp-crypto-main.c
@@ -222,9 +222,10 @@ static int ccp_crypto_enqueue_cmd(struct ccp_crypto_cmd *crypto_cmd)
/* Check if the cmd can/should be queued */
if (req_queue.cmd_count >= CCP_CRYPTO_MAX_QLEN) {
- ret = -EBUSY;
- if (!(crypto_cmd->cmd->flags & CCP_CMD_MAY_BACKLOG))
+ if (!(crypto_cmd->cmd->flags & CCP_CMD_MAY_BACKLOG)) {
+ ret = -EAGAIN;
goto e_lock;
+ }
}
/* Look for an entry with the same tfm. If there is a cmd
@@ -243,9 +244,6 @@ static int ccp_crypto_enqueue_cmd(struct ccp_crypto_cmd *crypto_cmd)
ret = ccp_enqueue_cmd(crypto_cmd->cmd);
if (!ccp_crypto_success(ret))
goto e_lock; /* Error, don't queue it */
- if ((ret == -EBUSY) &&
- !(crypto_cmd->cmd->flags & CCP_CMD_MAY_BACKLOG))
- goto e_lock; /* Not backlogging, don't queue it */
}
if (req_queue.cmd_count >= CCP_CRYPTO_MAX_QLEN) {
diff --git a/drivers/crypto/ccp/ccp-dev.c b/drivers/crypto/ccp/ccp-dev.c
index 4e029b1..3d637e3 100644
--- a/drivers/crypto/ccp/ccp-dev.c
+++ b/drivers/crypto/ccp/ccp-dev.c
@@ -292,9 +292,12 @@ int ccp_enqueue_cmd(struct ccp_cmd *cmd)
i = ccp->cmd_q_count;
if (ccp->cmd_count >= MAX_CMD_QLEN) {
- ret = -EBUSY;
- if (cmd->flags & CCP_CMD_MAY_BACKLOG)
+ if (cmd->flags & CCP_CMD_MAY_BACKLOG) {
+ ret = -EBUSY;
list_add_tail(&cmd->entry, &ccp->backlog);
+ } else {
+ ret = -EAGAIN;
+ }
} else {
ret = -EINPROGRESS;
ccp->cmd_count++;
--
2.1.4
|
|
From: Gilad Ben-Y. <gi...@be...> - 2017-09-05 12:39:41
|
The crypto API was using the -EBUSY return value to indicate
both a hard failure to submit a crypto operation into a
transformation provider when the latter was busy and the backlog
mechanism was not enabled, and a notification that the
operation was queued into the backlog when the backlog mechanism
was enabled.
Having the same return code indicate two very different conditions
depending on a flag is both error prone and requires an extra
runtime check like the following to discern between the cases:
if (err == -EINPROGRESS ||
(err == -EBUSY && (ahash_request_flags(req) &
CRYPTO_TFM_REQ_MAY_BACKLOG)))
This patch changes the return code used to indicate a crypto op
failed due to the transformation provider being transiently busy
to -EAGAIN.
Signed-off-by: Gilad Ben-Yossef <gi...@be...>
---
crypto/algapi.c | 6 ++++--
crypto/algif_hash.c | 20 +++++++++++++++++---
crypto/cryptd.c | 4 +---
3 files changed, 22 insertions(+), 8 deletions(-)
diff --git a/crypto/algapi.c b/crypto/algapi.c
index aa699ff..916bee3 100644
--- a/crypto/algapi.c
+++ b/crypto/algapi.c
@@ -897,9 +897,11 @@ int crypto_enqueue_request(struct crypto_queue *queue,
int err = -EINPROGRESS;
if (unlikely(queue->qlen >= queue->max_qlen)) {
- err = -EBUSY;
- if (!(request->flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
+ if (!(request->flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
+ err = -EAGAIN;
goto out;
+ }
+ err = -EBUSY;
if (queue->backlog == &queue->list)
queue->backlog = &request->list;
}
diff --git a/crypto/algif_hash.c b/crypto/algif_hash.c
index 5e92bd2..3b3c154 100644
--- a/crypto/algif_hash.c
+++ b/crypto/algif_hash.c
@@ -39,6 +39,20 @@ struct algif_hash_tfm {
bool has_key;
};
+/* Previous versions of crypto_* ops used to return -EBUSY
+ * rather than -EAGAIN to indicate being tied up. The in
+ * kernel API changed but we don't want to break the user
+ * space API. As only the hash user interface exposed this
+ * error ever to the user, do the translation here.
+ */
+static inline int crypto_user_err(int err)
+{
+ if (err == -EAGAIN)
+ return -EBUSY;
+
+ return err;
+}
+
static int hash_alloc_result(struct sock *sk, struct hash_ctx *ctx)
{
unsigned ds;
@@ -136,7 +150,7 @@ static int hash_sendmsg(struct socket *sock, struct msghdr *msg,
unlock:
release_sock(sk);
- return err ?: copied;
+ return err ? crypto_user_err(err) : copied;
}
static ssize_t hash_sendpage(struct socket *sock, struct page *page,
@@ -188,7 +202,7 @@ static ssize_t hash_sendpage(struct socket *sock, struct page *page,
unlock:
release_sock(sk);
- return err ?: size;
+ return err ? crypto_user_err(err) : size;
}
static int hash_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
@@ -236,7 +250,7 @@ static int hash_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
hash_free_result(sk, ctx);
release_sock(sk);
- return err ?: len;
+ return err ? crypto_user_err(err) : len;
}
static int hash_accept(struct socket *sock, struct socket *newsock, int flags,
diff --git a/crypto/cryptd.c b/crypto/cryptd.c
index 0508c48..d1dbdce 100644
--- a/crypto/cryptd.c
+++ b/crypto/cryptd.c
@@ -137,16 +137,14 @@ static int cryptd_enqueue_request(struct cryptd_queue *queue,
int cpu, err;
struct cryptd_cpu_queue *cpu_queue;
atomic_t *refcnt;
- bool may_backlog;
cpu = get_cpu();
cpu_queue = this_cpu_ptr(queue->cpu_queue);
err = crypto_enqueue_request(&cpu_queue->queue, request);
refcnt = crypto_tfm_ctx(request->tfm);
- may_backlog = request->flags & CRYPTO_TFM_REQ_MAY_BACKLOG;
- if (err == -EBUSY && !may_backlog)
+ if (err == -EAGAIN)
goto out_put_cpu;
queue_work_on(cpu, kcrypto_wq, &cpu_queue->work);
--
2.1.4
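
For readers following along, a quick sketch of the calling convention this
patch establishes. Only the return values come from the patch; the helper
below is made up for illustration and is not part of the series:

    #include <linux/errno.h>

    /*
     * Sketch: interpret the submission result of an async crypto op
     * (e.g. the value returned by crypto_ahash_digest() or
     * crypto_aead_encrypt()) after this change.
     */
    static int example_check_submit(int err)
    {
        switch (err) {
        case 0:
            return 0;        /* completed synchronously */
        case -EINPROGRESS:
            return 0;        /* accepted, completion callback will run */
        case -EBUSY:
            return 0;        /* queued on the backlog; only possible when
                              * CRYPTO_TFM_REQ_MAY_BACKLOG was set */
        case -EAGAIN:
            return -EAGAIN;  /* transiently busy and not queued:
                              * retry later or drop */
        default:
            return err;      /* hard failure */
        }
    }

Before this change -EBUSY covered both the backlogged and the not-queued
case and had to be disambiguated with the CRYPTO_TFM_REQ_MAY_BACKLOG flag
check quoted in the commit message above.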
|
|
From: Gilad Ben-Y. <gi...@be...> - 2017-09-05 12:39:27
|
Many users of kernel async. crypto services have a pattern of
starting an async. crypto op and then using a completion
to wait for it to end.

This patch set simplifies this common use case in two ways:

First, by separating the return codes of the case where a
request is queued to a backlog due to the provider being
busy (-EBUSY) from the case the request has failed due
to the provider being busy and backlogging is not enabled
(-EAGAIN).

Next, this change is then built on to create a generic API
to wait for an async. crypto operation to complete.

The end result is a smaller code base and an API that is
easier to use and more difficult to get wrong.

The patch set was boot tested on x86_64 and arm64 which
at the very least tests the crypto users via testmgr and
tcrypt but I do note that I do not have access to some
of the HW whose drivers are modified nor do I claim I was
able to test all of the corner cases.

The patch set is based upon linux-next release tagged
next-20170905.

Changes from v7:
- Turn -EBUSY to -EAGAIN also in crypto using net code
  which I missed before, as has been pointed out by
  Harsh Jain.

Changes from v6:
- Fix brown paper bag compile error on marvell/cesa
  code.

Changes from v5:
- Remove redundant new line as spotted by Jonathan
  Cameron.
- Reworded dm-verity change commit message to better
  clarify potential issue averted by change as
  pointed out by Mikulas Patocka.

Changes from v4:
- Rebase on top of latest algif changes from Stephan
  Mueller.
- Fix typo in ccp patch title.

Changes from v3:
- Instead of changing the return code to indicate
  backlog queueing, change the return code to indicate
  transient busy state, as suggested by Herbert Xu.

Changes from v2:
- Patch title changed from "introduce crypto wait for
  async op" to better reflect the current state.
- Rebase on top of latest linux-next.
- Add a new return code of -EIOCBQUEUED for backlog
  queueing, as suggested by Herbert Xu.
- Transform more users to the new API.
- Update the drbg change to account for new init as
  indicated by Stephan Muller.

Changes from v1:
- Address review comments from Eric Biggers.
- Separated out bug fixes of existing code and rebase
  on top of that patch set.
- Rename 'ecr' to 'wait' in fscrypto code.
- Split patch introducing the new API from the change
  moving over the algif code which it originated from
  to the new API.
- Inline crypto_wait_req().
- Some code indentation fixes.

Gilad Ben-Yossef (20):
  crypto: change transient busy return code to -EAGAIN
  crypto: ccp: use -EAGAIN for transient busy indication
  net: use -EAGAIN for transient busy indication
  crypto: remove redundant backlog checks on EBUSY
  crypto: marvell/cesa: remove redundant backlog checks on EBUSY
  crypto: introduce crypto wait for async op
  crypto: move algif to generic async completion
  crypto: move pub key to generic async completion
  crypto: move drbg to generic async completion
  crypto: move gcm to generic async completion
  crypto: move testmgr to generic async completion
  fscrypt: move to generic async completion
  dm: move dm-verity to generic async completion
  cifs: move to generic async completion
  ima: move to generic async completion
  crypto: tcrypt: move to generic async completion
  crypto: talitos: move to generic async completion
  crypto: qce: move to generic async completion
  crypto: mediatek: move to generic async completion
  crypto: adapt api sample to use async. op wait

 Documentation/crypto/api-samples.rst | 52 ++-------
 crypto/af_alg.c | 27 -----
 crypto/ahash.c | 12 +--
 crypto/algapi.c | 6 +-
 crypto/algif_aead.c | 8 +-
 crypto/algif_hash.c | 50 +++++----
 crypto/algif_skcipher.c | 9 +-
 crypto/api.c | 13 +++
 crypto/asymmetric_keys/public_key.c | 28 +----
 crypto/cryptd.c | 4 +-
 crypto/cts.c | 6 +-
 crypto/drbg.c | 36 ++-----
 crypto/gcm.c | 32 ++----
 crypto/lrw.c | 8 +-
 crypto/rsa-pkcs1pad.c | 16 +--
 crypto/tcrypt.c | 84 +++++----------
 crypto/testmgr.c | 204 ++++++++++++-----------------
 crypto/xts.c | 8 +-
 drivers/crypto/ccp/ccp-crypto-main.c | 8 +-
 drivers/crypto/ccp/ccp-dev.c | 7 +-
 drivers/crypto/marvell/cesa.c | 3 +-
 drivers/crypto/marvell/cesa.h | 2 +-
 drivers/crypto/mediatek/mtk-aes.c | 31 +-----
 drivers/crypto/qce/sha.c | 30 +-----
 drivers/crypto/talitos.c | 38 +------
 drivers/md/dm-verity-target.c | 81 ++++----------
 drivers/md/dm-verity.h | 5 -
 fs/cifs/smb2ops.c | 30 +-----
 fs/crypto/crypto.c | 28 +----
 fs/crypto/fname.c | 36 ++-----
 fs/crypto/fscrypt_private.h | 10 --
 fs/crypto/keyinfo.c | 21 +---
 include/crypto/drbg.h | 3 +-
 include/crypto/if_alg.h | 15 +--
 include/linux/crypto.h | 40 +++++++
 net/ipv4/ah4.c | 2 +-
 net/ipv4/esp4.c | 2 +-
 net/ipv6/ah6.c | 2 +-
 net/ipv6/esp6.c | 2 +-
 security/integrity/ima/ima_crypto.c | 56 +++------

 40 files changed, 314 insertions(+), 741 deletions(-)

--
2.1.4
|
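To make the end result concrete, here is a minimal sketch of how a caller
ends up using the wait helpers this series adds to include/linux/crypto.h
(DECLARE_CRYPTO_WAIT(), crypto_req_done() and crypto_wait_req()). It is
illustrative only: example_digest() is a made-up function, "sha256" is just
an example algorithm, and error handling is trimmed down.

    #include <crypto/hash.h>
    #include <linux/crypto.h>
    #include <linux/err.h>
    #include <linux/scatterlist.h>

    /* Compute a digest synchronously over a possibly async ahash tfm.
     * @data must be linearly mapped memory (not a stack buffer). */
    static int example_digest(const u8 *data, unsigned int len, u8 *out)
    {
        struct crypto_ahash *tfm;
        struct ahash_request *req;
        struct scatterlist sg;
        DECLARE_CRYPTO_WAIT(wait);
        int err;

        tfm = crypto_alloc_ahash("sha256", 0, 0);
        if (IS_ERR(tfm))
            return PTR_ERR(tfm);

        req = ahash_request_alloc(tfm, GFP_KERNEL);
        if (!req) {
            crypto_free_ahash(tfm);
            return -ENOMEM;
        }

        sg_init_one(&sg, data, len);
        ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
                                   CRYPTO_TFM_REQ_MAY_SLEEP,
                                   crypto_req_done, &wait);
        ahash_request_set_crypt(req, &sg, out, len);

        /* Handles -EINPROGRESS and -EBUSY (backlog) and sleeps for us. */
        err = crypto_wait_req(crypto_ahash_digest(req), &wait);

        ahash_request_free(req);
        crypto_free_ahash(tfm);
        return err;
    }

crypto_wait_req() hides the completion/callback boilerplate that each of
the users converted by this series previously open-coded.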
|
From: Gilad Ben-Y. <gi...@be...> - 2017-09-05 12:17:05
|
On Tue, Sep 5, 2017 at 2:23 PM, Harsh Jain <har...@gm...> wrote:
> On Sun, Sep 3, 2017 at 11:47 AM, Gilad Ben-Yossef <gi...@be...> wrote:
>> On Thu, Aug 31, 2017 at 3:31 PM, Harsh Jain <har...@gm...> wrote:
>>> HI Gilad,
>>>
>>> I think we need an update in ESP also. Now EBUSY return means driver
>>> has accepted, Packet should not be dropped in
>>> esp_output_tail() function.
>>
>> Good catch. You are right and the same holds true for ah_output() in ah4.c.
>>
>> But I do wonder, the code there now treats -EBUSY as a special case
>> and returns NET_XMIT_DROP, but if an AEAD or AHASH transformation
>> returns some other error, like -ENOMEM or -EINVAL, shouldn't we
>> return NET_XMIT_DROP in that case too?
>
> I think we should not, XMIT_DROP implies drop current packet only,
> later on when device is recovered from busy state, Upper layer
> protocol(TCP) will re-transmit the packet. It helps in flow control.

I see. Makes sense.

Thanks,
Gilad

--
Gilad Ben-Yossef
Chief Coffee Drinker

"If you take a class in large-scale robotics, can you end up in a
situation where the homework eats your dog?"
 -- Jean-Baptiste Queru
|
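(For reference, after the "net: use -EAGAIN for transient busy indication"
patch earlier in the thread, the relevant error handling in
esp_output_tail() is shaped roughly like this -- abbreviated from that
diff, not new code:

    switch (err) {
    case -EINPROGRESS:
        goto error;

    case -EAGAIN:
        /* transiently busy, no backlog: drop, TCP will retransmit */
        err = NET_XMIT_DROP;
        break;
    }

so only the transient-busy case is turned into NET_XMIT_DROP, which is the
behaviour the flow-control argument above relies on.)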
|
From: Harsh J. <har...@gm...> - 2017-09-05 11:23:32
|
On Sun, Sep 3, 2017 at 11:47 AM, Gilad Ben-Yossef <gi...@be...> wrote: > On Thu, Aug 31, 2017 at 3:31 PM, Harsh Jain <har...@gm...> wrote: >> HI Gilad, >> >> I think we need an update in ESP also. Now EBUSY return means driver >> has accepted, Packet should not be dropped in >> >> esp_output_tail() function. > > Good catch. You are right and the same holds true for ah_output() in ah4.c. > > But I do wonder, the code there now treats -EBUSY as a special case > and returns NET_XMIT_DROP > but if an AEAD or AHASH transformation return some other error, like > -ENOMEM or -EINVAL shouldn't > we return NET_XMIT_DROP in that case too? I think we should not, XMIT_DROP implies drop current packet only, later on when device is recovered from busy state, Upper layer protocol(TCP) will re-transmit the packet. It helps in flow control. > > Any ideas? > > Gilad > >> >> >> On Thu, Aug 24, 2017 at 7:48 PM, Gilad Ben-Yossef <gi...@be...> wrote: >>> Many users of kernel async. crypto services have a pattern of >>> starting an async. crypto op and than using a completion >>> to wait for it to end. >>> >>> This patch set simplifies this common use case in two ways: >>> >>> First, by separating the return codes of the case where a >>> request is queued to a backlog due to the provider being >>> busy (-EBUSY) from the case the request has failed due >>> to the provider being busy and backlogging is not enabled >>> (-EAGAIN). >>> >>> Next, this change is than built on to create a generic API >>> to wait for a async. crypto operation to complete. >>> >>> The end result is a smaller code base and an API that is >>> easier to use and more difficult to get wrong. >>> >>> The patch set was boot tested on x86_64 and arm64 which >>> at the very least tests the crypto users via testmgr and >>> tcrypt but I do note that I do not have access to some >>> of the HW whose drivers are modified nor do I claim I was >>> able to test all of the corner cases. >>> >>> The patch set is based upon linux-next release tagged >>> next-20170824. >>> >>> Changes from v6: >>> - Fix brown paper bag compile error on marvell/cesa >>> code. >>> >>> Changes from v5: >>> - Remove redundant new line as spotted by Jonathan >>> Cameron. >>> - Reworded dm-verity change commit message to better >>> clarify potential issue averted by change as >>> pointed out by Mikulas Patocka. >>> >>> Changes from v4: >>> - Rebase on top of latest algif changes from Stephan >>> Mueller. >>> - Fix typo in ccp patch title. >>> >>> Changes from v3: >>> - Instead of changing the return code to indicate >>> backlog queueing, change the return code to indicate >>> transient busy state, as suggested by Herbert Xu. >>> >>> Changes from v2: >>> - Patch title changed from "introduce crypto wait for >>> async op" to better reflect the current state. >>> - Rebase on top of latest linux-next. >>> - Add a new return code of -EIOCBQUEUED for backlog >>> queueing, as suggested by Herbert Xu. >>> - Transform more users to the new API. >>> - Update the drbg change to account for new init as >>> indicated by Stephan Muller. >>> >>> Changes from v1: >>> - Address review comments from Eric Biggers. >>> - Separated out bug fixes of existing code and rebase >>> on top of that patch set. >>> - Rename 'ecr' to 'wait' in fscrypto code. >>> - Split patch introducing the new API from the change >>> moving over the algif code which it originated from >>> to the new API. >>> - Inline crypto_wait_req(). >>> - Some code indentation fixes. 
>>> >>> Gilad Ben-Yossef (19): >>> crypto: change transient busy return code to -EAGAIN >>> crypto: ccp: use -EAGAIN for transient busy indication >>> crypto: remove redundant backlog checks on EBUSY >>> crypto: marvell/cesa: remove redundant backlog checks on EBUSY >>> crypto: introduce crypto wait for async op >>> crypto: move algif to generic async completion >>> crypto: move pub key to generic async completion >>> crypto: move drbg to generic async completion >>> crypto: move gcm to generic async completion >>> crypto: move testmgr to generic async completion >>> fscrypt: move to generic async completion >>> dm: move dm-verity to generic async completion >>> cifs: move to generic async completion >>> ima: move to generic async completion >>> crypto: tcrypt: move to generic async completion >>> crypto: talitos: move to generic async completion >>> crypto: qce: move to generic async completion >>> crypto: mediatek: move to generic async completion >>> crypto: adapt api sample to use async. op wait >>> >>> Documentation/crypto/api-samples.rst | 52 ++------- >>> crypto/af_alg.c | 27 ----- >>> crypto/ahash.c | 12 +-- >>> crypto/algapi.c | 6 +- >>> crypto/algif_aead.c | 8 +- >>> crypto/algif_hash.c | 50 +++++---- >>> crypto/algif_skcipher.c | 9 +- >>> crypto/api.c | 13 +++ >>> crypto/asymmetric_keys/public_key.c | 28 +---- >>> crypto/cryptd.c | 4 +- >>> crypto/cts.c | 6 +- >>> crypto/drbg.c | 36 ++----- >>> crypto/gcm.c | 32 ++---- >>> crypto/lrw.c | 8 +- >>> crypto/rsa-pkcs1pad.c | 16 +-- >>> crypto/tcrypt.c | 84 +++++---------- >>> crypto/testmgr.c | 204 ++++++++++++----------------------- >>> crypto/xts.c | 8 +- >>> drivers/crypto/ccp/ccp-crypto-main.c | 8 +- >>> drivers/crypto/ccp/ccp-dev.c | 7 +- >>> drivers/crypto/marvell/cesa.c | 3 +- >>> drivers/crypto/marvell/cesa.h | 2 +- >>> drivers/crypto/mediatek/mtk-aes.c | 31 +----- >>> drivers/crypto/qce/sha.c | 30 +----- >>> drivers/crypto/talitos.c | 38 +------ >>> drivers/md/dm-verity-target.c | 81 ++++---------- >>> drivers/md/dm-verity.h | 5 - >>> fs/cifs/smb2ops.c | 30 +----- >>> fs/crypto/crypto.c | 28 +---- >>> fs/crypto/fname.c | 36 ++----- >>> fs/crypto/fscrypt_private.h | 10 -- >>> fs/crypto/keyinfo.c | 21 +--- >>> include/crypto/drbg.h | 3 +- >>> include/crypto/if_alg.h | 15 +-- >>> include/linux/crypto.h | 40 +++++++ >>> security/integrity/ima/ima_crypto.c | 56 +++------- >>> 36 files changed, 310 insertions(+), 737 deletions(-) >>> >>> -- >>> 2.1.4 >>> > > > > -- > Gilad Ben-Yossef > Chief Coffee Drinker > > "If you take a class in large-scale robotics, can you end up in a > situation where the homework eats your dog?" > -- Jean-Baptiste Queru |
|
From: shijun z. <zq...@gm...> - 2017-09-05 02:22:34
|
Hi,

Can IMA run without a TPM chip?

Thanks.

Best Regards,
Shijun Zhao
|
|
From: shijun z. <zq...@gm...> - 2017-09-05 02:20:00
|
Hi,

I'm doing a project on measuring the Android platform with IMA. On the
IMA website (https://sourceforge.net/projects/linux-ima/files/linux-ima/)
I only see IMA versions that support Linux kernels under 2.6.xx. My
question is: does IMA support Linux kernel 4.x? And does IMA support a
64-bit OS kernel?

Best Regards,
Shijun Zhao
|
|
From: Gilad Ben-Y. <gi...@be...> - 2017-09-03 06:17:25
|
On Thu, Aug 31, 2017 at 3:31 PM, Harsh Jain <har...@gm...> wrote: > HI Gilad, > > I think we need an update in ESP also. Now EBUSY return means driver > has accepted, Packet should not be dropped in > > esp_output_tail() function. Good catch. You are right and the same holds true for ah_output() in ah4.c. But I do wonder, the code there now treats -EBUSY as a special case and returns NET_XMIT_DROP but if an AEAD or AHASH transformation return some other error, like -ENOMEM or -EINVAL shouldn't we return NET_XMIT_DROP in that case too? Any ideas? Gilad > > > On Thu, Aug 24, 2017 at 7:48 PM, Gilad Ben-Yossef <gi...@be...> wrote: >> Many users of kernel async. crypto services have a pattern of >> starting an async. crypto op and than using a completion >> to wait for it to end. >> >> This patch set simplifies this common use case in two ways: >> >> First, by separating the return codes of the case where a >> request is queued to a backlog due to the provider being >> busy (-EBUSY) from the case the request has failed due >> to the provider being busy and backlogging is not enabled >> (-EAGAIN). >> >> Next, this change is than built on to create a generic API >> to wait for a async. crypto operation to complete. >> >> The end result is a smaller code base and an API that is >> easier to use and more difficult to get wrong. >> >> The patch set was boot tested on x86_64 and arm64 which >> at the very least tests the crypto users via testmgr and >> tcrypt but I do note that I do not have access to some >> of the HW whose drivers are modified nor do I claim I was >> able to test all of the corner cases. >> >> The patch set is based upon linux-next release tagged >> next-20170824. >> >> Changes from v6: >> - Fix brown paper bag compile error on marvell/cesa >> code. >> >> Changes from v5: >> - Remove redundant new line as spotted by Jonathan >> Cameron. >> - Reworded dm-verity change commit message to better >> clarify potential issue averted by change as >> pointed out by Mikulas Patocka. >> >> Changes from v4: >> - Rebase on top of latest algif changes from Stephan >> Mueller. >> - Fix typo in ccp patch title. >> >> Changes from v3: >> - Instead of changing the return code to indicate >> backlog queueing, change the return code to indicate >> transient busy state, as suggested by Herbert Xu. >> >> Changes from v2: >> - Patch title changed from "introduce crypto wait for >> async op" to better reflect the current state. >> - Rebase on top of latest linux-next. >> - Add a new return code of -EIOCBQUEUED for backlog >> queueing, as suggested by Herbert Xu. >> - Transform more users to the new API. >> - Update the drbg change to account for new init as >> indicated by Stephan Muller. >> >> Changes from v1: >> - Address review comments from Eric Biggers. >> - Separated out bug fixes of existing code and rebase >> on top of that patch set. >> - Rename 'ecr' to 'wait' in fscrypto code. >> - Split patch introducing the new API from the change >> moving over the algif code which it originated from >> to the new API. >> - Inline crypto_wait_req(). >> - Some code indentation fixes. 
>> >> Gilad Ben-Yossef (19): >> crypto: change transient busy return code to -EAGAIN >> crypto: ccp: use -EAGAIN for transient busy indication >> crypto: remove redundant backlog checks on EBUSY >> crypto: marvell/cesa: remove redundant backlog checks on EBUSY >> crypto: introduce crypto wait for async op >> crypto: move algif to generic async completion >> crypto: move pub key to generic async completion >> crypto: move drbg to generic async completion >> crypto: move gcm to generic async completion >> crypto: move testmgr to generic async completion >> fscrypt: move to generic async completion >> dm: move dm-verity to generic async completion >> cifs: move to generic async completion >> ima: move to generic async completion >> crypto: tcrypt: move to generic async completion >> crypto: talitos: move to generic async completion >> crypto: qce: move to generic async completion >> crypto: mediatek: move to generic async completion >> crypto: adapt api sample to use async. op wait >> >> Documentation/crypto/api-samples.rst | 52 ++------- >> crypto/af_alg.c | 27 ----- >> crypto/ahash.c | 12 +-- >> crypto/algapi.c | 6 +- >> crypto/algif_aead.c | 8 +- >> crypto/algif_hash.c | 50 +++++---- >> crypto/algif_skcipher.c | 9 +- >> crypto/api.c | 13 +++ >> crypto/asymmetric_keys/public_key.c | 28 +---- >> crypto/cryptd.c | 4 +- >> crypto/cts.c | 6 +- >> crypto/drbg.c | 36 ++----- >> crypto/gcm.c | 32 ++---- >> crypto/lrw.c | 8 +- >> crypto/rsa-pkcs1pad.c | 16 +-- >> crypto/tcrypt.c | 84 +++++---------- >> crypto/testmgr.c | 204 ++++++++++++----------------------- >> crypto/xts.c | 8 +- >> drivers/crypto/ccp/ccp-crypto-main.c | 8 +- >> drivers/crypto/ccp/ccp-dev.c | 7 +- >> drivers/crypto/marvell/cesa.c | 3 +- >> drivers/crypto/marvell/cesa.h | 2 +- >> drivers/crypto/mediatek/mtk-aes.c | 31 +----- >> drivers/crypto/qce/sha.c | 30 +----- >> drivers/crypto/talitos.c | 38 +------ >> drivers/md/dm-verity-target.c | 81 ++++---------- >> drivers/md/dm-verity.h | 5 - >> fs/cifs/smb2ops.c | 30 +----- >> fs/crypto/crypto.c | 28 +---- >> fs/crypto/fname.c | 36 ++----- >> fs/crypto/fscrypt_private.h | 10 -- >> fs/crypto/keyinfo.c | 21 +--- >> include/crypto/drbg.h | 3 +- >> include/crypto/if_alg.h | 15 +-- >> include/linux/crypto.h | 40 +++++++ >> security/integrity/ima/ima_crypto.c | 56 +++------- >> 36 files changed, 310 insertions(+), 737 deletions(-) >> >> -- >> 2.1.4 >> -- Gilad Ben-Yossef Chief Coffee Drinker "If you take a class in large-scale robotics, can you end up in a situation where the homework eats your dog?" -- Jean-Baptiste Queru |
|
From: Gilad Ben-Y. <gi...@be...> - 2017-09-03 06:10:03
|
On Thu, Aug 31, 2017 at 3:31 PM, Harsh Jain <har...@gm...> wrote: > HI Gilad, > > I think we need an update in ESP also. Now EBUSY return means driver > has accepted, Packet should not be dropped in > > esp_output_tail() function. > > > Good catch. You are right and the same holds true for ah_output() in ah4.c. But I do wonder, the code there now treats -EBUSY as a special case and returns NET_XMIT_DROP but if an AEAD or AHASH transformation return some other error, like -ENOMEM or -EINVAL shouldn't we return NET_XMIT_DROP in that case too? Any ideas? Gilad > > > On Thu, Aug 24, 2017 at 7:48 PM, Gilad Ben-Yossef <gi...@be...> > wrote: > > Many users of kernel async. crypto services have a pattern of > > starting an async. crypto op and than using a completion > > to wait for it to end. > > > > This patch set simplifies this common use case in two ways: > > > > First, by separating the return codes of the case where a > > request is queued to a backlog due to the provider being > > busy (-EBUSY) from the case the request has failed due > > to the provider being busy and backlogging is not enabled > > (-EAGAIN). > > > > Next, this change is than built on to create a generic API > > to wait for a async. crypto operation to complete. > > > > The end result is a smaller code base and an API that is > > easier to use and more difficult to get wrong. > > > > The patch set was boot tested on x86_64 and arm64 which > > at the very least tests the crypto users via testmgr and > > tcrypt but I do note that I do not have access to some > > of the HW whose drivers are modified nor do I claim I was > > able to test all of the corner cases. > > > > The patch set is based upon linux-next release tagged > > next-20170824. > > > > Changes from v6: > > - Fix brown paper bag compile error on marvell/cesa > > code. > > > > Changes from v5: > > - Remove redundant new line as spotted by Jonathan > > Cameron. > > - Reworded dm-verity change commit message to better > > clarify potential issue averted by change as > > pointed out by Mikulas Patocka. > > > > Changes from v4: > > - Rebase on top of latest algif changes from Stephan > > Mueller. > > - Fix typo in ccp patch title. > > > > Changes from v3: > > - Instead of changing the return code to indicate > > backlog queueing, change the return code to indicate > > transient busy state, as suggested by Herbert Xu. > > > > Changes from v2: > > - Patch title changed from "introduce crypto wait for > > async op" to better reflect the current state. > > - Rebase on top of latest linux-next. > > - Add a new return code of -EIOCBQUEUED for backlog > > queueing, as suggested by Herbert Xu. > > - Transform more users to the new API. > > - Update the drbg change to account for new init as > > indicated by Stephan Muller. > > > > Changes from v1: > > - Address review comments from Eric Biggers. > > - Separated out bug fixes of existing code and rebase > > on top of that patch set. > > - Rename 'ecr' to 'wait' in fscrypto code. > > - Split patch introducing the new API from the change > > moving over the algif code which it originated from > > to the new API. > > - Inline crypto_wait_req(). > > - Some code indentation fixes. 
> > > > Gilad Ben-Yossef (19): > > crypto: change transient busy return code to -EAGAIN > > crypto: ccp: use -EAGAIN for transient busy indication > > crypto: remove redundant backlog checks on EBUSY > > crypto: marvell/cesa: remove redundant backlog checks on EBUSY > > crypto: introduce crypto wait for async op > > crypto: move algif to generic async completion > > crypto: move pub key to generic async completion > > crypto: move drbg to generic async completion > > crypto: move gcm to generic async completion > > crypto: move testmgr to generic async completion > > fscrypt: move to generic async completion > > dm: move dm-verity to generic async completion > > cifs: move to generic async completion > > ima: move to generic async completion > > crypto: tcrypt: move to generic async completion > > crypto: talitos: move to generic async completion > > crypto: qce: move to generic async completion > > crypto: mediatek: move to generic async completion > > crypto: adapt api sample to use async. op wait > > > > Documentation/crypto/api-samples.rst | 52 ++------- > > crypto/af_alg.c | 27 ----- > > crypto/ahash.c | 12 +-- > > crypto/algapi.c | 6 +- > > crypto/algif_aead.c | 8 +- > > crypto/algif_hash.c | 50 +++++---- > > crypto/algif_skcipher.c | 9 +- > > crypto/api.c | 13 +++ > > crypto/asymmetric_keys/public_key.c | 28 +---- > > crypto/cryptd.c | 4 +- > > crypto/cts.c | 6 +- > > crypto/drbg.c | 36 ++----- > > crypto/gcm.c | 32 ++---- > > crypto/lrw.c | 8 +- > > crypto/rsa-pkcs1pad.c | 16 +-- > > crypto/tcrypt.c | 84 +++++---------- > > crypto/testmgr.c | 204 > ++++++++++++----------------------- > > crypto/xts.c | 8 +- > > drivers/crypto/ccp/ccp-crypto-main.c | 8 +- > > drivers/crypto/ccp/ccp-dev.c | 7 +- > > drivers/crypto/marvell/cesa.c | 3 +- > > drivers/crypto/marvell/cesa.h | 2 +- > > drivers/crypto/mediatek/mtk-aes.c | 31 +----- > > drivers/crypto/qce/sha.c | 30 +----- > > drivers/crypto/talitos.c | 38 +------ > > drivers/md/dm-verity-target.c | 81 ++++---------- > > drivers/md/dm-verity.h | 5 - > > fs/cifs/smb2ops.c | 30 +----- > > fs/crypto/crypto.c | 28 +---- > > fs/crypto/fname.c | 36 ++----- > > fs/crypto/fscrypt_private.h | 10 -- > > fs/crypto/keyinfo.c | 21 +--- > > include/crypto/drbg.h | 3 +- > > include/crypto/if_alg.h | 15 +-- > > include/linux/crypto.h | 40 +++++++ > > security/integrity/ima/ima_crypto.c | 56 +++------- > > 36 files changed, 310 insertions(+), 737 deletions(-) > > > > -- > > 2.1.4 > > > -- Gilad Ben-Yossef Chief Coffee Drinker "If you take a class in large-scale robotics, can you end up in a situation where the homework eats your dog?" -- Jean-Baptiste Queru -------------- next part -------------- An HTML attachment was scrubbed... |
|
From: Mimi Z. <zo...@li...> - 2017-08-31 14:01:09
|
On Tue, 2017-08-29 at 11:42 -0700, Matthew Garrett wrote:
> Right now IMA BPRM validation is performed after setup_creds() but before
> commit_creds(), which means current->cred still refers to the fork()ing
> task rather than the child. This makes it difficult to implement
> appraisal rules that depend on the security context that the task will
> be transitioned to. Passing through bprm to the policy matching allows
> us to do so.
There are other LSM hooks in fs/exec.c. Instead of modifying
process_measurement(), could you defer the policy check until after
commit_creds()? We could define the corresponding IMA hook.
Mimi
> Signed-off-by: Matthew Garrett <mj...@go...>
>
> --
>
> Does this seem reasonable? I thought about adding an additional match
> type (BPRM_LATE or something) but this seems like a simpler approach.
> ---
> security/integrity/ima/ima.h | 6 +++---
> security/integrity/ima/ima_api.c | 6 ++++--
> security/integrity/ima/ima_appraise.c | 2 +-
> security/integrity/ima/ima_main.c | 15 ++++++++-------
> security/integrity/ima/ima_policy.c | 11 +++++++----
> 5 files changed, 23 insertions(+), 17 deletions(-)
>
> diff --git a/security/integrity/ima/ima.h b/security/integrity/ima/ima.h
> index d52b487ad259..695614d85201 100644
> --- a/security/integrity/ima/ima.h
> +++ b/security/integrity/ima/ima.h
> @@ -191,7 +191,7 @@ enum ima_hooks {
> };
>
> /* LIM API function definitions */
> -int ima_get_action(struct inode *inode, int mask,
> +int ima_get_action(struct linux_binprm *bprm, struct inode *inode, int mask,
> enum ima_hooks func, int *pcr);
> int ima_must_measure(struct inode *inode, int mask, enum ima_hooks func);
> int ima_collect_measurement(struct integrity_iint_cache *iint,
> @@ -212,8 +212,8 @@ void ima_free_template_entry(struct ima_template_entry *entry);
> const char *ima_d_path(const struct path *path, char **pathbuf, char *filename);
>
> /* IMA policy related functions */
> -int ima_match_policy(struct inode *inode, enum ima_hooks func, int mask,
> - int flags, int *pcr);
> +int ima_match_policy(struct linux_binprm *bprm, struct inode *inode,
> + enum ima_hooks func, int mask, int flags, int *pcr);
> void ima_init_policy(void);
> void ima_update_policy(void);
> void ima_update_policy_flag(void);
> diff --git a/security/integrity/ima/ima_api.c b/security/integrity/ima/ima_api.c
> index c2edba8de35e..1c1a1ffee398 100644
> --- a/security/integrity/ima/ima_api.c
> +++ b/security/integrity/ima/ima_api.c
> @@ -156,6 +156,7 @@ void ima_add_violation(struct file *file, const unsigned char *filename,
>
> /**
> * ima_get_action - appraise & measure decision based on policy.
> + * @bprm: pointer to the BPRM struct to be validated
> * @inode: pointer to inode to measure
> * @mask: contains the permission mask (MAY_READ, MAY_WRITE, MAY_EXEC,
> * MAY_APPEND)
> @@ -172,13 +173,14 @@ void ima_add_violation(struct file *file, const unsigned char *filename,
> * Returns IMA_MEASURE, IMA_APPRAISE mask.
> *
> */
> -int ima_get_action(struct inode *inode, int mask, enum ima_hooks func, int *pcr)
> +int ima_get_action(struct linux_binprm *bprm, struct inode *inode, int mask,
> + enum ima_hooks func, int *pcr)
> {
> int flags = IMA_MEASURE | IMA_AUDIT | IMA_APPRAISE;
>
> flags &= ima_policy_flag;
>
> - return ima_match_policy(inode, func, mask, flags, pcr);
> + return ima_match_policy(bprm, inode, func, mask, flags, pcr);
> }
>
> /*
> diff --git a/security/integrity/ima/ima_appraise.c b/security/integrity/ima/ima_appraise.c
> index 809ba70fbbbf..bcd956fc4a04 100644
> --- a/security/integrity/ima/ima_appraise.c
> +++ b/security/integrity/ima/ima_appraise.c
> @@ -53,7 +53,7 @@ int ima_must_appraise(struct inode *inode, int mask, enum ima_hooks func)
> if (!ima_appraise)
> return 0;
>
> - return ima_match_policy(inode, func, mask, IMA_APPRAISE, NULL);
> + return ima_match_policy(NULL, inode, func, mask, IMA_APPRAISE, NULL);
> }
>
> static int ima_fix_xattr(struct dentry *dentry,
> diff --git a/security/integrity/ima/ima_main.c b/security/integrity/ima/ima_main.c
> index 2aebb7984437..4b1f2630b736 100644
> --- a/security/integrity/ima/ima_main.c
> +++ b/security/integrity/ima/ima_main.c
> @@ -155,8 +155,9 @@ void ima_file_free(struct file *file)
> ima_check_last_writer(iint, inode, file);
> }
>
> -static int process_measurement(struct file *file, char *buf, loff_t size,
> - int mask, enum ima_hooks func, int opened)
> +static int process_measurement(struct linux_binprm *bprm, struct file *file,
> + char *buf, loff_t size, int mask,
> + enum ima_hooks func, int opened)
> {
> struct inode *inode = file_inode(file);
> struct integrity_iint_cache *iint = NULL;
> @@ -178,7 +179,7 @@ static int process_measurement(struct file *file, char *buf, loff_t size,
> * bitmask based on the appraise/audit/measurement policy.
> * Included is the appraise submask.
> */
> - action = ima_get_action(inode, mask, func, &pcr);
> + action = ima_get_action(bprm, inode, mask, func, &pcr);
> violation_check = ((func == FILE_CHECK || func == MMAP_CHECK) &&
> (ima_policy_flag & IMA_MEASURE));
> if (!action && !violation_check)
> @@ -282,7 +283,7 @@ static int process_measurement(struct file *file, char *buf, loff_t size,
> int ima_file_mmap(struct file *file, unsigned long prot)
> {
> if (file && (prot & PROT_EXEC))
> - return process_measurement(file, NULL, 0, MAY_EXEC,
> + return process_measurement(NULL, file, NULL, 0, MAY_EXEC,
> MMAP_CHECK, 0);
> return 0;
> }
> @@ -302,7 +303,7 @@ int ima_file_mmap(struct file *file, unsigned long prot)
> */
> int ima_bprm_check(struct linux_binprm *bprm)
> {
> - return process_measurement(bprm->file, NULL, 0, MAY_EXEC,
> + return process_measurement(bprm, bprm->file, NULL, 0, MAY_EXEC,
> BPRM_CHECK, 0);
> }
>
> @@ -318,7 +319,7 @@ int ima_bprm_check(struct linux_binprm *bprm)
> */
> int ima_file_check(struct file *file, int mask, int opened)
> {
> - return process_measurement(file, NULL, 0,
> + return process_measurement(NULL, file, NULL, 0,
> mask & (MAY_READ | MAY_WRITE | MAY_EXEC |
> MAY_APPEND), FILE_CHECK, opened);
> }
> @@ -413,7 +414,7 @@ int ima_post_read_file(struct file *file, void *buf, loff_t size,
> }
>
> func = read_idmap[read_id] ?: FILE_CHECK;
> - return process_measurement(file, buf, size, MAY_READ, func, 0);
> + return process_measurement(NULL, file, buf, size, MAY_READ, func, 0);
> }
>
> static int __init init_ima(void)
> diff --git a/security/integrity/ima/ima_policy.c b/security/integrity/ima/ima_policy.c
> index 95209a5f8595..93f6af4e3a20 100644
> --- a/security/integrity/ima/ima_policy.c
> +++ b/security/integrity/ima/ima_policy.c
> @@ -240,13 +240,15 @@ static void ima_lsm_update_rules(void)
> /**
> * ima_match_rules - determine whether an inode matches the measure rule.
> * @rule: a pointer to a rule
> + * @bprm: a pointer to a binprm structure
> * @inode: a pointer to an inode
> * @func: LIM hook identifier
> * @mask: requested action (MAY_READ | MAY_WRITE | MAY_APPEND | MAY_EXEC)
> *
> * Returns true on rule match, false on failure.
> */
> -static bool ima_match_rules(struct ima_rule_entry *rule, struct inode *inode,
> +static bool ima_match_rules(struct ima_rule_entry *rule,
> + struct linux_binprm *bprm, struct inode *inode,
> enum ima_hooks func, int mask)
> {
> struct task_struct *tsk = current;
> @@ -350,6 +352,7 @@ static int get_subaction(struct ima_rule_entry *rule, enum ima_hooks func)
>
> /**
> * ima_match_policy - decision based on LSM and other conditions
> + * @bprm: pointer to a binprm for which the policy decision is being made
> * @inode: pointer to an inode for which the policy decision is being made
> * @func: IMA hook identifier
> * @mask: requested action (MAY_READ | MAY_WRITE | MAY_APPEND | MAY_EXEC)
> @@ -362,8 +365,8 @@ static int get_subaction(struct ima_rule_entry *rule, enum ima_hooks func)
> * list when walking it. Reads are many orders of magnitude more numerous
> * than writes so ima_match_policy() is classical RCU candidate.
> */
> -int ima_match_policy(struct inode *inode, enum ima_hooks func, int mask,
> - int flags, int *pcr)
> +int ima_match_policy(struct linux_binprm *bprm, struct inode *inode,
> + enum ima_hooks func, int mask, int flags, int *pcr)
> {
> struct ima_rule_entry *entry;
> int action = 0, actmask = flags | (flags << 1);
> @@ -374,7 +377,7 @@ int ima_match_policy(struct inode *inode, enum ima_hooks func, int mask,
> if (!(entry->action & actmask))
> continue;
>
> - if (!ima_match_rules(entry, inode, func, mask))
> + if (!ima_match_rules(entry, bprm, inode, func, mask))
> continue;
>
> action |= entry->flags & IMA_ACTION_FLAGS;
|
|
From: Harsh J. <har...@gm...> - 2017-08-31 12:32:01
|
HI Gilad, I think we need an update in ESP also. Now EBUSY return means driver has accepted, Packet should not be dropped in esp_output_tail() function. . Regards Harsh Jain On Thu, Aug 24, 2017 at 7:48 PM, Gilad Ben-Yossef <gi...@be...> wrote: > Many users of kernel async. crypto services have a pattern of > starting an async. crypto op and than using a completion > to wait for it to end. > > This patch set simplifies this common use case in two ways: > > First, by separating the return codes of the case where a > request is queued to a backlog due to the provider being > busy (-EBUSY) from the case the request has failed due > to the provider being busy and backlogging is not enabled > (-EAGAIN). > > Next, this change is than built on to create a generic API > to wait for a async. crypto operation to complete. > > The end result is a smaller code base and an API that is > easier to use and more difficult to get wrong. > > The patch set was boot tested on x86_64 and arm64 which > at the very least tests the crypto users via testmgr and > tcrypt but I do note that I do not have access to some > of the HW whose drivers are modified nor do I claim I was > able to test all of the corner cases. > > The patch set is based upon linux-next release tagged > next-20170824. > > Changes from v6: > - Fix brown paper bag compile error on marvell/cesa > code. > > Changes from v5: > - Remove redundant new line as spotted by Jonathan > Cameron. > - Reworded dm-verity change commit message to better > clarify potential issue averted by change as > pointed out by Mikulas Patocka. > > Changes from v4: > - Rebase on top of latest algif changes from Stephan > Mueller. > - Fix typo in ccp patch title. > > Changes from v3: > - Instead of changing the return code to indicate > backlog queueing, change the return code to indicate > transient busy state, as suggested by Herbert Xu. > > Changes from v2: > - Patch title changed from "introduce crypto wait for > async op" to better reflect the current state. > - Rebase on top of latest linux-next. > - Add a new return code of -EIOCBQUEUED for backlog > queueing, as suggested by Herbert Xu. > - Transform more users to the new API. > - Update the drbg change to account for new init as > indicated by Stephan Muller. > > Changes from v1: > - Address review comments from Eric Biggers. > - Separated out bug fixes of existing code and rebase > on top of that patch set. > - Rename 'ecr' to 'wait' in fscrypto code. > - Split patch introducing the new API from the change > moving over the algif code which it originated from > to the new API. > - Inline crypto_wait_req(). > - Some code indentation fixes. 
> > Gilad Ben-Yossef (19): > crypto: change transient busy return code to -EAGAIN > crypto: ccp: use -EAGAIN for transient busy indication > crypto: remove redundant backlog checks on EBUSY > crypto: marvell/cesa: remove redundant backlog checks on EBUSY > crypto: introduce crypto wait for async op > crypto: move algif to generic async completion > crypto: move pub key to generic async completion > crypto: move drbg to generic async completion > crypto: move gcm to generic async completion > crypto: move testmgr to generic async completion > fscrypt: move to generic async completion > dm: move dm-verity to generic async completion > cifs: move to generic async completion > ima: move to generic async completion > crypto: tcrypt: move to generic async completion > crypto: talitos: move to generic async completion > crypto: qce: move to generic async completion > crypto: mediatek: move to generic async completion > crypto: adapt api sample to use async. op wait > > Documentation/crypto/api-samples.rst | 52 ++------- > crypto/af_alg.c | 27 ----- > crypto/ahash.c | 12 +-- > crypto/algapi.c | 6 +- > crypto/algif_aead.c | 8 +- > crypto/algif_hash.c | 50 +++++---- > crypto/algif_skcipher.c | 9 +- > crypto/api.c | 13 +++ > crypto/asymmetric_keys/public_key.c | 28 +---- > crypto/cryptd.c | 4 +- > crypto/cts.c | 6 +- > crypto/drbg.c | 36 ++----- > crypto/gcm.c | 32 ++---- > crypto/lrw.c | 8 +- > crypto/rsa-pkcs1pad.c | 16 +-- > crypto/tcrypt.c | 84 +++++---------- > crypto/testmgr.c | 204 ++++++++++++----------------------- > crypto/xts.c | 8 +- > drivers/crypto/ccp/ccp-crypto-main.c | 8 +- > drivers/crypto/ccp/ccp-dev.c | 7 +- > drivers/crypto/marvell/cesa.c | 3 +- > drivers/crypto/marvell/cesa.h | 2 +- > drivers/crypto/mediatek/mtk-aes.c | 31 +----- > drivers/crypto/qce/sha.c | 30 +----- > drivers/crypto/talitos.c | 38 +------ > drivers/md/dm-verity-target.c | 81 ++++---------- > drivers/md/dm-verity.h | 5 - > fs/cifs/smb2ops.c | 30 +----- > fs/crypto/crypto.c | 28 +---- > fs/crypto/fname.c | 36 ++----- > fs/crypto/fscrypt_private.h | 10 -- > fs/crypto/keyinfo.c | 21 +--- > include/crypto/drbg.h | 3 +- > include/crypto/if_alg.h | 15 +-- > include/linux/crypto.h | 40 +++++++ > security/integrity/ima/ima_crypto.c | 56 +++------- > 36 files changed, 310 insertions(+), 737 deletions(-) > > -- > 2.1.4 > |
|
From: Matthew G. <mj...@go...> - 2017-08-29 19:50:42
|
On Tue, Aug 29, 2017 at 12:47 PM, John Johansen <joh...@ca...> wrote: > On 08/29/2017 12:04 PM, Matthew Garrett wrote: >> IMA has support for matching based on security context, but this is >> currently limited to modules that implement the audit_rule_match hook. >> The infrastructure around this seems to depend on having 32 bit security >> IDs to reference the policy associated with tasks or files, which >> doesn't seem to be a concept that Apparmor really has. So, this >> implementation ignores the abstraction and calls through to Apparmor >> directly. >> >> This seems ugly, so is there a better way to achieve this? > > probably via secids :/ > > secid support in apparmor is a wip, and we are hoping to land full support > in 4.15 > > I'll see if I can't get a dev branch with them up for you this week. Oh, that'd be great, thank you! > that said if you wanted to land this sooner I am not opposed to this > going in with a minor change (see below) on the apparmor end 4.15 would be fine, I can use this implementation for internal testing. |
|
From: John J. <joh...@ca...> - 2017-08-29 19:47:48
|
On 08/29/2017 12:04 PM, Matthew Garrett wrote:
> IMA has support for matching based on security context, but this is
> currently limited to modules that implement the audit_rule_match hook.
> The infrastructure around this seems to depend on having 32 bit security
> IDs to reference the policy associated with tasks or files, which
> doesn't seem to be a concept that Apparmor really has. So, this
> implementation ignores the abstraction and calls through to Apparmor
> directly.
>
> This seems ugly, so is there a better way to achieve this?
Probably via secids. :/

Secid support in apparmor is a WIP, and we are hoping to land full support
in 4.15.

I'll see if I can't get a dev branch with them up for you this week.

That said, if you wanted to land this sooner, I am not opposed to this
going in with a minor change (see below) on the apparmor end.
> ---
> include/linux/apparmor.h | 16 ++++++++++++++++
> security/apparmor/lib.c | 32 ++++++++++++++++++++++++++++++++
> security/integrity/ima/ima_policy.c | 24 +++++++++++++++++++++---
> 3 files changed, 69 insertions(+), 3 deletions(-)
> create mode 100644 include/linux/apparmor.h
>
> diff --git a/include/linux/apparmor.h b/include/linux/apparmor.h
> new file mode 100644
> index 000000000000..d8ac3f706437
> --- /dev/null
> +++ b/include/linux/apparmor.h
> @@ -0,0 +1,16 @@
> +#ifndef _APPARMOR_H_
> +
> +struct linux_binprm;
> +
> +#ifdef CONFIG_SECURITY_APPARMOR
> +int aa_task_profile_match(struct linux_binprm *bprm, struct task_struct *tsk,
> + const char *profile);
> +#else
> +static int aa_task_profile_match(struct linux_binprm *bprm,
> + struct task_struct *tsk, const char *profile)
> +{
> + return 0;
> +}
> +#endif
> +
> +#endif
> diff --git a/security/apparmor/lib.c b/security/apparmor/lib.c
> index 08ca26bcca77..04d087e4a1a3 100644
> --- a/security/apparmor/lib.c
> +++ b/security/apparmor/lib.c
> @@ -17,9 +17,11 @@
> #include <linux/slab.h>
> #include <linux/string.h>
> #include <linux/vmalloc.h>
> +#include <linux/apparmor.h>
>
> #include "include/audit.h"
> #include "include/apparmor.h"
> +#include "include/context.h"
> #include "include/lib.h"
> #include "include/perms.h"
> #include "include/policy.h"
> @@ -385,6 +387,36 @@ void aa_profile_match_label(struct aa_profile *profile, struct aa_label *label,
> aa_label_match(profile, label, state, false, request, perms);
> }
>
> +/**
> + * aa_task_profile_match - check whether a task is using the specified profile
> + * @bprm - bprm structure to extract creds from.
> + * @tsk - task to verify. Ignored if @bprm is not NULL.
> + * @name - name of the profile to search for.
> + */
> +int aa_task_profile_match(struct linux_binprm *bprm, struct task_struct *tsk,
> + const char *name)
> +{
> + struct aa_label *label;
> + struct aa_profile *profile;
> + struct aa_task_ctx *ctx;
> + struct label_it i;
> + const struct cred *cred;
> +
> + if (bprm) {
> + ctx = cred_ctx(bprm->cred);
> + label = aa_get_newest_label(ctx->label);
> + } else {
> + cred = __task_cred(tsk);
> + label = aa_get_newest_cred_label(cred);
> + }
> +
> + label_for_each(i, label, profile) {
> + if (strcmp(name, profile->base.name) == 0)
> + return 1;
> + }
> +
You should be using profile->base.hname here; it's a stupid artefact of
"hat" profile naming, which predates apparmor policy namespace support.
> + return 0;
> +}
>
> /* currently unused */
> int aa_profile_label_perm(struct aa_profile *profile, struct aa_profile *target,
> diff --git a/security/integrity/ima/ima_policy.c b/security/integrity/ima/ima_policy.c
> index 93f6af4e3a20..556a1292734c 100644
> --- a/security/integrity/ima/ima_policy.c
> +++ b/security/integrity/ima/ima_policy.c
> @@ -20,6 +20,7 @@
> #include <linux/rculist.h>
> #include <linux/genhd.h>
> #include <linux/seq_file.h>
> +#include <linux/apparmor.h>
>
> #include "ima.h"
>
> @@ -47,9 +48,9 @@
> int ima_policy_flag;
> static int temp_ima_appraise;
>
> -#define MAX_LSM_RULES 6
> +#define MAX_LSM_RULES 7
> enum lsm_rule_types { LSM_OBJ_USER, LSM_OBJ_ROLE, LSM_OBJ_TYPE,
> - LSM_SUBJ_USER, LSM_SUBJ_ROLE, LSM_SUBJ_TYPE
> + LSM_SUBJ_USER, LSM_SUBJ_ROLE, LSM_SUBJ_TYPE, LSM_AA_PROFILE
> };
>
> enum policy_types { ORIGINAL_TCB = 1, DEFAULT_TCB };
> @@ -313,6 +314,11 @@ static bool ima_match_rules(struct ima_rule_entry *rule,
> Audit_equal,
> rule->lsm[i].rule,
> NULL);
> + break;
> + case LSM_AA_PROFILE:
> + rc = aa_task_profile_match(bprm, tsk,
> + rule->lsm[i].args_p);
> + break;
> default:
> break;
> }
> @@ -527,7 +533,7 @@ enum {
> Opt_audit,
> Opt_obj_user, Opt_obj_role, Opt_obj_type,
> Opt_subj_user, Opt_subj_role, Opt_subj_type,
> - Opt_func, Opt_mask, Opt_fsmagic,
> + Opt_aa_profile, Opt_func, Opt_mask, Opt_fsmagic,
> Opt_fsuuid, Opt_uid_eq, Opt_euid_eq, Opt_fowner_eq,
> Opt_uid_gt, Opt_euid_gt, Opt_fowner_gt,
> Opt_uid_lt, Opt_euid_lt, Opt_fowner_lt,
> @@ -547,6 +553,7 @@ static match_table_t policy_tokens = {
> {Opt_subj_user, "subj_user=%s"},
> {Opt_subj_role, "subj_role=%s"},
> {Opt_subj_type, "subj_type=%s"},
> + {Opt_aa_profile, "aa_profile=%s"},
> {Opt_func, "func=%s"},
> {Opt_mask, "mask=%s"},
> {Opt_fsmagic, "fsmagic=%s"},
> @@ -847,6 +854,13 @@ static int ima_parse_rule(char *rule, struct ima_rule_entry *entry)
> LSM_SUBJ_TYPE,
> AUDIT_SUBJ_TYPE);
> break;
> + case Opt_aa_profile:
> + ima_log_string(ab, "aa_profile", args[0].from);
> + entry->lsm[LSM_AA_PROFILE].args_p = match_strdup(args);
> + entry->lsm[LSM_AA_PROFILE].rule = 1;
> + if (!entry->lsm[LSM_AA_PROFILE].args_p)
> + result = -ENOMEM;
> + break;
> case Opt_appraise_type:
> if (entry->action != APPRAISE) {
> result = -EINVAL;
> @@ -1138,6 +1152,10 @@ int ima_policy_show(struct seq_file *m, void *v)
> seq_printf(m, pt(Opt_subj_type),
> (char *)entry->lsm[i].args_p);
> break;
> + case LSM_AA_PROFILE:
> + seq_printf(m, pt(Opt_aa_profile),
> + (char *)entry->lsm[i].args_p);
> + break;
> }
> }
> }
>
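As an aside, with the aa_profile= option proposed in this RFC, rules
written to the IMA policy file (securityfs ima/policy) would look
something like:

    measure func=BPRM_CHECK aa_profile=myapp
    appraise func=BPRM_CHECK aa_profile=myapp appraise_type=imasig

"myapp" is only a placeholder for whatever AppArmor profile (h)name is
being matched; the exact matching semantics are of course whatever this
RFC settles on.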
|