From: Matthew G. <mj...@go...> - 2017-08-29 19:41:22
Right now IMA BPRM validation is performed after setup_creds() but before commit_creds(), which means current->cred still refers to the fork()ing task rather than the child. This makes it difficult to implement appraisal rules that depend on the security context that the task will be transitioned to. Passing through bprm to the policy matching allows us to do so. Signed-off-by: Matthew Garrett <mj...@go...> -- Does this seem reasonable? I thought about adding an additional match type (BPRM_LATE or something) but this seems like a simpler approach. --- security/integrity/ima/ima.h | 6 +++--- security/integrity/ima/ima_api.c | 6 ++++-- security/integrity/ima/ima_appraise.c | 2 +- security/integrity/ima/ima_main.c | 15 ++++++++------- security/integrity/ima/ima_policy.c | 11 +++++++---- 5 files changed, 23 insertions(+), 17 deletions(-) diff --git a/security/integrity/ima/ima.h b/security/integrity/ima/ima.h index d52b487ad259..695614d85201 100644 --- a/security/integrity/ima/ima.h +++ b/security/integrity/ima/ima.h @@ -191,7 +191,7 @@ enum ima_hooks { }; /* LIM API function definitions */ -int ima_get_action(struct inode *inode, int mask, +int ima_get_action(struct linux_binprm *bprm, struct inode *inode, int mask, enum ima_hooks func, int *pcr); int ima_must_measure(struct inode *inode, int mask, enum ima_hooks func); int ima_collect_measurement(struct integrity_iint_cache *iint, @@ -212,8 +212,8 @@ void ima_free_template_entry(struct ima_template_entry *entry); const char *ima_d_path(const struct path *path, char **pathbuf, char *filename); /* IMA policy related functions */ -int ima_match_policy(struct inode *inode, enum ima_hooks func, int mask, - int flags, int *pcr); +int ima_match_policy(struct linux_binprm *bprm, struct inode *inode, + enum ima_hooks func, int mask, int flags, int *pcr); void ima_init_policy(void); void ima_update_policy(void); void ima_update_policy_flag(void); diff --git a/security/integrity/ima/ima_api.c b/security/integrity/ima/ima_api.c index c2edba8de35e..1c1a1ffee398 100644 --- a/security/integrity/ima/ima_api.c +++ b/security/integrity/ima/ima_api.c @@ -156,6 +156,7 @@ void ima_add_violation(struct file *file, const unsigned char *filename, /** * ima_get_action - appraise & measure decision based on policy. + * @bprm: pointer to the BPRM struct to be validated * @inode: pointer to inode to measure * @mask: contains the permission mask (MAY_READ, MAY_WRITE, MAY_EXEC, * MAY_APPEND) @@ -172,13 +173,14 @@ void ima_add_violation(struct file *file, const unsigned char *filename, * Returns IMA_MEASURE, IMA_APPRAISE mask. 
* */ -int ima_get_action(struct inode *inode, int mask, enum ima_hooks func, int *pcr) +int ima_get_action(struct linux_binprm *bprm, struct inode *inode, int mask, + enum ima_hooks func, int *pcr) { int flags = IMA_MEASURE | IMA_AUDIT | IMA_APPRAISE; flags &= ima_policy_flag; - return ima_match_policy(inode, func, mask, flags, pcr); + return ima_match_policy(bprm, inode, func, mask, flags, pcr); } /* diff --git a/security/integrity/ima/ima_appraise.c b/security/integrity/ima/ima_appraise.c index 809ba70fbbbf..bcd956fc4a04 100644 --- a/security/integrity/ima/ima_appraise.c +++ b/security/integrity/ima/ima_appraise.c @@ -53,7 +53,7 @@ int ima_must_appraise(struct inode *inode, int mask, enum ima_hooks func) if (!ima_appraise) return 0; - return ima_match_policy(inode, func, mask, IMA_APPRAISE, NULL); + return ima_match_policy(NULL, inode, func, mask, IMA_APPRAISE, NULL); } static int ima_fix_xattr(struct dentry *dentry, diff --git a/security/integrity/ima/ima_main.c b/security/integrity/ima/ima_main.c index 2aebb7984437..4b1f2630b736 100644 --- a/security/integrity/ima/ima_main.c +++ b/security/integrity/ima/ima_main.c @@ -155,8 +155,9 @@ void ima_file_free(struct file *file) ima_check_last_writer(iint, inode, file); } -static int process_measurement(struct file *file, char *buf, loff_t size, - int mask, enum ima_hooks func, int opened) +static int process_measurement(struct linux_binprm *bprm, struct file *file, + char *buf, loff_t size, int mask, + enum ima_hooks func, int opened) { struct inode *inode = file_inode(file); struct integrity_iint_cache *iint = NULL; @@ -178,7 +179,7 @@ static int process_measurement(struct file *file, char *buf, loff_t size, * bitmask based on the appraise/audit/measurement policy. * Included is the appraise submask. 
*/ - action = ima_get_action(inode, mask, func, &pcr); + action = ima_get_action(bprm, inode, mask, func, &pcr); violation_check = ((func == FILE_CHECK || func == MMAP_CHECK) && (ima_policy_flag & IMA_MEASURE)); if (!action && !violation_check) @@ -282,7 +283,7 @@ static int process_measurement(struct file *file, char *buf, loff_t size, int ima_file_mmap(struct file *file, unsigned long prot) { if (file && (prot & PROT_EXEC)) - return process_measurement(file, NULL, 0, MAY_EXEC, + return process_measurement(NULL, file, NULL, 0, MAY_EXEC, MMAP_CHECK, 0); return 0; } @@ -302,7 +303,7 @@ int ima_file_mmap(struct file *file, unsigned long prot) */ int ima_bprm_check(struct linux_binprm *bprm) { - return process_measurement(bprm->file, NULL, 0, MAY_EXEC, + return process_measurement(bprm, bprm->file, NULL, 0, MAY_EXEC, BPRM_CHECK, 0); } @@ -318,7 +319,7 @@ int ima_bprm_check(struct linux_binprm *bprm) */ int ima_file_check(struct file *file, int mask, int opened) { - return process_measurement(file, NULL, 0, + return process_measurement(NULL, file, NULL, 0, mask & (MAY_READ | MAY_WRITE | MAY_EXEC | MAY_APPEND), FILE_CHECK, opened); } @@ -413,7 +414,7 @@ int ima_post_read_file(struct file *file, void *buf, loff_t size, } func = read_idmap[read_id] ?: FILE_CHECK; - return process_measurement(file, buf, size, MAY_READ, func, 0); + return process_measurement(NULL, file, buf, size, MAY_READ, func, 0); } static int __init init_ima(void) diff --git a/security/integrity/ima/ima_policy.c b/security/integrity/ima/ima_policy.c index 95209a5f8595..93f6af4e3a20 100644 --- a/security/integrity/ima/ima_policy.c +++ b/security/integrity/ima/ima_policy.c @@ -240,13 +240,15 @@ static void ima_lsm_update_rules(void) /** * ima_match_rules - determine whether an inode matches the measure rule. * @rule: a pointer to a rule + * @bprm: a pointer to a binprm structure * @inode: a pointer to an inode * @func: LIM hook identifier * @mask: requested action (MAY_READ | MAY_WRITE | MAY_APPEND | MAY_EXEC) * * Returns true on rule match, false on failure. */ -static bool ima_match_rules(struct ima_rule_entry *rule, struct inode *inode, +static bool ima_match_rules(struct ima_rule_entry *rule, + struct linux_binprm *bprm, struct inode *inode, enum ima_hooks func, int mask) { struct task_struct *tsk = current; @@ -350,6 +352,7 @@ static int get_subaction(struct ima_rule_entry *rule, enum ima_hooks func) /** * ima_match_policy - decision based on LSM and other conditions + * @bprm: pointer to a binprm for which the policy decision is being made * @inode: pointer to an inode for which the policy decision is being made * @func: IMA hook identifier * @mask: requested action (MAY_READ | MAY_WRITE | MAY_APPEND | MAY_EXEC) @@ -362,8 +365,8 @@ static int get_subaction(struct ima_rule_entry *rule, enum ima_hooks func) * list when walking it. Reads are many orders of magnitude more numerous * than writes so ima_match_policy() is classical RCU candidate. 
*/ -int ima_match_policy(struct inode *inode, enum ima_hooks func, int mask, - int flags, int *pcr) +int ima_match_policy(struct linux_binprm *bprm, struct inode *inode, + enum ima_hooks func, int mask, int flags, int *pcr) { struct ima_rule_entry *entry; int action = 0, actmask = flags | (flags << 1); @@ -374,7 +377,7 @@ int ima_match_policy(struct inode *inode, enum ima_hooks func, int mask, if (!(entry->action & actmask)) continue; - if (!ima_match_rules(entry, inode, func, mask)) + if (!ima_match_rules(entry, bprm, inode, func, mask)) continue; action |= entry->flags & IMA_ACTION_FLAGS; -- 2.14.1.342.g6490525c54-goog |
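The practical effect of passing bprm through the policy match is that an LSM-specific matcher can look at the credentials the task is about to commit, rather than the caller's. A minimal sketch of that idea (the helper below is illustrative only, not part of the patch):

    /*
     * Sketch: choose which credentials a BPRM_CHECK rule should match
     * against. bprm->cred holds what commit_creds() will install once
     * the exec succeeds; current_cred() still reflects the caller.
     */
    static const struct cred *ima_rule_cred(struct linux_binprm *bprm)
    {
            return bprm ? bprm->cred : current_cred();
    }

The AppArmor patch in the next message applies exactly this distinction when it derives the label from cred_ctx(bprm->cred) instead of from the current task.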
From: Matthew G. <mj...@go...> - 2017-08-29 19:32:40
IMA has support for matching based on security context, but this is currently limited to modules that implement the audit_rule_match hook. The infrastructure around this seems to depend on having 32 bit security IDs to reference the policy associated with tasks or files, which doesn't seem to be a concept that Apparmor really has. So, this implementation ignores the abstraction and calls through to Apparmor directly. This seems ugly, so is there a better way to achieve this? --- include/linux/apparmor.h | 16 ++++++++++++++++ security/apparmor/lib.c | 32 ++++++++++++++++++++++++++++++++ security/integrity/ima/ima_policy.c | 24 +++++++++++++++++++++--- 3 files changed, 69 insertions(+), 3 deletions(-) create mode 100644 include/linux/apparmor.h diff --git a/include/linux/apparmor.h b/include/linux/apparmor.h new file mode 100644 index 000000000000..d8ac3f706437 --- /dev/null +++ b/include/linux/apparmor.h @@ -0,0 +1,16 @@ +#ifndef _APPARMOR_H_ + +struct linux_binprm; + +#ifdef CONFIG_SECURITY_APPARMOR +int aa_task_profile_match(struct linux_binprm *bprm, struct task_struct *tsk, + const char *profile); +#else +static int aa_task_profile_match(struct linux_binprm *bprm, + struct task_struct *tsk, const char *profile) +{ + return 0; +} +#endif + +#endif diff --git a/security/apparmor/lib.c b/security/apparmor/lib.c index 08ca26bcca77..04d087e4a1a3 100644 --- a/security/apparmor/lib.c +++ b/security/apparmor/lib.c @@ -17,9 +17,11 @@ #include <linux/slab.h> #include <linux/string.h> #include <linux/vmalloc.h> +#include <linux/apparmor.h> #include "include/audit.h" #include "include/apparmor.h" +#include "include/context.h" #include "include/lib.h" #include "include/perms.h" #include "include/policy.h" @@ -385,6 +387,36 @@ void aa_profile_match_label(struct aa_profile *profile, struct aa_label *label, aa_label_match(profile, label, state, false, request, perms); } +/** + * aa_task_profile_match - check whether a task is using the specified profile + * @bprm - bprm structure to extract creds from. + * @tsk - task to verify. Ignored if @bprm is not NULL. + * @name - name of the profile to search for. 
+ */ +int aa_task_profile_match(struct linux_binprm *bprm, struct task_struct *tsk, + const char *name) +{ + struct aa_label *label; + struct aa_profile *profile; + struct aa_task_ctx *ctx; + struct label_it i; + const struct cred *cred; + + if (bprm) { + ctx = cred_ctx(bprm->cred); + label = aa_get_newest_label(ctx->label); + } else { + cred = __task_cred(tsk); + label = aa_get_newest_cred_label(cred); + } + + label_for_each(i, label, profile) { + if (strcmp(name, profile->base.name) == 0) + return 1; + } + + return 0; +} /* currently unused */ int aa_profile_label_perm(struct aa_profile *profile, struct aa_profile *target, diff --git a/security/integrity/ima/ima_policy.c b/security/integrity/ima/ima_policy.c index 93f6af4e3a20..556a1292734c 100644 --- a/security/integrity/ima/ima_policy.c +++ b/security/integrity/ima/ima_policy.c @@ -20,6 +20,7 @@ #include <linux/rculist.h> #include <linux/genhd.h> #include <linux/seq_file.h> +#include <linux/apparmor.h> #include "ima.h" @@ -47,9 +48,9 @@ int ima_policy_flag; static int temp_ima_appraise; -#define MAX_LSM_RULES 6 +#define MAX_LSM_RULES 7 enum lsm_rule_types { LSM_OBJ_USER, LSM_OBJ_ROLE, LSM_OBJ_TYPE, - LSM_SUBJ_USER, LSM_SUBJ_ROLE, LSM_SUBJ_TYPE + LSM_SUBJ_USER, LSM_SUBJ_ROLE, LSM_SUBJ_TYPE, LSM_AA_PROFILE }; enum policy_types { ORIGINAL_TCB = 1, DEFAULT_TCB }; @@ -313,6 +314,11 @@ static bool ima_match_rules(struct ima_rule_entry *rule, Audit_equal, rule->lsm[i].rule, NULL); + break; + case LSM_AA_PROFILE: + rc = aa_task_profile_match(bprm, tsk, + rule->lsm[i].args_p); + break; default: break; } @@ -527,7 +533,7 @@ enum { Opt_audit, Opt_obj_user, Opt_obj_role, Opt_obj_type, Opt_subj_user, Opt_subj_role, Opt_subj_type, - Opt_func, Opt_mask, Opt_fsmagic, + Opt_aa_profile, Opt_func, Opt_mask, Opt_fsmagic, Opt_fsuuid, Opt_uid_eq, Opt_euid_eq, Opt_fowner_eq, Opt_uid_gt, Opt_euid_gt, Opt_fowner_gt, Opt_uid_lt, Opt_euid_lt, Opt_fowner_lt, @@ -547,6 +553,7 @@ static match_table_t policy_tokens = { {Opt_subj_user, "subj_user=%s"}, {Opt_subj_role, "subj_role=%s"}, {Opt_subj_type, "subj_type=%s"}, + {Opt_aa_profile, "aa_profile=%s"}, {Opt_func, "func=%s"}, {Opt_mask, "mask=%s"}, {Opt_fsmagic, "fsmagic=%s"}, @@ -847,6 +854,13 @@ static int ima_parse_rule(char *rule, struct ima_rule_entry *entry) LSM_SUBJ_TYPE, AUDIT_SUBJ_TYPE); break; + case Opt_aa_profile: + ima_log_string(ab, "aa_profile", args[0].from); + entry->lsm[LSM_AA_PROFILE].args_p = match_strdup(args); + entry->lsm[LSM_AA_PROFILE].rule = 1; + if (!entry->lsm[LSM_AA_PROFILE].args_p) + result = -ENOMEM; + break; case Opt_appraise_type: if (entry->action != APPRAISE) { result = -EINVAL; @@ -1138,6 +1152,10 @@ int ima_policy_show(struct seq_file *m, void *v) seq_printf(m, pt(Opt_subj_type), (char *)entry->lsm[i].args_p); break; + case LSM_AA_PROFILE: + seq_printf(m, pt(Opt_aa_profile), + (char *)entry->lsm[i].args_p); + break; } } } -- 2.14.1.342.g6490525c54-goog |
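With the parser additions above, the new option combines with the existing rule syntax. For example, rules along these lines (the profile name is purely illustrative) could be loaded through /sys/kernel/security/ima/policy like any other rule:

    measure func=BPRM_CHECK aa_profile=usr.bin.example
    appraise func=BPRM_CHECK appraise_type=imasig aa_profile=usr.bin.example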
From: Mimi Z. <zo...@li...> - 2017-08-28 18:30:51
On Mon, 2017-08-28 at 05:13 +0100, Al Viro wrote: > On Tue, Aug 15, 2017 at 10:43:55AM -0400, Mimi Zohar wrote: > > From: Christoph Hellwig <hc...@ls...> > > > > Add a new ->integrity_read file operation to read data for integrity > > hash collection. This is defined to be equivalent to ->read_iter, > > except that it will be called with the i_rwsem held exclusively. > > Hmm... I'm really tempted to add default_integrity_read() that would > just call ->read_iter(), with boilerplate part becoming > .integrity_read = default_integrity_read How can it automatically call the fs read_iter() without knowing if the fs read_iter() takes the i_rwsem? Or are you suggesting that the default_integrity_read is defined as generic_file_read_iter()? Mimi > Note that all stuff accessed in it would be fresh in caches, so > it's not as if we had serious overhead there. And we are going > to be reading from file, anyway... > > I agree that it should be an opt-in from filesystem; default is still > "don't know how to read, sod off". It's just that telling at the > glance whether it's supposed to be a simple case or something tricky > is needed would be simpler that way and it might turn out to be > more robust that way... > -- > To unsubscribe from this list: send the line "unsubscribe linux-security-module" in > the body of a message to maj...@vg... > More majordomo info at http://vger.kernel.org/majordomo-info.html > |
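Al's suggestion, sketched minimally (assuming the default really is just a pass-through to the filesystem's own ->read_iter(), with i_rwsem already held exclusively by the integrity caller; the body below is illustrative, not taken from a posted patch):

    static ssize_t default_integrity_read(struct kiocb *iocb,
                                          struct iov_iter *iter)
    {
            struct file *file = iocb->ki_filp;

            /* The integrity code takes i_rwsem before calling in, so this
             * simply forwards to the filesystem's regular ->read_iter(). */
            return file->f_op->read_iter(iocb, iter);
    }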
From: Yan K. <sam...@gm...> - 2017-08-28 08:14:55
For Linux v4.4, evmctl import /etc/keys/x509_ima.der 0x31282665 fails with: add_key failed errno: Invalid argument (22). Has something changed?
From: Al V. <vi...@Ze...> - 2017-08-28 04:18:14
On Wed, Aug 16, 2017 at 12:43:58PM +1000, James Morris wrote: > On Tue, 15 Aug 2017, Mimi Zohar wrote: > > > To resolve this locking problem, this patch set introduces a new > > ->integrity_read file operation method. Until all filesystems > > define the new ->integrity_read method, files that were previously > > measured might not be currently measured and files that were > > previously appraised might fail to be appraised properly. > > Are there any such filesystems in mainline which are not getting an > integrity_read method in this patchset? A lot - all network ones, minix/sysv/ufs, isofs/udf/efs, fat/hfs/hfs+/hpfs/affs/ntfs, ecryptfs, reiserfs, romfs, cramfs, bfs, qnx[46], etc. |
From: Al V. <vi...@Ze...> - 2017-08-28 04:13:23
On Tue, Aug 15, 2017 at 10:43:55AM -0400, Mimi Zohar wrote: > From: Christoph Hellwig <hc...@ls...> > > Add a new ->integrity_read file operation to read data for integrity > hash collection. This is defined to be equivalent to ->read_iter, > except that it will be called with the i_rwsem held exclusively. Hmm... I'm really tempted to add default_integrity_read() that would just call ->read_iter(), with boilerplate part becoming .integrity_read = default_integrity_read Note that all stuff accessed in it would be fresh in caches, so it's not as if we had serious overhead there. And we are going to be reading from file, anyway... I agree that it should be an opt-in from filesystem; default is still "don't know how to read, sod off". It's just that telling at the glance whether it's supposed to be a simple case or something tricky is needed would be simpler that way and it might turn out to be more robust that way... |
From: Mimi Z. <zo...@li...> - 2017-08-25 17:46:51
On Thu, 2017-08-24 at 14:54 +0000, Magalhaes, Guilherme (Brazil R&D-CL) wrote: > Hi Mehmet, > How is the work going with this patch? Are there any open design > decisions? > I remember at least one, which is about creating a new IMA > namespace. There are issues with this namespace creation, since it > depends on a mount namespace creation. > Could we create a new IMA namespace and set it to the current > process through a user space request? There has to be a better way. I'll be looking at this patch set shortly. Mimi
From: Yan K. <sam...@gm...> - 2017-08-25 04:09:59
I enabled IMA-Appraisal and EVM following these two guides: 1) https://sourceforge.net/p/linux-ima/wiki/Home/#compiling-the-kernel-with-evmima-appraisal-enabled 2) http://linux-ima.sourceforge.net/evmctl.1.html I have two questions (kernel version: v4.8.0): 1) When setting the new policy with "cat /etc/ima_policy > /sys/kernel/security/ima/policy", it reports "[ 366.008465] IMA: signed policy file (specified as an absolute pathname) required". 2) When I read a signed file, it reports "5677] integrity: Request for unknown key 'id:8f79faca' err -11". There are also some differences between "_ima" and ".ima". Is there a better guide for enabling IMA-Appraisal and EVM? Thanks! Sam
From: Magalhaes, G. (B. R&D-CL) <gui...@hp...> - 2017-08-24 14:54:33
Hi Mehmet, How is the work going with this patch? Are there any open design decisions? I remember at least one, which is about creating a new IMA namespace. There are issues with this namespace creation, since it depends on a mount namespace creation. Could we create a new IMA namespace and set it to the current process through a user space request? -- Guilherme > -----Original Message----- > From: Mehmet Kayaalp [mailto:mka...@li...] > Sent: Friday, 21 July 2017 10:04 > To: Magalhaes, Guilherme (Brazil R&D-CL) <gui...@hp...> > Cc: Mimi Zohar <zo...@li...>; linux-ima- > de...@li... > Subject: Re: [Linux-ima-devel] [RFC 2/4] ima: use namespaced flags for > IMA_AUDITED on each namespace > > > > On Jul 19, 2017, at 4:09 PM, Magalhaes, Guilherme (Brazil R&D-CL) > <gui...@hp...> wrote: > > > > Hi Mimi, > > For all solutions with a single additional tree look up, the namespaced flags > tree is placed inside the iint structure. This is what the POC posted by Mehmet > does, and this mechanism is also the solution for 'namespacing' the > integrity_iint_find() function. > > See the patch set I posted yesterday [1], which does not place it inside the > iint, and creates an rbtree as part of the namespace struct. > > > However, this option suffers from the costly lock/unlock of hundreds of inodes > when a namespace is released to clean up namespaced flags, which makes this > solution not acceptable for upstreaming, as you indicated. > > As a result, the namespace cleanup does not touch inodes or iints. However, we > didn't save the inode-->namespace association, so we don't have a way of > removing the flags when the inode is removed. But we keep a copy of ino and > generation, so if the inode matches but ino and generation do not match, we can > reset and reuse it. > > > We are really convinced that solution #2 below is the best possible > solution, which has 2 additional tree look ups for namespaced flags references > (2 in addition to the existing iint tree look up: the first look up gets the list of > flags for a given namespace and the second look up gets the target flag from the > list for a given iint). > > The extra lookup is the namespace one. We should be able to tell the current > namespace without needing an rbtree or a hash table. > > [1]: https://sourceforge.net/p/linux-ima/mailman/message/35956610/ > > Mehmet
From: Gilad Ben-Y. <gi...@be...> - 2017-08-24 14:23:32
The code sample is waiting for an async. crypto op completion. Adapt sample to use the new generic infrastructure to do the same. This also fixes a possible data coruption bug created by the use of wait_for_completion_interruptible() without dealing correctly with an interrupt aborting the wait prior to the async op finishing. Signed-off-by: Gilad Ben-Yossef <gi...@be...> --- Documentation/crypto/api-samples.rst | 52 +++++++----------------------------- 1 file changed, 10 insertions(+), 42 deletions(-) diff --git a/Documentation/crypto/api-samples.rst b/Documentation/crypto/api-samples.rst index 2531948..006827e 100644 --- a/Documentation/crypto/api-samples.rst +++ b/Documentation/crypto/api-samples.rst @@ -7,59 +7,27 @@ Code Example For Symmetric Key Cipher Operation :: - struct tcrypt_result { - struct completion completion; - int err; - }; - /* tie all data structures together */ struct skcipher_def { struct scatterlist sg; struct crypto_skcipher *tfm; struct skcipher_request *req; - struct tcrypt_result result; + struct crypto_wait wait; }; - /* Callback function */ - static void test_skcipher_cb(struct crypto_async_request *req, int error) - { - struct tcrypt_result *result = req->data; - - if (error == -EINPROGRESS) - return; - result->err = error; - complete(&result->completion); - pr_info("Encryption finished successfully\n"); - } - /* Perform cipher operation */ static unsigned int test_skcipher_encdec(struct skcipher_def *sk, int enc) { - int rc = 0; + int rc; if (enc) - rc = crypto_skcipher_encrypt(sk->req); + rc = crypto_wait_req(crypto_skcipher_encrypt(sk->req), &sk->wait); else - rc = crypto_skcipher_decrypt(sk->req); - - switch (rc) { - case 0: - break; - case -EINPROGRESS: - case -EBUSY: - rc = wait_for_completion_interruptible( - &sk->result.completion); - if (!rc && !sk->result.err) { - reinit_completion(&sk->result.completion); - break; - } - default: - pr_info("skcipher encrypt returned with %d result %d\n", - rc, sk->result.err); - break; - } - init_completion(&sk->result.completion); + rc = crypto_wait_req(crypto_skcipher_decrypt(sk->req), &sk->wait); + + if (rc) + pr_info("skcipher encrypt returned with result %d\n", rc); return rc; } @@ -89,8 +57,8 @@ Code Example For Symmetric Key Cipher Operation } skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, - test_skcipher_cb, - &sk.result); + crypto_req_done, + &sk.wait); /* AES 256 with random key */ get_random_bytes(&key, 32); @@ -122,7 +90,7 @@ Code Example For Symmetric Key Cipher Operation /* We encrypt one block */ sg_init_one(&sk.sg, scratchpad, 16); skcipher_request_set_crypt(req, &sk.sg, &sk.sg, 16, ivdata); - init_completion(&sk.result.completion); + crypto_init_wait(&sk.wait); /* encrypt data */ ret = test_skcipher_encdec(&sk, 1); -- 2.1.4 |
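For readers skimming the series: the new helpers used throughout these conversions (crypto_init_wait(), crypto_req_done(), crypto_wait_req(), DECLARE_CRYPTO_WAIT()) wrap the completion-based pattern that each driver used to open-code. Conceptually they behave roughly like the sketch below (simplified; the exact definitions are introduced elsewhere in the series):

    struct crypto_wait {
            struct completion completion;
            int err;
    };

    /* completion callback installed via *_request_set_callback() */
    void crypto_req_done(struct crypto_async_request *req, int err)
    {
            struct crypto_wait *wait = req->data;

            if (err == -EINPROGRESS)
                    return;         /* backlogged request was started; keep waiting */
            wait->err = err;
            complete(&wait->completion);
    }

    /* turn an -EINPROGRESS/-EBUSY return into a synchronous wait */
    int crypto_wait_req(int err, struct crypto_wait *wait)
    {
            if (err == -EINPROGRESS || err == -EBUSY) {
                    wait_for_completion(&wait->completion);
                    reinit_completion(&wait->completion);
                    err = wait->err;
            }
            return err;
    }

Because the wait is uninterruptible, callers no longer need the error-prone wait_for_completion_interruptible() handling mentioned in the commit messages.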
From: Gilad Ben-Y. <gi...@be...> - 2017-08-24 14:23:22
The mediatek driver starts several async crypto ops and waits for their completions. Move it over to generic code doing the same. Signed-off-by: Gilad Ben-Yossef <gi...@be...> Acked-by: Ryder Lee <ryd...@me...> --- drivers/crypto/mediatek/mtk-aes.c | 31 +++++-------------------------- 1 file changed, 5 insertions(+), 26 deletions(-) diff --git a/drivers/crypto/mediatek/mtk-aes.c b/drivers/crypto/mediatek/mtk-aes.c index 9e845e8..e2c7c95 100644 --- a/drivers/crypto/mediatek/mtk-aes.c +++ b/drivers/crypto/mediatek/mtk-aes.c @@ -137,11 +137,6 @@ struct mtk_aes_gcm_ctx { struct crypto_skcipher *ctr; }; -struct mtk_aes_gcm_setkey_result { - int err; - struct completion completion; -}; - struct mtk_aes_drv { struct list_head dev_list; /* Device list lock */ @@ -936,17 +931,6 @@ static int mtk_aes_gcm_crypt(struct aead_request *req, u64 mode) &req->base); } -static void mtk_gcm_setkey_done(struct crypto_async_request *req, int err) -{ - struct mtk_aes_gcm_setkey_result *result = req->data; - - if (err == -EINPROGRESS) - return; - - result->err = err; - complete(&result->completion); -} - /* * Because of the hardware limitation, we need to pre-calculate key(H) * for the GHASH operation. The result of the encryption operation @@ -962,7 +946,7 @@ static int mtk_aes_gcm_setkey(struct crypto_aead *aead, const u8 *key, u32 hash[4]; u8 iv[8]; - struct mtk_aes_gcm_setkey_result result; + struct crypto_wait wait; struct scatterlist sg[1]; struct skcipher_request req; @@ -1002,22 +986,17 @@ static int mtk_aes_gcm_setkey(struct crypto_aead *aead, const u8 *key, if (!data) return -ENOMEM; - init_completion(&data->result.completion); + crypto_init_wait(&data->wait); sg_init_one(data->sg, &data->hash, AES_BLOCK_SIZE); skcipher_request_set_tfm(&data->req, ctr); skcipher_request_set_callback(&data->req, CRYPTO_TFM_REQ_MAY_SLEEP | CRYPTO_TFM_REQ_MAY_BACKLOG, - mtk_gcm_setkey_done, &data->result); + crypto_req_done, &data->wait); skcipher_request_set_crypt(&data->req, data->sg, data->sg, AES_BLOCK_SIZE, data->iv); - err = crypto_skcipher_encrypt(&data->req); - if (err == -EINPROGRESS || err == -EBUSY) { - err = wait_for_completion_interruptible( - &data->result.completion); - if (!err) - err = data->result.err; - } + err = crypto_wait_req(crypto_skcipher_encrypt(&data->req), + &data->wait); if (err) goto out; -- 2.1.4 |
From: Gilad Ben-Y. <gi...@be...> - 2017-08-24 14:23:06
The qce driver starts several async crypto ops and waits for their completions. Move it over to generic code doing the same. Signed-off-by: Gilad Ben-Yossef <gi...@be...> --- drivers/crypto/qce/sha.c | 30 ++++-------------------------- 1 file changed, 4 insertions(+), 26 deletions(-) diff --git a/drivers/crypto/qce/sha.c b/drivers/crypto/qce/sha.c index 47e114a..53227d7 100644 --- a/drivers/crypto/qce/sha.c +++ b/drivers/crypto/qce/sha.c @@ -349,28 +349,12 @@ static int qce_ahash_digest(struct ahash_request *req) return qce->async_req_enqueue(tmpl->qce, &req->base); } -struct qce_ahash_result { - struct completion completion; - int error; -}; - -static void qce_digest_complete(struct crypto_async_request *req, int error) -{ - struct qce_ahash_result *result = req->data; - - if (error == -EINPROGRESS) - return; - - result->error = error; - complete(&result->completion); -} - static int qce_ahash_hmac_setkey(struct crypto_ahash *tfm, const u8 *key, unsigned int keylen) { unsigned int digestsize = crypto_ahash_digestsize(tfm); struct qce_sha_ctx *ctx = crypto_tfm_ctx(&tfm->base); - struct qce_ahash_result result; + struct crypto_wait wait; struct ahash_request *req; struct scatterlist sg; unsigned int blocksize; @@ -405,9 +389,9 @@ static int qce_ahash_hmac_setkey(struct crypto_ahash *tfm, const u8 *key, goto err_free_ahash; } - init_completion(&result.completion); + crypto_init_wait(&wait); ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, - qce_digest_complete, &result); + crypto_req_done, &wait); crypto_ahash_clear_flags(ahash_tfm, ~0); buf = kzalloc(keylen + QCE_MAX_ALIGN_SIZE, GFP_KERNEL); @@ -420,13 +404,7 @@ static int qce_ahash_hmac_setkey(struct crypto_ahash *tfm, const u8 *key, sg_init_one(&sg, buf, keylen); ahash_request_set_crypt(req, &sg, ctx->authkey, keylen); - ret = crypto_ahash_digest(req); - if (ret == -EINPROGRESS || ret == -EBUSY) { - ret = wait_for_completion_interruptible(&result.completion); - if (!ret) - ret = result.error; - } - + ret = crypto_wait_req(crypto_ahash_digest(req), &wait); if (ret) crypto_ahash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); -- 2.1.4 |
From: Gilad Ben-Y. <gi...@be...> - 2017-08-24 14:22:53
The talitos driver starts several async crypto ops and waits for their completions. Move it over to generic code doing the same. Signed-off-by: Gilad Ben-Yossef <gi...@be...> --- drivers/crypto/talitos.c | 38 +++++--------------------------------- 1 file changed, 5 insertions(+), 33 deletions(-) diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c index 79791c6..194a307 100644 --- a/drivers/crypto/talitos.c +++ b/drivers/crypto/talitos.c @@ -2037,22 +2037,6 @@ static int ahash_import(struct ahash_request *areq, const void *in) return 0; } -struct keyhash_result { - struct completion completion; - int err; -}; - -static void keyhash_complete(struct crypto_async_request *req, int err) -{ - struct keyhash_result *res = req->data; - - if (err == -EINPROGRESS) - return; - - res->err = err; - complete(&res->completion); -} - static int keyhash(struct crypto_ahash *tfm, const u8 *key, unsigned int keylen, u8 *hash) { @@ -2060,10 +2044,10 @@ static int keyhash(struct crypto_ahash *tfm, const u8 *key, unsigned int keylen, struct scatterlist sg[1]; struct ahash_request *req; - struct keyhash_result hresult; + struct crypto_wait wait; int ret; - init_completion(&hresult.completion); + crypto_init_wait(&wait); req = ahash_request_alloc(tfm, GFP_KERNEL); if (!req) @@ -2072,25 +2056,13 @@ static int keyhash(struct crypto_ahash *tfm, const u8 *key, unsigned int keylen, /* Keep tfm keylen == 0 during hash of the long key */ ctx->keylen = 0; ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, - keyhash_complete, &hresult); + crypto_req_done, &wait); sg_init_one(&sg[0], key, keylen); ahash_request_set_crypt(req, sg, hash, keylen); - ret = crypto_ahash_digest(req); - switch (ret) { - case 0: - break; - case -EINPROGRESS: - case -EBUSY: - ret = wait_for_completion_interruptible( - &hresult.completion); - if (!ret) - ret = hresult.err; - break; - default: - break; - } + ret = crypto_wait_req(crypto_ahash_digest(req), &wait); + ahash_request_free(req); return ret; -- 2.1.4 |
From: Gilad Ben-Y. <gi...@be...> - 2017-08-24 14:22:41
tcrypt starts several async crypto ops and waits for their completions. Move it over to generic code doing the same. Signed-off-by: Gilad Ben-Yossef <gi...@be...> --- crypto/tcrypt.c | 84 +++++++++++++++++---------------------------------------- 1 file changed, 25 insertions(+), 59 deletions(-) diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c index 0022a18..802aa81 100644 --- a/crypto/tcrypt.c +++ b/crypto/tcrypt.c @@ -79,34 +79,11 @@ static char *check[] = { NULL }; -struct tcrypt_result { - struct completion completion; - int err; -}; - -static void tcrypt_complete(struct crypto_async_request *req, int err) -{ - struct tcrypt_result *res = req->data; - - if (err == -EINPROGRESS) - return; - - res->err = err; - complete(&res->completion); -} - static inline int do_one_aead_op(struct aead_request *req, int ret) { - if (ret == -EINPROGRESS || ret == -EBUSY) { - struct tcrypt_result *tr = req->base.data; + struct crypto_wait *wait = req->base.data; - ret = wait_for_completion_interruptible(&tr->completion); - if (!ret) - ret = tr->err; - reinit_completion(&tr->completion); - } - - return ret; + return crypto_wait_req(ret, wait); } static int test_aead_jiffies(struct aead_request *req, int enc, @@ -248,7 +225,7 @@ static void test_aead_speed(const char *algo, int enc, unsigned int secs, char *axbuf[XBUFSIZE]; unsigned int *b_size; unsigned int iv_len; - struct tcrypt_result result; + struct crypto_wait wait; iv = kzalloc(MAX_IVLEN, GFP_KERNEL); if (!iv) @@ -284,7 +261,7 @@ static void test_aead_speed(const char *algo, int enc, unsigned int secs, goto out_notfm; } - init_completion(&result.completion); + crypto_init_wait(&wait); printk(KERN_INFO "\ntesting speed of %s (%s) %s\n", algo, get_driver_name(crypto_aead, tfm), e); @@ -296,7 +273,7 @@ static void test_aead_speed(const char *algo, int enc, unsigned int secs, } aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, - tcrypt_complete, &result); + crypto_req_done, &wait); i = 0; do { @@ -397,21 +374,16 @@ static void test_hash_sg_init(struct scatterlist *sg) static inline int do_one_ahash_op(struct ahash_request *req, int ret) { - if (ret == -EINPROGRESS || ret == -EBUSY) { - struct tcrypt_result *tr = req->base.data; + struct crypto_wait *wait = req->base.data; - wait_for_completion(&tr->completion); - reinit_completion(&tr->completion); - ret = tr->err; - } - return ret; + return crypto_wait_req(ret, wait); } struct test_mb_ahash_data { struct scatterlist sg[TVMEMSIZE]; char result[64]; struct ahash_request *req; - struct tcrypt_result tresult; + struct crypto_wait wait; char *xbuf[XBUFSIZE]; }; @@ -440,7 +412,7 @@ static void test_mb_ahash_speed(const char *algo, unsigned int sec, if (testmgr_alloc_buf(data[i].xbuf)) goto out; - init_completion(&data[i].tresult.completion); + crypto_init_wait(&data[i].wait); data[i].req = ahash_request_alloc(tfm, GFP_KERNEL); if (!data[i].req) { @@ -449,8 +421,8 @@ static void test_mb_ahash_speed(const char *algo, unsigned int sec, goto out; } - ahash_request_set_callback(data[i].req, 0, - tcrypt_complete, &data[i].tresult); + ahash_request_set_callback(data[i].req, 0, crypto_req_done, + &data[i].wait); test_hash_sg_init(data[i].sg); } @@ -492,16 +464,16 @@ static void test_mb_ahash_speed(const char *algo, unsigned int sec, if (ret) break; - complete(&data[k].tresult.completion); - data[k].tresult.err = 0; + crypto_req_done(&data[k].req->base, 0); } for (j = 0; j < k; j++) { - struct tcrypt_result *tr = &data[j].tresult; + struct crypto_wait *wait = &data[j].wait; + int wait_ret; - 
wait_for_completion(&tr->completion); - if (tr->err) - ret = tr->err; + wait_ret = crypto_wait_req(-EINPROGRESS, wait); + if (wait_ret) + ret = wait_ret; } end = get_cycles(); @@ -679,7 +651,7 @@ static void test_ahash_speed_common(const char *algo, unsigned int secs, struct hash_speed *speed, unsigned mask) { struct scatterlist sg[TVMEMSIZE]; - struct tcrypt_result tresult; + struct crypto_wait wait; struct ahash_request *req; struct crypto_ahash *tfm; char *output; @@ -708,9 +680,9 @@ static void test_ahash_speed_common(const char *algo, unsigned int secs, goto out; } - init_completion(&tresult.completion); + crypto_init_wait(&wait); ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, - tcrypt_complete, &tresult); + crypto_req_done, &wait); output = kmalloc(MAX_DIGEST_SIZE, GFP_KERNEL); if (!output) @@ -765,15 +737,9 @@ static void test_hash_speed(const char *algo, unsigned int secs, static inline int do_one_acipher_op(struct skcipher_request *req, int ret) { - if (ret == -EINPROGRESS || ret == -EBUSY) { - struct tcrypt_result *tr = req->base.data; - - wait_for_completion(&tr->completion); - reinit_completion(&tr->completion); - ret = tr->err; - } + struct crypto_wait *wait = req->base.data; - return ret; + return crypto_wait_req(ret, wait); } static int test_acipher_jiffies(struct skcipher_request *req, int enc, @@ -853,7 +819,7 @@ static void test_skcipher_speed(const char *algo, int enc, unsigned int secs, unsigned int tcount, u8 *keysize, bool async) { unsigned int ret, i, j, k, iv_len; - struct tcrypt_result tresult; + struct crypto_wait wait; const char *key; char iv[128]; struct skcipher_request *req; @@ -866,7 +832,7 @@ static void test_skcipher_speed(const char *algo, int enc, unsigned int secs, else e = "decryption"; - init_completion(&tresult.completion); + crypto_init_wait(&wait); tfm = crypto_alloc_skcipher(algo, 0, async ? 0 : CRYPTO_ALG_ASYNC); @@ -887,7 +853,7 @@ static void test_skcipher_speed(const char *algo, int enc, unsigned int secs, } skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, - tcrypt_complete, &tresult); + crypto_req_done, &wait); i = 0; do { -- 2.1.4 |
From: Gilad Ben-Y. <gi...@be...> - 2017-08-24 14:22:28
ima starts several async crypto ops and waits for their completions. Move it over to generic code doing the same. Signed-off-by: Gilad Ben-Yossef <gi...@be...> Acked-by: Mimi Zohar <zo...@li...> --- security/integrity/ima/ima_crypto.c | 56 +++++++++++-------------------------- 1 file changed, 17 insertions(+), 39 deletions(-) diff --git a/security/integrity/ima/ima_crypto.c b/security/integrity/ima/ima_crypto.c index a856d8c..9057b16 100644 --- a/security/integrity/ima/ima_crypto.c +++ b/security/integrity/ima/ima_crypto.c @@ -27,11 +27,6 @@ #include "ima.h" -struct ahash_completion { - struct completion completion; - int err; -}; - /* minimum file size for ahash use */ static unsigned long ima_ahash_minsize; module_param_named(ahash_minsize, ima_ahash_minsize, ulong, 0644); @@ -196,30 +191,13 @@ static void ima_free_atfm(struct crypto_ahash *tfm) crypto_free_ahash(tfm); } -static void ahash_complete(struct crypto_async_request *req, int err) +static inline int ahash_wait(int err, struct crypto_wait *wait) { - struct ahash_completion *res = req->data; - if (err == -EINPROGRESS) - return; - res->err = err; - complete(&res->completion); -} + err = crypto_wait_req(err, wait); -static int ahash_wait(int err, struct ahash_completion *res) -{ - switch (err) { - case 0: - break; - case -EINPROGRESS: - case -EBUSY: - wait_for_completion(&res->completion); - reinit_completion(&res->completion); - err = res->err; - /* fall through */ - default: + if (err) pr_crit_ratelimited("ahash calculation failed: err: %d\n", err); - } return err; } @@ -233,7 +211,7 @@ static int ima_calc_file_hash_atfm(struct file *file, int rc, read = 0, rbuf_len, active = 0, ahash_rc = 0; struct ahash_request *req; struct scatterlist sg[1]; - struct ahash_completion res; + struct crypto_wait wait; size_t rbuf_size[2]; hash->length = crypto_ahash_digestsize(tfm); @@ -242,12 +220,12 @@ static int ima_calc_file_hash_atfm(struct file *file, if (!req) return -ENOMEM; - init_completion(&res.completion); + crypto_init_wait(&wait); ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP, - ahash_complete, &res); + crypto_req_done, &wait); - rc = ahash_wait(crypto_ahash_init(req), &res); + rc = ahash_wait(crypto_ahash_init(req), &wait); if (rc) goto out1; @@ -288,7 +266,7 @@ static int ima_calc_file_hash_atfm(struct file *file, * read/request, wait for the completion of the * previous ahash_update() request. */ - rc = ahash_wait(ahash_rc, &res); + rc = ahash_wait(ahash_rc, &wait); if (rc) goto out3; } @@ -304,7 +282,7 @@ static int ima_calc_file_hash_atfm(struct file *file, * read/request, wait for the completion of the * previous ahash_update() request. 
*/ - rc = ahash_wait(ahash_rc, &res); + rc = ahash_wait(ahash_rc, &wait); if (rc) goto out3; } @@ -318,7 +296,7 @@ static int ima_calc_file_hash_atfm(struct file *file, active = !active; /* swap buffers, if we use two */ } /* wait for the last update request to complete */ - rc = ahash_wait(ahash_rc, &res); + rc = ahash_wait(ahash_rc, &wait); out3: if (read) file->f_mode &= ~FMODE_READ; @@ -327,7 +305,7 @@ static int ima_calc_file_hash_atfm(struct file *file, out2: if (!rc) { ahash_request_set_crypt(req, NULL, hash->digest, 0); - rc = ahash_wait(crypto_ahash_final(req), &res); + rc = ahash_wait(crypto_ahash_final(req), &wait); } out1: ahash_request_free(req); @@ -537,7 +515,7 @@ static int calc_buffer_ahash_atfm(const void *buf, loff_t len, { struct ahash_request *req; struct scatterlist sg; - struct ahash_completion res; + struct crypto_wait wait; int rc, ahash_rc = 0; hash->length = crypto_ahash_digestsize(tfm); @@ -546,12 +524,12 @@ static int calc_buffer_ahash_atfm(const void *buf, loff_t len, if (!req) return -ENOMEM; - init_completion(&res.completion); + crypto_init_wait(&wait); ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP, - ahash_complete, &res); + crypto_req_done, &wait); - rc = ahash_wait(crypto_ahash_init(req), &res); + rc = ahash_wait(crypto_ahash_init(req), &wait); if (rc) goto out; @@ -561,10 +539,10 @@ static int calc_buffer_ahash_atfm(const void *buf, loff_t len, ahash_rc = crypto_ahash_update(req); /* wait for the update request to complete */ - rc = ahash_wait(ahash_rc, &res); + rc = ahash_wait(ahash_rc, &wait); if (!rc) { ahash_request_set_crypt(req, NULL, hash->digest, 0); - rc = ahash_wait(crypto_ahash_final(req), &res); + rc = ahash_wait(crypto_ahash_final(req), &wait); } out: ahash_request_free(req); -- 2.1.4 |
From: Gilad Ben-Y. <gi...@be...> - 2017-08-24 14:22:15
cifs starts an async. crypto op and waits for their completion. Move it over to generic code doing the same. Signed-off-by: Gilad Ben-Yossef <gi...@be...> Acked-by: Pavel Shilovsky <ps...@mi...> --- fs/cifs/smb2ops.c | 30 ++++-------------------------- 1 file changed, 4 insertions(+), 26 deletions(-) diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c index cfacf2c..16fb041 100644 --- a/fs/cifs/smb2ops.c +++ b/fs/cifs/smb2ops.c @@ -1878,22 +1878,6 @@ init_sg(struct smb_rqst *rqst, u8 *sign) return sg; } -struct cifs_crypt_result { - int err; - struct completion completion; -}; - -static void cifs_crypt_complete(struct crypto_async_request *req, int err) -{ - struct cifs_crypt_result *res = req->data; - - if (err == -EINPROGRESS) - return; - - res->err = err; - complete(&res->completion); -} - static int smb2_get_enc_key(struct TCP_Server_Info *server, __u64 ses_id, int enc, u8 *key) { @@ -1934,12 +1918,10 @@ crypt_message(struct TCP_Server_Info *server, struct smb_rqst *rqst, int enc) struct aead_request *req; char *iv; unsigned int iv_len; - struct cifs_crypt_result result = {0, }; + DECLARE_CRYPTO_WAIT(wait); struct crypto_aead *tfm; unsigned int crypt_len = le32_to_cpu(tr_hdr->OriginalMessageSize); - init_completion(&result.completion); - rc = smb2_get_enc_key(server, tr_hdr->SessionId, enc, key); if (rc) { cifs_dbg(VFS, "%s: Could not get %scryption key\n", __func__, @@ -1999,14 +1981,10 @@ crypt_message(struct TCP_Server_Info *server, struct smb_rqst *rqst, int enc) aead_request_set_ad(req, assoc_data_len); aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, - cifs_crypt_complete, &result); + crypto_req_done, &wait); - rc = enc ? crypto_aead_encrypt(req) : crypto_aead_decrypt(req); - - if (rc == -EINPROGRESS || rc == -EBUSY) { - wait_for_completion(&result.completion); - rc = result.err; - } + rc = crypto_wait_req(enc ? crypto_aead_encrypt(req) + : crypto_aead_decrypt(req), &wait); if (!rc && enc) memcpy(&tr_hdr->Signature, sign, SMB2_SIGNATURE_SIZE); -- 2.1.4 |
From: Gilad Ben-Y. <gi...@be...> - 2017-08-24 14:22:04
dm-verity is starting async. crypto ops and waiting for them to complete. Move it over to generic code doing the same. This also avoids a future potential data coruption bug created by the use of wait_for_completion_interruptible() without dealing correctly with an interrupt aborting the wait prior to the async op finishing, should this code ever move to a context where signals are not masked. Signed-off-by: Gilad Ben-Yossef <gi...@be...> CC: Mikulas Patocka <mpa...@re...> --- drivers/md/dm-verity-target.c | 81 +++++++++++-------------------------------- drivers/md/dm-verity.h | 5 --- 2 files changed, 20 insertions(+), 66 deletions(-) diff --git a/drivers/md/dm-verity-target.c b/drivers/md/dm-verity-target.c index 79f18d4..8df08a8 100644 --- a/drivers/md/dm-verity-target.c +++ b/drivers/md/dm-verity-target.c @@ -92,74 +92,33 @@ static sector_t verity_position_at_level(struct dm_verity *v, sector_t block, return block >> (level * v->hash_per_block_bits); } -/* - * Callback function for asynchrnous crypto API completion notification - */ -static void verity_op_done(struct crypto_async_request *base, int err) -{ - struct verity_result *res = (struct verity_result *)base->data; - - if (err == -EINPROGRESS) - return; - - res->err = err; - complete(&res->completion); -} - -/* - * Wait for async crypto API callback - */ -static inline int verity_complete_op(struct verity_result *res, int ret) -{ - switch (ret) { - case 0: - break; - - case -EINPROGRESS: - case -EBUSY: - ret = wait_for_completion_interruptible(&res->completion); - if (!ret) - ret = res->err; - reinit_completion(&res->completion); - break; - - default: - DMERR("verity_wait_hash: crypto op submission failed: %d", ret); - } - - if (unlikely(ret < 0)) - DMERR("verity_wait_hash: crypto op failed: %d", ret); - - return ret; -} - static int verity_hash_update(struct dm_verity *v, struct ahash_request *req, const u8 *data, size_t len, - struct verity_result *res) + struct crypto_wait *wait) { struct scatterlist sg; sg_init_one(&sg, data, len); ahash_request_set_crypt(req, &sg, NULL, len); - return verity_complete_op(res, crypto_ahash_update(req)); + return crypto_wait_req(crypto_ahash_update(req), wait); } /* * Wrapper for crypto_ahash_init, which handles verity salting. 
*/ static int verity_hash_init(struct dm_verity *v, struct ahash_request *req, - struct verity_result *res) + struct crypto_wait *wait) { int r; ahash_request_set_tfm(req, v->tfm); ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP | CRYPTO_TFM_REQ_MAY_BACKLOG, - verity_op_done, (void *)res); - init_completion(&res->completion); + crypto_req_done, (void *)wait); + crypto_init_wait(wait); - r = verity_complete_op(res, crypto_ahash_init(req)); + r = crypto_wait_req(crypto_ahash_init(req), wait); if (unlikely(r < 0)) { DMERR("crypto_ahash_init failed: %d", r); @@ -167,18 +126,18 @@ static int verity_hash_init(struct dm_verity *v, struct ahash_request *req, } if (likely(v->salt_size && (v->version >= 1))) - r = verity_hash_update(v, req, v->salt, v->salt_size, res); + r = verity_hash_update(v, req, v->salt, v->salt_size, wait); return r; } static int verity_hash_final(struct dm_verity *v, struct ahash_request *req, - u8 *digest, struct verity_result *res) + u8 *digest, struct crypto_wait *wait) { int r; if (unlikely(v->salt_size && (!v->version))) { - r = verity_hash_update(v, req, v->salt, v->salt_size, res); + r = verity_hash_update(v, req, v->salt, v->salt_size, wait); if (r < 0) { DMERR("verity_hash_final failed updating salt: %d", r); @@ -187,7 +146,7 @@ static int verity_hash_final(struct dm_verity *v, struct ahash_request *req, } ahash_request_set_crypt(req, NULL, digest, 0); - r = verity_complete_op(res, crypto_ahash_final(req)); + r = crypto_wait_req(crypto_ahash_final(req), wait); out: return r; } @@ -196,17 +155,17 @@ int verity_hash(struct dm_verity *v, struct ahash_request *req, const u8 *data, size_t len, u8 *digest) { int r; - struct verity_result res; + struct crypto_wait wait; - r = verity_hash_init(v, req, &res); + r = verity_hash_init(v, req, &wait); if (unlikely(r < 0)) goto out; - r = verity_hash_update(v, req, data, len, &res); + r = verity_hash_update(v, req, data, len, &wait); if (unlikely(r < 0)) goto out; - r = verity_hash_final(v, req, digest, &res); + r = verity_hash_final(v, req, digest, &wait); out: return r; @@ -389,7 +348,7 @@ int verity_hash_for_block(struct dm_verity *v, struct dm_verity_io *io, * Calculates the digest for the given bio */ int verity_for_io_block(struct dm_verity *v, struct dm_verity_io *io, - struct bvec_iter *iter, struct verity_result *res) + struct bvec_iter *iter, struct crypto_wait *wait) { unsigned int todo = 1 << v->data_dev_block_bits; struct bio *bio = dm_bio_from_per_bio_data(io, v->ti->per_io_data_size); @@ -414,7 +373,7 @@ int verity_for_io_block(struct dm_verity *v, struct dm_verity_io *io, */ sg_set_page(&sg, bv.bv_page, len, bv.bv_offset); ahash_request_set_crypt(req, &sg, NULL, len); - r = verity_complete_op(res, crypto_ahash_update(req)); + r = crypto_wait_req(crypto_ahash_update(req), wait); if (unlikely(r < 0)) { DMERR("verity_for_io_block crypto op failed: %d", r); @@ -482,7 +441,7 @@ static int verity_verify_io(struct dm_verity_io *io) struct dm_verity *v = io->v; struct bvec_iter start; unsigned b; - struct verity_result res; + struct crypto_wait wait; for (b = 0; b < io->n_blocks; b++) { int r; @@ -507,17 +466,17 @@ static int verity_verify_io(struct dm_verity_io *io) continue; } - r = verity_hash_init(v, req, &res); + r = verity_hash_init(v, req, &wait); if (unlikely(r < 0)) return r; start = io->iter; - r = verity_for_io_block(v, io, &io->iter, &res); + r = verity_for_io_block(v, io, &io->iter, &wait); if (unlikely(r < 0)) return r; r = verity_hash_final(v, req, verity_io_real_digest(v, io), - &res); + &wait); 
if (unlikely(r < 0)) return r; diff --git a/drivers/md/dm-verity.h b/drivers/md/dm-verity.h index a59e0ad..b675bc0 100644 --- a/drivers/md/dm-verity.h +++ b/drivers/md/dm-verity.h @@ -90,11 +90,6 @@ struct dm_verity_io { */ }; -struct verity_result { - struct completion completion; - int err; -}; - static inline struct ahash_request *verity_io_hash_req(struct dm_verity *v, struct dm_verity_io *io) { -- 2.1.4 |
From: Gilad Ben-Y. <gi...@be...> - 2017-08-24 14:21:50
fscrypt starts several async. crypto ops and waiting for them to complete. Move it over to generic code doing the same. Signed-off-by: Gilad Ben-Yossef <gi...@be...> --- fs/crypto/crypto.c | 28 ++++------------------------ fs/crypto/fname.c | 36 ++++++------------------------------ fs/crypto/fscrypt_private.h | 10 ---------- fs/crypto/keyinfo.c | 21 +++------------------ 4 files changed, 13 insertions(+), 82 deletions(-) diff --git a/fs/crypto/crypto.c b/fs/crypto/crypto.c index c7835df..80a3cad 100644 --- a/fs/crypto/crypto.c +++ b/fs/crypto/crypto.c @@ -126,21 +126,6 @@ struct fscrypt_ctx *fscrypt_get_ctx(const struct inode *inode, gfp_t gfp_flags) } EXPORT_SYMBOL(fscrypt_get_ctx); -/** - * page_crypt_complete() - completion callback for page crypto - * @req: The asynchronous cipher request context - * @res: The result of the cipher operation - */ -static void page_crypt_complete(struct crypto_async_request *req, int res) -{ - struct fscrypt_completion_result *ecr = req->data; - - if (res == -EINPROGRESS) - return; - ecr->res = res; - complete(&ecr->completion); -} - int fscrypt_do_page_crypto(const struct inode *inode, fscrypt_direction_t rw, u64 lblk_num, struct page *src_page, struct page *dest_page, unsigned int len, @@ -151,7 +136,7 @@ int fscrypt_do_page_crypto(const struct inode *inode, fscrypt_direction_t rw, u8 padding[FS_IV_SIZE - sizeof(__le64)]; } iv; struct skcipher_request *req = NULL; - DECLARE_FS_COMPLETION_RESULT(ecr); + DECLARE_CRYPTO_WAIT(wait); struct scatterlist dst, src; struct fscrypt_info *ci = inode->i_crypt_info; struct crypto_skcipher *tfm = ci->ci_ctfm; @@ -179,7 +164,7 @@ int fscrypt_do_page_crypto(const struct inode *inode, fscrypt_direction_t rw, skcipher_request_set_callback( req, CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP, - page_crypt_complete, &ecr); + crypto_req_done, &wait); sg_init_table(&dst, 1); sg_set_page(&dst, dest_page, len, offs); @@ -187,14 +172,9 @@ int fscrypt_do_page_crypto(const struct inode *inode, fscrypt_direction_t rw, sg_set_page(&src, src_page, len, offs); skcipher_request_set_crypt(req, &src, &dst, len, &iv); if (rw == FS_DECRYPT) - res = crypto_skcipher_decrypt(req); + res = crypto_wait_req(crypto_skcipher_decrypt(req), &wait); else - res = crypto_skcipher_encrypt(req); - if (res == -EINPROGRESS || res == -EBUSY) { - BUG_ON(req->base.data != &ecr); - wait_for_completion(&ecr.completion); - res = ecr.res; - } + res = crypto_wait_req(crypto_skcipher_encrypt(req), &wait); skcipher_request_free(req); if (res) { printk_ratelimited(KERN_ERR diff --git a/fs/crypto/fname.c b/fs/crypto/fname.c index ad9f814..a80a0d3 100644 --- a/fs/crypto/fname.c +++ b/fs/crypto/fname.c @@ -15,21 +15,6 @@ #include "fscrypt_private.h" /** - * fname_crypt_complete() - completion callback for filename crypto - * @req: The asynchronous cipher request context - * @res: The result of the cipher operation - */ -static void fname_crypt_complete(struct crypto_async_request *req, int res) -{ - struct fscrypt_completion_result *ecr = req->data; - - if (res == -EINPROGRESS) - return; - ecr->res = res; - complete(&ecr->completion); -} - -/** * fname_encrypt() - encrypt a filename * * The caller must have allocated sufficient memory for the @oname string. 
@@ -40,7 +25,7 @@ static int fname_encrypt(struct inode *inode, const struct qstr *iname, struct fscrypt_str *oname) { struct skcipher_request *req = NULL; - DECLARE_FS_COMPLETION_RESULT(ecr); + DECLARE_CRYPTO_WAIT(wait); struct fscrypt_info *ci = inode->i_crypt_info; struct crypto_skcipher *tfm = ci->ci_ctfm; int res = 0; @@ -76,17 +61,12 @@ static int fname_encrypt(struct inode *inode, } skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP, - fname_crypt_complete, &ecr); + crypto_req_done, &wait); sg_init_one(&sg, oname->name, cryptlen); skcipher_request_set_crypt(req, &sg, &sg, cryptlen, iv); /* Do the encryption */ - res = crypto_skcipher_encrypt(req); - if (res == -EINPROGRESS || res == -EBUSY) { - /* Request is being completed asynchronously; wait for it */ - wait_for_completion(&ecr.completion); - res = ecr.res; - } + res = crypto_wait_req(crypto_skcipher_encrypt(req), &wait); skcipher_request_free(req); if (res < 0) { printk_ratelimited(KERN_ERR @@ -110,7 +90,7 @@ static int fname_decrypt(struct inode *inode, struct fscrypt_str *oname) { struct skcipher_request *req = NULL; - DECLARE_FS_COMPLETION_RESULT(ecr); + DECLARE_CRYPTO_WAIT(wait); struct scatterlist src_sg, dst_sg; struct fscrypt_info *ci = inode->i_crypt_info; struct crypto_skcipher *tfm = ci->ci_ctfm; @@ -131,7 +111,7 @@ static int fname_decrypt(struct inode *inode, } skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP, - fname_crypt_complete, &ecr); + crypto_req_done, &wait); /* Initialize IV */ memset(iv, 0, FS_CRYPTO_BLOCK_SIZE); @@ -140,11 +120,7 @@ static int fname_decrypt(struct inode *inode, sg_init_one(&src_sg, iname->name, iname->len); sg_init_one(&dst_sg, oname->name, oname->len); skcipher_request_set_crypt(req, &src_sg, &dst_sg, iname->len, iv); - res = crypto_skcipher_decrypt(req); - if (res == -EINPROGRESS || res == -EBUSY) { - wait_for_completion(&ecr.completion); - res = ecr.res; - } + res = crypto_wait_req(crypto_skcipher_decrypt(req), &wait); skcipher_request_free(req); if (res < 0) { printk_ratelimited(KERN_ERR diff --git a/fs/crypto/fscrypt_private.h b/fs/crypto/fscrypt_private.h index a1d5021..c0f1881 100644 --- a/fs/crypto/fscrypt_private.h +++ b/fs/crypto/fscrypt_private.h @@ -69,16 +69,6 @@ typedef enum { #define FS_CTX_REQUIRES_FREE_ENCRYPT_FL 0x00000001 #define FS_CTX_HAS_BOUNCE_BUFFER_FL 0x00000002 -struct fscrypt_completion_result { - struct completion completion; - int res; -}; - -#define DECLARE_FS_COMPLETION_RESULT(ecr) \ - struct fscrypt_completion_result ecr = { \ - COMPLETION_INITIALIZER_ONSTACK((ecr).completion), 0 } - - /* crypto.c */ extern int fscrypt_initialize(unsigned int cop_flags); extern struct workqueue_struct *fscrypt_read_workqueue; diff --git a/fs/crypto/keyinfo.c b/fs/crypto/keyinfo.c index 018c588..3c84cac 100644 --- a/fs/crypto/keyinfo.c +++ b/fs/crypto/keyinfo.c @@ -17,17 +17,6 @@ static struct crypto_shash *essiv_hash_tfm; -static void derive_crypt_complete(struct crypto_async_request *req, int rc) -{ - struct fscrypt_completion_result *ecr = req->data; - - if (rc == -EINPROGRESS) - return; - - ecr->res = rc; - complete(&ecr->completion); -} - /** * derive_key_aes() - Derive a key using AES-128-ECB * @deriving_key: Encryption key used for derivation. 
@@ -42,7 +31,7 @@ static int derive_key_aes(u8 deriving_key[FS_AES_128_ECB_KEY_SIZE], { int res = 0; struct skcipher_request *req = NULL; - DECLARE_FS_COMPLETION_RESULT(ecr); + DECLARE_CRYPTO_WAIT(wait); struct scatterlist src_sg, dst_sg; struct crypto_skcipher *tfm = crypto_alloc_skcipher("ecb(aes)", 0, 0); @@ -59,7 +48,7 @@ static int derive_key_aes(u8 deriving_key[FS_AES_128_ECB_KEY_SIZE], } skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP, - derive_crypt_complete, &ecr); + crypto_req_done, &wait); res = crypto_skcipher_setkey(tfm, deriving_key, FS_AES_128_ECB_KEY_SIZE); if (res < 0) @@ -69,11 +58,7 @@ static int derive_key_aes(u8 deriving_key[FS_AES_128_ECB_KEY_SIZE], sg_init_one(&dst_sg, derived_raw_key, source_key->size); skcipher_request_set_crypt(req, &src_sg, &dst_sg, source_key->size, NULL); - res = crypto_skcipher_encrypt(req); - if (res == -EINPROGRESS || res == -EBUSY) { - wait_for_completion(&ecr.completion); - res = ecr.res; - } + res = crypto_wait_req(crypto_skcipher_encrypt(req), &wait); out: skcipher_request_free(req); crypto_free_skcipher(tfm); -- 2.1.4 |
From: Gilad Ben-Y. <gi...@be...> - 2017-08-24 14:21:39
testmgr is starting async. crypto ops and waiting for them to complete. Move it over to generic code doing the same. This also provides a test of the generic crypto async. wait code. Signed-off-by: Gilad Ben-Yossef <gi...@be...> --- crypto/testmgr.c | 204 ++++++++++++++++++------------------------------------- 1 file changed, 66 insertions(+), 138 deletions(-) diff --git a/crypto/testmgr.c b/crypto/testmgr.c index 8a124d3..af968f4 100644 --- a/crypto/testmgr.c +++ b/crypto/testmgr.c @@ -76,11 +76,6 @@ int alg_test(const char *driver, const char *alg, u32 type, u32 mask) #define ENCRYPT 1 #define DECRYPT 0 -struct tcrypt_result { - struct completion completion; - int err; -}; - struct aead_test_suite { struct { const struct aead_testvec *vecs; @@ -155,17 +150,6 @@ static void hexdump(unsigned char *buf, unsigned int len) buf, len, false); } -static void tcrypt_complete(struct crypto_async_request *req, int err) -{ - struct tcrypt_result *res = req->data; - - if (err == -EINPROGRESS) - return; - - res->err = err; - complete(&res->completion); -} - static int testmgr_alloc_buf(char *buf[XBUFSIZE]) { int i; @@ -193,20 +177,10 @@ static void testmgr_free_buf(char *buf[XBUFSIZE]) free_page((unsigned long)buf[i]); } -static int wait_async_op(struct tcrypt_result *tr, int ret) -{ - if (ret == -EINPROGRESS || ret == -EBUSY) { - wait_for_completion(&tr->completion); - reinit_completion(&tr->completion); - ret = tr->err; - } - return ret; -} - static int ahash_partial_update(struct ahash_request **preq, struct crypto_ahash *tfm, const struct hash_testvec *template, void *hash_buff, int k, int temp, struct scatterlist *sg, - const char *algo, char *result, struct tcrypt_result *tresult) + const char *algo, char *result, struct crypto_wait *wait) { char *state; struct ahash_request *req; @@ -236,7 +210,7 @@ static int ahash_partial_update(struct ahash_request **preq, } ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, - tcrypt_complete, tresult); + crypto_req_done, wait); memcpy(hash_buff, template->plaintext + temp, template->tap[k]); @@ -247,7 +221,7 @@ static int ahash_partial_update(struct ahash_request **preq, pr_err("alg: hash: Failed to import() for %s\n", algo); goto out; } - ret = wait_async_op(tresult, crypto_ahash_update(req)); + ret = crypto_wait_req(crypto_ahash_update(req), wait); if (ret) goto out; *preq = req; @@ -272,7 +246,7 @@ static int __test_hash(struct crypto_ahash *tfm, char *result; char *key; struct ahash_request *req; - struct tcrypt_result tresult; + struct crypto_wait wait; void *hash_buff; char *xbuf[XBUFSIZE]; int ret = -ENOMEM; @@ -286,7 +260,7 @@ static int __test_hash(struct crypto_ahash *tfm, if (testmgr_alloc_buf(xbuf)) goto out_nobuf; - init_completion(&tresult.completion); + crypto_init_wait(&wait); req = ahash_request_alloc(tfm, GFP_KERNEL); if (!req) { @@ -295,7 +269,7 @@ static int __test_hash(struct crypto_ahash *tfm, goto out_noreq; } ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, - tcrypt_complete, &tresult); + crypto_req_done, &wait); j = 0; for (i = 0; i < tcount; i++) { @@ -335,26 +309,26 @@ static int __test_hash(struct crypto_ahash *tfm, ahash_request_set_crypt(req, sg, result, template[i].psize); if (use_digest) { - ret = wait_async_op(&tresult, crypto_ahash_digest(req)); + ret = crypto_wait_req(crypto_ahash_digest(req), &wait); if (ret) { pr_err("alg: hash: digest failed on test %d " "for %s: ret=%d\n", j, algo, -ret); goto out; } } else { - ret = wait_async_op(&tresult, crypto_ahash_init(req)); + ret = 
crypto_wait_req(crypto_ahash_init(req), &wait); if (ret) { pr_err("alg: hash: init failed on test %d " "for %s: ret=%d\n", j, algo, -ret); goto out; } - ret = wait_async_op(&tresult, crypto_ahash_update(req)); + ret = crypto_wait_req(crypto_ahash_update(req), &wait); if (ret) { pr_err("alg: hash: update failed on test %d " "for %s: ret=%d\n", j, algo, -ret); goto out; } - ret = wait_async_op(&tresult, crypto_ahash_final(req)); + ret = crypto_wait_req(crypto_ahash_final(req), &wait); if (ret) { pr_err("alg: hash: final failed on test %d " "for %s: ret=%d\n", j, algo, -ret); @@ -420,22 +394,10 @@ static int __test_hash(struct crypto_ahash *tfm, } ahash_request_set_crypt(req, sg, result, template[i].psize); - ret = crypto_ahash_digest(req); - switch (ret) { - case 0: - break; - case -EINPROGRESS: - case -EBUSY: - wait_for_completion(&tresult.completion); - reinit_completion(&tresult.completion); - ret = tresult.err; - if (!ret) - break; - /* fall through */ - default: - printk(KERN_ERR "alg: hash: digest failed " - "on chunking test %d for %s: " - "ret=%d\n", j, algo, -ret); + ret = crypto_wait_req(crypto_ahash_digest(req), &wait); + if (ret) { + pr_err("alg: hash: digest failed on chunking test %d for %s: ret=%d\n", + j, algo, -ret); goto out; } @@ -486,13 +448,13 @@ static int __test_hash(struct crypto_ahash *tfm, } ahash_request_set_crypt(req, sg, result, template[i].tap[0]); - ret = wait_async_op(&tresult, crypto_ahash_init(req)); + ret = crypto_wait_req(crypto_ahash_init(req), &wait); if (ret) { pr_err("alg: hash: init failed on test %d for %s: ret=%d\n", j, algo, -ret); goto out; } - ret = wait_async_op(&tresult, crypto_ahash_update(req)); + ret = crypto_wait_req(crypto_ahash_update(req), &wait); if (ret) { pr_err("alg: hash: update failed on test %d for %s: ret=%d\n", j, algo, -ret); @@ -503,7 +465,7 @@ static int __test_hash(struct crypto_ahash *tfm, for (k = 1; k < template[i].np; k++) { ret = ahash_partial_update(&req, tfm, &template[i], hash_buff, k, temp, &sg[0], algo, result, - &tresult); + &wait); if (ret) { pr_err("alg: hash: partial update failed on test %d for %s: ret=%d\n", j, algo, -ret); @@ -511,7 +473,7 @@ static int __test_hash(struct crypto_ahash *tfm, } temp += template[i].tap[k]; } - ret = wait_async_op(&tresult, crypto_ahash_final(req)); + ret = crypto_wait_req(crypto_ahash_final(req), &wait); if (ret) { pr_err("alg: hash: final failed on test %d for %s: ret=%d\n", j, algo, -ret); @@ -580,7 +542,7 @@ static int __test_aead(struct crypto_aead *tfm, int enc, struct scatterlist *sg; struct scatterlist *sgout; const char *e, *d; - struct tcrypt_result result; + struct crypto_wait wait; unsigned int authsize, iv_len; void *input; void *output; @@ -619,7 +581,7 @@ static int __test_aead(struct crypto_aead *tfm, int enc, else e = "decryption"; - init_completion(&result.completion); + crypto_init_wait(&wait); req = aead_request_alloc(tfm, GFP_KERNEL); if (!req) { @@ -629,7 +591,7 @@ static int __test_aead(struct crypto_aead *tfm, int enc, } aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, - tcrypt_complete, &result); + crypto_req_done, &wait); iv_len = crypto_aead_ivsize(tfm); @@ -709,7 +671,8 @@ static int __test_aead(struct crypto_aead *tfm, int enc, aead_request_set_ad(req, template[i].alen); - ret = enc ? crypto_aead_encrypt(req) : crypto_aead_decrypt(req); + ret = crypto_wait_req(enc ? 
crypto_aead_encrypt(req) + : crypto_aead_decrypt(req), &wait); switch (ret) { case 0: @@ -722,13 +685,6 @@ static int __test_aead(struct crypto_aead *tfm, int enc, goto out; } break; - case -EINPROGRESS: - case -EBUSY: - wait_for_completion(&result.completion); - reinit_completion(&result.completion); - ret = result.err; - if (!ret) - break; case -EBADMSG: if (template[i].novrfy) /* verification failure was expected */ @@ -866,7 +822,8 @@ static int __test_aead(struct crypto_aead *tfm, int enc, aead_request_set_ad(req, template[i].alen); - ret = enc ? crypto_aead_encrypt(req) : crypto_aead_decrypt(req); + ret = crypto_wait_req(enc ? crypto_aead_encrypt(req) + : crypto_aead_decrypt(req), &wait); switch (ret) { case 0: @@ -879,13 +836,6 @@ static int __test_aead(struct crypto_aead *tfm, int enc, goto out; } break; - case -EINPROGRESS: - case -EBUSY: - wait_for_completion(&result.completion); - reinit_completion(&result.completion); - ret = result.err; - if (!ret) - break; case -EBADMSG: if (template[i].novrfy) /* verification failure was expected */ @@ -1083,7 +1033,7 @@ static int __test_skcipher(struct crypto_skcipher *tfm, int enc, struct scatterlist sg[8]; struct scatterlist sgout[8]; const char *e, *d; - struct tcrypt_result result; + struct crypto_wait wait; void *data; char iv[MAX_IVLEN]; char *xbuf[XBUFSIZE]; @@ -1107,7 +1057,7 @@ static int __test_skcipher(struct crypto_skcipher *tfm, int enc, else e = "decryption"; - init_completion(&result.completion); + crypto_init_wait(&wait); req = skcipher_request_alloc(tfm, GFP_KERNEL); if (!req) { @@ -1117,7 +1067,7 @@ static int __test_skcipher(struct crypto_skcipher *tfm, int enc, } skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, - tcrypt_complete, &result); + crypto_req_done, &wait); j = 0; for (i = 0; i < tcount; i++) { @@ -1164,21 +1114,10 @@ static int __test_skcipher(struct crypto_skcipher *tfm, int enc, skcipher_request_set_crypt(req, sg, (diff_dst) ? sgout : sg, template[i].ilen, iv); - ret = enc ? crypto_skcipher_encrypt(req) : - crypto_skcipher_decrypt(req); + ret = crypto_wait_req(enc ? crypto_skcipher_encrypt(req) : + crypto_skcipher_decrypt(req), &wait); - switch (ret) { - case 0: - break; - case -EINPROGRESS: - case -EBUSY: - wait_for_completion(&result.completion); - reinit_completion(&result.completion); - ret = result.err; - if (!ret) - break; - /* fall through */ - default: + if (ret) { pr_err("alg: skcipher%s: %s failed on test %d for %s: ret=%d\n", d, e, j, algo, -ret); goto out; @@ -1272,21 +1211,10 @@ static int __test_skcipher(struct crypto_skcipher *tfm, int enc, skcipher_request_set_crypt(req, sg, (diff_dst) ? sgout : sg, template[i].ilen, iv); - ret = enc ? crypto_skcipher_encrypt(req) : - crypto_skcipher_decrypt(req); + ret = crypto_wait_req(enc ? 
crypto_skcipher_encrypt(req) : + crypto_skcipher_decrypt(req), &wait); - switch (ret) { - case 0: - break; - case -EINPROGRESS: - case -EBUSY: - wait_for_completion(&result.completion); - reinit_completion(&result.completion); - ret = result.err; - if (!ret) - break; - /* fall through */ - default: + if (ret) { pr_err("alg: skcipher%s: %s failed on chunk test %d for %s: ret=%d\n", d, e, j, algo, -ret); goto out; @@ -1462,7 +1390,7 @@ static int test_acomp(struct crypto_acomp *tfm, int ret; struct scatterlist src, dst; struct acomp_req *req; - struct tcrypt_result result; + struct crypto_wait wait; output = kmalloc(COMP_BUF_SIZE, GFP_KERNEL); if (!output) @@ -1486,7 +1414,7 @@ static int test_acomp(struct crypto_acomp *tfm, } memset(output, 0, dlen); - init_completion(&result.completion); + crypto_init_wait(&wait); sg_init_one(&src, input_vec, ilen); sg_init_one(&dst, output, dlen); @@ -1501,9 +1429,9 @@ static int test_acomp(struct crypto_acomp *tfm, acomp_request_set_params(req, &src, &dst, ilen, dlen); acomp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, - tcrypt_complete, &result); + crypto_req_done, &wait); - ret = wait_async_op(&result, crypto_acomp_compress(req)); + ret = crypto_wait_req(crypto_acomp_compress(req), &wait); if (ret) { pr_err("alg: acomp: compression failed on test %d for %s: ret=%d\n", i + 1, algo, -ret); @@ -1516,10 +1444,10 @@ static int test_acomp(struct crypto_acomp *tfm, dlen = COMP_BUF_SIZE; sg_init_one(&src, output, ilen); sg_init_one(&dst, decomp_out, dlen); - init_completion(&result.completion); + crypto_init_wait(&wait); acomp_request_set_params(req, &src, &dst, ilen, dlen); - ret = wait_async_op(&result, crypto_acomp_decompress(req)); + ret = crypto_wait_req(crypto_acomp_decompress(req), &wait); if (ret) { pr_err("alg: acomp: compression failed on test %d for %s: ret=%d\n", i + 1, algo, -ret); @@ -1563,7 +1491,7 @@ static int test_acomp(struct crypto_acomp *tfm, } memset(output, 0, dlen); - init_completion(&result.completion); + crypto_init_wait(&wait); sg_init_one(&src, input_vec, ilen); sg_init_one(&dst, output, dlen); @@ -1578,9 +1506,9 @@ static int test_acomp(struct crypto_acomp *tfm, acomp_request_set_params(req, &src, &dst, ilen, dlen); acomp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, - tcrypt_complete, &result); + crypto_req_done, &wait); - ret = wait_async_op(&result, crypto_acomp_decompress(req)); + ret = crypto_wait_req(crypto_acomp_decompress(req), &wait); if (ret) { pr_err("alg: acomp: decompression failed on test %d for %s: ret=%d\n", i + 1, algo, -ret); @@ -2000,7 +1928,7 @@ static int do_test_kpp(struct crypto_kpp *tfm, const struct kpp_testvec *vec, void *a_public = NULL; void *a_ss = NULL; void *shared_secret = NULL; - struct tcrypt_result result; + struct crypto_wait wait; unsigned int out_len_max; int err = -ENOMEM; struct scatterlist src, dst; @@ -2009,7 +1937,7 @@ static int do_test_kpp(struct crypto_kpp *tfm, const struct kpp_testvec *vec, if (!req) return err; - init_completion(&result.completion); + crypto_init_wait(&wait); err = crypto_kpp_set_secret(tfm, vec->secret, vec->secret_size); if (err < 0) @@ -2027,10 +1955,10 @@ static int do_test_kpp(struct crypto_kpp *tfm, const struct kpp_testvec *vec, sg_init_one(&dst, output_buf, out_len_max); kpp_request_set_output(req, &dst, out_len_max); kpp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, - tcrypt_complete, &result); + crypto_req_done, &wait); /* Compute party A's public key */ - err = wait_async_op(&result, crypto_kpp_generate_public_key(req)); + err = 
crypto_wait_req(crypto_kpp_generate_public_key(req), &wait); if (err) { pr_err("alg: %s: Party A: generate public key test failed. err %d\n", alg, err); @@ -2069,8 +1997,8 @@ static int do_test_kpp(struct crypto_kpp *tfm, const struct kpp_testvec *vec, kpp_request_set_input(req, &src, vec->b_public_size); kpp_request_set_output(req, &dst, out_len_max); kpp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, - tcrypt_complete, &result); - err = wait_async_op(&result, crypto_kpp_compute_shared_secret(req)); + crypto_req_done, &wait); + err = crypto_wait_req(crypto_kpp_compute_shared_secret(req), &wait); if (err) { pr_err("alg: %s: Party A: compute shared secret test failed. err %d\n", alg, err); @@ -2100,9 +2028,9 @@ static int do_test_kpp(struct crypto_kpp *tfm, const struct kpp_testvec *vec, kpp_request_set_input(req, &src, vec->expected_a_public_size); kpp_request_set_output(req, &dst, out_len_max); kpp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, - tcrypt_complete, &result); - err = wait_async_op(&result, - crypto_kpp_compute_shared_secret(req)); + crypto_req_done, &wait); + err = crypto_wait_req(crypto_kpp_compute_shared_secret(req), + &wait); if (err) { pr_err("alg: %s: Party B: compute shared secret failed. err %d\n", alg, err); @@ -2179,7 +2107,7 @@ static int test_akcipher_one(struct crypto_akcipher *tfm, struct akcipher_request *req; void *outbuf_enc = NULL; void *outbuf_dec = NULL; - struct tcrypt_result result; + struct crypto_wait wait; unsigned int out_len_max, out_len = 0; int err = -ENOMEM; struct scatterlist src, dst, src_tab[2]; @@ -2191,7 +2119,7 @@ static int test_akcipher_one(struct crypto_akcipher *tfm, if (!req) goto free_xbuf; - init_completion(&result.completion); + crypto_init_wait(&wait); if (vecs->public_key_vec) err = crypto_akcipher_set_pub_key(tfm, vecs->key, @@ -2220,13 +2148,13 @@ static int test_akcipher_one(struct crypto_akcipher *tfm, akcipher_request_set_crypt(req, src_tab, &dst, vecs->m_size, out_len_max); akcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, - tcrypt_complete, &result); + crypto_req_done, &wait); - err = wait_async_op(&result, vecs->siggen_sigver_test ? - /* Run asymmetric signature generation */ - crypto_akcipher_sign(req) : - /* Run asymmetric encrypt */ - crypto_akcipher_encrypt(req)); + err = crypto_wait_req(vecs->siggen_sigver_test ? + /* Run asymmetric signature generation */ + crypto_akcipher_sign(req) : + /* Run asymmetric encrypt */ + crypto_akcipher_encrypt(req), &wait); if (err) { pr_err("alg: akcipher: encrypt test failed. err %d\n", err); goto free_all; @@ -2261,14 +2189,14 @@ static int test_akcipher_one(struct crypto_akcipher *tfm, sg_init_one(&src, xbuf[0], vecs->c_size); sg_init_one(&dst, outbuf_dec, out_len_max); - init_completion(&result.completion); + crypto_init_wait(&wait); akcipher_request_set_crypt(req, &src, &dst, vecs->c_size, out_len_max); - err = wait_async_op(&result, vecs->siggen_sigver_test ? - /* Run asymmetric signature verification */ - crypto_akcipher_verify(req) : - /* Run asymmetric decrypt */ - crypto_akcipher_decrypt(req)); + err = crypto_wait_req(vecs->siggen_sigver_test ? + /* Run asymmetric signature verification */ + crypto_akcipher_verify(req) : + /* Run asymmetric decrypt */ + crypto_akcipher_decrypt(req), &wait); if (err) { pr_err("alg: akcipher: decrypt test failed. err %d\n", err); goto free_all; -- 2.1.4 |
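One detail the testmgr conversion relies on: crypto_wait_req() re-initializes the completion after it wakes up, so a single wait object can serve a whole init/update/final sequence, just as the old wait_async_op() helper did with reinit_completion(). A minimal sketch (hypothetical function, not testmgr code):

#include <crypto/hash.h>
#include <linux/crypto.h>
#include <linux/scatterlist.h>

static int example_hash_sync(struct crypto_ahash *tfm, struct scatterlist *sg,
                             unsigned int nbytes, u8 *result)
{
        struct ahash_request *req;
        struct crypto_wait wait;
        int ret;

        crypto_init_wait(&wait);

        req = ahash_request_alloc(tfm, GFP_KERNEL);
        if (!req)
                return -ENOMEM;

        ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
                                   crypto_req_done, &wait);
        ahash_request_set_crypt(req, sg, result, nbytes);

        /* the same wait object is reused for all three steps */
        ret = crypto_wait_req(crypto_ahash_init(req), &wait);
        if (!ret)
                ret = crypto_wait_req(crypto_ahash_update(req), &wait);
        if (!ret)
                ret = crypto_wait_req(crypto_ahash_final(req), &wait);

        ahash_request_free(req);
        return ret;
}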
From: Gilad Ben-Y. <gi...@be...> - 2017-08-24 14:21:25
gcm is starting an async. crypto op and waiting for it complete. Move it over to generic code doing the same. Signed-off-by: Gilad Ben-Yossef <gi...@be...> --- crypto/gcm.c | 32 ++++++-------------------------- 1 file changed, 6 insertions(+), 26 deletions(-) diff --git a/crypto/gcm.c b/crypto/gcm.c index 3841b5e..fb923a5 100644 --- a/crypto/gcm.c +++ b/crypto/gcm.c @@ -16,7 +16,6 @@ #include <crypto/scatterwalk.h> #include <crypto/hash.h> #include "internal.h" -#include <linux/completion.h> #include <linux/err.h> #include <linux/init.h> #include <linux/kernel.h> @@ -78,11 +77,6 @@ struct crypto_gcm_req_priv_ctx { } u; }; -struct crypto_gcm_setkey_result { - int err; - struct completion completion; -}; - static struct { u8 buf[16]; struct scatterlist sg; @@ -98,17 +92,6 @@ static inline struct crypto_gcm_req_priv_ctx *crypto_gcm_reqctx( return (void *)PTR_ALIGN((u8 *)aead_request_ctx(req), align + 1); } -static void crypto_gcm_setkey_done(struct crypto_async_request *req, int err) -{ - struct crypto_gcm_setkey_result *result = req->data; - - if (err == -EINPROGRESS) - return; - - result->err = err; - complete(&result->completion); -} - static int crypto_gcm_setkey(struct crypto_aead *aead, const u8 *key, unsigned int keylen) { @@ -119,7 +102,7 @@ static int crypto_gcm_setkey(struct crypto_aead *aead, const u8 *key, be128 hash; u8 iv[16]; - struct crypto_gcm_setkey_result result; + struct crypto_wait wait; struct scatterlist sg[1]; struct skcipher_request req; @@ -140,21 +123,18 @@ static int crypto_gcm_setkey(struct crypto_aead *aead, const u8 *key, if (!data) return -ENOMEM; - init_completion(&data->result.completion); + crypto_init_wait(&data->wait); sg_init_one(data->sg, &data->hash, sizeof(data->hash)); skcipher_request_set_tfm(&data->req, ctr); skcipher_request_set_callback(&data->req, CRYPTO_TFM_REQ_MAY_SLEEP | CRYPTO_TFM_REQ_MAY_BACKLOG, - crypto_gcm_setkey_done, - &data->result); + crypto_req_done, + &data->wait); skcipher_request_set_crypt(&data->req, data->sg, data->sg, sizeof(data->hash), data->iv); - err = crypto_skcipher_encrypt(&data->req); - if (err == -EINPROGRESS || err == -EBUSY) { - wait_for_completion(&data->result.completion); - err = data->result.err; - } + err = crypto_wait_req(crypto_skcipher_encrypt(&data->req), + &data->wait); if (err) goto out; -- 2.1.4 |
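In gcm the wait object is embedded in a heap-allocated setkey structure rather than declared on the stack, so crypto_init_wait() takes the place of DECLARE_CRYPTO_WAIT(). A rough sketch of that shape, with made-up names and sizes:

#include <crypto/skcipher.h>
#include <linux/crypto.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

struct example_setkey_data {
        u8 iv[16];
        u8 block[16];
        struct scatterlist sg;
        struct crypto_wait wait;
};

static int example_heap_wait_encrypt(struct crypto_skcipher *ctr)
{
        struct example_setkey_data *data;
        struct skcipher_request *req;
        int err;

        data = kzalloc(sizeof(*data), GFP_KERNEL);
        if (!data)
                return -ENOMEM;

        req = skcipher_request_alloc(ctr, GFP_KERNEL);
        if (!req) {
                kfree(data);
                return -ENOMEM;
        }

        /* heap-resident wait object: initialize it explicitly */
        crypto_init_wait(&data->wait);
        sg_init_one(&data->sg, data->block, sizeof(data->block));
        skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP |
                                      CRYPTO_TFM_REQ_MAY_BACKLOG,
                                      crypto_req_done, &data->wait);
        skcipher_request_set_crypt(req, &data->sg, &data->sg,
                                   sizeof(data->block), data->iv);

        err = crypto_wait_req(crypto_skcipher_encrypt(req), &data->wait);

        skcipher_request_free(req);
        kfree(data);
        return err;
}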
From: Gilad Ben-Y. <gi...@be...> - 2017-08-24 14:21:12
DRBG is starting an async. crypto op and waiting for it complete. Move it over to generic code doing the same. The code now also passes CRYPTO_TFM_REQ_MAY_SLEEP flag indicating crypto request memory allocation may use GFP_KERNEL which should be perfectly fine as the code is obviously sleeping for the completion of the request any way. Signed-off-by: Gilad Ben-Yossef <gi...@be...> --- crypto/drbg.c | 36 +++++++++--------------------------- include/crypto/drbg.h | 3 +-- 2 files changed, 10 insertions(+), 29 deletions(-) diff --git a/crypto/drbg.c b/crypto/drbg.c index 633a88e..c522251 100644 --- a/crypto/drbg.c +++ b/crypto/drbg.c @@ -1651,16 +1651,6 @@ static int drbg_fini_sym_kernel(struct drbg_state *drbg) return 0; } -static void drbg_skcipher_cb(struct crypto_async_request *req, int error) -{ - struct drbg_state *drbg = req->data; - - if (error == -EINPROGRESS) - return; - drbg->ctr_async_err = error; - complete(&drbg->ctr_completion); -} - static int drbg_init_sym_kernel(struct drbg_state *drbg) { struct crypto_cipher *tfm; @@ -1691,7 +1681,7 @@ static int drbg_init_sym_kernel(struct drbg_state *drbg) return PTR_ERR(sk_tfm); } drbg->ctr_handle = sk_tfm; - init_completion(&drbg->ctr_completion); + crypto_init_wait(&drbg->ctr_wait); req = skcipher_request_alloc(sk_tfm, GFP_KERNEL); if (!req) { @@ -1700,8 +1690,9 @@ static int drbg_init_sym_kernel(struct drbg_state *drbg) return -ENOMEM; } drbg->ctr_req = req; - skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, - drbg_skcipher_cb, drbg); + skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG | + CRYPTO_TFM_REQ_MAY_SLEEP, + crypto_req_done, &drbg->ctr_wait); alignmask = crypto_skcipher_alignmask(sk_tfm); drbg->ctr_null_value_buf = kzalloc(DRBG_CTR_NULL_LEN + alignmask, @@ -1762,21 +1753,12 @@ static int drbg_kcapi_sym_ctr(struct drbg_state *drbg, /* Output buffer may not be valid for SGL, use scratchpad */ skcipher_request_set_crypt(drbg->ctr_req, &sg_in, &sg_out, cryptlen, drbg->V); - ret = crypto_skcipher_encrypt(drbg->ctr_req); - switch (ret) { - case 0: - break; - case -EINPROGRESS: - case -EBUSY: - wait_for_completion(&drbg->ctr_completion); - if (!drbg->ctr_async_err) { - reinit_completion(&drbg->ctr_completion); - break; - } - default: + ret = crypto_wait_req(crypto_skcipher_encrypt(drbg->ctr_req), + &drbg->ctr_wait); + if (ret) goto out; - } - init_completion(&drbg->ctr_completion); + + crypto_init_wait(&drbg->ctr_wait); memcpy(outbuf, drbg->outscratchpad, cryptlen); diff --git a/include/crypto/drbg.h b/include/crypto/drbg.h index 22f884c..8f94110 100644 --- a/include/crypto/drbg.h +++ b/include/crypto/drbg.h @@ -126,8 +126,7 @@ struct drbg_state { __u8 *ctr_null_value; /* CTR mode aligned zero buf */ __u8 *outscratchpadbuf; /* CTR mode output scratchpad */ __u8 *outscratchpad; /* CTR mode aligned outbuf */ - struct completion ctr_completion; /* CTR mode async handler */ - int ctr_async_err; /* CTR mode async error */ + struct crypto_wait ctr_wait; /* CTR mode async wait obj */ bool seeded; /* DRBG fully seeded? */ bool pr; /* Prediction resistance enabled? */ -- 2.1.4 |
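In drbg the wait object lives in the long-lived drbg_state and the request callback is set up once at init time. The remark about CRYPTO_TFM_REQ_MAY_SLEEP generalizes: the caller sleeps in crypto_wait_req() anyway, so letting the driver allocate with GFP_KERNEL cannot make things worse. A sketch of that arrangement, with invented names:

#include <crypto/skcipher.h>
#include <linux/crypto.h>
#include <linux/scatterlist.h>

struct example_state {
        struct crypto_skcipher *tfm;
        struct skcipher_request *req;
        struct crypto_wait wait;        /* lives as long as the state */
};

static int example_state_init(struct example_state *st)
{
        st->req = skcipher_request_alloc(st->tfm, GFP_KERNEL);
        if (!st->req)
                return -ENOMEM;

        crypto_init_wait(&st->wait);
        skcipher_request_set_callback(st->req, CRYPTO_TFM_REQ_MAY_BACKLOG |
                                      CRYPTO_TFM_REQ_MAY_SLEEP,
                                      crypto_req_done, &st->wait);
        return 0;
}

static int example_state_encrypt(struct example_state *st,
                                 struct scatterlist *src,
                                 struct scatterlist *dst,
                                 unsigned int len, u8 *iv)
{
        skcipher_request_set_crypt(st->req, src, dst, len, iv);
        return crypto_wait_req(crypto_skcipher_encrypt(st->req), &st->wait);
}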
From: Gilad Ben-Y. <gi...@be...> - 2017-08-24 14:21:00
public_key_verify_signature() is starting an async crypto op and waiting for it to complete. Move it over to generic code doing the same. Signed-off-by: Gilad Ben-Yossef <gi...@be...> --- crypto/asymmetric_keys/public_key.c | 28 ++++------------------------ 1 file changed, 4 insertions(+), 24 deletions(-) diff --git a/crypto/asymmetric_keys/public_key.c b/crypto/asymmetric_keys/public_key.c index 3cd6e12..d916235 100644 --- a/crypto/asymmetric_keys/public_key.c +++ b/crypto/asymmetric_keys/public_key.c @@ -57,29 +57,13 @@ static void public_key_destroy(void *payload0, void *payload3) public_key_signature_free(payload3); } -struct public_key_completion { - struct completion completion; - int err; -}; - -static void public_key_verify_done(struct crypto_async_request *req, int err) -{ - struct public_key_completion *compl = req->data; - - if (err == -EINPROGRESS) - return; - - compl->err = err; - complete(&compl->completion); -} - /* * Verify a signature using a public key. */ int public_key_verify_signature(const struct public_key *pkey, const struct public_key_signature *sig) { - struct public_key_completion compl; + struct crypto_wait cwait; struct crypto_akcipher *tfm; struct akcipher_request *req; struct scatterlist sig_sg, digest_sg; @@ -131,20 +115,16 @@ int public_key_verify_signature(const struct public_key *pkey, sg_init_one(&digest_sg, output, outlen); akcipher_request_set_crypt(req, &sig_sg, &digest_sg, sig->s_size, outlen); - init_completion(&compl.completion); + crypto_init_wait(&cwait); akcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP, - public_key_verify_done, &compl); + crypto_req_done, &cwait); /* Perform the verification calculation. This doesn't actually do the * verification, but rather calculates the hash expected by the * signature and returns that to us. */ - ret = crypto_akcipher_verify(req); - if ((ret == -EINPROGRESS) || (ret == -EBUSY)) { - wait_for_completion(&compl.completion); - ret = compl.err; - } + ret = crypto_wait_req(crypto_akcipher_verify(req), &cwait); if (ret < 0) goto out_free_output; -- 2.1.4 |
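For completeness, the same pattern wrapped around an akcipher verification looks roughly like this (illustrative names only; the real call site also builds the signature and digest scatterlists from the public_key_signature):

#include <crypto/akcipher.h>
#include <linux/crypto.h>
#include <linux/scatterlist.h>

static int example_verify_sync(struct crypto_akcipher *tfm,
                               struct scatterlist *sig_sg, unsigned int sig_len,
                               struct scatterlist *out_sg, unsigned int out_len)
{
        struct akcipher_request *req;
        DECLARE_CRYPTO_WAIT(cwait);
        int ret;

        req = akcipher_request_alloc(tfm, GFP_KERNEL);
        if (!req)
                return -ENOMEM;

        akcipher_request_set_crypt(req, sig_sg, out_sg, sig_len, out_len);
        akcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
                                      CRYPTO_TFM_REQ_MAY_SLEEP,
                                      crypto_req_done, &cwait);

        /* blocks until the possibly asynchronous verify has finished */
        ret = crypto_wait_req(crypto_akcipher_verify(req), &cwait);

        akcipher_request_free(req);
        return ret;
}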
From: Gilad Ben-Y. <gi...@be...> - 2017-08-24 14:20:48
algif starts several async crypto ops and waits for their completion. Move it over to generic code doing the same. Signed-off-by: Gilad Ben-Yossef <gi...@be...> --- crypto/af_alg.c | 27 --------------------------- crypto/algif_aead.c | 8 ++++---- crypto/algif_hash.c | 30 ++++++++++++++---------------- crypto/algif_skcipher.c | 9 ++++----- include/crypto/if_alg.h | 15 +-------------- 5 files changed, 23 insertions(+), 66 deletions(-) diff --git a/crypto/af_alg.c b/crypto/af_alg.c index ffa9f4c..cf312ed 100644 --- a/crypto/af_alg.c +++ b/crypto/af_alg.c @@ -481,33 +481,6 @@ int af_alg_cmsg_send(struct msghdr *msg, struct af_alg_control *con) } EXPORT_SYMBOL_GPL(af_alg_cmsg_send); -int af_alg_wait_for_completion(int err, struct af_alg_completion *completion) -{ - switch (err) { - case -EINPROGRESS: - case -EBUSY: - wait_for_completion(&completion->completion); - reinit_completion(&completion->completion); - err = completion->err; - break; - }; - - return err; -} -EXPORT_SYMBOL_GPL(af_alg_wait_for_completion); - -void af_alg_complete(struct crypto_async_request *req, int err) -{ - struct af_alg_completion *completion = req->data; - - if (err == -EINPROGRESS) - return; - - completion->err = err; - complete(&completion->completion); -} -EXPORT_SYMBOL_GPL(af_alg_complete); - /** * af_alg_alloc_tsgl - allocate the TX SGL * diff --git a/crypto/algif_aead.c b/crypto/algif_aead.c index 516b38c..aacae08 100644 --- a/crypto/algif_aead.c +++ b/crypto/algif_aead.c @@ -278,11 +278,11 @@ static int _aead_recvmsg(struct socket *sock, struct msghdr *msg, /* Synchronous operation */ aead_request_set_callback(&areq->cra_u.aead_req, CRYPTO_TFM_REQ_MAY_BACKLOG, - af_alg_complete, &ctx->completion); - err = af_alg_wait_for_completion(ctx->enc ? + crypto_req_done, &ctx->wait); + err = crypto_wait_req(ctx->enc ? 
crypto_aead_encrypt(&areq->cra_u.aead_req) : crypto_aead_decrypt(&areq->cra_u.aead_req), - &ctx->completion); + &ctx->wait); } /* AIO operation in progress */ @@ -554,7 +554,7 @@ static int aead_accept_parent_nokey(void *private, struct sock *sk) ctx->merge = 0; ctx->enc = 0; ctx->aead_assoclen = 0; - af_alg_init_completion(&ctx->completion); + crypto_init_wait(&ctx->wait); ask->private = ctx; diff --git a/crypto/algif_hash.c b/crypto/algif_hash.c index 3b3c154..d2ab8de 100644 --- a/crypto/algif_hash.c +++ b/crypto/algif_hash.c @@ -26,7 +26,7 @@ struct hash_ctx { u8 *result; - struct af_alg_completion completion; + struct crypto_wait wait; unsigned int len; bool more; @@ -102,8 +102,7 @@ static int hash_sendmsg(struct socket *sock, struct msghdr *msg, if ((msg->msg_flags & MSG_MORE)) hash_free_result(sk, ctx); - err = af_alg_wait_for_completion(crypto_ahash_init(&ctx->req), - &ctx->completion); + err = crypto_wait_req(crypto_ahash_init(&ctx->req), &ctx->wait); if (err) goto unlock; } @@ -124,8 +123,8 @@ static int hash_sendmsg(struct socket *sock, struct msghdr *msg, ahash_request_set_crypt(&ctx->req, ctx->sgl.sg, NULL, len); - err = af_alg_wait_for_completion(crypto_ahash_update(&ctx->req), - &ctx->completion); + err = crypto_wait_req(crypto_ahash_update(&ctx->req), + &ctx->wait); af_alg_free_sg(&ctx->sgl); if (err) goto unlock; @@ -143,8 +142,8 @@ static int hash_sendmsg(struct socket *sock, struct msghdr *msg, goto unlock; ahash_request_set_crypt(&ctx->req, NULL, ctx->result, 0); - err = af_alg_wait_for_completion(crypto_ahash_final(&ctx->req), - &ctx->completion); + err = crypto_wait_req(crypto_ahash_final(&ctx->req), + &ctx->wait); } unlock: @@ -185,7 +184,7 @@ static ssize_t hash_sendpage(struct socket *sock, struct page *page, } else { if (!ctx->more) { err = crypto_ahash_init(&ctx->req); - err = af_alg_wait_for_completion(err, &ctx->completion); + err = crypto_wait_req(err, &ctx->wait); if (err) goto unlock; } @@ -193,7 +192,7 @@ static ssize_t hash_sendpage(struct socket *sock, struct page *page, err = crypto_ahash_update(&ctx->req); } - err = af_alg_wait_for_completion(err, &ctx->completion); + err = crypto_wait_req(err, &ctx->wait); if (err) goto unlock; @@ -229,17 +228,16 @@ static int hash_recvmsg(struct socket *sock, struct msghdr *msg, size_t len, ahash_request_set_crypt(&ctx->req, NULL, ctx->result, 0); if (!result && !ctx->more) { - err = af_alg_wait_for_completion( - crypto_ahash_init(&ctx->req), - &ctx->completion); + err = crypto_wait_req(crypto_ahash_init(&ctx->req), + &ctx->wait); if (err) goto unlock; } if (!result || ctx->more) { ctx->more = 0; - err = af_alg_wait_for_completion(crypto_ahash_final(&ctx->req), - &ctx->completion); + err = crypto_wait_req(crypto_ahash_final(&ctx->req), + &ctx->wait); if (err) goto unlock; } @@ -490,13 +488,13 @@ static int hash_accept_parent_nokey(void *private, struct sock *sk) ctx->result = NULL; ctx->len = len; ctx->more = 0; - af_alg_init_completion(&ctx->completion); + crypto_init_wait(&ctx->wait); ask->private = ctx; ahash_request_set_tfm(&ctx->req, hash); ahash_request_set_callback(&ctx->req, CRYPTO_TFM_REQ_MAY_BACKLOG, - af_alg_complete, &ctx->completion); + crypto_req_done, &ctx->wait); sk->sk_destruct = hash_sock_destruct; diff --git a/crypto/algif_skcipher.c b/crypto/algif_skcipher.c index 8ae4170..9954b07 100644 --- a/crypto/algif_skcipher.c +++ b/crypto/algif_skcipher.c @@ -129,12 +129,11 @@ static int _skcipher_recvmsg(struct socket *sock, struct msghdr *msg, skcipher_request_set_callback(&areq->cra_u.skcipher_req, 
CRYPTO_TFM_REQ_MAY_SLEEP | CRYPTO_TFM_REQ_MAY_BACKLOG, - af_alg_complete, - &ctx->completion); - err = af_alg_wait_for_completion(ctx->enc ? + crypto_req_done, &ctx->wait); + err = crypto_wait_req(ctx->enc ? crypto_skcipher_encrypt(&areq->cra_u.skcipher_req) : crypto_skcipher_decrypt(&areq->cra_u.skcipher_req), - &ctx->completion); + &ctx->wait); } /* AIO operation in progress */ @@ -388,7 +387,7 @@ static int skcipher_accept_parent_nokey(void *private, struct sock *sk) ctx->more = 0; ctx->merge = 0; ctx->enc = 0; - af_alg_init_completion(&ctx->completion); + crypto_init_wait(&ctx->wait); ask->private = ctx; diff --git a/include/crypto/if_alg.h b/include/crypto/if_alg.h index 75ec9c6..6abf0a3 100644 --- a/include/crypto/if_alg.h +++ b/include/crypto/if_alg.h @@ -40,11 +40,6 @@ struct alg_sock { void *private; }; -struct af_alg_completion { - struct completion completion; - int err; -}; - struct af_alg_control { struct af_alg_iv *iv; int op; @@ -152,7 +147,7 @@ struct af_alg_ctx { void *iv; size_t aead_assoclen; - struct af_alg_completion completion; + struct crypto_wait wait; size_t used; size_t rcvused; @@ -177,19 +172,11 @@ void af_alg_link_sg(struct af_alg_sgl *sgl_prev, struct af_alg_sgl *sgl_new); int af_alg_cmsg_send(struct msghdr *msg, struct af_alg_control *con); -int af_alg_wait_for_completion(int err, struct af_alg_completion *completion); -void af_alg_complete(struct crypto_async_request *req, int err); - static inline struct alg_sock *alg_sk(struct sock *sk) { return (struct alg_sock *)sk; } -static inline void af_alg_init_completion(struct af_alg_completion *completion) -{ - init_completion(&completion->completion); -} - /** * Size of available buffer for sending data from user space to kernel. * -- 2.1.4 |
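algif_hash also leans on the fact that crypto_wait_req() passes any status other than -EINPROGRESS or -EBUSY through unchanged, so a return code obtained earlier can be handed to it directly, as hash_sendpage() does. A small sketch with a hypothetical helper name:

#include <crypto/hash.h>
#include <linux/crypto.h>
#include <linux/types.h>

static int example_hash_step(struct ahash_request *req,
                             struct crypto_wait *wait, bool first)
{
        int err;

        if (first) {
                err = crypto_ahash_init(req);
                /* a synchronous error (or 0) comes straight back;
                 * only -EINPROGRESS/-EBUSY actually sleep */
                err = crypto_wait_req(err, wait);
                if (err)
                        return err;
        }

        return crypto_wait_req(crypto_ahash_update(req), wait);
}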
From: Gilad Ben-Y. <gi...@be...> - 2017-08-24 14:20:35
Invoking a possibly async. crypto op and waiting for completion while correctly handling backlog processing is a common task in the crypto API implementation and outside users of it. This patch adds a generic implementation for doing so in preparation for using it across the board instead of hand rolled versions. Signed-off-by: Gilad Ben-Yossef <gi...@be...> CC: Eric Biggers <ebi...@gm...> CC: Jonathan Cameron <Jon...@hu...> --- crypto/api.c | 13 +++++++++++++ include/linux/crypto.h | 40 ++++++++++++++++++++++++++++++++++++++++ 2 files changed, 53 insertions(+) diff --git a/crypto/api.c b/crypto/api.c index 941cd4c..2a2479d 100644 --- a/crypto/api.c +++ b/crypto/api.c @@ -24,6 +24,7 @@ #include <linux/sched/signal.h> #include <linux/slab.h> #include <linux/string.h> +#include <linux/completion.h> #include "internal.h" LIST_HEAD(crypto_alg_list); @@ -595,5 +596,17 @@ int crypto_has_alg(const char *name, u32 type, u32 mask) } EXPORT_SYMBOL_GPL(crypto_has_alg); +void crypto_req_done(struct crypto_async_request *req, int err) +{ + struct crypto_wait *wait = req->data; + + if (err == -EINPROGRESS) + return; + + wait->err = err; + complete(&wait->completion); +} +EXPORT_SYMBOL_GPL(crypto_req_done); + MODULE_DESCRIPTION("Cryptographic core API"); MODULE_LICENSE("GPL"); diff --git a/include/linux/crypto.h b/include/linux/crypto.h index 84da997..78508ca 100644 --- a/include/linux/crypto.h +++ b/include/linux/crypto.h @@ -24,6 +24,7 @@ #include <linux/slab.h> #include <linux/string.h> #include <linux/uaccess.h> +#include <linux/completion.h> /* * Autoloaded crypto modules should only use a prefixed name to avoid allowing @@ -468,6 +469,45 @@ struct crypto_alg { } CRYPTO_MINALIGN_ATTR; /* + * A helper struct for waiting for completion of async crypto ops + */ +struct crypto_wait { + struct completion completion; + int err; +}; + +/* + * Macro for declaring a crypto op async wait object on stack + */ +#define DECLARE_CRYPTO_WAIT(_wait) \ + struct crypto_wait _wait = { \ + COMPLETION_INITIALIZER_ONSTACK((_wait).completion), 0 } + +/* + * Async ops completion helper functioons + */ +void crypto_req_done(struct crypto_async_request *req, int err); + +static inline int crypto_wait_req(int err, struct crypto_wait *wait) +{ + switch (err) { + case -EINPROGRESS: + case -EBUSY: + wait_for_completion(&wait->completion); + reinit_completion(&wait->completion); + err = wait->err; + break; + }; + + return err; +} + +static inline void crypto_init_wait(struct crypto_wait *wait) +{ + init_completion(&wait->completion); +} + +/* * Algorithm registration interface. */ int crypto_register_alg(struct crypto_alg *alg); -- 2.1.4 |
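The helper assumes the usual provider-side contract: a request that was accepted onto the backlog is first signalled with -EINPROGRESS once it leaves the backlog, and crypto_req_done() deliberately ignores that intermediate notification, recording only the final status. A rough sketch of that contract seen from the driver side, not taken from any specific driver; 'was_backlogged' stands in for whatever bookkeeping the driver keeps:

#include <linux/crypto.h>
#include <linux/types.h>

static void example_driver_complete(struct crypto_async_request *areq,
                                    bool was_backlogged, int final_err)
{
        if (was_backlogged)
                /* the request has left the backlog; crypto_req_done()
                 * returns early on this notification */
                areq->complete(areq, -EINPROGRESS);

        /* final status: this is what crypto_wait_req() hands back */
        areq->complete(areq, final_err);
}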
From: Gilad Ben-Y. <gi...@be...> - 2017-08-24 14:20:22
Now that -EBUSY return code only indicates backlog queueing we can safely remove the now redundant check for the CRYPTO_TFM_REQ_MAY_BACKLOG flag when -EBUSY is returned. Signed-off-by: Gilad Ben-Yossef <gi...@be...> Acked-by: Boris Brezillon <bor...@fr...> --- drivers/crypto/marvell/cesa.c | 3 +-- drivers/crypto/marvell/cesa.h | 2 +- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/drivers/crypto/marvell/cesa.c b/drivers/crypto/marvell/cesa.c index 6e7a5c7..0c3909d 100644 --- a/drivers/crypto/marvell/cesa.c +++ b/drivers/crypto/marvell/cesa.c @@ -183,8 +183,7 @@ int mv_cesa_queue_req(struct crypto_async_request *req, spin_lock_bh(&engine->lock); ret = crypto_enqueue_request(&engine->queue, req); if ((mv_cesa_req_get_type(creq) == CESA_DMA_REQ) && - (ret == -EINPROGRESS || - (ret == -EBUSY && req->flags & CRYPTO_TFM_REQ_MAY_BACKLOG))) + (ret == -EINPROGRESS || ret == -EBUSY)) mv_cesa_tdma_chain(engine, creq); spin_unlock_bh(&engine->lock); diff --git a/drivers/crypto/marvell/cesa.h b/drivers/crypto/marvell/cesa.h index b7872f6..63c8457 100644 --- a/drivers/crypto/marvell/cesa.h +++ b/drivers/crypto/marvell/cesa.h @@ -763,7 +763,7 @@ static inline int mv_cesa_req_needs_cleanup(struct crypto_async_request *req, * the backlog and will be processed later. There's no need to * clean it up. */ - if (ret == -EBUSY && req->flags & CRYPTO_TFM_REQ_MAY_BACKLOG) + if (ret == -EBUSY) return false; /* Request wasn't queued, we need to clean it up */ -- 2.1.4 |
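The simplification is purely about return-code semantics; nothing changes in how requests are processed. Stated as a tiny helper (illustrative only):

#include <linux/errno.h>
#include <linux/types.h>

static bool example_request_in_flight(int ret)
{
        /*
         * Old check:
         *   ret == -EINPROGRESS ||
         *   (ret == -EBUSY && (req->flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
         *
         * With -EBUSY now reserved for "accepted onto the backlog",
         * the flag test adds nothing.
         */
        return ret == -EINPROGRESS || ret == -EBUSY;
}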