// SPDX-License-Identifier: GPL-2.0-only

/*
 * Copyright (C) 2021, Linaro Limited. All rights reserved.
 */
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/string.h>
#include <crypto/gcm.h>
#include <crypto/authenc.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/des.h>
#include <crypto/sha1.h>
#include <crypto/sha2.h>
#include <crypto/scatterwalk.h>
#include "aead.h"

#define CCM_NONCE_ADATA_SHIFT		6
#define CCM_NONCE_AUTHSIZE_SHIFT	3
#define MAX_CCM_ADATA_HEADER_LEN	6

static LIST_HEAD(aead_algs);

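/*
 * DMA completion callback: unmap the DMA buffers and free the scatter
 * tables built for the request, then copy out the computed tag on encrypt
 * or, for non-CCM decrypt, compare the computed tag with the one supplied
 * in the source buffer (a CCM MAC failure is flagged by the engine status
 * instead).
 */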
static void qce_aead_done(void *data)
{
	struct crypto_async_request *async_req = data;
	struct aead_request *req = aead_request_cast(async_req);
	struct qce_aead_reqctx *rctx = aead_request_ctx_dma(req);
	struct qce_aead_ctx *ctx = crypto_tfm_ctx(async_req->tfm);
	struct qce_alg_template *tmpl = to_aead_tmpl(crypto_aead_reqtfm(req));
	struct qce_device *qce = tmpl->qce;
	struct qce_result_dump *result_buf = qce->dma.result_buf;
	enum dma_data_direction dir_src, dir_dst;
	bool diff_dst;
	int error;
	u32 status;
	unsigned int totallen;
	unsigned char tag[SHA256_DIGEST_SIZE] = {0};

	diff_dst = req->src != req->dst;
	dir_src = diff_dst ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL;
	dir_dst = diff_dst ? DMA_FROM_DEVICE : DMA_BIDIRECTIONAL;

	error = qce_dma_terminate_all(&qce->dma);
	if (error)
		dev_dbg(qce->dev, "aead dma termination error (%d)\n", error);
	if (diff_dst)
		dma_unmap_sg(qce->dev, rctx->src_sg, rctx->src_nents, dir_src);

	dma_unmap_sg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst);

	if (IS_CCM(rctx->flags)) {
		if (req->assoclen) {
			sg_free_table(&rctx->src_tbl);
			if (diff_dst)
				sg_free_table(&rctx->dst_tbl);
		} else {
			if (!(IS_DECRYPT(rctx->flags) && !diff_dst))
				sg_free_table(&rctx->dst_tbl);
		}
	} else {
		sg_free_table(&rctx->dst_tbl);
	}

	error = qce_check_status(qce, &status);
	if (error < 0 && (error != -EBADMSG))
		dev_err(qce->dev, "aead operation error (%x)\n", status);

	if (IS_ENCRYPT(rctx->flags)) {
		totallen = req->cryptlen + req->assoclen;
		if (IS_CCM(rctx->flags))
			scatterwalk_map_and_copy(rctx->ccmresult_buf, req->dst,
						 totallen, ctx->authsize, 1);
		else
			scatterwalk_map_and_copy(result_buf->auth_iv, req->dst,
						 totallen, ctx->authsize, 1);
	} else if (!IS_CCM(rctx->flags)) {
		totallen = req->cryptlen + req->assoclen - ctx->authsize;
		scatterwalk_map_and_copy(tag, req->src, totallen, ctx->authsize, 0);
		if (memcmp(result_buf->auth_iv, tag, ctx->authsize)) {
			pr_err("Bad message error\n");
			error = -EBADMSG;
		}
	}

	qce->async_req_done(qce, error);
}

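/* Append the standard result dump buffer as the final scatterlist entry. */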
static struct scatterlist *
qce_aead_prepare_result_buf(struct sg_table *tbl, struct aead_request *req)
{
	struct qce_aead_reqctx *rctx = aead_request_ctx_dma(req);
	struct qce_alg_template *tmpl = to_aead_tmpl(crypto_aead_reqtfm(req));
	struct qce_device *qce = tmpl->qce;

	sg_init_one(&rctx->result_sg, qce->dma.result_buf, QCE_RESULT_BUF_SZ);
	return qce_sgtable_add(tbl, &rctx->result_sg, QCE_RESULT_BUF_SZ);
}

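/* For CCM, the engine writes the MAC through a separate burst-sized buffer. */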
static struct scatterlist *
qce_aead_prepare_ccm_result_buf(struct sg_table *tbl, struct aead_request *req)
{
	struct qce_aead_reqctx *rctx = aead_request_ctx_dma(req);

	sg_init_one(&rctx->result_sg, rctx->ccmresult_buf, QCE_BAM_BURST_SIZE);
	return qce_sgtable_add(tbl, &rctx->result_sg, QCE_BAM_BURST_SIZE);
}

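/*
 * Assemble the destination scatter table: the formatted associated data
 * (CCM with AAD only), the message area of req->dst, and finally the result
 * buffer the engine writes the tag into.
 */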
static struct scatterlist *
qce_aead_prepare_dst_buf(struct aead_request *req)
{
	struct qce_aead_reqctx *rctx = aead_request_ctx_dma(req);
	struct qce_alg_template *tmpl = to_aead_tmpl(crypto_aead_reqtfm(req));
	struct qce_device *qce = tmpl->qce;
	struct scatterlist *sg, *msg_sg, __sg[2];
	gfp_t gfp;
	unsigned int assoclen = req->assoclen;
	unsigned int totallen;
	int ret;

	totallen = rctx->cryptlen + assoclen;
	rctx->dst_nents = sg_nents_for_len(req->dst, totallen);
	if (rctx->dst_nents < 0) {
		dev_err(qce->dev, "Invalid number of dst SGs.\n");
		return ERR_PTR(-EINVAL);
	}
	if (IS_CCM(rctx->flags))
		rctx->dst_nents += 2;
	else
		rctx->dst_nents += 1;

	gfp = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
	       GFP_KERNEL : GFP_ATOMIC;
	ret = sg_alloc_table(&rctx->dst_tbl, rctx->dst_nents, gfp);
	if (ret)
		return ERR_PTR(ret);

	if (IS_CCM(rctx->flags) && assoclen) {
		/* Skip over the associated data in req->dst */
		msg_sg = scatterwalk_ffwd(__sg, req->dst, assoclen);

		/* Formatted associated data */
		sg = qce_sgtable_add(&rctx->dst_tbl, &rctx->adata_sg,
				     rctx->assoclen);
		if (IS_ERR(sg))
			goto dst_tbl_free;
		/* dst message buffer */
		sg = qce_sgtable_add(&rctx->dst_tbl, msg_sg, rctx->cryptlen);
		if (IS_ERR(sg))
			goto dst_tbl_free;
		totallen = rctx->cryptlen + rctx->assoclen;
	} else {
		if (totallen) {
			sg = qce_sgtable_add(&rctx->dst_tbl, req->dst, totallen);
			if (IS_ERR(sg))
				goto dst_tbl_free;
		}
	}
	if (IS_CCM(rctx->flags))
		sg = qce_aead_prepare_ccm_result_buf(&rctx->dst_tbl, req);
	else
		sg = qce_aead_prepare_result_buf(&rctx->dst_tbl, req);

	if (IS_ERR(sg))
		goto dst_tbl_free;

	sg_mark_end(sg);
	rctx->dst_sg = rctx->dst_tbl.sgl;
	rctx->dst_nents = sg_nents_for_len(rctx->dst_sg, totallen) + 1;

	return sg;

dst_tbl_free:
	sg_free_table(&rctx->dst_tbl);
	return sg;
}

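/*
 * CCM with AAD: the engine expects the associated data inline in the source,
 * prefixed with a 2- or 6-byte length header and zero-padded to a 16-byte
 * boundary (RFC 3610, NIST SP 800-38C). For example, assoclen 0x1234 is
 * encoded as the two bytes 0x12 0x34 placed immediately before the AAD.
 */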
static int
qce_aead_ccm_prepare_buf_assoclen(struct aead_request *req)
{
	struct scatterlist *sg, *msg_sg, __sg[2];
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct qce_aead_reqctx *rctx = aead_request_ctx_dma(req);
	struct qce_aead_ctx *ctx = crypto_aead_ctx(tfm);
	unsigned int assoclen = rctx->assoclen;
	unsigned int adata_header_len, cryptlen, totallen;
	gfp_t gfp;
	bool diff_dst;
	int ret;

	if (IS_DECRYPT(rctx->flags))
		cryptlen = rctx->cryptlen + ctx->authsize;
	else
		cryptlen = rctx->cryptlen;
	totallen = cryptlen + req->assoclen;

	/* Get the message */
	msg_sg = scatterwalk_ffwd(__sg, req->src, req->assoclen);

	rctx->adata = kzalloc((ALIGN(assoclen, 16) + MAX_CCM_ADATA_HEADER_LEN) *
			      sizeof(unsigned char), GFP_ATOMIC);
	if (!rctx->adata)
		return -ENOMEM;

	/*
	 * Format associated data (RFC 3610 and NIST SP 800-38C).
	 * Even though the specification allows AAD of up to 2^64 - 1 bytes,
	 * the assoclen field in aead_request is an unsigned int and thus
	 * limits the AAD to at most 2^32 - 1 bytes. So only two of the
	 * header encodings need to be handled here.
	 */
	if (assoclen < 0xff00) {
		adata_header_len = 2;
		*(__be16 *)rctx->adata = cpu_to_be16(assoclen);
	} else {
		adata_header_len = 6;
		*(__be16 *)rctx->adata = cpu_to_be16(0xfffe);
		*(__be32 *)(rctx->adata + 2) = cpu_to_be32(assoclen);
	}

	/* Copy the associated data */
	if (sg_copy_to_buffer(req->src, sg_nents_for_len(req->src, assoclen),
			      rctx->adata + adata_header_len,
			      assoclen) != assoclen)
		return -EINVAL;

	/* Pad associated data to the block size */
	rctx->assoclen = ALIGN(assoclen + adata_header_len, 16);

	diff_dst = req->src != req->dst;

	if (diff_dst)
		rctx->src_nents = sg_nents_for_len(req->src, totallen) + 1;
	else
		rctx->src_nents = sg_nents_for_len(req->src, totallen) + 2;

	gfp = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? GFP_KERNEL : GFP_ATOMIC;
	ret = sg_alloc_table(&rctx->src_tbl, rctx->src_nents, gfp);
	if (ret)
		return ret;

	/* Associated data */
	sg_init_one(&rctx->adata_sg, rctx->adata, rctx->assoclen);
	sg = qce_sgtable_add(&rctx->src_tbl, &rctx->adata_sg,
			     rctx->assoclen);
	if (IS_ERR(sg)) {
		ret = PTR_ERR(sg);
		goto err_free;
	}
	/* Source message */
	sg = qce_sgtable_add(&rctx->src_tbl, msg_sg, cryptlen);
	if (IS_ERR(sg)) {
		ret = PTR_ERR(sg);
		goto err_free;
	}
	if (!diff_dst) {
		/*
		 * For in-place decrypt, the buffer already has room for the
		 * padded zeroes that are output in lieu of the MAC that is
		 * input, so the result buffer can be skipped.
		 */
		if (!IS_DECRYPT(rctx->flags)) {
			sg = qce_aead_prepare_ccm_result_buf(&rctx->src_tbl, req);
			if (IS_ERR(sg)) {
				ret = PTR_ERR(sg);
				goto err_free;
			}
		}
	}
	sg_mark_end(sg);
	rctx->src_sg = rctx->src_tbl.sgl;
	totallen = cryptlen + rctx->assoclen;
	rctx->src_nents = sg_nents_for_len(rctx->src_sg, totallen);

	if (diff_dst) {
		sg = qce_aead_prepare_dst_buf(req);
		if (IS_ERR(sg)) {
			ret = PTR_ERR(sg);
			goto err_free;
		}
	} else {
		if (IS_ENCRYPT(rctx->flags))
			rctx->dst_nents = rctx->src_nents + 1;
		else
			rctx->dst_nents = rctx->src_nents;
		rctx->dst_sg = rctx->src_sg;
	}

	return 0;
err_free:
	sg_free_table(&rctx->src_tbl);
	return ret;
}

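/*
 * Common (non-CCM, and CCM encrypt without AAD) buffer setup: build the
 * destination table and take the source either straight from req->src or,
 * for in-place requests, from the destination table just built.
 */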
static int qce_aead_prepare_buf(struct aead_request *req)
{
	struct qce_aead_reqctx *rctx = aead_request_ctx_dma(req);
	struct qce_alg_template *tmpl = to_aead_tmpl(crypto_aead_reqtfm(req));
	struct qce_device *qce = tmpl->qce;
	struct scatterlist *sg;
	bool diff_dst = req->src != req->dst;
	unsigned int totallen;

	totallen = rctx->cryptlen + rctx->assoclen;

	sg = qce_aead_prepare_dst_buf(req);
	if (IS_ERR(sg))
		return PTR_ERR(sg);
	if (diff_dst) {
		rctx->src_nents = sg_nents_for_len(req->src, totallen);
		if (rctx->src_nents < 0) {
			dev_err(qce->dev, "Invalid number of src SGs.\n");
			return -EINVAL;
		}
		rctx->src_sg = req->src;
	} else {
		rctx->src_nents = rctx->dst_nents - 1;
		rctx->src_sg = rctx->dst_sg;
	}
	return 0;
}

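/*
 * CCM buffer setup dispatcher: requests carrying AAD need the formatted
 * associated-data path, encrypt without AAD uses the common path, and
 * decrypt without AAD is handled inline below.
 */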
static int qce_aead_ccm_prepare_buf(struct aead_request *req)
{
	struct qce_aead_reqctx *rctx = aead_request_ctx_dma(req);
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct qce_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct scatterlist *sg;
	bool diff_dst = req->src != req->dst;
	unsigned int cryptlen;

	if (rctx->assoclen)
		return qce_aead_ccm_prepare_buf_assoclen(req);

	if (IS_ENCRYPT(rctx->flags))
		return qce_aead_prepare_buf(req);

	cryptlen = rctx->cryptlen + ctx->authsize;
	if (diff_dst) {
		rctx->src_nents = sg_nents_for_len(req->src, cryptlen);
		rctx->src_sg = req->src;
		sg = qce_aead_prepare_dst_buf(req);
		if (IS_ERR(sg))
			return PTR_ERR(sg);
	} else {
		rctx->src_nents = sg_nents_for_len(req->src, cryptlen);
		rctx->src_sg = req->src;
		rctx->dst_nents = rctx->src_nents;
		rctx->dst_sg = rctx->src_sg;
	}

	return 0;
}

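/*
 * Build the CCM B_0 flags/nonce block from the IV (RFC 3610, section 2.2):
 * bit 6 is set when AAD is present, bits 5..3 encode (authsize - 2) / 2,
 * and iv[0] already carries L' = (size of the length field) - 1. For
 * example, with AAD present, an 8-byte tag and iv[0] = 3, the flags octet
 * is 0x40 | (((8 - 2) / 2) << 3) | 3 = 0x5b.
 */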
static int qce_aead_create_ccm_nonce(struct qce_aead_reqctx *rctx, struct qce_aead_ctx *ctx)
{
	unsigned int msglen_size, ivsize;
	u8 msg_len[4];
	int i;

	if (!rctx || !rctx->iv)
		return -EINVAL;

	msglen_size = rctx->iv[0] + 1;

	/* Verify that the msg_len field size is valid */
	if (msglen_size < 2 || msglen_size > 8)
		return -EINVAL;

	ivsize = rctx->ivsize;

	/*
	 * Clear the msglen bytes in the IV. Otherwise the hardware engine
	 * and nonce would pick up whatever stray value is pending there.
	 */
	if (!IS_CCM_RFC4309(rctx->flags)) {
		for (i = 0; i < msglen_size; i++)
			rctx->iv[ivsize - i - 1] = 0;
	}

	/*
	 * The crypto framework encodes cryptlen as an unsigned int. Thus,
	 * even though the spec allows up to 8 bytes to encode msg_len, only
	 * 4 bytes are ever needed.
	 */
	if (msglen_size > 4)
		msglen_size = 4;

	memcpy(&msg_len[0], &rctx->cryptlen, 4);

	memcpy(&rctx->ccm_nonce[0], rctx->iv, rctx->ivsize);
	if (rctx->assoclen)
		rctx->ccm_nonce[0] |= 1 << CCM_NONCE_ADATA_SHIFT;
	rctx->ccm_nonce[0] |= ((ctx->authsize - 2) / 2) <<
			      CCM_NONCE_AUTHSIZE_SHIFT;
	for (i = 0; i < msglen_size; i++)
		rctx->ccm_nonce[QCE_MAX_NONCE - i - 1] = msg_len[i];

	return 0;
}

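/*
 * Main request handler, run from the engine queue: derive the IV and
 * effective assoclen, build the source and destination scatter tables,
 * map them for DMA and kick off the hardware.
 */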
static int
qce_aead_async_req_handle(struct crypto_async_request *async_req)
{
	struct aead_request *req = aead_request_cast(async_req);
	struct qce_aead_reqctx *rctx = aead_request_ctx_dma(req);
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct qce_aead_ctx *ctx = crypto_tfm_ctx(async_req->tfm);
	struct qce_alg_template *tmpl = to_aead_tmpl(crypto_aead_reqtfm(req));
	struct qce_device *qce = tmpl->qce;
	enum dma_data_direction dir_src, dir_dst;
	bool diff_dst;
	int dst_nents, src_nents, ret;

	if (IS_CCM_RFC4309(rctx->flags)) {
		memset(rctx->ccm_rfc4309_iv, 0, QCE_MAX_IV_SIZE);
		rctx->ccm_rfc4309_iv[0] = 3;
		memcpy(&rctx->ccm_rfc4309_iv[1], ctx->ccm4309_salt, QCE_CCM4309_SALT_SIZE);
		memcpy(&rctx->ccm_rfc4309_iv[4], req->iv, 8);
		rctx->iv = rctx->ccm_rfc4309_iv;
		rctx->ivsize = AES_BLOCK_SIZE;
		rctx->assoclen = req->assoclen - 8;
	} else {
		rctx->iv = req->iv;
		rctx->ivsize = crypto_aead_ivsize(tfm);
		rctx->assoclen = req->assoclen;
	}

	diff_dst = req->src != req->dst;
	dir_src = diff_dst ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL;
	dir_dst = diff_dst ? DMA_FROM_DEVICE : DMA_BIDIRECTIONAL;

	if (IS_CCM(rctx->flags)) {
		ret = qce_aead_create_ccm_nonce(rctx, ctx);
		if (ret)
			return ret;
		ret = qce_aead_ccm_prepare_buf(req);
	} else {
		ret = qce_aead_prepare_buf(req);
	}
	if (ret)
		return ret;

	dst_nents = dma_map_sg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst);
	if (!dst_nents) {
		ret = -EIO;
		goto error_free;
	}

	if (diff_dst) {
		src_nents = dma_map_sg(qce->dev, rctx->src_sg, rctx->src_nents, dir_src);
		if (!src_nents) {
			ret = -EIO;
			goto error_unmap_dst;
		}
	} else {
		if (IS_CCM(rctx->flags) && IS_DECRYPT(rctx->flags))
			src_nents = dst_nents;
		else
			src_nents = dst_nents - 1;
	}

	ret = qce_dma_prep_sgs(&qce->dma, rctx->src_sg, src_nents, rctx->dst_sg, dst_nents,
			       qce_aead_done, async_req);
	if (ret)
		goto error_unmap_src;

	qce_dma_issue_pending(&qce->dma);

	ret = qce_start(async_req, tmpl->crypto_alg_type);
	if (ret)
		goto error_terminate;

	return 0;

error_terminate:
	qce_dma_terminate_all(&qce->dma);
error_unmap_src:
	if (diff_dst)
		dma_unmap_sg(qce->dev, req->src, rctx->src_nents, dir_src);
error_unmap_dst:
	dma_unmap_sg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst);
error_free:
	if (IS_CCM(rctx->flags) && rctx->assoclen) {
		sg_free_table(&rctx->src_tbl);
		if (diff_dst)
			sg_free_table(&rctx->dst_tbl);
	} else {
		sg_free_table(&rctx->dst_tbl);
	}
	return ret;
}

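/*
 * Common encrypt/decrypt entry point: route the cases the engine cannot
 * handle to the fallback cipher, validate the request lengths, and enqueue
 * everything else on the engine's request queue.
 */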
static int qce_aead_crypt(struct aead_request *req, int encrypt)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct qce_aead_reqctx *rctx = aead_request_ctx_dma(req);
	struct qce_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct qce_alg_template *tmpl = to_aead_tmpl(tfm);
	unsigned int blocksize = crypto_aead_blocksize(tfm);

	rctx->flags = tmpl->alg_flags;
	rctx->flags |= encrypt ? QCE_ENCRYPT : QCE_DECRYPT;

	if (encrypt)
		rctx->cryptlen = req->cryptlen;
	else
		rctx->cryptlen = req->cryptlen - ctx->authsize;

	/* The crypto engine does not handle zero-length messages */
	if (!rctx->cryptlen) {
		if (!(IS_CCM(rctx->flags) && IS_DECRYPT(rctx->flags)))
			ctx->need_fallback = true;
	}

	/* If fallback is needed, schedule it and exit */
	if (ctx->need_fallback) {
		/* Reset need_fallback in case the same ctx is used for another transaction */
		ctx->need_fallback = false;

		aead_request_set_tfm(&rctx->fallback_req, ctx->fallback);
		aead_request_set_callback(&rctx->fallback_req, req->base.flags,
					  req->base.complete, req->base.data);
		aead_request_set_crypt(&rctx->fallback_req, req->src,
				       req->dst, req->cryptlen, req->iv);
		aead_request_set_ad(&rctx->fallback_req, req->assoclen);

		return encrypt ? crypto_aead_encrypt(&rctx->fallback_req) :
				 crypto_aead_decrypt(&rctx->fallback_req);
	}

	/*
	 * CBC algorithms require message lengths to be
	 * multiples of the block size.
	 */
	if (IS_CBC(rctx->flags) && !IS_ALIGNED(rctx->cryptlen, blocksize))
		return -EINVAL;

	/* RFC 4309 only supports AAD sizes of 16 or 20 bytes */
	if (IS_CCM_RFC4309(rctx->flags))
		if (crypto_ipsec_check_assoclen(req->assoclen))
			return -EINVAL;

	return tmpl->qce->async_req_enqueue(tmpl->qce, &req->base);
}

static int qce_aead_encrypt(struct aead_request *req)
{
	return qce_aead_crypt(req, 1);
}

static int qce_aead_decrypt(struct aead_request *req)
{
	return qce_aead_crypt(req, 0);
}

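/*
 * CCM setkey: for RFC 4309, the last QCE_CCM4309_SALT_SIZE bytes of the key
 * carry the implicit nonce salt. AES-192 keys are accepted but routed to
 * the fallback, as the engine does not support them.
 */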
static int qce_aead_ccm_setkey(struct crypto_aead *tfm, const u8 *key,
			       unsigned int keylen)
{
	struct qce_aead_ctx *ctx = crypto_aead_ctx(tfm);
	unsigned long flags = to_aead_tmpl(tfm)->alg_flags;

	if (IS_CCM_RFC4309(flags)) {
		if (keylen < QCE_CCM4309_SALT_SIZE)
			return -EINVAL;
		keylen -= QCE_CCM4309_SALT_SIZE;
		memcpy(ctx->ccm4309_salt, key + keylen, QCE_CCM4309_SALT_SIZE);
	}

	if (keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_192 &&
	    keylen != AES_KEYSIZE_256)
		return -EINVAL;

	ctx->enc_keylen = keylen;
	ctx->auth_keylen = keylen;

	memcpy(ctx->enc_key, key, keylen);
	memcpy(ctx->auth_key, key, keylen);

	if (keylen == AES_KEYSIZE_192)
		ctx->need_fallback = true;

	return IS_CCM_RFC4309(flags) ?
		crypto_aead_setkey(ctx->fallback, key, keylen + QCE_CCM4309_SALT_SIZE) :
		crypto_aead_setkey(ctx->fallback, key, keylen);
}

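/*
 * authenc() setkey: split the combined authentication/encryption key and
 * validate the cipher key; combinations the engine cannot process (weak
 * 3DES key patterns, AES-192) are marked for fallback.
 */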
static int qce_aead_setkey(struct crypto_aead *tfm, const u8 *key, unsigned int keylen)
{
	struct qce_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct crypto_authenc_keys authenc_keys;
	unsigned long flags = to_aead_tmpl(tfm)->alg_flags;
	u32 _key[6];
	int err;

	err = crypto_authenc_extractkeys(&authenc_keys, key, keylen);
	if (err)
		return err;

	if (authenc_keys.enckeylen > QCE_MAX_KEY_SIZE ||
	    authenc_keys.authkeylen > QCE_MAX_KEY_SIZE)
		return -EINVAL;

	if (IS_DES(flags)) {
		err = verify_aead_des_key(tfm, authenc_keys.enckey, authenc_keys.enckeylen);
		if (err)
			return err;
	} else if (IS_3DES(flags)) {
		err = verify_aead_des3_key(tfm, authenc_keys.enckey, authenc_keys.enckeylen);
		if (err)
			return err;
		/*
		 * The crypto engine does not support any two keys
		 * being the same for triple-DES algorithms, and
		 * verify_aead_des3_key() does not check for all of
		 * the conditions below. Schedule fallback in this case.
		 */
		memcpy(_key, authenc_keys.enckey, DES3_EDE_KEY_SIZE);
		if (!((_key[0] ^ _key[2]) | (_key[1] ^ _key[3])) ||
		    !((_key[2] ^ _key[4]) | (_key[3] ^ _key[5])) ||
		    !((_key[0] ^ _key[4]) | (_key[1] ^ _key[5])))
			ctx->need_fallback = true;
	} else if (IS_AES(flags)) {
		/* No arbitrary key sizes */
		if (authenc_keys.enckeylen != AES_KEYSIZE_128 &&
		    authenc_keys.enckeylen != AES_KEYSIZE_192 &&
		    authenc_keys.enckeylen != AES_KEYSIZE_256)
			return -EINVAL;
		if (authenc_keys.enckeylen == AES_KEYSIZE_192)
			ctx->need_fallback = true;
	}

	ctx->enc_keylen = authenc_keys.enckeylen;
	ctx->auth_keylen = authenc_keys.authkeylen;

	memcpy(ctx->enc_key, authenc_keys.enckey, authenc_keys.enckeylen);

	memcpy_and_pad(ctx->auth_key, sizeof(ctx->auth_key),
		       authenc_keys.authkey, authenc_keys.authkeylen, 0);

	return crypto_aead_setkey(ctx->fallback, key, keylen);
}

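/* Enforce the CCM and RFC 4309 constraints on tag size before caching it. */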
static int qce_aead_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
{
	struct qce_aead_ctx *ctx = crypto_aead_ctx(tfm);
	unsigned long flags = to_aead_tmpl(tfm)->alg_flags;

	if (IS_CCM(flags)) {
		if (authsize < 4 || authsize > 16 || authsize % 2)
			return -EINVAL;
		if (IS_CCM_RFC4309(flags) && (authsize < 8 || authsize % 4))
			return -EINVAL;
	}
	ctx->authsize = authsize;

	return crypto_aead_setauthsize(ctx->fallback, authsize);
}

static int qce_aead_init(struct crypto_aead *tfm)
{
	struct qce_aead_ctx *ctx = crypto_aead_ctx(tfm);

	ctx->need_fallback = false;
	ctx->fallback = crypto_alloc_aead(crypto_tfm_alg_name(&tfm->base),
					  0, CRYPTO_ALG_NEED_FALLBACK);

	if (IS_ERR(ctx->fallback))
		return PTR_ERR(ctx->fallback);

	crypto_aead_set_reqsize_dma(tfm, sizeof(struct qce_aead_reqctx) +
					 crypto_aead_reqsize(ctx->fallback));
	return 0;
}

static void qce_aead_exit(struct crypto_aead *tfm)
{
	struct qce_aead_ctx *ctx = crypto_aead_ctx(tfm);

	crypto_free_aead(ctx->fallback);
}

struct qce_aead_def {
	unsigned long flags;
	const char *name;
	const char *drv_name;
	unsigned int blocksize;
	unsigned int chunksize;
	unsigned int ivsize;
	unsigned int maxauthsize;
};

static const struct qce_aead_def aead_def[] = {
	{
		.flags		= QCE_ALG_DES | QCE_MODE_CBC | QCE_HASH_SHA1_HMAC,
		.name		= "authenc(hmac(sha1),cbc(des))",
		.drv_name	= "authenc-hmac-sha1-cbc-des-qce",
		.blocksize	= DES_BLOCK_SIZE,
		.ivsize		= DES_BLOCK_SIZE,
		.maxauthsize	= SHA1_DIGEST_SIZE,
	},
	{
		.flags		= QCE_ALG_3DES | QCE_MODE_CBC | QCE_HASH_SHA1_HMAC,
		.name		= "authenc(hmac(sha1),cbc(des3_ede))",
		.drv_name	= "authenc-hmac-sha1-cbc-3des-qce",
		.blocksize	= DES3_EDE_BLOCK_SIZE,
		.ivsize		= DES3_EDE_BLOCK_SIZE,
		.maxauthsize	= SHA1_DIGEST_SIZE,
	},
	{
		.flags		= QCE_ALG_DES | QCE_MODE_CBC | QCE_HASH_SHA256_HMAC,
		.name		= "authenc(hmac(sha256),cbc(des))",
		.drv_name	= "authenc-hmac-sha256-cbc-des-qce",
		.blocksize	= DES_BLOCK_SIZE,
		.ivsize		= DES_BLOCK_SIZE,
		.maxauthsize	= SHA256_DIGEST_SIZE,
	},
	{
		.flags		= QCE_ALG_3DES | QCE_MODE_CBC | QCE_HASH_SHA256_HMAC,
		.name		= "authenc(hmac(sha256),cbc(des3_ede))",
		.drv_name	= "authenc-hmac-sha256-cbc-3des-qce",
		.blocksize	= DES3_EDE_BLOCK_SIZE,
		.ivsize		= DES3_EDE_BLOCK_SIZE,
		.maxauthsize	= SHA256_DIGEST_SIZE,
	},
	{
		.flags		= QCE_ALG_AES | QCE_MODE_CBC | QCE_HASH_SHA256_HMAC,
		.name		= "authenc(hmac(sha256),cbc(aes))",
		.drv_name	= "authenc-hmac-sha256-cbc-aes-qce",
		.blocksize	= AES_BLOCK_SIZE,
		.ivsize		= AES_BLOCK_SIZE,
		.maxauthsize	= SHA256_DIGEST_SIZE,
	},
	{
		.flags		= QCE_ALG_AES | QCE_MODE_CCM,
		.name		= "ccm(aes)",
		.drv_name	= "ccm-aes-qce",
		.blocksize	= 1,
		.ivsize		= AES_BLOCK_SIZE,
		.maxauthsize	= AES_BLOCK_SIZE,
	},
	{
		.flags		= QCE_ALG_AES | QCE_MODE_CCM | QCE_MODE_CCM_RFC4309,
		.name		= "rfc4309(ccm(aes))",
		.drv_name	= "rfc4309-ccm-aes-qce",
		.blocksize	= 1,
		.ivsize		= 8,
		.maxauthsize	= AES_BLOCK_SIZE,
	},
};

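/* Allocate one template per definition, wire up the callbacks and register it. */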
static int qce_aead_register_one(const struct qce_aead_def *def, struct qce_device *qce)
{
	struct qce_alg_template *tmpl;
	struct aead_alg *alg;
	int ret;

	tmpl = kzalloc(sizeof(*tmpl), GFP_KERNEL);
	if (!tmpl)
		return -ENOMEM;

	alg = &tmpl->alg.aead;

	strscpy(alg->base.cra_name, def->name);
	strscpy(alg->base.cra_driver_name, def->drv_name);

	alg->base.cra_blocksize = def->blocksize;
	alg->chunksize = def->chunksize;
	alg->ivsize = def->ivsize;
	alg->maxauthsize = def->maxauthsize;
	if (IS_CCM(def->flags))
		alg->setkey = qce_aead_ccm_setkey;
	else
		alg->setkey = qce_aead_setkey;
	alg->setauthsize = qce_aead_setauthsize;
	alg->encrypt = qce_aead_encrypt;
	alg->decrypt = qce_aead_decrypt;
	alg->init = qce_aead_init;
	alg->exit = qce_aead_exit;

	alg->base.cra_priority = 275;
	alg->base.cra_flags = CRYPTO_ALG_ASYNC |
			      CRYPTO_ALG_ALLOCATES_MEMORY |
			      CRYPTO_ALG_KERN_DRIVER_ONLY |
			      CRYPTO_ALG_NEED_FALLBACK;
	alg->base.cra_ctxsize = sizeof(struct qce_aead_ctx);
	alg->base.cra_alignmask = 0;
	alg->base.cra_module = THIS_MODULE;

	INIT_LIST_HEAD(&tmpl->entry);
	tmpl->crypto_alg_type = CRYPTO_ALG_TYPE_AEAD;
	tmpl->alg_flags = def->flags;
	tmpl->qce = qce;

	ret = crypto_register_aead(alg);
	if (ret) {
		dev_err(qce->dev, "%s registration failed\n", alg->base.cra_name);
		kfree(tmpl);
		return ret;
	}

	list_add_tail(&tmpl->entry, &aead_algs);
	dev_dbg(qce->dev, "%s is registered\n", alg->base.cra_name);
	return 0;
}

static void qce_aead_unregister(struct qce_device *qce)
{
	struct qce_alg_template *tmpl, *n;

	list_for_each_entry_safe(tmpl, n, &aead_algs, entry) {
		crypto_unregister_aead(&tmpl->alg.aead);
		list_del(&tmpl->entry);
		kfree(tmpl);
	}
}

static int qce_aead_register(struct qce_device *qce)
{
	int ret, i;

	for (i = 0; i < ARRAY_SIZE(aead_def); i++) {
		ret = qce_aead_register_one(&aead_def[i], qce);
		if (ret)
			goto err;
	}

	return 0;
err:
	qce_aead_unregister(qce);
	return ret;
}

const struct qce_algo_ops aead_ops = {
	.type = CRYPTO_ALG_TYPE_AEAD,
	.register_algs = qce_aead_register,
	.unregister_algs = qce_aead_unregister,
	.async_req_handle = qce_aead_async_req_handle,
};