// SPDX-License-Identifier: GPL-2.0-only
/*
 * Driver for ARTPEC-6 crypto block using the kernel asynchronous crypto api.
 *
 * Copyright (C) 2014-2017 Axis Communications AB
 */
#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt

#include <linux/bitfield.h>
#include <linux/crypto.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/fault-inject.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

#include <crypto/aes.h>
#include <crypto/gcm.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <crypto/sha1.h>
#include <crypto/sha2.h>
#include <crypto/xts.h>

/* Max length of a line in all cache levels for Artpec SoCs. */
#define ARTPEC_CACHE_LINE_MAX	32

#define PDMA_OUT_CFG		0x0000
#define PDMA_OUT_BUF_CFG	0x0004
#define PDMA_OUT_CMD		0x0008
#define PDMA_OUT_DESCRQ_PUSH	0x0010
#define PDMA_OUT_DESCRQ_STAT	0x0014

#define A6_PDMA_IN_CFG		0x0028
#define A6_PDMA_IN_BUF_CFG	0x002c
#define A6_PDMA_IN_CMD		0x0030
#define A6_PDMA_IN_STATQ_PUSH	0x0038
#define A6_PDMA_IN_DESCRQ_PUSH	0x0044
#define A6_PDMA_IN_DESCRQ_STAT	0x0048
#define A6_PDMA_INTR_MASK	0x0068
#define A6_PDMA_ACK_INTR	0x006c
#define A6_PDMA_MASKED_INTR	0x0074

#define A7_PDMA_IN_CFG		0x002c
#define A7_PDMA_IN_BUF_CFG	0x0030
#define A7_PDMA_IN_CMD		0x0034
#define A7_PDMA_IN_STATQ_PUSH	0x003c
#define A7_PDMA_IN_DESCRQ_PUSH	0x0048
#define A7_PDMA_IN_DESCRQ_STAT	0x004C
#define A7_PDMA_INTR_MASK	0x006c
#define A7_PDMA_ACK_INTR	0x0070
#define A7_PDMA_MASKED_INTR	0x0078

#define PDMA_OUT_CFG_EN				BIT(0)

#define PDMA_OUT_BUF_CFG_DATA_BUF_SIZE		GENMASK(4, 0)
#define PDMA_OUT_BUF_CFG_DESCR_BUF_SIZE		GENMASK(9, 5)

#define PDMA_OUT_CMD_START			BIT(0)
#define A6_PDMA_OUT_CMD_STOP			BIT(3)
#define A7_PDMA_OUT_CMD_STOP			BIT(2)

#define PDMA_OUT_DESCRQ_PUSH_LEN		GENMASK(5, 0)
#define PDMA_OUT_DESCRQ_PUSH_ADDR		GENMASK(31, 6)

#define PDMA_OUT_DESCRQ_STAT_LEVEL		GENMASK(3, 0)
#define PDMA_OUT_DESCRQ_STAT_SIZE		GENMASK(7, 4)

#define PDMA_IN_CFG_EN				BIT(0)

#define PDMA_IN_BUF_CFG_DATA_BUF_SIZE		GENMASK(4, 0)
#define PDMA_IN_BUF_CFG_DESCR_BUF_SIZE		GENMASK(9, 5)
#define PDMA_IN_BUF_CFG_STAT_BUF_SIZE		GENMASK(14, 10)

#define PDMA_IN_CMD_START			BIT(0)
#define A6_PDMA_IN_CMD_FLUSH_STAT		BIT(2)
#define A6_PDMA_IN_CMD_STOP			BIT(3)
#define A7_PDMA_IN_CMD_FLUSH_STAT		BIT(1)
#define A7_PDMA_IN_CMD_STOP			BIT(2)

#define PDMA_IN_STATQ_PUSH_LEN			GENMASK(5, 0)
#define PDMA_IN_STATQ_PUSH_ADDR			GENMASK(31, 6)

#define PDMA_IN_DESCRQ_PUSH_LEN			GENMASK(5, 0)
#define PDMA_IN_DESCRQ_PUSH_ADDR		GENMASK(31, 6)

#define PDMA_IN_DESCRQ_STAT_LEVEL		GENMASK(3, 0)
#define PDMA_IN_DESCRQ_STAT_SIZE		GENMASK(7, 4)

#define A6_PDMA_INTR_MASK_IN_DATA		BIT(2)
#define A6_PDMA_INTR_MASK_IN_EOP		BIT(3)
#define A6_PDMA_INTR_MASK_IN_EOP_FLUSH		BIT(4)

#define A7_PDMA_INTR_MASK_IN_DATA		BIT(3)
#define A7_PDMA_INTR_MASK_IN_EOP		BIT(4)
#define A7_PDMA_INTR_MASK_IN_EOP_FLUSH		BIT(5)

#define A6_CRY_MD_OPER		GENMASK(19, 16)

#define A6_CRY_MD_HASH_SEL_CTX	GENMASK(21, 20)
#define A6_CRY_MD_HASH_HMAC_FIN	BIT(23)

#define A6_CRY_MD_CIPHER_LEN	GENMASK(21, 20)
#define A6_CRY_MD_CIPHER_DECR	BIT(22)
#define A6_CRY_MD_CIPHER_TWEAK	BIT(23)
#define A6_CRY_MD_CIPHER_DSEQ	BIT(24)

#define A7_CRY_MD_OPER		GENMASK(11, 8)

#define A7_CRY_MD_HASH_SEL_CTX	GENMASK(13, 12)
#define A7_CRY_MD_HASH_HMAC_FIN	BIT(15)

#define A7_CRY_MD_CIPHER_LEN	GENMASK(13, 12)
#define A7_CRY_MD_CIPHER_DECR	BIT(14)
#define A7_CRY_MD_CIPHER_TWEAK	BIT(15)
#define A7_CRY_MD_CIPHER_DSEQ	BIT(16)

/* DMA metadata constants */
#define regk_crypto_aes_cbc	0x00000002
#define regk_crypto_aes_ctr	0x00000003
#define regk_crypto_aes_ecb	0x00000001
#define regk_crypto_aes_gcm	0x00000004
#define regk_crypto_aes_xts	0x00000005
#define regk_crypto_cache	0x00000002
#define a6_regk_crypto_dlkey	0x0000000a
#define a7_regk_crypto_dlkey	0x0000000e
#define regk_crypto_ext		0x00000001
#define regk_crypto_hmac_sha1	0x00000007
#define regk_crypto_hmac_sha256	0x00000009
#define regk_crypto_init	0x00000000
#define regk_crypto_key_128	0x00000000
#define regk_crypto_key_192	0x00000001
#define regk_crypto_key_256	0x00000002
#define regk_crypto_null	0x00000000
#define regk_crypto_sha1	0x00000006
#define regk_crypto_sha256	0x00000008

/* DMA descriptor structures */
struct pdma_descr_ctrl {
	unsigned char short_descr : 1;
	unsigned char pad1 : 1;
	unsigned char eop : 1;
	unsigned char intr : 1;
	unsigned char short_len : 3;
	unsigned char pad2 : 1;
} __packed;

struct pdma_data_descr {
	unsigned int len : 24;
	unsigned int buf : 32;
} __packed;

struct pdma_short_descr {
	unsigned char data[7];
} __packed;

struct pdma_descr {
	struct pdma_descr_ctrl ctrl;
	union {
		struct pdma_data_descr data;
		struct pdma_short_descr shrt;
	};
};
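
/*
 * With the __packed control byte and 7-byte union members above, each
 * pdma_descr is expected to occupy exactly 8 bytes, so a full descriptor
 * array is 64 * 8 = 512 bytes (an inference from the layout, not a figure
 * from hardware documentation).
 */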

struct pdma_stat_descr {
	unsigned char pad1 : 1;
	unsigned char pad2 : 1;
	unsigned char eop : 1;
	unsigned char pad3 : 5;
	unsigned int len : 24;
};

/* Each descriptor array can hold max 64 entries */
#define PDMA_DESCR_COUNT	64

#define MODULE_NAME		"Artpec-6 CA"

/* Hash modes (including HMAC variants) */
#define ARTPEC6_CRYPTO_HASH_SHA1	1
#define ARTPEC6_CRYPTO_HASH_SHA256	2

/* Crypto modes */
#define ARTPEC6_CRYPTO_CIPHER_AES_ECB	1
#define ARTPEC6_CRYPTO_CIPHER_AES_CBC	2
#define ARTPEC6_CRYPTO_CIPHER_AES_CTR	3
#define ARTPEC6_CRYPTO_CIPHER_AES_XTS	5

/* The PDMA is a DMA-engine tightly coupled with a ciphering engine.
 * It operates on a descriptor array with up to 64 descriptor entries.
 * The arrays must be 64 byte aligned in memory.
 *
 * The ciphering unit has no registers and is completely controlled by
 * a 4-byte metadata that is inserted at the beginning of each dma packet.
 *
 * A dma packet is a sequence of descriptors terminated by setting the .eop
 * field in the final descriptor of the packet.
 *
 * Multiple packets are used for providing context data, key data and
 * the plain/ciphertext.
 *
 *   PDMA Descriptors (Array)
 *  +------+------+------+~~+-------+------+----
 *  |  0   |  1   |  2   |~~| 11 EOP|  12  |  ....
 *  +--+---+--+---+----+-+~~+-------+----+-+----
 *     |      |        |          |       |
 *     |      |        |          |       |
 *   __|__  +-------++-------++-------+ +----+
 *  | MD  | |Payload||Payload||Payload| | MD |
 *  +-----+ +-------++-------++-------+ +----+
 */
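
/*
 * Illustrative example (not from hardware documentation): the metadata word
 * for an AES-CBC decryption with a 128-bit key on ARTPEC-6 would be composed
 * as
 *
 *	md = FIELD_PREP(A6_CRY_MD_OPER, regk_crypto_aes_cbc) |
 *	     FIELD_PREP(A6_CRY_MD_CIPHER_LEN, regk_crypto_key_128) |
 *	     A6_CRY_MD_CIPHER_DECR;
 *
 * which is exactly what artpec6_crypto_prepare_crypto() below does.
 */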

struct artpec6_crypto_bounce_buffer {
	struct list_head list;
	size_t length;
	struct scatterlist *sg;
	size_t offset;
	/* buf is aligned to ARTPEC_CACHE_LINE_MAX and
	 * holds up to ARTPEC_CACHE_LINE_MAX bytes data.
	 */
	void *buf;
};

struct artpec6_crypto_dma_map {
	dma_addr_t dma_addr;
	size_t size;
	enum dma_data_direction dir;
};

struct artpec6_crypto_dma_descriptors {
	struct pdma_descr out[PDMA_DESCR_COUNT] __aligned(64);
	struct pdma_descr in[PDMA_DESCR_COUNT] __aligned(64);
	u32 stat[PDMA_DESCR_COUNT] __aligned(64);
	struct list_head bounce_buffers;
	/* Enough maps for all out/in buffers, and all three descr. arrays */
	struct artpec6_crypto_dma_map maps[PDMA_DESCR_COUNT * 2 + 2];
	dma_addr_t out_dma_addr;
	dma_addr_t in_dma_addr;
	dma_addr_t stat_dma_addr;
	size_t out_cnt;
	size_t in_cnt;
	size_t map_count;
};

enum artpec6_crypto_variant {
	ARTPEC6_CRYPTO = 1,
	ARTPEC7_CRYPTO,
};

struct artpec6_crypto {
	void __iomem *base;
	spinlock_t queue_lock;
	struct list_head queue; /* waiting for pdma fifo space */
	struct list_head pending; /* submitted to pdma fifo */
	struct tasklet_struct task;
	struct kmem_cache *dma_cache;
	int pending_count;
	struct timer_list timer;
	enum artpec6_crypto_variant variant;
	void *pad_buffer; /* cache-aligned block padding buffer */
	void *zero_buffer;
};

enum artpec6_crypto_hash_flags {
	HASH_FLAG_INIT_CTX = 2,
	HASH_FLAG_UPDATE = 4,
	HASH_FLAG_FINALIZE = 8,
	HASH_FLAG_HMAC = 16,
	HASH_FLAG_UPDATE_KEY = 32,
};

struct artpec6_crypto_req_common {
	struct list_head list;
	struct list_head complete_in_progress;
	struct artpec6_crypto_dma_descriptors *dma;
	struct crypto_async_request *req;
	void (*complete)(struct crypto_async_request *req);
	gfp_t gfp_flags;
};

struct artpec6_hash_request_context {
	char partial_buffer[SHA256_BLOCK_SIZE];
	char partial_buffer_out[SHA256_BLOCK_SIZE];
	char key_buffer[SHA256_BLOCK_SIZE];
	char pad_buffer[SHA256_BLOCK_SIZE + 32];
	unsigned char digeststate[SHA256_DIGEST_SIZE];
	size_t partial_bytes;
	u64 digcnt;
	u32 key_md;
	u32 hash_md;
	enum artpec6_crypto_hash_flags hash_flags;
	struct artpec6_crypto_req_common common;
};

struct artpec6_hash_export_state {
	char partial_buffer[SHA256_BLOCK_SIZE];
	unsigned char digeststate[SHA256_DIGEST_SIZE];
	size_t partial_bytes;
	u64 digcnt;
	int oper;
	unsigned int hash_flags;
};

struct artpec6_hashalg_context {
	char hmac_key[SHA256_BLOCK_SIZE];
	size_t hmac_key_length;
	struct crypto_shash *child_hash;
};

struct artpec6_crypto_request_context {
	u32 cipher_md;
	bool decrypt;
	struct artpec6_crypto_req_common common;
};

struct artpec6_cryptotfm_context {
	unsigned char aes_key[2*AES_MAX_KEY_SIZE];
	size_t key_length;
	u32 key_md;
	int crypto_type;
	struct crypto_sync_skcipher *fallback;
};

struct artpec6_crypto_aead_hw_ctx {
	__be64 aad_length_bits;
	__be64 text_length_bits;
	__u8 J0[AES_BLOCK_SIZE];
};

struct artpec6_crypto_aead_req_ctx {
	struct artpec6_crypto_aead_hw_ctx hw_ctx;
	u32 cipher_md;
	bool decrypt;
	struct artpec6_crypto_req_common common;
	__u8 decryption_tag[AES_BLOCK_SIZE] ____cacheline_aligned;
};

/* The crypto framework makes it hard to avoid this global. */
static struct device *artpec6_crypto_dev;

#ifdef CONFIG_FAULT_INJECTION
static DECLARE_FAULT_ATTR(artpec6_crypto_fail_status_read);
static DECLARE_FAULT_ATTR(artpec6_crypto_fail_dma_array_full);
#endif

enum {
	ARTPEC6_CRYPTO_PREPARE_HASH_NO_START,
	ARTPEC6_CRYPTO_PREPARE_HASH_START,
};

static int artpec6_crypto_prepare_aead(struct aead_request *areq);
static int artpec6_crypto_prepare_crypto(struct skcipher_request *areq);
static int artpec6_crypto_prepare_hash(struct ahash_request *areq);

static void
artpec6_crypto_complete_crypto(struct crypto_async_request *req);
static void
artpec6_crypto_complete_cbc_encrypt(struct crypto_async_request *req);
static void
artpec6_crypto_complete_cbc_decrypt(struct crypto_async_request *req);
static void
artpec6_crypto_complete_aead(struct crypto_async_request *req);
static void
artpec6_crypto_complete_hash(struct crypto_async_request *req);

static int
artpec6_crypto_common_destroy(struct artpec6_crypto_req_common *common);

static void
artpec6_crypto_start_dma(struct artpec6_crypto_req_common *common);

struct artpec6_crypto_walk {
	struct scatterlist *sg;
	size_t offset;
};

static void artpec6_crypto_walk_init(struct artpec6_crypto_walk *awalk,
				     struct scatterlist *sg)
{
	awalk->sg = sg;
	awalk->offset = 0;
}

static size_t artpec6_crypto_walk_advance(struct artpec6_crypto_walk *awalk,
					  size_t nbytes)
{
	while (nbytes && awalk->sg) {
		size_t piece;

		WARN_ON(awalk->offset > awalk->sg->length);

		piece = min(nbytes, (size_t)awalk->sg->length - awalk->offset);
		nbytes -= piece;
		awalk->offset += piece;
		if (awalk->offset == awalk->sg->length) {
			awalk->sg = sg_next(awalk->sg);
			awalk->offset = 0;
		}
	}

	return nbytes;
}

static size_t
artpec6_crypto_walk_chunklen(const struct artpec6_crypto_walk *awalk)
{
	WARN_ON(awalk->sg->length == awalk->offset);

	return awalk->sg->length - awalk->offset;
}

static dma_addr_t
artpec6_crypto_walk_chunk_phys(const struct artpec6_crypto_walk *awalk)
{
	return sg_phys(awalk->sg) + awalk->offset;
}
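
/*
 * Usage sketch for the walk helpers above (illustrative only):
 *
 *	struct artpec6_crypto_walk walk;
 *
 *	artpec6_crypto_walk_init(&walk, sg);
 *	while (walk.sg && count) {
 *		size_t chunk = min(count, artpec6_crypto_walk_chunklen(&walk));
 *		dma_addr_t addr = artpec6_crypto_walk_chunk_phys(&walk);
 *		// ... emit a descriptor for [addr, addr + chunk) ...
 *		count -= chunk;
 *		artpec6_crypto_walk_advance(&walk, chunk);
 *	}
 *
 * This is the pattern used by artpec6_crypto_setup_sg_descrs_in/out() below.
 */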

static void
artpec6_crypto_copy_bounce_buffers(struct artpec6_crypto_req_common *common)
{
	struct artpec6_crypto_dma_descriptors *dma = common->dma;
	struct artpec6_crypto_bounce_buffer *b;
	struct artpec6_crypto_bounce_buffer *next;

	list_for_each_entry_safe(b, next, &dma->bounce_buffers, list) {
		pr_debug("bounce entry %p: %zu bytes @ %zu from %p\n",
			 b, b->length, b->offset, b->buf);
		sg_pcopy_from_buffer(b->sg,
				     1,
				     b->buf,
				     b->length,
				     b->offset);

		list_del(&b->list);
		kfree(b);
	}
}

static inline bool artpec6_crypto_busy(void)
{
	struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev);
	int fifo_count = ac->pending_count;

	return fifo_count > 6;
}

static int artpec6_crypto_submit(struct artpec6_crypto_req_common *req)
{
	struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev);
	int ret = -EBUSY;

	spin_lock_bh(&ac->queue_lock);

	if (!artpec6_crypto_busy()) {
		list_add_tail(&req->list, &ac->pending);
		artpec6_crypto_start_dma(req);
		ret = -EINPROGRESS;
	} else if (req->req->flags & CRYPTO_TFM_REQ_MAY_BACKLOG) {
		list_add_tail(&req->list, &ac->queue);
	} else {
		artpec6_crypto_common_destroy(req);
	}

	spin_unlock_bh(&ac->queue_lock);

	return ret;
}

static void artpec6_crypto_start_dma(struct artpec6_crypto_req_common *common)
{
	struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev);
	enum artpec6_crypto_variant variant = ac->variant;
	void __iomem *base = ac->base;
	struct artpec6_crypto_dma_descriptors *dma = common->dma;
	u32 ind, statd, outd;

	/* Make descriptor content visible to the DMA before starting it. */
	wmb();

	ind = FIELD_PREP(PDMA_IN_DESCRQ_PUSH_LEN, dma->in_cnt - 1) |
	      FIELD_PREP(PDMA_IN_DESCRQ_PUSH_ADDR, dma->in_dma_addr >> 6);

	statd = FIELD_PREP(PDMA_IN_STATQ_PUSH_LEN, dma->in_cnt - 1) |
		FIELD_PREP(PDMA_IN_STATQ_PUSH_ADDR, dma->stat_dma_addr >> 6);

	outd = FIELD_PREP(PDMA_OUT_DESCRQ_PUSH_LEN, dma->out_cnt - 1) |
	       FIELD_PREP(PDMA_OUT_DESCRQ_PUSH_ADDR, dma->out_dma_addr >> 6);

	if (variant == ARTPEC6_CRYPTO) {
		writel_relaxed(ind, base + A6_PDMA_IN_DESCRQ_PUSH);
		writel_relaxed(statd, base + A6_PDMA_IN_STATQ_PUSH);
		writel_relaxed(PDMA_IN_CMD_START, base + A6_PDMA_IN_CMD);
	} else {
		writel_relaxed(ind, base + A7_PDMA_IN_DESCRQ_PUSH);
		writel_relaxed(statd, base + A7_PDMA_IN_STATQ_PUSH);
		writel_relaxed(PDMA_IN_CMD_START, base + A7_PDMA_IN_CMD);
	}

	writel_relaxed(outd, base + PDMA_OUT_DESCRQ_PUSH);
	writel_relaxed(PDMA_OUT_CMD_START, base + PDMA_OUT_CMD);

	ac->pending_count++;
}

static void
artpec6_crypto_init_dma_operation(struct artpec6_crypto_req_common *common)
{
	struct artpec6_crypto_dma_descriptors *dma = common->dma;

	dma->out_cnt = 0;
	dma->in_cnt = 0;
	dma->map_count = 0;
	INIT_LIST_HEAD(&dma->bounce_buffers);
}

static bool fault_inject_dma_descr(void)
{
#ifdef CONFIG_FAULT_INJECTION
	return should_fail(&artpec6_crypto_fail_dma_array_full, 1);
#else
	return false;
#endif
}

/** artpec6_crypto_setup_out_descr_phys - Setup an out channel with a
 *                                        physical address
 *
 * @addr: The physical address of the data buffer
 * @len:  The length of the data buffer
 * @eop:  True if this is the last buffer in the packet
 *
 * @return 0 on success or -ENOSPC if there are no more descriptors available
 */
static int
artpec6_crypto_setup_out_descr_phys(struct artpec6_crypto_req_common *common,
				    dma_addr_t addr, size_t len, bool eop)
{
	struct artpec6_crypto_dma_descriptors *dma = common->dma;
	struct pdma_descr *d;

	if (dma->out_cnt >= PDMA_DESCR_COUNT ||
	    fault_inject_dma_descr()) {
		pr_err("No free OUT DMA descriptors available!\n");
		return -ENOSPC;
	}

	d = &dma->out[dma->out_cnt++];
	memset(d, 0, sizeof(*d));

	d->ctrl.short_descr = 0;
	d->ctrl.eop = eop;
	d->data.len = len;
	d->data.buf = addr;
	return 0;
}

/** artpec6_crypto_setup_out_descr_short - Setup a short out descriptor
 *
 * @dst: The virtual address of the data
 * @len: The length of the data, must be between 1 and 7 bytes
 * @eop: True if this is the last buffer in the packet
 *
 * @return 0 on success
 *	-ENOSPC if no more descriptors are available
 *	-EINVAL if the data length is not between 1 and 7 bytes
 */
static int
artpec6_crypto_setup_out_descr_short(struct artpec6_crypto_req_common *common,
				     void *dst, unsigned int len, bool eop)
{
	struct artpec6_crypto_dma_descriptors *dma = common->dma;
	struct pdma_descr *d;

	if (dma->out_cnt >= PDMA_DESCR_COUNT ||
	    fault_inject_dma_descr()) {
		pr_err("No free OUT DMA descriptors available!\n");
		return -ENOSPC;
	} else if (len > 7 || len < 1) {
		return -EINVAL;
	}
	d = &dma->out[dma->out_cnt++];
	memset(d, 0, sizeof(*d));

	d->ctrl.short_descr = 1;
	d->ctrl.short_len = len;
	d->ctrl.eop = eop;
	memcpy(d->shrt.data, dst, len);
	return 0;
}

static int artpec6_crypto_dma_map_page(struct artpec6_crypto_req_common *common,
				       struct page *page, size_t offset,
				       size_t size,
				       enum dma_data_direction dir,
				       dma_addr_t *dma_addr_out)
{
	struct artpec6_crypto_dma_descriptors *dma = common->dma;
	struct device *dev = artpec6_crypto_dev;
	struct artpec6_crypto_dma_map *map;
	dma_addr_t dma_addr;

	*dma_addr_out = 0;

	if (dma->map_count >= ARRAY_SIZE(dma->maps))
		return -ENOMEM;

	dma_addr = dma_map_page(dev, page, offset, size, dir);
	if (dma_mapping_error(dev, dma_addr))
		return -ENOMEM;

	map = &dma->maps[dma->map_count++];
	map->size = size;
	map->dma_addr = dma_addr;
	map->dir = dir;

	*dma_addr_out = dma_addr;

	return 0;
}

static int
artpec6_crypto_dma_map_single(struct artpec6_crypto_req_common *common,
			      void *ptr, size_t size,
			      enum dma_data_direction dir,
			      dma_addr_t *dma_addr_out)
{
	struct page *page = virt_to_page(ptr);
	size_t offset = (uintptr_t)ptr & ~PAGE_MASK;

	return artpec6_crypto_dma_map_page(common, page, offset, size, dir,
					   dma_addr_out);
}
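
/*
 * Note: virt_to_page() above is only valid for addresses in the kernel's
 * linear mapping (it must not be fed vmalloc or highmem pointers). The
 * buffers mapped this way in this driver are kmalloc/kmem_cache backed or
 * embedded in request contexts, which appears to satisfy that requirement.
 */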

static int
artpec6_crypto_dma_map_descs(struct artpec6_crypto_req_common *common)
{
	struct artpec6_crypto_dma_descriptors *dma = common->dma;
	int ret;

	ret = artpec6_crypto_dma_map_single(common, dma->in,
				sizeof(dma->in[0]) * dma->in_cnt,
				DMA_TO_DEVICE, &dma->in_dma_addr);
	if (ret)
		return ret;

	ret = artpec6_crypto_dma_map_single(common, dma->out,
				sizeof(dma->out[0]) * dma->out_cnt,
				DMA_TO_DEVICE, &dma->out_dma_addr);
	if (ret)
		return ret;

	/* We only read one stat descriptor */
	dma->stat[dma->in_cnt - 1] = 0;

	/*
	 * DMA_BIDIRECTIONAL since we need our zeroing of the stat descriptor
	 * to be written.
	 */
	return artpec6_crypto_dma_map_single(common,
				dma->stat,
				sizeof(dma->stat[0]) * dma->in_cnt,
				DMA_BIDIRECTIONAL,
				&dma->stat_dma_addr);
}

static void
artpec6_crypto_dma_unmap_all(struct artpec6_crypto_req_common *common)
{
	struct artpec6_crypto_dma_descriptors *dma = common->dma;
	struct device *dev = artpec6_crypto_dev;
	int i;

	for (i = 0; i < dma->map_count; i++) {
		struct artpec6_crypto_dma_map *map = &dma->maps[i];

		dma_unmap_page(dev, map->dma_addr, map->size, map->dir);
	}

	dma->map_count = 0;
}
/** artpec6_crypto_setup_out_descr - Setup an out descriptor
 *
 * @dst: The virtual address of the data
 * @len: The length of the data
 * @eop: True if this is the last buffer in the packet
 * @use_short: If this is true and the data length is less than 7 bytes then
 *             a short descriptor will be used
 *
 * @return 0 on success
 *	Any errors from artpec6_crypto_setup_out_descr_short() or
 *	artpec6_crypto_setup_out_descr_phys()
 */
static int
artpec6_crypto_setup_out_descr(struct artpec6_crypto_req_common *common,
			       void *dst, unsigned int len, bool eop,
			       bool use_short)
{
	if (use_short && len < 7) {
		return artpec6_crypto_setup_out_descr_short(common, dst, len,
							    eop);
	} else {
		int ret;
		dma_addr_t dma_addr;

		ret = artpec6_crypto_dma_map_single(common, dst, len,
						    DMA_TO_DEVICE,
						    &dma_addr);
		if (ret)
			return ret;

		return artpec6_crypto_setup_out_descr_phys(common, dma_addr,
							   len, eop);
	}
}

/** artpec6_crypto_setup_in_descr_phys - Setup an in channel with a
 *                                       physical address
 *
 * @addr: The physical address of the data buffer
 * @len:  The length of the data buffer
 * @intr: True if an interrupt should be fired after HW processing of this
 *	  descriptor
 *
 */
static int
artpec6_crypto_setup_in_descr_phys(struct artpec6_crypto_req_common *common,
				   dma_addr_t addr, unsigned int len, bool intr)
{
	struct artpec6_crypto_dma_descriptors *dma = common->dma;
	struct pdma_descr *d;

	if (dma->in_cnt >= PDMA_DESCR_COUNT ||
	    fault_inject_dma_descr()) {
		pr_err("No free IN DMA descriptors available!\n");
		return -ENOSPC;
	}
	d = &dma->in[dma->in_cnt++];
	memset(d, 0, sizeof(*d));

	d->ctrl.intr = intr;
	d->data.len = len;
	d->data.buf = addr;
	return 0;
}
/** artpec6_crypto_setup_in_descr - Setup an in channel descriptor
 *
 * @buffer: The virtual address of the data buffer
 * @len:    The length of the data buffer
 * @last:   If this is the last data buffer in the request (i.e. an interrupt
 *          is needed)
 *
 * Short descriptors are not used for the in channel
 */
static int
artpec6_crypto_setup_in_descr(struct artpec6_crypto_req_common *common,
			      void *buffer, unsigned int len, bool last)
{
	dma_addr_t dma_addr;
	int ret;

	ret = artpec6_crypto_dma_map_single(common, buffer, len,
					    DMA_FROM_DEVICE, &dma_addr);
	if (ret)
		return ret;

	return artpec6_crypto_setup_in_descr_phys(common, dma_addr, len, last);
}

static struct artpec6_crypto_bounce_buffer *
artpec6_crypto_alloc_bounce(gfp_t flags)
{
	void *base;
	size_t alloc_size = sizeof(struct artpec6_crypto_bounce_buffer) +
			    2 * ARTPEC_CACHE_LINE_MAX;
	struct artpec6_crypto_bounce_buffer *bbuf = kzalloc(alloc_size, flags);

	if (!bbuf)
		return NULL;

	base = bbuf + 1;
	bbuf->buf = PTR_ALIGN(base, ARTPEC_CACHE_LINE_MAX);
	return bbuf;
}
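
/*
 * The allocation above reserves 2 * ARTPEC_CACHE_LINE_MAX extra bytes after
 * the struct so that PTR_ALIGN() can always carve out a region that is both
 * aligned to ARTPEC_CACHE_LINE_MAX and a full ARTPEC_CACHE_LINE_MAX bytes
 * long, wherever the struct itself happens to end.
 */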

static int setup_bounce_buffer_in(struct artpec6_crypto_req_common *common,
				  struct artpec6_crypto_walk *walk, size_t size)
{
	struct artpec6_crypto_bounce_buffer *bbuf;
	int ret;

	bbuf = artpec6_crypto_alloc_bounce(common->gfp_flags);
	if (!bbuf)
		return -ENOMEM;

	bbuf->length = size;
	bbuf->sg = walk->sg;
	bbuf->offset = walk->offset;

	ret = artpec6_crypto_setup_in_descr(common, bbuf->buf, size, false);
	if (ret) {
		kfree(bbuf);
		return ret;
	}

	pr_debug("BOUNCE %zu offset %zu\n", size, walk->offset);
	list_add_tail(&bbuf->list, &common->dma->bounce_buffers);
	return 0;
}

static int
artpec6_crypto_setup_sg_descrs_in(struct artpec6_crypto_req_common *common,
				  struct artpec6_crypto_walk *walk,
				  size_t count)
{
	size_t chunk;
	int ret;
	dma_addr_t addr;

	while (walk->sg && count) {
		chunk = min(count, artpec6_crypto_walk_chunklen(walk));
		addr = artpec6_crypto_walk_chunk_phys(walk);

		/* When destination buffers are not aligned to the cache line
		 * size we need bounce buffers. The DMA-API requires that the
		 * entire line is owned by the DMA buffer and this holds also
		 * for the case when coherent DMA is used.
		 */
		if (!IS_ALIGNED(addr, ARTPEC_CACHE_LINE_MAX)) {
			chunk = min_t(dma_addr_t, chunk,
				      ALIGN(addr, ARTPEC_CACHE_LINE_MAX) -
				      addr);

			pr_debug("CHUNK-b %pad:%zu\n", &addr, chunk);
			ret = setup_bounce_buffer_in(common, walk, chunk);
		} else if (chunk < ARTPEC_CACHE_LINE_MAX) {
			pr_debug("CHUNK-b %pad:%zu\n", &addr, chunk);
			ret = setup_bounce_buffer_in(common, walk, chunk);
		} else {
			dma_addr_t dma_addr;

			chunk = chunk & ~(ARTPEC_CACHE_LINE_MAX-1);

			pr_debug("CHUNK %pad:%zu\n", &addr, chunk);

			ret = artpec6_crypto_dma_map_page(common,
							  sg_page(walk->sg),
							  walk->sg->offset +
							  walk->offset,
							  chunk,
							  DMA_FROM_DEVICE,
							  &dma_addr);
			if (ret)
				return ret;

			ret = artpec6_crypto_setup_in_descr_phys(common,
								 dma_addr,
								 chunk, false);
		}

		if (ret)
			return ret;

		count = count - chunk;
		artpec6_crypto_walk_advance(walk, chunk);
	}

	if (count)
		pr_err("EOL unexpected %zu bytes left\n", count);

	return count ? -EINVAL : 0;
}
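
/*
 * Worked example for the bounce logic above (illustrative): with a 32-byte
 * cache line and a single 100-byte SG chunk at physical address 0x1014,
 * the loop emits 12 bounced bytes (0x1014-0x101f) to reach alignment, then
 * DMA-maps 64 bytes (0x1020-0x105f) directly, and bounces the final 24
 * bytes since they no longer cover a full cache line.
 */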

static int
artpec6_crypto_setup_sg_descrs_out(struct artpec6_crypto_req_common *common,
				   struct artpec6_crypto_walk *walk,
				   size_t count)
{
	size_t chunk;
	int ret;
	dma_addr_t addr;

	while (walk->sg && count) {
		chunk = min(count, artpec6_crypto_walk_chunklen(walk));
		addr = artpec6_crypto_walk_chunk_phys(walk);

		pr_debug("OUT-CHUNK %pad:%zu\n", &addr, chunk);

		if (addr & 3) {
			char buf[3];

			chunk = min_t(size_t, chunk, (4-(addr&3)));

			sg_pcopy_to_buffer(walk->sg, 1, buf, chunk,
					   walk->offset);

			ret = artpec6_crypto_setup_out_descr_short(common, buf,
								   chunk,
								   false);
		} else {
			dma_addr_t dma_addr;

			ret = artpec6_crypto_dma_map_page(common,
							  sg_page(walk->sg),
							  walk->sg->offset +
							  walk->offset,
							  chunk,
							  DMA_TO_DEVICE,
							  &dma_addr);
			if (ret)
				return ret;

			ret = artpec6_crypto_setup_out_descr_phys(common,
								  dma_addr,
								  chunk, false);
		}

		if (ret)
			return ret;

		count = count - chunk;
		artpec6_crypto_walk_advance(walk, chunk);
	}

	if (count)
		pr_err("EOL unexpected %zu bytes left\n", count);

	return count ? -EINVAL : 0;
}

/** artpec6_crypto_terminate_out_descrs - Set the EOP on the last out descriptor
 *
 * If the out descriptor list is non-empty, then the eop flag on the
 * last used out descriptor will be set.
 *
 * @return 0 on success
 *	-EINVAL if the out descriptor list is empty or has overflown
 */
static int
artpec6_crypto_terminate_out_descrs(struct artpec6_crypto_req_common *common)
{
	struct artpec6_crypto_dma_descriptors *dma = common->dma;
	struct pdma_descr *d;

	if (!dma->out_cnt || dma->out_cnt > PDMA_DESCR_COUNT) {
		pr_err("%s: OUT descriptor list is %s\n",
			MODULE_NAME, dma->out_cnt ? "full" : "empty");
		return -EINVAL;
	}

	d = &dma->out[dma->out_cnt-1];
	d->ctrl.eop = 1;

	return 0;
}

/** artpec6_crypto_terminate_in_descrs - Set the interrupt flag on the last
 *                                       in descriptor
 *
 * See artpec6_crypto_terminate_out_descrs() for return values
 */
static int
artpec6_crypto_terminate_in_descrs(struct artpec6_crypto_req_common *common)
{
	struct artpec6_crypto_dma_descriptors *dma = common->dma;
	struct pdma_descr *d;

	if (!dma->in_cnt || dma->in_cnt > PDMA_DESCR_COUNT) {
		pr_err("%s: IN descriptor list is %s\n",
			MODULE_NAME, dma->in_cnt ? "full" : "empty");
		return -EINVAL;
	}

	d = &dma->in[dma->in_cnt-1];
	d->ctrl.intr = 1;
	return 0;
}

/** create_hash_pad - Create a Secure Hash conformant pad
 *
 * @dst:      The destination buffer to write the pad. Must be at least 64 bytes
 * @dgstlen:  The number of bytes hashed so far, used to size the pad
 * @bitcount: The total length of the hashed data in bits, stored in the pad
 *
 * @return The total number of padding bytes written to @dst
 */
static size_t
create_hash_pad(int oper, unsigned char *dst, u64 dgstlen, u64 bitcount)
{
	unsigned int mod, target, diff, pad_bytes, size_bytes;
	__be64 bits = __cpu_to_be64(bitcount);

	switch (oper) {
	case regk_crypto_sha1:
	case regk_crypto_sha256:
	case regk_crypto_hmac_sha1:
	case regk_crypto_hmac_sha256:
		target = 448 / 8;
		mod = 512 / 8;
		size_bytes = 8;
		break;
	default:
		target = 896 / 8;
		mod = 1024 / 8;
		size_bytes = 16;
		break;
	}

	target -= 1;
	diff = dgstlen & (mod - 1);
	pad_bytes = diff > target ? target + mod - diff : target - diff;

	memset(dst + 1, 0, pad_bytes);
	dst[0] = 0x80;

	if (size_bytes == 16) {
		memset(dst + 1 + pad_bytes, 0, 8);
		memcpy(dst + 1 + pad_bytes + 8, &bits, 8);
	} else {
		memcpy(dst + 1 + pad_bytes, &bits, 8);
	}

	return pad_bytes + size_bytes + 1;
}
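
/*
 * Example (illustrative): for SHA-256 after hashing 3 bytes, the pad is
 * 0x80, then 52 zero bytes (reaching byte 56 of the 64-byte block), then
 * the 64-bit big-endian bit count (24), i.e. 61 bytes in total.
 */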

static int artpec6_crypto_common_init(struct artpec6_crypto_req_common *common,
				      struct crypto_async_request *parent,
				      void (*complete)(struct crypto_async_request *req),
				      struct scatterlist *dstsg, unsigned int nbytes)
{
	gfp_t flags;
	struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev);

	flags = (parent->flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		 GFP_KERNEL : GFP_ATOMIC;

	common->gfp_flags = flags;
	common->dma = kmem_cache_alloc(ac->dma_cache, flags);
	if (!common->dma)
		return -ENOMEM;

	common->req = parent;
	common->complete = complete;
	return 0;
}

static void
artpec6_crypto_bounce_destroy(struct artpec6_crypto_dma_descriptors *dma)
{
	struct artpec6_crypto_bounce_buffer *b;
	struct artpec6_crypto_bounce_buffer *next;

	list_for_each_entry_safe(b, next, &dma->bounce_buffers, list) {
		kfree(b);
	}
}

static int
artpec6_crypto_common_destroy(struct artpec6_crypto_req_common *common)
{
	struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev);

	artpec6_crypto_dma_unmap_all(common);
	artpec6_crypto_bounce_destroy(common->dma);
	kmem_cache_free(ac->dma_cache, common->dma);
	common->dma = NULL;
	return 0;
}

/*
 * Ciphering functions.
 */
static int artpec6_crypto_encrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(req);
	struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(cipher);
	struct artpec6_crypto_request_context *req_ctx = NULL;
	void (*complete)(struct crypto_async_request *req);
	int ret;

	req_ctx = skcipher_request_ctx(req);

	switch (ctx->crypto_type) {
	case ARTPEC6_CRYPTO_CIPHER_AES_CBC:
	case ARTPEC6_CRYPTO_CIPHER_AES_ECB:
	case ARTPEC6_CRYPTO_CIPHER_AES_XTS:
		req_ctx->decrypt = 0;
		break;
	default:
		break;
	}

	switch (ctx->crypto_type) {
	case ARTPEC6_CRYPTO_CIPHER_AES_CBC:
		complete = artpec6_crypto_complete_cbc_encrypt;
		break;
	default:
		complete = artpec6_crypto_complete_crypto;
		break;
	}

	ret = artpec6_crypto_common_init(&req_ctx->common,
					 &req->base,
					 complete,
					 req->dst, req->cryptlen);
	if (ret)
		return ret;

	ret = artpec6_crypto_prepare_crypto(req);
	if (ret) {
		artpec6_crypto_common_destroy(&req_ctx->common);
		return ret;
	}

	return artpec6_crypto_submit(&req_ctx->common);
}

static int artpec6_crypto_decrypt(struct skcipher_request *req)
{
	int ret;
	struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(req);
	struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(cipher);
	struct artpec6_crypto_request_context *req_ctx = NULL;
	void (*complete)(struct crypto_async_request *req);

	req_ctx = skcipher_request_ctx(req);

	switch (ctx->crypto_type) {
	case ARTPEC6_CRYPTO_CIPHER_AES_CBC:
	case ARTPEC6_CRYPTO_CIPHER_AES_ECB:
	case ARTPEC6_CRYPTO_CIPHER_AES_XTS:
		req_ctx->decrypt = 1;
		break;
	default:
		break;
	}

	switch (ctx->crypto_type) {
	case ARTPEC6_CRYPTO_CIPHER_AES_CBC:
		complete = artpec6_crypto_complete_cbc_decrypt;
		break;
	default:
		complete = artpec6_crypto_complete_crypto;
		break;
	}

	ret = artpec6_crypto_common_init(&req_ctx->common, &req->base,
					 complete,
					 req->dst, req->cryptlen);
	if (ret)
		return ret;

	ret = artpec6_crypto_prepare_crypto(req);
	if (ret) {
		artpec6_crypto_common_destroy(&req_ctx->common);
		return ret;
	}

	return artpec6_crypto_submit(&req_ctx->common);
}

static int
artpec6_crypto_ctr_crypt(struct skcipher_request *req, bool encrypt)
{
	struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(req);
	struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(cipher);
	size_t iv_len = crypto_skcipher_ivsize(cipher);
	unsigned int counter = be32_to_cpup((__be32 *)
					    (req->iv + iv_len - 4));
	unsigned int nblks = ALIGN(req->cryptlen, AES_BLOCK_SIZE) /
			     AES_BLOCK_SIZE;

	/*
	 * The hardware uses only the last 32-bits as the counter while the
	 * kernel tests (aes_ctr_enc_tv_template[4] for example) expect that
	 * the whole IV is a counter. So fallback if the counter is going to
	 * overflow.
	 */
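	/*
	 * Illustrative example: if the low 32 bits of the IV are 0xfffffffe
	 * and the request spans 3 blocks, the counter values 0xfffffffe,
	 * 0xffffffff and 0x00000000 would be needed, so counter + nblks
	 * wraps and the software fallback is taken below.
	 */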
	if (counter + nblks < counter) {
		int ret;

		pr_debug("counter %x will overflow (nblks %u), falling back\n",
			 counter, counter + nblks);

		ret = crypto_sync_skcipher_setkey(ctx->fallback, ctx->aes_key,
						  ctx->key_length);
		if (ret)
			return ret;

		{
			SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);

			skcipher_request_set_sync_tfm(subreq, ctx->fallback);
			skcipher_request_set_callback(subreq, req->base.flags,
						      NULL, NULL);
			skcipher_request_set_crypt(subreq, req->src, req->dst,
						   req->cryptlen, req->iv);
			ret = encrypt ? crypto_skcipher_encrypt(subreq)
				      : crypto_skcipher_decrypt(subreq);
			skcipher_request_zero(subreq);
		}
		return ret;
	}

	return encrypt ? artpec6_crypto_encrypt(req)
		       : artpec6_crypto_decrypt(req);
}

static int artpec6_crypto_ctr_encrypt(struct skcipher_request *req)
{
	return artpec6_crypto_ctr_crypt(req, true);
}

static int artpec6_crypto_ctr_decrypt(struct skcipher_request *req)
{
	return artpec6_crypto_ctr_crypt(req, false);
}

/*
 * AEAD functions
 */
static int artpec6_crypto_aead_init(struct crypto_aead *tfm)
{
	struct artpec6_cryptotfm_context *tfm_ctx = crypto_aead_ctx(tfm);

	memset(tfm_ctx, 0, sizeof(*tfm_ctx));

	crypto_aead_set_reqsize(tfm,
				sizeof(struct artpec6_crypto_aead_req_ctx));

	return 0;
}

static int artpec6_crypto_aead_set_key(struct crypto_aead *tfm, const u8 *key,
				       unsigned int len)
{
	struct artpec6_cryptotfm_context *ctx = crypto_tfm_ctx(&tfm->base);

	if (len != 16 && len != 24 && len != 32)
		return -EINVAL;

	ctx->key_length = len;

	memcpy(ctx->aes_key, key, len);
	return 0;
}

static int artpec6_crypto_aead_encrypt(struct aead_request *req)
{
	int ret;
	struct artpec6_crypto_aead_req_ctx *req_ctx = aead_request_ctx(req);

	req_ctx->decrypt = false;
	ret = artpec6_crypto_common_init(&req_ctx->common, &req->base,
					 artpec6_crypto_complete_aead,
					 NULL, 0);
	if (ret)
		return ret;

	ret = artpec6_crypto_prepare_aead(req);
	if (ret) {
		artpec6_crypto_common_destroy(&req_ctx->common);
		return ret;
	}

	return artpec6_crypto_submit(&req_ctx->common);
}

static int artpec6_crypto_aead_decrypt(struct aead_request *req)
{
	int ret;
	struct artpec6_crypto_aead_req_ctx *req_ctx = aead_request_ctx(req);

	req_ctx->decrypt = true;
	if (req->cryptlen < AES_BLOCK_SIZE)
		return -EINVAL;

	ret = artpec6_crypto_common_init(&req_ctx->common,
					 &req->base,
					 artpec6_crypto_complete_aead,
					 NULL, 0);
	if (ret)
		return ret;

	ret = artpec6_crypto_prepare_aead(req);
	if (ret) {
		artpec6_crypto_common_destroy(&req_ctx->common);
		return ret;
	}

	return artpec6_crypto_submit(&req_ctx->common);
}

static int artpec6_crypto_prepare_hash(struct ahash_request *areq)
{
	struct artpec6_hashalg_context *ctx = crypto_tfm_ctx(areq->base.tfm);
	struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(areq);
	size_t digestsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(areq));
	size_t contextsize = digestsize;
	size_t blocksize = crypto_tfm_alg_blocksize(
		crypto_ahash_tfm(crypto_ahash_reqtfm(areq)));
	struct artpec6_crypto_req_common *common = &req_ctx->common;
	struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev);
	enum artpec6_crypto_variant variant = ac->variant;
	u32 sel_ctx;
	bool ext_ctx = false;
	bool run_hw = false;
	int error = 0;

	artpec6_crypto_init_dma_operation(common);

	/* Upload HMAC key, it must be the first packet */
	if (req_ctx->hash_flags & HASH_FLAG_HMAC) {
		if (variant == ARTPEC6_CRYPTO) {
			req_ctx->key_md = FIELD_PREP(A6_CRY_MD_OPER,
						     a6_regk_crypto_dlkey);
		} else {
			req_ctx->key_md = FIELD_PREP(A7_CRY_MD_OPER,
						     a7_regk_crypto_dlkey);
		}

		memcpy_and_pad(req_ctx->key_buffer, blocksize, ctx->hmac_key,
			       ctx->hmac_key_length, 0);

		error = artpec6_crypto_setup_out_descr(common,
					(void *)&req_ctx->key_md,
					sizeof(req_ctx->key_md), false, false);
		if (error)
			return error;

		error = artpec6_crypto_setup_out_descr(common,
					req_ctx->key_buffer, blocksize,
					true, false);
		if (error)
			return error;
	}

	if (!(req_ctx->hash_flags & HASH_FLAG_INIT_CTX)) {
		/* Restore context */
		sel_ctx = regk_crypto_ext;
		ext_ctx = true;
	} else {
		sel_ctx = regk_crypto_init;
	}

	if (variant == ARTPEC6_CRYPTO) {
		req_ctx->hash_md &= ~A6_CRY_MD_HASH_SEL_CTX;
		req_ctx->hash_md |= FIELD_PREP(A6_CRY_MD_HASH_SEL_CTX, sel_ctx);

		/* If this is the final round, set the final flag */
		if (req_ctx->hash_flags & HASH_FLAG_FINALIZE)
			req_ctx->hash_md |= A6_CRY_MD_HASH_HMAC_FIN;
	} else {
		req_ctx->hash_md &= ~A7_CRY_MD_HASH_SEL_CTX;
		req_ctx->hash_md |= FIELD_PREP(A7_CRY_MD_HASH_SEL_CTX, sel_ctx);

		/* If this is the final round, set the final flag */
		if (req_ctx->hash_flags & HASH_FLAG_FINALIZE)
			req_ctx->hash_md |= A7_CRY_MD_HASH_HMAC_FIN;
	}

	/* Set up metadata descriptors */
	error = artpec6_crypto_setup_out_descr(common,
				(void *)&req_ctx->hash_md,
				sizeof(req_ctx->hash_md), false, false);
	if (error)
		return error;

	error = artpec6_crypto_setup_in_descr(common, ac->pad_buffer, 4, false);
	if (error)
		return error;

	if (ext_ctx) {
		error = artpec6_crypto_setup_out_descr(common,
					req_ctx->digeststate,
					contextsize, false, false);

		if (error)
			return error;
	}

	if (req_ctx->hash_flags & HASH_FLAG_UPDATE) {
		size_t done_bytes = 0;
		size_t total_bytes = areq->nbytes + req_ctx->partial_bytes;
		size_t ready_bytes = round_down(total_bytes, blocksize);
		struct artpec6_crypto_walk walk;

		run_hw = ready_bytes > 0;
		if (req_ctx->partial_bytes && ready_bytes) {
			/* We have a partial buffer and will send at least
			 * some bytes to the HW. Empty this partial buffer
			 * before tackling the SG lists
			 */
			memcpy(req_ctx->partial_buffer_out,
			       req_ctx->partial_buffer,
			       req_ctx->partial_bytes);

			error = artpec6_crypto_setup_out_descr(common,
						req_ctx->partial_buffer_out,
						req_ctx->partial_bytes,
						false, true);
			if (error)
				return error;

			/* Reset partial buffer */
			done_bytes += req_ctx->partial_bytes;
			req_ctx->partial_bytes = 0;
		}

		artpec6_crypto_walk_init(&walk, areq->src);

		error = artpec6_crypto_setup_sg_descrs_out(common, &walk,
							   ready_bytes -
							   done_bytes);
		if (error)
			return error;

		if (walk.sg) {
			size_t sg_skip = ready_bytes - done_bytes;
			size_t sg_rem = areq->nbytes - sg_skip;

			sg_pcopy_to_buffer(areq->src, sg_nents(areq->src),
					   req_ctx->partial_buffer +
					   req_ctx->partial_bytes,
					   sg_rem, sg_skip);

			req_ctx->partial_bytes += sg_rem;
		}

		req_ctx->digcnt += ready_bytes;
		req_ctx->hash_flags &= ~(HASH_FLAG_UPDATE);
	}

	/* Finalize */
	if (req_ctx->hash_flags & HASH_FLAG_FINALIZE) {
		size_t hash_pad_len;
		u64 digest_bits;
		u32 oper;

		if (variant == ARTPEC6_CRYPTO)
			oper = FIELD_GET(A6_CRY_MD_OPER, req_ctx->hash_md);
		else
			oper = FIELD_GET(A7_CRY_MD_OPER, req_ctx->hash_md);

		/* Write out the partial buffer if present */
		if (req_ctx->partial_bytes) {
			memcpy(req_ctx->partial_buffer_out,
			       req_ctx->partial_buffer,
			       req_ctx->partial_bytes);
			error = artpec6_crypto_setup_out_descr(common,
						req_ctx->partial_buffer_out,
						req_ctx->partial_bytes,
						false, true);
			if (error)
				return error;

			req_ctx->digcnt += req_ctx->partial_bytes;
			req_ctx->partial_bytes = 0;
		}

		if (req_ctx->hash_flags & HASH_FLAG_HMAC)
			digest_bits = 8 * (req_ctx->digcnt + blocksize);
		else
			digest_bits = 8 * req_ctx->digcnt;
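
		/*
		 * For HMAC the hardware has already consumed one full block
		 * (the key block uploaded as the first packet), so the
		 * length encoded in the pad must account for it: e.g.
		 * (illustrative) HMAC-SHA256 over 10 bytes of data yields
		 * digest_bits = 8 * (10 + 64).
		 */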

		/* Add the hash pad */
		hash_pad_len = create_hash_pad(oper, req_ctx->pad_buffer,
					       req_ctx->digcnt, digest_bits);
		error = artpec6_crypto_setup_out_descr(common,
						       req_ctx->pad_buffer,
						       hash_pad_len, false,
						       true);
		req_ctx->digcnt = 0;

		if (error)
			return error;

		/* Descriptor for the final result */
		error = artpec6_crypto_setup_in_descr(common, areq->result,
						      digestsize,
						      true);
		if (error)
			return error;

	} else { /* This is not the final operation for this request */
		if (!run_hw)
			return ARTPEC6_CRYPTO_PREPARE_HASH_NO_START;

		/* Save the result to the context */
		error = artpec6_crypto_setup_in_descr(common,
						      req_ctx->digeststate,
						      contextsize, false);
		if (error)
			return error;
		/* fall through */
	}

	req_ctx->hash_flags &= ~(HASH_FLAG_INIT_CTX | HASH_FLAG_UPDATE |
				 HASH_FLAG_FINALIZE);

	error = artpec6_crypto_terminate_in_descrs(common);
	if (error)
		return error;

	error = artpec6_crypto_terminate_out_descrs(common);
	if (error)
		return error;

	error = artpec6_crypto_dma_map_descs(common);
	if (error)
		return error;

	return ARTPEC6_CRYPTO_PREPARE_HASH_START;
}

static int artpec6_crypto_aes_ecb_init(struct crypto_skcipher *tfm)
{
	struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(tfm);

	crypto_skcipher_set_reqsize(tfm,
				    sizeof(struct artpec6_crypto_request_context));
	ctx->crypto_type = ARTPEC6_CRYPTO_CIPHER_AES_ECB;

	return 0;
}

static int artpec6_crypto_aes_ctr_init(struct crypto_skcipher *tfm)
{
	struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(tfm);

	ctx->fallback =
		crypto_alloc_sync_skcipher(crypto_tfm_alg_name(&tfm->base),
					   0, CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(ctx->fallback))
		return PTR_ERR(ctx->fallback);

	crypto_skcipher_set_reqsize(tfm,
				    sizeof(struct artpec6_crypto_request_context));
	ctx->crypto_type = ARTPEC6_CRYPTO_CIPHER_AES_CTR;

	return 0;
}

static int artpec6_crypto_aes_cbc_init(struct crypto_skcipher *tfm)
{
	struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(tfm);

	crypto_skcipher_set_reqsize(tfm,
				    sizeof(struct artpec6_crypto_request_context));
	ctx->crypto_type = ARTPEC6_CRYPTO_CIPHER_AES_CBC;

	return 0;
}

static int artpec6_crypto_aes_xts_init(struct crypto_skcipher *tfm)
{
	struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(tfm);

	crypto_skcipher_set_reqsize(tfm,
				    sizeof(struct artpec6_crypto_request_context));
	ctx->crypto_type = ARTPEC6_CRYPTO_CIPHER_AES_XTS;

	return 0;
}

static void artpec6_crypto_aes_exit(struct crypto_skcipher *tfm)
{
	struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(tfm);

	memset(ctx, 0, sizeof(*ctx));
}

static void artpec6_crypto_aes_ctr_exit(struct crypto_skcipher *tfm)
{
	struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(tfm);

	crypto_free_sync_skcipher(ctx->fallback);
	artpec6_crypto_aes_exit(tfm);
}

static int
artpec6_crypto_cipher_set_key(struct crypto_skcipher *cipher, const u8 *key,
			      unsigned int keylen)
{
	struct artpec6_cryptotfm_context *ctx =
		crypto_skcipher_ctx(cipher);

	switch (keylen) {
	case 16:
	case 24:
	case 32:
		break;
	default:
		return -EINVAL;
	}

	memcpy(ctx->aes_key, key, keylen);
	ctx->key_length = keylen;
	return 0;
}

static int
artpec6_crypto_xts_set_key(struct crypto_skcipher *cipher, const u8 *key,
			   unsigned int keylen)
{
	struct artpec6_cryptotfm_context *ctx =
		crypto_skcipher_ctx(cipher);
	int ret;

	ret = xts_verify_key(cipher, key, keylen);
	if (ret)
		return ret;

	switch (keylen) {
	case 32:
	case 48:
	case 64:
		break;
	default:
		return -EINVAL;
	}

	memcpy(ctx->aes_key, key, keylen);
	ctx->key_length = keylen;
	return 0;
}

/** artpec6_crypto_prepare_crypto - Prepare an async block cipher crypto request
 *
 * @areq: The async request to process
 *
 * @return 0 if the dma job was successfully prepared
 *	  <0 on error
 *
 * This function sets up the PDMA descriptors for a block cipher request.
 *
 * The required padding is added for AES-CTR using a statically defined
 * buffer.
 *
 * The PDMA descriptor list will be as follows:
 *
 * OUT: [KEY_MD][KEY][EOP]<CIPHER_MD>[IV]<data_0>...[data_n][AES-CTR_pad]<eop>
 * IN:  <CIPHER_MD><data_0>...[data_n]<intr>
 *
 */
static int artpec6_crypto_prepare_crypto(struct skcipher_request *areq)
{
	int ret;
	struct artpec6_crypto_walk walk;
	struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(areq);
	struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(cipher);
	struct artpec6_crypto_request_context *req_ctx = NULL;
	size_t iv_len = crypto_skcipher_ivsize(cipher);
	struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev);
	enum artpec6_crypto_variant variant = ac->variant;
	struct artpec6_crypto_req_common *common;
	bool cipher_decr = false;
	size_t cipher_klen;
	u32 cipher_len = 0; /* Same as regk_crypto_key_128 for NULL crypto */
	u32 oper;

	req_ctx = skcipher_request_ctx(areq);
	common = &req_ctx->common;

	artpec6_crypto_init_dma_operation(common);

	if (variant == ARTPEC6_CRYPTO)
		ctx->key_md = FIELD_PREP(A6_CRY_MD_OPER, a6_regk_crypto_dlkey);
	else
		ctx->key_md = FIELD_PREP(A7_CRY_MD_OPER, a7_regk_crypto_dlkey);

	ret = artpec6_crypto_setup_out_descr(common, (void *)&ctx->key_md,
					     sizeof(ctx->key_md), false, false);
	if (ret)
		return ret;

	ret = artpec6_crypto_setup_out_descr(common, ctx->aes_key,
					     ctx->key_length, true, false);
	if (ret)
		return ret;

	req_ctx->cipher_md = 0;

	if (ctx->crypto_type == ARTPEC6_CRYPTO_CIPHER_AES_XTS)
		cipher_klen = ctx->key_length/2;
	else
		cipher_klen = ctx->key_length;

	/* Metadata */
	switch (cipher_klen) {
	case 16:
		cipher_len = regk_crypto_key_128;
		break;
	case 24:
		cipher_len = regk_crypto_key_192;
		break;
	case 32:
		cipher_len = regk_crypto_key_256;
		break;
	default:
		pr_err("%s: Invalid key length %zu!\n",
		       MODULE_NAME, ctx->key_length);
		return -EINVAL;
	}

	switch (ctx->crypto_type) {
	case ARTPEC6_CRYPTO_CIPHER_AES_ECB:
		oper = regk_crypto_aes_ecb;
		cipher_decr = req_ctx->decrypt;
		break;

	case ARTPEC6_CRYPTO_CIPHER_AES_CBC:
		oper = regk_crypto_aes_cbc;
		cipher_decr = req_ctx->decrypt;
		break;

	case ARTPEC6_CRYPTO_CIPHER_AES_CTR:
		oper = regk_crypto_aes_ctr;
		cipher_decr = false;
		break;

	case ARTPEC6_CRYPTO_CIPHER_AES_XTS:
		oper = regk_crypto_aes_xts;
		cipher_decr = req_ctx->decrypt;

		if (variant == ARTPEC6_CRYPTO)
			req_ctx->cipher_md |= A6_CRY_MD_CIPHER_DSEQ;
		else
			req_ctx->cipher_md |= A7_CRY_MD_CIPHER_DSEQ;
		break;

	default:
		pr_err("%s: Invalid cipher mode %d!\n",
		       MODULE_NAME, ctx->crypto_type);
		return -EINVAL;
	}

	if (variant == ARTPEC6_CRYPTO) {
		req_ctx->cipher_md |= FIELD_PREP(A6_CRY_MD_OPER, oper);
		req_ctx->cipher_md |= FIELD_PREP(A6_CRY_MD_CIPHER_LEN,
						 cipher_len);
		if (cipher_decr)
			req_ctx->cipher_md |= A6_CRY_MD_CIPHER_DECR;
	} else {
		req_ctx->cipher_md |= FIELD_PREP(A7_CRY_MD_OPER, oper);
		req_ctx->cipher_md |= FIELD_PREP(A7_CRY_MD_CIPHER_LEN,
						 cipher_len);
		if (cipher_decr)
			req_ctx->cipher_md |= A7_CRY_MD_CIPHER_DECR;
	}

	ret = artpec6_crypto_setup_out_descr(common,
					     &req_ctx->cipher_md,
					     sizeof(req_ctx->cipher_md),
					     false, false);
	if (ret)
		return ret;

	ret = artpec6_crypto_setup_in_descr(common, ac->pad_buffer, 4, false);
	if (ret)
		return ret;

	if (iv_len) {
		ret = artpec6_crypto_setup_out_descr(common, areq->iv, iv_len,
						     false, false);
		if (ret)
			return ret;
	}
	/* Data out */
	artpec6_crypto_walk_init(&walk, areq->src);
	ret = artpec6_crypto_setup_sg_descrs_out(common, &walk, areq->cryptlen);
	if (ret)
		return ret;

	/* Data in */
	artpec6_crypto_walk_init(&walk, areq->dst);
	ret = artpec6_crypto_setup_sg_descrs_in(common, &walk, areq->cryptlen);
	if (ret)
		return ret;

	/* CTR-mode padding required by the HW. */
	if (ctx->crypto_type == ARTPEC6_CRYPTO_CIPHER_AES_CTR ||
	    ctx->crypto_type == ARTPEC6_CRYPTO_CIPHER_AES_XTS) {
		size_t pad = ALIGN(areq->cryptlen, AES_BLOCK_SIZE) -
			     areq->cryptlen;

		if (pad) {
			ret = artpec6_crypto_setup_out_descr(common,
							     ac->pad_buffer,
							     pad, false, false);
			if (ret)
				return ret;

			ret = artpec6_crypto_setup_in_descr(common,
							    ac->pad_buffer, pad,
							    false);
			if (ret)
				return ret;
		}
	}

	ret = artpec6_crypto_terminate_out_descrs(common);
	if (ret)
		return ret;

	ret = artpec6_crypto_terminate_in_descrs(common);
	if (ret)
		return ret;

	return artpec6_crypto_dma_map_descs(common);
}

static int artpec6_crypto_prepare_aead(struct aead_request *areq)
{
	size_t count;
	int ret;
	size_t input_length;
	struct artpec6_cryptotfm_context *ctx = crypto_tfm_ctx(areq->base.tfm);
	struct artpec6_crypto_aead_req_ctx *req_ctx = aead_request_ctx(areq);
	struct crypto_aead *cipher = crypto_aead_reqtfm(areq);
	struct artpec6_crypto_req_common *common = &req_ctx->common;
	struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev);
	enum artpec6_crypto_variant variant = ac->variant;
	u32 md_cipher_len;

	artpec6_crypto_init_dma_operation(common);

	/* Key */
	if (variant == ARTPEC6_CRYPTO) {
		ctx->key_md = FIELD_PREP(A6_CRY_MD_OPER,
					 a6_regk_crypto_dlkey);
	} else {
		ctx->key_md = FIELD_PREP(A7_CRY_MD_OPER,
					 a7_regk_crypto_dlkey);
	}
	ret = artpec6_crypto_setup_out_descr(common, (void *)&ctx->key_md,
					     sizeof(ctx->key_md), false, false);
	if (ret)
		return ret;

	ret = artpec6_crypto_setup_out_descr(common, ctx->aes_key,
					     ctx->key_length, true, false);
	if (ret)
		return ret;

	req_ctx->cipher_md = 0;

	switch (ctx->key_length) {
	case 16:
		md_cipher_len = regk_crypto_key_128;
		break;
	case 24:
		md_cipher_len = regk_crypto_key_192;
		break;
	case 32:
		md_cipher_len = regk_crypto_key_256;
		break;
	default:
		return -EINVAL;
	}

	if (variant == ARTPEC6_CRYPTO) {
		req_ctx->cipher_md |= FIELD_PREP(A6_CRY_MD_OPER,
						 regk_crypto_aes_gcm);
		req_ctx->cipher_md |= FIELD_PREP(A6_CRY_MD_CIPHER_LEN,
						 md_cipher_len);
		if (req_ctx->decrypt)
			req_ctx->cipher_md |= A6_CRY_MD_CIPHER_DECR;
	} else {
		req_ctx->cipher_md |= FIELD_PREP(A7_CRY_MD_OPER,
						 regk_crypto_aes_gcm);
		req_ctx->cipher_md |= FIELD_PREP(A7_CRY_MD_CIPHER_LEN,
						 md_cipher_len);
		if (req_ctx->decrypt)
			req_ctx->cipher_md |= A7_CRY_MD_CIPHER_DECR;
	}

	ret = artpec6_crypto_setup_out_descr(common,
					     (void *) &req_ctx->cipher_md,
					     sizeof(req_ctx->cipher_md), false,
					     false);
	if (ret)
		return ret;

	ret = artpec6_crypto_setup_in_descr(common, ac->pad_buffer, 4, false);
	if (ret)
		return ret;

	/* For the decryption, cryptlen includes the tag. */
	input_length = areq->cryptlen;
	if (req_ctx->decrypt)
		input_length -= crypto_aead_authsize(cipher);

	/* Prepare the context buffer */
	req_ctx->hw_ctx.aad_length_bits =
		__cpu_to_be64(8*areq->assoclen);

	req_ctx->hw_ctx.text_length_bits =
		__cpu_to_be64(8*input_length);

	memcpy(req_ctx->hw_ctx.J0, areq->iv, crypto_aead_ivsize(cipher));
	// The HW omits the initial increment of the counter field.
	memcpy(req_ctx->hw_ctx.J0 + GCM_AES_IV_SIZE, "\x00\x00\x00\x01", 4);
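
	/*
	 * With the standard 12-byte GCM IV this yields J0 = IV || 0x00000001,
	 * the pre-incremented initial counter block defined by the GCM
	 * specification (NIST SP 800-38D).
	 */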
1919
1920 ret = artpec6_crypto_setup_out_descr(common, &req_ctx->hw_ctx,
1921 sizeof(struct artpec6_crypto_aead_hw_ctx), false, false);
1922 if (ret)
1923 return ret;
1924
1925 {
1926 struct artpec6_crypto_walk walk;
1927
1928 artpec6_crypto_walk_init(&walk, areq->src);
1929
1930 /* Associated data */
1931 count = areq->assoclen;
1932 ret = artpec6_crypto_setup_sg_descrs_out(common, &walk, count);
1933 if (ret)
1934 return ret;
1935
1936 if (!IS_ALIGNED(areq->assoclen, 16)) {
1937 size_t assoc_pad = 16 - (areq->assoclen % 16);
1938 /* The HW mandates zero padding here */
1939 ret = artpec6_crypto_setup_out_descr(common,
1940 ac->zero_buffer,
1941 assoc_pad, false,
1942 false);
1943 if (ret)
1944 return ret;
1945 }
1946
1947 /* Data to crypto */
1948 count = input_length;
1949 ret = artpec6_crypto_setup_sg_descrs_out(common, &walk, count);
1950 if (ret)
1951 return ret;
1952
1953 if (!IS_ALIGNED(input_length, 16)) {
1954 size_t crypto_pad = 16 - (input_length % 16);
1955 /* The HW mandates zero padding here */
1956 ret = artpec6_crypto_setup_out_descr(common,
1957 ac->zero_buffer,
1958 crypto_pad,
1959 false,
1960 false);
1961 if (ret)
1962 return ret;
1963 }
1964 }
1965
1966 /* Data from crypto */
1967 {
1968 struct artpec6_crypto_walk walk;
1969 size_t output_len = areq->cryptlen;
1970
1971 if (req_ctx->decrypt)
1972 output_len -= crypto_aead_authsize(cipher);
1973
1974 artpec6_crypto_walk_init(&walk, areq->dst);
1975
1976 /* skip associated data in the output */
1977 count = artpec6_crypto_walk_advance(&walk, areq->assoclen);
1978 if (count)
1979 return -EINVAL;
1980
1981 count = output_len;
1982 ret = artpec6_crypto_setup_sg_descrs_in(common, &walk, count);
1983 if (ret)
1984 return ret;
1985
1986 /* Put padding between the cryptotext and the auth tag */
1987 if (!IS_ALIGNED(output_len, 16)) {
1988 size_t crypto_pad = 16 - (output_len % 16);
1989
1990 ret = artpec6_crypto_setup_in_descr(common,
1991 ac->pad_buffer,
1992 crypto_pad, false);
1993 if (ret)
1994 return ret;
1995 }
1996
1997		/* The authentication tag shall follow immediately after
1998		 * the output ciphertext. For decryption it is instead written
1999		 * to a context buffer for later comparison with the input tag.
2000		 */
2001
2002 if (req_ctx->decrypt) {
2003 ret = artpec6_crypto_setup_in_descr(common,
2004 req_ctx->decryption_tag, AES_BLOCK_SIZE, false);
2005 if (ret)
2006 return ret;
2007
2008 } else {
2009 /* For encryption the requested tag size may be smaller
2010 * than the hardware's generated tag.
2011 */
2012 size_t authsize = crypto_aead_authsize(cipher);
2013
2014 ret = artpec6_crypto_setup_sg_descrs_in(common, &walk,
2015 authsize);
2016 if (ret)
2017 return ret;
2018
2019 if (authsize < AES_BLOCK_SIZE) {
2020 count = AES_BLOCK_SIZE - authsize;
2021 ret = artpec6_crypto_setup_in_descr(common,
2022 ac->pad_buffer,
2023 count, false);
2024 if (ret)
2025 return ret;
2026 }
2027 }
2028
2029 }
2030
2031 ret = artpec6_crypto_terminate_in_descrs(common);
2032 if (ret)
2033 return ret;
2034
2035 ret = artpec6_crypto_terminate_out_descrs(common);
2036 if (ret)
2037 return ret;
2038
2039 return artpec6_crypto_dma_map_descs(common);
2040}
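/*
 * Shape of the DMA descriptor chains built by the function above for
 * gcm(aes), summarized as a sketch (the pad entries appear only when the
 * preceding length is not already a multiple of 16):
 *
 *   OUT: cipher_md | hw_ctx (bit lengths + J0) | AAD | zero pad |
 *        text | zero pad
 *   IN:  4-byte pad | ciphertext | pad | tag (written to dst on
 *        encryption, to req_ctx->decryption_tag on decryption for the
 *        comparison performed at completion time)
 */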
2041
2042static void artpec6_crypto_process_queue(struct artpec6_crypto *ac,
2043 struct list_head *completions)
2044{
2045 struct artpec6_crypto_req_common *req;
2046
2047 while (!list_empty(&ac->queue) && !artpec6_crypto_busy()) {
2048 req = list_first_entry(&ac->queue,
2049 struct artpec6_crypto_req_common,
2050 list);
2051 list_move_tail(&req->list, &ac->pending);
2052 artpec6_crypto_start_dma(req);
2053
2054 list_add_tail(&req->complete_in_progress, completions);
2055 }
2056
2057	/*
2058	 * In some cases, the hardware can raise an in_eop_flush interrupt
2059	 * before actually updating the status, so we have a timer which will
2060	 * recheck the status on timeout. Since the cases are expected to be
2061	 * very rare, we use a relatively large timeout value. There should be
2062	 * no noticeable negative effect if we time out spuriously.
2063	 */
2064 if (ac->pending_count)
2065 mod_timer(&ac->timer, jiffies + msecs_to_jiffies(100));
2066 else
2067 timer_delete(&ac->timer);
2068}
2069
2070static void artpec6_crypto_timeout(struct timer_list *t)
2071{
2072 struct artpec6_crypto *ac = timer_container_of(ac, t, timer);
2073
2074 dev_info_ratelimited(artpec6_crypto_dev, "timeout\n");
2075
2076 tasklet_schedule(&ac->task);
2077}
2078
2079static void artpec6_crypto_task(unsigned long data)
2080{
2081 struct artpec6_crypto *ac = (struct artpec6_crypto *)data;
2082 struct artpec6_crypto_req_common *req;
2083 struct artpec6_crypto_req_common *n;
2084 struct list_head complete_done;
2085 struct list_head complete_in_progress;
2086
2087 INIT_LIST_HEAD(&complete_done);
2088 INIT_LIST_HEAD(&complete_in_progress);
2089
2090 if (list_empty(&ac->pending)) {
2091 pr_debug("Spurious IRQ\n");
2092 return;
2093 }
2094
2095 spin_lock(&ac->queue_lock);
2096
2097 list_for_each_entry_safe(req, n, &ac->pending, list) {
2098 struct artpec6_crypto_dma_descriptors *dma = req->dma;
2099 u32 stat;
2100 dma_addr_t stataddr;
2101
2102 stataddr = dma->stat_dma_addr + 4 * (req->dma->in_cnt - 1);
2103 dma_sync_single_for_cpu(artpec6_crypto_dev,
2104 stataddr,
2105 4,
2106 DMA_BIDIRECTIONAL);
2107
2108 stat = req->dma->stat[req->dma->in_cnt-1];
2109
2110 /* A non-zero final status descriptor indicates
2111 * this job has finished.
2112 */
2113 pr_debug("Request %p status is %X\n", req, stat);
2114 if (!stat)
2115 break;
2116
2117 /* Allow testing of timeout handling with fault injection */
2118#ifdef CONFIG_FAULT_INJECTION
2119 if (should_fail(&artpec6_crypto_fail_status_read, 1))
2120 continue;
2121#endif
2122
2123 pr_debug("Completing request %p\n", req);
2124
2125 list_move_tail(&req->list, &complete_done);
2126
2127 ac->pending_count--;
2128 }
2129
2130 artpec6_crypto_process_queue(ac, &complete_in_progress);
2131
2132 spin_unlock(&ac->queue_lock);
2133
2134 /* Perform the completion callbacks without holding the queue lock
2135 * to allow new request submissions from the callbacks.
2136 */
2137 list_for_each_entry_safe(req, n, &complete_done, list) {
2138 artpec6_crypto_dma_unmap_all(req);
2139 artpec6_crypto_copy_bounce_buffers(req);
2140 artpec6_crypto_common_destroy(req);
2141
2142 req->complete(req->req);
2143 }
2144
2145 list_for_each_entry_safe(req, n, &complete_in_progress,
2146 complete_in_progress) {
2147 crypto_request_complete(req->req, -EINPROGRESS);
2148 }
2149}
2150
2151static void artpec6_crypto_complete_crypto(struct crypto_async_request *req)
2152{
2153 crypto_request_complete(req, 0);
2154}
2155
2156static void
2157artpec6_crypto_complete_cbc_decrypt(struct crypto_async_request *req)
2158{
2159 struct skcipher_request *cipher_req = container_of(req,
2160 struct skcipher_request, base);
2161
2162 scatterwalk_map_and_copy(cipher_req->iv, cipher_req->src,
2163 cipher_req->cryptlen - AES_BLOCK_SIZE,
2164 AES_BLOCK_SIZE, 0);
2165 skcipher_request_complete(cipher_req, 0);
2166}
2167
2168static void
2169artpec6_crypto_complete_cbc_encrypt(struct crypto_async_request *req)
2170{
2171 struct skcipher_request *cipher_req = container_of(req,
2172 struct skcipher_request, base);
2173
2174 scatterwalk_map_and_copy(cipher_req->iv, cipher_req->dst,
2175 cipher_req->cryptlen - AES_BLOCK_SIZE,
2176 AES_BLOCK_SIZE, 0);
2177 skcipher_request_complete(cipher_req, 0);
2178}
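/*
 * The two CBC completions above implement IV chaining: the crypto API
 * expects req->iv to hold the IV for a chained follow-up request, which
 * for CBC is always the last ciphertext block. That block sits in the
 * source buffer when decrypting and in the destination buffer when
 * encrypting, hence the two variants. As a sketch, with cryptlen a
 * multiple of AES_BLOCK_SIZE:
 *
 *   memcpy(iv, ciphertext + cryptlen - AES_BLOCK_SIZE, AES_BLOCK_SIZE);
 */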
2179
2180static void artpec6_crypto_complete_aead(struct crypto_async_request *req)
2181{
2182 int result = 0;
2183
2184 /* Verify GCM hashtag. */
2185 struct aead_request *areq = container_of(req,
2186 struct aead_request, base);
2187 struct crypto_aead *aead = crypto_aead_reqtfm(areq);
2188 struct artpec6_crypto_aead_req_ctx *req_ctx = aead_request_ctx(areq);
2189
2190 if (req_ctx->decrypt) {
2191 u8 input_tag[AES_BLOCK_SIZE];
2192 unsigned int authsize = crypto_aead_authsize(aead);
2193
2194 sg_pcopy_to_buffer(areq->src,
2195 sg_nents(areq->src),
2196 input_tag,
2197 authsize,
2198 areq->assoclen + areq->cryptlen -
2199 authsize);
2200
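		/* crypto_memneq() compares in constant time, so a failed
		 * verification does not leak how many tag bytes matched.
		 */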
2201 if (crypto_memneq(req_ctx->decryption_tag,
2202 input_tag,
2203 authsize)) {
2204 pr_debug("***EBADMSG:\n");
2205 print_hex_dump_debug("ref:", DUMP_PREFIX_ADDRESS, 32, 1,
2206 input_tag, authsize, true);
2207 print_hex_dump_debug("out:", DUMP_PREFIX_ADDRESS, 32, 1,
2208 req_ctx->decryption_tag,
2209 authsize, true);
2210
2211 result = -EBADMSG;
2212 }
2213 }
2214
2215 aead_request_complete(areq, result);
2216}
2217
2218static void artpec6_crypto_complete_hash(struct crypto_async_request *req)
2219{
2220 crypto_request_complete(req, 0);
2221}
2222
2224/*------------------- Hash functions -----------------------------------------*/
2225static int
2226artpec6_crypto_hash_set_key(struct crypto_ahash *tfm,
2227 const u8 *key, unsigned int keylen)
2228{
2229 struct artpec6_hashalg_context *tfm_ctx = crypto_tfm_ctx(&tfm->base);
2230 size_t blocksize;
2231 int ret;
2232
2233 if (!keylen) {
2234		pr_err("Invalid length (%u) of HMAC key\n",
2235		       keylen);
2236 return -EINVAL;
2237 }
2238
2239 memset(tfm_ctx->hmac_key, 0, sizeof(tfm_ctx->hmac_key));
2240
2241 blocksize = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
2242
2243 if (keylen > blocksize) {
2244 tfm_ctx->hmac_key_length = blocksize;
2245
2246 ret = crypto_shash_tfm_digest(tfm_ctx->child_hash, key, keylen,
2247 tfm_ctx->hmac_key);
2248 if (ret)
2249 return ret;
2250 } else {
2251 memcpy(tfm_ctx->hmac_key, key, keylen);
2252 tfm_ctx->hmac_key_length = keylen;
2253 }
2254
2255 return 0;
2256}
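/*
 * The key handling above follows the usual HMAC rule (RFC 2104): a key
 * longer than the hash block size is replaced by its digest, anything
 * else is used as-is; the leading memset() zero pads either case.
 * Illustrative values for hmac(sha256) with its 64-byte block size:
 *
 *   keylen ==  20 -> hmac_key = key || zeros,          length = 20
 *   keylen == 100 -> hmac_key = SHA-256(key) || zeros, length = 64
 */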
2257
2258static int
2259artpec6_crypto_init_hash(struct ahash_request *req, u8 type, int hmac)
2260{
2261 struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev);
2262 enum artpec6_crypto_variant variant = ac->variant;
2263 struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(req);
2264 u32 oper;
2265
2266 memset(req_ctx, 0, sizeof(*req_ctx));
2267
2268 req_ctx->hash_flags = HASH_FLAG_INIT_CTX;
2269 if (hmac)
2270 req_ctx->hash_flags |= (HASH_FLAG_HMAC | HASH_FLAG_UPDATE_KEY);
2271
2272 switch (type) {
2273 case ARTPEC6_CRYPTO_HASH_SHA1:
2274 oper = hmac ? regk_crypto_hmac_sha1 : regk_crypto_sha1;
2275 break;
2276 case ARTPEC6_CRYPTO_HASH_SHA256:
2277 oper = hmac ? regk_crypto_hmac_sha256 : regk_crypto_sha256;
2278 break;
2279 default:
2280 pr_err("%s: Unsupported hash type 0x%x\n", MODULE_NAME, type);
2281 return -EINVAL;
2282 }
2283
2284 if (variant == ARTPEC6_CRYPTO)
2285 req_ctx->hash_md = FIELD_PREP(A6_CRY_MD_OPER, oper);
2286 else
2287 req_ctx->hash_md = FIELD_PREP(A7_CRY_MD_OPER, oper);
2288
2289 return 0;
2290}
2291
2292static int artpec6_crypto_prepare_submit_hash(struct ahash_request *req)
2293{
2294 struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(req);
2295 int ret;
2296
2297 if (!req_ctx->common.dma) {
2298 ret = artpec6_crypto_common_init(&req_ctx->common,
2299 &req->base,
2300 artpec6_crypto_complete_hash,
2301 NULL, 0);
2302
2303 if (ret)
2304 return ret;
2305 }
2306
2307 ret = artpec6_crypto_prepare_hash(req);
2308 switch (ret) {
2309 case ARTPEC6_CRYPTO_PREPARE_HASH_START:
2310 ret = artpec6_crypto_submit(&req_ctx->common);
2311 break;
2312
2313 case ARTPEC6_CRYPTO_PREPARE_HASH_NO_START:
2314 ret = 0;
2315 fallthrough;
2316
2317 default:
2318 artpec6_crypto_common_destroy(&req_ctx->common);
2319 break;
2320 }
2321
2322 return ret;
2323}
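/*
 * The three-way switch above, paraphrased: PREPARE_HASH_START submits
 * the prepared descriptor job to the hardware; PREPARE_HASH_NO_START
 * means the request could be satisfied without hardware work, so ret is
 * forced to 0 and the fallthrough releases the DMA resources; any other
 * (error) result also tears the request down before being returned.
 */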
2324
2325static int artpec6_crypto_hash_final(struct ahash_request *req)
2326{
2327 struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(req);
2328
2329 req_ctx->hash_flags |= HASH_FLAG_FINALIZE;
2330
2331 return artpec6_crypto_prepare_submit_hash(req);
2332}
2333
2334static int artpec6_crypto_hash_update(struct ahash_request *req)
2335{
2336 struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(req);
2337
2338 req_ctx->hash_flags |= HASH_FLAG_UPDATE;
2339
2340 return artpec6_crypto_prepare_submit_hash(req);
2341}
2342
2343static int artpec6_crypto_sha1_init(struct ahash_request *req)
2344{
2345 return artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA1, 0);
2346}
2347
2348static int artpec6_crypto_sha1_digest(struct ahash_request *req)
2349{
2350 struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(req);
2351
2352 artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA1, 0);
2353
2354 req_ctx->hash_flags |= HASH_FLAG_UPDATE | HASH_FLAG_FINALIZE;
2355
2356 return artpec6_crypto_prepare_submit_hash(req);
2357}
2358
2359static int artpec6_crypto_sha256_init(struct ahash_request *req)
2360{
2361 return artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA256, 0);
2362}
2363
2364static int artpec6_crypto_sha256_digest(struct ahash_request *req)
2365{
2366 struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(req);
2367
2368 artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA256, 0);
2369 req_ctx->hash_flags |= HASH_FLAG_UPDATE | HASH_FLAG_FINALIZE;
2370
2371 return artpec6_crypto_prepare_submit_hash(req);
2372}
2373
2374static int artpec6_crypto_hmac_sha256_init(struct ahash_request *req)
2375{
2376 return artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA256, 1);
2377}
2378
2379static int artpec6_crypto_hmac_sha256_digest(struct ahash_request *req)
2380{
2381 struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(req);
2382
2383 artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA256, 1);
2384 req_ctx->hash_flags |= HASH_FLAG_UPDATE | HASH_FLAG_FINALIZE;
2385
2386 return artpec6_crypto_prepare_submit_hash(req);
2387}
2388
2389static int artpec6_crypto_ahash_init_common(struct crypto_tfm *tfm,
2390 const char *base_hash_name)
2391{
2392 struct artpec6_hashalg_context *tfm_ctx = crypto_tfm_ctx(tfm);
2393
2394 crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
2395 sizeof(struct artpec6_hash_request_context));
2396 memset(tfm_ctx, 0, sizeof(*tfm_ctx));
2397
2398 if (base_hash_name) {
2399 struct crypto_shash *child;
2400
2401 child = crypto_alloc_shash(base_hash_name, 0,
2402 CRYPTO_ALG_NEED_FALLBACK);
2403
2404 if (IS_ERR(child))
2405 return PTR_ERR(child);
2406
2407 tfm_ctx->child_hash = child;
2408 }
2409
2410 return 0;
2411}
2412
2413static int artpec6_crypto_ahash_init(struct crypto_tfm *tfm)
2414{
2415 return artpec6_crypto_ahash_init_common(tfm, NULL);
2416}
2417
2418static int artpec6_crypto_ahash_init_hmac_sha256(struct crypto_tfm *tfm)
2419{
2420 return artpec6_crypto_ahash_init_common(tfm, "sha256");
2421}
2422
2423static void artpec6_crypto_ahash_exit(struct crypto_tfm *tfm)
2424{
2425 struct artpec6_hashalg_context *tfm_ctx = crypto_tfm_ctx(tfm);
2426
2427 if (tfm_ctx->child_hash)
2428 crypto_free_shash(tfm_ctx->child_hash);
2429
2430 memset(tfm_ctx->hmac_key, 0, sizeof(tfm_ctx->hmac_key));
2431 tfm_ctx->hmac_key_length = 0;
2432}
2433
2434static int artpec6_crypto_hash_export(struct ahash_request *req, void *out)
2435{
2436 const struct artpec6_hash_request_context *ctx = ahash_request_ctx(req);
2437 struct artpec6_hash_export_state *state = out;
2438 struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev);
2439 enum artpec6_crypto_variant variant = ac->variant;
2440
2441 BUILD_BUG_ON(sizeof(state->partial_buffer) !=
2442 sizeof(ctx->partial_buffer));
2443 BUILD_BUG_ON(sizeof(state->digeststate) != sizeof(ctx->digeststate));
2444
2445 state->digcnt = ctx->digcnt;
2446 state->partial_bytes = ctx->partial_bytes;
2447 state->hash_flags = ctx->hash_flags;
2448
2449 if (variant == ARTPEC6_CRYPTO)
2450 state->oper = FIELD_GET(A6_CRY_MD_OPER, ctx->hash_md);
2451 else
2452 state->oper = FIELD_GET(A7_CRY_MD_OPER, ctx->hash_md);
2453
2454 memcpy(state->partial_buffer, ctx->partial_buffer,
2455 sizeof(state->partial_buffer));
2456 memcpy(state->digeststate, ctx->digeststate,
2457 sizeof(state->digeststate));
2458
2459 return 0;
2460}
2461
2462static int artpec6_crypto_hash_import(struct ahash_request *req, const void *in)
2463{
2464 struct artpec6_hash_request_context *ctx = ahash_request_ctx(req);
2465 const struct artpec6_hash_export_state *state = in;
2466 struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev);
2467 enum artpec6_crypto_variant variant = ac->variant;
2468
2469 memset(ctx, 0, sizeof(*ctx));
2470
2471 ctx->digcnt = state->digcnt;
2472 ctx->partial_bytes = state->partial_bytes;
2473 ctx->hash_flags = state->hash_flags;
2474
2475 if (variant == ARTPEC6_CRYPTO)
2476 ctx->hash_md = FIELD_PREP(A6_CRY_MD_OPER, state->oper);
2477 else
2478 ctx->hash_md = FIELD_PREP(A7_CRY_MD_OPER, state->oper);
2479
2480 memcpy(ctx->partial_buffer, state->partial_buffer,
2481 sizeof(state->partial_buffer));
2482 memcpy(ctx->digeststate, state->digeststate,
2483 sizeof(state->digeststate));
2484
2485 return 0;
2486}
2487
2488static int init_crypto_hw(struct artpec6_crypto *ac)
2489{
2490 enum artpec6_crypto_variant variant = ac->variant;
2491 void __iomem *base = ac->base;
2492 u32 out_descr_buf_size;
2493 u32 out_data_buf_size;
2494 u32 in_data_buf_size;
2495 u32 in_descr_buf_size;
2496 u32 in_stat_buf_size;
2497 u32 in, out;
2498
2499	/*
2500	 * The PDMA unit contains 1984 bytes of internal memory for the OUT
2501	 * channels and 1024 bytes for the IN channel. This is an elastic
2502	 * memory used to internally store the descriptors and data. The values
2503	 * are specified in 64-byte increments. TrustZone buffers are not
2504	 * used at this stage.
2505	 */
2506 out_data_buf_size = 16; /* 1024 bytes for data */
2507 out_descr_buf_size = 15; /* 960 bytes for descriptors */
2508 in_data_buf_size = 8; /* 512 bytes for data */
2509 in_descr_buf_size = 4; /* 256 bytes for descriptors */
2510 in_stat_buf_size = 4; /* 256 bytes for stat descrs */
2511
2512 BUILD_BUG_ON_MSG((out_data_buf_size
2513 + out_descr_buf_size) * 64 > 1984,
2514 "Invalid OUT configuration");
2515
2516 BUILD_BUG_ON_MSG((in_data_buf_size
2517 + in_descr_buf_size
2518 + in_stat_buf_size) * 64 > 1024,
2519 "Invalid IN configuration");
2520
2521 in = FIELD_PREP(PDMA_IN_BUF_CFG_DATA_BUF_SIZE, in_data_buf_size) |
2522 FIELD_PREP(PDMA_IN_BUF_CFG_DESCR_BUF_SIZE, in_descr_buf_size) |
2523 FIELD_PREP(PDMA_IN_BUF_CFG_STAT_BUF_SIZE, in_stat_buf_size);
2524
2525 out = FIELD_PREP(PDMA_OUT_BUF_CFG_DATA_BUF_SIZE, out_data_buf_size) |
2526 FIELD_PREP(PDMA_OUT_BUF_CFG_DESCR_BUF_SIZE, out_descr_buf_size);
2527
2528 writel_relaxed(out, base + PDMA_OUT_BUF_CFG);
2529 writel_relaxed(PDMA_OUT_CFG_EN, base + PDMA_OUT_CFG);
2530
2531 if (variant == ARTPEC6_CRYPTO) {
2532 writel_relaxed(in, base + A6_PDMA_IN_BUF_CFG);
2533 writel_relaxed(PDMA_IN_CFG_EN, base + A6_PDMA_IN_CFG);
2534 writel_relaxed(A6_PDMA_INTR_MASK_IN_DATA |
2535 A6_PDMA_INTR_MASK_IN_EOP_FLUSH,
2536 base + A6_PDMA_INTR_MASK);
2537 } else {
2538 writel_relaxed(in, base + A7_PDMA_IN_BUF_CFG);
2539 writel_relaxed(PDMA_IN_CFG_EN, base + A7_PDMA_IN_CFG);
2540 writel_relaxed(A7_PDMA_INTR_MASK_IN_DATA |
2541 A7_PDMA_INTR_MASK_IN_EOP_FLUSH,
2542 base + A7_PDMA_INTR_MASK);
2543 }
2544
2545 return 0;
2546}
2547
2548static void artpec6_crypto_disable_hw(struct artpec6_crypto *ac)
2549{
2550 enum artpec6_crypto_variant variant = ac->variant;
2551 void __iomem *base = ac->base;
2552
2553 if (variant == ARTPEC6_CRYPTO) {
2554 writel_relaxed(A6_PDMA_IN_CMD_STOP, base + A6_PDMA_IN_CMD);
2555 writel_relaxed(0, base + A6_PDMA_IN_CFG);
2556 writel_relaxed(A6_PDMA_OUT_CMD_STOP, base + PDMA_OUT_CMD);
2557 } else {
2558 writel_relaxed(A7_PDMA_IN_CMD_STOP, base + A7_PDMA_IN_CMD);
2559 writel_relaxed(0, base + A7_PDMA_IN_CFG);
2560 writel_relaxed(A7_PDMA_OUT_CMD_STOP, base + PDMA_OUT_CMD);
2561 }
2562
2563 writel_relaxed(0, base + PDMA_OUT_CFG);
2565}
2566
2567static irqreturn_t artpec6_crypto_irq(int irq, void *dev_id)
2568{
2569 struct artpec6_crypto *ac = dev_id;
2570 enum artpec6_crypto_variant variant = ac->variant;
2571 void __iomem *base = ac->base;
2572 u32 mask_in_data, mask_in_eop_flush;
2573 u32 in_cmd_flush_stat, in_cmd_reg;
2574 u32 ack_intr_reg;
2575 u32 ack = 0;
2576 u32 intr;
2577
2578 if (variant == ARTPEC6_CRYPTO) {
2579 intr = readl_relaxed(base + A6_PDMA_MASKED_INTR);
2580 mask_in_data = A6_PDMA_INTR_MASK_IN_DATA;
2581 mask_in_eop_flush = A6_PDMA_INTR_MASK_IN_EOP_FLUSH;
2582 in_cmd_flush_stat = A6_PDMA_IN_CMD_FLUSH_STAT;
2583 in_cmd_reg = A6_PDMA_IN_CMD;
2584 ack_intr_reg = A6_PDMA_ACK_INTR;
2585 } else {
2586 intr = readl_relaxed(base + A7_PDMA_MASKED_INTR);
2587 mask_in_data = A7_PDMA_INTR_MASK_IN_DATA;
2588 mask_in_eop_flush = A7_PDMA_INTR_MASK_IN_EOP_FLUSH;
2589 in_cmd_flush_stat = A7_PDMA_IN_CMD_FLUSH_STAT;
2590 in_cmd_reg = A7_PDMA_IN_CMD;
2591 ack_intr_reg = A7_PDMA_ACK_INTR;
2592 }
2593
2594	/* We get two interrupt notifications for each job.
2595	 * The in_data interrupt means all data has been written to
2596	 * memory; we then issue a status flush command to write the
2597	 * per-job status to its status vector. This ensures that the
2598	 * tasklet can detect exactly how many submitted jobs have
2599	 * finished.
2600	 */
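	/* Illustrative sequence for one job, matching the logic below:
	 *
	 *   1. in_data fires: ack it and, since in_eop_flush is not yet
	 *      set, write FLUSH_STAT to the IN command register
	 *   2. the flush completes and in_eop_flush fires
	 *   3. in_eop_flush is acked and the tasklet is scheduled to
	 *      harvest the now-valid status words
	 */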
2601 if (intr & mask_in_data)
2602 ack |= mask_in_data;
2603
2604 if (intr & mask_in_eop_flush)
2605 ack |= mask_in_eop_flush;
2606 else
2607 writel_relaxed(in_cmd_flush_stat, base + in_cmd_reg);
2608
2609 writel_relaxed(ack, base + ack_intr_reg);
2610
2611 if (intr & mask_in_eop_flush)
2612 tasklet_schedule(&ac->task);
2613
2614 return IRQ_HANDLED;
2615}
2616
2617/*------------------- Algorithm definitions ----------------------------------*/
2618
2619/* Hashes */
2620static struct ahash_alg hash_algos[] = {
2621 /* SHA-1 */
2622 {
2623 .init = artpec6_crypto_sha1_init,
2624 .update = artpec6_crypto_hash_update,
2625 .final = artpec6_crypto_hash_final,
2626 .digest = artpec6_crypto_sha1_digest,
2627 .import = artpec6_crypto_hash_import,
2628 .export = artpec6_crypto_hash_export,
2629 .halg.digestsize = SHA1_DIGEST_SIZE,
2630 .halg.statesize = sizeof(struct artpec6_hash_export_state),
2631 .halg.base = {
2632 .cra_name = "sha1",
2633 .cra_driver_name = "artpec-sha1",
2634 .cra_priority = 300,
2635 .cra_flags = CRYPTO_ALG_ASYNC |
2636 CRYPTO_ALG_ALLOCATES_MEMORY,
2637 .cra_blocksize = SHA1_BLOCK_SIZE,
2638 .cra_ctxsize = sizeof(struct artpec6_hashalg_context),
2639 .cra_module = THIS_MODULE,
2640 .cra_init = artpec6_crypto_ahash_init,
2641 .cra_exit = artpec6_crypto_ahash_exit,
2642 }
2643 },
2644 /* SHA-256 */
2645 {
2646 .init = artpec6_crypto_sha256_init,
2647 .update = artpec6_crypto_hash_update,
2648 .final = artpec6_crypto_hash_final,
2649 .digest = artpec6_crypto_sha256_digest,
2650 .import = artpec6_crypto_hash_import,
2651 .export = artpec6_crypto_hash_export,
2652 .halg.digestsize = SHA256_DIGEST_SIZE,
2653 .halg.statesize = sizeof(struct artpec6_hash_export_state),
2654 .halg.base = {
2655 .cra_name = "sha256",
2656 .cra_driver_name = "artpec-sha256",
2657 .cra_priority = 300,
2658 .cra_flags = CRYPTO_ALG_ASYNC |
2659 CRYPTO_ALG_ALLOCATES_MEMORY,
2660 .cra_blocksize = SHA256_BLOCK_SIZE,
2661 .cra_ctxsize = sizeof(struct artpec6_hashalg_context),
2662 .cra_module = THIS_MODULE,
2663 .cra_init = artpec6_crypto_ahash_init,
2664 .cra_exit = artpec6_crypto_ahash_exit,
2665 }
2666 },
2667 /* HMAC SHA-256 */
2668 {
2669 .init = artpec6_crypto_hmac_sha256_init,
2670 .update = artpec6_crypto_hash_update,
2671 .final = artpec6_crypto_hash_final,
2672 .digest = artpec6_crypto_hmac_sha256_digest,
2673 .import = artpec6_crypto_hash_import,
2674 .export = artpec6_crypto_hash_export,
2675 .setkey = artpec6_crypto_hash_set_key,
2676 .halg.digestsize = SHA256_DIGEST_SIZE,
2677 .halg.statesize = sizeof(struct artpec6_hash_export_state),
2678 .halg.base = {
2679 .cra_name = "hmac(sha256)",
2680 .cra_driver_name = "artpec-hmac-sha256",
2681 .cra_priority = 300,
2682 .cra_flags = CRYPTO_ALG_ASYNC |
2683 CRYPTO_ALG_ALLOCATES_MEMORY,
2684 .cra_blocksize = SHA256_BLOCK_SIZE,
2685 .cra_ctxsize = sizeof(struct artpec6_hashalg_context),
2686 .cra_module = THIS_MODULE,
2687 .cra_init = artpec6_crypto_ahash_init_hmac_sha256,
2688 .cra_exit = artpec6_crypto_ahash_exit,
2689 }
2690 },
2691};
2692
2693/* Crypto */
2694static struct skcipher_alg crypto_algos[] = {
2695 /* AES - ECB */
2696 {
2697 .base = {
2698 .cra_name = "ecb(aes)",
2699 .cra_driver_name = "artpec6-ecb-aes",
2700 .cra_priority = 300,
2701 .cra_flags = CRYPTO_ALG_ASYNC |
2702 CRYPTO_ALG_ALLOCATES_MEMORY,
2703 .cra_blocksize = AES_BLOCK_SIZE,
2704 .cra_ctxsize = sizeof(struct artpec6_cryptotfm_context),
2705 .cra_alignmask = 3,
2706 .cra_module = THIS_MODULE,
2707 },
2708 .min_keysize = AES_MIN_KEY_SIZE,
2709 .max_keysize = AES_MAX_KEY_SIZE,
2710 .setkey = artpec6_crypto_cipher_set_key,
2711 .encrypt = artpec6_crypto_encrypt,
2712 .decrypt = artpec6_crypto_decrypt,
2713 .init = artpec6_crypto_aes_ecb_init,
2714 .exit = artpec6_crypto_aes_exit,
2715 },
2716 /* AES - CTR */
2717 {
2718 .base = {
2719 .cra_name = "ctr(aes)",
2720 .cra_driver_name = "artpec6-ctr-aes",
2721 .cra_priority = 300,
2722 .cra_flags = CRYPTO_ALG_ASYNC |
2723 CRYPTO_ALG_ALLOCATES_MEMORY |
2724 CRYPTO_ALG_NEED_FALLBACK,
2725 .cra_blocksize = 1,
2726 .cra_ctxsize = sizeof(struct artpec6_cryptotfm_context),
2727 .cra_alignmask = 3,
2728 .cra_module = THIS_MODULE,
2729 },
2730 .min_keysize = AES_MIN_KEY_SIZE,
2731 .max_keysize = AES_MAX_KEY_SIZE,
2732 .ivsize = AES_BLOCK_SIZE,
2733 .setkey = artpec6_crypto_cipher_set_key,
2734 .encrypt = artpec6_crypto_ctr_encrypt,
2735 .decrypt = artpec6_crypto_ctr_decrypt,
2736 .init = artpec6_crypto_aes_ctr_init,
2737 .exit = artpec6_crypto_aes_ctr_exit,
2738 },
2739 /* AES - CBC */
2740 {
2741 .base = {
2742 .cra_name = "cbc(aes)",
2743 .cra_driver_name = "artpec6-cbc-aes",
2744 .cra_priority = 300,
2745 .cra_flags = CRYPTO_ALG_ASYNC |
2746 CRYPTO_ALG_ALLOCATES_MEMORY,
2747 .cra_blocksize = AES_BLOCK_SIZE,
2748 .cra_ctxsize = sizeof(struct artpec6_cryptotfm_context),
2749 .cra_alignmask = 3,
2750 .cra_module = THIS_MODULE,
2751 },
2752 .min_keysize = AES_MIN_KEY_SIZE,
2753 .max_keysize = AES_MAX_KEY_SIZE,
2754 .ivsize = AES_BLOCK_SIZE,
2755 .setkey = artpec6_crypto_cipher_set_key,
2756 .encrypt = artpec6_crypto_encrypt,
2757 .decrypt = artpec6_crypto_decrypt,
2758 .init = artpec6_crypto_aes_cbc_init,
2759 .exit = artpec6_crypto_aes_exit
2760 },
2761 /* AES - XTS */
2762 {
2763 .base = {
2764 .cra_name = "xts(aes)",
2765 .cra_driver_name = "artpec6-xts-aes",
2766 .cra_priority = 300,
2767 .cra_flags = CRYPTO_ALG_ASYNC |
2768 CRYPTO_ALG_ALLOCATES_MEMORY,
2769 .cra_blocksize = 1,
2770 .cra_ctxsize = sizeof(struct artpec6_cryptotfm_context),
2771 .cra_alignmask = 3,
2772 .cra_module = THIS_MODULE,
2773 },
2774 .min_keysize = 2*AES_MIN_KEY_SIZE,
2775 .max_keysize = 2*AES_MAX_KEY_SIZE,
2776 .ivsize = 16,
2777 .setkey = artpec6_crypto_xts_set_key,
2778 .encrypt = artpec6_crypto_encrypt,
2779 .decrypt = artpec6_crypto_decrypt,
2780 .init = artpec6_crypto_aes_xts_init,
2781 .exit = artpec6_crypto_aes_exit,
2782 },
2783};
2784
2785static struct aead_alg aead_algos[] = {
2786 {
2787 .init = artpec6_crypto_aead_init,
2788 .setkey = artpec6_crypto_aead_set_key,
2789 .encrypt = artpec6_crypto_aead_encrypt,
2790 .decrypt = artpec6_crypto_aead_decrypt,
2791 .ivsize = GCM_AES_IV_SIZE,
2792 .maxauthsize = AES_BLOCK_SIZE,
2793
2794 .base = {
2795 .cra_name = "gcm(aes)",
2796 .cra_driver_name = "artpec-gcm-aes",
2797 .cra_priority = 300,
2798 .cra_flags = CRYPTO_ALG_ASYNC |
2799 CRYPTO_ALG_ALLOCATES_MEMORY |
2800 CRYPTO_ALG_KERN_DRIVER_ONLY,
2801 .cra_blocksize = 1,
2802 .cra_ctxsize = sizeof(struct artpec6_cryptotfm_context),
2803 .cra_alignmask = 3,
2804 .cra_module = THIS_MODULE,
2805 },
2806 }
2807};
2808
2809#ifdef CONFIG_DEBUG_FS
2810
2811static struct dentry *dbgfs_root;
2812
2813static void artpec6_crypto_init_debugfs(void)
2814{
2815 dbgfs_root = debugfs_create_dir("artpec6_crypto", NULL);
2816
2817#ifdef CONFIG_FAULT_INJECTION
2818 fault_create_debugfs_attr("fail_status_read", dbgfs_root,
2819 &artpec6_crypto_fail_status_read);
2820
2821 fault_create_debugfs_attr("fail_dma_array_full", dbgfs_root,
2822 &artpec6_crypto_fail_dma_array_full);
2823#endif
2824}
2825
2826static void artpec6_crypto_free_debugfs(void)
2827{
2828 debugfs_remove_recursive(dbgfs_root);
2829 dbgfs_root = NULL;
2830}
2831#endif
2832
2833static const struct of_device_id artpec6_crypto_of_match[] = {
2834 { .compatible = "axis,artpec6-crypto", .data = (void *)ARTPEC6_CRYPTO },
2835 { .compatible = "axis,artpec7-crypto", .data = (void *)ARTPEC7_CRYPTO },
2836 {}
2837};
2838MODULE_DEVICE_TABLE(of, artpec6_crypto_of_match);
2839
2840static int artpec6_crypto_probe(struct platform_device *pdev)
2841{
2842 enum artpec6_crypto_variant variant;
2843 struct artpec6_crypto *ac;
2844 struct device *dev = &pdev->dev;
2845 void __iomem *base;
2846 int irq;
2847 int err;
2848
2849 if (artpec6_crypto_dev)
2850 return -ENODEV;
2851
2852 variant = (enum artpec6_crypto_variant)of_device_get_match_data(dev);
2853 if (!variant)
2854 return -EINVAL;
2855
2856 base = devm_platform_ioremap_resource(pdev, 0);
2857 if (IS_ERR(base))
2858 return PTR_ERR(base);
2859
2860 irq = platform_get_irq(pdev, 0);
2861 if (irq < 0)
2862		return irq;
2863
2864 ac = devm_kzalloc(&pdev->dev, sizeof(struct artpec6_crypto),
2865 GFP_KERNEL);
2866 if (!ac)
2867 return -ENOMEM;
2868
2869 platform_set_drvdata(pdev, ac);
2870 ac->variant = variant;
2871
2872 spin_lock_init(&ac->queue_lock);
2873 INIT_LIST_HEAD(&ac->queue);
2874 INIT_LIST_HEAD(&ac->pending);
2875 timer_setup(&ac->timer, artpec6_crypto_timeout, 0);
2876
2877 ac->base = base;
2878
2879 ac->dma_cache = kmem_cache_create("artpec6_crypto_dma",
2880 sizeof(struct artpec6_crypto_dma_descriptors),
2881 64,
2882 0,
2883 NULL);
2884 if (!ac->dma_cache)
2885 return -ENOMEM;
2886
2887#ifdef CONFIG_DEBUG_FS
2888 artpec6_crypto_init_debugfs();
2889#endif
2890
2891 tasklet_init(&ac->task, artpec6_crypto_task,
2892 (unsigned long)ac);
2893
2894 ac->pad_buffer = devm_kcalloc(&pdev->dev, 2, ARTPEC_CACHE_LINE_MAX,
2895 GFP_KERNEL);
2896 if (!ac->pad_buffer)
2897 return -ENOMEM;
2898 ac->pad_buffer = PTR_ALIGN(ac->pad_buffer, ARTPEC_CACHE_LINE_MAX);
2899
2900 ac->zero_buffer = devm_kcalloc(&pdev->dev, 2, ARTPEC_CACHE_LINE_MAX,
2901 GFP_KERNEL);
2902 if (!ac->zero_buffer)
2903 return -ENOMEM;
2904 ac->zero_buffer = PTR_ALIGN(ac->zero_buffer, ARTPEC_CACHE_LINE_MAX);
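	/* Alignment-by-overallocation sketch: each devm_kcalloc() above
	 * returns 2 * ARTPEC_CACHE_LINE_MAX == 64 bytes, so PTR_ALIGN() can
	 * advance the pointer by at most 31 bytes and still leave a full
	 * 32-byte, cache-line-aligned buffer that shares its cache line
	 * with no other data.
	 */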
2905
2906 err = init_crypto_hw(ac);
2907 if (err)
2908 goto free_cache;
2909
2910 err = devm_request_irq(&pdev->dev, irq, artpec6_crypto_irq, 0,
2911 "artpec6-crypto", ac);
2912 if (err)
2913 goto disable_hw;
2914
2915 artpec6_crypto_dev = &pdev->dev;
2916
2917 err = crypto_register_ahashes(hash_algos, ARRAY_SIZE(hash_algos));
2918 if (err) {
2919 dev_err(dev, "Failed to register ahashes\n");
2920 goto disable_hw;
2921 }
2922
2923 err = crypto_register_skciphers(crypto_algos, ARRAY_SIZE(crypto_algos));
2924 if (err) {
2925 dev_err(dev, "Failed to register ciphers\n");
2926 goto unregister_ahashes;
2927 }
2928
2929 err = crypto_register_aeads(aead_algos, ARRAY_SIZE(aead_algos));
2930 if (err) {
2931 dev_err(dev, "Failed to register aeads\n");
2932 goto unregister_algs;
2933 }
2934
2935 return 0;
2936
2937unregister_algs:
2938 crypto_unregister_skciphers(crypto_algos, ARRAY_SIZE(crypto_algos));
2939unregister_ahashes:
2940 crypto_unregister_ahashes(hash_algos, ARRAY_SIZE(hash_algos));
2941disable_hw:
2942 artpec6_crypto_disable_hw(ac);
2943free_cache:
2944 kmem_cache_destroy(ac->dma_cache);
2945 return err;
2946}
2947
2948static void artpec6_crypto_remove(struct platform_device *pdev)
2949{
2950 struct artpec6_crypto *ac = platform_get_drvdata(pdev);
2951 int irq = platform_get_irq(pdev, 0);
2952
2953 crypto_unregister_ahashes(hash_algos, ARRAY_SIZE(hash_algos));
2954 crypto_unregister_skciphers(crypto_algos, ARRAY_SIZE(crypto_algos));
2955 crypto_unregister_aeads(aead_algos, ARRAY_SIZE(aead_algos));
2956
2957 tasklet_disable(&ac->task);
2958 devm_free_irq(&pdev->dev, irq, ac);
2959 tasklet_kill(&ac->task);
2960 timer_delete_sync(&ac->timer);
2961
2962 artpec6_crypto_disable_hw(ac);
2963
2964 kmem_cache_destroy(ac->dma_cache);
2965#ifdef CONFIG_DEBUG_FS
2966 artpec6_crypto_free_debugfs();
2967#endif
2968}
2969
2970static struct platform_driver artpec6_crypto_driver = {
2971 .probe = artpec6_crypto_probe,
2972 .remove = artpec6_crypto_remove,
2973 .driver = {
2974 .name = "artpec6-crypto",
2975 .of_match_table = artpec6_crypto_of_match,
2976 },
2977};
2978
2979module_platform_driver(artpec6_crypto_driver);
2980
2981MODULE_AUTHOR("Axis Communications AB");
2982MODULE_DESCRIPTION("ARTPEC-6 Crypto driver");
2983MODULE_LICENSE("GPL");