// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2014 Imagination Technologies
 * Authors: Will Thomas, James Hartley
 *
 * Interface structure taken from omap-sham driver
 */

#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <linux/scatterlist.h>

#include <crypto/internal/hash.h>
#include <crypto/md5.h>
#include <crypto/sha1.h>
#include <crypto/sha2.h>

#define CR_RESET			0
#define CR_RESET_SET			1
#define CR_RESET_UNSET			0

#define CR_MESSAGE_LENGTH_H		0x4
#define CR_MESSAGE_LENGTH_L		0x8

#define CR_CONTROL			0xc
#define CR_CONTROL_BYTE_ORDER_3210	0
#define CR_CONTROL_BYTE_ORDER_0123	1
#define CR_CONTROL_BYTE_ORDER_2310	2
#define CR_CONTROL_BYTE_ORDER_1032	3
#define CR_CONTROL_BYTE_ORDER_SHIFT	8
#define CR_CONTROL_ALGO_MD5		0
#define CR_CONTROL_ALGO_SHA1		1
#define CR_CONTROL_ALGO_SHA224		2
#define CR_CONTROL_ALGO_SHA256		3

#define CR_INTSTAT			0x10
#define CR_INTENAB			0x14
#define CR_INTCLEAR			0x18
#define CR_INT_RESULTS_AVAILABLE	BIT(0)
#define CR_INT_NEW_RESULTS_SET		BIT(1)
#define CR_INT_RESULT_READ_ERR		BIT(2)
#define CR_INT_MESSAGE_WRITE_ERROR	BIT(3)
#define CR_INT_STATUS			BIT(8)

#define CR_RESULT_QUEUE			0x1c
#define CR_RSD0				0x40
#define CR_CORE_REV			0x50
#define CR_CORE_DES1			0x60
#define CR_CORE_DES2			0x70

#define DRIVER_FLAGS_BUSY		BIT(0)
#define DRIVER_FLAGS_FINAL		BIT(1)
#define DRIVER_FLAGS_DMA_ACTIVE		BIT(2)
#define DRIVER_FLAGS_OUTPUT_READY	BIT(3)
#define DRIVER_FLAGS_INIT		BIT(4)
#define DRIVER_FLAGS_CPU		BIT(5)
#define DRIVER_FLAGS_DMA_READY		BIT(6)
#define DRIVER_FLAGS_ERROR		BIT(7)
#define DRIVER_FLAGS_SG			BIT(8)
#define DRIVER_FLAGS_SHA1		BIT(18)
#define DRIVER_FLAGS_SHA224		BIT(19)
#define DRIVER_FLAGS_SHA256		BIT(20)
#define DRIVER_FLAGS_MD5		BIT(21)

#define IMG_HASH_QUEUE_LENGTH		20
#define IMG_HASH_DMA_BURST		4
#define IMG_HASH_DMA_THRESHOLD		64

#ifdef __LITTLE_ENDIAN
#define IMG_HASH_BYTE_ORDER		CR_CONTROL_BYTE_ORDER_3210
#else
#define IMG_HASH_BYTE_ORDER		CR_CONTROL_BYTE_ORDER_0123
#endif

struct img_hash_dev;

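/*
 * Per-request state. The buffer[] flexible array at the end doubles as the
 * bounce buffer for CPU (PIO) transfers and as carry space for the sub-word
 * residue between DMA transfers; room for it is reserved through
 * crypto_ahash_set_reqsize() in img_hash_cra_init().
 */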
struct img_hash_request_ctx {
	struct img_hash_dev *hdev;
	u8 digest[SHA256_DIGEST_SIZE] __aligned(sizeof(u32));
	unsigned long flags;
	size_t digsize;

	dma_addr_t dma_addr;
	size_t dma_ct;

	/* sg root */
	struct scatterlist *sgfirst;
	/* walk state */
	struct scatterlist *sg;
	size_t nents;
	size_t offset;
	unsigned int total;
	size_t sent;

	unsigned long op;

	size_t bufcnt;
	struct ahash_request fallback_req;

	/* Zero length buffer must remain last member of struct */
	u8 buffer[] __aligned(sizeof(u32));
};

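/* Per-transform context: the device a tfm is bound to and its software fallback. */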
struct img_hash_ctx {
	struct img_hash_dev *hdev;
	unsigned long flags;
	struct crypto_ahash *fallback;
};

struct img_hash_dev {
	struct list_head list;
	struct device *dev;
	struct clk *hash_clk;
	struct clk *sys_clk;
	void __iomem *io_base;

	phys_addr_t bus_addr;
	void __iomem *cpu_addr;

	spinlock_t lock;
	int err;
	struct tasklet_struct done_task;
	struct tasklet_struct dma_task;

	unsigned long flags;
	struct crypto_queue queue;
	struct ahash_request *req;

	struct dma_chan *dma_lch;
};

struct img_hash_drv {
	struct list_head dev_list;
	spinlock_t lock;
};

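/* All probed devices, protected by img_hash.lock. */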
static struct img_hash_drv img_hash = {
	.dev_list = LIST_HEAD_INIT(img_hash.dev_list),
	.lock = __SPIN_LOCK_UNLOCKED(img_hash.lock),
};

static inline u32 img_hash_read(struct img_hash_dev *hdev, u32 offset)
{
	return readl_relaxed(hdev->io_base + offset);
}

static inline void img_hash_write(struct img_hash_dev *hdev,
				  u32 offset, u32 value)
{
	writel_relaxed(value, hdev->io_base + offset);
}

static inline __be32 img_hash_read_result_queue(struct img_hash_dev *hdev)
{
	return cpu_to_be32(img_hash_read(hdev, CR_RESULT_QUEUE));
}

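/*
 * Program the control register with the byte order and the algorithm selected
 * for the current request; called once per request before any data is written.
 */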
static void img_hash_start(struct img_hash_dev *hdev, bool dma)
{
	struct img_hash_request_ctx *ctx = ahash_request_ctx(hdev->req);
	u32 cr = IMG_HASH_BYTE_ORDER << CR_CONTROL_BYTE_ORDER_SHIFT;

	if (ctx->flags & DRIVER_FLAGS_MD5)
		cr |= CR_CONTROL_ALGO_MD5;
	else if (ctx->flags & DRIVER_FLAGS_SHA1)
		cr |= CR_CONTROL_ALGO_SHA1;
	else if (ctx->flags & DRIVER_FLAGS_SHA224)
		cr |= CR_CONTROL_ALGO_SHA224;
	else if (ctx->flags & DRIVER_FLAGS_SHA256)
		cr |= CR_CONTROL_ALGO_SHA256;
	dev_dbg(hdev->dev, "Starting hash process\n");
	img_hash_write(hdev, CR_CONTROL, cr);

	/*
	 * The hardware block requires two cycles between writing the control
	 * register and writing the first word of data in non-DMA mode. To
	 * ensure the first data write is not grouped in a burst with the
	 * control register write, a read is issued to 'flush' the bus.
	 */
	if (!dma)
		img_hash_read(hdev, CR_CONTROL);
}

static int img_hash_xmit_cpu(struct img_hash_dev *hdev, const u8 *buf,
			     size_t length, int final)
{
	u32 count, len32;
	const u32 *buffer = (const u32 *)buf;

	dev_dbg(hdev->dev, "xmit_cpu: length: %zu bytes\n", length);

	if (final)
		hdev->flags |= DRIVER_FLAGS_FINAL;

	len32 = DIV_ROUND_UP(length, sizeof(u32));

	for (count = 0; count < len32; count++)
		writel_relaxed(buffer[count], hdev->cpu_addr);

	return -EINPROGRESS;
}

static void img_hash_dma_callback(void *data)
{
	struct img_hash_dev *hdev = data;
	struct img_hash_request_ctx *ctx = ahash_request_ctx(hdev->req);

	if (ctx->bufcnt) {
		img_hash_xmit_cpu(hdev, ctx->buffer, ctx->bufcnt, 0);
		ctx->bufcnt = 0;
	}
	if (ctx->sg)
		tasklet_schedule(&hdev->dma_task);
}

static int img_hash_xmit_dma(struct img_hash_dev *hdev, struct scatterlist *sg)
{
	struct dma_async_tx_descriptor *desc;
	struct img_hash_request_ctx *ctx = ahash_request_ctx(hdev->req);

	ctx->dma_ct = dma_map_sg(hdev->dev, sg, 1, DMA_TO_DEVICE);
	if (ctx->dma_ct == 0) {
		dev_err(hdev->dev, "Invalid DMA sg\n");
		hdev->err = -EINVAL;
		return -EINVAL;
	}

	desc = dmaengine_prep_slave_sg(hdev->dma_lch,
				       sg,
				       ctx->dma_ct,
				       DMA_MEM_TO_DEV,
				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc) {
		dev_err(hdev->dev, "Null DMA descriptor\n");
		hdev->err = -EINVAL;
		dma_unmap_sg(hdev->dev, sg, 1, DMA_TO_DEVICE);
		return -EINVAL;
	}
	desc->callback = img_hash_dma_callback;
	desc->callback_param = hdev;
	dmaengine_submit(desc);
	dma_async_issue_pending(hdev->dma_lch);

	return 0;
}

static int img_hash_write_via_cpu(struct img_hash_dev *hdev)
{
	struct img_hash_request_ctx *ctx = ahash_request_ctx(hdev->req);

	ctx->bufcnt = sg_copy_to_buffer(hdev->req->src, sg_nents(ctx->sg),
					ctx->buffer, hdev->req->nbytes);

	ctx->total = hdev->req->nbytes;
	ctx->bufcnt = 0;

	hdev->flags |= (DRIVER_FLAGS_CPU | DRIVER_FLAGS_FINAL);

	img_hash_start(hdev, false);

	return img_hash_xmit_cpu(hdev, ctx->buffer, ctx->total, 1);
}

static int img_hash_finish(struct ahash_request *req)
{
	struct img_hash_request_ctx *ctx = ahash_request_ctx(req);

	if (!req->result)
		return -EINVAL;

	memcpy(req->result, ctx->digest, ctx->digsize);

	return 0;
}

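/*
 * Drain the result queue into ctx->digest. Each read returns one 32-bit word
 * of the digest, last word first, so the destination is filled backwards.
 */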
static void img_hash_copy_hash(struct ahash_request *req)
{
	struct img_hash_request_ctx *ctx = ahash_request_ctx(req);
	__be32 *hash = (__be32 *)ctx->digest;
	int i;

	for (i = (ctx->digsize / sizeof(*hash)) - 1; i >= 0; i--)
		hash[i] = img_hash_read_result_queue(ctx->hdev);
}

static void img_hash_finish_req(struct ahash_request *req, int err)
{
	struct img_hash_request_ctx *ctx = ahash_request_ctx(req);
	struct img_hash_dev *hdev = ctx->hdev;

	if (!err) {
		img_hash_copy_hash(req);
		if (DRIVER_FLAGS_FINAL & hdev->flags)
			err = img_hash_finish(req);
	} else {
		dev_warn(hdev->dev, "Hash failed with error %d\n", err);
		ctx->flags |= DRIVER_FLAGS_ERROR;
	}

	hdev->flags &= ~(DRIVER_FLAGS_DMA_READY | DRIVER_FLAGS_OUTPUT_READY |
		DRIVER_FLAGS_CPU | DRIVER_FLAGS_BUSY | DRIVER_FLAGS_FINAL);

	if (req->base.complete)
		ahash_request_complete(req, err);
}

static int img_hash_write_via_dma(struct img_hash_dev *hdev)
{
	struct img_hash_request_ctx *ctx = ahash_request_ctx(hdev->req);

	img_hash_start(hdev, true);

	dev_dbg(hdev->dev, "xmit dma size: %d\n", ctx->total);

	if (!ctx->total)
		hdev->flags |= DRIVER_FLAGS_FINAL;

	hdev->flags |= DRIVER_FLAGS_DMA_ACTIVE | DRIVER_FLAGS_FINAL;

	tasklet_schedule(&hdev->dma_task);

	return -EINPROGRESS;
}

static int img_hash_dma_init(struct img_hash_dev *hdev)
{
	struct dma_slave_config dma_conf;
	int err;

	hdev->dma_lch = dma_request_chan(hdev->dev, "tx");
	if (IS_ERR(hdev->dma_lch)) {
		dev_err(hdev->dev, "Couldn't acquire a slave DMA channel.\n");
		return PTR_ERR(hdev->dma_lch);
	}
	dma_conf.direction = DMA_MEM_TO_DEV;
	dma_conf.dst_addr = hdev->bus_addr;
	dma_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	dma_conf.dst_maxburst = IMG_HASH_DMA_BURST;
	dma_conf.device_fc = false;

	err = dmaengine_slave_config(hdev->dma_lch, &dma_conf);
	if (err) {
		dev_err(hdev->dev, "Couldn't configure DMA slave.\n");
		dma_release_channel(hdev->dma_lch);
		return err;
	}

	return 0;
}

static void img_hash_dma_task(unsigned long d)
{
	struct img_hash_dev *hdev = (struct img_hash_dev *)d;
	struct img_hash_request_ctx *ctx;
	u8 *addr;
	size_t nbytes, bleft, wsend, len, tbc;
	struct scatterlist tsg;

	if (!hdev->req)
		return;

	ctx = ahash_request_ctx(hdev->req);
	if (!ctx->sg)
		return;

	addr = sg_virt(ctx->sg);
	nbytes = ctx->sg->length - ctx->offset;

	/*
	 * The hash accelerator does not support a data valid mask. This means
	 * that if a DMA transfer (i.e. per page) is not a multiple of 4 bytes
	 * in length, the padding bytes in the last word written by that
	 * transfer would erroneously be included in the hash. To avoid this,
	 * the transfer is rounded down to a whole number of words and the
	 * excess is carried over to the start of the next transfer. It does
	 * not matter that the final transfer may not be a multiple of 4 bytes,
	 * as the hashing block is programmed to accept the correct number of
	 * bytes.
	 */

	bleft = nbytes % 4;
	wsend = (nbytes / 4);

	if (wsend) {
		sg_init_one(&tsg, addr + ctx->offset, wsend * 4);
		if (img_hash_xmit_dma(hdev, &tsg)) {
			dev_err(hdev->dev, "DMA failed, falling back to CPU");
			ctx->flags |= DRIVER_FLAGS_CPU;
			hdev->err = 0;
			img_hash_xmit_cpu(hdev, addr + ctx->offset,
					  wsend * 4, 0);
			ctx->sent += wsend * 4;
			wsend = 0;
		} else {
			ctx->sent += wsend * 4;
		}
	}

	if (bleft) {
		ctx->bufcnt = sg_pcopy_to_buffer(ctx->sgfirst, ctx->nents,
						 ctx->buffer, bleft, ctx->sent);
		tbc = 0;
		ctx->sg = sg_next(ctx->sg);
		while (ctx->sg && (ctx->bufcnt < 4)) {
			len = ctx->sg->length;
			if (likely(len > (4 - ctx->bufcnt)))
				len = 4 - ctx->bufcnt;
			tbc = sg_pcopy_to_buffer(ctx->sgfirst, ctx->nents,
						 ctx->buffer + ctx->bufcnt, len,
						 ctx->sent + ctx->bufcnt);
			ctx->bufcnt += tbc;
			if (tbc >= ctx->sg->length) {
				ctx->sg = sg_next(ctx->sg);
				tbc = 0;
			}
		}

		ctx->sent += ctx->bufcnt;
		ctx->offset = tbc;

		if (!wsend)
			img_hash_dma_callback(hdev);
	} else {
		ctx->offset = 0;
		ctx->sg = sg_next(ctx->sg);
	}
}

static int img_hash_write_via_dma_stop(struct img_hash_dev *hdev)
{
	struct img_hash_request_ctx *ctx = ahash_request_ctx(hdev->req);

	if (ctx->flags & DRIVER_FLAGS_SG)
		dma_unmap_sg(hdev->dev, ctx->sg, 1, DMA_TO_DEVICE);

	return 0;
}

static int img_hash_process_data(struct img_hash_dev *hdev)
{
	struct ahash_request *req = hdev->req;
	struct img_hash_request_ctx *ctx = ahash_request_ctx(req);
	int err = 0;

	ctx->bufcnt = 0;

	if (req->nbytes >= IMG_HASH_DMA_THRESHOLD) {
		dev_dbg(hdev->dev, "process data request(%d bytes) using DMA\n",
			req->nbytes);
		err = img_hash_write_via_dma(hdev);
	} else {
		dev_dbg(hdev->dev, "process data request(%d bytes) using CPU\n",
			req->nbytes);
		err = img_hash_write_via_cpu(hdev);
	}
	return err;
}

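/*
 * Reset the block, enable the "new results" interrupt and program the total
 * message length in bits, taken from the whole request up front.
 */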
static int img_hash_hw_init(struct img_hash_dev *hdev)
{
	unsigned long long nbits;
	u32 u, l;

	img_hash_write(hdev, CR_RESET, CR_RESET_SET);
	img_hash_write(hdev, CR_RESET, CR_RESET_UNSET);
	img_hash_write(hdev, CR_INTENAB, CR_INT_NEW_RESULTS_SET);

	nbits = (u64)hdev->req->nbytes << 3;
	u = nbits >> 32;
	l = nbits;
	img_hash_write(hdev, CR_MESSAGE_LENGTH_H, u);
	img_hash_write(hdev, CR_MESSAGE_LENGTH_L, l);

	if (!(DRIVER_FLAGS_INIT & hdev->flags)) {
		hdev->flags |= DRIVER_FLAGS_INIT;
		hdev->err = 0;
	}
	dev_dbg(hdev->dev, "hw initialized, nbits: %llx\n", nbits);
	return 0;
}

static int img_hash_init(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct img_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct img_hash_ctx *ctx = crypto_ahash_ctx(tfm);

	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback);
	ahash_request_set_callback(&rctx->fallback_req,
				   req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP,
				   req->base.complete, req->base.data);

	return crypto_ahash_init(&rctx->fallback_req);
}

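/*
 * Enqueue the request (if any) and, unless the device is already busy, pull
 * the next request off the queue and start processing it. Returns the
 * enqueue status for the caller's own request.
 */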
static int img_hash_handle_queue(struct img_hash_dev *hdev,
				 struct ahash_request *req)
{
	struct crypto_async_request *async_req, *backlog;
	struct img_hash_request_ctx *ctx;
	unsigned long flags;
	int err = 0, res = 0;

	spin_lock_irqsave(&hdev->lock, flags);

	if (req)
		res = ahash_enqueue_request(&hdev->queue, req);

	if (DRIVER_FLAGS_BUSY & hdev->flags) {
		spin_unlock_irqrestore(&hdev->lock, flags);
		return res;
	}

	backlog = crypto_get_backlog(&hdev->queue);
	async_req = crypto_dequeue_request(&hdev->queue);
	if (async_req)
		hdev->flags |= DRIVER_FLAGS_BUSY;

	spin_unlock_irqrestore(&hdev->lock, flags);

	if (!async_req)
		return res;

	if (backlog)
		crypto_request_complete(backlog, -EINPROGRESS);

	req = ahash_request_cast(async_req);
	hdev->req = req;

	ctx = ahash_request_ctx(req);

	dev_info(hdev->dev, "processing req, op: %lu, bytes: %d\n",
		 ctx->op, req->nbytes);

	err = img_hash_hw_init(hdev);

	if (!err)
		err = img_hash_process_data(hdev);

	if (err != -EINPROGRESS) {
		/* done_task will not finish so do it here */
		img_hash_finish_req(req, err);
	}
	return res;
}

static int img_hash_update(struct ahash_request *req)
{
	struct img_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct img_hash_ctx *ctx = crypto_ahash_ctx(tfm);

	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback);
	ahash_request_set_callback(&rctx->fallback_req,
				   req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP,
				   req->base.complete, req->base.data);
	ahash_request_set_crypt(&rctx->fallback_req, req->src, NULL, req->nbytes);

	return crypto_ahash_update(&rctx->fallback_req);
}

static int img_hash_final(struct ahash_request *req)
{
	struct img_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct img_hash_ctx *ctx = crypto_ahash_ctx(tfm);

	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback);
	ahash_request_set_callback(&rctx->fallback_req,
				   req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP,
				   req->base.complete, req->base.data);
	ahash_request_set_crypt(&rctx->fallback_req, NULL, req->result, 0);

	return crypto_ahash_final(&rctx->fallback_req);
}

static int img_hash_finup(struct ahash_request *req)
{
	struct img_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct img_hash_ctx *ctx = crypto_ahash_ctx(tfm);

	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback);
	ahash_request_set_callback(&rctx->fallback_req,
				   req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP,
				   req->base.complete, req->base.data);
	ahash_request_set_crypt(&rctx->fallback_req, req->src, req->result,
				req->nbytes);

	return crypto_ahash_finup(&rctx->fallback_req);
}

static int img_hash_import(struct ahash_request *req, const void *in)
{
	struct img_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct img_hash_ctx *ctx = crypto_ahash_ctx(tfm);

	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback);
	ahash_request_set_callback(&rctx->fallback_req,
				   req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP,
				   req->base.complete, req->base.data);

	return crypto_ahash_import(&rctx->fallback_req, in);
}

static int img_hash_export(struct ahash_request *req, void *out)
{
	struct img_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct img_hash_ctx *ctx = crypto_ahash_ctx(tfm);

	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback);
	ahash_request_set_callback(&rctx->fallback_req,
				   req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP,
				   req->base.complete, req->base.data);

	return crypto_ahash_export(&rctx->fallback_req, out);
}

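/*
 * Only the one-shot digest() path uses the hardware; the init/update/final/
 * finup and export/import entry points are handed straight to the software
 * fallback.
 */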
static int img_hash_digest(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct img_hash_ctx *tctx = crypto_ahash_ctx(tfm);
	struct img_hash_request_ctx *ctx = ahash_request_ctx(req);

	spin_lock(&img_hash.lock);
	if (!tctx->hdev)
		tctx->hdev = list_first_entry_or_null(&img_hash.dev_list,
						      struct img_hash_dev, list);
	ctx->hdev = tctx->hdev;
	spin_unlock(&img_hash.lock);

	ctx->flags = 0;
	ctx->digsize = crypto_ahash_digestsize(tfm);

	switch (ctx->digsize) {
	case SHA1_DIGEST_SIZE:
		ctx->flags |= DRIVER_FLAGS_SHA1;
		break;
	case SHA256_DIGEST_SIZE:
		ctx->flags |= DRIVER_FLAGS_SHA256;
		break;
	case SHA224_DIGEST_SIZE:
		ctx->flags |= DRIVER_FLAGS_SHA224;
		break;
	case MD5_DIGEST_SIZE:
		ctx->flags |= DRIVER_FLAGS_MD5;
		break;
	default:
		return -EINVAL;
	}

	ctx->bufcnt = 0;
	ctx->offset = 0;
	ctx->sent = 0;
	ctx->total = req->nbytes;
	ctx->sg = req->src;
	ctx->sgfirst = req->src;
	ctx->nents = sg_nents(ctx->sg);

	return img_hash_handle_queue(ctx->hdev, req);
}

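/*
 * Allocate the software fallback and size the request context to hold the
 * fallback's own request plus an IMG_HASH_DMA_THRESHOLD byte bounce buffer.
 */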
static int img_hash_cra_init(struct crypto_tfm *tfm, const char *alg_name)
{
	struct img_hash_ctx *ctx = crypto_tfm_ctx(tfm);

	ctx->fallback = crypto_alloc_ahash(alg_name, 0,
					   CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(ctx->fallback)) {
		pr_err("img_hash: Could not load fallback driver.\n");
		return PTR_ERR(ctx->fallback);
	}
	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct img_hash_request_ctx) +
				 crypto_ahash_reqsize(ctx->fallback) +
				 IMG_HASH_DMA_THRESHOLD);

	return 0;
}

static int img_hash_cra_md5_init(struct crypto_tfm *tfm)
{
	return img_hash_cra_init(tfm, "md5-lib");
}

static int img_hash_cra_sha1_init(struct crypto_tfm *tfm)
{
	return img_hash_cra_init(tfm, "sha1-lib");
}

static int img_hash_cra_sha224_init(struct crypto_tfm *tfm)
{
	return img_hash_cra_init(tfm, "sha224-lib");
}

static int img_hash_cra_sha256_init(struct crypto_tfm *tfm)
{
	return img_hash_cra_init(tfm, "sha256-lib");
}

static void img_hash_cra_exit(struct crypto_tfm *tfm)
{
	struct img_hash_ctx *tctx = crypto_tfm_ctx(tfm);

	crypto_free_ahash(tctx->fallback);
}

static irqreturn_t img_irq_handler(int irq, void *dev_id)
{
	struct img_hash_dev *hdev = dev_id;
	u32 reg;

	reg = img_hash_read(hdev, CR_INTSTAT);
	img_hash_write(hdev, CR_INTCLEAR, reg);

	if (reg & CR_INT_NEW_RESULTS_SET) {
		dev_dbg(hdev->dev, "IRQ CR_INT_NEW_RESULTS_SET\n");
		if (DRIVER_FLAGS_BUSY & hdev->flags) {
			hdev->flags |= DRIVER_FLAGS_OUTPUT_READY;
			if (!(DRIVER_FLAGS_CPU & hdev->flags))
				hdev->flags |= DRIVER_FLAGS_DMA_READY;
			tasklet_schedule(&hdev->done_task);
		} else {
			dev_warn(hdev->dev,
				 "HASH interrupt when no active requests.\n");
		}
	} else if (reg & CR_INT_RESULTS_AVAILABLE) {
		dev_warn(hdev->dev,
			 "IRQ triggered before the hash had completed\n");
	} else if (reg & CR_INT_RESULT_READ_ERR) {
		dev_warn(hdev->dev,
			 "Attempt to read from an empty result queue\n");
	} else if (reg & CR_INT_MESSAGE_WRITE_ERROR) {
		dev_warn(hdev->dev,
			 "Data written before the hardware was configured\n");
	}
	return IRQ_HANDLED;
}

static struct ahash_alg img_algs[] = {
	{
		.init = img_hash_init,
		.update = img_hash_update,
		.final = img_hash_final,
		.finup = img_hash_finup,
		.export = img_hash_export,
		.import = img_hash_import,
		.digest = img_hash_digest,
		.halg = {
			.digestsize = MD5_DIGEST_SIZE,
			.statesize = sizeof(struct md5_state),
			.base = {
				.cra_name = "md5",
				.cra_driver_name = "img-md5",
				.cra_priority = 300,
				.cra_flags =
				CRYPTO_ALG_ASYNC |
				CRYPTO_ALG_NEED_FALLBACK,
				.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct img_hash_ctx),
				.cra_init = img_hash_cra_md5_init,
				.cra_exit = img_hash_cra_exit,
				.cra_module = THIS_MODULE,
			}
		}
	},
	{
		.init = img_hash_init,
		.update = img_hash_update,
		.final = img_hash_final,
		.finup = img_hash_finup,
		.export = img_hash_export,
		.import = img_hash_import,
		.digest = img_hash_digest,
		.halg = {
			.digestsize = SHA1_DIGEST_SIZE,
			.statesize = sizeof(struct sha1_state),
			.base = {
				.cra_name = "sha1",
				.cra_driver_name = "img-sha1",
				.cra_priority = 300,
				.cra_flags =
				CRYPTO_ALG_ASYNC |
				CRYPTO_ALG_NEED_FALLBACK,
				.cra_blocksize = SHA1_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct img_hash_ctx),
				.cra_init = img_hash_cra_sha1_init,
				.cra_exit = img_hash_cra_exit,
				.cra_module = THIS_MODULE,
			}
		}
	},
	{
		.init = img_hash_init,
		.update = img_hash_update,
		.final = img_hash_final,
		.finup = img_hash_finup,
		.export = img_hash_export,
		.import = img_hash_import,
		.digest = img_hash_digest,
		.halg = {
			.digestsize = SHA224_DIGEST_SIZE,
			.statesize = sizeof(struct sha256_state),
			.base = {
				.cra_name = "sha224",
				.cra_driver_name = "img-sha224",
				.cra_priority = 300,
				.cra_flags =
				CRYPTO_ALG_ASYNC |
				CRYPTO_ALG_NEED_FALLBACK,
				.cra_blocksize = SHA224_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct img_hash_ctx),
				.cra_init = img_hash_cra_sha224_init,
				.cra_exit = img_hash_cra_exit,
				.cra_module = THIS_MODULE,
			}
		}
	},
	{
		.init = img_hash_init,
		.update = img_hash_update,
		.final = img_hash_final,
		.finup = img_hash_finup,
		.export = img_hash_export,
		.import = img_hash_import,
		.digest = img_hash_digest,
		.halg = {
			.digestsize = SHA256_DIGEST_SIZE,
			.statesize = sizeof(struct sha256_state),
			.base = {
				.cra_name = "sha256",
				.cra_driver_name = "img-sha256",
				.cra_priority = 300,
				.cra_flags =
				CRYPTO_ALG_ASYNC |
				CRYPTO_ALG_NEED_FALLBACK,
				.cra_blocksize = SHA256_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct img_hash_ctx),
				.cra_init = img_hash_cra_sha256_init,
				.cra_exit = img_hash_cra_exit,
				.cra_module = THIS_MODULE,
			}
		}
	}
};

static int img_register_algs(struct img_hash_dev *hdev)
{
	int i, err;

	for (i = 0; i < ARRAY_SIZE(img_algs); i++) {
		err = crypto_register_ahash(&img_algs[i]);
		if (err) {
			crypto_unregister_ahashes(img_algs, i);
			return err;
		}
	}

	return 0;
}

static void img_unregister_algs(struct img_hash_dev *hdev)
{
	crypto_unregister_ahashes(img_algs, ARRAY_SIZE(img_algs));
}

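/*
 * Bottom half scheduled from the interrupt handler: unmap a completed DMA
 * transfer if one was active, then collect the digest and complete the
 * current request.
 */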
static void img_hash_done_task(unsigned long data)
{
	struct img_hash_dev *hdev = (struct img_hash_dev *)data;
	int err = 0;

	if (hdev->err == -EINVAL) {
		err = hdev->err;
		goto finish;
	}

	if (!(DRIVER_FLAGS_BUSY & hdev->flags)) {
		img_hash_handle_queue(hdev, NULL);
		return;
	}

	if (DRIVER_FLAGS_CPU & hdev->flags) {
		if (DRIVER_FLAGS_OUTPUT_READY & hdev->flags) {
			hdev->flags &= ~DRIVER_FLAGS_OUTPUT_READY;
			goto finish;
		}
	} else if (DRIVER_FLAGS_DMA_READY & hdev->flags) {
		if (DRIVER_FLAGS_DMA_ACTIVE & hdev->flags) {
			hdev->flags &= ~DRIVER_FLAGS_DMA_ACTIVE;
			img_hash_write_via_dma_stop(hdev);
			if (hdev->err) {
				err = hdev->err;
				goto finish;
			}
		}
		if (DRIVER_FLAGS_OUTPUT_READY & hdev->flags) {
			hdev->flags &= ~(DRIVER_FLAGS_DMA_READY |
					DRIVER_FLAGS_OUTPUT_READY);
			goto finish;
		}
	}
	return;

finish:
	img_hash_finish_req(hdev->req, err);
}

static const struct of_device_id img_hash_match[] __maybe_unused = {
	{ .compatible = "img,hash-accelerator" },
	{}
};
MODULE_DEVICE_TABLE(of, img_hash_match);

static int img_hash_probe(struct platform_device *pdev)
{
	struct img_hash_dev *hdev;
	struct device *dev = &pdev->dev;
	struct resource *hash_res;
	int irq;
	int err;

	hdev = devm_kzalloc(dev, sizeof(*hdev), GFP_KERNEL);
	if (hdev == NULL)
		return -ENOMEM;

	spin_lock_init(&hdev->lock);

	hdev->dev = dev;

	platform_set_drvdata(pdev, hdev);

	INIT_LIST_HEAD(&hdev->list);

	tasklet_init(&hdev->done_task, img_hash_done_task, (unsigned long)hdev);
	tasklet_init(&hdev->dma_task, img_hash_dma_task, (unsigned long)hdev);

	crypto_init_queue(&hdev->queue, IMG_HASH_QUEUE_LENGTH);

	/* Register bank */
	hdev->io_base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(hdev->io_base)) {
		err = PTR_ERR(hdev->io_base);
		goto res_err;
	}

	/* Write port (DMA or CPU) */
	hdev->cpu_addr = devm_platform_get_and_ioremap_resource(pdev, 1, &hash_res);
	if (IS_ERR(hdev->cpu_addr)) {
		err = PTR_ERR(hdev->cpu_addr);
		goto res_err;
	}
	hdev->bus_addr = hash_res->start;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		err = irq;
		goto res_err;
	}

	err = devm_request_irq(dev, irq, img_irq_handler, 0,
			       dev_name(dev), hdev);
	if (err) {
		dev_err(dev, "unable to request irq\n");
		goto res_err;
	}
	dev_dbg(dev, "using IRQ channel %d\n", irq);

	hdev->hash_clk = devm_clk_get_enabled(&pdev->dev, "hash");
	if (IS_ERR(hdev->hash_clk)) {
		dev_err(dev, "clock initialization failed.\n");
		err = PTR_ERR(hdev->hash_clk);
		goto res_err;
	}

	hdev->sys_clk = devm_clk_get_enabled(&pdev->dev, "sys");
	if (IS_ERR(hdev->sys_clk)) {
		dev_err(dev, "clock initialization failed.\n");
		err = PTR_ERR(hdev->sys_clk);
		goto res_err;
	}

	err = img_hash_dma_init(hdev);
	if (err)
		goto res_err;

	dev_dbg(dev, "using %s for DMA transfers\n",
		dma_chan_name(hdev->dma_lch));

	spin_lock(&img_hash.lock);
	list_add_tail(&hdev->list, &img_hash.dev_list);
	spin_unlock(&img_hash.lock);

	err = img_register_algs(hdev);
	if (err)
		goto err_algs;
	dev_info(dev, "Img MD5/SHA1/SHA224/SHA256 Hardware accelerator initialized\n");

	return 0;

err_algs:
	spin_lock(&img_hash.lock);
	list_del(&hdev->list);
	spin_unlock(&img_hash.lock);
	dma_release_channel(hdev->dma_lch);
res_err:
	tasklet_kill(&hdev->done_task);
	tasklet_kill(&hdev->dma_task);

	return err;
}

static void img_hash_remove(struct platform_device *pdev)
{
	struct img_hash_dev *hdev;

	hdev = platform_get_drvdata(pdev);
	spin_lock(&img_hash.lock);
	list_del(&hdev->list);
	spin_unlock(&img_hash.lock);

	img_unregister_algs(hdev);

	tasklet_kill(&hdev->done_task);
	tasklet_kill(&hdev->dma_task);

	dma_release_channel(hdev->dma_lch);
}

#ifdef CONFIG_PM_SLEEP
static int img_hash_suspend(struct device *dev)
{
	struct img_hash_dev *hdev = dev_get_drvdata(dev);

	clk_disable_unprepare(hdev->hash_clk);
	clk_disable_unprepare(hdev->sys_clk);

	return 0;
}

static int img_hash_resume(struct device *dev)
{
	struct img_hash_dev *hdev = dev_get_drvdata(dev);
	int ret;

	ret = clk_prepare_enable(hdev->hash_clk);
	if (ret)
		return ret;

	ret = clk_prepare_enable(hdev->sys_clk);
	if (ret) {
		clk_disable_unprepare(hdev->hash_clk);
		return ret;
	}

	return 0;
}
#endif /* CONFIG_PM_SLEEP */

static const struct dev_pm_ops img_hash_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(img_hash_suspend, img_hash_resume)
};

static struct platform_driver img_hash_driver = {
	.probe		= img_hash_probe,
	.remove		= img_hash_remove,
	.driver		= {
		.name	= "img-hash-accelerator",
		.pm	= &img_hash_pm_ops,
		.of_match_table	= img_hash_match,
	}
};
module_platform_driver(img_hash_driver);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Imgtec SHA1/224/256 & MD5 hw accelerator driver");
MODULE_AUTHOR("Will Thomas.");
MODULE_AUTHOR("James Hartley <james.hartley@imgtec.com>");