// SPDX-License-Identifier: GPL-2.0-only
/*
 * AMD Cryptographic Coprocessor (CCP) AES crypto API support
 *
 * Copyright (C) 2013-2019 Advanced Micro Devices, Inc.
 *
 * Author: Tom Lendacky <thomas.lendacky@amd.com>
 */

#include <crypto/aes.h>
#include <crypto/ctr.h>
#include <crypto/internal/skcipher.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/string.h>

#include "ccp-crypto.h"

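/*
 * Completion callback run when the CCP finishes an AES command: for
 * chaining modes (anything but ECB), copy the updated IV back into
 * the request so the caller can continue the chain.
 */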
static int ccp_aes_complete(struct crypto_async_request *async_req, int ret)
{
	struct skcipher_request *req = skcipher_request_cast(async_req);
	struct ccp_ctx *ctx = crypto_skcipher_ctx_dma(
		crypto_skcipher_reqtfm(req));
	struct ccp_aes_req_ctx *rctx = skcipher_request_ctx_dma(req);

	if (ret)
		return ret;

	if (ctx->u.aes.mode != CCP_AES_MODE_ECB) {
		size_t ivsize = crypto_skcipher_ivsize(crypto_skcipher_reqtfm(req));

		memcpy(req->iv, rctx->iv, ivsize);
	}

	return 0;
}

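/*
 * Cache the key in the transform context, recording its CCP key type
 * (128/192/256-bit) and mapping it into a scatterlist for later
 * submission to the device.
 */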
static int ccp_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
			  unsigned int key_len)
{
	struct ccp_crypto_skcipher_alg *alg = ccp_crypto_skcipher_alg(tfm);
	struct ccp_ctx *ctx = crypto_skcipher_ctx_dma(tfm);

	switch (key_len) {
	case AES_KEYSIZE_128:
		ctx->u.aes.type = CCP_AES_TYPE_128;
		break;
	case AES_KEYSIZE_192:
		ctx->u.aes.type = CCP_AES_TYPE_192;
		break;
	case AES_KEYSIZE_256:
		ctx->u.aes.type = CCP_AES_TYPE_256;
		break;
	default:
		return -EINVAL;
	}
	ctx->u.aes.mode = alg->mode;
	ctx->u.aes.key_len = key_len;

	memcpy(ctx->u.aes.key, key, key_len);
	sg_init_one(&ctx->u.aes.key_sg, ctx->u.aes.key, key_len);

	return 0;
}

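/*
 * Translate an skcipher request into a CCP command and queue it for
 * asynchronous processing.  ECB and CBC requests must be a whole
 * number of AES blocks, and every mode except ECB requires an IV.
 */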
static int ccp_aes_crypt(struct skcipher_request *req, bool encrypt)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct ccp_ctx *ctx = crypto_skcipher_ctx_dma(tfm);
	struct ccp_aes_req_ctx *rctx = skcipher_request_ctx_dma(req);
	struct scatterlist *iv_sg = NULL;
	unsigned int iv_len = 0;

	if (!ctx->u.aes.key_len)
		return -EINVAL;

	if (((ctx->u.aes.mode == CCP_AES_MODE_ECB) ||
	     (ctx->u.aes.mode == CCP_AES_MODE_CBC)) &&
	    (req->cryptlen & (AES_BLOCK_SIZE - 1)))
		return -EINVAL;

	if (ctx->u.aes.mode != CCP_AES_MODE_ECB) {
		if (!req->iv)
			return -EINVAL;

		memcpy(rctx->iv, req->iv, AES_BLOCK_SIZE);
		iv_sg = &rctx->iv_sg;
		iv_len = AES_BLOCK_SIZE;
		sg_init_one(iv_sg, rctx->iv, iv_len);
	}

	memset(&rctx->cmd, 0, sizeof(rctx->cmd));
	INIT_LIST_HEAD(&rctx->cmd.entry);
	rctx->cmd.engine = CCP_ENGINE_AES;
	rctx->cmd.u.aes.type = ctx->u.aes.type;
	rctx->cmd.u.aes.mode = ctx->u.aes.mode;
	rctx->cmd.u.aes.action =
		(encrypt) ? CCP_AES_ACTION_ENCRYPT : CCP_AES_ACTION_DECRYPT;
	rctx->cmd.u.aes.key = &ctx->u.aes.key_sg;
	rctx->cmd.u.aes.key_len = ctx->u.aes.key_len;
	rctx->cmd.u.aes.iv = iv_sg;
	rctx->cmd.u.aes.iv_len = iv_len;
	rctx->cmd.u.aes.src = req->src;
	rctx->cmd.u.aes.src_len = req->cryptlen;
	rctx->cmd.u.aes.dst = req->dst;

	return ccp_crypto_enqueue_request(&req->base, &rctx->cmd);
}

static int ccp_aes_encrypt(struct skcipher_request *req)
{
	return ccp_aes_crypt(req, true);
}

static int ccp_aes_decrypt(struct skcipher_request *req)
{
	return ccp_aes_crypt(req, false);
}

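/*
 * Per-transform setup: install the completion callback and reserve a
 * DMA-aligned per-request context.
 */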
static int ccp_aes_init_tfm(struct crypto_skcipher *tfm)
{
	struct ccp_ctx *ctx = crypto_skcipher_ctx_dma(tfm);

	ctx->complete = ccp_aes_complete;
	ctx->u.aes.key_len = 0;

	crypto_skcipher_set_reqsize_dma(tfm, sizeof(struct ccp_aes_req_ctx));

	return 0;
}

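/*
 * ccp_aes_rfc3686_crypt() pointed req->iv at the locally assembled
 * CTR block; restore the caller's IV pointer before completing.
 */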
static int ccp_aes_rfc3686_complete(struct crypto_async_request *async_req,
				    int ret)
{
	struct skcipher_request *req = skcipher_request_cast(async_req);
	struct ccp_aes_req_ctx *rctx = skcipher_request_ctx_dma(req);

	/* Restore the original pointer */
	req->iv = rctx->rfc3686_info;

	return ccp_aes_complete(async_req, ret);
}

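/*
 * An RFC 3686 key blob is the AES key with a 4-byte nonce appended;
 * strip off the nonce and program the remainder as an ordinary key.
 */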
static int ccp_aes_rfc3686_setkey(struct crypto_skcipher *tfm, const u8 *key,
				  unsigned int key_len)
{
	struct ccp_ctx *ctx = crypto_skcipher_ctx_dma(tfm);

	if (key_len < CTR_RFC3686_NONCE_SIZE)
		return -EINVAL;

	key_len -= CTR_RFC3686_NONCE_SIZE;
	memcpy(ctx->u.aes.nonce, key + key_len, CTR_RFC3686_NONCE_SIZE);

	return ccp_aes_setkey(tfm, key, key_len);
}

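/*
 * Build the 16-byte RFC 3686 counter block: the 4-byte nonce, the
 * caller's 8-byte IV, then a big-endian block counter starting at 1.
 */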
static int ccp_aes_rfc3686_crypt(struct skcipher_request *req, bool encrypt)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct ccp_ctx *ctx = crypto_skcipher_ctx_dma(tfm);
	struct ccp_aes_req_ctx *rctx = skcipher_request_ctx_dma(req);
	u8 *iv;

	/* Initialize the CTR block */
	iv = rctx->rfc3686_iv;
	memcpy(iv, ctx->u.aes.nonce, CTR_RFC3686_NONCE_SIZE);

	iv += CTR_RFC3686_NONCE_SIZE;
	memcpy(iv, req->iv, CTR_RFC3686_IV_SIZE);

	iv += CTR_RFC3686_IV_SIZE;
	*(__be32 *)iv = cpu_to_be32(1);

	/* Point to the new IV */
	rctx->rfc3686_info = req->iv;
	req->iv = rctx->rfc3686_iv;

	return ccp_aes_crypt(req, encrypt);
}

static int ccp_aes_rfc3686_encrypt(struct skcipher_request *req)
{
	return ccp_aes_rfc3686_crypt(req, true);
}

static int ccp_aes_rfc3686_decrypt(struct skcipher_request *req)
{
	return ccp_aes_rfc3686_crypt(req, false);
}

static int ccp_aes_rfc3686_init_tfm(struct crypto_skcipher *tfm)
{
	struct ccp_ctx *ctx = crypto_skcipher_ctx_dma(tfm);

	ctx->complete = ccp_aes_rfc3686_complete;
	ctx->u.aes.key_len = 0;

	crypto_skcipher_set_reqsize_dma(tfm, sizeof(struct ccp_aes_req_ctx));

	return 0;
}

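/*
 * Algorithm templates; the per-mode name, block size and IV size are
 * filled in at registration time.
 */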
static const struct skcipher_alg ccp_aes_defaults = {
	.setkey = ccp_aes_setkey,
	.encrypt = ccp_aes_encrypt,
	.decrypt = ccp_aes_decrypt,
	.min_keysize = AES_MIN_KEY_SIZE,
	.max_keysize = AES_MAX_KEY_SIZE,
	.init = ccp_aes_init_tfm,

	.base.cra_flags = CRYPTO_ALG_ASYNC |
			  CRYPTO_ALG_ALLOCATES_MEMORY |
			  CRYPTO_ALG_KERN_DRIVER_ONLY |
			  CRYPTO_ALG_NEED_FALLBACK,
	.base.cra_blocksize = AES_BLOCK_SIZE,
	.base.cra_ctxsize = sizeof(struct ccp_ctx) + CRYPTO_DMA_PADDING,
	.base.cra_priority = CCP_CRA_PRIORITY,
	.base.cra_module = THIS_MODULE,
};

static const struct skcipher_alg ccp_aes_rfc3686_defaults = {
	.setkey = ccp_aes_rfc3686_setkey,
	.encrypt = ccp_aes_rfc3686_encrypt,
	.decrypt = ccp_aes_rfc3686_decrypt,
	.min_keysize = AES_MIN_KEY_SIZE + CTR_RFC3686_NONCE_SIZE,
	.max_keysize = AES_MAX_KEY_SIZE + CTR_RFC3686_NONCE_SIZE,
	.init = ccp_aes_rfc3686_init_tfm,

	.base.cra_flags = CRYPTO_ALG_ASYNC |
			  CRYPTO_ALG_ALLOCATES_MEMORY |
			  CRYPTO_ALG_KERN_DRIVER_ONLY |
			  CRYPTO_ALG_NEED_FALLBACK,
	.base.cra_blocksize = CTR_RFC3686_BLOCK_SIZE,
	.base.cra_ctxsize = sizeof(struct ccp_ctx) + CRYPTO_DMA_PADDING,
	.base.cra_priority = CCP_CRA_PRIORITY,
	.base.cra_module = THIS_MODULE,
};

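/* Per-mode registration data used to specialize the templates above. */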
struct ccp_aes_def {
	enum ccp_aes_mode mode;
	unsigned int version;
	const char *name;
	const char *driver_name;
	unsigned int blocksize;
	unsigned int ivsize;
	const struct skcipher_alg *alg_defaults;
};

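/* AES modes supported by the CCP; all require at least CCP version 3.0. */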
static struct ccp_aes_def aes_algs[] = {
	{
		.mode = CCP_AES_MODE_ECB,
		.version = CCP_VERSION(3, 0),
		.name = "ecb(aes)",
		.driver_name = "ecb-aes-ccp",
		.blocksize = AES_BLOCK_SIZE,
		.ivsize = 0,
		.alg_defaults = &ccp_aes_defaults,
	},
	{
		.mode = CCP_AES_MODE_CBC,
		.version = CCP_VERSION(3, 0),
		.name = "cbc(aes)",
		.driver_name = "cbc-aes-ccp",
		.blocksize = AES_BLOCK_SIZE,
		.ivsize = AES_BLOCK_SIZE,
		.alg_defaults = &ccp_aes_defaults,
	},
	{
		.mode = CCP_AES_MODE_CTR,
		.version = CCP_VERSION(3, 0),
		.name = "ctr(aes)",
		.driver_name = "ctr-aes-ccp",
		.blocksize = 1,
		.ivsize = AES_BLOCK_SIZE,
		.alg_defaults = &ccp_aes_defaults,
	},
	{
		.mode = CCP_AES_MODE_CTR,
		.version = CCP_VERSION(3, 0),
		.name = "rfc3686(ctr(aes))",
		.driver_name = "rfc3686-ctr-aes-ccp",
		.blocksize = 1,
		.ivsize = CTR_RFC3686_IV_SIZE,
		.alg_defaults = &ccp_aes_rfc3686_defaults,
	},
};

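/*
 * Allocate a wrapper for one algorithm instance, specialize the
 * template for the given mode and register it with the crypto API.
 */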
static int ccp_register_aes_alg(struct list_head *head,
				const struct ccp_aes_def *def)
{
	struct ccp_crypto_skcipher_alg *ccp_alg;
	struct skcipher_alg *alg;
	int ret;

	ccp_alg = kzalloc(sizeof(*ccp_alg), GFP_KERNEL);
	if (!ccp_alg)
		return -ENOMEM;

	INIT_LIST_HEAD(&ccp_alg->entry);

	ccp_alg->mode = def->mode;

	/* Copy the defaults and override as necessary */
	alg = &ccp_alg->alg;
	*alg = *def->alg_defaults;
	strscpy(alg->base.cra_name, def->name);
	strscpy(alg->base.cra_driver_name, def->driver_name);
	alg->base.cra_blocksize = def->blocksize;
	alg->ivsize = def->ivsize;

	ret = crypto_register_skcipher(alg);
	if (ret) {
		pr_err("%s skcipher algorithm registration error (%d)\n",
		       alg->base.cra_name, ret);
		kfree(ccp_alg);
		return ret;
	}

	list_add(&ccp_alg->entry, head);

	return 0;
}

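/*
 * Register every AES mode supported by the detected CCP version,
 * adding each registration to @head so the caller can track it.
 *
 * Once registered, the algorithms are reachable through the regular
 * crypto API, e.g. (illustrative only):
 *
 *	tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
 */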
int ccp_register_aes_algs(struct list_head *head)
{
	int i, ret;
	unsigned int ccpversion = ccp_version();

	for (i = 0; i < ARRAY_SIZE(aes_algs); i++) {
		if (aes_algs[i].version > ccpversion)
			continue;
		ret = ccp_register_aes_alg(head, &aes_algs[i]);
		if (ret)
			return ret;
	}

	return 0;
}