Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel
os
linux
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Copyright (c) 2020 Hannes Reinecke, SUSE Linux
4 */
5
6#include <linux/crc32.h>
7#include <linux/base64.h>
8#include <linux/prandom.h>
9#include <linux/unaligned.h>
10#include <crypto/dh.h>
11#include "nvme.h"
12#include "fabrics.h"
13#include <linux/nvme-auth.h>
14#include <linux/nvme-keyring.h>
15
/* Maximum size of a single DH-HMAC-CHAP message buffer (fits ffdhe8192) */
#define CHAP_BUF_SIZE 4096
/* Slab cache and mempool backing the per-negotiation message buffers */
static struct kmem_cache *nvme_chap_buf_cache;
static mempool_t *nvme_chap_buf_pool;
19
/*
 * Per-queue DH-HMAC-CHAP negotiation state.
 *
 * One context is allocated per possible queue (see nvme_auth_init_ctrl());
 * auth_work drives the protocol state machine in nvme_queue_auth_work().
 */
struct nvme_dhchap_queue_context {
	struct list_head entry;
	struct work_struct auth_work;
	struct nvme_ctrl *ctrl;
	struct crypto_kpp *dh_tfm;	/* DH transform; NULL for the NULL DH group */
	struct nvme_dhchap_key *transformed_key; /* host key after NQN transform */
	void *buf;		/* CHAP_BUF_SIZE message buffer from the mempool */
	int qid;
	int error;		/* negotiation result, read by nvme_auth_wait() */
	u32 s1;			/* sequence number from the controller challenge */
	u32 s2;			/* sequence number sent in the host reply */
	bool bi_directional;	/* controller authentication requested */
	bool authenticated;
	u16 transaction;
	u8 status;		/* DH-HMAC-CHAP failure reason code, 0 if none */
	u8 dhgroup_id;
	u8 hash_id;
	u8 sc_c;		/* secure channel concatenation code (SC_C) */
	size_t hash_len;
	u8 c1[NVME_AUTH_MAX_DIGEST_SIZE];	/* controller challenge */
	u8 c2[NVME_AUTH_MAX_DIGEST_SIZE];	/* host challenge */
	u8 response[NVME_AUTH_MAX_DIGEST_SIZE];
	u8 *ctrl_key;		/* controller's DH public value */
	u8 *host_key;		/* host's DH public value */
	u8 *sess_key;		/* derived DH session key */
	int ctrl_key_len;
	int host_key_len;
	int sess_key_len;
};
49
50static struct workqueue_struct *nvme_auth_wq;
51
52static inline int ctrl_max_dhchaps(struct nvme_ctrl *ctrl)
53{
54 return ctrl->opts->nr_io_queues + ctrl->opts->nr_write_queues +
55 ctrl->opts->nr_poll_queues + 1;
56}
57
58static int nvme_auth_submit(struct nvme_ctrl *ctrl, int qid,
59 void *data, size_t data_len, bool auth_send)
60{
61 struct nvme_command cmd = {};
62 nvme_submit_flags_t flags = NVME_SUBMIT_RETRY;
63 struct request_queue *q = ctrl->fabrics_q;
64 int ret;
65
66 if (qid != 0) {
67 flags |= NVME_SUBMIT_NOWAIT | NVME_SUBMIT_RESERVED;
68 q = ctrl->connect_q;
69 }
70
71 cmd.auth_common.opcode = nvme_fabrics_command;
72 cmd.auth_common.secp = NVME_AUTH_DHCHAP_PROTOCOL_IDENTIFIER;
73 cmd.auth_common.spsp0 = 0x01;
74 cmd.auth_common.spsp1 = 0x01;
75 if (auth_send) {
76 cmd.auth_send.fctype = nvme_fabrics_type_auth_send;
77 cmd.auth_send.tl = cpu_to_le32(data_len);
78 } else {
79 cmd.auth_receive.fctype = nvme_fabrics_type_auth_receive;
80 cmd.auth_receive.al = cpu_to_le32(data_len);
81 }
82
83 ret = __nvme_submit_sync_cmd(q, &cmd, NULL, data, data_len,
84 qid == 0 ? NVME_QID_ANY : qid, flags);
85 if (ret > 0)
86 dev_warn(ctrl->device,
87 "qid %d auth_send failed with status %d\n", qid, ret);
88 else if (ret < 0)
89 dev_err(ctrl->device,
90 "qid %d auth_send failed with error %d\n", qid, ret);
91 return ret;
92}
93
94static int nvme_auth_receive_validate(struct nvme_ctrl *ctrl, int qid,
95 struct nvmf_auth_dhchap_failure_data *data,
96 u16 transaction, u8 expected_msg)
97{
98 dev_dbg(ctrl->device, "%s: qid %d auth_type %d auth_id %x\n",
99 __func__, qid, data->auth_type, data->auth_id);
100
101 if (data->auth_type == NVME_AUTH_COMMON_MESSAGES &&
102 data->auth_id == NVME_AUTH_DHCHAP_MESSAGE_FAILURE1) {
103 return data->rescode_exp;
104 }
105 if (data->auth_type != NVME_AUTH_DHCHAP_MESSAGES ||
106 data->auth_id != expected_msg) {
107 dev_warn(ctrl->device,
108 "qid %d invalid message %02x/%02x\n",
109 qid, data->auth_type, data->auth_id);
110 return NVME_AUTH_DHCHAP_FAILURE_INCORRECT_MESSAGE;
111 }
112 if (le16_to_cpu(data->t_id) != transaction) {
113 dev_warn(ctrl->device,
114 "qid %d invalid transaction ID %d\n",
115 qid, le16_to_cpu(data->t_id));
116 return NVME_AUTH_DHCHAP_FAILURE_INCORRECT_MESSAGE;
117 }
118 return 0;
119}
120
/*
 * Build the AUTH_Negotiate message in chap->buf.
 *
 * Advertises SHA-256/384/512 and the FFDHE groups; the NULL DH group is
 * only offered when no secure channel concatenation is requested, since
 * concatenation needs a DH session key.
 * Returns the message size in bytes, or -EINVAL (with chap->status set)
 * if it would not fit into CHAP_BUF_SIZE.
 */
static int nvme_auth_set_dhchap_negotiate_data(struct nvme_ctrl *ctrl,
		struct nvme_dhchap_queue_context *chap)
{
	struct nvmf_auth_dhchap_negotiate_data *data = chap->buf;
	size_t size = sizeof(*data) + sizeof(union nvmf_auth_protocol);
	/* DH group IDs start after the hash ID slots in idlist[] */
	u8 dh_list_offset = NVME_AUTH_DHCHAP_MAX_DH_IDS;
	u8 *idlist = data->auth_protocol[0].dhchap.idlist;

	if (size > CHAP_BUF_SIZE) {
		chap->status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD;
		return -EINVAL;
	}
	memset((u8 *)chap->buf, 0, size);
	data->auth_type = NVME_AUTH_COMMON_MESSAGES;
	data->auth_id = NVME_AUTH_DHCHAP_MESSAGE_NEGOTIATE;
	data->t_id = cpu_to_le16(chap->transaction);
	if (ctrl->opts->concat && chap->qid == 0) {
		/* secure concatenation: request a new or replacement TLS PSK */
		if (ctrl->opts->tls_key)
			data->sc_c = NVME_AUTH_SECP_REPLACETLSPSK;
		else
			data->sc_c = NVME_AUTH_SECP_NEWTLSPSK;
	} else
		data->sc_c = NVME_AUTH_SECP_NOSC;
	chap->sc_c = data->sc_c;
	data->napd = 1;
	data->auth_protocol[0].dhchap.authid = NVME_AUTH_DHCHAP_AUTH_ID;
	data->auth_protocol[0].dhchap.halen = 3;
	idlist[0] = NVME_AUTH_HASH_SHA256;
	idlist[1] = NVME_AUTH_HASH_SHA384;
	idlist[2] = NVME_AUTH_HASH_SHA512;
	/* NULL DH yields no session key, so don't offer it for concatenation */
	if (chap->sc_c == NVME_AUTH_SECP_NOSC)
		idlist[dh_list_offset++] = NVME_AUTH_DHGROUP_NULL;
	idlist[dh_list_offset++] = NVME_AUTH_DHGROUP_2048;
	idlist[dh_list_offset++] = NVME_AUTH_DHGROUP_3072;
	idlist[dh_list_offset++] = NVME_AUTH_DHGROUP_4096;
	idlist[dh_list_offset++] = NVME_AUTH_DHGROUP_6144;
	idlist[dh_list_offset++] = NVME_AUTH_DHGROUP_8192;
	data->auth_protocol[0].dhchap.dhlen =
		dh_list_offset - NVME_AUTH_DHCHAP_MAX_DH_IDS;

	return size;
}
163
/*
 * Parse the AUTH_Challenge message and select hash and DH group.
 *
 * Validates the hash ID/length and DH group ID offered by the
 * controller, reuses or (re)allocates the DH transform as needed, and
 * stores the controller challenge plus its DH public value (if any) in
 * the queue context. On any protocol violation chap->status is set and
 * a negative errno returned.
 */
static int nvme_auth_process_dhchap_challenge(struct nvme_ctrl *ctrl,
		struct nvme_dhchap_queue_context *chap)
{
	struct nvmf_auth_dhchap_challenge_data *data = chap->buf;
	u16 dhvlen = le16_to_cpu(data->dhvlen);
	size_t size = sizeof(*data) + data->hl + dhvlen;
	const char *gid_name = nvme_auth_dhgroup_name(data->dhgid);
	const char *hmac_name, *kpp_name;

	if (size > CHAP_BUF_SIZE) {
		chap->status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD;
		return -EINVAL;
	}

	hmac_name = nvme_auth_hmac_name(data->hashid);
	if (!hmac_name) {
		dev_warn(ctrl->device,
			 "qid %d: invalid HASH ID %d\n",
			 chap->qid, data->hashid);
		chap->status = NVME_AUTH_DHCHAP_FAILURE_HASH_UNUSABLE;
		return -EPROTO;
	}

	/* Same hash as a previous negotiation: skip re-validation */
	if (chap->hash_id == data->hashid && chap->hash_len == data->hl) {
		dev_dbg(ctrl->device,
			"qid %d: reuse existing hash %s\n",
			chap->qid, hmac_name);
		goto select_kpp;
	}

	if (nvme_auth_hmac_hash_len(data->hashid) != data->hl) {
		dev_warn(ctrl->device,
			 "qid %d: invalid hash length %d\n",
			 chap->qid, data->hl);
		chap->status = NVME_AUTH_DHCHAP_FAILURE_HASH_UNUSABLE;
		return -EPROTO;
	}

	chap->hash_id = data->hashid;
	chap->hash_len = data->hl;
	dev_dbg(ctrl->device, "qid %d: selected hash %s\n",
		chap->qid, hmac_name);

select_kpp:
	kpp_name = nvme_auth_dhgroup_kpp(data->dhgid);
	if (!kpp_name) {
		dev_warn(ctrl->device,
			 "qid %d: invalid DH group id %d\n",
			 chap->qid, data->dhgid);
		chap->status = NVME_AUTH_DHCHAP_FAILURE_DHGROUP_UNUSABLE;
		/* Leave previous dh_tfm intact */
		return -EPROTO;
	}

	/* Same group as before (and a transform where one is needed): reuse */
	if (chap->dhgroup_id == data->dhgid &&
	    (data->dhgid == NVME_AUTH_DHGROUP_NULL || chap->dh_tfm)) {
		dev_dbg(ctrl->device,
			"qid %d: reuse existing DH group %s\n",
			chap->qid, gid_name);
		goto skip_kpp;
	}

	/* Reset dh_tfm if it can't be reused */
	if (chap->dh_tfm) {
		crypto_free_kpp(chap->dh_tfm);
		chap->dh_tfm = NULL;
	}

	if (data->dhgid != NVME_AUTH_DHGROUP_NULL) {
		/* A real DH group must come with a public value */
		if (dhvlen == 0) {
			dev_warn(ctrl->device,
				 "qid %d: empty DH value\n",
				 chap->qid);
			chap->status = NVME_AUTH_DHCHAP_FAILURE_DHGROUP_UNUSABLE;
			return -EPROTO;
		}

		chap->dh_tfm = crypto_alloc_kpp(kpp_name, 0, 0);
		if (IS_ERR(chap->dh_tfm)) {
			int ret = PTR_ERR(chap->dh_tfm);

			dev_warn(ctrl->device,
				 "qid %d: error %d initializing DH group %s\n",
				 chap->qid, ret, gid_name);
			chap->status = NVME_AUTH_DHCHAP_FAILURE_DHGROUP_UNUSABLE;
			chap->dh_tfm = NULL;
			return ret;
		}
		dev_dbg(ctrl->device, "qid %d: selected DH group %s\n",
			chap->qid, gid_name);
	} else if (dhvlen != 0) {
		/* Conversely, NULL DH must not carry a public value */
		dev_warn(ctrl->device,
			 "qid %d: invalid DH value for NULL DH\n",
			 chap->qid);
		chap->status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD;
		return -EPROTO;
	}
	chap->dhgroup_id = data->dhgid;

skip_kpp:
	chap->s1 = le32_to_cpu(data->seqnum);
	memcpy(chap->c1, data->cval, chap->hash_len);
	if (dhvlen) {
		/* the controller's DH public value follows the challenge */
		chap->ctrl_key = kmalloc(dhvlen, GFP_KERNEL);
		if (!chap->ctrl_key) {
			chap->status = NVME_AUTH_DHCHAP_FAILURE_FAILED;
			return -ENOMEM;
		}
		chap->ctrl_key_len = dhvlen;
		memcpy(chap->ctrl_key, data->cval + chap->hash_len,
		       dhvlen);
		dev_dbg(ctrl->device, "ctrl public key %*ph\n",
			(int)chap->ctrl_key_len, chap->ctrl_key);
	}

	return 0;
}
281
/*
 * Build the AUTH_Reply message: host response, optional host challenge
 * C2 (for bi-directional authentication or secure concatenation) and
 * the host's DH public value.
 * Returns the message size, or -EINVAL (with chap->status set) if it
 * would exceed CHAP_BUF_SIZE.
 */
static int nvme_auth_set_dhchap_reply_data(struct nvme_ctrl *ctrl,
		struct nvme_dhchap_queue_context *chap)
{
	struct nvmf_auth_dhchap_reply_data *data = chap->buf;
	size_t size = sizeof(*data);

	/* rval[] holds the response and C2, each hash_len bytes */
	size += 2 * chap->hash_len;

	if (chap->host_key_len)
		size += chap->host_key_len;

	if (size > CHAP_BUF_SIZE) {
		chap->status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD;
		return -EINVAL;
	}

	memset(chap->buf, 0, size);
	data->auth_type = NVME_AUTH_DHCHAP_MESSAGES;
	data->auth_id = NVME_AUTH_DHCHAP_MESSAGE_REPLY;
	data->t_id = cpu_to_le16(chap->transaction);
	data->hl = chap->hash_len;
	data->dhvlen = cpu_to_le16(chap->host_key_len);
	memcpy(data->rval, chap->response, chap->hash_len);
	if (ctrl->ctrl_key)
		chap->bi_directional = true;
	if (ctrl->ctrl_key || ctrl->opts->concat) {
		/* generate a host challenge for the controller to answer */
		get_random_bytes(chap->c2, chap->hash_len);
		data->cvalid = 1;
		memcpy(data->rval + chap->hash_len, chap->c2,
		       chap->hash_len);
		dev_dbg(ctrl->device, "%s: qid %d ctrl challenge %*ph\n",
			__func__, chap->qid, (int)chap->hash_len, chap->c2);
	} else {
		memset(chap->c2, 0, chap->hash_len);
	}
	if (ctrl->opts->concat) {
		/* secure concatenation: no controller response expected */
		chap->s2 = 0;
		chap->bi_directional = false;
	} else
		chap->s2 = nvme_auth_get_seqnum();
	data->seqnum = cpu_to_le32(chap->s2);
	if (chap->host_key_len) {
		dev_dbg(ctrl->device, "%s: qid %d host public key %*ph\n",
			__func__, chap->qid,
			chap->host_key_len, chap->host_key);
		memcpy(data->rval + 2 * chap->hash_len, chap->host_key,
		       chap->host_key_len);
	}

	return size;
}
333
/*
 * Process the AUTH_Success1 message.
 *
 * If the controller supplied a response value (bi-directional
 * authentication) verify it against the expected response previously
 * stored in chap->response. Returns 0 on success, -EPROTO on a hash
 * length mismatch, -ECONNREFUSED if the controller response does not
 * match.
 */
static int nvme_auth_process_dhchap_success1(struct nvme_ctrl *ctrl,
		struct nvme_dhchap_queue_context *chap)
{
	struct nvmf_auth_dhchap_success1_data *data = chap->buf;
	size_t size = sizeof(*data) + chap->hash_len;

	if (size > CHAP_BUF_SIZE) {
		chap->status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD;
		return -EINVAL;
	}

	if (data->hl != chap->hash_len) {
		dev_warn(ctrl->device,
			 "qid %d: invalid hash length %u\n",
			 chap->qid, data->hl);
		chap->status = NVME_AUTH_DHCHAP_FAILURE_HASH_UNUSABLE;
		return -EPROTO;
	}

	/* Just print out information for the admin queue */
	if (chap->qid == 0)
		dev_info(ctrl->device,
			 "qid 0: authenticated with hash %s dhgroup %s\n",
			 nvme_auth_hmac_name(chap->hash_id),
			 nvme_auth_dhgroup_name(chap->dhgroup_id));

	/* No controller response: unidirectional authentication is done */
	if (!data->rvalid)
		return 0;

	/* Validate controller response */
	if (memcmp(chap->response, data->rval, data->hl)) {
		dev_dbg(ctrl->device, "%s: qid %d ctrl response %*ph\n",
			__func__, chap->qid, (int)chap->hash_len, data->rval);
		dev_dbg(ctrl->device, "%s: qid %d host response %*ph\n",
			__func__, chap->qid, (int)chap->hash_len,
			chap->response);
		dev_warn(ctrl->device,
			 "qid %d: controller authentication failed\n",
			 chap->qid);
		chap->status = NVME_AUTH_DHCHAP_FAILURE_FAILED;
		return -ECONNREFUSED;
	}

	/* Just print out information for the admin queue */
	if (chap->qid == 0)
		dev_info(ctrl->device,
			 "qid 0: controller authenticated\n");
	return 0;
}
383
384static int nvme_auth_set_dhchap_success2_data(struct nvme_ctrl *ctrl,
385 struct nvme_dhchap_queue_context *chap)
386{
387 struct nvmf_auth_dhchap_success2_data *data = chap->buf;
388 size_t size = sizeof(*data);
389
390 memset(chap->buf, 0, size);
391 data->auth_type = NVME_AUTH_DHCHAP_MESSAGES;
392 data->auth_id = NVME_AUTH_DHCHAP_MESSAGE_SUCCESS2;
393 data->t_id = cpu_to_le16(chap->transaction);
394
395 return size;
396}
397
398static int nvme_auth_set_dhchap_failure2_data(struct nvme_ctrl *ctrl,
399 struct nvme_dhchap_queue_context *chap)
400{
401 struct nvmf_auth_dhchap_failure_data *data = chap->buf;
402 size_t size = sizeof(*data);
403
404 memset(chap->buf, 0, size);
405 data->auth_type = NVME_AUTH_COMMON_MESSAGES;
406 data->auth_id = NVME_AUTH_DHCHAP_MESSAGE_FAILURE2;
407 data->t_id = cpu_to_le16(chap->transaction);
408 data->rescode = NVME_AUTH_DHCHAP_FAILURE_REASON_FAILED;
409 data->rescode_exp = chap->status;
410
411 return size;
412}
413
/*
 * Compute the host response into chap->response.
 *
 * The response is an HMAC keyed with the transformed host key over:
 * the (possibly DH-augmented) challenge, S1 (4 bytes LE), the
 * transaction ID (2 bytes LE), SC_C, the literal "HostHost", the host
 * NQN, a zero separator byte and the subsystem NQN.
 * The transformed key is cached in the context for reuse; caller holds
 * dhchap_auth_mutex (see nvme_queue_auth_work()).
 */
static int nvme_auth_dhchap_setup_host_response(struct nvme_ctrl *ctrl,
		struct nvme_dhchap_queue_context *chap)
{
	struct nvme_auth_hmac_ctx hmac;
	u8 buf[4], *challenge = chap->c1;
	int ret;

	dev_dbg(ctrl->device, "%s: qid %d host response seq %u transaction %d\n",
		__func__, chap->qid, chap->s1, chap->transaction);

	if (!chap->transformed_key) {
		chap->transformed_key = nvme_auth_transform_key(ctrl->host_key,
						ctrl->opts->host->nqn);
		if (IS_ERR(chap->transformed_key)) {
			ret = PTR_ERR(chap->transformed_key);
			chap->transformed_key = NULL;
			return ret;
		}
	} else {
		dev_dbg(ctrl->device, "%s: qid %d re-using host response\n",
			__func__, chap->qid);
	}

	ret = nvme_auth_hmac_init(&hmac, chap->hash_id,
				  chap->transformed_key->key,
				  chap->transformed_key->len);
	if (ret)
		goto out;

	if (chap->dh_tfm) {
		/* FFDHE group: augment the challenge with the session key */
		challenge = kmalloc(chap->hash_len, GFP_KERNEL);
		if (!challenge) {
			ret = -ENOMEM;
			goto out;
		}
		ret = nvme_auth_augmented_challenge(chap->hash_id,
						    chap->sess_key,
						    chap->sess_key_len,
						    chap->c1, challenge,
						    chap->hash_len);
		if (ret)
			goto out;
	}

	nvme_auth_hmac_update(&hmac, challenge, chap->hash_len);

	/* sequence number, little endian */
	put_unaligned_le32(chap->s1, buf);
	nvme_auth_hmac_update(&hmac, buf, 4);

	/* transaction identifier, little endian */
	put_unaligned_le16(chap->transaction, buf);
	nvme_auth_hmac_update(&hmac, buf, 2);

	/* secure channel concatenation code */
	*buf = chap->sc_c;
	nvme_auth_hmac_update(&hmac, buf, 1);
	nvme_auth_hmac_update(&hmac, "HostHost", 8);
	nvme_auth_hmac_update(&hmac, ctrl->opts->host->nqn,
			      strlen(ctrl->opts->host->nqn));
	/* zero byte separating the two NQNs */
	memset(buf, 0, sizeof(buf));
	nvme_auth_hmac_update(&hmac, buf, 1);
	nvme_auth_hmac_update(&hmac, ctrl->opts->subsysnqn,
			      strlen(ctrl->opts->subsysnqn));
	nvme_auth_hmac_final(&hmac, chap->response);
	ret = 0;
out:
	/* only free an augmented challenge, never chap->c1 itself */
	if (challenge != chap->c1)
		kfree(challenge);
	/* wipe key material from the stack */
	memzero_explicit(&hmac, sizeof(hmac));
	return ret;
}
483
/*
 * Compute the expected controller response into chap->response.
 *
 * Mirrors nvme_auth_dhchap_setup_host_response() with the roles
 * swapped: HMAC keyed with the transformed controller key over the
 * (possibly DH-augmented) host challenge C2, S2, the transaction ID,
 * SC_C, the literal "Controller", the subsystem NQN, a zero separator
 * byte and the host NQN.
 */
static int nvme_auth_dhchap_setup_ctrl_response(struct nvme_ctrl *ctrl,
		struct nvme_dhchap_queue_context *chap)
{
	struct nvme_auth_hmac_ctx hmac;
	struct nvme_dhchap_key *transformed_key;
	u8 buf[4], *challenge = chap->c2;
	int ret;

	transformed_key = nvme_auth_transform_key(ctrl->ctrl_key,
				ctrl->opts->subsysnqn);
	if (IS_ERR(transformed_key)) {
		ret = PTR_ERR(transformed_key);
		return ret;
	}

	ret = nvme_auth_hmac_init(&hmac, chap->hash_id, transformed_key->key,
				  transformed_key->len);
	if (ret) {
		dev_warn(ctrl->device, "qid %d: failed to init hmac, error %d\n",
			 chap->qid, ret);
		goto out;
	}

	if (chap->dh_tfm) {
		/* FFDHE group: augment the challenge with the session key */
		challenge = kmalloc(chap->hash_len, GFP_KERNEL);
		if (!challenge) {
			ret = -ENOMEM;
			goto out;
		}
		ret = nvme_auth_augmented_challenge(chap->hash_id,
						    chap->sess_key,
						    chap->sess_key_len,
						    chap->c2, challenge,
						    chap->hash_len);
		if (ret)
			goto out;
	}
	dev_dbg(ctrl->device, "%s: qid %d ctrl response seq %u transaction %d\n",
		__func__, chap->qid, chap->s2, chap->transaction);
	dev_dbg(ctrl->device, "%s: qid %d challenge %*ph\n",
		__func__, chap->qid, (int)chap->hash_len, challenge);
	dev_dbg(ctrl->device, "%s: qid %d subsysnqn %s\n",
		__func__, chap->qid, ctrl->opts->subsysnqn);
	dev_dbg(ctrl->device, "%s: qid %d hostnqn %s\n",
		__func__, chap->qid, ctrl->opts->host->nqn);

	nvme_auth_hmac_update(&hmac, challenge, chap->hash_len);

	/* sequence number, little endian */
	put_unaligned_le32(chap->s2, buf);
	nvme_auth_hmac_update(&hmac, buf, 4);

	/* transaction identifier, little endian */
	put_unaligned_le16(chap->transaction, buf);
	nvme_auth_hmac_update(&hmac, buf, 2);

	/* secure channel concatenation code */
	*buf = chap->sc_c;
	nvme_auth_hmac_update(&hmac, buf, 1);
	nvme_auth_hmac_update(&hmac, "Controller", 10);
	nvme_auth_hmac_update(&hmac, ctrl->opts->subsysnqn,
			      strlen(ctrl->opts->subsysnqn));
	/* zero byte separating the two NQNs */
	memset(buf, 0, 4);
	nvme_auth_hmac_update(&hmac, buf, 1);
	nvme_auth_hmac_update(&hmac, ctrl->opts->host->nqn,
			      strlen(ctrl->opts->host->nqn));
	nvme_auth_hmac_final(&hmac, chap->response);
	ret = 0;
out:
	/* only free an augmented challenge, never chap->c2 itself */
	if (challenge != chap->c2)
		kfree(challenge);
	/* wipe key material from the stack */
	memzero_explicit(&hmac, sizeof(hmac));
	nvme_auth_free_key(transformed_key);
	return ret;
}
556
/*
 * DH key exchange step: generate the host key pair (unless a previous
 * one can be reused) and derive the shared session key from the
 * controller's public value.
 * On failure chap->status is set and a negative errno returned.
 */
static int nvme_auth_dhchap_exponential(struct nvme_ctrl *ctrl,
		struct nvme_dhchap_queue_context *chap)
{
	int ret;

	if (chap->host_key && chap->host_key_len) {
		dev_dbg(ctrl->device,
			"qid %d: reusing host key\n", chap->qid);
		goto gen_sesskey;
	}
	ret = nvme_auth_gen_privkey(chap->dh_tfm, chap->dhgroup_id);
	if (ret < 0) {
		chap->status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD;
		return ret;
	}

	chap->host_key_len = crypto_kpp_maxsize(chap->dh_tfm);

	chap->host_key = kzalloc(chap->host_key_len, GFP_KERNEL);
	if (!chap->host_key) {
		chap->host_key_len = 0;
		chap->status = NVME_AUTH_DHCHAP_FAILURE_FAILED;
		return -ENOMEM;
	}
	ret = nvme_auth_gen_pubkey(chap->dh_tfm,
				   chap->host_key, chap->host_key_len);
	if (ret) {
		dev_dbg(ctrl->device,
			"failed to generate public key, error %d\n", ret);
		chap->status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD;
		return ret;
	}

gen_sesskey:
	/* session key buffer is sized to the selected hash's digest length */
	chap->sess_key_len = chap->hash_len;
	chap->sess_key = kmalloc(chap->sess_key_len, GFP_KERNEL);
	if (!chap->sess_key) {
		chap->sess_key_len = 0;
		chap->status = NVME_AUTH_DHCHAP_FAILURE_FAILED;
		return -ENOMEM;
	}

	ret = nvme_auth_gen_session_key(chap->dh_tfm,
					chap->ctrl_key, chap->ctrl_key_len,
					chap->sess_key, chap->sess_key_len,
					chap->hash_id);
	if (ret) {
		dev_dbg(ctrl->device,
			"failed to generate session key, error %d\n", ret);
		chap->status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD;
		return ret;
	}
	dev_dbg(ctrl->device, "session key %*ph\n",
		(int)chap->sess_key_len, chap->sess_key);
	return 0;
}
613
/*
 * Wipe all negotiation state and key material from a queue context so
 * sensitive data does not linger after an authentication attempt.
 * The DH transform and the 'authenticated' flag are deliberately left
 * alone for potential reuse; see nvme_auth_free_dhchap() for the final
 * teardown.
 */
static void nvme_auth_reset_dhchap(struct nvme_dhchap_queue_context *chap)
{
	nvme_auth_free_key(chap->transformed_key);
	chap->transformed_key = NULL;
	kfree_sensitive(chap->host_key);
	chap->host_key = NULL;
	chap->host_key_len = 0;
	kfree_sensitive(chap->ctrl_key);
	chap->ctrl_key = NULL;
	chap->ctrl_key_len = 0;
	kfree_sensitive(chap->sess_key);
	chap->sess_key = NULL;
	chap->sess_key_len = 0;
	chap->status = 0;
	chap->error = 0;
	chap->s1 = 0;
	chap->s2 = 0;
	chap->bi_directional = false;
	chap->transaction = 0;
	memset(chap->c1, 0, sizeof(chap->c1));
	memset(chap->c2, 0, sizeof(chap->c2));
	mempool_free(chap->buf, nvme_chap_buf_pool);
	chap->buf = NULL;
}
638
639static void nvme_auth_free_dhchap(struct nvme_dhchap_queue_context *chap)
640{
641 nvme_auth_reset_dhchap(chap);
642 chap->authenticated = false;
643 if (chap->dh_tfm)
644 crypto_free_kpp(chap->dh_tfm);
645}
646
/*
 * Revoke and drop the generated TLS PSK attached to the connection
 * options (used for secure channel concatenation).
 */
void nvme_auth_revoke_tls_key(struct nvme_ctrl *ctrl)
{
	dev_dbg(ctrl->device, "Wipe generated TLS PSK %08x\n",
		key_serial(ctrl->opts->tls_key));
	key_revoke(ctrl->opts->tls_key);
	key_put(ctrl->opts->tls_key);
	ctrl->opts->tls_key = NULL;
}
EXPORT_SYMBOL_GPL(nvme_auth_revoke_tls_key);
656
/*
 * Derive and install the TLS PSK for secure channel concatenation.
 *
 * From the negotiated DH session key and both challenges a generated
 * PSK is computed, its identity digest derived, and the resulting TLS
 * PSK inserted into the keyring, replacing (and revoking) any
 * previously generated key. Only valid on the admin queue, and only
 * after a DH session key has been negotiated.
 */
static int nvme_auth_secure_concat(struct nvme_ctrl *ctrl,
		struct nvme_dhchap_queue_context *chap)
{
	u8 *psk, *tls_psk;
	char *digest;
	struct key *tls_key;
	size_t psk_len;
	int ret = 0;

	if (!chap->sess_key) {
		dev_warn(ctrl->device,
			 "%s: qid %d no session key negotiated\n",
			 __func__, chap->qid);
		return -ENOKEY;
	}

	if (chap->qid) {
		dev_warn(ctrl->device,
			 "qid %d: secure concatenation not supported on I/O queues\n",
			 chap->qid);
		return -EINVAL;
	}
	/* Step 1: generated PSK from session key and both challenges */
	ret = nvme_auth_generate_psk(chap->hash_id, chap->sess_key,
				     chap->sess_key_len,
				     chap->c1, chap->c2,
				     chap->hash_len, &psk, &psk_len);
	if (ret) {
		dev_warn(ctrl->device,
			 "%s: qid %d failed to generate PSK, error %d\n",
			 __func__, chap->qid, ret);
		return ret;
	}
	dev_dbg(ctrl->device,
		"%s: generated psk %*ph\n", __func__, (int)psk_len, psk);

	/* Step 2: PSK identity digest over both NQNs */
	ret = nvme_auth_generate_digest(chap->hash_id, psk, psk_len,
					ctrl->opts->subsysnqn,
					ctrl->opts->host->nqn, &digest);
	if (ret) {
		dev_warn(ctrl->device,
			 "%s: qid %d failed to generate digest, error %d\n",
			 __func__, chap->qid, ret);
		goto out_free_psk;
	}
	dev_dbg(ctrl->device, "%s: generated digest %s\n",
		__func__, digest);
	/* Step 3: derive the actual TLS PSK */
	ret = nvme_auth_derive_tls_psk(chap->hash_id, psk, psk_len,
				       digest, &tls_psk);
	if (ret) {
		dev_warn(ctrl->device,
			 "%s: qid %d failed to derive TLS psk, error %d\n",
			 __func__, chap->qid, ret);
		goto out_free_digest;
	}

	/* Step 4: insert the key and replace any previous generated PSK */
	tls_key = nvme_tls_psk_refresh(ctrl->opts->keyring,
				       ctrl->opts->host->nqn,
				       ctrl->opts->subsysnqn, chap->hash_id,
				       tls_psk, psk_len, digest);
	if (IS_ERR(tls_key)) {
		ret = PTR_ERR(tls_key);
		dev_warn(ctrl->device,
			 "%s: qid %d failed to insert generated key, error %d\n",
			 __func__, chap->qid, ret);
		tls_key = NULL;
	}
	kfree_sensitive(tls_psk);
	if (ctrl->opts->tls_key)
		nvme_auth_revoke_tls_key(ctrl);
	ctrl->opts->tls_key = tls_key;
out_free_digest:
	kfree_sensitive(digest);
out_free_psk:
	kfree_sensitive(psk);
	return ret;
}
733
/*
 * Run one complete DH-HMAC-CHAP negotiation for a single queue.
 *
 * Implements the host side of the protocol state machine:
 *   negotiate -> challenge -> reply -> success1 [-> success2]
 * The outcome is left in chap->error for nvme_auth_wait() to pick up;
 * on a protocol failure an AUTH_Failure2 message is sent to the
 * controller before returning.
 */
static void nvme_queue_auth_work(struct work_struct *work)
{
	struct nvme_dhchap_queue_context *chap =
		container_of(work, struct nvme_dhchap_queue_context, auth_work);
	struct nvme_ctrl *ctrl = chap->ctrl;
	size_t tl;
	int ret = 0;

	/*
	 * Allocate a large enough buffer for the entire negotiation:
	 * 4k is enough even for ffdhe8192.
	 */
	chap->buf = mempool_alloc(nvme_chap_buf_pool, GFP_KERNEL);
	if (!chap->buf) {
		chap->error = -ENOMEM;
		return;
	}

	chap->transaction = ctrl->transaction++;

	/* DH-HMAC-CHAP Step 1: send negotiate */
	dev_dbg(ctrl->device, "%s: qid %d send negotiate\n",
		__func__, chap->qid);
	ret = nvme_auth_set_dhchap_negotiate_data(ctrl, chap);
	if (ret < 0) {
		chap->error = ret;
		return;
	}
	tl = ret;
	ret = nvme_auth_submit(ctrl, chap->qid, chap->buf, tl, true);
	if (ret) {
		chap->error = ret;
		return;
	}

	/* DH-HMAC-CHAP Step 2: receive challenge */
	dev_dbg(ctrl->device, "%s: qid %d receive challenge\n",
		__func__, chap->qid);

	memset(chap->buf, 0, CHAP_BUF_SIZE);
	ret = nvme_auth_submit(ctrl, chap->qid, chap->buf, CHAP_BUF_SIZE,
			       false);
	if (ret) {
		dev_warn(ctrl->device,
			 "qid %d failed to receive challenge, %s %d\n",
			 chap->qid, ret < 0 ? "error" : "nvme status", ret);
		chap->error = ret;
		return;
	}
	ret = nvme_auth_receive_validate(ctrl, chap->qid, chap->buf, chap->transaction,
					 NVME_AUTH_DHCHAP_MESSAGE_CHALLENGE);
	if (ret) {
		chap->status = ret;
		chap->error = -EKEYREJECTED;
		return;
	}

	ret = nvme_auth_process_dhchap_challenge(ctrl, chap);
	if (ret) {
		/* Invalid challenge parameters */
		chap->error = ret;
		goto fail2;
	}

	/* A controller DH public value means a real DH group was selected */
	if (chap->ctrl_key_len) {
		dev_dbg(ctrl->device,
			"%s: qid %d DH exponential\n",
			__func__, chap->qid);
		ret = nvme_auth_dhchap_exponential(ctrl, chap);
		if (ret) {
			chap->error = ret;
			goto fail2;
		}
	}

	dev_dbg(ctrl->device, "%s: qid %d host response\n",
		__func__, chap->qid);
	/* serialize access to the cached transformed host key */
	mutex_lock(&ctrl->dhchap_auth_mutex);
	ret = nvme_auth_dhchap_setup_host_response(ctrl, chap);
	mutex_unlock(&ctrl->dhchap_auth_mutex);
	if (ret) {
		chap->error = ret;
		goto fail2;
	}

	/* DH-HMAC-CHAP Step 3: send reply */
	dev_dbg(ctrl->device, "%s: qid %d send reply\n",
		__func__, chap->qid);
	ret = nvme_auth_set_dhchap_reply_data(ctrl, chap);
	if (ret < 0) {
		chap->error = ret;
		goto fail2;
	}

	tl = ret;
	ret = nvme_auth_submit(ctrl, chap->qid, chap->buf, tl, true);
	if (ret) {
		chap->error = ret;
		goto fail2;
	}

	/* DH-HMAC-CHAP Step 4: receive success1 */
	dev_dbg(ctrl->device, "%s: qid %d receive success1\n",
		__func__, chap->qid);

	memset(chap->buf, 0, CHAP_BUF_SIZE);
	ret = nvme_auth_submit(ctrl, chap->qid, chap->buf, CHAP_BUF_SIZE,
			       false);
	if (ret) {
		dev_warn(ctrl->device,
			 "qid %d failed to receive success1, %s %d\n",
			 chap->qid, ret < 0 ? "error" : "nvme status", ret);
		chap->error = ret;
		return;
	}
	ret = nvme_auth_receive_validate(ctrl, chap->qid,
					 chap->buf, chap->transaction,
					 NVME_AUTH_DHCHAP_MESSAGE_SUCCESS1);
	if (ret) {
		chap->status = ret;
		chap->error = -EKEYREJECTED;
		return;
	}

	/* compute the expected controller response before validating it */
	mutex_lock(&ctrl->dhchap_auth_mutex);
	if (ctrl->ctrl_key) {
		dev_dbg(ctrl->device,
			"%s: qid %d controller response\n",
			__func__, chap->qid);
		ret = nvme_auth_dhchap_setup_ctrl_response(ctrl, chap);
		if (ret) {
			mutex_unlock(&ctrl->dhchap_auth_mutex);
			chap->error = ret;
			goto fail2;
		}
	}
	mutex_unlock(&ctrl->dhchap_auth_mutex);

	ret = nvme_auth_process_dhchap_success1(ctrl, chap);
	if (ret) {
		/* Controller authentication failed */
		chap->error = -EKEYREJECTED;
		goto fail2;
	}

	if (chap->bi_directional) {
		/* DH-HMAC-CHAP Step 5: send success2 */
		dev_dbg(ctrl->device, "%s: qid %d send success2\n",
			__func__, chap->qid);
		tl = nvme_auth_set_dhchap_success2_data(ctrl, chap);
		ret = nvme_auth_submit(ctrl, chap->qid, chap->buf, tl, true);
		if (ret)
			chap->error = ret;
	}
	if (!ret) {
		chap->error = 0;
		chap->authenticated = true;
		/* derive and install the TLS PSK if concatenation was asked */
		if (ctrl->opts->concat &&
		    (ret = nvme_auth_secure_concat(ctrl, chap))) {
			dev_warn(ctrl->device,
				 "%s: qid %d failed to enable secure concatenation\n",
				 __func__, chap->qid);
			chap->error = ret;
			chap->authenticated = false;
		}
		return;
	}

fail2:
	if (chap->status == 0)
		chap->status = NVME_AUTH_DHCHAP_FAILURE_FAILED;
	dev_dbg(ctrl->device, "%s: qid %d send failure2, status %x\n",
		__func__, chap->qid, chap->status);
	tl = nvme_auth_set_dhchap_failure2_data(ctrl, chap);
	ret = nvme_auth_submit(ctrl, chap->qid, chap->buf, tl, true);
	/*
	 * only update error if send failure2 failed and no other
	 * error had been set during authentication.
	 */
	if (ret && !chap->error)
		chap->error = ret;
}
916
917int nvme_auth_negotiate(struct nvme_ctrl *ctrl, int qid)
918{
919 struct nvme_dhchap_queue_context *chap;
920
921 if (!ctrl->host_key) {
922 dev_warn(ctrl->device, "qid %d: no key\n", qid);
923 return -ENOKEY;
924 }
925
926 if (ctrl->opts->dhchap_ctrl_secret && !ctrl->ctrl_key) {
927 dev_warn(ctrl->device, "qid %d: invalid ctrl key\n", qid);
928 return -ENOKEY;
929 }
930
931 chap = &ctrl->dhchap_ctxs[qid];
932 cancel_work_sync(&chap->auth_work);
933 queue_work(nvme_auth_wq, &chap->auth_work);
934 return 0;
935}
936EXPORT_SYMBOL_GPL(nvme_auth_negotiate);
937
938int nvme_auth_wait(struct nvme_ctrl *ctrl, int qid)
939{
940 struct nvme_dhchap_queue_context *chap;
941 int ret;
942
943 chap = &ctrl->dhchap_ctxs[qid];
944 flush_work(&chap->auth_work);
945 ret = chap->error;
946 /* clear sensitive info */
947 nvme_auth_reset_dhchap(chap);
948 return ret;
949}
950EXPORT_SYMBOL_GPL(nvme_auth_wait);
951
/*
 * Controller-wide (re-)authentication: authenticate the admin queue
 * first, then re-run authentication on every I/O queue that had been
 * authenticated before.
 */
static void nvme_ctrl_auth_work(struct work_struct *work)
{
	struct nvme_ctrl *ctrl =
		container_of(work, struct nvme_ctrl, dhchap_auth_work);
	int ret, q;

	/*
	 * If the ctrl is not connected, bail as reconnect will handle
	 * authentication.
	 */
	if (nvme_ctrl_state(ctrl) != NVME_CTRL_LIVE)
		return;

	/* Authenticate admin queue first */
	ret = nvme_auth_negotiate(ctrl, 0);
	if (ret) {
		dev_warn(ctrl->device,
			 "qid 0: error %d setting up authentication\n", ret);
		return;
	}
	ret = nvme_auth_wait(ctrl, 0);
	if (ret) {
		dev_warn(ctrl->device,
			 "qid 0: authentication failed\n");
		return;
	}
	/*
	 * Only run authentication on the admin queue for secure concatenation.
	 */
	if (ctrl->opts->concat)
		return;

	/* Kick off all I/O queue negotiations in parallel ... */
	for (q = 1; q < ctrl->queue_count; q++) {
		struct nvme_dhchap_queue_context *chap =
			&ctrl->dhchap_ctxs[q];
		/*
		 * Skip re-authentication if the queue had
		 * not been authenticated initially.
		 */
		if (!chap->authenticated)
			continue;
		cancel_work_sync(&chap->auth_work);
		queue_work(nvme_auth_wq, &chap->auth_work);
	}

	/*
	 * ... then collect the results. Failure is a soft-state;
	 * credentials remain valid until the controller terminates
	 * the connection.
	 */
	for (q = 1; q < ctrl->queue_count; q++) {
		struct nvme_dhchap_queue_context *chap =
			&ctrl->dhchap_ctxs[q];
		if (!chap->authenticated)
			continue;
		flush_work(&chap->auth_work);
		ret = chap->error;
		nvme_auth_reset_dhchap(chap);
		if (ret)
			dev_warn(ctrl->device,
				 "qid %d: authentication failed\n", q);
	}
}
1014
/*
 * Parse the configured DH-HMAC-CHAP secrets and allocate one
 * authentication context per possible queue.
 *
 * Returns 0 when authentication is not configured (no opts or no
 * secrets) or setup succeeded; on error all parsed keys are released
 * again.
 */
int nvme_auth_init_ctrl(struct nvme_ctrl *ctrl)
{
	struct nvme_dhchap_queue_context *chap;
	int i, ret;

	mutex_init(&ctrl->dhchap_auth_mutex);
	INIT_WORK(&ctrl->dhchap_auth_work, nvme_ctrl_auth_work);
	if (!ctrl->opts)
		return 0;
	ret = nvme_auth_parse_key(ctrl->opts->dhchap_secret, &ctrl->host_key);
	if (ret)
		return ret;
	ret = nvme_auth_parse_key(ctrl->opts->dhchap_ctrl_secret,
			&ctrl->ctrl_key);
	if (ret)
		goto err_free_dhchap_secret;

	/* no secrets configured: nothing to authenticate with */
	if (!ctrl->opts->dhchap_secret && !ctrl->opts->dhchap_ctrl_secret)
		return 0;

	ctrl->dhchap_ctxs = kvzalloc_objs(*chap, ctrl_max_dhchaps(ctrl));
	if (!ctrl->dhchap_ctxs) {
		ret = -ENOMEM;
		goto err_free_dhchap_ctrl_secret;
	}

	for (i = 0; i < ctrl_max_dhchaps(ctrl); i++) {
		chap = &ctrl->dhchap_ctxs[i];
		chap->qid = i;
		chap->ctrl = ctrl;
		chap->authenticated = false;
		INIT_WORK(&chap->auth_work, nvme_queue_auth_work);
	}

	return 0;
err_free_dhchap_ctrl_secret:
	nvme_auth_free_key(ctrl->ctrl_key);
	ctrl->ctrl_key = NULL;
err_free_dhchap_secret:
	nvme_auth_free_key(ctrl->host_key);
	ctrl->host_key = NULL;
	return ret;
}
EXPORT_SYMBOL_GPL(nvme_auth_init_ctrl);
1059
/* Synchronously cancel any pending controller re-authentication work. */
void nvme_auth_stop(struct nvme_ctrl *ctrl)
{
	cancel_work_sync(&ctrl->dhchap_auth_work);
}
EXPORT_SYMBOL_GPL(nvme_auth_stop);
1065
1066void nvme_auth_free(struct nvme_ctrl *ctrl)
1067{
1068 int i;
1069
1070 if (ctrl->dhchap_ctxs) {
1071 for (i = 0; i < ctrl_max_dhchaps(ctrl); i++)
1072 nvme_auth_free_dhchap(&ctrl->dhchap_ctxs[i]);
1073 kvfree(ctrl->dhchap_ctxs);
1074 }
1075 if (ctrl->host_key) {
1076 nvme_auth_free_key(ctrl->host_key);
1077 ctrl->host_key = NULL;
1078 }
1079 if (ctrl->ctrl_key) {
1080 nvme_auth_free_key(ctrl->ctrl_key);
1081 ctrl->ctrl_key = NULL;
1082 }
1083}
1084EXPORT_SYMBOL_GPL(nvme_auth_free);
1085
/*
 * Module init: set up the authentication workqueue and the slab
 * cache/mempool backing the CHAP message buffers.
 */
int __init nvme_init_auth(void)
{
	nvme_auth_wq = alloc_workqueue("nvme-auth-wq",
			       WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, 0);
	if (!nvme_auth_wq)
		return -ENOMEM;

	nvme_chap_buf_cache = kmem_cache_create("nvme-chap-buf-cache",
				CHAP_BUF_SIZE, 0, SLAB_HWCACHE_ALIGN, NULL);
	if (!nvme_chap_buf_cache)
		goto err_destroy_workqueue;

	/* pre-reserve 16 buffers so authentication can make progress
	 * even under memory pressure */
	nvme_chap_buf_pool = mempool_create(16, mempool_alloc_slab,
			mempool_free_slab, nvme_chap_buf_cache);
	if (!nvme_chap_buf_pool)
		goto err_destroy_chap_buf_cache;

	return 0;
err_destroy_chap_buf_cache:
	kmem_cache_destroy(nvme_chap_buf_cache);
err_destroy_workqueue:
	destroy_workqueue(nvme_auth_wq);
	return -ENOMEM;
}
1110
/* Module teardown: destroy the pool before the cache backing it. */
void __exit nvme_exit_auth(void)
{
	mempool_destroy(nvme_chap_buf_pool);
	kmem_cache_destroy(nvme_chap_buf_cache);
	destroy_workqueue(nvme_auth_wq);
}