Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel
os
linux
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * NVMe over Fabrics DH-HMAC-CHAP authentication command handling.
4 * Copyright (c) 2020 Hannes Reinecke, SUSE Software Solutions.
5 * All rights reserved.
6 */
7#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
8#include <linux/blkdev.h>
9#include <linux/random.h>
10#include <linux/nvme-auth.h>
11#include <crypto/kpp.h>
12#include "nvmet.h"
13
14static void nvmet_auth_expired_work(struct work_struct *work)
15{
16 struct nvmet_sq *sq = container_of(to_delayed_work(work),
17 struct nvmet_sq, auth_expired_work);
18
19 pr_debug("%s: ctrl %d qid %d transaction %u expired, resetting\n",
20 __func__, sq->ctrl->cntlid, sq->qid, sq->dhchap_tid);
21 sq->dhchap_step = NVME_AUTH_DHCHAP_MESSAGE_NEGOTIATE;
22 sq->dhchap_tid = -1;
23}
24
25void nvmet_auth_sq_init(struct nvmet_sq *sq)
26{
27 /* Initialize in-band authentication */
28 INIT_DELAYED_WORK(&sq->auth_expired_work, nvmet_auth_expired_work);
29 sq->authenticated = false;
30 sq->dhchap_step = NVME_AUTH_DHCHAP_MESSAGE_NEGOTIATE;
31}
32
33static u8 nvmet_auth_negotiate(struct nvmet_req *req, void *d)
34{
35 struct nvmet_ctrl *ctrl = req->sq->ctrl;
36 struct nvmf_auth_dhchap_negotiate_data *data = d;
37 int i, hash_id = 0, fallback_hash_id = 0, dhgid, fallback_dhgid;
38
39 pr_debug("%s: ctrl %d qid %d: data sc_d %d napd %d authid %d halen %d dhlen %d\n",
40 __func__, ctrl->cntlid, req->sq->qid,
41 data->sc_c, data->napd, data->auth_protocol[0].dhchap.authid,
42 data->auth_protocol[0].dhchap.halen,
43 data->auth_protocol[0].dhchap.dhlen);
44 req->sq->dhchap_tid = le16_to_cpu(data->t_id);
45 req->sq->sc_c = data->sc_c;
46 if (data->sc_c != NVME_AUTH_SECP_NOSC) {
47 if (!IS_ENABLED(CONFIG_NVME_TARGET_TCP_TLS))
48 return NVME_AUTH_DHCHAP_FAILURE_CONCAT_MISMATCH;
49 /* Secure concatenation can only be enabled on the admin queue */
50 if (req->sq->qid)
51 return NVME_AUTH_DHCHAP_FAILURE_CONCAT_MISMATCH;
52 switch (data->sc_c) {
53 case NVME_AUTH_SECP_NEWTLSPSK:
54 if (nvmet_queue_tls_keyid(req->sq))
55 return NVME_AUTH_DHCHAP_FAILURE_CONCAT_MISMATCH;
56 break;
57 case NVME_AUTH_SECP_REPLACETLSPSK:
58 if (!nvmet_queue_tls_keyid(req->sq))
59 return NVME_AUTH_DHCHAP_FAILURE_CONCAT_MISMATCH;
60 break;
61 default:
62 return NVME_AUTH_DHCHAP_FAILURE_CONCAT_MISMATCH;
63 }
64 ctrl->concat = true;
65 }
66
67 if (data->napd != 1)
68 return NVME_AUTH_DHCHAP_FAILURE_HASH_UNUSABLE;
69
70 if (data->auth_protocol[0].dhchap.authid !=
71 NVME_AUTH_DHCHAP_AUTH_ID)
72 return NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD;
73
74 for (i = 0; i < data->auth_protocol[0].dhchap.halen; i++) {
75 u8 host_hmac_id = data->auth_protocol[0].dhchap.idlist[i];
76
77 if (!fallback_hash_id && nvme_auth_hmac_hash_len(host_hmac_id))
78 fallback_hash_id = host_hmac_id;
79 if (ctrl->shash_id != host_hmac_id)
80 continue;
81 hash_id = ctrl->shash_id;
82 break;
83 }
84 if (hash_id == 0) {
85 if (fallback_hash_id == 0) {
86 pr_debug("%s: ctrl %d qid %d: no usable hash found\n",
87 __func__, ctrl->cntlid, req->sq->qid);
88 return NVME_AUTH_DHCHAP_FAILURE_HASH_UNUSABLE;
89 }
90 pr_debug("%s: ctrl %d qid %d: no usable hash found, falling back to %s\n",
91 __func__, ctrl->cntlid, req->sq->qid,
92 nvme_auth_hmac_name(fallback_hash_id));
93 ctrl->shash_id = fallback_hash_id;
94 }
95
96 dhgid = -1;
97 fallback_dhgid = -1;
98 for (i = 0; i < data->auth_protocol[0].dhchap.dhlen; i++) {
99 int tmp_dhgid = data->auth_protocol[0].dhchap.idlist[i + 30];
100
101 if (tmp_dhgid != ctrl->dh_gid) {
102 dhgid = tmp_dhgid;
103 break;
104 }
105 if (fallback_dhgid < 0) {
106 const char *kpp = nvme_auth_dhgroup_kpp(tmp_dhgid);
107
108 if (crypto_has_kpp(kpp, 0, 0))
109 fallback_dhgid = tmp_dhgid;
110 }
111 }
112 if (dhgid < 0) {
113 if (fallback_dhgid < 0) {
114 pr_debug("%s: ctrl %d qid %d: no usable DH group found\n",
115 __func__, ctrl->cntlid, req->sq->qid);
116 return NVME_AUTH_DHCHAP_FAILURE_DHGROUP_UNUSABLE;
117 }
118 pr_debug("%s: ctrl %d qid %d: configured DH group %s not found\n",
119 __func__, ctrl->cntlid, req->sq->qid,
120 nvme_auth_dhgroup_name(fallback_dhgid));
121 ctrl->dh_gid = fallback_dhgid;
122 }
123 if (ctrl->dh_gid == NVME_AUTH_DHGROUP_NULL && ctrl->concat) {
124 pr_debug("%s: ctrl %d qid %d: NULL DH group invalid "
125 "for secure channel concatenation\n", __func__,
126 ctrl->cntlid, req->sq->qid);
127 return NVME_AUTH_DHCHAP_FAILURE_CONCAT_MISMATCH;
128 }
129 pr_debug("%s: ctrl %d qid %d: selected DH group %s (%d)\n",
130 __func__, ctrl->cntlid, req->sq->qid,
131 nvme_auth_dhgroup_name(ctrl->dh_gid), ctrl->dh_gid);
132 return 0;
133}
134
/*
 * Process a DH-HMAC-CHAP_Reply message from the host: derive the DH
 * session key from the host's public value (if present), recompute the
 * expected host response and compare it against the one received, then
 * record the host challenge (C2) and sequence number (S2) for the
 * optional controller-side response.
 *
 * Returns 0 on success or an NVME_AUTH_DHCHAP_FAILURE_* reason code.
 */
static u8 nvmet_auth_reply(struct nvmet_req *req, void *d)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	struct nvmf_auth_dhchap_reply_data *data = d;
	u16 dhvlen = le16_to_cpu(data->dhvlen);
	u8 *response;

	pr_debug("%s: ctrl %d qid %d: data hl %d cvalid %d dhvlen %u\n",
		 __func__, ctrl->cntlid, req->sq->qid,
		 data->hl, data->cvalid, dhvlen);

	if (dhvlen) {
		/* Host sent a DH value but no DH transform was negotiated */
		if (!ctrl->dh_tfm)
			return NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD;
		/* DH value follows response + challenge (2 * hl) in rval */
		if (nvmet_auth_ctrl_sesskey(req, data->rval + 2 * data->hl,
					    dhvlen) < 0)
			return NVME_AUTH_DHCHAP_FAILURE_DHGROUP_UNUSABLE;
	}

	response = kmalloc(data->hl, GFP_KERNEL);
	if (!response)
		return NVME_AUTH_DHCHAP_FAILURE_FAILED;

	if (!ctrl->host_key) {
		pr_warn("ctrl %d qid %d no host key\n",
			ctrl->cntlid, req->sq->qid);
		kfree(response);
		return NVME_AUTH_DHCHAP_FAILURE_FAILED;
	}
	/* Recompute the response the host should have produced */
	if (nvmet_auth_host_hash(req, response, data->hl) < 0) {
		pr_debug("ctrl %d qid %d host hash failed\n",
			 ctrl->cntlid, req->sq->qid);
		kfree(response);
		return NVME_AUTH_DHCHAP_FAILURE_FAILED;
	}

	if (memcmp(data->rval, response, data->hl)) {
		pr_info("ctrl %d qid %d host response mismatch\n",
			ctrl->cntlid, req->sq->qid);
		pr_debug("ctrl %d qid %d rval %*ph\n",
			 ctrl->cntlid, req->sq->qid, data->hl, data->rval);
		pr_debug("ctrl %d qid %d response %*ph\n",
			 ctrl->cntlid, req->sq->qid, data->hl, response);
		kfree(response);
		return NVME_AUTH_DHCHAP_FAILURE_FAILED;
	}
	kfree(response);
	pr_debug("%s: ctrl %d qid %d host authenticated\n",
		 __func__, ctrl->cntlid, req->sq->qid);
	/* Concatenation needs C2 to derive the TLS PSK */
	if (!data->cvalid && ctrl->concat) {
		pr_debug("%s: ctrl %d qid %d invalid challenge\n",
			 __func__, ctrl->cntlid, req->sq->qid);
		return NVME_AUTH_DHCHAP_FAILURE_FAILED;
	}
	req->sq->dhchap_s2 = le32_to_cpu(data->seqnum);
	if (data->cvalid) {
		/* C2 immediately follows the host response in rval */
		req->sq->dhchap_c2 = kmemdup(data->rval + data->hl, data->hl,
					     GFP_KERNEL);
		if (!req->sq->dhchap_c2)
			return NVME_AUTH_DHCHAP_FAILURE_FAILED;

		pr_debug("%s: ctrl %d qid %d challenge %*ph\n",
			 __func__, ctrl->cntlid, req->sq->qid, data->hl,
			 req->sq->dhchap_c2);
	}
	/*
	 * NVMe Base Spec 2.2 section 8.3.4.5.4: DH-HMAC-CHAP_Reply message
	 * Sequence Number (SEQNUM): [ .. ]
	 * The value 0h is used to indicate that bidirectional authentication
	 * is not performed, but a challenge value C2 is carried in order to
	 * generate a pre-shared key (PSK) for subsequent establishment of a
	 * secure channel.
	 */
	if (req->sq->dhchap_s2 == 0) {
		if (ctrl->concat)
			nvmet_auth_insert_psk(req->sq);
		req->sq->authenticated = true;
		/* No controller response will be computed; drop C2 */
		kfree(req->sq->dhchap_c2);
		req->sq->dhchap_c2 = NULL;
	} else if (!data->cvalid)
		/* Unidirectional authentication is complete at this point */
		req->sq->authenticated = true;

	return 0;
}
219
220static u8 nvmet_auth_failure2(void *d)
221{
222 struct nvmf_auth_dhchap_failure_data *data = d;
223
224 return data->rescode_exp;
225}
226
227u32 nvmet_auth_send_data_len(struct nvmet_req *req)
228{
229 return le32_to_cpu(req->cmd->auth_send.tl);
230}
231
/*
 * Handle a fabrics "Authentication Send" command: validate the command
 * fields, copy in the host's DH-HMAC-CHAP message, and dispatch it
 * (negotiate / reply / success2 / failure2) to advance the per-queue
 * authentication state machine. Non-final states re-arm the transaction
 * expiry timer; final states free the per-queue auth state.
 */
void nvmet_execute_auth_send(struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	struct nvmf_auth_dhchap_success2_data *data;
	void *d;
	u32 tl;
	u16 status = 0;
	u8 dhchap_status;

	/* Only DH-CHAP (SECP) with SPSP0 == SPSP1 == 01h is supported */
	if (req->cmd->auth_send.secp != NVME_AUTH_DHCHAP_PROTOCOL_IDENTIFIER) {
		status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
		req->error_loc =
			offsetof(struct nvmf_auth_send_command, secp);
		goto done;
	}
	if (req->cmd->auth_send.spsp0 != 0x01) {
		status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
		req->error_loc =
			offsetof(struct nvmf_auth_send_command, spsp0);
		goto done;
	}
	if (req->cmd->auth_send.spsp1 != 0x01) {
		status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
		req->error_loc =
			offsetof(struct nvmf_auth_send_command, spsp1);
		goto done;
	}
	tl = nvmet_auth_send_data_len(req);
	if (!tl) {
		status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
		req->error_loc =
			offsetof(struct nvmf_auth_send_command, tl);
		goto done;
	}
	/* nvmet_check_transfer_len() completes the request on mismatch */
	if (!nvmet_check_transfer_len(req, tl)) {
		pr_debug("%s: transfer length mismatch (%u)\n", __func__, tl);
		return;
	}

	d = kmalloc(tl, GFP_KERNEL);
	if (!d) {
		status = NVME_SC_INTERNAL;
		goto done;
	}

	status = nvmet_copy_from_sgl(req, 0, d, tl);
	if (status)
		goto done_kfree;

	data = d;
	pr_debug("%s: ctrl %d qid %d type %d id %d step %x\n", __func__,
		 ctrl->cntlid, req->sq->qid, data->auth_type, data->auth_id,
		 req->sq->dhchap_step);
	if (data->auth_type != NVME_AUTH_COMMON_MESSAGES &&
	    data->auth_type != NVME_AUTH_DHCHAP_MESSAGES)
		goto done_failure1;
	if (data->auth_type == NVME_AUTH_COMMON_MESSAGES) {
		if (data->auth_id == NVME_AUTH_DHCHAP_MESSAGE_NEGOTIATE) {
			/* Restart negotiation */
			pr_debug("%s: ctrl %d qid %d reset negotiation\n",
				 __func__, ctrl->cntlid, req->sq->qid);
			if (!req->sq->qid) {
				/* Admin queue: set up re-authentication keys */
				dhchap_status = nvmet_setup_auth(ctrl, req->sq,
								 true);
				if (dhchap_status) {
					pr_err("ctrl %d qid 0 failed to setup re-authentication\n",
					       ctrl->cntlid);
					req->sq->dhchap_status = dhchap_status;
					req->sq->dhchap_step =
						NVME_AUTH_DHCHAP_MESSAGE_FAILURE1;
					goto done_kfree;
				}
			}
			req->sq->dhchap_step =
				NVME_AUTH_DHCHAP_MESSAGE_NEGOTIATE;
		} else if (data->auth_id != req->sq->dhchap_step)
			goto done_failure1;
		/* Validate negotiation parameters */
		dhchap_status = nvmet_auth_negotiate(req, d);
		if (dhchap_status == 0)
			req->sq->dhchap_step =
				NVME_AUTH_DHCHAP_MESSAGE_CHALLENGE;
		else {
			req->sq->dhchap_step =
				NVME_AUTH_DHCHAP_MESSAGE_FAILURE1;
			req->sq->dhchap_status = dhchap_status;
		}
		goto done_kfree;
	}
	/* DH-CHAP messages must match the expected state-machine step */
	if (data->auth_id != req->sq->dhchap_step) {
		pr_debug("%s: ctrl %d qid %d step mismatch (%d != %d)\n",
			 __func__, ctrl->cntlid, req->sq->qid,
			 data->auth_id, req->sq->dhchap_step);
		goto done_failure1;
	}
	/* ... and carry the transaction id established at negotiation */
	if (le16_to_cpu(data->t_id) != req->sq->dhchap_tid) {
		pr_debug("%s: ctrl %d qid %d invalid transaction %d (expected %d)\n",
			 __func__, ctrl->cntlid, req->sq->qid,
			 le16_to_cpu(data->t_id),
			 req->sq->dhchap_tid);
		req->sq->dhchap_step =
			NVME_AUTH_DHCHAP_MESSAGE_FAILURE1;
		req->sq->dhchap_status =
			NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD;
		goto done_kfree;
	}

	switch (data->auth_id) {
	case NVME_AUTH_DHCHAP_MESSAGE_REPLY:
		dhchap_status = nvmet_auth_reply(req, d);
		if (dhchap_status == 0)
			req->sq->dhchap_step =
				NVME_AUTH_DHCHAP_MESSAGE_SUCCESS1;
		else {
			req->sq->dhchap_step =
				NVME_AUTH_DHCHAP_MESSAGE_FAILURE1;
			req->sq->dhchap_status = dhchap_status;
		}
		goto done_kfree;
	case NVME_AUTH_DHCHAP_MESSAGE_SUCCESS2:
		/* Host acknowledged our response; authentication complete */
		if (ctrl->concat)
			nvmet_auth_insert_psk(req->sq);
		req->sq->authenticated = true;
		pr_debug("%s: ctrl %d qid %d ctrl authenticated\n",
			 __func__, ctrl->cntlid, req->sq->qid);
		goto done_kfree;
	case NVME_AUTH_DHCHAP_MESSAGE_FAILURE2:
		dhchap_status = nvmet_auth_failure2(d);
		if (dhchap_status) {
			pr_warn("ctrl %d qid %d: authentication failed (%d)\n",
				ctrl->cntlid, req->sq->qid, dhchap_status);
			req->sq->dhchap_status = dhchap_status;
			req->sq->authenticated = false;
		}
		goto done_kfree;
	default:
		req->sq->dhchap_status =
			NVME_AUTH_DHCHAP_FAILURE_INCORRECT_MESSAGE;
		req->sq->dhchap_step =
			NVME_AUTH_DHCHAP_MESSAGE_FAILURE2;
		req->sq->authenticated = false;
		goto done_kfree;
	}
done_failure1:
	req->sq->dhchap_status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_MESSAGE;
	req->sq->dhchap_step = NVME_AUTH_DHCHAP_MESSAGE_FAILURE2;

done_kfree:
	kfree(d);
done:
	pr_debug("%s: ctrl %d qid %d dhchap status %x step %x\n", __func__,
		 ctrl->cntlid, req->sq->qid,
		 req->sq->dhchap_status, req->sq->dhchap_step);
	if (status)
		pr_debug("%s: ctrl %d qid %d nvme status %x error loc %d\n",
			 __func__, ctrl->cntlid, req->sq->qid,
			 status, req->error_loc);
	if (req->sq->dhchap_step != NVME_AUTH_DHCHAP_MESSAGE_SUCCESS2 &&
	    req->sq->dhchap_step != NVME_AUTH_DHCHAP_MESSAGE_FAILURE2) {
		/* Transaction still in flight: (re-)arm the expiry timer */
		unsigned long auth_expire_secs = ctrl->kato ? ctrl->kato : 120;

		mod_delayed_work(system_percpu_wq, &req->sq->auth_expired_work,
				 auth_expire_secs * HZ);
		goto complete;
	}
	/* Final states, clear up variables */
	nvmet_auth_sq_free(req->sq);
	if (req->sq->dhchap_step == NVME_AUTH_DHCHAP_MESSAGE_FAILURE2)
		nvmet_ctrl_fatal_error(ctrl);

complete:
	nvmet_req_complete(req, status);
}
405
406static int nvmet_auth_challenge(struct nvmet_req *req, void *d, int al)
407{
408 struct nvmf_auth_dhchap_challenge_data *data = d;
409 struct nvmet_ctrl *ctrl = req->sq->ctrl;
410 int ret = 0;
411 int hash_len = nvme_auth_hmac_hash_len(ctrl->shash_id);
412 int data_size = sizeof(*d) + hash_len;
413
414 if (ctrl->dh_tfm)
415 data_size += ctrl->dh_keysize;
416 if (al < data_size) {
417 pr_debug("%s: buffer too small (al %d need %d)\n", __func__,
418 al, data_size);
419 return -EINVAL;
420 }
421 memset(data, 0, data_size);
422 req->sq->dhchap_s1 = nvme_auth_get_seqnum();
423 data->auth_type = NVME_AUTH_DHCHAP_MESSAGES;
424 data->auth_id = NVME_AUTH_DHCHAP_MESSAGE_CHALLENGE;
425 data->t_id = cpu_to_le16(req->sq->dhchap_tid);
426 data->hashid = ctrl->shash_id;
427 data->hl = hash_len;
428 data->seqnum = cpu_to_le32(req->sq->dhchap_s1);
429 req->sq->dhchap_c1 = kmalloc(data->hl, GFP_KERNEL);
430 if (!req->sq->dhchap_c1)
431 return -ENOMEM;
432 get_random_bytes(req->sq->dhchap_c1, data->hl);
433 memcpy(data->cval, req->sq->dhchap_c1, data->hl);
434 if (ctrl->dh_tfm) {
435 data->dhgid = ctrl->dh_gid;
436 data->dhvlen = cpu_to_le16(ctrl->dh_keysize);
437 ret = nvmet_auth_ctrl_exponential(req, data->cval + data->hl,
438 ctrl->dh_keysize);
439 }
440 pr_debug("%s: ctrl %d qid %d seq %d transaction %d hl %d dhvlen %zu\n",
441 __func__, ctrl->cntlid, req->sq->qid, req->sq->dhchap_s1,
442 req->sq->dhchap_tid, data->hl, ctrl->dh_keysize);
443 return ret;
444}
445
446static int nvmet_auth_success1(struct nvmet_req *req, void *d, int al)
447{
448 struct nvmf_auth_dhchap_success1_data *data = d;
449 struct nvmet_ctrl *ctrl = req->sq->ctrl;
450 int hash_len = nvme_auth_hmac_hash_len(ctrl->shash_id);
451
452 WARN_ON(al < sizeof(*data));
453 memset(data, 0, sizeof(*data));
454 data->auth_type = NVME_AUTH_DHCHAP_MESSAGES;
455 data->auth_id = NVME_AUTH_DHCHAP_MESSAGE_SUCCESS1;
456 data->t_id = cpu_to_le16(req->sq->dhchap_tid);
457 data->hl = hash_len;
458 if (req->sq->dhchap_c2) {
459 if (!ctrl->ctrl_key) {
460 pr_warn("ctrl %d qid %d no ctrl key\n",
461 ctrl->cntlid, req->sq->qid);
462 return NVME_AUTH_DHCHAP_FAILURE_FAILED;
463 }
464 if (nvmet_auth_ctrl_hash(req, data->rval, data->hl))
465 return NVME_AUTH_DHCHAP_FAILURE_HASH_UNUSABLE;
466 data->rvalid = 1;
467 pr_debug("ctrl %d qid %d response %*ph\n",
468 ctrl->cntlid, req->sq->qid, data->hl, data->rval);
469 }
470 return 0;
471}
472
473static void nvmet_auth_failure1(struct nvmet_req *req, void *d, int al)
474{
475 struct nvmf_auth_dhchap_failure_data *data = d;
476
477 WARN_ON(al < sizeof(*data));
478 data->auth_type = NVME_AUTH_COMMON_MESSAGES;
479 data->auth_id = NVME_AUTH_DHCHAP_MESSAGE_FAILURE1;
480 data->t_id = cpu_to_le16(req->sq->dhchap_tid);
481 data->rescode = NVME_AUTH_DHCHAP_FAILURE_REASON_FAILED;
482 data->rescode_exp = req->sq->dhchap_status;
483}
484
485u32 nvmet_auth_receive_data_len(struct nvmet_req *req)
486{
487 return le32_to_cpu(req->cmd->auth_receive.al);
488}
489
/*
 * Handle a fabrics "Authentication Receive" command: validate the
 * command fields, then build the controller-to-host message the state
 * machine currently owes (challenge, success1, or failure1) into a
 * scratch buffer and copy it back to the host. Final states tear down
 * the per-queue auth state; FAILURE1 additionally kills the controller.
 */
void nvmet_execute_auth_receive(struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	void *d;
	u32 al;
	u16 status = 0;

	/* Only DH-CHAP (SECP) with SPSP0 == SPSP1 == 01h is supported */
	if (req->cmd->auth_receive.secp != NVME_AUTH_DHCHAP_PROTOCOL_IDENTIFIER) {
		status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
		req->error_loc =
			offsetof(struct nvmf_auth_receive_command, secp);
		goto done;
	}
	if (req->cmd->auth_receive.spsp0 != 0x01) {
		status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
		req->error_loc =
			offsetof(struct nvmf_auth_receive_command, spsp0);
		goto done;
	}
	if (req->cmd->auth_receive.spsp1 != 0x01) {
		status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
		req->error_loc =
			offsetof(struct nvmf_auth_receive_command, spsp1);
		goto done;
	}
	al = nvmet_auth_receive_data_len(req);
	if (!al) {
		status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
		req->error_loc =
			offsetof(struct nvmf_auth_receive_command, al);
		goto done;
	}
	/* nvmet_check_transfer_len() completes the request on mismatch */
	if (!nvmet_check_transfer_len(req, al)) {
		pr_debug("%s: transfer length mismatch (%u)\n", __func__, al);
		return;
	}

	d = kmalloc(al, GFP_KERNEL);
	if (!d) {
		status = NVME_SC_INTERNAL;
		goto done;
	}
	pr_debug("%s: ctrl %d qid %d step %x\n", __func__,
		 ctrl->cntlid, req->sq->qid, req->sq->dhchap_step);
	switch (req->sq->dhchap_step) {
	case NVME_AUTH_DHCHAP_MESSAGE_CHALLENGE:
		if (nvmet_auth_challenge(req, d, al) < 0) {
			pr_warn("ctrl %d qid %d: challenge error (%d)\n",
				ctrl->cntlid, req->sq->qid, status);
			status = NVME_SC_INTERNAL;
			break;
		}
		req->sq->dhchap_step = NVME_AUTH_DHCHAP_MESSAGE_REPLY;
		break;
	case NVME_AUTH_DHCHAP_MESSAGE_SUCCESS1:
		status = nvmet_auth_success1(req, d, al);
		if (status) {
			/* Downgrade to a failure1 message toward the host */
			req->sq->dhchap_status = status;
			req->sq->authenticated = false;
			nvmet_auth_failure1(req, d, al);
			pr_warn("ctrl %d qid %d: success1 status (%x)\n",
				ctrl->cntlid, req->sq->qid,
				req->sq->dhchap_status);
			break;
		}
		req->sq->dhchap_step = NVME_AUTH_DHCHAP_MESSAGE_SUCCESS2;
		break;
	case NVME_AUTH_DHCHAP_MESSAGE_FAILURE1:
		req->sq->authenticated = false;
		nvmet_auth_failure1(req, d, al);
		pr_warn("ctrl %d qid %d failure1 (%x)\n",
			ctrl->cntlid, req->sq->qid, req->sq->dhchap_status);
		break;
	default:
		/* Unexpected step: report a generic failure to the host */
		pr_warn("ctrl %d qid %d unhandled step (%d)\n",
			ctrl->cntlid, req->sq->qid, req->sq->dhchap_step);
		req->sq->dhchap_step = NVME_AUTH_DHCHAP_MESSAGE_FAILURE1;
		req->sq->dhchap_status = NVME_AUTH_DHCHAP_FAILURE_FAILED;
		nvmet_auth_failure1(req, d, al);
		status = 0;
		break;
	}

	status = nvmet_copy_to_sgl(req, 0, d, al);
	kfree(d);
done:
	if (req->sq->dhchap_step == NVME_AUTH_DHCHAP_MESSAGE_SUCCESS2)
		nvmet_auth_sq_free(req->sq);
	else if (req->sq->dhchap_step == NVME_AUTH_DHCHAP_MESSAGE_FAILURE1) {
		nvmet_auth_sq_free(req->sq);
		nvmet_ctrl_fatal_error(ctrl);
	}
	nvmet_req_complete(req, status);
}