Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * NVMe over Fabrics TCP target.
4 * Copyright (c) 2018 Lightbits Labs. All rights reserved.
5 */
6#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
7#include <linux/module.h>
8#include <linux/init.h>
9#include <linux/slab.h>
10#include <linux/crc32c.h>
11#include <linux/err.h>
12#include <linux/nvme-tcp.h>
13#include <linux/nvme-keyring.h>
14#include <net/sock.h>
15#include <net/tcp.h>
16#include <net/tls.h>
17#include <net/tls_prot.h>
18#include <net/handshake.h>
19#include <linux/inet.h>
20#include <linux/llist.h>
21#include <trace/events/sock.h>
22
23#include "nvmet.h"
24
25#define NVMET_TCP_DEF_INLINE_DATA_SIZE (4 * PAGE_SIZE)
26#define NVMET_TCP_MAXH2CDATA 0x400000 /* 4M arbitrary limit */
27#define NVMET_TCP_BACKLOG 128
28
29static int param_store_val(const char *str, int *val, int min, int max)
30{
31 int ret, new_val;
32
33 ret = kstrtoint(str, 10, &new_val);
34 if (ret)
35 return -EINVAL;
36
37 if (new_val < min || new_val > max)
38 return -EINVAL;
39
40 *val = new_val;
41 return 0;
42}
43
44static int set_params(const char *str, const struct kernel_param *kp)
45{
46 return param_store_val(str, kp->arg, 0, INT_MAX);
47}
48
49static const struct kernel_param_ops set_param_ops = {
50 .set = set_params,
51 .get = param_get_int,
52};
53
54/* Define the socket priority to use for connections where it is desirable
55 * that the NIC consider performing optimized packet processing or filtering.
56 * A non-zero value is sufficient to indicate general consideration of any
57 * possible optimization. Making it a module param allows for alternative
58 * values that may be unique to some NIC implementations.
59 */
60static int so_priority;
61device_param_cb(so_priority, &set_param_ops, &so_priority, 0644);
62MODULE_PARM_DESC(so_priority, "nvmet tcp socket optimize priority: Default 0");
63
64/* Define a time period (in usecs) during which io_work() shall sample an
65 * activated queue before determining it to be idle. This optional module
66 * behavior can enable NIC solutions that support socket-optimized packet
67 * processing using advanced interrupt moderation techniques.
68 */
69static int idle_poll_period_usecs;
70device_param_cb(idle_poll_period_usecs, &set_param_ops,
71 &idle_poll_period_usecs, 0644);
72MODULE_PARM_DESC(idle_poll_period_usecs,
73 "nvmet tcp io_work poll till idle time period in usecs: Default 0");
74
75#ifdef CONFIG_NVME_TARGET_TCP_TLS
76/*
77 * TLS handshake timeout
78 */
79static int tls_handshake_timeout = 10;
80module_param(tls_handshake_timeout, int, 0644);
81MODULE_PARM_DESC(tls_handshake_timeout,
82 "nvme TLS handshake timeout in seconds (default 10)");
83#endif
84
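/*
 * Budgets for nvmet_tcp_io_work(): each loop iteration attempts at most
 * NVMET_TCP_RECV_BUDGET receive operations and NVMET_TCP_SEND_BUDGET send
 * operations, and the loop stops once the combined count reaches
 * NVMET_TCP_IO_WORK_BUDGET, requeueing itself if work is still pending so
 * one busy queue cannot monopolize a workqueue worker.
 */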
85#define NVMET_TCP_RECV_BUDGET 8
86#define NVMET_TCP_SEND_BUDGET 8
87#define NVMET_TCP_IO_WORK_BUDGET 64
88
89enum nvmet_tcp_send_state {
90 NVMET_TCP_SEND_DATA_PDU,
91 NVMET_TCP_SEND_DATA,
92 NVMET_TCP_SEND_R2T,
93 NVMET_TCP_SEND_DDGST,
94 NVMET_TCP_SEND_RESPONSE
95};
96
97enum nvmet_tcp_recv_state {
98 NVMET_TCP_RECV_PDU,
99 NVMET_TCP_RECV_DATA,
100 NVMET_TCP_RECV_DDGST,
101 NVMET_TCP_RECV_ERR,
102};
103
104enum {
105 NVMET_TCP_F_INIT_FAILED = (1 << 0),
106};
107
108struct nvmet_tcp_cmd {
109 struct nvmet_tcp_queue *queue;
110 struct nvmet_req req;
111
112 struct nvme_tcp_cmd_pdu *cmd_pdu;
113 struct nvme_tcp_rsp_pdu *rsp_pdu;
114 struct nvme_tcp_data_pdu *data_pdu;
115 struct nvme_tcp_r2t_pdu *r2t_pdu;
116
117 u32 rbytes_done;
118 u32 wbytes_done;
119
120 u32 pdu_len;
121 u32 pdu_recv;
122 int sg_idx;
123 char recv_cbuf[CMSG_LEN(sizeof(char))];
124 struct msghdr recv_msg;
125 struct bio_vec *iov;
126 u32 flags;
127
128 struct list_head entry;
129 struct llist_node lentry;
130
131 /* send state */
132 u32 offset;
133 struct scatterlist *cur_sg;
134 enum nvmet_tcp_send_state state;
135
136 __le32 exp_ddgst;
137 __le32 recv_ddgst;
138};
139
140enum nvmet_tcp_queue_state {
141 NVMET_TCP_Q_CONNECTING,
142 NVMET_TCP_Q_TLS_HANDSHAKE,
143 NVMET_TCP_Q_LIVE,
144 NVMET_TCP_Q_DISCONNECTING,
145 NVMET_TCP_Q_FAILED,
146};
147
148struct nvmet_tcp_queue {
149 struct socket *sock;
150 struct nvmet_tcp_port *port;
151 struct work_struct io_work;
152 struct nvmet_cq nvme_cq;
153 struct nvmet_sq nvme_sq;
154 struct kref kref;
155
156 /* send state */
157 struct nvmet_tcp_cmd *cmds;
158 unsigned int nr_cmds;
159 struct list_head free_list;
160 struct llist_head resp_list;
161 struct list_head resp_send_list;
162 int send_list_len;
163 struct nvmet_tcp_cmd *snd_cmd;
164
165 /* recv state */
166 int offset;
167 int left;
168 enum nvmet_tcp_recv_state rcv_state;
169 struct nvmet_tcp_cmd *cmd;
170 union nvme_tcp_pdu pdu;
171
172 /* digest state */
173 bool hdr_digest;
174 bool data_digest;
175
176 /* TLS state */
177 key_serial_t tls_pskid;
178 struct delayed_work tls_handshake_tmo_work;
179
180 unsigned long poll_end;
181
182 spinlock_t state_lock;
183 enum nvmet_tcp_queue_state state;
184
185 struct sockaddr_storage sockaddr;
186 struct sockaddr_storage sockaddr_peer;
187 struct work_struct release_work;
188
189 int idx;
190 struct list_head queue_list;
191
192 struct nvmet_tcp_cmd connect;
193
194 struct page_frag_cache pf_cache;
195
196 void (*data_ready)(struct sock *);
197 void (*state_change)(struct sock *);
198 void (*write_space)(struct sock *);
199};
200
201struct nvmet_tcp_port {
202 struct socket *sock;
203 struct work_struct accept_work;
204 struct nvmet_port *nport;
205 struct sockaddr_storage addr;
206 void (*data_ready)(struct sock *);
207};
208
209static DEFINE_IDA(nvmet_tcp_queue_ida);
210static LIST_HEAD(nvmet_tcp_queue_list);
211static DEFINE_MUTEX(nvmet_tcp_queue_mutex);
212
213static struct workqueue_struct *nvmet_tcp_wq;
214static const struct nvmet_fabrics_ops nvmet_tcp_ops;
215static void nvmet_tcp_free_cmd(struct nvmet_tcp_cmd *c);
216static void nvmet_tcp_free_cmd_buffers(struct nvmet_tcp_cmd *cmd);
217
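/*
 * The transfer tag (ttag) carried in R2T PDUs is simply the command's index
 * in queue->cmds; nvmet_tcp_handle_h2c_data_pdu() uses the ttag echoed back
 * by the host to look the command up again.  Before the command array has
 * been allocated (i.e. for the initial connect command) 0xffff is used.
 */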
218static inline u16 nvmet_tcp_cmd_tag(struct nvmet_tcp_queue *queue,
219 struct nvmet_tcp_cmd *cmd)
220{
221 if (unlikely(!queue->nr_cmds)) {
222 /* We didn't allocate cmds yet, send 0xffff */
223 return USHRT_MAX;
224 }
225
226 return cmd - queue->cmds;
227}
228
229static inline bool nvmet_tcp_has_data_in(struct nvmet_tcp_cmd *cmd)
230{
231 return nvme_is_write(cmd->req.cmd) &&
232 cmd->rbytes_done < cmd->req.transfer_len;
233}
234
235static inline bool nvmet_tcp_need_data_in(struct nvmet_tcp_cmd *cmd)
236{
237 return nvmet_tcp_has_data_in(cmd) && !cmd->req.cqe->status;
238}
239
240static inline bool nvmet_tcp_need_data_out(struct nvmet_tcp_cmd *cmd)
241{
242 return !nvme_is_write(cmd->req.cmd) &&
243 cmd->req.transfer_len > 0 &&
244 !cmd->req.cqe->status;
245}
246
247static inline bool nvmet_tcp_has_inline_data(struct nvmet_tcp_cmd *cmd)
248{
249 return nvme_is_write(cmd->req.cmd) && cmd->pdu_len &&
250 !cmd->rbytes_done;
251}
252
253static inline struct nvmet_tcp_cmd *
254nvmet_tcp_get_cmd(struct nvmet_tcp_queue *queue)
255{
256 struct nvmet_tcp_cmd *cmd;
257
258 cmd = list_first_entry_or_null(&queue->free_list,
259 struct nvmet_tcp_cmd, entry);
260 if (!cmd)
261 return NULL;
262 list_del_init(&cmd->entry);
263
264 cmd->rbytes_done = cmd->wbytes_done = 0;
265 cmd->pdu_len = 0;
266 cmd->pdu_recv = 0;
267 cmd->iov = NULL;
268 cmd->flags = 0;
269 return cmd;
270}
271
272static inline void nvmet_tcp_put_cmd(struct nvmet_tcp_cmd *cmd)
273{
274 if (unlikely(cmd == &cmd->queue->connect))
275 return;
276
277 list_add_tail(&cmd->entry, &cmd->queue->free_list);
278}
279
280static inline int queue_cpu(struct nvmet_tcp_queue *queue)
281{
282 return queue->sock->sk->sk_incoming_cpu;
283}
284
285static inline u8 nvmet_tcp_hdgst_len(struct nvmet_tcp_queue *queue)
286{
287 return queue->hdr_digest ? NVME_TCP_DIGEST_LENGTH : 0;
288}
289
290static inline u8 nvmet_tcp_ddgst_len(struct nvmet_tcp_queue *queue)
291{
292 return queue->data_digest ? NVME_TCP_DIGEST_LENGTH : 0;
293}
294
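/*
 * The header digest (HDGST) is a CRC32C over the PDU header, stored as a
 * 32-bit little-endian value immediately after the header.  nvmet_tcp_hdgst()
 * computes and appends it; nvmet_tcp_verify_hdgst() recomputes it in place
 * and compares it against the digest received from the host.
 */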
295static inline void nvmet_tcp_hdgst(void *pdu, size_t len)
296{
297 put_unaligned_le32(~crc32c(~0, pdu, len), pdu + len);
298}
299
300static int nvmet_tcp_verify_hdgst(struct nvmet_tcp_queue *queue,
301 void *pdu, size_t len)
302{
303 struct nvme_tcp_hdr *hdr = pdu;
304 __le32 recv_digest;
305 __le32 exp_digest;
306
307 if (unlikely(!(hdr->flags & NVME_TCP_F_HDGST))) {
308 pr_err("queue %d: header digest enabled but no header digest\n",
309 queue->idx);
310 return -EPROTO;
311 }
312
313 recv_digest = *(__le32 *)(pdu + hdr->hlen);
314 nvmet_tcp_hdgst(pdu, len);
315 exp_digest = *(__le32 *)(pdu + hdr->hlen);
316 if (recv_digest != exp_digest) {
317 pr_err("queue %d: header digest error: recv %#x expected %#x\n",
318 queue->idx, le32_to_cpu(recv_digest),
319 le32_to_cpu(exp_digest));
320 return -EPROTO;
321 }
322
323 return 0;
324}
325
326static int nvmet_tcp_check_ddgst(struct nvmet_tcp_queue *queue, void *pdu)
327{
328 struct nvme_tcp_hdr *hdr = pdu;
329 u8 digest_len = nvmet_tcp_hdgst_len(queue);
330 u32 len;
331
332 len = le32_to_cpu(hdr->plen) - hdr->hlen -
333 (hdr->flags & NVME_TCP_F_HDGST ? digest_len : 0);
334
335 if (unlikely(len && !(hdr->flags & NVME_TCP_F_DDGST))) {
336 pr_err("queue %d: data digest flag is cleared\n", queue->idx);
337 return -EPROTO;
338 }
339
340 return 0;
341}
342
343/* If cmd buffers are NULL, no operation is performed */
344static void nvmet_tcp_free_cmd_buffers(struct nvmet_tcp_cmd *cmd)
345{
346 kfree(cmd->iov);
347 sgl_free(cmd->req.sg);
348 cmd->iov = NULL;
349 cmd->req.sg = NULL;
350}
351
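/*
 * Map the data portion of the current PDU onto the command's scatterlist:
 * starting at cmd->rbytes_done, build one bio_vec per scatterlist element
 * (bounds-checked against sg_cnt) and point cmd->recv_msg at the resulting
 * iovec so the payload can be received directly into the data buffers.
 */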
352static int nvmet_tcp_build_pdu_iovec(struct nvmet_tcp_cmd *cmd)
353{
354 struct bio_vec *iov = cmd->iov;
355 struct scatterlist *sg;
356 u32 length, offset, sg_offset;
357 unsigned int sg_remaining;
358 int nr_pages;
359
360 length = cmd->pdu_len;
361 nr_pages = DIV_ROUND_UP(length, PAGE_SIZE);
362 offset = cmd->rbytes_done;
363 cmd->sg_idx = offset / PAGE_SIZE;
364 sg_offset = offset % PAGE_SIZE;
365 if (!cmd->req.sg_cnt || cmd->sg_idx >= cmd->req.sg_cnt)
366 return -EPROTO;
367
368 sg = &cmd->req.sg[cmd->sg_idx];
369 sg_remaining = cmd->req.sg_cnt - cmd->sg_idx;
370
371 while (length) {
372 if (!sg_remaining)
373 return -EPROTO;
374
375 if (!sg->length || sg->length <= sg_offset)
376 return -EPROTO;
377
378 u32 iov_len = min_t(u32, length, sg->length - sg_offset);
379
380 bvec_set_page(iov, sg_page(sg), iov_len,
381 sg->offset + sg_offset);
382
383 length -= iov_len;
384 sg = sg_next(sg);
385 sg_remaining--;
386 iov++;
387 sg_offset = 0;
388 }
389
390 iov_iter_bvec(&cmd->recv_msg.msg_iter, ITER_DEST, cmd->iov,
391 nr_pages, cmd->pdu_len);
392 return 0;
393}
394
395static void nvmet_tcp_socket_error(struct nvmet_tcp_queue *queue, int status)
396{
397 /*
398 * Keep rcv_state at RECV_ERR even for the internal -ESHUTDOWN path.
399 * nvmet_tcp_handle_icreq() can return -ESHUTDOWN after the ICReq has
400 * already been consumed and queue teardown has started.
401 *
402 * If nvmet_tcp_data_ready() or nvmet_tcp_write_space() queues
403 * nvmet_tcp_io_work() again before nvmet_tcp_release_queue_work()
404 * cancels it, the queue must not keep that old receive state.
405 * Otherwise the next nvmet_tcp_io_work() run can reach
406 * nvmet_tcp_done_recv_pdu() and try to handle the same ICReq again.
407 *
408 * That is why queue->rcv_state needs to be updated before we return.
409 */
410 queue->rcv_state = NVMET_TCP_RECV_ERR;
411 if (status == -EPIPE || status == -ECONNRESET || !queue->nvme_sq.ctrl)
412 kernel_sock_shutdown(queue->sock, SHUT_RDWR);
413 else
414 nvmet_ctrl_fatal_error(queue->nvme_sq.ctrl);
415}
416
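/*
 * Parse the command's SGL descriptor: an in-capsule data block descriptor
 * (offset addressing) is only legal for writes and must fit within the
 * port's inline_data_size, in which case pdu_len is set so the payload is
 * taken from the command capsule itself.  A scatterlist sized for the
 * transfer length is then allocated, plus a bio_vec array for writes that
 * still have data to receive.
 */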
417static int nvmet_tcp_map_data(struct nvmet_tcp_cmd *cmd)
418{
419 struct nvme_sgl_desc *sgl = &cmd->req.cmd->common.dptr.sgl;
420 u32 len = le32_to_cpu(sgl->length);
421
422 if (!len)
423 return 0;
424
425 if (sgl->type == ((NVME_SGL_FMT_DATA_DESC << 4) |
426 NVME_SGL_FMT_OFFSET)) {
427 if (!nvme_is_write(cmd->req.cmd))
428 return NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
429
430 if (len > cmd->req.port->inline_data_size)
431 return NVME_SC_SGL_INVALID_OFFSET | NVME_STATUS_DNR;
432 cmd->pdu_len = len;
433 }
434 cmd->req.transfer_len += len;
435
436 cmd->req.sg = sgl_alloc(len, GFP_KERNEL, &cmd->req.sg_cnt);
437 if (!cmd->req.sg)
438 return NVME_SC_INTERNAL;
439 cmd->cur_sg = cmd->req.sg;
440
441 if (nvmet_tcp_has_data_in(cmd)) {
442 cmd->iov = kmalloc_objs(*cmd->iov, cmd->req.sg_cnt);
443 if (!cmd->iov)
444 goto err;
445 }
446
447 return 0;
448err:
449 nvmet_tcp_free_cmd_buffers(cmd);
450 return NVME_SC_INTERNAL;
451}
452
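/*
 * The data digest (DDGST) is a CRC32C over the whole data payload.  For C2H
 * data it is computed here and sent after the data; for H2C data the same
 * value is computed and compared against the digest received from the host
 * in nvmet_tcp_try_recv_ddgst().
 */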
453static void nvmet_tcp_calc_ddgst(struct nvmet_tcp_cmd *cmd)
454{
455 size_t total_len = cmd->req.transfer_len;
456 struct scatterlist *sg = cmd->req.sg;
457 u32 crc = ~0;
458
459 while (total_len) {
460 size_t len = min_t(size_t, total_len, sg->length);
461
462 /*
463 * Note that the scatterlist does not contain any highmem pages,
464 * as it was allocated by sgl_alloc() with GFP_KERNEL.
465 */
466 crc = crc32c(crc, sg_virt(sg), len);
467 total_len -= len;
468 sg = sg_next(sg);
469 }
470 cmd->exp_ddgst = cpu_to_le32(~crc);
471}
472
473static void nvmet_setup_c2h_data_pdu(struct nvmet_tcp_cmd *cmd)
474{
475 struct nvme_tcp_data_pdu *pdu = cmd->data_pdu;
476 struct nvmet_tcp_queue *queue = cmd->queue;
477 u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue);
478 u8 ddgst = nvmet_tcp_ddgst_len(cmd->queue);
479
480 cmd->offset = 0;
481 cmd->state = NVMET_TCP_SEND_DATA_PDU;
482
483 pdu->hdr.type = nvme_tcp_c2h_data;
484 pdu->hdr.flags = NVME_TCP_F_DATA_LAST | (queue->nvme_sq.sqhd_disabled ?
485 NVME_TCP_F_DATA_SUCCESS : 0);
486 pdu->hdr.hlen = sizeof(*pdu);
487 pdu->hdr.pdo = pdu->hdr.hlen + hdgst;
488 pdu->hdr.plen =
489 cpu_to_le32(pdu->hdr.hlen + hdgst +
490 cmd->req.transfer_len + ddgst);
491 pdu->command_id = cmd->req.cqe->command_id;
492 pdu->data_length = cpu_to_le32(cmd->req.transfer_len);
493 pdu->data_offset = cpu_to_le32(cmd->wbytes_done);
494
495 if (queue->data_digest) {
496 pdu->hdr.flags |= NVME_TCP_F_DDGST;
497 nvmet_tcp_calc_ddgst(cmd);
498 }
499
500 if (cmd->queue->hdr_digest) {
501 pdu->hdr.flags |= NVME_TCP_F_HDGST;
502 nvmet_tcp_hdgst(pdu, sizeof(*pdu));
503 }
504}
505
506static void nvmet_setup_r2t_pdu(struct nvmet_tcp_cmd *cmd)
507{
508 struct nvme_tcp_r2t_pdu *pdu = cmd->r2t_pdu;
509 u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue);
510
511 cmd->offset = 0;
512 cmd->state = NVMET_TCP_SEND_R2T;
513
514 pdu->hdr.type = nvme_tcp_r2t;
515 pdu->hdr.flags = 0;
516 pdu->hdr.hlen = sizeof(*pdu);
517 pdu->hdr.pdo = 0;
518 pdu->hdr.plen = cpu_to_le32(pdu->hdr.hlen + hdgst);
519
520 pdu->command_id = cmd->req.cmd->common.command_id;
521 pdu->ttag = nvmet_tcp_cmd_tag(cmd->queue, cmd);
522 pdu->r2t_length = cpu_to_le32(cmd->req.transfer_len - cmd->rbytes_done);
523 pdu->r2t_offset = cpu_to_le32(cmd->rbytes_done);
524 if (cmd->queue->hdr_digest) {
525 pdu->hdr.flags |= NVME_TCP_F_HDGST;
526 nvmet_tcp_hdgst(pdu, sizeof(*pdu));
527 }
528}
529
530static void nvmet_setup_response_pdu(struct nvmet_tcp_cmd *cmd)
531{
532 struct nvme_tcp_rsp_pdu *pdu = cmd->rsp_pdu;
533 u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue);
534
535 cmd->offset = 0;
536 cmd->state = NVMET_TCP_SEND_RESPONSE;
537
538 pdu->hdr.type = nvme_tcp_rsp;
539 pdu->hdr.flags = 0;
540 pdu->hdr.hlen = sizeof(*pdu);
541 pdu->hdr.pdo = 0;
542 pdu->hdr.plen = cpu_to_le32(pdu->hdr.hlen + hdgst);
543 if (cmd->queue->hdr_digest) {
544 pdu->hdr.flags |= NVME_TCP_F_HDGST;
545 nvmet_tcp_hdgst(pdu, sizeof(*pdu));
546 }
547}
548
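/*
 * Completions are queued on the lock-free queue->resp_list llist from
 * whatever context nvmet_tcp_queue_response() runs in; io_work drains that
 * list here into the ordinary resp_send_list, which is only ever touched
 * from the single io_work context.
 */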
549static void nvmet_tcp_process_resp_list(struct nvmet_tcp_queue *queue)
550{
551 struct llist_node *node;
552 struct nvmet_tcp_cmd *cmd;
553
554 for (node = llist_del_all(&queue->resp_list); node; node = node->next) {
555 cmd = llist_entry(node, struct nvmet_tcp_cmd, lentry);
556 list_add(&cmd->entry, &queue->resp_send_list);
557 queue->send_list_len++;
558 }
559}
560
561static struct nvmet_tcp_cmd *nvmet_tcp_fetch_cmd(struct nvmet_tcp_queue *queue)
562{
563 queue->snd_cmd = list_first_entry_or_null(&queue->resp_send_list,
564 struct nvmet_tcp_cmd, entry);
565 if (!queue->snd_cmd) {
566 nvmet_tcp_process_resp_list(queue);
567 queue->snd_cmd =
568 list_first_entry_or_null(&queue->resp_send_list,
569 struct nvmet_tcp_cmd, entry);
570 if (unlikely(!queue->snd_cmd))
571 return NULL;
572 }
573
574 list_del_init(&queue->snd_cmd->entry);
575 queue->send_list_len--;
576
577 if (nvmet_tcp_need_data_out(queue->snd_cmd))
578 nvmet_setup_c2h_data_pdu(queue->snd_cmd);
579 else if (nvmet_tcp_need_data_in(queue->snd_cmd))
580 nvmet_setup_r2t_pdu(queue->snd_cmd);
581 else
582 nvmet_setup_response_pdu(queue->snd_cmd);
583
584 return queue->snd_cmd;
585}
586
587static void nvmet_tcp_queue_response(struct nvmet_req *req)
588{
589 struct nvmet_tcp_cmd *cmd =
590 container_of(req, struct nvmet_tcp_cmd, req);
591 struct nvmet_tcp_queue *queue = cmd->queue;
592 enum nvmet_tcp_recv_state queue_state;
593 struct nvmet_tcp_cmd *queue_cmd;
594 struct nvme_sgl_desc *sgl;
595 u32 len;
596
597 /* Pairs with store_release in nvmet_prepare_receive_pdu() */
598 queue_state = smp_load_acquire(&queue->rcv_state);
599 queue_cmd = READ_ONCE(queue->cmd);
600
601 if (unlikely(cmd == queue_cmd)) {
602 sgl = &cmd->req.cmd->common.dptr.sgl;
603 len = le32_to_cpu(sgl->length);
604
605 /*
606 * Wait for inline data before processing the response.
607 * Avoid using helpers, as this might happen before
608 * nvmet_req_init() has completed.
609 */
610 if (queue_state == NVMET_TCP_RECV_PDU &&
611 len && len <= cmd->req.port->inline_data_size &&
612 nvme_is_write(cmd->req.cmd))
613 return;
614 }
615
616 llist_add(&cmd->lentry, &queue->resp_list);
617 queue_work_on(queue_cpu(queue), nvmet_tcp_wq, &cmd->queue->io_work);
618}
619
620static void nvmet_tcp_execute_request(struct nvmet_tcp_cmd *cmd)
621{
622 if (unlikely(cmd->flags & NVMET_TCP_F_INIT_FAILED))
623 nvmet_tcp_queue_response(&cmd->req);
624 else
625 cmd->req.execute(&cmd->req);
626}
627
628static int nvmet_try_send_data_pdu(struct nvmet_tcp_cmd *cmd)
629{
630 struct msghdr msg = {
631 .msg_flags = MSG_DONTWAIT | MSG_MORE | MSG_SPLICE_PAGES,
632 };
633 struct bio_vec bvec;
634 u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue);
635 int left = sizeof(*cmd->data_pdu) - cmd->offset + hdgst;
636 int ret;
637
638 bvec_set_virt(&bvec, (void *)cmd->data_pdu + cmd->offset, left);
639 iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, left);
640 ret = sock_sendmsg(cmd->queue->sock, &msg);
641 if (ret <= 0)
642 return ret;
643
644 cmd->offset += ret;
645 left -= ret;
646
647 if (left)
648 return -EAGAIN;
649
650 cmd->state = NVMET_TCP_SEND_DATA;
651 cmd->offset = 0;
652 return 1;
653}
654
655static int nvmet_try_send_data(struct nvmet_tcp_cmd *cmd, bool last_in_batch)
656{
657 struct nvmet_tcp_queue *queue = cmd->queue;
658 int ret;
659
660 while (cmd->cur_sg) {
661 struct msghdr msg = {
662 .msg_flags = MSG_DONTWAIT | MSG_SPLICE_PAGES,
663 };
664 struct page *page = sg_page(cmd->cur_sg);
665 struct bio_vec bvec;
666 u32 left = cmd->cur_sg->length - cmd->offset;
667
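		/*
		 * Set MSG_MORE whenever more TCP data will follow immediately:
		 * other commands are queued behind this one, this command still
		 * has more payload to send, a data digest will be appended, or
		 * a response capsule follows because SQ head updates are not
		 * disabled.
		 */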
668 if ((!last_in_batch && cmd->queue->send_list_len) ||
669 cmd->wbytes_done + left < cmd->req.transfer_len ||
670 queue->data_digest || !queue->nvme_sq.sqhd_disabled)
671 msg.msg_flags |= MSG_MORE;
672
673 bvec_set_page(&bvec, page, left, cmd->offset);
674 iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, left);
675 ret = sock_sendmsg(cmd->queue->sock, &msg);
676 if (ret <= 0)
677 return ret;
678
679 cmd->offset += ret;
680 cmd->wbytes_done += ret;
681
682 /* Done with sg? */
683 if (cmd->offset == cmd->cur_sg->length) {
684 cmd->cur_sg = sg_next(cmd->cur_sg);
685 cmd->offset = 0;
686 }
687 }
688
689 if (queue->data_digest) {
690 cmd->state = NVMET_TCP_SEND_DDGST;
691 cmd->offset = 0;
692 } else {
693 if (queue->nvme_sq.sqhd_disabled) {
694 cmd->queue->snd_cmd = NULL;
695 nvmet_tcp_put_cmd(cmd);
696 } else {
697 nvmet_setup_response_pdu(cmd);
698 }
699 }
700
701 if (queue->nvme_sq.sqhd_disabled)
702 nvmet_tcp_free_cmd_buffers(cmd);
703
704 return 1;
705
706}
707
708static int nvmet_try_send_response(struct nvmet_tcp_cmd *cmd,
709 bool last_in_batch)
710{
711 struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_SPLICE_PAGES, };
712 struct bio_vec bvec;
713 u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue);
714 int left = sizeof(*cmd->rsp_pdu) - cmd->offset + hdgst;
715 int ret;
716
717 if (!last_in_batch && cmd->queue->send_list_len)
718 msg.msg_flags |= MSG_MORE;
719 else
720 msg.msg_flags |= MSG_EOR;
721
722 bvec_set_virt(&bvec, (void *)cmd->rsp_pdu + cmd->offset, left);
723 iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, left);
724 ret = sock_sendmsg(cmd->queue->sock, &msg);
725 if (ret <= 0)
726 return ret;
727 cmd->offset += ret;
728 left -= ret;
729
730 if (left)
731 return -EAGAIN;
732
733 nvmet_tcp_free_cmd_buffers(cmd);
734 cmd->queue->snd_cmd = NULL;
735 nvmet_tcp_put_cmd(cmd);
736 return 1;
737}
738
739static int nvmet_try_send_r2t(struct nvmet_tcp_cmd *cmd, bool last_in_batch)
740{
741 struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_SPLICE_PAGES, };
742 struct bio_vec bvec;
743 u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue);
744 int left = sizeof(*cmd->r2t_pdu) - cmd->offset + hdgst;
745 int ret;
746
747 if (!last_in_batch && cmd->queue->send_list_len)
748 msg.msg_flags |= MSG_MORE;
749 else
750 msg.msg_flags |= MSG_EOR;
751
752 bvec_set_virt(&bvec, (void *)cmd->r2t_pdu + cmd->offset, left);
753 iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, left);
754 ret = sock_sendmsg(cmd->queue->sock, &msg);
755 if (ret <= 0)
756 return ret;
757 cmd->offset += ret;
758 left -= ret;
759
760 if (left)
761 return -EAGAIN;
762
763 cmd->queue->snd_cmd = NULL;
764 return 1;
765}
766
767static int nvmet_try_send_ddgst(struct nvmet_tcp_cmd *cmd, bool last_in_batch)
768{
769 struct nvmet_tcp_queue *queue = cmd->queue;
770 int left = NVME_TCP_DIGEST_LENGTH - cmd->offset;
771 struct msghdr msg = { .msg_flags = MSG_DONTWAIT };
772 struct kvec iov = {
773 .iov_base = (u8 *)&cmd->exp_ddgst + cmd->offset,
774 .iov_len = left
775 };
776 int ret;
777
778 if (!last_in_batch && cmd->queue->send_list_len)
779 msg.msg_flags |= MSG_MORE;
780 else
781 msg.msg_flags |= MSG_EOR;
782
783 ret = kernel_sendmsg(queue->sock, &msg, &iov, 1, iov.iov_len);
784 if (unlikely(ret <= 0))
785 return ret;
786
787 cmd->offset += ret;
788 left -= ret;
789
790 if (left)
791 return -EAGAIN;
792
793 if (queue->nvme_sq.sqhd_disabled) {
794 cmd->queue->snd_cmd = NULL;
795 nvmet_tcp_put_cmd(cmd);
796 } else {
797 nvmet_setup_response_pdu(cmd);
798 }
799 return 1;
800}
801
802static int nvmet_tcp_try_send_one(struct nvmet_tcp_queue *queue,
803 bool last_in_batch)
804{
805 struct nvmet_tcp_cmd *cmd = queue->snd_cmd;
806 int ret = 0;
807
808 if (!cmd || queue->state == NVMET_TCP_Q_DISCONNECTING) {
809 cmd = nvmet_tcp_fetch_cmd(queue);
810 if (unlikely(!cmd))
811 return 0;
812 }
813
814 if (cmd->state == NVMET_TCP_SEND_DATA_PDU) {
815 ret = nvmet_try_send_data_pdu(cmd);
816 if (ret <= 0)
817 goto done_send;
818 }
819
820 if (cmd->state == NVMET_TCP_SEND_DATA) {
821 ret = nvmet_try_send_data(cmd, last_in_batch);
822 if (ret <= 0)
823 goto done_send;
824 }
825
826 if (cmd->state == NVMET_TCP_SEND_DDGST) {
827 ret = nvmet_try_send_ddgst(cmd, last_in_batch);
828 if (ret <= 0)
829 goto done_send;
830 }
831
832 if (cmd->state == NVMET_TCP_SEND_R2T) {
833 ret = nvmet_try_send_r2t(cmd, last_in_batch);
834 if (ret <= 0)
835 goto done_send;
836 }
837
838 if (cmd->state == NVMET_TCP_SEND_RESPONSE)
839 ret = nvmet_try_send_response(cmd, last_in_batch);
840
841done_send:
842 if (ret < 0) {
843 if (ret == -EAGAIN)
844 return 0;
845 return ret;
846 }
847
848 return 1;
849}
850
851static int nvmet_tcp_try_send(struct nvmet_tcp_queue *queue,
852 int budget, int *sends)
853{
854 int i, ret = 0;
855
856 for (i = 0; i < budget; i++) {
857 ret = nvmet_tcp_try_send_one(queue, i == budget - 1);
858 if (unlikely(ret < 0)) {
859 nvmet_tcp_socket_error(queue, ret);
860 goto done;
861 } else if (ret == 0) {
862 break;
863 }
864 (*sends)++;
865 }
866done:
867 return ret;
868}
869
870static void nvmet_prepare_receive_pdu(struct nvmet_tcp_queue *queue)
871{
872 queue->offset = 0;
873 queue->left = sizeof(struct nvme_tcp_hdr);
874 WRITE_ONCE(queue->cmd, NULL);
875 /* Ensure rcv_state is visible only after queue->cmd is set */
876 smp_store_release(&queue->rcv_state, NVMET_TCP_RECV_PDU);
877}
878
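/*
 * Handle the connection setup exchange: validate the host's ICReq PDU
 * (length, PFV, HPDA), latch the negotiated header/data digest settings,
 * and answer with an ICResp that advertises NVMET_TCP_MAXH2CDATA and a
 * CPDA of 0 before the queue transitions to NVMET_TCP_Q_LIVE.
 */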
879static int nvmet_tcp_handle_icreq(struct nvmet_tcp_queue *queue)
880{
881 struct nvme_tcp_icreq_pdu *icreq = &queue->pdu.icreq;
882 struct nvme_tcp_icresp_pdu *icresp = &queue->pdu.icresp;
883 struct msghdr msg = {};
884 struct kvec iov;
885 int ret;
886
887 if (le32_to_cpu(icreq->hdr.plen) != sizeof(struct nvme_tcp_icreq_pdu)) {
888 pr_err("bad nvme-tcp pdu length (%d)\n",
889 le32_to_cpu(icreq->hdr.plen));
890 return -EPROTO;
891 }
892
893 if (icreq->pfv != NVME_TCP_PFV_1_0) {
894 pr_err("queue %d: bad pfv %d\n", queue->idx, icreq->pfv);
895 return -EPROTO;
896 }
897
898 if (icreq->hpda != 0) {
899 pr_err("queue %d: unsupported hpda %d\n", queue->idx,
900 icreq->hpda);
901 return -EPROTO;
902 }
903
904 queue->hdr_digest = !!(icreq->digest & NVME_TCP_HDR_DIGEST_ENABLE);
905 queue->data_digest = !!(icreq->digest & NVME_TCP_DATA_DIGEST_ENABLE);
906
907 memset(icresp, 0, sizeof(*icresp));
908 icresp->hdr.type = nvme_tcp_icresp;
909 icresp->hdr.hlen = sizeof(*icresp);
910 icresp->hdr.pdo = 0;
911 icresp->hdr.plen = cpu_to_le32(icresp->hdr.hlen);
912 icresp->pfv = cpu_to_le16(NVME_TCP_PFV_1_0);
913 icresp->maxdata = cpu_to_le32(NVMET_TCP_MAXH2CDATA);
914 icresp->cpda = 0;
915 if (queue->hdr_digest)
916 icresp->digest |= NVME_TCP_HDR_DIGEST_ENABLE;
917 if (queue->data_digest)
918 icresp->digest |= NVME_TCP_DATA_DIGEST_ENABLE;
919
920 iov.iov_base = icresp;
921 iov.iov_len = sizeof(*icresp);
922 ret = kernel_sendmsg(queue->sock, &msg, &iov, 1, iov.iov_len);
923 if (ret < 0) {
924 spin_lock_bh(&queue->state_lock);
925 if (queue->state == NVMET_TCP_Q_DISCONNECTING) {
926 spin_unlock_bh(&queue->state_lock);
927 return -ESHUTDOWN;
928 }
929 queue->state = NVMET_TCP_Q_FAILED;
930 spin_unlock_bh(&queue->state_lock);
931 return ret; /* queue removal will cleanup */
932 }
933
934 spin_lock_bh(&queue->state_lock);
935 if (queue->state == NVMET_TCP_Q_DISCONNECTING) {
936 spin_unlock_bh(&queue->state_lock);
937 /* Tell nvmet_tcp_socket_error() teardown is in progress. */
938 return -ESHUTDOWN;
939 }
940 queue->state = NVMET_TCP_Q_LIVE;
941 spin_unlock_bh(&queue->state_lock);
942 nvmet_prepare_receive_pdu(queue);
943 return 0;
944}
945
946static int nvmet_tcp_handle_req_failure(struct nvmet_tcp_queue *queue,
947 struct nvmet_tcp_cmd *cmd, struct nvmet_req *req)
948{
949 size_t data_len = le32_to_cpu(req->cmd->common.dptr.sgl.length);
950 int ret;
951
952 /*
953 * This command has not been processed yet, so figure out whether
954 * there is still pending data left to receive. If there is not,
955 * we can simply prepare for the next pdu and bail out; otherwise
956 * we will need to prepare a buffer and receive the stale data
957 * before continuing forward.
958 */
959 if (!nvme_is_write(cmd->req.cmd) || !data_len ||
960 data_len > cmd->req.port->inline_data_size) {
961 nvmet_prepare_receive_pdu(queue);
962 return 0;
963 }
964
965 ret = nvmet_tcp_map_data(cmd);
966 if (unlikely(ret)) {
967 pr_err("queue %d: failed to map data\n", queue->idx);
968 return -EPROTO;
969 }
970
971 queue->rcv_state = NVMET_TCP_RECV_DATA;
972 cmd->flags |= NVMET_TCP_F_INIT_FAILED;
973 ret = nvmet_tcp_build_pdu_iovec(cmd);
974 if (unlikely(ret))
975 pr_err("queue %d: failed to build PDU iovec\n", queue->idx);
976
977 return ret;
978}
979
980static int nvmet_tcp_handle_h2c_data_pdu(struct nvmet_tcp_queue *queue)
981{
982 struct nvme_tcp_data_pdu *data = &queue->pdu.data;
983 struct nvmet_tcp_cmd *cmd;
984 unsigned int exp_data_len;
985
986 if (likely(queue->nr_cmds)) {
987 if (unlikely(data->ttag >= queue->nr_cmds)) {
988 pr_err("queue %d: received out of bound ttag %u, nr_cmds %u\n",
989 queue->idx, data->ttag, queue->nr_cmds);
990 goto err_proto;
991 }
992 cmd = &queue->cmds[data->ttag];
993 } else {
994 cmd = &queue->connect;
995 }
996
997 if (le32_to_cpu(data->data_offset) != cmd->rbytes_done) {
998 pr_err("ttag %u unexpected data offset %u (expected %u)\n",
999 data->ttag, le32_to_cpu(data->data_offset),
1000 cmd->rbytes_done);
1001 goto err_proto;
1002 }
1003
1004 exp_data_len = le32_to_cpu(data->hdr.plen) -
1005 nvmet_tcp_hdgst_len(queue) -
1006 nvmet_tcp_ddgst_len(queue) -
1007 sizeof(*data);
1008
1009 cmd->pdu_len = le32_to_cpu(data->data_length);
1010 if (unlikely(cmd->pdu_len != exp_data_len ||
1011 cmd->pdu_len == 0 ||
1012 cmd->pdu_len > NVMET_TCP_MAXH2CDATA)) {
1013 pr_err("H2CData PDU len %u is invalid\n", cmd->pdu_len);
1014 goto err_proto;
1015 }
1016 /*
1017 * Ensure command data structures are initialized. We must check both
1018 * cmd->req.sg and cmd->iov because they can have different NULL states:
1019 * - Uninitialized commands: both NULL
1020 * - READ commands: cmd->req.sg allocated, cmd->iov NULL
1021 * - WRITE commands: both allocated
1022 */
1023 if (unlikely(!cmd->req.sg || !cmd->iov)) {
1024 pr_err("queue %d: H2CData PDU received for invalid command state (ttag %u)\n",
1025 queue->idx, data->ttag);
1026 goto err_proto;
1027 }
1028 cmd->pdu_recv = 0;
1029 if (unlikely(nvmet_tcp_build_pdu_iovec(cmd))) {
1030 pr_err("queue %d: failed to build PDU iovec\n", queue->idx);
1031 goto err_proto;
1032 }
1033 queue->cmd = cmd;
1034 queue->rcv_state = NVMET_TCP_RECV_DATA;
1035
1036 return 0;
1037
1038err_proto:
1039 /* FIXME: use proper transport errors */
1040 return -EPROTO;
1041}
1042
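/*
 * A complete PDU header (and header digest, if any) has been received.
 * While connecting only an ICReq is accepted; an H2CData PDU is routed to
 * the command identified by its ttag; anything else is a new command
 * capsule: grab a free command, initialize the request, map its data, and
 * then either receive the inline data, queue an R2T, or execute it.
 */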
1043static int nvmet_tcp_done_recv_pdu(struct nvmet_tcp_queue *queue)
1044{
1045 struct nvme_tcp_hdr *hdr = &queue->pdu.cmd.hdr;
1046 struct nvme_command *nvme_cmd = &queue->pdu.cmd.cmd;
1047 struct nvmet_req *req;
1048 int ret;
1049
1050 if (unlikely(queue->state == NVMET_TCP_Q_CONNECTING)) {
1051 if (hdr->type != nvme_tcp_icreq) {
1052 pr_err("unexpected pdu type (%d) before icreq\n",
1053 hdr->type);
1054 return -EPROTO;
1055 }
1056 return nvmet_tcp_handle_icreq(queue);
1057 }
1058
1059 if (unlikely(hdr->type == nvme_tcp_icreq)) {
1060 pr_err("queue %d: received icreq pdu in state %d\n",
1061 queue->idx, queue->state);
1062 return -EPROTO;
1063 }
1064
1065 if (hdr->type == nvme_tcp_h2c_data) {
1066 ret = nvmet_tcp_handle_h2c_data_pdu(queue);
1067 if (unlikely(ret))
1068 return ret;
1069 return 0;
1070 }
1071
1072 queue->cmd = nvmet_tcp_get_cmd(queue);
1073 if (unlikely(!queue->cmd)) {
1074 /* This should never happen */
1075 pr_err("queue %d: out of commands (%d) send_list_len: %d, opcode: %d",
1076 queue->idx, queue->nr_cmds, queue->send_list_len,
1077 nvme_cmd->common.opcode);
1078 return -ENOMEM;
1079 }
1080
1081 req = &queue->cmd->req;
1082 memcpy(req->cmd, nvme_cmd, sizeof(*nvme_cmd));
1083
1084 if (unlikely(!nvmet_req_init(req, &queue->nvme_sq, &nvmet_tcp_ops))) {
1085 pr_err("failed cmd %p id %d opcode %d, data_len: %d, status: %04x\n",
1086 req->cmd, req->cmd->common.command_id,
1087 req->cmd->common.opcode,
1088 le32_to_cpu(req->cmd->common.dptr.sgl.length),
1089 le16_to_cpu(req->cqe->status));
1090
1091 return nvmet_tcp_handle_req_failure(queue, queue->cmd, req);
1092 }
1093
1094 ret = nvmet_tcp_map_data(queue->cmd);
1095 if (unlikely(ret)) {
1096 pr_err("queue %d: failed to map data\n", queue->idx);
1097 if (nvmet_tcp_has_inline_data(queue->cmd))
1098 return -EPROTO;
1099
1100 nvmet_req_complete(req, ret);
1101 ret = -EAGAIN;
1102 goto out;
1103 }
1104
1105 if (nvmet_tcp_need_data_in(queue->cmd)) {
1106 if (nvmet_tcp_has_inline_data(queue->cmd)) {
1107 queue->rcv_state = NVMET_TCP_RECV_DATA;
1108 ret = nvmet_tcp_build_pdu_iovec(queue->cmd);
1109 if (unlikely(ret))
1110 pr_err("queue %d: failed to build PDU iovec\n",
1111 queue->idx);
1112 return ret;
1113 }
1114 /* send back R2T */
1115 nvmet_tcp_queue_response(&queue->cmd->req);
1116 goto out;
1117 }
1118
1119 queue->cmd->req.execute(&queue->cmd->req);
1120out:
1121 nvmet_prepare_receive_pdu(queue);
1122 return ret;
1123}
1124
1125static const u8 nvme_tcp_pdu_sizes[] = {
1126 [nvme_tcp_icreq] = sizeof(struct nvme_tcp_icreq_pdu),
1127 [nvme_tcp_cmd] = sizeof(struct nvme_tcp_cmd_pdu),
1128 [nvme_tcp_h2c_data] = sizeof(struct nvme_tcp_data_pdu),
1129};
1130
1131static inline u8 nvmet_tcp_pdu_size(u8 type)
1132{
1133 size_t idx = type;
1134
1135 return (idx < ARRAY_SIZE(nvme_tcp_pdu_sizes) &&
1136 nvme_tcp_pdu_sizes[idx]) ?
1137 nvme_tcp_pdu_sizes[idx] : 0;
1138}
1139
1140static inline bool nvmet_tcp_pdu_valid(u8 type)
1141{
1142 switch (type) {
1143 case nvme_tcp_icreq:
1144 case nvme_tcp_cmd:
1145 case nvme_tcp_h2c_data:
1146 /* fallthru */
1147 return true;
1148 }
1149
1150 return false;
1151}
1152
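/*
 * When the socket carries a TLS session, every recvmsg passes a control
 * message buffer so the TLS record type can be inspected: data records are
 * consumed normally, alerts are logged (fatal alerts tear the queue down
 * with -ENOTCONN), and any other record type is skipped with -EAGAIN.
 */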
1153static int nvmet_tcp_tls_record_ok(struct nvmet_tcp_queue *queue,
1154 struct msghdr *msg, char *cbuf)
1155{
1156 struct cmsghdr *cmsg = (struct cmsghdr *)cbuf;
1157 u8 ctype, level, description;
1158 int ret = 0;
1159
1160 ctype = tls_get_record_type(queue->sock->sk, cmsg);
1161 switch (ctype) {
1162 case 0:
1163 break;
1164 case TLS_RECORD_TYPE_DATA:
1165 break;
1166 case TLS_RECORD_TYPE_ALERT:
1167 tls_alert_recv(queue->sock->sk, msg, &level, &description);
1168 if (level == TLS_ALERT_LEVEL_FATAL) {
1169 pr_err("queue %d: TLS Alert desc %u\n",
1170 queue->idx, description);
1171 ret = -ENOTCONN;
1172 } else {
1173 pr_warn("queue %d: TLS Alert desc %u\n",
1174 queue->idx, description);
1175 ret = -EAGAIN;
1176 }
1177 break;
1178 default:
1179 /* discard this record type */
1180 pr_err("queue %d: TLS record %d unhandled\n",
1181 queue->idx, ctype);
1182 ret = -EAGAIN;
1183 break;
1184 }
1185 return ret;
1186}
1187
1188static int nvmet_tcp_try_recv_pdu(struct nvmet_tcp_queue *queue)
1189{
1190 struct nvme_tcp_hdr *hdr = &queue->pdu.cmd.hdr;
1191 int len, ret;
1192 struct kvec iov;
1193 char cbuf[CMSG_LEN(sizeof(char))] = {};
1194 struct msghdr msg = { .msg_flags = MSG_DONTWAIT };
1195
1196recv:
1197 iov.iov_base = (void *)&queue->pdu + queue->offset;
1198 iov.iov_len = queue->left;
1199 if (queue->tls_pskid) {
1200 msg.msg_control = cbuf;
1201 msg.msg_controllen = sizeof(cbuf);
1202 }
1203 len = kernel_recvmsg(queue->sock, &msg, &iov, 1,
1204 iov.iov_len, msg.msg_flags);
1205 if (unlikely(len < 0))
1206 return len;
1207 if (queue->tls_pskid) {
1208 ret = nvmet_tcp_tls_record_ok(queue, &msg, cbuf);
1209 if (ret < 0)
1210 return ret;
1211 }
1212
1213 queue->offset += len;
1214 queue->left -= len;
1215 if (queue->left)
1216 return -EAGAIN;
1217
1218 if (queue->offset == sizeof(struct nvme_tcp_hdr)) {
1219 u8 hdgst = nvmet_tcp_hdgst_len(queue);
1220
1221 if (unlikely(!nvmet_tcp_pdu_valid(hdr->type))) {
1222 pr_err("unexpected pdu type %d\n", hdr->type);
1223 return -EIO;
1224 }
1225
1226 if (unlikely(hdr->hlen != nvmet_tcp_pdu_size(hdr->type))) {
1227 pr_err("pdu %d bad hlen %d\n", hdr->type, hdr->hlen);
1228 return -EIO;
1229 }
1230
1231 queue->left = hdr->hlen - queue->offset + hdgst;
1232 goto recv;
1233 }
1234
1235 if (queue->hdr_digest &&
1236 nvmet_tcp_verify_hdgst(queue, &queue->pdu, hdr->hlen))
1237 return -EPROTO;
1238
1239 if (queue->data_digest &&
1240 nvmet_tcp_check_ddgst(queue, &queue->pdu))
1241 return -EPROTO;
1242
1243 return nvmet_tcp_done_recv_pdu(queue);
1244}
1245
1246static void nvmet_tcp_prep_recv_ddgst(struct nvmet_tcp_cmd *cmd)
1247{
1248 struct nvmet_tcp_queue *queue = cmd->queue;
1249
1250 nvmet_tcp_calc_ddgst(cmd);
1251 queue->offset = 0;
1252 queue->left = NVME_TCP_DIGEST_LENGTH;
1253 queue->rcv_state = NVMET_TCP_RECV_DDGST;
1254}
1255
1256static int nvmet_tcp_try_recv_data(struct nvmet_tcp_queue *queue)
1257{
1258 struct nvmet_tcp_cmd *cmd = queue->cmd;
1259 int len, ret;
1260
1261 while (msg_data_left(&cmd->recv_msg)) {
1262 len = sock_recvmsg(cmd->queue->sock, &cmd->recv_msg,
1263 cmd->recv_msg.msg_flags);
1264 if (len <= 0)
1265 return len;
1266 if (queue->tls_pskid) {
1267 ret = nvmet_tcp_tls_record_ok(cmd->queue,
1268 &cmd->recv_msg, cmd->recv_cbuf);
1269 if (ret < 0)
1270 return ret;
1271 }
1272
1273 cmd->pdu_recv += len;
1274 cmd->rbytes_done += len;
1275 }
1276
1277 if (queue->data_digest) {
1278 nvmet_tcp_prep_recv_ddgst(cmd);
1279 return 0;
1280 }
1281
1282 if (cmd->rbytes_done == cmd->req.transfer_len)
1283 nvmet_tcp_execute_request(cmd);
1284
1285 nvmet_prepare_receive_pdu(queue);
1286 return 0;
1287}
1288
1289static int nvmet_tcp_try_recv_ddgst(struct nvmet_tcp_queue *queue)
1290{
1291 struct nvmet_tcp_cmd *cmd = queue->cmd;
1292 int ret, len;
1293 char cbuf[CMSG_LEN(sizeof(char))] = {};
1294 struct msghdr msg = { .msg_flags = MSG_DONTWAIT };
1295 struct kvec iov = {
1296 .iov_base = (void *)&cmd->recv_ddgst + queue->offset,
1297 .iov_len = queue->left
1298 };
1299
1300 if (queue->tls_pskid) {
1301 msg.msg_control = cbuf;
1302 msg.msg_controllen = sizeof(cbuf);
1303 }
1304 len = kernel_recvmsg(queue->sock, &msg, &iov, 1,
1305 iov.iov_len, msg.msg_flags);
1306 if (unlikely(len < 0))
1307 return len;
1308 if (queue->tls_pskid) {
1309 ret = nvmet_tcp_tls_record_ok(queue, &msg, cbuf);
1310 if (ret < 0)
1311 return ret;
1312 }
1313
1314 queue->offset += len;
1315 queue->left -= len;
1316 if (queue->left)
1317 return -EAGAIN;
1318
1319 if (queue->data_digest && cmd->exp_ddgst != cmd->recv_ddgst) {
1320 pr_err("queue %d: cmd %d pdu (%d) data digest error: recv %#x expected %#x\n",
1321 queue->idx, cmd->req.cmd->common.command_id,
1322 queue->pdu.cmd.hdr.type, le32_to_cpu(cmd->recv_ddgst),
1323 le32_to_cpu(cmd->exp_ddgst));
1324 if (!(cmd->flags & NVMET_TCP_F_INIT_FAILED))
1325 nvmet_req_uninit(&cmd->req);
1326 nvmet_tcp_free_cmd_buffers(cmd);
1327 ret = -EPROTO;
1328 goto out;
1329 }
1330
1331 if (cmd->rbytes_done == cmd->req.transfer_len)
1332 nvmet_tcp_execute_request(cmd);
1333
1334 ret = 0;
1335out:
1336 nvmet_prepare_receive_pdu(queue);
1337 return ret;
1338}
1339
1340static int nvmet_tcp_try_recv_one(struct nvmet_tcp_queue *queue)
1341{
1342 int result = 0;
1343
1344 if (unlikely(queue->rcv_state == NVMET_TCP_RECV_ERR))
1345 return 0;
1346
1347 if (queue->rcv_state == NVMET_TCP_RECV_PDU) {
1348 result = nvmet_tcp_try_recv_pdu(queue);
1349 if (result != 0)
1350 goto done_recv;
1351 }
1352
1353 if (queue->rcv_state == NVMET_TCP_RECV_DATA) {
1354 result = nvmet_tcp_try_recv_data(queue);
1355 if (result != 0)
1356 goto done_recv;
1357 }
1358
1359 if (queue->rcv_state == NVMET_TCP_RECV_DDGST) {
1360 result = nvmet_tcp_try_recv_ddgst(queue);
1361 if (result != 0)
1362 goto done_recv;
1363 }
1364
1365done_recv:
1366 if (result < 0) {
1367 if (result == -EAGAIN)
1368 return 0;
1369 return result;
1370 }
1371 return 1;
1372}
1373
1374static int nvmet_tcp_try_recv(struct nvmet_tcp_queue *queue,
1375 int budget, int *recvs)
1376{
1377 int i, ret = 0;
1378
1379 for (i = 0; i < budget; i++) {
1380 ret = nvmet_tcp_try_recv_one(queue);
1381 if (unlikely(ret < 0)) {
1382 nvmet_tcp_socket_error(queue, ret);
1383 goto done;
1384 } else if (ret == 0) {
1385 break;
1386 }
1387 (*recvs)++;
1388 }
1389done:
1390 return ret;
1391}
1392
1393static void nvmet_tcp_release_queue(struct kref *kref)
1394{
1395 struct nvmet_tcp_queue *queue =
1396 container_of(kref, struct nvmet_tcp_queue, kref);
1397
1398 WARN_ON(queue->state != NVMET_TCP_Q_DISCONNECTING);
1399 queue_work(nvmet_wq, &queue->release_work);
1400}
1401
1402static void nvmet_tcp_schedule_release_queue(struct nvmet_tcp_queue *queue)
1403{
1404 spin_lock_bh(&queue->state_lock);
1405 if (queue->state == NVMET_TCP_Q_TLS_HANDSHAKE) {
1406 /* Socket closed during handshake */
1407 tls_handshake_cancel(queue->sock->sk);
1408 }
1409 if (queue->state != NVMET_TCP_Q_DISCONNECTING) {
1410 queue->state = NVMET_TCP_Q_DISCONNECTING;
1411 kref_put(&queue->kref, nvmet_tcp_release_queue);
1412 }
1413 spin_unlock_bh(&queue->state_lock);
1414}
1415
1416static inline void nvmet_tcp_arm_queue_deadline(struct nvmet_tcp_queue *queue)
1417{
1418 queue->poll_end = jiffies + usecs_to_jiffies(idle_poll_period_usecs);
1419}
1420
1421static bool nvmet_tcp_check_queue_deadline(struct nvmet_tcp_queue *queue,
1422 int ops)
1423{
1424 if (!idle_poll_period_usecs)
1425 return false;
1426
1427 if (ops)
1428 nvmet_tcp_arm_queue_deadline(queue);
1429
1430 return !time_after(jiffies, queue->poll_end);
1431}
1432
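/*
 * Main per-queue worker: alternate between receiving PDUs and sending
 * queued responses until both directions make no progress or the overall
 * budget is exhausted.  When idle_poll_period_usecs is set, the worker
 * keeps rescheduling itself until the queue has been idle for that period,
 * which is intended to let NICs using interrupt moderation batch work.
 */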
1433static void nvmet_tcp_io_work(struct work_struct *w)
1434{
1435 struct nvmet_tcp_queue *queue =
1436 container_of(w, struct nvmet_tcp_queue, io_work);
1437 bool pending;
1438 int ret, ops = 0;
1439
1440 do {
1441 pending = false;
1442
1443 ret = nvmet_tcp_try_recv(queue, NVMET_TCP_RECV_BUDGET, &ops);
1444 if (ret > 0)
1445 pending = true;
1446 else if (ret < 0)
1447 return;
1448
1449 ret = nvmet_tcp_try_send(queue, NVMET_TCP_SEND_BUDGET, &ops);
1450 if (ret > 0)
1451 pending = true;
1452 else if (ret < 0)
1453 return;
1454
1455 } while (pending && ops < NVMET_TCP_IO_WORK_BUDGET);
1456
1457 /*
1458 * Requeue the worker if idle deadline period is in progress or any
1459 * ops activity was recorded during the do-while loop above.
1460 */
1461 if (nvmet_tcp_check_queue_deadline(queue, ops) || pending)
1462 queue_work_on(queue_cpu(queue), nvmet_tcp_wq, &queue->io_work);
1463}
1464
1465static int nvmet_tcp_alloc_cmd(struct nvmet_tcp_queue *queue,
1466 struct nvmet_tcp_cmd *c)
1467{
1468 u8 hdgst = nvmet_tcp_hdgst_len(queue);
1469
1470 c->queue = queue;
1471 c->req.port = queue->port->nport;
1472
1473 c->cmd_pdu = page_frag_alloc(&queue->pf_cache,
1474 sizeof(*c->cmd_pdu) + hdgst, GFP_KERNEL | __GFP_ZERO);
1475 if (!c->cmd_pdu)
1476 return -ENOMEM;
1477 c->req.cmd = &c->cmd_pdu->cmd;
1478
1479 c->rsp_pdu = page_frag_alloc(&queue->pf_cache,
1480 sizeof(*c->rsp_pdu) + hdgst, GFP_KERNEL | __GFP_ZERO);
1481 if (!c->rsp_pdu)
1482 goto out_free_cmd;
1483 c->req.cqe = &c->rsp_pdu->cqe;
1484
1485 c->data_pdu = page_frag_alloc(&queue->pf_cache,
1486 sizeof(*c->data_pdu) + hdgst, GFP_KERNEL | __GFP_ZERO);
1487 if (!c->data_pdu)
1488 goto out_free_rsp;
1489
1490 c->r2t_pdu = page_frag_alloc(&queue->pf_cache,
1491 sizeof(*c->r2t_pdu) + hdgst, GFP_KERNEL | __GFP_ZERO);
1492 if (!c->r2t_pdu)
1493 goto out_free_data;
1494
1495 if (queue->state == NVMET_TCP_Q_TLS_HANDSHAKE) {
1496 c->recv_msg.msg_control = c->recv_cbuf;
1497 c->recv_msg.msg_controllen = sizeof(c->recv_cbuf);
1498 }
1499 c->recv_msg.msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL;
1500
1501 list_add_tail(&c->entry, &queue->free_list);
1502
1503 return 0;
1504out_free_data:
1505 page_frag_free(c->data_pdu);
1506out_free_rsp:
1507 page_frag_free(c->rsp_pdu);
1508out_free_cmd:
1509 page_frag_free(c->cmd_pdu);
1510 return -ENOMEM;
1511}
1512
1513static void nvmet_tcp_free_cmd(struct nvmet_tcp_cmd *c)
1514{
1515 page_frag_free(c->r2t_pdu);
1516 page_frag_free(c->data_pdu);
1517 page_frag_free(c->rsp_pdu);
1518 page_frag_free(c->cmd_pdu);
1519}
1520
1521static int nvmet_tcp_alloc_cmds(struct nvmet_tcp_queue *queue)
1522{
1523 struct nvmet_tcp_cmd *cmds;
1524 int i, ret = -EINVAL, nr_cmds = queue->nr_cmds;
1525
1526 cmds = kvzalloc_objs(struct nvmet_tcp_cmd, nr_cmds);
1527 if (!cmds)
1528 goto out;
1529
1530 for (i = 0; i < nr_cmds; i++) {
1531 ret = nvmet_tcp_alloc_cmd(queue, cmds + i);
1532 if (ret)
1533 goto out_free;
1534 }
1535
1536 queue->cmds = cmds;
1537
1538 return 0;
1539out_free:
1540 while (--i >= 0)
1541 nvmet_tcp_free_cmd(cmds + i);
1542 kvfree(cmds);
1543out:
1544 return ret;
1545}
1546
1547static void nvmet_tcp_free_cmds(struct nvmet_tcp_queue *queue)
1548{
1549 struct nvmet_tcp_cmd *cmds = queue->cmds;
1550 int i;
1551
1552 for (i = 0; i < queue->nr_cmds; i++)
1553 nvmet_tcp_free_cmd(cmds + i);
1554
1555 nvmet_tcp_free_cmd(&queue->connect);
1556 kvfree(cmds);
1557}
1558
1559static void nvmet_tcp_restore_socket_callbacks(struct nvmet_tcp_queue *queue)
1560{
1561 struct socket *sock = queue->sock;
1562
1563 if (!queue->state_change)
1564 return;
1565
1566 write_lock_bh(&sock->sk->sk_callback_lock);
1567 sock->sk->sk_data_ready = queue->data_ready;
1568 sock->sk->sk_state_change = queue->state_change;
1569 sock->sk->sk_write_space = queue->write_space;
1570 sock->sk->sk_user_data = NULL;
1571 write_unlock_bh(&sock->sk->sk_callback_lock);
1572}
1573
1574static void nvmet_tcp_uninit_data_in_cmds(struct nvmet_tcp_queue *queue)
1575{
1576 struct nvmet_tcp_cmd *cmd = queue->cmds;
1577 int i;
1578
1579 for (i = 0; i < queue->nr_cmds; i++, cmd++) {
1580 if (nvmet_tcp_need_data_in(cmd))
1581 nvmet_req_uninit(&cmd->req);
1582 }
1583
1584 if (!queue->nr_cmds && nvmet_tcp_need_data_in(&queue->connect)) {
1585 /* failed in connect */
1586 nvmet_req_uninit(&queue->connect.req);
1587 }
1588}
1589
1590static void nvmet_tcp_free_cmd_data_in_buffers(struct nvmet_tcp_queue *queue)
1591{
1592 struct nvmet_tcp_cmd *cmd = queue->cmds;
1593 int i;
1594
1595 for (i = 0; i < queue->nr_cmds; i++, cmd++)
1596 nvmet_tcp_free_cmd_buffers(cmd);
1597 nvmet_tcp_free_cmd_buffers(&queue->connect);
1598}
1599
1600static void nvmet_tcp_release_queue_work(struct work_struct *w)
1601{
1602 struct nvmet_tcp_queue *queue =
1603 container_of(w, struct nvmet_tcp_queue, release_work);
1604
1605 mutex_lock(&nvmet_tcp_queue_mutex);
1606 list_del_init(&queue->queue_list);
1607 mutex_unlock(&nvmet_tcp_queue_mutex);
1608
1609 nvmet_tcp_restore_socket_callbacks(queue);
1610 cancel_delayed_work_sync(&queue->tls_handshake_tmo_work);
1611 cancel_work_sync(&queue->io_work);
1612 /* stop accepting incoming data */
1613 queue->rcv_state = NVMET_TCP_RECV_ERR;
1614
1615 nvmet_sq_put_tls_key(&queue->nvme_sq);
1616 nvmet_tcp_uninit_data_in_cmds(queue);
1617 nvmet_sq_destroy(&queue->nvme_sq);
1618 nvmet_cq_put(&queue->nvme_cq);
1619 cancel_work_sync(&queue->io_work);
1620 nvmet_tcp_free_cmd_data_in_buffers(queue);
1621 /* ->sock will be released by fput() */
1622 fput(queue->sock->file);
1623 nvmet_tcp_free_cmds(queue);
1624 ida_free(&nvmet_tcp_queue_ida, queue->idx);
1625 page_frag_cache_drain(&queue->pf_cache);
1626 kfree(queue);
1627}
1628
1629static void nvmet_tcp_data_ready(struct sock *sk)
1630{
1631 struct nvmet_tcp_queue *queue;
1632
1633 trace_sk_data_ready(sk);
1634
1635 read_lock_bh(&sk->sk_callback_lock);
1636 queue = sk->sk_user_data;
1637 if (likely(queue)) {
1638 if (queue->data_ready)
1639 queue->data_ready(sk);
1640 if (queue->state != NVMET_TCP_Q_TLS_HANDSHAKE)
1641 queue_work_on(queue_cpu(queue), nvmet_tcp_wq,
1642 &queue->io_work);
1643 }
1644 read_unlock_bh(&sk->sk_callback_lock);
1645}
1646
1647static void nvmet_tcp_write_space(struct sock *sk)
1648{
1649 struct nvmet_tcp_queue *queue;
1650
1651 read_lock_bh(&sk->sk_callback_lock);
1652 queue = sk->sk_user_data;
1653 if (unlikely(!queue))
1654 goto out;
1655
1656 if (unlikely(queue->state == NVMET_TCP_Q_CONNECTING)) {
1657 queue->write_space(sk);
1658 goto out;
1659 }
1660
1661 if (sk_stream_is_writeable(sk)) {
1662 clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
1663 queue_work_on(queue_cpu(queue), nvmet_tcp_wq, &queue->io_work);
1664 }
1665out:
1666 read_unlock_bh(&sk->sk_callback_lock);
1667}
1668
1669static void nvmet_tcp_state_change(struct sock *sk)
1670{
1671 struct nvmet_tcp_queue *queue;
1672
1673 read_lock_bh(&sk->sk_callback_lock);
1674 queue = sk->sk_user_data;
1675 if (!queue)
1676 goto done;
1677
1678 switch (sk->sk_state) {
1679 case TCP_FIN_WAIT2:
1680 case TCP_LAST_ACK:
1681 break;
1682 case TCP_FIN_WAIT1:
1683 case TCP_CLOSE_WAIT:
1684 case TCP_CLOSE:
1685 /* FALLTHRU */
1686 nvmet_tcp_schedule_release_queue(queue);
1687 break;
1688 default:
1689 pr_warn("queue %d unhandled state %d\n",
1690 queue->idx, sk->sk_state);
1691 }
1692done:
1693 read_unlock_bh(&sk->sk_callback_lock);
1694}
1695
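/*
 * Finish bringing up an accepted socket: record the local and peer
 * addresses, disable lingering, apply so_priority and the inherited type
 * of service, and install the nvmet_tcp_* socket callbacks while saving
 * the originals so nvmet_tcp_restore_socket_callbacks() can undo this at
 * teardown.  Done under sk_callback_lock so a socket that is already
 * closing is rejected with -ENOTCONN instead of being consumed.
 */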
1696static int nvmet_tcp_set_queue_sock(struct nvmet_tcp_queue *queue)
1697{
1698 struct socket *sock = queue->sock;
1699 struct inet_sock *inet = inet_sk(sock->sk);
1700 int ret;
1701
1702 ret = kernel_getsockname(sock,
1703 (struct sockaddr *)&queue->sockaddr);
1704 if (ret < 0)
1705 return ret;
1706
1707 ret = kernel_getpeername(sock,
1708 (struct sockaddr *)&queue->sockaddr_peer);
1709 if (ret < 0)
1710 return ret;
1711
1712 /*
1713 * Cleanup whatever is sitting in the TCP transmit queue on socket
1714 * close. This is done to prevent stale data from being sent should
1715 * the network connection be restored before TCP times out.
1716 */
1717 sock_no_linger(sock->sk);
1718
1719 if (so_priority > 0)
1720 sock_set_priority(sock->sk, so_priority);
1721
1722 /* Set socket type of service */
1723 if (inet->rcv_tos > 0)
1724 ip_sock_set_tos(sock->sk, inet->rcv_tos);
1725
1726 ret = 0;
1727 write_lock_bh(&sock->sk->sk_callback_lock);
1728 if (sock->sk->sk_state != TCP_ESTABLISHED) {
1729 /*
1730 * If the socket is already closing, don't even start
1731 * consuming it
1732 */
1733 ret = -ENOTCONN;
1734 } else {
1735 sock->sk->sk_user_data = queue;
1736 queue->data_ready = sock->sk->sk_data_ready;
1737 sock->sk->sk_data_ready = nvmet_tcp_data_ready;
1738 queue->state_change = sock->sk->sk_state_change;
1739 sock->sk->sk_state_change = nvmet_tcp_state_change;
1740 queue->write_space = sock->sk->sk_write_space;
1741 sock->sk->sk_write_space = nvmet_tcp_write_space;
1742 if (idle_poll_period_usecs)
1743 nvmet_tcp_arm_queue_deadline(queue);
1744 queue_work_on(queue_cpu(queue), nvmet_tcp_wq, &queue->io_work);
1745 }
1746 write_unlock_bh(&sock->sk->sk_callback_lock);
1747
1748 return ret;
1749}
1750
1751#ifdef CONFIG_NVME_TARGET_TCP_TLS
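/*
 * Peek at the first bytes on a newly accepted socket (MSG_PEEK, nothing is
 * consumed) to tell a cleartext ICReq from a TLS ClientHello: if a valid
 * ICReq header is seen and the port does not require a secure channel, the
 * connection falls back to normal processing instead of starting the TLS
 * handshake upcall.
 */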
1752static int nvmet_tcp_try_peek_pdu(struct nvmet_tcp_queue *queue)
1753{
1754 struct nvme_tcp_hdr *hdr = &queue->pdu.cmd.hdr;
1755 int len, ret;
1756 struct kvec iov = {
1757 .iov_base = (u8 *)&queue->pdu + queue->offset,
1758 .iov_len = sizeof(struct nvme_tcp_hdr),
1759 };
1760 char cbuf[CMSG_LEN(sizeof(char))] = {};
1761 struct msghdr msg = {
1762 .msg_control = cbuf,
1763 .msg_controllen = sizeof(cbuf),
1764 .msg_flags = MSG_PEEK,
1765 };
1766
1767 if (nvmet_port_secure_channel_required(queue->port->nport))
1768 return 0;
1769
1770 len = kernel_recvmsg(queue->sock, &msg, &iov, 1,
1771 iov.iov_len, msg.msg_flags);
1772 if (unlikely(len < 0)) {
1773 pr_debug("queue %d: peek error %d\n",
1774 queue->idx, len);
1775 return len;
1776 }
1777
1778 ret = nvmet_tcp_tls_record_ok(queue, &msg, cbuf);
1779 if (ret < 0)
1780 return ret;
1781
1782 if (len < sizeof(struct nvme_tcp_hdr)) {
1783 pr_debug("queue %d: short read, %d bytes missing\n",
1784 queue->idx, (int)iov.iov_len - len);
1785 return -EAGAIN;
1786 }
1787 pr_debug("queue %d: hdr type %d hlen %d plen %d size %d\n",
1788 queue->idx, hdr->type, hdr->hlen, hdr->plen,
1789 (int)sizeof(struct nvme_tcp_icreq_pdu));
1790 if (hdr->type == nvme_tcp_icreq &&
1791 hdr->hlen == sizeof(struct nvme_tcp_icreq_pdu) &&
1792 hdr->plen == cpu_to_le32(sizeof(struct nvme_tcp_icreq_pdu))) {
1793 pr_debug("queue %d: icreq detected\n",
1794 queue->idx);
1795 return len;
1796 }
1797 return 0;
1798}
1799
1800static int nvmet_tcp_tls_key_lookup(struct nvmet_tcp_queue *queue,
1801 key_serial_t peerid)
1802{
1803 struct key *tls_key = nvme_tls_key_lookup(peerid);
1804 int status = 0;
1805
1806 if (IS_ERR(tls_key)) {
1807 pr_warn("%s: queue %d failed to lookup key %x\n",
1808 __func__, queue->idx, peerid);
1809 spin_lock_bh(&queue->state_lock);
1810 queue->state = NVMET_TCP_Q_FAILED;
1811 spin_unlock_bh(&queue->state_lock);
1812 status = PTR_ERR(tls_key);
1813 } else {
1814 pr_debug("%s: queue %d using TLS PSK %x\n",
1815 __func__, queue->idx, peerid);
1816 queue->nvme_sq.tls_key = tls_key;
1817 }
1818 return status;
1819}
1820
1821static void nvmet_tcp_tls_handshake_done(void *data, int status,
1822 key_serial_t peerid)
1823{
1824 struct nvmet_tcp_queue *queue = data;
1825
1826 pr_debug("queue %d: TLS handshake done, key %x, status %d\n",
1827 queue->idx, peerid, status);
1828 spin_lock_bh(&queue->state_lock);
1829 if (WARN_ON(queue->state != NVMET_TCP_Q_TLS_HANDSHAKE)) {
1830 spin_unlock_bh(&queue->state_lock);
1831 return;
1832 }
1833 if (!status) {
1834 queue->tls_pskid = peerid;
1835 queue->state = NVMET_TCP_Q_CONNECTING;
1836 } else
1837 queue->state = NVMET_TCP_Q_FAILED;
1838 spin_unlock_bh(&queue->state_lock);
1839
1840 cancel_delayed_work_sync(&queue->tls_handshake_tmo_work);
1841
1842 if (!status)
1843 status = nvmet_tcp_tls_key_lookup(queue, peerid);
1844
1845 if (status)
1846 nvmet_tcp_schedule_release_queue(queue);
1847 else
1848 nvmet_tcp_set_queue_sock(queue);
1849 kref_put(&queue->kref, nvmet_tcp_release_queue);
1850}
1851
1852static void nvmet_tcp_tls_handshake_timeout(struct work_struct *w)
1853{
1854 struct nvmet_tcp_queue *queue = container_of(to_delayed_work(w),
1855 struct nvmet_tcp_queue, tls_handshake_tmo_work);
1856
1857 pr_warn("queue %d: TLS handshake timeout\n", queue->idx);
1858 /*
1859 * If tls_handshake_cancel() fails we've lost the race with
1860 * nvmet_tcp_tls_handshake_done() */
1861 if (!tls_handshake_cancel(queue->sock->sk))
1862 return;
1863 spin_lock_bh(&queue->state_lock);
1864 if (WARN_ON(queue->state != NVMET_TCP_Q_TLS_HANDSHAKE)) {
1865 spin_unlock_bh(&queue->state_lock);
1866 return;
1867 }
1868 queue->state = NVMET_TCP_Q_FAILED;
1869 spin_unlock_bh(&queue->state_lock);
1870 nvmet_tcp_schedule_release_queue(queue);
1871 kref_put(&queue->kref, nvmet_tcp_release_queue);
1872}
1873
1874static int nvmet_tcp_tls_handshake(struct nvmet_tcp_queue *queue)
1875{
1876 int ret = -EOPNOTSUPP;
1877 struct tls_handshake_args args;
1878
1879 if (queue->state != NVMET_TCP_Q_TLS_HANDSHAKE) {
1880 pr_warn("cannot start TLS in state %d\n", queue->state);
1881 return -EINVAL;
1882 }
1883
1884 kref_get(&queue->kref);
1885 pr_debug("queue %d: TLS ServerHello\n", queue->idx);
1886 memset(&args, 0, sizeof(args));
1887 args.ta_sock = queue->sock;
1888 args.ta_done = nvmet_tcp_tls_handshake_done;
1889 args.ta_data = queue;
1890 args.ta_keyring = key_serial(queue->port->nport->keyring);
1891 args.ta_timeout_ms = tls_handshake_timeout * 1000;
1892
1893 ret = tls_server_hello_psk(&args, GFP_KERNEL);
1894 if (ret) {
1895 kref_put(&queue->kref, nvmet_tcp_release_queue);
1896 pr_err("failed to start TLS, err=%d\n", ret);
1897 } else {
1898 queue_delayed_work(nvmet_wq, &queue->tls_handshake_tmo_work,
1899 tls_handshake_timeout * HZ);
1900 }
1901 return ret;
1902}
1903#else
1904static void nvmet_tcp_tls_handshake_timeout(struct work_struct *w) {}
1905#endif
1906
1907static void nvmet_tcp_alloc_queue(struct nvmet_tcp_port *port,
1908 struct socket *newsock)
1909{
1910 struct nvmet_tcp_queue *queue;
1911 struct file *sock_file = NULL;
1912 int ret;
1913
1914 queue = kzalloc_obj(*queue);
1915 if (!queue) {
1916 ret = -ENOMEM;
1917 goto out_release;
1918 }
1919
1920 INIT_WORK(&queue->release_work, nvmet_tcp_release_queue_work);
1921 INIT_WORK(&queue->io_work, nvmet_tcp_io_work);
1922 kref_init(&queue->kref);
1923 queue->sock = newsock;
1924 queue->port = port;
1925 queue->nr_cmds = 0;
1926 spin_lock_init(&queue->state_lock);
1927 if (queue->port->nport->disc_addr.tsas.tcp.sectype ==
1928 NVMF_TCP_SECTYPE_TLS13)
1929 queue->state = NVMET_TCP_Q_TLS_HANDSHAKE;
1930 else
1931 queue->state = NVMET_TCP_Q_CONNECTING;
1932 INIT_LIST_HEAD(&queue->free_list);
1933 init_llist_head(&queue->resp_list);
1934 INIT_LIST_HEAD(&queue->resp_send_list);
1935
1936 sock_file = sock_alloc_file(queue->sock, O_CLOEXEC, NULL);
1937 if (IS_ERR(sock_file)) {
1938 ret = PTR_ERR(sock_file);
1939 goto out_free_queue;
1940 }
1941
1942 queue->idx = ida_alloc(&nvmet_tcp_queue_ida, GFP_KERNEL);
1943 if (queue->idx < 0) {
1944 ret = queue->idx;
1945 goto out_sock;
1946 }
1947
1948 ret = nvmet_tcp_alloc_cmd(queue, &queue->connect);
1949 if (ret)
1950 goto out_ida_remove;
1951
1952 nvmet_cq_init(&queue->nvme_cq);
1953 ret = nvmet_sq_init(&queue->nvme_sq, &queue->nvme_cq);
1954 if (ret)
1955 goto out_free_connect;
1956
1957 nvmet_prepare_receive_pdu(queue);
1958
1959 mutex_lock(&nvmet_tcp_queue_mutex);
1960 list_add_tail(&queue->queue_list, &nvmet_tcp_queue_list);
1961 mutex_unlock(&nvmet_tcp_queue_mutex);
1962
1963 INIT_DELAYED_WORK(&queue->tls_handshake_tmo_work,
1964 nvmet_tcp_tls_handshake_timeout);
1965#ifdef CONFIG_NVME_TARGET_TCP_TLS
1966 if (queue->state == NVMET_TCP_Q_TLS_HANDSHAKE) {
1967 struct sock *sk = queue->sock->sk;
1968
1969 /* Restore the default callbacks before starting upcall */
1970 write_lock_bh(&sk->sk_callback_lock);
1971 sk->sk_user_data = NULL;
1972 sk->sk_data_ready = port->data_ready;
1973 write_unlock_bh(&sk->sk_callback_lock);
1974 if (!nvmet_tcp_try_peek_pdu(queue)) {
1975 if (!nvmet_tcp_tls_handshake(queue))
1976 return;
1977 /* TLS handshake failed, terminate the connection */
1978 goto out_destroy_sq;
1979 }
1980 /* Not a TLS connection, continue with normal processing */
1981 queue->state = NVMET_TCP_Q_CONNECTING;
1982 }
1983#endif
1984
1985 ret = nvmet_tcp_set_queue_sock(queue);
1986 if (ret)
1987 goto out_destroy_sq;
1988
1989 return;
1990out_destroy_sq:
1991 mutex_lock(&nvmet_tcp_queue_mutex);
1992 list_del_init(&queue->queue_list);
1993 mutex_unlock(&nvmet_tcp_queue_mutex);
1994 nvmet_sq_destroy(&queue->nvme_sq);
1995out_free_connect:
1996 nvmet_cq_put(&queue->nvme_cq);
1997 nvmet_tcp_free_cmd(&queue->connect);
1998out_ida_remove:
1999 ida_free(&nvmet_tcp_queue_ida, queue->idx);
2000out_sock:
2001 fput(queue->sock->file);
2002out_free_queue:
2003 kfree(queue);
2004out_release:
2005 pr_err("failed to allocate queue, error %d\n", ret);
2006 if (!sock_file)
2007 sock_release(newsock);
2008}
2009
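/*
 * Accept work for the listening socket: loop over kernel_accept() in
 * non-blocking mode until it returns -EAGAIN, handing every new socket
 * to nvmet_tcp_alloc_queue().
 */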
2010static void nvmet_tcp_accept_work(struct work_struct *w)
2011{
2012 struct nvmet_tcp_port *port =
2013 container_of(w, struct nvmet_tcp_port, accept_work);
2014 struct socket *newsock;
2015 int ret;
2016
2017 while (true) {
2018 ret = kernel_accept(port->sock, &newsock, O_NONBLOCK);
2019 if (ret < 0) {
2020 if (ret != -EAGAIN)
2021 pr_warn("failed to accept err=%d\n", ret);
2022 return;
2023 }
2024 nvmet_tcp_alloc_queue(port, newsock);
2025 }
2026}
2027
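/*
 * data_ready callback of the listening socket: defer the actual accept
 * to nvmet_wq instead of doing it from the (softirq) callback itself.
 */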
2028static void nvmet_tcp_listen_data_ready(struct sock *sk)
2029{
2030 struct nvmet_tcp_port *port;
2031
2032 trace_sk_data_ready(sk);
2033
2034 if (sk->sk_state != TCP_LISTEN)
2035 return;
2036
2037 read_lock_bh(&sk->sk_callback_lock);
2038 port = sk->sk_user_data;
2039 if (port)
2040 queue_work(nvmet_wq, &port->accept_work);
2041 read_unlock_bh(&sk->sk_callback_lock);
2042}
2043
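/*
 * Enable a configfs port: parse the discovery address, create and bind
 * the listening socket, start listening with a backlog of
 * NVMET_TCP_BACKLOG, and redirect its data_ready callback to the
 * accept work above.
 */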
2044static int nvmet_tcp_add_port(struct nvmet_port *nport)
2045{
2046 struct nvmet_tcp_port *port;
2047 __kernel_sa_family_t af;
2048 int ret;
2049
2050	port = kzalloc(sizeof(*port), GFP_KERNEL);
2051 if (!port)
2052 return -ENOMEM;
2053
2054 switch (nport->disc_addr.adrfam) {
2055 case NVMF_ADDR_FAMILY_IP4:
2056 af = AF_INET;
2057 break;
2058 case NVMF_ADDR_FAMILY_IP6:
2059 af = AF_INET6;
2060 break;
2061 default:
2062 pr_err("address family %d not supported\n",
2063 nport->disc_addr.adrfam);
2064 ret = -EINVAL;
2065 goto err_port;
2066 }
2067
2068 ret = inet_pton_with_scope(&init_net, af, nport->disc_addr.traddr,
2069 nport->disc_addr.trsvcid, &port->addr);
2070 if (ret) {
2071 pr_err("malformed ip/port passed: %s:%s\n",
2072 nport->disc_addr.traddr, nport->disc_addr.trsvcid);
2073 goto err_port;
2074 }
2075
2076 port->nport = nport;
2077 INIT_WORK(&port->accept_work, nvmet_tcp_accept_work);
2078 if (port->nport->inline_data_size < 0)
2079 port->nport->inline_data_size = NVMET_TCP_DEF_INLINE_DATA_SIZE;
2080
2081 ret = sock_create(port->addr.ss_family, SOCK_STREAM,
2082 IPPROTO_TCP, &port->sock);
2083 if (ret) {
2084 pr_err("failed to create a socket\n");
2085 goto err_port;
2086 }
2087
2088 port->sock->sk->sk_user_data = port;
2089 port->data_ready = port->sock->sk->sk_data_ready;
2090 port->sock->sk->sk_data_ready = nvmet_tcp_listen_data_ready;
2091 sock_set_reuseaddr(port->sock->sk);
2092 tcp_sock_set_nodelay(port->sock->sk);
2093 if (so_priority > 0)
2094 sock_set_priority(port->sock->sk, so_priority);
2095
2096 ret = kernel_bind(port->sock, (struct sockaddr_unsized *)&port->addr,
2097 sizeof(port->addr));
2098 if (ret) {
2099 pr_err("failed to bind port socket %d\n", ret);
2100 goto err_sock;
2101 }
2102
2103 ret = kernel_listen(port->sock, NVMET_TCP_BACKLOG);
2104 if (ret) {
2105		pr_err("failed to listen on port socket, err=%d\n", ret);
2106 goto err_sock;
2107 }
2108
2109 nport->priv = port;
2110 pr_info("enabling port %d (%pISpc)\n",
2111 le16_to_cpu(nport->disc_addr.portid), &port->addr);
2112
2113 return 0;
2114
2115err_sock:
2116 sock_release(port->sock);
2117err_port:
2118 kfree(port);
2119 return ret;
2120}
2121
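/* Force-close the sockets of all queues accepted on this port. */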
2122static void nvmet_tcp_destroy_port_queues(struct nvmet_tcp_port *port)
2123{
2124 struct nvmet_tcp_queue *queue;
2125
2126 mutex_lock(&nvmet_tcp_queue_mutex);
2127 list_for_each_entry(queue, &nvmet_tcp_queue_list, queue_list)
2128 if (queue->port == port)
2129 kernel_sock_shutdown(queue->sock, SHUT_RDWR);
2130 mutex_unlock(&nvmet_tcp_queue_mutex);
2131}
2132
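/*
 * Disable a port: restore the listen socket's original callbacks,
 * flush any pending accept work, close the queues that never made it
 * to a controller, and release the listening socket.
 */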
2133static void nvmet_tcp_remove_port(struct nvmet_port *nport)
2134{
2135 struct nvmet_tcp_port *port = nport->priv;
2136
2137 write_lock_bh(&port->sock->sk->sk_callback_lock);
2138 port->sock->sk->sk_data_ready = port->data_ready;
2139 port->sock->sk->sk_user_data = NULL;
2140 write_unlock_bh(&port->sock->sk->sk_callback_lock);
2141 cancel_work_sync(&port->accept_work);
2142 /*
2143	 * Destroy the remaining queues, which do not belong to any
2144	 * controller yet.
2145 */
2146 nvmet_tcp_destroy_port_queues(port);
2147
2148 sock_release(port->sock);
2149 kfree(port);
2150}
2151
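/* Force-close the sockets of every queue owned by this controller. */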
2152static void nvmet_tcp_delete_ctrl(struct nvmet_ctrl *ctrl)
2153{
2154 struct nvmet_tcp_queue *queue;
2155
2156 mutex_lock(&nvmet_tcp_queue_mutex);
2157 list_for_each_entry(queue, &nvmet_tcp_queue_list, queue_list)
2158 if (queue->nvme_sq.ctrl == ctrl)
2159 kernel_sock_shutdown(queue->sock, SHUT_RDWR);
2160 mutex_unlock(&nvmet_tcp_queue_mutex);
2161}
2162
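/*
 * Called on fabrics connect.  For the admin queue (qid 0) refuse with
 * NVME_SC_CONNECT_CTRL_BUSY while more than NVMET_TCP_BACKLOG queues
 * of this controller are still disconnecting; otherwise allocate the
 * per-queue command array at twice the SQ size.
 */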
2163static u16 nvmet_tcp_install_queue(struct nvmet_sq *sq)
2164{
2165 struct nvmet_tcp_queue *queue =
2166 container_of(sq, struct nvmet_tcp_queue, nvme_sq);
2167
2168 if (sq->qid == 0) {
2169 struct nvmet_tcp_queue *q;
2170 int pending = 0;
2171
2172 /* Check for pending controller teardown */
2173 mutex_lock(&nvmet_tcp_queue_mutex);
2174 list_for_each_entry(q, &nvmet_tcp_queue_list, queue_list) {
2175 if (q->nvme_sq.ctrl == sq->ctrl &&
2176 q->state == NVMET_TCP_Q_DISCONNECTING)
2177 pending++;
2178 }
2179 mutex_unlock(&nvmet_tcp_queue_mutex);
2180 if (pending > NVMET_TCP_BACKLOG)
2181 return NVME_SC_CONNECT_CTRL_BUSY;
2182 }
2183
2184 queue->nr_cmds = sq->size * 2;
2185 if (nvmet_tcp_alloc_cmds(queue)) {
2186 queue->nr_cmds = 0;
2187 return NVME_SC_INTERNAL;
2188 }
2189 return 0;
2190}
2191
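/*
 * Discovery log page traddr: for a wildcard listener report the local
 * address of the queue that received the request, otherwise the
 * configured address.
 */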
2192static void nvmet_tcp_disc_port_addr(struct nvmet_req *req,
2193 struct nvmet_port *nport, char *traddr)
2194{
2195 struct nvmet_tcp_port *port = nport->priv;
2196
2197 if (inet_addr_is_any(&port->addr)) {
2198 struct nvmet_tcp_cmd *cmd =
2199 container_of(req, struct nvmet_tcp_cmd, req);
2200 struct nvmet_tcp_queue *queue = cmd->queue;
2201
2202 sprintf(traddr, "%pISc", (struct sockaddr *)&queue->sockaddr);
2203 } else {
2204 memcpy(traddr, nport->disc_addr.traddr, NVMF_TRADDR_SIZE);
2205 }
2206}
2207
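/*
 * Report the host's (peer) address as seen on the admin queue; fails
 * with -EINVAL if the peer address was never recorded.
 */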
2208static ssize_t nvmet_tcp_host_port_addr(struct nvmet_ctrl *ctrl,
2209 char *traddr, size_t traddr_len)
2210{
2211 struct nvmet_sq *sq = ctrl->sqs[0];
2212 struct nvmet_tcp_queue *queue =
2213 container_of(sq, struct nvmet_tcp_queue, nvme_sq);
2214
2215 if (queue->sockaddr_peer.ss_family == AF_UNSPEC)
2216 return -EINVAL;
2217 return snprintf(traddr, traddr_len, "%pISc",
2218 (struct sockaddr *)&queue->sockaddr_peer);
2219}
2220
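/* Transport ops registered with the nvmet core for NVMF_TRTYPE_TCP. */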
2221static const struct nvmet_fabrics_ops nvmet_tcp_ops = {
2222 .owner = THIS_MODULE,
2223 .type = NVMF_TRTYPE_TCP,
2224 .msdbd = 1,
2225 .add_port = nvmet_tcp_add_port,
2226 .remove_port = nvmet_tcp_remove_port,
2227 .queue_response = nvmet_tcp_queue_response,
2228 .delete_ctrl = nvmet_tcp_delete_ctrl,
2229 .install_queue = nvmet_tcp_install_queue,
2230 .disc_traddr = nvmet_tcp_disc_port_addr,
2231 .host_traddr = nvmet_tcp_host_port_addr,
2232};
2233
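/*
 * Module init: create the high-priority, memory-reclaim-safe I/O
 * workqueue and register the TCP transport with the nvmet core.
 */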
2234static int __init nvmet_tcp_init(void)
2235{
2236 int ret;
2237
2238 nvmet_tcp_wq = alloc_workqueue("nvmet_tcp_wq",
2239 WQ_MEM_RECLAIM | WQ_HIGHPRI | WQ_PERCPU, 0);
2240 if (!nvmet_tcp_wq)
2241 return -ENOMEM;
2242
2243 ret = nvmet_register_transport(&nvmet_tcp_ops);
2244 if (ret)
2245 goto err;
2246
2247 return 0;
2248err:
2249 destroy_workqueue(nvmet_tcp_wq);
2250 return ret;
2251}
2252
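/*
 * Module exit: unregister the transport, shut down any queues that are
 * still around, flush their release work, then tear down the workqueue
 * and the queue-index IDA.
 */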
2253static void __exit nvmet_tcp_exit(void)
2254{
2255 struct nvmet_tcp_queue *queue;
2256
2257 nvmet_unregister_transport(&nvmet_tcp_ops);
2258
2259 flush_workqueue(nvmet_wq);
2260 mutex_lock(&nvmet_tcp_queue_mutex);
2261 list_for_each_entry(queue, &nvmet_tcp_queue_list, queue_list)
2262 kernel_sock_shutdown(queue->sock, SHUT_RDWR);
2263 mutex_unlock(&nvmet_tcp_queue_mutex);
2264 flush_workqueue(nvmet_wq);
2265
2266 destroy_workqueue(nvmet_tcp_wq);
2267 ida_destroy(&nvmet_tcp_queue_ida);
2268}
2269
2270module_init(nvmet_tcp_init);
2271module_exit(nvmet_tcp_exit);
2272
2273MODULE_DESCRIPTION("NVMe target TCP transport driver");
2274MODULE_LICENSE("GPL v2");
2275MODULE_ALIAS("nvmet-transport-3"); /* 3 == NVMF_TRTYPE_TCP */