Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

nvmet-tcp: propagate nvmet_tcp_build_pdu_iovec() errors to its callers

Currently, when nvmet_tcp_build_pdu_iovec() detects an out-of-bounds
PDU length or offset, it triggers nvmet_tcp_fatal_error(cmd->queue)
and returns early. However, because the function returns void, the
callers are entirely unaware that a fatal error has occurred and
that cmd->recv_msg.msg_iter was left uninitialized.

Callers such as nvmet_tcp_handle_h2c_data_pdu() proceed to blindly
overwrite the queue state with queue->rcv_state = NVMET_TCP_RECV_DATA.
Consequently, the socket receiving loop may attempt to read incoming
network data into the uninitialized iterator.

Fix this by shifting the error handling responsibility to the callers.
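
For illustration only, here is a minimal standalone sketch of the failure
mode and the fix. The names (build_iovec, struct cmd, iovec_built) are
hypothetical and this is not the driver code: the point is only that a
helper reporting failure through a void return gives its caller no chance
to react, while returning an error code lets the caller stop before
touching half-initialized state.

/* sketch.c - hypothetical illustration, not the nvmet-tcp driver code */
#include <stdio.h>
#include <errno.h>

struct cmd {
	int sg_cnt;       /* number of scatter-gather entries */
	int sg_idx;       /* index derived from the receive offset */
	int iovec_built;  /* nonzero once the receive iovec is valid */
};

/* Before: validation failure is swallowed; the caller cannot tell. */
static void build_iovec_void(struct cmd *c)
{
	if (!c->sg_cnt || c->sg_idx >= c->sg_cnt)
		return;  /* error lost; iovec left uninitialized */
	c->iovec_built = 1;
}

/* After: the out-of-bounds condition is propagated as -EPROTO. */
static int build_iovec(struct cmd *c)
{
	if (!c->sg_cnt || c->sg_idx >= c->sg_cnt)
		return -EPROTO;
	c->iovec_built = 1;
	return 0;
}

int main(void)
{
	struct cmd bad = { .sg_cnt = 2, .sg_idx = 5, .iovec_built = 0 };

	/* The void variant returns normally; a caller would then proceed
	 * to read network data into an iovec that was never built. */
	build_iovec_void(&bad);
	printf("void variant: iovec_built=%d (caller unaware)\n",
	       bad.iovec_built);

	/* The int variant lets the caller abort the receive path. */
	if (build_iovec(&bad) < 0)
		fprintf(stderr, "protocol error: aborting receive\n");
	return 0;
}

The diff below applies the same division of labor: nvmet_tcp_build_pdu_iovec()
now returns -EPROTO on out-of-bounds input, and each caller logs the failure
and either returns the error or jumps to its err_proto path.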

Fixes: 52a0a9854934 ("nvmet-tcp: add bounds checks in nvmet_tcp_build_pdu_iovec")
Reviewed-by: Hannes Reinecke <hare@suse.de>
Reviewed-by: Yunje Shin <ioerts@kookmin.ac.kr>
Reviewed-by: Chaitanya Kulkarni <kch@nvidia.com>
Signed-off-by: Maurizio Lombardi <mlombard@redhat.com>
Signed-off-by: Keith Busch <kbusch@kernel.org>

authored by Maurizio Lombardi, committed by Keith Busch
ea8e356a 23528aa3

+29 -22
drivers/nvme/target/tcp.c
···
 
 static void nvmet_tcp_fatal_error(struct nvmet_tcp_queue *queue);
 
-static void nvmet_tcp_build_pdu_iovec(struct nvmet_tcp_cmd *cmd)
+static int nvmet_tcp_build_pdu_iovec(struct nvmet_tcp_cmd *cmd)
 {
 	struct bio_vec *iov = cmd->iov;
 	struct scatterlist *sg;
···
 	offset = cmd->rbytes_done;
 	cmd->sg_idx = offset / PAGE_SIZE;
 	sg_offset = offset % PAGE_SIZE;
-	if (!cmd->req.sg_cnt || cmd->sg_idx >= cmd->req.sg_cnt) {
-		nvmet_tcp_fatal_error(cmd->queue);
-		return;
-	}
+	if (!cmd->req.sg_cnt || cmd->sg_idx >= cmd->req.sg_cnt)
+		return -EPROTO;
+
 	sg = &cmd->req.sg[cmd->sg_idx];
 	sg_remaining = cmd->req.sg_cnt - cmd->sg_idx;
 
 	while (length) {
-		if (!sg_remaining) {
-			nvmet_tcp_fatal_error(cmd->queue);
-			return;
-		}
-		if (!sg->length || sg->length <= sg_offset) {
-			nvmet_tcp_fatal_error(cmd->queue);
-			return;
-		}
+		if (!sg_remaining)
+			return -EPROTO;
+
+		if (!sg->length || sg->length <= sg_offset)
+			return -EPROTO;
+
 		u32 iov_len = min_t(u32, length, sg->length - sg_offset);
 
 		bvec_set_page(iov, sg_page(sg), iov_len,
···
 
 	iov_iter_bvec(&cmd->recv_msg.msg_iter, ITER_DEST, cmd->iov,
 		nr_pages, cmd->pdu_len);
+	return 0;
 }
 
 static void nvmet_tcp_fatal_error(struct nvmet_tcp_queue *queue)
···
 	return 0;
 }
 
-static void nvmet_tcp_handle_req_failure(struct nvmet_tcp_queue *queue,
+static int nvmet_tcp_handle_req_failure(struct nvmet_tcp_queue *queue,
 		struct nvmet_tcp_cmd *cmd, struct nvmet_req *req)
 {
 	size_t data_len = le32_to_cpu(req->cmd->common.dptr.sgl.length);
···
 	if (!nvme_is_write(cmd->req.cmd) || !data_len ||
 	    data_len > cmd->req.port->inline_data_size) {
 		nvmet_prepare_receive_pdu(queue);
-		return;
+		return 0;
 	}
 
 	ret = nvmet_tcp_map_data(cmd);
 	if (unlikely(ret)) {
 		pr_err("queue %d: failed to map data\n", queue->idx);
 		nvmet_tcp_fatal_error(queue);
-		return;
+		return -EPROTO;
 	}
 
 	queue->rcv_state = NVMET_TCP_RECV_DATA;
-	nvmet_tcp_build_pdu_iovec(cmd);
 	cmd->flags |= NVMET_TCP_F_INIT_FAILED;
+	ret = nvmet_tcp_build_pdu_iovec(cmd);
+	if (unlikely(ret))
+		pr_err("queue %d: failed to build PDU iovec\n", queue->idx);
+
+	return ret;
 }
 
 static int nvmet_tcp_handle_h2c_data_pdu(struct nvmet_tcp_queue *queue)
···
 		goto err_proto;
 	}
 	cmd->pdu_recv = 0;
-	nvmet_tcp_build_pdu_iovec(cmd);
+	if (unlikely(nvmet_tcp_build_pdu_iovec(cmd))) {
+		pr_err("queue %d: failed to build PDU iovec\n", queue->idx);
+		goto err_proto;
+	}
 	queue->cmd = cmd;
 	queue->rcv_state = NVMET_TCP_RECV_DATA;
···
 			le32_to_cpu(req->cmd->common.dptr.sgl.length),
 			le16_to_cpu(req->cqe->status));
 
-		nvmet_tcp_handle_req_failure(queue, queue->cmd, req);
-		return 0;
+		return nvmet_tcp_handle_req_failure(queue, queue->cmd, req);
 	}
 
 	ret = nvmet_tcp_map_data(queue->cmd);
···
 	if (nvmet_tcp_need_data_in(queue->cmd)) {
 		if (nvmet_tcp_has_inline_data(queue->cmd)) {
 			queue->rcv_state = NVMET_TCP_RECV_DATA;
-			nvmet_tcp_build_pdu_iovec(queue->cmd);
-			return 0;
+			ret = nvmet_tcp_build_pdu_iovec(queue->cmd);
+			if (unlikely(ret))
+				pr_err("queue %d: failed to build PDU iovec\n",
+				       queue->idx);
+			return ret;
 		}
 		/* send back R2T */
 		nvmet_tcp_queue_response(&queue->cmd->req);