Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

nvmet-tcp: remove redundant calls to nvmet_tcp_fatal_error()

Executing nvmet_tcp_fatal_error() is generally the responsibility
of the caller (nvmet_tcp_try_recv); all other functions should
just return the error code.

Remove the nvmet_tcp_fatal_error() function; it is no longer
needed.

Reviewed-by: Hannes Reinecke <hare@suse.de>
Reviewed-by: Chaitanya Kulkarni <kch@nvidia.com>
Signed-off-by: Maurizio Lombardi <mlombard@redhat.com>
Signed-off-by: Keith Busch <kbusch@kernel.org>

authored by

Maurizio Lombardi and committed by
Keith Busch
bad44c9c ea8e356a

+7 -30
+7 -30
drivers/nvme/target/tcp.c
··· 349 349 cmd->req.sg = NULL; 350 350 } 351 351 352 - static void nvmet_tcp_fatal_error(struct nvmet_tcp_queue *queue); 353 - 354 352 static int nvmet_tcp_build_pdu_iovec(struct nvmet_tcp_cmd *cmd) 355 353 { 356 354 struct bio_vec *iov = cmd->iov; ··· 392 394 return 0; 393 395 } 394 396 395 - static void nvmet_tcp_fatal_error(struct nvmet_tcp_queue *queue) 396 - { 397 - queue->rcv_state = NVMET_TCP_RECV_ERR; 398 - if (queue->nvme_sq.ctrl) 399 - nvmet_ctrl_fatal_error(queue->nvme_sq.ctrl); 400 - else 401 - kernel_sock_shutdown(queue->sock, SHUT_RDWR); 402 - } 403 - 404 397 static void nvmet_tcp_socket_error(struct nvmet_tcp_queue *queue, int status) 405 398 { 406 399 queue->rcv_state = NVMET_TCP_RECV_ERR; 407 - if (status == -EPIPE || status == -ECONNRESET) 400 + if (status == -EPIPE || status == -ECONNRESET || !queue->nvme_sq.ctrl) 408 401 kernel_sock_shutdown(queue->sock, SHUT_RDWR); 409 402 else 410 - nvmet_tcp_fatal_error(queue); 403 + nvmet_ctrl_fatal_error(queue->nvme_sq.ctrl); 411 404 } 412 405 413 406 static int nvmet_tcp_map_data(struct nvmet_tcp_cmd *cmd) ··· 874 885 if (le32_to_cpu(icreq->hdr.plen) != sizeof(struct nvme_tcp_icreq_pdu)) { 875 886 pr_err("bad nvme-tcp pdu length (%d)\n", 876 887 le32_to_cpu(icreq->hdr.plen)); 877 - nvmet_tcp_fatal_error(queue); 878 888 return -EPROTO; 879 889 } 880 890 ··· 939 951 ret = nvmet_tcp_map_data(cmd); 940 952 if (unlikely(ret)) { 941 953 pr_err("queue %d: failed to map data\n", queue->idx); 942 - nvmet_tcp_fatal_error(queue); 943 954 return -EPROTO; 944 955 } 945 956 ··· 1011 1024 1012 1025 err_proto: 1013 1026 /* FIXME: use proper transport errors */ 1014 - nvmet_tcp_fatal_error(queue); 1015 1027 return -EPROTO; 1016 1028 } 1017 1029 ··· 1025 1039 if (hdr->type != nvme_tcp_icreq) { 1026 1040 pr_err("unexpected pdu type (%d) before icreq\n", 1027 1041 hdr->type); 1028 - nvmet_tcp_fatal_error(queue); 1029 1042 return -EPROTO; 1030 1043 } 1031 1044 return nvmet_tcp_handle_icreq(queue); ··· 1033 1048 if 
(unlikely(hdr->type == nvme_tcp_icreq)) { 1034 1049 pr_err("queue %d: received icreq pdu in state %d\n", 1035 1050 queue->idx, queue->state); 1036 - nvmet_tcp_fatal_error(queue); 1037 1051 return -EPROTO; 1038 1052 } 1039 1053 ··· 1049 1065 pr_err("queue %d: out of commands (%d) send_list_len: %d, opcode: %d", 1050 1066 queue->idx, queue->nr_cmds, queue->send_list_len, 1051 1067 nvme_cmd->common.opcode); 1052 - nvmet_tcp_fatal_error(queue); 1053 1068 return -ENOMEM; 1054 1069 } 1055 1070 ··· 1069 1086 if (unlikely(ret)) { 1070 1087 pr_err("queue %d: failed to map data\n", queue->idx); 1071 1088 if (nvmet_tcp_has_inline_data(queue->cmd)) 1072 - nvmet_tcp_fatal_error(queue); 1073 - else 1074 - nvmet_req_complete(req, ret); 1089 + return -EPROTO; 1090 + 1091 + nvmet_req_complete(req, ret); 1075 1092 ret = -EAGAIN; 1076 1093 goto out; 1077 1094 } ··· 1194 1211 1195 1212 if (unlikely(!nvmet_tcp_pdu_valid(hdr->type))) { 1196 1213 pr_err("unexpected pdu type %d\n", hdr->type); 1197 - nvmet_tcp_fatal_error(queue); 1198 1214 return -EIO; 1199 1215 } 1200 1216 ··· 1207 1225 } 1208 1226 1209 1227 if (queue->hdr_digest && 1210 - nvmet_tcp_verify_hdgst(queue, &queue->pdu, hdr->hlen)) { 1211 - nvmet_tcp_fatal_error(queue); /* fatal */ 1228 + nvmet_tcp_verify_hdgst(queue, &queue->pdu, hdr->hlen)) 1212 1229 return -EPROTO; 1213 - } 1214 1230 1215 1231 if (queue->data_digest && 1216 - nvmet_tcp_check_ddgst(queue, &queue->pdu)) { 1217 - nvmet_tcp_fatal_error(queue); /* fatal */ 1232 + nvmet_tcp_check_ddgst(queue, &queue->pdu)) 1218 1233 return -EPROTO; 1219 - } 1220 1234 1221 1235 return nvmet_tcp_done_recv_pdu(queue); 1222 1236 } ··· 1298 1320 if (!(cmd->flags & NVMET_TCP_F_INIT_FAILED)) 1299 1321 nvmet_req_uninit(&cmd->req); 1300 1322 nvmet_tcp_free_cmd_buffers(cmd); 1301 - nvmet_tcp_fatal_error(queue); 1302 1323 ret = -EPROTO; 1303 1324 goto out; 1304 1325 }