Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

Merge tag 'v7.0-rc-part1-ksmbd-and-smbdirect-fixes' of git://git.samba.org/ksmbd

Pull smb server and smbdirect updates from Steve French:

- Fix tcp connection leak

- Fix potential use after free when freeing multichannel

- Fix locking problem in showing channel list

- Locking improvement for tree connection

- Fix infinite loop when signing errors occur

- Add /proc interface for monitoring server state

- Fixes to avoid mixing iWarp and InfiniBand/RoCEv1/RoCEv2
port ranges used for smbdirect

- Fixes for smbdirect credit handling problems; these make
the connections more reliable

* tag 'v7.0-rc-part1-ksmbd-and-smbdirect-fixes' of git://git.samba.org/ksmbd: (32 commits)
ksmbd: fix non-IPv6 build
ksmbd: convert tree_conns_lock to rw_semaphore
ksmbd: fix missing chann_lock while iterating session channel list
ksmbd: add chann_lock to protect ksmbd_chann_list xarray
smb: server: correct value for smb_direct_max_fragmented_recv_size
smb: client: correct value for smbd_max_fragmented_recv_size
smb: server: fix leak of active_num_conn in ksmbd_tcp_new_connection()
ksmbd: add procfs interface for runtime monitoring and statistics
ksmbd: fix infinite loop caused by next_smb2_rcv_hdr_off reset in error paths
smb: server: make use of rdma_restrict_node_type()
smb: client: make use of rdma_restrict_node_type()
RDMA/core: introduce rdma_restrict_node_type()
smb: client: let send_done handle a completion without IB_SEND_SIGNALED
smb: client: let smbd_post_send_negotiate_req() use smbd_post_send()
smb: client: fix last send credit problem causing disconnects
smb: client: make use of smbdirect_socket.send_io.bcredits
smb: client: use smbdirect_send_batch processing
smb: client: introduce and use smbd_{alloc, free}_send_io()
smb: client: split out smbd_ib_post_send()
smb: client: port and use the wait_for_credits logic used by server
...

+1485 -203
+30
drivers/infiniband/core/cma.c
··· 793 793 794 794 mutex_lock(&lock); 795 795 list_for_each_entry(cma_dev, &dev_list, list) { 796 + if (id_priv->restricted_node_type != RDMA_NODE_UNSPECIFIED && 797 + id_priv->restricted_node_type != cma_dev->device->node_type) 798 + continue; 796 799 rdma_for_each_port (cma_dev->device, port) { 797 800 gidp = rdma_protocol_roce(cma_dev->device, port) ? 798 801 &iboe_gid : &gid; ··· 1018 1015 return ERR_PTR(-ENOMEM); 1019 1016 1020 1017 id_priv->state = RDMA_CM_IDLE; 1018 + id_priv->restricted_node_type = RDMA_NODE_UNSPECIFIED; 1021 1019 id_priv->id.context = context; 1022 1020 id_priv->id.event_handler = event_handler; 1023 1021 id_priv->id.ps = ps; ··· 4180 4176 return ret; 4181 4177 } 4182 4178 EXPORT_SYMBOL(rdma_resolve_addr); 4179 + 4180 + int rdma_restrict_node_type(struct rdma_cm_id *id, u8 node_type) 4181 + { 4182 + struct rdma_id_private *id_priv = 4183 + container_of(id, struct rdma_id_private, id); 4184 + int ret = 0; 4185 + 4186 + switch (node_type) { 4187 + case RDMA_NODE_UNSPECIFIED: 4188 + case RDMA_NODE_IB_CA: 4189 + case RDMA_NODE_RNIC: 4190 + break; 4191 + default: 4192 + return -EINVAL; 4193 + } 4194 + 4195 + mutex_lock(&lock); 4196 + if (id_priv->cma_dev) 4197 + ret = -EALREADY; 4198 + else 4199 + id_priv->restricted_node_type = node_type; 4200 + mutex_unlock(&lock); 4201 + 4202 + return ret; 4203 + } 4204 + EXPORT_SYMBOL(rdma_restrict_node_type); 4183 4205 4184 4206 int rdma_bind_addr(struct rdma_cm_id *id, struct sockaddr *addr) 4185 4207 {
+1
drivers/infiniband/core/cma_priv.h
··· 72 72 73 73 int internal_id; 74 74 enum rdma_cm_state state; 75 + u8 restricted_node_type; 75 76 spinlock_t lock; 76 77 struct mutex qp_mutex; 77 78
+440 -128
fs/smb/client/smbdirect.c
··· 35 35 static struct smbdirect_recv_io *_get_first_reassembly( 36 36 struct smbdirect_socket *sc); 37 37 38 + static int smbd_post_send(struct smbdirect_socket *sc, 39 + struct smbdirect_send_batch *batch, 40 + struct smbdirect_send_io *request); 41 + 38 42 static int smbd_post_recv( 39 43 struct smbdirect_socket *sc, 40 44 struct smbdirect_recv_io *response); ··· 101 97 /* The maximum single message size can be sent to remote peer */ 102 98 int smbd_max_send_size = 1364; 103 99 104 - /* The maximum fragmented upper-layer payload receive size supported */ 105 - int smbd_max_fragmented_recv_size = 1024 * 1024; 100 + /* 101 + * The maximum fragmented upper-layer payload receive size supported 102 + * 103 + * Assume max_payload_per_credit is 104 + * smbd_max_receive_size - 24 = 1340 105 + * 106 + * The maximum number would be 107 + * smbd_receive_credit_max * max_payload_per_credit 108 + * 109 + * 1340 * 255 = 341700 (0x536C4) 110 + * 111 + * The minimum value from the spec is 131072 (0x20000) 112 + * 113 + * For now we use the logic we used in ksmbd before: 114 + * (1364 * 255) / 2 = 173910 (0x2A756) 115 + */ 116 + int smbd_max_fragmented_recv_size = (1364 * 255) / 2; 106 117 107 118 /* The maximum single-message size which can be received */ 108 119 int smbd_max_receive_size = 1364; ··· 512 493 return (void *)response->packet; 513 494 } 514 495 496 + static struct smbdirect_send_io *smbd_alloc_send_io(struct smbdirect_socket *sc) 497 + { 498 + struct smbdirect_send_io *msg; 499 + 500 + msg = mempool_alloc(sc->send_io.mem.pool, GFP_KERNEL); 501 + if (!msg) 502 + return ERR_PTR(-ENOMEM); 503 + msg->socket = sc; 504 + INIT_LIST_HEAD(&msg->sibling_list); 505 + msg->num_sge = 0; 506 + 507 + return msg; 508 + } 509 + 510 + static void smbd_free_send_io(struct smbdirect_send_io *msg) 511 + { 512 + struct smbdirect_socket *sc = msg->socket; 513 + size_t i; 514 + 515 + /* 516 + * The list needs to be empty! 517 + * The caller should take care of it. 
518 + */ 519 + WARN_ON_ONCE(!list_empty(&msg->sibling_list)); 520 + 521 + /* 522 + * Note we call ib_dma_unmap_page(), even if some sges are mapped using 523 + * ib_dma_map_single(). 524 + * 525 + * The difference between _single() and _page() only matters for the 526 + * ib_dma_map_*() case. 527 + * 528 + * For the ib_dma_unmap_*() case it does not matter as both take the 529 + * dma_addr_t and dma_unmap_single_attrs() is just an alias to 530 + * dma_unmap_page_attrs(). 531 + */ 532 + for (i = 0; i < msg->num_sge; i++) 533 + ib_dma_unmap_page(sc->ib.dev, 534 + msg->sge[i].addr, 535 + msg->sge[i].length, 536 + DMA_TO_DEVICE); 537 + 538 + mempool_free(msg, sc->send_io.mem.pool); 539 + } 540 + 515 541 /* Called when a RDMA send is done */ 516 542 static void send_done(struct ib_cq *cq, struct ib_wc *wc) 517 543 { 518 - int i; 519 544 struct smbdirect_send_io *request = 520 545 container_of(wc->wr_cqe, struct smbdirect_send_io, cqe); 521 546 struct smbdirect_socket *sc = request->socket; 547 + struct smbdirect_send_io *sibling, *next; 522 548 int lcredits = 0; 523 549 524 550 log_rdma_send(INFO, "smbdirect_send_io 0x%p completed wc->status=%s\n", 525 551 request, ib_wc_status_msg(wc->status)); 526 552 527 - for (i = 0; i < request->num_sge; i++) 528 - ib_dma_unmap_single(sc->ib.dev, 529 - request->sge[i].addr, 530 - request->sge[i].length, 531 - DMA_TO_DEVICE); 532 - mempool_free(request, sc->send_io.mem.pool); 553 + if (unlikely(!(request->wr.send_flags & IB_SEND_SIGNALED))) { 554 + /* 555 + * This happens when smbdirect_send_io is a sibling 556 + * before the final message, it is signaled on 557 + * error anyway, so we need to skip 558 + * smbdirect_connection_free_send_io here, 559 + * otherwise is will destroy the memory 560 + * of the siblings too, which will cause 561 + * use after free problems for the others 562 + * triggered from ib_drain_qp(). 563 + */ 564 + if (wc->status != IB_WC_SUCCESS) 565 + goto skip_free; 566 + 567 + /* 568 + * This should not happen! 
569 + * But we better just close the 570 + * connection... 571 + */ 572 + log_rdma_send(ERR, 573 + "unexpected send completion wc->status=%s (%d) wc->opcode=%d\n", 574 + ib_wc_status_msg(wc->status), wc->status, wc->opcode); 575 + smbd_disconnect_rdma_connection(sc); 576 + return; 577 + } 578 + 579 + /* 580 + * Free possible siblings and then the main send_io 581 + */ 582 + list_for_each_entry_safe(sibling, next, &request->sibling_list, sibling_list) { 583 + list_del_init(&sibling->sibling_list); 584 + smbd_free_send_io(sibling); 585 + lcredits += 1; 586 + } 587 + /* Note this frees wc->wr_cqe, but not wc */ 588 + smbd_free_send_io(request); 533 589 lcredits += 1; 534 590 535 591 if (wc->status != IB_WC_SUCCESS || wc->opcode != IB_WC_SEND) { 592 + skip_free: 536 593 if (wc->status != IB_WC_WR_FLUSH_ERR) 537 594 log_rdma_send(ERR, "wc->status=%s wc->opcode=%d\n", 538 595 ib_wc_status_msg(wc->status), wc->opcode); ··· 703 608 sp->max_frmr_depth * PAGE_SIZE); 704 609 sp->max_frmr_depth = sp->max_read_write_size / PAGE_SIZE; 705 610 611 + atomic_set(&sc->send_io.bcredits.count, 1); 706 612 sc->recv_io.expected = SMBDIRECT_EXPECT_DATA_TRANSFER; 707 613 return true; 708 614 } ··· 714 618 struct smbdirect_recv_io *response; 715 619 struct smbdirect_socket *sc = 716 620 container_of(work, struct smbdirect_socket, recv_io.posted.refill_work); 621 + int posted = 0; 717 622 718 623 if (sc->status != SMBDIRECT_SOCKET_CONNECTED) { 719 624 return; ··· 737 640 } 738 641 739 642 atomic_inc(&sc->recv_io.posted.count); 643 + posted += 1; 740 644 } 741 645 } 646 + 647 + atomic_add(posted, &sc->recv_io.credits.available); 648 + 649 + /* 650 + * If the last send credit is waiting for credits 651 + * it can grant we need to wake it up 652 + */ 653 + if (posted && 654 + atomic_read(&sc->send_io.bcredits.count) == 0 && 655 + atomic_read(&sc->send_io.credits.count) == 0) 656 + wake_up(&sc->send_io.credits.wait_queue); 742 657 743 658 /* Promptly send an immediate packet as defined in 
[MS-SMBD] 3.1.1.1 */ 744 659 if (atomic_read(&sc->recv_io.credits.count) < ··· 768 659 container_of(wc->wr_cqe, struct smbdirect_recv_io, cqe); 769 660 struct smbdirect_socket *sc = response->socket; 770 661 struct smbdirect_socket_parameters *sp = &sc->parameters; 662 + int current_recv_credits; 771 663 u16 old_recv_credit_target; 772 664 u32 data_offset = 0; 773 665 u32 data_length = 0; ··· 853 743 } 854 744 855 745 atomic_dec(&sc->recv_io.posted.count); 856 - atomic_dec(&sc->recv_io.credits.count); 746 + current_recv_credits = atomic_dec_return(&sc->recv_io.credits.count); 747 + 857 748 old_recv_credit_target = sc->recv_io.credits.target; 858 749 sc->recv_io.credits.target = 859 750 le16_to_cpu(data_transfer->credits_requested); ··· 890 779 * reassembly queue and wake up the reading thread 891 780 */ 892 781 if (data_length) { 893 - if (sc->recv_io.credits.target > old_recv_credit_target) 782 + if (current_recv_credits <= (sc->recv_io.credits.target / 4) || 783 + sc->recv_io.credits.target > old_recv_credit_target) 894 784 queue_work(sc->workqueue, &sc->recv_io.posted.refill_work); 895 785 896 786 enqueue_reassembly(sc, response, data_length); ··· 922 810 { 923 811 struct smbdirect_socket_parameters *sp = &sc->parameters; 924 812 struct rdma_cm_id *id; 813 + u8 node_type = RDMA_NODE_UNSPECIFIED; 925 814 int rc; 926 815 __be16 *sport; 927 816 ··· 932 819 rc = PTR_ERR(id); 933 820 log_rdma_event(ERR, "rdma_create_id() failed %i\n", rc); 934 821 return id; 822 + } 823 + 824 + switch (port) { 825 + case SMBD_PORT: 826 + /* 827 + * only allow iWarp devices 828 + * for port 5445. 829 + */ 830 + node_type = RDMA_NODE_RNIC; 831 + break; 832 + case SMB_PORT: 833 + /* 834 + * only allow InfiniBand, RoCEv1 or RoCEv2 835 + * devices for port 445. 
836 + * 837 + * (Basically don't allow iWarp devices) 838 + */ 839 + node_type = RDMA_NODE_IB_CA; 840 + break; 841 + } 842 + rc = rdma_restrict_node_type(id, node_type); 843 + if (rc) { 844 + log_rdma_event(ERR, "rdma_restrict_node_type(%u) failed %i\n", 845 + node_type, rc); 846 + goto out; 935 847 } 936 848 937 849 if (dstaddr->sa_family == AF_INET6) ··· 1093 955 static int smbd_post_send_negotiate_req(struct smbdirect_socket *sc) 1094 956 { 1095 957 struct smbdirect_socket_parameters *sp = &sc->parameters; 1096 - struct ib_send_wr send_wr; 1097 - int rc = -ENOMEM; 958 + int rc; 1098 959 struct smbdirect_send_io *request; 1099 960 struct smbdirect_negotiate_req *packet; 1100 961 1101 - request = mempool_alloc(sc->send_io.mem.pool, GFP_KERNEL); 1102 - if (!request) 1103 - return rc; 1104 - 1105 - request->socket = sc; 962 + request = smbd_alloc_send_io(sc); 963 + if (IS_ERR(request)) 964 + return PTR_ERR(request); 1106 965 1107 966 packet = smbdirect_send_io_payload(request); 1108 967 packet->min_version = cpu_to_le16(SMBDIRECT_V1); ··· 1111 976 packet->max_fragmented_size = 1112 977 cpu_to_le32(sp->max_fragmented_recv_size); 1113 978 1114 - request->num_sge = 1; 1115 979 request->sge[0].addr = ib_dma_map_single( 1116 980 sc->ib.dev, (void *)packet, 1117 981 sizeof(*packet), DMA_TO_DEVICE); ··· 1118 984 rc = -EIO; 1119 985 goto dma_mapping_failed; 1120 986 } 987 + request->num_sge = 1; 1121 988 1122 989 request->sge[0].length = sizeof(*packet); 1123 990 request->sge[0].lkey = sc->ib.pd->local_dma_lkey; 1124 991 1125 - ib_dma_sync_single_for_device( 1126 - sc->ib.dev, request->sge[0].addr, 1127 - request->sge[0].length, DMA_TO_DEVICE); 1128 - 1129 - request->cqe.done = send_done; 1130 - 1131 - send_wr.next = NULL; 1132 - send_wr.wr_cqe = &request->cqe; 1133 - send_wr.sg_list = request->sge; 1134 - send_wr.num_sge = request->num_sge; 1135 - send_wr.opcode = IB_WR_SEND; 1136 - send_wr.send_flags = IB_SEND_SIGNALED; 1137 - 1138 - log_rdma_send(INFO, "sge addr=0x%llx 
length=%u lkey=0x%x\n", 1139 - request->sge[0].addr, 1140 - request->sge[0].length, request->sge[0].lkey); 1141 - 1142 - atomic_inc(&sc->send_io.pending.count); 1143 - rc = ib_post_send(sc->ib.qp, &send_wr, NULL); 992 + rc = smbd_post_send(sc, NULL, request); 1144 993 if (!rc) 1145 994 return 0; 1146 995 1147 - /* if we reach here, post send failed */ 1148 - log_rdma_send(ERR, "ib_post_send failed rc=%d\n", rc); 1149 - atomic_dec(&sc->send_io.pending.count); 1150 - ib_dma_unmap_single(sc->ib.dev, request->sge[0].addr, 1151 - request->sge[0].length, DMA_TO_DEVICE); 1152 - 1153 - smbd_disconnect_rdma_connection(sc); 996 + if (rc == -EAGAIN) 997 + rc = -EIO; 1154 998 1155 999 dma_mapping_failed: 1156 - mempool_free(request, sc->send_io.mem.pool); 1000 + smbd_free_send_io(request); 1157 1001 return rc; 1158 1002 } 1159 1003 ··· 1145 1033 */ 1146 1034 static int manage_credits_prior_sending(struct smbdirect_socket *sc) 1147 1035 { 1036 + int missing; 1037 + int available; 1148 1038 int new_credits; 1149 1039 1150 1040 if (atomic_read(&sc->recv_io.credits.count) >= sc->recv_io.credits.target) 1151 1041 return 0; 1152 1042 1153 - new_credits = atomic_read(&sc->recv_io.posted.count); 1154 - if (new_credits == 0) 1043 + missing = (int)sc->recv_io.credits.target - atomic_read(&sc->recv_io.credits.count); 1044 + available = atomic_xchg(&sc->recv_io.credits.available, 0); 1045 + new_credits = (u16)min3(U16_MAX, missing, available); 1046 + if (new_credits <= 0) { 1047 + /* 1048 + * If credits are available, but not granted 1049 + * we need to re-add them again. 1050 + */ 1051 + if (available) 1052 + atomic_add(available, &sc->recv_io.credits.available); 1155 1053 return 0; 1054 + } 1156 1055 1157 - new_credits -= atomic_read(&sc->recv_io.credits.count); 1158 - if (new_credits <= 0) 1159 - return 0; 1056 + if (new_credits < available) { 1057 + /* 1058 + * Readd the remaining available again. 
1059 + */ 1060 + available -= new_credits; 1061 + atomic_add(available, &sc->recv_io.credits.available); 1062 + } 1160 1063 1064 + /* 1065 + * Remember we granted the credits 1066 + */ 1067 + atomic_add(new_credits, &sc->recv_io.credits.count); 1161 1068 return new_credits; 1162 1069 } 1163 1070 ··· 1206 1075 return 0; 1207 1076 } 1208 1077 1078 + static int smbd_ib_post_send(struct smbdirect_socket *sc, 1079 + struct ib_send_wr *wr) 1080 + { 1081 + int ret; 1082 + 1083 + atomic_inc(&sc->send_io.pending.count); 1084 + ret = ib_post_send(sc->ib.qp, wr, NULL); 1085 + if (ret) { 1086 + pr_err("failed to post send: %d\n", ret); 1087 + smbd_disconnect_rdma_connection(sc); 1088 + ret = -EAGAIN; 1089 + } 1090 + return ret; 1091 + } 1092 + 1209 1093 /* Post the send request */ 1210 1094 static int smbd_post_send(struct smbdirect_socket *sc, 1211 - struct smbdirect_send_io *request) 1095 + struct smbdirect_send_batch *batch, 1096 + struct smbdirect_send_io *request) 1212 1097 { 1213 - struct ib_send_wr send_wr; 1214 - int rc, i; 1098 + int i; 1215 1099 1216 1100 for (i = 0; i < request->num_sge; i++) { 1217 1101 log_rdma_send(INFO, ··· 1240 1094 } 1241 1095 1242 1096 request->cqe.done = send_done; 1097 + request->wr.next = NULL; 1098 + request->wr.sg_list = request->sge; 1099 + request->wr.num_sge = request->num_sge; 1100 + request->wr.opcode = IB_WR_SEND; 1243 1101 1244 - send_wr.next = NULL; 1245 - send_wr.wr_cqe = &request->cqe; 1246 - send_wr.sg_list = request->sge; 1247 - send_wr.num_sge = request->num_sge; 1248 - send_wr.opcode = IB_WR_SEND; 1249 - send_wr.send_flags = IB_SEND_SIGNALED; 1102 + if (batch) { 1103 + request->wr.wr_cqe = NULL; 1104 + request->wr.send_flags = 0; 1105 + if (!list_empty(&batch->msg_list)) { 1106 + struct smbdirect_send_io *last; 1250 1107 1251 - rc = ib_post_send(sc->ib.qp, &send_wr, NULL); 1252 - if (rc) { 1253 - log_rdma_send(ERR, "ib_post_send failed rc=%d\n", rc); 1254 - smbd_disconnect_rdma_connection(sc); 1255 - rc = -EAGAIN; 1108 + 
last = list_last_entry(&batch->msg_list, 1109 + struct smbdirect_send_io, 1110 + sibling_list); 1111 + last->wr.next = &request->wr; 1112 + } 1113 + list_add_tail(&request->sibling_list, &batch->msg_list); 1114 + batch->wr_cnt++; 1115 + return 0; 1256 1116 } 1257 1117 1258 - return rc; 1118 + request->wr.wr_cqe = &request->cqe; 1119 + request->wr.send_flags = IB_SEND_SIGNALED; 1120 + return smbd_ib_post_send(sc, &request->wr); 1121 + } 1122 + 1123 + static void smbd_send_batch_init(struct smbdirect_send_batch *batch, 1124 + bool need_invalidate_rkey, 1125 + unsigned int remote_key) 1126 + { 1127 + INIT_LIST_HEAD(&batch->msg_list); 1128 + batch->wr_cnt = 0; 1129 + batch->need_invalidate_rkey = need_invalidate_rkey; 1130 + batch->remote_key = remote_key; 1131 + batch->credit = 0; 1132 + } 1133 + 1134 + static int smbd_send_batch_flush(struct smbdirect_socket *sc, 1135 + struct smbdirect_send_batch *batch, 1136 + bool is_last) 1137 + { 1138 + struct smbdirect_send_io *first, *last; 1139 + int ret = 0; 1140 + 1141 + if (list_empty(&batch->msg_list)) 1142 + goto release_credit; 1143 + 1144 + first = list_first_entry(&batch->msg_list, 1145 + struct smbdirect_send_io, 1146 + sibling_list); 1147 + last = list_last_entry(&batch->msg_list, 1148 + struct smbdirect_send_io, 1149 + sibling_list); 1150 + 1151 + if (batch->need_invalidate_rkey) { 1152 + first->wr.opcode = IB_WR_SEND_WITH_INV; 1153 + first->wr.ex.invalidate_rkey = batch->remote_key; 1154 + batch->need_invalidate_rkey = false; 1155 + batch->remote_key = 0; 1156 + } 1157 + 1158 + last->wr.send_flags = IB_SEND_SIGNALED; 1159 + last->wr.wr_cqe = &last->cqe; 1160 + 1161 + /* 1162 + * Remove last from batch->msg_list 1163 + * and splice the rest of batch->msg_list 1164 + * to last->sibling_list. 1165 + * 1166 + * batch->msg_list is a valid empty list 1167 + * at the end. 
1168 + */ 1169 + list_del_init(&last->sibling_list); 1170 + list_splice_tail_init(&batch->msg_list, &last->sibling_list); 1171 + batch->wr_cnt = 0; 1172 + 1173 + ret = smbd_ib_post_send(sc, &first->wr); 1174 + if (ret) { 1175 + struct smbdirect_send_io *sibling, *next; 1176 + 1177 + list_for_each_entry_safe(sibling, next, &last->sibling_list, sibling_list) { 1178 + list_del_init(&sibling->sibling_list); 1179 + smbd_free_send_io(sibling); 1180 + } 1181 + smbd_free_send_io(last); 1182 + } 1183 + 1184 + release_credit: 1185 + if (is_last && !ret && batch->credit) { 1186 + atomic_add(batch->credit, &sc->send_io.bcredits.count); 1187 + batch->credit = 0; 1188 + wake_up(&sc->send_io.bcredits.wait_queue); 1189 + } 1190 + 1191 + return ret; 1192 + } 1193 + 1194 + static int wait_for_credits(struct smbdirect_socket *sc, 1195 + wait_queue_head_t *waitq, atomic_t *total_credits, 1196 + int needed) 1197 + { 1198 + int ret; 1199 + 1200 + do { 1201 + if (atomic_sub_return(needed, total_credits) >= 0) 1202 + return 0; 1203 + 1204 + atomic_add(needed, total_credits); 1205 + ret = wait_event_interruptible(*waitq, 1206 + atomic_read(total_credits) >= needed || 1207 + sc->status != SMBDIRECT_SOCKET_CONNECTED); 1208 + 1209 + if (sc->status != SMBDIRECT_SOCKET_CONNECTED) 1210 + return -ENOTCONN; 1211 + else if (ret < 0) 1212 + return ret; 1213 + } while (true); 1214 + } 1215 + 1216 + static int wait_for_send_bcredit(struct smbdirect_socket *sc, 1217 + struct smbdirect_send_batch *batch) 1218 + { 1219 + int ret; 1220 + 1221 + if (batch->credit) 1222 + return 0; 1223 + 1224 + ret = wait_for_credits(sc, 1225 + &sc->send_io.bcredits.wait_queue, 1226 + &sc->send_io.bcredits.count, 1227 + 1); 1228 + if (ret) 1229 + return ret; 1230 + 1231 + batch->credit = 1; 1232 + return 0; 1233 + } 1234 + 1235 + static int wait_for_send_lcredit(struct smbdirect_socket *sc, 1236 + struct smbdirect_send_batch *batch) 1237 + { 1238 + if (batch && (atomic_read(&sc->send_io.lcredits.count) <= 1)) { 1239 + int 
ret; 1240 + 1241 + ret = smbd_send_batch_flush(sc, batch, false); 1242 + if (ret) 1243 + return ret; 1244 + } 1245 + 1246 + return wait_for_credits(sc, 1247 + &sc->send_io.lcredits.wait_queue, 1248 + &sc->send_io.lcredits.count, 1249 + 1); 1250 + } 1251 + 1252 + static int wait_for_send_credits(struct smbdirect_socket *sc, 1253 + struct smbdirect_send_batch *batch) 1254 + { 1255 + if (batch && 1256 + (batch->wr_cnt >= 16 || atomic_read(&sc->send_io.credits.count) <= 1)) { 1257 + int ret; 1258 + 1259 + ret = smbd_send_batch_flush(sc, batch, false); 1260 + if (ret) 1261 + return ret; 1262 + } 1263 + 1264 + return wait_for_credits(sc, 1265 + &sc->send_io.credits.wait_queue, 1266 + &sc->send_io.credits.count, 1267 + 1); 1259 1268 } 1260 1269 1261 1270 static int smbd_post_send_iter(struct smbdirect_socket *sc, 1271 + struct smbdirect_send_batch *batch, 1262 1272 struct iov_iter *iter, 1263 1273 int *_remaining_data_length) 1264 1274 { 1265 1275 struct smbdirect_socket_parameters *sp = &sc->parameters; 1266 - int i, rc; 1276 + int rc; 1267 1277 int header_length; 1268 1278 int data_length; 1269 1279 struct smbdirect_send_io *request; 1270 1280 struct smbdirect_data_transfer *packet; 1271 1281 int new_credits = 0; 1282 + struct smbdirect_send_batch _batch; 1272 1283 1273 - wait_lcredit: 1274 - /* Wait for local send credits */ 1275 - rc = wait_event_interruptible(sc->send_io.lcredits.wait_queue, 1276 - atomic_read(&sc->send_io.lcredits.count) > 0 || 1277 - sc->status != SMBDIRECT_SOCKET_CONNECTED); 1278 - if (rc) 1279 - goto err_wait_lcredit; 1284 + if (!batch) { 1285 + smbd_send_batch_init(&_batch, false, 0); 1286 + batch = &_batch; 1287 + } 1280 1288 1281 - if (sc->status != SMBDIRECT_SOCKET_CONNECTED) { 1282 - log_outgoing(ERR, "disconnected not sending on wait_credit\n"); 1289 + rc = wait_for_send_bcredit(sc, batch); 1290 + if (rc) { 1291 + log_outgoing(ERR, "disconnected not sending on wait_bcredit\n"); 1292 + rc = -EAGAIN; 1293 + goto err_wait_bcredit; 1294 + } 
1295 + 1296 + rc = wait_for_send_lcredit(sc, batch); 1297 + if (rc) { 1298 + log_outgoing(ERR, "disconnected not sending on wait_lcredit\n"); 1283 1299 rc = -EAGAIN; 1284 1300 goto err_wait_lcredit; 1285 1301 } 1286 - if (unlikely(atomic_dec_return(&sc->send_io.lcredits.count) < 0)) { 1287 - atomic_inc(&sc->send_io.lcredits.count); 1288 - goto wait_lcredit; 1289 - } 1290 1302 1291 - wait_credit: 1292 - /* Wait for send credits. A SMBD packet needs one credit */ 1293 - rc = wait_event_interruptible(sc->send_io.credits.wait_queue, 1294 - atomic_read(&sc->send_io.credits.count) > 0 || 1295 - sc->status != SMBDIRECT_SOCKET_CONNECTED); 1296 - if (rc) 1297 - goto err_wait_credit; 1298 - 1299 - if (sc->status != SMBDIRECT_SOCKET_CONNECTED) { 1303 + rc = wait_for_send_credits(sc, batch); 1304 + if (rc) { 1300 1305 log_outgoing(ERR, "disconnected not sending on wait_credit\n"); 1301 1306 rc = -EAGAIN; 1302 1307 goto err_wait_credit; 1303 1308 } 1304 - if (unlikely(atomic_dec_return(&sc->send_io.credits.count) < 0)) { 1305 - atomic_inc(&sc->send_io.credits.count); 1306 - goto wait_credit; 1309 + 1310 + new_credits = manage_credits_prior_sending(sc); 1311 + if (new_credits == 0 && 1312 + atomic_read(&sc->send_io.credits.count) == 0 && 1313 + atomic_read(&sc->recv_io.credits.count) == 0) { 1314 + queue_work(sc->workqueue, &sc->recv_io.posted.refill_work); 1315 + rc = wait_event_interruptible(sc->send_io.credits.wait_queue, 1316 + atomic_read(&sc->send_io.credits.count) >= 1 || 1317 + atomic_read(&sc->recv_io.credits.available) >= 1 || 1318 + sc->status != SMBDIRECT_SOCKET_CONNECTED); 1319 + if (sc->status != SMBDIRECT_SOCKET_CONNECTED) 1320 + rc = -ENOTCONN; 1321 + if (rc < 0) { 1322 + log_outgoing(ERR, "disconnected not sending on last credit\n"); 1323 + rc = -EAGAIN; 1324 + goto err_wait_credit; 1325 + } 1326 + 1327 + new_credits = manage_credits_prior_sending(sc); 1307 1328 } 1308 1329 1309 - request = mempool_alloc(sc->send_io.mem.pool, GFP_KERNEL); 1310 - if (!request) { 
1311 - rc = -ENOMEM; 1330 + request = smbd_alloc_send_io(sc); 1331 + if (IS_ERR(request)) { 1332 + rc = PTR_ERR(request); 1312 1333 goto err_alloc; 1313 1334 } 1314 1335 1315 - request->socket = sc; 1316 1336 memset(request->sge, 0, sizeof(request->sge)); 1317 1337 1318 1338 /* Map the packet to DMA */ ··· 1527 1215 1528 1216 /* Fill in the packet header */ 1529 1217 packet->credits_requested = cpu_to_le16(sp->send_credit_target); 1530 - 1531 - new_credits = manage_credits_prior_sending(sc); 1532 - atomic_add(new_credits, &sc->recv_io.credits.count); 1533 1218 packet->credits_granted = cpu_to_le16(new_credits); 1534 1219 1535 1220 packet->flags = 0; ··· 1549 1240 le32_to_cpu(packet->data_length), 1550 1241 le32_to_cpu(packet->remaining_data_length)); 1551 1242 1552 - /* 1553 - * Now that we got a local and a remote credit 1554 - * we add us as pending 1555 - */ 1556 - atomic_inc(&sc->send_io.pending.count); 1243 + rc = smbd_post_send(sc, batch, request); 1244 + if (!rc) { 1245 + if (batch != &_batch) 1246 + return 0; 1557 1247 1558 - rc = smbd_post_send(sc, request); 1559 - if (!rc) 1560 - return 0; 1561 - 1562 - if (atomic_dec_and_test(&sc->send_io.pending.count)) 1563 - wake_up(&sc->send_io.pending.zero_wait_queue); 1564 - 1565 - wake_up(&sc->send_io.pending.dec_wait_queue); 1248 + rc = smbd_send_batch_flush(sc, batch, true); 1249 + if (!rc) 1250 + return 0; 1251 + } 1566 1252 1567 1253 err_dma: 1568 - for (i = 0; i < request->num_sge; i++) 1569 - if (request->sge[i].addr) 1570 - ib_dma_unmap_single(sc->ib.dev, 1571 - request->sge[i].addr, 1572 - request->sge[i].length, 1573 - DMA_TO_DEVICE); 1574 - mempool_free(request, sc->send_io.mem.pool); 1575 - 1576 - /* roll back the granted receive credits */ 1577 - atomic_sub(new_credits, &sc->recv_io.credits.count); 1254 + smbd_free_send_io(request); 1578 1255 1579 1256 err_alloc: 1580 1257 atomic_inc(&sc->send_io.credits.count); ··· 1571 1276 wake_up(&sc->send_io.lcredits.wait_queue); 1572 1277 1573 1278 
err_wait_lcredit: 1279 + atomic_add(batch->credit, &sc->send_io.bcredits.count); 1280 + batch->credit = 0; 1281 + wake_up(&sc->send_io.bcredits.wait_queue); 1282 + 1283 + err_wait_bcredit: 1574 1284 return rc; 1575 1285 } 1576 1286 ··· 1589 1289 int remaining_data_length = 0; 1590 1290 1591 1291 sc->statistics.send_empty++; 1592 - return smbd_post_send_iter(sc, NULL, &remaining_data_length); 1292 + return smbd_post_send_iter(sc, NULL, NULL, &remaining_data_length); 1593 1293 } 1594 1294 1595 1295 static int smbd_post_send_full_iter(struct smbdirect_socket *sc, 1296 + struct smbdirect_send_batch *batch, 1596 1297 struct iov_iter *iter, 1597 1298 int *_remaining_data_length) 1598 1299 { ··· 1606 1305 */ 1607 1306 1608 1307 while (iov_iter_count(iter) > 0) { 1609 - rc = smbd_post_send_iter(sc, iter, _remaining_data_length); 1308 + rc = smbd_post_send_iter(sc, batch, iter, _remaining_data_length); 1610 1309 if (rc < 0) 1611 1310 break; 1612 1311 } ··· 2528 2227 struct smbdirect_socket_parameters *sp = &sc->parameters; 2529 2228 struct smb_rqst *rqst; 2530 2229 struct iov_iter iter; 2230 + struct smbdirect_send_batch batch; 2531 2231 unsigned int remaining_data_length, klen; 2532 2232 int rc, i, rqst_idx; 2233 + int error = 0; 2533 2234 2534 2235 if (sc->status != SMBDIRECT_SOCKET_CONNECTED) 2535 2236 return -EAGAIN; ··· 2556 2253 num_rqst, remaining_data_length); 2557 2254 2558 2255 rqst_idx = 0; 2256 + smbd_send_batch_init(&batch, false, 0); 2559 2257 do { 2560 2258 rqst = &rqst_array[rqst_idx]; 2561 2259 ··· 2575 2271 klen += rqst->rq_iov[i].iov_len; 2576 2272 iov_iter_kvec(&iter, ITER_SOURCE, rqst->rq_iov, rqst->rq_nvec, klen); 2577 2273 2578 - rc = smbd_post_send_full_iter(sc, &iter, &remaining_data_length); 2579 - if (rc < 0) 2274 + rc = smbd_post_send_full_iter(sc, &batch, &iter, &remaining_data_length); 2275 + if (rc < 0) { 2276 + error = rc; 2580 2277 break; 2278 + } 2581 2279 2582 2280 if (iov_iter_count(&rqst->rq_iter) > 0) { 2583 2281 /* And then the data 
pages if there are any */ 2584 - rc = smbd_post_send_full_iter(sc, &rqst->rq_iter, 2282 + rc = smbd_post_send_full_iter(sc, &batch, &rqst->rq_iter, 2585 2283 &remaining_data_length); 2586 - if (rc < 0) 2284 + if (rc < 0) { 2285 + error = rc; 2587 2286 break; 2287 + } 2588 2288 } 2589 2289 2590 2290 } while (++rqst_idx < num_rqst); 2291 + 2292 + rc = smbd_send_batch_flush(sc, &batch, true); 2293 + if (unlikely(!rc && error)) 2294 + rc = error; 2591 2295 2592 2296 /* 2593 2297 * As an optimization, we don't wait for individual I/O to finish
+18
fs/smb/common/smbdirect/smbdirect_socket.h
··· 163 163 } mem; 164 164 165 165 /* 166 + * This is a coordination for smbdirect_send_batch. 167 + * 168 + * There's only one possible credit, which means 169 + * only one instance is running at a time. 170 + */ 171 + struct { 172 + atomic_t count; 173 + wait_queue_head_t wait_queue; 174 + } bcredits; 175 + 176 + /* 166 177 * The local credit state for ib_post_send() 167 178 */ 168 179 struct { ··· 250 239 */ 251 240 struct { 252 241 u16 target; 242 + atomic_t available; 253 243 atomic_t count; 254 244 } credits; 255 245 ··· 382 370 INIT_DELAYED_WORK(&sc->idle.timer_work, __smbdirect_socket_disabled_work); 383 371 disable_delayed_work_sync(&sc->idle.timer_work); 384 372 373 + atomic_set(&sc->send_io.bcredits.count, 0); 374 + init_waitqueue_head(&sc->send_io.bcredits.wait_queue); 375 + 385 376 atomic_set(&sc->send_io.lcredits.count, 0); 386 377 init_waitqueue_head(&sc->send_io.lcredits.wait_queue); 387 378 ··· 402 387 INIT_WORK(&sc->recv_io.posted.refill_work, __smbdirect_socket_disabled_work); 403 388 disable_work_sync(&sc->recv_io.posted.refill_work); 404 389 390 + atomic_set(&sc->recv_io.credits.available, 0); 405 391 atomic_set(&sc->recv_io.credits.count, 0); 406 392 407 393 INIT_LIST_HEAD(&sc->recv_io.reassembly.list); ··· 499 483 */ 500 484 bool need_invalidate_rkey; 501 485 u32 remote_key; 486 + 487 + int credit; 502 488 }; 503 489 504 490 struct smbdirect_recv_io {
+1
fs/smb/server/Makefile
··· 18 18 $(obj)/ksmbd_spnego_negtokentarg.asn1.o: $(obj)/ksmbd_spnego_negtokentarg.asn1.c $(obj)/ksmbd_spnego_negtokentarg.asn1.h 19 19 20 20 ksmbd-$(CONFIG_SMB_SERVER_SMBDIRECT) += transport_rdma.o 21 + ksmbd-$(CONFIG_PROC_FS) += proc.o
+59
fs/smb/server/connection.c
··· 14 14 #include "connection.h" 15 15 #include "transport_tcp.h" 16 16 #include "transport_rdma.h" 17 + #include "misc.h" 17 18 18 19 static DEFINE_MUTEX(init_lock); 19 20 ··· 22 21 23 22 DEFINE_HASHTABLE(conn_list, CONN_HASH_BITS); 24 23 DECLARE_RWSEM(conn_list_lock); 24 + 25 + #ifdef CONFIG_PROC_FS 26 + static struct proc_dir_entry *proc_clients; 27 + 28 + static int proc_show_clients(struct seq_file *m, void *v) 29 + { 30 + struct ksmbd_conn *conn; 31 + struct timespec64 now, t; 32 + int i; 33 + 34 + seq_printf(m, "#%-20s %-10s %-10s %-10s %-10s %-10s\n", 35 + "<name>", "<dialect>", "<credits>", "<open files>", 36 + "<requests>", "<last active>"); 37 + 38 + down_read(&conn_list_lock); 39 + hash_for_each(conn_list, i, conn, hlist) { 40 + jiffies_to_timespec64(jiffies - conn->last_active, &t); 41 + ktime_get_real_ts64(&now); 42 + t = timespec64_sub(now, t); 43 + #if IS_ENABLED(CONFIG_IPV6) 44 + if (!conn->inet_addr) 45 + seq_printf(m, "%-20pI6c", &conn->inet6_addr); 46 + else 47 + #endif 48 + seq_printf(m, "%-20pI4", &conn->inet_addr); 49 + seq_printf(m, " 0x%-10x %-10u %-12d %-10d %ptT\n", 50 + conn->dialect, 51 + conn->total_credits, 52 + atomic_read(&conn->stats.open_files_count), 53 + atomic_read(&conn->req_running), 54 + &t); 55 + } 56 + up_read(&conn_list_lock); 57 + return 0; 58 + } 59 + 60 + static int create_proc_clients(void) 61 + { 62 + proc_clients = ksmbd_proc_create("clients", 63 + proc_show_clients, NULL); 64 + if (!proc_clients) 65 + return -ENOMEM; 66 + return 0; 67 + } 68 + 69 + static void delete_proc_clients(void) 70 + { 71 + if (proc_clients) { 72 + proc_remove(proc_clients); 73 + proc_clients = NULL; 74 + } 75 + } 76 + #else 77 + static int create_proc_clients(void) { return 0; } 78 + static void delete_proc_clients(void) {} 79 + #endif 25 80 26 81 /** 27 82 * ksmbd_conn_free() - free resources of the connection instance ··· 529 472 } 530 473 out: 531 474 mutex_unlock(&init_lock); 475 + create_proc_clients(); 532 476 return ret; 533 477 } 
534 478 ··· 560 502 561 503 void ksmbd_conn_transport_destroy(void) 562 504 { 505 + delete_proc_clients(); 563 506 mutex_lock(&init_lock); 564 507 ksmbd_tcp_destroy(); 565 508 ksmbd_rdma_stop_listening();
+3 -2
fs/smb/server/connection.h
··· 7 7 #define __KSMBD_CONNECTION_H__ 8 8 9 9 #include <linux/list.h> 10 + #include <linux/inet.h> 10 11 #include <linux/ip.h> 11 12 #include <net/sock.h> 12 13 #include <net/tcp.h> ··· 34 33 KSMBD_SESS_RELEASING 35 34 }; 36 35 37 - struct ksmbd_stats { 36 + struct ksmbd_conn_stats { 38 37 atomic_t open_files_count; 39 38 atomic64_t request_served; 40 39 }; ··· 79 78 struct list_head requests; 80 79 struct list_head async_requests; 81 80 int connection_type; 82 - struct ksmbd_stats stats; 81 + struct ksmbd_conn_stats stats; 83 82 char ClientGUID[SMB2_CLIENT_GUID_SIZE]; 84 83 struct ntlmssp_auth ntlmssp; 85 84
+24 -12
fs/smb/server/mgmt/tree_connect.c
··· 9 9 10 10 #include "../transport_ipc.h" 11 11 #include "../connection.h" 12 + #include "../stats.h" 12 13 13 14 #include "tree_connect.h" 14 15 #include "user_config.h" ··· 80 79 status.tree_conn = tree_conn; 81 80 atomic_set(&tree_conn->refcount, 1); 82 81 82 + down_write(&sess->tree_conns_lock); 83 83 ret = xa_err(xa_store(&sess->tree_conns, tree_conn->id, tree_conn, 84 84 KSMBD_DEFAULT_GFP)); 85 + up_write(&sess->tree_conns_lock); 85 86 if (ret) { 86 87 status.ret = -ENOMEM; 87 88 goto out_error; 88 89 } 90 + ksmbd_counter_inc(KSMBD_COUNTER_TREE_CONNS); 89 91 kvfree(resp); 90 92 return status; 91 93 ··· 107 103 kfree(tcon); 108 104 } 109 105 110 - int ksmbd_tree_conn_disconnect(struct ksmbd_session *sess, 111 - struct ksmbd_tree_connect *tree_conn) 106 + static int __ksmbd_tree_conn_disconnect(struct ksmbd_session *sess, 107 + struct ksmbd_tree_connect *tree_conn) 112 108 { 113 109 int ret; 114 - 115 - write_lock(&sess->tree_conns_lock); 116 - xa_erase(&sess->tree_conns, tree_conn->id); 117 - write_unlock(&sess->tree_conns_lock); 118 110 119 111 ret = ksmbd_ipc_tree_disconnect_request(sess->id, tree_conn->id); 120 112 ksmbd_release_tree_conn_id(sess, tree_conn->id); 121 113 ksmbd_share_config_put(tree_conn->share_conf); 114 + ksmbd_counter_dec(KSMBD_COUNTER_TREE_CONNS); 122 115 if (atomic_dec_and_test(&tree_conn->refcount)) 123 116 kfree(tree_conn); 124 117 return ret; 118 + } 119 + 120 + int ksmbd_tree_conn_disconnect(struct ksmbd_session *sess, 121 + struct ksmbd_tree_connect *tree_conn) 122 + { 123 + down_write(&sess->tree_conns_lock); 124 + xa_erase(&sess->tree_conns, tree_conn->id); 125 + up_write(&sess->tree_conns_lock); 126 + 127 + return __ksmbd_tree_conn_disconnect(sess, tree_conn); 125 128 } 126 129 127 130 struct ksmbd_tree_connect *ksmbd_tree_conn_lookup(struct ksmbd_session *sess, ··· 136 125 { 137 126 struct ksmbd_tree_connect *tcon; 138 127 139 - read_lock(&sess->tree_conns_lock); 128 + down_read(&sess->tree_conns_lock); 140 129 tcon = 
xa_load(&sess->tree_conns, id); 141 130 if (tcon) { 142 131 if (tcon->t_state != TREE_CONNECTED) ··· 144 133 else if (!atomic_inc_not_zero(&tcon->refcount)) 145 134 tcon = NULL; 146 135 } 147 - read_unlock(&sess->tree_conns_lock); 136 + up_read(&sess->tree_conns_lock); 148 137 149 138 return tcon; 150 139 } ··· 158 147 if (!sess) 159 148 return -EINVAL; 160 149 150 + down_write(&sess->tree_conns_lock); 161 151 xa_for_each(&sess->tree_conns, id, tc) { 162 - write_lock(&sess->tree_conns_lock); 163 152 if (tc->t_state == TREE_DISCONNECTED) { 164 - write_unlock(&sess->tree_conns_lock); 165 153 ret = -ENOENT; 166 154 continue; 167 155 } 168 156 tc->t_state = TREE_DISCONNECTED; 169 - write_unlock(&sess->tree_conns_lock); 170 157 171 - ret |= ksmbd_tree_conn_disconnect(sess, tc); 158 + xa_erase(&sess->tree_conns, tc->id); 159 + ret |= __ksmbd_tree_conn_disconnect(sess, tc); 172 160 } 173 161 xa_destroy(&sess->tree_conns); 162 + up_write(&sess->tree_conns_lock); 163 + 174 164 return ret; 175 165 }
+2 -4
fs/smb/server/mgmt/user_config.c
··· 90 90 kfree(user); 91 91 } 92 92 93 - int ksmbd_anonymous_user(struct ksmbd_user *user) 93 + bool ksmbd_anonymous_user(struct ksmbd_user *user) 94 94 { 95 - if (user->name[0] == '\0') 96 - return 1; 97 - return 0; 95 + return user->name[0] == '\0'; 98 96 } 99 97 100 98 bool ksmbd_compare_user(struct ksmbd_user *u1, struct ksmbd_user *u2)
+1 -1
fs/smb/server/mgmt/user_config.h
··· 65 65 struct ksmbd_user *ksmbd_alloc_user(struct ksmbd_login_response *resp, 66 66 struct ksmbd_login_response_ext *resp_ext); 67 67 void ksmbd_free_user(struct ksmbd_user *user); 68 - int ksmbd_anonymous_user(struct ksmbd_user *user); 68 + bool ksmbd_anonymous_user(struct ksmbd_user *user); 69 69 bool ksmbd_compare_user(struct ksmbd_user *u1, struct ksmbd_user *u2); 70 70 #endif /* __USER_CONFIG_MANAGEMENT_H__ */
+230 -1
fs/smb/server/mgmt/user_session.c
··· 12 12 #include "user_session.h" 13 13 #include "user_config.h" 14 14 #include "tree_connect.h" 15 + #include "share_config.h" 15 16 #include "../transport_ipc.h" 16 17 #include "../connection.h" 17 18 #include "../vfs_cache.h" 19 + #include "../misc.h" 20 + #include "../stats.h" 18 21 19 22 static DEFINE_IDA(session_ida); 20 23 ··· 30 27 unsigned int method; 31 28 }; 32 29 30 + #ifdef CONFIG_PROC_FS 31 + 32 + static const struct ksmbd_const_name ksmbd_sess_cap_const_names[] = { 33 + {SMB2_GLOBAL_CAP_DFS, "dfs"}, 34 + {SMB2_GLOBAL_CAP_LEASING, "lease"}, 35 + {SMB2_GLOBAL_CAP_LARGE_MTU, "large-mtu"}, 36 + {SMB2_GLOBAL_CAP_MULTI_CHANNEL, "multi-channel"}, 37 + {SMB2_GLOBAL_CAP_PERSISTENT_HANDLES, "persistent-handles"}, 38 + {SMB2_GLOBAL_CAP_DIRECTORY_LEASING, "dir-lease"}, 39 + {SMB2_GLOBAL_CAP_ENCRYPTION, "encryption"} 40 + }; 41 + 42 + static const struct ksmbd_const_name ksmbd_cipher_const_names[] = { 43 + {le16_to_cpu(SMB2_ENCRYPTION_AES128_CCM), "aes128-ccm"}, 44 + {le16_to_cpu(SMB2_ENCRYPTION_AES128_GCM), "aes128-gcm"}, 45 + {le16_to_cpu(SMB2_ENCRYPTION_AES256_CCM), "aes256-ccm"}, 46 + {le16_to_cpu(SMB2_ENCRYPTION_AES256_GCM), "aes256-gcm"}, 47 + }; 48 + 49 + static const struct ksmbd_const_name ksmbd_signing_const_names[] = { 50 + {SIGNING_ALG_HMAC_SHA256, "hmac-sha256"}, 51 + {SIGNING_ALG_AES_CMAC, "aes-cmac"}, 52 + {SIGNING_ALG_AES_GMAC, "aes-gmac"}, 53 + }; 54 + 55 + static const char *session_state_string(struct ksmbd_session *session) 56 + { 57 + switch (session->state) { 58 + case SMB2_SESSION_VALID: 59 + return "valid"; 60 + case SMB2_SESSION_IN_PROGRESS: 61 + return "progress"; 62 + case SMB2_SESSION_EXPIRED: 63 + return "expired"; 64 + default: 65 + return ""; 66 + } 67 + } 68 + 69 + static const char *session_user_name(struct ksmbd_session *session) 70 + { 71 + if (user_guest(session->user)) 72 + return "(Guest)"; 73 + else if (ksmbd_anonymous_user(session->user)) 74 + return "(Anonymous)"; 75 + return session->user->name; 76 + } 77 + 78 + static 
int show_proc_session(struct seq_file *m, void *v) 79 + { 80 + struct ksmbd_session *sess; 81 + struct ksmbd_tree_connect *tree_conn; 82 + struct ksmbd_share_config *share_conf; 83 + struct channel *chan; 84 + unsigned long id; 85 + int i = 0; 86 + 87 + sess = (struct ksmbd_session *)m->private; 88 + ksmbd_user_session_get(sess); 89 + 90 + i = 0; 91 + down_read(&sess->chann_lock); 92 + xa_for_each(&sess->ksmbd_chann_list, id, chan) { 93 + #if IS_ENABLED(CONFIG_IPV6) 94 + if (chan->conn->inet_addr) 95 + seq_printf(m, "%-20s\t%pI4\n", "client", 96 + &chan->conn->inet_addr); 97 + else 98 + seq_printf(m, "%-20s\t%pI6c\n", "client", 99 + &chan->conn->inet6_addr); 100 + #else 101 + seq_printf(m, "%-20s\t%pI4\n", "client", 102 + &chan->conn->inet_addr); 103 + #endif 104 + seq_printf(m, "%-20s\t%s\n", "user", session_user_name(sess)); 105 + seq_printf(m, "%-20s\t%llu\n", "id", sess->id); 106 + seq_printf(m, "%-20s\t%s\n", "state", 107 + session_state_string(sess)); 108 + 109 + seq_printf(m, "%-20s\t", "capabilities"); 110 + ksmbd_proc_show_flag_names(m, 111 + ksmbd_sess_cap_const_names, 112 + ARRAY_SIZE(ksmbd_sess_cap_const_names), 113 + chan->conn->vals->req_capabilities); 114 + 115 + if (sess->sign) { 116 + seq_printf(m, "%-20s\t", "signing"); 117 + ksmbd_proc_show_const_name(m, "%s\t", 118 + ksmbd_signing_const_names, 119 + ARRAY_SIZE(ksmbd_signing_const_names), 120 + le16_to_cpu(chan->conn->signing_algorithm)); 121 + } else if (sess->enc) { 122 + seq_printf(m, "%-20s\t", "encryption"); 123 + ksmbd_proc_show_const_name(m, "%s\t", 124 + ksmbd_cipher_const_names, 125 + ARRAY_SIZE(ksmbd_cipher_const_names), 126 + le16_to_cpu(chan->conn->cipher_type)); 127 + } 128 + i++; 129 + } 130 + up_read(&sess->chann_lock); 131 + 132 + seq_printf(m, "%-20s\t%d\n", "channels", i); 133 + 134 + i = 0; 135 + down_read(&sess->tree_conns_lock); 136 + xa_for_each(&sess->tree_conns, id, tree_conn) { 137 + share_conf = tree_conn->share_conf; 138 + seq_printf(m, "%-20s\t%s\t%8d", "share", 139 + 
share_conf->name, tree_conn->id); 140 + if (test_share_config_flag(share_conf, KSMBD_SHARE_FLAG_PIPE)) 141 + seq_printf(m, " %s ", "pipe"); 142 + else 143 + seq_printf(m, " %s ", "disk"); 144 + seq_putc(m, '\n'); 145 + } 146 + up_read(&sess->tree_conns_lock); 147 + 148 + ksmbd_user_session_put(sess); 149 + return 0; 150 + } 151 + 152 + void ksmbd_proc_show_flag_names(struct seq_file *m, 153 + const struct ksmbd_const_name *table, 154 + int count, 155 + unsigned int flags) 156 + { 157 + int i; 158 + 159 + for (i = 0; i < count; i++) { 160 + if (table[i].const_value & flags) 161 + seq_printf(m, "0x%08x\t", table[i].const_value); 162 + } 163 + seq_putc(m, '\n'); 164 + } 165 + 166 + void ksmbd_proc_show_const_name(struct seq_file *m, 167 + const char *format, 168 + const struct ksmbd_const_name *table, 169 + int count, 170 + unsigned int const_value) 171 + { 172 + int i; 173 + 174 + for (i = 0; i < count; i++) { 175 + if (table[i].const_value & const_value) 176 + seq_printf(m, format, table[i].name); 177 + } 178 + seq_putc(m, '\n'); 179 + } 180 + 181 + static int create_proc_session(struct ksmbd_session *sess) 182 + { 183 + char name[30]; 184 + 185 + snprintf(name, sizeof(name), "sessions/%llu", sess->id); 186 + sess->proc_entry = ksmbd_proc_create(name, 187 + show_proc_session, sess); 188 + return 0; 189 + } 190 + 191 + static void delete_proc_session(struct ksmbd_session *sess) 192 + { 193 + if (sess->proc_entry) 194 + proc_remove(sess->proc_entry); 195 + } 196 + 197 + static int show_proc_sessions(struct seq_file *m, void *v) 198 + { 199 + struct ksmbd_session *session; 200 + struct channel *chan; 201 + int i; 202 + unsigned long id; 203 + 204 + seq_printf(m, "#%-40s %-15s %-10s %-10s\n", 205 + "<client>", "<user>", "<sess_id>", "<state>"); 206 + 207 + down_read(&sessions_table_lock); 208 + hash_for_each(sessions_table, i, session, hlist) { 209 + down_read(&session->chann_lock); 210 + xa_for_each(&session->ksmbd_chann_list, id, chan) { 211 + 
down_read(&chan->conn->session_lock); 212 + ksmbd_user_session_get(session); 213 + 214 + #if IS_ENABLED(CONFIG_IPV6) 215 + if (!chan->conn->inet_addr) 216 + seq_printf(m, " %-40pI6c", &chan->conn->inet6_addr); 217 + else 218 + #endif 219 + seq_printf(m, " %-40pI4", &chan->conn->inet_addr); 220 + seq_printf(m, " %-15s %-10llu %-10s\n", 221 + session_user_name(session), 222 + session->id, 223 + session_state_string(session)); 224 + 225 + ksmbd_user_session_put(session); 226 + up_read(&chan->conn->session_lock); 227 + } 228 + up_read(&session->chann_lock); 229 + } 230 + up_read(&sessions_table_lock); 231 + return 0; 232 + } 233 + 234 + int create_proc_sessions(void) 235 + { 236 + if (!ksmbd_proc_create("sessions/sessions", 237 + show_proc_sessions, NULL)) 238 + return -ENOMEM; 239 + return 0; 240 + } 241 + #else 242 + int create_proc_sessions(void) { return 0; } 243 + static int create_proc_session(struct ksmbd_session *sess) { return 0; } 244 + static void delete_proc_session(struct ksmbd_session *sess) {} 245 + #endif 246 + 33 247 static void free_channel_list(struct ksmbd_session *sess) 34 248 { 35 249 struct channel *chann; 36 250 unsigned long index; 37 251 252 + down_write(&sess->chann_lock); 38 253 xa_for_each(&sess->ksmbd_chann_list, index, chann) { 39 254 xa_erase(&sess->ksmbd_chann_list, index); 40 255 kfree(chann); 41 256 } 42 257 43 258 xa_destroy(&sess->ksmbd_chann_list); 259 + up_write(&sess->chann_lock); 44 260 } 45 261 46 262 static void __session_rpc_close(struct ksmbd_session *sess, ··· 381 159 if (!sess) 382 160 return; 383 161 162 + delete_proc_session(sess); 163 + 384 164 if (sess->user) 385 165 ksmbd_free_user(sess->user); 386 166 ··· 444 220 { 445 221 struct channel *chann; 446 222 223 + down_write(&sess->chann_lock); 447 224 chann = xa_erase(&sess->ksmbd_chann_list, (long)conn); 225 + up_write(&sess->chann_lock); 448 226 if (!chann) 449 227 return -ENOENT; 450 228 ··· 677 451 xa_init(&sess->ksmbd_chann_list); 678 452 
xa_init(&sess->rpc_handle_list); 679 453 sess->sequence_number = 1; 680 - rwlock_init(&sess->tree_conns_lock); 681 454 atomic_set(&sess->refcnt, 2); 455 + init_rwsem(&sess->tree_conns_lock); 682 456 init_rwsem(&sess->rpc_lock); 457 + init_rwsem(&sess->chann_lock); 683 458 684 459 ret = __init_smb2_session(sess); 685 460 if (ret) ··· 692 465 hash_add(sessions_table, &sess->hlist, sess->id); 693 466 up_write(&sessions_table_lock); 694 467 468 + create_proc_session(sess); 469 + ksmbd_counter_inc(KSMBD_COUNTER_SESSIONS); 695 470 return sess; 696 471 697 472 error:
+6 -2
fs/smb/server/mgmt/user_session.h
··· 41 41 42 42 bool sign; 43 43 bool enc; 44 - bool is_anonymous; 45 44 46 45 int state; 47 46 __u8 *Preauth_HashValue; ··· 48 49 char sess_key[CIFS_KEY_SIZE]; 49 50 50 51 struct hlist_node hlist; 52 + struct rw_semaphore chann_lock; 51 53 struct xarray ksmbd_chann_list; 52 54 struct xarray tree_conns; 53 55 struct ida tree_conn_ida; ··· 60 60 61 61 struct ksmbd_file_table file_table; 62 62 unsigned long last_active; 63 - rwlock_t tree_conns_lock; 63 + struct rw_semaphore tree_conns_lock; 64 64 65 + #ifdef CONFIG_PROC_FS 66 + struct proc_dir_entry *proc_entry; 67 + #endif 65 68 atomic_t refcnt; 66 69 struct rw_semaphore rpc_lock; 67 70 }; ··· 114 111 int ksmbd_session_rpc_method(struct ksmbd_session *sess, int id); 115 112 void ksmbd_user_session_get(struct ksmbd_session *sess); 116 113 void ksmbd_user_session_put(struct ksmbd_session *sess); 114 + int create_proc_sessions(void); 117 115 #endif /* __USER_SESSION_MANAGEMENT_H__ */
+30
fs/smb/server/misc.h
··· 6 6 #ifndef __KSMBD_MISC_H__ 7 7 #define __KSMBD_MISC_H__ 8 8 9 + #ifdef CONFIG_PROC_FS 10 + #include <linux/proc_fs.h> 11 + #endif 9 12 struct ksmbd_share_config; 10 13 struct nls_table; 11 14 struct kstat; ··· 37 34 struct timespec64 ksmbd_NTtimeToUnix(__le64 ntutc); 38 35 u64 ksmbd_UnixTimeToNT(struct timespec64 t); 39 36 long long ksmbd_systime(void); 37 + 38 + #ifdef CONFIG_PROC_FS 39 + struct ksmbd_const_name { 40 + unsigned int const_value; 41 + const char *name; 42 + }; 43 + 44 + void ksmbd_proc_init(void); 45 + void ksmbd_proc_cleanup(void); 46 + void ksmbd_proc_reset(void); 47 + struct proc_dir_entry *ksmbd_proc_create(const char *name, 48 + int (*show)(struct seq_file *m, void *v), 49 + void *v); 50 + void ksmbd_proc_show_flag_names(struct seq_file *m, 51 + const struct ksmbd_const_name *table, 52 + int count, 53 + unsigned int flags); 54 + void ksmbd_proc_show_const_name(struct seq_file *m, 55 + const char *format, 56 + const struct ksmbd_const_name *table, 57 + int count, 58 + unsigned int const_value); 59 + #else 60 + static inline void ksmbd_proc_init(void) {} 61 + static inline void ksmbd_proc_cleanup(void) {} 62 + static inline void ksmbd_proc_reset(void) {} 63 + #endif 40 64 #endif /* __KSMBD_MISC_H__ */
+134
fs/smb/server/proc.c
··· 1 + // SPDX-License-Identifier: GPL-2.0-or-later 2 + /* 3 + * Copyright (C) 2025, LG Electronics. 4 + * Author(s): Hyunchul Lee <hyc.lee@gmail.com> 5 + * Copyright (C) 2025, Samsung Electronics. 6 + * Author(s): Vedansh Bhardwaj <v.bhardwaj@samsung.com> 7 + */ 8 + 9 + #include <linux/module.h> 10 + #include <linux/proc_fs.h> 11 + #include <linux/seq_file.h> 12 + 13 + #include "misc.h" 14 + #include "server.h" 15 + #include "stats.h" 16 + #include "smb_common.h" 17 + #include "smb2pdu.h" 18 + 19 + static struct proc_dir_entry *ksmbd_proc_fs; 20 + struct ksmbd_counters ksmbd_counters; 21 + 22 + struct proc_dir_entry *ksmbd_proc_create(const char *name, 23 + int (*show)(struct seq_file *m, void *v), 24 + void *v) 25 + { 26 + return proc_create_single_data(name, 0400, ksmbd_proc_fs, 27 + show, v); 28 + } 29 + 30 + struct ksmbd_const_smb2_process_req { 31 + unsigned int const_value; 32 + const char *name; 33 + }; 34 + 35 + static const struct ksmbd_const_smb2_process_req smb2_process_req[KSMBD_COUNTER_MAX_REQS] = { 36 + {le16_to_cpu(SMB2_NEGOTIATE), "SMB2_NEGOTIATE"}, 37 + {le16_to_cpu(SMB2_SESSION_SETUP), "SMB2_SESSION_SETUP"}, 38 + {le16_to_cpu(SMB2_LOGOFF), "SMB2_LOGOFF"}, 39 + {le16_to_cpu(SMB2_TREE_CONNECT), "SMB2_TREE_CONNECT"}, 40 + {le16_to_cpu(SMB2_TREE_DISCONNECT), "SMB2_TREE_DISCONNECT"}, 41 + {le16_to_cpu(SMB2_CREATE), "SMB2_CREATE"}, 42 + {le16_to_cpu(SMB2_CLOSE), "SMB2_CLOSE"}, 43 + {le16_to_cpu(SMB2_FLUSH), "SMB2_FLUSH"}, 44 + {le16_to_cpu(SMB2_READ), "SMB2_READ"}, 45 + {le16_to_cpu(SMB2_WRITE), "SMB2_WRITE"}, 46 + {le16_to_cpu(SMB2_LOCK), "SMB2_LOCK"}, 47 + {le16_to_cpu(SMB2_IOCTL), "SMB2_IOCTL"}, 48 + {le16_to_cpu(SMB2_CANCEL), "SMB2_CANCEL"}, 49 + {le16_to_cpu(SMB2_ECHO), "SMB2_ECHO"}, 50 + {le16_to_cpu(SMB2_QUERY_DIRECTORY), "SMB2_QUERY_DIRECTORY"}, 51 + {le16_to_cpu(SMB2_CHANGE_NOTIFY), "SMB2_CHANGE_NOTIFY"}, 52 + {le16_to_cpu(SMB2_QUERY_INFO), "SMB2_QUERY_INFO"}, 53 + {le16_to_cpu(SMB2_SET_INFO), "SMB2_SET_INFO"}, 54 + 
{le16_to_cpu(SMB2_OPLOCK_BREAK), "SMB2_OPLOCK_BREAK"}, 55 + }; 56 + 57 + static int proc_show_ksmbd_stats(struct seq_file *m, void *v) 58 + { 59 + int i; 60 + 61 + seq_puts(m, "Server\n"); 62 + seq_printf(m, "name: %s\n", ksmbd_server_string()); 63 + seq_printf(m, "netbios: %s\n", ksmbd_netbios_name()); 64 + seq_printf(m, "work group: %s\n", ksmbd_work_group()); 65 + seq_printf(m, "min protocol: %s\n", ksmbd_get_protocol_string(server_conf.min_protocol)); 66 + seq_printf(m, "max protocol: %s\n", ksmbd_get_protocol_string(server_conf.max_protocol)); 67 + seq_printf(m, "flags: 0x%08x\n", server_conf.flags); 68 + seq_printf(m, "share_fake_fscaps: 0x%08x\n", 69 + server_conf.share_fake_fscaps); 70 + seq_printf(m, "sessions: %lld\n", 71 + ksmbd_counter_sum(KSMBD_COUNTER_SESSIONS)); 72 + seq_printf(m, "tree connects: %lld\n", 73 + ksmbd_counter_sum(KSMBD_COUNTER_TREE_CONNS)); 74 + seq_printf(m, "read bytes: %lld\n", 75 + ksmbd_counter_sum(KSMBD_COUNTER_READ_BYTES)); 76 + seq_printf(m, "written bytes: %lld\n", 77 + ksmbd_counter_sum(KSMBD_COUNTER_WRITE_BYTES)); 78 + 79 + seq_puts(m, "\nSMB2\n"); 80 + for (i = 0; i < KSMBD_COUNTER_MAX_REQS; i++) 81 + seq_printf(m, "%-20s:\t%lld\n", smb2_process_req[i].name, 82 + ksmbd_counter_sum(KSMBD_COUNTER_FIRST_REQ + i)); 83 + return 0; 84 + } 85 + 86 + void ksmbd_proc_cleanup(void) 87 + { 88 + int i; 89 + 90 + if (!ksmbd_proc_fs) 91 + return; 92 + 93 + proc_remove(ksmbd_proc_fs); 94 + 95 + for (i = 0; i < ARRAY_SIZE(ksmbd_counters.counters); i++) 96 + percpu_counter_destroy(&ksmbd_counters.counters[i]); 97 + 98 + ksmbd_proc_fs = NULL; 99 + } 100 + 101 + void ksmbd_proc_reset(void) 102 + { 103 + int i; 104 + 105 + for (i = 0; i < ARRAY_SIZE(ksmbd_counters.counters); i++) 106 + percpu_counter_set(&ksmbd_counters.counters[i], 0); 107 + } 108 + 109 + void ksmbd_proc_init(void) 110 + { 111 + int i; 112 + int retval; 113 + 114 + ksmbd_proc_fs = proc_mkdir("fs/ksmbd", NULL); 115 + if (!ksmbd_proc_fs) 116 + return; 117 + 118 + if 
(!proc_mkdir_mode("sessions", 0400, ksmbd_proc_fs)) 119 + goto err_out; 120 + 121 + for (i = 0; i < ARRAY_SIZE(ksmbd_counters.counters); i++) { 122 + retval = percpu_counter_init(&ksmbd_counters.counters[i], 0, GFP_KERNEL); 123 + if (retval) 124 + goto err_out; 125 + } 126 + 127 + if (!ksmbd_proc_create("server", proc_show_ksmbd_stats, NULL)) 128 + goto err_out; 129 + 130 + ksmbd_proc_reset(); 131 + return; 132 + err_out: 133 + ksmbd_proc_cleanup(); 134 + }
+12 -3
fs/smb/server/server.c
··· 21 21 #include "mgmt/user_session.h" 22 22 #include "crypto_ctx.h" 23 23 #include "auth.h" 24 + #include "misc.h" 25 + #include "stats.h" 24 26 25 27 int ksmbd_debug_types; 26 28 ··· 128 126 andx_again: 129 127 if (command >= conn->max_cmds) { 130 128 conn->ops->set_rsp_status(work, STATUS_INVALID_PARAMETER); 131 - return SERVER_HANDLER_CONTINUE; 129 + return SERVER_HANDLER_ABORT; 132 130 } 133 131 134 132 cmds = &conn->cmds[command]; 135 133 if (!cmds->proc) { 136 134 ksmbd_debug(SMB, "*** not implemented yet cmd = %x\n", command); 137 135 conn->ops->set_rsp_status(work, STATUS_NOT_IMPLEMENTED); 138 - return SERVER_HANDLER_CONTINUE; 136 + return SERVER_HANDLER_ABORT; 139 137 } 140 138 141 139 if (work->sess && conn->ops->is_sign_req(work, command)) { 142 140 ret = conn->ops->check_sign_req(work); 143 141 if (!ret) { 144 142 conn->ops->set_rsp_status(work, STATUS_ACCESS_DENIED); 145 - return SERVER_HANDLER_CONTINUE; 143 + return SERVER_HANDLER_ABORT; 146 144 } 147 145 } 148 146 149 147 ret = cmds->proc(work); 148 + if (conn->ops->inc_reqs) 149 + conn->ops->inc_reqs(command); 150 150 151 151 if (ret < 0) 152 152 ksmbd_debug(CONN, "Failed to process %u [%d]\n", command, ret); ··· 363 359 { 364 360 int ret; 365 361 362 + ksmbd_proc_reset(); 366 363 ret = ksmbd_conn_transport_init(); 367 364 if (ret) { 368 365 server_queue_ctrl_reset_work(); ··· 536 531 { 537 532 WRITE_ONCE(server_conf.state, SERVER_STATE_SHUTTING_DOWN); 538 533 534 + ksmbd_proc_cleanup(); 539 535 class_unregister(&ksmbd_control_class); 540 536 ksmbd_workqueue_destroy(); 541 537 ksmbd_ipc_release(); ··· 559 553 pr_err("Unable to register ksmbd-control class\n"); 560 554 return ret; 561 555 } 556 + 557 + ksmbd_proc_init(); 558 + create_proc_sessions(); 562 559 563 560 ksmbd_server_tcp_callbacks_init(); 564 561
+4
fs/smb/server/smb2ops.c
··· 11 11 #include "connection.h" 12 12 #include "smb_common.h" 13 13 #include "server.h" 14 + #include "stats.h" 14 15 15 16 static struct smb_version_values smb21_server_values = { 16 17 .version_string = SMB21_VERSION_STRING, ··· 122 121 123 122 static struct smb_version_ops smb2_0_server_ops = { 124 123 .get_cmd_val = get_smb2_cmd_val, 124 + .inc_reqs = ksmbd_counter_inc_reqs, 125 125 .init_rsp_hdr = init_smb2_rsp_hdr, 126 126 .set_rsp_status = set_smb2_rsp_status, 127 127 .allocate_rsp_buf = smb2_allocate_rsp_buf, ··· 136 134 137 135 static struct smb_version_ops smb3_0_server_ops = { 138 136 .get_cmd_val = get_smb2_cmd_val, 137 + .inc_reqs = ksmbd_counter_inc_reqs, 139 138 .init_rsp_hdr = init_smb2_rsp_hdr, 140 139 .set_rsp_status = set_smb2_rsp_status, 141 140 .allocate_rsp_buf = smb2_allocate_rsp_buf, ··· 155 152 156 153 static struct smb_version_ops smb3_11_server_ops = { 157 154 .get_cmd_val = get_smb2_cmd_val, 155 + .inc_reqs = ksmbd_counter_inc_reqs, 158 156 .init_rsp_hdr = init_smb2_rsp_hdr, 159 157 .set_rsp_status = set_smb2_rsp_status, 160 158 .allocate_rsp_buf = smb2_allocate_rsp_buf,
+17 -6
fs/smb/server/smb2pdu.c
··· 39 39 #include "mgmt/user_session.h" 40 40 #include "mgmt/ksmbd_ida.h" 41 41 #include "ndr.h" 42 + #include "stats.h" 42 43 #include "transport_tcp.h" 43 44 44 45 static void __wbuf(struct ksmbd_work *work, void **req, void **rsp) ··· 80 79 81 80 struct channel *lookup_chann_list(struct ksmbd_session *sess, struct ksmbd_conn *conn) 82 81 { 83 - return xa_load(&sess->ksmbd_chann_list, (long)conn); 82 + struct channel *chann; 83 + 84 + down_read(&sess->chann_lock); 85 + chann = xa_load(&sess->ksmbd_chann_list, (long)conn); 86 + up_read(&sess->chann_lock); 87 + 88 + return chann; 84 89 } 85 90 86 91 /** ··· 1565 1558 return -ENOMEM; 1566 1559 1567 1560 chann->conn = conn; 1561 + down_write(&sess->chann_lock); 1568 1562 old = xa_store(&sess->ksmbd_chann_list, (long)conn, chann, 1569 1563 KSMBD_DEFAULT_GFP); 1564 + up_write(&sess->chann_lock); 1570 1565 if (xa_is_err(old)) { 1571 1566 kfree(chann); 1572 1567 return xa_err(old); ··· 1660 1651 return -ENOMEM; 1661 1652 1662 1653 chann->conn = conn; 1654 + down_write(&sess->chann_lock); 1663 1655 old = xa_store(&sess->ksmbd_chann_list, (long)conn, 1664 1656 chann, KSMBD_DEFAULT_GFP); 1657 + up_write(&sess->chann_lock); 1665 1658 if (xa_is_err(old)) { 1666 1659 kfree(chann); 1667 1660 return xa_err(old); ··· 2037 2026 if (conn->posix_ext_supported) 2038 2027 status.tree_conn->posix_extensions = true; 2039 2028 2040 - write_lock(&sess->tree_conns_lock); 2029 + down_write(&sess->tree_conns_lock); 2041 2030 status.tree_conn->t_state = TREE_CONNECTED; 2042 - write_unlock(&sess->tree_conns_lock); 2031 + up_write(&sess->tree_conns_lock); 2043 2032 rsp->StructureSize = cpu_to_le16(16); 2044 2033 out_err1: 2045 2034 if (server_conf.flags & KSMBD_GLOBAL_FLAG_DURABLE_HANDLE && share && ··· 2193 2182 2194 2183 ksmbd_close_tree_conn_fds(work); 2195 2184 2196 - write_lock(&sess->tree_conns_lock); 2185 + down_write(&sess->tree_conns_lock); 2197 2186 if (tcon->t_state == TREE_DISCONNECTED) { 2198 - 
write_unlock(&sess->tree_conns_lock); 2187 + up_write(&sess->tree_conns_lock); 2199 2188 rsp->hdr.Status = STATUS_NETWORK_NAME_DELETED; 2200 2189 err = -ENOENT; 2201 2190 goto err_out; 2202 2191 } 2203 2192 2204 2193 tcon->t_state = TREE_DISCONNECTED; 2205 - write_unlock(&sess->tree_conns_lock); 2194 + up_write(&sess->tree_conns_lock); 2206 2195 2207 2196 err = ksmbd_tree_conn_disconnect(sess, tcon); 2208 2197 if (err) {
+24
fs/smb/server/smb_common.c
··· 98 98 return SMB311_PROT; 99 99 } 100 100 101 + static const struct { 102 + int version; 103 + const char *string; 104 + } version_strings[] = { 105 + #ifdef CONFIG_SMB_INSECURE_SERVER 106 + {SMB1_PROT, SMB1_VERSION_STRING}, 107 + #endif 108 + {SMB2_PROT, SMB20_VERSION_STRING}, 109 + {SMB21_PROT, SMB21_VERSION_STRING}, 110 + {SMB30_PROT, SMB30_VERSION_STRING}, 111 + {SMB302_PROT, SMB302_VERSION_STRING}, 112 + {SMB311_PROT, SMB311_VERSION_STRING}, 113 + }; 114 + 115 + const char *ksmbd_get_protocol_string(int version) 116 + { 117 + int i; 118 + 119 + for (i = 0; i < ARRAY_SIZE(version_strings); i++) { 120 + if (version_strings[i].version == version) 121 + return version_strings[i].string; 122 + } 123 + return ""; 124 + } 101 125 int ksmbd_lookup_protocol_idx(char *str) 102 126 { 103 127 int offt = ARRAY_SIZE(smb1_protos) - 1;
+2
fs/smb/server/smb_common.h
··· 143 143 144 144 struct smb_version_ops { 145 145 u16 (*get_cmd_val)(struct ksmbd_work *swork); 146 + void (*inc_reqs)(unsigned int cmd); 146 147 int (*init_rsp_hdr)(struct ksmbd_work *swork); 147 148 void (*set_rsp_status)(struct ksmbd_work *swork, __le32 err); 148 149 int (*allocate_rsp_buf)(struct ksmbd_work *work); ··· 166 165 167 166 int ksmbd_min_protocol(void); 168 167 int ksmbd_max_protocol(void); 168 + const char *ksmbd_get_protocol_string(int version); 169 169 170 170 int ksmbd_lookup_protocol_idx(char *str); 171 171
+73
fs/smb/server/stats.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0-or-later */ 2 + /* 3 + * Copyright (C) 2025, LG Electronics. 4 + * Author(s): Hyunchul Lee <hyc.lee@gmail.com> 5 + * Copyright (C) 2025, Samsung Electronics. 6 + * Author(s): Vedansh Bhardwaj <v.bhardwaj@samsung.com> 7 + */ 8 + 9 + #ifndef __KSMBD_STATS_H__ 10 + #define __KSMBD_STATS_H__ 11 + 12 + #define KSMBD_COUNTER_MAX_REQS 19 13 + 14 + enum { 15 + KSMBD_COUNTER_SESSIONS = 0, 16 + KSMBD_COUNTER_TREE_CONNS, 17 + KSMBD_COUNTER_REQUESTS, 18 + KSMBD_COUNTER_READ_BYTES, 19 + KSMBD_COUNTER_WRITE_BYTES, 20 + KSMBD_COUNTER_FIRST_REQ, 21 + KSMBD_COUNTER_LAST_REQ = KSMBD_COUNTER_FIRST_REQ + 22 + KSMBD_COUNTER_MAX_REQS - 1, 23 + KSMBD_COUNTER_MAX, 24 + }; 25 + 26 + #ifdef CONFIG_PROC_FS 27 + extern struct ksmbd_counters ksmbd_counters; 28 + 29 + struct ksmbd_counters { 30 + struct percpu_counter counters[KSMBD_COUNTER_MAX]; 31 + }; 32 + 33 + static inline void ksmbd_counter_inc(int type) 34 + { 35 + percpu_counter_inc(&ksmbd_counters.counters[type]); 36 + } 37 + 38 + static inline void ksmbd_counter_dec(int type) 39 + { 40 + percpu_counter_dec(&ksmbd_counters.counters[type]); 41 + } 42 + 43 + static inline void ksmbd_counter_add(int type, s64 value) 44 + { 45 + percpu_counter_add(&ksmbd_counters.counters[type], value); 46 + } 47 + 48 + static inline void ksmbd_counter_sub(int type, s64 value) 49 + { 50 + percpu_counter_sub(&ksmbd_counters.counters[type], value); 51 + } 52 + 53 + static inline void ksmbd_counter_inc_reqs(unsigned int cmd) 54 + { 55 + if (cmd < KSMBD_COUNTER_MAX_REQS) 56 + percpu_counter_inc(&ksmbd_counters.counters[KSMBD_COUNTER_FIRST_REQ + cmd]); 57 + } 58 + 59 + static inline s64 ksmbd_counter_sum(int type) 60 + { 61 + return percpu_counter_sum_positive(&ksmbd_counters.counters[type]); 62 + } 63 + #else 64 + 65 + static inline void ksmbd_counter_inc(int type) {} 66 + static inline void ksmbd_counter_dec(int type) {} 67 + static inline void ksmbd_counter_add(int type, s64 value) {} 68 + static inline void 
ksmbd_counter_sub(int type, s64 value) {} 69 + static inline void ksmbd_counter_inc_reqs(unsigned int cmd) {} 70 + static inline s64 ksmbd_counter_sum(int type) { return 0; } 71 + #endif 72 + 73 + #endif
+258 -43
fs/smb/server/transport_rdma.c
··· 61 61 * Those may change after a SMB_DIRECT negotiation 62 62 */ 63 63 64 - /* Set 445 port to SMB Direct port by default */ 65 - static int smb_direct_port = SMB_DIRECT_PORT_INFINIBAND; 66 - 67 64 /* The local peer's maximum number of credits to grant to the peer */ 68 65 static int smb_direct_receive_credit_max = 255; 69 66 ··· 70 73 /* The maximum single message size can be sent to remote peer */ 71 74 static int smb_direct_max_send_size = 1364; 72 75 73 - /* The maximum fragmented upper-layer payload receive size supported */ 74 - static int smb_direct_max_fragmented_recv_size = 1024 * 1024; 76 + /* 77 + * The maximum fragmented upper-layer payload receive size supported 78 + * 79 + * Assume max_payload_per_credit is 80 + * smb_direct_receive_credit_max - 24 = 1340 81 + * 82 + * The maximum number would be 83 + * smb_direct_receive_credit_max * max_payload_per_credit 84 + * 85 + * 1340 * 255 = 341700 (0x536C4) 86 + * 87 + * The minimum value from the spec is 131072 (0x20000) 88 + * 89 + * For now we use the logic we used before: 90 + * (1364 * 255) / 2 = 173910 (0x2A756) 91 + */ 92 + static int smb_direct_max_fragmented_recv_size = (1364 * 255) / 2; 75 93 76 94 /* The maximum single-message size which can be received */ 77 95 static int smb_direct_max_receive_size = 1364; ··· 102 90 }; 103 91 104 92 static struct smb_direct_listener { 93 + int port; 105 94 struct rdma_cm_id *cm_id; 106 - } smb_direct_listener; 95 + } smb_direct_ib_listener, smb_direct_iw_listener; 107 96 108 97 static struct workqueue_struct *smb_direct_wq; 109 98 ··· 234 221 * in order to notice the broken connection. 
235 222 */ 236 223 wake_up_all(&sc->status_wait); 224 + wake_up_all(&sc->send_io.bcredits.wait_queue); 237 225 wake_up_all(&sc->send_io.lcredits.wait_queue); 238 226 wake_up_all(&sc->send_io.credits.wait_queue); 239 227 wake_up_all(&sc->send_io.pending.zero_wait_queue); ··· 658 644 struct smbdirect_data_transfer *data_transfer = 659 645 (struct smbdirect_data_transfer *)recvmsg->packet; 660 646 u32 remaining_data_length, data_offset, data_length; 647 + int current_recv_credits; 661 648 u16 old_recv_credit_target; 662 649 663 650 if (wc->byte_len < ··· 697 682 } 698 683 699 684 atomic_dec(&sc->recv_io.posted.count); 700 - atomic_dec(&sc->recv_io.credits.count); 685 + current_recv_credits = atomic_dec_return(&sc->recv_io.credits.count); 701 686 702 687 old_recv_credit_target = sc->recv_io.credits.target; 703 688 sc->recv_io.credits.target = ··· 717 702 wake_up(&sc->send_io.credits.wait_queue); 718 703 719 704 if (data_length) { 720 - if (sc->recv_io.credits.target > old_recv_credit_target) 705 + if (current_recv_credits <= (sc->recv_io.credits.target / 4) || 706 + sc->recv_io.credits.target > old_recv_credit_target) 721 707 queue_work(sc->workqueue, &sc->recv_io.posted.refill_work); 722 708 723 709 enqueue_reassembly(sc, recvmsg, (int)data_length); ··· 1044 1028 } 1045 1029 } 1046 1030 1031 + atomic_add(credits, &sc->recv_io.credits.available); 1032 + 1033 + /* 1034 + * If the last send credit is waiting for credits 1035 + * it can grant we need to wake it up 1036 + */ 1037 + if (credits && 1038 + atomic_read(&sc->send_io.bcredits.count) == 0 && 1039 + atomic_read(&sc->send_io.credits.count) == 0) 1040 + wake_up(&sc->send_io.credits.wait_queue); 1041 + 1047 1042 if (credits) 1048 1043 queue_work(sc->workqueue, &sc->idle.immediate_work); 1049 1044 } ··· 1072 1045 ib_wc_status_msg(wc->status), wc->status, 1073 1046 wc->opcode); 1074 1047 1048 + if (unlikely(!(sendmsg->wr.send_flags & IB_SEND_SIGNALED))) { 1049 + /* 1050 + * This happens when smbdirect_send_io is a 
sibling 1051 + * before the final message, it is signaled on 1052 + * error anyway, so we need to skip 1053 + * smbdirect_connection_free_send_io here, 1054 + * otherwise is will destroy the memory 1055 + * of the siblings too, which will cause 1056 + * use after free problems for the others 1057 + * triggered from ib_drain_qp(). 1058 + */ 1059 + if (wc->status != IB_WC_SUCCESS) 1060 + goto skip_free; 1061 + 1062 + /* 1063 + * This should not happen! 1064 + * But we better just close the 1065 + * connection... 1066 + */ 1067 + pr_err("unexpected send completion wc->status=%s (%d) wc->opcode=%d\n", 1068 + ib_wc_status_msg(wc->status), wc->status, wc->opcode); 1069 + smb_direct_disconnect_rdma_connection(sc); 1070 + return; 1071 + } 1072 + 1075 1073 /* 1076 1074 * Free possible siblings and then the main send_io 1077 1075 */ ··· 1110 1058 lcredits += 1; 1111 1059 1112 1060 if (wc->status != IB_WC_SUCCESS || wc->opcode != IB_WC_SEND) { 1061 + skip_free: 1113 1062 pr_err("Send error. status='%s (%d)', opcode=%d\n", 1114 1063 ib_wc_status_msg(wc->status), wc->status, 1115 1064 wc->opcode); ··· 1127 1074 1128 1075 static int manage_credits_prior_sending(struct smbdirect_socket *sc) 1129 1076 { 1077 + int missing; 1078 + int available; 1130 1079 int new_credits; 1131 1080 1132 1081 if (atomic_read(&sc->recv_io.credits.count) >= sc->recv_io.credits.target) 1133 1082 return 0; 1134 1083 1135 - new_credits = atomic_read(&sc->recv_io.posted.count); 1136 - if (new_credits == 0) 1084 + missing = (int)sc->recv_io.credits.target - atomic_read(&sc->recv_io.credits.count); 1085 + available = atomic_xchg(&sc->recv_io.credits.available, 0); 1086 + new_credits = (u16)min3(U16_MAX, missing, available); 1087 + if (new_credits <= 0) { 1088 + /* 1089 + * If credits are available, but not granted 1090 + * we need to re-add them again. 
1091 + */ 1092 + if (available) 1093 + atomic_add(available, &sc->recv_io.credits.available); 1137 1094 return 0; 1095 + } 1138 1096 1139 - new_credits -= atomic_read(&sc->recv_io.credits.count); 1140 - if (new_credits <= 0) 1141 - return 0; 1097 + if (new_credits < available) { 1098 + /* 1099 + * Readd the remaining available again. 1100 + */ 1101 + available -= new_credits; 1102 + atomic_add(available, &sc->recv_io.credits.available); 1103 + } 1142 1104 1105 + /* 1106 + * Remember we granted the credits 1107 + */ 1143 1108 atomic_add(new_credits, &sc->recv_io.credits.count); 1144 1109 return new_credits; 1145 1110 } ··· 1201 1130 send_ctx->wr_cnt = 0; 1202 1131 send_ctx->need_invalidate_rkey = need_invalidate_rkey; 1203 1132 send_ctx->remote_key = remote_key; 1133 + send_ctx->credit = 0; 1204 1134 } 1205 1135 1206 1136 static int smb_direct_flush_send_list(struct smbdirect_socket *sc, ··· 1209 1137 bool is_last) 1210 1138 { 1211 1139 struct smbdirect_send_io *first, *last; 1212 - int ret; 1140 + int ret = 0; 1213 1141 1214 1142 if (list_empty(&send_ctx->msg_list)) 1215 - return 0; 1143 + goto release_credit; 1216 1144 1217 1145 first = list_first_entry(&send_ctx->msg_list, 1218 1146 struct smbdirect_send_io, ··· 1254 1182 smb_direct_free_sendmsg(sc, last); 1255 1183 } 1256 1184 1185 + release_credit: 1186 + if (is_last && !ret && send_ctx->credit) { 1187 + atomic_add(send_ctx->credit, &sc->send_io.bcredits.count); 1188 + send_ctx->credit = 0; 1189 + wake_up(&sc->send_io.bcredits.wait_queue); 1190 + } 1191 + 1257 1192 return ret; 1258 1193 } 1259 1194 ··· 1284 1205 else if (ret < 0) 1285 1206 return ret; 1286 1207 } while (true); 1208 + } 1209 + 1210 + static int wait_for_send_bcredit(struct smbdirect_socket *sc, 1211 + struct smbdirect_send_batch *send_ctx) 1212 + { 1213 + int ret; 1214 + 1215 + if (send_ctx->credit) 1216 + return 0; 1217 + 1218 + ret = wait_for_credits(sc, 1219 + &sc->send_io.bcredits.wait_queue, 1220 + &sc->send_io.bcredits.count, 1221 + 1); 
1222 + if (ret) 1223 + return ret; 1224 + 1225 + send_ctx->credit = 1; 1226 + return 0; 1287 1227 } 1288 1228 1289 1229 static int wait_for_send_lcredit(struct smbdirect_socket *sc, ··· 1354 1256 1355 1257 static int smb_direct_create_header(struct smbdirect_socket *sc, 1356 1258 int size, int remaining_data_length, 1259 + int new_credits, 1357 1260 struct smbdirect_send_io **sendmsg_out) 1358 1261 { 1359 1262 struct smbdirect_socket_parameters *sp = &sc->parameters; ··· 1370 1271 /* Fill in the packet header */ 1371 1272 packet = (struct smbdirect_data_transfer *)sendmsg->packet; 1372 1273 packet->credits_requested = cpu_to_le16(sp->send_credit_target); 1373 - packet->credits_granted = cpu_to_le16(manage_credits_prior_sending(sc)); 1274 + packet->credits_granted = cpu_to_le16(new_credits); 1374 1275 1375 1276 packet->flags = 0; 1376 1277 if (manage_keep_alive_before_sending(sc)) ··· 1507 1408 struct smbdirect_send_io *msg; 1508 1409 int data_length; 1509 1410 struct scatterlist sg[SMBDIRECT_SEND_IO_MAX_SGE - 1]; 1411 + struct smbdirect_send_batch _send_ctx; 1412 + int new_credits; 1413 + 1414 + if (!send_ctx) { 1415 + smb_direct_send_ctx_init(&_send_ctx, false, 0); 1416 + send_ctx = &_send_ctx; 1417 + } 1418 + 1419 + ret = wait_for_send_bcredit(sc, send_ctx); 1420 + if (ret) 1421 + goto bcredit_failed; 1510 1422 1511 1423 ret = wait_for_send_lcredit(sc, send_ctx); 1512 1424 if (ret) ··· 1527 1417 if (ret) 1528 1418 goto credit_failed; 1529 1419 1420 + new_credits = manage_credits_prior_sending(sc); 1421 + if (new_credits == 0 && 1422 + atomic_read(&sc->send_io.credits.count) == 0 && 1423 + atomic_read(&sc->recv_io.credits.count) == 0) { 1424 + queue_work(sc->workqueue, &sc->recv_io.posted.refill_work); 1425 + ret = wait_event_interruptible(sc->send_io.credits.wait_queue, 1426 + atomic_read(&sc->send_io.credits.count) >= 1 || 1427 + atomic_read(&sc->recv_io.credits.available) >= 1 || 1428 + sc->status != SMBDIRECT_SOCKET_CONNECTED); 1429 + if (sc->status != 
SMBDIRECT_SOCKET_CONNECTED) 1430 + ret = -ENOTCONN; 1431 + if (ret < 0) 1432 + goto credit_failed; 1433 + 1434 + new_credits = manage_credits_prior_sending(sc); 1435 + } 1436 + 1530 1437 data_length = 0; 1531 1438 for (i = 0; i < niov; i++) 1532 1439 data_length += iov[i].iov_len; 1533 1440 1534 1441 ret = smb_direct_create_header(sc, data_length, remaining_data_length, 1535 - &msg); 1442 + new_credits, &msg); 1536 1443 if (ret) 1537 1444 goto header_failed; 1538 1445 ··· 1587 1460 ret = post_sendmsg(sc, send_ctx, msg); 1588 1461 if (ret) 1589 1462 goto err; 1463 + 1464 + if (send_ctx == &_send_ctx) { 1465 + ret = smb_direct_flush_send_list(sc, send_ctx, true); 1466 + if (ret) 1467 + goto err; 1468 + } 1469 + 1590 1470 return 0; 1591 1471 err: 1592 1472 smb_direct_free_sendmsg(sc, msg); ··· 1602 1468 credit_failed: 1603 1469 atomic_inc(&sc->send_io.lcredits.count); 1604 1470 lcredit_failed: 1471 + atomic_add(send_ctx->credit, &sc->send_io.bcredits.count); 1472 + send_ctx->credit = 0; 1473 + bcredit_failed: 1605 1474 return ret; 1606 1475 } 1607 1476 ··· 2076 1939 resp->max_fragmented_size = 2077 1940 cpu_to_le32(sp->max_fragmented_recv_size); 2078 1941 1942 + atomic_set(&sc->send_io.bcredits.count, 1); 2079 1943 sc->recv_io.expected = SMBDIRECT_EXPECT_DATA_TRANSFER; 2080 1944 sc->status = SMBDIRECT_SOCKET_CONNECTED; 2081 1945 } ··· 2546 2408 le32_to_cpu(req->max_receive_size)); 2547 2409 sp->max_fragmented_send_size = 2548 2410 le32_to_cpu(req->max_fragmented_size); 2411 + /* 2412 + * The maximum fragmented upper-layer payload receive size supported 2413 + * 2414 + * Assume max_payload_per_credit is 2415 + * smb_direct_receive_credit_max - 24 = 1340 2416 + * 2417 + * The maximum number would be 2418 + * smb_direct_receive_credit_max * max_payload_per_credit 2419 + * 2420 + * 1340 * 255 = 341700 (0x536C4) 2421 + * 2422 + * The minimum value from the spec is 131072 (0x20000) 2423 + * 2424 + * For now we use the logic we used before: 2425 + * (1364 * 255) / 2 = 173910 
(0x2A756) 2426 + * 2427 + * We need to adjust this here in case the peer 2428 + * lowered sp->max_recv_size. 2429 + * 2430 + * TODO: instead of adjusting max_fragmented_recv_size 2431 + * we should adjust the number of available buffers, 2432 + * but for now we keep the current logic. 2433 + */ 2549 2434 sp->max_fragmented_recv_size = 2550 2435 (sp->recv_credit_max * sp->max_recv_size) / 2; 2551 2436 sc->recv_io.credits.target = le16_to_cpu(req->credits_requested); ··· 2656 2495 static int smb_direct_handle_connect_request(struct rdma_cm_id *new_cm_id, 2657 2496 struct rdma_cm_event *event) 2658 2497 { 2498 + struct smb_direct_listener *listener = new_cm_id->context; 2659 2499 struct smb_direct_transport *t; 2660 2500 struct smbdirect_socket *sc; 2661 2501 struct smbdirect_socket_parameters *sp; ··· 2745 2583 2746 2584 handler = kthread_run(ksmbd_conn_handler_loop, 2747 2585 KSMBD_TRANS(t)->conn, "ksmbd:r%u", 2748 - smb_direct_port); 2586 + listener->port); 2749 2587 if (IS_ERR(handler)) { 2750 2588 ret = PTR_ERR(handler); 2751 2589 pr_err("Can't start thread\n"); ··· 2782 2620 return 0; 2783 2621 } 2784 2622 2785 - static int smb_direct_listen(int port) 2623 + static int smb_direct_listen(struct smb_direct_listener *listener, 2624 + int port) 2786 2625 { 2787 2626 int ret; 2788 2627 struct rdma_cm_id *cm_id; 2628 + u8 node_type = RDMA_NODE_UNSPECIFIED; 2789 2629 struct sockaddr_in sin = { 2790 2630 .sin_family = AF_INET, 2791 2631 .sin_addr.s_addr = htonl(INADDR_ANY), 2792 2632 .sin_port = htons(port), 2793 2633 }; 2794 2634 2635 + switch (port) { 2636 + case SMB_DIRECT_PORT_IWARP: 2637 + /* 2638 + * only allow iWarp devices 2639 + * for port 5445. 2640 + */ 2641 + node_type = RDMA_NODE_RNIC; 2642 + break; 2643 + case SMB_DIRECT_PORT_INFINIBAND: 2644 + /* 2645 + * only allow InfiniBand, RoCEv1 or RoCEv2 2646 + * devices for port 445. 
2647 + * 2648 + * (Basically don't allow iWarp devices) 2649 + */ 2650 + node_type = RDMA_NODE_IB_CA; 2651 + break; 2652 + default: 2653 + pr_err("unsupported smbdirect port=%d!\n", port); 2654 + return -ENODEV; 2655 + } 2656 + 2795 2657 cm_id = rdma_create_id(&init_net, smb_direct_listen_handler, 2796 - &smb_direct_listener, RDMA_PS_TCP, IB_QPT_RC); 2658 + listener, RDMA_PS_TCP, IB_QPT_RC); 2797 2659 if (IS_ERR(cm_id)) { 2798 2660 pr_err("Can't create cm id: %ld\n", PTR_ERR(cm_id)); 2799 2661 return PTR_ERR(cm_id); 2662 + } 2663 + 2664 + ret = rdma_restrict_node_type(cm_id, node_type); 2665 + if (ret) { 2666 + pr_err("rdma_restrict_node_type(%u) failed %d\n", 2667 + node_type, ret); 2668 + goto err; 2800 2669 } 2801 2670 2802 2671 ret = rdma_bind_addr(cm_id, (struct sockaddr *)&sin); ··· 2836 2643 goto err; 2837 2644 } 2838 2645 2839 - smb_direct_listener.cm_id = cm_id; 2840 - 2841 2646 ret = rdma_listen(cm_id, 10); 2842 2647 if (ret) { 2843 2648 pr_err("Can't listen: %d\n", ret); 2844 2649 goto err; 2845 2650 } 2651 + 2652 + listener->port = port; 2653 + listener->cm_id = cm_id; 2654 + 2846 2655 return 0; 2847 2656 err: 2848 - smb_direct_listener.cm_id = NULL; 2657 + listener->port = 0; 2658 + listener->cm_id = NULL; 2849 2659 rdma_destroy_id(cm_id); 2850 2660 return ret; 2851 2661 } ··· 2856 2660 static int smb_direct_ib_client_add(struct ib_device *ib_dev) 2857 2661 { 2858 2662 struct smb_direct_device *smb_dev; 2859 - 2860 - /* Set 5445 port if device type is iWARP(No IB) */ 2861 - if (ib_dev->node_type != RDMA_NODE_IB_CA) 2862 - smb_direct_port = SMB_DIRECT_PORT_IWARP; 2863 2663 2864 2664 if (!rdma_frwr_is_supported(&ib_dev->attrs)) 2865 2665 return 0; ··· 2899 2707 { 2900 2708 int ret; 2901 2709 2902 - smb_direct_port = SMB_DIRECT_PORT_INFINIBAND; 2903 - smb_direct_listener.cm_id = NULL; 2710 + smb_direct_ib_listener = smb_direct_iw_listener = (struct smb_direct_listener) { 2711 + .cm_id = NULL, 2712 + }; 2904 2713 2905 2714 ret = 
ib_register_client(&smb_direct_ib_client); 2906 2715 if (ret) { ··· 2917 2724 smb_direct_wq = alloc_workqueue("ksmbd-smb_direct-wq", 2918 2725 WQ_HIGHPRI | WQ_MEM_RECLAIM | WQ_PERCPU, 2919 2726 0); 2920 - if (!smb_direct_wq) 2921 - return -ENOMEM; 2922 - 2923 - ret = smb_direct_listen(smb_direct_port); 2924 - if (ret) { 2925 - destroy_workqueue(smb_direct_wq); 2926 - smb_direct_wq = NULL; 2927 - pr_err("Can't listen: %d\n", ret); 2928 - return ret; 2727 + if (!smb_direct_wq) { 2728 + ret = -ENOMEM; 2729 + goto err; 2929 2730 } 2930 2731 2931 - ksmbd_debug(RDMA, "init RDMA listener. cm_id=%p\n", 2932 - smb_direct_listener.cm_id); 2732 + ret = smb_direct_listen(&smb_direct_ib_listener, 2733 + SMB_DIRECT_PORT_INFINIBAND); 2734 + if (ret) { 2735 + pr_err("Can't listen on InfiniBand/RoCEv1/RoCEv2: %d\n", ret); 2736 + goto err; 2737 + } 2738 + 2739 + ksmbd_debug(RDMA, "InfiniBand/RoCEv1/RoCEv2 RDMA listener. cm_id=%p\n", 2740 + smb_direct_ib_listener.cm_id); 2741 + 2742 + ret = smb_direct_listen(&smb_direct_iw_listener, 2743 + SMB_DIRECT_PORT_IWARP); 2744 + if (ret) { 2745 + pr_err("Can't listen on iWarp: %d\n", ret); 2746 + goto err; 2747 + } 2748 + 2749 + ksmbd_debug(RDMA, "iWarp RDMA listener. 
cm_id=%p\n", 2750 + smb_direct_iw_listener.cm_id); 2751 + 2933 2752 return 0; 2753 + err: 2754 + ksmbd_rdma_stop_listening(); 2755 + ksmbd_rdma_destroy(); 2756 + return ret; 2934 2757 } 2935 2758 2936 2759 void ksmbd_rdma_stop_listening(void) 2937 2760 { 2938 - if (!smb_direct_listener.cm_id) 2761 + if (!smb_direct_ib_listener.cm_id && !smb_direct_iw_listener.cm_id) 2939 2762 return; 2940 2763 2941 2764 ib_unregister_client(&smb_direct_ib_client); 2942 - rdma_destroy_id(smb_direct_listener.cm_id); 2943 2765 2944 - smb_direct_listener.cm_id = NULL; 2766 + if (smb_direct_ib_listener.cm_id) 2767 + rdma_destroy_id(smb_direct_ib_listener.cm_id); 2768 + if (smb_direct_iw_listener.cm_id) 2769 + rdma_destroy_id(smb_direct_iw_listener.cm_id); 2770 + 2771 + smb_direct_ib_listener = smb_direct_iw_listener = (struct smb_direct_listener) { 2772 + .cm_id = NULL, 2773 + }; 2945 2774 } 2946 2775 2947 2776 void ksmbd_rdma_destroy(void)
+2 -1
fs/smb/server/transport_tcp.c
··· 40 40 41 41 static void tcp_stop_kthread(struct task_struct *kthread); 42 42 static struct interface *alloc_iface(char *ifname); 43 + static void ksmbd_tcp_disconnect(struct ksmbd_transport *t); 43 44 44 45 #define KSMBD_TRANS(t) (&(t)->transport) 45 46 #define TCP_TRANS(t) ((struct tcp_transport *)container_of(t, \ ··· 203 202 if (IS_ERR(handler)) { 204 203 pr_err("cannot start conn thread\n"); 205 204 rc = PTR_ERR(handler); 206 - free_transport(t); 205 + ksmbd_tcp_disconnect(KSMBD_TRANS(t)); 207 206 } 208 207 return rc; 209 208 }
+3
fs/smb/server/vfs.c
··· 31 31 #include "ndr.h" 32 32 #include "auth.h" 33 33 #include "misc.h" 34 + #include "stats.h" 34 35 35 36 #include "smb_common.h" 36 37 #include "mgmt/share_config.h" ··· 381 380 } 382 381 383 382 filp->f_pos = *pos; 383 + ksmbd_counter_add(KSMBD_COUNTER_READ_BYTES, (s64)nbytes); 384 384 return nbytes; 385 385 } 386 386 ··· 519 517 pr_err("fsync failed for filename = %pD, err = %d\n", 520 518 fp->filp, err); 521 519 } 520 + ksmbd_counter_add(KSMBD_COUNTER_WRITE_BYTES, (s64)*written); 522 521 523 522 out: 524 523 return err;
+94
fs/smb/server/vfs_cache.c
··· 16 16 #include "oplock.h" 17 17 #include "vfs.h" 18 18 #include "connection.h" 19 + #include "misc.h" 19 20 #include "mgmt/tree_connect.h" 20 21 #include "mgmt/user_session.h" 21 22 #include "smb_common.h" 22 23 #include "server.h" 24 + #include "smb2pdu.h" 23 25 24 26 #define S_DEL_PENDING 1 25 27 #define S_DEL_ON_CLS 2 ··· 35 33 static struct ksmbd_file_table global_ft; 36 34 static atomic_long_t fd_limit; 37 35 static struct kmem_cache *filp_cache; 36 + 37 + #define OPLOCK_NONE 0 38 + #define OPLOCK_EXCLUSIVE 1 39 + #define OPLOCK_BATCH 2 40 + #define OPLOCK_READ 3 /* level 2 oplock */ 41 + 42 + #ifdef CONFIG_PROC_FS 43 + 44 + static const struct ksmbd_const_name ksmbd_lease_const_names[] = { 45 + {le32_to_cpu(SMB2_LEASE_NONE_LE), "LEASE_NONE"}, 46 + {le32_to_cpu(SMB2_LEASE_READ_CACHING_LE), "LEASE_R"}, 47 + {le32_to_cpu(SMB2_LEASE_HANDLE_CACHING_LE), "LEASE_H"}, 48 + {le32_to_cpu(SMB2_LEASE_WRITE_CACHING_LE), "LEASE_W"}, 49 + {le32_to_cpu(SMB2_LEASE_READ_CACHING_LE | 50 + SMB2_LEASE_HANDLE_CACHING_LE), "LEASE_RH"}, 51 + {le32_to_cpu(SMB2_LEASE_READ_CACHING_LE | 52 + SMB2_LEASE_WRITE_CACHING_LE), "LEASE_RW"}, 53 + {le32_to_cpu(SMB2_LEASE_HANDLE_CACHING_LE | 54 + SMB2_LEASE_WRITE_CACHING_LE), "LEASE_WH"}, 55 + {le32_to_cpu(SMB2_LEASE_READ_CACHING_LE | 56 + SMB2_LEASE_HANDLE_CACHING_LE | 57 + SMB2_LEASE_WRITE_CACHING_LE), "LEASE_RWH"}, 58 + }; 59 + 60 + static const struct ksmbd_const_name ksmbd_oplock_const_names[] = { 61 + {SMB2_OPLOCK_LEVEL_NONE, "OPLOCK_NONE"}, 62 + {SMB2_OPLOCK_LEVEL_II, "OPLOCK_II"}, 63 + {SMB2_OPLOCK_LEVEL_EXCLUSIVE, "OPLOCK_EXECL"}, 64 + {SMB2_OPLOCK_LEVEL_BATCH, "OPLOCK_BATCH"}, 65 + }; 66 + 67 + static int proc_show_files(struct seq_file *m, void *v) 68 + { 69 + struct ksmbd_file *fp = NULL; 70 + unsigned int id; 71 + struct oplock_info *opinfo; 72 + 73 + seq_printf(m, "#%-10s %-10s %-10s %-10s %-15s %-10s %-10s %s\n", 74 + "<tree id>", "<pid>", "<vid>", "<refcnt>", 75 + "<oplock>", "<daccess>", "<saccess>", 76 + "<name>"); 77 + 78 + 
read_lock(&global_ft.lock); 79 + idr_for_each_entry(global_ft.idr, fp, id) { 80 + seq_printf(m, "%#-10x %#-10llx %#-10llx %#-10x", 81 + fp->tcon->id, 82 + fp->persistent_id, 83 + fp->volatile_id, 84 + atomic_read(&fp->refcount)); 85 + 86 + rcu_read_lock(); 87 + opinfo = rcu_dereference(fp->f_opinfo); 88 + rcu_read_unlock(); 89 + 90 + if (!opinfo) { 91 + seq_printf(m, " %-15s", " "); 92 + } else { 93 + const struct ksmbd_const_name *const_names; 94 + int count; 95 + unsigned int level; 96 + 97 + if (opinfo->is_lease) { 98 + const_names = ksmbd_lease_const_names; 99 + count = ARRAY_SIZE(ksmbd_lease_const_names); 100 + level = le32_to_cpu(opinfo->o_lease->state); 101 + } else { 102 + const_names = ksmbd_oplock_const_names; 103 + count = ARRAY_SIZE(ksmbd_oplock_const_names); 104 + level = opinfo->level; 105 + } 106 + ksmbd_proc_show_const_name(m, " %-15s", 107 + const_names, count, level); 108 + } 109 + 110 + seq_printf(m, " %#010x %#010x %s\n", 111 + le32_to_cpu(fp->daccess), 112 + le32_to_cpu(fp->saccess), 113 + fp->filp->f_path.dentry->d_name.name); 114 + } 115 + read_unlock(&global_ft.lock); 116 + return 0; 117 + } 118 + 119 + static int create_proc_files(void) 120 + { 121 + ksmbd_proc_create("files", proc_show_files, NULL); 122 + return 0; 123 + } 124 + #else 125 + static int create_proc_files(void) { return 0; } 126 + #endif 38 127 39 128 static bool durable_scavenger_running; 40 129 static DEFINE_MUTEX(durable_scavenger_lock); ··· 1042 949 1043 950 int ksmbd_init_global_file_table(void) 1044 951 { 952 + create_proc_files(); 1045 953 return ksmbd_init_file_table(&global_ft); 1046 954 } 1047 955
+17
include/rdma/rdma_cm.h
··· 169 169 void rdma_destroy_id(struct rdma_cm_id *id); 170 170 171 171 /** 172 + * rdma_restrict_node_type - Restrict an RDMA identifier to specific 173 + * RDMA device node type. 174 + * 175 + * @id: RDMA identifier. 176 + * @node_type: The device node type. Only RDMA_NODE_UNSPECIFIED (default), 177 + * RDMA_NODE_RNIC and RDMA_NODE_IB_CA are allowed 178 + * 179 + * This allows the caller to restrict the possible devices 180 + * used to iWarp (RDMA_NODE_RNIC) or InfiniBand/RoCEv1/RoCEv2 (RDMA_NODE_IB_CA). 181 + * 182 + * It needs to be called before the RDMA identifier is bound 183 + * to an device, which mean it should be called before 184 + * rdma_bind_addr(), rdma_bind_addr() and rdma_listen(). 185 + */ 186 + int rdma_restrict_node_type(struct rdma_cm_id *id, u8 node_type); 187 + 188 + /** 172 189 * rdma_bind_addr - Bind an RDMA identifier to a source address and 173 190 * associated RDMA device, if needed. 174 191 *