Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

Merge git://git.kernel.org/pub/scm/linux/kernel/git/nab/target-pending

Pull SCSI target fixes from Nicholas Bellinger:
"The executive summary includes:

- Post-merge review comments for tcm_vhost (MST + nab)
- Avoid debugging overhead when not debugging for tcm-fc(FCoE) (MDR)
- Fix NULL pointer dereference bug on alloc_page failure (Yi Zou)
- Fix REPORT_LUNs regression bug with pSCSI export (AlexE + nab)
- Fix regression bug with handling of zero-length data CDBs (nab)
- Fix vhost_scsi_target structure alignment (MST)

Thanks again to everyone who contributed a bugfix patch, gave review
feedback on tcm_vhost code, and/or reported a bug during their own
testing over the last weeks.

There is one other outstanding bug reported by Roland recently related
to SCSI transfer length overflow handling, for which the current
proposed bugfix has been left in queue pending further testing with
other non iscsi-target based fabric drivers.

As the patch is verified with loopback (local SGL memory from SCSI
LLD) + tcm_qla2xxx (TCM allocated SGL memory mapped to PCI HW) fabric
ports, it will be included into the next 3.6-rc-fixes PULL request."

* git://git.kernel.org/pub/scm/linux/kernel/git/nab/target-pending:
target: Remove unused se_cmd.cmd_spdtl
tcm_fc: rcu_deref outside rcu lock/unlock section
tcm_vhost: Fix vhost_scsi_target structure alignment
target: Fix regression bug with handling of zero-length data CDBs
target/pscsi: Fix bug with REPORT_LUNs handling for SCSI passthrough
tcm_vhost: Change vhost_scsi_target->vhost_wwpn to char *
target: fix NULL pointer dereference bug alloc_page() fails to get memory
tcm_fc: Avoid debug overhead when not debugging
tcm_vhost: Post-merge review changes requested by MST
tcm_vhost: Fix incorrect IS_ERR() usage in vhost_scsi_map_iov_to_sgl

+146 -108
+8 -1
drivers/target/target_core_pscsi.c
··· 673 673 struct scsi_device *sd = pdv->pdv_sd; 674 674 int result; 675 675 struct pscsi_plugin_task *pt = cmd->priv; 676 - unsigned char *cdb = &pt->pscsi_cdb[0]; 676 + unsigned char *cdb; 677 + /* 678 + * Special case for REPORT_LUNs handling where pscsi_plugin_task has 679 + * not been allocated because TCM is handling the emulation directly. 680 + */ 681 + if (!pt) 682 + return 0; 677 683 684 + cdb = &pt->pscsi_cdb[0]; 678 685 result = pt->pscsi_result; 679 686 /* 680 687 * Hack to make sure that Write-Protect modepage is set if R/O mode is
+8 -7
drivers/target/target_core_transport.c
··· 1165 1165 " 0x%02x\n", cmd->se_tfo->get_fabric_name(), 1166 1166 cmd->data_length, size, cmd->t_task_cdb[0]); 1167 1167 1168 - cmd->cmd_spdtl = size; 1169 - 1170 1168 if (cmd->data_direction == DMA_TO_DEVICE) { 1171 1169 pr_err("Rejecting underflow/overflow" 1172 1170 " WRITE data\n"); ··· 2292 2294 return 0; 2293 2295 2294 2296 out: 2295 - while (i >= 0) { 2296 - __free_page(sg_page(&cmd->t_data_sg[i])); 2297 + while (i > 0) { 2297 2298 i--; 2299 + __free_page(sg_page(&cmd->t_data_sg[i])); 2298 2300 } 2299 2301 kfree(cmd->t_data_sg); 2300 2302 cmd->t_data_sg = NULL; ··· 2321 2323 if (ret < 0) 2322 2324 goto out_fail; 2323 2325 } 2324 - 2325 - /* Workaround for handling zero-length control CDBs */ 2326 - if (!(cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) && !cmd->data_length) { 2326 + /* 2327 + * If this command doesn't have any payload and we don't have to call 2328 + * into the fabric for data transfers, go ahead and complete it right 2329 + * away. 2330 + */ 2331 + if (!cmd->data_length) { 2327 2332 spin_lock_irq(&cmd->t_state_lock); 2328 2333 cmd->t_state = TRANSPORT_COMPLETE; 2329 2334 cmd->transport_state |= CMD_T_ACTIVE;
+1
drivers/target/tcm_fc/tcm_fc.h
··· 131 131 extern struct mutex ft_lport_lock; 132 132 extern struct fc4_prov ft_prov; 133 133 extern struct target_fabric_configfs *ft_configfs; 134 + extern unsigned int ft_debug_logging; 134 135 135 136 /* 136 137 * Fabric methods.
+7 -1
drivers/target/tcm_fc/tfc_cmd.c
··· 48 48 /* 49 49 * Dump cmd state for debugging. 50 50 */ 51 - void ft_dump_cmd(struct ft_cmd *cmd, const char *caller) 51 + static void _ft_dump_cmd(struct ft_cmd *cmd, const char *caller) 52 52 { 53 53 struct fc_exch *ep; 54 54 struct fc_seq *sp; ··· 78 78 caller, cmd, ep->sid, ep->did, ep->oxid, ep->rxid, 79 79 sp->id, ep->esb_stat); 80 80 } 81 + } 82 + 83 + void ft_dump_cmd(struct ft_cmd *cmd, const char *caller) 84 + { 85 + if (unlikely(ft_debug_logging)) 86 + _ft_dump_cmd(cmd, caller); 81 87 } 82 88 83 89 static void ft_free_cmd(struct ft_cmd *cmd)
+3 -1
drivers/target/tcm_fc/tfc_sess.c
··· 456 456 struct ft_tport *tport; 457 457 458 458 mutex_lock(&ft_lport_lock); 459 - tport = rcu_dereference(rdata->local_port->prov[FC_TYPE_FCP]); 459 + tport = rcu_dereference_protected(rdata->local_port->prov[FC_TYPE_FCP], 460 + lockdep_is_held(&ft_lport_lock)); 461 + 460 462 if (!tport) { 461 463 mutex_unlock(&ft_lport_lock); 462 464 return;
+112 -91
drivers/vhost/tcm_vhost.c
··· 53 53 #include "vhost.h" 54 54 #include "tcm_vhost.h" 55 55 56 + enum { 57 + VHOST_SCSI_VQ_CTL = 0, 58 + VHOST_SCSI_VQ_EVT = 1, 59 + VHOST_SCSI_VQ_IO = 2, 60 + }; 61 + 56 62 struct vhost_scsi { 57 - atomic_t vhost_ref_cnt; 58 - struct tcm_vhost_tpg *vs_tpg; 63 + struct tcm_vhost_tpg *vs_tpg; /* Protected by vhost_scsi->dev.mutex */ 59 64 struct vhost_dev dev; 60 65 struct vhost_virtqueue vqs[3]; 61 66 ··· 136 131 return 1; 137 132 } 138 133 139 - static u32 tcm_vhost_get_pr_transport_id( 140 - struct se_portal_group *se_tpg, 134 + static u32 tcm_vhost_get_pr_transport_id(struct se_portal_group *se_tpg, 141 135 struct se_node_acl *se_nacl, 142 136 struct t10_pr_registration *pr_reg, 143 137 int *format_code, ··· 166 162 format_code, buf); 167 163 } 168 164 169 - static u32 tcm_vhost_get_pr_transport_id_len( 170 - struct se_portal_group *se_tpg, 165 + static u32 tcm_vhost_get_pr_transport_id_len(struct se_portal_group *se_tpg, 171 166 struct se_node_acl *se_nacl, 172 167 struct t10_pr_registration *pr_reg, 173 168 int *format_code) ··· 195 192 format_code); 196 193 } 197 194 198 - static char *tcm_vhost_parse_pr_out_transport_id( 199 - struct se_portal_group *se_tpg, 195 + static char *tcm_vhost_parse_pr_out_transport_id(struct se_portal_group *se_tpg, 200 196 const char *buf, 201 197 u32 *out_tid_len, 202 198 char **port_nexus_ptr) ··· 238 236 return &nacl->se_node_acl; 239 237 } 240 238 241 - static void tcm_vhost_release_fabric_acl( 242 - struct se_portal_group *se_tpg, 239 + static void tcm_vhost_release_fabric_acl(struct se_portal_group *se_tpg, 243 240 struct se_node_acl *se_nacl) 244 241 { 245 242 struct tcm_vhost_nacl *nacl = container_of(se_nacl, ··· 298 297 return 0; 299 298 } 300 299 301 - static void vhost_scsi_complete_cmd(struct tcm_vhost_cmd *); 300 + static void vhost_scsi_complete_cmd(struct tcm_vhost_cmd *tv_cmd) 301 + { 302 + struct vhost_scsi *vs = tv_cmd->tvc_vhost; 303 + 304 + spin_lock_bh(&vs->vs_completion_lock); 305 + 
list_add_tail(&tv_cmd->tvc_completion_list, &vs->vs_completion_list); 306 + spin_unlock_bh(&vs->vs_completion_lock); 307 + 308 + vhost_work_queue(&vs->dev, &vs->vs_completion_work); 309 + } 302 310 303 311 static int tcm_vhost_queue_data_in(struct se_cmd *se_cmd) 304 312 { ··· 391 381 vs_completion_work); 392 382 struct tcm_vhost_cmd *tv_cmd; 393 383 394 - while ((tv_cmd = vhost_scsi_get_cmd_from_completion(vs)) != NULL) { 384 + while ((tv_cmd = vhost_scsi_get_cmd_from_completion(vs))) { 395 385 struct virtio_scsi_cmd_resp v_rsp; 396 386 struct se_cmd *se_cmd = &tv_cmd->tvc_se_cmd; 397 387 int ret; ··· 416 406 } 417 407 418 408 vhost_signal(&vs->dev, &vs->vqs[2]); 419 - } 420 - 421 - static void vhost_scsi_complete_cmd(struct tcm_vhost_cmd *tv_cmd) 422 - { 423 - struct vhost_scsi *vs = tv_cmd->tvc_vhost; 424 - 425 - pr_debug("%s tv_cmd %p\n", __func__, tv_cmd); 426 - 427 - spin_lock_bh(&vs->vs_completion_lock); 428 - list_add_tail(&tv_cmd->tvc_completion_list, &vs->vs_completion_list); 429 - spin_unlock_bh(&vs->vs_completion_lock); 430 - 431 - vhost_work_queue(&vs->dev, &vs->vs_completion_work); 432 409 } 433 410 434 411 static struct tcm_vhost_cmd *vhost_scsi_allocate_cmd( ··· 530 533 sg = kmalloc(sizeof(tv_cmd->tvc_sgl[0]) * sgl_count, GFP_ATOMIC); 531 534 if (!sg) 532 535 return -ENOMEM; 533 - pr_debug("%s sg %p sgl_count %u is_err %ld\n", __func__, 534 - sg, sgl_count, IS_ERR(sg)); 536 + pr_debug("%s sg %p sgl_count %u is_err %d\n", __func__, 537 + sg, sgl_count, !sg); 535 538 sg_init_table(sg, sgl_count); 536 539 537 540 tv_cmd->tvc_sgl = sg; ··· 784 787 785 788 static void vhost_scsi_ctl_handle_kick(struct vhost_work *work) 786 789 { 787 - pr_err("%s: The handling func for control queue.\n", __func__); 790 + pr_debug("%s: The handling func for control queue.\n", __func__); 788 791 } 789 792 790 793 static void vhost_scsi_evt_handle_kick(struct vhost_work *work) 791 794 { 792 - pr_err("%s: The handling func for event queue.\n", __func__); 795 + pr_debug("%s: 
The handling func for event queue.\n", __func__); 793 796 } 794 797 795 798 static void vhost_scsi_handle_kick(struct vhost_work *work) ··· 822 825 return -EFAULT; 823 826 } 824 827 } 825 - 826 - if (vs->vs_tpg) { 827 - mutex_unlock(&vs->dev.mutex); 828 - return -EEXIST; 829 - } 830 828 mutex_unlock(&vs->dev.mutex); 831 829 832 830 mutex_lock(&tcm_vhost_mutex); ··· 831 839 mutex_unlock(&tv_tpg->tv_tpg_mutex); 832 840 continue; 833 841 } 834 - if (atomic_read(&tv_tpg->tv_tpg_vhost_count)) { 842 + if (tv_tpg->tv_tpg_vhost_count != 0) { 835 843 mutex_unlock(&tv_tpg->tv_tpg_mutex); 836 844 continue; 837 845 } ··· 839 847 840 848 if (!strcmp(tv_tport->tport_name, t->vhost_wwpn) && 841 849 (tv_tpg->tport_tpgt == t->vhost_tpgt)) { 842 - atomic_inc(&tv_tpg->tv_tpg_vhost_count); 843 - smp_mb__after_atomic_inc(); 850 + tv_tpg->tv_tpg_vhost_count++; 844 851 mutex_unlock(&tv_tpg->tv_tpg_mutex); 845 852 mutex_unlock(&tcm_vhost_mutex); 846 853 847 854 mutex_lock(&vs->dev.mutex); 855 + if (vs->vs_tpg) { 856 + mutex_unlock(&vs->dev.mutex); 857 + mutex_lock(&tv_tpg->tv_tpg_mutex); 858 + tv_tpg->tv_tpg_vhost_count--; 859 + mutex_unlock(&tv_tpg->tv_tpg_mutex); 860 + return -EEXIST; 861 + } 862 + 848 863 vs->vs_tpg = tv_tpg; 849 - atomic_inc(&vs->vhost_ref_cnt); 850 864 smp_mb__after_atomic_inc(); 851 865 mutex_unlock(&vs->dev.mutex); 852 866 return 0; ··· 869 871 { 870 872 struct tcm_vhost_tport *tv_tport; 871 873 struct tcm_vhost_tpg *tv_tpg; 872 - int index; 874 + int index, ret; 873 875 874 876 mutex_lock(&vs->dev.mutex); 875 877 /* Verify that ring has been setup correctly. 
*/ 876 878 for (index = 0; index < vs->dev.nvqs; ++index) { 877 879 if (!vhost_vq_access_ok(&vs->vqs[index])) { 878 - mutex_unlock(&vs->dev.mutex); 879 - return -EFAULT; 880 + ret = -EFAULT; 881 + goto err; 880 882 } 881 883 } 882 884 883 885 if (!vs->vs_tpg) { 884 - mutex_unlock(&vs->dev.mutex); 885 - return -ENODEV; 886 + ret = -ENODEV; 887 + goto err; 886 888 } 887 889 tv_tpg = vs->vs_tpg; 888 890 tv_tport = tv_tpg->tport; 889 891 890 892 if (strcmp(tv_tport->tport_name, t->vhost_wwpn) || 891 893 (tv_tpg->tport_tpgt != t->vhost_tpgt)) { 892 - mutex_unlock(&vs->dev.mutex); 893 894 pr_warn("tv_tport->tport_name: %s, tv_tpg->tport_tpgt: %hu" 894 895 " does not match t->vhost_wwpn: %s, t->vhost_tpgt: %hu\n", 895 896 tv_tport->tport_name, tv_tpg->tport_tpgt, 896 897 t->vhost_wwpn, t->vhost_tpgt); 897 - return -EINVAL; 898 + ret = -EINVAL; 899 + goto err; 898 900 } 899 - atomic_dec(&tv_tpg->tv_tpg_vhost_count); 901 + tv_tpg->tv_tpg_vhost_count--; 900 902 vs->vs_tpg = NULL; 901 903 mutex_unlock(&vs->dev.mutex); 902 904 903 905 return 0; 906 + 907 + err: 908 + mutex_unlock(&vs->dev.mutex); 909 + return ret; 904 910 } 905 911 906 912 static int vhost_scsi_open(struct inode *inode, struct file *f) ··· 920 918 INIT_LIST_HEAD(&s->vs_completion_list); 921 919 spin_lock_init(&s->vs_completion_lock); 922 920 923 - s->vqs[0].handle_kick = vhost_scsi_ctl_handle_kick; 924 - s->vqs[1].handle_kick = vhost_scsi_evt_handle_kick; 925 - s->vqs[2].handle_kick = vhost_scsi_handle_kick; 921 + s->vqs[VHOST_SCSI_VQ_CTL].handle_kick = vhost_scsi_ctl_handle_kick; 922 + s->vqs[VHOST_SCSI_VQ_EVT].handle_kick = vhost_scsi_evt_handle_kick; 923 + s->vqs[VHOST_SCSI_VQ_IO].handle_kick = vhost_scsi_handle_kick; 926 924 r = vhost_dev_init(&s->dev, s->vqs, 3); 927 925 if (r < 0) { 928 926 kfree(s); ··· 951 949 return 0; 952 950 } 953 951 952 + static void vhost_scsi_flush_vq(struct vhost_scsi *vs, int index) 953 + { 954 + vhost_poll_flush(&vs->dev.vqs[index].poll); 955 + } 956 + 957 + static void 
vhost_scsi_flush(struct vhost_scsi *vs) 958 + { 959 + vhost_scsi_flush_vq(vs, VHOST_SCSI_VQ_CTL); 960 + vhost_scsi_flush_vq(vs, VHOST_SCSI_VQ_EVT); 961 + vhost_scsi_flush_vq(vs, VHOST_SCSI_VQ_IO); 962 + } 963 + 954 964 static int vhost_scsi_set_features(struct vhost_scsi *vs, u64 features) 955 965 { 956 966 if (features & ~VHOST_FEATURES) ··· 975 961 return -EFAULT; 976 962 } 977 963 vs->dev.acked_features = features; 978 - /* TODO possibly smp_wmb() and flush vqs */ 964 + smp_wmb(); 965 + vhost_scsi_flush(vs); 979 966 mutex_unlock(&vs->dev.mutex); 980 967 return 0; 981 968 } ··· 989 974 void __user *argp = (void __user *)arg; 990 975 u64 __user *featurep = argp; 991 976 u64 features; 992 - int r; 977 + int r, abi_version = VHOST_SCSI_ABI_VERSION; 993 978 994 979 switch (ioctl) { 995 980 case VHOST_SCSI_SET_ENDPOINT: 996 981 if (copy_from_user(&backend, argp, sizeof backend)) 997 982 return -EFAULT; 983 + if (backend.reserved != 0) 984 + return -EOPNOTSUPP; 998 985 999 986 return vhost_scsi_set_endpoint(vs, &backend); 1000 987 case VHOST_SCSI_CLEAR_ENDPOINT: 1001 988 if (copy_from_user(&backend, argp, sizeof backend)) 1002 989 return -EFAULT; 990 + if (backend.reserved != 0) 991 + return -EOPNOTSUPP; 1003 992 1004 993 return vhost_scsi_clear_endpoint(vs, &backend); 1005 994 case VHOST_SCSI_GET_ABI_VERSION: 1006 - if (copy_from_user(&backend, argp, sizeof backend)) 1007 - return -EFAULT; 1008 - 1009 - backend.abi_version = VHOST_SCSI_ABI_VERSION; 1010 - 1011 - if (copy_to_user(argp, &backend, sizeof backend)) 995 + if (copy_to_user(argp, &abi_version, sizeof abi_version)) 1012 996 return -EFAULT; 1013 997 return 0; 1014 998 case VHOST_GET_FEATURES: ··· 1027 1013 } 1028 1014 } 1029 1015 1016 + #ifdef CONFIG_COMPAT 1017 + static long vhost_scsi_compat_ioctl(struct file *f, unsigned int ioctl, 1018 + unsigned long arg) 1019 + { 1020 + return vhost_scsi_ioctl(f, ioctl, (unsigned long)compat_ptr(arg)); 1021 + } 1022 + #endif 1023 + 1030 1024 static const struct 
file_operations vhost_scsi_fops = { 1031 1025 .owner = THIS_MODULE, 1032 1026 .release = vhost_scsi_release, 1033 1027 .unlocked_ioctl = vhost_scsi_ioctl, 1034 - /* TODO compat ioctl? */ 1028 + #ifdef CONFIG_COMPAT 1029 + .compat_ioctl = vhost_scsi_compat_ioctl, 1030 + #endif 1035 1031 .open = vhost_scsi_open, 1036 1032 .llseek = noop_llseek, 1037 1033 }; ··· 1078 1054 return "Unknown"; 1079 1055 } 1080 1056 1081 - static int tcm_vhost_port_link( 1082 - struct se_portal_group *se_tpg, 1057 + static int tcm_vhost_port_link(struct se_portal_group *se_tpg, 1083 1058 struct se_lun *lun) 1084 1059 { 1085 1060 struct tcm_vhost_tpg *tv_tpg = container_of(se_tpg, 1086 1061 struct tcm_vhost_tpg, se_tpg); 1087 1062 1088 - atomic_inc(&tv_tpg->tv_tpg_port_count); 1089 - smp_mb__after_atomic_inc(); 1063 + mutex_lock(&tv_tpg->tv_tpg_mutex); 1064 + tv_tpg->tv_tpg_port_count++; 1065 + mutex_unlock(&tv_tpg->tv_tpg_mutex); 1090 1066 1091 1067 return 0; 1092 1068 } 1093 1069 1094 - static void tcm_vhost_port_unlink( 1095 - struct se_portal_group *se_tpg, 1070 + static void tcm_vhost_port_unlink(struct se_portal_group *se_tpg, 1096 1071 struct se_lun *se_lun) 1097 1072 { 1098 1073 struct tcm_vhost_tpg *tv_tpg = container_of(se_tpg, 1099 1074 struct tcm_vhost_tpg, se_tpg); 1100 1075 1101 - atomic_dec(&tv_tpg->tv_tpg_port_count); 1102 - smp_mb__after_atomic_dec(); 1076 + mutex_lock(&tv_tpg->tv_tpg_mutex); 1077 + tv_tpg->tv_tpg_port_count--; 1078 + mutex_unlock(&tv_tpg->tv_tpg_mutex); 1103 1079 } 1104 1080 1105 1081 static struct se_node_acl *tcm_vhost_make_nodeacl( ··· 1146 1122 kfree(nacl); 1147 1123 } 1148 1124 1149 - static int tcm_vhost_make_nexus( 1150 - struct tcm_vhost_tpg *tv_tpg, 1125 + static int tcm_vhost_make_nexus(struct tcm_vhost_tpg *tv_tpg, 1151 1126 const char *name) 1152 1127 { 1153 1128 struct se_portal_group *se_tpg; ··· 1191 1168 return -ENOMEM; 1192 1169 } 1193 1170 /* 1194 - * Now register the TCM vHost virtual I_T Nexus as active with the 1171 + * Now register 
the TCM vhost virtual I_T Nexus as active with the 1195 1172 * call to __transport_register_session() 1196 1173 */ 1197 1174 __transport_register_session(se_tpg, tv_nexus->tvn_se_sess->se_node_acl, ··· 1202 1179 return 0; 1203 1180 } 1204 1181 1205 - static int tcm_vhost_drop_nexus( 1206 - struct tcm_vhost_tpg *tpg) 1182 + static int tcm_vhost_drop_nexus(struct tcm_vhost_tpg *tpg) 1207 1183 { 1208 1184 struct se_session *se_sess; 1209 1185 struct tcm_vhost_nexus *tv_nexus; ··· 1220 1198 return -ENODEV; 1221 1199 } 1222 1200 1223 - if (atomic_read(&tpg->tv_tpg_port_count)) { 1201 + if (tpg->tv_tpg_port_count != 0) { 1224 1202 mutex_unlock(&tpg->tv_tpg_mutex); 1225 - pr_err("Unable to remove TCM_vHost I_T Nexus with" 1203 + pr_err("Unable to remove TCM_vhost I_T Nexus with" 1226 1204 " active TPG port count: %d\n", 1227 - atomic_read(&tpg->tv_tpg_port_count)); 1228 - return -EPERM; 1205 + tpg->tv_tpg_port_count); 1206 + return -EBUSY; 1229 1207 } 1230 1208 1231 - if (atomic_read(&tpg->tv_tpg_vhost_count)) { 1209 + if (tpg->tv_tpg_vhost_count != 0) { 1232 1210 mutex_unlock(&tpg->tv_tpg_mutex); 1233 - pr_err("Unable to remove TCM_vHost I_T Nexus with" 1211 + pr_err("Unable to remove TCM_vhost I_T Nexus with" 1234 1212 " active TPG vhost count: %d\n", 1235 - atomic_read(&tpg->tv_tpg_vhost_count)); 1236 - return -EPERM; 1213 + tpg->tv_tpg_vhost_count); 1214 + return -EBUSY; 1237 1215 } 1238 1216 1239 - pr_debug("TCM_vHost_ConfigFS: Removing I_T Nexus to emulated" 1217 + pr_debug("TCM_vhost_ConfigFS: Removing I_T Nexus to emulated" 1240 1218 " %s Initiator Port: %s\n", tcm_vhost_dump_proto_id(tpg->tport), 1241 1219 tv_nexus->tvn_se_sess->se_node_acl->initiatorname); 1242 1220 /* 1243 - * Release the SCSI I_T Nexus to the emulated vHost Target Port 1221 + * Release the SCSI I_T Nexus to the emulated vhost Target Port 1244 1222 */ 1245 1223 transport_deregister_session(tv_nexus->tvn_se_sess); 1246 1224 tpg->tpg_nexus = NULL; ··· 1250 1228 return 0; 1251 1229 } 1252 1230 
1253 - static ssize_t tcm_vhost_tpg_show_nexus( 1254 - struct se_portal_group *se_tpg, 1231 + static ssize_t tcm_vhost_tpg_show_nexus(struct se_portal_group *se_tpg, 1255 1232 char *page) 1256 1233 { 1257 1234 struct tcm_vhost_tpg *tv_tpg = container_of(se_tpg, ··· 1271 1250 return ret; 1272 1251 } 1273 1252 1274 - static ssize_t tcm_vhost_tpg_store_nexus( 1275 - struct se_portal_group *se_tpg, 1253 + static ssize_t tcm_vhost_tpg_store_nexus(struct se_portal_group *se_tpg, 1276 1254 const char *page, 1277 1255 size_t count) 1278 1256 { ··· 1356 1336 NULL, 1357 1337 }; 1358 1338 1359 - static struct se_portal_group *tcm_vhost_make_tpg( 1360 - struct se_wwn *wwn, 1339 + static struct se_portal_group *tcm_vhost_make_tpg(struct se_wwn *wwn, 1361 1340 struct config_group *group, 1362 1341 const char *name) 1363 1342 { ··· 1404 1385 list_del(&tpg->tv_tpg_list); 1405 1386 mutex_unlock(&tcm_vhost_mutex); 1406 1387 /* 1407 - * Release the virtual I_T Nexus for this vHost TPG 1388 + * Release the virtual I_T Nexus for this vhost TPG 1408 1389 */ 1409 1390 tcm_vhost_drop_nexus(tpg); 1410 1391 /* ··· 1414 1395 kfree(tpg); 1415 1396 } 1416 1397 1417 - static struct se_wwn *tcm_vhost_make_tport( 1418 - struct target_fabric_configfs *tf, 1398 + static struct se_wwn *tcm_vhost_make_tport(struct target_fabric_configfs *tf, 1419 1399 struct config_group *group, 1420 1400 const char *name) 1421 1401 { ··· 1610 1592 static int __init tcm_vhost_init(void) 1611 1593 { 1612 1594 int ret = -ENOMEM; 1613 - 1595 + /* 1596 + * Use our own dedicated workqueue for submitting I/O into 1597 + * target core to avoid contention within system_wq. 1598 + */ 1614 1599 tcm_vhost_workqueue = alloc_workqueue("tcm_vhost", 0, 0); 1615 1600 if (!tcm_vhost_workqueue) 1616 1601 goto out;
+7 -5
drivers/vhost/tcm_vhost.h
··· 47 47 /* Vhost port target portal group tag for TCM */ 48 48 u16 tport_tpgt; 49 49 /* Used to track number of TPG Port/Lun Links wrt to explict I_T Nexus shutdown */ 50 - atomic_t tv_tpg_port_count; 51 - /* Used for vhost_scsi device reference to tpg_nexus */ 52 - atomic_t tv_tpg_vhost_count; 50 + int tv_tpg_port_count; 51 + /* Used for vhost_scsi device reference to tpg_nexus, protected by tv_tpg_mutex */ 52 + int tv_tpg_vhost_count; 53 53 /* list for tcm_vhost_list */ 54 54 struct list_head tv_tpg_list; 55 55 /* Used to protect access for tpg_nexus */ ··· 91 91 92 92 struct vhost_scsi_target { 93 93 int abi_version; 94 - unsigned char vhost_wwpn[TRANSPORT_IQN_LEN]; 94 + char vhost_wwpn[TRANSPORT_IQN_LEN]; 95 95 unsigned short vhost_tpgt; 96 + unsigned short reserved; 96 97 }; 97 98 98 99 /* VHOST_SCSI specific defines */ 99 100 #define VHOST_SCSI_SET_ENDPOINT _IOW(VHOST_VIRTIO, 0x40, struct vhost_scsi_target) 100 101 #define VHOST_SCSI_CLEAR_ENDPOINT _IOW(VHOST_VIRTIO, 0x41, struct vhost_scsi_target) 101 - #define VHOST_SCSI_GET_ABI_VERSION _IOW(VHOST_VIRTIO, 0x42, struct vhost_scsi_target) 102 + /* Changing this breaks userspace. */ 103 + #define VHOST_SCSI_GET_ABI_VERSION _IOW(VHOST_VIRTIO, 0x42, int)
-2
include/target/target_core_base.h
··· 503 503 u32 se_ordered_id; 504 504 /* Total size in bytes associated with command */ 505 505 u32 data_length; 506 - /* SCSI Presented Data Transfer Length */ 507 - u32 cmd_spdtl; 508 506 u32 residual_count; 509 507 u32 orig_fe_lun; 510 508 /* Persistent Reservation key */