Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

Merge branch '3.3-rc-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/nab/target-pending

This series contains pending target bug-fixes and cleanups for v3.3-rc3
that have been addressed over the past weeks in lio-core.git.

Some of the highlights include:

- Fix handling for control CDBs with data greater than PAGE_SIZE (andy)
- Use IP_FREEBIND for iscsi-target to address network portal creation
issues with systemd (dax)
- Allow PERSISTENT RESERVE IN for non-reservation holder (marco)
- Fix iblock se_dev_attrib.unmap_granularity (marco)
- Fix unsupported WRITE_SAME sense payload handling (martin)
- Add workaround for zero-length control CDB handling (nab)
- Fix discovery with INADDR_ANY and IN6ADDR_ANY_INIT (nab)
- Fix target_submit_cmd() exception handling (nab)
- Return correct ASC for unimplemented VPD pages (roland)
- Don't zero pages used for data buffers (roland)
- Fix return code of core_tpg_.*_lun (sebastian)

* '3.3-rc-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/nab/target-pending: (26 commits)
target: Fix unsupported WRITE_SAME sense payload
iscsi: use IP_FREEBIND socket option
iblock: fix handling of large requests
target: handle empty string writes in sysfs
iscsi_target: in_aton needs linux/inet.h
target: Fix iblock se_dev_attrib.unmap_granularity
target: Fix target_submit_cmd() exception handling
target: Change target_submit_cmd() to return void
target: accept REQUEST_SENSE with 18bytes
target: Fail INQUIRY commands with EVPD==0 but PAGE CODE!=0
target: Return correct ASC for unimplemented VPD pages
iscsi-target: Fix discovery with INADDR_ANY and IN6ADDR_ANY_INIT
target: Allow control CDBs with data > 1 page
iscsi-target: Fix up a few assignments
iscsi-target: make one-bit bitfields unsigned
iscsi-target: Fix double list_add with iscsit_alloc_buffs reject
iscsi-target: Fix reject release handling in iscsit_free_cmd()
target: fix return code of core_tpg_.*_lun
target: use save/restore lock primitive in core_dec_lacl_count()
target: avoid multiple outputs in scsi_dump_inquiry()
...

+268 -142
+34 -5
drivers/target/iscsi/iscsi_target.c
··· 1061 1061 if (ret < 0) 1062 1062 return iscsit_add_reject_from_cmd( 1063 1063 ISCSI_REASON_BOOKMARK_NO_RESOURCES, 1064 - 1, 1, buf, cmd); 1064 + 1, 0, buf, cmd); 1065 1065 /* 1066 1066 * Check the CmdSN against ExpCmdSN/MaxCmdSN here if 1067 1067 * the Immediate Bit is not set, and no Immediate ··· 3164 3164 return 0; 3165 3165 } 3166 3166 3167 + static bool iscsit_check_inaddr_any(struct iscsi_np *np) 3168 + { 3169 + bool ret = false; 3170 + 3171 + if (np->np_sockaddr.ss_family == AF_INET6) { 3172 + const struct sockaddr_in6 sin6 = { 3173 + .sin6_addr = IN6ADDR_ANY_INIT }; 3174 + struct sockaddr_in6 *sock_in6 = 3175 + (struct sockaddr_in6 *)&np->np_sockaddr; 3176 + 3177 + if (!memcmp(sock_in6->sin6_addr.s6_addr, 3178 + sin6.sin6_addr.s6_addr, 16)) 3179 + ret = true; 3180 + } else { 3181 + struct sockaddr_in * sock_in = 3182 + (struct sockaddr_in *)&np->np_sockaddr; 3183 + 3184 + if (sock_in->sin_addr.s_addr == INADDR_ANY) 3185 + ret = true; 3186 + } 3187 + 3188 + return ret; 3189 + } 3190 + 3167 3191 static int iscsit_build_sendtargets_response(struct iscsi_cmd *cmd) 3168 3192 { 3169 3193 char *payload = NULL; ··· 3237 3213 spin_lock(&tpg->tpg_np_lock); 3238 3214 list_for_each_entry(tpg_np, &tpg->tpg_gnp_list, 3239 3215 tpg_np_list) { 3216 + struct iscsi_np *np = tpg_np->tpg_np; 3217 + bool inaddr_any = iscsit_check_inaddr_any(np); 3218 + 3240 3219 len = sprintf(buf, "TargetAddress=" 3241 3220 "%s%s%s:%hu,%hu", 3242 - (tpg_np->tpg_np->np_sockaddr.ss_family == AF_INET6) ? 3243 - "[" : "", tpg_np->tpg_np->np_ip, 3244 - (tpg_np->tpg_np->np_sockaddr.ss_family == AF_INET6) ? 3245 - "]" : "", tpg_np->tpg_np->np_port, 3221 + (np->np_sockaddr.ss_family == AF_INET6) ? 3222 + "[" : "", (inaddr_any == false) ? 3223 + np->np_ip : conn->local_ip, 3224 + (np->np_sockaddr.ss_family == AF_INET6) ? 3225 + "]" : "", (inaddr_any == false) ? 3226 + np->np_port : conn->local_port, 3246 3227 tpg->tpgt); 3247 3228 len += 1; 3248 3229
+1
drivers/target/iscsi/iscsi_target_configfs.c
··· 21 21 22 22 #include <linux/configfs.h> 23 23 #include <linux/export.h> 24 + #include <linux/inet.h> 24 25 #include <target/target_core_base.h> 25 26 #include <target/target_core_fabric.h> 26 27 #include <target/target_core_fabric_configfs.h>
+4 -2
drivers/target/iscsi/iscsi_target_core.h
··· 508 508 u16 cid; 509 509 /* Remote TCP Port */ 510 510 u16 login_port; 511 + u16 local_port; 511 512 int net_size; 512 513 u32 auth_id; 513 514 #define CONNFLAG_SCTP_STRUCT_FILE 0x01 ··· 528 527 unsigned char bad_hdr[ISCSI_HDR_LEN]; 529 528 #define IPV6_ADDRESS_SPACE 48 530 529 unsigned char login_ip[IPV6_ADDRESS_SPACE]; 530 + unsigned char local_ip[IPV6_ADDRESS_SPACE]; 531 531 int conn_usage_count; 532 532 int conn_waiting_on_uc; 533 533 atomic_t check_immediate_queue; ··· 563 561 struct hash_desc conn_tx_hash; 564 562 /* Used for scheduling TX and RX connection kthreads */ 565 563 cpumask_var_t conn_cpumask; 566 - int conn_rx_reset_cpumask:1; 567 - int conn_tx_reset_cpumask:1; 564 + unsigned int conn_rx_reset_cpumask:1; 565 + unsigned int conn_tx_reset_cpumask:1; 568 566 /* list_head of struct iscsi_cmd for this connection */ 569 567 struct list_head conn_cmd_list; 570 568 struct list_head immed_queue_list;
+2 -2
drivers/target/iscsi/iscsi_target_erl1.c
··· 1238 1238 { 1239 1239 struct iscsi_conn *conn = cmd->conn; 1240 1240 struct iscsi_session *sess = conn->sess; 1241 - struct iscsi_node_attrib *na = na = iscsit_tpg_get_node_attrib(sess); 1241 + struct iscsi_node_attrib *na = iscsit_tpg_get_node_attrib(sess); 1242 1242 1243 1243 spin_lock_bh(&cmd->dataout_timeout_lock); 1244 1244 if (!(cmd->dataout_timer_flags & ISCSI_TF_RUNNING)) { ··· 1261 1261 struct iscsi_conn *conn) 1262 1262 { 1263 1263 struct iscsi_session *sess = conn->sess; 1264 - struct iscsi_node_attrib *na = na = iscsit_tpg_get_node_attrib(sess); 1264 + struct iscsi_node_attrib *na = iscsit_tpg_get_node_attrib(sess); 1265 1265 1266 1266 if (cmd->dataout_timer_flags & ISCSI_TF_RUNNING) 1267 1267 return;
+35 -4
drivers/target/iscsi/iscsi_target_login.c
··· 615 615 } 616 616 617 617 pr_debug("iSCSI Login successful on CID: %hu from %s to" 618 - " %s:%hu,%hu\n", conn->cid, conn->login_ip, np->np_ip, 619 - np->np_port, tpg->tpgt); 618 + " %s:%hu,%hu\n", conn->cid, conn->login_ip, 619 + conn->local_ip, conn->local_port, tpg->tpgt); 620 620 621 621 list_add_tail(&conn->conn_list, &sess->sess_conn_list); 622 622 atomic_inc(&sess->nconn); ··· 658 658 sess->session_state = TARG_SESS_STATE_LOGGED_IN; 659 659 660 660 pr_debug("iSCSI Login successful on CID: %hu from %s to %s:%hu,%hu\n", 661 - conn->cid, conn->login_ip, np->np_ip, np->np_port, tpg->tpgt); 661 + conn->cid, conn->login_ip, conn->local_ip, conn->local_port, 662 + tpg->tpgt); 662 663 663 664 spin_lock_bh(&sess->conn_lock); 664 665 list_add_tail(&conn->conn_list, &sess->sess_conn_list); ··· 838 837 (char *)&opt, sizeof(opt)); 839 838 if (ret < 0) { 840 839 pr_err("kernel_setsockopt() for SO_REUSEADDR" 840 + " failed\n"); 841 + goto fail; 842 + } 843 + 844 + ret = kernel_setsockopt(sock, IPPROTO_IP, IP_FREEBIND, 845 + (char *)&opt, sizeof(opt)); 846 + if (ret < 0) { 847 + pr_err("kernel_setsockopt() for IP_FREEBIND" 841 848 " failed\n"); 842 849 goto fail; 843 850 } ··· 1029 1020 snprintf(conn->login_ip, sizeof(conn->login_ip), "%pI6c", 1030 1021 &sock_in6.sin6_addr.in6_u); 1031 1022 conn->login_port = ntohs(sock_in6.sin6_port); 1023 + 1024 + if (conn->sock->ops->getname(conn->sock, 1025 + (struct sockaddr *)&sock_in6, &err, 0) < 0) { 1026 + pr_err("sock_ops->getname() failed.\n"); 1027 + iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR, 1028 + ISCSI_LOGIN_STATUS_TARGET_ERROR); 1029 + goto new_sess_out; 1030 + } 1031 + snprintf(conn->local_ip, sizeof(conn->local_ip), "%pI6c", 1032 + &sock_in6.sin6_addr.in6_u); 1033 + conn->local_port = ntohs(sock_in6.sin6_port); 1034 + 1032 1035 } else { 1033 1036 memset(&sock_in, 0, sizeof(struct sockaddr_in)); 1034 1037 ··· 1053 1032 } 1054 1033 sprintf(conn->login_ip, "%pI4", &sock_in.sin_addr.s_addr); 1055 1034 
conn->login_port = ntohs(sock_in.sin_port); 1035 + 1036 + if (conn->sock->ops->getname(conn->sock, 1037 + (struct sockaddr *)&sock_in, &err, 0) < 0) { 1038 + pr_err("sock_ops->getname() failed.\n"); 1039 + iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR, 1040 + ISCSI_LOGIN_STATUS_TARGET_ERROR); 1041 + goto new_sess_out; 1042 + } 1043 + sprintf(conn->local_ip, "%pI4", &sock_in.sin_addr.s_addr); 1044 + conn->local_port = ntohs(sock_in.sin_port); 1056 1045 } 1057 1046 1058 1047 conn->network_transport = np->np_network_transport; ··· 1070 1039 pr_debug("Received iSCSI login request from %s on %s Network" 1071 1040 " Portal %s:%hu\n", conn->login_ip, 1072 1041 (conn->network_transport == ISCSI_TCP) ? "TCP" : "SCTP", 1073 - np->np_ip, np->np_port); 1042 + conn->local_ip, conn->local_port); 1074 1043 1075 1044 pr_debug("Moving to TARG_CONN_STATE_IN_LOGIN.\n"); 1076 1045 conn->conn_state = TARG_CONN_STATE_IN_LOGIN;
+11
drivers/target/iscsi/iscsi_target_util.c
··· 849 849 case ISCSI_OP_SCSI_TMFUNC: 850 850 transport_generic_free_cmd(&cmd->se_cmd, 1); 851 851 break; 852 + case ISCSI_OP_REJECT: 853 + /* 854 + * Handle special case for REJECT when iscsi_add_reject*() has 855 + * overwritten the original iscsi_opcode assignment, and the 856 + * associated cmd->se_cmd needs to be released. 857 + */ 858 + if (cmd->se_cmd.se_tfo != NULL) { 859 + transport_generic_free_cmd(&cmd->se_cmd, 1); 860 + break; 861 + } 862 + /* Fall-through */ 852 863 default: 853 864 iscsit_release_cmd(cmd); 854 865 break;
+4 -4
drivers/target/target_core_alua.c
··· 78 78 return -EINVAL; 79 79 } 80 80 81 - buf = transport_kmap_first_data_page(cmd); 81 + buf = transport_kmap_data_sg(cmd); 82 82 83 83 spin_lock(&su_dev->t10_alua.tg_pt_gps_lock); 84 84 list_for_each_entry(tg_pt_gp, &su_dev->t10_alua.tg_pt_gps_list, ··· 163 163 buf[2] = ((rd_len >> 8) & 0xff); 164 164 buf[3] = (rd_len & 0xff); 165 165 166 - transport_kunmap_first_data_page(cmd); 166 + transport_kunmap_data_sg(cmd); 167 167 168 168 task->task_scsi_status = GOOD; 169 169 transport_complete_task(task, 1); ··· 194 194 cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 195 195 return -EINVAL; 196 196 } 197 - buf = transport_kmap_first_data_page(cmd); 197 + buf = transport_kmap_data_sg(cmd); 198 198 199 199 /* 200 200 * Determine if explict ALUA via SET_TARGET_PORT_GROUPS is allowed ··· 351 351 } 352 352 353 353 out: 354 - transport_kunmap_first_data_page(cmd); 354 + transport_kunmap_data_sg(cmd); 355 355 task->task_scsi_status = GOOD; 356 356 transport_complete_task(task, 1); 357 357 return 0;
+26 -25
drivers/target/target_core_cdb.c
··· 83 83 return -EINVAL; 84 84 } 85 85 86 - buf = transport_kmap_first_data_page(cmd); 86 + buf = transport_kmap_data_sg(cmd); 87 87 88 88 if (dev == tpg->tpg_virt_lun0.lun_se_dev) { 89 89 buf[0] = 0x3f; /* Not connected */ ··· 134 134 buf[4] = 31; /* Set additional length to 31 */ 135 135 136 136 out: 137 - transport_kunmap_first_data_page(cmd); 137 + transport_kunmap_data_sg(cmd); 138 138 return 0; 139 139 } 140 140 ··· 698 698 int p, ret; 699 699 700 700 if (!(cdb[1] & 0x1)) { 701 + if (cdb[2]) { 702 + pr_err("INQUIRY with EVPD==0 but PAGE CODE=%02x\n", 703 + cdb[2]); 704 + cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD; 705 + return -EINVAL; 706 + } 707 + 701 708 ret = target_emulate_inquiry_std(cmd); 702 709 goto out; 703 710 } ··· 723 716 return -EINVAL; 724 717 } 725 718 726 - buf = transport_kmap_first_data_page(cmd); 719 + buf = transport_kmap_data_sg(cmd); 727 720 728 721 buf[0] = dev->transport->get_device_type(dev); 729 722 ··· 736 729 } 737 730 738 731 pr_err("Unknown VPD Code: 0x%02x\n", cdb[2]); 739 - cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE; 732 + cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD; 740 733 ret = -EINVAL; 741 734 742 735 out_unmap: 743 - transport_kunmap_first_data_page(cmd); 736 + transport_kunmap_data_sg(cmd); 744 737 out: 745 738 if (!ret) { 746 739 task->task_scsi_status = GOOD; ··· 762 755 else 763 756 blocks = (u32)blocks_long; 764 757 765 - buf = transport_kmap_first_data_page(cmd); 758 + buf = transport_kmap_data_sg(cmd); 766 759 767 760 buf[0] = (blocks >> 24) & 0xff; 768 761 buf[1] = (blocks >> 16) & 0xff; ··· 778 771 if (dev->se_sub_dev->se_dev_attrib.emulate_tpu || dev->se_sub_dev->se_dev_attrib.emulate_tpws) 779 772 put_unaligned_be32(0xFFFFFFFF, &buf[0]); 780 773 781 - transport_kunmap_first_data_page(cmd); 774 + transport_kunmap_data_sg(cmd); 782 775 783 776 task->task_scsi_status = GOOD; 784 777 transport_complete_task(task, 1); ··· 792 785 unsigned char *buf; 793 786 unsigned long long blocks = 
dev->transport->get_blocks(dev); 794 787 795 - buf = transport_kmap_first_data_page(cmd); 788 + buf = transport_kmap_data_sg(cmd); 796 789 797 790 buf[0] = (blocks >> 56) & 0xff; 798 791 buf[1] = (blocks >> 48) & 0xff; ··· 813 806 if (dev->se_sub_dev->se_dev_attrib.emulate_tpu || dev->se_sub_dev->se_dev_attrib.emulate_tpws) 814 807 buf[14] = 0x80; 815 808 816 - transport_kunmap_first_data_page(cmd); 809 + transport_kunmap_data_sg(cmd); 817 810 818 811 task->task_scsi_status = GOOD; 819 812 transport_complete_task(task, 1); ··· 1026 1019 offset = cmd->data_length; 1027 1020 } 1028 1021 1029 - rbuf = transport_kmap_first_data_page(cmd); 1022 + rbuf = transport_kmap_data_sg(cmd); 1030 1023 memcpy(rbuf, buf, offset); 1031 - transport_kunmap_first_data_page(cmd); 1024 + transport_kunmap_data_sg(cmd); 1032 1025 1033 1026 task->task_scsi_status = GOOD; 1034 1027 transport_complete_task(task, 1); ··· 1050 1043 return -ENOSYS; 1051 1044 } 1052 1045 1053 - buf = transport_kmap_first_data_page(cmd); 1046 + buf = transport_kmap_data_sg(cmd); 1054 1047 1055 1048 if (!core_scsi3_ua_clear_for_request_sense(cmd, &ua_asc, &ua_ascq)) { 1056 1049 /* ··· 1058 1051 */ 1059 1052 buf[0] = 0x70; 1060 1053 buf[SPC_SENSE_KEY_OFFSET] = UNIT_ATTENTION; 1061 - /* 1062 - * Make sure request data length is enough for additional 1063 - * sense data. 1064 - */ 1065 - if (cmd->data_length <= 18) { 1054 + 1055 + if (cmd->data_length < 18) { 1066 1056 buf[7] = 0x00; 1067 1057 err = -EINVAL; 1068 1058 goto end; ··· 1076 1072 */ 1077 1073 buf[0] = 0x70; 1078 1074 buf[SPC_SENSE_KEY_OFFSET] = NO_SENSE; 1079 - /* 1080 - * Make sure request data length is enough for additional 1081 - * sense data. 
1082 - */ 1083 - if (cmd->data_length <= 18) { 1075 + 1076 + if (cmd->data_length < 18) { 1084 1077 buf[7] = 0x00; 1085 1078 err = -EINVAL; 1086 1079 goto end; ··· 1090 1089 } 1091 1090 1092 1091 end: 1093 - transport_kunmap_first_data_page(cmd); 1092 + transport_kunmap_data_sg(cmd); 1094 1093 task->task_scsi_status = GOOD; 1095 1094 transport_complete_task(task, 1); 1096 1095 return 0; ··· 1124 1123 dl = get_unaligned_be16(&cdb[0]); 1125 1124 bd_dl = get_unaligned_be16(&cdb[2]); 1126 1125 1127 - buf = transport_kmap_first_data_page(cmd); 1126 + buf = transport_kmap_data_sg(cmd); 1128 1127 1129 1128 ptr = &buf[offset]; 1130 1129 pr_debug("UNMAP: Sub: %s Using dl: %hu bd_dl: %hu size: %hu" ··· 1148 1147 } 1149 1148 1150 1149 err: 1151 - transport_kunmap_first_data_page(cmd); 1150 + transport_kunmap_data_sg(cmd); 1152 1151 if (!ret) { 1153 1152 task->task_scsi_status = GOOD; 1154 1153 transport_complete_task(task, 1);
+8 -4
drivers/target/target_core_configfs.c
··· 1704 1704 return -EINVAL; 1705 1705 } 1706 1706 1707 - se_dev->su_dev_flags |= SDF_USING_ALIAS; 1708 1707 read_bytes = snprintf(&se_dev->se_dev_alias[0], SE_DEV_ALIAS_LEN, 1709 1708 "%s", page); 1710 - 1709 + if (!read_bytes) 1710 + return -EINVAL; 1711 1711 if (se_dev->se_dev_alias[read_bytes - 1] == '\n') 1712 1712 se_dev->se_dev_alias[read_bytes - 1] = '\0'; 1713 + 1714 + se_dev->su_dev_flags |= SDF_USING_ALIAS; 1713 1715 1714 1716 pr_debug("Target_Core_ConfigFS: %s/%s set alias: %s\n", 1715 1717 config_item_name(&hba->hba_group.cg_item), ··· 1755 1753 return -EINVAL; 1756 1754 } 1757 1755 1758 - se_dev->su_dev_flags |= SDF_USING_UDEV_PATH; 1759 1756 read_bytes = snprintf(&se_dev->se_dev_udev_path[0], SE_UDEV_PATH_LEN, 1760 1757 "%s", page); 1761 - 1758 + if (!read_bytes) 1759 + return -EINVAL; 1762 1760 if (se_dev->se_dev_udev_path[read_bytes - 1] == '\n') 1763 1761 se_dev->se_dev_udev_path[read_bytes - 1] = '\0'; 1762 + 1763 + se_dev->su_dev_flags |= SDF_USING_UDEV_PATH; 1764 1764 1765 1765 pr_debug("Target_Core_ConfigFS: %s/%s set udev_path: %s\n", 1766 1766 config_item_name(&hba->hba_group.cg_item),
+15 -13
drivers/target/target_core_device.c
··· 320 320 void core_dec_lacl_count(struct se_node_acl *se_nacl, struct se_cmd *se_cmd) 321 321 { 322 322 struct se_dev_entry *deve; 323 + unsigned long flags; 323 324 324 - spin_lock_irq(&se_nacl->device_list_lock); 325 + spin_lock_irqsave(&se_nacl->device_list_lock, flags); 325 326 deve = &se_nacl->device_list[se_cmd->orig_fe_lun]; 326 327 deve->deve_cmds--; 327 - spin_unlock_irq(&se_nacl->device_list_lock); 328 + spin_unlock_irqrestore(&se_nacl->device_list_lock, flags); 328 329 } 329 330 330 331 void core_update_device_list_access( ··· 657 656 unsigned char *buf; 658 657 u32 cdb_offset = 0, lun_count = 0, offset = 8, i; 659 658 660 - buf = transport_kmap_first_data_page(se_cmd); 659 + buf = (unsigned char *) transport_kmap_data_sg(se_cmd); 661 660 662 661 /* 663 662 * If no struct se_session pointer is present, this struct se_cmd is ··· 695 694 * See SPC3 r07, page 159. 696 695 */ 697 696 done: 698 - transport_kunmap_first_data_page(se_cmd); 697 + transport_kunmap_data_sg(se_cmd); 699 698 lun_count *= 8; 700 699 buf[0] = ((lun_count >> 24) & 0xff); 701 700 buf[1] = ((lun_count >> 16) & 0xff); ··· 1295 1294 { 1296 1295 struct se_lun *lun_p; 1297 1296 u32 lun_access = 0; 1297 + int rc; 1298 1298 1299 1299 if (atomic_read(&dev->dev_access_obj.obj_access_count) != 0) { 1300 1300 pr_err("Unable to export struct se_device while dev_access_obj: %d\n", 1301 1301 atomic_read(&dev->dev_access_obj.obj_access_count)); 1302 - return NULL; 1302 + return ERR_PTR(-EACCES); 1303 1303 } 1304 1304 1305 1305 lun_p = core_tpg_pre_addlun(tpg, lun); 1306 - if ((IS_ERR(lun_p)) || !lun_p) 1307 - return NULL; 1306 + if (IS_ERR(lun_p)) 1307 + return lun_p; 1308 1308 1309 1309 if (dev->dev_flags & DF_READ_ONLY) 1310 1310 lun_access = TRANSPORT_LUNFLAGS_READ_ONLY; 1311 1311 else 1312 1312 lun_access = TRANSPORT_LUNFLAGS_READ_WRITE; 1313 1313 1314 - if (core_tpg_post_addlun(tpg, lun_p, lun_access, dev) < 0) 1315 - return NULL; 1314 + rc = core_tpg_post_addlun(tpg, lun_p, lun_access, dev); 
1315 + if (rc < 0) 1316 + return ERR_PTR(rc); 1316 1317 1317 1318 pr_debug("%s_TPG[%u]_LUN[%u] - Activated %s Logical Unit from" 1318 1319 " CORE HBA: %u\n", tpg->se_tpg_tfo->get_fabric_name(), ··· 1351 1348 u32 unpacked_lun) 1352 1349 { 1353 1350 struct se_lun *lun; 1354 - int ret = 0; 1355 1351 1356 - lun = core_tpg_pre_dellun(tpg, unpacked_lun, &ret); 1357 - if (!lun) 1358 - return ret; 1352 + lun = core_tpg_pre_dellun(tpg, unpacked_lun); 1353 + if (IS_ERR(lun)) 1354 + return PTR_ERR(lun); 1359 1355 1360 1356 core_tpg_post_dellun(tpg, lun); 1361 1357
+2 -2
drivers/target/target_core_fabric_configfs.c
··· 766 766 767 767 lun_p = core_dev_add_lun(se_tpg, dev->se_hba, dev, 768 768 lun->unpacked_lun); 769 - if (IS_ERR(lun_p) || !lun_p) { 769 + if (IS_ERR(lun_p)) { 770 770 pr_err("core_dev_add_lun() failed\n"); 771 - ret = -EINVAL; 771 + ret = PTR_ERR(lun_p); 772 772 goto out; 773 773 } 774 774
+9 -2
drivers/target/target_core_iblock.c
··· 129 129 /* 130 130 * These settings need to be made tunable.. 131 131 */ 132 - ib_dev->ibd_bio_set = bioset_create(32, 64); 132 + ib_dev->ibd_bio_set = bioset_create(32, 0); 133 133 if (!ib_dev->ibd_bio_set) { 134 134 pr_err("IBLOCK: Unable to create bioset()\n"); 135 135 return ERR_PTR(-ENOMEM); ··· 181 181 */ 182 182 dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count = 1; 183 183 dev->se_sub_dev->se_dev_attrib.unmap_granularity = 184 - q->limits.discard_granularity; 184 + q->limits.discard_granularity >> 9; 185 185 dev->se_sub_dev->se_dev_attrib.unmap_granularity_alignment = 186 186 q->limits.discard_alignment; 187 187 ··· 487 487 struct iblock_dev *ib_dev = task->task_se_cmd->se_dev->dev_ptr; 488 488 struct iblock_req *ib_req = IBLOCK_REQ(task); 489 489 struct bio *bio; 490 + 491 + /* 492 + * Only allocate as many vector entries as the bio code allows us to, 493 + * we'll loop later on until we have handled the whole request. 494 + */ 495 + if (sg_num > BIO_MAX_PAGES) 496 + sg_num = BIO_MAX_PAGES; 490 497 491 498 bio = bio_alloc_bioset(GFP_NOIO, sg_num, ib_dev->ibd_bio_set); 492 499 if (!bio) {
+1 -1
drivers/target/target_core_internal.h
··· 90 90 struct se_lun *core_tpg_pre_addlun(struct se_portal_group *, u32); 91 91 int core_tpg_post_addlun(struct se_portal_group *, struct se_lun *, 92 92 u32, void *); 93 - struct se_lun *core_tpg_pre_dellun(struct se_portal_group *, u32, int *); 93 + struct se_lun *core_tpg_pre_dellun(struct se_portal_group *, u32 unpacked_lun); 94 94 int core_tpg_post_dellun(struct se_portal_group *, struct se_lun *); 95 95 96 96 /* target_core_transport.c */
+22 -21
drivers/target/target_core_pr.c
··· 478 478 case READ_MEDIA_SERIAL_NUMBER: 479 479 case REPORT_LUNS: 480 480 case REQUEST_SENSE: 481 + case PERSISTENT_RESERVE_IN: 481 482 ret = 0; /*/ Allowed CDBs */ 482 483 break; 483 484 default: ··· 1535 1534 tidh_new->dest_local_nexus = 1; 1536 1535 list_add_tail(&tidh_new->dest_list, &tid_dest_list); 1537 1536 1538 - buf = transport_kmap_first_data_page(cmd); 1537 + buf = transport_kmap_data_sg(cmd); 1539 1538 /* 1540 1539 * For a PERSISTENT RESERVE OUT specify initiator ports payload, 1541 1540 * first extract TransportID Parameter Data Length, and make sure ··· 1786 1785 1787 1786 } 1788 1787 1789 - transport_kunmap_first_data_page(cmd); 1788 + transport_kunmap_data_sg(cmd); 1790 1789 1791 1790 /* 1792 1791 * Go ahead and create a registrations from tid_dest_list for the ··· 1834 1833 1835 1834 return 0; 1836 1835 out: 1837 - transport_kunmap_first_data_page(cmd); 1836 + transport_kunmap_data_sg(cmd); 1838 1837 /* 1839 1838 * For the failure case, release everything from tid_dest_list 1840 1839 * including *dest_pr_reg and the configfs dependances.. ··· 3121 3120 if (!calling_it_nexus) 3122 3121 core_scsi3_ua_allocate(pr_reg_nacl, 3123 3122 pr_res_mapped_lun, 0x2A, 3124 - ASCQ_2AH_RESERVATIONS_PREEMPTED); 3123 + ASCQ_2AH_REGISTRATIONS_PREEMPTED); 3125 3124 } 3126 3125 spin_unlock(&pr_tmpl->registration_lock); 3127 3126 /* ··· 3234 3233 * additional sense code set to REGISTRATIONS PREEMPTED; 3235 3234 */ 3236 3235 core_scsi3_ua_allocate(pr_reg_nacl, pr_res_mapped_lun, 0x2A, 3237 - ASCQ_2AH_RESERVATIONS_PREEMPTED); 3236 + ASCQ_2AH_REGISTRATIONS_PREEMPTED); 3238 3237 } 3239 3238 spin_unlock(&pr_tmpl->registration_lock); 3240 3239 /* ··· 3411 3410 * will be moved to for the TransportID containing SCSI initiator WWN 3412 3411 * information. 
3413 3412 */ 3414 - buf = transport_kmap_first_data_page(cmd); 3413 + buf = transport_kmap_data_sg(cmd); 3415 3414 rtpi = (buf[18] & 0xff) << 8; 3416 3415 rtpi |= buf[19] & 0xff; 3417 3416 tid_len = (buf[20] & 0xff) << 24; 3418 3417 tid_len |= (buf[21] & 0xff) << 16; 3419 3418 tid_len |= (buf[22] & 0xff) << 8; 3420 3419 tid_len |= buf[23] & 0xff; 3421 - transport_kunmap_first_data_page(cmd); 3420 + transport_kunmap_data_sg(cmd); 3422 3421 buf = NULL; 3423 3422 3424 3423 if ((tid_len + 24) != cmd->data_length) { ··· 3470 3469 return -EINVAL; 3471 3470 } 3472 3471 3473 - buf = transport_kmap_first_data_page(cmd); 3472 + buf = transport_kmap_data_sg(cmd); 3474 3473 proto_ident = (buf[24] & 0x0f); 3475 3474 #if 0 3476 3475 pr_debug("SPC-3 PR REGISTER_AND_MOVE: Extracted Protocol Identifier:" ··· 3504 3503 goto out; 3505 3504 } 3506 3505 3507 - transport_kunmap_first_data_page(cmd); 3506 + transport_kunmap_data_sg(cmd); 3508 3507 buf = NULL; 3509 3508 3510 3509 pr_debug("SPC-3 PR [%s] Extracted initiator %s identifier: %s" ··· 3769 3768 " REGISTER_AND_MOVE\n"); 3770 3769 } 3771 3770 3772 - transport_kunmap_first_data_page(cmd); 3771 + transport_kunmap_data_sg(cmd); 3773 3772 3774 3773 core_scsi3_put_pr_reg(dest_pr_reg); 3775 3774 return 0; 3776 3775 out: 3777 3776 if (buf) 3778 - transport_kunmap_first_data_page(cmd); 3777 + transport_kunmap_data_sg(cmd); 3779 3778 if (dest_se_deve) 3780 3779 core_scsi3_lunacl_undepend_item(dest_se_deve); 3781 3780 if (dest_node_acl) ··· 3849 3848 scope = (cdb[2] & 0xf0); 3850 3849 type = (cdb[2] & 0x0f); 3851 3850 3852 - buf = transport_kmap_first_data_page(cmd); 3851 + buf = transport_kmap_data_sg(cmd); 3853 3852 /* 3854 3853 * From PERSISTENT_RESERVE_OUT parameter list (payload) 3855 3854 */ ··· 3867 3866 aptpl = (buf[17] & 0x01); 3868 3867 unreg = (buf[17] & 0x02); 3869 3868 } 3870 - transport_kunmap_first_data_page(cmd); 3869 + transport_kunmap_data_sg(cmd); 3871 3870 buf = NULL; 3872 3871 3873 3872 /* ··· 3967 3966 return -EINVAL; 
3968 3967 } 3969 3968 3970 - buf = transport_kmap_first_data_page(cmd); 3969 + buf = transport_kmap_data_sg(cmd); 3971 3970 buf[0] = ((su_dev->t10_pr.pr_generation >> 24) & 0xff); 3972 3971 buf[1] = ((su_dev->t10_pr.pr_generation >> 16) & 0xff); 3973 3972 buf[2] = ((su_dev->t10_pr.pr_generation >> 8) & 0xff); ··· 4001 4000 buf[6] = ((add_len >> 8) & 0xff); 4002 4001 buf[7] = (add_len & 0xff); 4003 4002 4004 - transport_kunmap_first_data_page(cmd); 4003 + transport_kunmap_data_sg(cmd); 4005 4004 4006 4005 return 0; 4007 4006 } ··· 4027 4026 return -EINVAL; 4028 4027 } 4029 4028 4030 - buf = transport_kmap_first_data_page(cmd); 4029 + buf = transport_kmap_data_sg(cmd); 4031 4030 buf[0] = ((su_dev->t10_pr.pr_generation >> 24) & 0xff); 4032 4031 buf[1] = ((su_dev->t10_pr.pr_generation >> 16) & 0xff); 4033 4032 buf[2] = ((su_dev->t10_pr.pr_generation >> 8) & 0xff); ··· 4086 4085 4087 4086 err: 4088 4087 spin_unlock(&se_dev->dev_reservation_lock); 4089 - transport_kunmap_first_data_page(cmd); 4088 + transport_kunmap_data_sg(cmd); 4090 4089 4091 4090 return 0; 4092 4091 } ··· 4110 4109 return -EINVAL; 4111 4110 } 4112 4111 4113 - buf = transport_kmap_first_data_page(cmd); 4112 + buf = transport_kmap_data_sg(cmd); 4114 4113 4115 4114 buf[0] = ((add_len << 8) & 0xff); 4116 4115 buf[1] = (add_len & 0xff); ··· 4142 4141 buf[4] |= 0x02; /* PR_TYPE_WRITE_EXCLUSIVE */ 4143 4142 buf[5] |= 0x01; /* PR_TYPE_EXCLUSIVE_ACCESS_ALLREG */ 4144 4143 4145 - transport_kunmap_first_data_page(cmd); 4144 + transport_kunmap_data_sg(cmd); 4146 4145 4147 4146 return 0; 4148 4147 } ··· 4172 4171 return -EINVAL; 4173 4172 } 4174 4173 4175 - buf = transport_kmap_first_data_page(cmd); 4174 + buf = transport_kmap_data_sg(cmd); 4176 4175 4177 4176 buf[0] = ((su_dev->t10_pr.pr_generation >> 24) & 0xff); 4178 4177 buf[1] = ((su_dev->t10_pr.pr_generation >> 16) & 0xff); ··· 4293 4292 buf[6] = ((add_len >> 8) & 0xff); 4294 4293 buf[7] = (add_len & 0xff); 4295 4294 4296 - 
transport_kunmap_first_data_page(cmd); 4295 + transport_kunmap_data_sg(cmd); 4297 4296 4298 4297 return 0; 4299 4298 }
+2 -2
drivers/target/target_core_pscsi.c
··· 693 693 694 694 if (task->task_se_cmd->se_deve->lun_flags & 695 695 TRANSPORT_LUNFLAGS_READ_ONLY) { 696 - unsigned char *buf = transport_kmap_first_data_page(task->task_se_cmd); 696 + unsigned char *buf = transport_kmap_data_sg(task->task_se_cmd); 697 697 698 698 if (cdb[0] == MODE_SENSE_10) { 699 699 if (!(buf[3] & 0x80)) ··· 703 703 buf[2] |= 0x80; 704 704 } 705 705 706 - transport_kunmap_first_data_page(task->task_se_cmd); 706 + transport_kunmap_data_sg(task->task_se_cmd); 707 707 } 708 708 } 709 709 after_mode_sense:
+1 -2
drivers/target/target_core_tpg.c
··· 807 807 808 808 struct se_lun *core_tpg_pre_dellun( 809 809 struct se_portal_group *tpg, 810 - u32 unpacked_lun, 811 - int *ret) 810 + u32 unpacked_lun) 812 811 { 813 812 struct se_lun *lun; 814 813
+85 -43
drivers/target/target_core_transport.c
··· 1255 1255 static void scsi_dump_inquiry(struct se_device *dev) 1256 1256 { 1257 1257 struct t10_wwn *wwn = &dev->se_sub_dev->t10_wwn; 1258 + char buf[17]; 1258 1259 int i, device_type; 1259 1260 /* 1260 1261 * Print Linux/SCSI style INQUIRY formatting to the kernel ring buffer 1261 1262 */ 1262 - pr_debug(" Vendor: "); 1263 1263 for (i = 0; i < 8; i++) 1264 1264 if (wwn->vendor[i] >= 0x20) 1265 - pr_debug("%c", wwn->vendor[i]); 1265 + buf[i] = wwn->vendor[i]; 1266 1266 else 1267 - pr_debug(" "); 1267 + buf[i] = ' '; 1268 + buf[i] = '\0'; 1269 + pr_debug(" Vendor: %s\n", buf); 1268 1270 1269 - pr_debug(" Model: "); 1270 1271 for (i = 0; i < 16; i++) 1271 1272 if (wwn->model[i] >= 0x20) 1272 - pr_debug("%c", wwn->model[i]); 1273 + buf[i] = wwn->model[i]; 1273 1274 else 1274 - pr_debug(" "); 1275 + buf[i] = ' '; 1276 + buf[i] = '\0'; 1277 + pr_debug(" Model: %s\n", buf); 1275 1278 1276 - pr_debug(" Revision: "); 1277 1279 for (i = 0; i < 4; i++) 1278 1280 if (wwn->revision[i] >= 0x20) 1279 - pr_debug("%c", wwn->revision[i]); 1281 + buf[i] = wwn->revision[i]; 1280 1282 else 1281 - pr_debug(" "); 1282 - 1283 - pr_debug("\n"); 1283 + buf[i] = ' '; 1284 + buf[i] = '\0'; 1285 + pr_debug(" Revision: %s\n", buf); 1284 1286 1285 1287 device_type = dev->transport->get_device_type(dev); 1286 1288 pr_debug(" Type: %s ", scsi_device_type(device_type)); ··· 1657 1655 * This may only be called from process context, and also currently 1658 1656 * assumes internal allocation of fabric payload buffer by target-core. 
1659 1657 **/ 1660 - int target_submit_cmd(struct se_cmd *se_cmd, struct se_session *se_sess, 1658 + void target_submit_cmd(struct se_cmd *se_cmd, struct se_session *se_sess, 1661 1659 unsigned char *cdb, unsigned char *sense, u32 unpacked_lun, 1662 1660 u32 data_length, int task_attr, int data_dir, int flags) 1663 1661 { ··· 1690 1688 /* 1691 1689 * Locate se_lun pointer and attach it to struct se_cmd 1692 1690 */ 1693 - if (transport_lookup_cmd_lun(se_cmd, unpacked_lun) < 0) 1694 - goto out_check_cond; 1691 + if (transport_lookup_cmd_lun(se_cmd, unpacked_lun) < 0) { 1692 + transport_send_check_condition_and_sense(se_cmd, 1693 + se_cmd->scsi_sense_reason, 0); 1694 + target_put_sess_cmd(se_sess, se_cmd); 1695 + return; 1696 + } 1695 1697 /* 1696 1698 * Sanitize CDBs via transport_generic_cmd_sequencer() and 1697 1699 * allocate the necessary tasks to complete the received CDB+data 1698 1700 */ 1699 1701 rc = transport_generic_allocate_tasks(se_cmd, cdb); 1700 - if (rc != 0) 1701 - goto out_check_cond; 1702 + if (rc != 0) { 1703 + transport_generic_request_failure(se_cmd); 1704 + return; 1705 + } 1702 1706 /* 1703 1707 * Dispatch se_cmd descriptor to se_lun->lun_se_dev backend 1704 1708 * for immediate execution of READs, otherwise wait for ··· 1712 1704 * when fabric has filled the incoming buffer. 
1713 1705 */ 1714 1706 transport_handle_cdb_direct(se_cmd); 1715 - return 0; 1716 - 1717 - out_check_cond: 1718 - transport_send_check_condition_and_sense(se_cmd, 1719 - se_cmd->scsi_sense_reason, 0); 1720 - return 0; 1707 + return; 1721 1708 } 1722 1709 EXPORT_SYMBOL(target_submit_cmd); 1723 1710 ··· 2697 2694 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; 2698 2695 2699 2696 if (target_check_write_same_discard(&cdb[10], dev) < 0) 2700 - goto out_invalid_cdb_field; 2697 + goto out_unsupported_cdb; 2701 2698 if (!passthrough) 2702 2699 cmd->execute_task = target_emulate_write_same; 2703 2700 break; ··· 2980 2977 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; 2981 2978 2982 2979 if (target_check_write_same_discard(&cdb[1], dev) < 0) 2983 - goto out_invalid_cdb_field; 2980 + goto out_unsupported_cdb; 2984 2981 if (!passthrough) 2985 2982 cmd->execute_task = target_emulate_write_same; 2986 2983 break; ··· 3003 3000 * of byte 1 bit 3 UNMAP instead of original reserved field 3004 3001 */ 3005 3002 if (target_check_write_same_discard(&cdb[1], dev) < 0) 3006 - goto out_invalid_cdb_field; 3003 + goto out_unsupported_cdb; 3007 3004 if (!passthrough) 3008 3005 cmd->execute_task = target_emulate_write_same; 3009 3006 break; ··· 3084 3081 if (!(passthrough || cmd->execute_task || 3085 3082 (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB))) 3086 3083 goto out_unsupported_cdb; 3087 - 3088 - /* Let's limit control cdbs to a page, for simplicity's sake. 
*/ 3089 - if ((cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB) && 3090 - size > PAGE_SIZE) 3091 - goto out_invalid_cdb_field; 3092 3084 3093 3085 transport_set_supported_SAM_opcode(cmd); 3094 3086 return ret; ··· 3488 3490 } 3489 3491 EXPORT_SYMBOL(transport_generic_map_mem_to_cmd); 3490 3492 3491 - void *transport_kmap_first_data_page(struct se_cmd *cmd) 3493 + void *transport_kmap_data_sg(struct se_cmd *cmd) 3492 3494 { 3493 3495 struct scatterlist *sg = cmd->t_data_sg; 3496 + struct page **pages; 3497 + int i; 3494 3498 3495 3499 BUG_ON(!sg); 3496 3500 /* ··· 3500 3500 * tcm_loop who may be using a contig buffer from the SCSI midlayer for 3501 3501 * control CDBs passed as SGLs via transport_generic_map_mem_to_cmd() 3502 3502 */ 3503 - return kmap(sg_page(sg)) + sg->offset; 3504 - } 3505 - EXPORT_SYMBOL(transport_kmap_first_data_page); 3503 + if (!cmd->t_data_nents) 3504 + return NULL; 3505 + else if (cmd->t_data_nents == 1) 3506 + return kmap(sg_page(sg)) + sg->offset; 3506 3507 3507 - void transport_kunmap_first_data_page(struct se_cmd *cmd) 3508 - { 3509 - kunmap(sg_page(cmd->t_data_sg)); 3508 + /* >1 page. 
use vmap */ 3509 + pages = kmalloc(sizeof(*pages) * cmd->t_data_nents, GFP_KERNEL); 3510 + if (!pages) 3511 + return NULL; 3512 + 3513 + /* convert sg[] to pages[] */ 3514 + for_each_sg(cmd->t_data_sg, sg, cmd->t_data_nents, i) { 3515 + pages[i] = sg_page(sg); 3516 + } 3517 + 3518 + cmd->t_data_vmap = vmap(pages, cmd->t_data_nents, VM_MAP, PAGE_KERNEL); 3519 + kfree(pages); 3520 + if (!cmd->t_data_vmap) 3521 + return NULL; 3522 + 3523 + return cmd->t_data_vmap + cmd->t_data_sg[0].offset; 3510 3524 } 3511 - EXPORT_SYMBOL(transport_kunmap_first_data_page); 3525 + EXPORT_SYMBOL(transport_kmap_data_sg); 3526 + 3527 + void transport_kunmap_data_sg(struct se_cmd *cmd) 3528 + { 3529 + if (!cmd->t_data_nents) 3530 + return; 3531 + else if (cmd->t_data_nents == 1) 3532 + kunmap(sg_page(cmd->t_data_sg)); 3533 + 3534 + vunmap(cmd->t_data_vmap); 3535 + cmd->t_data_vmap = NULL; 3536 + } 3537 + EXPORT_SYMBOL(transport_kunmap_data_sg); 3512 3538 3513 3539 static int 3514 3540 transport_generic_get_mem(struct se_cmd *cmd) ··· 3542 3516 u32 length = cmd->data_length; 3543 3517 unsigned int nents; 3544 3518 struct page *page; 3519 + gfp_t zero_flag; 3545 3520 int i = 0; 3546 3521 3547 3522 nents = DIV_ROUND_UP(length, PAGE_SIZE); ··· 3553 3526 cmd->t_data_nents = nents; 3554 3527 sg_init_table(cmd->t_data_sg, nents); 3555 3528 3529 + zero_flag = cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB ? 
0 : __GFP_ZERO; 3530 + 3556 3531 while (length) { 3557 3532 u32 page_len = min_t(u32, length, PAGE_SIZE); 3558 - page = alloc_page(GFP_KERNEL | __GFP_ZERO); 3533 + page = alloc_page(GFP_KERNEL | zero_flag); 3559 3534 if (!page) 3560 3535 goto out; 3561 3536 ··· 3785 3756 struct se_task *task; 3786 3757 unsigned long flags; 3787 3758 3759 + /* Workaround for handling zero-length control CDBs */ 3760 + if ((cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB) && 3761 + !cmd->data_length) 3762 + return 0; 3763 + 3788 3764 task = transport_generic_get_task(cmd, cmd->data_direction); 3789 3765 if (!task) 3790 3766 return -ENOMEM; ··· 3861 3827 else if (!task_cdbs && (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB)) { 3862 3828 cmd->t_state = TRANSPORT_COMPLETE; 3863 3829 atomic_set(&cmd->t_transport_active, 1); 3830 + 3831 + if (cmd->t_task_cdb[0] == REQUEST_SENSE) { 3832 + u8 ua_asc = 0, ua_ascq = 0; 3833 + 3834 + core_scsi3_ua_clear_for_request_sense(cmd, 3835 + &ua_asc, &ua_ascq); 3836 + } 3837 + 3864 3838 INIT_WORK(&cmd->work, target_complete_ok_work); 3865 3839 queue_work(target_completion_wq, &cmd->work); 3866 3840 return 0; ··· 4490 4448 /* CURRENT ERROR */ 4491 4449 buffer[offset] = 0x70; 4492 4450 buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10; 4493 - /* ABORTED COMMAND */ 4494 - buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND; 4451 + /* ILLEGAL REQUEST */ 4452 + buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST; 4495 4453 /* INVALID FIELD IN CDB */ 4496 4454 buffer[offset+SPC_ASC_KEY_OFFSET] = 0x24; 4497 4455 break; ··· 4499 4457 /* CURRENT ERROR */ 4500 4458 buffer[offset] = 0x70; 4501 4459 buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10; 4502 - /* ABORTED COMMAND */ 4503 - buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND; 4460 + /* ILLEGAL REQUEST */ 4461 + buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST; 4504 4462 /* INVALID FIELD IN PARAMETER LIST */ 4505 4463 buffer[offset+SPC_ASC_KEY_OFFSET] = 0x26; 4506 4464 break;
+2 -7
drivers/target/tcm_fc/tfc_cmd.c
··· 540 540 int data_dir = 0; 541 541 u32 data_len; 542 542 int task_attr; 543 - int ret; 544 543 545 544 fcp = fc_frame_payload_get(cmd->req_frame, sizeof(*fcp)); 546 545 if (!fcp) ··· 602 603 * Use a single se_cmd->cmd_kref as we expect to release se_cmd 603 604 * directly from ft_check_stop_free callback in response path. 604 605 */ 605 - ret = target_submit_cmd(&cmd->se_cmd, cmd->sess->se_sess, cmd->cdb, 606 + target_submit_cmd(&cmd->se_cmd, cmd->sess->se_sess, cmd->cdb, 606 607 &cmd->ft_sense_buffer[0], cmd->lun, data_len, 607 608 task_attr, data_dir, 0); 608 - pr_debug("r_ctl %x alloc target_submit_cmd %d\n", fh->fh_r_ctl, ret); 609 - if (ret < 0) { 610 - ft_dump_cmd(cmd, __func__); 611 - return; 612 - } 609 + pr_debug("r_ctl %x alloc target_submit_cmd\n", fh->fh_r_ctl); 613 610 return; 614 611 615 612 err:
+2 -2
include/target/target_core_backend.h
··· 59 59 int transport_set_vpd_ident(struct t10_vpd *, unsigned char *); 60 60 61 61 /* core helpers also used by command snooping in pscsi */ 62 - void *transport_kmap_first_data_page(struct se_cmd *); 63 - void transport_kunmap_first_data_page(struct se_cmd *); 62 + void *transport_kmap_data_sg(struct se_cmd *); 63 + void transport_kunmap_data_sg(struct se_cmd *); 64 64 65 65 #endif /* TARGET_CORE_BACKEND_H */
+1
include/target/target_core_base.h
··· 582 582 583 583 struct scatterlist *t_data_sg; 584 584 unsigned int t_data_nents; 585 + void *t_data_vmap; 585 586 struct scatterlist *t_bidi_data_sg; 586 587 unsigned int t_bidi_data_nents; 587 588
+1 -1
include/target/target_core_fabric.h
··· 114 114 struct se_session *, u32, int, int, unsigned char *); 115 115 int transport_lookup_cmd_lun(struct se_cmd *, u32); 116 116 int transport_generic_allocate_tasks(struct se_cmd *, unsigned char *); 117 - int target_submit_cmd(struct se_cmd *, struct se_session *, unsigned char *, 117 + void target_submit_cmd(struct se_cmd *, struct se_session *, unsigned char *, 118 118 unsigned char *, u32, u32, int, int, int); 119 119 int transport_handle_cdb_direct(struct se_cmd *); 120 120 int transport_generic_handle_cdb_map(struct se_cmd *);