Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

Merge git://git.kernel.org/pub/scm/linux/kernel/git/nab/target-pending

Pull SCSI target fixes from Nicholas Bellinger:
- fix tcm-user backend driver expired cmd time processing (agrover)
- eliminate kref_put_spinlock_irqsave() for I/O completion (bart)
- fix iscsi login kthread failure case hung task regression (nab)
- fix COMPARE_AND_WRITE completion use-after-free race (nab)
- fix COMPARE_AND_WRITE with SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC non-zero
  SGL offset data corruption (Jan + Doug)
- fix >= v4.4-rc1 regression for tcm_qla2xxx enable configfs attribute
(Himanshu + HCH)

* git://git.kernel.org/pub/scm/linux/kernel/git/nab/target-pending:
target/stat: print full t10_wwn.model buffer
target: fix COMPARE_AND_WRITE non zero SGL offset data corruption
qla2xxx: Fix regression introduced by target configFS changes
kref: Remove kref_put_spinlock_irqsave()
target: Invoke release_cmd() callback without holding a spinlock
target: Fix race for SCF_COMPARE_AND_WRITE_POST checking
iscsi-target: Fix rx_login_comp hang after login failure
iscsi-target: return -ENOMEM instead of -1 in case of failed kmalloc()
target/user: Do not set unused fields in tcmu_ops
target/user: Fix time calc in expired cmd processing

11 files changed, 53 insertions(+), 64 deletions(-)
drivers/scsi/qla2xxx/tcm_qla2xxx.c (+1 -1)

@@ -902,7 +902,7 @@
 	return sprintf(page, "%d\n", tpg->tpg_attrib.fabric_prot_type);
 }
 
-CONFIGFS_ATTR_WO(tcm_qla2xxx_tpg_, enable);
+CONFIGFS_ATTR(tcm_qla2xxx_tpg_, enable);
 CONFIGFS_ATTR_RO(tcm_qla2xxx_tpg_, dynamic_sessions);
 CONFIGFS_ATTR(tcm_qla2xxx_tpg_, fabric_prot_type);
 
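The regression fixed here came from the tree-wide conversion to the CONFIGFS_ATTR*() helpers, which left the tpg "enable" attribute registered write-only. As a rough sketch of why the one-macro change is the whole fix (reconstructed from memory of the v4.4-era include/linux/configfs.h, so treat the details as approximate):

/* CONFIGFS_ATTR() registers both callbacks and a 0644 mode, so it expects
 * <pfx><name>_show() *and* <pfx><name>_store() to exist: */
#define CONFIGFS_ATTR(_pfx, _name)				\
static struct configfs_attribute _pfx##attr_##_name = {	\
	.ca_name	= __stringify(_name),			\
	.ca_mode	= S_IRUGO | S_IWUSR,			\
	.ca_owner	= THIS_MODULE,				\
	.show		= _pfx##_name##_show,			\
	.store		= _pfx##_name##_store,			\
}

/* CONFIGFS_ATTR_WO() wires only .store with a write-only mode, so using it
 * for "enable" made the attribute unreadable from userspace -- the
 * >= v4.4-rc1 regression noted in the pull summary. */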
drivers/target/iscsi/iscsi_target.c (+12 -1)

@@ -4074,6 +4074,17 @@
 	return iscsit_add_reject(conn, ISCSI_REASON_BOOKMARK_NO_RESOURCES, buf);
 }
 
+static bool iscsi_target_check_conn_state(struct iscsi_conn *conn)
+{
+	bool ret;
+
+	spin_lock_bh(&conn->state_lock);
+	ret = (conn->conn_state != TARG_CONN_STATE_LOGGED_IN);
+	spin_unlock_bh(&conn->state_lock);
+
+	return ret;
+}
+
 int iscsi_target_rx_thread(void *arg)
 {
 	int ret, rc;
@@ -4091,7 +4102,7 @@
 	 * incoming iscsi/tcp socket I/O, and/or failing the connection.
 	 */
 	rc = wait_for_completion_interruptible(&conn->rx_login_comp);
-	if (rc < 0)
+	if (rc < 0 || iscsi_target_check_conn_state(conn))
 		return 0;
 
 	if (conn->conn_transport->transport_type == ISCSI_INFINIBAND) {
drivers/target/iscsi/iscsi_target_nego.c (+1)

@@ -388,6 +388,7 @@
 	if (login->login_complete) {
 		if (conn->rx_thread && conn->rx_thread_active) {
 			send_sig(SIGINT, conn->rx_thread, 1);
+			complete(&conn->rx_login_comp);
 			kthread_stop(conn->rx_thread);
 		}
 		if (conn->tx_thread && conn->tx_thread_active) {
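Together with the iscsi_target.c hunk above, this closes the hung-task regression on failed logins: kthread_stop() waits for the rx thread, but the rx thread is parked in wait_for_completion_interruptible() on rx_login_comp, which nothing on the failure path ever completed. A minimal sketch of the resulting ordering, with the thread setup simplified and the helper name stop_rx_thread_on_failed_login() invented for illustration:

#include <linux/completion.h>
#include <linux/kthread.h>

static int rx_thread_fn(void *arg)
{
	struct iscsi_conn *conn = arg;

	/* Sleeps until the login path (success or failure) completes this. */
	if (wait_for_completion_interruptible(&conn->rx_login_comp) < 0 ||
	    iscsi_target_check_conn_state(conn))
		return 0;	/* login never reached LOGGED_IN: exit cleanly */
	/* ... normal rx loop ... */
	return 0;
}

static void stop_rx_thread_on_failed_login(struct iscsi_conn *conn)
{
	send_sig(SIGINT, conn->rx_thread, 1);
	complete(&conn->rx_login_comp);	/* unblock the sleeper first ...    */
	kthread_stop(conn->rx_thread);	/* ... so this can actually reap it */
}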
drivers/target/iscsi/iscsi_target_parameters.c (+5 -5)

@@ -208,7 +208,7 @@
 	if (!pl) {
 		pr_err("Unable to allocate memory for"
 				" struct iscsi_param_list.\n");
-		return -1 ;
+		return -ENOMEM;
 	}
 	INIT_LIST_HEAD(&pl->param_list);
 	INIT_LIST_HEAD(&pl->extra_response_list);
@@ -578,7 +578,7 @@
 	param_list = kzalloc(sizeof(struct iscsi_param_list), GFP_KERNEL);
 	if (!param_list) {
 		pr_err("Unable to allocate memory for struct iscsi_param_list.\n");
-		return -1;
+		return -ENOMEM;
 	}
 	INIT_LIST_HEAD(&param_list->param_list);
 	INIT_LIST_HEAD(&param_list->extra_response_list);
@@ -629,7 +629,7 @@
 
 err_out:
 	iscsi_release_param_list(param_list);
-	return -1;
+	return -ENOMEM;
 }
 
 static void iscsi_release_extra_responses(struct iscsi_param_list *param_list)
@@ -729,7 +729,7 @@
 	if (!extra_response) {
 		pr_err("Unable to allocate memory for"
 				" struct iscsi_extra_response.\n");
-		return -1;
+		return -ENOMEM;
 	}
 	INIT_LIST_HEAD(&extra_response->er_list);
 
@@ -1370,7 +1370,7 @@
 	tmpbuf = kzalloc(length + 1, GFP_KERNEL);
 	if (!tmpbuf) {
 		pr_err("Unable to allocate %u + 1 bytes for tmpbuf.\n", length);
-		return -1;
+		return -ENOMEM;
 	}
 
 	memcpy(tmpbuf, textbuf, length);
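Returning a real errno here is more than style: a bare -1 happens to equal -EPERM, so errno-aware callers would report an allocation failure as a permission error. A hedged sketch of a caller (the wrapper function is invented for illustration; iscsi_create_default_params() is one of the allocators patched above):

#include <linux/errno.h>

static int setup_default_params(struct iscsi_conn *conn)
{
	int ret;

	ret = iscsi_create_default_params(&conn->param_list);
	if (ret < 0)
		return ret;	/* now propagates -ENOMEM, not a bogus -EPERM */
	return 0;
}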
drivers/target/target_core_sbc.c (+11 -6)

@@ -371,7 +371,8 @@
 	return 0;
 }
 
-static sense_reason_t xdreadwrite_callback(struct se_cmd *cmd, bool success)
+static sense_reason_t xdreadwrite_callback(struct se_cmd *cmd, bool success,
+					   int *post_ret)
 {
 	unsigned char *buf, *addr;
 	struct scatterlist *sg;
@@ -437,7 +438,8 @@
 			       cmd->data_direction);
 }
 
-static sense_reason_t compare_and_write_post(struct se_cmd *cmd, bool success)
+static sense_reason_t compare_and_write_post(struct se_cmd *cmd, bool success,
+					     int *post_ret)
 {
 	struct se_device *dev = cmd->se_dev;
 
@@ -447,8 +449,10 @@
 	 * sent to the backend driver.
 	 */
 	spin_lock_irq(&cmd->t_state_lock);
-	if ((cmd->transport_state & CMD_T_SENT) && !cmd->scsi_status)
+	if ((cmd->transport_state & CMD_T_SENT) && !cmd->scsi_status) {
 		cmd->se_cmd_flags |= SCF_COMPARE_AND_WRITE_POST;
+		*post_ret = 1;
+	}
 	spin_unlock_irq(&cmd->t_state_lock);
 
 	/*
@@ -460,7 +464,8 @@
 	return TCM_NO_SENSE;
 }
 
-static sense_reason_t compare_and_write_callback(struct se_cmd *cmd, bool success)
+static sense_reason_t compare_and_write_callback(struct se_cmd *cmd, bool success,
+						 int *post_ret)
 {
 	struct se_device *dev = cmd->se_dev;
 	struct scatterlist *write_sg = NULL, *sg;
@@ -556,10 +561,10 @@
 
 		if (block_size < PAGE_SIZE) {
 			sg_set_page(&write_sg[i], m.page, block_size,
-				    block_size);
+				    m.piter.sg->offset + block_size);
 		} else {
 			sg_miter_next(&m);
 			sg_set_page(&write_sg[i], m.page, block_size,
-				    0);
+				    m.piter.sg->offset);
 		}
 		len -= block_size;
 		i++;
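The last hunk is the data-corruption fix called out in the pull summary. sg_set_page(sg, page, len, offset) describes len bytes starting at offset within page, so when a fabric maps the READ payload into the middle of a page (the SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC case), the write SGL must inherit that starting offset. A minimal sketch of the corrected mapping, with the helper name invented and 'src' standing in for m.piter.sg above:

#include <linux/scatterlist.h>

static void map_second_block(struct scatterlist *dst, struct scatterlist *src,
			     unsigned int block_size)
{
	/* Honor where the source payload actually starts in its page. */
	sg_set_page(dst, sg_page(src), block_size,
		    src->offset + block_size);
	/* The pre-patch code passed a bare block_size (or 0) as the offset,
	 * silently assuming src->offset == 0; with a nonzero offset the
	 * backend then wrote the wrong bytes of the page. */
}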
drivers/target/target_core_stat.c (+1 -1)

@@ -246,7 +246,7 @@
 	char str[sizeof(dev->t10_wwn.model)+1];
 
 	/* scsiLuProductId */
-	for (i = 0; i < sizeof(dev->t10_wwn.vendor); i++)
+	for (i = 0; i < sizeof(dev->t10_wwn.model); i++)
 		str[i] = ISPRINT(dev->t10_wwn.model[i]) ?
 			dev->t10_wwn.model[i] : ' ';
 	str[i] = '\0';
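A classic copy-paste bound: the loop copies t10_wwn.model but iterated over sizeof(t10_wwn.vendor). Assuming the v4.4-era field sizes of an 8-byte vendor and a 16-byte model (quoted from memory; the exact sizes don't change the point), the emitted scsiLuProductId was silently cut in half. A standalone C illustration:

#include <stdio.h>

struct t10_wwn_example {	/* field sizes assumed, see above */
	char vendor[8];
	char model[16];
};

int main(void)
{
	struct t10_wwn_example w = { "LIO-ORG", "FILEIO_LONGNAME" };

	/* buggy bound: prints at most 8 of the 16 model bytes */
	printf("%.*s\n", (int)sizeof(w.vendor), w.model);
	/* fixed bound: the whole buffer */
	printf("%.*s\n", (int)sizeof(w.model), w.model);
	return 0;
}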
drivers/target/target_core_tmr.c (+6 -1)

@@ -130,6 +130,9 @@
 		if (tmr->ref_task_tag != ref_tag)
 			continue;
 
+		if (!kref_get_unless_zero(&se_cmd->cmd_kref))
+			continue;
+
 		printk("ABORT_TASK: Found referenced %s task_tag: %llu\n",
 			se_cmd->se_tfo->get_fabric_name(), ref_tag);
 
@@ -139,13 +142,15 @@
 			       " skipping\n", ref_tag);
 			spin_unlock(&se_cmd->t_state_lock);
 			spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
+
+			target_put_sess_cmd(se_cmd);
+
 			goto out;
 		}
 		se_cmd->transport_state |= CMD_T_ABORTED;
 		spin_unlock(&se_cmd->t_state_lock);
 
 		list_del_init(&se_cmd->se_cmd_list);
-		kref_get(&se_cmd->cmd_kref);
 		spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
 
 		cancel_work_sync(&se_cmd->work);
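Taking the reference at the top of the lookup closes a use-after-free window: a plain kref_get() on a command whose refcount already hit zero resurrects an object that is mid-release. kref_get_unless_zero() succeeds only while the count is still live; sketched from memory of the v4.4-era include/linux/kref.h (renamed here to mark it as a reconstruction):

#include <linux/kref.h>

/* Roughly the era's definition: take a reference only if the count has not
 * already dropped to zero, and tell the caller which case occurred. */
static inline int __must_check sketch_kref_get_unless_zero(struct kref *kref)
{
	return atomic_add_unless(&kref->refcount, 1, 0);
}

Note the companion change in the skip path: once the lookup owns a reference, every early exit (the "already complete, skipping" branch) must drop it with target_put_sess_cmd(), which is exactly what the second hunk adds.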
drivers/target/target_core_transport.c (+14 -12)

@@ -1658,7 +1658,7 @@
 void transport_generic_request_failure(struct se_cmd *cmd,
 		sense_reason_t sense_reason)
 {
-	int ret = 0;
+	int ret = 0, post_ret = 0;
 
 	pr_debug("-----[ Storage Engine Exception for cmd: %p ITT: 0x%08llx"
 		" CDB: 0x%02x\n", cmd, cmd->tag, cmd->t_task_cdb[0]);
@@ -1680,7 +1680,7 @@
 	 */
 	if ((cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) &&
 	    cmd->transport_complete_callback)
-		cmd->transport_complete_callback(cmd, false);
+		cmd->transport_complete_callback(cmd, false, &post_ret);
 
 	switch (sense_reason) {
 	case TCM_NON_EXISTENT_LUN:
@@ -2068,11 +2068,13 @@
 	 */
 	if (cmd->transport_complete_callback) {
 		sense_reason_t rc;
+		bool caw = (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE);
+		bool zero_dl = !(cmd->data_length);
+		int post_ret = 0;
 
-		rc = cmd->transport_complete_callback(cmd, true);
-		if (!rc && !(cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE_POST)) {
-			if ((cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) &&
-			    !cmd->data_length)
+		rc = cmd->transport_complete_callback(cmd, true, &post_ret);
+		if (!rc && !post_ret) {
+			if (caw && zero_dl)
 				goto queue_rsp;
 
 			return;
@@ -2507,23 +2509,24 @@
 EXPORT_SYMBOL(target_get_sess_cmd);
 
 static void target_release_cmd_kref(struct kref *kref)
-		__releases(&se_cmd->se_sess->sess_cmd_lock)
 {
 	struct se_cmd *se_cmd = container_of(kref, struct se_cmd, cmd_kref);
 	struct se_session *se_sess = se_cmd->se_sess;
+	unsigned long flags;
 
+	spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
 	if (list_empty(&se_cmd->se_cmd_list)) {
-		spin_unlock(&se_sess->sess_cmd_lock);
+		spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
 		se_cmd->se_tfo->release_cmd(se_cmd);
 		return;
 	}
 	if (se_sess->sess_tearing_down && se_cmd->cmd_wait_set) {
-		spin_unlock(&se_sess->sess_cmd_lock);
+		spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
 		complete(&se_cmd->cmd_wait_comp);
 		return;
 	}
 	list_del(&se_cmd->se_cmd_list);
-	spin_unlock(&se_sess->sess_cmd_lock);
+	spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
 
 	se_cmd->se_tfo->release_cmd(se_cmd);
 }
@@ -2539,8 +2542,7 @@
 		se_cmd->se_tfo->release_cmd(se_cmd);
 		return 1;
 	}
-	return kref_put_spinlock_irqsave(&se_cmd->cmd_kref, target_release_cmd_kref,
-			&se_sess->sess_cmd_lock);
+	return kref_put(&se_cmd->cmd_kref, target_release_cmd_kref);
 }
 EXPORT_SYMBOL(target_put_sess_cmd);
 
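The target_release_cmd_kref() rewrite is what makes the kref_put_spinlock_irqsave() helper (removed from include/linux/kref.h below) unnecessary: rather than the kref primitive acquiring sess_cmd_lock on the caller's behalf, the release callback now takes and releases the lock itself, including the irqsave/irqrestore pairing, and a plain kref_put() suffices. A hedged sketch of the generic pattern, using a hypothetical struct obj:

#include <linux/kref.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct obj {				/* hypothetical, for illustration */
	struct kref ref;
	struct list_head node;
	spinlock_t *list_lock;		/* protects the list 'node' is on */
};

static void obj_release(struct kref *kref)
{
	struct obj *o = container_of(kref, struct obj, ref);
	unsigned long flags;

	/* The release callback now owns the locking the old helper hid. */
	spin_lock_irqsave(o->list_lock, flags);
	list_del(&o->node);
	spin_unlock_irqrestore(o->list_lock, flags);
	kfree(o);
}

static void obj_put(struct obj *o)
{
	kref_put(&o->ref, obj_release);	/* no lock argument needed */
}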
drivers/target/target_core_user.c (+1 -3)

@@ -638,7 +638,7 @@
 	if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags))
 		return 0;
 
-	if (!time_after(cmd->deadline, jiffies))
+	if (!time_after(jiffies, cmd->deadline))
 		return 0;
 
 	set_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags);
@@ -1101,8 +1101,6 @@
 
 static const struct target_backend_ops tcmu_ops = {
 	.name			= "user",
-	.inquiry_prod		= "USER",
-	.inquiry_rev		= TCMU_VERSION,
 	.owner			= THIS_MODULE,
 	.transport_flags	= TRANSPORT_FLAG_PASSTHROUGH,
 	.attach_hba		= tcmu_attach_hba,
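The first hunk fixes inverted expiry logic: time_after(a, b) is true when a is chronologically after b, so the old test declared commands expired while their deadline was still in the future, and never after it had passed. Simplified from include/linux/jiffies.h, with the typecheck machinery omitted, the macro renamed to mark it as a sketch, and the helper name invented:

#include <linux/jiffies.h>

/* Signed subtraction keeps the comparison correct across jiffies wraparound. */
#define sketch_time_after(a, b)	((long)((b) - (a)) < 0)

static int cmd_expired(unsigned long now, unsigned long deadline)
{
	return sketch_time_after(now, deadline);	/* 1 once 'now' passes it */
}

The second hunk is the "unused fields" cleanup from the commit list: with TRANSPORT_FLAG_PASSTHROUGH set, the core is not expected to consult .inquiry_prod/.inquiry_rev, so tcmu_ops stops initializing them.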
include/linux/kref.h (-33)

@@ -19,7 +19,6 @@
 #include <linux/atomic.h>
 #include <linux/kernel.h>
 #include <linux/mutex.h>
-#include <linux/spinlock.h>
 
 struct kref {
 	atomic_t refcount;
@@ -97,38 +96,6 @@
 static inline int kref_put(struct kref *kref, void (*release)(struct kref *kref))
 {
 	return kref_sub(kref, 1, release);
-}
-
-/**
- * kref_put_spinlock_irqsave - decrement refcount for object.
- * @kref: object.
- * @release: pointer to the function that will clean up the object when the
- *	     last reference to the object is released.
- *	     This pointer is required, and it is not acceptable to pass kfree
- *	     in as this function.
- * @lock: lock to take in release case
- *
- * Behaves identical to kref_put with one exception.  If the reference count
- * drops to zero, the lock will be taken atomically wrt dropping the reference
- * count. The release function has to call spin_unlock() without _irqrestore.
- */
-static inline int kref_put_spinlock_irqsave(struct kref *kref,
-		void (*release)(struct kref *kref),
-		spinlock_t *lock)
-{
-	unsigned long flags;
-
-	WARN_ON(release == NULL);
-	if (atomic_add_unless(&kref->refcount, -1, 1))
-		return 0;
-	spin_lock_irqsave(lock, flags);
-	if (atomic_dec_and_test(&kref->refcount)) {
-		release(kref);
-		local_irq_restore(flags);
-		return 1;
-	}
-	spin_unlock_irqrestore(lock, flags);
-	return 0;
 }
 
 static inline int kref_put_mutex(struct kref *kref,
include/target/target_core_base.h (+1 -1)

@@ -474,7 +474,7 @@
 	struct completion	cmd_wait_comp;
 	const struct target_core_fabric_ops *se_tfo;
 	sense_reason_t		(*execute_cmd)(struct se_cmd *);
-	sense_reason_t		(*transport_complete_callback)(struct se_cmd *, bool);
+	sense_reason_t		(*transport_complete_callback)(struct se_cmd *, bool, int *);
 	void			*protocol_data;
 
 	unsigned char		*t_task_cdb;
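This is the prototype change underpinning the target_core_sbc.c and target_core_transport.c hunks above: transport_complete_callback() gains an int * out-parameter through which COMPARE_AND_WRITE completion reports whether its post step has actually run. A hedged sketch of the call-site contract (wrapper name invented; the logic mirrors the transport.c hunk):

static void complete_with_callback(struct se_cmd *cmd)
{
	sense_reason_t rc;
	int post_ret = 0;

	rc = cmd->transport_complete_callback(cmd, true, &post_ret);
	if (!rc && !post_ret)
		return;	/* compare phase queued follow-up work; the response
			 * gets queued on a later completion instead */
	/* ... otherwise queue the response now, or fail the command on rc ... */
}

Keying the early return off an explicit *post_ret, set under t_state_lock in compare_and_write_post(), rather than re-reading cmd->se_cmd_flags after the callback has returned, is what closes the completion use-after-free race named in the pull summary.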