Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

Merge git://git.kernel.org/pub/scm/linux/kernel/git/nab/target-pending

Pull SCSI target fixes from Nicholas Bellinger:
"Mostly minor items this time around, the most notable being a FILEIO
backend change to enforce hw_max_sectors based upon the current
block_size to address a bug where large sized I/Os (> 1M) were being
rejected"

* git://git.kernel.org/pub/scm/linux/kernel/git/nab/target-pending:
qla2xxx: Fix scsi_host leak on qlt_lport_register callback failure
target: Remove extra percpu_ref_init
target/file: Update hw_max_sectors based on current block_size
iser-target: Move INIT_WORK setup into isert_create_device_ib_res
iscsi-target: Fix incorrect np->np_thread NULL assignment
qla2xxx: Fix schedule_delayed_work() for target timeout calculations
iser-target: fix error return code in isert_create_device_ib_res()
iscsi-target: Fix-up all zero data-length CDBs with R/W_BIT set
target: Remove write-only stats fields and lock from struct se_node_acl
iscsi-target: return -EINVAL on oversized configfs parameter
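
A quick way to see where the old 1M cap called out in the pull message came from is to run the arithmetic the FILEIO patches change. A small standalone C sketch (the FD_MAX_SECTORS and FD_MAX_BYTES values are taken from the target_core_file.h hunk below; nothing else here is kernel code):

#include <stdio.h>

/* Values from the target_core_file changes in this merge. */
#define FD_MAX_SECTORS	2048		/* old fixed limit, in sectors */
#define FD_MAX_BYTES	8388608		/* new limit, in bytes */

int main(void)
{
	unsigned int block_size = 512;

	/* Old behaviour: hw_max_sectors was pinned at FD_MAX_SECTORS,
	 * so the byte cap was 2048 * 512 = 1 MiB and any larger I/O
	 * was rejected. */
	printf("old cap: %u bytes\n", FD_MAX_SECTORS * block_size);

	/* New behaviour: hw_max_sectors is derived from the byte limit
	 * and the *current* block size. */
	printf("new cap: %u sectors at block_size %u\n",
	       FD_MAX_BYTES / block_size, block_size);

	block_size = 4096;
	printf("new cap: %u sectors at block_size %u\n",
	       FD_MAX_BYTES / block_size, block_size);
	return 0;
}

With a 512-byte block size the new limit works out to 16384 sectors instead of 2048, which is what lets I/Os larger than 1M through.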

10 files changed, +54 -51 total
drivers/infiniband/ulp/isert/ib_isert.c (+18 -8)

···
 	isert_conn->conn_rx_descs = NULL;
 }
 
+static void isert_cq_tx_work(struct work_struct *);
 static void isert_cq_tx_callback(struct ib_cq *, void *);
+static void isert_cq_rx_work(struct work_struct *);
 static void isert_cq_rx_callback(struct ib_cq *, void *);
 
 static int
···
 		cq_desc[i].device = device;
 		cq_desc[i].cq_index = i;
 
+		INIT_WORK(&cq_desc[i].cq_rx_work, isert_cq_rx_work);
 		device->dev_rx_cq[i] = ib_create_cq(device->ib_device,
 						isert_cq_rx_callback,
 						isert_cq_event_callback,
 						(void *)&cq_desc[i],
 						ISER_MAX_RX_CQ_LEN, i);
-		if (IS_ERR(device->dev_rx_cq[i]))
+		if (IS_ERR(device->dev_rx_cq[i])) {
+			ret = PTR_ERR(device->dev_rx_cq[i]);
+			device->dev_rx_cq[i] = NULL;
 			goto out_cq;
+		}
 
+		INIT_WORK(&cq_desc[i].cq_tx_work, isert_cq_tx_work);
 		device->dev_tx_cq[i] = ib_create_cq(device->ib_device,
 						isert_cq_tx_callback,
 						isert_cq_event_callback,
 						(void *)&cq_desc[i],
 						ISER_MAX_TX_CQ_LEN, i);
-		if (IS_ERR(device->dev_tx_cq[i]))
+		if (IS_ERR(device->dev_tx_cq[i])) {
+			ret = PTR_ERR(device->dev_tx_cq[i]);
+			device->dev_tx_cq[i] = NULL;
+			goto out_cq;
+		}
+
+		ret = ib_req_notify_cq(device->dev_rx_cq[i], IB_CQ_NEXT_COMP);
+		if (ret)
 			goto out_cq;
 
-		if (ib_req_notify_cq(device->dev_rx_cq[i], IB_CQ_NEXT_COMP))
-			goto out_cq;
-
-		if (ib_req_notify_cq(device->dev_tx_cq[i], IB_CQ_NEXT_COMP))
+		ret = ib_req_notify_cq(device->dev_tx_cq[i], IB_CQ_NEXT_COMP);
+		if (ret)
 			goto out_cq;
 	}
 
···
 {
 	struct isert_cq_desc *cq_desc = (struct isert_cq_desc *)context;
 
-	INIT_WORK(&cq_desc->cq_tx_work, isert_cq_tx_work);
 	queue_work(isert_comp_wq, &cq_desc->cq_tx_work);
 }
 
···
 {
 	struct isert_cq_desc *cq_desc = (struct isert_cq_desc *)context;
 
-	INIT_WORK(&cq_desc->cq_rx_work, isert_cq_rx_work);
 	queue_work(isert_rx_wq, &cq_desc->cq_rx_work);
 }

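The second half of this diff fixes a pattern where each completion callback re-ran INIT_WORK on a live work item. A minimal userspace sketch of the corrected ordering (the work_struct, INIT_WORK, and queue_work here are simplified stand-ins modeled on the kernel API, not the real ones):

#include <stdio.h>

/* Stand-ins for the kernel's work_struct/INIT_WORK/queue_work,
 * reduced to the bare minimum needed to show the ordering. */
struct work_struct {
	void (*func)(struct work_struct *);
};
#define INIT_WORK(w, f)	((w)->func = (f))

static void queue_work(struct work_struct *w)
{
	w->func(w);	/* a real workqueue would run this later */
}

static void isert_cq_rx_work(struct work_struct *w)
{
	(void)w;
	puts("rx completion work runs");
}

struct isert_cq_desc {
	struct work_struct cq_rx_work;
};

int main(void)
{
	struct isert_cq_desc desc;

	/* After the fix: the work item is initialized exactly once,
	 * when the CQ resources are created in
	 * isert_create_device_ib_res()... */
	INIT_WORK(&desc.cq_rx_work, isert_cq_rx_work);

	/* ...and the interrupt callback only queues it.  The old code
	 * re-ran INIT_WORK here on every completion, clobbering a work
	 * item that could already be pending on the workqueue. */
	queue_work(&desc.cq_rx_work);
	return 0;
}
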
drivers/scsi/qla2xxx/qla_target.c (+6 -4)

···
 		schedule_delayed_work(&tgt->sess_del_work, 0);
 	else
 		schedule_delayed_work(&tgt->sess_del_work,
-		    jiffies - sess->expires);
+		    sess->expires - jiffies);
 }
 
 /* ha->hardware_lock supposed to be held on entry */
···
 	struct scsi_qla_host *vha = tgt->vha;
 	struct qla_hw_data *ha = vha->hw;
 	struct qla_tgt_sess *sess;
-	unsigned long flags;
+	unsigned long flags, elapsed;
 
 	spin_lock_irqsave(&ha->hardware_lock, flags);
 	while (!list_empty(&tgt->del_sess_list)) {
 		sess = list_entry(tgt->del_sess_list.next, typeof(*sess),
 		    del_list_entry);
-		if (time_after_eq(jiffies, sess->expires)) {
+		elapsed = jiffies;
+		if (time_after_eq(elapsed, sess->expires)) {
 			qlt_undelete_sess(sess);
 
 			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf004,
···
 			ha->tgt.tgt_ops->put_sess(sess);
 		} else {
 			schedule_delayed_work(&tgt->sess_del_work,
-			    jiffies - sess->expires);
+			    sess->expires - elapsed);
 			break;
 		}
 	}
···
 	if (rc != 0) {
 		ha->tgt.tgt_ops = NULL;
 		ha->tgt.target_lport_ptr = NULL;
+		scsi_host_put(host);
 	}
 	mutex_unlock(&qla_tgt_mutex);
 	return rc;

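The timeout fix is easy to see in isolation: for a session that has not yet expired, the delay to request is expires - jiffies, but the old code computed jiffies - expires, which for a future expiry underflows to an enormous unsigned value. A standalone sketch of the arithmetic (jiffies and expires here are plain local variables, not the kernel's):

#include <stdio.h>

int main(void)
{
	/* Stand-ins for the kernel's jiffies counter and the session
	 * expiry time: the session expires 100 ticks from now. */
	unsigned long jiffies = 1000000;
	unsigned long expires = jiffies + 100;

	/* Old delay: unsigned underflow yields a huge value, so the
	 * delayed work is scheduled absurdly far in the future. */
	printf("jiffies - expires = %lu\n", jiffies - expires);

	/* Fixed delay: the 100 ticks actually remaining. */
	printf("expires - jiffies = %lu\n", expires - jiffies);
	return 0;
}

The patch also samples jiffies once into elapsed, so the time_after_eq() check and the delay computation see the same snapshot of the counter.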
drivers/target/iscsi/iscsi_target.c (+13 -14)

···
 		 */
 		send_sig(SIGINT, np->np_thread, 1);
 		kthread_stop(np->np_thread);
+		np->np_thread = NULL;
 	}
 
 	np->np_transport->iscsit_free_np(np);
···
 	if (((hdr->flags & ISCSI_FLAG_CMD_READ) ||
 	     (hdr->flags & ISCSI_FLAG_CMD_WRITE)) && !hdr->data_length) {
 		/*
-		 * Vmware ESX v3.0 uses a modified Cisco Initiator (v3.4.2)
-		 * that adds support for RESERVE/RELEASE.  There is a bug
-		 * add with this new functionality that sets R/W bits when
-		 * neither CDB carries any READ or WRITE datapayloads.
+		 * From RFC-3720 Section 10.3.1:
+		 *
+		 * "Either or both of R and W MAY be 1 when either the
+		 *  Expected Data Transfer Length and/or Bidirectional Read
+		 *  Expected Data Transfer Length are 0"
+		 *
+		 * For this case, go ahead and clear the unnecessary bits
+		 * to avoid any confusion with ->data_direction.
 		 */
-		if ((hdr->cdb[0] == 0x16) || (hdr->cdb[0] == 0x17)) {
-			hdr->flags &= ~ISCSI_FLAG_CMD_READ;
-			hdr->flags &= ~ISCSI_FLAG_CMD_WRITE;
-			goto done;
-		}
+		hdr->flags &= ~ISCSI_FLAG_CMD_READ;
+		hdr->flags &= ~ISCSI_FLAG_CMD_WRITE;
 
-		pr_err("ISCSI_FLAG_CMD_READ or ISCSI_FLAG_CMD_WRITE"
+		pr_warn("ISCSI_FLAG_CMD_READ or ISCSI_FLAG_CMD_WRITE"
 			" set when Expected Data Transfer Length is 0 for"
-			" CDB: 0x%02x. Bad iSCSI Initiator.\n", hdr->cdb[0]);
-		return iscsit_add_reject_cmd(cmd,
-			ISCSI_REASON_BOOKMARK_INVALID, buf);
+			" CDB: 0x%02x, Fixing up flags\n", hdr->cdb[0]);
 	}
-done:
 
 	if (!(hdr->flags & ISCSI_FLAG_CMD_READ) &&
 	    !(hdr->flags & ISCSI_FLAG_CMD_WRITE) && (hdr->data_length != 0)) {

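The new behaviour keyed off RFC-3720 Section 10.3.1 reduces to a couple of bit operations: with a zero Expected Data Transfer Length, R/W flags are legal but meaningless, so they are cleared and logged instead of the command being rejected. A hedged standalone sketch (the flag values follow include/scsi/iscsi_proto.h as best I recall; treat them as illustrative):

#include <stdint.h>
#include <stdio.h>

#define ISCSI_FLAG_CMD_READ	0x40
#define ISCSI_FLAG_CMD_WRITE	0x20

struct cmd_hdr {
	uint8_t  flags;
	uint32_t data_length;
};

/* Mirrors the fixed-up logic above: clear the R/W bits rather than
 * rejecting the command, since they carry no meaning here. */
static void fixup_rw_flags(struct cmd_hdr *hdr)
{
	if ((hdr->flags & (ISCSI_FLAG_CMD_READ | ISCSI_FLAG_CMD_WRITE)) &&
	    !hdr->data_length) {
		hdr->flags &= ~(ISCSI_FLAG_CMD_READ | ISCSI_FLAG_CMD_WRITE);
		printf("R/W set with zero data length, fixing up flags\n");
	}
}

int main(void)
{
	struct cmd_hdr hdr = { .flags = ISCSI_FLAG_CMD_READ, .data_length = 0 };

	fixup_rw_flags(&hdr);
	printf("flags now 0x%02x\n", hdr.flags);
	return 0;
}
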
drivers/target/iscsi/iscsi_target_configfs.c (+2 -1)

···
 									\
 	if (!capable(CAP_SYS_ADMIN))					\
 		return -EPERM;						\
-									\
+	if (count >= sizeof(auth->name))				\
+		return -EINVAL;						\
 	snprintf(auth->name, sizeof(auth->name), "%s", page);		\
 	if (!strncmp("NULL", auth->name, 4))				\
 		auth->naf_flags &= ~flags;				\

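This one-line guard matters because snprintf never overflows but does silently truncate: without the count >= sizeof() check, an oversized parameter would be stored shortened and then used. A standalone demonstration (store_secret and SECRET_LEN are hypothetical stand-ins for the configfs macro and sizeof(auth->name)):

#include <errno.h>
#include <stdio.h>
#include <string.h>

#define SECRET_LEN 8	/* stand-in for sizeof(auth->name) */

static int store_secret(char *dst, size_t dst_len,
			const char *page, size_t count)
{
	/* The added guard: input that cannot fit is rejected instead
	 * of being silently truncated by snprintf below. */
	if (count >= dst_len)
		return -EINVAL;
	snprintf(dst, dst_len, "%s", page);
	return 0;
}

int main(void)
{
	char secret[SECRET_LEN];
	const char *page = "way-too-long-secret";

	if (store_secret(secret, sizeof(secret), page, strlen(page)) == -EINVAL)
		puts("oversized parameter rejected");
	return 0;
}

With the guard in place, userspace gets a clear -EINVAL instead of a silently shortened authentication value.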
drivers/target/iscsi/iscsi_target_login.c (-6)

···
 
 out:
 	stop = kthread_should_stop();
-	if (!stop && signal_pending(current)) {
-		spin_lock_bh(&np->np_thread_lock);
-		stop = (np->np_thread_state == ISCSI_NP_THREAD_SHUTDOWN);
-		spin_unlock_bh(&np->np_thread_lock);
-	}
 	/* Wait for another socket.. */
 	if (!stop)
 		return 1;
···
 	iscsi_stop_login_thread_timer(np);
 	spin_lock_bh(&np->np_thread_lock);
 	np->np_thread_state = ISCSI_NP_THREAD_EXIT;
-	np->np_thread = NULL;
 	spin_unlock_bh(&np->np_thread_lock);
 
 	return 0;

drivers/target/target_core_device.c (+5)

···
 	dev->dev_attrib.block_size = block_size;
 	pr_debug("dev[%p]: SE Device block_size changed to %u\n",
 			dev, block_size);
+
+	if (dev->dev_attrib.max_bytes_per_io)
+		dev->dev_attrib.hw_max_sectors =
+			dev->dev_attrib.max_bytes_per_io / block_size;
+
 	return 0;
 }

drivers/target/target_core_file.c (+4 -4)

···
 	pr_debug("CORE_HBA[%d] - TCM FILEIO HBA Driver %s on Generic"
 		" Target Core Stack %s\n", hba->hba_id, FD_VERSION,
 		TARGET_CORE_MOD_VERSION);
-	pr_debug("CORE_HBA[%d] - Attached FILEIO HBA: %u to Generic"
-		" MaxSectors: %u\n",
-		hba->hba_id, fd_host->fd_host_id, FD_MAX_SECTORS);
+	pr_debug("CORE_HBA[%d] - Attached FILEIO HBA: %u to Generic\n",
+		hba->hba_id, fd_host->fd_host_id);
 
 	return 0;
 }
···
 	}
 
 	dev->dev_attrib.hw_block_size = fd_dev->fd_block_size;
-	dev->dev_attrib.hw_max_sectors = FD_MAX_SECTORS;
+	dev->dev_attrib.max_bytes_per_io = FD_MAX_BYTES;
+	dev->dev_attrib.hw_max_sectors = FD_MAX_BYTES / fd_dev->fd_block_size;
 	dev->dev_attrib.hw_queue_depth = FD_MAX_DEVICE_QUEUE_DEPTH;
 
 	if (fd_dev->fbd_flags & FDBD_HAS_BUFFERED_IO_WCE) {

drivers/target/target_core_file.h (+4 -1)

···
 #define FD_DEVICE_QUEUE_DEPTH	32
 #define FD_MAX_DEVICE_QUEUE_DEPTH 128
 #define FD_BLOCKSIZE		512
-#define FD_MAX_SECTORS		2048
+/*
+ * Limited by the number of iovecs (2048) per vfs_[writev,readv] call
+ */
+#define FD_MAX_BYTES		8388608
 
 #define RRF_EMULATE_CDB	0x01
 #define RRF_GOT_LBA	0x02

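For the curious, the 8388608 figure follows from the iovec limit named in the new comment, assuming one 4 KiB page per iovec: 2048 iovecs × 4096 bytes = 8388608 bytes (8 MiB). Combined with the target_core_device.c hunk above, that yields hw_max_sectors = 8388608 / 512 = 16384 for the default 512-byte block size, and 2048 sectors at 4096 bytes, in both cases a far larger byte cap than the old fixed FD_MAX_SECTORS limit of 2048 × 512 = 1 MiB.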
drivers/target/target_core_tpg.c (+1 -9)

···
 	snprintf(acl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname);
 	acl->se_tpg = tpg;
 	acl->acl_index = scsi_get_new_index(SCSI_AUTH_INTR_INDEX);
-	spin_lock_init(&acl->stats_lock);
 	acl->dynamic_node_acl = 1;
 
 	tpg->se_tpg_tfo->set_default_node_attributes(acl);
···
 	snprintf(acl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname);
 	acl->se_tpg = tpg;
 	acl->acl_index = scsi_get_new_index(SCSI_AUTH_INTR_INDEX);
-	spin_lock_init(&acl->stats_lock);
 
 	tpg->se_tpg_tfo->set_default_node_attributes(acl);
 
···
 	spin_lock_init(&lun->lun_sep_lock);
 	init_completion(&lun->lun_ref_comp);
 
-	ret = percpu_ref_init(&lun->lun_ref, core_tpg_lun_ref_release);
+	ret = core_tpg_post_addlun(se_tpg, lun, lun_access, dev);
 	if (ret < 0)
 		return ret;
-
-	ret = core_tpg_post_addlun(se_tpg, lun, lun_access, dev);
-	if (ret < 0) {
-		percpu_ref_cancel_init(&lun->lun_ref);
-		return ret;
-	}
 
 	return 0;
 }

include/target/target_core_base.h (+1 -4)

···
 	u32			acl_index;
 #define MAX_ACL_TAG_SIZE 64
 	char			acl_tag[MAX_ACL_TAG_SIZE];
-	u64			num_cmds;
-	u64			read_bytes;
-	u64			write_bytes;
-	spinlock_t		stats_lock;
 	/* Used for PR SPEC_I_PT=1 and REGISTER_AND_MOVE */
 	atomic_t		acl_pr_ref_count;
 	struct se_dev_entry	**device_list;
···
 	u32		unmap_granularity;
 	u32		unmap_granularity_alignment;
 	u32		max_write_same_len;
+	u32		max_bytes_per_io;
 	struct se_device *da_dev;
 	struct config_group da_group;
 };