Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

Merge tag 'scsi-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi

Pull SCSI fixes from James Bottomley:
"This is three logical fixes (as 5 patches).

The 3ware class of drivers were causing an oops with multiqueue by
tearing down the command mappings after completing the command (where
the variables in the command used to tear down the mapping were
no longer valid). There's also a fix for the qnap iscsi target which
was choking on us sending it commands that were too long and a fix for
the reworked aha1542 allocating GFP_KERNEL under a lock"

* tag 'scsi-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi:
3w-9xxx: fix command completion race
3w-xxxx: fix command completion race
3w-sas: fix command completion race
aha1542: Allocate memory before taking a lock
SCSI: add 1024 max sectors black list flag

+48 -146
+13 -44
drivers/scsi/3w-9xxx.c
··· 149 149 static int twa_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id, char *cdb, int use_sg, TW_SG_Entry *sglistarg); 150 150 static void twa_scsiop_execute_scsi_complete(TW_Device_Extension *tw_dev, int request_id); 151 151 static char *twa_string_lookup(twa_message_type *table, unsigned int aen_code); 152 - static void twa_unmap_scsi_data(TW_Device_Extension *tw_dev, int request_id); 153 152 154 153 /* Functions */ 155 154 ··· 1339 1340 } 1340 1341 1341 1342 /* Now complete the io */ 1343 + scsi_dma_unmap(cmd); 1344 + cmd->scsi_done(cmd); 1342 1345 tw_dev->state[request_id] = TW_S_COMPLETED; 1343 1346 twa_free_request_id(tw_dev, request_id); 1344 1347 tw_dev->posted_request_count--; 1345 - tw_dev->srb[request_id]->scsi_done(tw_dev->srb[request_id]); 1346 - twa_unmap_scsi_data(tw_dev, request_id); 1347 1348 } 1348 1349 1349 1350 /* Check for valid status after each drain */ ··· 1400 1401 } 1401 1402 } 1402 1403 } /* End twa_load_sgl() */ 1403 - 1404 - /* This function will perform a pci-dma mapping for a scatter gather list */ 1405 - static int twa_map_scsi_sg_data(TW_Device_Extension *tw_dev, int request_id) 1406 - { 1407 - int use_sg; 1408 - struct scsi_cmnd *cmd = tw_dev->srb[request_id]; 1409 - 1410 - use_sg = scsi_dma_map(cmd); 1411 - if (!use_sg) 1412 - return 0; 1413 - else if (use_sg < 0) { 1414 - TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1c, "Failed to map scatter gather list"); 1415 - return 0; 1416 - } 1417 - 1418 - cmd->SCp.phase = TW_PHASE_SGLIST; 1419 - cmd->SCp.have_data_in = use_sg; 1420 - 1421 - return use_sg; 1422 - } /* End twa_map_scsi_sg_data() */ 1423 1404 1424 1405 /* This function will poll for a response interrupt of a request */ 1425 1406 static int twa_poll_response(TW_Device_Extension *tw_dev, int request_id, int seconds) ··· 1579 1600 (tw_dev->state[i] != TW_S_INITIAL) && 1580 1601 (tw_dev->state[i] != TW_S_COMPLETED)) { 1581 1602 if (tw_dev->srb[i]) { 1582 - tw_dev->srb[i]->result = (DID_RESET << 16); 1583 - 
tw_dev->srb[i]->scsi_done(tw_dev->srb[i]); 1584 - twa_unmap_scsi_data(tw_dev, i); 1603 + struct scsi_cmnd *cmd = tw_dev->srb[i]; 1604 + 1605 + cmd->result = (DID_RESET << 16); 1606 + scsi_dma_unmap(cmd); 1607 + cmd->scsi_done(cmd); 1585 1608 } 1586 1609 } 1587 1610 } ··· 1762 1781 /* Save the scsi command for use by the ISR */ 1763 1782 tw_dev->srb[request_id] = SCpnt; 1764 1783 1765 - /* Initialize phase to zero */ 1766 - SCpnt->SCp.phase = TW_PHASE_INITIAL; 1767 - 1768 1784 retval = twa_scsiop_execute_scsi(tw_dev, request_id, NULL, 0, NULL); 1769 1785 switch (retval) { 1770 1786 case SCSI_MLQUEUE_HOST_BUSY: 1787 + scsi_dma_unmap(SCpnt); 1771 1788 twa_free_request_id(tw_dev, request_id); 1772 - twa_unmap_scsi_data(tw_dev, request_id); 1773 1789 break; 1774 1790 case 1: 1791 + SCpnt->result = (DID_ERROR << 16); 1792 + scsi_dma_unmap(SCpnt); 1793 + done(SCpnt); 1775 1794 tw_dev->state[request_id] = TW_S_COMPLETED; 1776 1795 twa_free_request_id(tw_dev, request_id); 1777 - twa_unmap_scsi_data(tw_dev, request_id); 1778 - SCpnt->result = (DID_ERROR << 16); 1779 - done(SCpnt); 1780 1796 retval = 0; 1781 1797 } 1782 1798 out: ··· 1841 1863 command_packet->sg_list[0].address = TW_CPU_TO_SGL(tw_dev->generic_buffer_phys[request_id]); 1842 1864 command_packet->sg_list[0].length = cpu_to_le32(TW_MIN_SGL_LENGTH); 1843 1865 } else { 1844 - sg_count = twa_map_scsi_sg_data(tw_dev, request_id); 1845 - if (sg_count == 0) 1866 + sg_count = scsi_dma_map(srb); 1867 + if (sg_count < 0) 1846 1868 goto out; 1847 1869 1848 1870 scsi_for_each_sg(srb, sg, sg_count, i) { ··· 1956 1978 (table[index].text != (char *)0)); index++); 1957 1979 return(table[index].text); 1958 1980 } /* End twa_string_lookup() */ 1959 - 1960 - /* This function will perform a pci-dma unmap */ 1961 - static void twa_unmap_scsi_data(TW_Device_Extension *tw_dev, int request_id) 1962 - { 1963 - struct scsi_cmnd *cmd = tw_dev->srb[request_id]; 1964 - 1965 - if (cmd->SCp.phase == TW_PHASE_SGLIST) 1966 - 
scsi_dma_unmap(cmd); 1967 - } /* End twa_unmap_scsi_data() */ 1968 1981 1969 1982 /* This function gets called when a disk is coming on-line */ 1970 1983 static int twa_slave_configure(struct scsi_device *sdev)
-5
drivers/scsi/3w-9xxx.h
··· 324 324 #define TW_CURRENT_DRIVER_BUILD 0 325 325 #define TW_CURRENT_DRIVER_BRANCH 0 326 326 327 - /* Phase defines */ 328 - #define TW_PHASE_INITIAL 0 329 - #define TW_PHASE_SINGLE 1 330 - #define TW_PHASE_SGLIST 2 331 - 332 327 /* Misc defines */ 333 328 #define TW_9550SX_DRAIN_COMPLETED 0xFFFF 334 329 #define TW_SECTOR_SIZE 512
+10 -40
drivers/scsi/3w-sas.c
··· 290 290 return 0; 291 291 } /* End twl_post_command_packet() */ 292 292 293 - /* This function will perform a pci-dma mapping for a scatter gather list */ 294 - static int twl_map_scsi_sg_data(TW_Device_Extension *tw_dev, int request_id) 295 - { 296 - int use_sg; 297 - struct scsi_cmnd *cmd = tw_dev->srb[request_id]; 298 - 299 - use_sg = scsi_dma_map(cmd); 300 - if (!use_sg) 301 - return 0; 302 - else if (use_sg < 0) { 303 - TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1, "Failed to map scatter gather list"); 304 - return 0; 305 - } 306 - 307 - cmd->SCp.phase = TW_PHASE_SGLIST; 308 - cmd->SCp.have_data_in = use_sg; 309 - 310 - return use_sg; 311 - } /* End twl_map_scsi_sg_data() */ 312 - 313 293 /* This function hands scsi cdb's to the firmware */ 314 294 static int twl_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id, char *cdb, int use_sg, TW_SG_Entry_ISO *sglistarg) 315 295 { ··· 337 357 if (!sglistarg) { 338 358 /* Map sglist from scsi layer to cmd packet */ 339 359 if (scsi_sg_count(srb)) { 340 - sg_count = twl_map_scsi_sg_data(tw_dev, request_id); 341 - if (sg_count == 0) 360 + sg_count = scsi_dma_map(srb); 361 + if (sg_count <= 0) 342 362 goto out; 343 363 344 364 scsi_for_each_sg(srb, sg, sg_count, i) { ··· 1082 1102 return retval; 1083 1103 } /* End twl_initialize_device_extension() */ 1084 1104 1085 - /* This function will perform a pci-dma unmap */ 1086 - static void twl_unmap_scsi_data(TW_Device_Extension *tw_dev, int request_id) 1087 - { 1088 - struct scsi_cmnd *cmd = tw_dev->srb[request_id]; 1089 - 1090 - if (cmd->SCp.phase == TW_PHASE_SGLIST) 1091 - scsi_dma_unmap(cmd); 1092 - } /* End twl_unmap_scsi_data() */ 1093 - 1094 1105 /* This function will handle attention interrupts */ 1095 1106 static int twl_handle_attention_interrupt(TW_Device_Extension *tw_dev) 1096 1107 { ··· 1222 1251 } 1223 1252 1224 1253 /* Now complete the io */ 1254 + scsi_dma_unmap(cmd); 1255 + cmd->scsi_done(cmd); 1225 1256 tw_dev->state[request_id] = TW_S_COMPLETED; 
1226 1257 twl_free_request_id(tw_dev, request_id); 1227 1258 tw_dev->posted_request_count--; 1228 - tw_dev->srb[request_id]->scsi_done(tw_dev->srb[request_id]); 1229 - twl_unmap_scsi_data(tw_dev, request_id); 1230 1259 } 1231 1260 1232 1261 /* Check for another response interrupt */ ··· 1371 1400 if ((tw_dev->state[i] != TW_S_FINISHED) && 1372 1401 (tw_dev->state[i] != TW_S_INITIAL) && 1373 1402 (tw_dev->state[i] != TW_S_COMPLETED)) { 1374 - if (tw_dev->srb[i]) { 1375 - tw_dev->srb[i]->result = (DID_RESET << 16); 1376 - tw_dev->srb[i]->scsi_done(tw_dev->srb[i]); 1377 - twl_unmap_scsi_data(tw_dev, i); 1403 + struct scsi_cmnd *cmd = tw_dev->srb[i]; 1404 + 1405 + if (cmd) { 1406 + cmd->result = (DID_RESET << 16); 1407 + scsi_dma_unmap(cmd); 1408 + cmd->scsi_done(cmd); 1378 1409 } 1379 1410 } 1380 1411 } ··· 1479 1506 1480 1507 /* Save the scsi command for use by the ISR */ 1481 1508 tw_dev->srb[request_id] = SCpnt; 1482 - 1483 - /* Initialize phase to zero */ 1484 - SCpnt->SCp.phase = TW_PHASE_INITIAL; 1485 1509 1486 1510 retval = twl_scsiop_execute_scsi(tw_dev, request_id, NULL, 0, NULL); 1487 1511 if (retval) {
-4
drivers/scsi/3w-sas.h
··· 103 103 #define TW_CURRENT_DRIVER_BUILD 0 104 104 #define TW_CURRENT_DRIVER_BRANCH 0 105 105 106 - /* Phase defines */ 107 - #define TW_PHASE_INITIAL 0 108 - #define TW_PHASE_SGLIST 2 109 - 110 106 /* Misc defines */ 111 107 #define TW_SECTOR_SIZE 512 112 108 #define TW_MAX_UNITS 32
+6 -36
drivers/scsi/3w-xxxx.c
··· 1271 1271 return 0; 1272 1272 } /* End tw_initialize_device_extension() */ 1273 1273 1274 - static int tw_map_scsi_sg_data(struct pci_dev *pdev, struct scsi_cmnd *cmd) 1275 - { 1276 - int use_sg; 1277 - 1278 - dprintk(KERN_WARNING "3w-xxxx: tw_map_scsi_sg_data()\n"); 1279 - 1280 - use_sg = scsi_dma_map(cmd); 1281 - if (use_sg < 0) { 1282 - printk(KERN_WARNING "3w-xxxx: tw_map_scsi_sg_data(): pci_map_sg() failed.\n"); 1283 - return 0; 1284 - } 1285 - 1286 - cmd->SCp.phase = TW_PHASE_SGLIST; 1287 - cmd->SCp.have_data_in = use_sg; 1288 - 1289 - return use_sg; 1290 - } /* End tw_map_scsi_sg_data() */ 1291 - 1292 - static void tw_unmap_scsi_data(struct pci_dev *pdev, struct scsi_cmnd *cmd) 1293 - { 1294 - dprintk(KERN_WARNING "3w-xxxx: tw_unmap_scsi_data()\n"); 1295 - 1296 - if (cmd->SCp.phase == TW_PHASE_SGLIST) 1297 - scsi_dma_unmap(cmd); 1298 - } /* End tw_unmap_scsi_data() */ 1299 - 1300 1274 /* This function will reset a device extension */ 1301 1275 static int tw_reset_device_extension(TW_Device_Extension *tw_dev) 1302 1276 { ··· 1293 1319 srb = tw_dev->srb[i]; 1294 1320 if (srb != NULL) { 1295 1321 srb->result = (DID_RESET << 16); 1296 - tw_dev->srb[i]->scsi_done(tw_dev->srb[i]); 1297 - tw_unmap_scsi_data(tw_dev->tw_pci_dev, tw_dev->srb[i]); 1322 + scsi_dma_unmap(srb); 1323 + srb->scsi_done(srb); 1298 1324 } 1299 1325 } 1300 1326 } ··· 1741 1767 command_packet->byte8.io.lba = lba; 1742 1768 command_packet->byte6.block_count = num_sectors; 1743 1769 1744 - use_sg = tw_map_scsi_sg_data(tw_dev->tw_pci_dev, tw_dev->srb[request_id]); 1745 - if (!use_sg) 1770 + use_sg = scsi_dma_map(srb); 1771 + if (use_sg <= 0) 1746 1772 return 1; 1747 1773 1748 1774 scsi_for_each_sg(tw_dev->srb[request_id], sg, use_sg, i) { ··· 1928 1954 1929 1955 /* Save the scsi command for use by the ISR */ 1930 1956 tw_dev->srb[request_id] = SCpnt; 1931 - 1932 - /* Initialize phase to zero */ 1933 - SCpnt->SCp.phase = TW_PHASE_INITIAL; 1934 1957 1935 1958 switch (*command) { 1936 1959 case 
READ_10: ··· 2156 2185 2157 2186 /* Now complete the io */ 2158 2187 if ((error != TW_ISR_DONT_COMPLETE)) { 2188 + scsi_dma_unmap(tw_dev->srb[request_id]); 2189 + tw_dev->srb[request_id]->scsi_done(tw_dev->srb[request_id]); 2159 2190 tw_dev->state[request_id] = TW_S_COMPLETED; 2160 2191 tw_state_request_finish(tw_dev, request_id); 2161 2192 tw_dev->posted_request_count--; 2162 - tw_dev->srb[request_id]->scsi_done(tw_dev->srb[request_id]); 2163 - 2164 - tw_unmap_scsi_data(tw_dev->tw_pci_dev, tw_dev->srb[request_id]); 2165 2193 } 2166 2194 } 2167 2195
-5
drivers/scsi/3w-xxxx.h
··· 195 195 #define TW_AEN_SMART_FAIL 0x000F 196 196 #define TW_AEN_SBUF_FAIL 0x0024 197 197 198 - /* Phase defines */ 199 - #define TW_PHASE_INITIAL 0 200 - #define TW_PHASE_SINGLE 1 201 - #define TW_PHASE_SGLIST 2 202 - 203 198 /* Misc defines */ 204 199 #define TW_ALIGNMENT_6000 64 /* 64 bytes */ 205 200 #define TW_ALIGNMENT_7000 4 /* 4 bytes */
+11 -12
drivers/scsi/aha1542.c
··· 375 375 u8 lun = cmd->device->lun; 376 376 unsigned long flags; 377 377 int bufflen = scsi_bufflen(cmd); 378 - int mbo; 378 + int mbo, sg_count; 379 379 struct mailbox *mb = aha1542->mb; 380 380 struct ccb *ccb = aha1542->ccb; 381 + struct chain *cptr; 381 382 382 383 if (*cmd->cmnd == REQUEST_SENSE) { 383 384 /* Don't do the command - we have the sense data already */ ··· 398 397 print_hex_dump_bytes("command: ", DUMP_PREFIX_NONE, cmd->cmnd, cmd->cmd_len); 399 398 } 400 399 #endif 400 + if (bufflen) { /* allocate memory before taking host_lock */ 401 + sg_count = scsi_sg_count(cmd); 402 + cptr = kmalloc(sizeof(*cptr) * sg_count, GFP_KERNEL | GFP_DMA); 403 + if (!cptr) 404 + return SCSI_MLQUEUE_HOST_BUSY; 405 + } 406 + 401 407 /* Use the outgoing mailboxes in a round-robin fashion, because this 402 408 is how the host adapter will scan for them */ 403 409 ··· 449 441 450 442 if (bufflen) { 451 443 struct scatterlist *sg; 452 - struct chain *cptr; 453 - int i, sg_count = scsi_sg_count(cmd); 444 + int i; 454 445 455 446 ccb[mbo].op = 2; /* SCSI Initiator Command w/scatter-gather */ 456 - cmd->host_scribble = kmalloc(sizeof(*cptr)*sg_count, 457 - GFP_KERNEL | GFP_DMA); 458 - cptr = (struct chain *) cmd->host_scribble; 459 - if (cptr == NULL) { 460 - /* free the claimed mailbox slot */ 461 - aha1542->int_cmds[mbo] = NULL; 462 - spin_unlock_irqrestore(sh->host_lock, flags); 463 - return SCSI_MLQUEUE_HOST_BUSY; 464 - } 447 + cmd->host_scribble = (void *)cptr; 465 448 scsi_for_each_sg(cmd, sg, sg_count, i) { 466 449 any2scsi(cptr[i].dataptr, isa_page_to_bus(sg_page(sg)) 467 450 + sg->offset);
+1
drivers/scsi/scsi_devinfo.c
··· 226 226 {"PIONEER", "CD-ROM DRM-624X", NULL, BLIST_FORCELUN | BLIST_SINGLELUN}, 227 227 {"Promise", "VTrak E610f", NULL, BLIST_SPARSELUN | BLIST_NO_RSOC}, 228 228 {"Promise", "", NULL, BLIST_SPARSELUN}, 229 + {"QNAP", "iSCSI Storage", NULL, BLIST_MAX_1024}, 229 230 {"QUANTUM", "XP34301", "1071", BLIST_NOTQ}, 230 231 {"REGAL", "CDC-4X", NULL, BLIST_MAX5LUN | BLIST_SINGLELUN}, 231 232 {"SanDisk", "ImageMate CF-SD1", NULL, BLIST_FORCELUN},
+6
drivers/scsi/scsi_scan.c
··· 897 897 */ 898 898 if (*bflags & BLIST_MAX_512) 899 899 blk_queue_max_hw_sectors(sdev->request_queue, 512); 900 + /* 901 + * Max 1024 sector transfer length for targets that report incorrect 902 + * max/optimal lengths and relied on the old block layer safe default 903 + */ 904 + else if (*bflags & BLIST_MAX_1024) 905 + blk_queue_max_hw_sectors(sdev->request_queue, 1024); 900 906 901 907 /* 902 908 * Some devices may not want to have a start command automatically
+1
include/scsi/scsi_devinfo.h
··· 36 36 for sequential scan */ 37 37 #define BLIST_TRY_VPD_PAGES 0x10000000 /* Attempt to read VPD pages */ 38 38 #define BLIST_NO_RSOC 0x20000000 /* don't try to issue RSOC */ 39 + #define BLIST_MAX_1024 0x40000000 /* maximum 1024 sector cdb length */ 39 40 40 41 #endif