Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

Merge branch 'fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/djbw/async_tx

* 'fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/djbw/async_tx:
async_xor: dma_map destination DMA_BIDIRECTIONAL
dmaengine: protect 'id' from concurrent registrations
ioat: wait for self-test completion

+41 -9
+9 -2
crypto/async_tx/async_xor.c
```diff
 	int xor_src_cnt;
 	dma_addr_t dma_dest;

-	dma_dest = dma_map_page(dma->dev, dest, offset, len, DMA_FROM_DEVICE);
-	for (i = 0; i < src_cnt; i++)
+	/* map the dest bidrectional in case it is re-used as a source */
+	dma_dest = dma_map_page(dma->dev, dest, offset, len, DMA_BIDIRECTIONAL);
+	for (i = 0; i < src_cnt; i++) {
+		/* only map the dest once */
+		if (unlikely(src_list[i] == dest)) {
+			dma_src[i] = dma_dest;
+			continue;
+		}
 		dma_src[i] = dma_map_page(dma->dev, src_list[i], offset,
 					  len, DMA_TO_DEVICE);
+	}

 	while (src_cnt) {
 		async_flags = flags;
```
+3
drivers/dma/dmaengine.c
```diff
 	init_completion(&device->done);
 	kref_init(&device->refcount);
+
+	mutex_lock(&dma_list_mutex);
 	device->dev_id = id++;
+	mutex_unlock(&dma_list_mutex);

 	/* represent channels in sysfs. Probably want devs too */
 	list_for_each_entry(chan, &device->channels, device_node) {
```
+4 -1
drivers/dma/ioat_dma.c
```diff
  */
 #define IOAT_TEST_SIZE 2000

+DECLARE_COMPLETION(test_completion);
 static void ioat_dma_test_callback(void *dma_async_param)
 {
 	printk(KERN_ERR "ioatdma: ioat_dma_test_callback(%p)\n",
 		dma_async_param);
+	complete(&test_completion);
 }

 /**
···
 		goto free_resources;
 	}
 	device->common.device_issue_pending(dma_chan);
-	msleep(1);
+
+	wait_for_completion_timeout(&test_completion, msecs_to_jiffies(3000));

 	if (device->common.device_is_tx_complete(dma_chan, cookie, NULL, NULL)
 						!= DMA_SUCCESS) {
```
+13 -3
drivers/dma/iop-adma.c
```diff
 	enum dma_ctrl_flags flags = desc->async_tx.flags;
 	u32 src_cnt;
 	dma_addr_t addr;
+	dma_addr_t dest;

+	src_cnt = unmap->unmap_src_cnt;
+	dest = iop_desc_get_dest_addr(unmap, iop_chan);
 	if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
-		addr = iop_desc_get_dest_addr(unmap, iop_chan);
-		dma_unmap_page(dev, addr, len, DMA_FROM_DEVICE);
+		enum dma_data_direction dir;
+
+		if (src_cnt > 1) /* is xor? */
+			dir = DMA_BIDIRECTIONAL;
+		else
+			dir = DMA_FROM_DEVICE;
+
+		dma_unmap_page(dev, dest, len, dir);
 	}

 	if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
-		src_cnt = unmap->unmap_src_cnt;
 		while (src_cnt--) {
 			addr = iop_desc_get_src_addr(unmap,
 						     iop_chan,
 						     src_cnt);
+			if (addr == dest)
+				continue;
 			dma_unmap_page(dev, addr, len,
 				       DMA_TO_DEVICE);
 		}
```
+12 -3
drivers/dma/mv_xor.c
```diff
 	enum dma_ctrl_flags flags = desc->async_tx.flags;
 	u32 src_cnt;
 	dma_addr_t addr;
+	dma_addr_t dest;

+	src_cnt = unmap->unmap_src_cnt;
+	dest = mv_desc_get_dest_addr(unmap);
 	if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
-		addr = mv_desc_get_dest_addr(unmap);
-		dma_unmap_page(dev, addr, len, DMA_FROM_DEVICE);
+		enum dma_data_direction dir;
+
+		if (src_cnt > 1) /* is xor ? */
+			dir = DMA_BIDIRECTIONAL;
+		else
+			dir = DMA_FROM_DEVICE;
+		dma_unmap_page(dev, dest, len, dir);
 	}

 	if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
-		src_cnt = unmap->unmap_src_cnt;
 		while (src_cnt--) {
 			addr = mv_desc_get_src_addr(unmap,
 						    src_cnt);
+			if (addr == dest)
+				continue;
 			dma_unmap_page(dev, addr, len,
 				       DMA_TO_DEVICE);
 		}
```