Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

Merge tag 'dmaengine-fix-5.7-rc4' of git://git.infradead.org/users/vkoul/slave-dma

Pull dmaengine fixes from Vinod Koul:
"Core:
- Documentation typo fixes
- fix the channel indexes
- dmatest: fixes for process hang and iterations

Drivers:
- hisilicon: build error fix without PCI_MSI
- ti-k3: deadlock fix
- uniphier-xdmac: fix for reg region
- pch: fix data race
- tegra: fix clock state"

* tag 'dmaengine-fix-5.7-rc4' of git://git.infradead.org/users/vkoul/slave-dma:
dmaengine: dmatest: Fix process hang when reading 'wait' parameter
dmaengine: dmatest: Fix iteration non-stop logic
dmaengine: tegra-apb: Ensure that clock is enabled during of DMA synchronization
dmaengine: fix channel index enumeration
dmaengine: mmp_tdma: Reset channel error on release
dmaengine: mmp_tdma: Do not ignore slave config validation errors
dmaengine: pch_dma.c: Avoid data race between probe and irq handler
dt-bindings: dma: uniphier-xdmac: switch to single reg region
include/linux/dmaengine: Typos fixes in API documentation
dmaengine: xilinx_dma: Add missing check for empty list
dmaengine: ti: k3-psil: fix deadlock on error path
dmaengine: hisilicon: Fix build error without PCI_MSI

+65 -60
+3 -4
Documentation/devicetree/bindings/dma/socionext,uniphier-xdmac.yaml
··· 22 22 const: socionext,uniphier-xdmac 23 23 24 24 reg: 25 - items: 26 - - description: XDMAC base register region (offset and length) 27 - - description: XDMAC extension register region (offset and length) 25 + maxItems: 1 28 26 29 27 interrupts: 30 28 maxItems: 1 ··· 47 49 - reg 48 50 - interrupts 49 51 - "#dma-cells" 52 + - dma-channels 50 53 51 54 examples: 52 55 - | 53 56 xdmac: dma-controller@5fc10000 { 54 57 compatible = "socionext,uniphier-xdmac"; 55 - reg = <0x5fc10000 0x1000>, <0x5fc20000 0x800>; 58 + reg = <0x5fc10000 0x5300>; 56 59 interrupts = <0 188 4>; 57 60 #dma-cells = <2>; 58 61 dma-channels = <16>;
+2 -1
drivers/dma/Kconfig
··· 241 241 242 242 config HISI_DMA 243 243 tristate "HiSilicon DMA Engine support" 244 - depends on ARM64 || (COMPILE_TEST && PCI_MSI) 244 + depends on ARM64 || COMPILE_TEST 245 + depends on PCI_MSI 245 246 select DMA_ENGINE 246 247 select DMA_VIRTUAL_CHANNELS 247 248 help
+26 -34
drivers/dma/dmaengine.c
··· 232 232 struct dma_chan_dev *chan_dev; 233 233 234 234 chan_dev = container_of(dev, typeof(*chan_dev), device); 235 - if (atomic_dec_and_test(chan_dev->idr_ref)) { 236 - ida_free(&dma_ida, chan_dev->dev_id); 237 - kfree(chan_dev->idr_ref); 238 - } 239 235 kfree(chan_dev); 240 236 } 241 237 ··· 1039 1043 } 1040 1044 1041 1045 static int __dma_async_device_channel_register(struct dma_device *device, 1042 - struct dma_chan *chan, 1043 - int chan_id) 1046 + struct dma_chan *chan) 1044 1047 { 1045 1048 int rc = 0; 1046 - int chancnt = device->chancnt; 1047 - atomic_t *idr_ref; 1048 - struct dma_chan *tchan; 1049 - 1050 - tchan = list_first_entry_or_null(&device->channels, 1051 - struct dma_chan, device_node); 1052 - if (!tchan) 1053 - return -ENODEV; 1054 - 1055 - if (tchan->dev) { 1056 - idr_ref = tchan->dev->idr_ref; 1057 - } else { 1058 - idr_ref = kmalloc(sizeof(*idr_ref), GFP_KERNEL); 1059 - if (!idr_ref) 1060 - return -ENOMEM; 1061 - atomic_set(idr_ref, 0); 1062 - } 1063 1049 1064 1050 chan->local = alloc_percpu(typeof(*chan->local)); 1065 1051 if (!chan->local) ··· 1057 1079 * When the chan_id is a negative value, we are dynamically adding 1058 1080 * the channel. Otherwise we are static enumerating. 1059 1081 */ 1060 - chan->chan_id = chan_id < 0 ? chancnt : chan_id; 1082 + mutex_lock(&device->chan_mutex); 1083 + chan->chan_id = ida_alloc(&device->chan_ida, GFP_KERNEL); 1084 + mutex_unlock(&device->chan_mutex); 1085 + if (chan->chan_id < 0) { 1086 + pr_err("%s: unable to alloc ida for chan: %d\n", 1087 + __func__, chan->chan_id); 1088 + goto err_out; 1089 + } 1090 + 1061 1091 chan->dev->device.class = &dma_devclass; 1062 1092 chan->dev->device.parent = device->dev; 1063 1093 chan->dev->chan = chan; 1064 - chan->dev->idr_ref = idr_ref; 1065 1094 chan->dev->dev_id = device->dev_id; 1066 - atomic_inc(idr_ref); 1067 1095 dev_set_name(&chan->dev->device, "dma%dchan%d", 1068 1096 device->dev_id, chan->chan_id); 1069 - 1070 1097 rc = device_register(&chan->dev->device); 1071 1098 if (rc) 1072 - goto err_out; 1099 + goto err_out_ida; 1073 1100 chan->client_count = 0; 1074 - device->chancnt = chan->chan_id + 1; 1101 + device->chancnt++; 1075 1102 1076 1103 return 0; 1077 1104 1105 + err_out_ida: 1106 + mutex_lock(&device->chan_mutex); 1107 + ida_free(&device->chan_ida, chan->chan_id); 1108 + mutex_unlock(&device->chan_mutex); 1078 1109 err_out: 1079 1110 free_percpu(chan->local); 1080 1111 kfree(chan->dev); 1081 - if (atomic_dec_return(idr_ref) == 0) 1082 - kfree(idr_ref); 1083 1112 return rc; 1084 1113 } 1085 1114 ··· 1095 1110 { 1096 1111 int rc; 1097 1112 1098 - rc = __dma_async_device_channel_register(device, chan, -1); 1113 + rc = __dma_async_device_channel_register(device, chan); 1099 1114 if (rc < 0) 1100 1115 return rc; 1101 1116 ··· 1115 1130 device->chancnt--; 1116 1131 chan->dev->chan = NULL; 1117 1132 mutex_unlock(&dma_list_mutex); 1133 + mutex_lock(&device->chan_mutex); 1134 + ida_free(&device->chan_ida, chan->chan_id); 1135 + mutex_unlock(&device->chan_mutex); 1118 1136 device_unregister(&chan->dev->device); 1119 1137 free_percpu(chan->local); 1120 1138 } ··· 1140 1152 */ 1141 1153 int dma_async_device_register(struct dma_device *device) 1142 1154 { 1143 - int rc, i = 0; 1155 + int rc; 1144 1156 struct dma_chan* chan; 1145 1157 1146 1158 if (!device) ··· 1245 1257 if (rc != 0) 1246 1258 return rc; 1247 1259 1260 + mutex_init(&device->chan_mutex); 1261 + ida_init(&device->chan_ida); 1262 + 1248 1263 /* represent channels in sysfs. Probably want devs too */ 1249 1264 list_for_each_entry(chan, &device->channels, device_node) { 1250 - rc = __dma_async_device_channel_register(device, chan, i++); 1265 + rc = __dma_async_device_channel_register(device, chan); 1251 1266 if (rc < 0) 1252 1267 goto err_out; 1253 1268 } ··· 1325 1334 */ 1326 1335 dma_cap_set(DMA_PRIVATE, device->cap_mask); 1327 1336 dma_channel_rebalance(); 1337 + ida_free(&dma_ida, device->dev_id); 1328 1338 dma_device_put(device); 1329 1339 mutex_unlock(&dma_list_mutex); 1330 1340 }
+3 -3
drivers/dma/dmatest.c
··· 240 240 struct dmatest_thread *thread; 241 241 242 242 list_for_each_entry(thread, &dtc->threads, node) { 243 - if (!thread->done) 243 + if (!thread->done && !thread->pending) 244 244 return true; 245 245 } 246 246 } ··· 662 662 flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT; 663 663 664 664 ktime = ktime_get(); 665 - while (!kthread_should_stop() 666 - && !(params->iterations && total_tests >= params->iterations)) { 665 + while (!(kthread_should_stop() || 666 + (params->iterations && total_tests >= params->iterations))) { 667 667 struct dma_async_tx_descriptor *tx = NULL; 668 668 struct dmaengine_unmap_data *um; 669 669 dma_addr_t *dsts;
+4 -1
drivers/dma/mmp_tdma.c
··· 363 363 gen_pool_free(gpool, (unsigned long)tdmac->desc_arr, 364 364 size); 365 365 tdmac->desc_arr = NULL; 366 + if (tdmac->status == DMA_ERROR) 367 + tdmac->status = DMA_COMPLETE; 366 368 367 369 return; 368 370 } ··· 445 443 if (!desc) 446 444 goto err_out; 447 445 448 - mmp_tdma_config_write(chan, direction, &tdmac->slave_config); 446 + if (mmp_tdma_config_write(chan, direction, &tdmac->slave_config)) 447 + goto err_out; 449 448 450 449 while (buf < buf_len) { 451 450 desc = &tdmac->desc_arr[i];
+1 -1
drivers/dma/pch_dma.c
··· 865 865 } 866 866 867 867 pci_set_master(pdev); 868 + pd->dma.dev = &pdev->dev; 868 869 869 870 err = request_irq(pdev->irq, pd_irq, IRQF_SHARED, DRV_NAME, pd); 870 871 if (err) { ··· 881 880 goto err_free_irq; 882 881 } 883 882 884 - pd->dma.dev = &pdev->dev; 885 883 886 884 INIT_LIST_HEAD(&pd->dma.channels); 887 885
+9
drivers/dma/tegra20-apb-dma.c
··· 816 816 static void tegra_dma_synchronize(struct dma_chan *dc) 817 817 { 818 818 struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc); 819 + int err; 820 + 821 + err = pm_runtime_get_sync(tdc->tdma->dev); 822 + if (err < 0) { 823 + dev_err(tdc2dev(tdc), "Failed to synchronize DMA: %d\n", err); 824 + return; 825 + } 819 826 820 827 /* 821 828 * CPU, which handles interrupt, could be busy in ··· 832 825 wait_event(tdc->wq, tegra_dma_eoc_interrupt_deasserted(tdc)); 833 826 834 827 tasklet_kill(&tdc->tasklet); 828 + 829 + pm_runtime_put(tdc->tdma->dev); 835 830 } 836 831 837 832 static unsigned int tegra_dma_sg_bytes_xferred(struct tegra_dma_channel *tdc,
+1
drivers/dma/ti/k3-psil.c
··· 27 27 soc_ep_map = &j721e_ep_map; 28 28 } else { 29 29 pr_err("PSIL: No compatible machine found for map\n"); 30 + mutex_unlock(&ep_map_mutex); 30 31 return ERR_PTR(-ENOTSUPP); 31 32 } 32 33 pr_debug("%s: Using map for %s\n", __func__, soc_ep_map->name);
+10 -10
drivers/dma/xilinx/xilinx_dma.c
··· 1230 1230 return ret; 1231 1231 1232 1232 spin_lock_irqsave(&chan->lock, flags); 1233 - 1234 - desc = list_last_entry(&chan->active_list, 1235 - struct xilinx_dma_tx_descriptor, node); 1236 - /* 1237 - * VDMA and simple mode do not support residue reporting, so the 1238 - * residue field will always be 0. 1239 - */ 1240 - if (chan->has_sg && chan->xdev->dma_config->dmatype != XDMA_TYPE_VDMA) 1241 - residue = xilinx_dma_get_residue(chan, desc); 1242 - 1233 + if (!list_empty(&chan->active_list)) { 1234 + desc = list_last_entry(&chan->active_list, 1235 + struct xilinx_dma_tx_descriptor, node); 1236 + /* 1237 + * VDMA and simple mode do not support residue reporting, so the 1238 + * residue field will always be 0. 1239 + */ 1240 + if (chan->has_sg && chan->xdev->dma_config->dmatype != XDMA_TYPE_VDMA) 1241 + residue = xilinx_dma_get_residue(chan, desc); 1242 + } 1243 1243 spin_unlock_irqrestore(&chan->lock, flags); 1244 1244 1245 1245 dma_set_residue(txstate, residue);
+6 -6
include/linux/dmaengine.h
··· 83 83 /** 84 84 * Interleaved Transfer Request 85 85 * ---------------------------- 86 - * A chunk is collection of contiguous bytes to be transfered. 86 + * A chunk is collection of contiguous bytes to be transferred. 87 87 * The gap(in bytes) between two chunks is called inter-chunk-gap(ICG). 88 - * ICGs may or maynot change between chunks. 88 + * ICGs may or may not change between chunks. 89 89 * A FRAME is the smallest series of contiguous {chunk,icg} pairs, 90 90 * that when repeated an integral number of times, specifies the transfer. 91 91 * A transfer template is specification of a Frame, the number of times ··· 341 341 * @chan: driver channel device 342 342 * @device: sysfs device 343 343 * @dev_id: parent dma_device dev_id 344 - * @idr_ref: reference count to gate release of dma_device dev_id 345 344 */ 346 345 struct dma_chan_dev { 347 346 struct dma_chan *chan; 348 347 struct device device; 349 348 int dev_id; 350 - atomic_t *idr_ref; 351 349 }; 352 351 /** ··· 833 835 int dev_id; 834 836 struct device *dev; 835 837 struct module *owner; 838 + struct ida chan_ida; 839 + struct mutex chan_mutex; /* to protect chan_ida */ 836 840 u32 src_addr_widths; 837 841 u32 dst_addr_widths; ··· 1069 1069 * dmaengine_synchronize() needs to be called before it is safe to free 1070 1070 * any memory that is accessed by previously submitted descriptors or before 1071 1071 * freeing any resources accessed from within the completion callback of any 1072 - * perviously submitted descriptors. 1072 + * previously submitted descriptors. 1073 1073 * 1074 1074 * This function can be called from atomic context as well as from within a 1075 1075 * complete callback of a descriptor submitted on the same channel. ··· 1091 1091 * 1092 1092 * Synchronizes to the DMA channel termination to the current context. When this 1093 1093 * function returns it is guaranteed that all transfers for previously issued 1094 - * descriptors have stopped and and it is safe to free the memory assoicated 1094 + * descriptors have stopped and it is safe to free the memory associated 1095 1095 * with them. Furthermore it is guaranteed that all complete callback functions 1096 1096 * for a previously submitted descriptor have finished running and it is safe to 1097 1097 * free resources accessed from within the complete callbacks.