Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

Merge tag 'mhi-for-v7.1' of ssh://gitolite.kernel.org/pub/scm/linux/kernel/git/mani/mhi into char-misc-next

Manivannan writes:

MHI Host
--------

- Add support for Qualcomm SDX35 and Telit FE912C04 modems reusing the existing
channel and event configurations.

- Enable IP_SW and IP_ETH MHI channels for Qualcomm 5G DU X100 Accelerator
device (QDU100). These channels are used to carry O-RAN specific M-Plane,
S-Plane and Netconf packets. The drivers making use of these channels are being
reviewed.

- Add NMEA channels to Telit FN920C04 and FN990A modems for GPS/GNSS support

- Switch to mhi_async_power_up() API in pci_generic driver to avoid boot delays
as some Qcom modems take a while to start. This API ensures that the pci_generic
driver powers up the modem asynchronously and doesn't block the system boot.

- Add pm_runtime_forbid() in remove callback to balance the pm_runtime_allow()
call made during the Mission Mode transition.

- Use kzalloc_flex() to simplify kzalloc() + kzalloc() calls

MHI Endpoint
------------

- Test for non-zero return value 'if (ret)' in the endpoint stack where
applicable to maintain code uniformity.

* tag 'mhi-for-v7.1' of ssh://gitolite.kernel.org/pub/scm/linux/kernel/git/mani/mhi:
bus: mhi: host: pci_generic: Add Telit FE912C04 modem support
bus: mhi: ep: Test for non-zero return value where applicable
bus: mhi: host: Use kzalloc_flex
bus: mhi: host: pci_generic: Add pm_runtime_forbid() in remove callback
bus: mhi: host: pci_generic: Switch to async power up to avoid boot delays
bus: mhi: host: pci_generic: Add NMEA channels to FN920C04 and FN990A
bus: mhi: host: pci_generic: Enable IP_SW and IP_ETH channels for Qcom QDU100 device
bus: mhi: host: pci_generic: Add Qualcomm SDX35 modem

+67 -45
+5 -5
drivers/bus/mhi/ep/main.c
··· 367 367 ret = mhi_ep_send_completion_event(mhi_cntrl, ring, el, 368 368 MHI_TRE_DATA_GET_LEN(el), 369 369 MHI_EV_CC_EOB); 370 - if (ret < 0) { 370 + if (ret) { 371 371 dev_err(&mhi_chan->mhi_dev->dev, 372 372 "Error sending transfer compl. event\n"); 373 373 goto err_free_tre_buf; ··· 383 383 ret = mhi_ep_send_completion_event(mhi_cntrl, ring, el, 384 384 MHI_TRE_DATA_GET_LEN(el), 385 385 MHI_EV_CC_EOT); 386 - if (ret < 0) { 386 + if (ret) { 387 387 dev_err(&mhi_chan->mhi_dev->dev, 388 388 "Error sending transfer compl. event\n"); 389 389 goto err_free_tre_buf; ··· 449 449 450 450 dev_dbg(dev, "Reading %zd bytes from channel (%u)\n", tr_len, ring->ch_id); 451 451 ret = mhi_cntrl->read_async(mhi_cntrl, &buf_info); 452 - if (ret < 0) { 452 + if (ret) { 453 453 dev_err(&mhi_chan->mhi_dev->dev, "Error reading from channel\n"); 454 454 goto err_free_buf_addr; 455 455 } ··· 494 494 } else { 495 495 /* UL channel */ 496 496 ret = mhi_ep_read_channel(mhi_cntrl, ring); 497 - if (ret < 0) { 497 + if (ret) { 498 498 dev_err(&mhi_chan->mhi_dev->dev, "Failed to read channel\n"); 499 499 return ret; 500 500 } ··· 591 591 592 592 dev_dbg(dev, "Writing %zd bytes to channel (%u)\n", tr_len, ring->ch_id); 593 593 ret = mhi_cntrl->write_async(mhi_cntrl, &buf_info); 594 - if (ret < 0) { 594 + if (ret) { 595 595 dev_err(dev, "Error writing to the channel\n"); 596 596 goto err_exit; 597 597 }
+3 -3
drivers/bus/mhi/ep/ring.c
··· 49 49 buf_info.dev_addr = &ring->ring_cache[start]; 50 50 51 51 ret = mhi_cntrl->read_sync(mhi_cntrl, &buf_info); 52 - if (ret < 0) 52 + if (ret) 53 53 return ret; 54 54 } else { 55 55 buf_info.size = (ring->ring_size - start) * sizeof(struct mhi_ring_element); ··· 57 57 buf_info.dev_addr = &ring->ring_cache[start]; 58 58 59 59 ret = mhi_cntrl->read_sync(mhi_cntrl, &buf_info); 60 - if (ret < 0) 60 + if (ret) 61 61 return ret; 62 62 63 63 if (end) { ··· 66 66 buf_info.size = end * sizeof(struct mhi_ring_element); 67 67 68 68 ret = mhi_cntrl->read_sync(mhi_cntrl, &buf_info); 69 - if (ret < 0) 69 + if (ret) 70 70 return ret; 71 71 } 72 72 }
+3 -19
drivers/bus/mhi/host/boot.c
··· 308 308 struct mhi_buf *mhi_buf = image_info->mhi_buf; 309 309 310 310 dma_free_coherent(mhi_cntrl->cntrl_dev, mhi_buf->len, mhi_buf->buf, mhi_buf->dma_addr); 311 - kfree(image_info->mhi_buf); 312 311 kfree(image_info); 313 312 } 314 313 ··· 321 322 dma_free_coherent(mhi_cntrl->cntrl_dev, mhi_buf->len, 322 323 mhi_buf->buf, mhi_buf->dma_addr); 323 324 324 - kfree(image_info->mhi_buf); 325 325 kfree(image_info); 326 326 } 327 327 ··· 331 333 struct image_info *img_info; 332 334 struct mhi_buf *mhi_buf; 333 335 334 - img_info = kzalloc_obj(*img_info); 336 + img_info = kzalloc_flex(*img_info, mhi_buf, 1); 335 337 if (!img_info) 336 338 return -ENOMEM; 337 - 338 - /* Allocate memory for entry */ 339 - img_info->mhi_buf = kzalloc_obj(*img_info->mhi_buf); 340 - if (!img_info->mhi_buf) 341 - goto error_alloc_mhi_buf; 342 339 343 340 /* Allocate and populate vector table */ 344 341 mhi_buf = img_info->mhi_buf; ··· 351 358 return 0; 352 359 353 360 error_alloc_segment: 354 - kfree(mhi_buf); 355 - error_alloc_mhi_buf: 356 361 kfree(img_info); 357 362 358 363 return -ENOMEM; ··· 366 375 struct image_info *img_info; 367 376 struct mhi_buf *mhi_buf; 368 377 369 - img_info = kzalloc_obj(*img_info); 378 + img_info = kzalloc_flex(*img_info, mhi_buf, segments); 370 379 if (!img_info) 371 380 return -ENOMEM; 372 381 373 - /* Allocate memory for entries */ 374 - img_info->mhi_buf = kzalloc_objs(*img_info->mhi_buf, segments); 375 - if (!img_info->mhi_buf) 376 - goto error_alloc_mhi_buf; 382 + img_info->entries = segments; 377 383 378 384 /* Allocate and populate vector table */ 379 385 mhi_buf = img_info->mhi_buf; ··· 390 402 } 391 403 392 404 img_info->bhi_vec = img_info->mhi_buf[segments - 1].buf; 393 - img_info->entries = segments; 394 405 *image_info = img_info; 395 406 396 407 return 0; ··· 398 411 for (--i, --mhi_buf; i >= 0; i--, mhi_buf--) 399 412 dma_free_coherent(mhi_cntrl->cntrl_dev, mhi_buf->len, 400 413 mhi_buf->buf, mhi_buf->dma_addr); 401 - kfree(img_info->mhi_buf); 
402 - 403 - error_alloc_mhi_buf: 404 414 kfree(img_info); 405 415 406 416 return -ENOMEM;
+39 -1
drivers/bus/mhi/host/pci_generic.c
··· 253 253 MHI_CHANNEL_CONFIG_DL(41, "MHI_PHC", 32, 4), 254 254 MHI_CHANNEL_CONFIG_UL(46, "IP_SW0", 256, 5), 255 255 MHI_CHANNEL_CONFIG_DL(47, "IP_SW0", 256, 5), 256 + MHI_CHANNEL_CONFIG_UL(48, "IP_SW1", 256, 6), 257 + MHI_CHANNEL_CONFIG_DL(49, "IP_SW1", 256, 6), 258 + MHI_CHANNEL_CONFIG_UL(50, "IP_ETH0", 256, 7), 259 + MHI_CHANNEL_CONFIG_DL(51, "IP_ETH0", 256, 7), 260 + MHI_CHANNEL_CONFIG_UL(52, "IP_ETH1", 256, 8), 261 + MHI_CHANNEL_CONFIG_DL(53, "IP_ETH1", 256, 8), 262 + 256 263 }; 257 264 258 265 static struct mhi_event_config mhi_qcom_qdu100_events[] = { ··· 275 268 MHI_EVENT_CONFIG_SW_DATA(5, 512), 276 269 MHI_EVENT_CONFIG_SW_DATA(6, 512), 277 270 MHI_EVENT_CONFIG_SW_DATA(7, 512), 271 + MHI_EVENT_CONFIG_SW_DATA(8, 512), 278 272 }; 279 273 280 274 static const struct mhi_controller_config mhi_qcom_qdu100_config = { ··· 413 405 .dma_data_width = 32, 414 406 .mru_default = 32768, 415 407 .sideband_wake = false, 408 + }; 409 + 410 + static const struct mhi_pci_dev_info mhi_qcom_sdx35_info = { 411 + .name = "qcom-sdx35m", 412 + .config = &modem_qcom_v2_mhiv_config, 413 + .bar_num = MHI_PCI_DEFAULT_BAR_NUM, 414 + .dma_data_width = 32, 415 + .mru_default = 32768, 416 + .sideband_wake = false, 417 + .edl_trigger = true, 416 418 }; 417 419 418 420 static const struct mhi_pci_dev_info mhi_qcom_sdx24_info = { ··· 806 788 MHI_CHANNEL_CONFIG_DL(33, "DUN", 32, 0), 807 789 MHI_CHANNEL_CONFIG_UL(92, "DUN2", 32, 1), 808 790 MHI_CHANNEL_CONFIG_DL(93, "DUN2", 32, 1), 791 + MHI_CHANNEL_CONFIG_UL(94, "NMEA", 32, 1), 792 + MHI_CHANNEL_CONFIG_DL(95, "NMEA", 32, 1), 809 793 MHI_CHANNEL_CONFIG_HW_UL(100, "IP_HW0_MBIM", 128, 2), 810 794 MHI_CHANNEL_CONFIG_HW_DL(101, "IP_HW0_MBIM", 128, 3), 811 795 }; ··· 859 839 MHI_CHANNEL_CONFIG_DL_FP(35, "FIREHOSE", 32, 0), 860 840 MHI_CHANNEL_CONFIG_UL(92, "DUN2", 32, 1), 861 841 MHI_CHANNEL_CONFIG_DL(93, "DUN2", 32, 1), 842 + MHI_CHANNEL_CONFIG_UL(94, "NMEA", 32, 1), 843 + MHI_CHANNEL_CONFIG_DL(95, "NMEA", 32, 1), 862 844 
MHI_CHANNEL_CONFIG_HW_UL(100, "IP_HW0", 128, 2), 863 845 MHI_CHANNEL_CONFIG_HW_DL(101, "IP_HW0", 128, 3), 864 846 }; ··· 904 882 .edl_trigger = true, 905 883 }; 906 884 885 + static const struct mhi_pci_dev_info mhi_telit_fe912c04_info = { 886 + .name = "telit-fe912c04", 887 + .config = &modem_telit_fn920c04_config, 888 + .bar_num = MHI_PCI_DEFAULT_BAR_NUM, 889 + .dma_data_width = 32, 890 + .sideband_wake = false, 891 + .mru_default = 32768, 892 + .edl_trigger = true, 893 + }; 894 + 907 895 static const struct mhi_pci_dev_info mhi_netprisma_lcur57_info = { 908 896 .name = "netprisma-lcur57", 909 897 .edl = "qcom/prog_firehose_sdx24.mbn", ··· 941 909 /* Telit FN920C04 (sdx35) */ 942 910 {PCI_DEVICE_SUB(PCI_VENDOR_ID_QCOM, 0x011a, 0x1c5d, 0x2020), 943 911 .driver_data = (kernel_ulong_t) &mhi_telit_fn920c04_info }, 912 + /* Telit FE912C04 (sdx35) */ 913 + { PCI_DEVICE_SUB(PCI_VENDOR_ID_QCOM, 0x011a, 0x1c5d, 0x2045), 914 + .driver_data = (kernel_ulong_t) &mhi_telit_fe912c04_info }, 915 + { PCI_DEVICE(PCI_VENDOR_ID_QCOM, 0x011a), 916 + .driver_data = (kernel_ulong_t) &mhi_qcom_sdx35_info }, 944 917 { PCI_DEVICE(PCI_VENDOR_ID_QCOM, 0x0304), 945 918 .driver_data = (kernel_ulong_t) &mhi_qcom_sdx24_info }, 946 919 { PCI_DEVICE_SUB(PCI_VENDOR_ID_QCOM, 0x0306, PCI_VENDOR_ID_QCOM, 0x010c), ··· 1430 1393 goto err_unregister; 1431 1394 } 1432 1395 1433 - err = mhi_sync_power_up(mhi_cntrl); 1396 + err = mhi_async_power_up(mhi_cntrl); 1434 1397 if (err) { 1435 1398 dev_err(&pdev->dev, "failed to power up MHI controller\n"); 1436 1399 goto err_unprepare; ··· 1465 1428 struct mhi_pci_device *mhi_pdev = pci_get_drvdata(pdev); 1466 1429 struct mhi_controller *mhi_cntrl = &mhi_pdev->mhi_cntrl; 1467 1430 1431 + pm_runtime_forbid(&pdev->dev); 1468 1432 pci_disable_sriov(pdev); 1469 1433 1470 1434 if (pdev->is_physfn)
+17 -17
include/linux/mhi.h
··· 86 86 }; 87 87 88 88 /** 89 + * struct mhi_buf - MHI Buffer description 90 + * @buf: Virtual address of the buffer 91 + * @name: Buffer label. For offload channel, configurations name must be: 92 + * ECA - Event context array data 93 + * CCA - Channel context array data 94 + * @dma_addr: IOMMU address of the buffer 95 + * @len: # of bytes 96 + */ 97 + struct mhi_buf { 98 + void *buf; 99 + const char *name; 100 + dma_addr_t dma_addr; 101 + size_t len; 102 + }; 103 + 104 + /** 89 105 * struct image_info - Firmware and RDDM table 90 106 * @mhi_buf: Buffer for firmware and RDDM table 91 107 * @entries: # of entries in table 92 108 */ 93 109 struct image_info { 94 - struct mhi_buf *mhi_buf; 95 110 /* private: from internal.h */ 96 111 struct bhi_vec_entry *bhi_vec; 97 112 /* public: */ 98 113 u32 entries; 114 + struct mhi_buf mhi_buf[] __counted_by(entries); 99 115 }; 100 116 101 117 /** ··· 502 486 size_t bytes_xferd; 503 487 enum dma_data_direction dir; 504 488 int transaction_status; 505 - }; 506 - 507 - /** 508 - * struct mhi_buf - MHI Buffer description 509 - * @buf: Virtual address of the buffer 510 - * @name: Buffer label. For offload channel, configurations name must be: 511 - * ECA - Event context array data 512 - * CCA - Channel context array data 513 - * @dma_addr: IOMMU address of the buffer 514 - * @len: # of bytes 515 - */ 516 - struct mhi_buf { 517 - void *buf; 518 - const char *name; 519 - dma_addr_t dma_addr; 520 - size_t len; 521 489 }; 522 490 523 491 /**