Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

Merge tag 'mhi-for-v6.18' of ssh://gitolite.kernel.org/pub/scm/linux/kernel/git/mani/mhi into char-misc-next

Manivannan writes:

MHI Host
========

- Add support for all Foxconn T99W696 SKU variants

- Fix accessing the uninitialized 'dev' pointer in mhi_init_irq_setup()

- Notify the MHI Execution Environment (EE) change to userspace using uevent

- Add support for Virtual Functions (VFs) in the SR-IOV capable QDU100 device from
Qualcomm. To add SR-IOV support, the MHI pci_generic driver has been modified
to apply different configurations for PFs and VFs.

MHI Endpoint
============

- Fix the handling of chained transfers in the EP MHI driver, which led to reading
past the host transfer buffers, causing IOMMU faults in the host and other
issues.

* tag 'mhi-for-v6.18' of ssh://gitolite.kernel.org/pub/scm/linux/kernel/git/mani/mhi:
bus: mhi: host: pci_generic: Set DMA mask for VFs
bus: mhi: core: Improve mhi_sync_power_up handling for SYS_ERR state
bus: mhi: host: pci_generic: Reset QDU100 while the MHI driver is removed
bus: mhi: host: pci_generic: Add SRIOV support
bus: mhi: host: pci_generic: Read SUBSYSTEM_VENDOR_ID for VF's to check status
bus: mhi: host: Add support for separate controller configurations for VF and PF
bus: mhi: ep: Fix chained transfer handling in read path
bus: mhi: host: Notify EE change via uevent
bus: mhi: host: Do not use uninitialized 'dev' pointer in mhi_init_irq_setup()
bus: mhi: host: pci_generic: Add support for all Foxconn T99W696 SKU variants

+103 -57
+12 -25
drivers/bus/mhi/ep/main.c
··· 403 403 { 404 404 struct mhi_ep_chan *mhi_chan = &mhi_cntrl->mhi_chan[ring->ch_id]; 405 405 struct device *dev = &mhi_cntrl->mhi_dev->dev; 406 - size_t tr_len, read_offset, write_offset; 406 + size_t tr_len, read_offset; 407 407 struct mhi_ep_buf_info buf_info = {}; 408 408 u32 len = MHI_EP_DEFAULT_MTU; 409 409 struct mhi_ring_element *el; 410 - bool tr_done = false; 411 410 void *buf_addr; 412 - u32 buf_left; 413 411 int ret; 414 - 415 - buf_left = len; 416 412 417 413 do { 418 414 /* Don't process the transfer ring if the channel is not in RUNNING state */ ··· 422 426 /* Check if there is data pending to be read from previous read operation */ 423 427 if (mhi_chan->tre_bytes_left) { 424 428 dev_dbg(dev, "TRE bytes remaining: %u\n", mhi_chan->tre_bytes_left); 425 - tr_len = min(buf_left, mhi_chan->tre_bytes_left); 429 + tr_len = min(len, mhi_chan->tre_bytes_left); 426 430 } else { 427 431 mhi_chan->tre_loc = MHI_TRE_DATA_GET_PTR(el); 428 432 mhi_chan->tre_size = MHI_TRE_DATA_GET_LEN(el); 429 433 mhi_chan->tre_bytes_left = mhi_chan->tre_size; 430 434 431 - tr_len = min(buf_left, mhi_chan->tre_size); 435 + tr_len = min(len, mhi_chan->tre_size); 432 436 } 433 437 434 438 read_offset = mhi_chan->tre_size - mhi_chan->tre_bytes_left; 435 - write_offset = len - buf_left; 436 439 437 440 buf_addr = kmem_cache_zalloc(mhi_cntrl->tre_buf_cache, GFP_KERNEL); 438 441 if (!buf_addr) 439 442 return -ENOMEM; 440 443 441 444 buf_info.host_addr = mhi_chan->tre_loc + read_offset; 442 - buf_info.dev_addr = buf_addr + write_offset; 445 + buf_info.dev_addr = buf_addr; 443 446 buf_info.size = tr_len; 444 447 buf_info.cb = mhi_ep_read_completion; 445 448 buf_info.cb_buf = buf_addr; ··· 454 459 goto err_free_buf_addr; 455 460 } 456 461 457 - buf_left -= tr_len; 458 462 mhi_chan->tre_bytes_left -= tr_len; 459 463 460 - if (!mhi_chan->tre_bytes_left) { 461 - if (MHI_TRE_DATA_GET_IEOT(el)) 462 - tr_done = true; 463 - 464 + if (!mhi_chan->tre_bytes_left) 464 465 mhi_chan->rd_offset = 
(mhi_chan->rd_offset + 1) % ring->ring_size; 465 - } 466 - } while (buf_left && !tr_done); 466 + /* Read until the some buffer is left or the ring becomes not empty */ 467 + } while (!mhi_ep_queue_is_empty(mhi_chan->mhi_dev, DMA_TO_DEVICE)); 467 468 468 469 return 0; 469 470 ··· 493 502 mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result); 494 503 } else { 495 504 /* UL channel */ 496 - do { 497 - ret = mhi_ep_read_channel(mhi_cntrl, ring); 498 - if (ret < 0) { 499 - dev_err(&mhi_chan->mhi_dev->dev, "Failed to read channel\n"); 500 - return ret; 501 - } 502 - 503 - /* Read until the ring becomes empty */ 504 - } while (!mhi_ep_queue_is_empty(mhi_chan->mhi_dev, DMA_TO_DEVICE)); 505 + ret = mhi_ep_read_channel(mhi_cntrl, ring); 506 + if (ret < 0) { 507 + dev_err(&mhi_chan->mhi_dev->dev, "Failed to read channel\n"); 508 + return ret; 509 + } 505 510 } 506 511 507 512 return 0;
+2 -3
drivers/bus/mhi/host/init.c
··· 194 194 static int mhi_init_irq_setup(struct mhi_controller *mhi_cntrl) 195 195 { 196 196 struct mhi_event *mhi_event = mhi_cntrl->mhi_event; 197 - struct device *dev = &mhi_cntrl->mhi_dev->dev; 198 197 unsigned long irq_flags = IRQF_SHARED | IRQF_NO_SUSPEND; 199 198 int i, ret; 200 199 ··· 220 221 continue; 221 222 222 223 if (mhi_event->irq >= mhi_cntrl->nr_irqs) { 223 - dev_err(dev, "irq %d not available for event ring\n", 224 + dev_err(mhi_cntrl->cntrl_dev, "irq %d not available for event ring\n", 224 225 mhi_event->irq); 225 226 ret = -EINVAL; 226 227 goto error_request; ··· 231 232 irq_flags, 232 233 "mhi", mhi_event); 233 234 if (ret) { 234 - dev_err(dev, "Error requesting irq:%d for ev:%d\n", 235 + dev_err(mhi_cntrl->cntrl_dev, "Error requesting irq:%d for ev:%d\n", 235 236 mhi_cntrl->irq[mhi_event->irq], i); 236 237 goto error_request; 237 238 }
+3
drivers/bus/mhi/host/internal.h
··· 170 170 MHI_PM_IN_ERROR_STATE(pm_state)) 171 171 #define MHI_PM_IN_SUSPEND_STATE(pm_state) (pm_state & \ 172 172 (MHI_PM_M3_ENTER | MHI_PM_M3)) 173 + #define MHI_PM_FATAL_ERROR(pm_state) ((pm_state == MHI_PM_FW_DL_ERR) || \ 174 + (pm_state >= MHI_PM_SYS_ERR_FAIL)) 173 175 174 176 #define NR_OF_CMD_RINGS 1 175 177 #define CMD_EL_PER_RING 128 ··· 405 403 struct mhi_event *mhi_event, u32 event_quota); 406 404 int mhi_process_ctrl_ev_ring(struct mhi_controller *mhi_cntrl, 407 405 struct mhi_event *mhi_event, u32 event_quota); 406 + void mhi_uevent_notify(struct mhi_controller *mhi_cntrl, enum mhi_ee_type ee); 408 407 409 408 /* ISR handlers */ 410 409 irqreturn_t mhi_irq_handler(int irq_number, void *dev);
+1
drivers/bus/mhi/host/main.c
··· 512 512 if (mhi_cntrl->rddm_image && mhi_is_active(mhi_cntrl)) { 513 513 mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_EE_RDDM); 514 514 mhi_cntrl->ee = ee; 515 + mhi_uevent_notify(mhi_cntrl, mhi_cntrl->ee); 515 516 wake_up_all(&mhi_cntrl->state_event); 516 517 } 517 518 break;
+57 -28
drivers/bus/mhi/host/pci_generic.c
··· 34 34 /** 35 35 * struct mhi_pci_dev_info - MHI PCI device specific information 36 36 * @config: MHI controller configuration 37 + * @vf_config: MHI controller configuration for Virtual function (optional) 37 38 * @name: name of the PCI module 38 39 * @fw: firmware path (if any) 39 40 * @edl: emergency download mode firmware path (if any) 40 41 * @edl_trigger: capable of triggering EDL mode in the device (if supported) 41 42 * @bar_num: PCI base address register to use for MHI MMIO register space 42 43 * @dma_data_width: DMA transfer word size (32 or 64 bits) 44 + * @vf_dma_data_width: DMA transfer word size for VF's (optional) 43 45 * @mru_default: default MRU size for MBIM network packets 44 46 * @sideband_wake: Devices using dedicated sideband GPIO for wakeup instead 45 47 * of inband wake support (such as sdx24) 46 48 * @no_m3: M3 not supported 49 + * @reset_on_remove: Set true for devices that require SoC during driver removal 47 50 */ 48 51 struct mhi_pci_dev_info { 49 52 const struct mhi_controller_config *config; 53 + const struct mhi_controller_config *vf_config; 50 54 const char *name; 51 55 const char *fw; 52 56 const char *edl; 53 57 bool edl_trigger; 54 58 unsigned int bar_num; 55 59 unsigned int dma_data_width; 60 + unsigned int vf_dma_data_width; 56 61 unsigned int mru_default; 57 62 bool sideband_wake; 58 63 bool no_m3; 64 + bool reset_on_remove; 59 65 }; 60 66 61 67 #define MHI_CHANNEL_CONFIG_UL(ch_num, ch_name, el_count, ev_ring) \ ··· 302 296 .config = &mhi_qcom_qdu100_config, 303 297 .bar_num = MHI_PCI_DEFAULT_BAR_NUM, 304 298 .dma_data_width = 32, 299 + .vf_dma_data_width = 40, 305 300 .sideband_wake = false, 306 301 .no_m3 = true, 302 + .reset_on_remove = true, 307 303 }; 308 304 309 305 static const struct mhi_channel_config mhi_qcom_sa8775p_channels[] = { ··· 925 917 /* Telit FE990A */ 926 918 { PCI_DEVICE_SUB(PCI_VENDOR_ID_QCOM, 0x0308, 0x1c5d, 0x2015), 927 919 .driver_data = (kernel_ulong_t) &mhi_telit_fe990a_info }, 928 - /* Foxconn 
T99W696.01, Lenovo Generic SKU */ 929 - { PCI_DEVICE_SUB(PCI_VENDOR_ID_QCOM, 0x0308, PCI_VENDOR_ID_FOXCONN, 0xe142), 930 - .driver_data = (kernel_ulong_t) &mhi_foxconn_t99w696_info }, 931 - /* Foxconn T99W696.02, Lenovo X1 Carbon SKU */ 932 - { PCI_DEVICE_SUB(PCI_VENDOR_ID_QCOM, 0x0308, PCI_VENDOR_ID_FOXCONN, 0xe143), 933 - .driver_data = (kernel_ulong_t) &mhi_foxconn_t99w696_info }, 934 - /* Foxconn T99W696.03, Lenovo X1 2in1 SKU */ 935 - { PCI_DEVICE_SUB(PCI_VENDOR_ID_QCOM, 0x0308, PCI_VENDOR_ID_FOXCONN, 0xe144), 936 - .driver_data = (kernel_ulong_t) &mhi_foxconn_t99w696_info }, 937 - /* Foxconn T99W696.04, Lenovo PRC SKU */ 938 - { PCI_DEVICE_SUB(PCI_VENDOR_ID_QCOM, 0x0308, PCI_VENDOR_ID_FOXCONN, 0xe145), 939 - .driver_data = (kernel_ulong_t) &mhi_foxconn_t99w696_info }, 940 - /* Foxconn T99W696.00, Foxconn SKU */ 941 - { PCI_DEVICE_SUB(PCI_VENDOR_ID_QCOM, 0x0308, PCI_VENDOR_ID_FOXCONN, 0xe146), 920 + /* Foxconn T99W696, all variants */ 921 + { PCI_DEVICE_SUB(PCI_VENDOR_ID_QCOM, 0x0308, PCI_VENDOR_ID_FOXCONN, PCI_ANY_ID), 942 922 .driver_data = (kernel_ulong_t) &mhi_foxconn_t99w696_info }, 943 923 { PCI_DEVICE(PCI_VENDOR_ID_QCOM, 0x0308), 944 924 .driver_data = (kernel_ulong_t) &mhi_qcom_sdx65_info }, ··· 1033 1037 struct work_struct recovery_work; 1034 1038 struct timer_list health_check_timer; 1035 1039 unsigned long status; 1040 + bool reset_on_remove; 1036 1041 }; 1037 1042 1038 1043 static int mhi_pci_read_reg(struct mhi_controller *mhi_cntrl, ··· 1089 1092 struct pci_dev *pdev = to_pci_dev(mhi_cntrl->cntrl_dev); 1090 1093 u16 vendor = 0; 1091 1094 1092 - if (pci_read_config_word(pdev, PCI_VENDOR_ID, &vendor)) 1095 + if (pci_read_config_word(pci_physfn(pdev), PCI_VENDOR_ID, &vendor)) 1093 1096 return false; 1094 1097 1095 1098 if (vendor == (u16) ~0 || vendor == 0) ··· 1200 1203 1201 1204 dev_warn(&pdev->dev, "device recovery started\n"); 1202 1205 1203 - timer_delete(&mhi_pdev->health_check_timer); 1206 + if (pdev->is_physfn) 1207 + 
timer_delete(&mhi_pdev->health_check_timer); 1208 + 1204 1209 pm_runtime_forbid(&pdev->dev); 1205 1210 1206 1211 /* Clean up MHI state */ ··· 1229 1230 dev_dbg(&pdev->dev, "Recovery completed\n"); 1230 1231 1231 1232 set_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status); 1232 - mod_timer(&mhi_pdev->health_check_timer, jiffies + HEALTH_CHECK_PERIOD); 1233 + 1234 + if (pdev->is_physfn) 1235 + mod_timer(&mhi_pdev->health_check_timer, jiffies + HEALTH_CHECK_PERIOD); 1236 + 1233 1237 return; 1234 1238 1235 1239 err_unprepare: ··· 1303 1301 const struct mhi_controller_config *mhi_cntrl_config; 1304 1302 struct mhi_pci_device *mhi_pdev; 1305 1303 struct mhi_controller *mhi_cntrl; 1304 + unsigned int dma_data_width; 1306 1305 int err; 1307 1306 1308 1307 dev_info(&pdev->dev, "MHI PCI device found: %s\n", info->name); ··· 1314 1311 return -ENOMEM; 1315 1312 1316 1313 INIT_WORK(&mhi_pdev->recovery_work, mhi_pci_recovery_work); 1317 - timer_setup(&mhi_pdev->health_check_timer, health_check, 0); 1318 1314 1319 - mhi_cntrl_config = info->config; 1315 + if (pdev->is_virtfn && info->vf_config) 1316 + mhi_cntrl_config = info->vf_config; 1317 + else 1318 + mhi_cntrl_config = info->config; 1319 + 1320 + /* Initialize health check monitor only for Physical functions */ 1321 + if (pdev->is_physfn) 1322 + timer_setup(&mhi_pdev->health_check_timer, health_check, 0); 1323 + 1320 1324 mhi_cntrl = &mhi_pdev->mhi_cntrl; 1325 + 1326 + dma_data_width = (pdev->is_virtfn && info->vf_dma_data_width) ? 
1327 + info->vf_dma_data_width : info->dma_data_width; 1321 1328 1322 1329 mhi_cntrl->cntrl_dev = &pdev->dev; 1323 1330 mhi_cntrl->iova_start = 0; 1324 - mhi_cntrl->iova_stop = (dma_addr_t)DMA_BIT_MASK(info->dma_data_width); 1331 + mhi_cntrl->iova_stop = (dma_addr_t)DMA_BIT_MASK(dma_data_width); 1325 1332 mhi_cntrl->fw_image = info->fw; 1326 1333 mhi_cntrl->edl_image = info->edl; 1327 1334 ··· 1343 1330 mhi_cntrl->mru = info->mru_default; 1344 1331 mhi_cntrl->name = info->name; 1345 1332 1333 + if (pdev->is_physfn) 1334 + mhi_pdev->reset_on_remove = info->reset_on_remove; 1335 + 1346 1336 if (info->edl_trigger) 1347 1337 mhi_cntrl->edl_trigger = mhi_pci_generic_edl_trigger; 1348 1338 ··· 1355 1339 mhi_cntrl->wake_toggle = mhi_pci_wake_toggle_nop; 1356 1340 } 1357 1341 1358 - err = mhi_pci_claim(mhi_cntrl, info->bar_num, DMA_BIT_MASK(info->dma_data_width)); 1342 + err = mhi_pci_claim(mhi_cntrl, info->bar_num, DMA_BIT_MASK(dma_data_width)); 1359 1343 if (err) 1360 1344 return err; 1361 1345 ··· 1392 1376 set_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status); 1393 1377 1394 1378 /* start health check */ 1395 - mod_timer(&mhi_pdev->health_check_timer, jiffies + HEALTH_CHECK_PERIOD); 1379 + if (pdev->is_physfn) 1380 + mod_timer(&mhi_pdev->health_check_timer, jiffies + HEALTH_CHECK_PERIOD); 1396 1381 1397 1382 /* Allow runtime suspend only if both PME from D3Hot and M3 are supported */ 1398 1383 if (pci_pme_capable(pdev, PCI_D3hot) && !(info->no_m3)) { ··· 1418 1401 struct mhi_pci_device *mhi_pdev = pci_get_drvdata(pdev); 1419 1402 struct mhi_controller *mhi_cntrl = &mhi_pdev->mhi_cntrl; 1420 1403 1421 - timer_delete_sync(&mhi_pdev->health_check_timer); 1404 + pci_disable_sriov(pdev); 1405 + 1406 + if (pdev->is_physfn) 1407 + timer_delete_sync(&mhi_pdev->health_check_timer); 1422 1408 cancel_work_sync(&mhi_pdev->recovery_work); 1423 1409 1424 1410 if (test_and_clear_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status)) { ··· 1432 1412 /* balancing probe put_noidle */ 1433 1413 if 
(pci_pme_capable(pdev, PCI_D3hot)) 1434 1414 pm_runtime_get_noresume(&pdev->dev); 1415 + 1416 + if (mhi_pdev->reset_on_remove) 1417 + mhi_soc_reset(mhi_cntrl); 1435 1418 1436 1419 mhi_unregister_controller(mhi_cntrl); 1437 1420 } ··· 1452 1429 1453 1430 dev_info(&pdev->dev, "reset\n"); 1454 1431 1455 - timer_delete(&mhi_pdev->health_check_timer); 1432 + if (pdev->is_physfn) 1433 + timer_delete(&mhi_pdev->health_check_timer); 1456 1434 1457 1435 /* Clean up MHI state */ 1458 1436 if (test_and_clear_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status)) { ··· 1498 1474 } 1499 1475 1500 1476 set_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status); 1501 - mod_timer(&mhi_pdev->health_check_timer, jiffies + HEALTH_CHECK_PERIOD); 1477 + if (pdev->is_physfn) 1478 + mod_timer(&mhi_pdev->health_check_timer, jiffies + HEALTH_CHECK_PERIOD); 1502 1479 } 1503 1480 1504 1481 static pci_ers_result_t mhi_pci_error_detected(struct pci_dev *pdev, ··· 1564 1539 if (test_and_set_bit(MHI_PCI_DEV_SUSPENDED, &mhi_pdev->status)) 1565 1540 return 0; 1566 1541 1567 - timer_delete(&mhi_pdev->health_check_timer); 1542 + if (pdev->is_physfn) 1543 + timer_delete(&mhi_pdev->health_check_timer); 1544 + 1568 1545 cancel_work_sync(&mhi_pdev->recovery_work); 1569 1546 1570 1547 if (!test_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status) || ··· 1617 1590 } 1618 1591 1619 1592 /* Resume health check */ 1620 - mod_timer(&mhi_pdev->health_check_timer, jiffies + HEALTH_CHECK_PERIOD); 1593 + if (pdev->is_physfn) 1594 + mod_timer(&mhi_pdev->health_check_timer, jiffies + HEALTH_CHECK_PERIOD); 1621 1595 1622 1596 /* It can be a remote wakeup (no mhi runtime_get), update access time */ 1623 1597 pm_runtime_mark_last_busy(dev); ··· 1704 1676 .remove = mhi_pci_remove, 1705 1677 .shutdown = mhi_pci_shutdown, 1706 1678 .err_handler = &mhi_pci_err_handler, 1707 - .driver.pm = &mhi_pci_pm_ops 1679 + .driver.pm = &mhi_pci_pm_ops, 1680 + .sriov_configure = pci_sriov_configure_simple, 1708 1681 }; 1709 1682 module_pci_driver(mhi_pci_driver); 
1710 1683
+28 -1
drivers/bus/mhi/host/pm.c
··· 418 418 device_for_each_child(&mhi_cntrl->mhi_dev->dev, &current_ee, 419 419 mhi_destroy_device); 420 420 mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_EE_MISSION_MODE); 421 + mhi_uevent_notify(mhi_cntrl, mhi_cntrl->ee); 421 422 422 423 /* Force MHI to be in M0 state before continuing */ 423 424 ret = __mhi_device_get_sync(mhi_cntrl); ··· 632 631 /* Wake up threads waiting for state transition */ 633 632 wake_up_all(&mhi_cntrl->state_event); 634 633 634 + mhi_uevent_notify(mhi_cntrl, mhi_cntrl->ee); 635 + 635 636 if (MHI_REG_ACCESS_VALID(prev_state)) { 636 637 /* 637 638 * If the device is in PBL or SBL, it will only respond to ··· 832 829 mhi_create_devices(mhi_cntrl); 833 830 if (mhi_cntrl->fbc_download) 834 831 mhi_download_amss_image(mhi_cntrl); 832 + 833 + mhi_uevent_notify(mhi_cntrl, mhi_cntrl->ee); 835 834 break; 836 835 case DEV_ST_TRANSITION_MISSION_MODE: 837 836 mhi_pm_mission_mode_transition(mhi_cntrl); ··· 843 838 mhi_cntrl->ee = MHI_EE_FP; 844 839 write_unlock_irq(&mhi_cntrl->pm_lock); 845 840 mhi_create_devices(mhi_cntrl); 841 + mhi_uevent_notify(mhi_cntrl, mhi_cntrl->ee); 846 842 break; 847 843 case DEV_ST_TRANSITION_READY: 848 844 mhi_ready_state_transition(mhi_cntrl); ··· 1246 1240 write_unlock_irq(&mhi_cntrl->pm_lock); 1247 1241 mutex_unlock(&mhi_cntrl->pm_mutex); 1248 1242 1243 + mhi_uevent_notify(mhi_cntrl, mhi_cntrl->ee); 1244 + 1249 1245 if (destroy_device) 1250 1246 mhi_queue_state_transition(mhi_cntrl, 1251 1247 DEV_ST_TRANSITION_DISABLE_DESTROY_DEVICE); ··· 1287 1279 mhi_cntrl->ready_timeout_ms : mhi_cntrl->timeout_ms; 1288 1280 wait_event_timeout(mhi_cntrl->state_event, 1289 1281 MHI_IN_MISSION_MODE(mhi_cntrl->ee) || 1290 - MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state), 1282 + MHI_PM_FATAL_ERROR(mhi_cntrl->pm_state), 1291 1283 msecs_to_jiffies(timeout_ms)); 1292 1284 1293 1285 ret = (MHI_IN_MISSION_MODE(mhi_cntrl->ee)) ? 
0 : -ETIMEDOUT; ··· 1346 1338 read_unlock_bh(&mhi_cntrl->pm_lock); 1347 1339 } 1348 1340 EXPORT_SYMBOL_GPL(mhi_device_put); 1341 + 1342 + void mhi_uevent_notify(struct mhi_controller *mhi_cntrl, enum mhi_ee_type ee) 1343 + { 1344 + struct device *dev = &mhi_cntrl->mhi_dev->dev; 1345 + char *buf[2]; 1346 + int ret; 1347 + 1348 + buf[0] = kasprintf(GFP_KERNEL, "EXEC_ENV=%s", TO_MHI_EXEC_STR(ee)); 1349 + buf[1] = NULL; 1350 + 1351 + if (!buf[0]) 1352 + return; 1353 + 1354 + ret = kobject_uevent_env(&dev->kobj, KOBJ_CHANGE, buf); 1355 + if (ret) 1356 + dev_err(dev, "Failed to send %s uevent\n", TO_MHI_EXEC_STR(ee)); 1357 + 1358 + kfree(buf[0]); 1359 + }