Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

Merge tag 'char-misc-6.2-rc5' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/char-misc

Pull char/misc driver fixes from Greg KH:
"Here are some small char/misc and other subsystem driver fixes for
6.2-rc5 to resolve a few reported issues. They include:

- long time pending fastrpc fixes (should have gone into 6.1, my
fault)

- mei driver/bus fixes and new device ids

- interconnect driver fixes for reported problems

- vmci bugfix

- w1 driver bugfixes for reported problems

Almost all of these have been in linux-next with no reported problems,
the rest have all passed 0-day bot testing in my tree and on the
mailing lists where they have sat too long due to me taking a long
time to catch up on my pending patch queue"

* tag 'char-misc-6.2-rc5' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/char-misc:
VMCI: Use threaded irqs instead of tasklets
misc: fastrpc: Pass bitfield into qcom_scm_assign_mem
gsmi: fix null-deref in gsmi_get_variable
misc: fastrpc: Fix use-after-free race condition for maps
misc: fastrpc: Don't remove map on creater_process and device_release
misc: fastrpc: Fix use-after-free and race in fastrpc_map_find
misc: fastrpc: fix error code in fastrpc_req_mmap()
mei: me: add meteor lake point M DID
mei: bus: fix unlink on bus in error path
w1: fix WARNING after calling w1_process()
w1: fix deadloop in __w1_remove_master_device()
comedi: adv_pci1760: Fix PWM instruction handling
interconnect: qcom: rpm: Use _optional func for provider clocks
interconnect: qcom: msm8996: Fix regmap max_register values
interconnect: qcom: msm8996: Provide UFS clocks to A2NoC
dt-bindings: interconnect: Add UFS clocks to MSM8996 A2NoC

+122 -91
+23 -1
Documentation/devicetree/bindings/interconnect/qcom,rpm.yaml
··· 84 84 - qcom,msm8939-pcnoc 85 85 - qcom,msm8939-snoc 86 86 - qcom,msm8996-a1noc 87 - - qcom,msm8996-a2noc 88 87 - qcom,msm8996-bimc 89 88 - qcom,msm8996-cnoc 90 89 - qcom,msm8996-pnoc ··· 184 185 185 186 required: 186 187 - power-domains 188 + 189 + - if: 190 + properties: 191 + compatible: 192 + contains: 193 + enum: 194 + - qcom,msm8996-a2noc 195 + 196 + then: 197 + properties: 198 + clock-names: 199 + items: 200 + - const: bus 201 + - const: bus_a 202 + - const: aggre2_ufs_axi 203 + - const: ufs_axi 204 + 205 + clocks: 206 + items: 207 + - description: Bus Clock 208 + - description: Bus A Clock 209 + - description: Aggregate2 NoC UFS AXI Clock 210 + - description: UFS AXI Clock 187 211 188 212 - if: 189 213 properties:
+1 -1
drivers/comedi/drivers/adv_pci1760.c
··· 58 58 #define PCI1760_CMD_CLR_IMB2 0x00 /* Clears IMB2 */ 59 59 #define PCI1760_CMD_SET_DO 0x01 /* Set output state */ 60 60 #define PCI1760_CMD_GET_DO 0x02 /* Read output status */ 61 - #define PCI1760_CMD_GET_STATUS 0x03 /* Read current status */ 61 + #define PCI1760_CMD_GET_STATUS 0x07 /* Read current status */ 62 62 #define PCI1760_CMD_GET_FW_VER 0x0e /* Read firmware version */ 63 63 #define PCI1760_CMD_GET_HW_VER 0x0f /* Read hardware version */ 64 64 #define PCI1760_CMD_SET_PWM_HI(x) (0x10 + (x) * 2) /* Set "hi" period */
+4 -3
drivers/firmware/google/gsmi.c
··· 361 361 memcpy(data, gsmi_dev.data_buf->start, *data_size); 362 362 363 363 /* All variables are have the following attributes */ 364 - *attr = EFI_VARIABLE_NON_VOLATILE | 365 - EFI_VARIABLE_BOOTSERVICE_ACCESS | 366 - EFI_VARIABLE_RUNTIME_ACCESS; 364 + if (attr) 365 + *attr = EFI_VARIABLE_NON_VOLATILE | 366 + EFI_VARIABLE_BOOTSERVICE_ACCESS | 367 + EFI_VARIABLE_RUNTIME_ACCESS; 367 368 } 368 369 369 370 spin_unlock_irqrestore(&gsmi_dev.lock, flags);
+1 -1
drivers/interconnect/qcom/icc-rpm.c
··· 488 488 } 489 489 490 490 regmap_done: 491 - ret = devm_clk_bulk_get(dev, qp->num_clks, qp->bus_clks); 491 + ret = devm_clk_bulk_get_optional(dev, qp->num_clks, qp->bus_clks); 492 492 if (ret) 493 493 return ret; 494 494
+14 -5
drivers/interconnect/qcom/msm8996.c
··· 33 33 "aggre0_noc_mpu_cfg" 34 34 }; 35 35 36 + static const char * const bus_a2noc_clocks[] = { 37 + "bus", 38 + "bus_a", 39 + "aggre2_ufs_axi", 40 + "ufs_axi" 41 + }; 42 + 36 43 static const u16 mas_a0noc_common_links[] = { 37 44 MSM8996_SLAVE_A0NOC_SNOC 38 45 }; ··· 1813 1806 .reg_bits = 32, 1814 1807 .reg_stride = 4, 1815 1808 .val_bits = 32, 1816 - .max_register = 0x9000, 1809 + .max_register = 0x6000, 1817 1810 .fast_io = true 1818 1811 }; 1819 1812 ··· 1837 1830 .reg_bits = 32, 1838 1831 .reg_stride = 4, 1839 1832 .val_bits = 32, 1840 - .max_register = 0x7000, 1833 + .max_register = 0x5000, 1841 1834 .fast_io = true 1842 1835 }; 1843 1836 ··· 1858 1851 .reg_bits = 32, 1859 1852 .reg_stride = 4, 1860 1853 .val_bits = 32, 1861 - .max_register = 0xa000, 1854 + .max_register = 0x7000, 1862 1855 .fast_io = true 1863 1856 }; 1864 1857 ··· 1866 1859 .type = QCOM_ICC_NOC, 1867 1860 .nodes = a2noc_nodes, 1868 1861 .num_nodes = ARRAY_SIZE(a2noc_nodes), 1862 + .clocks = bus_a2noc_clocks, 1863 + .num_clocks = ARRAY_SIZE(bus_a2noc_clocks), 1869 1864 .regmap_cfg = &msm8996_a2noc_regmap_config 1870 1865 }; 1871 1866 ··· 1886 1877 .reg_bits = 32, 1887 1878 .reg_stride = 4, 1888 1879 .val_bits = 32, 1889 - .max_register = 0x62000, 1880 + .max_register = 0x5a000, 1890 1881 .fast_io = true 1891 1882 }; 1892 1883 ··· 1997 1988 .reg_bits = 32, 1998 1989 .reg_stride = 4, 1999 1990 .val_bits = 32, 2000 - .max_register = 0x20000, 1991 + .max_register = 0x1c000, 2001 1992 .fast_io = true 2002 1993 }; 2003 1994
+41 -42
drivers/misc/fastrpc.c
··· 321 321 perm.vmid = QCOM_SCM_VMID_HLOS; 322 322 perm.perm = QCOM_SCM_PERM_RWX; 323 323 err = qcom_scm_assign_mem(map->phys, map->size, 324 - &(map->fl->cctx->vmperms[0].vmid), &perm, 1); 324 + &map->fl->cctx->perms, &perm, 1); 325 325 if (err) { 326 326 dev_err(map->fl->sctx->dev, "Failed to assign memory phys 0x%llx size 0x%llx err %d", 327 327 map->phys, map->size, err); ··· 334 334 dma_buf_put(map->buf); 335 335 } 336 336 337 + if (map->fl) { 338 + spin_lock(&map->fl->lock); 339 + list_del(&map->node); 340 + spin_unlock(&map->fl->lock); 341 + map->fl = NULL; 342 + } 343 + 337 344 kfree(map); 338 345 } 339 346 ··· 350 343 kref_put(&map->refcount, fastrpc_free_map); 351 344 } 352 345 353 - static void fastrpc_map_get(struct fastrpc_map *map) 346 + static int fastrpc_map_get(struct fastrpc_map *map) 354 347 { 355 - if (map) 356 - kref_get(&map->refcount); 348 + if (!map) 349 + return -ENOENT; 350 + 351 + return kref_get_unless_zero(&map->refcount) ? 0 : -ENOENT; 357 352 } 358 353 359 354 360 355 static int fastrpc_map_lookup(struct fastrpc_user *fl, int fd, 361 - struct fastrpc_map **ppmap) 356 + struct fastrpc_map **ppmap, bool take_ref) 362 357 { 358 + struct fastrpc_session_ctx *sess = fl->sctx; 363 359 struct fastrpc_map *map = NULL; 360 + int ret = -ENOENT; 364 361 365 - mutex_lock(&fl->mutex); 362 + spin_lock(&fl->lock); 366 363 list_for_each_entry(map, &fl->maps, node) { 367 - if (map->fd == fd) { 368 - *ppmap = map; 369 - mutex_unlock(&fl->mutex); 370 - return 0; 364 + if (map->fd != fd) 365 + continue; 366 + 367 + if (take_ref) { 368 + ret = fastrpc_map_get(map); 369 + if (ret) { 370 + dev_dbg(sess->dev, "%s: Failed to get map fd=%d ret=%d\n", 371 + __func__, fd, ret); 372 + break; 373 + } 371 374 } 375 + 376 + *ppmap = map; 377 + ret = 0; 378 + break; 372 379 } 373 - mutex_unlock(&fl->mutex); 374 - 375 - return -ENOENT; 376 - } 377 - 378 - static int fastrpc_map_find(struct fastrpc_user *fl, int fd, 379 - struct fastrpc_map **ppmap) 380 - { 381 - int ret = fastrpc_map_lookup(fl, fd, ppmap); 382 - 383 - if (!ret) 384 - fastrpc_map_get(*ppmap); 380 + spin_unlock(&fl->lock); 385 381 386 382 return ret; 387 383 } ··· 756 746 struct fastrpc_map *map = NULL; 757 747 int err = 0; 758 748 759 - if (!fastrpc_map_find(fl, fd, ppmap)) 749 + if (!fastrpc_map_lookup(fl, fd, ppmap, true)) 760 750 return 0; 761 751 762 752 map = kzalloc(sizeof(*map), GFP_KERNEL); ··· 798 788 * If subsystem VMIDs are defined in DTSI, then do 799 789 * hyp_assign from HLOS to those VM(s) 800 790 */ 801 - unsigned int perms = BIT(QCOM_SCM_VMID_HLOS); 802 - 803 791 map->attr = attr; 804 - err = qcom_scm_assign_mem(map->phys, (u64)map->size, &perms, 792 + err = qcom_scm_assign_mem(map->phys, (u64)map->size, &fl->cctx->perms, 805 793 fl->cctx->vmperms, fl->cctx->vmcount); 806 794 if (err) { 807 795 dev_err(sess->dev, "Failed to assign memory with phys 0x%llx size 0x%llx err %d", ··· 1078 1070 for (i = 0; i < FASTRPC_MAX_FDLIST; i++) { 1079 1071 if (!fdlist[i]) 1080 1072 break; 1081 - if (!fastrpc_map_lookup(fl, (int)fdlist[i], &mmap)) 1073 + if (!fastrpc_map_lookup(fl, (int)fdlist[i], &mmap, false)) 1082 1074 fastrpc_map_put(mmap); 1083 1075 } 1084 1076 ··· 1266 1258 1267 1259 /* Map if we have any heap VMIDs associated with this ADSP Static Process. */ 1268 1260 if (fl->cctx->vmcount) { 1269 - unsigned int perms = BIT(QCOM_SCM_VMID_HLOS); 1270 - 1271 1261 err = qcom_scm_assign_mem(fl->cctx->remote_heap->phys, 1272 - (u64)fl->cctx->remote_heap->size, &perms, 1262 + (u64)fl->cctx->remote_heap->size, 1263 + &fl->cctx->perms, 1273 1264 fl->cctx->vmperms, fl->cctx->vmcount); 1274 1265 if (err) { 1275 1266 dev_err(fl->sctx->dev, "Failed to assign memory with phys 0x%llx size 0x%llx err %d", ··· 1316 1309 perm.perm = QCOM_SCM_PERM_RWX; 1317 1310 err = qcom_scm_assign_mem(fl->cctx->remote_heap->phys, 1318 1311 (u64)fl->cctx->remote_heap->size, 1319 - &(fl->cctx->vmperms[0].vmid), &perm, 1); 1312 + &fl->cctx->perms, &perm, 1); 1320 1313 if (err) 1321 1314 dev_err(fl->sctx->dev, "Failed to assign memory phys 0x%llx size 0x%llx err %d", 1322 1315 fl->cctx->remote_heap->phys, fl->cctx->remote_heap->size, err); ··· 1440 1433 fl->init_mem = NULL; 1441 1434 fastrpc_buf_free(imem); 1442 1435 err_alloc: 1443 - if (map) { 1444 - spin_lock(&fl->lock); 1445 - list_del(&map->node); 1446 - spin_unlock(&fl->lock); 1447 - fastrpc_map_put(map); 1448 - } 1436 + fastrpc_map_put(map); 1449 1437 err: 1450 1438 kfree(args); 1451 1439 ··· 1516 1514 fastrpc_context_put(ctx); 1517 1515 } 1518 1516 1519 - list_for_each_entry_safe(map, m, &fl->maps, node) { 1520 - list_del(&map->node); 1517 + list_for_each_entry_safe(map, m, &fl->maps, node) 1521 1518 fastrpc_map_put(map); 1522 - } 1523 1519 1524 1520 list_for_each_entry_safe(buf, b, &fl->mmaps, node) { 1525 1521 list_del(&buf->node); ··· 1894 1894 /* Add memory to static PD pool, protection thru hypervisor */ 1895 1895 if (req.flags != ADSP_MMAP_REMOTE_HEAP_ADDR && fl->cctx->vmcount) { 1896 1896 struct qcom_scm_vmperm perm; 1897 - int err = 0; 1898 1897 1899 1898 perm.vmid = QCOM_SCM_VMID_HLOS; 1900 1899 perm.perm = QCOM_SCM_PERM_RWX; 1901 1900 err = qcom_scm_assign_mem(buf->phys, buf->size, 1902 1901 &fl->cctx->perms, &perm, 1); 1903 1902 if (err) { 1904 1903 dev_err(fl->sctx->dev, "Failed to assign memory phys 0x%llx size 0x%llx err %d", 1905 1904 buf->phys, buf->size, err);
+8 -4
drivers/misc/mei/bus.c
··· 702 702 if (cl->state == MEI_FILE_UNINITIALIZED) { 703 703 ret = mei_cl_link(cl); 704 704 if (ret) 705 - goto out; 705 + goto notlinked; 706 706 /* update pointers */ 707 707 cl->cldev = cldev; 708 708 } 709 709 710 710 ret = mei_cl_dma_alloc_and_map(cl, NULL, buffer_id, size); 711 - out: 711 + if (ret) 712 + mei_cl_unlink(cl); 713 + notlinked: 712 714 mutex_unlock(&bus->device_lock); 713 715 if (ret) 714 716 return ERR_PTR(ret); ··· 760 758 if (cl->state == MEI_FILE_UNINITIALIZED) { 761 759 ret = mei_cl_link(cl); 762 760 if (ret) 763 - goto out; 761 + goto notlinked; 764 762 /* update pointers */ 765 763 cl->cldev = cldev; 766 764 } ··· 787 785 } 788 786 789 787 out: 788 + if (ret) 789 + mei_cl_unlink(cl); 790 + notlinked: 790 791 mutex_unlock(&bus->device_lock); 791 792 792 793 return ret; ··· 1282 1277 mei_cl_flush_queues(cldev->cl, NULL); 1283 1278 mei_me_cl_put(cldev->me_cl); 1284 1279 mei_dev_bus_put(cldev->bus); 1285 - mei_cl_unlink(cldev->cl); 1286 1280 kfree(cldev->cl); 1287 1281 kfree(cldev); 1288 1282 }
+2
drivers/misc/mei/hw-me-regs.h
··· 111 111 112 112 #define MEI_DEV_ID_RPL_S 0x7A68 /* Raptor Lake Point S */ 113 113 114 + #define MEI_DEV_ID_MTL_M 0x7E70 /* Meteor Lake Point M */ 115 + 114 116 /* 115 117 * MEI HW Section 116 118 */
+2
drivers/misc/mei/pci-me.c
··· 118 118 119 119 {MEI_PCI_DEVICE(MEI_DEV_ID_RPL_S, MEI_ME_PCH15_CFG)}, 120 120 121 + {MEI_PCI_DEVICE(MEI_DEV_ID_MTL_M, MEI_ME_PCH15_CFG)}, 122 + 121 123 /* required last entry */ 122 124 {0, } 123 125 };
+19 -30
drivers/misc/vmw_vmci/vmci_guest.c
··· 56 56 57 57 bool exclusive_vectors; 58 58 59 - struct tasklet_struct datagram_tasklet; 60 - struct tasklet_struct bm_tasklet; 61 59 struct wait_queue_head inout_wq; 62 60 63 61 void *data_buffer; ··· 302 304 * This function assumes that it has exclusive access to the data 303 305 * in register(s) for the duration of the call. 304 306 */ 305 - static void vmci_dispatch_dgs(unsigned long data) 307 + static void vmci_dispatch_dgs(struct vmci_guest_device *vmci_dev) 306 308 { 307 - struct vmci_guest_device *vmci_dev = (struct vmci_guest_device *)data; 308 309 u8 *dg_in_buffer = vmci_dev->data_buffer; 309 310 struct vmci_datagram *dg; 310 311 size_t dg_in_buffer_size = VMCI_MAX_DG_SIZE; ··· 462 465 * Scans the notification bitmap for raised flags, clears them 463 466 * and handles the notifications. 464 467 */ 465 - static void vmci_process_bitmap(unsigned long data) 468 + static void vmci_process_bitmap(struct vmci_guest_device *dev) 466 469 { 467 - struct vmci_guest_device *dev = (struct vmci_guest_device *)data; 468 - 469 470 if (!dev->notification_bitmap) { 470 471 dev_dbg(dev->dev, "No bitmap present in %s\n", __func__); 471 472 return; ··· 481 486 struct vmci_guest_device *dev = _dev; 482 487 483 488 /* 484 - * If we are using MSI-X with exclusive vectors then we simply schedule 485 - * the datagram tasklet, since we know the interrupt was meant for us. 489 + * If we are using MSI-X with exclusive vectors then we simply call 490 + * vmci_dispatch_dgs(), since we know the interrupt was meant for us. 486 491 * Otherwise we must read the ICR to determine what to do. 487 492 */ 488 493 489 494 if (dev->exclusive_vectors) { 490 - tasklet_schedule(&dev->datagram_tasklet); 495 + vmci_dispatch_dgs(dev); 491 496 } else { 492 497 unsigned int icr; 493 498 ··· 497 502 return IRQ_NONE; 498 503 499 504 if (icr & VMCI_ICR_DATAGRAM) { 500 - tasklet_schedule(&dev->datagram_tasklet); 505 + vmci_dispatch_dgs(dev); 501 506 icr &= ~VMCI_ICR_DATAGRAM; 502 507 } 503 508 504 509 if (icr & VMCI_ICR_NOTIFICATION) { 505 - tasklet_schedule(&dev->bm_tasklet); 510 + vmci_process_bitmap(dev); 506 511 icr &= ~VMCI_ICR_NOTIFICATION; 507 512 } 508 513 ··· 531 536 struct vmci_guest_device *dev = _dev; 532 537 533 538 /* For MSI-X we can just assume it was meant for us. */ 534 - tasklet_schedule(&dev->bm_tasklet); 539 + vmci_process_bitmap(dev); 535 540 536 541 return IRQ_HANDLED; 537 542 } ··· 633 638 vmci_dev->iobase = iobase; 634 639 vmci_dev->mmio_base = mmio_base; 635 640 636 - tasklet_init(&vmci_dev->datagram_tasklet, 637 - vmci_dispatch_dgs, (unsigned long)vmci_dev); 638 - tasklet_init(&vmci_dev->bm_tasklet, 639 - vmci_process_bitmap, (unsigned long)vmci_dev); 640 641 init_waitqueue_head(&vmci_dev->inout_wq); 641 642 642 643 if (mmio_base != NULL) { ··· 799 808 * Request IRQ for legacy or MSI interrupts, or for first 800 809 * MSI-X vector. 801 810 */ 802 - error = request_irq(pci_irq_vector(pdev, 0), vmci_interrupt, 803 - IRQF_SHARED, KBUILD_MODNAME, vmci_dev); 811 + error = request_threaded_irq(pci_irq_vector(pdev, 0), NULL, 812 + vmci_interrupt, IRQF_SHARED, 813 + KBUILD_MODNAME, vmci_dev); 804 814 if (error) { 805 815 dev_err(&pdev->dev, "Irq %u in use: %d\n", 806 816 pci_irq_vector(pdev, 0), error); ··· 815 823 * between the vectors. 816 824 */ 817 825 if (vmci_dev->exclusive_vectors) { 818 - error = request_irq(pci_irq_vector(pdev, 1), 819 - vmci_interrupt_bm, 0, KBUILD_MODNAME, 820 - vmci_dev); 826 + error = request_threaded_irq(pci_irq_vector(pdev, 1), NULL, 827 + vmci_interrupt_bm, 0, 828 + KBUILD_MODNAME, vmci_dev); 821 829 if (error) { 822 830 dev_err(&pdev->dev, 823 831 "Failed to allocate irq %u: %d\n", ··· 825 833 goto err_free_irq; 826 834 } 827 835 if (caps_in_use & VMCI_CAPS_DMA_DATAGRAM) { 828 - error = request_irq(pci_irq_vector(pdev, 2), 829 - vmci_interrupt_dma_datagram, 830 - 0, KBUILD_MODNAME, vmci_dev); 836 + error = request_threaded_irq(pci_irq_vector(pdev, 2), 837 + NULL, 838 + vmci_interrupt_dma_datagram, 839 + 0, KBUILD_MODNAME, 840 + vmci_dev); 831 841 if (error) { 832 842 dev_err(&pdev->dev, 833 843 "Failed to allocate irq %u: %d\n", ··· 865 871 866 872 err_free_irq: 867 873 free_irq(pci_irq_vector(pdev, 0), vmci_dev); 868 - tasklet_kill(&vmci_dev->datagram_tasklet); 869 - tasklet_kill(&vmci_dev->bm_tasklet); 870 874 871 875 err_disable_msi: 872 876 pci_free_irq_vectors(pdev); ··· 934 942 } 935 943 free_irq(pci_irq_vector(pdev, 0), vmci_dev); 936 944 pci_free_irq_vectors(pdev); 937 - 938 - tasklet_kill(&vmci_dev->datagram_tasklet); 939 - tasklet_kill(&vmci_dev->bm_tasklet); 940 945 941 946 if (vmci_dev->notification_bitmap) { 942 947 /*
+5 -1
drivers/w1/w1.c
··· 1166 1166 /* remainder if it woke up early */ 1167 1167 unsigned long jremain = 0; 1168 1168 1169 + atomic_inc(&dev->refcnt); 1170 + 1169 1171 for (;;) { 1170 1172 1171 1173 if (!jremain && dev->search_count) { ··· 1195 1193 */ 1196 1194 mutex_unlock(&dev->list_mutex); 1197 1195 1198 - if (kthread_should_stop()) 1196 + if (kthread_should_stop()) { 1197 + __set_current_state(TASK_RUNNING); 1199 1198 break; 1199 + } 1200 1200 1201 1201 /* Only sleep when the search is active. */ 1202 1202 if (dev->search_count) {
+2 -3
drivers/w1/w1_int.c
··· 51 51 dev->search_count = w1_search_count; 52 52 dev->enable_pullup = w1_enable_pullup; 53 53 54 - /* 1 for w1_process to decrement 55 - * 1 for __w1_remove_master_device to decrement 54 + /* For __w1_remove_master_device to decrement 56 55 */ 57 - atomic_set(&dev->refcnt, 2); 56 + atomic_set(&dev->refcnt, 1); 58 57 59 58 INIT_LIST_HEAD(&dev->slist); 60 59 INIT_LIST_HEAD(&dev->async_list);