Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

Merge tag 'i3c/fixes-for-7.0' of git://git.kernel.org/pub/scm/linux/kernel/git/i3c/linux

Pull i3c fixes from Alexandre Belloni:
"This introduces the I3C_OR_I2C symbol which is not a fix per se but is
affecting multiple subsystems so it is included to ease
synchronization.

Apart from that, Adrian is mostly fixing the mipi-i3c-hci driver DMA
handling, and I took the opportunity to add two fixes for the dw-i3c
driver.

Subsystem:
- simplify combined i3c/i2c dependencies

Drivers:
 - dw: handle I2C properly, fix possible race condition
- mipi-i3c-hci: many DMA related fixes"

* tag 'i3c/fixes-for-7.0' of git://git.kernel.org/pub/scm/linux/kernel/git/i3c/linux:
i3c: dw-i3c-master: Set SIR_REJECT in DAT on device attach and reattach
i3c: master: dw-i3c: Fix missing of_node for virtual I2C adapter
i3c: mipi-i3c-hci: Fallback to software reset when bus disable fails
i3c: mipi-i3c-hci: Fix handling of shared IRQs during early initialization
i3c: mipi-i3c-hci: Fix race in DMA error handling in interrupt context
i3c: mipi-i3c-hci: Consolidate common xfer processing logic
i3c: mipi-i3c-hci: Restart DMA ring correctly after dequeue abort
i3c: mipi-i3c-hci: Add missing TID field to no-op command descriptor
i3c: mipi-i3c-hci: Correct RING_CTRL_ABORT handling in DMA dequeue
i3c: mipi-i3c-hci: Fix race between DMA ring dequeue and interrupt handler
i3c: mipi-i3c-hci: Fix race in DMA ring dequeue
i3c: mipi-i3c-hci: Fix race in DMA ring enqueue for parallel xfers
i3c: mipi-i3c-hci: Consolidate spinlocks
i3c: mipi-i3c-hci: Factor out DMA mapping from queuing path
i3c: mipi-i3c-hci: Fix Hot-Join NACK
i3c: mipi-i3c-hci: Use ETIMEDOUT instead of ETIME for timeout errors
i3c: simplify combined i3c/i2c dependencies

+210 -157
+2 -4
drivers/hwmon/Kconfig
···
 
 config SENSORS_LM75
 	tristate "National Semiconductor LM75 and compatibles"
-	depends on I2C
-	depends on I3C || !I3C
+	depends on I3C_OR_I2C
 	select REGMAP_I2C
 	select REGMAP_I3C if I3C
 	help
···
 
 config SENSORS_TMP108
 	tristate "Texas Instruments TMP108"
-	depends on I2C
-	depends on I3C || !I3C
+	depends on I3C_OR_I2C
 	select REGMAP_I2C
 	select REGMAP_I3C if I3C
 	help
+12
drivers/i3c/Kconfig
···
 if I3C
 source "drivers/i3c/master/Kconfig"
 endif # I3C
+
+config I3C_OR_I2C
+	tristate
+	default m if I3C=m
+	default I2C
+	help
+	  Device drivers using module_i3c_i2c_driver() can use either
+	  i2c or i3c hosts, but cannot be built-in for the kernel when
+	  CONFIG_I3C=m.
+
+	  Add 'depends on I3C_OR_I2C' in Kconfig for those drivers to
+	  get the correct dependencies.
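
For reference, the driver pattern that I3C_OR_I2C exists for looks roughly like the sketch below. The foo_* names are hypothetical; only module_i3c_i2c_driver() and the new Kconfig symbol come from this pull.

/*
 * Hypothetical single-module driver registering both an I3C and an I2C
 * driver. With 'depends on I3C_OR_I2C' it can no longer end up built-in
 * while the i3c core itself is a module (CONFIG_I3C=m).
 */
#include <linux/i2c.h>
#include <linux/i3c/device.h>
#include <linux/module.h>

static int foo_i3c_probe(struct i3c_device *i3cdev)
{
	return 0;	/* bind via REGMAP_I3C, etc. */
}

static int foo_i2c_probe(struct i2c_client *client)
{
	return 0;	/* bind via REGMAP_I2C, etc. */
}

static struct i3c_driver foo_i3c_driver = {
	.driver.name = "foo",
	.probe = foo_i3c_probe,
};

static struct i2c_driver foo_i2c_driver = {
	.driver.name = "foo",
	.probe = foo_i2c_probe,
};

/* registers the i3c driver when available, the i2c driver always */
module_i3c_i2c_driver(foo_i3c_driver, &foo_i2c_driver);

MODULE_DESCRIPTION("I3C_OR_I2C usage sketch");
MODULE_LICENSE("GPL");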
+4 -2
drivers/i3c/master/dw-i3c-master.c
···
 		master->free_pos &= ~BIT(pos);
 	}
 
-	writel(DEV_ADDR_TABLE_DYNAMIC_ADDR(dev->info.dyn_addr),
+	writel(DEV_ADDR_TABLE_DYNAMIC_ADDR(dev->info.dyn_addr) | DEV_ADDR_TABLE_SIR_REJECT,
 	       master->regs +
 	       DEV_ADDR_TABLE_LOC(master->datstartaddr, data->index));
 
···
 	master->free_pos &= ~BIT(pos);
 	i3c_dev_set_master_data(dev, data);
 
-	writel(DEV_ADDR_TABLE_DYNAMIC_ADDR(master->devs[pos].addr),
+	writel(DEV_ADDR_TABLE_DYNAMIC_ADDR(master->devs[pos].addr) | DEV_ADDR_TABLE_SIR_REJECT,
 	       master->regs +
 	       DEV_ADDR_TABLE_LOC(master->datstartaddr, data->index));
 
···
 	pm_runtime_get_noresume(&pdev->dev);
 
 	INIT_WORK(&master->hj_work, dw_i3c_hj_work);
+
+	device_set_of_node_from_dev(&master->base.i2c.dev, &pdev->dev);
 	ret = i3c_master_register(&master->base, &pdev->dev,
 				  &dw_mipi_i3c_ops, false);
 	if (ret)
+1
drivers/i3c/master/mipi-i3c-hci/cmd.h
···
 #define CMD_0_TOC	W0_BIT_(31)
 #define CMD_0_ROC	W0_BIT_(30)
 #define CMD_0_ATTR	W0_MASK(2, 0)
+#define CMD_0_TID	W0_MASK(6, 3)
 
 /*
  * Response Descriptor Structure
+3 -5
drivers/i3c/master/mipi-i3c-hci/cmd_v1.c
···
 				     CMD_A0_ROC | CMD_A0_TOC;
 		xfer->cmd_desc[1] = 0;
 		xfer->completion = &done;
-		hci->io->queue_xfer(hci, xfer, 1);
-		if (!wait_for_completion_timeout(&done, HZ) &&
-		    hci->io->dequeue_xfer(hci, xfer, 1)) {
-			ret = -ETIME;
+		xfer->timeout = HZ;
+		ret = i3c_hci_process_xfer(hci, xfer, 1);
+		if (ret)
 			break;
-		}
 		if ((RESP_STATUS(xfer->response) == RESP_ERR_ADDR_HEADER ||
 		     RESP_STATUS(xfer->response) == RESP_ERR_NACK) &&
 		    RESP_DATA_LENGTH(xfer->response) == 1) {
+3 -5
drivers/i3c/master/mipi-i3c-hci/cmd_v2.c
···
 	xfer[0].rnw = true;
 	xfer[0].cmd_desc[1] = CMD_A1_DATA_LENGTH(8);
 	xfer[1].completion = &done;
+	xfer[1].timeout = HZ;
 
 	for (;;) {
 		ret = i3c_master_get_free_addr(&hci->master, next_addr);
···
 			CMD_A0_ASSIGN_ADDRESS(next_addr) |
 			CMD_A0_ROC |
 			CMD_A0_TOC;
-		hci->io->queue_xfer(hci, xfer, 2);
-		if (!wait_for_completion_timeout(&done, HZ) &&
-		    hci->io->dequeue_xfer(hci, xfer, 2)) {
-			ret = -ETIME;
+		ret = i3c_hci_process_xfer(hci, xfer, 2);
+		if (ret)
 			break;
-		}
 		if (RESP_STATUS(xfer[0].response) != RESP_SUCCESS) {
 			ret = 0; /* no more devices to be assigned */
 			break;
+90 -53
drivers/i3c/master/mipi-i3c-hci/core.c
···
 	if (hci->quirks & HCI_QUIRK_RESP_BUF_THLD)
 		amd_set_resp_buf_thld(hci);
 
-	reg_set(HC_CONTROL, HC_CONTROL_BUS_ENABLE);
+	scoped_guard(spinlock_irqsave, &hci->lock)
+		hci->irq_inactive = false;
+
+	/* Enable bus with Hot-Join disabled */
+	reg_set(HC_CONTROL, HC_CONTROL_BUS_ENABLE | HC_CONTROL_HOT_JOIN_CTRL);
 	dev_dbg(&hci->master.dev, "HC_CONTROL = %#x", reg_read(HC_CONTROL));
 
 	return 0;
···
 	return ret;
 }
 
+static int i3c_hci_software_reset(struct i3c_hci *hci)
+{
+	u32 regval;
+	int ret;
+
+	/*
+	 * SOFT_RST must be clear before we write to it.
+	 * Then we must wait until it clears again.
+	 */
+	ret = readx_poll_timeout(reg_read, RESET_CONTROL, regval,
+				 !(regval & SOFT_RST), 0, 10 * USEC_PER_MSEC);
+	if (ret) {
+		dev_err(&hci->master.dev, "%s: Software reset stuck\n", __func__);
+		return ret;
+	}
+
+	reg_write(RESET_CONTROL, SOFT_RST);
+
+	ret = readx_poll_timeout(reg_read, RESET_CONTROL, regval,
+				 !(regval & SOFT_RST), 0, 10 * USEC_PER_MSEC);
+	if (ret) {
+		dev_err(&hci->master.dev, "%s: Software reset failed\n", __func__);
+		return ret;
+	}
+
+	return 0;
+}
+
 void i3c_hci_sync_irq_inactive(struct i3c_hci *hci)
 {
 	struct platform_device *pdev = to_platform_device(hci->master.dev.parent);
 	int irq = platform_get_irq(pdev, 0);
 
 	reg_write(INTR_SIGNAL_ENABLE, 0x0);
-	hci->irq_inactive = true;
 	synchronize_irq(irq);
+	scoped_guard(spinlock_irqsave, &hci->lock)
+		hci->irq_inactive = true;
 }
 
 static void i3c_hci_bus_cleanup(struct i3c_master_controller *m)
 {
 	struct i3c_hci *hci = to_i3c_hci(m);
 
-	i3c_hci_bus_disable(hci);
+	if (i3c_hci_bus_disable(hci))
+		i3c_hci_software_reset(hci);
 	hci->io->cleanup(hci);
 }
···
 void mipi_i3c_hci_dct_index_reset(struct i3c_hci *hci)
 {
 	reg_write(DCT_SECTION, FIELD_PREP(DCT_TABLE_INDEX, 0));
+}
+
+int i3c_hci_process_xfer(struct i3c_hci *hci, struct hci_xfer *xfer, int n)
+{
+	struct completion *done = xfer[n - 1].completion;
+	unsigned long timeout = xfer[n - 1].timeout;
+	int ret;
+
+	ret = hci->io->queue_xfer(hci, xfer, n);
+	if (ret)
+		return ret;
+
+	if (!wait_for_completion_timeout(done, timeout)) {
+		if (hci->io->dequeue_xfer(hci, xfer, n)) {
+			dev_err(&hci->master.dev, "%s: timeout error\n", __func__);
+			return -ETIMEDOUT;
+		}
+		return 0;
+	}
+
+	if (hci->io->handle_error) {
+		bool error = false;
+
+		for (int i = 0; i < n && !error; i++)
+			error = RESP_STATUS(xfer[i].response);
+		if (error)
+			return hci->io->handle_error(hci, xfer, n);
+	}
+
+	return 0;
 }
 
 static int i3c_hci_send_ccc_cmd(struct i3c_master_controller *m,
···
 	last = i - 1;
 	xfer[last].cmd_desc[0] |= CMD_0_TOC;
 	xfer[last].completion = &done;
+	xfer[last].timeout = HZ;
 
 	if (prefixed)
 		xfer--;
 
-	ret = hci->io->queue_xfer(hci, xfer, nxfers);
+	ret = i3c_hci_process_xfer(hci, xfer, nxfers);
 	if (ret)
 		goto out;
-	if (!wait_for_completion_timeout(&done, HZ) &&
-	    hci->io->dequeue_xfer(hci, xfer, nxfers)) {
-		ret = -ETIME;
-		goto out;
-	}
 	for (i = prefixed; i < nxfers; i++) {
 		if (ccc->rnw)
 			ccc->dests[i - prefixed].payload.len =
···
 	last = i - 1;
 	xfer[last].cmd_desc[0] |= CMD_0_TOC;
 	xfer[last].completion = &done;
+	xfer[last].timeout = HZ;
 
-	ret = hci->io->queue_xfer(hci, xfer, nxfers);
+	ret = i3c_hci_process_xfer(hci, xfer, nxfers);
 	if (ret)
 		goto out;
-	if (!wait_for_completion_timeout(&done, HZ) &&
-	    hci->io->dequeue_xfer(hci, xfer, nxfers)) {
-		ret = -ETIME;
-		goto out;
-	}
 	for (i = 0; i < nxfers; i++) {
 		if (i3c_xfers[i].rnw)
 			i3c_xfers[i].len = RESP_DATA_LENGTH(xfer[i].response);
···
 	last = i - 1;
 	xfer[last].cmd_desc[0] |= CMD_0_TOC;
 	xfer[last].completion = &done;
+	xfer[last].timeout = m->i2c.timeout;
 
-	ret = hci->io->queue_xfer(hci, xfer, nxfers);
+	ret = i3c_hci_process_xfer(hci, xfer, nxfers);
 	if (ret)
 		goto out;
-	if (!wait_for_completion_timeout(&done, m->i2c.timeout) &&
-	    hci->io->dequeue_xfer(hci, xfer, nxfers)) {
-		ret = -ETIME;
-		goto out;
-	}
 	for (i = 0; i < nxfers; i++) {
 		if (RESP_STATUS(xfer[i].response) != RESP_SUCCESS) {
 			ret = -EIO;
···
 	irqreturn_t result = IRQ_NONE;
 	u32 val;
 
+	guard(spinlock)(&hci->lock);
+
 	/*
 	 * The IRQ can be shared, so the handler may be called when the IRQ is
 	 * due to a different device. That could happen when runtime suspended,
···
 		result = IRQ_HANDLED;
 
 	return result;
-}
-
-static int i3c_hci_software_reset(struct i3c_hci *hci)
-{
-	u32 regval;
-	int ret;
-
-	/*
-	 * SOFT_RST must be clear before we write to it.
-	 * Then we must wait until it clears again.
-	 */
-	ret = readx_poll_timeout(reg_read, RESET_CONTROL, regval,
-				 !(regval & SOFT_RST), 0, 10 * USEC_PER_MSEC);
-	if (ret) {
-		dev_err(&hci->master.dev, "%s: Software reset stuck\n", __func__);
-		return ret;
-	}
-
-	reg_write(RESET_CONTROL, SOFT_RST);
-
-	ret = readx_poll_timeout(reg_read, RESET_CONTROL, regval,
-				 !(regval & SOFT_RST), 0, 10 * USEC_PER_MSEC);
-	if (ret) {
-		dev_err(&hci->master.dev, "%s: Software reset failed\n", __func__);
-		return ret;
-	}
-
-	return 0;
 }
 
 static inline bool is_version_1_1_or_newer(struct i3c_hci *hci)
···
 	int ret;
 
 	ret = i3c_hci_bus_disable(hci);
-	if (ret)
+	if (ret) {
+		/* Fall back to software reset to disable the bus */
+		ret = i3c_hci_software_reset(hci);
+		i3c_hci_sync_irq_inactive(hci);
 		return ret;
+	}
 
 	hci->io->suspend(hci);
···
 
 	mipi_i3c_hci_dat_v1.restore(hci);
 
-	hci->irq_inactive = false;
-
 	hci->io->resume(hci);
 
-	reg_set(HC_CONTROL, HC_CONTROL_BUS_ENABLE);
+	scoped_guard(spinlock_irqsave, &hci->lock)
+		hci->irq_inactive = false;
+
+	/* Enable bus with Hot-Join disabled */
+	reg_set(HC_CONTROL, HC_CONTROL_BUS_ENABLE | HC_CONTROL_HOT_JOIN_CTRL);
 
 	return 0;
 }
···
 	if (!hci)
 		return -ENOMEM;
 
+	spin_lock_init(&hci->lock);
+	mutex_init(&hci->control_mutex);
+
 	/*
 	 * Multi-bus instances share the same MMIO address range, but not
 	 * necessarily in separate contiguous sub-ranges. To avoid overlapping
···
 	ret = i3c_hci_init(hci);
 	if (ret)
 		return ret;
+
+	hci->irq_inactive = true;
 
 	irq = platform_get_irq(pdev, 0);
 	ret = devm_request_irq(&pdev->dev, irq, i3c_hci_irq_handler,
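
The new helper gives every caller the same shape; here is a minimal sketch of a converted single-xfer caller, with setup abbreviated. example_send_one() is illustrative; the fields and return codes come from the diff above.

static int example_send_one(struct i3c_hci *hci, struct hci_xfer *xfer)
{
	DECLARE_COMPLETION_ONSTACK(done);

	/* the last xfer in the list carries the completion and timeout */
	xfer->cmd_desc[0] |= CMD_0_TOC;
	xfer->completion = &done;
	xfer->timeout = HZ;

	/*
	 * i3c_hci_process_xfer() now owns queue + wait + dequeue-on-timeout
	 * plus the optional IO-specific handle_error() callback; timeouts
	 * surface as -ETIMEDOUT instead of the old -ETIME.
	 */
	return i3c_hci_process_xfer(hci, xfer, 1);
}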
+83 -73
drivers/i3c/master/mipi-i3c-hci/dma.c
···
 	dma_addr_t xfer_dma, resp_dma, ibi_status_dma, ibi_data_dma;
 	unsigned int xfer_entries, ibi_status_entries, ibi_chunks_total;
 	unsigned int xfer_struct_sz, resp_struct_sz, ibi_status_sz, ibi_chunk_sz;
-	unsigned int done_ptr, ibi_chunk_ptr;
+	unsigned int done_ptr, ibi_chunk_ptr, xfer_space;
 	struct hci_xfer **src_xfers;
-	spinlock_t lock;
 	struct completion op_done;
 };
 
···
 
 	rh->done_ptr = 0;
 	rh->ibi_chunk_ptr = 0;
+	rh->xfer_space = rh->xfer_entries;
 }
 
 static void hci_dma_init_rings(struct i3c_hci *hci)
···
 			goto err_out;
 		rh = &rings->headers[i];
 		rh->regs = hci->base_regs + offset;
-		spin_lock_init(&rh->lock);
 		init_completion(&rh->op_done);
 
 		rh->xfer_entries = XFER_RING_ENTRIES;
···
 		}
 	}
 }
 
+static struct i3c_dma *hci_dma_map_xfer(struct device *dev, struct hci_xfer *xfer)
+{
+	enum dma_data_direction dir = xfer->rnw ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
+	bool need_bounce = device_iommu_mapped(dev) && xfer->rnw && (xfer->data_len & 3);
+
+	return i3c_master_dma_map_single(dev, xfer->data, xfer->data_len, need_bounce, dir);
+}
+
+static int hci_dma_map_xfer_list(struct i3c_hci *hci, struct device *dev,
+				 struct hci_xfer *xfer_list, int n)
+{
+	for (int i = 0; i < n; i++) {
+		struct hci_xfer *xfer = xfer_list + i;
+
+		if (!xfer->data)
+			continue;
+
+		xfer->dma = hci_dma_map_xfer(dev, xfer);
+		if (!xfer->dma) {
+			hci_dma_unmap_xfer(hci, xfer_list, i);
+			return -ENOMEM;
+		}
+	}
+
+	return 0;
+}
+
 static int hci_dma_queue_xfer(struct i3c_hci *hci,
 			      struct hci_xfer *xfer_list, int n)
 {
 	struct hci_rings_data *rings = hci->io_data;
 	struct hci_rh_data *rh;
 	unsigned int i, ring, enqueue_ptr;
-	u32 op1_val, op2_val;
+	u32 op1_val;
+	int ret;
+
+	ret = hci_dma_map_xfer_list(hci, rings->sysdev, xfer_list, n);
+	if (ret)
+		return ret;
 
 	/* For now we only use ring 0 */
 	ring = 0;
 	rh = &rings->headers[ring];
+
+	spin_lock_irq(&hci->lock);
+
+	if (n > rh->xfer_space) {
+		spin_unlock_irq(&hci->lock);
+		hci_dma_unmap_xfer(hci, xfer_list, n);
+		return -EBUSY;
+	}
 
 	op1_val = rh_reg_read(RING_OPERATION1);
 	enqueue_ptr = FIELD_GET(RING_OP1_CR_ENQ_PTR, op1_val);
 	for (i = 0; i < n; i++) {
 		struct hci_xfer *xfer = xfer_list + i;
 		u32 *ring_data = rh->xfer + rh->xfer_struct_sz * enqueue_ptr;
-		enum dma_data_direction dir = xfer->rnw ? DMA_FROM_DEVICE :
-							  DMA_TO_DEVICE;
-		bool need_bounce;
 
 		/* store cmd descriptor */
 		*ring_data++ = xfer->cmd_desc[0];
···
 
 		/* 2nd and 3rd words of Data Buffer Descriptor Structure */
 		if (xfer->data) {
-			need_bounce = device_iommu_mapped(rings->sysdev) &&
-				      xfer->rnw &&
-				      xfer->data_len != ALIGN(xfer->data_len, 4);
-			xfer->dma = i3c_master_dma_map_single(rings->sysdev,
-							      xfer->data,
-							      xfer->data_len,
-							      need_bounce,
-							      dir);
-			if (!xfer->dma) {
-				hci_dma_unmap_xfer(hci, xfer_list, i);
-				return -ENOMEM;
-			}
 			*ring_data++ = lower_32_bits(xfer->dma->addr);
 			*ring_data++ = upper_32_bits(xfer->dma->addr);
 		} else {
···
 		xfer->ring_entry = enqueue_ptr;
 
 		enqueue_ptr = (enqueue_ptr + 1) % rh->xfer_entries;
-
-		/*
-		 * We may update the hardware view of the enqueue pointer
-		 * only if we didn't reach its dequeue pointer.
-		 */
-		op2_val = rh_reg_read(RING_OPERATION2);
-		if (enqueue_ptr == FIELD_GET(RING_OP2_CR_DEQ_PTR, op2_val)) {
-			/* the ring is full */
-			hci_dma_unmap_xfer(hci, xfer_list, i + 1);
-			return -EBUSY;
-		}
 	}
 
-	/* take care to update the hardware enqueue pointer atomically */
-	spin_lock_irq(&rh->lock);
-	op1_val = rh_reg_read(RING_OPERATION1);
+	rh->xfer_space -= n;
+
 	op1_val &= ~RING_OP1_CR_ENQ_PTR;
 	op1_val |= FIELD_PREP(RING_OP1_CR_ENQ_PTR, enqueue_ptr);
 	rh_reg_write(RING_OPERATION1, op1_val);
-	spin_unlock_irq(&rh->lock);
+	spin_unlock_irq(&hci->lock);
 
 	return 0;
 }
···
 	struct hci_rh_data *rh = &rings->headers[xfer_list[0].ring_number];
 	unsigned int i;
 	bool did_unqueue = false;
+	u32 ring_status;
 
-	/* stop the ring */
-	rh_reg_write(RING_CONTROL, RING_CTRL_ABORT);
-	if (wait_for_completion_timeout(&rh->op_done, HZ) == 0) {
-		/*
-		 * We're deep in it if ever this condition is ever met.
-		 * Hardware might still be writing to memory, etc.
-		 */
-		dev_crit(&hci->master.dev, "unable to abort the ring\n");
-		WARN_ON(1);
+	guard(mutex)(&hci->control_mutex);
+
+	ring_status = rh_reg_read(RING_STATUS);
+	if (ring_status & RING_STATUS_RUNNING) {
+		/* stop the ring */
+		reinit_completion(&rh->op_done);
+		rh_reg_write(RING_CONTROL, RING_CTRL_ENABLE | RING_CTRL_ABORT);
+		wait_for_completion_timeout(&rh->op_done, HZ);
+		ring_status = rh_reg_read(RING_STATUS);
+		if (ring_status & RING_STATUS_RUNNING) {
+			/*
+			 * We're deep in it if ever this condition is ever met.
+			 * Hardware might still be writing to memory, etc.
+			 */
+			dev_crit(&hci->master.dev, "unable to abort the ring\n");
+			WARN_ON(1);
+		}
 	}
+
+	spin_lock_irq(&hci->lock);
 
 	for (i = 0; i < n; i++) {
 		struct hci_xfer *xfer = xfer_list + i;
···
 			u32 *ring_data = rh->xfer + rh->xfer_struct_sz * idx;
 
 			/* store no-op cmd descriptor */
-			*ring_data++ = FIELD_PREP(CMD_0_ATTR, 0x7);
+			*ring_data++ = FIELD_PREP(CMD_0_ATTR, 0x7) | FIELD_PREP(CMD_0_TID, xfer->cmd_tid);
 			*ring_data++ = 0;
 			if (hci->cmd == &mipi_i3c_hci_cmd_v2) {
 				*ring_data++ = 0;
···
 	}
 
 	/* restart the ring */
+	mipi_i3c_hci_resume(hci);
 	rh_reg_write(RING_CONTROL, RING_CTRL_ENABLE);
+	rh_reg_write(RING_CONTROL, RING_CTRL_ENABLE | RING_CTRL_RUN_STOP);
+
+	spin_unlock_irq(&hci->lock);
 
 	return did_unqueue;
+}
+
+static int hci_dma_handle_error(struct i3c_hci *hci, struct hci_xfer *xfer_list, int n)
+{
+	return hci_dma_dequeue_xfer(hci, xfer_list, n) ? -EIO : 0;
 }
 
 static void hci_dma_xfer_done(struct i3c_hci *hci, struct hci_rh_data *rh)
 {
 	u32 op1_val, op2_val, resp, *ring_resp;
 	unsigned int tid, done_ptr = rh->done_ptr;
+	unsigned int done_cnt = 0;
 	struct hci_xfer *xfer;
 
 	for (;;) {
···
 			dev_dbg(&hci->master.dev, "orphaned ring entry");
 		} else {
 			hci_dma_unmap_xfer(hci, xfer, 1);
+			rh->src_xfers[done_ptr] = NULL;
 			xfer->ring_entry = -1;
 			xfer->response = resp;
 			if (tid != xfer->cmd_tid) {
···
 
 		done_ptr = (done_ptr + 1) % rh->xfer_entries;
 		rh->done_ptr = done_ptr;
+		done_cnt += 1;
 	}
 
-	/* take care to update the software dequeue pointer atomically */
-	spin_lock(&rh->lock);
+	rh->xfer_space += done_cnt;
 	op1_val = rh_reg_read(RING_OPERATION1);
 	op1_val &= ~RING_OP1_CR_SW_DEQ_PTR;
 	op1_val |= FIELD_PREP(RING_OP1_CR_SW_DEQ_PTR, done_ptr);
 	rh_reg_write(RING_OPERATION1, op1_val);
-	spin_unlock(&rh->lock);
 }
 
 static int hci_dma_request_ibi(struct i3c_hci *hci, struct i3c_dev_desc *dev,
···
 	i3c_master_queue_ibi(dev, slot);
 
 done:
-	/* take care to update the ibi dequeue pointer atomically */
-	spin_lock(&rh->lock);
 	op1_val = rh_reg_read(RING_OPERATION1);
 	op1_val &= ~RING_OP1_IBI_DEQ_PTR;
 	op1_val |= FIELD_PREP(RING_OP1_IBI_DEQ_PTR, deq_ptr);
 	rh_reg_write(RING_OPERATION1, op1_val);
-	spin_unlock(&rh->lock);
 
 	/* update the chunk pointer */
 	rh->ibi_chunk_ptr += ibi_chunks;
···
 			hci_dma_xfer_done(hci, rh);
 		if (status & INTR_RING_OP)
 			complete(&rh->op_done);
-
-		if (status & INTR_TRANSFER_ABORT) {
-			u32 ring_status;
-
-			dev_notice_ratelimited(&hci->master.dev,
-					       "Ring %d: Transfer Aborted\n", i);
-			mipi_i3c_hci_resume(hci);
-			ring_status = rh_reg_read(RING_STATUS);
-			if (!(ring_status & RING_STATUS_RUNNING) &&
-			    status & INTR_TRANSFER_COMPLETION &&
-			    status & INTR_TRANSFER_ERR) {
-				/*
-				 * Ring stop followed by run is an Intel
-				 * specific required quirk after resuming the
-				 * halted controller. Do it only when the ring
-				 * is not in running state after a transfer
-				 * error.
-				 */
-				rh_reg_write(RING_CONTROL, RING_CTRL_ENABLE);
-				rh_reg_write(RING_CONTROL, RING_CTRL_ENABLE |
-							   RING_CTRL_RUN_STOP);
-			}
-		}
+		if (status & INTR_TRANSFER_ABORT)
+			dev_dbg(&hci->master.dev, "Ring %d: Transfer Aborted\n", i);
 		if (status & INTR_IBI_RING_FULL)
 			dev_err_ratelimited(&hci->master.dev,
 					    "Ring %d: IBI Ring Full Condition\n", i);
···
 	.cleanup = hci_dma_cleanup,
 	.queue_xfer = hci_dma_queue_xfer,
 	.dequeue_xfer = hci_dma_dequeue_xfer,
+	.handle_error = hci_dma_handle_error,
 	.irq_handler = hci_dma_irq_handler,
 	.request_ibi = hci_dma_request_ibi,
 	.free_ibi = hci_dma_free_ibi,
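
The enqueue rework replaces re-reading the hardware dequeue pointer on every slot with a software free-slot counter updated under the shared lock. A standalone model of that accounting (ring_model and friends are illustrative, not driver code):

#include <stdbool.h>

struct ring_model {
	unsigned int entries;	/* total ring slots (rh->xfer_entries) */
	unsigned int space;	/* free slots (mirrors rh->xfer_space) */
};

/* enqueue path: reserve all n slots up front, or report "ring full" */
static bool ring_reserve(struct ring_model *r, unsigned int n)
{
	if (n > r->space)
		return false;	/* caller unmaps buffers, returns -EBUSY */
	r->space -= n;
	return true;
}

/* completion path: hand back the slots of finished descriptors */
static void ring_complete(struct ring_model *r, unsigned int done_cnt)
{
	r->space += done_cnt;
}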
+5
drivers/i3c/master/mipi-i3c-hci/hci.h
···
 	const struct hci_io_ops *io;
 	void *io_data;
 	const struct hci_cmd_ops *cmd;
+	spinlock_t lock;
+	struct mutex control_mutex;
 	atomic_t next_cmd_tid;
 	bool irq_inactive;
 	u32 caps;
···
 	unsigned int data_len;
 	unsigned int cmd_tid;
 	struct completion *completion;
+	unsigned long timeout;
 	union {
 		struct {
 			/* PIO specific */
···
 	bool (*irq_handler)(struct i3c_hci *hci);
 	int (*queue_xfer)(struct i3c_hci *hci, struct hci_xfer *xfer, int n);
 	bool (*dequeue_xfer)(struct i3c_hci *hci, struct hci_xfer *xfer, int n);
+	int (*handle_error)(struct i3c_hci *hci, struct hci_xfer *xfer, int n);
 	int (*request_ibi)(struct i3c_hci *hci, struct i3c_dev_desc *dev,
 			   const struct i3c_ibi_setup *req);
 	void (*free_ibi)(struct i3c_hci *hci, struct i3c_dev_desc *dev);
···
 void amd_set_od_pp_timing(struct i3c_hci *hci);
 void amd_set_resp_buf_thld(struct i3c_hci *hci);
 void i3c_hci_sync_irq_inactive(struct i3c_hci *hci);
+int i3c_hci_process_xfer(struct i3c_hci *hci, struct hci_xfer *xfer, int n);
 
 #endif
+5 -11
drivers/i3c/master/mipi-i3c-hci/pio.c
···
 };
 
 struct hci_pio_data {
-	spinlock_t lock;
 	struct hci_xfer *curr_xfer, *xfer_queue;
 	struct hci_xfer *curr_rx, *rx_queue;
 	struct hci_xfer *curr_tx, *tx_queue;
···
 		return -ENOMEM;
 
 	hci->io_data = pio;
-	spin_lock_init(&pio->lock);
 
 	__hci_pio_init(hci, &size_val);
 
···
 		xfer[i].data_left = xfer[i].data_len;
 	}
 
-	spin_lock_irq(&pio->lock);
+	spin_lock_irq(&hci->lock);
 	prev_queue_tail = pio->xfer_queue;
 	pio->xfer_queue = &xfer[n - 1];
 	if (pio->curr_xfer) {
···
 			pio_reg_read(INTR_STATUS),
 			pio_reg_read(INTR_SIGNAL_ENABLE));
 	}
-	spin_unlock_irq(&pio->lock);
+	spin_unlock_irq(&hci->lock);
 	return 0;
 }
···
 	struct hci_pio_data *pio = hci->io_data;
 	int ret;
 
-	spin_lock_irq(&pio->lock);
+	spin_lock_irq(&hci->lock);
 	dev_dbg(&hci->master.dev, "n=%d status=%#x/%#x", n,
 		pio_reg_read(INTR_STATUS), pio_reg_read(INTR_SIGNAL_ENABLE));
 	dev_dbg(&hci->master.dev, "main_status = %#x/%#x",
 		readl(hci->base_regs + 0x20), readl(hci->base_regs + 0x28));
 
 	ret = hci_pio_dequeue_xfer_common(hci, pio, xfer, n);
-	spin_unlock_irq(&pio->lock);
+	spin_unlock_irq(&hci->lock);
 	return ret;
 }
···
 	struct hci_pio_data *pio = hci->io_data;
 	u32 status;
 
-	spin_lock(&pio->lock);
 	status = pio_reg_read(INTR_STATUS);
 	dev_dbg(&hci->master.dev, "PIO_INTR_STATUS %#x/%#x",
 		status, pio->enabled_irqs);
 	status &= pio->enabled_irqs | STAT_LATENCY_WARNINGS;
-	if (!status) {
-		spin_unlock(&pio->lock);
+	if (!status)
 		return false;
-	}
 
 	if (status & STAT_IBI_STATUS_THLD)
 		hci_pio_process_ibi(hci, pio);
···
 	pio_reg_write(INTR_SIGNAL_ENABLE, pio->enabled_irqs);
 	dev_dbg(&hci->master.dev, "PIO_INTR_STATUS %#x/%#x",
 		pio_reg_read(INTR_STATUS), pio_reg_read(INTR_SIGNAL_ENABLE));
-	spin_unlock(&pio->lock);
 	return true;
 }
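
The consolidation onto hci->lock leans on the kernel's scope-based lock guards; a minimal sketch of the two forms used above (struct demo and the demo_* functions are hypothetical):

#include <linux/cleanup.h>
#include <linux/spinlock.h>

struct demo {
	spinlock_t lock;
	bool irq_inactive;
};

/*
 * guard() takes the lock and drops it automatically when the scope
 * ends, so early returns need no explicit unlock; this is the form the
 * reworked IRQ handler uses.
 */
static bool demo_irq_handler(struct demo *d)
{
	guard(spinlock)(&d->lock);

	if (d->irq_inactive)
		return false;	/* lock released here automatically */

	/* ... dispatch to the PIO/DMA handlers ... */
	return true;
}

/*
 * scoped_guard() bounds the critical section to one statement or block,
 * as done when flipping irq_inactive in enable/suspend/resume.
 */
static void demo_quiesce(struct demo *d)
{
	scoped_guard(spinlock_irqsave, &d->lock)
		d->irq_inactive = true;
}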
+1 -2
drivers/iio/magnetometer/Kconfig
···
 	tristate "MEMSIC MMC5633 3-axis magnetic sensor"
 	select REGMAP_I2C
 	select REGMAP_I3C if I3C
-	depends on I2C
-	depends on I3C || !I3C
+	depends on I3C_OR_I2C
 	help
 	  Say yes here to build support for the MEMSIC MMC5633 3-axis
 	  magnetic sensor.
+1 -2
drivers/misc/amd-sbi/Kconfig
···
 # SPDX-License-Identifier: GPL-2.0-only
 config AMD_SBRMI_I2C
 	tristate "AMD side band RMI support"
-	depends on I2C
+	depends on I3C_OR_I2C
 	depends on ARM || ARM64 || COMPILE_TEST
 	select REGMAP_I2C
-	depends on I3C || !I3C
 	select REGMAP_I3C if I3C
 	help
 	  Side band RMI over I2C/I3C support for AMD out of band management.