Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

Merge tag 'iommu-fixes-v6.17-rc6' of git://git.kernel.org/pub/scm/linux/kernel/git/iommu/linux

Pull iommu fixes from Joerg Roedel:

- Fixes for memory leak and memory corruption bugs on S390 and AMD-Vi

- Race condition fix in AMD-Vi page table code and S390 device attach
code

- Intel VT-d: Fix alignment checks in __domain_mapping()

- AMD-Vi: Fix potentially incorrect DTE settings when device has
aliases

* tag 'iommu-fixes-v6.17-rc6' of git://git.kernel.org/pub/scm/linux/kernel/git/iommu/linux:
iommu/amd/pgtbl: Fix possible race while increase page table level
iommu/amd: Fix alias device DTE setting
iommu/s390: Make attach succeed when the device was surprise removed
iommu/vt-d: Fix __domain_mapping()'s usage of switch_to_super_page()
iommu/s390: Fix memory corruption when using identity domain
iommu/amd: Fix ivrs_base memleak in early_amd_iommu_init()

+59 -22
+5 -5
arch/s390/include/asm/pci_insn.h
··· 16 16 #define ZPCI_PCI_ST_FUNC_NOT_AVAIL 40 17 17 #define ZPCI_PCI_ST_ALREADY_IN_RQ_STATE 44 18 18 19 - /* Load/Store return codes */ 20 - #define ZPCI_PCI_LS_OK 0 21 - #define ZPCI_PCI_LS_ERR 1 22 - #define ZPCI_PCI_LS_BUSY 2 23 - #define ZPCI_PCI_LS_INVAL_HANDLE 3 19 + /* PCI instruction condition codes */ 20 + #define ZPCI_CC_OK 0 21 + #define ZPCI_CC_ERR 1 22 + #define ZPCI_CC_BUSY 2 23 + #define ZPCI_CC_INVAL_HANDLE 3 24 24 25 25 /* Load/Store address space identifiers */ 26 26 #define ZPCI_PCIAS_MEMIO_0 0
+1
drivers/iommu/amd/amd_iommu_types.h
··· 555 555 }; 556 556 557 557 struct amd_io_pgtable { 558 + seqcount_t seqcount; /* Protects root/mode update */ 558 559 struct io_pgtable pgtbl; 559 560 int mode; 560 561 u64 *root;
+5 -4
drivers/iommu/amd/init.c
··· 1455 1455 PCI_FUNC(e->devid)); 1456 1456 1457 1457 devid = e->devid; 1458 - for (dev_i = devid_start; dev_i <= devid; ++dev_i) { 1459 - if (alias) 1458 + if (alias) { 1459 + for (dev_i = devid_start; dev_i <= devid; ++dev_i) 1460 1460 pci_seg->alias_table[dev_i] = devid_to; 1461 + set_dev_entry_from_acpi(iommu, devid_to, flags, ext_flags); 1461 1462 } 1462 1463 set_dev_entry_from_acpi_range(iommu, devid_start, devid, flags, ext_flags); 1463 - set_dev_entry_from_acpi(iommu, devid_to, flags, ext_flags); 1464 1464 break; 1465 1465 case IVHD_DEV_SPECIAL: { 1466 1466 u8 handle, type; ··· 3067 3067 3068 3068 if (!boot_cpu_has(X86_FEATURE_CX16)) { 3069 3069 pr_err("Failed to initialize. The CMPXCHG16B feature is required.\n"); 3070 - return -EINVAL; 3070 + ret = -EINVAL; 3071 + goto out; 3071 3072 } 3072 3073 3073 3074 /*
+21 -4
drivers/iommu/amd/io_pgtable.c
··· 17 17 #include <linux/slab.h> 18 18 #include <linux/types.h> 19 19 #include <linux/dma-mapping.h> 20 + #include <linux/seqlock.h> 20 21 21 22 #include <asm/barrier.h> 22 23 ··· 131 130 132 131 *pte = PM_LEVEL_PDE(pgtable->mode, iommu_virt_to_phys(pgtable->root)); 133 132 133 + write_seqcount_begin(&pgtable->seqcount); 134 134 pgtable->root = pte; 135 135 pgtable->mode += 1; 136 + write_seqcount_end(&pgtable->seqcount); 137 + 136 138 amd_iommu_update_and_flush_device_table(domain); 137 139 138 140 pte = NULL; ··· 157 153 { 158 154 unsigned long last_addr = address + (page_size - 1); 159 155 struct io_pgtable_cfg *cfg = &pgtable->pgtbl.cfg; 156 + unsigned int seqcount; 160 157 int level, end_lvl; 161 158 u64 *pte, *page; 162 159 ··· 175 170 } 176 171 177 172 178 - level = pgtable->mode - 1; 179 - pte = &pgtable->root[PM_LEVEL_INDEX(level, address)]; 173 + do { 174 + seqcount = read_seqcount_begin(&pgtable->seqcount); 175 + 176 + level = pgtable->mode - 1; 177 + pte = &pgtable->root[PM_LEVEL_INDEX(level, address)]; 178 + } while (read_seqcount_retry(&pgtable->seqcount, seqcount)); 179 + 180 + 180 181 address = PAGE_SIZE_ALIGN(address, page_size); 181 182 end_lvl = PAGE_SIZE_LEVEL(page_size); 182 183 ··· 260 249 unsigned long *page_size) 261 250 { 262 251 int level; 252 + unsigned int seqcount; 263 253 u64 *pte; 264 254 265 255 *page_size = 0; ··· 268 256 if (address > PM_LEVEL_SIZE(pgtable->mode)) 269 257 return NULL; 270 258 271 - level = pgtable->mode - 1; 272 - pte = &pgtable->root[PM_LEVEL_INDEX(level, address)]; 259 + do { 260 + seqcount = read_seqcount_begin(&pgtable->seqcount); 261 + level = pgtable->mode - 1; 262 + pte = &pgtable->root[PM_LEVEL_INDEX(level, address)]; 263 + } while (read_seqcount_retry(&pgtable->seqcount, seqcount)); 264 + 273 265 *page_size = PTE_LEVEL_PAGE_SIZE(level); 274 266 275 267 while (level > 0) { ··· 557 541 if (!pgtable->root) 558 542 return NULL; 559 543 pgtable->mode = PAGE_MODE_3_LEVEL; 544 + seqcount_init(&pgtable->seqcount); 560 545 561 546 cfg->pgsize_bitmap = amd_iommu_pgsize_bitmap; 562 547 cfg->ias = IOMMU_IN_ADDR_BIT_SIZE;
+6 -1
drivers/iommu/intel/iommu.c
··· 1575 1575 unsigned long lvl_pages = lvl_to_nr_pages(level); 1576 1576 struct dma_pte *pte = NULL; 1577 1577 1578 + if (WARN_ON(!IS_ALIGNED(start_pfn, lvl_pages) || 1579 + !IS_ALIGNED(end_pfn + 1, lvl_pages))) 1580 + return; 1581 + 1578 1582 while (start_pfn <= end_pfn) { 1579 1583 if (!pte) 1580 1584 pte = pfn_to_dma_pte(domain, start_pfn, &level, ··· 1654 1650 unsigned long pages_to_remove; 1655 1651 1656 1652 pteval |= DMA_PTE_LARGE_PAGE; 1657 - pages_to_remove = min_t(unsigned long, nr_pages, 1653 + pages_to_remove = min_t(unsigned long, 1654 + round_down(nr_pages, lvl_pages), 1658 1655 nr_pte_to_next_page(pte) * lvl_pages); 1659 1656 end_pfn = iov_pfn + pages_to_remove - 1; 1660 1657 switch_to_super_page(domain, iov_pfn, end_pfn, largepage_lvl);
+21 -8
drivers/iommu/s390-iommu.c
··· 612 612 } 613 613 } 614 614 615 + static bool reg_ioat_propagate_error(int cc, u8 status) 616 + { 617 + /* 618 + * If the device is in the error state the reset routine 619 + * will register the IOAT of the newly set domain on re-enable 620 + */ 621 + if (cc == ZPCI_CC_ERR && status == ZPCI_PCI_ST_FUNC_NOT_AVAIL) 622 + return false; 623 + /* 624 + * If the device was removed treat registration as success 625 + * and let the subsequent error event trigger tear down. 626 + */ 627 + if (cc == ZPCI_CC_INVAL_HANDLE) 628 + return false; 629 + return cc != ZPCI_CC_OK; 630 + } 631 + 615 632 static int s390_iommu_domain_reg_ioat(struct zpci_dev *zdev, 616 633 struct iommu_domain *domain, u8 *status) 617 634 { ··· 713 696 714 697 /* If we fail now DMA remains blocked via blocking domain */ 715 698 cc = s390_iommu_domain_reg_ioat(zdev, domain, &status); 716 - if (cc && status != ZPCI_PCI_ST_FUNC_NOT_AVAIL) 699 + if (reg_ioat_propagate_error(cc, status)) 717 700 return -EIO; 718 701 zdev->dma_table = s390_domain->dma_table; 719 702 zdev_s390_domain_update(zdev, domain); ··· 1049 1032 1050 1033 lockdep_assert_held(&zdev->dom_lock); 1051 1034 1052 - if (zdev->s390_domain->type == IOMMU_DOMAIN_BLOCKED) 1035 + if (zdev->s390_domain->type == IOMMU_DOMAIN_BLOCKED || 1036 + zdev->s390_domain->type == IOMMU_DOMAIN_IDENTITY) 1053 1037 return NULL; 1054 1038 1055 1039 s390_domain = to_s390_domain(zdev->s390_domain); ··· 1141 1123 1142 1124 /* If we fail now DMA remains blocked via blocking domain */ 1143 1125 cc = s390_iommu_domain_reg_ioat(zdev, domain, &status); 1144 - 1145 - /* 1146 - * If the device is undergoing error recovery the reset code 1147 - * will re-establish the new domain. 1148 - */ 1149 - if (cc && status != ZPCI_PCI_ST_FUNC_NOT_AVAIL) 1126 + if (reg_ioat_propagate_error(cc, status)) 1150 1127 return -EIO; 1151 1128 1152 1129 zdev_s390_domain_update(zdev, domain);