Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
Merge git://git.infradead.org/iommu-2.6

* git://git.infradead.org/iommu-2.6:
intel-iommu: fix superpage support in pfn_to_dma_pte()
intel-iommu: set iommu_superpage on VM domains to lowest common denominator
intel-iommu: fix return value of iommu_unmap() API
MAINTAINERS: Update VT-d entry for drivers/pci -> drivers/iommu move
intel-iommu: Export a flag indicating that the IOMMU is used for iGFX.
intel-iommu: Workaround IOTLB hang on Ironlake GPU
intel-iommu: Fix AB-BA lockdep report
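The most visible piece of this pull is the new intel_iommu_gfx_mapped flag, which lets graphics code ask whether the integrated GPU sits behind an active VT-d unit. Below is a minimal sketch of a consumer, assuming only what the diff itself exports (the EXPORT_SYMBOL_GPL'd int); the wrapper function and its name are hypothetical, not part of this merge:

/* Sketch only: intel_iommu_gfx_mapped is real (added in the diff below),
 * but this wrapper and its name are illustrative. */
#include <linux/types.h>

#ifdef CONFIG_INTEL_IOMMU
extern int intel_iommu_gfx_mapped;
#endif

static bool gfx_behind_vtd(void)
{
#ifdef CONFIG_INTEL_IOMMU
	/* Set by init_no_remapping_devices() when an IOMMU that covers
	 * only graphics devices is left enabled (dmar_map_gfx != 0). */
	return intel_iommu_gfx_mapped != 0;
#else
	return false;
#endif
}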

2 files changed, 46 insertions(+), 31 deletions(-)

MAINTAINERS (+1 -1)
···
 L: iommu@lists.linux-foundation.org
 T: git git://git.infradead.org/iommu-2.6.git
 S: Supported
-F: drivers/pci/intel-iommu.c
+F: drivers/iommu/intel-iommu.c
 F: include/linux/intel-iommu.h
 
 INTEL IOP-ADMA DMA DRIVER
drivers/iommu/intel-iommu.c (+45 -30)
···
 	return (pte->val & 3) != 0;
 }
 
+static inline bool dma_pte_superpage(struct dma_pte *pte)
+{
+	return (pte->val & (1 << 7));
+}
+
 static inline int first_pte_in_page(struct dma_pte *pte)
 {
 	return !((unsigned long)pte & ~VTD_PAGE_MASK);
···
 static int dmar_forcedac;
 static int intel_iommu_strict;
 static int intel_iommu_superpage = 1;
+
+int intel_iommu_gfx_mapped;
+EXPORT_SYMBOL_GPL(intel_iommu_gfx_mapped);
 
 #define DUMMY_DEVICE_DOMAIN_INFO ((struct device_domain_info *)(-1))
 static DEFINE_SPINLOCK(device_domain_lock);
···
 
 static void domain_update_iommu_superpage(struct dmar_domain *domain)
 {
-	int i, mask = 0xf;
+	struct dmar_drhd_unit *drhd;
+	struct intel_iommu *iommu = NULL;
+	int mask = 0xf;
 
 	if (!intel_iommu_superpage) {
 		domain->iommu_superpage = 0;
 		return;
 	}
 
-	domain->iommu_superpage = 4; /* 1TiB */
-
-	for_each_set_bit(i, &domain->iommu_bmp, g_num_of_iommus) {
-		mask |= cap_super_page_val(g_iommus[i]->cap);
+	/* set iommu_superpage to the smallest common denominator */
+	for_each_active_iommu(iommu, drhd) {
+		mask &= cap_super_page_val(iommu->cap);
 		if (!mask) {
 			break;
 		}
···
 }
 
 static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
-				      unsigned long pfn, int large_level)
+				      unsigned long pfn, int target_level)
 {
 	int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
 	struct dma_pte *parent, *pte = NULL;
 	int level = agaw_to_level(domain->agaw);
-	int offset, target_level;
+	int offset;
 
 	BUG_ON(!domain->pgd);
 	BUG_ON(addr_width < BITS_PER_LONG && pfn >> addr_width);
 	parent = domain->pgd;
-
-	/* Search pte */
-	if (!large_level)
-		target_level = 1;
-	else
-		target_level = large_level;
 
 	while (level > 0) {
 		void *tmp_page;
 
 		offset = pfn_level_offset(pfn, level);
 		pte = &parent[offset];
-		if (!large_level && (pte->val & DMA_PTE_LARGE_PAGE))
+		if (!target_level && (dma_pte_superpage(pte) || !dma_pte_present(pte)))
 			break;
 		if (level == target_level)
 			break;
···
 }
 
 /* clear last level pte, a tlb flush should be followed */
-static void dma_pte_clear_range(struct dmar_domain *domain,
+static int dma_pte_clear_range(struct dmar_domain *domain,
 				unsigned long start_pfn,
 				unsigned long last_pfn)
 {
 	int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
 	unsigned int large_page = 1;
 	struct dma_pte *first_pte, *pte;
+	int order;
 
 	BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width);
 	BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width);
···
 				   (void *)pte - (void *)first_pte);
 
 	} while (start_pfn && start_pfn <= last_pfn);
+
+	order = (large_page - 1) * 9;
+	return order;
 }
 
 /* free page table pages. last level pte should already be cleared */
···
 		}
 	}
 
-	if (dmar_map_gfx)
-		return;
-
 	for_each_drhd_unit(drhd) {
 		int i;
 		if (drhd->ignored || drhd->include_all)
···
 
 		for (i = 0; i < drhd->devices_cnt; i++)
 			if (drhd->devices[i] &&
-				!IS_GFX_DEVICE(drhd->devices[i]))
+			    !IS_GFX_DEVICE(drhd->devices[i]))
 				break;
 
 		if (i < drhd->devices_cnt)
 			continue;
 
-		/* bypass IOMMU if it is just for gfx devices */
-		drhd->ignored = 1;
-		for (i = 0; i < drhd->devices_cnt; i++) {
-			if (!drhd->devices[i])
-				continue;
-			drhd->devices[i]->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
+		/* This IOMMU has *only* gfx devices. Either bypass it or
+		   set the gfx_mapped flag, as appropriate */
+		if (dmar_map_gfx) {
+			intel_iommu_gfx_mapped = 1;
+		} else {
+			drhd->ignored = 1;
+			for (i = 0; i < drhd->devices_cnt; i++) {
+				if (!drhd->devices[i])
+					continue;
+				drhd->devices[i]->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
+			}
 		}
 	}
 }
···
 			found = 1;
 		}
 
+	spin_unlock_irqrestore(&device_domain_lock, flags);
+
 	if (found == 0) {
 		unsigned long tmp_flags;
 		spin_lock_irqsave(&domain->iommu_lock, tmp_flags);
···
 			spin_unlock_irqrestore(&iommu->lock, tmp_flags);
 		}
 	}
-
-	spin_unlock_irqrestore(&device_domain_lock, flags);
 }
 
 static void vm_domain_remove_all_dev_info(struct dmar_domain *domain)
···
 		vm_domain_exit(dmar_domain);
 		return -ENOMEM;
 	}
+	domain_update_iommu_cap(dmar_domain);
 	domain->priv = dmar_domain;
 
 	return 0;
···
 {
 	struct dmar_domain *dmar_domain = domain->priv;
 	size_t size = PAGE_SIZE << gfp_order;
+	int order;
 
-	dma_pte_clear_range(dmar_domain, iova >> VTD_PAGE_SHIFT,
+	order = dma_pte_clear_range(dmar_domain, iova >> VTD_PAGE_SHIFT,
 			    (iova + size - 1) >> VTD_PAGE_SHIFT);
 
 	if (dmar_domain->max_addr == iova + size)
 		dmar_domain->max_addr = iova;
 
-	return gfp_order;
+	return order;
 }
 
 static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
···
 	if (!(ggc & GGC_MEMORY_VT_ENABLED)) {
 		printk(KERN_INFO "DMAR: BIOS has allocated no shadow GTT; disabling IOMMU for graphics\n");
 		dmar_map_gfx = 0;
-	}
+	} else if (dmar_map_gfx) {
+		/* we have to ensure the gfx device is idle before we flush */
+		printk(KERN_INFO "DMAR: Disabling batched IOTLB flush on Ironlake\n");
+		intel_iommu_strict = 1;
+	}
 }
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0040, quirk_calpella_no_shadow_gtt);
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0044, quirk_calpella_no_shadow_gtt);
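The iommu_unmap() return-value fix above works because each VT-d page-table level resolves 9 bits of address, so the large_page level that dma_pte_clear_range() actually cleared maps to a page order of (large_page - 1) * 9; likewise, domain_update_iommu_superpage() now ANDs each unit's cap_super_page_val() bits into mask, so only superpage sizes supported by every active IOMMU survive. A standalone sketch of the order arithmetic (plain userspace C, names mine, not kernel code):

#include <stdio.h>

/* Reproduces the order = (large_page - 1) * 9 computation from the
 * dma_pte_clear_range() change above. Level 1 is a 4 KiB PTE; every
 * additional level multiplies the mapping size by 2^9 = 512. */
int main(void)
{
	for (unsigned int large_page = 1; large_page <= 3; large_page++) {
		unsigned int order = (large_page - 1) * 9;
		unsigned long long bytes = 4096ULL << order;
		printf("large_page %u -> order %2u -> %llu bytes\n",
		       large_page, order, bytes);
	}
	return 0;
}

Compiled and run, this prints 4 KiB, 2 MiB and 1 GiB for levels 1 to 3, matching the VT-d superpage sizes the mask logic negotiates.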