Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

drm/amdgpu: prevent immediate PASID reuse case

PASID reuse could cause an interrupt issue when a process
immediately runs into hw state left by a previous
process that exited with the same PASID; it's possible that
page faults are still pending in the IH ring buffer when
the process exits and frees up its PASID. To prevent this
case, use an idr cyclic allocator, the same as for kernel pids.

Signed-off-by: Eric Huang <jinhuieric.huang@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
(cherry picked from commit 8f1de51f49be692de137c8525106e0fce2d1912d)
Cc: stable@vger.kernel.org

authored by

Eric Huang and committed by
Alex Deucher
14b81abe 2d300ebf

+34 -13
+32 -13
drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
··· 35 35 * PASIDs are global address space identifiers that can be shared 36 36 * between the GPU, an IOMMU and the driver. VMs on different devices 37 37 * may use the same PASID if they share the same address 38 - * space. Therefore PASIDs are allocated using a global IDA. VMs are 39 - * looked up from the PASID per amdgpu_device. 38 + * space. Therefore PASIDs are allocated using IDR cyclic allocator 39 + * (similar to kernel PID allocation) which naturally delays reuse. 40 + * VMs are looked up from the PASID per amdgpu_device. 40 41 */ 41 - static DEFINE_IDA(amdgpu_pasid_ida); 42 + 43 + static DEFINE_IDR(amdgpu_pasid_idr); 44 + static DEFINE_SPINLOCK(amdgpu_pasid_idr_lock); 42 45 43 46 /* Helper to free pasid from a fence callback */ 44 47 struct amdgpu_pasid_cb { ··· 53 50 * amdgpu_pasid_alloc - Allocate a PASID 54 51 * @bits: Maximum width of the PASID in bits, must be at least 1 55 52 * 56 - * Allocates a PASID of the given width while keeping smaller PASIDs 57 - * available if possible. 53 + * Uses kernel's IDR cyclic allocator (same as PID allocation). 54 + * Allocates sequentially with automatic wrap-around. 58 55 * 59 56 * Returns a positive integer on success. Returns %-EINVAL if bits==0. 60 57 * Returns %-ENOSPC if no PASID was available. 
Returns %-ENOMEM on ··· 62 59 */ 63 60 int amdgpu_pasid_alloc(unsigned int bits) 64 61 { 65 - int pasid = -EINVAL; 62 + int pasid; 66 63 67 - for (bits = min(bits, 31U); bits > 0; bits--) { 68 - pasid = ida_alloc_range(&amdgpu_pasid_ida, 1U << (bits - 1), 69 - (1U << bits) - 1, GFP_KERNEL); 70 - if (pasid != -ENOSPC) 71 - break; 72 - } 64 + if (bits == 0) 65 + return -EINVAL; 66 + 67 + spin_lock(&amdgpu_pasid_idr_lock); 68 + pasid = idr_alloc_cyclic(&amdgpu_pasid_idr, NULL, 1, 69 + 1U << bits, GFP_KERNEL); 70 + spin_unlock(&amdgpu_pasid_idr_lock); 73 71 74 72 if (pasid >= 0) 75 73 trace_amdgpu_pasid_allocated(pasid); ··· 85 81 void amdgpu_pasid_free(u32 pasid) 86 82 { 87 83 trace_amdgpu_pasid_freed(pasid); 88 - ida_free(&amdgpu_pasid_ida, pasid); 84 + 85 + spin_lock(&amdgpu_pasid_idr_lock); 86 + idr_remove(&amdgpu_pasid_idr, pasid); 87 + spin_unlock(&amdgpu_pasid_idr_lock); 89 88 } 90 89 91 90 static void amdgpu_pasid_free_cb(struct dma_fence *fence, ··· 622 615 dma_fence_put(id->pasid_mapping); 623 616 } 624 617 } 618 + } 619 + 620 + /** 621 + * amdgpu_pasid_mgr_cleanup - cleanup PASID manager 622 + * 623 + * Cleanup the IDR allocator. 624 + */ 625 + void amdgpu_pasid_mgr_cleanup(void) 626 + { 627 + spin_lock(&amdgpu_pasid_idr_lock); 628 + idr_destroy(&amdgpu_pasid_idr); 629 + spin_unlock(&amdgpu_pasid_idr_lock); 625 630 }
+1
drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h
··· 74 74 void amdgpu_pasid_free(u32 pasid); 75 75 void amdgpu_pasid_free_delayed(struct dma_resv *resv, 76 76 u32 pasid); 77 + void amdgpu_pasid_mgr_cleanup(void); 77 78 78 79 bool amdgpu_vmid_had_gpu_reset(struct amdgpu_device *adev, 79 80 struct amdgpu_vmid *id);
+1
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
··· 2898 2898 xa_destroy(&adev->vm_manager.pasids); 2899 2899 2900 2900 amdgpu_vmid_mgr_fini(adev); 2901 + amdgpu_pasid_mgr_cleanup(); 2901 2902 } 2902 2903 2903 2904 /**