Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

drm/amdgpu: round robin through clear_entities in amdgpu_fill_buffer

This makes clearing of different BOs run in parallel. Partial jobs to
clear a single BO still execute sequentially.

Signed-off-by: Pierre-Eric Pelloux-Prayer <pierre-eric.pelloux-prayer@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>

authored by

Pierre-Eric Pelloux-Prayer and committed by
Alex Deucher
e2b0c863 ab5dd4dc

+20 -5
+2 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -1325,7 +1325,8 @@
 	if (r)
 		goto out;

-	r = amdgpu_fill_buffer(&adev->mman.clear_entities[0], abo, 0, &bo->base._resv,
+	r = amdgpu_fill_buffer(amdgpu_ttm_next_clear_entity(adev),
+			       abo, 0, &bo->base._resv,
 			       &fence, AMDGPU_KERNEL_JOB_ID_CLEAR_ON_RELEASE);
 	if (WARN_ON(r))
 		goto out;
+16 -4
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -2369,6 +2369,7 @@
 	adev->mman.clear_entities = kcalloc(num_clear_entities,
 					    sizeof(struct amdgpu_ttm_buffer_entity),
 					    GFP_KERNEL);
+	atomic_set(&adev->mman.next_clear_entity, 0);
 	if (!adev->mman.clear_entities)
 		goto error_free_default_entity;

@@ -2643,11 +2642,8 @@
 	struct amdgpu_res_cursor dst;
 	int r;

-	if (!adev->mman.buffer_funcs_enabled) {
-		dev_err(adev->dev,
-			"Trying to clear memory with ring turned off.\n");
+	if (!entity)
 		return -EINVAL;
-	}

 	amdgpu_res_first(bo->tbo.resource, 0, amdgpu_bo_size(bo), &dst);

@@ -2678,6 +2680,22 @@
 	*f = dma_fence_get(fence);
 	dma_fence_put(fence);
 	return r;
+}
+
+struct amdgpu_ttm_buffer_entity *
+amdgpu_ttm_next_clear_entity(struct amdgpu_device *adev)
+{
+	struct amdgpu_mman *mman = &adev->mman;
+	u32 i;
+
+	if (mman->num_clear_entities == 0)
+		return NULL;
+
+	i = atomic_inc_return(&mman->next_clear_entity) %
+	    mman->num_clear_entities;
+	return &mman->clear_entities[i];
 }

 /**
+2
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
@@ -73,6 +73,7 @@
 	/* @default_entity: for workarounds, has no gart windows */
 	struct amdgpu_ttm_buffer_entity default_entity;
 	struct amdgpu_ttm_buffer_entity *clear_entities;
+	atomic_t next_clear_entity;
 	u32 num_clear_entities;
 	struct amdgpu_ttm_buffer_entity move_entities[TTM_NUM_MOVE_FENCES];
 	u32 num_move_entities;
@@ -194,6 +193,7 @@
 			struct dma_resv *resv,
 			struct dma_fence **f,
 			u64 k_job_id);
+struct amdgpu_ttm_buffer_entity *amdgpu_ttm_next_clear_entity(struct amdgpu_device *adev);

 int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo);
 void amdgpu_ttm_recover_gart(struct ttm_buffer_object *tbo);