Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

drm/amdgpu: allocate clear entities dynamically

No functional change for now, as we always allocate a single entity
and use it everywhere.

Signed-off-by: Pierre-Eric Pelloux-Prayer <pierre-eric.pelloux-prayer@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>

authored by

Pierre-Eric Pelloux-Prayer and committed by
Alex Deucher
0f1fbe74 4ea64d48

+42 -18
+1 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
···
1325 1325 	if (r)
1326 1326 		goto out;
1327 1327
1328      -	r = amdgpu_fill_buffer(&adev->mman.clear_entity, abo, 0, &bo->base._resv,
     1328 +	r = amdgpu_fill_buffer(&adev->mman.clear_entities[0], abo, 0, &bo->base._resv,
1329 1329 			       &fence, AMDGPU_KERNEL_JOB_ID_CLEAR_ON_RELEASE);
1330 1330 	if (WARN_ON(r))
1331 1331 		goto out;
+39 -16
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
···
2335 2335 void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev, bool enable)
2336 2336 {
2337 2337 	struct ttm_resource_manager *man = ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM);
     2338 +	u32 num_clear_entities;
2338 2339 	uint64_t size;
2339      -	int r;
     2340 +	int r, i, j;
2340 2341
2341 2342 	if (!adev->mman.initialized || amdgpu_in_reset(adev) ||
2342 2343 	    adev->mman.buffer_funcs_enabled == enable || adev->gmc.is_app_apu)
···
2352 2351 			return;
2353 2352 		}
2354 2353
     2354 +		num_clear_entities = 1;
2355 2355 		ring = adev->mman.buffer_funcs_ring;
2356 2356 		sched = &ring->sched;
2357 2357 		r = amdgpu_ttm_buffer_entity_init(&adev->mman.gtt_mgr,
···
2365 2363 			return;
2366 2364 		}
2367 2365
2368      -		r = amdgpu_ttm_buffer_entity_init(&adev->mman.gtt_mgr,
2369      -						  &adev->mman.clear_entity,
2370      -						  DRM_SCHED_PRIORITY_NORMAL,
2371      -						  &sched, 1, 1);
2372      -		if (r < 0) {
2373      -			dev_err(adev->dev,
2374      -				"Failed setting up TTM BO clear entity (%d)\n", r);
     2366 +		adev->mman.clear_entities = kcalloc(num_clear_entities,
     2367 +						    sizeof(struct amdgpu_ttm_buffer_entity),
     2368 +						    GFP_KERNEL);
     2369 +		if (!adev->mman.clear_entities)
2375 2370 			goto error_free_default_entity;
     2371 +
     2372 +		adev->mman.num_clear_entities = num_clear_entities;
     2373 +
     2374 +		for (i = 0; i < num_clear_entities; i++) {
     2375 +			r = amdgpu_ttm_buffer_entity_init(
     2376 +				&adev->mman.gtt_mgr, &adev->mman.clear_entities[i],
     2377 +				DRM_SCHED_PRIORITY_NORMAL, &sched, 1, 1);
     2378 +
     2379 +			if (r < 0) {
     2380 +				for (j = 0; j < i; j++)
     2381 +					amdgpu_ttm_buffer_entity_fini(
     2382 +						&adev->mman.gtt_mgr, &adev->mman.clear_entities[j]);
     2383 +				kfree(adev->mman.clear_entities);
     2384 +				adev->mman.num_clear_entities = 0;
     2385 +				adev->mman.clear_entities = NULL;
     2386 +				goto error_free_default_entity;
     2387 +			}
2376 2388 		}
2377 2389
2378 2390 		r = amdgpu_ttm_buffer_entity_init(&adev->mman.gtt_mgr,
···
2396 2380 		if (r < 0) {
2397 2381 			dev_err(adev->dev,
2398 2382 				"Failed setting up TTM BO move entity (%d)\n", r);
2399      -			goto error_free_clear_entity;
     2383 +			goto error_free_clear_entities;
2400 2384 		}
2401 2385 	} else {
2402 2386 		amdgpu_ttm_buffer_entity_fini(&adev->mman.gtt_mgr,
2403 2387 					      &adev->mman.default_entity);
2404      -		amdgpu_ttm_buffer_entity_fini(&adev->mman.gtt_mgr,
2405      -					      &adev->mman.clear_entity);
     2388 +		for (i = 0; i < adev->mman.num_clear_entities; i++)
     2389 +			amdgpu_ttm_buffer_entity_fini(&adev->mman.gtt_mgr,
     2390 +						      &adev->mman.clear_entities[i]);
2406 2391 		amdgpu_ttm_buffer_entity_fini(&adev->mman.gtt_mgr,
2407 2392 					      &adev->mman.move_entity);
2408 2393 		/* Drop all the old fences since re-creating the scheduler entities
2409 2394 		 * will allocate new contexts.
2410 2395 		 */
2411 2396 		ttm_resource_manager_cleanup(man);
     2397 +		kfree(adev->mman.clear_entities);
     2398 +		adev->mman.clear_entities = NULL;
     2399 +		adev->mman.num_clear_entities = 0;
2412 2400 	}
2413 2401
2414 2402 	/* this just adjusts TTM size idea, which sets lpfn to the correct value */
···
2425 2405
2426 2406 	return;
2427 2407
2428      -error_free_clear_entity:
2429      -	amdgpu_ttm_buffer_entity_fini(&adev->mman.gtt_mgr,
2430      -				      &adev->mman.clear_entity);
     2408 +error_free_clear_entities:
     2409 +	for (i = 0; i < adev->mman.num_clear_entities; i++)
     2410 +		amdgpu_ttm_buffer_entity_fini(&adev->mman.gtt_mgr,
     2411 +					      &adev->mman.clear_entities[i]);
     2412 +	kfree(adev->mman.clear_entities);
     2413 +	adev->mman.clear_entities = NULL;
     2414 +	adev->mman.num_clear_entities = 0;
2431 2415 error_free_default_entity:
2432 2416 	amdgpu_ttm_buffer_entity_fini(&adev->mman.gtt_mgr,
2433 2417 				      &adev->mman.default_entity);
···
2581 2557
2582 2558 	if (!fence)
2583 2559 		return -EINVAL;
2584      -
2585      -	entity = &adev->mman.clear_entity;
     2560 +	entity = &adev->mman.clear_entities[0];
2586 2561 	*fence = dma_fence_get_stub();
2587 2562
2588 2563 	amdgpu_res_first(bo->tbo.resource, 0, amdgpu_bo_size(bo), &cursor);
+2 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
···
  72   72
  73   73 	/* @default_entity: for workarounds, has no gart windows */
  74   74 	struct amdgpu_ttm_buffer_entity default_entity;
  75      -	struct amdgpu_ttm_buffer_entity clear_entity;
  76   75 	struct amdgpu_ttm_buffer_entity move_entity;
       76 +	struct amdgpu_ttm_buffer_entity *clear_entities;
       77 +	u32 num_clear_entities;
  77   78
  78   79 	struct amdgpu_vram_mgr vram_mgr;
  79   80 	struct amdgpu_gtt_mgr gtt_mgr;