Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

drm/scheduler: Scheduler priority fixes (v2)

Remove DRM_SCHED_PRIORITY_LOW, as it was used
in only one place.

Rename and separate by a line
DRM_SCHED_PRIORITY_MAX to DRM_SCHED_PRIORITY_COUNT
as it represents a (total) count of said
priorities and it is used as such in loops
throughout the code. (0-based indexing is the
count number.)

Remove redundant word HIGH in priority names,
and rename *KERNEL* to *HIGH*, as it really
means that, high.

v2: Add back KERNEL and remove SW and HW,
in lieu of a single HIGH between NORMAL and KERNEL.

Signed-off-by: Luben Tuikov <luben.tuikov@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>

authored by

Luben Tuikov and committed by
Alex Deucher
e2d732fd c80e966b

+18 -16
+2 -2
drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
··· 46 46 static int amdgpu_ctx_priority_permit(struct drm_file *filp, 47 47 enum drm_sched_priority priority) 48 48 { 49 - if (priority < 0 || priority >= DRM_SCHED_PRIORITY_MAX) 49 + if (priority < 0 || priority >= DRM_SCHED_PRIORITY_COUNT) 50 50 return -EINVAL; 51 51 52 52 /* NORMAL and below are accessible by everyone */ ··· 65 65 static enum gfx_pipe_priority amdgpu_ctx_sched_prio_to_compute_prio(enum drm_sched_priority prio) 66 66 { 67 67 switch (prio) { 68 - case DRM_SCHED_PRIORITY_HIGH_HW: 68 + case DRM_SCHED_PRIORITY_HIGH: 69 69 case DRM_SCHED_PRIORITY_KERNEL: 70 70 return AMDGPU_GFX_PIPE_PRIO_HIGH; 71 71 default:
+1 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
··· 251 251 int i; 252 252 253 253 /* Signal all jobs not yet scheduled */ 254 - for (i = DRM_SCHED_PRIORITY_MAX - 1; i >= DRM_SCHED_PRIORITY_MIN; i--) { 254 + for (i = DRM_SCHED_PRIORITY_COUNT - 1; i >= DRM_SCHED_PRIORITY_MIN; i--) { 255 255 struct drm_sched_rq *rq = &sched->sched_rq[i]; 256 256 257 257 if (!rq)
+1 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
··· 267 267 &ring->sched; 268 268 } 269 269 270 - for (i = 0; i < DRM_SCHED_PRIORITY_MAX; ++i) 270 + for (i = DRM_SCHED_PRIORITY_MIN; i < DRM_SCHED_PRIORITY_COUNT; ++i) 271 271 atomic_set(&ring->num_jobs[i], 0); 272 272 273 273 return 0;
+1 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
··· 243 243 bool has_compute_vm_bug; 244 244 bool no_scheduler; 245 245 246 - atomic_t num_jobs[DRM_SCHED_PRIORITY_MAX]; 246 + atomic_t num_jobs[DRM_SCHED_PRIORITY_COUNT]; 247 247 struct mutex priority_mutex; 248 248 /* protected by priority_mutex */ 249 249 int priority;
+3 -3
drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
··· 36 36 { 37 37 switch (amdgpu_priority) { 38 38 case AMDGPU_CTX_PRIORITY_VERY_HIGH: 39 - return DRM_SCHED_PRIORITY_HIGH_HW; 39 + return DRM_SCHED_PRIORITY_HIGH; 40 40 case AMDGPU_CTX_PRIORITY_HIGH: 41 - return DRM_SCHED_PRIORITY_HIGH_SW; 41 + return DRM_SCHED_PRIORITY_HIGH; 42 42 case AMDGPU_CTX_PRIORITY_NORMAL: 43 43 return DRM_SCHED_PRIORITY_NORMAL; 44 44 case AMDGPU_CTX_PRIORITY_LOW: 45 45 case AMDGPU_CTX_PRIORITY_VERY_LOW: 46 - return DRM_SCHED_PRIORITY_LOW; 46 + return DRM_SCHED_PRIORITY_MIN; 47 47 case AMDGPU_CTX_PRIORITY_UNSET: 48 48 return DRM_SCHED_PRIORITY_UNSET; 49 49 default:
+1 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
··· 2109 2109 ring = adev->mman.buffer_funcs_ring; 2110 2110 sched = &ring->sched; 2111 2111 r = drm_sched_entity_init(&adev->mman.entity, 2112 - DRM_SCHED_PRIORITY_KERNEL, &sched, 2112 + DRM_SCHED_PRIORITY_KERNEL, &sched, 2113 2113 1, NULL); 2114 2114 if (r) { 2115 2115 DRM_ERROR("Failed setting up TTM BO move entity (%d)\n",
+2 -2
drivers/gpu/drm/scheduler/sched_main.c
··· 623 623 return NULL; 624 624 625 625 /* Kernel run queue has higher priority than normal run queue*/ 626 - for (i = DRM_SCHED_PRIORITY_MAX - 1; i >= DRM_SCHED_PRIORITY_MIN; i--) { 626 + for (i = DRM_SCHED_PRIORITY_COUNT - 1; i >= DRM_SCHED_PRIORITY_MIN; i--) { 627 627 entity = drm_sched_rq_select_entity(&sched->sched_rq[i]); 628 628 if (entity) 629 629 break; ··· 851 851 sched->name = name; 852 852 sched->timeout = timeout; 853 853 sched->hang_limit = hang_limit; 854 - for (i = DRM_SCHED_PRIORITY_MIN; i < DRM_SCHED_PRIORITY_MAX; i++) 854 + for (i = DRM_SCHED_PRIORITY_MIN; i < DRM_SCHED_PRIORITY_COUNT; i++) 855 855 drm_sched_rq_init(sched, &sched->sched_rq[i]); 856 856 857 857 init_waitqueue_head(&sched->wake_up_worker);
+7 -5
include/drm/gpu_scheduler.h
··· 33 33 struct drm_gpu_scheduler; 34 34 struct drm_sched_rq; 35 35 36 + /* These are often used as an (initial) index 37 + * to an array, and as such should start at 0. 38 + */ 36 39 enum drm_sched_priority { 37 40 DRM_SCHED_PRIORITY_MIN, 38 - DRM_SCHED_PRIORITY_LOW = DRM_SCHED_PRIORITY_MIN, 39 41 DRM_SCHED_PRIORITY_NORMAL, 40 - DRM_SCHED_PRIORITY_HIGH_SW, 41 - DRM_SCHED_PRIORITY_HIGH_HW, 42 + DRM_SCHED_PRIORITY_HIGH, 42 43 DRM_SCHED_PRIORITY_KERNEL, 43 - DRM_SCHED_PRIORITY_MAX, 44 + 45 + DRM_SCHED_PRIORITY_COUNT, 44 46 DRM_SCHED_PRIORITY_INVALID = -1, 45 47 DRM_SCHED_PRIORITY_UNSET = -2 46 48 }; ··· 276 274 uint32_t hw_submission_limit; 277 275 long timeout; 278 276 const char *name; 279 - struct drm_sched_rq sched_rq[DRM_SCHED_PRIORITY_MAX]; 277 + struct drm_sched_rq sched_rq[DRM_SCHED_PRIORITY_COUNT]; 280 278 wait_queue_head_t wake_up_worker; 281 279 wait_queue_head_t job_scheduled; 282 280 atomic_t hw_rq_count;