Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

Merge tag 'amd-drm-next-6.20-2026-02-19' of https://gitlab.freedesktop.org/agd5f/linux into drm-next

amd-drm-next-6.20-2026-02-19:

amdgpu:
- Fixes for DC analog support
- DC FAMS fixes
- DML 2.1 fixes
- eDP fixes
- Misc DC fixes
- Fastboot fix
- 3DLUT fixes
- GPUVM fixes
- 64bpp format fix
- XGMI fix
- Fix for MacBooks with switchable gfx

amdkfd:
- Fix priority inversion with MQDs
- NULL check fix

Signed-off-by: Dave Airlie <airlied@redhat.com>

From: Alex Deucher <alexander.deucher@amd.com>
Link: https://patch.msgid.link/20260219172807.2451298-1-alexander.deucher@amd.com

+455 -272
+1 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_fence.c
··· 107 107 { 108 108 struct amdgpu_amdkfd_fence *fence = to_amdgpu_amdkfd_fence(f); 109 109 110 - return fence->timeline_name; 110 + return fence ? fence->timeline_name : NULL; 111 111 } 112 112 113 113 /**
+4 -3
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
··· 4615 4615 /* APUs w/ gfx9 onwards doesn't reply on PCIe atomics, rather it is a 4616 4616 * internal path natively support atomics, set have_atomics_support to true. 4617 4617 */ 4618 - } else if ((adev->flags & AMD_IS_APU) && 4619 - (amdgpu_ip_version(adev, GC_HWIP, 0) > 4620 - IP_VERSION(9, 0, 0))) { 4618 + } else if ((adev->flags & AMD_IS_APU && 4619 + amdgpu_ip_version(adev, GC_HWIP, 0) > IP_VERSION(9, 0, 0)) || 4620 + (adev->gmc.xgmi.connected_to_cpu && 4621 + amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(12, 1, 0))) { 4621 4622 adev->have_atomics_support = true; 4622 4623 } else { 4623 4624 adev->have_atomics_support =
+10
drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
··· 1068 1068 case CHIP_RENOIR: 1069 1069 adev->mman.keep_stolen_vga_memory = true; 1070 1070 break; 1071 + case CHIP_POLARIS10: 1072 + case CHIP_POLARIS11: 1073 + case CHIP_POLARIS12: 1074 + /* MacBookPros with switchable graphics put VRAM at 0 when 1075 + * the iGPU is enabled which results in cursor issues if 1076 + * the cursor ends up at 0. Reserve vram at 0 in that case. 1077 + */ 1078 + if (adev->gmc.vram_start == 0) 1079 + adev->mman.keep_stolen_vga_memory = true; 1080 + break; 1071 1081 default: 1072 1082 adev->mman.keep_stolen_vga_memory = false; 1073 1083 break;
+4 -4
drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
··· 33 33 #include "amdgpu_ras.h" 34 34 35 35 /* VA hole for 48bit and 57bit addresses */ 36 - #define AMDGPU_GMC_HOLE_START (adev->vm_manager.root_level == AMDGPU_VM_PDB3 ?\ 36 + #define AMDGPU_GMC_HOLE_START (adev->vm_manager.max_level == 4 ?\ 37 37 0x0100000000000000ULL : 0x0000800000000000ULL) 38 - #define AMDGPU_GMC_HOLE_END (adev->vm_manager.root_level == AMDGPU_VM_PDB3 ?\ 38 + #define AMDGPU_GMC_HOLE_END (adev->vm_manager.max_level == 4 ?\ 39 39 0xff00000000000000ULL : 0xffff800000000000ULL) 40 40 41 41 /* ··· 45 45 * This mask is used to remove the upper 16bits of the VA and so come up with 46 46 * the linear addr value. 47 47 */ 48 - #define AMDGPU_GMC_HOLE_MASK (adev->vm_manager.root_level == AMDGPU_VM_PDB3 ?\ 49 - 0x00ffffffffffffffULL : 0x0000ffffffffffffULL) 48 + #define AMDGPU_GMC_HOLE_MASK (adev->vm_manager.max_level == 4 ?\ 49 + 0x01ffffffffffffffULL : 0x0000ffffffffffffULL) 50 50 51 51 /* 52 52 * Ring size as power of two for the log of recent faults.
+1
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
··· 2400 2400 } 2401 2401 2402 2402 adev->vm_manager.max_pfn = (uint64_t)vm_size << 18; 2403 + adev->vm_manager.max_level = max_level; 2403 2404 2404 2405 tmp = roundup_pow_of_two(adev->vm_manager.max_pfn); 2405 2406 if (amdgpu_vm_block_size != -1)
+1
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
··· 456 456 bool concurrent_flush; 457 457 458 458 uint64_t max_pfn; 459 + uint32_t max_level; 459 460 uint32_t num_level; 460 461 uint32_t block_size; 461 462 uint32_t fragment_size;
+4 -1
drivers/gpu/drm/amd/amdgpu/mmhub_v4_2_0.c
··· 395 395 tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT0_CNTL, 396 396 ENABLE_CONTEXT, 1); 397 397 tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT0_CNTL, 398 - PAGE_TABLE_DEPTH, 0); 398 + PAGE_TABLE_DEPTH, adev->gmc.vmid0_page_table_depth); 399 + tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT0_CNTL, 400 + PAGE_TABLE_BLOCK_SIZE, 401 + adev->gmc.vmid0_page_table_block_size); 399 402 tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT0_CNTL, 400 403 RETRY_PERMISSION_OR_INVALID_PAGE_FAULT, 0); 401 404 WREG32_SOC15(MMHUB, GET_INST(MMHUB, i),
+1 -1
drivers/gpu/drm/amd/amdkfd/kfd_crat.c
··· 2359 2359 if (kdev->kfd->hive_id) { 2360 2360 for (nid = 0; nid < proximity_domain; ++nid) { 2361 2361 peer_dev = kfd_topology_device_by_proximity_domain_no_lock(nid); 2362 - if (!peer_dev->gpu) 2362 + if (!peer_dev || !peer_dev->gpu) 2363 2363 continue; 2364 2364 if (peer_dev->gpu->kfd->hive_id != kdev->kfd->hive_id) 2365 2365 continue;
+12 -3
drivers/gpu/drm/amd/amdkfd/kfd_debug.c
··· 523 523 int i, r = 0, rewind_count = 0; 524 524 525 525 for (i = 0; i < target->n_pdds; i++) { 526 + uint32_t caps; 527 + uint32_t caps2; 526 528 struct kfd_topology_device *topo_dev = 527 - kfd_topology_device_by_id(target->pdds[i]->dev->id); 528 - uint32_t caps = topo_dev->node_props.capability; 529 - uint32_t caps2 = topo_dev->node_props.capability2; 529 + kfd_topology_device_by_id(target->pdds[i]->dev->id); 530 + if (!topo_dev) 531 + return -EINVAL; 532 + 533 + caps = topo_dev->node_props.capability; 534 + caps2 = topo_dev->node_props.capability2; 530 535 531 536 if (!(caps & HSA_CAP_TRAP_DEBUG_PRECISE_MEMORY_OPERATIONS_SUPPORTED) && 532 537 (*flags & KFD_DBG_TRAP_FLAG_SINGLE_MEM_OP)) { ··· 1091 1086 for (i = 0; i < tmp_num_devices; i++) { 1092 1087 struct kfd_process_device *pdd = target->pdds[i]; 1093 1088 struct kfd_topology_device *topo_dev = kfd_topology_device_by_id(pdd->dev->id); 1089 + if (!topo_dev) { 1090 + r = -EINVAL; 1091 + break; 1092 + } 1094 1093 1095 1094 device_info.gpu_id = pdd->dev->id; 1096 1095 device_info.exception_status = pdd->exception_status;
+1 -1
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
··· 70 70 static void set_priority(struct cik_mqd *m, struct queue_properties *q) 71 71 { 72 72 m->cp_hqd_pipe_priority = pipe_priority_map[q->priority]; 73 - m->cp_hqd_queue_priority = q->priority; 73 + /* m->cp_hqd_queue_priority = q->priority; */ 74 74 } 75 75 76 76 static struct kfd_mem_obj *allocate_mqd(struct mqd_manager *mm,
+1 -1
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c
··· 70 70 static void set_priority(struct v10_compute_mqd *m, struct queue_properties *q) 71 71 { 72 72 m->cp_hqd_pipe_priority = pipe_priority_map[q->priority]; 73 - m->cp_hqd_queue_priority = q->priority; 73 + /* m->cp_hqd_queue_priority = q->priority; */ 74 74 } 75 75 76 76 static struct kfd_mem_obj *allocate_mqd(struct mqd_manager *mm,
+1 -1
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c
··· 96 96 static void set_priority(struct v11_compute_mqd *m, struct queue_properties *q) 97 97 { 98 98 m->cp_hqd_pipe_priority = pipe_priority_map[q->priority]; 99 - m->cp_hqd_queue_priority = q->priority; 99 + /* m->cp_hqd_queue_priority = q->priority; */ 100 100 } 101 101 102 102 static struct kfd_mem_obj *allocate_mqd(struct mqd_manager *mm,
+1 -1
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v12.c
··· 77 77 static void set_priority(struct v12_compute_mqd *m, struct queue_properties *q) 78 78 { 79 79 m->cp_hqd_pipe_priority = pipe_priority_map[q->priority]; 80 - m->cp_hqd_queue_priority = q->priority; 80 + /* m->cp_hqd_queue_priority = q->priority; */ 81 81 } 82 82 83 83 static struct kfd_mem_obj *allocate_mqd(struct mqd_manager *mm,
+1 -1
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v12_1.c
··· 131 131 static void set_priority(struct v12_1_compute_mqd *m, struct queue_properties *q) 132 132 { 133 133 m->cp_hqd_pipe_priority = pipe_priority_map[q->priority]; 134 - m->cp_hqd_queue_priority = q->priority; 134 + /* m->cp_hqd_queue_priority = q->priority; */ 135 135 } 136 136 137 137 static struct kfd_mem_obj *allocate_mqd(struct mqd_manager *mm,
+1 -1
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
··· 106 106 static void set_priority(struct v9_mqd *m, struct queue_properties *q) 107 107 { 108 108 m->cp_hqd_pipe_priority = pipe_priority_map[q->priority]; 109 - m->cp_hqd_queue_priority = q->priority; 109 + /* m->cp_hqd_queue_priority = q->priority; */ 110 110 } 111 111 112 112 static bool mqd_on_vram(struct amdgpu_device *adev)
+1 -1
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c
··· 73 73 static void set_priority(struct vi_mqd *m, struct queue_properties *q) 74 74 { 75 75 m->cp_hqd_pipe_priority = pipe_priority_map[q->priority]; 76 - m->cp_hqd_queue_priority = q->priority; 76 + /* m->cp_hqd_queue_priority = q->priority; */ 77 77 } 78 78 79 79 static struct kfd_mem_obj *allocate_mqd(struct mqd_manager *mm,
-3
drivers/gpu/drm/amd/amdkfd/kfd_process.c
··· 1773 1773 struct kfd_node *dev; 1774 1774 int ret; 1775 1775 1776 - if (!drm_file) 1777 - return -EINVAL; 1778 - 1779 1776 if (pdd->drm_priv) 1780 1777 return -EBUSY; 1781 1778
+5 -1
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
··· 3614 3614 if (aconnector->mst_root) 3615 3615 continue; 3616 3616 3617 + /* Skip eDP detection, when there is no sink present */ 3618 + if (aconnector->dc_link->connector_signal == SIGNAL_TYPE_EDP && 3619 + !aconnector->dc_link->edp_sink_present) 3620 + continue; 3621 + 3617 3622 guard(mutex)(&aconnector->hpd_lock); 3618 3623 if (!dc_link_detect_connection_type(aconnector->dc_link, &new_connection_type)) 3619 3624 drm_err(adev_to_drm(adev), "KMS: Failed to detect connector\n"); ··· 8045 8040 dc_plane_state->plane_size.chroma_size.height = stream->src.height; 8046 8041 dc_plane_state->plane_size.chroma_size.width = stream->src.width; 8047 8042 dc_plane_state->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888; 8048 - dc_plane_state->tiling_info.gfxversion = DcGfxVersion9; 8049 8043 dc_plane_state->tiling_info.gfx9.swizzle = DC_SW_UNKNOWN; 8050 8044 dc_plane_state->rotation = ROTATION_ANGLE_0; 8051 8045 dc_plane_state->is_tiling_rotated = false;
+10 -2
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
··· 1153 1153 1154 1154 void dm_helpers_override_panel_settings( 1155 1155 struct dc_context *ctx, 1156 - struct dc_panel_config *panel_config) 1156 + struct dc_link *link) 1157 1157 { 1158 + unsigned int panel_inst = 0; 1159 + 1158 1160 // Feature DSC 1159 1161 if (amdgpu_dc_debug_mask & DC_DISABLE_DSC) 1160 - panel_config->dsc.disable_dsc_edp = true; 1162 + link->panel_config.dsc.disable_dsc_edp = true; 1163 + 1164 + if (dc_get_edp_link_panel_inst(ctx->dc, link, &panel_inst) && panel_inst == 1) { 1165 + link->panel_config.psr.disable_psr = true; 1166 + link->panel_config.psr.disallow_psrsu = true;; 1167 + link->panel_config.psr.disallow_replay = true; 1168 + } 1161 1169 } 1162 1170 1163 1171 void *dm_helpers_allocate_gpu_mem(
+5
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c
··· 1060 1060 *min_downscale = plane_cap->max_downscale_factor.nv12; 1061 1061 break; 1062 1062 1063 + /* All 64 bpp formats have the same fp16 scaling limits */ 1063 1064 case DRM_FORMAT_XRGB16161616F: 1064 1065 case DRM_FORMAT_ARGB16161616F: 1065 1066 case DRM_FORMAT_XBGR16161616F: 1066 1067 case DRM_FORMAT_ABGR16161616F: 1068 + case DRM_FORMAT_XRGB16161616: 1069 + case DRM_FORMAT_ARGB16161616: 1070 + case DRM_FORMAT_XBGR16161616: 1071 + case DRM_FORMAT_ABGR16161616: 1067 1072 *max_upscale = plane_cap->max_upscale_factor.fp16; 1068 1073 *min_downscale = plane_cap->max_downscale_factor.fp16; 1069 1074 break;
+8
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c
··· 80 80 link->psr_settings.psr_feature_enabled = false; 81 81 82 82 } else { 83 + unsigned int panel_inst = 0; 84 + 83 85 if (link_supports_psrsu(link)) 84 86 link->psr_settings.psr_version = DC_PSR_VERSION_SU_1; 85 87 else 86 88 link->psr_settings.psr_version = DC_PSR_VERSION_1; 87 89 88 90 link->psr_settings.psr_feature_enabled = true; 91 + 92 + /*disable allow psr/psrsu/replay on eDP1*/ 93 + if (dc_get_edp_link_panel_inst(link->ctx->dc, link, &panel_inst) && panel_inst == 1) { 94 + link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED; 95 + link->psr_settings.psr_feature_enabled = false; 96 + } 89 97 } 90 98 } 91 99
+1 -2
drivers/gpu/drm/amd/display/dc/bios/command_table.c
··· 1874 1874 uint8_t dac_standard) 1875 1875 { 1876 1876 params->ucDacStandard = dac_standard; 1877 - if (action == ENCODER_CONTROL_SETUP || 1878 - action == ENCODER_CONTROL_INIT) 1877 + if (action == ENCODER_CONTROL_INIT) 1879 1878 params->ucAction = ATOM_ENCODER_INIT; 1880 1879 else if (action == ENCODER_CONTROL_ENABLE) 1881 1880 params->ucAction = ATOM_ENABLE;
+4 -1
drivers/gpu/drm/amd/display/dc/core/dc.c
··· 2770 2770 case DcGfxVersion7: 2771 2771 case DcGfxVersion8: 2772 2772 case DcGfxVersionUnknown: 2773 - case DcGfxBase: 2774 2773 default: 2775 2774 break; 2776 2775 } ··· 3368 3369 stream->scaler_sharpener_update = *update->scaler_sharpener_update; 3369 3370 if (update->sharpening_required) 3370 3371 stream->sharpening_required = *update->sharpening_required; 3372 + 3373 + if (update->drr_trigger_mode) { 3374 + stream->drr_trigger_mode = *update->drr_trigger_mode; 3375 + } 3371 3376 } 3372 3377 3373 3378 static void backup_planes_and_stream_state(
-7
drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c
··· 2065 2065 while (bottom_pipe_ctx->bottom_pipe != NULL) 2066 2066 bottom_pipe_ctx = bottom_pipe_ctx->bottom_pipe; 2067 2067 2068 - if (bottom_pipe_ctx->plane_state->tiling_info.gfxversion == DcGfxBase) { 2069 - /* LINEAR Surface - set border color to red */ 2070 - color->color_r_cr = color_value; 2071 - return; 2072 - } 2073 - 2074 - ASSERT(bottom_pipe_ctx->plane_state->tiling_info.gfxversion == DcGfxVersion9); 2075 2068 switch (bottom_pipe_ctx->plane_state->tiling_info.gfx9.swizzle) { 2076 2069 case DC_SW_LINEAR: 2077 2070 /* LINEAR Surface - set border color to red */
-1
drivers/gpu/drm/amd/display/dc/core/dc_resource.c
··· 4434 4434 4435 4435 if (dc->res_pool->funcs->patch_unknown_plane_state && 4436 4436 pipe_ctx->plane_state && 4437 - pipe_ctx->plane_state->tiling_info.gfxversion == DcGfxVersion9 && 4438 4437 pipe_ctx->plane_state->tiling_info.gfx9.swizzle == DC_SW_UNKNOWN) { 4439 4438 result = dc->res_pool->funcs->patch_unknown_plane_state(pipe_ctx->plane_state); 4440 4439 if (result != DC_OK)
+15
drivers/gpu/drm/amd/display/dc/core/dc_stream.c
··· 515 515 } 516 516 } 517 517 518 + if (stream->drr_trigger_mode == DRR_TRIGGER_ON_FLIP_AND_CURSOR) { 519 + /* apply manual trigger */ 520 + int i; 521 + 522 + for (i = 0; i < dc->res_pool->pipe_count; i++) { 523 + struct pipe_ctx *pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i]; 524 + 525 + /* trigger event on first pipe with current stream */ 526 + if (stream == pipe_ctx->stream) { 527 + pipe_ctx->stream_res.tg->funcs->program_manual_trigger(pipe_ctx->stream_res.tg); 528 + break; 529 + } 530 + } 531 + } 532 + 518 533 return true; 519 534 } 520 535
-1
drivers/gpu/drm/amd/display/dc/dc.h
··· 505 505 } bits; 506 506 unsigned char raw; 507 507 }; 508 - 509 508 /* Structure to hold configuration flags set by dm at dc creation. */ 510 509 struct dc_config { 511 510 bool gpu_vm_support;
+1 -2
drivers/gpu/drm/amd/display/dc/dc_hw_types.h
··· 342 342 }; 343 343 344 344 enum dc_gfxversion { 345 - DcGfxBase = 0, 346 - DcGfxVersion7, 345 + DcGfxVersion7 = 0, 347 346 DcGfxVersion8, 348 347 DcGfxVersion9, 349 348 DcGfxVersion10,
+9
drivers/gpu/drm/amd/display/dc/dc_stream.h
··· 183 183 int dm_max_decrease_from_nominal; 184 184 }; 185 185 186 + enum dc_drr_trigger_mode { 187 + DRR_TRIGGER_ON_FLIP = 0, 188 + DRR_TRIGGER_ON_FLIP_AND_CURSOR, 189 + }; 190 + 186 191 struct dc_stream_state { 187 192 // sink is deprecated, new code should not reference 188 193 // this pointer ··· 321 316 bool scaler_sharpener_update; 322 317 bool sharpening_required; 323 318 319 + enum dc_drr_trigger_mode drr_trigger_mode; 320 + 324 321 struct dc_update_scratch_space *update_scratch; 325 322 }; 326 323 ··· 373 366 bool *hw_cursor_req; 374 367 bool *scaler_sharpener_update; 375 368 bool *sharpening_required; 369 + 370 + enum dc_drr_trigger_mode *drr_trigger_mode; 376 371 }; 377 372 378 373 bool dc_is_stream_unchanged(
+53 -1
drivers/gpu/drm/amd/display/dc/dccg/dcn20/dcn20_dccg.c
··· 131 131 132 132 void dccg2_init(struct dccg *dccg) 133 133 { 134 + struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg); 135 + 136 + /* Hardcoded register values for DCN20 137 + * These are specific to 100Mhz refclk 138 + * Different ASICs with different refclk may override this in their own init 139 + */ 140 + REG_WRITE(MICROSECOND_TIME_BASE_DIV, 0x00120264); 141 + REG_WRITE(MILLISECOND_TIME_BASE_DIV, 0x001186a0); 142 + REG_WRITE(DISPCLK_FREQ_CHANGE_CNTL, 0x0e01003c); 143 + 144 + if (REG(REFCLK_CNTL)) 145 + REG_WRITE(REFCLK_CNTL, 0); 146 + } 147 + 148 + void dccg2_refclk_setup(struct dccg *dccg) 149 + { 150 + struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg); 151 + 152 + /* REFCLK programming that must occur after hubbub initialization */ 153 + if (REG(REFCLK_CNTL)) 154 + REG_WRITE(REFCLK_CNTL, 0); 155 + } 156 + 157 + bool dccg2_is_s0i3_golden_init_wa_done(struct dccg *dccg) 158 + { 159 + struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg); 160 + 161 + return REG_READ(MICROSECOND_TIME_BASE_DIV) == 0x00120464; 162 + } 163 + 164 + void dccg2_allow_clock_gating(struct dccg *dccg, bool allow) 165 + { 166 + struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg); 167 + 168 + if (allow) { 169 + REG_WRITE(DCCG_GATE_DISABLE_CNTL, 0); 170 + REG_WRITE(DCCG_GATE_DISABLE_CNTL2, 0); 171 + } else { 172 + REG_WRITE(DCCG_GATE_DISABLE_CNTL, 0xFFFFFFFF); 173 + REG_WRITE(DCCG_GATE_DISABLE_CNTL2, 0xFFFFFFFF); 174 + } 175 + } 176 + 177 + void dccg2_enable_memory_low_power(struct dccg *dccg, bool enable) 178 + { 179 + struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg); 180 + 181 + REG_UPDATE(DC_MEM_GLOBAL_PWR_REQ_CNTL, DC_MEM_GLOBAL_PWR_REQ_DIS, enable ? 
0 : 1); 134 182 } 135 183 136 184 static const struct dccg_funcs dccg2_funcs = { ··· 187 139 .set_fifo_errdet_ovr_en = dccg2_set_fifo_errdet_ovr_en, 188 140 .otg_add_pixel = dccg2_otg_add_pixel, 189 141 .otg_drop_pixel = dccg2_otg_drop_pixel, 190 - .dccg_init = dccg2_init 142 + .dccg_init = dccg2_init, 143 + .refclk_setup = dccg2_refclk_setup, /* Deprecated - for backward compatibility only */ 144 + .allow_clock_gating = dccg2_allow_clock_gating, 145 + .enable_memory_low_power = dccg2_enable_memory_low_power, 146 + .is_s0i3_golden_init_wa_done = dccg2_is_s0i3_golden_init_wa_done /* Deprecated - for backward compatibility only */ 191 147 }; 192 148 193 149 struct dccg *dccg2_create(
+11 -3
drivers/gpu/drm/amd/display/dc/dccg/dcn20/dcn20_dccg.h
··· 37 37 SR(REFCLK_CNTL),\ 38 38 DCCG_SRII(PIXEL_RATE_CNTL, OTG, 0),\ 39 39 DCCG_SRII(PIXEL_RATE_CNTL, OTG, 1),\ 40 - SR(DISPCLK_FREQ_CHANGE_CNTL) 40 + SR(DISPCLK_FREQ_CHANGE_CNTL),\ 41 + SR(DC_MEM_GLOBAL_PWR_REQ_CNTL) 41 42 42 43 #define DCCG_REG_LIST_DCN2() \ 43 44 DCCG_COMMON_REG_LIST_DCN_BASE(),\ ··· 82 81 DCCG_SFII(OTG, PIXEL_RATE_CNTL, OTG, ADD_PIXEL, 0, mask_sh),\ 83 82 DCCG_SFII(OTG, PIXEL_RATE_CNTL, OTG, ADD_PIXEL, 1, mask_sh),\ 84 83 DCCG_SFII(OTG, PIXEL_RATE_CNTL, OTG, DROP_PIXEL, 0, mask_sh),\ 85 - DCCG_SFII(OTG, PIXEL_RATE_CNTL, OTG, DROP_PIXEL, 1, mask_sh) 84 + DCCG_SFII(OTG, PIXEL_RATE_CNTL, OTG, DROP_PIXEL, 1, mask_sh),\ 85 + DCCG_SF(DC_MEM_GLOBAL_PWR_REQ_CNTL, DC_MEM_GLOBAL_PWR_REQ_DIS, mask_sh) 86 86 87 87 88 88 ··· 132 130 type DISPCLK_CHG_FWD_CORR_DISABLE;\ 133 131 type DISPCLK_FREQ_CHANGE_CNTL;\ 134 132 type OTG_ADD_PIXEL[MAX_PIPES];\ 135 - type OTG_DROP_PIXEL[MAX_PIPES]; 133 + type OTG_DROP_PIXEL[MAX_PIPES];\ 134 + type DC_MEM_GLOBAL_PWR_REQ_DIS; 136 135 137 136 #define DCCG3_REG_FIELD_LIST(type) \ 138 137 type HDMICHARCLK0_EN;\ ··· 517 514 518 515 519 516 void dccg2_init(struct dccg *dccg); 517 + 518 + void dccg2_refclk_setup(struct dccg *dccg); 519 + void dccg2_allow_clock_gating(struct dccg *dccg, bool allow); 520 + void dccg2_enable_memory_low_power(struct dccg *dccg, bool enable); 521 + bool dccg2_is_s0i3_golden_init_wa_done(struct dccg *dccg); 520 522 521 523 struct dccg *dccg2_create( 522 524 struct dc_context *ctx,
+6 -1
drivers/gpu/drm/amd/display/dc/dccg/dcn201/dcn201_dccg.c
··· 24 24 */ 25 25 26 26 #include "dcn201_dccg.h" 27 + #include "dcn20/dcn20_dccg.h" 27 28 28 29 #include "reg_helper.h" 29 30 #include "core_types.h" ··· 57 56 .set_fifo_errdet_ovr_en = dccg2_set_fifo_errdet_ovr_en, 58 57 .otg_add_pixel = dccg2_otg_add_pixel, 59 58 .otg_drop_pixel = dccg2_otg_drop_pixel, 60 - .dccg_init = dccg2_init 59 + .dccg_init = dccg2_init, 60 + .refclk_setup = dccg2_refclk_setup, /* Deprecated - for backward compatibility only */ 61 + .allow_clock_gating = dccg2_allow_clock_gating, 62 + .enable_memory_low_power = dccg2_enable_memory_low_power, 63 + .is_s0i3_golden_init_wa_done = dccg2_is_s0i3_golden_init_wa_done /* Deprecated - for backward compatibility only */ 61 64 }; 62 65 63 66 struct dccg *dccg201_create(
+5 -1
drivers/gpu/drm/amd/display/dc/dccg/dcn21/dcn21_dccg.c
··· 103 103 .set_fifo_errdet_ovr_en = dccg2_set_fifo_errdet_ovr_en, 104 104 .otg_add_pixel = dccg2_otg_add_pixel, 105 105 .otg_drop_pixel = dccg2_otg_drop_pixel, 106 - .dccg_init = dccg2_init 106 + .dccg_init = dccg2_init, 107 + .refclk_setup = dccg2_refclk_setup, /* Deprecated - for backward compatibility only */ 108 + .allow_clock_gating = dccg2_allow_clock_gating, 109 + .enable_memory_low_power = dccg2_enable_memory_low_power, 110 + .is_s0i3_golden_init_wa_done = dccg2_is_s0i3_golden_init_wa_done /* Deprecated - for backward compatibility only */ 107 111 }; 108 112 109 113 struct dccg *dccg21_create(
+5 -1
drivers/gpu/drm/amd/display/dc/dccg/dcn30/dcn30_dccg.c
··· 49 49 .set_fifo_errdet_ovr_en = dccg2_set_fifo_errdet_ovr_en, 50 50 .otg_add_pixel = dccg2_otg_add_pixel, 51 51 .otg_drop_pixel = dccg2_otg_drop_pixel, 52 - .dccg_init = dccg2_init 52 + .dccg_init = dccg2_init, 53 + .refclk_setup = dccg2_refclk_setup, /* Deprecated - for backward compatibility only */ 54 + .allow_clock_gating = dccg2_allow_clock_gating, 55 + .enable_memory_low_power = dccg2_enable_memory_low_power, 56 + .is_s0i3_golden_init_wa_done = dccg2_is_s0i3_golden_init_wa_done /* Deprecated - for backward compatibility only */ 53 57 }; 54 58 55 59 struct dccg *dccg3_create(
+5 -1
drivers/gpu/drm/amd/display/dc/dccg/dcn301/dcn301_dccg.c
··· 48 48 .set_fifo_errdet_ovr_en = dccg2_set_fifo_errdet_ovr_en, 49 49 .otg_add_pixel = dccg2_otg_add_pixel, 50 50 .otg_drop_pixel = dccg2_otg_drop_pixel, 51 - .dccg_init = dccg2_init 51 + .dccg_init = dccg2_init, 52 + .refclk_setup = dccg2_refclk_setup, /* Deprecated - for backward compatibility only */ 53 + .allow_clock_gating = dccg2_allow_clock_gating, 54 + .enable_memory_low_power = dccg2_enable_memory_low_power, 55 + .is_s0i3_golden_init_wa_done = dccg2_is_s0i3_golden_init_wa_done /* Deprecated - for backward compatibility only */ 52 56 }; 53 57 54 58 struct dccg *dccg301_create(
+5
drivers/gpu/drm/amd/display/dc/dccg/dcn31/dcn31_dccg.c
··· 26 26 #include "reg_helper.h" 27 27 #include "core_types.h" 28 28 #include "dcn31_dccg.h" 29 + #include "dcn20/dcn20_dccg.h" 29 30 #include "dal_asic_id.h" 30 31 31 32 #define TO_DCN_DCCG(dccg)\ ··· 851 850 .disable_dsc = dccg31_disable_dscclk, 852 851 .enable_dsc = dccg31_enable_dscclk, 853 852 .dccg_read_reg_state = dccg31_read_reg_state, 853 + .refclk_setup = dccg2_refclk_setup, /* Deprecated - for backward compatibility only */ 854 + .allow_clock_gating = dccg2_allow_clock_gating, 855 + .enable_memory_low_power = dccg2_enable_memory_low_power, 856 + .is_s0i3_golden_init_wa_done = dccg2_is_s0i3_golden_init_wa_done /* Deprecated - for backward compatibility only */ 854 857 }; 855 858 856 859 struct dccg *dccg31_create(
+6 -1
drivers/gpu/drm/amd/display/dc/dccg/dcn314/dcn314_dccg.c
··· 29 29 30 30 #include "dcn31/dcn31_dccg.h" 31 31 #include "dcn314_dccg.h" 32 + #include "dcn20/dcn20_dccg.h" 32 33 33 34 #define TO_DCN_DCCG(dccg)\ 34 35 container_of(dccg, struct dcn_dccg, base) ··· 379 378 .trigger_dio_fifo_resync = dccg314_trigger_dio_fifo_resync, 380 379 .set_valid_pixel_rate = dccg314_set_valid_pixel_rate, 381 380 .set_dtbclk_p_src = dccg314_set_dtbclk_p_src, 382 - .dccg_read_reg_state = dccg31_read_reg_state 381 + .dccg_read_reg_state = dccg31_read_reg_state, 382 + .refclk_setup = dccg2_refclk_setup, /* Deprecated - for backward compatibility only */ 383 + .allow_clock_gating = dccg2_allow_clock_gating, 384 + .enable_memory_low_power = dccg2_enable_memory_low_power, 385 + .is_s0i3_golden_init_wa_done = dccg2_is_s0i3_golden_init_wa_done /* Deprecated - for backward compatibility only */ 383 386 }; 384 387 385 388 struct dccg *dccg314_create(
+5
drivers/gpu/drm/amd/display/dc/dccg/dcn32/dcn32_dccg.c
··· 26 26 #include "reg_helper.h" 27 27 #include "core_types.h" 28 28 #include "dcn32_dccg.h" 29 + #include "dcn20/dcn20_dccg.h" 29 30 30 31 #define TO_DCN_DCCG(dccg)\ 31 32 container_of(dccg, struct dcn_dccg, base) ··· 348 347 .get_pixel_rate_div = dccg32_get_pixel_rate_div, 349 348 .trigger_dio_fifo_resync = dccg32_trigger_dio_fifo_resync, 350 349 .set_dtbclk_p_src = dccg32_set_dtbclk_p_src, 350 + .refclk_setup = dccg2_refclk_setup, /* Deprecated - for backward compatibility only */ 351 + .allow_clock_gating = dccg2_allow_clock_gating, 352 + .enable_memory_low_power = dccg2_enable_memory_low_power, 353 + .is_s0i3_golden_init_wa_done = dccg2_is_s0i3_golden_init_wa_done /* Deprecated - for backward compatibility only */ 351 354 }; 352 355 353 356 struct dccg *dccg32_create(
+10 -1
drivers/gpu/drm/amd/display/dc/dccg/dcn35/dcn35_dccg.c
··· 26 26 #include "core_types.h" 27 27 #include "resource.h" 28 28 #include "dcn35_dccg.h" 29 + #include "dcn20/dcn20_dccg.h" 29 30 30 31 #define TO_DCN_DCCG(dccg)\ 31 32 container_of(dccg, struct dcn_dccg, base) ··· 2412 2411 .enable_symclk_se = dccg35_enable_symclk_se_cb, 2413 2412 .disable_symclk_se = dccg35_disable_symclk_se_cb, 2414 2413 .set_dtbclk_p_src = dccg35_set_dtbclk_p_src_cb, 2414 + .refclk_setup = dccg2_refclk_setup, /* Deprecated - for backward compatibility only */ 2415 + .allow_clock_gating = dccg2_allow_clock_gating, 2416 + .enable_memory_low_power = dccg2_enable_memory_low_power, 2417 + .is_s0i3_golden_init_wa_done = dccg2_is_s0i3_golden_init_wa_done /* Deprecated - for backward compatibility only */ 2415 2418 }; 2416 2419 2417 2420 static const struct dccg_funcs dccg35_funcs = { ··· 2447 2442 .enable_symclk_se = dccg35_enable_symclk_se, 2448 2443 .disable_symclk_se = dccg35_disable_symclk_se, 2449 2444 .set_dtbclk_p_src = dccg35_set_dtbclk_p_src, 2445 + .refclk_setup = dccg2_refclk_setup, /* Deprecated - for backward compatibility only */ 2446 + .allow_clock_gating = dccg2_allow_clock_gating, 2447 + .enable_memory_low_power = dccg2_enable_memory_low_power, 2448 + .is_s0i3_golden_init_wa_done = dccg2_is_s0i3_golden_init_wa_done, /* Deprecated - for backward compatibility only */ 2450 2449 .dccg_root_gate_disable_control = dccg35_root_gate_disable_control, 2451 - .dccg_read_reg_state = dccg31_read_reg_state, 2450 + .dccg_read_reg_state = dccg31_read_reg_state 2452 2451 }; 2453 2452 2454 2453 struct dccg *dccg35_create(
+7 -10
drivers/gpu/drm/amd/display/dc/dccg/dcn401/dcn401_dccg.c
··· 27 27 #include "core_types.h" 28 28 #include "dcn401_dccg.h" 29 29 #include "dcn31/dcn31_dccg.h" 30 + #include "dcn20/dcn20_dccg.h" 30 31 31 32 /* 32 33 #include "dmub_common.h" ··· 596 595 597 596 bool enable = false; 598 597 599 - if (params->otg_inst > 3) { 600 - /* dcn401 only has 4 instances */ 601 - BREAK_TO_DEBUGGER(); 602 - return; 603 - } 604 - if (!params->refclk_hz) { 605 - BREAK_TO_DEBUGGER(); 606 - return; 607 - } 608 - 609 598 if (!dc_is_tmds_signal(params->signal)) { 610 599 uint64_t dto_integer; 611 600 uint64_t dto_phase_hz; 612 601 uint64_t dto_modulo_hz = params->refclk_hz; 613 602 614 603 enable = true; 604 + 605 + if (!params->refclk_hz) { 606 + BREAK_TO_DEBUGGER(); 607 + return; 608 + } 615 609 616 610 /* Set DTO values: 617 611 * int = target_pix_rate / reference_clock ··· 862 866 .update_dpp_dto = dccg401_update_dpp_dto, 863 867 .get_dccg_ref_freq = dccg401_get_dccg_ref_freq, 864 868 .dccg_init = dccg401_init, 869 + .allow_clock_gating = dccg2_allow_clock_gating, 865 870 .set_dpstreamclk = dccg401_set_dpstreamclk, 866 871 .enable_symclk32_se = dccg31_enable_symclk32_se, 867 872 .disable_symclk32_se = dccg31_disable_symclk32_se,
-3
drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c
··· 100 100 static enum mi_tiling_format get_mi_tiling( 101 101 struct dc_tiling_info *tiling_info) 102 102 { 103 - ASSERT(tiling_info->gfxversion == DcGfxVersion8); 104 103 switch (tiling_info->gfx8.array_mode) { 105 104 case DC_ARRAY_1D_TILED_THIN1: 106 105 case DC_ARRAY_1D_TILED_THICK: ··· 433 434 struct dce_mem_input *dce_mi, const struct dc_tiling_info *info) 434 435 { 435 436 if (dce_mi->masks->GRPH_SW_MODE) { /* GFX9 */ 436 - ASSERT(info->gfxversion == DcGfxVersion9); 437 437 REG_UPDATE_6(GRPH_CONTROL, 438 438 GRPH_SW_MODE, info->gfx9.swizzle, 439 439 GRPH_NUM_BANKS, log_2(info->gfx9.num_banks), ··· 447 449 } 448 450 449 451 if (dce_mi->masks->GRPH_MICRO_TILE_MODE) { /* GFX8 */ 450 - ASSERT(info->gfxversion == DcGfxVersion8); 451 452 REG_UPDATE_9(GRPH_CONTROL, 452 453 GRPH_NUM_BANKS, info->gfx8.num_banks, 453 454 GRPH_BANK_WIDTH, info->gfx8.bank_width,
+19 -4
drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c
··· 1498 1498 { 1499 1499 struct dce110_stream_encoder *enc110 = DCE110STRENC_FROM_STRENC(enc); 1500 1500 1501 - REG_UPDATE(DIG_FE_CNTL, DIG_SOURCE_SELECT, tg_inst); 1501 + if (enc->id == ENGINE_ID_DACA || enc->id == ENGINE_ID_DACB) 1502 + REG_UPDATE(DAC_SOURCE_SELECT, DAC_SOURCE_SELECT, tg_inst); 1503 + else 1504 + REG_UPDATE(DIG_FE_CNTL, DIG_SOURCE_SELECT, tg_inst); 1502 1505 } 1503 1506 1504 1507 static unsigned int dig_source_otg( ··· 1510 1507 uint32_t tg_inst = 0; 1511 1508 struct dce110_stream_encoder *enc110 = DCE110STRENC_FROM_STRENC(enc); 1512 1509 1513 - REG_GET(DIG_FE_CNTL, DIG_SOURCE_SELECT, &tg_inst); 1510 + if (enc->id == ENGINE_ID_DACA || enc->id == ENGINE_ID_DACB) 1511 + REG_GET(DAC_SOURCE_SELECT, DAC_SOURCE_SELECT, &tg_inst); 1512 + else 1513 + REG_GET(DIG_FE_CNTL, DIG_SOURCE_SELECT, &tg_inst); 1514 1514 1515 1515 return tg_inst; 1516 1516 } ··· 1574 1568 enc110->se_mask = se_mask; 1575 1569 } 1576 1570 1577 - static const struct stream_encoder_funcs dce110_an_str_enc_funcs = {}; 1571 + static const struct stream_encoder_funcs dce110_an_str_enc_funcs = { 1572 + .dig_connect_to_otg = dig_connect_to_otg, 1573 + .dig_source_otg = dig_source_otg, 1574 + }; 1578 1575 1579 1576 void dce110_analog_stream_encoder_construct( 1580 1577 struct dce110_stream_encoder *enc110, 1581 1578 struct dc_context *ctx, 1582 1579 struct dc_bios *bp, 1583 - enum engine_id eng_id) 1580 + enum engine_id eng_id, 1581 + const struct dce110_stream_enc_registers *regs, 1582 + const struct dce_stream_encoder_shift *se_shift, 1583 + const struct dce_stream_encoder_mask *se_mask) 1584 1584 { 1585 1585 enc110->base.funcs = &dce110_an_str_enc_funcs; 1586 1586 enc110->base.ctx = ctx; 1587 1587 enc110->base.id = eng_id; 1588 1588 enc110->base.bp = bp; 1589 + enc110->regs = regs; 1590 + enc110->se_shift = se_shift; 1591 + enc110->se_mask = se_mask; 1589 1592 }
+10 -2
drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.h
··· 65 65 SRI(AFMT_60958_1, DIG, id), \ 66 66 SRI(AFMT_60958_2, DIG, id), \ 67 67 SRI(DIG_FE_CNTL, DIG, id), \ 68 + SR(DAC_SOURCE_SELECT), \ 68 69 SRI(HDMI_CONTROL, DIG, id), \ 69 70 SRI(HDMI_GC, DIG, id), \ 70 71 SRI(HDMI_GENERIC_PACKET_CONTROL0, DIG, id), \ ··· 291 290 #define SE_COMMON_MASK_SH_LIST_DCE80_100(mask_sh)\ 292 291 SE_COMMON_MASK_SH_LIST_DCE_COMMON(mask_sh),\ 293 292 SE_SF(TMDS_CNTL, TMDS_PIXEL_ENCODING, mask_sh),\ 294 - SE_SF(TMDS_CNTL, TMDS_COLOR_FORMAT, mask_sh) 293 + SE_SF(TMDS_CNTL, TMDS_COLOR_FORMAT, mask_sh),\ 294 + SE_SF(DAC_SOURCE_SELECT, DAC_SOURCE_SELECT, mask_sh) 295 295 296 296 #define SE_COMMON_MASK_SH_LIST_DCE110(mask_sh)\ 297 297 SE_COMMON_MASK_SH_LIST_DCE_COMMON(mask_sh),\ ··· 496 494 uint8_t DP_VID_N_MUL; 497 495 uint8_t DP_VID_M_DOUBLE_VALUE_EN; 498 496 uint8_t DIG_SOURCE_SELECT; 497 + uint8_t DAC_SOURCE_SELECT; 499 498 }; 500 499 501 500 struct dce_stream_encoder_mask { ··· 629 626 uint32_t DP_VID_N_MUL; 630 627 uint32_t DP_VID_M_DOUBLE_VALUE_EN; 631 628 uint32_t DIG_SOURCE_SELECT; 629 + uint32_t DAC_SOURCE_SELECT; 632 630 }; 633 631 634 632 struct dce110_stream_enc_registers { ··· 657 653 uint32_t AFMT_60958_1; 658 654 uint32_t AFMT_60958_2; 659 655 uint32_t DIG_FE_CNTL; 656 + uint32_t DAC_SOURCE_SELECT; 660 657 uint32_t DP_MSE_RATE_CNTL; 661 658 uint32_t DP_MSE_RATE_UPDATE; 662 659 uint32_t DP_PIXEL_FORMAT; ··· 717 712 struct dce110_stream_encoder *enc110, 718 713 struct dc_context *ctx, 719 714 struct dc_bios *bp, 720 - enum engine_id eng_id); 715 + enum engine_id eng_id, 716 + const struct dce110_stream_enc_registers *regs, 717 + const struct dce_stream_encoder_shift *se_shift, 718 + const struct dce_stream_encoder_mask *se_mask); 721 719 722 720 void dce110_se_audio_mute_control( 723 721 struct stream_encoder *enc, bool mute);
-3
drivers/gpu/drm/amd/display/dc/dce110/dce110_mem_input_v.c
··· 165 165 const struct dc_tiling_info *info, 166 166 const enum surface_pixel_format pixel_format) 167 167 { 168 - ASSERT(info->gfxversion == DcGfxVersion8); 169 - 170 168 uint32_t value = 0; 171 169 172 170 set_reg_field_value(value, info->gfx8.num_banks, ··· 541 543 else 542 544 bpp = bpp_8; 543 545 544 - ASSERT(tiling_info->gfxversion == DcGfxVersion8); 545 546 switch (tiling_info->gfx8.array_mode) { 546 547 case DC_ARRAY_1D_TILED_THIN1: 547 548 case DC_ARRAY_1D_TILED_THICK:
+1 -1
drivers/gpu/drm/amd/display/dc/dm_helpers.h
··· 209 209 struct dc_sink *sink); 210 210 void dm_helpers_override_panel_settings( 211 211 struct dc_context *ctx, 212 - struct dc_panel_config *config); 212 + struct dc_link *link); 213 213 int dm_helper_dmub_aux_transfer_sync( 214 214 struct dc_context *ctx, 215 215 const struct dc_link *link,
-1
drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calcs.c
··· 1006 1006 1007 1007 v->source_pixel_format[input_idx] = tl_pixel_format_to_bw_defs( 1008 1008 pipe->plane_state->format); 1009 - ASSERT(pipe->plane_state->tiling_info.gfxversion == DcGfxVersion9); 1010 1009 v->source_surface_mode[input_idx] = tl_sw_mode_to_bw_defs( 1011 1010 pipe->plane_state->tiling_info.gfx9.swizzle); 1012 1011 v->lb_bit_per_pixel[input_idx] = tl_lb_bpp_to_int(pipe->plane_res.scl_data.lb_params.depth);
-3
drivers/gpu/drm/amd/display/dc/dml2_0/dml21/dml21_translation_helper.c
··· 460 460 case DcGfxAddr3: 461 461 surface->tiling = gfx_addr3_to_dml2_swizzle_mode(plane_state->tiling_info.gfx_addr3.swizzle); 462 462 break; 463 - case DcGfxBase: 464 - surface->tiling = dml2_sw_linear; 465 - break; 466 463 } 467 464 } 468 465
+5 -5
drivers/gpu/drm/amd/display/dc/hubbub/dcn401/dcn401_hubbub.c
··· 41 41 #define FN(reg_name, field_name) \ 42 42 hubbub2->shifts->field_name, hubbub2->masks->field_name 43 43 44 - static void dcn401_init_crb(struct hubbub *hubbub) 44 + void dcn401_init_crb(struct hubbub *hubbub) 45 45 { 46 46 struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub); 47 47 ··· 1110 1110 return true; 1111 1111 } 1112 1112 1113 - static void dcn401_program_det_segments(struct hubbub *hubbub, int hubp_inst, unsigned det_buffer_size_seg) 1113 + void dcn401_program_det_segments(struct hubbub *hubbub, int hubp_inst, unsigned det_buffer_size_seg) 1114 1114 { 1115 1115 struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub); 1116 1116 ··· 1147 1147 } 1148 1148 } 1149 1149 1150 - static void dcn401_program_compbuf_segments(struct hubbub *hubbub, unsigned compbuf_size_seg, bool safe_to_increase) 1150 + void dcn401_program_compbuf_segments(struct hubbub *hubbub, unsigned compbuf_size_seg, bool safe_to_increase) 1151 1151 { 1152 1152 struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub); 1153 1153 ··· 1170 1170 } 1171 1171 } 1172 1172 1173 - static void dcn401_wait_for_det_update(struct hubbub *hubbub, int hubp_inst) 1173 + void dcn401_wait_for_det_update(struct hubbub *hubbub, int hubp_inst) 1174 1174 { 1175 1175 struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub); 1176 1176 ··· 1192 1192 } 1193 1193 } 1194 1194 1195 - static bool dcn401_program_arbiter(struct hubbub *hubbub, struct dml2_display_arb_regs *arb_regs, bool safe_to_lower) 1195 + bool dcn401_program_arbiter(struct hubbub *hubbub, struct dml2_display_arb_regs *arb_regs, bool safe_to_lower) 1196 1196 { 1197 1197 struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub); 1198 1198
+10
drivers/gpu/drm/amd/display/dc/hubbub/dcn401/dcn401_hubbub.h
··· 194 194 const struct dc_dcc_surface_param *input, 195 195 struct dc_surface_dcc_cap *output); 196 196 197 + bool dcn401_program_arbiter( 198 + struct hubbub *hubbub, 199 + struct dml2_display_arb_regs *arb_regs, 200 + bool safe_to_lower); 201 + 197 202 void hubbub401_construct(struct dcn20_hubbub *hubbub2, 198 203 struct dc_context *ctx, 199 204 const struct dcn_hubbub_registers *hubbub_regs, ··· 207 202 int det_size_kb, 208 203 int pixel_chunk_size_kb, 209 204 int config_return_buffer_size_kb); 205 + 206 + void dcn401_program_det_segments(struct hubbub *hubbub, int hubp_inst, unsigned det_buffer_size_seg); 207 + void dcn401_program_compbuf_segments(struct hubbub *hubbub, unsigned compbuf_size_seg, bool safe_to_increase); 208 + void dcn401_wait_for_det_update(struct hubbub *hubbub, int hubp_inst); 209 + void dcn401_init_crb(struct hubbub *hubbub); 210 210 211 211 #endif
+12 -19
drivers/gpu/drm/amd/display/dc/hubp/dcn10/dcn10_hubp.c
··· 145 145 { 146 146 struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp); 147 147 148 - ASSERT(info->gfxversion == DcGfxVersion9 || info->gfxversion == DcGfxBase); 148 + REG_UPDATE_6(DCSURF_ADDR_CONFIG, 149 + NUM_PIPES, log_2(info->gfx9.num_pipes), 150 + NUM_BANKS, log_2(info->gfx9.num_banks), 151 + PIPE_INTERLEAVE, info->gfx9.pipe_interleave, 152 + NUM_SE, log_2(info->gfx9.num_shader_engines), 153 + NUM_RB_PER_SE, log_2(info->gfx9.num_rb_per_se), 154 + MAX_COMPRESSED_FRAGS, log_2(info->gfx9.max_compressed_frags)); 149 155 150 - if (info->gfxversion == DcGfxVersion9) { 151 - REG_UPDATE_6(DCSURF_ADDR_CONFIG, 152 - NUM_PIPES, log_2(info->gfx9.num_pipes), 153 - NUM_BANKS, log_2(info->gfx9.num_banks), 154 - PIPE_INTERLEAVE, info->gfx9.pipe_interleave, 155 - NUM_SE, log_2(info->gfx9.num_shader_engines), 156 - NUM_RB_PER_SE, log_2(info->gfx9.num_rb_per_se), 157 - MAX_COMPRESSED_FRAGS, log_2(info->gfx9.max_compressed_frags)); 158 - 159 - REG_UPDATE_4(DCSURF_TILING_CONFIG, 160 - SW_MODE, info->gfx9.swizzle, 161 - META_LINEAR, info->gfx9.meta_linear, 162 - RB_ALIGNED, info->gfx9.rb_aligned, 163 - PIPE_ALIGNED, info->gfx9.pipe_aligned); 164 - } else { 165 - hubp1_clear_tiling(&hubp1->base); 166 - } 167 - 156 + REG_UPDATE_4(DCSURF_TILING_CONFIG, 157 + SW_MODE, info->gfx9.swizzle, 158 + META_LINEAR, info->gfx9.meta_linear, 159 + RB_ALIGNED, info->gfx9.rb_aligned, 160 + PIPE_ALIGNED, info->gfx9.pipe_aligned); 168 161 } 169 162 170 163 void hubp1_program_size(
+9 -15
drivers/gpu/drm/amd/display/dc/hubp/dcn20/dcn20_hubp.c
··· 313 313 const struct dc_tiling_info *info, 314 314 const enum surface_pixel_format pixel_format) 315 315 { 316 - ASSERT(info->gfxversion == DcGfxVersion9 || info->gfxversion == DcGfxBase); 316 + REG_UPDATE_3(DCSURF_ADDR_CONFIG, 317 + NUM_PIPES, log_2(info->gfx9.num_pipes), 318 + PIPE_INTERLEAVE, info->gfx9.pipe_interleave, 319 + MAX_COMPRESSED_FRAGS, log_2(info->gfx9.max_compressed_frags)); 317 320 318 - if (info->gfxversion == DcGfxVersion9) { 319 - REG_UPDATE_3(DCSURF_ADDR_CONFIG, 320 - NUM_PIPES, log_2(info->gfx9.num_pipes), 321 - PIPE_INTERLEAVE, info->gfx9.pipe_interleave, 322 - MAX_COMPRESSED_FRAGS, log_2(info->gfx9.max_compressed_frags)); 323 - 324 - REG_UPDATE_4(DCSURF_TILING_CONFIG, 325 - SW_MODE, info->gfx9.swizzle, 326 - META_LINEAR, 0, 327 - RB_ALIGNED, 0, 328 - PIPE_ALIGNED, 0); 329 - } else { 330 - hubp2_clear_tiling(&hubp2->base); 331 - } 321 + REG_UPDATE_4(DCSURF_TILING_CONFIG, 322 + SW_MODE, info->gfx9.swizzle, 323 + META_LINEAR, 0, 324 + RB_ALIGNED, 0, 325 + PIPE_ALIGNED, 0); 332 326 } 333 327 334 328 void hubp2_program_size(
+9 -15
drivers/gpu/drm/amd/display/dc/hubp/dcn30/dcn30_hubp.c
··· 321 321 const struct dc_tiling_info *info, 322 322 const enum surface_pixel_format pixel_format) 323 323 { 324 - ASSERT(info->gfxversion == DcGfxVersion9 || info->gfxversion == DcGfxBase); 324 + REG_UPDATE_4(DCSURF_ADDR_CONFIG, 325 + NUM_PIPES, log_2(info->gfx9.num_pipes), 326 + PIPE_INTERLEAVE, info->gfx9.pipe_interleave, 327 + MAX_COMPRESSED_FRAGS, log_2(info->gfx9.max_compressed_frags), 328 + NUM_PKRS, log_2(info->gfx9.num_pkrs)); 325 329 326 - if (info->gfxversion == DcGfxVersion9) { 327 - REG_UPDATE_4(DCSURF_ADDR_CONFIG, 328 - NUM_PIPES, log_2(info->gfx9.num_pipes), 329 - PIPE_INTERLEAVE, info->gfx9.pipe_interleave, 330 - MAX_COMPRESSED_FRAGS, log_2(info->gfx9.max_compressed_frags), 331 - NUM_PKRS, log_2(info->gfx9.num_pkrs)); 332 - 333 - REG_UPDATE_3(DCSURF_TILING_CONFIG, 334 - SW_MODE, info->gfx9.swizzle, 335 - META_LINEAR, info->gfx9.meta_linear, 336 - PIPE_ALIGNED, info->gfx9.pipe_aligned); 337 - } else { 338 - hubp3_clear_tiling(&hubp2->base); 339 - } 330 + REG_UPDATE_3(DCSURF_TILING_CONFIG, 331 + SW_MODE, info->gfx9.swizzle, 332 + META_LINEAR, info->gfx9.meta_linear, 333 + PIPE_ALIGNED, info->gfx9.pipe_aligned); 340 334 341 335 } 342 336
+1 -6
drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.c
··· 589 589 * 590 590 * DIM_TYPE field in DCSURF_TILING for Display is always 1 (2D dimension) which is HW default. 591 591 */ 592 - if (info->gfxversion == DcGfxAddr3) { 593 - REG_UPDATE(DCSURF_TILING_CONFIG, SW_MODE, info->gfx_addr3.swizzle); 594 - } else { 595 - /* linear */ 596 - REG_UPDATE(DCSURF_TILING_CONFIG, SW_MODE, 0); 597 - } 592 + REG_UPDATE(DCSURF_TILING_CONFIG, SW_MODE, info->gfx_addr3.swizzle); 598 593 } 599 594 600 595 void hubp401_program_size(
+13 -46
drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c
··· 660 660 } 661 661 } 662 662 663 - static void 664 - dce110_dac_encoder_control(struct pipe_ctx *pipe_ctx, bool enable) 665 - { 666 - struct dc_link *link = pipe_ctx->stream->link; 667 - struct dc_bios *bios = link->ctx->dc_bios; 668 - struct bp_encoder_control encoder_control = {0}; 669 - 670 - encoder_control.action = enable ? ENCODER_CONTROL_ENABLE : ENCODER_CONTROL_DISABLE; 671 - encoder_control.engine_id = link->link_enc->analog_engine; 672 - encoder_control.pixel_clock = pipe_ctx->stream->timing.pix_clk_100hz / 10; 673 - 674 - bios->funcs->encoder_control(bios, &encoder_control); 675 - } 676 - 677 663 void dce110_enable_stream(struct pipe_ctx *pipe_ctx) 678 664 { 679 665 enum dc_lane_count lane_count = ··· 690 704 691 705 tg->funcs->set_early_control(tg, early_control); 692 706 693 - if (dc_is_rgb_signal(pipe_ctx->stream->signal)) 694 - dce110_dac_encoder_control(pipe_ctx, true); 695 707 } 696 708 697 709 static enum bp_result link_transmitter_control( ··· 1183 1199 dccg->funcs->disable_symclk_se(dccg, stream_enc->stream_enc_inst, 1184 1200 link_enc->transmitter - TRANSMITTER_UNIPHY_A); 1185 1201 } 1186 - 1187 - if (dc_is_rgb_signal(pipe_ctx->stream->signal)) 1188 - dce110_dac_encoder_control(pipe_ctx, false); 1189 1202 } 1190 1203 1191 1204 void dce110_unblank_stream(struct pipe_ctx *pipe_ctx, ··· 1565 1584 return DC_OK; 1566 1585 } 1567 1586 1568 - static void 1569 - dce110_select_crtc_source(struct pipe_ctx *pipe_ctx) 1570 - { 1571 - struct dc_link *link = pipe_ctx->stream->link; 1572 - struct dc_bios *bios = link->ctx->dc_bios; 1573 - struct bp_crtc_source_select crtc_source_select = {0}; 1574 - enum engine_id engine_id = link->link_enc->preferred_engine; 1575 - 1576 - if (dc_is_rgb_signal(pipe_ctx->stream->signal)) 1577 - engine_id = link->link_enc->analog_engine; 1578 - 1579 - crtc_source_select.controller_id = CONTROLLER_ID_D0 + pipe_ctx->stream_res.tg->inst; 1580 - crtc_source_select.color_depth = pipe_ctx->stream->timing.display_color_depth; 1581 
- crtc_source_select.engine_id = engine_id; 1582 - crtc_source_select.sink_signal = pipe_ctx->stream->signal; 1583 - 1584 - bios->funcs->select_crtc_source(bios, &crtc_source_select); 1585 - } 1586 - 1587 1587 enum dc_status dce110_apply_single_controller_ctx_to_hw( 1588 1588 struct pipe_ctx *pipe_ctx, 1589 1589 struct dc_state *context, ··· 1582 1620 1583 1621 if (hws->funcs.disable_stream_gating) { 1584 1622 hws->funcs.disable_stream_gating(dc, pipe_ctx); 1585 - } 1586 - 1587 - if (pipe_ctx->stream->signal == SIGNAL_TYPE_RGB) { 1588 - dce110_select_crtc_source(pipe_ctx); 1589 1623 } 1590 1624 1591 1625 if (pipe_ctx->stream_res.audio != NULL) { ··· 1663 1705 pipe_ctx->stream_res.tg->funcs->set_static_screen_control( 1664 1706 pipe_ctx->stream_res.tg, event_triggers, 2); 1665 1707 1666 - if (!dc_is_virtual_signal(pipe_ctx->stream->signal) && 1667 - !dc_is_rgb_signal(pipe_ctx->stream->signal)) 1708 + if (!dc_is_virtual_signal(pipe_ctx->stream->signal)) 1668 1709 pipe_ctx->stream_res.stream_enc->funcs->dig_connect_to_otg( 1669 1710 pipe_ctx->stream_res.stream_enc, 1670 1711 pipe_ctx->stream_res.tg->inst); ··· 1921 1964 1922 1965 get_edp_streams(context, edp_streams, &edp_stream_num); 1923 1966 1924 - /* Check fastboot support, disable on DCE 6-8 because of blank screens */ 1925 - if (edp_num && edp_stream_num && dc->ctx->dce_version < DCE_VERSION_10_0) { 1967 + /* Check fastboot support, disable on DCE 6-8-10 because of blank screens */ 1968 + if (edp_num && edp_stream_num && dc->ctx->dce_version > DCE_VERSION_10_0) { 1926 1969 for (i = 0; i < edp_num; i++) { 1927 1970 edp_link = edp_links[i]; 1928 1971 if (edp_link != edp_streams[0]->link) ··· 3261 3304 link->phy_state.symclk_state = SYMCLK_ON_TX_ON; 3262 3305 } 3263 3306 3307 + static void dce110_enable_analog_link_output( 3308 + struct dc_link *link, 3309 + uint32_t pix_clk_100hz) 3310 + { 3311 + link->link_enc->funcs->enable_analog_output( 3312 + link->link_enc, 3313 + pix_clk_100hz); 3314 + } 3315 + 3264 3316 
void dce110_enable_dp_link_output( 3265 3317 struct dc_link *link, 3266 3318 const struct link_resource *link_res, ··· 3407 3441 .enable_lvds_link_output = dce110_enable_lvds_link_output, 3408 3442 .enable_tmds_link_output = dce110_enable_tmds_link_output, 3409 3443 .enable_dp_link_output = dce110_enable_dp_link_output, 3444 + .enable_analog_link_output = dce110_enable_analog_link_output, 3410 3445 .disable_link_output = dce110_disable_link_output, 3411 3446 }; 3412 3447
+2 -3
drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c
··· 1887 1887 1888 1888 if (!dc->debug.disable_clock_gate) { 1889 1889 /* enable all DCN clock gating */ 1890 - REG_WRITE(DCCG_GATE_DISABLE_CNTL, 0); 1891 - 1892 - REG_WRITE(DCCG_GATE_DISABLE_CNTL2, 0); 1890 + if (dc->res_pool->dccg && dc->res_pool->dccg->funcs && dc->res_pool->dccg->funcs->allow_clock_gating) 1891 + dc->res_pool->dccg->funcs->allow_clock_gating(dc->res_pool->dccg, true); 1893 1892 1894 1893 REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0); 1895 1894 }
+8 -21
drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c
··· 357 357 358 358 void dcn20_dccg_init(struct dce_hwseq *hws) 359 359 { 360 - /* 361 - * set MICROSECOND_TIME_BASE_DIV 362 - * 100Mhz refclk -> 0x120264 363 - * 27Mhz refclk -> 0x12021b 364 - * 48Mhz refclk -> 0x120230 365 - * 366 - */ 367 - REG_WRITE(MICROSECOND_TIME_BASE_DIV, 0x120264); 360 + struct dc *dc = hws->ctx->dc; 368 361 369 - /* 370 - * set MILLISECOND_TIME_BASE_DIV 371 - * 100Mhz refclk -> 0x1186a0 372 - * 27Mhz refclk -> 0x106978 373 - * 48Mhz refclk -> 0x10bb80 374 - * 375 - */ 376 - REG_WRITE(MILLISECOND_TIME_BASE_DIV, 0x1186a0); 377 - 378 - /* This value is dependent on the hardware pipeline delay so set once per SOC */ 379 - REG_WRITE(DISPCLK_FREQ_CHANGE_CNTL, 0xe01003c); 362 + if (dc->res_pool->dccg && dc->res_pool->dccg->funcs && dc->res_pool->dccg->funcs->dccg_init) 363 + dc->res_pool->dccg->funcs->dccg_init(dc->res_pool->dccg); 380 364 } 381 365 382 366 void dcn20_disable_vga( ··· 3140 3156 3141 3157 dcn10_hubbub_global_timer_enable(dc->res_pool->hubbub, true, 2); 3142 3158 3143 - if (REG(REFCLK_CNTL)) 3144 - REG_WRITE(REFCLK_CNTL, 0); 3159 + if (hws->funcs.dccg_init) 3160 + hws->funcs.dccg_init(hws); 3161 + 3162 + if (dc->res_pool->dccg && dc->res_pool->dccg->funcs && dc->res_pool->dccg->funcs->refclk_setup) 3163 + dc->res_pool->dccg->funcs->refclk_setup(dc->res_pool->dccg); 3145 3164 // 3146 3165 3147 3166
+2 -3
drivers/gpu/drm/amd/display/dc/hwss/dcn201/dcn201_hwseq.c
··· 367 367 368 368 if (!dc->debug.disable_clock_gate) { 369 369 /* enable all DCN clock gating */ 370 - REG_WRITE(DCCG_GATE_DISABLE_CNTL, 0); 371 - 372 - REG_WRITE(DCCG_GATE_DISABLE_CNTL2, 0); 370 + if (dc->res_pool->dccg && dc->res_pool->dccg->funcs && dc->res_pool->dccg->funcs->allow_clock_gating) 371 + dc->res_pool->dccg->funcs->allow_clock_gating(dc->res_pool->dccg, true); 373 372 374 373 REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0); 375 374 }
+4 -5
drivers/gpu/drm/amd/display/dc/hwss/dcn21/dcn21_hwseq.c
··· 33 33 #include "vmid.h" 34 34 #include "reg_helper.h" 35 35 #include "hw/clk_mgr.h" 36 + #include "hw/dccg.h" 36 37 #include "dc_dmub_srv.h" 37 38 #include "abm.h" 38 39 #include "link_service.h" ··· 88 87 89 88 bool dcn21_s0i3_golden_init_wa(struct dc *dc) 90 89 { 91 - struct dce_hwseq *hws = dc->hwseq; 92 - uint32_t value = 0; 90 + if (dc->res_pool->dccg && dc->res_pool->dccg->funcs && dc->res_pool->dccg->funcs->is_s0i3_golden_init_wa_done) 91 + return !dc->res_pool->dccg->funcs->is_s0i3_golden_init_wa_done(dc->res_pool->dccg); 93 92 94 - value = REG_READ(MICROSECOND_TIME_BASE_DIV); 95 - 96 - return value != 0x00120464; 93 + return false; 97 94 } 98 95 99 96 void dcn21_exit_optimized_pwr_state(
+2 -3
drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c
··· 801 801 802 802 if (!dc->debug.disable_clock_gate) { 803 803 /* enable all DCN clock gating */ 804 - REG_WRITE(DCCG_GATE_DISABLE_CNTL, 0); 805 - 806 - REG_WRITE(DCCG_GATE_DISABLE_CNTL2, 0); 804 + if (dc->res_pool->dccg && dc->res_pool->dccg->funcs && dc->res_pool->dccg->funcs->allow_clock_gating) 805 + dc->res_pool->dccg->funcs->allow_clock_gating(dc->res_pool->dccg, true); 807 806 808 807 REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0); 809 808 }
+2 -3
drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c
··· 247 247 248 248 if (!dc->debug.disable_clock_gate) { 249 249 /* enable all DCN clock gating */ 250 - REG_WRITE(DCCG_GATE_DISABLE_CNTL, 0); 251 - 252 - REG_WRITE(DCCG_GATE_DISABLE_CNTL2, 0); 250 + if (dc->res_pool->dccg && dc->res_pool->dccg->funcs && dc->res_pool->dccg->funcs->allow_clock_gating) 251 + dc->res_pool->dccg->funcs->allow_clock_gating(dc->res_pool->dccg, true); 253 252 254 253 REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0); 255 254 }
+5 -6
drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c
··· 486 486 struct dpp *dpp_base = pipe_ctx->plane_res.dpp; 487 487 int mpcc_id = pipe_ctx->plane_res.hubp->inst; 488 488 struct mpc *mpc = pipe_ctx->stream_res.opp->ctx->dc->res_pool->mpc; 489 - bool result = true; 489 + bool rval, result; 490 490 const struct pwl_params *lut_params = NULL; 491 491 492 492 // 1D LUT ··· 509 509 lut_params = &plane_state->in_shaper_func.pwl; 510 510 else if (plane_state->in_shaper_func.type == TF_TYPE_DISTRIBUTED_POINTS) { 511 511 // TODO: dpp_base replace 512 - cm3_helper_translate_curve_to_hw_format(plane_state->ctx, 512 + rval = cm3_helper_translate_curve_to_hw_format(plane_state->ctx, 513 513 &plane_state->in_shaper_func, 514 514 &dpp_base->shaper_params, true); 515 - lut_params = &dpp_base->shaper_params; 515 + lut_params = rval ? &dpp_base->shaper_params : NULL; 516 516 } 517 517 518 518 mpc->funcs->program_shaper(mpc, lut_params, mpcc_id); ··· 963 963 964 964 if (!dc->debug.disable_clock_gate) { 965 965 /* enable all DCN clock gating */ 966 - REG_WRITE(DCCG_GATE_DISABLE_CNTL, 0); 967 - 968 - REG_WRITE(DCCG_GATE_DISABLE_CNTL2, 0); 966 + if (dc->res_pool->dccg && dc->res_pool->dccg->funcs && dc->res_pool->dccg->funcs->allow_clock_gating) 967 + dc->res_pool->dccg->funcs->allow_clock_gating(dc->res_pool->dccg, true); 969 968 970 969 REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0); 971 970 }
+2 -1
drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c
··· 286 286 } 287 287 288 288 if (dc->debug.disable_mem_low_power) { 289 - REG_UPDATE(DC_MEM_GLOBAL_PWR_REQ_CNTL, DC_MEM_GLOBAL_PWR_REQ_DIS, 1); 289 + if (dc->res_pool->dccg && dc->res_pool->dccg->funcs && dc->res_pool->dccg->funcs->enable_memory_low_power) 290 + dc->res_pool->dccg->funcs->enable_memory_low_power(dc->res_pool->dccg, false); 290 291 } 291 292 if (!dcb->funcs->is_accelerated_mode(dcb) && dc->res_pool->hubbub->funcs->init_watermarks) 292 293 dc->res_pool->hubbub->funcs->init_watermarks(dc->res_pool->hubbub);
+2 -3
drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c
··· 326 326 327 327 if (!dc->debug.disable_clock_gate) { 328 328 /* enable all DCN clock gating */ 329 - REG_WRITE(DCCG_GATE_DISABLE_CNTL, 0); 330 - 331 - REG_WRITE(DCCG_GATE_DISABLE_CNTL2, 0); 329 + if (dc->res_pool->dccg && dc->res_pool->dccg->funcs && dc->res_pool->dccg->funcs->allow_clock_gating) 330 + dc->res_pool->dccg->funcs->allow_clock_gating(dc->res_pool->dccg, true); 332 331 333 332 REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0); 334 333 }
+2
drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h
··· 1184 1184 const struct link_resource *link_res, 1185 1185 enum clock_source_id clock_source, 1186 1186 uint32_t pixel_clock); 1187 + void (*enable_analog_link_output)(struct dc_link *link, 1188 + uint32_t pixel_clock); 1187 1189 void (*disable_link_output)(struct dc_link *link, 1188 1190 const struct link_resource *link_res, 1189 1191 enum signal_type signal);
+4
drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h
··· 224 224 void (*otg_drop_pixel)(struct dccg *dccg, 225 225 uint32_t otg_inst); 226 226 void (*dccg_init)(struct dccg *dccg); 227 + void (*refclk_setup)(struct dccg *dccg); /* Deprecated - for backward compatibility only */ 228 + void (*allow_clock_gating)(struct dccg *dccg, bool allow); 229 + void (*enable_memory_low_power)(struct dccg *dccg, bool enable); 230 + bool (*is_s0i3_golden_init_wa_done)(struct dccg *dccg); 227 231 void (*set_dpstreamclk_root_clock_gating)( 228 232 struct dccg *dccg, 229 233 int dp_hpo_inst,
+1 -2
drivers/gpu/drm/amd/display/dc/mpc/dcn32/dcn32_mpc.c
··· 724 724 return false; 725 725 } 726 726 727 - if (mpc->ctx->dc->debug.enable_mem_low_power.bits.mpc) 728 - mpc32_power_on_shaper_3dlut(mpc, mpcc_id, true); 727 + mpc32_power_on_shaper_3dlut(mpc, mpcc_id, true); 729 728 730 729 current_mode = mpc32_get_shaper_current(mpc, mpcc_id); 731 730
+3 -3
drivers/gpu/drm/amd/display/dc/optc/dcn35/dcn35_optc.c
··· 270 270 return true; 271 271 } 272 272 273 - static bool optc35_configure_crc(struct timing_generator *optc, 273 + bool optc35_configure_crc(struct timing_generator *optc, 274 274 const struct crc_params *params) 275 275 { 276 276 struct optc *optc1 = DCN10TG_FROM_TG(optc); ··· 437 437 REG_WRITE(OTG_V_COUNT_STOP_CONTROL2, 0); 438 438 } 439 439 440 - static void optc35_set_long_vtotal( 440 + void optc35_set_long_vtotal( 441 441 struct timing_generator *optc, 442 442 const struct long_vtotal_params *params) 443 443 { ··· 524 524 } 525 525 } 526 526 527 - static void optc35_wait_otg_disable(struct timing_generator *optc) 527 + void optc35_wait_otg_disable(struct timing_generator *optc) 528 528 { 529 529 struct optc *optc1; 530 530 uint32_t is_master_en;
+9
drivers/gpu/drm/amd/display/dc/optc/dcn35/dcn35_optc.h
··· 90 90 91 91 void optc35_set_drr(struct timing_generator *optc, const struct drr_params *params); 92 92 93 + void optc35_set_long_vtotal( 94 + struct timing_generator *optc, 95 + const struct long_vtotal_params *params); 96 + 97 + bool optc35_configure_crc(struct timing_generator *optc, 98 + const struct crc_params *params); 99 + 100 + void optc35_wait_otg_disable(struct timing_generator *optc); 101 + 93 102 #endif /* __DC_OPTC_DCN35_H__ */
+11 -5
drivers/gpu/drm/amd/display/dc/resource/dce100/dce100_resource.c
··· 226 226 link_regs(4), 227 227 link_regs(5), 228 228 link_regs(6), 229 - { .DAC_ENABLE = mmDAC_ENABLE }, 229 + {0} 230 230 }; 231 231 232 232 #define stream_enc_regs(id)\ ··· 242 242 stream_enc_regs(3), 243 243 stream_enc_regs(4), 244 244 stream_enc_regs(5), 245 - stream_enc_regs(6) 245 + stream_enc_regs(6), 246 + {SR(DAC_SOURCE_SELECT),} /* DACA */ 246 247 }; 247 248 248 249 static const struct dce_stream_encoder_shift se_shift = { ··· 492 491 return NULL; 493 492 494 493 if (eng_id == ENGINE_ID_DACA || eng_id == ENGINE_ID_DACB) { 495 - dce110_analog_stream_encoder_construct(enc110, ctx, ctx->dc_bios, eng_id); 494 + dce110_analog_stream_encoder_construct(enc110, ctx, ctx->dc_bios, eng_id, 495 + &stream_enc_regs[eng_id], &se_shift, &se_mask); 496 496 return &enc110->base; 497 497 } 498 498 ··· 640 638 if (!enc110) 641 639 return NULL; 642 640 643 - if (enc_init_data->connector.id == CONNECTOR_ID_VGA) { 641 + if (enc_init_data->connector.id == CONNECTOR_ID_VGA && 642 + enc_init_data->analog_engine != ENGINE_ID_UNKNOWN) { 644 643 dce110_link_encoder_construct(enc110, 645 644 enc_init_data, 646 645 &link_enc_feature, ··· 981 978 struct dc_link *link = stream->link; 982 979 enum engine_id preferred_engine = link->link_enc->preferred_engine; 983 980 984 - if (dc_is_rgb_signal(stream->signal)) 981 + /* Prefer analog engine if the link encoder has one. 982 + * Otherwise, it's an external encoder. 983 + */ 984 + if (dc_is_rgb_signal(stream->signal) && link->link_enc->analog_engine != ENGINE_ID_UNKNOWN) 985 985 preferred_engine = link->link_enc->analog_engine; 986 986 987 987 for (i = 0; i < pool->stream_enc_count; i++) {
+9 -5
drivers/gpu/drm/amd/display/dc/resource/dce60/dce60_resource.c
··· 243 243 link_regs(4), 244 244 link_regs(5), 245 245 {0}, 246 - { .DAC_ENABLE = mmDAC_ENABLE }, 246 + {0} 247 247 }; 248 248 249 249 #define stream_enc_regs(id)\ ··· 258 258 stream_enc_regs(2), 259 259 stream_enc_regs(3), 260 260 stream_enc_regs(4), 261 - stream_enc_regs(5) 261 + stream_enc_regs(5), 262 + {0}, 263 + {SR(DAC_SOURCE_SELECT),} /* DACA */ 262 264 }; 263 265 264 266 static const struct dce_stream_encoder_shift se_shift = { ··· 609 607 return NULL; 610 608 611 609 if (eng_id == ENGINE_ID_DACA || eng_id == ENGINE_ID_DACB) { 612 - dce110_analog_stream_encoder_construct(enc110, ctx, ctx->dc_bios, eng_id); 610 + dce110_analog_stream_encoder_construct(enc110, ctx, ctx->dc_bios, eng_id, 611 + &stream_enc_regs[eng_id], &se_shift, &se_mask); 613 612 return &enc110->base; 614 613 } 615 614 ··· 736 733 if (!enc110) 737 734 return NULL; 738 735 739 - if (enc_init_data->connector.id == CONNECTOR_ID_VGA) { 740 - dce110_link_encoder_construct(enc110, 736 + if (enc_init_data->connector.id == CONNECTOR_ID_VGA && 737 + enc_init_data->analog_engine != ENGINE_ID_UNKNOWN) { 738 + dce60_link_encoder_construct(enc110, 741 739 enc_init_data, 742 740 &link_enc_feature, 743 741 &link_enc_regs[ENGINE_ID_DACA],
+7 -4
drivers/gpu/drm/amd/display/dc/resource/dce80/dce80_resource.c
··· 242 242 link_regs(4), 243 243 link_regs(5), 244 244 link_regs(6), 245 - { .DAC_ENABLE = mmDAC_ENABLE }, 245 + {0} 246 246 }; 247 247 248 248 #define stream_enc_regs(id)\ ··· 258 258 stream_enc_regs(3), 259 259 stream_enc_regs(4), 260 260 stream_enc_regs(5), 261 - stream_enc_regs(6) 261 + stream_enc_regs(6), 262 + {SR(DAC_SOURCE_SELECT),} /* DACA */ 262 263 }; 263 264 264 265 static const struct dce_stream_encoder_shift se_shift = { ··· 615 614 return NULL; 616 615 617 616 if (eng_id == ENGINE_ID_DACA || eng_id == ENGINE_ID_DACB) { 618 - dce110_analog_stream_encoder_construct(enc110, ctx, ctx->dc_bios, eng_id); 617 + dce110_analog_stream_encoder_construct(enc110, ctx, ctx->dc_bios, eng_id, 618 + &stream_enc_regs[eng_id], &se_shift, &se_mask); 619 619 return &enc110->base; 620 620 } 621 621 ··· 742 740 if (!enc110) 743 741 return NULL; 744 742 745 - if (enc_init_data->connector.id == CONNECTOR_ID_VGA) { 743 + if (enc_init_data->connector.id == CONNECTOR_ID_VGA && 744 + enc_init_data->analog_engine != ENGINE_ID_UNKNOWN) { 746 745 dce110_link_encoder_construct(enc110, 747 746 enc_init_data, 748 747 &link_enc_feature,
+1 -2
drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource_helpers.c
··· 401 401 */ 402 402 if (pipe_cnt == 1) { 403 403 pipes[0].pipe.src.det_size_override = DCN3_2_MAX_DET_SIZE; 404 - if (pipe->plane_state && !disable_unbounded_requesting && pipe->plane_state->tiling_info.gfxversion != DcGfxBase && 405 - !(pipe->plane_state->tiling_info.gfxversion == DcGfxVersion9 && pipe->plane_state->tiling_info.gfx9.swizzle == DC_SW_LINEAR)) { 404 + if (pipe->plane_state && !disable_unbounded_requesting && pipe->plane_state->tiling_info.gfx9.swizzle != DC_SW_LINEAR) { 406 405 if (!is_dual_plane(pipe->plane_state->format)) { 407 406 pipes[0].pipe.src.det_size_override = DCN3_2_DEFAULT_DET_SIZE; 408 407 pipes[0].pipe.src.unbounded_req_mode = true;
+2 -2
drivers/gpu/drm/amd/display/dc/sspl/dc_spl_isharp_filters.c
··· 293 293 }; 294 294 295 295 /* Pre-generated 1DLUT for given setup and sharpness level */ 296 - struct isharp_1D_lut_pregen filter_isharp_1D_lut_pregen[NUM_SHARPNESS_SETUPS] = { 296 + static struct isharp_1D_lut_pregen filter_isharp_1D_lut_pregen[NUM_SHARPNESS_SETUPS] = { 297 297 { 298 298 0, 0, 299 299 { ··· 332 332 }, 333 333 }; 334 334 335 - struct scale_ratio_to_sharpness_level_adj sharpness_level_adj[NUM_SHARPNESS_ADJ_LEVELS] = { 335 + static struct scale_ratio_to_sharpness_level_adj sharpness_level_adj[NUM_SHARPNESS_ADJ_LEVELS] = { 336 336 {1125, 1000, 0}, 337 337 {11, 10, 1}, 338 338 {1075, 1000, 2},