Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

Merge tag 'drm-fixes-for-v4.17-rc2' of git://people.freedesktop.org/~airlied/linux

Pull drm fixes from Dave Airlie:
"Exynos, i915, vc4, amdgpu fixes.

i915:
- an oops fix
- two race fixes
- some gvt fixes

amdgpu:
- dark screen fix
- clk/voltage fix
- vega12 smu fix

vc4:
- memory leak fix

exynos just drops some code"

* tag 'drm-fixes-for-v4.17-rc2' of git://people.freedesktop.org/~airlied/linux: (23 commits)
drm/amd/powerplay: header file interface to SMU update
drm/amd/pp: Fix bug voltage can't be OD separately on VI
drm/amd/display: Don't program bypass on linear regamma LUT
drm/i915: Fix LSPCON TMDS output buffer enabling from low-power state
drm/i915/audio: Fix audio detection issue on GLK
drm/i915: Call i915_perf_fini() on init_hw error unwind
drm/i915/bios: filter out invalid DDC pins from VBT child devices
drm/i915/pmu: Inspect runtime PM state more carefully while estimating RC6
drm/i915: Do no use kfree() to free a kmem_cache_alloc() return value
drm/exynos: exynos_drm_fb -> drm_framebuffer
drm/exynos: Move dma_addr out of exynos_drm_fb
drm/exynos: Move GEM BOs to drm_framebuffer
drm: Fix HDCP downstream dev count read
drm/vc4: Fix memory leak during BO teardown
drm/i915/execlists: Clear user-active flag on preemption completion
drm/i915/gvt: Add drm_format_mod update
drm/i915/gvt: Disable primary/sprite/cursor plane at virtual display initialization
drm/i915/gvt: Delete redundant error message in fb_decode.c
drm/i915/gvt: Cancel dma map when resetting ggtt entries
drm/i915/gvt: Missed to cancel dma map for ggtt entries
...

+195 -137
+0 -7
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c
··· 138 138 lut = (struct drm_color_lut *)blob->data; 139 139 lut_size = blob->length / sizeof(struct drm_color_lut); 140 140 141 - if (__is_lut_linear(lut, lut_size)) { 142 - /* Set to bypass if lut is set to linear */ 143 - stream->out_transfer_func->type = TF_TYPE_BYPASS; 144 - stream->out_transfer_func->tf = TRANSFER_FUNCTION_LINEAR; 145 - return 0; 146 - } 147 - 148 141 gamma = dc_create_gamma(); 149 142 if (!gamma) 150 143 return -ENOMEM;
+10 -6
drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
··· 4743 4743 4744 4744 for (i=0; i < dep_table->count; i++) { 4745 4745 if (dep_table->entries[i].vddc != odn_dep_table->entries[i].vddc) { 4746 - data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_VDDC; 4747 - break; 4746 + data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_VDDC | DPMTABLE_OD_UPDATE_MCLK; 4747 + return; 4748 4748 } 4749 4749 } 4750 - if (i == dep_table->count) 4750 + if (i == dep_table->count && data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_VDDC) { 4751 4751 data->need_update_smu7_dpm_table &= ~DPMTABLE_OD_UPDATE_VDDC; 4752 + data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_MCLK; 4753 + } 4752 4754 4753 4755 dep_table = table_info->vdd_dep_on_sclk; 4754 4756 odn_dep_table = (struct phm_ppt_v1_clock_voltage_dependency_table *)&(odn_table->vdd_dependency_on_sclk); 4755 4757 for (i=0; i < dep_table->count; i++) { 4756 4758 if (dep_table->entries[i].vddc != odn_dep_table->entries[i].vddc) { 4757 - data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_VDDC; 4758 - break; 4759 + data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_VDDC | DPMTABLE_OD_UPDATE_SCLK; 4760 + return; 4759 4761 } 4760 4762 } 4761 - if (i == dep_table->count) 4763 + if (i == dep_table->count && data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_VDDC) { 4762 4764 data->need_update_smu7_dpm_table &= ~DPMTABLE_OD_UPDATE_VDDC; 4765 + data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK; 4766 + } 4763 4767 } 4764 4768 4765 4769 static int smu7_odn_edit_dpm_table(struct pp_hwmgr *hwmgr,
+3 -1
drivers/gpu/drm/amd/powerplay/inc/vega12/smu9_driver_if.h
··· 412 412 QuadraticInt_t ReservedEquation2; 413 413 QuadraticInt_t ReservedEquation3; 414 414 415 + uint16_t MinVoltageUlvGfx; 416 + uint16_t MinVoltageUlvSoc; 415 417 416 - uint32_t Reserved[15]; 418 + uint32_t Reserved[14]; 417 419 418 420 419 421
+32 -7
drivers/gpu/drm/drm_dp_dual_mode_helper.c
··· 350 350 { 351 351 uint8_t tmds_oen = enable ? 0 : DP_DUAL_MODE_TMDS_DISABLE; 352 352 ssize_t ret; 353 + int retry; 353 354 354 355 if (type < DRM_DP_DUAL_MODE_TYPE2_DVI) 355 356 return 0; 356 357 357 - ret = drm_dp_dual_mode_write(adapter, DP_DUAL_MODE_TMDS_OEN, 358 - &tmds_oen, sizeof(tmds_oen)); 359 - if (ret) { 360 - DRM_DEBUG_KMS("Failed to %s TMDS output buffers\n", 361 - enable ? "enable" : "disable"); 362 - return ret; 358 + /* 359 + * LSPCON adapters in low-power state may ignore the first write, so 360 + * read back and verify the written value a few times. 361 + */ 362 + for (retry = 0; retry < 3; retry++) { 363 + uint8_t tmp; 364 + 365 + ret = drm_dp_dual_mode_write(adapter, DP_DUAL_MODE_TMDS_OEN, 366 + &tmds_oen, sizeof(tmds_oen)); 367 + if (ret) { 368 + DRM_DEBUG_KMS("Failed to %s TMDS output buffers (%d attempts)\n", 369 + enable ? "enable" : "disable", 370 + retry + 1); 371 + return ret; 372 + } 373 + 374 + ret = drm_dp_dual_mode_read(adapter, DP_DUAL_MODE_TMDS_OEN, 375 + &tmp, sizeof(tmp)); 376 + if (ret) { 377 + DRM_DEBUG_KMS("I2C read failed during TMDS output buffer %s (%d attempts)\n", 378 + enable ? "enabling" : "disabling", 379 + retry + 1); 380 + return ret; 381 + } 382 + 383 + if (tmp == tmds_oen) 384 + return 0; 363 385 } 364 386 365 - return 0; 387 + DRM_DEBUG_KMS("I2C write value mismatch during TMDS output buffer %s\n", 388 + enable ? "enabling" : "disabling"); 389 + 390 + return -EIO; 366 391 } 367 392 EXPORT_SYMBOL(drm_dp_dual_mode_set_tmds_output); 368 393
+14 -59
drivers/gpu/drm/exynos/exynos_drm_fb.c
··· 18 18 #include <drm/drm_fb_helper.h> 19 19 #include <drm/drm_atomic.h> 20 20 #include <drm/drm_atomic_helper.h> 21 + #include <drm/drm_gem_framebuffer_helper.h> 21 22 #include <uapi/drm/exynos_drm.h> 22 23 23 24 #include "exynos_drm_drv.h" ··· 26 25 #include "exynos_drm_fbdev.h" 27 26 #include "exynos_drm_iommu.h" 28 27 #include "exynos_drm_crtc.h" 29 - 30 - #define to_exynos_fb(x) container_of(x, struct exynos_drm_fb, fb) 31 - 32 - /* 33 - * exynos specific framebuffer structure. 34 - * 35 - * @fb: drm framebuffer obejct. 36 - * @exynos_gem: array of exynos specific gem object containing a gem object. 37 - */ 38 - struct exynos_drm_fb { 39 - struct drm_framebuffer fb; 40 - struct exynos_drm_gem *exynos_gem[MAX_FB_BUFFER]; 41 - dma_addr_t dma_addr[MAX_FB_BUFFER]; 42 - }; 43 28 44 29 static int check_fb_gem_memory_type(struct drm_device *drm_dev, 45 30 struct exynos_drm_gem *exynos_gem) ··· 53 66 return 0; 54 67 } 55 68 56 - static void exynos_drm_fb_destroy(struct drm_framebuffer *fb) 57 - { 58 - struct exynos_drm_fb *exynos_fb = to_exynos_fb(fb); 59 - unsigned int i; 60 - 61 - drm_framebuffer_cleanup(fb); 62 - 63 - for (i = 0; i < ARRAY_SIZE(exynos_fb->exynos_gem); i++) { 64 - struct drm_gem_object *obj; 65 - 66 - if (exynos_fb->exynos_gem[i] == NULL) 67 - continue; 68 - 69 - obj = &exynos_fb->exynos_gem[i]->base; 70 - drm_gem_object_unreference_unlocked(obj); 71 - } 72 - 73 - kfree(exynos_fb); 74 - exynos_fb = NULL; 75 - } 76 - 77 - static int exynos_drm_fb_create_handle(struct drm_framebuffer *fb, 78 - struct drm_file *file_priv, 79 - unsigned int *handle) 80 - { 81 - struct exynos_drm_fb *exynos_fb = to_exynos_fb(fb); 82 - 83 - return drm_gem_handle_create(file_priv, 84 - &exynos_fb->exynos_gem[0]->base, handle); 85 - } 86 - 87 69 static const struct drm_framebuffer_funcs exynos_drm_fb_funcs = { 88 - .destroy = exynos_drm_fb_destroy, 89 - .create_handle = exynos_drm_fb_create_handle, 70 + .destroy = drm_gem_fb_destroy, 71 + .create_handle = 
drm_gem_fb_create_handle, 90 72 }; 91 73 92 74 struct drm_framebuffer * ··· 64 108 struct exynos_drm_gem **exynos_gem, 65 109 int count) 66 110 { 67 - struct exynos_drm_fb *exynos_fb; 111 + struct drm_framebuffer *fb; 68 112 int i; 69 113 int ret; 70 114 71 - exynos_fb = kzalloc(sizeof(*exynos_fb), GFP_KERNEL); 72 - if (!exynos_fb) 115 + fb = kzalloc(sizeof(*fb), GFP_KERNEL); 116 + if (!fb) 73 117 return ERR_PTR(-ENOMEM); 74 118 75 119 for (i = 0; i < count; i++) { ··· 77 121 if (ret < 0) 78 122 goto err; 79 123 80 - exynos_fb->exynos_gem[i] = exynos_gem[i]; 81 - exynos_fb->dma_addr[i] = exynos_gem[i]->dma_addr 82 - + mode_cmd->offsets[i]; 124 + fb->obj[i] = &exynos_gem[i]->base; 83 125 } 84 126 85 - drm_helper_mode_fill_fb_struct(dev, &exynos_fb->fb, mode_cmd); 127 + drm_helper_mode_fill_fb_struct(dev, fb, mode_cmd); 86 128 87 - ret = drm_framebuffer_init(dev, &exynos_fb->fb, &exynos_drm_fb_funcs); 129 + ret = drm_framebuffer_init(dev, fb, &exynos_drm_fb_funcs); 88 130 if (ret < 0) { 89 131 DRM_ERROR("failed to initialize framebuffer\n"); 90 132 goto err; 91 133 } 92 134 93 - return &exynos_fb->fb; 135 + return fb; 94 136 95 137 err: 96 - kfree(exynos_fb); 138 + kfree(fb); 97 139 return ERR_PTR(ret); 98 140 } 99 141 ··· 145 191 146 192 dma_addr_t exynos_drm_fb_dma_addr(struct drm_framebuffer *fb, int index) 147 193 { 148 - struct exynos_drm_fb *exynos_fb = to_exynos_fb(fb); 194 + struct exynos_drm_gem *exynos_gem; 149 195 150 196 if (WARN_ON_ONCE(index >= MAX_FB_BUFFER)) 151 197 return 0; 152 198 153 - return exynos_fb->dma_addr[index]; 199 + exynos_gem = to_exynos_gem(fb->obj[index]); 200 + return exynos_gem->dma_addr + fb->offsets[index]; 154 201 } 155 202 156 203 static struct drm_mode_config_helper_funcs exynos_drm_mode_config_helpers = {
+1
drivers/gpu/drm/i915/gvt/cmd_parser.c
··· 1080 1080 { 1081 1081 set_bit(cmd_interrupt_events[s->ring_id].mi_user_interrupt, 1082 1082 s->workload->pending_events); 1083 + patch_value(s, cmd_ptr(s, 0), MI_NOOP); 1083 1084 return 0; 1084 1085 } 1085 1086
+10
drivers/gpu/drm/i915/gvt/display.c
··· 169 169 static void emulate_monitor_status_change(struct intel_vgpu *vgpu) 170 170 { 171 171 struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; 172 + int pipe; 173 + 172 174 vgpu_vreg_t(vgpu, SDEISR) &= ~(SDE_PORTB_HOTPLUG_CPT | 173 175 SDE_PORTC_HOTPLUG_CPT | 174 176 SDE_PORTD_HOTPLUG_CPT); ··· 268 266 /* Clear host CRT status, so guest couldn't detect this host CRT. */ 269 267 if (IS_BROADWELL(dev_priv)) 270 268 vgpu_vreg_t(vgpu, PCH_ADPA) &= ~ADPA_CRT_HOTPLUG_MONITOR_MASK; 269 + 270 + /* Disable Primary/Sprite/Cursor plane */ 271 + for_each_pipe(dev_priv, pipe) { 272 + vgpu_vreg_t(vgpu, DSPCNTR(pipe)) &= ~DISPLAY_PLANE_ENABLE; 273 + vgpu_vreg_t(vgpu, SPRCTL(pipe)) &= ~SPRITE_ENABLE; 274 + vgpu_vreg_t(vgpu, CURCNTR(pipe)) &= ~CURSOR_MODE; 275 + vgpu_vreg_t(vgpu, CURCNTR(pipe)) |= CURSOR_MODE_DISABLE; 276 + } 271 277 272 278 vgpu_vreg_t(vgpu, PIPECONF(PIPE_A)) |= PIPECONF_ENABLE; 273 279 }
+1
drivers/gpu/drm/i915/gvt/dmabuf.c
··· 323 323 struct intel_vgpu_fb_info *fb_info) 324 324 { 325 325 gvt_dmabuf->drm_format = fb_info->drm_format; 326 + gvt_dmabuf->drm_format_mod = fb_info->drm_format_mod; 326 327 gvt_dmabuf->width = fb_info->width; 327 328 gvt_dmabuf->height = fb_info->height; 328 329 gvt_dmabuf->stride = fb_info->stride;
+9 -18
drivers/gpu/drm/i915/gvt/fb_decoder.c
··· 245 245 plane->hw_format = fmt; 246 246 247 247 plane->base = vgpu_vreg_t(vgpu, DSPSURF(pipe)) & I915_GTT_PAGE_MASK; 248 - if (!intel_gvt_ggtt_validate_range(vgpu, plane->base, 0)) { 249 - gvt_vgpu_err("invalid gma address: %lx\n", 250 - (unsigned long)plane->base); 248 + if (!intel_gvt_ggtt_validate_range(vgpu, plane->base, 0)) 251 249 return -EINVAL; 252 - } 253 250 254 251 plane->base_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm, plane->base); 255 252 if (plane->base_gpa == INTEL_GVT_INVALID_ADDR) { 256 - gvt_vgpu_err("invalid gma address: %lx\n", 257 - (unsigned long)plane->base); 253 + gvt_vgpu_err("Translate primary plane gma 0x%x to gpa fail\n", 254 + plane->base); 258 255 return -EINVAL; 259 256 } 260 257 ··· 368 371 alpha_plane, alpha_force); 369 372 370 373 plane->base = vgpu_vreg_t(vgpu, CURBASE(pipe)) & I915_GTT_PAGE_MASK; 371 - if (!intel_gvt_ggtt_validate_range(vgpu, plane->base, 0)) { 372 - gvt_vgpu_err("invalid gma address: %lx\n", 373 - (unsigned long)plane->base); 374 + if (!intel_gvt_ggtt_validate_range(vgpu, plane->base, 0)) 374 375 return -EINVAL; 375 - } 376 376 377 377 plane->base_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm, plane->base); 378 378 if (plane->base_gpa == INTEL_GVT_INVALID_ADDR) { 379 - gvt_vgpu_err("invalid gma address: %lx\n", 380 - (unsigned long)plane->base); 379 + gvt_vgpu_err("Translate cursor plane gma 0x%x to gpa fail\n", 380 + plane->base); 381 381 return -EINVAL; 382 382 } 383 383 ··· 470 476 plane->drm_format = drm_format; 471 477 472 478 plane->base = vgpu_vreg_t(vgpu, SPRSURF(pipe)) & I915_GTT_PAGE_MASK; 473 - if (!intel_gvt_ggtt_validate_range(vgpu, plane->base, 0)) { 474 - gvt_vgpu_err("invalid gma address: %lx\n", 475 - (unsigned long)plane->base); 479 + if (!intel_gvt_ggtt_validate_range(vgpu, plane->base, 0)) 476 480 return -EINVAL; 477 - } 478 481 479 482 plane->base_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm, plane->base); 480 483 if (plane->base_gpa == INTEL_GVT_INVALID_ADDR) { 481 - 
gvt_vgpu_err("invalid gma address: %lx\n", 482 - (unsigned long)plane->base); 484 + gvt_vgpu_err("Translate sprite plane gma 0x%x to gpa fail\n", 485 + plane->base); 483 486 return -EINVAL; 484 487 } 485 488
+45 -7
drivers/gpu/drm/i915/gvt/gtt.c
··· 530 530 false, 0, mm->vgpu); 531 531 } 532 532 533 + static void ggtt_get_host_entry(struct intel_vgpu_mm *mm, 534 + struct intel_gvt_gtt_entry *entry, unsigned long index) 535 + { 536 + struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops; 537 + 538 + GEM_BUG_ON(mm->type != INTEL_GVT_MM_GGTT); 539 + 540 + pte_ops->get_entry(NULL, entry, index, false, 0, mm->vgpu); 541 + } 542 + 533 543 static void ggtt_set_host_entry(struct intel_vgpu_mm *mm, 534 544 struct intel_gvt_gtt_entry *entry, unsigned long index) 535 545 { ··· 1828 1818 return ret; 1829 1819 } 1830 1820 1821 + static void ggtt_invalidate_pte(struct intel_vgpu *vgpu, 1822 + struct intel_gvt_gtt_entry *entry) 1823 + { 1824 + struct intel_gvt_gtt_pte_ops *pte_ops = vgpu->gvt->gtt.pte_ops; 1825 + unsigned long pfn; 1826 + 1827 + pfn = pte_ops->get_pfn(entry); 1828 + if (pfn != vgpu->gvt->gtt.scratch_mfn) 1829 + intel_gvt_hypervisor_dma_unmap_guest_page(vgpu, 1830 + pfn << PAGE_SHIFT); 1831 + } 1832 + 1831 1833 static int emulate_ggtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off, 1832 1834 void *p_data, unsigned int bytes) 1833 1835 { ··· 1866 1844 1867 1845 memcpy((void *)&e.val64 + (off & (info->gtt_entry_size - 1)), p_data, 1868 1846 bytes); 1869 - m = e; 1870 1847 1871 1848 if (ops->test_present(&e)) { 1872 1849 gfn = ops->get_pfn(&e); 1850 + m = e; 1873 1851 1874 1852 /* one PTE update may be issued in multiple writes and the 1875 1853 * first write may not construct a valid gfn ··· 1890 1868 ops->set_pfn(&m, gvt->gtt.scratch_mfn); 1891 1869 } else 1892 1870 ops->set_pfn(&m, dma_addr >> PAGE_SHIFT); 1893 - } else 1871 + } else { 1872 + ggtt_get_host_entry(ggtt_mm, &m, g_gtt_index); 1873 + ggtt_invalidate_pte(vgpu, &m); 1894 1874 ops->set_pfn(&m, gvt->gtt.scratch_mfn); 1875 + ops->clear_present(&m); 1876 + } 1895 1877 1896 1878 out: 1897 1879 ggtt_set_host_entry(ggtt_mm, &m, g_gtt_index); ··· 2056 2030 return PTR_ERR(gtt->ggtt_mm); 2057 2031 } 2058 2032 2059 - 
intel_vgpu_reset_ggtt(vgpu); 2033 + intel_vgpu_reset_ggtt(vgpu, false); 2060 2034 2061 2035 return create_scratch_page_tree(vgpu); 2062 2036 } ··· 2341 2315 /** 2342 2316 * intel_vgpu_reset_ggtt - reset the GGTT entry 2343 2317 * @vgpu: a vGPU 2318 + * @invalidate_old: invalidate old entries 2344 2319 * 2345 2320 * This function is called at the vGPU create stage 2346 2321 * to reset all the GGTT entries. 2347 2322 * 2348 2323 */ 2349 - void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu) 2324 + void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu, bool invalidate_old) 2350 2325 { 2351 2326 struct intel_gvt *gvt = vgpu->gvt; 2352 2327 struct drm_i915_private *dev_priv = gvt->dev_priv; 2353 2328 struct intel_gvt_gtt_pte_ops *pte_ops = vgpu->gvt->gtt.pte_ops; 2354 2329 struct intel_gvt_gtt_entry entry = {.type = GTT_TYPE_GGTT_PTE}; 2330 + struct intel_gvt_gtt_entry old_entry; 2355 2331 u32 index; 2356 2332 u32 num_entries; 2357 2333 ··· 2362 2334 2363 2335 index = vgpu_aperture_gmadr_base(vgpu) >> PAGE_SHIFT; 2364 2336 num_entries = vgpu_aperture_sz(vgpu) >> PAGE_SHIFT; 2365 - while (num_entries--) 2337 + while (num_entries--) { 2338 + if (invalidate_old) { 2339 + ggtt_get_host_entry(vgpu->gtt.ggtt_mm, &old_entry, index); 2340 + ggtt_invalidate_pte(vgpu, &old_entry); 2341 + } 2366 2342 ggtt_set_host_entry(vgpu->gtt.ggtt_mm, &entry, index++); 2343 + } 2367 2344 2368 2345 index = vgpu_hidden_gmadr_base(vgpu) >> PAGE_SHIFT; 2369 2346 num_entries = vgpu_hidden_sz(vgpu) >> PAGE_SHIFT; 2370 - while (num_entries--) 2347 + while (num_entries--) { 2348 + if (invalidate_old) { 2349 + ggtt_get_host_entry(vgpu->gtt.ggtt_mm, &old_entry, index); 2350 + ggtt_invalidate_pte(vgpu, &old_entry); 2351 + } 2371 2352 ggtt_set_host_entry(vgpu->gtt.ggtt_mm, &entry, index++); 2353 + } 2372 2354 2373 2355 ggtt_invalidate(dev_priv); 2374 2356 } ··· 2398 2360 * removing the shadow pages. 
2399 2361 */ 2400 2362 intel_vgpu_destroy_all_ppgtt_mm(vgpu); 2401 - intel_vgpu_reset_ggtt(vgpu); 2363 + intel_vgpu_reset_ggtt(vgpu, true); 2402 2364 }
+1 -1
drivers/gpu/drm/i915/gvt/gtt.h
··· 193 193 194 194 extern int intel_vgpu_init_gtt(struct intel_vgpu *vgpu); 195 195 extern void intel_vgpu_clean_gtt(struct intel_vgpu *vgpu); 196 - void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu); 196 + void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu, bool invalidate_old); 197 197 void intel_vgpu_invalidate_ppgtt(struct intel_vgpu *vgpu); 198 198 199 199 extern int intel_gvt_init_gtt(struct intel_gvt *gvt);
+1
drivers/gpu/drm/i915/gvt/handlers.c
··· 1150 1150 switch (notification) { 1151 1151 case VGT_G2V_PPGTT_L3_PAGE_TABLE_CREATE: 1152 1152 root_entry_type = GTT_TYPE_PPGTT_ROOT_L3_ENTRY; 1153 + /* fall through */ 1153 1154 case VGT_G2V_PPGTT_L4_PAGE_TABLE_CREATE: 1154 1155 mm = intel_vgpu_get_ppgtt_mm(vgpu, root_entry_type, pdps); 1155 1156 return PTR_ERR_OR_ZERO(mm);
+1 -1
drivers/gpu/drm/i915/gvt/kvmgt.c
··· 1301 1301 1302 1302 } 1303 1303 1304 - return 0; 1304 + return -ENOTTY; 1305 1305 } 1306 1306 1307 1307 static ssize_t
+15 -12
drivers/gpu/drm/i915/i915_drv.c
··· 1105 1105 1106 1106 ret = i915_ggtt_probe_hw(dev_priv); 1107 1107 if (ret) 1108 - return ret; 1108 + goto err_perf; 1109 1109 1110 - /* WARNING: Apparently we must kick fbdev drivers before vgacon, 1111 - * otherwise the vga fbdev driver falls over. */ 1110 + /* 1111 + * WARNING: Apparently we must kick fbdev drivers before vgacon, 1112 + * otherwise the vga fbdev driver falls over. 1113 + */ 1112 1114 ret = i915_kick_out_firmware_fb(dev_priv); 1113 1115 if (ret) { 1114 1116 DRM_ERROR("failed to remove conflicting framebuffer drivers\n"); 1115 - goto out_ggtt; 1117 + goto err_ggtt; 1116 1118 } 1117 1119 1118 1120 ret = i915_kick_out_vgacon(dev_priv); 1119 1121 if (ret) { 1120 1122 DRM_ERROR("failed to remove conflicting VGA console\n"); 1121 - goto out_ggtt; 1123 + goto err_ggtt; 1122 1124 } 1123 1125 1124 1126 ret = i915_ggtt_init_hw(dev_priv); 1125 1127 if (ret) 1126 - return ret; 1128 + goto err_ggtt; 1127 1129 1128 1130 ret = i915_ggtt_enable_hw(dev_priv); 1129 1131 if (ret) { 1130 1132 DRM_ERROR("failed to enable GGTT\n"); 1131 - goto out_ggtt; 1133 + goto err_ggtt; 1132 1134 } 1133 1135 1134 1136 pci_set_master(pdev); ··· 1141 1139 if (ret) { 1142 1140 DRM_ERROR("failed to set DMA mask\n"); 1143 1141 1144 - goto out_ggtt; 1142 + goto err_ggtt; 1145 1143 } 1146 1144 } 1147 1145 ··· 1159 1157 if (ret) { 1160 1158 DRM_ERROR("failed to set DMA mask\n"); 1161 1159 1162 - goto out_ggtt; 1160 + goto err_ggtt; 1163 1161 } 1164 1162 } 1165 1163 ··· 1192 1190 1193 1191 ret = intel_gvt_init(dev_priv); 1194 1192 if (ret) 1195 - goto out_ggtt; 1193 + goto err_ggtt; 1196 1194 1197 1195 return 0; 1198 1196 1199 - out_ggtt: 1197 + err_ggtt: 1200 1198 i915_ggtt_cleanup_hw(dev_priv); 1201 - 1199 + err_perf: 1200 + i915_perf_fini(dev_priv); 1202 1201 return ret; 1203 1202 } 1204 1203
+1 -1
drivers/gpu/drm/i915/i915_gem_execbuffer.c
··· 728 728 729 729 err = radix_tree_insert(handles_vma, handle, vma); 730 730 if (unlikely(err)) { 731 - kfree(lut); 731 + kmem_cache_free(eb->i915->luts, lut); 732 732 goto err_obj; 733 733 } 734 734
+28 -11
drivers/gpu/drm/i915/i915_pmu.c
··· 473 473 spin_lock_irqsave(&i915->pmu.lock, flags); 474 474 spin_lock(&kdev->power.lock); 475 475 476 - if (!i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur) 477 - i915->pmu.suspended_jiffies_last = 478 - kdev->power.suspended_jiffies; 476 + /* 477 + * After the above branch intel_runtime_pm_get_if_in_use failed 478 + * to get the runtime PM reference we cannot assume we are in 479 + * runtime suspend since we can either: a) race with coming out 480 + * of it before we took the power.lock, or b) there are other 481 + * states than suspended which can bring us here. 482 + * 483 + * We need to double-check that we are indeed currently runtime 484 + * suspended and if not we cannot do better than report the last 485 + * known RC6 value. 486 + */ 487 + if (kdev->power.runtime_status == RPM_SUSPENDED) { 488 + if (!i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur) 489 + i915->pmu.suspended_jiffies_last = 490 + kdev->power.suspended_jiffies; 479 491 480 - val = kdev->power.suspended_jiffies - 481 - i915->pmu.suspended_jiffies_last; 482 - val += jiffies - kdev->power.accounting_timestamp; 492 + val = kdev->power.suspended_jiffies - 493 + i915->pmu.suspended_jiffies_last; 494 + val += jiffies - kdev->power.accounting_timestamp; 495 + 496 + val = jiffies_to_nsecs(val); 497 + val += i915->pmu.sample[__I915_SAMPLE_RC6].cur; 498 + 499 + i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur = val; 500 + } else if (i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur) { 501 + val = i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur; 502 + } else { 503 + val = i915->pmu.sample[__I915_SAMPLE_RC6].cur; 504 + } 483 505 484 506 spin_unlock(&kdev->power.lock); 485 - 486 - val = jiffies_to_nsecs(val); 487 - val += i915->pmu.sample[__I915_SAMPLE_RC6].cur; 488 - i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur = val; 489 - 490 507 spin_unlock_irqrestore(&i915->pmu.lock, flags); 491 508 } 492 509
+1 -1
drivers/gpu/drm/i915/intel_audio.c
··· 729 729 struct drm_i915_private *dev_priv = kdev_to_i915(kdev); 730 730 u32 tmp; 731 731 732 - if (!IS_GEN9_BC(dev_priv)) 732 + if (!IS_GEN9(dev_priv)) 733 733 return; 734 734 735 735 i915_audio_component_get_power(kdev);
+9 -4
drivers/gpu/drm/i915/intel_bios.c
··· 1256 1256 return; 1257 1257 1258 1258 aux_channel = child->aux_channel; 1259 - ddc_pin = child->ddc_pin; 1260 1259 1261 1260 is_dvi = child->device_type & DEVICE_TYPE_TMDS_DVI_SIGNALING; 1262 1261 is_dp = child->device_type & DEVICE_TYPE_DISPLAYPORT_OUTPUT; ··· 1302 1303 DRM_DEBUG_KMS("Port %c is internal DP\n", port_name(port)); 1303 1304 1304 1305 if (is_dvi) { 1305 - info->alternate_ddc_pin = map_ddc_pin(dev_priv, ddc_pin); 1306 - 1307 - sanitize_ddc_pin(dev_priv, port); 1306 + ddc_pin = map_ddc_pin(dev_priv, child->ddc_pin); 1307 + if (intel_gmbus_is_valid_pin(dev_priv, ddc_pin)) { 1308 + info->alternate_ddc_pin = ddc_pin; 1309 + sanitize_ddc_pin(dev_priv, port); 1310 + } else { 1311 + DRM_DEBUG_KMS("Port %c has invalid DDC pin %d, " 1312 + "sticking to defaults\n", 1313 + port_name(port), ddc_pin); 1314 + } 1308 1315 } 1309 1316 1310 1317 if (is_dp) {
+9
drivers/gpu/drm/i915/intel_lrc.c
··· 577 577 * know the next preemption status we see corresponds 578 578 * to this ELSP update. 579 579 */ 580 + GEM_BUG_ON(!execlists_is_active(execlists, 581 + EXECLISTS_ACTIVE_USER)); 580 582 GEM_BUG_ON(!port_count(&port[0])); 581 583 if (port_count(&port[0]) > 1) 582 584 goto unlock; ··· 740 738 memset(port, 0, sizeof(*port)); 741 739 port++; 742 740 } 741 + 742 + execlists_clear_active(execlists, EXECLISTS_ACTIVE_USER); 743 743 } 744 744 745 745 static void execlists_cancel_requests(struct intel_engine_cs *engine) ··· 1005 1001 1006 1002 if (fw) 1007 1003 intel_uncore_forcewake_put(dev_priv, execlists->fw_domains); 1004 + 1005 + /* If the engine is now idle, so should be the flag; and vice versa. */ 1006 + GEM_BUG_ON(execlists_is_active(&engine->execlists, 1007 + EXECLISTS_ACTIVE_USER) == 1008 + !port_isset(engine->execlists.port)); 1008 1009 } 1009 1010 1010 1011 static void queue_request(struct intel_engine_cs *engine,
+2
drivers/gpu/drm/vc4/vc4_bo.c
··· 195 195 vc4_bo_set_label(obj, -1); 196 196 197 197 if (bo->validated_shader) { 198 + kfree(bo->validated_shader->uniform_addr_offsets); 198 199 kfree(bo->validated_shader->texture_samples); 199 200 kfree(bo->validated_shader); 200 201 bo->validated_shader = NULL; ··· 592 591 } 593 592 594 593 if (bo->validated_shader) { 594 + kfree(bo->validated_shader->uniform_addr_offsets); 595 595 kfree(bo->validated_shader->texture_samples); 596 596 kfree(bo->validated_shader); 597 597 bo->validated_shader = NULL;
+1
drivers/gpu/drm/vc4/vc4_validate_shaders.c
··· 942 942 fail: 943 943 kfree(validation_state.branch_targets); 944 944 if (validated_shader) { 945 + kfree(validated_shader->uniform_addr_offsets); 945 946 kfree(validated_shader->texture_samples); 946 947 kfree(validated_shader); 947 948 }
+1 -1
include/drm/drm_hdcp.h
··· 19 19 #define DRM_HDCP_RI_LEN 2 20 20 #define DRM_HDCP_V_PRIME_PART_LEN 4 21 21 #define DRM_HDCP_V_PRIME_NUM_PARTS 5 22 - #define DRM_HDCP_NUM_DOWNSTREAM(x) (x & 0x3f) 22 + #define DRM_HDCP_NUM_DOWNSTREAM(x) (x & 0x7f) 23 23 #define DRM_HDCP_MAX_CASCADE_EXCEEDED(x) (x & BIT(3)) 24 24 #define DRM_HDCP_MAX_DEVICE_EXCEEDED(x) (x & BIT(7)) 25 25