Linux kernel mirror (for testing) — git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
Topics: kernel, os, linux
Forks: 1
fork

Configure Feed

Select the types of activity you want to include in your feed.

Merge tag 'drm-fixes-for-4.8-rc4' of git://people.freedesktop.org/~airlied/linux

Pull drm fixes from Dave Airlie:
"A bunch of fixes covering i915, amdgpu, one tegra and some core DRM
ones. Nothing too strange at this point"

* tag 'drm-fixes-for-4.8-rc4' of git://people.freedesktop.org/~airlied/linux: (21 commits)
drm/atomic: Don't potentially reset color_mgmt_changed on successive property updates.
drm: Protect fb_defio in drivers with CONFIG_KMS_FBDEV_EMULATION
drm/amdgpu: skip TV/CV in display parsing
drm/amdgpu: avoid a possible array overflow
drm/amdgpu: fix lru size grouping v2
drm/tegra: dsi: Enhance runtime power management
drm/i915: Fix botched merge that downgrades CSR versions.
drm/i915/skl: Ensure pipes with changed wms get added to the state
drm/i915/gen9: Only copy WM results for changed pipes to skl_hw
drm/i915/skl: Add support for the SAGV, fix underrun hangs
drm/i915/gen6+: Interpret mailbox error flags
drm/i915: Reattach comment, complete type specification
drm/i915: Unconditionally flush any chipset buffers before execbuf
drm/i915/gen9: Drop invalid WARN() during data rate calculation
drm/i915/gen9: Initialize intel_state->active_crtcs during WM sanitization (v2)
drm: Reject page_flip for !DRIVER_MODESET
drm/amdgpu: fix timeout value check in amd_sched_job_recovery
drm/amdgpu: fix sdma_v2_4_ring_test_ib
drm/amdgpu: fix amdgpu_move_blit on 32bit systems
drm/radeon: fix radeon_move_blit on 32bit systems
...

+425 -72 (total)
+2 drivers/gpu/drm/amd/amdgpu/amdgpu.h
··· 426 426 427 427 /* custom LRU management */ 428 428 struct amdgpu_mman_lru log2_size[AMDGPU_TTM_LRU_SIZE]; 429 + /* guard for log2_size array, don't add anything in between */ 430 + struct amdgpu_mman_lru guard; 429 431 }; 430 432 431 433 int amdgpu_copy_buffer(struct amdgpu_ring *ring,
+13
drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
··· 321 321 (le16_to_cpu(path->usConnObjectId) & 322 322 OBJECT_TYPE_MASK) >> OBJECT_TYPE_SHIFT; 323 323 324 + /* Skip TV/CV support */ 325 + if ((le16_to_cpu(path->usDeviceTag) == 326 + ATOM_DEVICE_TV1_SUPPORT) || 327 + (le16_to_cpu(path->usDeviceTag) == 328 + ATOM_DEVICE_CV_SUPPORT)) 329 + continue; 330 + 331 + if (con_obj_id >= ARRAY_SIZE(object_connector_convert)) { 332 + DRM_ERROR("invalid con_obj_id %d for device tag 0x%04x\n", 333 + con_obj_id, le16_to_cpu(path->usDeviceTag)); 334 + continue; 335 + } 336 + 324 337 connector_type = 325 338 object_connector_convert[con_obj_id]; 326 339 connector_object_id = con_obj_id;
+10 -2
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
··· 251 251 252 252 adev = amdgpu_get_adev(bo->bdev); 253 253 ring = adev->mman.buffer_funcs_ring; 254 - old_start = old_mem->start << PAGE_SHIFT; 255 - new_start = new_mem->start << PAGE_SHIFT; 254 + old_start = (u64)old_mem->start << PAGE_SHIFT; 255 + new_start = (u64)new_mem->start << PAGE_SHIFT; 256 256 257 257 switch (old_mem->mem_type) { 258 258 case TTM_PL_VRAM: ··· 950 950 struct list_head *res = lru->lru[tbo->mem.mem_type]; 951 951 952 952 lru->lru[tbo->mem.mem_type] = &tbo->lru; 953 + while ((++lru)->lru[tbo->mem.mem_type] == res) 954 + lru->lru[tbo->mem.mem_type] = &tbo->lru; 953 955 954 956 return res; 955 957 } ··· 962 960 struct list_head *res = lru->swap_lru; 963 961 964 962 lru->swap_lru = &tbo->swap; 963 + while ((++lru)->swap_lru == res) 964 + lru->swap_lru = &tbo->swap; 965 965 966 966 return res; 967 967 } ··· 1014 1010 lru->lru[j] = &adev->mman.bdev.man[j].lru; 1015 1011 lru->swap_lru = &adev->mman.bdev.glob->swap_lru; 1016 1012 } 1013 + 1014 + for (j = 0; j < TTM_NUM_MEM_TYPES; ++j) 1015 + adev->mman.guard.lru[j] = NULL; 1016 + adev->mman.guard.swap_lru = NULL; 1017 1017 1018 1018 adev->mman.initialized = true; 1019 1019 r = ttm_bo_init_mm(&adev->mman.bdev, TTM_PL_VRAM,
+1 -1
drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
··· 714 714 DRM_ERROR("amdgpu: IB test timed out\n"); 715 715 r = -ETIMEDOUT; 716 716 goto err1; 717 - } else if (r) { 717 + } else if (r < 0) { 718 718 DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r); 719 719 goto err1; 720 720 }
+1 -1
drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
··· 405 405 spin_lock(&sched->job_list_lock); 406 406 s_job = list_first_entry_or_null(&sched->ring_mirror_list, 407 407 struct amd_sched_job, node); 408 - if (s_job) 408 + if (s_job && sched->timeout != MAX_SCHEDULE_TIMEOUT) 409 409 schedule_delayed_work(&s_job->work_tdr, sched->timeout); 410 410 411 411 list_for_each_entry_safe(s_job, tmp, &sched->ring_mirror_list, node) {
+3 -3
drivers/gpu/drm/drm_atomic.c
··· 475 475 val, 476 476 -1, 477 477 &replaced); 478 - state->color_mgmt_changed = replaced; 478 + state->color_mgmt_changed |= replaced; 479 479 return ret; 480 480 } else if (property == config->ctm_property) { 481 481 ret = drm_atomic_replace_property_blob_from_id(crtc, ··· 483 483 val, 484 484 sizeof(struct drm_color_ctm), 485 485 &replaced); 486 - state->color_mgmt_changed = replaced; 486 + state->color_mgmt_changed |= replaced; 487 487 return ret; 488 488 } else if (property == config->gamma_lut_property) { 489 489 ret = drm_atomic_replace_property_blob_from_id(crtc, ··· 491 491 val, 492 492 -1, 493 493 &replaced); 494 - state->color_mgmt_changed = replaced; 494 + state->color_mgmt_changed |= replaced; 495 495 return ret; 496 496 } else if (crtc->funcs->atomic_set_property) 497 497 return crtc->funcs->atomic_set_property(crtc, state, property, val);
+3
drivers/gpu/drm/drm_crtc.c
··· 5404 5404 struct drm_pending_vblank_event *e = NULL; 5405 5405 int ret = -EINVAL; 5406 5406 5407 + if (!drm_core_check_feature(dev, DRIVER_MODESET)) 5408 + return -EINVAL; 5409 + 5407 5410 if (page_flip->flags & ~DRM_MODE_PAGE_FLIP_FLAGS || 5408 5411 page_flip->reserved != 0) 5409 5412 return -EINVAL;
+11 -2
drivers/gpu/drm/i915/i915_drv.h
··· 882 882 883 883 struct i915_ctx_hang_stats hang_stats; 884 884 885 - /* Unique identifier for this context, used by the hw for tracking */ 886 885 unsigned long flags; 887 886 #define CONTEXT_NO_ZEROMAP BIT(0) 888 887 #define CONTEXT_NO_ERROR_CAPTURE BIT(1) 889 - unsigned hw_id; 888 + 889 + /* Unique identifier for this context, used by the hw for tracking */ 890 + unsigned int hw_id; 890 891 u32 user_handle; 891 892 892 893 u32 ggtt_alignment; ··· 1963 1962 bool suspended_to_idle; 1964 1963 struct i915_suspend_saved_registers regfile; 1965 1964 struct vlv_s0ix_state vlv_s0ix_state; 1965 + 1966 + enum { 1967 + I915_SKL_SAGV_UNKNOWN = 0, 1968 + I915_SKL_SAGV_DISABLED, 1969 + I915_SKL_SAGV_ENABLED, 1970 + I915_SKL_SAGV_NOT_CONTROLLED 1971 + } skl_sagv_status; 1966 1972 1967 1973 struct { 1968 1974 /* ··· 3599 3591 /* belongs in i915_gem_gtt.h */ 3600 3592 static inline void i915_gem_chipset_flush(struct drm_i915_private *dev_priv) 3601 3593 { 3594 + wmb(); 3602 3595 if (INTEL_GEN(dev_priv) < 6) 3603 3596 intel_gtt_chipset_flush(); 3604 3597 }
+3 -10
drivers/gpu/drm/i915/i915_gem_execbuffer.c
··· 943 943 { 944 944 const unsigned other_rings = ~intel_engine_flag(req->engine); 945 945 struct i915_vma *vma; 946 - uint32_t flush_domains = 0; 947 - bool flush_chipset = false; 948 946 int ret; 949 947 950 948 list_for_each_entry(vma, vmas, exec_list) { ··· 955 957 } 956 958 957 959 if (obj->base.write_domain & I915_GEM_DOMAIN_CPU) 958 - flush_chipset |= i915_gem_clflush_object(obj, false); 959 - 960 - flush_domains |= obj->base.write_domain; 960 + i915_gem_clflush_object(obj, false); 961 961 } 962 962 963 - if (flush_chipset) 964 - i915_gem_chipset_flush(req->engine->i915); 965 - 966 - if (flush_domains & I915_GEM_DOMAIN_GTT) 967 - wmb(); 963 + /* Unconditionally flush any chipset caches (for streaming writes). */ 964 + i915_gem_chipset_flush(req->engine->i915); 968 965 969 966 /* Unconditionally invalidate gpu caches and ensure that we do flush 970 967 * any residual writes from the previous batch.
+13
drivers/gpu/drm/i915/i915_reg.h
··· 7145 7145 7146 7146 #define GEN6_PCODE_MAILBOX _MMIO(0x138124) 7147 7147 #define GEN6_PCODE_READY (1<<31) 7148 + #define GEN6_PCODE_ERROR_MASK 0xFF 7149 + #define GEN6_PCODE_SUCCESS 0x0 7150 + #define GEN6_PCODE_ILLEGAL_CMD 0x1 7151 + #define GEN6_PCODE_MIN_FREQ_TABLE_GT_RATIO_OUT_OF_RANGE 0x2 7152 + #define GEN6_PCODE_TIMEOUT 0x3 7153 + #define GEN6_PCODE_UNIMPLEMENTED_CMD 0xFF 7154 + #define GEN7_PCODE_TIMEOUT 0x2 7155 + #define GEN7_PCODE_ILLEGAL_DATA 0x3 7156 + #define GEN7_PCODE_MIN_FREQ_TABLE_GT_RATIO_OUT_OF_RANGE 0x10 7148 7157 #define GEN6_PCODE_WRITE_RC6VIDS 0x4 7149 7158 #define GEN6_PCODE_READ_RC6VIDS 0x5 7150 7159 #define GEN6_ENCODE_RC6_VID(mv) (((mv) - 245) / 5) ··· 7175 7166 #define HSW_PCODE_DE_WRITE_FREQ_REQ 0x17 7176 7167 #define DISPLAY_IPS_CONTROL 0x19 7177 7168 #define HSW_PCODE_DYNAMIC_DUTY_CYCLE_CONTROL 0x1A 7169 + #define GEN9_PCODE_SAGV_CONTROL 0x21 7170 + #define GEN9_SAGV_DISABLE 0x0 7171 + #define GEN9_SAGV_IS_DISABLED 0x1 7172 + #define GEN9_SAGV_ENABLE 0x3 7178 7173 #define GEN6_PCODE_DATA _MMIO(0x138128) 7179 7174 #define GEN6_PCODE_FREQ_IA_RATIO_SHIFT 8 7180 7175 #define GEN6_PCODE_FREQ_RING_RATIO_SHIFT 16
+4 -4
drivers/gpu/drm/i915/intel_csr.c
··· 41 41 * be moved to FW_FAILED. 42 42 */ 43 43 44 - #define I915_CSR_KBL "i915/kbl_dmc_ver1.bin" 44 + #define I915_CSR_KBL "i915/kbl_dmc_ver1_01.bin" 45 45 MODULE_FIRMWARE(I915_CSR_KBL); 46 46 #define KBL_CSR_VERSION_REQUIRED CSR_VERSION(1, 1) 47 47 48 - #define I915_CSR_SKL "i915/skl_dmc_ver1.bin" 48 + #define I915_CSR_SKL "i915/skl_dmc_ver1_26.bin" 49 49 MODULE_FIRMWARE(I915_CSR_SKL); 50 - #define SKL_CSR_VERSION_REQUIRED CSR_VERSION(1, 23) 50 + #define SKL_CSR_VERSION_REQUIRED CSR_VERSION(1, 26) 51 51 52 - #define I915_CSR_BXT "i915/bxt_dmc_ver1.bin" 52 + #define I915_CSR_BXT "i915/bxt_dmc_ver1_07.bin" 53 53 MODULE_FIRMWARE(I915_CSR_BXT); 54 54 #define BXT_CSR_VERSION_REQUIRED CSR_VERSION(1, 7) 55 55
+11
drivers/gpu/drm/i915/intel_display.c
··· 13759 13759 intel_state->cdclk_pll_vco != dev_priv->cdclk_pll.vco)) 13760 13760 dev_priv->display.modeset_commit_cdclk(state); 13761 13761 13762 + /* 13763 + * SKL workaround: bspec recommends we disable the SAGV when we 13764 + * have more then one pipe enabled 13765 + */ 13766 + if (IS_SKYLAKE(dev_priv) && !skl_can_enable_sagv(state)) 13767 + skl_disable_sagv(dev_priv); 13768 + 13762 13769 intel_modeset_verify_disabled(dev); 13763 13770 } 13764 13771 ··· 13838 13831 13839 13832 intel_modeset_verify_crtc(crtc, old_crtc_state, crtc->state); 13840 13833 } 13834 + 13835 + if (IS_SKYLAKE(dev_priv) && intel_state->modeset && 13836 + skl_can_enable_sagv(state)) 13837 + skl_enable_sagv(dev_priv); 13841 13838 13842 13839 drm_atomic_helper_commit_hw_done(state); 13843 13840
+3
drivers/gpu/drm/i915/intel_drv.h
··· 1716 1716 void skl_wm_get_hw_state(struct drm_device *dev); 1717 1717 void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv, 1718 1718 struct skl_ddb_allocation *ddb /* out */); 1719 + bool skl_can_enable_sagv(struct drm_atomic_state *state); 1720 + int skl_enable_sagv(struct drm_i915_private *dev_priv); 1721 + int skl_disable_sagv(struct drm_i915_private *dev_priv); 1719 1722 uint32_t ilk_pipe_pixel_rate(const struct intel_crtc_state *pipe_config); 1720 1723 bool ilk_disable_lp_wm(struct drm_device *dev); 1721 1724 int sanitize_rc6_option(struct drm_i915_private *dev_priv, int enable_rc6);
+264 -6
drivers/gpu/drm/i915/intel_pm.c
··· 2852 2852 2853 2853 #define SKL_DDB_SIZE 896 /* in blocks */ 2854 2854 #define BXT_DDB_SIZE 512 2855 + #define SKL_SAGV_BLOCK_TIME 30 /* µs */ 2855 2856 2856 2857 /* 2857 2858 * Return the index of a plane in the SKL DDB and wm result arrays. Primary ··· 2874 2873 MISSING_CASE(plane->base.type); 2875 2874 return plane->plane; 2876 2875 } 2876 + } 2877 + 2878 + /* 2879 + * SAGV dynamically adjusts the system agent voltage and clock frequencies 2880 + * depending on power and performance requirements. The display engine access 2881 + * to system memory is blocked during the adjustment time. Because of the 2882 + * blocking time, having this enabled can cause full system hangs and/or pipe 2883 + * underruns if we don't meet all of the following requirements: 2884 + * 2885 + * - <= 1 pipe enabled 2886 + * - All planes can enable watermarks for latencies >= SAGV engine block time 2887 + * - We're not using an interlaced display configuration 2888 + */ 2889 + int 2890 + skl_enable_sagv(struct drm_i915_private *dev_priv) 2891 + { 2892 + int ret; 2893 + 2894 + if (dev_priv->skl_sagv_status == I915_SKL_SAGV_NOT_CONTROLLED || 2895 + dev_priv->skl_sagv_status == I915_SKL_SAGV_ENABLED) 2896 + return 0; 2897 + 2898 + DRM_DEBUG_KMS("Enabling the SAGV\n"); 2899 + mutex_lock(&dev_priv->rps.hw_lock); 2900 + 2901 + ret = sandybridge_pcode_write(dev_priv, GEN9_PCODE_SAGV_CONTROL, 2902 + GEN9_SAGV_ENABLE); 2903 + 2904 + /* We don't need to wait for the SAGV when enabling */ 2905 + mutex_unlock(&dev_priv->rps.hw_lock); 2906 + 2907 + /* 2908 + * Some skl systems, pre-release machines in particular, 2909 + * don't actually have an SAGV. 
2910 + */ 2911 + if (ret == -ENXIO) { 2912 + DRM_DEBUG_DRIVER("No SAGV found on system, ignoring\n"); 2913 + dev_priv->skl_sagv_status = I915_SKL_SAGV_NOT_CONTROLLED; 2914 + return 0; 2915 + } else if (ret < 0) { 2916 + DRM_ERROR("Failed to enable the SAGV\n"); 2917 + return ret; 2918 + } 2919 + 2920 + dev_priv->skl_sagv_status = I915_SKL_SAGV_ENABLED; 2921 + return 0; 2922 + } 2923 + 2924 + static int 2925 + skl_do_sagv_disable(struct drm_i915_private *dev_priv) 2926 + { 2927 + int ret; 2928 + uint32_t temp = GEN9_SAGV_DISABLE; 2929 + 2930 + ret = sandybridge_pcode_read(dev_priv, GEN9_PCODE_SAGV_CONTROL, 2931 + &temp); 2932 + if (ret) 2933 + return ret; 2934 + else 2935 + return temp & GEN9_SAGV_IS_DISABLED; 2936 + } 2937 + 2938 + int 2939 + skl_disable_sagv(struct drm_i915_private *dev_priv) 2940 + { 2941 + int ret, result; 2942 + 2943 + if (dev_priv->skl_sagv_status == I915_SKL_SAGV_NOT_CONTROLLED || 2944 + dev_priv->skl_sagv_status == I915_SKL_SAGV_DISABLED) 2945 + return 0; 2946 + 2947 + DRM_DEBUG_KMS("Disabling the SAGV\n"); 2948 + mutex_lock(&dev_priv->rps.hw_lock); 2949 + 2950 + /* bspec says to keep retrying for at least 1 ms */ 2951 + ret = wait_for(result = skl_do_sagv_disable(dev_priv), 1); 2952 + mutex_unlock(&dev_priv->rps.hw_lock); 2953 + 2954 + if (ret == -ETIMEDOUT) { 2955 + DRM_ERROR("Request to disable SAGV timed out\n"); 2956 + return -ETIMEDOUT; 2957 + } 2958 + 2959 + /* 2960 + * Some skl systems, pre-release machines in particular, 2961 + * don't actually have an SAGV. 
2962 + */ 2963 + if (result == -ENXIO) { 2964 + DRM_DEBUG_DRIVER("No SAGV found on system, ignoring\n"); 2965 + dev_priv->skl_sagv_status = I915_SKL_SAGV_NOT_CONTROLLED; 2966 + return 0; 2967 + } else if (result < 0) { 2968 + DRM_ERROR("Failed to disable the SAGV\n"); 2969 + return result; 2970 + } 2971 + 2972 + dev_priv->skl_sagv_status = I915_SKL_SAGV_DISABLED; 2973 + return 0; 2974 + } 2975 + 2976 + bool skl_can_enable_sagv(struct drm_atomic_state *state) 2977 + { 2978 + struct drm_device *dev = state->dev; 2979 + struct drm_i915_private *dev_priv = to_i915(dev); 2980 + struct intel_atomic_state *intel_state = to_intel_atomic_state(state); 2981 + struct drm_crtc *crtc; 2982 + enum pipe pipe; 2983 + int level, plane; 2984 + 2985 + /* 2986 + * SKL workaround: bspec recommends we disable the SAGV when we have 2987 + * more then one pipe enabled 2988 + * 2989 + * If there are no active CRTCs, no additional checks need be performed 2990 + */ 2991 + if (hweight32(intel_state->active_crtcs) == 0) 2992 + return true; 2993 + else if (hweight32(intel_state->active_crtcs) > 1) 2994 + return false; 2995 + 2996 + /* Since we're now guaranteed to only have one active CRTC... 
*/ 2997 + pipe = ffs(intel_state->active_crtcs) - 1; 2998 + crtc = dev_priv->pipe_to_crtc_mapping[pipe]; 2999 + 3000 + if (crtc->state->mode.flags & DRM_MODE_FLAG_INTERLACE) 3001 + return false; 3002 + 3003 + for_each_plane(dev_priv, pipe, plane) { 3004 + /* Skip this plane if it's not enabled */ 3005 + if (intel_state->wm_results.plane[pipe][plane][0] == 0) 3006 + continue; 3007 + 3008 + /* Find the highest enabled wm level for this plane */ 3009 + for (level = ilk_wm_max_level(dev); 3010 + intel_state->wm_results.plane[pipe][plane][level] == 0; --level) 3011 + { } 3012 + 3013 + /* 3014 + * If any of the planes on this pipe don't enable wm levels 3015 + * that incur memory latencies higher then 30µs we can't enable 3016 + * the SAGV 3017 + */ 3018 + if (dev_priv->wm.skl_latency[level] < SKL_SAGV_BLOCK_TIME) 3019 + return false; 3020 + } 3021 + 3022 + return true; 2877 3023 } 2878 3024 2879 3025 static void ··· 3254 3106 total_data_rate += intel_cstate->wm.skl.plane_data_rate[id]; 3255 3107 total_data_rate += intel_cstate->wm.skl.plane_y_data_rate[id]; 3256 3108 } 3257 - 3258 - WARN_ON(cstate->plane_mask && total_data_rate == 0); 3259 3109 3260 3110 return total_data_rate; 3261 3111 } ··· 4058 3912 * pretend that all pipes switched active status so that we'll 4059 3913 * ensure a full DDB recompute. 4060 3914 */ 4061 - if (dev_priv->wm.distrust_bios_wm) 3915 + if (dev_priv->wm.distrust_bios_wm) { 3916 + ret = drm_modeset_lock(&dev->mode_config.connection_mutex, 3917 + state->acquire_ctx); 3918 + if (ret) 3919 + return ret; 3920 + 4062 3921 intel_state->active_pipe_changes = ~0; 3922 + 3923 + /* 3924 + * We usually only initialize intel_state->active_crtcs if we 3925 + * we're doing a modeset; make sure this field is always 3926 + * initialized during the sanitization process that happens 3927 + * on the first commit too. 
3928 + */ 3929 + if (!intel_state->modeset) 3930 + intel_state->active_crtcs = dev_priv->active_crtcs; 3931 + } 4063 3932 4064 3933 /* 4065 3934 * If the modeset changes which CRTC's are active, we need to ··· 4104 3943 ret = skl_allocate_pipe_ddb(cstate, ddb); 4105 3944 if (ret) 4106 3945 return ret; 3946 + 3947 + ret = drm_atomic_add_affected_planes(state, &intel_crtc->base); 3948 + if (ret) 3949 + return ret; 4107 3950 } 4108 3951 4109 3952 return 0; 3953 + } 3954 + 3955 + static void 3956 + skl_copy_wm_for_pipe(struct skl_wm_values *dst, 3957 + struct skl_wm_values *src, 3958 + enum pipe pipe) 3959 + { 3960 + dst->wm_linetime[pipe] = src->wm_linetime[pipe]; 3961 + memcpy(dst->plane[pipe], src->plane[pipe], 3962 + sizeof(dst->plane[pipe])); 3963 + memcpy(dst->plane_trans[pipe], src->plane_trans[pipe], 3964 + sizeof(dst->plane_trans[pipe])); 3965 + 3966 + dst->ddb.pipe[pipe] = src->ddb.pipe[pipe]; 3967 + memcpy(dst->ddb.y_plane[pipe], src->ddb.y_plane[pipe], 3968 + sizeof(dst->ddb.y_plane[pipe])); 3969 + memcpy(dst->ddb.plane[pipe], src->ddb.plane[pipe], 3970 + sizeof(dst->ddb.plane[pipe])); 4110 3971 } 4111 3972 4112 3973 static int ··· 4203 4020 struct drm_device *dev = crtc->dev; 4204 4021 struct drm_i915_private *dev_priv = to_i915(dev); 4205 4022 struct skl_wm_values *results = &dev_priv->wm.skl_results; 4023 + struct skl_wm_values *hw_vals = &dev_priv->wm.skl_hw; 4206 4024 struct intel_crtc_state *cstate = to_intel_crtc_state(crtc->state); 4207 4025 struct skl_pipe_wm *pipe_wm = &cstate->wm.skl.optimal; 4026 + int pipe; 4208 4027 4209 4028 if ((results->dirty_pipes & drm_crtc_mask(crtc)) == 0) 4210 4029 return; ··· 4218 4033 skl_write_wm_values(dev_priv, results); 4219 4034 skl_flush_wm_values(dev_priv, results); 4220 4035 4221 - /* store the new configuration */ 4222 - dev_priv->wm.skl_hw = *results; 4036 + /* 4037 + * Store the new configuration (but only for the pipes that have 4038 + * changed; the other values weren't recomputed). 
4039 + */ 4040 + for_each_pipe_masked(dev_priv, pipe, results->dirty_pipes) 4041 + skl_copy_wm_for_pipe(hw_vals, results, pipe); 4223 4042 4224 4043 mutex_unlock(&dev_priv->wm.wm_mutex); 4225 4044 } ··· 7847 7658 } 7848 7659 } 7849 7660 7661 + static inline int gen6_check_mailbox_status(struct drm_i915_private *dev_priv) 7662 + { 7663 + uint32_t flags = 7664 + I915_READ_FW(GEN6_PCODE_MAILBOX) & GEN6_PCODE_ERROR_MASK; 7665 + 7666 + switch (flags) { 7667 + case GEN6_PCODE_SUCCESS: 7668 + return 0; 7669 + case GEN6_PCODE_UNIMPLEMENTED_CMD: 7670 + case GEN6_PCODE_ILLEGAL_CMD: 7671 + return -ENXIO; 7672 + case GEN6_PCODE_MIN_FREQ_TABLE_GT_RATIO_OUT_OF_RANGE: 7673 + return -EOVERFLOW; 7674 + case GEN6_PCODE_TIMEOUT: 7675 + return -ETIMEDOUT; 7676 + default: 7677 + MISSING_CASE(flags) 7678 + return 0; 7679 + } 7680 + } 7681 + 7682 + static inline int gen7_check_mailbox_status(struct drm_i915_private *dev_priv) 7683 + { 7684 + uint32_t flags = 7685 + I915_READ_FW(GEN6_PCODE_MAILBOX) & GEN6_PCODE_ERROR_MASK; 7686 + 7687 + switch (flags) { 7688 + case GEN6_PCODE_SUCCESS: 7689 + return 0; 7690 + case GEN6_PCODE_ILLEGAL_CMD: 7691 + return -ENXIO; 7692 + case GEN7_PCODE_TIMEOUT: 7693 + return -ETIMEDOUT; 7694 + case GEN7_PCODE_ILLEGAL_DATA: 7695 + return -EINVAL; 7696 + case GEN7_PCODE_MIN_FREQ_TABLE_GT_RATIO_OUT_OF_RANGE: 7697 + return -EOVERFLOW; 7698 + default: 7699 + MISSING_CASE(flags); 7700 + return 0; 7701 + } 7702 + } 7703 + 7850 7704 int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u32 mbox, u32 *val) 7851 7705 { 7706 + int status; 7707 + 7852 7708 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock)); 7853 7709 7854 7710 /* GEN6_PCODE_* are outside of the forcewake domain, we can ··· 7920 7686 *val = I915_READ_FW(GEN6_PCODE_DATA); 7921 7687 I915_WRITE_FW(GEN6_PCODE_DATA, 0); 7922 7688 7689 + if (INTEL_GEN(dev_priv) > 6) 7690 + status = gen7_check_mailbox_status(dev_priv); 7691 + else 7692 + status = gen6_check_mailbox_status(dev_priv); 7693 + 7694 + if 
(status) { 7695 + DRM_DEBUG_DRIVER("warning: pcode (read) mailbox access failed: %d\n", 7696 + status); 7697 + return status; 7698 + } 7699 + 7923 7700 return 0; 7924 7701 } 7925 7702 7926 7703 int sandybridge_pcode_write(struct drm_i915_private *dev_priv, 7927 - u32 mbox, u32 val) 7704 + u32 mbox, u32 val) 7928 7705 { 7706 + int status; 7707 + 7929 7708 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock)); 7930 7709 7931 7710 /* GEN6_PCODE_* are outside of the forcewake domain, we can ··· 7962 7715 } 7963 7716 7964 7717 I915_WRITE_FW(GEN6_PCODE_DATA, 0); 7718 + 7719 + if (INTEL_GEN(dev_priv) > 6) 7720 + status = gen7_check_mailbox_status(dev_priv); 7721 + else 7722 + status = gen6_check_mailbox_status(dev_priv); 7723 + 7724 + if (status) { 7725 + DRM_DEBUG_DRIVER("warning: pcode (write) mailbox access failed: %d\n", 7726 + status); 7727 + return status; 7728 + } 7965 7729 7966 7730 return 0; 7967 7731 }
+4
drivers/gpu/drm/qxl/qxl_fb.c
··· 73 73 } 74 74 } 75 75 76 + #ifdef CONFIG_DRM_FBDEV_EMULATION 76 77 static struct fb_deferred_io qxl_defio = { 77 78 .delay = QXL_DIRTY_DELAY, 78 79 .deferred_io = drm_fb_helper_deferred_io, 79 80 }; 81 + #endif 80 82 81 83 static struct fb_ops qxlfb_ops = { 82 84 .owner = THIS_MODULE, ··· 315 313 goto out_destroy_fbi; 316 314 } 317 315 316 + #ifdef CONFIG_DRM_FBDEV_EMULATION 318 317 info->fbdefio = &qxl_defio; 319 318 fb_deferred_io_init(info); 319 + #endif 320 320 321 321 qdev->fbdev_info = info; 322 322 qdev->fbdev_qfb = &qfbdev->qfb;
+3 -1
drivers/gpu/drm/radeon/atombios_crtc.c
··· 627 627 if (radeon_crtc->ss.refdiv) { 628 628 radeon_crtc->pll_flags |= RADEON_PLL_USE_REF_DIV; 629 629 radeon_crtc->pll_reference_div = radeon_crtc->ss.refdiv; 630 - if (rdev->family >= CHIP_RV770) 630 + if (ASIC_IS_AVIVO(rdev) && 631 + rdev->family != CHIP_RS780 && 632 + rdev->family != CHIP_RS880) 631 633 radeon_crtc->pll_flags |= RADEON_PLL_USE_FRAC_FB_DIV; 632 634 } 633 635 }
+2 -2
drivers/gpu/drm/radeon/radeon_ttm.c
··· 263 263 264 264 rdev = radeon_get_rdev(bo->bdev); 265 265 ridx = radeon_copy_ring_index(rdev); 266 - old_start = old_mem->start << PAGE_SHIFT; 267 - new_start = new_mem->start << PAGE_SHIFT; 266 + old_start = (u64)old_mem->start << PAGE_SHIFT; 267 + new_start = (u64)new_mem->start << PAGE_SHIFT; 268 268 269 269 switch (old_mem->mem_type) { 270 270 case TTM_PL_VRAM:
+36 -7
drivers/gpu/drm/tegra/dsi.c
··· 840 840 .destroy = tegra_output_encoder_destroy, 841 841 }; 842 842 843 + static void tegra_dsi_unprepare(struct tegra_dsi *dsi) 844 + { 845 + int err; 846 + 847 + if (dsi->slave) 848 + tegra_dsi_unprepare(dsi->slave); 849 + 850 + err = tegra_mipi_disable(dsi->mipi); 851 + if (err < 0) 852 + dev_err(dsi->dev, "failed to disable MIPI calibration: %d\n", 853 + err); 854 + 855 + pm_runtime_put(dsi->dev); 856 + } 857 + 843 858 static void tegra_dsi_encoder_disable(struct drm_encoder *encoder) 844 859 { 845 860 struct tegra_output *output = encoder_to_output(encoder); ··· 891 876 892 877 tegra_dsi_disable(dsi); 893 878 894 - pm_runtime_put(dsi->dev); 879 + tegra_dsi_unprepare(dsi); 880 + } 881 + 882 + static void tegra_dsi_prepare(struct tegra_dsi *dsi) 883 + { 884 + int err; 885 + 886 + pm_runtime_get_sync(dsi->dev); 887 + 888 + err = tegra_mipi_enable(dsi->mipi); 889 + if (err < 0) 890 + dev_err(dsi->dev, "failed to enable MIPI calibration: %d\n", 891 + err); 892 + 893 + err = tegra_dsi_pad_calibrate(dsi); 894 + if (err < 0) 895 + dev_err(dsi->dev, "MIPI calibration failed: %d\n", err); 896 + 897 + if (dsi->slave) 898 + tegra_dsi_prepare(dsi->slave); 895 899 } 896 900 897 901 static void tegra_dsi_encoder_enable(struct drm_encoder *encoder) ··· 921 887 struct tegra_dsi *dsi = to_dsi(output); 922 888 struct tegra_dsi_state *state; 923 889 u32 value; 924 - int err; 925 890 926 - pm_runtime_get_sync(dsi->dev); 927 - 928 - err = tegra_dsi_pad_calibrate(dsi); 929 - if (err < 0) 930 - dev_err(dsi->dev, "MIPI calibration failed: %d\n", err); 891 + tegra_dsi_prepare(dsi); 931 892 932 893 state = tegra_dsi_get_state(dsi); 933 894
+4
drivers/gpu/drm/udl/udl_fb.c
··· 203 203 204 204 ufbdev->fb_count++; 205 205 206 + #ifdef CONFIG_DRM_FBDEV_EMULATION 206 207 if (fb_defio && (info->fbdefio == NULL)) { 207 208 /* enable defio at last moment if not disabled by client */ 208 209 ··· 219 218 info->fbdefio = fbdefio; 220 219 fb_deferred_io_init(info); 221 220 } 221 + #endif 222 222 223 223 pr_notice("open /dev/fb%d user=%d fb_info=%p count=%d\n", 224 224 info->node, user, info, ufbdev->fb_count); ··· 237 235 238 236 ufbdev->fb_count--; 239 237 238 + #ifdef CONFIG_DRM_FBDEV_EMULATION 240 239 if ((ufbdev->fb_count == 0) && (info->fbdefio)) { 241 240 fb_deferred_io_cleanup(info); 242 241 kfree(info->fbdefio); 243 242 info->fbdefio = NULL; 244 243 info->fbops->fb_mmap = udl_fb_mmap; 245 244 } 245 + #endif 246 246 247 247 pr_warn("released /dev/fb%d user=%d count=%d\n", 248 248 info->node, user, ufbdev->fb_count);
+32 -33
drivers/gpu/host1x/mipi.c
··· 242 242 dev->pads = args.args[0]; 243 243 dev->device = device; 244 244 245 - mutex_lock(&dev->mipi->lock); 246 - 247 - if (dev->mipi->usage_count++ == 0) { 248 - err = tegra_mipi_power_up(dev->mipi); 249 - if (err < 0) { 250 - dev_err(dev->mipi->dev, 251 - "failed to power up MIPI bricks: %d\n", 252 - err); 253 - return ERR_PTR(err); 254 - } 255 - } 256 - 257 - mutex_unlock(&dev->mipi->lock); 258 - 259 245 return dev; 260 246 261 247 put: ··· 256 270 257 271 void tegra_mipi_free(struct tegra_mipi_device *device) 258 272 { 259 - int err; 260 - 261 - mutex_lock(&device->mipi->lock); 262 - 263 - if (--device->mipi->usage_count == 0) { 264 - err = tegra_mipi_power_down(device->mipi); 265 - if (err < 0) { 266 - /* 267 - * Not much that can be done here, so an error message 268 - * will have to do. 269 - */ 270 - dev_err(device->mipi->dev, 271 - "failed to power down MIPI bricks: %d\n", 272 - err); 273 - } 274 - } 275 - 276 - mutex_unlock(&device->mipi->lock); 277 - 278 273 platform_device_put(device->pdev); 279 274 kfree(device); 280 275 } 281 276 EXPORT_SYMBOL(tegra_mipi_free); 277 + 278 + int tegra_mipi_enable(struct tegra_mipi_device *dev) 279 + { 280 + int err = 0; 281 + 282 + mutex_lock(&dev->mipi->lock); 283 + 284 + if (dev->mipi->usage_count++ == 0) 285 + err = tegra_mipi_power_up(dev->mipi); 286 + 287 + mutex_unlock(&dev->mipi->lock); 288 + 289 + return err; 290 + 291 + } 292 + EXPORT_SYMBOL(tegra_mipi_enable); 293 + 294 + int tegra_mipi_disable(struct tegra_mipi_device *dev) 295 + { 296 + int err = 0; 297 + 298 + mutex_lock(&dev->mipi->lock); 299 + 300 + if (--dev->mipi->usage_count == 0) 301 + err = tegra_mipi_power_down(dev->mipi); 302 + 303 + mutex_unlock(&dev->mipi->lock); 304 + 305 + return err; 306 + 307 + } 308 + EXPORT_SYMBOL(tegra_mipi_disable); 282 309 283 310 static int tegra_mipi_wait(struct tegra_mipi *mipi) 284 311 {
+2
include/linux/host1x.h
··· 304 304 305 305 struct tegra_mipi_device *tegra_mipi_request(struct device *device); 306 306 void tegra_mipi_free(struct tegra_mipi_device *device); 307 + int tegra_mipi_enable(struct tegra_mipi_device *device); 308 + int tegra_mipi_disable(struct tegra_mipi_device *device); 307 309 int tegra_mipi_calibrate(struct tegra_mipi_device *device); 308 310 309 311 #endif