Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
Topics: kernel, os, linux

Merge branch 'drm-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6

* 'drm-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6:
  drm/i915: Add missing mutex_lock(&dev->struct_mutex)
  drm/i915: fix WC mapping in non-GEM i915 code.
  drm/i915: Fix regression in 95ca9d
  drm/i915: Retire requests from i915_gem_busy_ioctl.
  drm/i915: suspend/resume GEM when KMS is active
  drm/i915: Don't let a device flush to prepare buffers clear new write_domains.
  drm/i915: Cut two args to set_to_gpu_domain that confused this tricky path.

Showing 5 changed files with 67 additions and 27 deletions.
drivers/gpu/drm/i915/i915_dma.c (+1 -1)
···
 	dev_priv->ring.map.flags = 0;
 	dev_priv->ring.map.mtrr = 0;
 
-	drm_core_ioremap(&dev_priv->ring.map, dev);
+	drm_core_ioremap_wc(&dev_priv->ring.map, dev);
 
 	if (dev_priv->ring.map.handle == NULL) {
 		i915_dma_cleanup(dev);
drivers/gpu/drm/i915/i915_drv.c (+22 -1)
···
  *
  */
 
+#include <linux/device.h>
 #include "drmP.h"
 #include "drm.h"
 #include "i915_drm.h"
···
 
 	i915_save_state(dev);
 
+	/* If KMS is active, we do the leavevt stuff here */
+	if (drm_core_check_feature(dev, DRIVER_MODESET) && i915_gem_idle(dev)) {
+		dev_err(&dev->pdev->dev, "GEM idle failed, aborting suspend\n");
+		return -EBUSY;
+	}
+
 	intel_opregion_free(dev);
 
 	if (state.event == PM_EVENT_SUSPEND) {
···
 
 static int i915_resume(struct drm_device *dev)
 {
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int ret = 0;
+
 	pci_set_power_state(dev->pdev, PCI_D0);
 	pci_restore_state(dev->pdev);
 	if (pci_enable_device(dev->pdev))
···
 
 	intel_opregion_init(dev);
 
-	return 0;
+	/* KMS EnterVT equivalent */
+	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+		mutex_lock(&dev->struct_mutex);
+		dev_priv->mm.suspended = 0;
+
+		ret = i915_gem_init_ringbuffer(dev);
+		if (ret != 0)
+			ret = -1;
+		mutex_unlock(&dev->struct_mutex);
+	}
+
+	return ret;
 }
 
 static struct vm_operations_struct i915_gem_vm_ops = {
drivers/gpu/drm/i915/i915_drv.h (+1)
···
 void i915_gem_cleanup_ringbuffer(struct drm_device *dev);
 int i915_gem_do_init(struct drm_device *dev, unsigned long start,
 		     unsigned long end);
+int i915_gem_idle(struct drm_device *dev);
 int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
 int i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj,
 				      int write);
drivers/gpu/drm/i915/i915_gem.c (+42 -25)
···
 
 #define I915_GEM_GPU_DOMAINS (~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT))
 
-static void
-i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj,
-				  uint32_t read_domains,
-				  uint32_t write_domain);
 static void i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj);
 static void i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj);
 static void i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj);
···
  * drm_agp_chipset_flush
  */
 static void
-i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj,
-				  uint32_t read_domains,
-				  uint32_t write_domain)
+i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj)
 {
 	struct drm_device *dev = obj->dev;
 	struct drm_i915_gem_object *obj_priv = obj->driver_private;
 	uint32_t invalidate_domains = 0;
 	uint32_t flush_domains = 0;
 
-	BUG_ON(read_domains & I915_GEM_DOMAIN_CPU);
-	BUG_ON(write_domain == I915_GEM_DOMAIN_CPU);
+	BUG_ON(obj->pending_read_domains & I915_GEM_DOMAIN_CPU);
+	BUG_ON(obj->pending_write_domain == I915_GEM_DOMAIN_CPU);
 
 #if WATCH_BUF
 	DRM_INFO("%s: object %p read %08x -> %08x write %08x -> %08x\n",
 		 __func__, obj,
-		 obj->read_domains, read_domains,
-		 obj->write_domain, write_domain);
+		 obj->read_domains, obj->pending_read_domains,
+		 obj->write_domain, obj->pending_write_domain);
 #endif
 	/*
 	 * If the object isn't moving to a new write domain,
 	 * let the object stay in multiple read domains
 	 */
-	if (write_domain == 0)
-		read_domains |= obj->read_domains;
+	if (obj->pending_write_domain == 0)
+		obj->pending_read_domains |= obj->read_domains;
 	else
 		obj_priv->dirty = 1;
 
···
 	 * any read domains which differ from the old
 	 * write domain
 	 */
-	if (obj->write_domain && obj->write_domain != read_domains) {
+	if (obj->write_domain &&
+	    obj->write_domain != obj->pending_read_domains) {
 		flush_domains |= obj->write_domain;
-		invalidate_domains |= read_domains & ~obj->write_domain;
+		invalidate_domains |=
+			obj->pending_read_domains & ~obj->write_domain;
 	}
 	/*
 	 * Invalidate any read caches which may have
 	 * stale data. That is, any new read domains.
 	 */
-	invalidate_domains |= read_domains & ~obj->read_domains;
+	invalidate_domains |= obj->pending_read_domains & ~obj->read_domains;
 	if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_CPU) {
 #if WATCH_BUF
 		DRM_INFO("%s: CPU domain flush %08x invalidate %08x\n",
···
 		i915_gem_clflush_object(obj);
 	}
 
-	if ((write_domain | flush_domains) != 0)
-		obj->write_domain = write_domain;
-	obj->read_domains = read_domains;
+	/* The actual obj->write_domain will be updated with
+	 * pending_write_domain after we emit the accumulated flush for all
+	 * of our domain changes in execbuffers (which clears objects'
+	 * write_domains). So if we have a current write domain that we
+	 * aren't changing, set pending_write_domain to that.
+	 */
+	if (flush_domains == 0 && obj->pending_write_domain == 0)
+		obj->pending_write_domain = obj->write_domain;
+	obj->read_domains = obj->pending_read_domains;
 
 	dev->invalidate_domains |= invalidate_domains;
 	dev->flush_domains |= flush_domains;
···
 		struct drm_gem_object *obj = object_list[i];
 
 		/* Compute new gpu domains and update invalidate/flush */
-		i915_gem_object_set_to_gpu_domain(obj,
-						  obj->pending_read_domains,
-						  obj->pending_write_domain);
+		i915_gem_object_set_to_gpu_domain(obj);
 	}
 
 	i915_verify_inactive(dev, __FILE__, __LINE__);
···
 			       dev->flush_domains);
 		if (dev->flush_domains)
 			(void)i915_add_request(dev, dev->flush_domains);
+	}
+
+	for (i = 0; i < args->buffer_count; i++) {
+		struct drm_gem_object *obj = object_list[i];
+
+		obj->write_domain = obj->pending_write_domain;
 	}
 
 	i915_verify_inactive(dev, __FILE__, __LINE__);
···
 		return -EBADF;
 	}
 
+	/* Update the active list for the hardware's current position.
+	 * Otherwise this only updates on a delayed timer or when irqs are
+	 * actually unmasked, and our working set ends up being larger than
+	 * required.
+	 */
+	i915_gem_retire_requests(dev);
+
 	obj_priv = obj->driver_private;
 	/* Don't count being on the flushing list against the object being
 	 * done. Otherwise, a buffer left on the flushing list but not getting
···
 	return 0;
 }
 
-static int
+int
 i915_gem_idle(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
···
 i915_gem_cleanup_hws(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	struct drm_gem_object *obj = dev_priv->hws_obj;
-	struct drm_i915_gem_object *obj_priv = obj->driver_private;
+	struct drm_gem_object *obj;
+	struct drm_i915_gem_object *obj_priv;
 
 	if (dev_priv->hws_obj == NULL)
 		return;
+
+	obj = dev_priv->hws_obj;
+	obj_priv = obj->driver_private;
 
 	kunmap(obj_priv->page_list[0]);
 	i915_gem_object_unpin(obj);
 	drm_gem_object_unreference(obj);
 	dev_priv->hws_obj = NULL;
+
 	memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
 	dev_priv->hw_status_page = NULL;
 
drivers/gpu/drm/i915/intel_display.c (+1)
···
 		temp = CURSOR_MODE_DISABLE;
 		addr = 0;
 		bo = NULL;
+		mutex_lock(&dev->struct_mutex);
 		goto finish;
 	}
 