Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

Merge branch 'drm-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6

* 'drm-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6:
drm: index i shadowed in 2nd loop
drm/nv50-nvc0: prevent multiple vm/bar flushes occurring simultaneously
drm/nouveau: fix regression causing ttm to not be able to evict vram
drm/i915: Rebind the buffer if its alignment constraints change with tiling
drm/i915: Disable GPU semaphores by default
drm/i915: Do not overflow the MMADDR write FIFO
Revert "drm/i915: fix corruptions on i8xx due to relaxed fencing"

+89 -46
+2 -2
drivers/gpu/drm/drm_fb_helper.c
··· 672 672 struct drm_crtc_helper_funcs *crtc_funcs; 673 673 u16 *red, *green, *blue, *transp; 674 674 struct drm_crtc *crtc; 675 - int i, rc = 0; 675 + int i, j, rc = 0; 676 676 int start; 677 677 678 678 for (i = 0; i < fb_helper->crtc_count; i++) { ··· 685 685 transp = cmap->transp; 686 686 start = cmap->start; 687 687 688 - for (i = 0; i < cmap->len; i++) { 688 + for (j = 0; j < cmap->len; j++) { 689 689 u16 hred, hgreen, hblue, htransp = 0xffff; 690 690 691 691 hred = *red++;
+2 -2
drivers/gpu/drm/i915/i915_debugfs.c
··· 865 865 int max_freq; 866 866 867 867 /* RPSTAT1 is in the GT power well */ 868 - __gen6_force_wake_get(dev_priv); 868 + __gen6_gt_force_wake_get(dev_priv); 869 869 870 870 seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status); 871 871 seq_printf(m, "RPSTAT1: 0x%08x\n", I915_READ(GEN6_RPSTAT1)); ··· 888 888 seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n", 889 889 max_freq * 100); 890 890 891 - __gen6_force_wake_put(dev_priv); 891 + __gen6_gt_force_wake_put(dev_priv); 892 892 } else { 893 893 seq_printf(m, "no P-state info available\n"); 894 894 }
+15 -2
drivers/gpu/drm/i915/i915_drv.c
··· 46 46 unsigned int i915_powersave = 1; 47 47 module_param_named(powersave, i915_powersave, int, 0600); 48 48 49 + unsigned int i915_semaphores = 0; 50 + module_param_named(semaphores, i915_semaphores, int, 0600); 51 + 49 52 unsigned int i915_enable_rc6 = 0; 50 53 module_param_named(i915_enable_rc6, i915_enable_rc6, int, 0600); 51 54 ··· 257 254 } 258 255 } 259 256 260 - void __gen6_force_wake_get(struct drm_i915_private *dev_priv) 257 + void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv) 261 258 { 262 259 int count; 263 260 ··· 273 270 udelay(10); 274 271 } 275 272 276 - void __gen6_force_wake_put(struct drm_i915_private *dev_priv) 273 + void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv) 277 274 { 278 275 I915_WRITE_NOTRACE(FORCEWAKE, 0); 279 276 POSTING_READ(FORCEWAKE); 277 + } 278 + 279 + void __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv) 280 + { 281 + int loop = 500; 282 + u32 fifo = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES); 283 + while (fifo < 20 && loop--) { 284 + udelay(10); 285 + fifo = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES); 286 + } 280 287 } 281 288 282 289 static int i915_drm_freeze(struct drm_device *dev)
+19 -5
drivers/gpu/drm/i915/i915_drv.h
··· 956 956 extern int i915_max_ioctl; 957 957 extern unsigned int i915_fbpercrtc; 958 958 extern unsigned int i915_powersave; 959 + extern unsigned int i915_semaphores; 959 960 extern unsigned int i915_lvds_downclock; 960 961 extern unsigned int i915_panel_use_ssc; 961 962 extern unsigned int i915_enable_rc6; ··· 1178 1177 void i915_gem_free_all_phys_object(struct drm_device *dev); 1179 1178 void i915_gem_release(struct drm_device *dev, struct drm_file *file); 1180 1179 1180 + uint32_t 1181 + i915_gem_get_unfenced_gtt_alignment(struct drm_i915_gem_object *obj); 1182 + 1181 1183 /* i915_gem_gtt.c */ 1182 1184 void i915_gem_restore_gtt_mappings(struct drm_device *dev); 1183 1185 int __must_check i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj); ··· 1357 1353 * must be set to prevent GT core from power down and stale values being 1358 1354 * returned. 1359 1355 */ 1360 - void __gen6_force_wake_get(struct drm_i915_private *dev_priv); 1361 - void __gen6_force_wake_put (struct drm_i915_private *dev_priv); 1362 - static inline u32 i915_safe_read(struct drm_i915_private *dev_priv, u32 reg) 1356 + void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv); 1357 + void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv); 1358 + void __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv); 1359 + 1360 + static inline u32 i915_gt_read(struct drm_i915_private *dev_priv, u32 reg) 1363 1361 { 1364 1362 u32 val; 1365 1363 1366 1364 if (dev_priv->info->gen >= 6) { 1367 - __gen6_force_wake_get(dev_priv); 1365 + __gen6_gt_force_wake_get(dev_priv); 1368 1366 val = I915_READ(reg); 1369 - __gen6_force_wake_put(dev_priv); 1367 + __gen6_gt_force_wake_put(dev_priv); 1370 1368 } else 1371 1369 val = I915_READ(reg); 1372 1370 1373 1371 return val; 1372 + } 1373 + 1374 + static inline void i915_gt_write(struct drm_i915_private *dev_priv, 1375 + u32 reg, u32 val) 1376 + { 1377 + if (dev_priv->info->gen >= 6) 1378 + __gen6_gt_wait_for_fifo(dev_priv); 1379 + 
I915_WRITE(reg, val); 1374 1380 } 1375 1381 1376 1382 static inline void
+1 -1
drivers/gpu/drm/i915/i915_gem.c
··· 1398 1398 * Return the required GTT alignment for an object, only taking into account 1399 1399 * unfenced tiled surface requirements. 1400 1400 */ 1401 - static uint32_t 1401 + uint32_t 1402 1402 i915_gem_get_unfenced_gtt_alignment(struct drm_i915_gem_object *obj) 1403 1403 { 1404 1404 struct drm_device *dev = obj->base.dev;
+2 -2
drivers/gpu/drm/i915/i915_gem_execbuffer.c
··· 772 772 if (from == NULL || to == from) 773 773 return 0; 774 774 775 - /* XXX gpu semaphores are currently causing hard hangs on SNB mobile */ 776 - if (INTEL_INFO(obj->base.dev)->gen < 6 || IS_MOBILE(obj->base.dev)) 775 + /* XXX gpu semaphores are implicated in various hard hangs on SNB */ 776 + if (INTEL_INFO(obj->base.dev)->gen < 6 || !i915_semaphores) 777 777 return i915_gem_object_wait_rendering(obj, true); 778 778 779 779 idx = intel_ring_sync_index(from, to);
+18 -19
drivers/gpu/drm/i915/i915_gem_tiling.c
··· 184 184 static bool 185 185 i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode) 186 186 { 187 - int tile_width, tile_height; 187 + int tile_width; 188 188 189 189 /* Linear is always fine */ 190 190 if (tiling_mode == I915_TILING_NONE) ··· 214 214 return false; 215 215 } 216 216 } 217 - 218 - if (IS_GEN2(dev) || 219 - (tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev))) 220 - tile_height = 32; 221 - else 222 - tile_height = 8; 223 - /* i8xx is strange: It has 2 interleaved rows of tiles, so needs an even 224 - * number of tile rows. */ 225 - if (IS_GEN2(dev)) 226 - tile_height *= 2; 227 - 228 - /* Size needs to be aligned to a full tile row */ 229 - if (size & (tile_height * stride - 1)) 230 - return false; 231 217 232 218 /* 965+ just needs multiples of tile width */ 233 219 if (INTEL_INFO(dev)->gen >= 4) { ··· 349 363 (obj->gtt_offset + obj->base.size <= dev_priv->mm.gtt_mappable_end && 350 364 i915_gem_object_fence_ok(obj, args->tiling_mode)); 351 365 352 - obj->tiling_changed = true; 353 - obj->tiling_mode = args->tiling_mode; 354 - obj->stride = args->stride; 366 + /* Rebind if we need a change of alignment */ 367 + if (!obj->map_and_fenceable) { 368 + u32 unfenced_alignment = 369 + i915_gem_get_unfenced_gtt_alignment(obj); 370 + if (obj->gtt_offset & (unfenced_alignment - 1)) 371 + ret = i915_gem_object_unbind(obj); 372 + } 373 + 374 + if (ret == 0) { 375 + obj->tiling_changed = true; 376 + obj->tiling_mode = args->tiling_mode; 377 + obj->stride = args->stride; 378 + } 355 379 } 380 + /* we have to maintain this existing ABI... */ 381 + args->stride = obj->stride; 382 + args->tiling_mode = obj->tiling_mode; 356 383 drm_gem_object_unreference(&obj->base); 357 384 mutex_unlock(&dev->struct_mutex); 358 385 359 - return 0; 386 + return ret; 360 387 } 361 388 362 389 /**
+2
drivers/gpu/drm/i915/i915_reg.h
··· 3261 3261 #define FORCEWAKE 0xA18C 3262 3262 #define FORCEWAKE_ACK 0x130090 3263 3263 3264 + #define GT_FIFO_FREE_ENTRIES 0x120008 3265 + 3264 3266 #define GEN6_RPNSWREQ 0xA008 3265 3267 #define GEN6_TURBO_DISABLE (1<<31) 3266 3268 #define GEN6_FREQUENCY(x) ((x)<<25)
+4 -4
drivers/gpu/drm/i915/intel_display.c
··· 1219 1219 u32 blt_ecoskpd; 1220 1220 1221 1221 /* Make sure blitter notifies FBC of writes */ 1222 - __gen6_force_wake_get(dev_priv); 1222 + __gen6_gt_force_wake_get(dev_priv); 1223 1223 blt_ecoskpd = I915_READ(GEN6_BLITTER_ECOSKPD); 1224 1224 blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY << 1225 1225 GEN6_BLITTER_LOCK_SHIFT; ··· 1230 1230 GEN6_BLITTER_LOCK_SHIFT); 1231 1231 I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd); 1232 1232 POSTING_READ(GEN6_BLITTER_ECOSKPD); 1233 - __gen6_force_wake_put(dev_priv); 1233 + __gen6_gt_force_wake_put(dev_priv); 1234 1234 } 1235 1235 1236 1236 static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval) ··· 6282 6282 * userspace... 6283 6283 */ 6284 6284 I915_WRITE(GEN6_RC_STATE, 0); 6285 - __gen6_force_wake_get(dev_priv); 6285 + __gen6_gt_force_wake_get(dev_priv); 6286 6286 6287 6287 /* disable the counters and set deterministic thresholds */ 6288 6288 I915_WRITE(GEN6_RC_CONTROL, 0); ··· 6380 6380 /* enable all PM interrupts */ 6381 6381 I915_WRITE(GEN6_PMINTRMSK, 0); 6382 6382 6383 - __gen6_force_wake_put(dev_priv); 6383 + __gen6_gt_force_wake_put(dev_priv); 6384 6384 } 6385 6385 6386 6386 void intel_enable_clock_gating(struct drm_device *dev)
+7 -6
drivers/gpu/drm/i915/intel_ringbuffer.h
··· 14 14 struct drm_i915_gem_object *obj; 15 15 }; 16 16 17 - #define I915_RING_READ(reg) i915_safe_read(dev_priv, reg) 17 + #define I915_RING_READ(reg) i915_gt_read(dev_priv, reg) 18 + #define I915_RING_WRITE(reg, val) i915_gt_write(dev_priv, reg, val) 18 19 19 20 #define I915_READ_TAIL(ring) I915_RING_READ(RING_TAIL((ring)->mmio_base)) 20 - #define I915_WRITE_TAIL(ring, val) I915_WRITE(RING_TAIL((ring)->mmio_base), val) 21 + #define I915_WRITE_TAIL(ring, val) I915_RING_WRITE(RING_TAIL((ring)->mmio_base), val) 21 22 22 23 #define I915_READ_START(ring) I915_RING_READ(RING_START((ring)->mmio_base)) 23 - #define I915_WRITE_START(ring, val) I915_WRITE(RING_START((ring)->mmio_base), val) 24 + #define I915_WRITE_START(ring, val) I915_RING_WRITE(RING_START((ring)->mmio_base), val) 24 25 25 26 #define I915_READ_HEAD(ring) I915_RING_READ(RING_HEAD((ring)->mmio_base)) 26 - #define I915_WRITE_HEAD(ring, val) I915_WRITE(RING_HEAD((ring)->mmio_base), val) 27 + #define I915_WRITE_HEAD(ring, val) I915_RING_WRITE(RING_HEAD((ring)->mmio_base), val) 27 28 28 29 #define I915_READ_CTL(ring) I915_RING_READ(RING_CTL((ring)->mmio_base)) 29 - #define I915_WRITE_CTL(ring, val) I915_WRITE(RING_CTL((ring)->mmio_base), val) 30 + #define I915_WRITE_CTL(ring, val) I915_RING_WRITE(RING_CTL((ring)->mmio_base), val) 30 31 31 - #define I915_WRITE_IMR(ring, val) I915_WRITE(RING_IMR((ring)->mmio_base), val) 32 32 #define I915_READ_IMR(ring) I915_RING_READ(RING_IMR((ring)->mmio_base)) 33 + #define I915_WRITE_IMR(ring, val) I915_RING_WRITE(RING_IMR((ring)->mmio_base), val) 33 34 34 35 #define I915_READ_NOPID(ring) I915_RING_READ(RING_NOPID((ring)->mmio_base)) 35 36 #define I915_READ_SYNC_0(ring) I915_RING_READ(RING_SYNC_0((ring)->mmio_base))
+4 -2
drivers/gpu/drm/nouveau/nouveau_mem.c
··· 725 725 ret = vram->get(dev, mem->num_pages << PAGE_SHIFT, 726 726 mem->page_alignment << PAGE_SHIFT, size_nc, 727 727 (nvbo->tile_flags >> 8) & 0xff, &node); 728 - if (ret) 729 - return ret; 728 + if (ret) { 729 + mem->mm_node = NULL; 730 + return (ret == -ENOSPC) ? 0 : ret; 731 + } 730 732 731 733 node->page_shift = 12; 732 734 if (nvbo->vma.node)
+1 -1
drivers/gpu/drm/nouveau/nouveau_mm.c
··· 123 123 return 0; 124 124 } 125 125 126 - return -ENOMEM; 126 + return -ENOSPC; 127 127 } 128 128 129 129 int
+8
drivers/gpu/drm/nouveau/nv50_instmem.c
··· 403 403 void 404 404 nv50_instmem_flush(struct drm_device *dev) 405 405 { 406 + struct drm_nouveau_private *dev_priv = dev->dev_private; 407 + 408 + spin_lock(&dev_priv->ramin_lock); 406 409 nv_wr32(dev, 0x00330c, 0x00000001); 407 410 if (!nv_wait(dev, 0x00330c, 0x00000002, 0x00000000)) 408 411 NV_ERROR(dev, "PRAMIN flush timeout\n"); 412 + spin_unlock(&dev_priv->ramin_lock); 409 413 } 410 414 411 415 void 412 416 nv84_instmem_flush(struct drm_device *dev) 413 417 { 418 + struct drm_nouveau_private *dev_priv = dev->dev_private; 419 + 420 + spin_lock(&dev_priv->ramin_lock); 414 421 nv_wr32(dev, 0x070000, 0x00000001); 415 422 if (!nv_wait(dev, 0x070000, 0x00000002, 0x00000000)) 416 423 NV_ERROR(dev, "PRAMIN flush timeout\n"); 424 + spin_unlock(&dev_priv->ramin_lock); 417 425 } 418 426
+4
drivers/gpu/drm/nouveau/nv50_vm.c
··· 169 169 void 170 170 nv50_vm_flush_engine(struct drm_device *dev, int engine) 171 171 { 172 + struct drm_nouveau_private *dev_priv = dev->dev_private; 173 + 174 + spin_lock(&dev_priv->ramin_lock); 172 175 nv_wr32(dev, 0x100c80, (engine << 16) | 1); 173 176 if (!nv_wait(dev, 0x100c80, 0x00000001, 0x00000000)) 174 177 NV_ERROR(dev, "vm flush timeout: engine %d\n", engine); 178 + spin_unlock(&dev_priv->ramin_lock); 175 179 }