Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
Merge branch 'drm-intel-next' of git://git.kernel.org/pub/scm/linux/kernel/git/anholt/drm-intel

* 'drm-intel-next' of git://git.kernel.org/pub/scm/linux/kernel/git/anholt/drm-intel:
drm/i915: fix scheduling while holding the new active list spinlock
drm/i915: Allow tiling of objects with bit 17 swizzling by the CPU.
drm/i915: Correctly set the write flag for get_user_pages in pread.
drm/i915: Fix use of uninitialized var in 40a5f0de
drm/i915: indicate framebuffer restore key in SysRq help message
drm/i915: sync hdmi detection by hdmi identifier with 2D
drm/i915: Fix a mismerge of the IGD patch (new .find_pll hooks missed)
drm/i915: Implement batch and ring buffer dumping

9 files changed: +412 -40
+7
drivers/gpu/drm/i915/i915_drv.h
···
         uint32_t tiling_mode;
         uint32_t stride;
 
+        /** Record of address bit 17 of each page at last unbind. */
+        long *bit_17;
+
         /** AGP mapping type (AGP_USER_MEMORY or AGP_USER_CACHED_MEMORY */
         uint32_t agp_type;
 
···
 void i915_gem_detach_phys_object(struct drm_device *dev,
                                  struct drm_gem_object *obj);
 void i915_gem_free_all_phys_object(struct drm_device *dev);
+int i915_gem_object_get_pages(struct drm_gem_object *obj);
+void i915_gem_object_put_pages(struct drm_gem_object *obj);
 
 /* i915_gem_tiling.c */
 void i915_gem_detect_bit_6_swizzle(struct drm_device *dev);
+void i915_gem_object_do_bit_17_swizzle(struct drm_gem_object *obj);
+void i915_gem_object_save_bit_17_swizzle(struct drm_gem_object *obj);
 
 /* i915_gem_debug.c */
 void i915_gem_dump_object(struct drm_gem_object *obj, int len,
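For reference, obj_priv->bit_17 is a plain bitmap with one bit per page: i915_gem_object_save_bit_17_swizzle() records bit 17 of each page's physical address when the pages are released, and i915_gem_object_do_bit_17_swizzle() later compares the saved bits against the pages' new addresses. A minimal userspace sketch of that bookkeeping follows; the bitmap helpers and the page_phys[]/new_phys[] test data are invented for illustration and are not kernel APIs.

#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

#define BITS_PER_LONG_ (sizeof(unsigned long) * CHAR_BIT)
#define BITS_TO_LONGS_(n) (((n) + BITS_PER_LONG_ - 1) / BITS_PER_LONG_)

static void set_bit_(int i, unsigned long *map)
{
        map[i / BITS_PER_LONG_] |= 1UL << (i % BITS_PER_LONG_);
}

static void clear_bit_(int i, unsigned long *map)
{
        map[i / BITS_PER_LONG_] &= ~(1UL << (i % BITS_PER_LONG_));
}

static int test_bit_(int i, const unsigned long *map)
{
        return (map[i / BITS_PER_LONG_] >> (i % BITS_PER_LONG_)) & 1;
}

int main(void)
{
        /* Pretend physical page addresses at "unbind" time... */
        unsigned long page_phys[] = { 0x00000000, 0x00020000, 0x00040000, 0x00060000 };
        /* ...and after the pages came back from shmem later on. */
        unsigned long new_phys[]  = { 0x00020000, 0x00020000, 0x00040000, 0x00000000 };
        int page_count = 4, i;
        unsigned long *bit_17 = calloc(BITS_TO_LONGS_(page_count), sizeof(long));

        if (bit_17 == NULL)
                return 1;

        /* Save step: mirrors i915_gem_object_save_bit_17_swizzle(). */
        for (i = 0; i < page_count; i++) {
                if (page_phys[i] & (1UL << 17))
                        set_bit_(i, bit_17);
                else
                        clear_bit_(i, bit_17);
        }

        /* Restore step: pages whose bit 17 changed need re-swizzling. */
        for (i = 0; i < page_count; i++) {
                int new17 = (new_phys[i] >> 17) & 1;
                if (new17 != test_bit_(i, bit_17))
                        printf("page %d needs re-swizzling\n", i);
        }

        free(bit_17);
        return 0;
}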
+156 -31
drivers/gpu/drm/i915/i915_gem.c
···
                           uint64_t offset,
                           uint64_t size);
 static void i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj);
-static int i915_gem_object_get_pages(struct drm_gem_object *obj);
-static void i915_gem_object_put_pages(struct drm_gem_object *obj);
 static int i915_gem_object_wait_rendering(struct drm_gem_object *obj);
 static int i915_gem_object_bind_to_gtt(struct drm_gem_object *obj,
                                        unsigned alignment);
···
                 int length)
 {
         char __iomem *vaddr;
-        int ret;
+        int unwritten;
 
         vaddr = kmap_atomic(pages[page_base >> PAGE_SHIFT], KM_USER0);
         if (vaddr == NULL)
                 return -ENOMEM;
-        ret = __copy_to_user_inatomic(data, vaddr + page_offset, length);
+        unwritten = __copy_to_user_inatomic(data, vaddr + page_offset, length);
         kunmap_atomic(vaddr, KM_USER0);
 
-        return ret;
+        if (unwritten)
+                return -EFAULT;
+
+        return 0;
+}
+
+static int i915_gem_object_needs_bit17_swizzle(struct drm_gem_object *obj)
+{
+        drm_i915_private_t *dev_priv = obj->dev->dev_private;
+        struct drm_i915_gem_object *obj_priv = obj->driver_private;
+
+        return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
+                obj_priv->tiling_mode != I915_TILING_NONE;
 }
 
 static inline int
···
 
         kunmap_atomic(src_vaddr, KM_USER1);
         kunmap_atomic(dst_vaddr, KM_USER0);
+
+        return 0;
+}
+
+static inline int
+slow_shmem_bit17_copy(struct page *gpu_page,
+                      int gpu_offset,
+                      struct page *cpu_page,
+                      int cpu_offset,
+                      int length,
+                      int is_read)
+{
+        char *gpu_vaddr, *cpu_vaddr;
+
+        /* Use the unswizzled path if this page isn't affected. */
+        if ((page_to_phys(gpu_page) & (1 << 17)) == 0) {
+                if (is_read)
+                        return slow_shmem_copy(cpu_page, cpu_offset,
+                                               gpu_page, gpu_offset, length);
+                else
+                        return slow_shmem_copy(gpu_page, gpu_offset,
+                                               cpu_page, cpu_offset, length);
+        }
+
+        gpu_vaddr = kmap_atomic(gpu_page, KM_USER0);
+        if (gpu_vaddr == NULL)
+                return -ENOMEM;
+
+        cpu_vaddr = kmap_atomic(cpu_page, KM_USER1);
+        if (cpu_vaddr == NULL) {
+                kunmap_atomic(gpu_vaddr, KM_USER0);
+                return -ENOMEM;
+        }
+
+        /* Copy the data, XORing A6 with A17 (1). The user already knows he's
+         * XORing with the other bits (A9 for Y, A9 and A10 for X)
+         */
+        while (length > 0) {
+                int cacheline_end = ALIGN(gpu_offset + 1, 64);
+                int this_length = min(cacheline_end - gpu_offset, length);
+                int swizzled_gpu_offset = gpu_offset ^ 64;
+
+                if (is_read) {
+                        memcpy(cpu_vaddr + cpu_offset,
+                               gpu_vaddr + swizzled_gpu_offset,
+                               this_length);
+                } else {
+                        memcpy(gpu_vaddr + swizzled_gpu_offset,
+                               cpu_vaddr + cpu_offset,
+                               this_length);
+                }
+                cpu_offset += this_length;
+                gpu_offset += this_length;
+                length -= this_length;
+        }
+
+        kunmap_atomic(cpu_vaddr, KM_USER1);
+        kunmap_atomic(gpu_vaddr, KM_USER0);
 
         return 0;
 }
···
         int page_length;
         int ret;
         uint64_t data_ptr = args->data_ptr;
+        int do_bit17_swizzling;
 
         remain = args->size;
 
···
 
         down_read(&mm->mmap_sem);
         pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
-                                      num_pages, 0, 0, user_pages, NULL);
+                                      num_pages, 1, 0, user_pages, NULL);
         up_read(&mm->mmap_sem);
         if (pinned_pages < num_pages) {
                 ret = -EFAULT;
                 goto fail_put_user_pages;
         }
+
+        do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
 
         mutex_lock(&dev->struct_mutex);
 
···
                 if ((data_page_offset + page_length) > PAGE_SIZE)
                         page_length = PAGE_SIZE - data_page_offset;
 
-                ret = slow_shmem_copy(user_pages[data_page_index],
-                                      data_page_offset,
-                                      obj_priv->pages[shmem_page_index],
-                                      shmem_page_offset,
-                                      page_length);
+                if (do_bit17_swizzling) {
+                        ret = slow_shmem_bit17_copy(obj_priv->pages[shmem_page_index],
+                                                    shmem_page_offset,
+                                                    user_pages[data_page_index],
+                                                    data_page_offset,
+                                                    page_length,
+                                                    1);
+                } else {
+                        ret = slow_shmem_copy(user_pages[data_page_index],
+                                              data_page_offset,
+                                              obj_priv->pages[shmem_page_index],
+                                              shmem_page_offset,
+                                              page_length);
+                }
                 if (ret)
                         goto fail_put_pages;
 
···
                 return -EINVAL;
         }
 
-        ret = i915_gem_shmem_pread_fast(dev, obj, args, file_priv);
-        if (ret != 0)
+        if (i915_gem_object_needs_bit17_swizzle(obj)) {
                 ret = i915_gem_shmem_pread_slow(dev, obj, args, file_priv);
+        } else {
+                ret = i915_gem_shmem_pread_fast(dev, obj, args, file_priv);
+                if (ret != 0)
+                        ret = i915_gem_shmem_pread_slow(dev, obj, args,
+                                                        file_priv);
+        }
 
         drm_gem_object_unreference(obj);
 
···
         int page_length;
         int ret;
         uint64_t data_ptr = args->data_ptr;
+        int do_bit17_swizzling;
 
         remain = args->size;
 
···
                 ret = -EFAULT;
                 goto fail_put_user_pages;
         }
+
+        do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
 
         mutex_lock(&dev->struct_mutex);
 
···
                 if ((data_page_offset + page_length) > PAGE_SIZE)
                         page_length = PAGE_SIZE - data_page_offset;
 
-                ret = slow_shmem_copy(obj_priv->pages[shmem_page_index],
-                                      shmem_page_offset,
-                                      user_pages[data_page_index],
-                                      data_page_offset,
-                                      page_length);
+                if (do_bit17_swizzling) {
+                        ret = slow_shmem_bit17_copy(obj_priv->pages[shmem_page_index],
+                                                    shmem_page_offset,
+                                                    user_pages[data_page_index],
+                                                    data_page_offset,
+                                                    page_length,
+                                                    0);
+                } else {
+                        ret = slow_shmem_copy(obj_priv->pages[shmem_page_index],
+                                              shmem_page_offset,
+                                              user_pages[data_page_index],
+                                              data_page_offset,
+                                              page_length);
+                }
                 if (ret)
                         goto fail_put_pages;
 
···
                         ret = i915_gem_gtt_pwrite_slow(dev, obj, args,
                                                        file_priv);
                 }
+        } else if (i915_gem_object_needs_bit17_swizzle(obj)) {
+                ret = i915_gem_shmem_pwrite_slow(dev, obj, args, file_priv);
         } else {
                 ret = i915_gem_shmem_pwrite_fast(dev, obj, args, file_priv);
                 if (ret == -EFAULT) {
···
         return 0;
 }
 
-static void
+void
 i915_gem_object_put_pages(struct drm_gem_object *obj)
 {
         struct drm_i915_gem_object *obj_priv = obj->driver_private;
···
 
         if (--obj_priv->pages_refcount != 0)
                 return;
+
+        if (obj_priv->tiling_mode != I915_TILING_NONE)
+                i915_gem_object_save_bit_17_swizzle(obj);
 
         for (i = 0; i < page_count; i++)
                 if (obj_priv->pages[i] != NULL) {
···
 
                 if (obj->write_domain != 0)
                         i915_gem_object_move_to_flushing(obj);
-                else
+                else {
+                        /* Take a reference on the object so it won't be
+                         * freed while the spinlock is held. The list
+                         * protection for this spinlock is safe when breaking
+                         * the lock like this since the next thing we do
+                         * is just get the head of the list again.
+                         */
+                        drm_gem_object_reference(obj);
                         i915_gem_object_move_to_inactive(obj);
+                        spin_unlock(&dev_priv->mm.active_list_lock);
+                        drm_gem_object_unreference(obj);
+                        spin_lock(&dev_priv->mm.active_list_lock);
+                }
         }
 out:
         spin_unlock(&dev_priv->mm.active_list_lock);
···
         return ret;
 }
 
-static int
+int
 i915_gem_object_get_pages(struct drm_gem_object *obj)
 {
         struct drm_i915_gem_object *obj_priv = obj->driver_private;
···
                 }
                 obj_priv->pages[i] = page;
         }
+
+        if (obj_priv->tiling_mode != I915_TILING_NONE)
+                i915_gem_object_do_bit_17_swizzle(obj);
+
         return 0;
 }
 
···
                         drm_free(*relocs, reloc_count * sizeof(**relocs),
                                  DRM_MEM_DRIVER);
                         *relocs = NULL;
-                        return ret;
+                        return -EFAULT;
                 }
 
                 reloc_index += exec_list[i].relocation_count;
         }
 
-        return ret;
+        return 0;
 }
 
 static int
···
                             struct drm_i915_gem_relocation_entry *relocs)
 {
         uint32_t reloc_count = 0, i;
-        int ret;
+        int ret = 0;
 
         for (i = 0; i < buffer_count; i++) {
                 struct drm_i915_gem_relocation_entry __user *user_relocs;
+                int unwritten;
 
                 user_relocs = (void __user *)(uintptr_t)exec_list[i].relocs_ptr;
 
-                if (ret == 0) {
-                        ret = copy_to_user(user_relocs,
-                                           &relocs[reloc_count],
-                                           exec_list[i].relocation_count *
-                                           sizeof(*relocs));
+                unwritten = copy_to_user(user_relocs,
+                                         &relocs[reloc_count],
+                                         exec_list[i].relocation_count *
+                                         sizeof(*relocs));
+
+                if (unwritten) {
+                        ret = -EFAULT;
+                        goto err;
                 }
 
                 reloc_count += exec_list[i].relocation_count;
         }
 
+err:
         drm_free(relocs, reloc_count * sizeof(*relocs), DRM_MEM_DRIVER);
 
         return ret;
···
         exec_offset = exec_list[args->buffer_count - 1].offset;
 
 #if WATCH_EXEC
-        i915_gem_dump_object(object_list[args->buffer_count - 1],
+        i915_gem_dump_object(batch_obj,
                              args->batch_len,
                              __func__,
                              ~0);
···
                                    (uintptr_t) args->buffers_ptr,
                                    exec_list,
                                    sizeof(*exec_list) * args->buffer_count);
-                if (ret)
+                if (ret) {
+                        ret = -EFAULT;
                         DRM_ERROR("failed to copy %d exec entries "
                                   "back to user (%d)\n",
                                   args->buffer_count, ret);
+                }
         }
 
         /* Copy the updated relocations out regardless of current error
···
                 i915_gem_free_mmap_offset(obj);
 
         drm_free(obj_priv->page_cpu_valid, 1, DRM_MEM_DRIVER);
+        kfree(obj_priv->bit_17);
         drm_free(obj->driver_private, 1, DRM_MEM_DRIVER);
 }
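The heart of slow_shmem_bit17_copy() is the gpu_offset ^ 64 in the copy loop: on pages whose physical bit 17 is set, the swizzle effectively flips bit 6 of the byte address, so the CPU copy must use the opposite 64-byte half of each 128-byte pair, and each memcpy is clipped at a cacheline boundary so it never straddles two differently-swizzled chunks. A standalone userspace sketch of the read direction, with invented buffer contents:

#include <stdio.h>
#include <string.h>

#define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((a) - 1))
#define MIN(a, b) ((a) < (b) ? (a) : (b))

/* Copy 'length' bytes from a bit-17-affected "gpu" buffer into a "cpu"
 * buffer, flipping bit 6 of the source offset and never letting one
 * memcpy cross a 64-byte boundary. */
static void bit17_read(char *cpu, int cpu_offset,
                       const char *gpu, int gpu_offset, int length)
{
        while (length > 0) {
                int cacheline_end = ALIGN_UP(gpu_offset + 1, 64);
                int this_length = MIN(cacheline_end - gpu_offset, length);

                memcpy(cpu + cpu_offset, gpu + (gpu_offset ^ 64), this_length);
                cpu_offset += this_length;
                gpu_offset += this_length;
                length -= this_length;
        }
}

int main(void)
{
        char gpu[256], cpu[256];
        int i;

        for (i = 0; i < 256; i++)
                gpu[i] = (char)i;

        /* Read 100 bytes starting at offset 30: spans three cachelines. */
        bit17_read(cpu, 0, gpu, 30, 100);
        printf("cpu[0] = %d (came from gpu[%d])\n", cpu[0], 30 ^ 64);
        return 0;
}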
+93
drivers/gpu/drm/i915/i915_gem_debugfs.c
···
         return 0;
 }
 
+static void i915_dump_pages(struct seq_file *m, struct page **pages, int page_count)
+{
+        int page, i;
+        uint32_t *mem;
+
+        for (page = 0; page < page_count; page++) {
+                mem = kmap(pages[page]);
+                for (i = 0; i < PAGE_SIZE; i += 4)
+                        seq_printf(m, "%08x : %08x\n", i, mem[i / 4]);
+                kunmap(pages[page]);
+        }
+}
+
+static int i915_batchbuffer_info(struct seq_file *m, void *data)
+{
+        struct drm_info_node *node = (struct drm_info_node *) m->private;
+        struct drm_device *dev = node->minor->dev;
+        drm_i915_private_t *dev_priv = dev->dev_private;
+        struct drm_gem_object *obj;
+        struct drm_i915_gem_object *obj_priv;
+        int ret;
+
+        spin_lock(&dev_priv->mm.active_list_lock);
+
+        list_for_each_entry(obj_priv, &dev_priv->mm.active_list, list) {
+                obj = obj_priv->obj;
+                if (obj->read_domains & I915_GEM_DOMAIN_COMMAND) {
+                        ret = i915_gem_object_get_pages(obj);
+                        if (ret) {
+                                DRM_ERROR("Failed to get pages: %d\n", ret);
+                                spin_unlock(&dev_priv->mm.active_list_lock);
+                                return ret;
+                        }
+
+                        seq_printf(m, "--- gtt_offset = 0x%08x\n", obj_priv->gtt_offset);
+                        i915_dump_pages(m, obj_priv->pages, obj->size / PAGE_SIZE);
+
+                        i915_gem_object_put_pages(obj);
+                }
+        }
+
+        spin_unlock(&dev_priv->mm.active_list_lock);
+
+        return 0;
+}
+
+static int i915_ringbuffer_data(struct seq_file *m, void *data)
+{
+        struct drm_info_node *node = (struct drm_info_node *) m->private;
+        struct drm_device *dev = node->minor->dev;
+        drm_i915_private_t *dev_priv = dev->dev_private;
+        u8 *virt;
+        uint32_t *ptr, off;
+
+        if (!dev_priv->ring.ring_obj) {
+                seq_printf(m, "No ringbuffer setup\n");
+                return 0;
+        }
+
+        virt = dev_priv->ring.virtual_start;
+
+        for (off = 0; off < dev_priv->ring.Size; off += 4) {
+                ptr = (uint32_t *)(virt + off);
+                seq_printf(m, "%08x : %08x\n", off, *ptr);
+        }
+
+        return 0;
+}
+
+static int i915_ringbuffer_info(struct seq_file *m, void *data)
+{
+        struct drm_info_node *node = (struct drm_info_node *) m->private;
+        struct drm_device *dev = node->minor->dev;
+        drm_i915_private_t *dev_priv = dev->dev_private;
+        unsigned int head, tail, mask;
+
+        head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
+        tail = I915_READ(PRB0_TAIL) & TAIL_ADDR;
+        mask = dev_priv->ring.tail_mask;
+
+        seq_printf(m, "RingHead : %08x\n", head);
+        seq_printf(m, "RingTail : %08x\n", tail);
+        seq_printf(m, "RingMask : %08x\n", mask);
+        seq_printf(m, "RingSize : %08lx\n", dev_priv->ring.Size);
+        seq_printf(m, "Acthd : %08x\n", I915_READ(IS_I965G(dev) ? ACTHD_I965 : ACTHD));
+
+        return 0;
+}
+
 static struct drm_info_list i915_gem_debugfs_list[] = {
         {"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST},
         {"i915_gem_flushing", i915_gem_object_list_info, 0, (void *) FLUSHING_LIST},
···
         {"i915_gem_fence_regs", i915_gem_fence_regs_info, 0},
         {"i915_gem_interrupt", i915_interrupt_info, 0},
         {"i915_gem_hws", i915_hws_info, 0},
+        {"i915_ringbuffer_data", i915_ringbuffer_data, 0},
+        {"i915_ringbuffer_info", i915_ringbuffer_info, 0},
+        {"i915_batchbuffers", i915_batchbuffer_info, 0},
 };
 #define I915_GEM_DEBUGFS_ENTRIES ARRAY_SIZE(i915_gem_debugfs_list)
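All three new debugfs files use the same one-dword-per-line dump format, "offset : value". A trivial userspace sketch of that format, with invented ring contents:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t ring[8] = { 0x11000000, 0x22000000, 0x33000000 };  /* invented data */
        uint32_t off;

        for (off = 0; off < sizeof(ring); off += 4)
                printf("%08x : %08x\n", off, ring[off / 4]);
        return 0;
}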
+109 -2
drivers/gpu/drm/i915/i915_gem_tiling.c
···
  *
  */
 
+#include "linux/string.h"
+#include "linux/bitops.h"
 #include "drmP.h"
 #include "drm.h"
 #include "i915_drm.h"
···
                         swizzle_y = I915_BIT_6_SWIZZLE_9_11;
                 } else {
                         /* Bit 17 swizzling by the CPU in addition. */
-                        swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
-                        swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
+                        swizzle_x = I915_BIT_6_SWIZZLE_9_10_17;
+                        swizzle_y = I915_BIT_6_SWIZZLE_9_17;
                 }
                 break;
         }
···
                 args->swizzle_mode = dev_priv->mm.bit_6_swizzle_x;
         else
                 args->swizzle_mode = dev_priv->mm.bit_6_swizzle_y;
+
+        /* Hide bit 17 swizzling from the user. This prevents old Mesa
+         * from aborting the application on sw fallbacks to bit 17,
+         * and we use the pread/pwrite bit17 paths to swizzle for it.
+         * If there was a user that was relying on the swizzle
+         * information for drm_intel_bo_map()ed reads/writes this would
+         * break it, but we don't have any of those.
+         */
+        if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_17)
+                args->swizzle_mode = I915_BIT_6_SWIZZLE_9;
+        if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_10_17)
+                args->swizzle_mode = I915_BIT_6_SWIZZLE_9_10;
+
         /* If we can't handle the swizzling, make it untiled. */
         if (args->swizzle_mode == I915_BIT_6_SWIZZLE_UNKNOWN) {
                 args->tiling_mode = I915_TILING_NONE;
···
                 DRM_ERROR("unknown tiling mode\n");
         }
 
+        /* Hide bit 17 from the user -- see comment in i915_gem_set_tiling */
+        if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_17)
+                args->swizzle_mode = I915_BIT_6_SWIZZLE_9;
+        if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_10_17)
+                args->swizzle_mode = I915_BIT_6_SWIZZLE_9_10;
+
         drm_gem_object_unreference(obj);
         mutex_unlock(&dev->struct_mutex);
 
         return 0;
+}
+
+/**
+ * Swap every 64 bytes of this page around, to account for it having a new
+ * bit 17 of its physical address and therefore being interpreted differently
+ * by the GPU.
+ */
+static int
+i915_gem_swizzle_page(struct page *page)
+{
+        char *vaddr;
+        int i;
+        char temp[64];
+
+        vaddr = kmap(page);
+        if (vaddr == NULL)
+                return -ENOMEM;
+
+        for (i = 0; i < PAGE_SIZE; i += 128) {
+                memcpy(temp, &vaddr[i], 64);
+                memcpy(&vaddr[i], &vaddr[i + 64], 64);
+                memcpy(&vaddr[i + 64], temp, 64);
+        }
+
+        kunmap(page);
+
+        return 0;
+}
+
+void
+i915_gem_object_do_bit_17_swizzle(struct drm_gem_object *obj)
+{
+        struct drm_device *dev = obj->dev;
+        drm_i915_private_t *dev_priv = dev->dev_private;
+        struct drm_i915_gem_object *obj_priv = obj->driver_private;
+        int page_count = obj->size >> PAGE_SHIFT;
+        int i;
+
+        if (dev_priv->mm.bit_6_swizzle_x != I915_BIT_6_SWIZZLE_9_10_17)
+                return;
+
+        if (obj_priv->bit_17 == NULL)
+                return;
+
+        for (i = 0; i < page_count; i++) {
+                char new_bit_17 = page_to_phys(obj_priv->pages[i]) >> 17;
+                if ((new_bit_17 & 0x1) !=
+                    (test_bit(i, obj_priv->bit_17) != 0)) {
+                        int ret = i915_gem_swizzle_page(obj_priv->pages[i]);
+                        if (ret != 0) {
+                                DRM_ERROR("Failed to swizzle page\n");
+                                return;
+                        }
+                        set_page_dirty(obj_priv->pages[i]);
+                }
+        }
+}
+
+void
+i915_gem_object_save_bit_17_swizzle(struct drm_gem_object *obj)
+{
+        struct drm_device *dev = obj->dev;
+        drm_i915_private_t *dev_priv = dev->dev_private;
+        struct drm_i915_gem_object *obj_priv = obj->driver_private;
+        int page_count = obj->size >> PAGE_SHIFT;
+        int i;
+
+        if (dev_priv->mm.bit_6_swizzle_x != I915_BIT_6_SWIZZLE_9_10_17)
+                return;
+
+        if (obj_priv->bit_17 == NULL) {
+                obj_priv->bit_17 = kmalloc(BITS_TO_LONGS(page_count) *
+                                           sizeof(long), GFP_KERNEL);
+                if (obj_priv->bit_17 == NULL) {
+                        DRM_ERROR("Failed to allocate memory for bit 17 "
+                                  "record\n");
+                        return;
+                }
+        }
+
+        for (i = 0; i < page_count; i++) {
+                if (page_to_phys(obj_priv->pages[i]) & (1 << 17))
+                        __set_bit(i, obj_priv->bit_17);
+                else
+                        __clear_bit(i, obj_priv->bit_17);
+        }
 }
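Note that i915_gem_swizzle_page() is self-inverse: exchanging the two 64-byte halves of every 128-byte block twice restores the original contents, so a single pass is enough whenever bit 17 has flipped since the last save. A userspace sketch of the same swap, with PAGE_SIZE_ fixed at 4096 for the demo:

#include <stdio.h>
#include <string.h>

#define PAGE_SIZE_ 4096

/* Exchange the two 64-byte halves of each 128-byte block in the page. */
static void swizzle_page(char *vaddr)
{
        char temp[64];
        int i;

        for (i = 0; i < PAGE_SIZE_; i += 128) {
                memcpy(temp, &vaddr[i], 64);
                memcpy(&vaddr[i], &vaddr[i + 64], 64);
                memcpy(&vaddr[i + 64], temp, 64);
        }
}

int main(void)
{
        static char page[PAGE_SIZE_];

        memset(page, 0xaa, 64);        /* first cacheline */
        memset(page + 64, 0xbb, 64);   /* second cacheline */
        swizzle_page(page);
        printf("page[0] = 0x%02x, page[64] = 0x%02x\n",
               (unsigned char)page[0], (unsigned char)page[64]);
        /* prints page[0] = 0xbb, page[64] = 0xaa */
        return 0;
}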
+2
drivers/gpu/drm/i915/intel_display.c
···
         .p1 = { .min = I9XX_P1_MIN, .max = I9XX_P1_MAX },
         .p2 = { .dot_limit = I9XX_P2_SDVO_DAC_SLOW_LIMIT,
                 .p2_slow = I9XX_P2_SDVO_DAC_SLOW, .p2_fast = I9XX_P2_SDVO_DAC_FAST },
+        .find_pll = intel_find_best_PLL,
 },
 { /* INTEL_LIMIT_IGD_LVDS */
         .dot = { .min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX },
···
         /* IGD only supports single-channel mode. */
         .p2 = { .dot_limit = I9XX_P2_LVDS_SLOW_LIMIT,
                 .p2_slow = I9XX_P2_LVDS_SLOW, .p2_fast = I9XX_P2_LVDS_SLOW },
+        .find_pll = intel_find_best_PLL,
 },
 
 };
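This hunk is the mismerge fix from the shortlog: the IGD entries of the PLL limits table were merged without the .find_pll hook that the other entries gained, leaving a NULL function pointer for the mode-set path to call. A generic sketch of the table-of-limits-with-hook pattern (all names invented) showing why a missing hook in one entry is dangerous:

#include <stdio.h>

struct clock { int dot; };

struct limit {
        int dot_min, dot_max;
        /* Per-platform strategy hook; NULL here means a crash later. */
        int (*find_pll)(const struct limit *l, int target, struct clock *out);
};

static int find_best_pll(const struct limit *l, int target, struct clock *out)
{
        out->dot = target < l->dot_min ? l->dot_min :
                   target > l->dot_max ? l->dot_max : target;
        return 1;
}

static const struct limit limits[] = {
        { .dot_min = 20000, .dot_max = 400000, .find_pll = find_best_pll },
        { .dot_min = 20000, .dot_max = 112000, .find_pll = find_best_pll },
};

int main(void)
{
        struct clock c;

        /* Defensive NULL check; the real fix is never omitting the hook. */
        if (limits[1].find_pll && limits[1].find_pll(&limits[1], 150000, &c))
                printf("clamped dot clock: %d kHz\n", c.dot);
        return 0;
}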
+2 -2
drivers/gpu/drm/i915/intel_fb.c
···
 
 static struct sysrq_key_op sysrq_intelfb_restore_op = {
         .handler = intelfb_sysrq,
-        .help_msg = "force fb",
-        .action_msg = "force restore of fb console",
+        .help_msg = "force-fb(G)",
+        .action_msg = "Restore framebuffer console",
 };
 
 int intelfb_probe(struct drm_device *dev)
+20 -3
drivers/gpu/drm/i915/intel_hdmi.c
···
 struct intel_hdmi_priv {
         u32 sdvox_reg;
         u32 save_SDVOX;
-        int has_hdmi_sink;
+        bool has_hdmi_sink;
 };
 
 static void intel_hdmi_mode_set(struct drm_encoder *encoder,
···
         return true;
 }
 
+static void
+intel_hdmi_sink_detect(struct drm_connector *connector)
+{
+        struct intel_output *intel_output = to_intel_output(connector);
+        struct intel_hdmi_priv *hdmi_priv = intel_output->dev_priv;
+        struct edid *edid = NULL;
+
+        edid = drm_get_edid(&intel_output->base,
+                            &intel_output->ddc_bus->adapter);
+        if (edid != NULL) {
+                hdmi_priv->has_hdmi_sink = drm_detect_hdmi_monitor(edid);
+                kfree(edid);
+                intel_output->base.display_info.raw_edid = NULL;
+        }
+}
+
 static enum drm_connector_status
 intel_hdmi_detect(struct drm_connector *connector)
 {
···
                 return connector_status_unknown;
         }
 
-        if ((I915_READ(PORT_HOTPLUG_STAT) & bit) != 0)
+        if ((I915_READ(PORT_HOTPLUG_STAT) & bit) != 0) {
+                intel_hdmi_sink_detect(connector);
                 return connector_status_connected;
-        else
+        } else
                 return connector_status_disconnected;
 }
+20 -2
drivers/gpu/drm/i915/intel_sdvo.c
···
         intel_sdvo_read_response(intel_output, &response, 2);
 }
 
+static void
+intel_sdvo_hdmi_sink_detect(struct drm_connector *connector)
+{
+        struct intel_output *intel_output = to_intel_output(connector);
+        struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
+        struct edid *edid = NULL;
+
+        intel_sdvo_set_control_bus_switch(intel_output, sdvo_priv->ddc_bus);
+        edid = drm_get_edid(&intel_output->base,
+                            &intel_output->ddc_bus->adapter);
+        if (edid != NULL) {
+                sdvo_priv->is_hdmi = drm_detect_hdmi_monitor(edid);
+                kfree(edid);
+                intel_output->base.display_info.raw_edid = NULL;
+        }
+}
+
 static enum drm_connector_status intel_sdvo_detect(struct drm_connector *connector)
 {
         u8 response[2];
···
         if (status != SDVO_CMD_STATUS_SUCCESS)
                 return connector_status_unknown;
 
-        if ((response[0] != 0) || (response[1] != 0))
+        if ((response[0] != 0) || (response[1] != 0)) {
+                intel_sdvo_hdmi_sink_detect(connector);
                 return connector_status_connected;
-        else
+        } else
                 return connector_status_disconnected;
 }
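Both the HDMI and SDVO paths now call drm_get_edid() and drm_detect_hdmi_monitor() when a connection is reported; the latter decides "HDMI vs. plain DVI" by scanning the EDID's CEA-861 extension for a Vendor-Specific Data Block carrying the HDMI IEEE OUI 0x000C03. A rough userspace sketch of that scan follows; it is not the DRM implementation, assumes a 256-byte EDID whose second block is the CEA extension, and skips checksum validation.

#include <stdbool.h>
#include <stddef.h>

#define CEA_EXT_TAG 0x02
#define VSDB_TAG    0x03   /* CEA data block tag 3 = vendor specific */

/* Return true if the EDID's CEA extension advertises an HDMI sink. */
static bool edid_is_hdmi(const unsigned char *edid, size_t len)
{
        const unsigned char *cea;
        unsigned int i, dtd_off;

        if (len < 256 || edid[126] == 0)    /* no extension blocks */
                return false;

        cea = edid + 128;                   /* second 128-byte block */
        if (cea[0] != CEA_EXT_TAG)
                return false;

        dtd_off = cea[2];                   /* where detailed timings begin */
        for (i = 4; i < dtd_off && i + 3 < 128; i += 1 + (cea[i] & 0x1f)) {
                unsigned int tag  = cea[i] >> 5;
                unsigned int blen = cea[i] & 0x1f;

                /* HDMI VSDB: IEEE OUI 0x000C03, stored LSB first. */
                if (tag == VSDB_TAG && blen >= 3 &&
                    cea[i + 1] == 0x03 && cea[i + 2] == 0x0c &&
                    cea[i + 3] == 0x00)
                        return true;
        }
        return false;
}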
+3
include/drm/i915_drm.h
···
 #define I915_BIT_6_SWIZZLE_9_10_11 4
 /* Not seen by userland */
 #define I915_BIT_6_SWIZZLE_UNKNOWN 5
+/* Seen by userland. */
+#define I915_BIT_6_SWIZZLE_9_17 6
+#define I915_BIT_6_SWIZZLE_9_10_17 7
 
 struct drm_i915_gem_set_tiling {
         /** Handle of the buffer to have its tiling state updated */
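The new modes extend the existing naming convention: each mode names the address bits that get XORed into bit 6 of the CPU address when a tiled buffer is accessed through a mapping. A sketch of how userland could apply these modes; the swizzle_addr() helper is illustrative, not a kernel or libdrm API.

#include <stdint.h>
#include <stdio.h>

#define I915_BIT_6_SWIZZLE_NONE    0
#define I915_BIT_6_SWIZZLE_9       1
#define I915_BIT_6_SWIZZLE_9_10    2
#define I915_BIT_6_SWIZZLE_9_11    3
#define I915_BIT_6_SWIZZLE_9_10_11 4
#define I915_BIT_6_SWIZZLE_UNKNOWN 5
#define I915_BIT_6_SWIZZLE_9_17    6
#define I915_BIT_6_SWIZZLE_9_10_17 7

/* Flip bit 6 of 'addr' by the XOR of the bits the mode names. */
static uint64_t swizzle_addr(uint64_t addr, int mode)
{
        uint64_t bit6 = 0;

        switch (mode) {
        case I915_BIT_6_SWIZZLE_9:
                bit6 = addr >> 9;
                break;
        case I915_BIT_6_SWIZZLE_9_10:
                bit6 = (addr >> 9) ^ (addr >> 10);
                break;
        case I915_BIT_6_SWIZZLE_9_11:
                bit6 = (addr >> 9) ^ (addr >> 11);
                break;
        case I915_BIT_6_SWIZZLE_9_10_11:
                bit6 = (addr >> 9) ^ (addr >> 10) ^ (addr >> 11);
                break;
        case I915_BIT_6_SWIZZLE_9_17:
                bit6 = (addr >> 9) ^ (addr >> 17);
                break;
        case I915_BIT_6_SWIZZLE_9_10_17:
                bit6 = (addr >> 9) ^ (addr >> 10) ^ (addr >> 17);
                break;
        }
        return addr ^ ((bit6 & 1) << 6);
}

int main(void)
{
        /* 0x20200 has bits 9 and 17 set and bit 10 clear, so under
         * 9_10_17 the XOR is zero and bit 6 stays unchanged. */
        printf("0x%llx\n", (unsigned long long)
               swizzle_addr(0x20200, I915_BIT_6_SWIZZLE_9_10_17));
        return 0;
}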