Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/anholt/drm-intel

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/anholt/drm-intel:
drm/i915: Fix leak of relocs along do_execbuffer error path
drm/i915: slow acpi_lid_open() causes flickering - V2
drm/i915: Disable SR when more than one pipe is enabled
drm/i915: page flip support for Ironlake
drm/i915: Fix the incorrect DMI string for Samsung SX20S laptop
drm/i915: Add support for SDVO composite TV
drm/i915: don't trigger ironlake vblank interrupt at irq install
drm/i915: handle non-flip pending case when unpinning the scanout buffer
drm/i915: Fix the device info of Pineview
drm/i915: enable vblank interrupt on ironlake
drm/i915: Prevent use of uninitialized pointers along error path.
drm/i915: disable hotplug detect before Ironlake CRT detect

+91 -19
+1 -1
drivers/gpu/drm/i915/i915_drv.c
··· 120 120 121 121 const static struct intel_device_info intel_pineview_info = { 122 122 .is_g33 = 1, .is_pineview = 1, .is_mobile = 1, .is_i9xx = 1, 123 - .has_pipe_cxsr = 1, 123 + .need_gfx_hws = 1, 124 124 .has_hotplug = 1, 125 125 }; 126 126
+9 -2
drivers/gpu/drm/i915/i915_gem.c
··· 3564 3564 uint32_t reloc_count = 0, i; 3565 3565 int ret = 0; 3566 3566 3567 + if (relocs == NULL) 3568 + return 0; 3569 + 3567 3570 for (i = 0; i < buffer_count; i++) { 3568 3571 struct drm_i915_gem_relocation_entry __user *user_relocs; 3569 3572 int unwritten; ··· 3656 3653 struct drm_gem_object *batch_obj; 3657 3654 struct drm_i915_gem_object *obj_priv; 3658 3655 struct drm_clip_rect *cliprects = NULL; 3659 - struct drm_i915_gem_relocation_entry *relocs; 3656 + struct drm_i915_gem_relocation_entry *relocs = NULL; 3660 3657 int ret = 0, ret2, i, pinned = 0; 3661 3658 uint64_t exec_offset; 3662 3659 uint32_t seqno, flush_domains, reloc_index; ··· 3725 3722 if (object_list[i] == NULL) { 3726 3723 DRM_ERROR("Invalid object handle %d at index %d\n", 3727 3724 exec_list[i].handle, i); 3725 + /* prevent error path from reading uninitialized data */ 3726 + args->buffer_count = i + 1; 3728 3727 ret = -EBADF; 3729 3728 goto err; 3730 3729 } ··· 3735 3730 if (obj_priv->in_execbuffer) { 3736 3731 DRM_ERROR("Object %p appears more than once in object list\n", 3737 3732 object_list[i]); 3733 + /* prevent error path from reading uninitialized data */ 3734 + args->buffer_count = i + 1; 3738 3735 ret = -EBADF; 3739 3736 goto err; 3740 3737 } ··· 3933 3926 3934 3927 mutex_unlock(&dev->struct_mutex); 3935 3928 3929 + pre_mutex_err: 3936 3930 /* Copy the updated relocations out regardless of current error 3937 3931 * state. Failure to update the relocs would mean that the next 3938 3932 * time userland calls execbuf, it would do so with presumed offset ··· 3948 3940 ret = ret2; 3949 3941 } 3950 3942 3951 - pre_mutex_err: 3952 3943 drm_free_large(object_list); 3953 3944 kfree(cliprects); 3954 3945
+30 -12
drivers/gpu/drm/i915/i915_irq.c
··· 309 309 if (de_iir & DE_GSE) 310 310 ironlake_opregion_gse_intr(dev); 311 311 312 + if (de_iir & DE_PLANEA_FLIP_DONE) 313 + intel_prepare_page_flip(dev, 0); 314 + 315 + if (de_iir & DE_PLANEB_FLIP_DONE) 316 + intel_prepare_page_flip(dev, 1); 317 + 318 + if (de_iir & DE_PIPEA_VBLANK) { 319 + drm_handle_vblank(dev, 0); 320 + intel_finish_page_flip(dev, 0); 321 + } 322 + 323 + if (de_iir & DE_PIPEB_VBLANK) { 324 + drm_handle_vblank(dev, 1); 325 + intel_finish_page_flip(dev, 1); 326 + } 327 + 312 328 /* check event from PCH */ 313 329 if ((de_iir & DE_PCH_EVENT) && 314 330 (pch_iir & SDE_HOTPLUG_MASK)) { ··· 860 844 if (!(pipeconf & PIPEACONF_ENABLE)) 861 845 return -EINVAL; 862 846 863 - if (IS_IRONLAKE(dev)) 864 - return 0; 865 - 866 847 spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags); 867 - if (IS_I965G(dev)) 848 + if (IS_IRONLAKE(dev)) 849 + ironlake_enable_display_irq(dev_priv, (pipe == 0) ? 850 + DE_PIPEA_VBLANK: DE_PIPEB_VBLANK); 851 + else if (IS_I965G(dev)) 868 852 i915_enable_pipestat(dev_priv, pipe, 869 853 PIPE_START_VBLANK_INTERRUPT_ENABLE); 870 854 else ··· 882 866 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 883 867 unsigned long irqflags; 884 868 885 - if (IS_IRONLAKE(dev)) 886 - return; 887 - 888 869 spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags); 889 - i915_disable_pipestat(dev_priv, pipe, 890 - PIPE_VBLANK_INTERRUPT_ENABLE | 891 - PIPE_START_VBLANK_INTERRUPT_ENABLE); 870 + if (IS_IRONLAKE(dev)) 871 + ironlake_disable_display_irq(dev_priv, (pipe == 0) ? 872 + DE_PIPEA_VBLANK: DE_PIPEB_VBLANK); 873 + else 874 + i915_disable_pipestat(dev_priv, pipe, 875 + PIPE_VBLANK_INTERRUPT_ENABLE | 876 + PIPE_START_VBLANK_INTERRUPT_ENABLE); 892 877 spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags); 893 878 } 894 879 ··· 1032 1015 { 1033 1016 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 1034 1017 /* enable kind of interrupts always enabled */ 1035 - u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT; 1018 + u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT | 1019 + DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE; 1036 1020 u32 render_mask = GT_USER_INTERRUPT; 1037 1021 u32 hotplug_mask = SDE_CRT_HOTPLUG | SDE_PORTB_HOTPLUG | 1038 1022 SDE_PORTC_HOTPLUG | SDE_PORTD_HOTPLUG; 1039 1023 1040 1024 dev_priv->irq_mask_reg = ~display_mask; 1041 - dev_priv->de_irq_enable_reg = display_mask; 1025 + dev_priv->de_irq_enable_reg = display_mask | DE_PIPEA_VBLANK | DE_PIPEB_VBLANK; 1042 1026 1043 1027 /* should always can generate irq */ 1044 1028 I915_WRITE(DEIIR, I915_READ(DEIIR));
+3
drivers/gpu/drm/i915/intel_crt.c
··· 157 157 adpa = I915_READ(PCH_ADPA); 158 158 159 159 adpa &= ~ADPA_CRT_HOTPLUG_MASK; 160 + /* disable HPD first */ 161 + I915_WRITE(PCH_ADPA, adpa); 162 + (void)I915_READ(PCH_ADPA); 160 163 161 164 adpa |= (ADPA_CRT_HOTPLUG_PERIOD_128 | 162 165 ADPA_CRT_HOTPLUG_WARMUP_10MS |
+31 -2
drivers/gpu/drm/i915/intel_display.c
··· 1638 1638 case DRM_MODE_DPMS_OFF: 1639 1639 DRM_DEBUG_KMS("crtc %d dpms off\n", pipe); 1640 1640 1641 + drm_vblank_off(dev, pipe); 1641 1642 /* Disable display plane */ 1642 1643 temp = I915_READ(dspcntr_reg); 1643 1644 if ((temp & DISPLAY_PLANE_ENABLE) != 0) { ··· 2520 2519 sr_entries = roundup(sr_entries / cacheline_size, 1); 2521 2520 DRM_DEBUG("self-refresh entries: %d\n", sr_entries); 2522 2521 I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN); 2522 + } else { 2523 + /* Turn off self refresh if both pipes are enabled */ 2524 + I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF) 2525 + & ~FW_BLC_SELF_EN); 2523 2526 } 2524 2527 2525 2528 DRM_DEBUG("Setting FIFO watermarks - A: %d, B: %d, SR %d\n", ··· 2567 2562 srwm = 1; 2568 2563 srwm &= 0x3f; 2569 2564 I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN); 2565 + } else { 2566 + /* Turn off self refresh if both pipes are enabled */ 2567 + I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF) 2568 + & ~FW_BLC_SELF_EN); 2570 2569 } 2571 2570 2572 2571 DRM_DEBUG_KMS("Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n", ··· 2639 2630 if (srwm < 0) 2640 2631 srwm = 1; 2641 2632 I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN | (srwm & 0x3f)); 2633 + } else { 2634 + /* Turn off self refresh if both pipes are enabled */ 2635 + I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF) 2636 + & ~FW_BLC_SELF_EN); 2642 2637 } 2643 2638 2644 2639 DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n", ··· 3997 3984 spin_lock_irqsave(&dev->event_lock, flags); 3998 3985 work = intel_crtc->unpin_work; 3999 3986 if (work == NULL || !work->pending) { 3987 + if (work && !work->pending) { 3988 + obj_priv = work->obj->driver_private; 3989 + DRM_DEBUG_DRIVER("flip finish: %p (%d) not pending?\n", 3990 + obj_priv, 3991 + atomic_read(&obj_priv->pending_flip)); 3992 + } 4000 3993 spin_unlock_irqrestore(&dev->event_lock, flags); 4001 3994 return; 4002 3995 } ··· 4024 4005 spin_unlock_irqrestore(&dev->event_lock, flags); 4025 4006 4026 4007 obj_priv = work->obj->driver_private; 4027 - if (atomic_dec_and_test(&obj_priv->pending_flip)) 4008 + 4009 + /* Initial scanout buffer will have a 0 pending flip count */ 4010 + if ((atomic_read(&obj_priv->pending_flip) == 0) || 4011 + atomic_dec_and_test(&obj_priv->pending_flip)) 4028 4012 DRM_WAKEUP(&dev_priv->pending_flip_queue); 4029 4013 schedule_work(&work->work); 4030 4014 } ··· 4040 4018 unsigned long flags; 4041 4019 4042 4020 spin_lock_irqsave(&dev->event_lock, flags); 4043 - if (intel_crtc->unpin_work) 4021 + if (intel_crtc->unpin_work) { 4044 4022 intel_crtc->unpin_work->pending = 1; 4023 + } else { 4024 + DRM_DEBUG_DRIVER("preparing flip with no unpin work?\n"); 4025 + } 4045 4026 spin_unlock_irqrestore(&dev->event_lock, flags); 4046 4027 } 4047 4028 ··· 4078 4053 /* We borrow the event spin lock for protecting unpin_work */ 4079 4054 spin_lock_irqsave(&dev->event_lock, flags); 4080 4055 if (intel_crtc->unpin_work) { 4056 + DRM_DEBUG_DRIVER("flip queue: crtc already busy\n"); 4081 4057 spin_unlock_irqrestore(&dev->event_lock, flags); 4082 4058 kfree(work); 4083 4059 mutex_unlock(&dev->struct_mutex); ··· 4092 4066 4093 4067 ret = intel_pin_and_fence_fb_obj(dev, obj); 4094 4068 if (ret != 0) { 4069 + DRM_DEBUG_DRIVER("flip queue: %p pin & fence failed\n", 4070 + obj->driver_private); 4095 4071 kfree(work); 4072 + intel_crtc->unpin_work = NULL; 4096 4073 mutex_unlock(&dev->struct_mutex); 4097 4074 return ret; 4098 4075 }
+9 -2
drivers/gpu/drm/i915/intel_lvds.c
··· 611 611 { 612 612 .ident = "Samsung SX20S", 613 613 .matches = { 614 - DMI_MATCH(DMI_SYS_VENDOR, "Phoenix Technologies LTD"), 614 + DMI_MATCH(DMI_SYS_VENDOR, "Samsung Electronics"), 615 615 DMI_MATCH(DMI_BOARD_NAME, "SX20S"), 616 616 }, 617 617 }, ··· 620 620 .matches = { 621 621 DMI_MATCH(DMI_SYS_VENDOR, "Acer"), 622 622 DMI_MATCH(DMI_PRODUCT_NAME, "Aspire one"), 623 + }, 624 + }, 625 + { 626 + .ident = "Aspire 1810T", 627 + .matches = { 628 + DMI_MATCH(DMI_SYS_VENDOR, "Acer"), 629 + DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 1810T"), 623 630 }, 624 631 }, 625 632 { ··· 650 643 { 651 644 enum drm_connector_status status = connector_status_connected; 652 645 653 - if (!acpi_lid_open() && !dmi_check_system(bad_lid_status)) 646 + if (!dmi_check_system(bad_lid_status) && !acpi_lid_open()) 654 647 status = connector_status_disconnected; 655 648 656 649 return status;
+8
drivers/gpu/drm/i915/intel_sdvo.c
··· 2345 2345 connector->connector_type = DRM_MODE_CONNECTOR_VGA; 2346 2346 intel_output->clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT) | 2347 2347 (1 << INTEL_ANALOG_CLONE_BIT); 2348 + } else if (flags & SDVO_OUTPUT_CVBS0) { 2349 + 2350 + sdvo_priv->controlled_output = SDVO_OUTPUT_CVBS0; 2351 + encoder->encoder_type = DRM_MODE_ENCODER_TVDAC; 2352 + connector->connector_type = DRM_MODE_CONNECTOR_SVIDEO; 2353 + sdvo_priv->is_tv = true; 2354 + intel_output->needs_tv_clock = true; 2355 + intel_output->clone_mask = 1 << INTEL_SDVO_TV_CLONE_BIT; 2348 2356 } else if (flags & SDVO_OUTPUT_LVDS0) { 2349 2357 2350 2358 sdvo_priv->controlled_output = SDVO_OUTPUT_LVDS0;