Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

Merge branch 'drm-fixes' of git://people.freedesktop.org/~airlied/linux

Pull DRM fixes from Dave Airlie:
"I was going to leave this until post-rc1, but the sysfs fixes broke
hotplug in userspace, so I had to fix it harder; otherwise this is a set
of pulls from intel, radeon and vmware.

The vmware/ttm changes are a bit larger, but since it's early and they are
unlikely to break anything else I put them in; they let vmware work
with dri3"

* 'drm-fixes' of git://people.freedesktop.org/~airlied/linux: (36 commits)
drm/sysfs: fix hotplug regression since lifetime changes
drm/exynos: g2d: fix memory leak to userptr
drm/i915: Fix gen3 self-refresh watermarks
drm/ttm: Remove set_need_resched from the ttm fault handler
drm/ttm: Don't move non-existing data
drm/radeon: hook up backlight functions for CI and KV family.
drm/i915: Replicate BIOS eDP bpp clamping hack for hsw
drm/i915: Do not enable package C8 on unsupported hardware
drm/i915: Hold pc8 lock around toggling pc8.gpu_idle
drm/i915: encoder->get_config is no longer optional
drm/i915/tv: add ->get_config callback
drm/radeon/cik: Add macrotile mode array query
drm/radeon/cik: Return backend map information to userspace
drm/vmwgfx: Make vmwgfx dma buffers prime aware
drm/vmwgfx: Make surfaces prime-aware
drm/vmwgfx: Hook up the prime ioctls
drm/ttm: Add a minimal prime implementation for ttm base objects
drm/vmwgfx: Fix false lockdep warning
drm/ttm: Allow execbuf util reserves without ticket
drm/i915: restore the early forcewake cleanup
...

+1060 -327
+2 -2
arch/x86/kernel/early-quirks.c
···
     INTEL_I915GM_IDS(gen3_stolen_size),
     INTEL_I945G_IDS(gen3_stolen_size),
     INTEL_I945GM_IDS(gen3_stolen_size),
-    INTEL_VLV_M_IDS(gen3_stolen_size),
-    INTEL_VLV_D_IDS(gen3_stolen_size),
+    INTEL_VLV_M_IDS(gen6_stolen_size),
+    INTEL_VLV_D_IDS(gen6_stolen_size),
     INTEL_PINEVIEW_IDS(gen3_stolen_size),
     INTEL_I965G_IDS(gen3_stolen_size),
     INTEL_G33_IDS(gen3_stolen_size),
+33 -7
drivers/gpu/drm/drm_sysfs.c
··· 489 489 } 490 490 EXPORT_SYMBOL(drm_sysfs_hotplug_event); 491 491 492 + static void drm_sysfs_release(struct device *dev) 493 + { 494 + kfree(dev); 495 + } 496 + 492 497 /** 493 498 * drm_sysfs_device_add - adds a class device to sysfs for a character driver 494 499 * @dev: DRM device to be added ··· 506 501 int drm_sysfs_device_add(struct drm_minor *minor) 507 502 { 508 503 char *minor_str; 504 + int r; 509 505 510 506 if (minor->type == DRM_MINOR_CONTROL) 511 507 minor_str = "controlD%d"; ··· 515 509 else 516 510 minor_str = "card%d"; 517 511 518 - minor->kdev = device_create(drm_class, minor->dev->dev, 519 - MKDEV(DRM_MAJOR, minor->index), 520 - minor, minor_str, minor->index); 521 - if (IS_ERR(minor->kdev)) { 522 - DRM_ERROR("device create failed %ld\n", PTR_ERR(minor->kdev)); 523 - return PTR_ERR(minor->kdev); 512 + minor->kdev = kzalloc(sizeof(*minor->kdev), GFP_KERNEL); 513 + if (!minor->dev) { 514 + r = -ENOMEM; 515 + goto error; 524 516 } 517 + 518 + device_initialize(minor->kdev); 519 + minor->kdev->devt = MKDEV(DRM_MAJOR, minor->index); 520 + minor->kdev->class = drm_class; 521 + minor->kdev->type = &drm_sysfs_device_minor; 522 + minor->kdev->parent = minor->dev->dev; 523 + minor->kdev->release = drm_sysfs_release; 524 + dev_set_drvdata(minor->kdev, minor); 525 + 526 + r = dev_set_name(minor->kdev, minor_str, minor->index); 527 + if (r < 0) 528 + goto error; 529 + 530 + r = device_add(minor->kdev); 531 + if (r < 0) 532 + goto error; 533 + 525 534 return 0; 535 + 536 + error: 537 + DRM_ERROR("device create failed %d\n", r); 538 + put_device(minor->kdev); 539 + return r; 526 540 } 527 541 528 542 /** ··· 555 529 void drm_sysfs_device_remove(struct drm_minor *minor) 556 530 { 557 531 if (minor->kdev) 558 - device_destroy(drm_class, MKDEV(DRM_MAJOR, minor->index)); 532 + device_unregister(minor->kdev); 559 533 minor->kdev = NULL; 560 534 } 561 535
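The drm_sysfs change above is the hotplug fix: instead of device_create(), whose struct device goes away in device_destroy(), the minor now allocates its own struct device and registers it with device_initialize()/device_add(), freeing it from a release callback only when the last reference is dropped. Below is a minimal sketch of that pattern; my_minor, my_minor_add() and my_minor_release() are hypothetical names, not part of the patch.

#include <linux/device.h>
#include <linux/slab.h>

struct my_minor {
	struct device *kdev;
	int index;
};

static void my_minor_release(struct device *dev)
{
	kfree(dev);			/* freed only when the last ref is dropped */
}

static int my_minor_add(struct my_minor *minor, struct class *cls, dev_t devt)
{
	int r;

	minor->kdev = kzalloc(sizeof(*minor->kdev), GFP_KERNEL);
	if (!minor->kdev)
		return -ENOMEM;

	device_initialize(minor->kdev);		/* refcount starts at one */
	minor->kdev->devt = devt;
	minor->kdev->class = cls;
	minor->kdev->release = my_minor_release;
	dev_set_drvdata(minor->kdev, minor);

	r = dev_set_name(minor->kdev, "card%d", minor->index);
	if (r < 0)
		goto error;

	r = device_add(minor->kdev);		/* makes the device visible in sysfs */
	if (r < 0)
		goto error;

	return 0;

error:
	put_device(minor->kdev);		/* drops the ref; release() does the kfree */
	return r;
}

On teardown, device_unregister() (as in drm_sysfs_device_remove() above) removes the device and drops that reference.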
+2
drivers/gpu/drm/exynos/exynos_drm_g2d.c
···
 				g2d_userptr->npages,
 				g2d_userptr->vma);
 
+	exynos_gem_put_vma(g2d_userptr->vma);
+
 	if (!g2d_userptr->out_of_list)
 		list_del_init(&g2d_userptr->list);
 
+1
drivers/gpu/drm/i915/i915_drv.h
···
 #define HAS_POWER_WELL(dev)	(IS_HASWELL(dev) || IS_BROADWELL(dev))
 #define HAS_FPGA_DBG_UNCLAIMED(dev)	(INTEL_INFO(dev)->has_fpga_dbg)
 #define HAS_PSR(dev)		(IS_HASWELL(dev) || IS_BROADWELL(dev))
+#define HAS_PC8(dev)		(IS_HASWELL(dev)) /* XXX HSW:ULX */
 
 #define INTEL_PCH_DEVICE_ID_MASK		0xff00
 #define INTEL_PCH_IBX_DEVICE_ID_TYPE		0x3b00
+6 -1
drivers/gpu/drm/i915/intel_bios.c
···
 
 	/* Default to using SSC */
 	dev_priv->vbt.lvds_use_ssc = 1;
-	dev_priv->vbt.lvds_ssc_freq = intel_bios_ssc_frequency(dev, 1);
+	/*
+	 * Core/SandyBridge/IvyBridge use alternative (120MHz) reference
+	 * clock for LVDS.
+	 */
+	dev_priv->vbt.lvds_ssc_freq = intel_bios_ssc_frequency(dev,
+						!HAS_PCH_SPLIT(dev));
 	DRM_DEBUG_KMS("Set default to SSC at %dMHz\n", dev_priv->vbt.lvds_ssc_freq);
 
 	for (port = PORT_A; port < I915_MAX_PORTS; port++) {
+20
drivers/gpu/drm/i915/intel_ddi.c
···
 	default:
 		break;
 	}
+
+	if (encoder->type == INTEL_OUTPUT_EDP && dev_priv->vbt.edp_bpp &&
+	    pipe_config->pipe_bpp > dev_priv->vbt.edp_bpp) {
+		/*
+		 * This is a big fat ugly hack.
+		 *
+		 * Some machines in UEFI boot mode provide us a VBT that has 18
+		 * bpp and 1.62 GHz link bandwidth for eDP, which for reasons
+		 * unknown we fail to light up. Yet the same BIOS boots up with
+		 * 24 bpp and 2.7 GHz link. Use the same bpp as the BIOS uses as
+		 * max, not what it tells us to use.
+		 *
+		 * Note: This will still be broken if the eDP panel is not lit
+		 * up by the BIOS, and thus we can't get the mode at module
+		 * load.
+		 */
+		DRM_DEBUG_KMS("pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
+			      pipe_config->pipe_bpp, dev_priv->vbt.edp_bpp);
+		dev_priv->vbt.edp_bpp = pipe_config->pipe_bpp;
+	}
 }
 
 static void intel_ddi_destroy(struct drm_encoder *encoder)
+27 -6
drivers/gpu/drm/i915/intel_display.c
··· 6518 6518 6519 6519 void hsw_enable_package_c8(struct drm_i915_private *dev_priv) 6520 6520 { 6521 + if (!HAS_PC8(dev_priv->dev)) 6522 + return; 6523 + 6521 6524 mutex_lock(&dev_priv->pc8.lock); 6522 6525 __hsw_enable_package_c8(dev_priv); 6523 6526 mutex_unlock(&dev_priv->pc8.lock); ··· 6528 6525 6529 6526 void hsw_disable_package_c8(struct drm_i915_private *dev_priv) 6530 6527 { 6528 + if (!HAS_PC8(dev_priv->dev)) 6529 + return; 6530 + 6531 6531 mutex_lock(&dev_priv->pc8.lock); 6532 6532 __hsw_disable_package_c8(dev_priv); 6533 6533 mutex_unlock(&dev_priv->pc8.lock); ··· 6568 6562 struct drm_i915_private *dev_priv = dev->dev_private; 6569 6563 bool allow; 6570 6564 6565 + if (!HAS_PC8(dev_priv->dev)) 6566 + return; 6567 + 6571 6568 if (!i915_enable_pc8) 6572 6569 return; 6573 6570 ··· 6594 6585 6595 6586 static void hsw_package_c8_gpu_idle(struct drm_i915_private *dev_priv) 6596 6587 { 6588 + if (!HAS_PC8(dev_priv->dev)) 6589 + return; 6590 + 6591 + mutex_lock(&dev_priv->pc8.lock); 6597 6592 if (!dev_priv->pc8.gpu_idle) { 6598 6593 dev_priv->pc8.gpu_idle = true; 6599 - hsw_enable_package_c8(dev_priv); 6594 + __hsw_enable_package_c8(dev_priv); 6600 6595 } 6596 + mutex_unlock(&dev_priv->pc8.lock); 6601 6597 } 6602 6598 6603 6599 static void hsw_package_c8_gpu_busy(struct drm_i915_private *dev_priv) 6604 6600 { 6601 + if (!HAS_PC8(dev_priv->dev)) 6602 + return; 6603 + 6604 + mutex_lock(&dev_priv->pc8.lock); 6605 6605 if (dev_priv->pc8.gpu_idle) { 6606 6606 dev_priv->pc8.gpu_idle = false; 6607 - hsw_disable_package_c8(dev_priv); 6607 + __hsw_disable_package_c8(dev_priv); 6608 6608 } 6609 + mutex_unlock(&dev_priv->pc8.lock); 6609 6610 } 6610 6611 6611 6612 #define for_each_power_domain(domain, mask) \ ··· 7203 7184 intel_crtc->cursor_visible = visible; 7204 7185 } 7205 7186 /* and commit changes on next vblank */ 7187 + POSTING_READ(CURCNTR(pipe)); 7206 7188 I915_WRITE(CURBASE(pipe), base); 7189 + POSTING_READ(CURBASE(pipe)); 7207 7190 } 7208 7191 7209 7192 static void ivb_update_cursor(struct drm_crtc *crtc, u32 base) ··· 7234 7213 intel_crtc->cursor_visible = visible; 7235 7214 } 7236 7215 /* and commit changes on next vblank */ 7216 + POSTING_READ(CURCNTR_IVB(pipe)); 7237 7217 I915_WRITE(CURBASE_IVB(pipe), base); 7218 + POSTING_READ(CURBASE_IVB(pipe)); 7238 7219 } 7239 7220 7240 7221 /* If no-part of the cursor is visible on the framebuffer, then the GPU may hang... */ ··· 9271 9248 enum pipe pipe; 9272 9249 if (encoder->base.crtc != &crtc->base) 9273 9250 continue; 9274 - if (encoder->get_config && 9275 - encoder->get_hw_state(encoder, &pipe)) 9251 + if (encoder->get_hw_state(encoder, &pipe)) 9276 9252 encoder->get_config(encoder, &pipe_config); 9277 9253 } 9278 9254 ··· 10931 10909 if (encoder->get_hw_state(encoder, &pipe)) { 10932 10910 crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]); 10933 10911 encoder->base.crtc = &crtc->base; 10934 - if (encoder->get_config) 10935 - encoder->get_config(encoder, &crtc->config); 10912 + encoder->get_config(encoder, &crtc->config); 10936 10913 } else { 10937 10914 encoder->base.crtc = NULL; 10938 10915 }
+1 -1
drivers/gpu/drm/i915/intel_dp.c
···
 	 * ensure that we have vdd while we switch off the panel. */
 	ironlake_edp_panel_vdd_on(intel_dp);
 	ironlake_edp_backlight_off(intel_dp);
-	intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
+	intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
 	ironlake_edp_panel_off(intel_dp);
 
 	/* cpu edp my only be disable _after_ the cpu pipe/plane is disabled. */
+2 -2
drivers/gpu/drm/i915/intel_pm.c
···
 		&to_intel_crtc(enabled)->config.adjusted_mode;
 	int clock = adjusted_mode->crtc_clock;
 	int htotal = adjusted_mode->htotal;
-	int hdisplay = to_intel_crtc(crtc)->config.pipe_src_w;
+	int hdisplay = to_intel_crtc(enabled)->config.pipe_src_w;
 	int pixel_size = enabled->fb->bits_per_pixel / 8;
 	unsigned long line_time_us;
 	int entries;
···
 
 	I915_WRITE(GEN6_RC_SLEEP, 0);
 	I915_WRITE(GEN6_RC1e_THRESHOLD, 1000);
-	if (INTEL_INFO(dev)->gen <= 6 || IS_IVYBRIDGE(dev))
+	if (IS_IVYBRIDGE(dev))
 		I915_WRITE(GEN6_RC6_THRESHOLD, 125000);
 	else
 		I915_WRITE(GEN6_RC6_THRESHOLD, 50000);
+8
drivers/gpu/drm/i915/intel_tv.c
··· 902 902 } 903 903 904 904 905 + static void 906 + intel_tv_get_config(struct intel_encoder *encoder, 907 + struct intel_crtc_config *pipe_config) 908 + { 909 + pipe_config->adjusted_mode.crtc_clock = pipe_config->port_clock; 910 + } 911 + 905 912 static bool 906 913 intel_tv_compute_config(struct intel_encoder *encoder, 907 914 struct intel_crtc_config *pipe_config) ··· 1628 1621 DRM_MODE_ENCODER_TVDAC); 1629 1622 1630 1623 intel_encoder->compute_config = intel_tv_compute_config; 1624 + intel_encoder->get_config = intel_tv_get_config; 1631 1625 intel_encoder->mode_set = intel_tv_mode_set; 1632 1626 intel_encoder->enable = intel_enable_tv; 1633 1627 intel_encoder->disable = intel_disable_tv;
+14 -12
drivers/gpu/drm/i915/intel_uncore.c
··· 217 217 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); 218 218 } 219 219 220 + static void intel_uncore_forcewake_reset(struct drm_device *dev) 221 + { 222 + struct drm_i915_private *dev_priv = dev->dev_private; 223 + 224 + if (IS_VALLEYVIEW(dev)) { 225 + vlv_force_wake_reset(dev_priv); 226 + } else if (INTEL_INFO(dev)->gen >= 6) { 227 + __gen6_gt_force_wake_reset(dev_priv); 228 + if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) 229 + __gen6_gt_force_wake_mt_reset(dev_priv); 230 + } 231 + } 232 + 220 233 void intel_uncore_early_sanitize(struct drm_device *dev) 221 234 { 222 235 struct drm_i915_private *dev_priv = dev->dev_private; ··· 247 234 dev_priv->ellc_size = 128; 248 235 DRM_INFO("Found %zuMB of eLLC\n", dev_priv->ellc_size); 249 236 } 250 - } 251 237 252 - static void intel_uncore_forcewake_reset(struct drm_device *dev) 253 - { 254 - struct drm_i915_private *dev_priv = dev->dev_private; 255 - 256 - if (IS_VALLEYVIEW(dev)) { 257 - vlv_force_wake_reset(dev_priv); 258 - } else if (INTEL_INFO(dev)->gen >= 6) { 259 - __gen6_gt_force_wake_reset(dev_priv); 260 - if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) 261 - __gen6_gt_force_wake_mt_reset(dev_priv); 262 - } 238 + intel_uncore_forcewake_reset(dev); 263 239 } 264 240 265 241 void intel_uncore_sanitize(struct drm_device *dev)
+4 -2
drivers/gpu/drm/radeon/atombios_i2c.c
···
 			return -EINVAL;
 		}
 		args.ucRegIndex = buf[0];
-		if (num > 1)
-			memcpy(&out, &buf[1], num - 1);
+		if (num > 1) {
+			num--;
+			memcpy(&out, &buf[1], num);
+		}
 		args.lpI2CDataOut = cpu_to_le16(out);
 	} else {
 		if (num > ATOM_MAX_HW_I2C_READ) {
+29 -28
drivers/gpu/drm/radeon/cik.c
··· 1560 1560 * cik_mm_rdoorbell - read a doorbell dword 1561 1561 * 1562 1562 * @rdev: radeon_device pointer 1563 - * @offset: byte offset into the aperture 1563 + * @index: doorbell index 1564 1564 * 1565 1565 * Returns the value in the doorbell aperture at the 1566 - * requested offset (CIK). 1566 + * requested doorbell index (CIK). 1567 1567 */ 1568 - u32 cik_mm_rdoorbell(struct radeon_device *rdev, u32 offset) 1568 + u32 cik_mm_rdoorbell(struct radeon_device *rdev, u32 index) 1569 1569 { 1570 - if (offset < rdev->doorbell.size) { 1571 - return readl(((void __iomem *)rdev->doorbell.ptr) + offset); 1570 + if (index < rdev->doorbell.num_doorbells) { 1571 + return readl(rdev->doorbell.ptr + index); 1572 1572 } else { 1573 - DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", offset); 1573 + DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index); 1574 1574 return 0; 1575 1575 } 1576 1576 } ··· 1579 1579 * cik_mm_wdoorbell - write a doorbell dword 1580 1580 * 1581 1581 * @rdev: radeon_device pointer 1582 - * @offset: byte offset into the aperture 1582 + * @index: doorbell index 1583 1583 * @v: value to write 1584 1584 * 1585 1585 * Writes @v to the doorbell aperture at the 1586 - * requested offset (CIK). 1586 + * requested doorbell index (CIK). 1587 1587 */ 1588 - void cik_mm_wdoorbell(struct radeon_device *rdev, u32 offset, u32 v) 1588 + void cik_mm_wdoorbell(struct radeon_device *rdev, u32 index, u32 v) 1589 1589 { 1590 - if (offset < rdev->doorbell.size) { 1591 - writel(v, ((void __iomem *)rdev->doorbell.ptr) + offset); 1590 + if (index < rdev->doorbell.num_doorbells) { 1591 + writel(v, rdev->doorbell.ptr + index); 1592 1592 } else { 1593 - DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", offset); 1593 + DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index); 1594 1594 } 1595 1595 } 1596 1596 ··· 2427 2427 gb_tile_moden = 0; 2428 2428 break; 2429 2429 } 2430 + rdev->config.cik.macrotile_mode_array[reg_offset] = gb_tile_moden; 2430 2431 WREG32(GB_MACROTILE_MODE0 + (reg_offset * 4), gb_tile_moden); 2431 2432 } 2432 2433 } else if (num_pipe_configs == 4) { ··· 2774 2773 gb_tile_moden = 0; 2775 2774 break; 2776 2775 } 2776 + rdev->config.cik.macrotile_mode_array[reg_offset] = gb_tile_moden; 2777 2777 WREG32(GB_MACROTILE_MODE0 + (reg_offset * 4), gb_tile_moden); 2778 2778 } 2779 2779 } else if (num_pipe_configs == 2) { ··· 2992 2990 gb_tile_moden = 0; 2993 2991 break; 2994 2992 } 2993 + rdev->config.cik.macrotile_mode_array[reg_offset] = gb_tile_moden; 2995 2994 WREG32(GB_MACROTILE_MODE0 + (reg_offset * 4), gb_tile_moden); 2996 2995 } 2997 2996 } else ··· 3559 3556 radeon_ring_write(ring, 0); 3560 3557 } 3561 3558 3562 - void cik_semaphore_ring_emit(struct radeon_device *rdev, 3559 + bool cik_semaphore_ring_emit(struct radeon_device *rdev, 3563 3560 struct radeon_ring *ring, 3564 3561 struct radeon_semaphore *semaphore, 3565 3562 bool emit_wait) 3566 3563 { 3564 + /* TODO: figure out why semaphore cause lockups */ 3565 + #if 0 3567 3566 uint64_t addr = semaphore->gpu_addr; 3568 3567 unsigned sel = emit_wait ? 
PACKET3_SEM_SEL_WAIT : PACKET3_SEM_SEL_SIGNAL; 3569 3568 3570 3569 radeon_ring_write(ring, PACKET3(PACKET3_MEM_SEMAPHORE, 1)); 3571 3570 radeon_ring_write(ring, addr & 0xffffffff); 3572 3571 radeon_ring_write(ring, (upper_32_bits(addr) & 0xffff) | sel); 3572 + 3573 + return true; 3574 + #else 3575 + return false; 3576 + #endif 3573 3577 } 3574 3578 3575 3579 /** ··· 3619 3609 return r; 3620 3610 } 3621 3611 3622 - if (radeon_fence_need_sync(*fence, ring->idx)) { 3623 - radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring, 3624 - ring->idx); 3625 - radeon_fence_note_sync(*fence, ring->idx); 3626 - } else { 3627 - radeon_semaphore_free(rdev, &sem, NULL); 3628 - } 3612 + radeon_semaphore_sync_to(sem, *fence); 3613 + radeon_semaphore_sync_rings(rdev, sem, ring->idx); 3629 3614 3630 3615 for (i = 0; i < num_loops; i++) { 3631 3616 cur_size_in_bytes = size_in_bytes; ··· 4057 4052 struct radeon_ring *ring) 4058 4053 { 4059 4054 rdev->wb.wb[ring->wptr_offs/4] = cpu_to_le32(ring->wptr); 4060 - WDOORBELL32(ring->doorbell_offset, ring->wptr); 4055 + WDOORBELL32(ring->doorbell_index, ring->wptr); 4061 4056 } 4062 4057 4063 4058 /** ··· 4398 4393 return r; 4399 4394 } 4400 4395 4401 - /* doorbell offset */ 4402 - rdev->ring[idx].doorbell_offset = 4403 - (rdev->ring[idx].doorbell_page_num * PAGE_SIZE) + 0; 4404 - 4405 4396 /* init the mqd struct */ 4406 4397 memset(buf, 0, sizeof(struct bonaire_mqd)); 4407 4398 ··· 4509 4508 RREG32(CP_HQD_PQ_DOORBELL_CONTROL); 4510 4509 mqd->queue_state.cp_hqd_pq_doorbell_control &= ~DOORBELL_OFFSET_MASK; 4511 4510 mqd->queue_state.cp_hqd_pq_doorbell_control |= 4512 - DOORBELL_OFFSET(rdev->ring[idx].doorbell_offset / 4); 4511 + DOORBELL_OFFSET(rdev->ring[idx].doorbell_index); 4513 4512 mqd->queue_state.cp_hqd_pq_doorbell_control |= DOORBELL_EN; 4514 4513 mqd->queue_state.cp_hqd_pq_doorbell_control &= 4515 4514 ~(DOORBELL_SOURCE | DOORBELL_HIT); ··· 7840 7839 ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX]; 7841 7840 ring->ring_obj = NULL; 7842 7841 r600_ring_init(rdev, ring, 1024 * 1024); 7843 - r = radeon_doorbell_get(rdev, &ring->doorbell_page_num); 7842 + r = radeon_doorbell_get(rdev, &ring->doorbell_index); 7844 7843 if (r) 7845 7844 return r; 7846 7845 7847 7846 ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX]; 7848 7847 ring->ring_obj = NULL; 7849 7848 r600_ring_init(rdev, ring, 1024 * 1024); 7850 - r = radeon_doorbell_get(rdev, &ring->doorbell_page_num); 7849 + r = radeon_doorbell_get(rdev, &ring->doorbell_index); 7851 7850 if (r) 7852 7851 return r; 7853 7852
+5 -8
drivers/gpu/drm/radeon/cik_sdma.c
··· 130 130 * Add a DMA semaphore packet to the ring wait on or signal 131 131 * other rings (CIK). 132 132 */ 133 - void cik_sdma_semaphore_ring_emit(struct radeon_device *rdev, 133 + bool cik_sdma_semaphore_ring_emit(struct radeon_device *rdev, 134 134 struct radeon_ring *ring, 135 135 struct radeon_semaphore *semaphore, 136 136 bool emit_wait) ··· 141 141 radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SEMAPHORE, 0, extra_bits)); 142 142 radeon_ring_write(ring, addr & 0xfffffff8); 143 143 radeon_ring_write(ring, upper_32_bits(addr) & 0xffffffff); 144 + 145 + return true; 144 146 } 145 147 146 148 /** ··· 445 443 return r; 446 444 } 447 445 448 - if (radeon_fence_need_sync(*fence, ring->idx)) { 449 - radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring, 450 - ring->idx); 451 - radeon_fence_note_sync(*fence, ring->idx); 452 - } else { 453 - radeon_semaphore_free(rdev, &sem, NULL); 454 - } 446 + radeon_semaphore_sync_to(sem, *fence); 447 + radeon_semaphore_sync_rings(rdev, sem, ring->idx); 455 448 456 449 for (i = 0; i < num_loops; i++) { 457 450 cur_size_in_bytes = size_in_bytes;
+2
drivers/gpu/drm/radeon/cypress_dpm.c
···
 static int cypress_pcie_performance_request(struct radeon_device *rdev,
 					    u8 perf_req, bool advertise)
 {
+#if defined(CONFIG_ACPI)
 	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
+#endif
 	u32 tmp;
 
 	udelay(10);
+2 -7
drivers/gpu/drm/radeon/evergreen_dma.c
···
 		return r;
 	}
 
-	if (radeon_fence_need_sync(*fence, ring->idx)) {
-		radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring,
-					    ring->idx);
-		radeon_fence_note_sync(*fence, ring->idx);
-	} else {
-		radeon_semaphore_free(rdev, &sem, NULL);
-	}
+	radeon_semaphore_sync_to(sem, *fence);
+	radeon_semaphore_sync_rings(rdev, sem, ring->idx);
 
 	for (i = 0; i < num_loops; i++) {
 		cur_size_in_dw = size_in_dw;
+1 -1
drivers/gpu/drm/radeon/ni_dpm.c
···
 static int ni_pcie_performance_request(struct radeon_device *rdev,
 				       u8 perf_req, bool advertise)
 {
+#if defined(CONFIG_ACPI)
 	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
 
-#if defined(CONFIG_ACPI)
 	if ((perf_req == PCIE_PERF_REQ_PECI_GEN1) ||
 	    (perf_req == PCIE_PERF_REQ_PECI_GEN2)) {
 		if (eg_pi->pcie_performance_request_registered == false)
+2 -1
drivers/gpu/drm/radeon/r100.c
···
 	radeon_ring_write(ring, RADEON_SW_INT_FIRE);
 }
 
-void r100_semaphore_ring_emit(struct radeon_device *rdev,
+bool r100_semaphore_ring_emit(struct radeon_device *rdev,
 			      struct radeon_ring *ring,
 			      struct radeon_semaphore *semaphore,
 			      bool emit_wait)
 {
 	/* Unused on older asics, since we don't have semaphores or multiple rings */
 	BUG();
+	return false;
 }
 
 int r100_copy_blit(struct radeon_device *rdev,
+5 -8
drivers/gpu/drm/radeon/r600.c
··· 2650 2650 } 2651 2651 } 2652 2652 2653 - void r600_semaphore_ring_emit(struct radeon_device *rdev, 2653 + bool r600_semaphore_ring_emit(struct radeon_device *rdev, 2654 2654 struct radeon_ring *ring, 2655 2655 struct radeon_semaphore *semaphore, 2656 2656 bool emit_wait) ··· 2664 2664 radeon_ring_write(ring, PACKET3(PACKET3_MEM_SEMAPHORE, 1)); 2665 2665 radeon_ring_write(ring, addr & 0xffffffff); 2666 2666 radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | sel); 2667 + 2668 + return true; 2667 2669 } 2668 2670 2669 2671 /** ··· 2708 2706 return r; 2709 2707 } 2710 2708 2711 - if (radeon_fence_need_sync(*fence, ring->idx)) { 2712 - radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring, 2713 - ring->idx); 2714 - radeon_fence_note_sync(*fence, ring->idx); 2715 - } else { 2716 - radeon_semaphore_free(rdev, &sem, NULL); 2717 - } 2709 + radeon_semaphore_sync_to(sem, *fence); 2710 + radeon_semaphore_sync_rings(rdev, sem, ring->idx); 2718 2711 2719 2712 radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1)); 2720 2713 radeon_ring_write(ring, (WAIT_UNTIL - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
+5 -8
drivers/gpu/drm/radeon/r600_dma.c
··· 311 311 * Add a DMA semaphore packet to the ring wait on or signal 312 312 * other rings (r6xx-SI). 313 313 */ 314 - void r600_dma_semaphore_ring_emit(struct radeon_device *rdev, 314 + bool r600_dma_semaphore_ring_emit(struct radeon_device *rdev, 315 315 struct radeon_ring *ring, 316 316 struct radeon_semaphore *semaphore, 317 317 bool emit_wait) ··· 322 322 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SEMAPHORE, 0, s, 0)); 323 323 radeon_ring_write(ring, addr & 0xfffffffc); 324 324 radeon_ring_write(ring, upper_32_bits(addr) & 0xff); 325 + 326 + return true; 325 327 } 326 328 327 329 /** ··· 464 462 return r; 465 463 } 466 464 467 - if (radeon_fence_need_sync(*fence, ring->idx)) { 468 - radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring, 469 - ring->idx); 470 - radeon_fence_note_sync(*fence, ring->idx); 471 - } else { 472 - radeon_semaphore_free(rdev, &sem, NULL); 473 - } 465 + radeon_semaphore_sync_to(sem, *fence); 466 + radeon_semaphore_sync_rings(rdev, sem, ring->idx); 474 467 475 468 for (i = 0; i < num_loops; i++) { 476 469 cur_size_in_dw = size_in_dw;
+21 -17
drivers/gpu/drm/radeon/radeon.h
··· 348 348 void radeon_fence_process(struct radeon_device *rdev, int ring); 349 349 bool radeon_fence_signaled(struct radeon_fence *fence); 350 350 int radeon_fence_wait(struct radeon_fence *fence, bool interruptible); 351 + int radeon_fence_wait_locked(struct radeon_fence *fence); 351 352 int radeon_fence_wait_next_locked(struct radeon_device *rdev, int ring); 352 353 int radeon_fence_wait_empty_locked(struct radeon_device *rdev, int ring); 353 354 int radeon_fence_wait_any(struct radeon_device *rdev, ··· 549 548 struct radeon_sa_bo *sa_bo; 550 549 signed waiters; 551 550 uint64_t gpu_addr; 551 + struct radeon_fence *sync_to[RADEON_NUM_RINGS]; 552 552 }; 553 553 554 554 int radeon_semaphore_create(struct radeon_device *rdev, 555 555 struct radeon_semaphore **semaphore); 556 - void radeon_semaphore_emit_signal(struct radeon_device *rdev, int ring, 556 + bool radeon_semaphore_emit_signal(struct radeon_device *rdev, int ring, 557 557 struct radeon_semaphore *semaphore); 558 - void radeon_semaphore_emit_wait(struct radeon_device *rdev, int ring, 558 + bool radeon_semaphore_emit_wait(struct radeon_device *rdev, int ring, 559 559 struct radeon_semaphore *semaphore); 560 + void radeon_semaphore_sync_to(struct radeon_semaphore *semaphore, 561 + struct radeon_fence *fence); 560 562 int radeon_semaphore_sync_rings(struct radeon_device *rdev, 561 563 struct radeon_semaphore *semaphore, 562 - int signaler, int waiter); 564 + int waiting_ring); 563 565 void radeon_semaphore_free(struct radeon_device *rdev, 564 566 struct radeon_semaphore **semaphore, 565 567 struct radeon_fence *fence); ··· 649 645 /* 650 646 * GPU doorbell structures, functions & helpers 651 647 */ 648 + #define RADEON_MAX_DOORBELLS 1024 /* Reserve at most 1024 doorbell slots for radeon-owned rings. */ 649 + 652 650 struct radeon_doorbell { 653 - u32 num_pages; 654 - bool free[1024]; 655 651 /* doorbell mmio */ 656 - resource_size_t base; 657 - resource_size_t size; 658 - void __iomem *ptr; 652 + resource_size_t base; 653 + resource_size_t size; 654 + u32 __iomem *ptr; 655 + u32 num_doorbells; /* Number of doorbells actually reserved for radeon. 
*/ 656 + unsigned long used[DIV_ROUND_UP(RADEON_MAX_DOORBELLS, BITS_PER_LONG)]; 659 657 }; 660 658 661 659 int radeon_doorbell_get(struct radeon_device *rdev, u32 *page); ··· 771 765 struct radeon_fence *fence; 772 766 struct radeon_vm *vm; 773 767 bool is_const_ib; 774 - struct radeon_fence *sync_to[RADEON_NUM_RINGS]; 775 768 struct radeon_semaphore *semaphore; 776 769 }; 777 770 ··· 804 799 u32 pipe; 805 800 u32 queue; 806 801 struct radeon_bo *mqd_obj; 807 - u32 doorbell_page_num; 808 - u32 doorbell_offset; 802 + u32 doorbell_index; 809 803 unsigned wptr_offs; 810 804 }; 811 805 ··· 925 921 struct radeon_ib *ib, struct radeon_vm *vm, 926 922 unsigned size); 927 923 void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib *ib); 928 - void radeon_ib_sync_to(struct radeon_ib *ib, struct radeon_fence *fence); 929 924 int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib, 930 925 struct radeon_ib *const_ib); 931 926 int radeon_ib_pool_init(struct radeon_device *rdev); ··· 1641 1638 /* command emmit functions */ 1642 1639 void (*ib_execute)(struct radeon_device *rdev, struct radeon_ib *ib); 1643 1640 void (*emit_fence)(struct radeon_device *rdev, struct radeon_fence *fence); 1644 - void (*emit_semaphore)(struct radeon_device *rdev, struct radeon_ring *cp, 1641 + bool (*emit_semaphore)(struct radeon_device *rdev, struct radeon_ring *cp, 1645 1642 struct radeon_semaphore *semaphore, bool emit_wait); 1646 1643 void (*vm_flush)(struct radeon_device *rdev, int ridx, struct radeon_vm *vm); 1647 1644 ··· 1982 1979 1983 1980 unsigned tile_config; 1984 1981 uint32_t tile_mode_array[32]; 1982 + uint32_t macrotile_mode_array[16]; 1985 1983 }; 1986 1984 1987 1985 union radeon_asic_config { ··· 2243 2239 u32 r100_io_rreg(struct radeon_device *rdev, u32 reg); 2244 2240 void r100_io_wreg(struct radeon_device *rdev, u32 reg, u32 v); 2245 2241 2246 - u32 cik_mm_rdoorbell(struct radeon_device *rdev, u32 offset); 2247 - void cik_mm_wdoorbell(struct radeon_device *rdev, u32 offset, u32 v); 2242 + u32 cik_mm_rdoorbell(struct radeon_device *rdev, u32 index); 2243 + void cik_mm_wdoorbell(struct radeon_device *rdev, u32 index, u32 v); 2248 2244 2249 2245 /* 2250 2246 * Cast helper ··· 2307 2303 #define RREG32_IO(reg) r100_io_rreg(rdev, (reg)) 2308 2304 #define WREG32_IO(reg, v) r100_io_wreg(rdev, (reg), (v)) 2309 2305 2310 - #define RDOORBELL32(offset) cik_mm_rdoorbell(rdev, (offset)) 2311 - #define WDOORBELL32(offset, v) cik_mm_wdoorbell(rdev, (offset), (v)) 2306 + #define RDOORBELL32(index) cik_mm_rdoorbell(rdev, (index)) 2307 + #define WDOORBELL32(index, v) cik_mm_wdoorbell(rdev, (index), (v)) 2312 2308 2313 2309 /* 2314 2310 * Indirect registers accessor
+4
drivers/gpu/drm/radeon/radeon_asic.c
··· 2015 2015 .bandwidth_update = &dce8_bandwidth_update, 2016 2016 .get_vblank_counter = &evergreen_get_vblank_counter, 2017 2017 .wait_for_vblank = &dce4_wait_for_vblank, 2018 + .set_backlight_level = &atombios_set_backlight_level, 2019 + .get_backlight_level = &atombios_get_backlight_level, 2018 2020 .hdmi_enable = &evergreen_hdmi_enable, 2019 2021 .hdmi_setmode = &evergreen_hdmi_setmode, 2020 2022 }, ··· 2116 2114 .bandwidth_update = &dce8_bandwidth_update, 2117 2115 .get_vblank_counter = &evergreen_get_vblank_counter, 2118 2116 .wait_for_vblank = &dce4_wait_for_vblank, 2117 + .set_backlight_level = &atombios_set_backlight_level, 2118 + .get_backlight_level = &atombios_get_backlight_level, 2119 2119 .hdmi_enable = &evergreen_hdmi_enable, 2120 2120 .hdmi_setmode = &evergreen_hdmi_setmode, 2121 2121 },
+7 -11
drivers/gpu/drm/radeon/radeon_asic.h
··· 80 80 int r100_irq_process(struct radeon_device *rdev); 81 81 void r100_fence_ring_emit(struct radeon_device *rdev, 82 82 struct radeon_fence *fence); 83 - void r100_semaphore_ring_emit(struct radeon_device *rdev, 83 + bool r100_semaphore_ring_emit(struct radeon_device *rdev, 84 84 struct radeon_ring *cp, 85 85 struct radeon_semaphore *semaphore, 86 86 bool emit_wait); ··· 313 313 int r600_dma_cs_parse(struct radeon_cs_parser *p); 314 314 void r600_fence_ring_emit(struct radeon_device *rdev, 315 315 struct radeon_fence *fence); 316 - void r600_semaphore_ring_emit(struct radeon_device *rdev, 316 + bool r600_semaphore_ring_emit(struct radeon_device *rdev, 317 317 struct radeon_ring *cp, 318 318 struct radeon_semaphore *semaphore, 319 319 bool emit_wait); 320 320 void r600_dma_fence_ring_emit(struct radeon_device *rdev, 321 321 struct radeon_fence *fence); 322 - void r600_dma_semaphore_ring_emit(struct radeon_device *rdev, 322 + bool r600_dma_semaphore_ring_emit(struct radeon_device *rdev, 323 323 struct radeon_ring *ring, 324 324 struct radeon_semaphore *semaphore, 325 325 bool emit_wait); ··· 566 566 */ 567 567 void cayman_fence_ring_emit(struct radeon_device *rdev, 568 568 struct radeon_fence *fence); 569 - void cayman_uvd_semaphore_emit(struct radeon_device *rdev, 570 - struct radeon_ring *ring, 571 - struct radeon_semaphore *semaphore, 572 - bool emit_wait); 573 569 void cayman_pcie_gart_tlb_flush(struct radeon_device *rdev); 574 570 int cayman_init(struct radeon_device *rdev); 575 571 void cayman_fini(struct radeon_device *rdev); ··· 693 697 int cik_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk); 694 698 void cik_sdma_fence_ring_emit(struct radeon_device *rdev, 695 699 struct radeon_fence *fence); 696 - void cik_sdma_semaphore_ring_emit(struct radeon_device *rdev, 700 + bool cik_sdma_semaphore_ring_emit(struct radeon_device *rdev, 697 701 struct radeon_ring *ring, 698 702 struct radeon_semaphore *semaphore, 699 703 bool emit_wait); ··· 713 717 struct radeon_fence *fence); 714 718 void cik_fence_compute_ring_emit(struct radeon_device *rdev, 715 719 struct radeon_fence *fence); 716 - void cik_semaphore_ring_emit(struct radeon_device *rdev, 720 + bool cik_semaphore_ring_emit(struct radeon_device *rdev, 717 721 struct radeon_ring *cp, 718 722 struct radeon_semaphore *semaphore, 719 723 bool emit_wait); ··· 803 807 804 808 int uvd_v1_0_ring_test(struct radeon_device *rdev, struct radeon_ring *ring); 805 809 int uvd_v1_0_ib_test(struct radeon_device *rdev, struct radeon_ring *ring); 806 - void uvd_v1_0_semaphore_emit(struct radeon_device *rdev, 810 + bool uvd_v1_0_semaphore_emit(struct radeon_device *rdev, 807 811 struct radeon_ring *ring, 808 812 struct radeon_semaphore *semaphore, 809 813 bool emit_wait); ··· 815 819 struct radeon_fence *fence); 816 820 817 821 /* uvd v3.1 */ 818 - void uvd_v3_1_semaphore_emit(struct radeon_device *rdev, 822 + bool uvd_v3_1_semaphore_emit(struct radeon_device *rdev, 819 823 struct radeon_ring *ring, 820 824 struct radeon_semaphore *semaphore, 821 825 bool emit_wait);
+5 -4
drivers/gpu/drm/radeon/radeon_cs.c
··· 159 159 if (!p->relocs[i].robj) 160 160 continue; 161 161 162 - radeon_ib_sync_to(&p->ib, p->relocs[i].robj->tbo.sync_obj); 162 + radeon_semaphore_sync_to(p->ib.semaphore, 163 + p->relocs[i].robj->tbo.sync_obj); 163 164 } 164 165 } 165 166 ··· 412 411 goto out; 413 412 } 414 413 radeon_cs_sync_rings(parser); 415 - radeon_ib_sync_to(&parser->ib, vm->fence); 416 - radeon_ib_sync_to(&parser->ib, radeon_vm_grab_id( 417 - rdev, vm, parser->ring)); 414 + radeon_semaphore_sync_to(parser->ib.semaphore, vm->fence); 415 + radeon_semaphore_sync_to(parser->ib.semaphore, 416 + radeon_vm_grab_id(rdev, vm, parser->ring)); 418 417 419 418 if ((rdev->family >= CHIP_TAHITI) && 420 419 (parser->chunk_const_ib_idx != -1)) {
+20 -27
drivers/gpu/drm/radeon/radeon_device.c
··· 251 251 */ 252 252 int radeon_doorbell_init(struct radeon_device *rdev) 253 253 { 254 - int i; 255 - 256 254 /* doorbell bar mapping */ 257 255 rdev->doorbell.base = pci_resource_start(rdev->pdev, 2); 258 256 rdev->doorbell.size = pci_resource_len(rdev->pdev, 2); 259 257 260 - /* limit to 4 MB for now */ 261 - if (rdev->doorbell.size > (4 * 1024 * 1024)) 262 - rdev->doorbell.size = 4 * 1024 * 1024; 258 + rdev->doorbell.num_doorbells = min_t(u32, rdev->doorbell.size / sizeof(u32), RADEON_MAX_DOORBELLS); 259 + if (rdev->doorbell.num_doorbells == 0) 260 + return -EINVAL; 263 261 264 - rdev->doorbell.ptr = ioremap(rdev->doorbell.base, rdev->doorbell.size); 262 + rdev->doorbell.ptr = ioremap(rdev->doorbell.base, rdev->doorbell.num_doorbells * sizeof(u32)); 265 263 if (rdev->doorbell.ptr == NULL) { 266 264 return -ENOMEM; 267 265 } 268 266 DRM_INFO("doorbell mmio base: 0x%08X\n", (uint32_t)rdev->doorbell.base); 269 267 DRM_INFO("doorbell mmio size: %u\n", (unsigned)rdev->doorbell.size); 270 268 271 - rdev->doorbell.num_pages = rdev->doorbell.size / PAGE_SIZE; 269 + memset(&rdev->doorbell.used, 0, sizeof(rdev->doorbell.used)); 272 270 273 - for (i = 0; i < rdev->doorbell.num_pages; i++) { 274 - rdev->doorbell.free[i] = true; 275 - } 276 271 return 0; 277 272 } 278 273 ··· 285 290 } 286 291 287 292 /** 288 - * radeon_doorbell_get - Allocate a doorbell page 293 + * radeon_doorbell_get - Allocate a doorbell entry 289 294 * 290 295 * @rdev: radeon_device pointer 291 - * @doorbell: doorbell page number 296 + * @doorbell: doorbell index 292 297 * 293 - * Allocate a doorbell page for use by the driver (all asics). 298 + * Allocate a doorbell for use by the driver (all asics). 294 299 * Returns 0 on success or -EINVAL on failure. 295 300 */ 296 301 int radeon_doorbell_get(struct radeon_device *rdev, u32 *doorbell) 297 302 { 298 - int i; 299 - 300 - for (i = 0; i < rdev->doorbell.num_pages; i++) { 301 - if (rdev->doorbell.free[i]) { 302 - rdev->doorbell.free[i] = false; 303 - *doorbell = i; 304 - return 0; 305 - } 303 + unsigned long offset = find_first_zero_bit(rdev->doorbell.used, rdev->doorbell.num_doorbells); 304 + if (offset < rdev->doorbell.num_doorbells) { 305 + __set_bit(offset, rdev->doorbell.used); 306 + *doorbell = offset; 307 + return 0; 308 + } else { 309 + return -EINVAL; 306 310 } 307 - return -EINVAL; 308 311 } 309 312 310 313 /** 311 - * radeon_doorbell_free - Free a doorbell page 314 + * radeon_doorbell_free - Free a doorbell entry 312 315 * 313 316 * @rdev: radeon_device pointer 314 - * @doorbell: doorbell page number 317 + * @doorbell: doorbell index 315 318 * 316 - * Free a doorbell page allocated for use by the driver (all asics) 319 + * Free a doorbell allocated for use by the driver (all asics) 317 320 */ 318 321 void radeon_doorbell_free(struct radeon_device *rdev, u32 doorbell) 319 322 { 320 - if (doorbell < rdev->doorbell.num_pages) 321 - rdev->doorbell.free[doorbell] = true; 323 + if (doorbell < rdev->doorbell.num_doorbells) 324 + __clear_bit(doorbell, rdev->doorbell.used); 322 325 } 323 326 324 327 /*
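The radeon_device.c hunk above replaces the bool free[1024] page allocator with a bitmap of individual doorbell slots. The same allocation pattern, reduced to a stand-alone sketch (slot_get(), slot_free() and MAX_SLOTS are hypothetical names; the bit helpers are the regular kernel ones):

#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/types.h>

#define MAX_SLOTS 1024

static unsigned long slot_used[DIV_ROUND_UP(MAX_SLOTS, BITS_PER_LONG)];

/* Hand out the first free slot index, or -EINVAL if all are taken. */
static int slot_get(u32 num_slots, u32 *slot)
{
	unsigned long offset = find_first_zero_bit(slot_used, num_slots);

	if (offset >= num_slots)
		return -EINVAL;

	__set_bit(offset, slot_used);		/* non-atomic: caller serializes */
	*slot = offset;
	return 0;
}

static void slot_free(u32 num_slots, u32 slot)
{
	if (slot < num_slots)
		__clear_bit(slot, slot_used);	/* non-atomic, like __set_bit above */
}

Indexing whole u32 doorbells instead of pages is also what lets cik_mm_rdoorbell()/cik_mm_wdoorbell() in the cik.c hunks drop the byte-offset arithmetic.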
+2 -1
drivers/gpu/drm/radeon/radeon_drv.c
···
  * 2.32.0 - new info request for rings working
  * 2.33.0 - Add SI tiling mode array query
  * 2.34.0 - Add CIK tiling mode array query
+ * 2.35.0 - Add CIK macrotile mode array query
  */
 #define KMS_DRIVER_MAJOR	2
-#define KMS_DRIVER_MINOR	34
+#define KMS_DRIVER_MINOR	35
 #define KMS_DRIVER_PATCHLEVEL	0
 int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags);
 int radeon_driver_unload_kms(struct drm_device *dev);
+30
drivers/gpu/drm/radeon/radeon_fence.c
··· 472 472 } 473 473 474 474 /** 475 + * radeon_fence_wait_locked - wait for a fence to signal 476 + * 477 + * @fence: radeon fence object 478 + * 479 + * Wait for the requested fence to signal (all asics). 480 + * Returns 0 if the fence has passed, error for all other cases. 481 + */ 482 + int radeon_fence_wait_locked(struct radeon_fence *fence) 483 + { 484 + uint64_t seq[RADEON_NUM_RINGS] = {}; 485 + int r; 486 + 487 + if (fence == NULL) { 488 + WARN(1, "Querying an invalid fence : %p !\n", fence); 489 + return -EINVAL; 490 + } 491 + 492 + seq[fence->ring] = fence->seq; 493 + if (seq[fence->ring] == RADEON_FENCE_SIGNALED_SEQ) 494 + return 0; 495 + 496 + r = radeon_fence_wait_seq(fence->rdev, seq, false, false); 497 + if (r) 498 + return r; 499 + 500 + fence->seq = RADEON_FENCE_SIGNALED_SEQ; 501 + return 0; 502 + } 503 + 504 + /** 475 505 * radeon_fence_wait_next_locked - wait for the next fence to signal 476 506 * 477 507 * @rdev: radeon device pointer
+4 -2
drivers/gpu/drm/radeon/radeon_gart.c
···
 	radeon_asic_vm_set_page(rdev, &ib, vm->pd_gpu_addr,
 				0, pd_entries, 0, 0);
 
-	radeon_ib_sync_to(&ib, vm->fence);
+	radeon_semaphore_sync_to(ib.semaphore, vm->fence);
 	r = radeon_ib_schedule(rdev, &ib, NULL);
 	if (r) {
 		radeon_ib_free(rdev, &ib);
···
 		return -ENOMEM;
 
 	r = radeon_ib_get(rdev, R600_RING_TYPE_DMA_INDEX, &ib, NULL, ndw * 4);
+	if (r)
+		return r;
 	ib.length_dw = 0;
 
 	r = radeon_vm_update_pdes(rdev, vm, &ib, bo_va->soffset, bo_va->eoffset);
···
 	radeon_vm_update_ptes(rdev, vm, &ib, bo_va->soffset, bo_va->eoffset,
 			      addr, radeon_vm_page_flags(bo_va->flags));
 
-	radeon_ib_sync_to(&ib, vm->fence);
+	radeon_semaphore_sync_to(ib.semaphore, vm->fence);
 	r = radeon_ib_schedule(rdev, &ib, NULL);
 	if (r) {
 		radeon_ib_free(rdev, &ib);
+10 -1
drivers/gpu/drm/radeon/radeon_kms.c
··· 340 340 break; 341 341 case RADEON_INFO_BACKEND_MAP: 342 342 if (rdev->family >= CHIP_BONAIRE) 343 - return -EINVAL; 343 + *value = rdev->config.cik.backend_map; 344 344 else if (rdev->family >= CHIP_TAHITI) 345 345 *value = rdev->config.si.backend_map; 346 346 else if (rdev->family >= CHIP_CAYMAN) ··· 446 446 value_size = sizeof(uint32_t)*32; 447 447 } else { 448 448 DRM_DEBUG_KMS("tile mode array is si+ only!\n"); 449 + return -EINVAL; 450 + } 451 + break; 452 + case RADEON_INFO_CIK_MACROTILE_MODE_ARRAY: 453 + if (rdev->family >= CHIP_BONAIRE) { 454 + value = rdev->config.cik.macrotile_mode_array; 455 + value_size = sizeof(uint32_t)*16; 456 + } else { 457 + DRM_DEBUG_KMS("macrotile mode array is cik+ only!\n"); 449 458 return -EINVAL; 450 459 } 451 460 break;
+28
drivers/gpu/drm/radeon/radeon_legacy_crtc.c
··· 422 422 /* Pin framebuffer & get tilling informations */ 423 423 obj = radeon_fb->obj; 424 424 rbo = gem_to_radeon_bo(obj); 425 + retry: 425 426 r = radeon_bo_reserve(rbo, false); 426 427 if (unlikely(r != 0)) 427 428 return r; ··· 431 430 &base); 432 431 if (unlikely(r != 0)) { 433 432 radeon_bo_unreserve(rbo); 433 + 434 + /* On old GPU like RN50 with little vram pining can fails because 435 + * current fb is taking all space needed. So instead of unpining 436 + * the old buffer after pining the new one, first unpin old one 437 + * and then retry pining new one. 438 + * 439 + * As only master can set mode only master can pin and it is 440 + * unlikely the master client will race with itself especialy 441 + * on those old gpu with single crtc. 442 + * 443 + * We don't shutdown the display controller because new buffer 444 + * will end up in same spot. 445 + */ 446 + if (!atomic && fb && fb != crtc->fb) { 447 + struct radeon_bo *old_rbo; 448 + unsigned long nsize, osize; 449 + 450 + old_rbo = gem_to_radeon_bo(to_radeon_framebuffer(fb)->obj); 451 + osize = radeon_bo_size(old_rbo); 452 + nsize = radeon_bo_size(rbo); 453 + if (nsize <= osize && !radeon_bo_reserve(old_rbo, false)) { 454 + radeon_bo_unpin(old_rbo); 455 + radeon_bo_unreserve(old_rbo); 456 + fb = NULL; 457 + goto retry; 458 + } 459 + } 434 460 return -EINVAL; 435 461 } 436 462 radeon_bo_get_tiling_flags(rbo, &tiling_flags, NULL);
+1 -1
drivers/gpu/drm/radeon/radeon_pm.c
···
 	case CHIP_RS780:
 	case CHIP_RS880:
 	case CHIP_CAYMAN:
-	case CHIP_ARUBA:
 	case CHIP_BONAIRE:
 	case CHIP_KABINI:
 	case CHIP_KAVERI:
···
 	case CHIP_BARTS:
 	case CHIP_TURKS:
 	case CHIP_CAICOS:
+	case CHIP_ARUBA:
 	case CHIP_TAHITI:
 	case CHIP_PITCAIRN:
 	case CHIP_VERDE:
+10 -36
drivers/gpu/drm/radeon/radeon_ring.c
··· 61 61 struct radeon_ib *ib, struct radeon_vm *vm, 62 62 unsigned size) 63 63 { 64 - int i, r; 64 + int r; 65 65 66 66 r = radeon_sa_bo_new(rdev, &rdev->ring_tmp_bo, &ib->sa_bo, size, 256, true); 67 67 if (r) { ··· 87 87 ib->gpu_addr = radeon_sa_bo_gpu_addr(ib->sa_bo); 88 88 } 89 89 ib->is_const_ib = false; 90 - for (i = 0; i < RADEON_NUM_RINGS; ++i) 91 - ib->sync_to[i] = NULL; 92 90 93 91 return 0; 94 92 } ··· 104 106 radeon_semaphore_free(rdev, &ib->semaphore, ib->fence); 105 107 radeon_sa_bo_free(rdev, &ib->sa_bo, ib->fence); 106 108 radeon_fence_unref(&ib->fence); 107 - } 108 - 109 - /** 110 - * radeon_ib_sync_to - sync to fence before executing the IB 111 - * 112 - * @ib: IB object to add fence to 113 - * @fence: fence to sync to 114 - * 115 - * Sync to the fence before executing the IB 116 - */ 117 - void radeon_ib_sync_to(struct radeon_ib *ib, struct radeon_fence *fence) 118 - { 119 - struct radeon_fence *other; 120 - 121 - if (!fence) 122 - return; 123 - 124 - other = ib->sync_to[fence->ring]; 125 - ib->sync_to[fence->ring] = radeon_fence_later(fence, other); 126 109 } 127 110 128 111 /** ··· 130 151 struct radeon_ib *const_ib) 131 152 { 132 153 struct radeon_ring *ring = &rdev->ring[ib->ring]; 133 - bool need_sync = false; 134 - int i, r = 0; 154 + int r = 0; 135 155 136 156 if (!ib->length_dw || !ring->ready) { 137 157 /* TODO: Nothings in the ib we should report. */ ··· 144 166 dev_err(rdev->dev, "scheduling IB failed (%d).\n", r); 145 167 return r; 146 168 } 147 - for (i = 0; i < RADEON_NUM_RINGS; ++i) { 148 - struct radeon_fence *fence = ib->sync_to[i]; 149 - if (radeon_fence_need_sync(fence, ib->ring)) { 150 - need_sync = true; 151 - radeon_semaphore_sync_rings(rdev, ib->semaphore, 152 - fence->ring, ib->ring); 153 - radeon_fence_note_sync(fence, ib->ring); 154 - } 169 + 170 + /* sync with other rings */ 171 + r = radeon_semaphore_sync_rings(rdev, ib->semaphore, ib->ring); 172 + if (r) { 173 + dev_err(rdev->dev, "failed to sync rings (%d)\n", r); 174 + radeon_ring_unlock_undo(rdev, ring); 175 + return r; 155 176 } 156 - /* immediately free semaphore when we don't need to sync */ 157 - if (!need_sync) { 158 - radeon_semaphore_free(rdev, &ib->semaphore, NULL); 159 - } 177 + 160 178 /* if we can't remember our last VM flush then flush now! */ 161 179 /* XXX figure out why we have to flush for every IB */ 162 180 if (ib->vm /*&& !ib->vm->last_flush*/) {
+101 -34
drivers/gpu/drm/radeon/radeon_semaphore.c
··· 29 29 */ 30 30 #include <drm/drmP.h> 31 31 #include "radeon.h" 32 - 32 + #include "radeon_trace.h" 33 33 34 34 int radeon_semaphore_create(struct radeon_device *rdev, 35 35 struct radeon_semaphore **semaphore) 36 36 { 37 - int r; 37 + int i, r; 38 38 39 39 *semaphore = kmalloc(sizeof(struct radeon_semaphore), GFP_KERNEL); 40 40 if (*semaphore == NULL) { ··· 50 50 (*semaphore)->waiters = 0; 51 51 (*semaphore)->gpu_addr = radeon_sa_bo_gpu_addr((*semaphore)->sa_bo); 52 52 *((uint64_t*)radeon_sa_bo_cpu_addr((*semaphore)->sa_bo)) = 0; 53 + 54 + for (i = 0; i < RADEON_NUM_RINGS; ++i) 55 + (*semaphore)->sync_to[i] = NULL; 56 + 53 57 return 0; 54 58 } 55 59 56 - void radeon_semaphore_emit_signal(struct radeon_device *rdev, int ring, 60 + bool radeon_semaphore_emit_signal(struct radeon_device *rdev, int ridx, 57 61 struct radeon_semaphore *semaphore) 58 62 { 59 - --semaphore->waiters; 60 - radeon_semaphore_ring_emit(rdev, ring, &rdev->ring[ring], semaphore, false); 63 + struct radeon_ring *ring = &rdev->ring[ridx]; 64 + 65 + trace_radeon_semaphore_signale(ridx, semaphore); 66 + 67 + if (radeon_semaphore_ring_emit(rdev, ridx, ring, semaphore, false)) { 68 + --semaphore->waiters; 69 + 70 + /* for debugging lockup only, used by sysfs debug files */ 71 + ring->last_semaphore_signal_addr = semaphore->gpu_addr; 72 + return true; 73 + } 74 + return false; 61 75 } 62 76 63 - void radeon_semaphore_emit_wait(struct radeon_device *rdev, int ring, 77 + bool radeon_semaphore_emit_wait(struct radeon_device *rdev, int ridx, 64 78 struct radeon_semaphore *semaphore) 65 79 { 66 - ++semaphore->waiters; 67 - radeon_semaphore_ring_emit(rdev, ring, &rdev->ring[ring], semaphore, true); 80 + struct radeon_ring *ring = &rdev->ring[ridx]; 81 + 82 + trace_radeon_semaphore_wait(ridx, semaphore); 83 + 84 + if (radeon_semaphore_ring_emit(rdev, ridx, ring, semaphore, true)) { 85 + ++semaphore->waiters; 86 + 87 + /* for debugging lockup only, used by sysfs debug files */ 88 + ring->last_semaphore_wait_addr = semaphore->gpu_addr; 89 + return true; 90 + } 91 + return false; 68 92 } 69 93 70 - /* caller must hold ring lock */ 94 + /** 95 + * radeon_semaphore_sync_to - use the semaphore to sync to a fence 96 + * 97 + * @semaphore: semaphore object to add fence to 98 + * @fence: fence to sync to 99 + * 100 + * Sync to the fence using this semaphore object 101 + */ 102 + void radeon_semaphore_sync_to(struct radeon_semaphore *semaphore, 103 + struct radeon_fence *fence) 104 + { 105 + struct radeon_fence *other; 106 + 107 + if (!fence) 108 + return; 109 + 110 + other = semaphore->sync_to[fence->ring]; 111 + semaphore->sync_to[fence->ring] = radeon_fence_later(fence, other); 112 + } 113 + 114 + /** 115 + * radeon_semaphore_sync_rings - sync ring to all registered fences 116 + * 117 + * @rdev: radeon_device pointer 118 + * @semaphore: semaphore object to use for sync 119 + * @ring: ring that needs sync 120 + * 121 + * Ensure that all registered fences are signaled before letting 122 + * the ring continue. The caller must hold the ring lock. 
123 + */ 71 124 int radeon_semaphore_sync_rings(struct radeon_device *rdev, 72 125 struct radeon_semaphore *semaphore, 73 - int signaler, int waiter) 126 + int ring) 74 127 { 75 - int r; 128 + int i, r; 76 129 77 - /* no need to signal and wait on the same ring */ 78 - if (signaler == waiter) { 79 - return 0; 130 + for (i = 0; i < RADEON_NUM_RINGS; ++i) { 131 + struct radeon_fence *fence = semaphore->sync_to[i]; 132 + 133 + /* check if we really need to sync */ 134 + if (!radeon_fence_need_sync(fence, ring)) 135 + continue; 136 + 137 + /* prevent GPU deadlocks */ 138 + if (!rdev->ring[i].ready) { 139 + dev_err(rdev->dev, "Syncing to a disabled ring!"); 140 + return -EINVAL; 141 + } 142 + 143 + /* allocate enough space for sync command */ 144 + r = radeon_ring_alloc(rdev, &rdev->ring[i], 16); 145 + if (r) { 146 + return r; 147 + } 148 + 149 + /* emit the signal semaphore */ 150 + if (!radeon_semaphore_emit_signal(rdev, i, semaphore)) { 151 + /* signaling wasn't successful wait manually */ 152 + radeon_ring_undo(&rdev->ring[i]); 153 + radeon_fence_wait_locked(fence); 154 + continue; 155 + } 156 + 157 + /* we assume caller has already allocated space on waiters ring */ 158 + if (!radeon_semaphore_emit_wait(rdev, ring, semaphore)) { 159 + /* waiting wasn't successful wait manually */ 160 + radeon_ring_undo(&rdev->ring[i]); 161 + radeon_fence_wait_locked(fence); 162 + continue; 163 + } 164 + 165 + radeon_ring_commit(rdev, &rdev->ring[i]); 166 + radeon_fence_note_sync(fence, ring); 80 167 } 81 - 82 - /* prevent GPU deadlocks */ 83 - if (!rdev->ring[signaler].ready) { 84 - dev_err(rdev->dev, "Trying to sync to a disabled ring!"); 85 - return -EINVAL; 86 - } 87 - 88 - r = radeon_ring_alloc(rdev, &rdev->ring[signaler], 8); 89 - if (r) { 90 - return r; 91 - } 92 - radeon_semaphore_emit_signal(rdev, signaler, semaphore); 93 - radeon_ring_commit(rdev, &rdev->ring[signaler]); 94 - 95 - /* we assume caller has already allocated space on waiters ring */ 96 - radeon_semaphore_emit_wait(rdev, waiter, semaphore); 97 - 98 - /* for debugging lockup only, used by sysfs debug files */ 99 - rdev->ring[signaler].last_semaphore_signal_addr = semaphore->gpu_addr; 100 - rdev->ring[waiter].last_semaphore_wait_addr = semaphore->gpu_addr; 101 168 102 169 return 0; 103 170 }
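All the copy paths touched in this pull (evergreen_dma.c, r600.c, r600_dma.c, rv770_dma.c, si_dma.c, cik.c, cik_sdma.c) now share the same caller-side sequence: record the fence to wait on with radeon_semaphore_sync_to(), then let radeon_semaphore_sync_rings() emit the semaphore packets, falling back to a CPU wait when a ring cannot emit them. A condensed, hypothetical sketch of that sequence (error handling trimmed; example_sync_before_copy() is not a function from the patch):

static int example_sync_before_copy(struct radeon_device *rdev,
				    struct radeon_ring *ring,
				    struct radeon_fence **fence)
{
	struct radeon_semaphore *sem = NULL;
	int r;

	r = radeon_semaphore_create(rdev, &sem);
	if (r)
		return r;

	/* remember which fence (if any) this job has to wait for */
	radeon_semaphore_sync_to(sem, *fence);

	/* emit signal/wait packets for ring->idx, or block on the CPU
	 * if the signalling ring cannot emit semaphores */
	r = radeon_semaphore_sync_rings(rdev, sem, ring->idx);

	/* ... emit the copy packets and a new fence here ... */

	radeon_semaphore_free(rdev, &sem, *fence);
	return r;
}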
+36
drivers/gpu/drm/radeon/radeon_trace.h
··· 111 111 TP_ARGS(dev, seqno) 112 112 ); 113 113 114 + DECLARE_EVENT_CLASS(radeon_semaphore_request, 115 + 116 + TP_PROTO(int ring, struct radeon_semaphore *sem), 117 + 118 + TP_ARGS(ring, sem), 119 + 120 + TP_STRUCT__entry( 121 + __field(int, ring) 122 + __field(signed, waiters) 123 + __field(uint64_t, gpu_addr) 124 + ), 125 + 126 + TP_fast_assign( 127 + __entry->ring = ring; 128 + __entry->waiters = sem->waiters; 129 + __entry->gpu_addr = sem->gpu_addr; 130 + ), 131 + 132 + TP_printk("ring=%u, waiters=%d, addr=%010Lx", __entry->ring, 133 + __entry->waiters, __entry->gpu_addr) 134 + ); 135 + 136 + DEFINE_EVENT(radeon_semaphore_request, radeon_semaphore_signale, 137 + 138 + TP_PROTO(int ring, struct radeon_semaphore *sem), 139 + 140 + TP_ARGS(ring, sem) 141 + ); 142 + 143 + DEFINE_EVENT(radeon_semaphore_request, radeon_semaphore_wait, 144 + 145 + TP_PROTO(int ring, struct radeon_semaphore *sem), 146 + 147 + TP_ARGS(ring, sem) 148 + ); 149 + 114 150 #endif 115 151 116 152 /* This part must be outside protection */
+2 -7
drivers/gpu/drm/radeon/rv770_dma.c
···
 		return r;
 	}
 
-	if (radeon_fence_need_sync(*fence, ring->idx)) {
-		radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring,
-					    ring->idx);
-		radeon_fence_note_sync(*fence, ring->idx);
-	} else {
-		radeon_semaphore_free(rdev, &sem, NULL);
-	}
+	radeon_semaphore_sync_to(sem, *fence);
+	radeon_semaphore_sync_rings(rdev, sem, ring->idx);
 
 	for (i = 0; i < num_loops; i++) {
 		cur_size_in_dw = size_in_dw;
+2 -7
drivers/gpu/drm/radeon/si_dma.c
···
 		return r;
 	}
 
-	if (radeon_fence_need_sync(*fence, ring->idx)) {
-		radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring,
-					    ring->idx);
-		radeon_fence_note_sync(*fence, ring->idx);
-	} else {
-		radeon_semaphore_free(rdev, &sem, NULL);
-	}
+	radeon_semaphore_sync_to(sem, *fence);
+	radeon_semaphore_sync_rings(rdev, sem, ring->idx);
 
 	for (i = 0; i < num_loops; i++) {
 		cur_size_in_bytes = size_in_bytes;
+3 -3
drivers/gpu/drm/radeon/trinity_dpm.c
···
 	pi->enable_sclk_ds = true;
 	pi->enable_gfx_power_gating = true;
 	pi->enable_gfx_clock_gating = true;
-	pi->enable_mg_clock_gating = true;
-	pi->enable_gfx_dynamic_mgpg = true; /* ??? */
-	pi->override_dynamic_mgpg = true;
+	pi->enable_mg_clock_gating = false;
+	pi->enable_gfx_dynamic_mgpg = false;
+	pi->override_dynamic_mgpg = false;
 	pi->enable_auto_thermal_throttling = true;
 	pi->voltage_drop_in_dce = false; /* need to restructure dpm/modeset interaction */
 	pi->uvd_dpm = true; /* ??? */
+3 -1
drivers/gpu/drm/radeon/uvd_v1_0.c
···
  *
  * Emit a semaphore command (either wait or signal) to the UVD ring.
  */
-void uvd_v1_0_semaphore_emit(struct radeon_device *rdev,
+bool uvd_v1_0_semaphore_emit(struct radeon_device *rdev,
 			     struct radeon_ring *ring,
 			     struct radeon_semaphore *semaphore,
 			     bool emit_wait)
···
 
 	radeon_ring_write(ring, PACKET0(UVD_SEMA_CMD, 0));
 	radeon_ring_write(ring, emit_wait ? 1 : 0);
+
+	return true;
 }
 
 /**
+3 -1
drivers/gpu/drm/radeon/uvd_v3_1.c
···
  *
  * Emit a semaphore command (either wait or signal) to the UVD ring.
  */
-void uvd_v3_1_semaphore_emit(struct radeon_device *rdev,
+bool uvd_v3_1_semaphore_emit(struct radeon_device *rdev,
 			     struct radeon_ring *ring,
 			     struct radeon_semaphore *semaphore,
 			     bool emit_wait)
···
 
 	radeon_ring_write(ring, PACKET0(UVD_SEMA_CMD, 0));
 	radeon_ring_write(ring, 0x80 | (emit_wait ? 1 : 0));
+
+	return true;
 }
+34 -1
drivers/gpu/drm/ttm/ttm_bo.c
··· 151 151 atomic_dec(&bo->glob->bo_count); 152 152 if (bo->resv == &bo->ttm_resv) 153 153 reservation_object_fini(&bo->ttm_resv); 154 - 154 + mutex_destroy(&bo->wu_mutex); 155 155 if (bo->destroy) 156 156 bo->destroy(bo); 157 157 else { ··· 1123 1123 INIT_LIST_HEAD(&bo->ddestroy); 1124 1124 INIT_LIST_HEAD(&bo->swap); 1125 1125 INIT_LIST_HEAD(&bo->io_reserve_lru); 1126 + mutex_init(&bo->wu_mutex); 1126 1127 bo->bdev = bdev; 1127 1128 bo->glob = bdev->glob; 1128 1129 bo->type = type; ··· 1705 1704 ; 1706 1705 } 1707 1706 EXPORT_SYMBOL(ttm_bo_swapout_all); 1707 + 1708 + /** 1709 + * ttm_bo_wait_unreserved - interruptible wait for a buffer object to become 1710 + * unreserved 1711 + * 1712 + * @bo: Pointer to buffer 1713 + */ 1714 + int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo) 1715 + { 1716 + int ret; 1717 + 1718 + /* 1719 + * In the absense of a wait_unlocked API, 1720 + * Use the bo::wu_mutex to avoid triggering livelocks due to 1721 + * concurrent use of this function. Note that this use of 1722 + * bo::wu_mutex can go away if we change locking order to 1723 + * mmap_sem -> bo::reserve. 1724 + */ 1725 + ret = mutex_lock_interruptible(&bo->wu_mutex); 1726 + if (unlikely(ret != 0)) 1727 + return -ERESTARTSYS; 1728 + if (!ww_mutex_is_locked(&bo->resv->lock)) 1729 + goto out_unlock; 1730 + ret = ttm_bo_reserve_nolru(bo, true, false, false, NULL); 1731 + if (unlikely(ret != 0)) 1732 + goto out_unlock; 1733 + ww_mutex_unlock(&bo->resv->lock); 1734 + 1735 + out_unlock: 1736 + mutex_unlock(&bo->wu_mutex); 1737 + return ret; 1738 + }
+5 -2
drivers/gpu/drm/ttm/ttm_bo_util.c
···
 		goto out2;
 
 	/*
-	 * Move nonexistent data. NOP.
+	 * Don't move nonexistent data. Clear destination instead.
 	 */
-	if (old_iomap == NULL && ttm == NULL)
+	if (old_iomap == NULL &&
+	    (ttm == NULL || ttm->state == tt_unpopulated)) {
+		memset_io(new_iomap, 0, new_mem->num_pages*PAGE_SIZE);
 		goto out2;
+	}
 
 	/*
 	 * TTM might be null for moves within the same region.
+20 -6
drivers/gpu/drm/ttm/ttm_bo_vm.c
··· 107 107 /* 108 108 * Work around locking order reversal in fault / nopfn 109 109 * between mmap_sem and bo_reserve: Perform a trylock operation 110 - * for reserve, and if it fails, retry the fault after scheduling. 110 + * for reserve, and if it fails, retry the fault after waiting 111 + * for the buffer to become unreserved. 111 112 */ 112 - 113 - ret = ttm_bo_reserve(bo, true, true, false, 0); 113 + ret = ttm_bo_reserve(bo, true, true, false, NULL); 114 114 if (unlikely(ret != 0)) { 115 - if (ret == -EBUSY) 116 - set_need_resched(); 115 + if (ret != -EBUSY) 116 + return VM_FAULT_NOPAGE; 117 + 118 + if (vmf->flags & FAULT_FLAG_ALLOW_RETRY) { 119 + if (!(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) { 120 + up_read(&vma->vm_mm->mmap_sem); 121 + (void) ttm_bo_wait_unreserved(bo); 122 + } 123 + 124 + return VM_FAULT_RETRY; 125 + } 126 + 127 + /* 128 + * If we'd want to change locking order to 129 + * mmap_sem -> bo::reserve, we'd use a blocking reserve here 130 + * instead of retrying the fault... 131 + */ 117 132 return VM_FAULT_NOPAGE; 118 133 } 119 134 ··· 138 123 case 0: 139 124 break; 140 125 case -EBUSY: 141 - set_need_resched(); 142 126 case -ERESTARTSYS: 143 127 retval = VM_FAULT_NOPAGE; 144 128 goto out_unlock;
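The ttm_bo_vm.c hunk above replaces the old set_need_resched() workaround with the standard fault-retry protocol: when the reserve trylock fails, drop mmap_sem, wait for the buffer outside the lock, and return VM_FAULT_RETRY so the VM repeats the fault. A generic sketch of that protocol, assuming a 3.12-era fault handler signature; my_fault() and the my_object_*() helpers are hypothetical, not TTM API:

#include <linux/mm.h>

static int my_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct my_object *obj = vma->vm_private_data;

	if (!my_object_trylock(obj)) {
		/* Lock is contended and we hold mmap_sem: back off. */
		if (vmf->flags & FAULT_FLAG_ALLOW_RETRY) {
			if (!(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
				up_read(&vma->vm_mm->mmap_sem);
				my_object_wait_unlocked(obj);
			}
			return VM_FAULT_RETRY;	/* VM retries with mmap_sem retaken */
		}
		return VM_FAULT_NOPAGE;
	}

	/* ... insert the PFNs and drop the lock ... */

	my_object_unlock(obj);
	return VM_FAULT_NOPAGE;
}

The ttm_bo_wait_unreserved() helper added in ttm_bo.c above plays the role of my_object_wait_unlocked() here, with wu_mutex guarding against livelocks between concurrent faulters.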
+19 -13
drivers/gpu/drm/ttm/ttm_execbuf_util.c
··· 32 32 #include <linux/sched.h> 33 33 #include <linux/module.h> 34 34 35 - static void ttm_eu_backoff_reservation_locked(struct list_head *list, 36 - struct ww_acquire_ctx *ticket) 35 + static void ttm_eu_backoff_reservation_locked(struct list_head *list) 37 36 { 38 37 struct ttm_validate_buffer *entry; 39 38 ··· 92 93 entry = list_first_entry(list, struct ttm_validate_buffer, head); 93 94 glob = entry->bo->glob; 94 95 spin_lock(&glob->lru_lock); 95 - ttm_eu_backoff_reservation_locked(list, ticket); 96 - ww_acquire_fini(ticket); 96 + ttm_eu_backoff_reservation_locked(list); 97 + if (ticket) 98 + ww_acquire_fini(ticket); 97 99 spin_unlock(&glob->lru_lock); 98 100 } 99 101 EXPORT_SYMBOL(ttm_eu_backoff_reservation); ··· 130 130 entry = list_first_entry(list, struct ttm_validate_buffer, head); 131 131 glob = entry->bo->glob; 132 132 133 - ww_acquire_init(ticket, &reservation_ww_class); 133 + if (ticket) 134 + ww_acquire_init(ticket, &reservation_ww_class); 134 135 retry: 135 136 list_for_each_entry(entry, list, head) { 136 137 struct ttm_buffer_object *bo = entry->bo; ··· 140 139 if (entry->reserved) 141 140 continue; 142 141 143 - 144 - ret = ttm_bo_reserve_nolru(bo, true, false, true, ticket); 142 + ret = ttm_bo_reserve_nolru(bo, true, (ticket == NULL), true, 143 + ticket); 145 144 146 145 if (ret == -EDEADLK) { 147 146 /* uh oh, we lost out, drop every reservation and try 148 147 * to only reserve this buffer, then start over if 149 148 * this succeeds. 150 149 */ 150 + BUG_ON(ticket == NULL); 151 151 spin_lock(&glob->lru_lock); 152 - ttm_eu_backoff_reservation_locked(list, ticket); 152 + ttm_eu_backoff_reservation_locked(list); 153 153 spin_unlock(&glob->lru_lock); 154 154 ttm_eu_list_ref_sub(list); 155 155 ret = ww_mutex_lock_slow_interruptible(&bo->resv->lock, ··· 177 175 } 178 176 } 179 177 180 - ww_acquire_done(ticket); 178 + if (ticket) 179 + ww_acquire_done(ticket); 181 180 spin_lock(&glob->lru_lock); 182 181 ttm_eu_del_from_lru_locked(list); 183 182 spin_unlock(&glob->lru_lock); ··· 187 184 188 185 err: 189 186 spin_lock(&glob->lru_lock); 190 - ttm_eu_backoff_reservation_locked(list, ticket); 187 + ttm_eu_backoff_reservation_locked(list); 191 188 spin_unlock(&glob->lru_lock); 192 189 ttm_eu_list_ref_sub(list); 193 190 err_fini: 194 - ww_acquire_done(ticket); 195 - ww_acquire_fini(ticket); 191 + if (ticket) { 192 + ww_acquire_done(ticket); 193 + ww_acquire_fini(ticket); 194 + } 196 195 return ret; 197 196 } 198 197 EXPORT_SYMBOL(ttm_eu_reserve_buffers); ··· 229 224 } 230 225 spin_unlock(&bdev->fence_lock); 231 226 spin_unlock(&glob->lru_lock); 232 - ww_acquire_fini(ticket); 227 + if (ticket) 228 + ww_acquire_fini(ticket); 233 229 234 230 list_for_each_entry(entry, list, head) { 235 231 if (entry->old_sync_obj)
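A hedged sketch of what the new NULL-ticket mode is for: a caller that only needs a one-off, thread-private reservation of a short list (as the vmwgfx eviction path converted below) can now skip the ww_acquire_ctx entirely, in which case the reserves are attempted without blocking. The function name and single-buffer setup here are illustrative, loosely following vmw_resource_check_buffer().

#include <linux/list.h>
#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_execbuf_util.h>

static int reserve_single_bo(struct ttm_buffer_object *bo)
{
        struct ttm_validate_buffer val_buf;
        struct list_head val_list;
        int ret;

        INIT_LIST_HEAD(&val_list);
        val_buf.bo = bo;
        list_add_tail(&val_buf.head, &val_list);

        /* NULL ticket: no ww_acquire bookkeeping, non-blocking reserve. */
        ret = ttm_eu_reserve_buffers(NULL, &val_list);
        if (ret != 0)
                return ret;

        /* ... validate or move the buffer here ... */

        ttm_eu_backoff_reservation(NULL, &val_list);
        return 0;
}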
+248 -6
drivers/gpu/drm/ttm/ttm_object.c
··· 1 1 /************************************************************************** 2 2 * 3 - * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA 3 + * Copyright (c) 2009-2013 VMware, Inc., Palo Alto, CA., USA 4 4 * All Rights Reserved. 5 5 * 6 6 * Permission is hereby granted, free of charge, to any person obtaining a ··· 26 26 **************************************************************************/ 27 27 /* 28 28 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com> 29 + * 30 + * While no substantial code is shared, the prime code is inspired by 31 + * drm_prime.c, with 32 + * Authors: 33 + * Dave Airlie <airlied@redhat.com> 34 + * Rob Clark <rob.clark@linaro.org> 29 35 */ 30 36 /** @file ttm_ref_object.c 31 37 * ··· 39 33 * ttm objects. Implements reference counting, minimal security checks 40 34 * and release on file close. 41 35 */ 36 + 42 37 43 38 /** 44 39 * struct ttm_object_file ··· 91 84 struct drm_open_hash object_hash; 92 85 atomic_t object_count; 93 86 struct ttm_mem_global *mem_glob; 87 + struct dma_buf_ops ops; 88 + void (*dmabuf_release)(struct dma_buf *dma_buf); 89 + size_t dma_buf_size; 94 90 }; 95 91 96 92 /** ··· 125 115 struct ttm_base_object *obj; 126 116 struct ttm_object_file *tfile; 127 117 }; 118 + 119 + static void ttm_prime_dmabuf_release(struct dma_buf *dma_buf); 128 120 129 121 static inline struct ttm_object_file * 130 122 ttm_object_file_ref(struct ttm_object_file *tfile) ··· 428 416 } 429 417 EXPORT_SYMBOL(ttm_object_file_init); 430 418 431 - struct ttm_object_device *ttm_object_device_init(struct ttm_mem_global 432 - *mem_glob, 433 - unsigned int hash_order) 419 + struct ttm_object_device * 420 + ttm_object_device_init(struct ttm_mem_global *mem_glob, 421 + unsigned int hash_order, 422 + const struct dma_buf_ops *ops) 434 423 { 435 424 struct ttm_object_device *tdev = kmalloc(sizeof(*tdev), GFP_KERNEL); 436 425 int ret; ··· 443 430 spin_lock_init(&tdev->object_lock); 444 431 atomic_set(&tdev->object_count, 0); 445 432 ret = drm_ht_create(&tdev->object_hash, hash_order); 433 + if (ret != 0) 434 + goto out_no_object_hash; 446 435 447 - if (likely(ret == 0)) 448 - return tdev; 436 + tdev->ops = *ops; 437 + tdev->dmabuf_release = tdev->ops.release; 438 + tdev->ops.release = ttm_prime_dmabuf_release; 439 + tdev->dma_buf_size = ttm_round_pot(sizeof(struct dma_buf)) + 440 + ttm_round_pot(sizeof(struct file)); 441 + return tdev; 449 442 443 + out_no_object_hash: 450 444 kfree(tdev); 451 445 return NULL; 452 446 } ··· 472 452 kfree(tdev); 473 453 } 474 454 EXPORT_SYMBOL(ttm_object_device_release); 455 + 456 + /** 457 + * get_dma_buf_unless_doomed - get a dma_buf reference if possible. 458 + * 459 + * @dma_buf: Non-refcounted pointer to a struct dma-buf. 460 + * 461 + * Obtain a file reference from a lookup structure that doesn't refcount 462 + * the file, but synchronizes with its release method to make sure it has 463 + * not been freed yet. See for example kref_get_unless_zero documentation. 464 + * Returns true if refcounting succeeds, false otherwise. 465 + * 466 + * Nobody really wants this as a public API yet, so let it mature here 467 + * for some time... 468 + */ 469 + static bool __must_check get_dma_buf_unless_doomed(struct dma_buf *dmabuf) 470 + { 471 + return atomic_long_inc_not_zero(&dmabuf->file->f_count) != 0L; 472 + } 473 + 474 + /** 475 + * ttm_prime_refcount_release - refcount release method for a prime object. 476 + * 477 + * @p_base: Pointer to ttm_base_object pointer. 
478 + * 479 + * This is a wrapper that calls the refcount_release founction of the 480 + * underlying object. At the same time it cleans up the prime object. 481 + * This function is called when all references to the base object we 482 + * derive from are gone. 483 + */ 484 + static void ttm_prime_refcount_release(struct ttm_base_object **p_base) 485 + { 486 + struct ttm_base_object *base = *p_base; 487 + struct ttm_prime_object *prime; 488 + 489 + *p_base = NULL; 490 + prime = container_of(base, struct ttm_prime_object, base); 491 + BUG_ON(prime->dma_buf != NULL); 492 + mutex_destroy(&prime->mutex); 493 + if (prime->refcount_release) 494 + prime->refcount_release(&base); 495 + } 496 + 497 + /** 498 + * ttm_prime_dmabuf_release - Release method for the dma-bufs we export 499 + * 500 + * @dma_buf: 501 + * 502 + * This function first calls the dma_buf release method the driver 503 + * provides. Then it cleans up our dma_buf pointer used for lookup, 504 + * and finally releases the reference the dma_buf has on our base 505 + * object. 506 + */ 507 + static void ttm_prime_dmabuf_release(struct dma_buf *dma_buf) 508 + { 509 + struct ttm_prime_object *prime = 510 + (struct ttm_prime_object *) dma_buf->priv; 511 + struct ttm_base_object *base = &prime->base; 512 + struct ttm_object_device *tdev = base->tfile->tdev; 513 + 514 + if (tdev->dmabuf_release) 515 + tdev->dmabuf_release(dma_buf); 516 + mutex_lock(&prime->mutex); 517 + if (prime->dma_buf == dma_buf) 518 + prime->dma_buf = NULL; 519 + mutex_unlock(&prime->mutex); 520 + ttm_mem_global_free(tdev->mem_glob, tdev->dma_buf_size); 521 + ttm_base_object_unref(&base); 522 + } 523 + 524 + /** 525 + * ttm_prime_fd_to_handle - Get a base object handle from a prime fd 526 + * 527 + * @tfile: A struct ttm_object_file identifying the caller. 528 + * @fd: The prime / dmabuf fd. 529 + * @handle: The returned handle. 530 + * 531 + * This function returns a handle to an object that previously exported 532 + * a dma-buf. Note that we don't handle imports yet, because we simply 533 + * have no consumers of that implementation. 534 + */ 535 + int ttm_prime_fd_to_handle(struct ttm_object_file *tfile, 536 + int fd, u32 *handle) 537 + { 538 + struct ttm_object_device *tdev = tfile->tdev; 539 + struct dma_buf *dma_buf; 540 + struct ttm_prime_object *prime; 541 + struct ttm_base_object *base; 542 + int ret; 543 + 544 + dma_buf = dma_buf_get(fd); 545 + if (IS_ERR(dma_buf)) 546 + return PTR_ERR(dma_buf); 547 + 548 + if (dma_buf->ops != &tdev->ops) 549 + return -ENOSYS; 550 + 551 + prime = (struct ttm_prime_object *) dma_buf->priv; 552 + base = &prime->base; 553 + *handle = base->hash.key; 554 + ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL); 555 + 556 + dma_buf_put(dma_buf); 557 + 558 + return ret; 559 + } 560 + EXPORT_SYMBOL_GPL(ttm_prime_fd_to_handle); 561 + 562 + /** 563 + * ttm_prime_handle_to_fd - Return a dma_buf fd from a ttm prime object 564 + * 565 + * @tfile: Struct ttm_object_file identifying the caller. 566 + * @handle: Handle to the object we're exporting from. 567 + * @flags: flags for dma-buf creation. We just pass them on. 568 + * @prime_fd: The returned file descriptor. 
569 + * 570 + */ 571 + int ttm_prime_handle_to_fd(struct ttm_object_file *tfile, 572 + uint32_t handle, uint32_t flags, 573 + int *prime_fd) 574 + { 575 + struct ttm_object_device *tdev = tfile->tdev; 576 + struct ttm_base_object *base; 577 + struct dma_buf *dma_buf; 578 + struct ttm_prime_object *prime; 579 + int ret; 580 + 581 + base = ttm_base_object_lookup(tfile, handle); 582 + if (unlikely(base == NULL || 583 + base->object_type != ttm_prime_type)) { 584 + ret = -ENOENT; 585 + goto out_unref; 586 + } 587 + 588 + prime = container_of(base, struct ttm_prime_object, base); 589 + if (unlikely(!base->shareable)) { 590 + ret = -EPERM; 591 + goto out_unref; 592 + } 593 + 594 + ret = mutex_lock_interruptible(&prime->mutex); 595 + if (unlikely(ret != 0)) { 596 + ret = -ERESTARTSYS; 597 + goto out_unref; 598 + } 599 + 600 + dma_buf = prime->dma_buf; 601 + if (!dma_buf || !get_dma_buf_unless_doomed(dma_buf)) { 602 + 603 + /* 604 + * Need to create a new dma_buf, with memory accounting. 605 + */ 606 + ret = ttm_mem_global_alloc(tdev->mem_glob, tdev->dma_buf_size, 607 + false, true); 608 + if (unlikely(ret != 0)) { 609 + mutex_unlock(&prime->mutex); 610 + goto out_unref; 611 + } 612 + 613 + dma_buf = dma_buf_export(prime, &tdev->ops, 614 + prime->size, flags); 615 + if (IS_ERR(dma_buf)) { 616 + ret = PTR_ERR(dma_buf); 617 + ttm_mem_global_free(tdev->mem_glob, 618 + tdev->dma_buf_size); 619 + mutex_unlock(&prime->mutex); 620 + goto out_unref; 621 + } 622 + 623 + /* 624 + * dma_buf has taken the base object reference 625 + */ 626 + base = NULL; 627 + prime->dma_buf = dma_buf; 628 + } 629 + mutex_unlock(&prime->mutex); 630 + 631 + ret = dma_buf_fd(dma_buf, flags); 632 + if (ret >= 0) { 633 + *prime_fd = ret; 634 + ret = 0; 635 + } else 636 + dma_buf_put(dma_buf); 637 + 638 + out_unref: 639 + if (base) 640 + ttm_base_object_unref(&base); 641 + return ret; 642 + } 643 + EXPORT_SYMBOL_GPL(ttm_prime_handle_to_fd); 644 + 645 + /** 646 + * ttm_prime_object_init - Initialize a ttm_prime_object 647 + * 648 + * @tfile: struct ttm_object_file identifying the caller 649 + * @size: The size of the dma_bufs we export. 650 + * @prime: The object to be initialized. 651 + * @shareable: See ttm_base_object_init 652 + * @type: See ttm_base_object_init 653 + * @refcount_release: See ttm_base_object_init 654 + * @ref_obj_release: See ttm_base_object_init 655 + * 656 + * Initializes an object which is compatible with the drm_prime model 657 + * for data sharing between processes and devices. 658 + */ 659 + int ttm_prime_object_init(struct ttm_object_file *tfile, size_t size, 660 + struct ttm_prime_object *prime, bool shareable, 661 + enum ttm_object_type type, 662 + void (*refcount_release) (struct ttm_base_object **), 663 + void (*ref_obj_release) (struct ttm_base_object *, 664 + enum ttm_ref_type ref_type)) 665 + { 666 + mutex_init(&prime->mutex); 667 + prime->size = PAGE_ALIGN(size); 668 + prime->real_type = type; 669 + prime->dma_buf = NULL; 670 + prime->refcount_release = refcount_release; 671 + return ttm_base_object_init(tfile, &prime->base, shareable, 672 + ttm_prime_type, 673 + ttm_prime_refcount_release, 674 + ref_obj_release); 675 + } 676 + EXPORT_SYMBOL(ttm_prime_object_init);
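A minimal sketch, under the assumption of a driver-private dma_buf_ops table (my_dmabuf_ops stands in for something like vmw_prime_dmabuf_ops further down), of how a driver now hands its ops to ttm_object_device_init(): the function stores the driver's .release and substitutes ttm_prime_dmabuf_release, so the prime bookkeeping added in this hunk runs on every dma-buf teardown.

#include <linux/dma-buf.h>
#include <drm/ttm/ttm_object.h>

/* Driver-provided ops; .release may even be NULL, since ttm_object.c
 * installs its own wrapper and calls the saved pointer only if set. */
extern const struct dma_buf_ops my_dmabuf_ops;

static struct ttm_object_device *
my_object_device_create(struct ttm_mem_global *mem_glob)
{
        /* hash_order 12 matches what vmwgfx passes in vmwgfx_drv.c below */
        return ttm_object_device_init(mem_glob, 12, &my_dmabuf_ops);
}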
+1 -1
drivers/gpu/drm/vmwgfx/Makefile
··· 6 6 vmwgfx_fifo.o vmwgfx_irq.o vmwgfx_ldu.o vmwgfx_ttm_glue.o \ 7 7 vmwgfx_overlay.o vmwgfx_marker.o vmwgfx_gmrid_manager.o \ 8 8 vmwgfx_fence.o vmwgfx_dmabuf.o vmwgfx_scrn.o vmwgfx_context.o \ 9 - vmwgfx_surface.o 9 + vmwgfx_surface.o vmwgfx_prime.o 10 10 11 11 obj-$(CONFIG_DRM_VMWGFX) := vmwgfx.o
+5 -2
drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
··· 677 677 } 678 678 679 679 dev_priv->tdev = ttm_object_device_init 680 - (dev_priv->mem_global_ref.object, 12); 680 + (dev_priv->mem_global_ref.object, 12, &vmw_prime_dmabuf_ops); 681 681 682 682 if (unlikely(dev_priv->tdev == NULL)) { 683 683 DRM_ERROR("Unable to initialize TTM object management.\n"); ··· 1210 1210 1211 1211 static struct drm_driver driver = { 1212 1212 .driver_features = DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | 1213 - DRIVER_MODESET, 1213 + DRIVER_MODESET | DRIVER_PRIME, 1214 1214 .load = vmw_driver_load, 1215 1215 .unload = vmw_driver_unload, 1216 1216 .lastclose = vmw_lastclose, ··· 1234 1234 .dumb_create = vmw_dumb_create, 1235 1235 .dumb_map_offset = vmw_dumb_map_offset, 1236 1236 .dumb_destroy = vmw_dumb_destroy, 1237 + 1238 + .prime_fd_to_handle = vmw_prime_fd_to_handle, 1239 + .prime_handle_to_fd = vmw_prime_handle_to_fd, 1237 1240 1238 1241 .fops = &vmwgfx_driver_fops, 1239 1242 .name = VMWGFX_DRIVER_NAME,
+14
drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
··· 819 819 extern const struct ttm_mem_type_manager_func vmw_gmrid_manager_func; 820 820 821 821 /** 822 + * Prime - vmwgfx_prime.c 823 + */ 824 + 825 + extern const struct dma_buf_ops vmw_prime_dmabuf_ops; 826 + extern int vmw_prime_fd_to_handle(struct drm_device *dev, 827 + struct drm_file *file_priv, 828 + int fd, u32 *handle); 829 + extern int vmw_prime_handle_to_fd(struct drm_device *dev, 830 + struct drm_file *file_priv, 831 + uint32_t handle, uint32_t flags, 832 + int *prime_fd); 833 + 834 + 835 + /** 822 836 * Inline helper functions 823 837 */ 824 838
+137
drivers/gpu/drm/vmwgfx/vmwgfx_prime.c
··· 1 + /************************************************************************** 2 + * 3 + * Copyright © 2013 VMware, Inc., Palo Alto, CA., USA 4 + * All Rights Reserved. 5 + * 6 + * Permission is hereby granted, free of charge, to any person obtaining a 7 + * copy of this software and associated documentation files (the 8 + * "Software"), to deal in the Software without restriction, including 9 + * without limitation the rights to use, copy, modify, merge, publish, 10 + * distribute, sub license, and/or sell copies of the Software, and to 11 + * permit persons to whom the Software is furnished to do so, subject to 12 + * the following conditions: 13 + * 14 + * The above copyright notice and this permission notice (including the 15 + * next paragraph) shall be included in all copies or substantial portions 16 + * of the Software. 17 + * 18 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 19 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 20 + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL 21 + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, 22 + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR 23 + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE 24 + * USE OR OTHER DEALINGS IN THE SOFTWARE. 25 + * 26 + **************************************************************************/ 27 + /* 28 + * Authors: 29 + * Thomas Hellstrom <thellstrom@vmware.com> 30 + * 31 + */ 32 + 33 + #include "vmwgfx_drv.h" 34 + #include <linux/dma-buf.h> 35 + #include <drm/ttm/ttm_object.h> 36 + 37 + /* 38 + * DMA-BUF attach- and mapping methods. No need to implement 39 + * these until we have other virtual devices use them. 40 + */ 41 + 42 + static int vmw_prime_map_attach(struct dma_buf *dma_buf, 43 + struct device *target_dev, 44 + struct dma_buf_attachment *attach) 45 + { 46 + return -ENOSYS; 47 + } 48 + 49 + static void vmw_prime_map_detach(struct dma_buf *dma_buf, 50 + struct dma_buf_attachment *attach) 51 + { 52 + } 53 + 54 + static struct sg_table *vmw_prime_map_dma_buf(struct dma_buf_attachment *attach, 55 + enum dma_data_direction dir) 56 + { 57 + return ERR_PTR(-ENOSYS); 58 + } 59 + 60 + static void vmw_prime_unmap_dma_buf(struct dma_buf_attachment *attach, 61 + struct sg_table *sgb, 62 + enum dma_data_direction dir) 63 + { 64 + } 65 + 66 + static void *vmw_prime_dmabuf_vmap(struct dma_buf *dma_buf) 67 + { 68 + return NULL; 69 + } 70 + 71 + static void vmw_prime_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr) 72 + { 73 + } 74 + 75 + static void *vmw_prime_dmabuf_kmap_atomic(struct dma_buf *dma_buf, 76 + unsigned long page_num) 77 + { 78 + return NULL; 79 + } 80 + 81 + static void vmw_prime_dmabuf_kunmap_atomic(struct dma_buf *dma_buf, 82 + unsigned long page_num, void *addr) 83 + { 84 + 85 + } 86 + static void *vmw_prime_dmabuf_kmap(struct dma_buf *dma_buf, 87 + unsigned long page_num) 88 + { 89 + return NULL; 90 + } 91 + 92 + static void vmw_prime_dmabuf_kunmap(struct dma_buf *dma_buf, 93 + unsigned long page_num, void *addr) 94 + { 95 + 96 + } 97 + 98 + static int vmw_prime_dmabuf_mmap(struct dma_buf *dma_buf, 99 + struct vm_area_struct *vma) 100 + { 101 + WARN_ONCE(true, "Attempted use of dmabuf mmap. 
Bad.\n"); 102 + return -ENOSYS; 103 + } 104 + 105 + const struct dma_buf_ops vmw_prime_dmabuf_ops = { 106 + .attach = vmw_prime_map_attach, 107 + .detach = vmw_prime_map_detach, 108 + .map_dma_buf = vmw_prime_map_dma_buf, 109 + .unmap_dma_buf = vmw_prime_unmap_dma_buf, 110 + .release = NULL, 111 + .kmap = vmw_prime_dmabuf_kmap, 112 + .kmap_atomic = vmw_prime_dmabuf_kmap_atomic, 113 + .kunmap = vmw_prime_dmabuf_kunmap, 114 + .kunmap_atomic = vmw_prime_dmabuf_kunmap_atomic, 115 + .mmap = vmw_prime_dmabuf_mmap, 116 + .vmap = vmw_prime_dmabuf_vmap, 117 + .vunmap = vmw_prime_dmabuf_vunmap, 118 + }; 119 + 120 + int vmw_prime_fd_to_handle(struct drm_device *dev, 121 + struct drm_file *file_priv, 122 + int fd, u32 *handle) 123 + { 124 + struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; 125 + 126 + return ttm_prime_fd_to_handle(tfile, fd, handle); 127 + } 128 + 129 + int vmw_prime_handle_to_fd(struct drm_device *dev, 130 + struct drm_file *file_priv, 131 + uint32_t handle, uint32_t flags, 132 + int *prime_fd) 133 + { 134 + struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; 135 + 136 + return ttm_prime_handle_to_fd(tfile, handle, flags, prime_fd); 137 + }
+32 -31
drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
··· 35 35 #define VMW_RES_EVICT_ERR_COUNT 10 36 36 37 37 struct vmw_user_dma_buffer { 38 - struct ttm_base_object base; 38 + struct ttm_prime_object prime; 39 39 struct vmw_dma_buffer dma; 40 40 }; 41 41 ··· 297 297 if (unlikely(base == NULL)) 298 298 return -EINVAL; 299 299 300 - if (unlikely(base->object_type != converter->object_type)) 300 + if (unlikely(ttm_base_object_type(base) != converter->object_type)) 301 301 goto out_bad_resource; 302 302 303 303 res = converter->base_obj_to_res(base); ··· 387 387 { 388 388 struct vmw_user_dma_buffer *vmw_user_bo = vmw_user_dma_buffer(bo); 389 389 390 - ttm_base_object_kfree(vmw_user_bo, base); 390 + ttm_prime_object_kfree(vmw_user_bo, prime); 391 391 } 392 392 393 393 static void vmw_user_dmabuf_release(struct ttm_base_object **p_base) ··· 401 401 if (unlikely(base == NULL)) 402 402 return; 403 403 404 - vmw_user_bo = container_of(base, struct vmw_user_dma_buffer, base); 404 + vmw_user_bo = container_of(base, struct vmw_user_dma_buffer, 405 + prime.base); 405 406 bo = &vmw_user_bo->dma.base; 406 407 ttm_bo_unref(&bo); 407 408 } ··· 443 442 return ret; 444 443 445 444 tmp = ttm_bo_reference(&user_bo->dma.base); 446 - ret = ttm_base_object_init(tfile, 447 - &user_bo->base, 448 - shareable, 449 - ttm_buffer_type, 450 - &vmw_user_dmabuf_release, NULL); 445 + ret = ttm_prime_object_init(tfile, 446 + size, 447 + &user_bo->prime, 448 + shareable, 449 + ttm_buffer_type, 450 + &vmw_user_dmabuf_release, NULL); 451 451 if (unlikely(ret != 0)) { 452 452 ttm_bo_unref(&tmp); 453 453 goto out_no_base_object; 454 454 } 455 455 456 456 *p_dma_buf = &user_bo->dma; 457 - *handle = user_bo->base.hash.key; 457 + *handle = user_bo->prime.base.hash.key; 458 458 459 459 out_no_base_object: 460 460 return ret; ··· 477 475 return -EPERM; 478 476 479 477 vmw_user_bo = vmw_user_dma_buffer(bo); 480 - return (vmw_user_bo->base.tfile == tfile || 481 - vmw_user_bo->base.shareable) ? 0 : -EPERM; 478 + return (vmw_user_bo->prime.base.tfile == tfile || 479 + vmw_user_bo->prime.base.shareable) ? 
0 : -EPERM; 482 480 } 483 481 484 482 int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data, ··· 540 538 return -ESRCH; 541 539 } 542 540 543 - if (unlikely(base->object_type != ttm_buffer_type)) { 541 + if (unlikely(ttm_base_object_type(base) != ttm_buffer_type)) { 544 542 ttm_base_object_unref(&base); 545 543 printk(KERN_ERR "Invalid buffer object handle 0x%08lx.\n", 546 544 (unsigned long)handle); 547 545 return -EINVAL; 548 546 } 549 547 550 - vmw_user_bo = container_of(base, struct vmw_user_dma_buffer, base); 548 + vmw_user_bo = container_of(base, struct vmw_user_dma_buffer, 549 + prime.base); 551 550 (void)ttm_bo_reference(&vmw_user_bo->dma.base); 552 551 ttm_base_object_unref(&base); 553 552 *out = &vmw_user_bo->dma; ··· 565 562 return -EINVAL; 566 563 567 564 user_bo = container_of(dma_buf, struct vmw_user_dma_buffer, dma); 568 - return ttm_ref_object_add(tfile, &user_bo->base, TTM_REF_USAGE, NULL); 565 + return ttm_ref_object_add(tfile, &user_bo->prime.base, 566 + TTM_REF_USAGE, NULL); 569 567 } 570 568 571 569 /* ··· 811 807 goto out_no_dmabuf; 812 808 813 809 tmp = ttm_bo_reference(&vmw_user_bo->dma.base); 814 - ret = ttm_base_object_init(vmw_fpriv(file_priv)->tfile, 815 - &vmw_user_bo->base, 816 - false, 817 - ttm_buffer_type, 818 - &vmw_user_dmabuf_release, NULL); 810 + ret = ttm_prime_object_init(vmw_fpriv(file_priv)->tfile, 811 + args->size, 812 + &vmw_user_bo->prime, 813 + false, 814 + ttm_buffer_type, 815 + &vmw_user_dmabuf_release, NULL); 819 816 if (unlikely(ret != 0)) 820 817 goto out_no_base_object; 821 818 822 - args->handle = vmw_user_bo->base.hash.key; 819 + args->handle = vmw_user_bo->prime.base.hash.key; 823 820 824 821 out_no_base_object: 825 822 ttm_bo_unref(&tmp); ··· 999 994 */ 1000 995 static int 1001 996 vmw_resource_check_buffer(struct vmw_resource *res, 1002 - struct ww_acquire_ctx *ticket, 1003 997 bool interruptible, 1004 998 struct ttm_validate_buffer *val_buf) 1005 999 { ··· 1015 1011 INIT_LIST_HEAD(&val_list); 1016 1012 val_buf->bo = ttm_bo_reference(&res->backup->base); 1017 1013 list_add_tail(&val_buf->head, &val_list); 1018 - ret = ttm_eu_reserve_buffers(ticket, &val_list); 1014 + ret = ttm_eu_reserve_buffers(NULL, &val_list); 1019 1015 if (unlikely(ret != 0)) 1020 1016 goto out_no_reserve; 1021 1017 ··· 1033 1029 return 0; 1034 1030 1035 1031 out_no_validate: 1036 - ttm_eu_backoff_reservation(ticket, &val_list); 1032 + ttm_eu_backoff_reservation(NULL, &val_list); 1037 1033 out_no_reserve: 1038 1034 ttm_bo_unref(&val_buf->bo); 1039 1035 if (backup_dirty) ··· 1078 1074 * @val_buf: Backup buffer information. 
1079 1075 */ 1080 1076 static void 1081 - vmw_resource_backoff_reservation(struct ww_acquire_ctx *ticket, 1082 - struct ttm_validate_buffer *val_buf) 1077 + vmw_resource_backoff_reservation(struct ttm_validate_buffer *val_buf) 1083 1078 { 1084 1079 struct list_head val_list; 1085 1080 ··· 1087 1084 1088 1085 INIT_LIST_HEAD(&val_list); 1089 1086 list_add_tail(&val_buf->head, &val_list); 1090 - ttm_eu_backoff_reservation(ticket, &val_list); 1087 + ttm_eu_backoff_reservation(NULL, &val_list); 1091 1088 ttm_bo_unref(&val_buf->bo); 1092 1089 } 1093 1090 ··· 1102 1099 { 1103 1100 struct ttm_validate_buffer val_buf; 1104 1101 const struct vmw_res_func *func = res->func; 1105 - struct ww_acquire_ctx ticket; 1106 1102 int ret; 1107 1103 1108 1104 BUG_ON(!func->may_evict); 1109 1105 1110 1106 val_buf.bo = NULL; 1111 - ret = vmw_resource_check_buffer(res, &ticket, interruptible, 1112 - &val_buf); 1107 + ret = vmw_resource_check_buffer(res, interruptible, &val_buf); 1113 1108 if (unlikely(ret != 0)) 1114 1109 return ret; 1115 1110 ··· 1122 1121 res->backup_dirty = true; 1123 1122 res->res_dirty = false; 1124 1123 out_no_unbind: 1125 - vmw_resource_backoff_reservation(&ticket, &val_buf); 1124 + vmw_resource_backoff_reservation(&val_buf); 1126 1125 1127 1126 return ret; 1128 1127 }
+16 -14
drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
··· 38 38 * @size: TTM accounting size for the surface. 39 39 */ 40 40 struct vmw_user_surface { 41 - struct ttm_base_object base; 41 + struct ttm_prime_object prime; 42 42 struct vmw_surface srf; 43 43 uint32_t size; 44 44 uint32_t backup_handle; ··· 580 580 static struct vmw_resource * 581 581 vmw_user_surface_base_to_res(struct ttm_base_object *base) 582 582 { 583 - return &(container_of(base, struct vmw_user_surface, base)->srf.res); 583 + return &(container_of(base, struct vmw_user_surface, 584 + prime.base)->srf.res); 584 585 } 585 586 586 587 /** ··· 600 599 kfree(srf->offsets); 601 600 kfree(srf->sizes); 602 601 kfree(srf->snooper.image); 603 - ttm_base_object_kfree(user_srf, base); 602 + ttm_prime_object_kfree(user_srf, prime); 604 603 ttm_mem_global_free(vmw_mem_glob(dev_priv), size); 605 604 } 606 605 ··· 617 616 { 618 617 struct ttm_base_object *base = *p_base; 619 618 struct vmw_user_surface *user_srf = 620 - container_of(base, struct vmw_user_surface, base); 619 + container_of(base, struct vmw_user_surface, prime.base); 621 620 struct vmw_resource *res = &user_srf->srf.res; 622 621 623 622 *p_base = NULL; ··· 791 790 } 792 791 srf->snooper.crtc = NULL; 793 792 794 - user_srf->base.shareable = false; 795 - user_srf->base.tfile = NULL; 793 + user_srf->prime.base.shareable = false; 794 + user_srf->prime.base.tfile = NULL; 796 795 797 796 /** 798 797 * From this point, the generic resource management functions ··· 804 803 goto out_unlock; 805 804 806 805 tmp = vmw_resource_reference(&srf->res); 807 - ret = ttm_base_object_init(tfile, &user_srf->base, 808 - req->shareable, VMW_RES_SURFACE, 809 - &vmw_user_surface_base_release, NULL); 806 + ret = ttm_prime_object_init(tfile, res->backup_size, &user_srf->prime, 807 + req->shareable, VMW_RES_SURFACE, 808 + &vmw_user_surface_base_release, NULL); 810 809 811 810 if (unlikely(ret != 0)) { 812 811 vmw_resource_unreference(&tmp); ··· 814 813 goto out_unlock; 815 814 } 816 815 817 - rep->sid = user_srf->base.hash.key; 816 + rep->sid = user_srf->prime.base.hash.key; 818 817 vmw_resource_unreference(&res); 819 818 820 819 ttm_read_unlock(&vmaster->lock); ··· 824 823 out_no_offsets: 825 824 kfree(srf->sizes); 826 825 out_no_sizes: 827 - ttm_base_object_kfree(user_srf, base); 826 + ttm_prime_object_kfree(user_srf, prime); 828 827 out_no_user_srf: 829 828 ttm_mem_global_free(vmw_mem_glob(dev_priv), size); 830 829 out_unlock: ··· 860 859 return -EINVAL; 861 860 } 862 861 863 - if (unlikely(base->object_type != VMW_RES_SURFACE)) 862 + if (unlikely(ttm_base_object_type(base) != VMW_RES_SURFACE)) 864 863 goto out_bad_resource; 865 864 866 - user_srf = container_of(base, struct vmw_user_surface, base); 865 + user_srf = container_of(base, struct vmw_user_surface, prime.base); 867 866 srf = &user_srf->srf; 868 867 869 - ret = ttm_ref_object_add(tfile, &user_srf->base, TTM_REF_USAGE, NULL); 868 + ret = ttm_ref_object_add(tfile, &user_srf->prime.base, 869 + TTM_REF_USAGE, NULL); 870 870 if (unlikely(ret != 0)) { 871 871 DRM_ERROR("Could not add a reference to a surface.\n"); 872 872 goto out_no_reference;
+3 -1
include/drm/ttm/ttm_bo_api.h
··· 169 169 * @offset: The current GPU offset, which can have different meanings 170 170 * depending on the memory type. For SYSTEM type memory, it should be 0. 171 171 * @cur_placement: Hint of current placement. 172 + * @wu_mutex: Wait unreserved mutex. 172 173 * 173 174 * Base class for TTM buffer object, that deals with data placement and CPU 174 175 * mappings. GPU mappings are really up to the driver, but for simpler GPUs ··· 251 250 252 251 struct reservation_object *resv; 253 252 struct reservation_object ttm_resv; 253 + struct mutex wu_mutex; 254 254 }; 255 255 256 256 /** ··· 704 702 size_t count, loff_t *f_pos, bool write); 705 703 706 704 extern void ttm_bo_swapout_all(struct ttm_bo_device *bdev); 707 - 705 + extern int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo); 708 706 #endif
+2 -1
include/drm/ttm/ttm_execbuf_util.h
··· 70 70 /** 71 71 * function ttm_eu_reserve_buffers 72 72 * 73 - * @ticket: [out] ww_acquire_ctx returned by call. 73 + * @ticket: [out] ww_acquire_ctx filled in by call, or NULL if only 74 + * non-blocking reserves should be tried. 74 75 * @list: thread private list of ttm_validate_buffer structs. 75 76 * 76 77 * Tries to reserve bos pointed to by the list entries for validation.
+59 -2
include/drm/ttm/ttm_object.h
··· 41 41 #include <drm/drm_hashtab.h> 42 42 #include <linux/kref.h> 43 43 #include <linux/rcupdate.h> 44 + #include <linux/dma-buf.h> 44 45 #include <ttm/ttm_memory.h> 45 46 46 47 /** ··· 78 77 ttm_fence_type, 79 78 ttm_buffer_type, 80 79 ttm_lock_type, 80 + ttm_prime_type, 81 81 ttm_driver_type0 = 256, 82 82 ttm_driver_type1, 83 83 ttm_driver_type2, ··· 132 130 void (*refcount_release) (struct ttm_base_object **base); 133 131 void (*ref_obj_release) (struct ttm_base_object *base, 134 132 enum ttm_ref_type ref_type); 133 + }; 134 + 135 + 136 + /** 137 + * struct ttm_prime_object - Modified base object that is prime-aware 138 + * 139 + * @base: struct ttm_base_object that we derive from 140 + * @mutex: Mutex protecting the @dma_buf member. 141 + * @size: Size of the dma_buf associated with this object 142 + * @real_type: Type of the underlying object. Needed since we're setting 143 + * the value of @base::object_type to ttm_prime_type 144 + * @dma_buf: Non ref-coutned pointer to a struct dma_buf created from this 145 + * object. 146 + * @refcount_release: The underlying object's release method. Needed since 147 + * we set @base::refcount_release to our own release method. 148 + */ 149 + 150 + struct ttm_prime_object { 151 + struct ttm_base_object base; 152 + struct mutex mutex; 153 + size_t size; 154 + enum ttm_object_type real_type; 155 + struct dma_buf *dma_buf; 156 + void (*refcount_release) (struct ttm_base_object **); 135 157 }; 136 158 137 159 /** ··· 274 248 /** 275 249 * ttm_object device init - initialize a struct ttm_object_device 276 250 * 251 + * @mem_glob: struct ttm_mem_global for memory accounting. 277 252 * @hash_order: Order of hash table used to hash the base objects. 253 + * @ops: DMA buf ops for prime objects of this device. 278 254 * 279 255 * This function is typically called on device initialization to prepare 280 256 * data structures needed for ttm base and ref objects. 281 257 */ 282 258 283 - extern struct ttm_object_device *ttm_object_device_init 284 - (struct ttm_mem_global *mem_glob, unsigned int hash_order); 259 + extern struct ttm_object_device * 260 + ttm_object_device_init(struct ttm_mem_global *mem_glob, 261 + unsigned int hash_order, 262 + const struct dma_buf_ops *ops); 285 263 286 264 /** 287 265 * ttm_object_device_release - release data held by a ttm_object_device ··· 302 272 303 273 #define ttm_base_object_kfree(__object, __base)\ 304 274 kfree_rcu(__object, __base.rhead) 275 + 276 + extern int ttm_prime_object_init(struct ttm_object_file *tfile, 277 + size_t size, 278 + struct ttm_prime_object *prime, 279 + bool shareable, 280 + enum ttm_object_type type, 281 + void (*refcount_release) 282 + (struct ttm_base_object **), 283 + void (*ref_obj_release) 284 + (struct ttm_base_object *, 285 + enum ttm_ref_type ref_type)); 286 + 287 + static inline enum ttm_object_type 288 + ttm_base_object_type(struct ttm_base_object *base) 289 + { 290 + return (base->object_type == ttm_prime_type) ? 291 + container_of(base, struct ttm_prime_object, base)->real_type : 292 + base->object_type; 293 + } 294 + extern int ttm_prime_fd_to_handle(struct ttm_object_file *tfile, 295 + int fd, u32 *handle); 296 + extern int ttm_prime_handle_to_fd(struct ttm_object_file *tfile, 297 + uint32_t handle, uint32_t flags, 298 + int *prime_fd); 299 + 300 + #define ttm_prime_object_kfree(__obj, __prime) \ 301 + kfree_rcu(__obj, __prime.base.rhead) 305 302 #endif
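The conversion pattern the vmwgfx hunks above follow, condensed into a hedged sketch with made-up names (my_bo, my_type, my_release): the driver object embeds struct ttm_prime_object instead of struct ttm_base_object, is initialized through ttm_prime_object_init(), and lookups check ttm_base_object_type(), since base->object_type now reads ttm_prime_type for every prime-aware object.

#include <linux/kernel.h>
#include <drm/ttm/ttm_object.h>

#define my_type ttm_driver_type0       /* driver-private object type */

struct my_bo {
        struct ttm_prime_object prime;  /* was: struct ttm_base_object base */
        /* ... driver payload ... */
};

static void my_release(struct ttm_base_object **p_base)
{
        struct my_bo *bo = container_of(*p_base, struct my_bo, prime.base);

        *p_base = NULL;
        ttm_prime_object_kfree(bo, prime);
}

static int my_bo_init(struct ttm_object_file *tfile, struct my_bo *bo,
                      size_t size, bool shareable)
{
        return ttm_prime_object_init(tfile, size, &bo->prime, shareable,
                                     my_type, &my_release, NULL);
}

static struct my_bo *my_bo_lookup(struct ttm_object_file *tfile,
                                  uint32_t handle)
{
        struct ttm_base_object *base = ttm_base_object_lookup(tfile, handle);

        if (base == NULL)
                return NULL;

        /* base->object_type is ttm_prime_type; check the wrapped type. */
        if (ttm_base_object_type(base) != my_type) {
                ttm_base_object_unref(&base);
                return NULL;
        }
        return container_of(base, struct my_bo, prime.base);
}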
+2
include/uapi/drm/radeon_drm.h
··· 981 981 #define RADEON_INFO_SI_TILE_MODE_ARRAY 0x16 982 982 /* query if CP DMA is supported on the compute ring */ 983 983 #define RADEON_INFO_SI_CP_DMA_COMPUTE 0x17 984 + /* CIK macrotile mode array */ 985 + #define RADEON_INFO_CIK_MACROTILE_MODE_ARRAY 0x18 984 986 985 987 986 988 struct drm_radeon_info {
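A hedged userspace sketch of the new query (not part of the commit): it fetches the CIK macrotile mode array through the existing DRM_RADEON_INFO ioctl. The 16-dword buffer size is an assumption based on the CIK tiling configuration this pull adds, and the device path is illustrative.

#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <drm/radeon_drm.h>

int main(void)
{
        uint32_t macrotile[16];         /* assumed CIK macrotile array size */
        struct drm_radeon_info info;
        int i, fd = open("/dev/dri/card0", O_RDWR);

        if (fd < 0)
                return 1;

        memset(&info, 0, sizeof(info));
        info.request = RADEON_INFO_CIK_MACROTILE_MODE_ARRAY;
        info.value = (uint64_t)(uintptr_t)macrotile;

        if (ioctl(fd, DRM_IOCTL_RADEON_INFO, &info) == 0) {
                for (i = 0; i < 16; i++)
                        printf("macrotile[%d] = 0x%08x\n", i, macrotile[i]);
        }

        close(fd);
        return 0;
}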