Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

Merge tag 'drm-fixes-2025-07-24' of https://gitlab.freedesktop.org/drm/kernel

Pull drm fixes from Dave Airlie:
"This might just be part one, but I'm sending it a bit early as it has
two sets of reverts for regressions, one is all the gem/dma-buf
handling and another was a nouveau ioctl change.

Otherwise there is an amdgpu fix, nouveau fix and a scheduler fix.

If any other changes come in I'll follow up with another more usual
Fri/Sat MR.

gem:
- revert all the dma-buf/gem changes as there are lifetime issues
with them

nouveau:
- revert an ioctl change as it causes issues
- fix NULL ptr on fermi

bridge:
- remove extra semicolon

sched:
- remove hang causing optimisation

amdgpu:
- fix garbage in cleared vram after resume"

* tag 'drm-fixes-2025-07-24' of https://gitlab.freedesktop.org/drm/kernel:
drm/bridge: ti-sn65dsi86: Remove extra semicolon in ti_sn_bridge_probe()
Revert "drm/nouveau: check ioctl command codes better"
drm/nouveau/nvif: fix null ptr deref on pre-fermi boards
Revert "drm/gem-dma: Use dma_buf from GEM object instance"
Revert "drm/gem-shmem: Use dma_buf from GEM object instance"
Revert "drm/gem-framebuffer: Use dma_buf from GEM object instance"
Revert "drm/prime: Use dma_buf from GEM object instance"
Revert "drm/etnaviv: Use dma_buf from GEM object instance"
Revert "drm/vmwgfx: Use dma_buf from GEM object instance"
Revert "drm/virtio: Use dma_buf from GEM object instance"
drm/sched: Remove optimization that causes hang when killing dependent jobs
drm/amdgpu: Reset the clear flag in buddy during resume

+101 -38
+2
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
··· 5193 5193 dev->dev->power.disable_depth--; 5194 5194 #endif 5195 5195 } 5196 + 5197 + amdgpu_vram_mgr_clear_reset_blocks(adev); 5196 5198 adev->in_suspend = false; 5197 5199 5198 5200 if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DEV_D0))
+1
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
··· 154 154 uint64_t start, uint64_t size); 155 155 int amdgpu_vram_mgr_query_page_status(struct amdgpu_vram_mgr *mgr, 156 156 uint64_t start); 157 + void amdgpu_vram_mgr_clear_reset_blocks(struct amdgpu_device *adev); 157 158 158 159 bool amdgpu_res_cpu_visible(struct amdgpu_device *adev, 159 160 struct ttm_resource *res);
+17
drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
··· 783 783 } 784 784 785 785 /** 786 + * amdgpu_vram_mgr_clear_reset_blocks - reset clear blocks 787 + * 788 + * @adev: amdgpu device pointer 789 + * 790 + * Reset the cleared drm buddy blocks. 791 + */ 792 + void amdgpu_vram_mgr_clear_reset_blocks(struct amdgpu_device *adev) 793 + { 794 + struct amdgpu_vram_mgr *mgr = &adev->mman.vram_mgr; 795 + struct drm_buddy *mm = &mgr->mm; 796 + 797 + mutex_lock(&mgr->lock); 798 + drm_buddy_reset_clear(mm, false); 799 + mutex_unlock(&mgr->lock); 800 + } 801 + 802 + /** 786 803 * amdgpu_vram_mgr_intersects - test each drm buddy block for intersection 787 804 * 788 805 * @man: TTM memory type manager
+1 -1
drivers/gpu/drm/bridge/ti-sn65dsi86.c
··· 1373 1373 regmap_update_bits(pdata->regmap, SN_HPD_DISABLE_REG, 1374 1374 HPD_DISABLE, 0); 1375 1375 mutex_unlock(&pdata->comms_mutex); 1376 - }; 1376 + } 1377 1377 1378 1378 drm_bridge_add(&pdata->bridge); 1379 1379
+43
drivers/gpu/drm/drm_buddy.c
··· 405 405 EXPORT_SYMBOL(drm_get_buddy); 406 406 407 407 /** 408 + * drm_buddy_reset_clear - reset blocks clear state 409 + * 410 + * @mm: DRM buddy manager 411 + * @is_clear: blocks clear state 412 + * 413 + * Reset the clear state based on @is_clear value for each block 414 + * in the freelist. 415 + */ 416 + void drm_buddy_reset_clear(struct drm_buddy *mm, bool is_clear) 417 + { 418 + u64 root_size, size, start; 419 + unsigned int order; 420 + int i; 421 + 422 + size = mm->size; 423 + for (i = 0; i < mm->n_roots; ++i) { 424 + order = ilog2(size) - ilog2(mm->chunk_size); 425 + start = drm_buddy_block_offset(mm->roots[i]); 426 + __force_merge(mm, start, start + size, order); 427 + 428 + root_size = mm->chunk_size << order; 429 + size -= root_size; 430 + } 431 + 432 + for (i = 0; i <= mm->max_order; ++i) { 433 + struct drm_buddy_block *block; 434 + 435 + list_for_each_entry_reverse(block, &mm->free_list[i], link) { 436 + if (is_clear != drm_buddy_block_is_clear(block)) { 437 + if (is_clear) { 438 + mark_cleared(block); 439 + mm->clear_avail += drm_buddy_block_size(mm, block); 440 + } else { 441 + clear_reset(block); 442 + mm->clear_avail -= drm_buddy_block_size(mm, block); 443 + } 444 + } 445 + } 446 + } 447 + } 448 + EXPORT_SYMBOL(drm_buddy_reset_clear); 449 + 450 + /** 408 451 * drm_buddy_free_block - free a block 409 452 * 410 453 * @mm: DRM buddy manager
+1 -1
drivers/gpu/drm/drm_gem_dma_helper.c
··· 230 230 231 231 if (drm_gem_is_imported(gem_obj)) { 232 232 if (dma_obj->vaddr) 233 - dma_buf_vunmap_unlocked(gem_obj->dma_buf, &map); 233 + dma_buf_vunmap_unlocked(gem_obj->import_attach->dmabuf, &map); 234 234 drm_prime_gem_destroy(gem_obj, dma_obj->sgt); 235 235 } else if (dma_obj->vaddr) { 236 236 if (dma_obj->map_noncoherent)
+6 -2
drivers/gpu/drm/drm_gem_framebuffer_helper.c
··· 419 419 static void __drm_gem_fb_end_cpu_access(struct drm_framebuffer *fb, enum dma_data_direction dir, 420 420 unsigned int num_planes) 421 421 { 422 + struct dma_buf_attachment *import_attach; 422 423 struct drm_gem_object *obj; 423 424 int ret; 424 425 ··· 428 427 obj = drm_gem_fb_get_obj(fb, num_planes); 429 428 if (!obj) 430 429 continue; 430 + import_attach = obj->import_attach; 431 431 if (!drm_gem_is_imported(obj)) 432 432 continue; 433 - ret = dma_buf_end_cpu_access(obj->dma_buf, dir); 433 + ret = dma_buf_end_cpu_access(import_attach->dmabuf, dir); 434 434 if (ret) 435 435 drm_err(fb->dev, "dma_buf_end_cpu_access(%u, %d) failed: %d\n", 436 436 ret, num_planes, dir); ··· 454 452 */ 455 453 int drm_gem_fb_begin_cpu_access(struct drm_framebuffer *fb, enum dma_data_direction dir) 456 454 { 455 + struct dma_buf_attachment *import_attach; 457 456 struct drm_gem_object *obj; 458 457 unsigned int i; 459 458 int ret; ··· 465 462 ret = -EINVAL; 466 463 goto err___drm_gem_fb_end_cpu_access; 467 464 } 465 + import_attach = obj->import_attach; 468 466 if (!drm_gem_is_imported(obj)) 469 467 continue; 470 - ret = dma_buf_begin_cpu_access(obj->dma_buf, dir); 468 + ret = dma_buf_begin_cpu_access(import_attach->dmabuf, dir); 471 469 if (ret) 472 470 goto err___drm_gem_fb_end_cpu_access; 473 471 }
+2 -2
drivers/gpu/drm/drm_gem_shmem_helper.c
··· 349 349 int ret = 0; 350 350 351 351 if (drm_gem_is_imported(obj)) { 352 - ret = dma_buf_vmap(obj->dma_buf, map); 352 + ret = dma_buf_vmap(obj->import_attach->dmabuf, map); 353 353 } else { 354 354 pgprot_t prot = PAGE_KERNEL; 355 355 ··· 409 409 struct drm_gem_object *obj = &shmem->base; 410 410 411 411 if (drm_gem_is_imported(obj)) { 412 - dma_buf_vunmap(obj->dma_buf, map); 412 + dma_buf_vunmap(obj->import_attach->dmabuf, map); 413 413 } else { 414 414 dma_resv_assert_held(shmem->base.resv); 415 415
+7 -1
drivers/gpu/drm/drm_prime.c
··· 453 453 } 454 454 455 455 mutex_lock(&dev->object_name_lock); 456 - /* re-export the original imported/exported object */ 456 + /* re-export the original imported object */ 457 + if (obj->import_attach) { 458 + dmabuf = obj->import_attach->dmabuf; 459 + get_dma_buf(dmabuf); 460 + goto out_have_obj; 461 + } 462 + 457 463 if (obj->dma_buf) { 458 464 get_dma_buf(obj->dma_buf); 459 465 dmabuf = obj->dma_buf;
+2 -2
drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c
··· 65 65 struct iosys_map map = IOSYS_MAP_INIT_VADDR(etnaviv_obj->vaddr); 66 66 67 67 if (etnaviv_obj->vaddr) 68 - dma_buf_vunmap_unlocked(etnaviv_obj->base.dma_buf, &map); 68 + dma_buf_vunmap_unlocked(etnaviv_obj->base.import_attach->dmabuf, &map); 69 69 70 70 /* Don't drop the pages for imported dmabuf, as they are not 71 71 * ours, just free the array we allocated: ··· 82 82 83 83 lockdep_assert_held(&etnaviv_obj->lock); 84 84 85 - ret = dma_buf_vmap(etnaviv_obj->base.dma_buf, &map); 85 + ret = dma_buf_vmap(etnaviv_obj->base.import_attach->dmabuf, &map); 86 86 if (ret) 87 87 return NULL; 88 88 return map.vaddr;
+6 -5
drivers/gpu/drm/nouveau/nouveau_drm.c
··· 1284 1284 DRM_IOCTL_DEF_DRV(NOUVEAU_EXEC, nouveau_exec_ioctl_exec, DRM_RENDER_ALLOW), 1285 1285 }; 1286 1286 1287 - #define DRM_IOCTL_NOUVEAU_NVIF _IOC(_IOC_READ | _IOC_WRITE, DRM_IOCTL_BASE, \ 1288 - DRM_COMMAND_BASE + DRM_NOUVEAU_NVIF, 0) 1289 - 1290 1287 long 1291 1288 nouveau_drm_ioctl(struct file *file, unsigned int cmd, unsigned long arg) 1292 1289 { ··· 1297 1300 return ret; 1298 1301 } 1299 1302 1300 - if ((cmd & ~IOCSIZE_MASK) == DRM_IOCTL_NOUVEAU_NVIF) 1303 + switch (_IOC_NR(cmd) - DRM_COMMAND_BASE) { 1304 + case DRM_NOUVEAU_NVIF: 1301 1305 ret = nouveau_abi16_ioctl(filp, (void __user *)arg, _IOC_SIZE(cmd)); 1302 - else 1306 + break; 1307 + default: 1303 1308 ret = drm_ioctl(file, cmd, arg); 1309 + break; 1310 + } 1304 1311 1305 1312 pm_runtime_mark_last_busy(dev->dev); 1306 1313 pm_runtime_put_autosuspend(dev->dev);
+3
drivers/gpu/drm/nouveau/nvif/chan.c
··· 39 39 const u32 pbptr = (chan->push.cur - map) + chan->func->gpfifo.post_size; 40 40 const u32 gpptr = (chan->gpfifo.cur + 1) & chan->gpfifo.max; 41 41 42 + if (!chan->func->gpfifo.post) 43 + return 0; 44 + 42 45 return chan->func->gpfifo.post(chan, gpptr, pbptr); 43 46 } 44 47
+2 -19
drivers/gpu/drm/scheduler/sched_entity.c
··· 355 355 } 356 356 EXPORT_SYMBOL(drm_sched_entity_destroy); 357 357 358 - /* drm_sched_entity_clear_dep - callback to clear the entities dependency */ 359 - static void drm_sched_entity_clear_dep(struct dma_fence *f, 360 - struct dma_fence_cb *cb) 361 - { 362 - struct drm_sched_entity *entity = 363 - container_of(cb, struct drm_sched_entity, cb); 364 - 365 - entity->dependency = NULL; 366 - dma_fence_put(f); 367 - } 368 - 369 358 /* 370 359 * drm_sched_entity_wakeup - callback to clear the entity's dependency and 371 360 * wake up the scheduler ··· 365 376 struct drm_sched_entity *entity = 366 377 container_of(cb, struct drm_sched_entity, cb); 367 378 368 - drm_sched_entity_clear_dep(f, cb); 379 + entity->dependency = NULL; 380 + dma_fence_put(f); 369 381 drm_sched_wakeup(entity->rq->sched); 370 382 } 371 383 ··· 419 429 fence = dma_fence_get(&s_fence->scheduled); 420 430 dma_fence_put(entity->dependency); 421 431 entity->dependency = fence; 422 - if (!dma_fence_add_callback(fence, &entity->cb, 423 - drm_sched_entity_clear_dep)) 424 - return true; 425 - 426 - /* Ignore it when it is already scheduled */ 427 - dma_fence_put(fence); 428 - return false; 429 432 } 430 433 431 434 if (!dma_fence_add_callback(entity->dependency, &entity->cb,
+3 -2
drivers/gpu/drm/virtio/virtgpu_prime.c
··· 204 204 { 205 205 struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj); 206 206 struct virtio_gpu_device *vgdev = obj->dev->dev_private; 207 + struct dma_buf_attachment *attach = obj->import_attach; 207 208 208 209 if (drm_gem_is_imported(obj)) { 209 - struct dma_buf *dmabuf = obj->dma_buf; 210 + struct dma_buf *dmabuf = attach->dmabuf; 210 211 211 212 dma_resv_lock(dmabuf->resv, NULL); 212 213 virtgpu_dma_buf_unmap(bo); 213 214 dma_resv_unlock(dmabuf->resv); 214 215 215 - dma_buf_detach(dmabuf, obj->import_attach); 216 + dma_buf_detach(dmabuf, attach); 216 217 dma_buf_put(dmabuf); 217 218 } 218 219
+3 -3
drivers/gpu/drm/vmwgfx/vmwgfx_gem.c
··· 85 85 int ret; 86 86 87 87 if (drm_gem_is_imported(obj)) { 88 - ret = dma_buf_vmap(obj->dma_buf, map); 88 + ret = dma_buf_vmap(obj->import_attach->dmabuf, map); 89 89 if (!ret) { 90 90 if (drm_WARN_ON(obj->dev, map->is_iomem)) { 91 - dma_buf_vunmap(obj->dma_buf, map); 91 + dma_buf_vunmap(obj->import_attach->dmabuf, map); 92 92 return -EIO; 93 93 } 94 94 } ··· 102 102 static void vmw_gem_vunmap(struct drm_gem_object *obj, struct iosys_map *map) 103 103 { 104 104 if (drm_gem_is_imported(obj)) 105 - dma_buf_vunmap(obj->dma_buf, map); 105 + dma_buf_vunmap(obj->import_attach->dmabuf, map); 106 106 else 107 107 drm_gem_ttm_vunmap(obj, map); 108 108 }
+2
include/drm/drm_buddy.h
··· 160 160 u64 new_size, 161 161 struct list_head *blocks); 162 162 163 + void drm_buddy_reset_clear(struct drm_buddy *mm, bool is_clear); 164 + 163 165 void drm_buddy_free_block(struct drm_buddy *mm, struct drm_buddy_block *block); 164 166 165 167 void drm_buddy_free_list(struct drm_buddy *mm,