Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

drm/msm: Stop passing vm to msm_framebuffer

The fb only deals with kms->vm, so make that explicit. This will start
letting us refcount the number of times the fb is pinned, so that we
unpin the vma only after the last user of the fb is done. Having a single
reference count really only works if there is only a single vm.

Signed-off-by: Rob Clark <robin.clark@oss.qualcomm.com>
Tested-by: Antonino Maniscalco <antomani103@gmail.com>
Reviewed-by: Antonino Maniscalco <antomani103@gmail.com>
Patchwork: https://patchwork.freedesktop.org/patch/661476/

Rob Clark 4d0f62e4 001ddc85

+39 -75
+3 -8
drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c
··· 563 563 struct drm_writeback_job *job) 564 564 { 565 565 const struct msm_format *format; 566 - struct msm_gem_vm *vm; 567 566 struct dpu_hw_wb_cfg *wb_cfg; 568 567 int ret; 569 568 struct dpu_encoder_phys_wb *wb_enc = to_dpu_encoder_phys_wb(phys_enc); ··· 572 573 573 574 wb_enc->wb_job = job; 574 575 wb_enc->wb_conn = job->connector; 575 - vm = phys_enc->dpu_kms->base.vm; 576 576 577 577 wb_cfg = &wb_enc->wb_cfg; 578 578 579 579 memset(wb_cfg, 0, sizeof(struct dpu_hw_wb_cfg)); 580 580 581 - ret = msm_framebuffer_prepare(job->fb, vm, false); 581 + ret = msm_framebuffer_prepare(job->fb, false); 582 582 if (ret) { 583 583 DPU_ERROR("prep fb failed, %d\n", ret); 584 584 return; ··· 591 593 return; 592 594 } 593 595 594 - dpu_format_populate_addrs(vm, job->fb, &wb_cfg->dest); 596 + dpu_format_populate_addrs(job->fb, &wb_cfg->dest); 595 597 596 598 wb_cfg->dest.width = job->fb->width; 597 599 wb_cfg->dest.height = job->fb->height; ··· 614 616 struct drm_writeback_job *job) 615 617 { 616 618 struct dpu_encoder_phys_wb *wb_enc = to_dpu_encoder_phys_wb(phys_enc); 617 - struct msm_gem_vm *vm; 618 619 619 620 if (!job->fb) 620 621 return; 621 622 622 - vm = phys_enc->dpu_kms->base.vm; 623 - 624 - msm_framebuffer_cleanup(job->fb, vm, false); 623 + msm_framebuffer_cleanup(job->fb, false); 625 624 wb_enc->wb_job = NULL; 626 625 wb_enc->wb_conn = NULL; 627 626 }
+7 -11
drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c
··· 274 274 return _dpu_format_populate_plane_sizes_linear(fmt, fb, layout); 275 275 } 276 276 277 - static void _dpu_format_populate_addrs_ubwc(struct msm_gem_vm *vm, 278 - struct drm_framebuffer *fb, 277 + static void _dpu_format_populate_addrs_ubwc(struct drm_framebuffer *fb, 279 278 struct dpu_hw_fmt_layout *layout) 280 279 { 281 280 const struct msm_format *fmt; 282 281 uint32_t base_addr = 0; 283 282 bool meta; 284 283 285 - base_addr = msm_framebuffer_iova(fb, vm, 0); 284 + base_addr = msm_framebuffer_iova(fb, 0); 286 285 287 286 fmt = msm_framebuffer_format(fb); 288 287 meta = MSM_FORMAT_IS_UBWC(fmt); ··· 354 355 } 355 356 } 356 357 357 - static void _dpu_format_populate_addrs_linear(struct msm_gem_vm *vm, 358 - struct drm_framebuffer *fb, 358 + static void _dpu_format_populate_addrs_linear(struct drm_framebuffer *fb, 359 359 struct dpu_hw_fmt_layout *layout) 360 360 { 361 361 unsigned int i; 362 362 363 363 /* Populate addresses for simple formats here */ 364 364 for (i = 0; i < layout->num_planes; ++i) 365 - layout->plane_addr[i] = msm_framebuffer_iova(fb, vm, i); 365 + layout->plane_addr[i] = msm_framebuffer_iova(fb, i); 366 366 367 367 368 368 /** 369 369 * dpu_format_populate_addrs - populate buffer addresses based on 370 370 * mmu, fb, and format found in the fb 371 - * @vm: address space pointer 372 371 * @fb: framebuffer pointer 373 372 * @layout: format layout structure to populate 374 373 */ 375 - void dpu_format_populate_addrs(struct msm_gem_vm *vm, 376 - struct drm_framebuffer *fb, 374 + void dpu_format_populate_addrs(struct drm_framebuffer *fb, 377 375 struct dpu_hw_fmt_layout *layout) 378 376 { 379 377 const struct msm_format *fmt; ··· 380 384 /* Populate the addresses given the fb */ 381 385 if (MSM_FORMAT_IS_UBWC(fmt) || 382 386 MSM_FORMAT_IS_TILE(fmt)) 383 - _dpu_format_populate_addrs_ubwc(vm, fb, layout); 387 + _dpu_format_populate_addrs_ubwc(fb, layout); 384 388 else 385 - _dpu_format_populate_addrs_linear(vm, fb, layout); 389 + _dpu_format_populate_addrs_linear(fb, layout); 386 390 }
+1 -2
drivers/gpu/drm/msm/disp/dpu1/dpu_formats.h
··· 31 31 return false; 32 32 } 33 33 34 - void dpu_format_populate_addrs(struct msm_gem_vm *vm, 35 - struct drm_framebuffer *fb, 34 + void dpu_format_populate_addrs(struct drm_framebuffer *fb, 36 35 struct dpu_hw_fmt_layout *layout); 37 36 38 37 int dpu_format_populate_plane_sizes(
+6 -14
drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
··· 646 646 struct drm_framebuffer *fb = new_state->fb; 647 647 struct dpu_plane *pdpu = to_dpu_plane(plane); 648 648 struct dpu_plane_state *pstate = to_dpu_plane_state(new_state); 649 - struct dpu_kms *kms = _dpu_plane_get_kms(&pdpu->base); 650 649 int ret; 651 650 652 651 if (!new_state->fb) 653 652 return 0; 654 653 655 654 DPU_DEBUG_PLANE(pdpu, "FB[%u]\n", fb->base.id); 656 - 657 - /* cache vm */ 658 - pstate->vm = kms->base.vm; 659 655 660 656 /* 661 657 * TODO: Need to sort out the msm_framebuffer_prepare() call below so ··· 660 664 */ 661 665 drm_gem_plane_helper_prepare_fb(plane, new_state); 662 666 663 - if (pstate->vm) { 664 - ret = msm_framebuffer_prepare(new_state->fb, 665 - pstate->vm, pstate->needs_dirtyfb); 666 - if (ret) { 667 - DPU_ERROR("failed to prepare framebuffer\n"); 668 - return ret; 669 - } 667 + ret = msm_framebuffer_prepare(new_state->fb, pstate->needs_dirtyfb); 668 + if (ret) { 669 + DPU_ERROR("failed to prepare framebuffer\n"); 670 + return ret; 670 671 } 671 672 672 673 return 0; ··· 682 689 683 690 DPU_DEBUG_PLANE(pdpu, "FB[%u]\n", old_state->fb->base.id); 684 691 685 - msm_framebuffer_cleanup(old_state->fb, old_pstate->vm, 686 - old_pstate->needs_dirtyfb); 692 + msm_framebuffer_cleanup(old_state->fb, old_pstate->needs_dirtyfb); 687 693 } 688 694 689 695 static int dpu_plane_check_inline_rotation(struct dpu_plane *pdpu, ··· 1449 1457 pstate->needs_qos_remap |= (is_rt_pipe != pdpu->is_rt_pipe); 1450 1458 pdpu->is_rt_pipe = is_rt_pipe; 1451 1459 1452 - dpu_format_populate_addrs(pstate->vm, new_state->fb, &pstate->layout); 1460 + dpu_format_populate_addrs(new_state->fb, &pstate->layout); 1453 1461 1454 1462 DPU_DEBUG_PLANE(pdpu, "FB[%u] " DRM_RECT_FP_FMT "->crtc%u " DRM_RECT_FMT 1455 1463 ", %p4cc ubwc %d\n", fb->base.id, DRM_RECT_FP_ARG(&state->src),
-2
drivers/gpu/drm/msm/disp/dpu1/dpu_plane.h
··· 17 17 /** 18 18 * struct dpu_plane_state: Define dpu extension of drm plane state object 19 19 * @base: base drm plane state object 20 - * @vm: pointer to address space for input/output buffers 21 20 * @pipe: software pipe description 22 21 * @r_pipe: software pipe description of the second pipe 23 22 * @pipe_cfg: software pipe configuration ··· 33 34 */ 34 35 struct dpu_plane_state { 35 36 struct drm_plane_state base; 36 - struct msm_gem_vm *vm; 37 37 struct dpu_sw_pipe pipe; 38 38 struct dpu_sw_pipe r_pipe; 39 39 struct dpu_sw_pipe_cfg pipe_cfg;
+6 -12
drivers/gpu/drm/msm/disp/mdp4/mdp4_plane.c
··· 79 79 static int mdp4_plane_prepare_fb(struct drm_plane *plane, 80 80 struct drm_plane_state *new_state) 81 81 { 82 - struct msm_drm_private *priv = plane->dev->dev_private; 83 - struct msm_kms *kms = priv->kms; 84 - 85 82 if (!new_state->fb) 86 83 return 0; 87 84 88 85 drm_gem_plane_helper_prepare_fb(plane, new_state); 89 86 90 - return msm_framebuffer_prepare(new_state->fb, kms->vm, false); 87 + return msm_framebuffer_prepare(new_state->fb, false); 91 88 } 92 89 93 90 static void mdp4_plane_cleanup_fb(struct drm_plane *plane, 94 91 struct drm_plane_state *old_state) 95 92 { 96 93 struct mdp4_plane *mdp4_plane = to_mdp4_plane(plane); 97 - struct mdp4_kms *mdp4_kms = get_kms(plane); 98 - struct msm_kms *kms = &mdp4_kms->base.base; 99 94 struct drm_framebuffer *fb = old_state->fb; 100 95 101 96 if (!fb) 102 97 return; 103 98 104 99 DBG("%s: cleanup: FB[%u]", mdp4_plane->name, fb->base.id); 105 - msm_framebuffer_cleanup(fb, kms->vm, false); 100 + msm_framebuffer_cleanup(fb, false); 106 101 } 107 102 108 103 ··· 136 141 { 137 142 struct mdp4_plane *mdp4_plane = to_mdp4_plane(plane); 138 143 struct mdp4_kms *mdp4_kms = get_kms(plane); 139 - struct msm_kms *kms = &mdp4_kms->base.base; 140 144 enum mdp4_pipe pipe = mdp4_plane->pipe; 141 145 142 146 mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRC_STRIDE_A(pipe), ··· 147 153 MDP4_PIPE_SRC_STRIDE_B_P3(fb->pitches[3])); 148 154 149 155 mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRCP0_BASE(pipe), 150 - msm_framebuffer_iova(fb, kms->vm, 0)); 156 + msm_framebuffer_iova(fb, 0)); 151 157 mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRCP1_BASE(pipe), 152 - msm_framebuffer_iova(fb, kms->vm, 1)); 158 + msm_framebuffer_iova(fb, 1)); 153 159 mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRCP2_BASE(pipe), 154 - msm_framebuffer_iova(fb, kms->vm, 2)); 160 + msm_framebuffer_iova(fb, 2)); 155 161 mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRCP3_BASE(pipe), 156 - msm_framebuffer_iova(fb, kms->vm, 3)); 162 + msm_framebuffer_iova(fb, 3)); 157 163 } 158 164 159 165 static void mdp4_write_csc_config(struct mdp4_kms *mdp4_kms,
+6 -12
drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c
··· 135 135 static int mdp5_plane_prepare_fb(struct drm_plane *plane, 136 136 struct drm_plane_state *new_state) 137 137 { 138 - struct msm_drm_private *priv = plane->dev->dev_private; 139 - struct msm_kms *kms = priv->kms; 140 138 bool needs_dirtyfb = to_mdp5_plane_state(new_state)->needs_dirtyfb; 141 139 142 140 if (!new_state->fb) ··· 142 144 143 145 drm_gem_plane_helper_prepare_fb(plane, new_state); 144 146 145 - return msm_framebuffer_prepare(new_state->fb, kms->vm, needs_dirtyfb); 147 + return msm_framebuffer_prepare(new_state->fb, needs_dirtyfb); 146 148 } 147 149 148 150 static void mdp5_plane_cleanup_fb(struct drm_plane *plane, 149 151 struct drm_plane_state *old_state) 150 152 { 151 - struct mdp5_kms *mdp5_kms = get_kms(plane); 152 - struct msm_kms *kms = &mdp5_kms->base.base; 153 153 struct drm_framebuffer *fb = old_state->fb; 154 154 bool needed_dirtyfb = to_mdp5_plane_state(old_state)->needs_dirtyfb; 155 155 ··· 155 159 return; 156 160 157 161 DBG("%s: cleanup: FB[%u]", plane->name, fb->base.id); 158 - msm_framebuffer_cleanup(fb, kms->vm, needed_dirtyfb); 162 + msm_framebuffer_cleanup(fb, needed_dirtyfb); 159 163 } 160 164 161 165 static int mdp5_plane_atomic_check_with_state(struct drm_crtc_state *crtc_state, ··· 463 467 enum mdp5_pipe pipe, 464 468 struct drm_framebuffer *fb) 465 469 { 466 - struct msm_kms *kms = &mdp5_kms->base.base; 467 - 468 470 mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_STRIDE_A(pipe), 469 471 MDP5_PIPE_SRC_STRIDE_A_P0(fb->pitches[0]) | 470 472 MDP5_PIPE_SRC_STRIDE_A_P1(fb->pitches[1])); ··· 472 478 MDP5_PIPE_SRC_STRIDE_B_P3(fb->pitches[3])); 473 479 474 480 mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC0_ADDR(pipe), 475 - msm_framebuffer_iova(fb, kms->vm, 0)); 481 + msm_framebuffer_iova(fb, 0)); 476 482 mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC1_ADDR(pipe), 477 - msm_framebuffer_iova(fb, kms->vm, 1)); 483 + msm_framebuffer_iova(fb, 1)); 478 484 mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC2_ADDR(pipe), 479 - msm_framebuffer_iova(fb, kms->vm, 2)); 485 + msm_framebuffer_iova(fb, 2)); 480 486 mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC3_ADDR(pipe), 481 - msm_framebuffer_iova(fb, kms->vm, 3)); 487 + msm_framebuffer_iova(fb, 3)); 482 488 } 483 489 484 490 /* Note: mdp5_plane->pipe_lock must be locked */
+3 -6
drivers/gpu/drm/msm/msm_drv.h
··· 274 274 int msm_gem_prime_pin(struct drm_gem_object *obj); 275 275 void msm_gem_prime_unpin(struct drm_gem_object *obj); 276 276 277 - int msm_framebuffer_prepare(struct drm_framebuffer *fb, 278 - struct msm_gem_vm *vm, bool needs_dirtyfb); 279 - void msm_framebuffer_cleanup(struct drm_framebuffer *fb, 280 - struct msm_gem_vm *vm, bool needed_dirtyfb); 281 - uint32_t msm_framebuffer_iova(struct drm_framebuffer *fb, 282 - struct msm_gem_vm *vm, int plane); 277 + int msm_framebuffer_prepare(struct drm_framebuffer *fb, bool needs_dirtyfb); 278 + void msm_framebuffer_cleanup(struct drm_framebuffer *fb, bool needed_dirtyfb); 279 + uint32_t msm_framebuffer_iova(struct drm_framebuffer *fb, int plane); 283 280 struct drm_gem_object *msm_framebuffer_bo(struct drm_framebuffer *fb, int plane); 284 281 const struct msm_format *msm_framebuffer_format(struct drm_framebuffer *fb); 285 282 struct drm_framebuffer *msm_framebuffer_create(struct drm_device *dev,
+7 -8
drivers/gpu/drm/msm/msm_fb.c
··· 75 75 76 76 /* prepare/pin all the fb's bo's for scanout. 77 77 */ 78 - int msm_framebuffer_prepare(struct drm_framebuffer *fb, 79 - struct msm_gem_vm *vm, 80 - bool needs_dirtyfb) 78 + int msm_framebuffer_prepare(struct drm_framebuffer *fb, bool needs_dirtyfb) 81 79 { 80 + struct msm_drm_private *priv = fb->dev->dev_private; 81 + struct msm_gem_vm *vm = priv->kms->vm; 82 82 struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb); 83 83 int ret, i, n = fb->format->num_planes; 84 84 ··· 98 98 return 0; 99 99 } 100 100 101 - void msm_framebuffer_cleanup(struct drm_framebuffer *fb, 102 - struct msm_gem_vm *vm, 103 - bool needed_dirtyfb) 101 + void msm_framebuffer_cleanup(struct drm_framebuffer *fb, bool needed_dirtyfb) 104 102 { 103 + struct msm_drm_private *priv = fb->dev->dev_private; 104 + struct msm_gem_vm *vm = priv->kms->vm; 105 105 struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb); 106 106 int i, n = fb->format->num_planes; 107 107 ··· 115 115 memset(msm_fb->iova, 0, sizeof(msm_fb->iova)); 116 116 } 117 117 118 - uint32_t msm_framebuffer_iova(struct drm_framebuffer *fb, 119 - struct msm_gem_vm *vm, int plane) 118 + uint32_t msm_framebuffer_iova(struct drm_framebuffer *fb, int plane) 120 119 { 121 120 struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb); 122 121 return msm_fb->iova[plane] + fb->offsets[plane];