Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

drm/panfrost: Replace DRM driver allocation method with newer one

Drop the deprecated DRM driver allocation method in favour of
devm_drm_dev_alloc(). Overall just make it the same as in Panthor.
Also discard now superfluous generic and platform device pointers inside
the main panfrost device structure.

Some long-standing checkpatch issues unearthed as a result of these changes
were also fixed, such as overly long lines and double assignments on a single line.

Reviewed-by: Steven Price <steven.price@arm.com>
Acked-by: Boris Brezillon <boris.brezillon@collabora.com>
Reviewed-by: AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>
Signed-off-by: Adrián Larumbe <adrian.larumbe@collabora.com>
Link: https://lore.kernel.org/r/20251019145225.3621989-2-adrian.larumbe@collabora.com
Signed-off-by: Steven Price <steven.price@arm.com>

Authored by Adrián Larumbe and committed by Steven Price
5c0c825a ddf70cb6

+146 -157
+2 -2
drivers/gpu/drm/panfrost/panfrost_devfreq.c
··· 74 74 75 75 spin_unlock_irqrestore(&pfdevfreq->lock, irqflags); 76 76 77 - dev_dbg(pfdev->dev, "busy %lu total %lu %lu %% freq %lu MHz\n", 77 + dev_dbg(pfdev->base.dev, "busy %lu total %lu %lu %% freq %lu MHz\n", 78 78 status->busy_time, status->total_time, 79 79 status->busy_time / (status->total_time / 100), 80 80 status->current_frequency / 1000 / 1000); ··· 119 119 int ret; 120 120 struct dev_pm_opp *opp; 121 121 unsigned long cur_freq; 122 - struct device *dev = &pfdev->pdev->dev; 122 + struct device *dev = pfdev->base.dev; 123 123 struct devfreq *devfreq; 124 124 struct thermal_cooling_device *cooling; 125 125 struct panfrost_devfreq *pfdevfreq = &pfdev->pfdevfreq;
+25 -24
drivers/gpu/drm/panfrost/panfrost_device.c
··· 20 20 21 21 static int panfrost_reset_init(struct panfrost_device *pfdev) 22 22 { 23 - pfdev->rstc = devm_reset_control_array_get_optional_exclusive(pfdev->dev); 23 + pfdev->rstc = devm_reset_control_array_get_optional_exclusive(pfdev->base.dev); 24 24 if (IS_ERR(pfdev->rstc)) { 25 - dev_err(pfdev->dev, "get reset failed %ld\n", PTR_ERR(pfdev->rstc)); 25 + dev_err(pfdev->base.dev, "get reset failed %ld\n", PTR_ERR(pfdev->rstc)); 26 26 return PTR_ERR(pfdev->rstc); 27 27 } 28 28 ··· 39 39 int err; 40 40 unsigned long rate; 41 41 42 - pfdev->clock = devm_clk_get(pfdev->dev, NULL); 42 + pfdev->clock = devm_clk_get(pfdev->base.dev, NULL); 43 43 if (IS_ERR(pfdev->clock)) { 44 - dev_err(pfdev->dev, "get clock failed %ld\n", PTR_ERR(pfdev->clock)); 44 + dev_err(pfdev->base.dev, "get clock failed %ld\n", PTR_ERR(pfdev->clock)); 45 45 return PTR_ERR(pfdev->clock); 46 46 } 47 47 48 48 rate = clk_get_rate(pfdev->clock); 49 - dev_info(pfdev->dev, "clock rate = %lu\n", rate); 49 + dev_info(pfdev->base.dev, "clock rate = %lu\n", rate); 50 50 51 51 err = clk_prepare_enable(pfdev->clock); 52 52 if (err) 53 53 return err; 54 54 55 - pfdev->bus_clock = devm_clk_get_optional(pfdev->dev, "bus"); 55 + pfdev->bus_clock = devm_clk_get_optional(pfdev->base.dev, "bus"); 56 56 if (IS_ERR(pfdev->bus_clock)) { 57 - dev_err(pfdev->dev, "get bus_clock failed %ld\n", 57 + dev_err(pfdev->base.dev, "get bus_clock failed %ld\n", 58 58 PTR_ERR(pfdev->bus_clock)); 59 59 err = PTR_ERR(pfdev->bus_clock); 60 60 goto disable_clock; ··· 62 62 63 63 if (pfdev->bus_clock) { 64 64 rate = clk_get_rate(pfdev->bus_clock); 65 - dev_info(pfdev->dev, "bus_clock rate = %lu\n", rate); 65 + dev_info(pfdev->base.dev, "bus_clock rate = %lu\n", rate); 66 66 67 67 err = clk_prepare_enable(pfdev->bus_clock); 68 68 if (err) ··· 87 87 { 88 88 int ret, i; 89 89 90 - pfdev->regulators = devm_kcalloc(pfdev->dev, pfdev->comp->num_supplies, 90 + pfdev->regulators = devm_kcalloc(pfdev->base.dev, pfdev->comp->num_supplies, 91 
91 sizeof(*pfdev->regulators), 92 92 GFP_KERNEL); 93 93 if (!pfdev->regulators) ··· 96 96 for (i = 0; i < pfdev->comp->num_supplies; i++) 97 97 pfdev->regulators[i].supply = pfdev->comp->supply_names[i]; 98 98 99 - ret = devm_regulator_bulk_get(pfdev->dev, 99 + ret = devm_regulator_bulk_get(pfdev->base.dev, 100 100 pfdev->comp->num_supplies, 101 101 pfdev->regulators); 102 102 if (ret < 0) { 103 103 if (ret != -EPROBE_DEFER) 104 - dev_err(pfdev->dev, "failed to get regulators: %d\n", 104 + dev_err(pfdev->base.dev, "failed to get regulators: %d\n", 105 105 ret); 106 106 return ret; 107 107 } ··· 109 109 ret = regulator_bulk_enable(pfdev->comp->num_supplies, 110 110 pfdev->regulators); 111 111 if (ret < 0) { 112 - dev_err(pfdev->dev, "failed to enable regulators: %d\n", ret); 112 + dev_err(pfdev->base.dev, "failed to enable regulators: %d\n", ret); 113 113 return ret; 114 114 } 115 115 ··· 144 144 int err; 145 145 int i, num_domains; 146 146 147 - num_domains = of_count_phandle_with_args(pfdev->dev->of_node, 147 + num_domains = of_count_phandle_with_args(pfdev->base.dev->of_node, 148 148 "power-domains", 149 149 "#power-domain-cells"); 150 150 ··· 156 156 return 0; 157 157 158 158 if (num_domains != pfdev->comp->num_pm_domains) { 159 - dev_err(pfdev->dev, 159 + dev_err(pfdev->base.dev, 160 160 "Incorrect number of power domains: %d provided, %d needed\n", 161 161 num_domains, pfdev->comp->num_pm_domains); 162 162 return -EINVAL; ··· 168 168 169 169 for (i = 0; i < num_domains; i++) { 170 170 pfdev->pm_domain_devs[i] = 171 - dev_pm_domain_attach_by_name(pfdev->dev, 172 - pfdev->comp->pm_domain_names[i]); 171 + dev_pm_domain_attach_by_name(pfdev->base.dev, 172 + pfdev->comp->pm_domain_names[i]); 173 173 if (IS_ERR_OR_NULL(pfdev->pm_domain_devs[i])) { 174 174 err = PTR_ERR(pfdev->pm_domain_devs[i]) ? 
: -ENODATA; 175 175 pfdev->pm_domain_devs[i] = NULL; 176 - dev_err(pfdev->dev, 176 + dev_err(pfdev->base.dev, 177 177 "failed to get pm-domain %s(%d): %d\n", 178 178 pfdev->comp->pm_domain_names[i], i, err); 179 179 goto err; 180 180 } 181 181 182 - pfdev->pm_domain_links[i] = device_link_add(pfdev->dev, 183 - pfdev->pm_domain_devs[i], DL_FLAG_PM_RUNTIME | 184 - DL_FLAG_STATELESS | DL_FLAG_RPM_ACTIVE); 182 + pfdev->pm_domain_links[i] = 183 + device_link_add(pfdev->base.dev, 184 + pfdev->pm_domain_devs[i], DL_FLAG_PM_RUNTIME | 185 + DL_FLAG_STATELESS | DL_FLAG_RPM_ACTIVE); 185 186 if (!pfdev->pm_domain_links[i]) { 186 187 dev_err(pfdev->pm_domain_devs[i], 187 188 "adding device link failed!\n"); ··· 221 220 222 221 err = panfrost_reset_init(pfdev); 223 222 if (err) { 224 - dev_err(pfdev->dev, "reset init failed %d\n", err); 223 + dev_err(pfdev->base.dev, "reset init failed %d\n", err); 225 224 goto out_pm_domain; 226 225 } 227 226 228 227 err = panfrost_clk_init(pfdev); 229 228 if (err) { 230 - dev_err(pfdev->dev, "clk init failed %d\n", err); 229 + dev_err(pfdev->base.dev, "clk init failed %d\n", err); 231 230 goto out_reset; 232 231 } 233 232 234 233 err = panfrost_devfreq_init(pfdev); 235 234 if (err) { 236 235 if (err != -EPROBE_DEFER) 237 - dev_err(pfdev->dev, "devfreq init failed %d\n", err); 236 + dev_err(pfdev->base.dev, "devfreq init failed %d\n", err); 238 237 goto out_clk; 239 238 } 240 239 ··· 245 244 goto out_devfreq; 246 245 } 247 246 248 - pfdev->iomem = devm_platform_ioremap_resource(pfdev->pdev, 0); 247 + pfdev->iomem = devm_platform_ioremap_resource(to_platform_device(pfdev->base.dev), 0); 249 248 if (IS_ERR(pfdev->iomem)) { 250 249 err = PTR_ERR(pfdev->iomem); 251 250 goto out_regulator;
+2 -4
drivers/gpu/drm/panfrost/panfrost_device.h
··· 124 124 }; 125 125 126 126 struct panfrost_device { 127 - struct device *dev; 128 - struct drm_device *ddev; 129 - struct platform_device *pdev; 127 + struct drm_device base; 130 128 int gpu_irq; 131 129 int mmu_irq; 132 130 ··· 220 222 221 223 static inline struct panfrost_device *to_panfrost_device(struct drm_device *ddev) 222 224 { 223 - return ddev->dev_private; 225 + return container_of(ddev, struct panfrost_device, base); 224 226 } 225 227 226 228 static inline int panfrost_model_cmp(struct panfrost_device *pfdev, s32 id)
+29 -45
drivers/gpu/drm/panfrost/panfrost_drv.c
··· 36 36 { 37 37 int ret; 38 38 39 - ret = pm_runtime_resume_and_get(pfdev->dev); 39 + ret = pm_runtime_resume_and_get(pfdev->base.dev); 40 40 if (ret) 41 41 return ret; 42 42 ··· 44 44 *arg = panfrost_timestamp_read(pfdev); 45 45 panfrost_cycle_counter_put(pfdev); 46 46 47 - pm_runtime_put(pfdev->dev); 47 + pm_runtime_put(pfdev->base.dev); 48 48 return 0; 49 49 } 50 50 51 51 static int panfrost_ioctl_get_param(struct drm_device *ddev, void *data, struct drm_file *file) 52 52 { 53 53 struct drm_panfrost_get_param *param = data; 54 - struct panfrost_device *pfdev = ddev->dev_private; 54 + struct panfrost_device *pfdev = to_panfrost_device(ddev); 55 55 int ret; 56 56 57 57 if (param->pad != 0) ··· 283 283 static int panfrost_ioctl_submit(struct drm_device *dev, void *data, 284 284 struct drm_file *file) 285 285 { 286 - struct panfrost_device *pfdev = dev->dev_private; 286 + struct panfrost_device *pfdev = to_panfrost_device(dev); 287 287 struct panfrost_file_priv *file_priv = file->driver_priv; 288 288 struct drm_panfrost_submit *args = data; 289 289 struct drm_syncobj *sync_out = NULL; ··· 457 457 { 458 458 struct panfrost_file_priv *priv = file_priv->driver_priv; 459 459 struct drm_panfrost_madvise *args = data; 460 - struct panfrost_device *pfdev = dev->dev_private; 460 + struct panfrost_device *pfdev = to_panfrost_device(dev); 461 461 struct drm_gem_object *gem_obj; 462 462 struct panfrost_gem_object *bo; 463 463 int ret = 0; ··· 590 590 panfrost_open(struct drm_device *dev, struct drm_file *file) 591 591 { 592 592 int ret; 593 - struct panfrost_device *pfdev = dev->dev_private; 593 + struct panfrost_device *pfdev = to_panfrost_device(dev); 594 594 struct panfrost_file_priv *panfrost_priv; 595 595 596 596 panfrost_priv = kzalloc(sizeof(*panfrost_priv), GFP_KERNEL); ··· 686 686 687 687 static void panfrost_show_fdinfo(struct drm_printer *p, struct drm_file *file) 688 688 { 689 - struct drm_device *dev = file->minor->dev; 690 - struct panfrost_device *pfdev = 
dev->dev_private; 689 + struct panfrost_device *pfdev = to_panfrost_device(file->minor->dev); 691 690 692 691 panfrost_gpu_show_fdinfo(pfdev, file->driver_priv, p); 693 692 ··· 703 704 static int panthor_gems_show(struct seq_file *m, void *data) 704 705 { 705 706 struct drm_info_node *node = m->private; 706 - struct drm_device *dev = node->minor->dev; 707 - struct panfrost_device *pfdev = dev->dev_private; 707 + struct panfrost_device *pfdev = to_panfrost_device(node->minor->dev); 708 708 709 709 panfrost_gem_debugfs_print_bos(pfdev, m); 710 710 ··· 752 754 } 753 755 754 756 static struct drm_info_list panthor_debugfs_list[] = { 755 - {"gems", panthor_gems_show, 0, NULL}, 757 + {"gems", 758 + panthor_gems_show, 0, NULL}, 756 759 }; 757 760 758 761 static int panthor_gems_debugfs_init(struct drm_minor *minor) ··· 860 861 static int panfrost_probe(struct platform_device *pdev) 861 862 { 862 863 struct panfrost_device *pfdev; 863 - struct drm_device *ddev; 864 864 int err; 865 865 866 - pfdev = devm_kzalloc(&pdev->dev, sizeof(*pfdev), GFP_KERNEL); 867 - if (!pfdev) 868 - return -ENOMEM; 869 - 870 - pfdev->pdev = pdev; 871 - pfdev->dev = &pdev->dev; 866 + pfdev = devm_drm_dev_alloc(&pdev->dev, &panfrost_drm_driver, 867 + struct panfrost_device, base); 868 + if (IS_ERR(pfdev)) 869 + return PTR_ERR(pfdev); 872 870 873 871 platform_set_drvdata(pdev, pfdev); 874 872 ··· 874 878 return -ENODEV; 875 879 876 880 pfdev->coherent = device_get_dma_attr(&pdev->dev) == DEV_DMA_COHERENT; 877 - 878 - /* Allocate and initialize the DRM device. 
*/ 879 - ddev = drm_dev_alloc(&panfrost_drm_driver, &pdev->dev); 880 - if (IS_ERR(ddev)) 881 - return PTR_ERR(ddev); 882 - 883 - ddev->dev_private = pfdev; 884 - pfdev->ddev = ddev; 885 881 886 882 mutex_init(&pfdev->shrinker_lock); 887 883 INIT_LIST_HEAD(&pfdev->shrinker_list); ··· 885 897 goto err_out0; 886 898 } 887 899 888 - pm_runtime_set_active(pfdev->dev); 889 - pm_runtime_mark_last_busy(pfdev->dev); 890 - pm_runtime_enable(pfdev->dev); 891 - pm_runtime_set_autosuspend_delay(pfdev->dev, 50); /* ~3 frames */ 892 - pm_runtime_use_autosuspend(pfdev->dev); 900 + pm_runtime_set_active(pfdev->base.dev); 901 + pm_runtime_mark_last_busy(pfdev->base.dev); 902 + pm_runtime_enable(pfdev->base.dev); 903 + pm_runtime_set_autosuspend_delay(pfdev->base.dev, 50); /* ~3 frames */ 904 + pm_runtime_use_autosuspend(pfdev->base.dev); 893 905 894 906 /* 895 907 * Register the DRM device with the core and the connectors with 896 908 * sysfs 897 909 */ 898 - err = drm_dev_register(ddev, 0); 910 + err = drm_dev_register(&pfdev->base, 0); 899 911 if (err < 0) 900 912 goto err_out1; 901 913 902 - err = panfrost_gem_shrinker_init(ddev); 914 + err = panfrost_gem_shrinker_init(&pfdev->base); 903 915 if (err) 904 916 goto err_out2; 905 917 906 918 return 0; 907 919 908 920 err_out2: 909 - drm_dev_unregister(ddev); 921 + drm_dev_unregister(&pfdev->base); 910 922 err_out1: 911 - pm_runtime_disable(pfdev->dev); 923 + pm_runtime_disable(pfdev->base.dev); 912 924 panfrost_device_fini(pfdev); 913 - pm_runtime_set_suspended(pfdev->dev); 925 + pm_runtime_set_suspended(pfdev->base.dev); 914 926 err_out0: 915 - drm_dev_put(ddev); 916 927 return err; 917 928 } 918 929 919 930 static void panfrost_remove(struct platform_device *pdev) 920 931 { 921 932 struct panfrost_device *pfdev = platform_get_drvdata(pdev); 922 - struct drm_device *ddev = pfdev->ddev; 923 933 924 - drm_dev_unregister(ddev); 925 - panfrost_gem_shrinker_cleanup(ddev); 934 + drm_dev_unregister(&pfdev->base); 935 + 
panfrost_gem_shrinker_cleanup(&pfdev->base); 926 936 927 - pm_runtime_get_sync(pfdev->dev); 928 - pm_runtime_disable(pfdev->dev); 937 + pm_runtime_get_sync(pfdev->base.dev); 938 + pm_runtime_disable(pfdev->base.dev); 929 939 panfrost_device_fini(pfdev); 930 - pm_runtime_set_suspended(pfdev->dev); 931 - 932 - drm_dev_put(ddev); 940 + pm_runtime_set_suspended(pfdev->base.dev); 933 941 } 934 942 935 943 static ssize_t profiling_show(struct device *dev,
+4 -4
drivers/gpu/drm/panfrost/panfrost_dump.c
··· 163 163 iter.start = __vmalloc(file_size, GFP_KERNEL | __GFP_NOWARN | 164 164 __GFP_NORETRY); 165 165 if (!iter.start) { 166 - dev_warn(pfdev->dev, "failed to allocate devcoredump file\n"); 166 + dev_warn(pfdev->base.dev, "failed to allocate devcoredump file\n"); 167 167 return; 168 168 } 169 169 ··· 204 204 mapping = job->mappings[i]; 205 205 206 206 if (!bo->base.sgt) { 207 - dev_err(pfdev->dev, "Panfrost Dump: BO has no sgt, cannot dump\n"); 207 + dev_err(pfdev->base.dev, "Panfrost Dump: BO has no sgt, cannot dump\n"); 208 208 iter.hdr->bomap.valid = 0; 209 209 goto dump_header; 210 210 } 211 211 212 212 ret = drm_gem_vmap(&bo->base.base, &map); 213 213 if (ret) { 214 - dev_err(pfdev->dev, "Panfrost Dump: couldn't map Buffer Object\n"); 214 + dev_err(pfdev->base.dev, "Panfrost Dump: couldn't map Buffer Object\n"); 215 215 iter.hdr->bomap.valid = 0; 216 216 goto dump_header; 217 217 } ··· 237 237 } 238 238 panfrost_core_dump_header(&iter, PANFROSTDUMP_BUF_TRAILER, iter.data); 239 239 240 - dev_coredumpv(pfdev->dev, iter.start, iter.data - iter.start, GFP_KERNEL); 240 + dev_coredumpv(pfdev->base.dev, iter.start, iter.data - iter.start, GFP_KERNEL); 241 241 }
+4 -4
drivers/gpu/drm/panfrost/panfrost_gem.c
··· 26 26 27 27 static void panfrost_gem_debugfs_bo_rm(struct panfrost_gem_object *bo) 28 28 { 29 - struct panfrost_device *pfdev = bo->base.base.dev->dev_private; 29 + struct panfrost_device *pfdev = to_panfrost_device(bo->base.base.dev); 30 30 31 31 if (list_empty(&bo->debugfs.node)) 32 32 return; ··· 48 48 static void panfrost_gem_free_object(struct drm_gem_object *obj) 49 49 { 50 50 struct panfrost_gem_object *bo = to_panfrost_bo(obj); 51 - struct panfrost_device *pfdev = obj->dev->dev_private; 51 + struct panfrost_device *pfdev = to_panfrost_device(obj->dev); 52 52 53 53 /* 54 54 * Make sure the BO is no longer inserted in the shrinker list before ··· 76 76 77 77 for (i = 0; i < n_sgt; i++) { 78 78 if (bo->sgts[i].sgl) { 79 - dma_unmap_sgtable(pfdev->dev, &bo->sgts[i], 79 + dma_unmap_sgtable(pfdev->base.dev, &bo->sgts[i], 80 80 DMA_BIDIRECTIONAL, 0); 81 81 sg_free_table(&bo->sgts[i]); 82 82 } ··· 284 284 */ 285 285 struct drm_gem_object *panfrost_gem_create_object(struct drm_device *dev, size_t size) 286 286 { 287 - struct panfrost_device *pfdev = dev->dev_private; 287 + struct panfrost_device *pfdev = to_panfrost_device(dev); 288 288 struct panfrost_gem_object *obj; 289 289 290 290 obj = kzalloc(sizeof(*obj), GFP_KERNEL);
+2 -2
drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c
··· 97 97 */ 98 98 int panfrost_gem_shrinker_init(struct drm_device *dev) 99 99 { 100 - struct panfrost_device *pfdev = dev->dev_private; 100 + struct panfrost_device *pfdev = to_panfrost_device(dev); 101 101 102 102 pfdev->shrinker = shrinker_alloc(0, "drm-panfrost"); 103 103 if (!pfdev->shrinker) ··· 120 120 */ 121 121 void panfrost_gem_shrinker_cleanup(struct drm_device *dev) 122 122 { 123 - struct panfrost_device *pfdev = dev->dev_private; 123 + struct panfrost_device *pfdev = to_panfrost_device(dev); 124 124 125 125 if (pfdev->shrinker) 126 126 shrinker_free(pfdev->shrinker);
+25 -24
drivers/gpu/drm/panfrost/panfrost_gpu.c
··· 36 36 u64 address = (u64) gpu_read(pfdev, GPU_FAULT_ADDRESS_HI) << 32; 37 37 address |= gpu_read(pfdev, GPU_FAULT_ADDRESS_LO); 38 38 39 - dev_warn(pfdev->dev, "GPU Fault 0x%08x (%s) at 0x%016llx\n", 39 + dev_warn(pfdev->base.dev, "GPU Fault 0x%08x (%s) at 0x%016llx\n", 40 40 fault_status, panfrost_exception_name(fault_status & 0xFF), 41 41 address); 42 42 43 43 if (state & GPU_IRQ_MULTIPLE_FAULT) 44 - dev_warn(pfdev->dev, "There were multiple GPU faults - some have not been reported\n"); 44 + dev_warn(pfdev->base.dev, "There were multiple GPU faults - some have not been reported\n"); 45 45 46 46 gpu_write(pfdev, GPU_INT_MASK, 0); 47 47 } ··· 72 72 val, val & GPU_IRQ_RESET_COMPLETED, 10, 10000); 73 73 74 74 if (ret) { 75 - dev_err(pfdev->dev, "gpu soft reset timed out, attempting hard reset\n"); 75 + dev_err(pfdev->base.dev, "gpu soft reset timed out, attempting hard reset\n"); 76 76 77 77 gpu_write(pfdev, GPU_CMD, GPU_CMD_HARD_RESET); 78 78 ret = readl_relaxed_poll_timeout(pfdev->iomem + GPU_INT_RAWSTAT, val, 79 79 val & GPU_IRQ_RESET_COMPLETED, 100, 10000); 80 80 if (ret) { 81 - dev_err(pfdev->dev, "gpu hard reset timed out\n"); 81 + dev_err(pfdev->base.dev, "gpu hard reset timed out\n"); 82 82 return ret; 83 83 } 84 84 } ··· 95 95 * All in-flight jobs should have released their cycle 96 96 * counter references upon reset, but let us make sure 97 97 */ 98 - if (drm_WARN_ON(pfdev->ddev, atomic_read(&pfdev->cycle_counter.use_count) != 0)) 98 + if (drm_WARN_ON(&pfdev->base, atomic_read(&pfdev->cycle_counter.use_count) != 0)) 99 99 atomic_set(&pfdev->cycle_counter.use_count, 0); 100 100 101 101 return 0; ··· 330 330 bitmap_from_u64(pfdev->features.hw_features, hw_feat); 331 331 bitmap_from_u64(pfdev->features.hw_issues, hw_issues); 332 332 333 - dev_info(pfdev->dev, "mali-%s id 0x%x major 0x%x minor 0x%x status 0x%x", 333 + dev_info(pfdev->base.dev, "mali-%s id 0x%x major 0x%x minor 0x%x status 0x%x", 334 334 name, gpu_id, major, minor, status); 335 - 
dev_info(pfdev->dev, "features: %64pb, issues: %64pb", 335 + dev_info(pfdev->base.dev, "features: %64pb, issues: %64pb", 336 336 pfdev->features.hw_features, 337 337 pfdev->features.hw_issues); 338 338 339 - dev_info(pfdev->dev, "Features: L2:0x%08x Shader:0x%08x Tiler:0x%08x Mem:0x%0x MMU:0x%08x AS:0x%x JS:0x%x", 339 + dev_info(pfdev->base.dev, "Features: L2:0x%08x Shader:0x%08x Tiler:0x%08x Mem:0x%0x MMU:0x%08x AS:0x%x JS:0x%x", 340 340 pfdev->features.l2_features, 341 341 pfdev->features.core_features, 342 342 pfdev->features.tiler_features, ··· 345 345 pfdev->features.as_present, 346 346 pfdev->features.js_present); 347 347 348 - dev_info(pfdev->dev, "shader_present=0x%0llx l2_present=0x%0llx", 348 + dev_info(pfdev->base.dev, "shader_present=0x%0llx l2_present=0x%0llx", 349 349 pfdev->features.shader_present, pfdev->features.l2_present); 350 350 } 351 351 ··· 411 411 */ 412 412 core_mask = ~(pfdev->features.l2_present - 1) & 413 413 (pfdev->features.l2_present - 2); 414 - dev_info_once(pfdev->dev, "using only 1st core group (%lu cores from %lu)\n", 414 + dev_info_once(pfdev->base.dev, "using only 1st core group (%lu cores from %lu)\n", 415 415 hweight64(core_mask), 416 416 hweight64(pfdev->features.shader_present)); 417 417 ··· 432 432 val, val == (pfdev->features.l2_present & core_mask), 433 433 10, 20000); 434 434 if (ret) 435 - dev_err(pfdev->dev, "error powering up gpu L2"); 435 + dev_err(pfdev->base.dev, "error powering up gpu L2"); 436 436 437 437 gpu_write(pfdev, SHADER_PWRON_LO, 438 438 pfdev->features.shader_present & core_mask); ··· 440 440 val, val == (pfdev->features.shader_present & core_mask), 441 441 10, 20000); 442 442 if (ret) 443 - dev_err(pfdev->dev, "error powering up gpu shader"); 443 + dev_err(pfdev->base.dev, "error powering up gpu shader"); 444 444 445 445 gpu_write(pfdev, TILER_PWRON_LO, pfdev->features.tiler_present); 446 446 ret = readl_relaxed_poll_timeout(pfdev->iomem + TILER_READY_LO, 447 447 val, val == 
pfdev->features.tiler_present, 10, 1000); 448 448 if (ret) 449 - dev_err(pfdev->dev, "error powering up gpu tiler"); 449 + dev_err(pfdev->base.dev, "error powering up gpu tiler"); 450 450 } 451 451 452 452 void panfrost_gpu_power_off(struct panfrost_device *pfdev) ··· 458 458 ret = readl_relaxed_poll_timeout(pfdev->iomem + SHADER_PWRTRANS_LO, 459 459 val, !val, 1, 2000); 460 460 if (ret) 461 - dev_err(pfdev->dev, "shader power transition timeout"); 461 + dev_err(pfdev->base.dev, "shader power transition timeout"); 462 462 463 463 gpu_write(pfdev, TILER_PWROFF_LO, pfdev->features.tiler_present); 464 464 ret = readl_relaxed_poll_timeout(pfdev->iomem + TILER_PWRTRANS_LO, 465 465 val, !val, 1, 2000); 466 466 if (ret) 467 - dev_err(pfdev->dev, "tiler power transition timeout"); 467 + dev_err(pfdev->base.dev, "tiler power transition timeout"); 468 468 469 469 gpu_write(pfdev, L2_PWROFF_LO, pfdev->features.l2_present); 470 470 ret = readl_poll_timeout(pfdev->iomem + L2_PWRTRANS_LO, 471 471 val, !val, 0, 2000); 472 472 if (ret) 473 - dev_err(pfdev->dev, "l2 power transition timeout"); 473 + dev_err(pfdev->base.dev, "l2 power transition timeout"); 474 474 } 475 475 476 476 void panfrost_gpu_suspend_irq(struct panfrost_device *pfdev) ··· 491 491 492 492 panfrost_gpu_init_features(pfdev); 493 493 494 - err = dma_set_mask_and_coherent(pfdev->dev, 495 - DMA_BIT_MASK(FIELD_GET(0xff00, pfdev->features.mmu_features))); 494 + err = dma_set_mask_and_coherent(pfdev->base.dev, 495 + DMA_BIT_MASK(FIELD_GET(0xff00, 496 + pfdev->features.mmu_features))); 496 497 if (err) 497 498 return err; 498 499 499 - dma_set_max_seg_size(pfdev->dev, UINT_MAX); 500 + dma_set_max_seg_size(pfdev->base.dev, UINT_MAX); 500 501 501 - pfdev->gpu_irq = platform_get_irq_byname(to_platform_device(pfdev->dev), "gpu"); 502 + pfdev->gpu_irq = platform_get_irq_byname(to_platform_device(pfdev->base.dev), "gpu"); 502 503 if (pfdev->gpu_irq < 0) 503 504 return pfdev->gpu_irq; 504 505 505 - err = 
devm_request_irq(pfdev->dev, pfdev->gpu_irq, panfrost_gpu_irq_handler, 506 + err = devm_request_irq(pfdev->base.dev, pfdev->gpu_irq, panfrost_gpu_irq_handler, 506 507 IRQF_SHARED, KBUILD_MODNAME "-gpu", pfdev); 507 508 if (err) { 508 - dev_err(pfdev->dev, "failed to request gpu irq"); 509 + dev_err(pfdev->base.dev, "failed to request gpu irq"); 509 510 return err; 510 511 } 511 512 ··· 526 525 527 526 if (panfrost_has_hw_feature(pfdev, HW_FEATURE_FLUSH_REDUCTION)) { 528 527 /* Flush reduction only makes sense when the GPU is kept powered on between jobs */ 529 - if (pm_runtime_get_if_in_use(pfdev->dev)) { 528 + if (pm_runtime_get_if_in_use(pfdev->base.dev)) { 530 529 flush_id = gpu_read(pfdev, GPU_LATEST_FLUSH_ID); 531 - pm_runtime_put(pfdev->dev); 530 + pm_runtime_put(pfdev->base.dev); 532 531 return flush_id; 533 532 } 534 533 }
+19 -18
drivers/gpu/drm/panfrost/panfrost_job.c
··· 99 99 if (!fence) 100 100 return ERR_PTR(-ENOMEM); 101 101 102 - fence->dev = pfdev->ddev; 102 + fence->dev = &pfdev->base; 103 103 fence->queue = js_num; 104 104 fence->seqno = ++js->queue[js_num].emit_seqno; 105 105 dma_fence_init(&fence->base, &panfrost_fence_ops, &js->job_lock, ··· 210 210 211 211 panfrost_devfreq_record_busy(&pfdev->pfdevfreq); 212 212 213 - ret = pm_runtime_get_sync(pfdev->dev); 213 + ret = pm_runtime_get_sync(pfdev->base.dev); 214 214 if (ret < 0) 215 215 return; 216 216 ··· 261 261 } 262 262 263 263 job_write(pfdev, JS_COMMAND_NEXT(js), JS_COMMAND_START); 264 - dev_dbg(pfdev->dev, 264 + dev_dbg(pfdev->base.dev, 265 265 "JS: Submitting atom %p to js[%d][%d] with head=0x%llx AS %d", 266 266 job, js, subslot, jc_head, cfg & 0xf); 267 267 } ··· 446 446 bool signal_fence = true; 447 447 448 448 if (!panfrost_exception_is_fault(js_status)) { 449 - dev_dbg(pfdev->dev, "js event, js=%d, status=%s, head=0x%x, tail=0x%x", 449 + dev_dbg(pfdev->base.dev, "js event, js=%d, status=%s, head=0x%x, tail=0x%x", 450 450 js, exception_name, 451 451 job_read(pfdev, JS_HEAD_LO(js)), 452 452 job_read(pfdev, JS_TAIL_LO(js))); 453 453 } else { 454 - dev_err(pfdev->dev, "js fault, js=%d, status=%s, head=0x%x, tail=0x%x", 454 + dev_err(pfdev->base.dev, "js fault, js=%d, status=%s, head=0x%x, tail=0x%x", 455 455 js, exception_name, 456 456 job_read(pfdev, JS_HEAD_LO(js)), 457 457 job_read(pfdev, JS_TAIL_LO(js))); ··· 483 483 if (signal_fence) 484 484 dma_fence_signal_locked(job->done_fence); 485 485 486 - pm_runtime_put_autosuspend(pfdev->dev); 486 + pm_runtime_put_autosuspend(pfdev->base.dev); 487 487 488 488 if (panfrost_exception_needs_reset(pfdev, js_status)) { 489 489 atomic_set(&pfdev->reset.pending, 1); ··· 502 502 panfrost_devfreq_record_idle(&pfdev->pfdevfreq); 503 503 504 504 dma_fence_signal_locked(job->done_fence); 505 - pm_runtime_put_autosuspend(pfdev->dev); 505 + pm_runtime_put_autosuspend(pfdev->base.dev); 506 506 } 507 507 508 508 static void 
panfrost_job_handle_irq(struct panfrost_device *pfdev, u32 status) ··· 611 611 u32 status = job_read(pfdev, JOB_INT_RAWSTAT); 612 612 613 613 while (status) { 614 - pm_runtime_mark_last_busy(pfdev->dev); 614 + pm_runtime_mark_last_busy(pfdev->base.dev); 615 615 616 616 spin_lock(&pfdev->js->job_lock); 617 617 panfrost_job_handle_irq(pfdev, status); ··· 692 692 10, 10000); 693 693 694 694 if (ret) 695 - dev_err(pfdev->dev, "Soft-stop failed\n"); 695 + dev_err(pfdev->base.dev, "Soft-stop failed\n"); 696 696 697 697 /* Handle the remaining interrupts before we reset. */ 698 698 panfrost_job_handle_irqs(pfdev); ··· 710 710 if (pfdev->jobs[i][j]->requirements & PANFROST_JD_REQ_CYCLE_COUNT || 711 711 pfdev->jobs[i][j]->is_profiled) 712 712 panfrost_cycle_counter_put(pfdev->jobs[i][j]->pfdev); 713 - pm_runtime_put_noidle(pfdev->dev); 713 + pm_runtime_put_noidle(pfdev->base.dev); 714 714 panfrost_devfreq_record_idle(&pfdev->pfdevfreq); 715 715 } 716 716 } ··· 778 778 synchronize_irq(pfdev->js->irq); 779 779 780 780 if (dma_fence_is_signaled(job->done_fence)) { 781 - dev_warn(pfdev->dev, "unexpectedly high interrupt latency\n"); 781 + dev_warn(pfdev->base.dev, "unexpectedly high interrupt latency\n"); 782 782 return DRM_GPU_SCHED_STAT_NO_HANG; 783 783 } 784 784 785 - dev_err(pfdev->dev, "gpu sched timeout, js=%d, config=0x%x, status=0x%x, head=0x%x, tail=0x%x, sched_job=%p", 785 + dev_err(pfdev->base.dev, "gpu sched timeout, js=%d, config=0x%x, status=0x%x, head=0x%x, tail=0x%x, sched_job=%p", 786 786 js, 787 787 job_read(pfdev, JS_CONFIG(js)), 788 788 job_read(pfdev, JS_STATUS(js)), ··· 850 850 .num_rqs = DRM_SCHED_PRIORITY_COUNT, 851 851 .credit_limit = 2, 852 852 .timeout = msecs_to_jiffies(JOB_TIMEOUT_MS), 853 - .dev = pfdev->dev, 853 + .dev = pfdev->base.dev, 854 854 }; 855 855 struct panfrost_job_slot *js; 856 856 int ret, j; ··· 864 864 if (!panfrost_has_hw_feature(pfdev, HW_FEATURE_JOBCHAIN_DISAMBIGUATION)) 865 865 args.credit_limit = 1; 866 866 867 - pfdev->js = js 
= devm_kzalloc(pfdev->dev, sizeof(*js), GFP_KERNEL); 867 + js = devm_kzalloc(pfdev->base.dev, sizeof(*js), GFP_KERNEL); 868 868 if (!js) 869 869 return -ENOMEM; 870 + pfdev->js = js; 870 871 871 872 INIT_WORK(&pfdev->reset.work, panfrost_reset_work); 872 873 spin_lock_init(&js->job_lock); 873 874 874 - js->irq = platform_get_irq_byname(to_platform_device(pfdev->dev), "job"); 875 + js->irq = platform_get_irq_byname(to_platform_device(pfdev->base.dev), "job"); 875 876 if (js->irq < 0) 876 877 return js->irq; 877 878 878 - ret = devm_request_threaded_irq(pfdev->dev, js->irq, 879 + ret = devm_request_threaded_irq(pfdev->base.dev, js->irq, 879 880 panfrost_job_irq_handler, 880 881 panfrost_job_irq_handler_thread, 881 882 IRQF_SHARED, KBUILD_MODNAME "-job", 882 883 pfdev); 883 884 if (ret) { 884 - dev_err(pfdev->dev, "failed to request job irq"); 885 + dev_err(pfdev->base.dev, "failed to request job irq"); 885 886 return ret; 886 887 } 887 888 ··· 897 896 898 897 ret = drm_sched_init(&js->queue[j].sched, &args); 899 898 if (ret) { 900 - dev_err(pfdev->dev, "Failed to create scheduler: %d.", ret); 899 + dev_err(pfdev->base.dev, "Failed to create scheduler: %d.", ret); 901 900 goto err_sched; 902 901 } 903 902 }
+25 -21
drivers/gpu/drm/panfrost/panfrost_mmu.c
··· 81 81 if (ret) { 82 82 /* The GPU hung, let's trigger a reset */ 83 83 panfrost_device_schedule_reset(pfdev); 84 - dev_err(pfdev->dev, "AS_ACTIVE bit stuck\n"); 84 + dev_err(pfdev->base.dev, "AS_ACTIVE bit stuck\n"); 85 85 } 86 86 87 87 return ret; ··· 222 222 struct io_pgtable_cfg *pgtbl_cfg = &mmu->pgtbl_cfg; 223 223 struct panfrost_device *pfdev = mmu->pfdev; 224 224 225 - if (drm_WARN_ON(pfdev->ddev, pgtbl_cfg->arm_lpae_s1_cfg.ttbr & 225 + if (drm_WARN_ON(&pfdev->base, pgtbl_cfg->arm_lpae_s1_cfg.ttbr & 226 226 ~AS_TRANSTAB_AARCH64_4K_ADDR_MASK)) 227 227 return -EINVAL; 228 228 ··· 253 253 return mmu_cfg_init_mali_lpae(mmu); 254 254 default: 255 255 /* This should never happen */ 256 - drm_WARN(pfdev->ddev, 1, "Invalid pgtable format"); 256 + drm_WARN(&pfdev->base, 1, "Invalid pgtable format"); 257 257 return -EINVAL; 258 258 } 259 259 } ··· 315 315 atomic_set(&mmu->as_count, 1); 316 316 list_add(&mmu->list, &pfdev->as_lru_list); 317 317 318 - dev_dbg(pfdev->dev, "Assigned AS%d to mmu %p, alloc_mask=%lx", as, mmu, pfdev->as_alloc_mask); 318 + dev_dbg(pfdev->base.dev, 319 + "Assigned AS%d to mmu %p, alloc_mask=%lx", 320 + as, mmu, pfdev->as_alloc_mask); 319 321 320 322 panfrost_mmu_enable(pfdev, mmu); 321 323 ··· 383 381 if (mmu->as < 0) 384 382 return; 385 383 386 - pm_runtime_get_noresume(pfdev->dev); 384 + pm_runtime_get_noresume(pfdev->base.dev); 387 385 388 386 /* Flush the PTs only if we're already awake */ 389 - if (pm_runtime_active(pfdev->dev)) 387 + if (pm_runtime_active(pfdev->base.dev)) 390 388 mmu_hw_do_operation(pfdev, mmu, iova, size, AS_COMMAND_FLUSH_PT); 391 389 392 - pm_runtime_put_autosuspend(pfdev->dev); 390 + pm_runtime_put_autosuspend(pfdev->base.dev); 393 391 } 394 392 395 393 static int mmu_map_sg(struct panfrost_device *pfdev, struct panfrost_mmu *mmu, ··· 404 402 unsigned long paddr = sg_dma_address(sgl); 405 403 size_t len = sg_dma_len(sgl); 406 404 407 - dev_dbg(pfdev->dev, "map: as=%d, iova=%llx, paddr=%lx, len=%zx", mmu->as, 
iova, paddr, len); 405 + dev_dbg(pfdev->base.dev, 406 + "map: as=%d, iova=%llx, paddr=%lx, len=%zx", 407 + mmu->as, iova, paddr, len); 408 408 409 409 while (len) { 410 410 size_t pgcount, mapped = 0; ··· 466 462 if (WARN_ON(!mapping->active)) 467 463 return; 468 464 469 - dev_dbg(pfdev->dev, "unmap: as=%d, iova=%llx, len=%zx", 465 + dev_dbg(pfdev->base.dev, "unmap: as=%d, iova=%llx, len=%zx", 470 466 mapping->mmu->as, iova, len); 471 467 472 468 while (unmapped_len < len) { ··· 563 559 564 560 bo = bomapping->obj; 565 561 if (!bo->is_heap) { 566 - dev_WARN(pfdev->dev, "matching BO is not heap type (GPU VA = %llx)", 562 + dev_WARN(pfdev->base.dev, "matching BO is not heap type (GPU VA = %llx)", 567 563 bomapping->mmnode.start << PAGE_SHIFT); 568 564 ret = -EINVAL; 569 565 goto err_bo; ··· 630 626 if (ret) 631 627 goto err_unlock; 632 628 633 - ret = dma_map_sgtable(pfdev->dev, sgt, DMA_BIDIRECTIONAL, 0); 629 + ret = dma_map_sgtable(pfdev->base.dev, sgt, DMA_BIDIRECTIONAL, 0); 634 630 if (ret) 635 631 goto err_map; 636 632 ··· 640 636 bomapping->active = true; 641 637 bo->heap_rss_size += SZ_2M; 642 638 643 - dev_dbg(pfdev->dev, "mapped page fault @ AS%d %llx", as, addr); 639 + dev_dbg(pfdev->base.dev, "mapped page fault @ AS%d %llx", as, addr); 644 640 645 641 out: 646 642 dma_resv_unlock(obj->resv); ··· 666 662 667 663 spin_lock(&pfdev->as_lock); 668 664 if (mmu->as >= 0) { 669 - pm_runtime_get_noresume(pfdev->dev); 670 - if (pm_runtime_active(pfdev->dev)) 665 + pm_runtime_get_noresume(pfdev->base.dev); 666 + if (pm_runtime_active(pfdev->base.dev)) 671 667 panfrost_mmu_disable(pfdev, mmu->as); 672 - pm_runtime_put_autosuspend(pfdev->dev); 668 + pm_runtime_put_autosuspend(pfdev->base.dev); 673 669 674 670 clear_bit(mmu->as, &pfdev->as_alloc_mask); 675 671 clear_bit(mmu->as, &pfdev->as_in_use_mask); ··· 730 726 731 727 if (pfdev->comp->gpu_quirks & BIT(GPU_QUIRK_FORCE_AARCH64_PGTABLE)) { 732 728 if (!panfrost_has_hw_feature(pfdev, HW_FEATURE_AARCH64_MMU)) { 733 - 
dev_err_once(pfdev->dev, 729 + dev_err_once(pfdev->base.dev, 734 730 "AARCH64_4K page table not supported\n"); 735 731 return ERR_PTR(-EINVAL); 736 732 } ··· 759 755 .oas = pa_bits, 760 756 .coherent_walk = pfdev->coherent, 761 757 .tlb = &mmu_tlb_ops, 762 - .iommu_dev = pfdev->dev, 758 + .iommu_dev = pfdev->base.dev, 763 759 }; 764 760 765 761 mmu->pgtbl_ops = alloc_io_pgtable_ops(fmt, &mmu->pgtbl_cfg, mmu); ··· 852 848 853 849 if (ret) { 854 850 /* terminal fault, print info about the fault */ 855 - dev_err(pfdev->dev, 851 + dev_err(pfdev->base.dev, 856 852 "Unhandled Page fault in AS%d at VA 0x%016llX\n" 857 853 "Reason: %s\n" 858 854 "raw fault status: 0x%X\n" ··· 900 896 { 901 897 int err; 902 898 903 - pfdev->mmu_irq = platform_get_irq_byname(to_platform_device(pfdev->dev), "mmu"); 899 + pfdev->mmu_irq = platform_get_irq_byname(to_platform_device(pfdev->base.dev), "mmu"); 904 900 if (pfdev->mmu_irq < 0) 905 901 return pfdev->mmu_irq; 906 902 907 - err = devm_request_threaded_irq(pfdev->dev, pfdev->mmu_irq, 903 + err = devm_request_threaded_irq(pfdev->base.dev, pfdev->mmu_irq, 908 904 panfrost_mmu_irq_handler, 909 905 panfrost_mmu_irq_handler_thread, 910 906 IRQF_SHARED, KBUILD_MODNAME "-mmu", 911 907 pfdev); 912 908 913 909 if (err) { 914 - dev_err(pfdev->dev, "failed to request mmu irq"); 910 + dev_err(pfdev->base.dev, "failed to request mmu irq"); 915 911 return err; 916 912 } 917 913
+9 -9
drivers/gpu/drm/panfrost/panfrost_perfcnt.c
··· 84 84 else if (perfcnt->user) 85 85 return -EBUSY; 86 86 87 - ret = pm_runtime_get_sync(pfdev->dev); 87 + ret = pm_runtime_get_sync(pfdev->base.dev); 88 88 if (ret < 0) 89 89 goto err_put_pm; 90 90 91 - bo = drm_gem_shmem_create(pfdev->ddev, perfcnt->bosize); 91 + bo = drm_gem_shmem_create(&pfdev->base, perfcnt->bosize); 92 92 if (IS_ERR(bo)) { 93 93 ret = PTR_ERR(bo); 94 94 goto err_put_pm; ··· 175 175 err_put_bo: 176 176 drm_gem_object_put(&bo->base); 177 177 err_put_pm: 178 - pm_runtime_put(pfdev->dev); 178 + pm_runtime_put(pfdev->base.dev); 179 179 return ret; 180 180 } 181 181 ··· 203 203 panfrost_mmu_as_put(pfdev, perfcnt->mapping->mmu); 204 204 panfrost_gem_mapping_put(perfcnt->mapping); 205 205 perfcnt->mapping = NULL; 206 - pm_runtime_put_autosuspend(pfdev->dev); 206 + pm_runtime_put_autosuspend(pfdev->base.dev); 207 207 208 208 return 0; 209 209 } ··· 211 211 int panfrost_ioctl_perfcnt_enable(struct drm_device *dev, void *data, 212 212 struct drm_file *file_priv) 213 213 { 214 - struct panfrost_device *pfdev = dev->dev_private; 214 + struct panfrost_device *pfdev = to_panfrost_device(dev); 215 215 struct panfrost_perfcnt *perfcnt = pfdev->perfcnt; 216 216 struct drm_panfrost_perfcnt_enable *req = data; 217 217 int ret; ··· 238 238 int panfrost_ioctl_perfcnt_dump(struct drm_device *dev, void *data, 239 239 struct drm_file *file_priv) 240 240 { 241 - struct panfrost_device *pfdev = dev->dev_private; 241 + struct panfrost_device *pfdev = to_panfrost_device(dev); 242 242 struct panfrost_perfcnt *perfcnt = pfdev->perfcnt; 243 243 struct drm_panfrost_perfcnt_dump *req = data; 244 244 void __user *user_ptr = (void __user *)(uintptr_t)req->buf_ptr; ··· 273 273 struct panfrost_device *pfdev = pfile->pfdev; 274 274 struct panfrost_perfcnt *perfcnt = pfdev->perfcnt; 275 275 276 - pm_runtime_get_sync(pfdev->dev); 276 + pm_runtime_get_sync(pfdev->base.dev); 277 277 mutex_lock(&perfcnt->lock); 278 278 if (perfcnt->user == pfile) 279 279 
panfrost_perfcnt_disable_locked(pfdev, file_priv); 280 280 mutex_unlock(&perfcnt->lock); 281 - pm_runtime_put_autosuspend(pfdev->dev); 281 + pm_runtime_put_autosuspend(pfdev->base.dev); 282 282 } 283 283 284 284 int panfrost_perfcnt_init(struct panfrost_device *pfdev) ··· 316 316 COUNTERS_PER_BLOCK * BYTES_PER_COUNTER; 317 317 } 318 318 319 - perfcnt = devm_kzalloc(pfdev->dev, sizeof(*perfcnt), GFP_KERNEL); 319 + perfcnt = devm_kzalloc(pfdev->base.dev, sizeof(*perfcnt), GFP_KERNEL); 320 320 if (!perfcnt) 321 321 return -ENOMEM; 322 322