Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel
os
linux
1// SPDX-License-Identifier: GPL-2.0 or MIT
2/* Copyright 2018 Marty E. Plummer <hanetzer@startmail.com> */
3/* Copyright 2019 Linaro, Ltd., Rob Herring <robh@kernel.org> */
4/* Copyright 2019 Collabora ltd. */
5
6#include <linux/bitfield.h>
7#include <linux/bitmap.h>
8#include <linux/delay.h>
9#include <linux/dma-mapping.h>
10#include <linux/interrupt.h>
11#include <linux/io.h>
12#include <linux/iopoll.h>
13#include <linux/platform_device.h>
14#include <linux/pm_runtime.h>
15
16#include <drm/drm_drv.h>
17#include <drm/drm_managed.h>
18#include <drm/drm_print.h>
19
20#include "panthor_device.h"
21#include "panthor_gpu.h"
22#include "panthor_hw.h"
23#include "panthor_regs.h"
24
25#define CREATE_TRACE_POINTS
26#include "panthor_trace.h"
27
/**
 * struct panthor_gpu - GPU block management data.
 */
struct panthor_gpu {
	/** @irq: GPU irq. */
	struct panthor_irq irq;

	/** @reqs_lock: Lock protecting access to pending_reqs. */
	spinlock_t reqs_lock;

	/**
	 * @pending_reqs: Pending GPU requests.
	 *
	 * Bitmask of GPU_IRQ_* bits we issued a command for and are waiting
	 * on the IRQ handler to acknowledge.
	 */
	u32 pending_reqs;

	/** @reqs_acked: GPU request wait queue, woken when a request is acked. */
	wait_queue_head_t reqs_acked;

	/** @cache_flush_lock: Lock to serialize cache flushes. */
	struct mutex cache_flush_lock;
};
47
/* Interrupts unconditionally handled by panthor_gpu_irq_handler(). */
#define GPU_INTERRUPTS_MASK	\
	(GPU_IRQ_FAULT | \
	 GPU_IRQ_PROTM_FAULT | \
	 GPU_IRQ_RESET_COMPLETED | \
	 GPU_IRQ_CLEAN_CACHES_COMPLETED)

/*
 * Power-transition interrupts, enabled on demand through
 * panthor_gpu_power_changed_on()/panthor_gpu_power_changed_off() and used
 * to emit the gpu_power_status tracepoint.
 */
#define GPU_POWER_INTERRUPTS_MASK \
	(GPU_IRQ_POWER_CHANGED | GPU_IRQ_POWER_CHANGED_ALL)
56
/* Program the coherency protocol selected at probe time into the GPU. */
static void panthor_gpu_coherency_set(struct panthor_device *ptdev)
{
	gpu_write(ptdev, GPU_COHERENCY_PROTOCOL,
		  ptdev->gpu_info.selected_coherency);
}
62
63static void panthor_gpu_l2_config_set(struct panthor_device *ptdev)
64{
65 const struct panthor_soc_data *data = ptdev->soc_data;
66 u32 l2_config;
67 u32 i;
68
69 if (!data || !data->asn_hash_enable)
70 return;
71
72 if (GPU_ARCH_MAJOR(ptdev->gpu_info.gpu_id) < 11) {
73 drm_err(&ptdev->base, "Custom ASN hash not supported by the device");
74 return;
75 }
76
77 for (i = 0; i < ARRAY_SIZE(data->asn_hash); i++)
78 gpu_write(ptdev, GPU_ASN_HASH(i), data->asn_hash[i]);
79
80 l2_config = gpu_read(ptdev, GPU_L2_CONFIG);
81 l2_config |= GPU_L2_CONFIG_ASN_HASH_ENABLE;
82 gpu_write(ptdev, GPU_L2_CONFIG, l2_config);
83}
84
/* GPU IRQ handler: ack interrupts, report faults, wake request waiters. */
static void panthor_gpu_irq_handler(struct panthor_device *ptdev, u32 status)
{
	/* Acknowledge everything we are about to handle. */
	gpu_write(ptdev, GPU_INT_CLEAR, status);

	/* Emit a power-status tracepoint on power-transition interrupts. */
	if (tracepoint_enabled(gpu_power_status) && (status & GPU_POWER_INTERRUPTS_MASK))
		trace_gpu_power_status(ptdev->base.dev,
				       gpu_read64(ptdev, SHADER_READY),
				       gpu_read64(ptdev, TILER_READY),
				       gpu_read64(ptdev, L2_READY));

	if (status & GPU_IRQ_FAULT) {
		u32 fault_status = gpu_read(ptdev, GPU_FAULT_STATUS);
		u64 address = gpu_read64(ptdev, GPU_FAULT_ADDR);

		/* Low byte of GPU_FAULT_STATUS encodes the exception type. */
		drm_warn(&ptdev->base, "GPU Fault 0x%08x (%s) at 0x%016llx\n",
			 fault_status, panthor_exception_name(ptdev, fault_status & 0xFF),
			 address);
	}
	if (status & GPU_IRQ_PROTM_FAULT)
		drm_warn(&ptdev->base, "GPU Fault in protected mode\n");

	/* Ack any pending request completed by this interrupt and wake waiters. */
	spin_lock(&ptdev->gpu->reqs_lock);
	if (status & ptdev->gpu->pending_reqs) {
		ptdev->gpu->pending_reqs &= ~status;
		wake_up_all(&ptdev->gpu->reqs_acked);
	}
	spin_unlock(&ptdev->gpu->reqs_lock);
}
PANTHOR_IRQ_HANDLER(gpu, GPU, panthor_gpu_irq_handler);
114
/**
 * panthor_gpu_unplug() - Called when the GPU is unplugged.
 * @ptdev: Device to unplug.
 */
void panthor_gpu_unplug(struct panthor_device *ptdev)
{
	unsigned long flags;

	/*
	 * Make sure the IRQ handler is not running after that point.
	 * When runtime PM is enabled and the device is suspended, the IRQ was
	 * already suspended on the suspend path (see panthor_gpu_suspend()),
	 * so only do it here if the device is still active.
	 */
	if (!IS_ENABLED(CONFIG_PM) || pm_runtime_active(ptdev->base.dev))
		panthor_gpu_irq_suspend(&ptdev->gpu->irq);

	/* Wake-up all waiters: pending requests will never be acked now. */
	spin_lock_irqsave(&ptdev->gpu->reqs_lock, flags);
	ptdev->gpu->pending_reqs = 0;
	wake_up_all(&ptdev->gpu->reqs_acked);
	spin_unlock_irqrestore(&ptdev->gpu->reqs_lock, flags);
}
133
134/**
135 * panthor_gpu_init() - Initialize the GPU block
136 * @ptdev: Device.
137 *
138 * Return: 0 on success, a negative error code otherwise.
139 */
140int panthor_gpu_init(struct panthor_device *ptdev)
141{
142 struct panthor_gpu *gpu;
143 u32 pa_bits;
144 int ret, irq;
145
146 gpu = drmm_kzalloc(&ptdev->base, sizeof(*gpu), GFP_KERNEL);
147 if (!gpu)
148 return -ENOMEM;
149
150 spin_lock_init(&gpu->reqs_lock);
151 init_waitqueue_head(&gpu->reqs_acked);
152 mutex_init(&gpu->cache_flush_lock);
153 ptdev->gpu = gpu;
154
155 dma_set_max_seg_size(ptdev->base.dev, UINT_MAX);
156 pa_bits = GPU_MMU_FEATURES_PA_BITS(ptdev->gpu_info.mmu_features);
157 ret = dma_set_mask_and_coherent(ptdev->base.dev, DMA_BIT_MASK(pa_bits));
158 if (ret)
159 return ret;
160
161 irq = platform_get_irq_byname(to_platform_device(ptdev->base.dev), "gpu");
162 if (irq < 0)
163 return irq;
164
165 ret = panthor_request_gpu_irq(ptdev, &ptdev->gpu->irq, irq, GPU_INTERRUPTS_MASK);
166 if (ret)
167 return ret;
168
169 return 0;
170}
171
/*
 * Enable the power-transition interrupts (feeds the gpu_power_status
 * tracepoint emitted from the IRQ handler). Always returns 0.
 */
int panthor_gpu_power_changed_on(struct panthor_device *ptdev)
{
	/* NOTE(review): guard(pm_runtime_active) presumably keeps the device
	 * runtime-active for the scope of this function — confirm against
	 * the guard definition.
	 */
	guard(pm_runtime_active)(ptdev->base.dev);

	panthor_gpu_irq_enable_events(&ptdev->gpu->irq, GPU_POWER_INTERRUPTS_MASK);

	return 0;
}
180
/* Disable the power-transition interrupts enabled by panthor_gpu_power_changed_on(). */
void panthor_gpu_power_changed_off(struct panthor_device *ptdev)
{
	guard(pm_runtime_active)(ptdev->base.dev);

	panthor_gpu_irq_disable_events(&ptdev->gpu->irq, GPU_POWER_INTERRUPTS_MASK);
}
187
188/**
189 * panthor_gpu_block_power_off() - Power-off a specific block of the GPU
190 * @ptdev: Device.
191 * @blk_name: Block name.
192 * @pwroff_reg: Power-off register for this block.
193 * @pwrtrans_reg: Power transition register for this block.
194 * @mask: Sub-elements to power-off.
195 * @timeout_us: Timeout in microseconds.
196 *
197 * Return: 0 on success, a negative error code otherwise.
198 */
199int panthor_gpu_block_power_off(struct panthor_device *ptdev,
200 const char *blk_name,
201 u32 pwroff_reg, u32 pwrtrans_reg,
202 u64 mask, u32 timeout_us)
203{
204 u32 val;
205 int ret;
206
207 ret = gpu_read64_relaxed_poll_timeout(ptdev, pwrtrans_reg, val,
208 !(mask & val), 100, timeout_us);
209 if (ret) {
210 drm_err(&ptdev->base,
211 "timeout waiting on %s:%llx power transition", blk_name,
212 mask);
213 return ret;
214 }
215
216 gpu_write64(ptdev, pwroff_reg, mask);
217
218 ret = gpu_read64_relaxed_poll_timeout(ptdev, pwrtrans_reg, val,
219 !(mask & val), 100, timeout_us);
220 if (ret) {
221 drm_err(&ptdev->base,
222 "timeout waiting on %s:%llx power transition", blk_name,
223 mask);
224 return ret;
225 }
226
227 return 0;
228}
229
230/**
231 * panthor_gpu_block_power_on() - Power-on a specific block of the GPU
232 * @ptdev: Device.
233 * @blk_name: Block name.
234 * @pwron_reg: Power-on register for this block.
235 * @pwrtrans_reg: Power transition register for this block.
236 * @rdy_reg: Power transition ready register.
237 * @mask: Sub-elements to power-on.
238 * @timeout_us: Timeout in microseconds.
239 *
240 * Return: 0 on success, a negative error code otherwise.
241 */
242int panthor_gpu_block_power_on(struct panthor_device *ptdev,
243 const char *blk_name,
244 u32 pwron_reg, u32 pwrtrans_reg,
245 u32 rdy_reg, u64 mask, u32 timeout_us)
246{
247 u32 val;
248 int ret;
249
250 ret = gpu_read64_relaxed_poll_timeout(ptdev, pwrtrans_reg, val,
251 !(mask & val), 100, timeout_us);
252 if (ret) {
253 drm_err(&ptdev->base,
254 "timeout waiting on %s:%llx power transition", blk_name,
255 mask);
256 return ret;
257 }
258
259 gpu_write64(ptdev, pwron_reg, mask);
260
261 ret = gpu_read64_relaxed_poll_timeout(ptdev, rdy_reg, val,
262 (mask & val) == val,
263 100, timeout_us);
264 if (ret) {
265 drm_err(&ptdev->base, "timeout waiting on %s:%llx readiness",
266 blk_name, mask);
267 return ret;
268 }
269
270 return 0;
271}
272
/**
 * panthor_gpu_l2_power_off() - Power-off the L2-cache
 * @ptdev: Device.
 */
void panthor_gpu_l2_power_off(struct panthor_device *ptdev)
{
	/* 20ms timeout, matching panthor_gpu_l2_power_on(). */
	panthor_gpu_power_off(ptdev, L2, ptdev->gpu_info.l2_present, 20000);
}
277
278/**
279 * panthor_gpu_l2_power_on() - Power-on the L2-cache
280 * @ptdev: Device.
281 *
282 * Return: 0 on success, a negative error code otherwise.
283 */
284int panthor_gpu_l2_power_on(struct panthor_device *ptdev)
285{
286 if (ptdev->gpu_info.l2_present != 1) {
287 /*
288 * Only support one core group now.
289 * ~(l2_present - 1) unsets all bits in l2_present except
290 * the bottom bit. (l2_present - 2) has all the bits in
291 * the first core group set. AND them together to generate
292 * a mask of cores in the first core group.
293 */
294 u64 core_mask = ~(ptdev->gpu_info.l2_present - 1) &
295 (ptdev->gpu_info.l2_present - 2);
296 drm_info_once(&ptdev->base, "using only 1st core group (%lu cores from %lu)\n",
297 hweight64(core_mask),
298 hweight64(ptdev->gpu_info.shader_present));
299 }
300
301 /* Set the desired coherency mode and L2 config before the power up of L2 */
302 panthor_gpu_coherency_set(ptdev);
303 panthor_gpu_l2_config_set(ptdev);
304
305 return panthor_gpu_power_on(ptdev, L2, 1, 20000);
306}
307
308/**
309 * panthor_gpu_flush_caches() - Flush caches
310 * @ptdev: Device.
311 * @l2: L2 flush type.
312 * @lsc: LSC flush type.
313 * @other: Other flush type.
314 *
315 * Return: 0 on success, a negative error code otherwise.
316 */
317int panthor_gpu_flush_caches(struct panthor_device *ptdev,
318 u32 l2, u32 lsc, u32 other)
319{
320 unsigned long flags;
321 int ret = 0;
322
323 /* Serialize cache flush operations. */
324 guard(mutex)(&ptdev->gpu->cache_flush_lock);
325
326 spin_lock_irqsave(&ptdev->gpu->reqs_lock, flags);
327 if (!(ptdev->gpu->pending_reqs & GPU_IRQ_CLEAN_CACHES_COMPLETED)) {
328 ptdev->gpu->pending_reqs |= GPU_IRQ_CLEAN_CACHES_COMPLETED;
329 gpu_write(ptdev, GPU_CMD, GPU_FLUSH_CACHES(l2, lsc, other));
330 } else {
331 ret = -EIO;
332 }
333 spin_unlock_irqrestore(&ptdev->gpu->reqs_lock, flags);
334
335 if (ret)
336 return ret;
337
338 if (!wait_event_timeout(ptdev->gpu->reqs_acked,
339 !(ptdev->gpu->pending_reqs & GPU_IRQ_CLEAN_CACHES_COMPLETED),
340 msecs_to_jiffies(100))) {
341 spin_lock_irqsave(&ptdev->gpu->reqs_lock, flags);
342 if ((ptdev->gpu->pending_reqs & GPU_IRQ_CLEAN_CACHES_COMPLETED) != 0 &&
343 !(gpu_read(ptdev, GPU_INT_RAWSTAT) & GPU_IRQ_CLEAN_CACHES_COMPLETED))
344 ret = -ETIMEDOUT;
345 else
346 ptdev->gpu->pending_reqs &= ~GPU_IRQ_CLEAN_CACHES_COMPLETED;
347 spin_unlock_irqrestore(&ptdev->gpu->reqs_lock, flags);
348 }
349
350 if (ret) {
351 panthor_device_schedule_reset(ptdev);
352 drm_err(&ptdev->base, "Flush caches timeout");
353 }
354
355 return ret;
356}
357
358/**
359 * panthor_gpu_soft_reset() - Issue a soft-reset
360 * @ptdev: Device.
361 *
362 * Return: 0 on success, a negative error code otherwise.
363 */
364int panthor_gpu_soft_reset(struct panthor_device *ptdev)
365{
366 bool timedout = false;
367 unsigned long flags;
368
369 spin_lock_irqsave(&ptdev->gpu->reqs_lock, flags);
370 if (!drm_WARN_ON(&ptdev->base,
371 ptdev->gpu->pending_reqs & GPU_IRQ_RESET_COMPLETED)) {
372 ptdev->gpu->pending_reqs |= GPU_IRQ_RESET_COMPLETED;
373 gpu_write(ptdev, GPU_INT_CLEAR, GPU_IRQ_RESET_COMPLETED);
374 gpu_write(ptdev, GPU_CMD, GPU_SOFT_RESET);
375 }
376 spin_unlock_irqrestore(&ptdev->gpu->reqs_lock, flags);
377
378 if (!wait_event_timeout(ptdev->gpu->reqs_acked,
379 !(ptdev->gpu->pending_reqs & GPU_IRQ_RESET_COMPLETED),
380 msecs_to_jiffies(100))) {
381 spin_lock_irqsave(&ptdev->gpu->reqs_lock, flags);
382 if ((ptdev->gpu->pending_reqs & GPU_IRQ_RESET_COMPLETED) != 0 &&
383 !(gpu_read(ptdev, GPU_INT_RAWSTAT) & GPU_IRQ_RESET_COMPLETED))
384 timedout = true;
385 else
386 ptdev->gpu->pending_reqs &= ~GPU_IRQ_RESET_COMPLETED;
387 spin_unlock_irqrestore(&ptdev->gpu->reqs_lock, flags);
388 }
389
390 if (timedout) {
391 drm_err(&ptdev->base, "Soft reset timeout");
392 return -ETIMEDOUT;
393 }
394
395 ptdev->gpu->pending_reqs = 0;
396 return 0;
397}
398
/**
 * panthor_gpu_suspend() - Suspend the GPU block.
 * @ptdev: Device.
 *
 * Suspend the GPU irq. This should be called last in the suspend procedure,
 * after all other blocks have been suspended.
 */
void panthor_gpu_suspend(struct panthor_device *ptdev)
{
	/* On a fast reset, simply power down the L2; otherwise do a full soft-reset. */
	if (!ptdev->reset.fast)
		panthor_hw_soft_reset(ptdev);
	else
		panthor_hw_l2_power_off(ptdev);

	panthor_gpu_irq_suspend(&ptdev->gpu->irq);
}
416
/**
 * panthor_gpu_resume() - Resume the GPU block.
 * @ptdev: Device.
 *
 * Resume the IRQ handler and power-on the L2-cache.
 * The FW takes care of powering the other blocks.
 */
void panthor_gpu_resume(struct panthor_device *ptdev)
{
	/* IRQ first, so the L2 power transition can be observed/acknowledged. */
	panthor_gpu_irq_resume(&ptdev->gpu->irq);
	panthor_hw_l2_power_on(ptdev);
}
429