// SPDX-License-Identifier: MIT
/*
 * Copyright © 2021-2023 Intel Corporation
 */

#include "xe_mmio.h"

#include <linux/delay.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/minmax.h>
#include <linux/pci.h>

#include <drm/drm_managed.h>
#include <drm/drm_print.h>

#include "regs/xe_bars.h"
#include "xe_device.h"
#include "xe_gt_sriov_vf.h"
#include "xe_sriov.h"
#include "xe_trace.h"
#include "xe_wa.h"

#include "generated/xe_device_wa_oob.h"

static void tiles_fini(void *arg)
{
	struct xe_device *xe = arg;
	struct xe_tile *tile;
	int id;

	for_each_remote_tile(tile, xe, id)
		tile->mmio.regs = NULL;
}

/*
 * On multi-tile devices, partition the BAR space for MMIO on each tile,
 * possibly accounting for a register override of the number of available
 * tiles. tile_mmio_size covers both the tile's 4MB register space and the
 * additional space for the GGTT and other (possibly unused) regions.
 * The resulting memory layout looks like this:
 *
 *	.----------------------. <- tile_count * tile_mmio_size
 *	|         ....         |
 *	|----------------------| <- 2 * tile_mmio_size
 *	|  tile1 GTT + other   |
 *	|----------------------| <- 1 * tile_mmio_size + 4MB
 *	|  tile1->mmio.regs    |
 *	|----------------------| <- 1 * tile_mmio_size
 *	|  tile0 GTT + other   |
 *	|----------------------| <- 4MB
 *	|  tile0->mmio.regs    |
 *	'----------------------' <- 0MB
 */
static void mmio_multi_tile_setup(struct xe_device *xe, size_t tile_mmio_size)
{
	struct xe_tile *tile;
	u8 id;

	/*
	 * Nothing to be done as tile 0 has already been set up earlier with
	 * the entire BAR mapped - see xe_mmio_probe_early()
	 */
	if (xe->info.tile_count == 1)
		return;

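	/*
	 * Each remote tile gets a tile_mmio_size slice of the BAR, of which
	 * only the first 4MB is exposed as that tile's register space.
	 */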
	for_each_remote_tile(tile, xe, id)
		xe_mmio_init(&tile->mmio, tile, xe->mmio.regs + id * tile_mmio_size, SZ_4M);
}

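/**
 * xe_mmio_probe_tiles() - Set up MMIO for the remote tiles
 * @xe: the &xe_device
 *
 * Partition the BAR among the remote tiles (tile 0 is handled by
 * xe_mmio_probe_early()) and register a cleanup action that tears the
 * per-tile mappings down again on driver removal.
 *
 * Return: 0 on success, negative error code on failure.
 */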
int xe_mmio_probe_tiles(struct xe_device *xe)
{
	size_t tile_mmio_size = SZ_16M;

	mmio_multi_tile_setup(xe, tile_mmio_size);

	return devm_add_action_or_reset(xe->drm.dev, tiles_fini, xe);
}

static void mmio_fini(void *arg)
{
	struct xe_device *xe = arg;
	struct xe_tile *root_tile = xe_device_get_root_tile(xe);

	pci_iounmap(to_pci_dev(xe->drm.dev), xe->mmio.regs);
	xe->mmio.regs = NULL;
	root_tile->mmio.regs = NULL;
}

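/**
 * xe_mmio_probe_early() - Map the BAR and set up the root tile's MMIO
 * @xe: the &xe_device
 *
 * Map the whole GTTMMADR BAR, initialize the root tile's register space
 * from it and register a cleanup action that unmaps the BAR again.
 *
 * Return: 0 on success, negative error code on failure.
 */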
int xe_mmio_probe_early(struct xe_device *xe)
{
	struct xe_tile *root_tile = xe_device_get_root_tile(xe);
	struct pci_dev *pdev = to_pci_dev(xe->drm.dev);

	/*
	 * Map the entire BAR.
	 * The first 16MB of the BAR belongs to the root tile and includes:
	 * registers (0-4MB), reserved space (4MB-8MB) and the GGTT (8MB-16MB).
	 */
	xe->mmio.size = pci_resource_len(pdev, GTTMMADR_BAR);
	xe->mmio.regs = pci_iomap(pdev, GTTMMADR_BAR, 0);
	if (!xe->mmio.regs) {
		drm_err(&xe->drm, "failed to map registers\n");
		return -EIO;
	}

	/* Set up the first tile; other tiles (if present) will be set up later. */
	xe_mmio_init(&root_tile->mmio, root_tile, xe->mmio.regs, SZ_4M);

	return devm_add_action_or_reset(xe->drm.dev, mmio_fini, xe);
}
ALLOW_ERROR_INJECTION(xe_mmio_probe_early, ERRNO); /* See xe_pci_probe() */

/**
 * xe_mmio_init() - Initialize an MMIO instance
 * @mmio: Pointer to the MMIO instance to initialize
 * @tile: The tile to which the MMIO region belongs
 * @ptr: Pointer to the start of the MMIO region
 * @size: The size of the MMIO region in bytes
 *
 * This is a convenience function for minimal initialization of struct xe_mmio.
 */
void xe_mmio_init(struct xe_mmio *mmio, struct xe_tile *tile, void __iomem *ptr, u32 size)
{
	xe_tile_assert(tile, size <= XE_REG_ADDR_MAX);

	mmio->regs = ptr;
	mmio->regs_size = size;
	mmio->tile = tile;
}

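/*
 * Device workaround 15015404425: on affected platforms, pending MMIO writes
 * must be flushed out to the hardware with a series of dummy writes before a
 * register read.
 */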
static void mmio_flush_pending_writes(struct xe_mmio *mmio)
{
#define DUMMY_REG_OFFSET	0x130030
	int i;

	if (!XE_DEVICE_WA(mmio->tile->xe, 15015404425))
		return;

	/* 4 dummy writes */
	for (i = 0; i < 4; i++)
		writel(0, mmio->regs + DUMMY_REG_OFFSET);
}

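/*
 * The read accessors below flush pending writes first (where the workaround
 * above applies) and trace every access via the xe_reg_rw tracepoint.
 */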
u8 xe_mmio_read8(struct xe_mmio *mmio, struct xe_reg reg)
{
	u32 addr = xe_mmio_adjusted_addr(mmio, reg.addr);
	u8 val;

	mmio_flush_pending_writes(mmio);

	val = readb(mmio->regs + addr);
	trace_xe_reg_rw(mmio, false, addr, val, sizeof(val));

	return val;
}

void xe_mmio_write8(struct xe_mmio *mmio, struct xe_reg reg, u8 val)
{
	u32 addr = xe_mmio_adjusted_addr(mmio, reg.addr);

	trace_xe_reg_rw(mmio, true, addr, val, sizeof(val));

	writeb(val, mmio->regs + addr);
}

u16 xe_mmio_read16(struct xe_mmio *mmio, struct xe_reg reg)
{
	u32 addr = xe_mmio_adjusted_addr(mmio, reg.addr);
	u16 val;

	mmio_flush_pending_writes(mmio);

	val = readw(mmio->regs + addr);
	trace_xe_reg_rw(mmio, false, addr, val, sizeof(val));

	return val;
}

void xe_mmio_write32(struct xe_mmio *mmio, struct xe_reg reg, u32 val)
{
	u32 addr = xe_mmio_adjusted_addr(mmio, reg.addr);

	trace_xe_reg_rw(mmio, true, addr, val, sizeof(val));

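	/*
	 * Registers not marked as VF-accessible can't be touched directly
	 * when running as a VF; route those accesses through the SR-IOV VF
	 * infrastructure instead.
	 */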
	if (!reg.vf && IS_SRIOV_VF(mmio->tile->xe))
		xe_gt_sriov_vf_write32(mmio->sriov_vf_gt ?:
				       mmio->tile->primary_gt, reg, val);
	else
		writel(val, mmio->regs + addr);
}

u32 xe_mmio_read32(struct xe_mmio *mmio, struct xe_reg reg)
{
	u32 addr = xe_mmio_adjusted_addr(mmio, reg.addr);
	u32 val;

	mmio_flush_pending_writes(mmio);

	if (!reg.vf && IS_SRIOV_VF(mmio->tile->xe))
		val = xe_gt_sriov_vf_read32(mmio->sriov_vf_gt ?:
					    mmio->tile->primary_gt, reg);
	else
		val = readl(mmio->regs + addr);

	trace_xe_reg_rw(mmio, false, addr, val, sizeof(val));

	return val;
}

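/**
 * xe_mmio_rmw32() - Read-modify-write a 32-bit register
 * @mmio: MMIO target
 * @reg: register to access
 * @clr: bits to clear
 * @set: bits to set
 *
 * Return: the old value of the register, before the modification.
 */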
u32 xe_mmio_rmw32(struct xe_mmio *mmio, struct xe_reg reg, u32 clr, u32 set)
{
	u32 old, reg_val;

	old = xe_mmio_read32(mmio, reg);
	reg_val = (old & ~clr) | set;
	xe_mmio_write32(mmio, reg, reg_val);

	return old;
}

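/**
 * xe_mmio_write32_and_verify() - Write a 32-bit register and verify the readback
 * @mmio: MMIO target
 * @reg: register to access
 * @val: value to write
 * @mask: mask to apply to the value read back
 * @eval: expected value of the masked readback
 *
 * Return: 0 if the masked readback equals @eval, -EINVAL otherwise.
 */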
int xe_mmio_write32_and_verify(struct xe_mmio *mmio,
			       struct xe_reg reg, u32 val, u32 mask, u32 eval)
{
	u32 reg_val;

	xe_mmio_write32(mmio, reg, val);
	reg_val = xe_mmio_read32(mmio, reg);

	return (reg_val & mask) != eval ? -EINVAL : 0;
}

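/**
 * xe_mmio_in_range() - Check whether a register lies within an MMIO range
 * @mmio: MMIO target
 * @range: the register range to check against, may be NULL
 * @reg: the register to check
 *
 * Return: true if the adjusted address of @reg falls inside @range.
 */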
bool xe_mmio_in_range(const struct xe_mmio *mmio,
		      const struct xe_mmio_range *range,
		      struct xe_reg reg)
{
	u32 addr = xe_mmio_adjusted_addr(mmio, reg.addr);

	return range && addr >= range->start && addr <= range->end;
}

/**
 * xe_mmio_read64_2x32() - Read a 64-bit register as two 32-bit reads
 * @mmio: MMIO target
 * @reg: register to read value from
 *
 * Although Intel GPUs have some 64-bit registers, the hardware officially
 * only supports GTTMMADR register reads of 32 bits or smaller. Even though
 * a readq operation may return a reasonable value, that violation of the
 * spec shouldn't be relied upon and all 64-bit register reads should be
 * performed as two 32-bit reads of the upper and lower dwords.
 *
 * When reading registers that may be changing (such as counters), a rollover
 * of the lower dword between the two 32-bit reads can be problematic. This
 * function attempts to ensure the upper dword has stabilized before
 * returning the 64-bit value.
 *
 * Note that because this function may re-read the register multiple times
 * while waiting for the value to stabilize it should not be used to read
 * any registers where read operations have side effects.
 *
 * Returns the value of the 64-bit register.
 */
u64 xe_mmio_read64_2x32(struct xe_mmio *mmio, struct xe_reg reg)
{
	struct xe_reg reg_udw = { .addr = reg.addr + 0x4 };
	u32 ldw, udw, oldudw, retries;

	/*
	 * The two dwords of a 64-bit register can never straddle the offset
	 * adjustment cutoff.
	 */
	xe_tile_assert(mmio->tile, !in_range(mmio->adj_limit, reg.addr + 1, 7));

	oldudw = xe_mmio_read32(mmio, reg_udw);
	for (retries = 5; retries; --retries) {
		ldw = xe_mmio_read32(mmio, reg);
		udw = xe_mmio_read32(mmio, reg_udw);

		if (udw == oldudw)
			break;

		oldudw = udw;
	}

	drm_WARN(&mmio->tile->xe->drm, retries == 0,
		 "64-bit read of %#x did not stabilize\n", reg.addr);

	return (u64)udw << 32 | ldw;
}

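/*
 * Common polling loop behind xe_mmio_wait32() and xe_mmio_wait32_not():
 * poll with exponential backoff, starting at a 10us wait and doubling it on
 * every iteration (clamped so it never sleeps past the deadline), then
 * re-check the register one final time after the timeout expires so that a
 * match on the last read still counts as success.
 */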
static int __xe_mmio_wait32(struct xe_mmio *mmio, struct xe_reg reg, u32 mask, u32 val,
			    u32 timeout_us, u32 *out_val, bool atomic, bool expect_match)
{
	ktime_t cur = ktime_get_raw();
	const ktime_t end = ktime_add_us(cur, timeout_us);
	int ret = -ETIMEDOUT;
	s64 wait = 10;
	u32 read;
	bool check;

	for (;;) {
		read = xe_mmio_read32(mmio, reg);

		check = (read & mask) == val;
		if (!expect_match)
			check = !check;

		if (check) {
			ret = 0;
			break;
		}

		cur = ktime_get_raw();
		if (!ktime_before(cur, end))
			break;

		if (ktime_after(ktime_add_us(cur, wait), end))
			wait = ktime_us_delta(end, cur);

		if (atomic)
			udelay(wait);
		else
			usleep_range(wait, wait << 1);
		wait <<= 1;
	}

	if (ret != 0) {
		read = xe_mmio_read32(mmio, reg);

		check = (read & mask) == val;
		if (!expect_match)
			check = !check;

		if (check)
			ret = 0;
	}

	if (out_val)
		*out_val = read;

	return ret;
}

/**
 * xe_mmio_wait32() - Wait for a register to match the desired masked value
 * @mmio: MMIO target
 * @reg: register to read value from
 * @mask: mask to be applied to the value read from the register
 * @val: desired value after applying the mask
 * @timeout_us: time out after this period of time. Wait logic tries to be
 * smart, applying an exponential backoff until @timeout_us is reached.
 * @out_val: if not NULL, points where to store the last unmasked value
 * @atomic: needs to be true if calling from an atomic context
 *
 * This function polls for the desired masked value and returns zero on success
 * or -ETIMEDOUT if timed out.
 *
 * Note that @timeout_us represents the minimum amount of time to wait before
 * giving up. The actual time taken by this function can be a little more than
 * @timeout_us for different reasons, especially in non-atomic contexts. Thus,
 * it is possible that this function succeeds even after @timeout_us has passed.
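 *
 * Illustrative example (register and arguments are hypothetical): poll for
 * up to 100us, from non-atomic context, until bit 0 of the register reads
 * back as zero:
 *
 *	err = xe_mmio_wait32(mmio, reg, BIT(0), 0, 100, NULL, false);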
 */
int xe_mmio_wait32(struct xe_mmio *mmio, struct xe_reg reg, u32 mask, u32 val, u32 timeout_us,
		   u32 *out_val, bool atomic)
{
	return __xe_mmio_wait32(mmio, reg, mask, val, timeout_us, out_val, atomic, true);
}

/**
 * xe_mmio_wait32_not() - Wait for a register to return anything other than the given masked value
 * @mmio: MMIO target
 * @reg: register to read value from
 * @mask: mask to be applied to the value read from the register
 * @val: value not to be matched after applying the mask
 * @timeout_us: time out after this period of time
 * @out_val: if not NULL, points where to store the last unmasked value
 * @atomic: needs to be true if calling from an atomic context
 *
 * This function works exactly like xe_mmio_wait32() with the exception that
 * @val is expected not to be matched.
 */
int xe_mmio_wait32_not(struct xe_mmio *mmio, struct xe_reg reg, u32 mask, u32 val, u32 timeout_us,
		       u32 *out_val, bool atomic)
{
	return __xe_mmio_wait32(mmio, reg, mask, val, timeout_us, out_val, atomic, false);
}

#ifdef CONFIG_PCI_IOV
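/*
 * Each VF's view of a register sits at a fixed stride from the PF's copy;
 * the stride depends on the graphics IP version.
 */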
static size_t vf_regs_stride(struct xe_device *xe)
{
	return GRAPHICS_VERx100(xe) > 1200 ? 0x400 : 0x1000;
}

/**
 * xe_mmio_init_vf_view() - Initialize an MMIO instance for VF-like register accesses
 * @mmio: the target &xe_mmio to initialize as the VF's view
 * @base: the source &xe_mmio to initialize from
 * @vfid: the VF identifier
 */
void xe_mmio_init_vf_view(struct xe_mmio *mmio, const struct xe_mmio *base, unsigned int vfid)
{
	struct xe_tile *tile = base->tile;
	struct xe_device *xe = tile->xe;
	size_t offset = vf_regs_stride(xe) * vfid;

	xe_assert(xe, IS_SRIOV_PF(xe));
	xe_assert(xe, vfid);
	xe_assert(xe, !base->sriov_vf_gt);
	xe_assert(xe, base->regs_size > offset);

	*mmio = *base;
	mmio->regs += offset;
	mmio->regs_size -= offset;
}
#endif