Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel
os
linux
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * ARM Generic Interrupt Controller (GIC) v3 support
4 */
5
6#include <linux/sizes.h>
7
8#include "kvm_util.h"
9#include "processor.h"
10#include "delay.h"
11
12#include "gic.h"
13#include "gic_v3.h"
14#include "gic_private.h"
15
16#define GICV3_MAX_CPUS 512
17
18#define GICD_INT_DEF_PRI 0xa0
19#define GICD_INT_DEF_PRI_X4 ((GICD_INT_DEF_PRI << 24) |\
20 (GICD_INT_DEF_PRI << 16) |\
21 (GICD_INT_DEF_PRI << 8) |\
22 GICD_INT_DEF_PRI)
23
24#define ICC_PMR_DEF_PRIO 0xf0
25
/* Global GICv3 state, filled in once by gicv3_init(). */
struct gicv3_data {
	unsigned int nr_cpus;	/* number of vCPUs, as passed to gicv3_init() */
	unsigned int nr_spis;	/* SPI count from GICD_TYPER, capped at 1020 */
};
30
/*
 * SGI (frame 1) register block of a redistributor lives 64K past its base.
 * Argument is parenthesized so expressions with lower-precedence operators
 * (e.g. a conditional) expand correctly.
 */
#define sgi_base_from_redist(redist_base)	((redist_base) + SZ_64K)
/* Flag ORed into a "cpu_or_dist" id to address the distributor instead. */
#define DIST_BIT		(1U << 31)
33
/*
 * Architectural INTID ranges: SGIs (0-15), PPIs (16-31), SPIs (32-1019).
 * INVALID_RANGE is only reached after a failed assert in get_intid_range().
 */
enum gicv3_intid_range {
	SGI_RANGE,
	PPI_RANGE,
	SPI_RANGE,
	INVALID_RANGE,
};
40
41static struct gicv3_data gicv3_data;
42
43static void gicv3_gicd_wait_for_rwp(void)
44{
45 unsigned int count = 100000; /* 1s */
46
47 while (readl(GICD_BASE_GVA + GICD_CTLR) & GICD_CTLR_RWP) {
48 GUEST_ASSERT(count--);
49 udelay(10);
50 }
51}
52
53static inline volatile void *gicr_base_cpu(u32 cpu)
54{
55 /* Align all the redistributors sequentially */
56 return GICR_BASE_GVA + cpu * SZ_64K * 2;
57}
58
59static void gicv3_gicr_wait_for_rwp(u32 cpu)
60{
61 unsigned int count = 100000; /* 1s */
62
63 while (readl(gicr_base_cpu(cpu) + GICR_CTLR) & GICR_CTLR_RWP) {
64 GUEST_ASSERT(count--);
65 udelay(10);
66 }
67}
68
69static void gicv3_wait_for_rwp(u32 cpu_or_dist)
70{
71 if (cpu_or_dist & DIST_BIT)
72 gicv3_gicd_wait_for_rwp();
73 else
74 gicv3_gicr_wait_for_rwp(cpu_or_dist);
75}
76
77static enum gicv3_intid_range get_intid_range(unsigned int intid)
78{
79 switch (intid) {
80 case 0 ... 15:
81 return SGI_RANGE;
82 case 16 ... 31:
83 return PPI_RANGE;
84 case 32 ... 1019:
85 return SPI_RANGE;
86 }
87
88 /* We should not be reaching here */
89 GUEST_ASSERT(0);
90
91 return INVALID_RANGE;
92}
93
/*
 * Acknowledge the highest-priority pending Group-1 interrupt by reading
 * ICC_IAR1_EL1; returns the raw IAR value (INTID in the low bits).
 */
static u64 gicv3_read_iar(void)
{
	u64 irqstat = read_sysreg_s(SYS_ICC_IAR1_EL1);

	/* Order the IAR read (the ack) before subsequent memory accesses. */
	dsb(sy);
	return irqstat;
}
101
/* Signal end-of-interrupt for @irq (Group 1) via ICC_EOIR1_EL1. */
static void gicv3_write_eoir(u32 irq)
{
	write_sysreg_s(irq, SYS_ICC_EOIR1_EL1);
	isb();
}
107
/*
 * Deactivate @irq via ICC_DIR_EL1 (meaningful when priority-drop and
 * deactivation are split — see gicv3_set_eoi_split()).
 */
static void gicv3_write_dir(u32 irq)
{
	write_sysreg_s(irq, SYS_ICC_DIR_EL1);
	isb();
}
113
/* Set this CPU's interrupt priority mask (ICC_PMR_EL1). */
static void gicv3_set_priority_mask(u64 mask)
{
	write_sysreg_s(mask, SYS_ICC_PMR_EL1);
}
118
119static void gicv3_set_eoi_split(bool split)
120{
121 u32 val;
122
123 /*
124 * All other fields are read-only, so no need to read CTLR first. In
125 * fact, the kernel does the same.
126 */
127 val = split ? (1U << 1) : 0;
128 write_sysreg_s(val, SYS_ICC_CTLR_EL1);
129 isb();
130}
131
132u32 gicv3_reg_readl(u32 cpu_or_dist, u64 offset)
133{
134 volatile void *base = cpu_or_dist & DIST_BIT ? GICD_BASE_GVA
135 : sgi_base_from_redist(gicr_base_cpu(cpu_or_dist));
136 return readl(base + offset);
137}
138
139void gicv3_reg_writel(u32 cpu_or_dist, u64 offset, u32 reg_val)
140{
141 volatile void *base = cpu_or_dist & DIST_BIT ? GICD_BASE_GVA
142 : sgi_base_from_redist(gicr_base_cpu(cpu_or_dist));
143 writel(reg_val, base + offset);
144}
145
146u32 gicv3_getl_fields(u32 cpu_or_dist, u64 offset, u32 mask)
147{
148 return gicv3_reg_readl(cpu_or_dist, offset) & mask;
149}
150
151void gicv3_setl_fields(u32 cpu_or_dist, u64 offset,
152 u32 mask, u32 reg_val)
153{
154 u32 tmp = gicv3_reg_readl(cpu_or_dist, offset) & ~mask;
155
156 tmp |= (reg_val & mask);
157 gicv3_reg_writel(cpu_or_dist, offset, tmp);
158}
159
160/*
161 * We use a single offset for the distributor and redistributor maps as they
162 * have the same value in both. The only exceptions are registers that only
163 * exist in one and not the other, like GICR_WAKER that doesn't exist in the
164 * distributor map. Such registers are conveniently marked as reserved in the
165 * map that doesn't implement it; like GICR_WAKER's offset of 0x0014 being
166 * marked as "Reserved" in the Distributor map.
167 */
/*
 * Read or write the @bits_per_field-wide field for @intid in the register
 * file starting at @offset (e.g. GICD_IPRIORITYR). Private interrupts are
 * accessed through the calling vCPU's redistributor, SPIs through the
 * distributor. On return, *val holds the field's (possibly just-written)
 * value read back from the register.
 */
static void gicv3_access_reg(u32 intid, u64 offset,
		u32 reg_bits, u32 bits_per_field,
		bool write, u32 *val)
{
	u32 cpu = guest_get_vcpuid();
	enum gicv3_intid_range intid_range = get_intid_range(intid);
	u32 fields_per_reg, index, mask, shift;
	u32 cpu_or_dist;

	GUEST_ASSERT(bits_per_field <= reg_bits);
	GUEST_ASSERT(!write || *val < (1U << bits_per_field));
	/*
	 * This function does not support 64 bit accesses. Just asserting here
	 * until we implement readq/writeq.
	 */
	GUEST_ASSERT(reg_bits == 32);

	/* Locate intid's field: which register, and which bits within it. */
	fields_per_reg = reg_bits / bits_per_field;
	index = intid % fields_per_reg;
	shift = index * bits_per_field;
	mask = ((1U << bits_per_field) - 1) << shift;

	/* Set offset to the actual register holding intid's config. */
	offset += (intid / fields_per_reg) * (reg_bits / 8);

	cpu_or_dist = (intid_range == SPI_RANGE) ? DIST_BIT : cpu;

	if (write)
		gicv3_setl_fields(cpu_or_dist, offset, mask, *val << shift);
	*val = gicv3_getl_fields(cpu_or_dist, offset, mask) >> shift;
}
199
200static void gicv3_write_reg(u32 intid, u64 offset,
201 u32 reg_bits, u32 bits_per_field, u32 val)
202{
203 gicv3_access_reg(intid, offset, reg_bits,
204 bits_per_field, true, &val);
205}
206
207static u32 gicv3_read_reg(u32 intid, u64 offset,
208 u32 reg_bits, u32 bits_per_field)
209{
210 u32 val;
211
212 gicv3_access_reg(intid, offset, reg_bits,
213 bits_per_field, false, &val);
214 return val;
215}
216
/* Set intid's priority (8-bit fields, four per GICD_IPRIORITYR register). */
static void gicv3_set_priority(u32 intid, u32 prio)
{
	gicv3_write_reg(intid, GICD_IPRIORITYR, 32, 8, prio);
}
221
222/* Sets the intid to be level-sensitive or edge-triggered. */
223static void gicv3_irq_set_config(u32 intid, bool is_edge)
224{
225 u32 val;
226
227 /* N/A for private interrupts. */
228 GUEST_ASSERT(get_intid_range(intid) == SPI_RANGE);
229 val = is_edge ? 2 : 0;
230 gicv3_write_reg(intid, GICD_ICFGR, 32, 2, val);
231}
232
233static void gicv3_irq_enable(u32 intid)
234{
235 bool is_spi = get_intid_range(intid) == SPI_RANGE;
236 u32 cpu = guest_get_vcpuid();
237
238 gicv3_write_reg(intid, GICD_ISENABLER, 32, 1, 1);
239 gicv3_wait_for_rwp(is_spi ? DIST_BIT : cpu);
240}
241
242static void gicv3_irq_disable(u32 intid)
243{
244 bool is_spi = get_intid_range(intid) == SPI_RANGE;
245 u32 cpu = guest_get_vcpuid();
246
247 gicv3_write_reg(intid, GICD_ICENABLER, 32, 1, 1);
248 gicv3_wait_for_rwp(is_spi ? DIST_BIT : cpu);
249}
250
/* Mark @intid active by setting its bit in GICD_ISACTIVER. */
static void gicv3_irq_set_active(u32 intid)
{
	gicv3_write_reg(intid, GICD_ISACTIVER, 32, 1, 1);
}
255
/* Deactivate @intid by setting its bit in GICD_ICACTIVER. */
static void gicv3_irq_clear_active(u32 intid)
{
	gicv3_write_reg(intid, GICD_ICACTIVER, 32, 1, 1);
}
260
/* Return true if @intid is currently active. */
static bool gicv3_irq_get_active(u32 intid)
{
	return gicv3_read_reg(intid, GICD_ISACTIVER, 32, 1);
}
265
/* Make @intid pending by setting its bit in GICD_ISPENDR. */
static void gicv3_irq_set_pending(u32 intid)
{
	gicv3_write_reg(intid, GICD_ISPENDR, 32, 1, 1);
}
270
/* Clear @intid's pending state by setting its bit in GICD_ICPENDR. */
static void gicv3_irq_clear_pending(u32 intid)
{
	gicv3_write_reg(intid, GICD_ICPENDR, 32, 1, 1);
}
275
/* Return true if @intid is currently pending. */
static bool gicv3_irq_get_pending(u32 intid)
{
	return gicv3_read_reg(intid, GICD_ISPENDR, 32, 1);
}
280
281static void gicv3_enable_redist(volatile void *redist_base)
282{
283 u32 val = readl(redist_base + GICR_WAKER);
284 unsigned int count = 100000; /* 1s */
285
286 val &= ~GICR_WAKER_ProcessorSleep;
287 writel(val, redist_base + GICR_WAKER);
288
289 /* Wait until the processor is 'active' */
290 while (readl(redist_base + GICR_WAKER) & GICR_WAKER_ChildrenAsleep) {
291 GUEST_ASSERT(count--);
292 udelay(10);
293 }
294}
295
296static void gicv3_set_group(u32 intid, bool grp)
297{
298 u32 cpu_or_dist;
299 u32 val;
300
301 cpu_or_dist = (get_intid_range(intid) == SPI_RANGE) ? DIST_BIT : guest_get_vcpuid();
302 val = gicv3_reg_readl(cpu_or_dist, GICD_IGROUPR + (intid / 32) * 4);
303 if (grp)
304 val |= BIT(intid % 32);
305 else
306 val &= ~BIT(intid % 32);
307 gicv3_reg_writel(cpu_or_dist, GICD_IGROUPR + (intid / 32) * 4, val);
308}
309
310static void gicv3_cpu_init(unsigned int cpu)
311{
312 volatile void *sgi_base;
313 unsigned int i;
314 volatile void *redist_base_cpu;
315 u64 typer;
316
317 GUEST_ASSERT(cpu < gicv3_data.nr_cpus);
318
319 redist_base_cpu = gicr_base_cpu(cpu);
320 sgi_base = sgi_base_from_redist(redist_base_cpu);
321
322 /* Verify assumption that GICR_TYPER.Processor_number == cpu */
323 typer = readq_relaxed(redist_base_cpu + GICR_TYPER);
324 GUEST_ASSERT_EQ(GICR_TYPER_CPU_NUMBER(typer), cpu);
325
326 gicv3_enable_redist(redist_base_cpu);
327
328 /*
329 * Mark all the SGI and PPI interrupts as non-secure Group-1.
330 * Also, deactivate and disable them.
331 */
332 writel(~0, sgi_base + GICR_IGROUPR0);
333 writel(~0, sgi_base + GICR_ICACTIVER0);
334 writel(~0, sgi_base + GICR_ICENABLER0);
335
336 /* Set a default priority for all the SGIs and PPIs */
337 for (i = 0; i < 32; i += 4)
338 writel(GICD_INT_DEF_PRI_X4,
339 sgi_base + GICR_IPRIORITYR0 + i);
340
341 gicv3_gicr_wait_for_rwp(cpu);
342
343 /* Enable the GIC system register (ICC_*) access */
344 write_sysreg_s(read_sysreg_s(SYS_ICC_SRE_EL1) | ICC_SRE_EL1_SRE,
345 SYS_ICC_SRE_EL1);
346
347 /* Set a default priority threshold */
348 write_sysreg_s(ICC_PMR_DEF_PRIO, SYS_ICC_PMR_EL1);
349
350 /* Disable Group-0 interrupts */
351 write_sysreg_s(ICC_IGRPEN0_EL1_MASK, SYS_ICC_IGRPEN1_EL1);
352 /* Enable non-secure Group-1 interrupts */
353 write_sysreg_s(ICC_IGRPEN1_EL1_MASK, SYS_ICC_IGRPEN1_EL1);
354}
355
356static void gicv3_dist_init(void)
357{
358 unsigned int i;
359
360 /* Disable the distributor until we set things up */
361 writel(0, GICD_BASE_GVA + GICD_CTLR);
362 gicv3_gicd_wait_for_rwp();
363
364 /*
365 * Mark all the SPI interrupts as non-secure Group-1.
366 * Also, deactivate and disable them.
367 */
368 for (i = 32; i < gicv3_data.nr_spis; i += 32) {
369 writel(~0, GICD_BASE_GVA + GICD_IGROUPR + i / 8);
370 writel(~0, GICD_BASE_GVA + GICD_ICACTIVER + i / 8);
371 writel(~0, GICD_BASE_GVA + GICD_ICENABLER + i / 8);
372 }
373
374 /* Set a default priority for all the SPIs */
375 for (i = 32; i < gicv3_data.nr_spis; i += 4)
376 writel(GICD_INT_DEF_PRI_X4,
377 GICD_BASE_GVA + GICD_IPRIORITYR + i);
378
379 /* Wait for the settings to sync-in */
380 gicv3_gicd_wait_for_rwp();
381
382 /* Finally, enable the distributor globally with ARE */
383 writel(GICD_CTLR_ARE_NS | GICD_CTLR_ENABLE_G1A |
384 GICD_CTLR_ENABLE_G1, GICD_BASE_GVA + GICD_CTLR);
385 gicv3_gicd_wait_for_rwp();
386}
387
388static void gicv3_init(unsigned int nr_cpus)
389{
390 GUEST_ASSERT(nr_cpus <= GICV3_MAX_CPUS);
391
392 gicv3_data.nr_cpus = nr_cpus;
393 gicv3_data.nr_spis = GICD_TYPER_SPIS(
394 readl(GICD_BASE_GVA + GICD_TYPER));
395 if (gicv3_data.nr_spis > 1020)
396 gicv3_data.nr_spis = 1020;
397
398 /*
399 * Initialize only the distributor for now.
400 * The redistributor and CPU interfaces are initialized
401 * later for every PE.
402 */
403 gicv3_dist_init();
404}
405
/* GICv3 implementation of the ops consumed by the common GIC library. */
const struct gic_common_ops gicv3_ops = {
	.gic_init = gicv3_init,
	.gic_cpu_init = gicv3_cpu_init,
	.gic_irq_enable = gicv3_irq_enable,
	.gic_irq_disable = gicv3_irq_disable,
	.gic_read_iar = gicv3_read_iar,
	.gic_write_eoir = gicv3_write_eoir,
	.gic_write_dir = gicv3_write_dir,
	.gic_set_priority_mask = gicv3_set_priority_mask,
	.gic_set_eoi_split = gicv3_set_eoi_split,
	.gic_set_priority = gicv3_set_priority,
	.gic_irq_set_active = gicv3_irq_set_active,
	.gic_irq_clear_active = gicv3_irq_clear_active,
	.gic_irq_get_active = gicv3_irq_get_active,
	.gic_irq_set_pending = gicv3_irq_set_pending,
	.gic_irq_clear_pending = gicv3_irq_clear_pending,
	.gic_irq_get_pending = gicv3_irq_get_pending,
	.gic_irq_set_config = gicv3_irq_set_config,
	.gic_irq_set_group = gicv3_set_group,
};
426
427void gic_rdist_enable_lpis(gpa_t cfg_table, size_t cfg_table_size,
428 gpa_t pend_table)
429{
430 volatile void *rdist_base = gicr_base_cpu(guest_get_vcpuid());
431
432 u32 ctlr;
433 u64 val;
434
435 val = (cfg_table |
436 GICR_PROPBASER_InnerShareable |
437 GICR_PROPBASER_RaWaWb |
438 ((ilog2(cfg_table_size) - 1) & GICR_PROPBASER_IDBITS_MASK));
439 writeq_relaxed(val, rdist_base + GICR_PROPBASER);
440
441 val = (pend_table |
442 GICR_PENDBASER_InnerShareable |
443 GICR_PENDBASER_RaWaWb);
444 writeq_relaxed(val, rdist_base + GICR_PENDBASER);
445
446 ctlr = readl_relaxed(rdist_base + GICR_CTLR);
447 ctlr |= GICR_CTLR_ENABLE_LPIS;
448 writel_relaxed(ctlr, rdist_base + GICR_CTLR);
449}