Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel
os
linux
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Copyright (C) 2021 Western Digital Corporation or its affiliates.
4 * Copyright (C) 2022 Ventana Micro Systems Inc.
5 */
6
7#include <linux/acpi.h>
8#include <linux/bitfield.h>
9#include <linux/bitops.h>
10#include <linux/cpu.h>
11#include <linux/cpumask.h>
12#include <linux/interrupt.h>
13#include <linux/irqchip.h>
14#include <linux/irqchip/chained_irq.h>
15#include <linux/irqchip/riscv-aplic.h>
16#include <linux/module.h>
17#include <linux/of_address.h>
18#include <linux/printk.h>
19#include <linux/smp.h>
20
21#include "irq-riscv-aplic-main.h"
22
23#define APLIC_DISABLE_IDELIVERY 0
24#define APLIC_ENABLE_IDELIVERY 1
25#define APLIC_DISABLE_ITHRESHOLD 1
26#define APLIC_ENABLE_ITHRESHOLD 0
27
/*
 * Per-APLIC state for direct delivery mode: the common APLIC private
 * data plus the irq domain for wired interrupts and the mask of CPUs
 * whose IDCs belong to this APLIC.
 */
struct aplic_direct {
	struct aplic_priv priv;
	struct irq_domain *irqdomain;
	struct cpumask lmask;	/* CPUs with an IDC on this APLIC */
};
33
/* Per-CPU interrupt delivery control (IDC) state. */
struct aplic_idc {
	u32 hart_index;			/* APLIC hart index of this CPU */
	void __iomem *regs;		/* base of this CPU's IDC register frame */
	struct aplic_direct *direct;	/* owning APLIC instance */
};
39
/* Linux irq of the parent per-CPU external interrupt; shared by all APLICs. */
static unsigned int aplic_direct_parent_irq;
/* One IDC per possible CPU; populated in aplic_direct_setup(). */
static DEFINE_PER_CPU(struct aplic_idc, aplic_idcs);
42
static void aplic_direct_irq_eoi(struct irq_data *d)
{
	/*
	 * Nothing to do: handle_fasteoi_irq() insists on a non-NULL
	 * irq_eoi() callback, so provide this empty stub.
	 */
}
50
#ifdef CONFIG_SMP
/*
 * Retarget a wired interrupt to one CPU picked from @mask_val by
 * rewriting its APLIC TARGET register with that CPU's hart index.
 */
static int aplic_direct_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
				     bool force)
{
	struct aplic_priv *priv = irq_data_get_irq_chip_data(d);
	struct aplic_direct *direct = container_of(priv, struct aplic_direct, priv);
	struct aplic_idc *idc;
	void __iomem *target;
	unsigned int tcpu, tval;

	/*
	 * Restrict the request to CPUs this APLIC can deliver to; unless
	 * forced, additionally restrict to CPUs that are online.
	 */
	tcpu = force ? cpumask_first_and(&direct->lmask, mask_val) :
		       cpumask_first_and_and(&direct->lmask, mask_val, cpu_online_mask);
	if (tcpu >= nr_cpu_ids)
		return -EINVAL;

	idc = per_cpu_ptr(&aplic_idcs, tcpu);
	target = priv->regs + APLIC_TARGET_BASE + (d->hwirq - 1) * sizeof(u32);
	tval = FIELD_PREP(APLIC_TARGET_HART_IDX, idc->hart_index) |
	       FIELD_PREP(APLIC_TARGET_IPRIO, APLIC_DEFAULT_PRIORITY);
	writel(tval, target);

	irq_data_update_effective_affinity(d, cpumask_of(tcpu));

	return IRQ_SET_MASK_OK_DONE;
}
#endif
80
/* irqchip callbacks for wired interrupts handled in direct delivery mode. */
static struct irq_chip aplic_direct_chip = {
	.name = "APLIC-DIRECT",
	.irq_mask = aplic_irq_mask,
	.irq_unmask = aplic_irq_unmask,
	.irq_set_type = aplic_irq_set_type,
	.irq_eoi = aplic_direct_irq_eoi,
#ifdef CONFIG_SMP
	.irq_set_affinity = aplic_direct_set_affinity,
#endif
	.flags = IRQCHIP_SET_TYPE_MASKED |
		 IRQCHIP_SKIP_SET_WAKE |
		 IRQCHIP_MASK_ON_SUSPEND,
};
94
95static int aplic_direct_irqdomain_translate(struct irq_domain *d, struct irq_fwspec *fwspec,
96 unsigned long *hwirq, unsigned int *type)
97{
98 struct aplic_priv *priv = d->host_data;
99
100 return aplic_irqdomain_translate(fwspec, priv->gsi_base, hwirq, type);
101}
102
103static int aplic_direct_irqdomain_alloc(struct irq_domain *domain, unsigned int virq,
104 unsigned int nr_irqs, void *arg)
105{
106 struct aplic_priv *priv = domain->host_data;
107 struct aplic_direct *direct = container_of(priv, struct aplic_direct, priv);
108 struct irq_fwspec *fwspec = arg;
109 irq_hw_number_t hwirq;
110 unsigned int type;
111 int i, ret;
112
113 ret = aplic_irqdomain_translate(fwspec, priv->gsi_base, &hwirq, &type);
114 if (ret)
115 return ret;
116
117 for (i = 0; i < nr_irqs; i++) {
118 irq_domain_set_info(domain, virq + i, hwirq + i, &aplic_direct_chip,
119 priv, handle_fasteoi_irq, NULL, NULL);
120 irq_set_affinity(virq + i, &direct->lmask);
121 }
122
123 return 0;
124}
125
/* irq domain operations for the direct-mode APLIC linear domain. */
static const struct irq_domain_ops aplic_direct_irqdomain_ops = {
	.translate = aplic_direct_irqdomain_translate,
	.alloc = aplic_direct_irqdomain_alloc,
	.free = irq_domain_free_irqs_top,
};
131
/*
 * To handle APLIC direct interrupts, we just read the CLAIMI register,
 * which returns the highest-priority pending interrupt and clears the
 * pending bit of that interrupt. This process is repeated until the
 * CLAIMI register returns zero.
 */
static void aplic_direct_handle_irq(struct irq_desc *desc)
{
	struct aplic_idc *idc = this_cpu_ptr(&aplic_idcs);
	struct irq_domain *irqdomain = idc->direct->irqdomain;
	struct irq_chip *chip = irq_desc_get_chip(desc);
	irq_hw_number_t hw_irq;
	int irq;

	chained_irq_enter(chip, desc);

	while ((hw_irq = readl(idc->regs + APLIC_IDC_CLAIMI))) {
		/* The interrupt identity lives in the upper field of CLAIMI */
		hw_irq = hw_irq >> APLIC_IDC_TOPI_ID_SHIFT;
		irq = irq_find_mapping(irqdomain, hw_irq);

		if (unlikely(irq <= 0)) {
			/* Claimed but unmapped: warn (ratelimited) and drop */
			dev_warn_ratelimited(idc->direct->priv.dev,
					     "hw_irq %lu mapping not found\n", hw_irq);
		} else {
			generic_handle_irq(irq);
		}
	}

	chained_irq_exit(chip, desc);
}
162
163static void aplic_idc_set_delivery(struct aplic_idc *idc, bool en)
164{
165 u32 de = (en) ? APLIC_ENABLE_IDELIVERY : APLIC_DISABLE_IDELIVERY;
166 u32 th = (en) ? APLIC_ENABLE_ITHRESHOLD : APLIC_DISABLE_ITHRESHOLD;
167
168 /* Priority must be less than threshold for interrupt triggering */
169 writel(th, idc->regs + APLIC_IDC_ITHRESHOLD);
170
171 /* Delivery must be set to 1 for interrupt triggering */
172 writel(de, idc->regs + APLIC_IDC_IDELIVERY);
173}
174
175void aplic_direct_restore_states(struct aplic_priv *priv)
176{
177 struct aplic_direct *direct = container_of(priv, struct aplic_direct, priv);
178 int cpu;
179
180 for_each_cpu(cpu, &direct->lmask)
181 aplic_idc_set_delivery(per_cpu_ptr(&aplic_idcs, cpu), true);
182}
183
184static int aplic_direct_dying_cpu(unsigned int cpu)
185{
186 if (aplic_direct_parent_irq)
187 disable_percpu_irq(aplic_direct_parent_irq);
188
189 return 0;
190}
191
192static int aplic_direct_starting_cpu(unsigned int cpu)
193{
194 if (aplic_direct_parent_irq) {
195 enable_percpu_irq(aplic_direct_parent_irq,
196 irq_get_trigger_type(aplic_direct_parent_irq));
197 }
198
199 return 0;
200}
201
202static int aplic_direct_parse_parent_hwirq(struct device *dev, u32 index,
203 u32 *parent_hwirq, unsigned long *parent_hartid,
204 struct aplic_priv *priv)
205{
206 struct of_phandle_args parent;
207 unsigned long hartid;
208 int rc;
209
210 if (!is_of_node(dev->fwnode)) {
211 hartid = acpi_rintc_ext_parent_to_hartid(priv->acpi_aplic_id, index);
212 if (hartid == INVALID_HARTID)
213 return -ENODEV;
214
215 *parent_hartid = hartid;
216 *parent_hwirq = RV_IRQ_EXT;
217 return 0;
218 }
219
220 rc = of_irq_parse_one(to_of_node(dev->fwnode), index, &parent);
221 if (rc)
222 return rc;
223
224 rc = riscv_of_parent_hartid(parent.np, parent_hartid);
225 if (rc)
226 return rc;
227
228 *parent_hwirq = parent.args[0];
229 return 0;
230}
231
/*
 * Probe-time setup of an APLIC in direct delivery mode: create the
 * common APLIC context, configure the per-CPU IDCs, hook the chained
 * handler onto the parent external interrupt, and create the irq
 * domain. Returns 0 on success or a negative errno.
 */
int aplic_direct_setup(struct device *dev, void __iomem *regs)
{
	int i, j, rc, cpu, current_cpu, setup_count = 0;
	struct aplic_direct *direct;
	struct irq_domain *domain;
	struct aplic_priv *priv;
	struct aplic_idc *idc;
	unsigned long hartid;
	u32 v, hwirq;

	direct = devm_kzalloc(dev, sizeof(*direct), GFP_KERNEL);
	if (!direct)
		return -ENOMEM;
	priv = &direct->priv;

	rc = aplic_setup_priv(priv, dev, regs);
	if (rc) {
		dev_err(dev, "failed to create APLIC context\n");
		return rc;
	}

	/* Setup per-CPU IDC and target CPU mask */
	current_cpu = get_cpu();
	for (i = 0; i < priv->nr_idcs; i++) {
		/* IDCs with an unresolvable parent are skipped, not fatal */
		rc = aplic_direct_parse_parent_hwirq(dev, i, &hwirq, &hartid, priv);
		if (rc) {
			dev_warn(dev, "parent irq for IDC%d not found\n", i);
			continue;
		}

		/*
		 * Skip interrupts other than external interrupts for
		 * current privilege level.
		 */
		if (hwirq != RV_IRQ_EXT)
			continue;

		cpu = riscv_hartid_to_cpuid(hartid);
		if (cpu < 0) {
			dev_warn(dev, "invalid cpuid for IDC%d\n", i);
			continue;
		}

		cpumask_set_cpu(cpu, &direct->lmask);

		idc = per_cpu_ptr(&aplic_idcs, cpu);
		rc = riscv_get_hart_index(dev->fwnode, i, &idc->hart_index);
		if (rc) {
			dev_warn(dev, "hart index not found for IDC%d\n", i);
			continue;
		}
		idc->regs = priv->regs + APLIC_IDC_BASE + idc->hart_index * APLIC_IDC_SIZE;
		idc->direct = direct;

		/* Turn on interrupt delivery for this CPU's IDC */
		aplic_idc_set_delivery(idc, true);

		/*
		 * Boot cpu might not have APLIC hart_index = 0 so check
		 * and update target registers of all interrupts.
		 */
		if (cpu == current_cpu && idc->hart_index) {
			v = FIELD_PREP(APLIC_TARGET_HART_IDX, idc->hart_index);
			v |= FIELD_PREP(APLIC_TARGET_IPRIO, APLIC_DEFAULT_PRIORITY);
			/* Interrupt sources are 1-based; source j uses slot j-1 */
			for (j = 1; j <= priv->nr_irqs; j++)
				writel(v, priv->regs + APLIC_TARGET_BASE + (j - 1) * sizeof(u32));
		}

		setup_count++;
	}
	put_cpu();

	/* Find parent domain and register chained handler */
	domain = irq_find_matching_fwnode(riscv_get_intc_hwnode(),
					  DOMAIN_BUS_ANY);
	/* The parent mapping and CPUHP state are shared by all APLICs */
	if (!aplic_direct_parent_irq && domain) {
		aplic_direct_parent_irq = irq_create_mapping(domain, RV_IRQ_EXT);
		if (aplic_direct_parent_irq) {
			irq_set_chained_handler(aplic_direct_parent_irq,
						aplic_direct_handle_irq);

			/*
			 * Setup CPUHP notifier to enable parent
			 * interrupt on all CPUs
			 */
			cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
					  "irqchip/riscv/aplic:starting",
					  aplic_direct_starting_cpu,
					  aplic_direct_dying_cpu);
		}
	}

	/* Fail if we were not able to setup IDC for any CPU */
	if (!setup_count)
		return -ENODEV;

	/* Setup global config and interrupt delivery */
	aplic_init_hw_global(priv, false);

	/* Create irq domain instance for the APLIC */
	direct->irqdomain = irq_domain_create_linear(dev->fwnode, priv->nr_irqs + 1,
						     &aplic_direct_irqdomain_ops, priv);
	if (!direct->irqdomain) {
		dev_err(dev, "failed to create direct irq domain\n");
		return -ENOMEM;
	}

	/* Advertise the interrupt controller */
	dev_info(dev, "%d interrupts directly connected to %d CPUs\n",
		 priv->nr_irqs, priv->nr_idcs);

	return 0;
}