Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel
os
linux
1/* SPDX-License-Identifier: GPL-2.0-only */
2/*
3 * Copyright (C) 2015, 2016 ARM Ltd.
4 */
5#ifndef __KVM_ARM_VGIC_H
6#define __KVM_ARM_VGIC_H
7
8#include <linux/bits.h>
9#include <linux/kvm.h>
10#include <linux/irqreturn.h>
11#include <linux/mutex.h>
12#include <linux/refcount.h>
13#include <linux/spinlock.h>
14#include <linux/static_key.h>
15#include <linux/types.h>
16#include <linux/xarray.h>
17#include <kvm/iodev.h>
18#include <linux/list.h>
19#include <linux/jump_label.h>
20
21#include <linux/irqchip/arm-gic-v4.h>
22#include <linux/irqchip/arm-gic-v5.h>
23
/* Per-model vCPU limits and architectural INTID ranges */
#define VGIC_V5_MAX_CPUS	512
#define VGIC_V3_MAX_CPUS	512
#define VGIC_V2_MAX_CPUS	8	/* GICv2 limits us to 8 vCPUs */
#define VGIC_NR_IRQS_LEGACY	256
#define VGIC_NR_SGIS		16
#define VGIC_NR_PPIS		16
#define VGIC_NR_PRIVATE_IRQS	(VGIC_NR_SGIS + VGIC_NR_PPIS)
#define VGIC_MAX_SPI		1019	/* Highest SPI INTID; 1020-1023 are reserved */
#define VGIC_MAX_RESERVED	1023
#define VGIC_MIN_LPI		8192	/* First LPI INTID (GICv3) */
#define KVM_IRQCHIP_NUM_PINS	(1020 - 32)	/* one routing pin per SPI (INTIDs 32-1019) */

/*
 * GICv5 supports 128 PPIs, but only the first 64 are architected. We only
 * support the timers and PMU in KVM, both of which are architected. Rather than
 * handling twice the state, we instead opt to only support the architected set
 * in KVM for now. At a future stage, this can be bumped up to 128, if required.
 */
#define VGIC_V5_NR_PRIVATE_IRQS	64
43
44#define is_v5_type(t, i) (FIELD_GET(GICV5_HWIRQ_TYPE, (i)) == (t))
45
/*
 * GICv5 has no SGI hwirq type, so an INTID can never be an SGI there.
 * On GICv2/v3, SGIs occupy INTIDs [0, VGIC_NR_SGIS).
 */
#define __irq_is_sgi(t, i)						\
	({								\
		bool __sgi;						\
									\
		if ((t) == KVM_DEV_TYPE_ARM_VGIC_V5)			\
			__sgi = false;					\
		else							\
			__sgi = (i) < VGIC_NR_SGIS;			\
									\
		__sgi;							\
	})
60
/*
 * PPI check: GICv5 encodes the type in the INTID itself; GICv2/v3 PPIs
 * occupy INTIDs [VGIC_NR_SGIS, VGIC_NR_PRIVATE_IRQS).
 */
#define __irq_is_ppi(t, i)						\
	({								\
		bool __ppi;						\
									\
		if ((t) == KVM_DEV_TYPE_ARM_VGIC_V5) {			\
			__ppi = is_v5_type(GICV5_HWIRQ_TYPE_PPI, (i));	\
		} else {						\
			__ppi = (i) >= VGIC_NR_SGIS;			\
			__ppi &= (i) < VGIC_NR_PRIVATE_IRQS;		\
		}							\
									\
		__ppi;							\
	})
76
/*
 * SPI check: GICv5 encodes the type in the INTID itself; GICv2/v3 SPIs
 * occupy INTIDs [VGIC_NR_PRIVATE_IRQS, VGIC_MAX_SPI].
 */
#define __irq_is_spi(t, i)						\
	({								\
		bool __spi;						\
									\
		if ((t) == KVM_DEV_TYPE_ARM_VGIC_V5) {			\
			__spi = is_v5_type(GICV5_HWIRQ_TYPE_SPI, (i));	\
		} else {						\
			__spi = (i) <= VGIC_MAX_SPI;			\
			__spi &= (i) >= VGIC_NR_PRIVATE_IRQS;		\
		}							\
									\
		__spi;							\
	})
92
/*
 * LPI check: GICv5 encodes the type in the INTID itself; GICv2/v3 LPIs
 * start at VGIC_MIN_LPI.  Use the named constant rather than the magic
 * 8192 so the range stays in sync with its definition above.
 */
#define __irq_is_lpi(t, i)						\
	({								\
		bool __ret;						\
									\
		switch (t) {						\
		case KVM_DEV_TYPE_ARM_VGIC_V5:				\
			__ret = is_v5_type(GICV5_HWIRQ_TYPE_LPI, (i));	\
			break;						\
		default:						\
			__ret = (i) >= VGIC_MIN_LPI;			\
		}							\
									\
		__ret;							\
	})
107
/* Convenience wrappers taking a struct kvm pointer (k) and an INTID (i) */
#define irq_is_sgi(k, i) __irq_is_sgi((k)->arch.vgic.vgic_model, i)
#define irq_is_ppi(k, i) __irq_is_ppi((k)->arch.vgic.vgic_model, i)
#define irq_is_spi(k, i) __irq_is_spi((k)->arch.vgic.vgic_model, i)
#define irq_is_lpi(k, i) __irq_is_lpi((k)->arch.vgic.vgic_model, i)

/* SGIs and PPIs are private (per-vCPU) interrupts */
#define irq_is_private(k, i) (irq_is_ppi(k, i) || irq_is_sgi(k, i))
114
/* Extract/insert the ID field of a GICv5 hwirq encoding */
#define vgic_v5_get_hwirq_id(x)	FIELD_GET(GICV5_HWIRQ_ID, (x))
#define vgic_v5_set_hwirq_id(x)	FIELD_PREP(GICV5_HWIRQ_ID, (x))

/* Build a complete GICv5 INTID from a hwirq type and an ID */
#define __vgic_v5_set_type(t)	(FIELD_PREP(GICV5_HWIRQ_TYPE, GICV5_HWIRQ_TYPE_##t))
#define vgic_v5_make_ppi(x)	(__vgic_v5_set_type(PPI) | vgic_v5_set_hwirq_id(x))
#define vgic_v5_make_spi(x)	(__vgic_v5_set_type(SPI) | vgic_v5_set_hwirq_id(x))
#define vgic_v5_make_lpi(x)	(__vgic_v5_set_type(LPI) | vgic_v5_set_hwirq_id(x))

/* True if the GIC model emulated for VM (k) is GICv"v" */
#define __vgic_is_v(k, v) ((k)->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V##v)
#define vgic_is_v3(k)	(__vgic_is_v(k, 3))
#define vgic_is_v5(k)	(__vgic_is_v(k, 5))
126
/* The flavour of GIC present on the host hardware */
enum vgic_type {
	VGIC_V2,		/* Good ol' GICv2 */
	VGIC_V3,		/* New fancy GICv3 */
	VGIC_V5,		/* Newer, fancier GICv5 */
};
132
/* same for all guests, as depending only on the _host's_ GIC model */
struct vgic_global {
	/* type of the host GIC */
	enum vgic_type		type;

	/* Physical address of vgic virtual cpu interface */
	phys_addr_t		vcpu_base;

	/* GICV mapping, kernel VA */
	void __iomem		*vcpu_base_va;
	/* GICV mapping, HYP VA */
	void __iomem		*vcpu_hyp_va;

	/* virtual control interface mapping, kernel VA */
	void __iomem		*vctrl_base;
	/* virtual control interface mapping, HYP VA */
	void __iomem		*vctrl_hyp;

	/* Physical CPU interface, kernel VA */
	void __iomem		*gicc_base;

	/* Number of implemented list registers */
	int			nr_lr;

	/* Maintenance IRQ number */
	unsigned int		maint_irq;

	/* maximum number of VCPUs allowed (GICv2 limits us to 8) */
	int			max_gic_vcpus;

	/* Only needed for the legacy KVM_CREATE_IRQCHIP */
	bool			can_emulate_gicv2;

	/* Hardware has GICv4? */
	bool			has_gicv4;
	bool			has_gicv4_1;

	/* Pseudo GICv3 from outer space */
	bool			no_hw_deactivation;

	/* GICv3 system register CPU interface */
	struct static_key_false gicv3_cpuif;

	/* GICv3 compat mode on a GICv5 host */
	bool			has_gcie_v3_compat;

	/* Host ICH_VTR_EL2 value (GICv3 virtualisation features) */
	u32			ich_vtr_el2;
};
181
extern struct vgic_global kvm_vgic_global_state;

/* GICv2 architecturally allows up to 64 LRs; GICv3 emulation uses 16 */
#define VGIC_V2_MAX_LRS		(1 << 6)
#define VGIC_V3_MAX_LRS		16
/*
 * GICv3 LRs are indexed from the top of the array downwards.  The
 * argument is parenthesized so that expressions such as
 * VGIC_V3_LR_INDEX(a - b) expand correctly.
 */
#define VGIC_V3_LR_INDEX(lr)	(VGIC_V3_MAX_LRS - 1 - (lr))

/* Trigger configuration of an interrupt */
enum vgic_irq_config {
	VGIC_CONFIG_EDGE = 0,
	VGIC_CONFIG_LEVEL
};
192
struct vgic_irq;

/*
 * Per-irq ops overriding some common behaviours.
 *
 * Always called in non-preemptible section and the functions can use
 * kvm_arm_get_running_vcpu() to get the vcpu pointer for private IRQs.
 */
struct irq_ops {
	/* Per interrupt flags for special-cased interrupts */
	unsigned long flags;

#define VGIC_IRQ_SW_RESAMPLE	BIT(0)	/* Clear the active state for resampling */

	/*
	 * Callback function pointer to in-kernel devices that can tell us the
	 * state of the input level of mapped level-triggered IRQ faster than
	 * peeking into the physical GIC.
	 */
	bool (*get_input_level)(int vintid);

	/*
	 * Function pointer to override the queuing of an IRQ.
	 */
	bool (*queue_irq_unlock)(struct kvm *kvm, struct vgic_irq *irq,
				 unsigned long flags) __releases(&irq->irq_lock);

	/*
	 * Callback function pointer to either enable or disable direct
	 * injection for a mapped interrupt.
	 */
	void (*set_direct_injection)(struct kvm_vcpu *vcpu,
				     struct vgic_irq *irq, bool direct);
};
227
/* State of one guest-visible interrupt; irq_lock guards the mutable fields */
struct vgic_irq {
	raw_spinlock_t irq_lock;	/* Protects the content of the struct */
	u32 intid;			/* Guest visible INTID */
	struct rcu_head rcu;
	struct list_head ap_list;

	struct kvm_vcpu *vcpu;		/* SGIs and PPIs: The VCPU
					 * SPIs and LPIs: The VCPU whose ap_list
					 * this is queued on.
					 */

	struct kvm_vcpu *target_vcpu;	/* The VCPU that this interrupt should
					 * be sent to, as a result of the
					 * targets reg (v2) or the
					 * affinity reg (v3).
					 */

	bool pending_release:1;		/* Used for LPIs only, unreferenced IRQ
					 * pending a release */

	bool pending_latch:1;		/* The pending latch state used to calculate
					 * the pending state for both level
					 * and edge triggered IRQs. */
	enum vgic_irq_config config:1;	/* Level or edge */
	bool line_level:1;		/* Level only */
	bool enabled:1;
	bool active:1;
	bool hw:1;			/* Tied to HW IRQ */
	bool on_lr:1;			/* Present in a CPU LR */
	refcount_t refcount;		/* Used for LPIs */
	u32 hwintid;			/* HW INTID number */
	unsigned int host_irq;		/* linux irq corresponding to hwintid */
	union {
		u8 targets;		/* GICv2 target VCPUs mask */
		u32 mpidr;		/* GICv3 target VCPU */
	};
	u8 source;			/* GICv2 SGIs only */
	u8 active_source;		/* GICv2 SGIs only */
	u8 priority;
	u8 group;			/* 0 == group 0, 1 == group 1 */

	struct irq_ops *ops;

	void *owner;			/* Opaque pointer to reserve an interrupt
					   for in-kernel devices. */
};
274
275static inline bool vgic_irq_needs_resampling(struct vgic_irq *irq)
276{
277 return irq->ops && (irq->ops->flags & VGIC_IRQ_SW_RESAMPLE);
278}
279
struct vgic_register_region;
struct vgic_its;

/* Which emulated GIC component an MMIO device belongs to */
enum iodev_type {
	IODEV_CPUIF,
	IODEV_DIST,
	IODEV_REDIST,
	IODEV_ITS
};
289
/* An emulated GIC MMIO frame registered on the KVM I/O bus */
struct vgic_io_device {
	gpa_t base_addr;			/* guest physical base of the frame */
	union {
		struct kvm_vcpu *redist_vcpu;	/* IODEV_REDIST: owning vCPU */
		struct vgic_its *its;		/* IODEV_ITS: owning ITS */
	};
	const struct vgic_register_region *regions;
	enum iodev_type iodev_type;
	int nr_regions;				/* number of entries in regions[] */
	struct kvm_io_device dev;
};
301
/* One emulated GICv3 ITS instance */
struct vgic_its {
	/* The base address of the ITS control register frame */
	gpa_t			vgic_its_base;

	bool			enabled;
	struct vgic_io_device	iodev;
	struct kvm_device	*dev;

	/* These registers correspond to GITS_BASER{0,1} */
	u64			baser_device_table;
	u64			baser_coll_table;

	/* Protects the command queue (GITS_CBASER/CREADR/CWRITER state) */
	struct mutex		cmd_lock;
	u64			cbaser;
	u32			creadr;
	u32			cwriter;

	/* migration ABI revision in use */
	u32			abi_rev;

	/* Protects the device and collection lists */
	struct mutex		its_lock;
	struct list_head	device_list;
	struct list_head	collection_list;

	/*
	 * Caches the (device_id, event_id) -> vgic_irq translation for
	 * LPIs that are mapped and enabled.
	 */
	struct xarray		translation_cache;
};
334
struct vgic_state_iter;

/* A contiguous region of guest PA space holding one or more redistributors */
struct vgic_redist_region {
	u32 index;		/* region index */
	gpa_t base;		/* guest physical base address */
	u32 count;		/* number of redistributors or 0 if single region */
	u32 free_index;		/* index of the next free redistributor */
	struct list_head list;	/* linked on vgic_dist::rd_regions */
};
344
/* GICv5 per-VM PPI state */
struct vgic_v5_vm {
	/*
	 * We only expose a subset of PPIs to the guest. This subset is a
	 * combination of the PPIs that are actually implemented and what we
	 * actually choose to expose.
	 */
	DECLARE_BITMAP(vgic_ppi_mask, VGIC_V5_NR_PRIVATE_IRQS);

	/* A mask of the PPIs that are exposed for userspace to drive. */
	DECLARE_BITMAP(userspace_ppis, VGIC_V5_NR_PRIVATE_IRQS);

	/*
	 * The HMR itself is handled by the hardware, but we still need to have
	 * a mask that we can use when merging in pending state (only the state
	 * of Edge PPIs is merged back in from the guest and the HMR provides a
	 * convenient way to do that).
	 */
	DECLARE_BITMAP(vgic_ppi_hmr, VGIC_V5_NR_PRIVATE_IRQS);
};
364
/* Per-VM distributor state */
struct vgic_dist {
	bool			in_kernel;
	bool			ready;
	bool			initialized;

	/* vGIC model the kernel emulates for the guest (GICv2, GICv3 or GICv5) */
	u32			vgic_model;

	/* Implementation revision as reported in the GICD_IIDR */
	u32			implementation_rev;
#define KVM_VGIC_IMP_REV_2	2 /* GICv2 restorable groups */
#define KVM_VGIC_IMP_REV_3	3 /* GICv3 GICR_CTLR.{IW,CES,RWP} */
#define KVM_VGIC_IMP_REV_LATEST	KVM_VGIC_IMP_REV_3

	/* Userspace can write to GICv2 IGROUPR */
	bool			v2_groups_user_writable;

	/* Do injected MSIs require an additional device ID? */
	bool			msis_require_devid;

	int			nr_spis;

	/* The GIC maintenance IRQ for nested hypervisors. */
	u32			mi_intid;

	/* Track the number of in-flight active SPIs */
	atomic_t		active_spis;

	/* base addresses in guest physical address space: */
	gpa_t			vgic_dist_base;		/* distributor */
	union {
		/* either a GICv2 CPU interface */
		gpa_t			vgic_cpu_base;
		/* or a number of GICv3 redistributor regions */
		struct list_head	rd_regions;
	};

	/* distributor enabled */
	bool			enabled;

	/* Supports SGIs without active state */
	bool			nassgicap;

	/* Wants SGIs without active state */
	bool			nassgireq;

	struct vgic_irq		*spis;

	struct vgic_io_device	dist_iodev;
	struct vgic_io_device	cpuif_iodev;

	bool			has_its;
	bool			table_write_in_progress;

	/*
	 * Contains the attributes and gpa of the LPI configuration table.
	 * Since we report GICR_TYPER.CommonLPIAff as 0b00, we can share
	 * one address across all redistributors.
	 * GICv3 spec: IHI 0069E 6.1.1 "LPI Configuration tables"
	 */
	u64			propbaser;

	struct xarray		lpi_xa;

	/*
	 * GICv4 ITS per-VM data, containing the IRQ domain, the VPE
	 * array, the property table pointer as well as allocation
	 * data. This essentially ties the Linux IRQ core and ITS
	 * together, and avoids leaking KVM's data structures anywhere
	 * else.
	 */
	struct its_vm		its_vm;

	/*
	 * GICv5 per-VM data.
	 */
	struct vgic_v5_vm	gicv5_vm;
};
443
/* Shadow of the GICv2 hypervisor control interface, saved/restored on world switch */
struct vgic_v2_cpu_if {
	u32		vgic_hcr;
	u32		vgic_vmcr;
	u32		vgic_apr;
	u32		vgic_lr[VGIC_V2_MAX_LRS];

	unsigned int	used_lrs;	/* number of LRs currently in use */
};
452
/* Shadow of the GICv3 hypervisor control interface, saved/restored on world switch */
struct vgic_v3_cpu_if {
	u32		vgic_hcr;
	u32		vgic_vmcr;
	u32		vgic_sre;	/* Restored only, change ignored */
	u32		vgic_ap0r[4];
	u32		vgic_ap1r[4];
	u64		vgic_lr[VGIC_V3_MAX_LRS];

	/*
	 * GICv4 ITS per-VPE data, containing the doorbell IRQ, the
	 * pending table pointer, the its_vm pointer and a few other
	 * HW specific things. As for the its_vm structure, this is
	 * linking the Linux IRQ subsystem and the ITS together.
	 */
	struct its_vpe	its_vpe;

	unsigned int	used_lrs;	/* number of LRs currently in use */
};
471
/* Shadow of the GICv5 CPU interface state, saved/restored on world switch */
struct vgic_v5_cpu_if {
	u64	vgic_apr;
	u64	vgic_vmcr;

	/* PPI register state */
	DECLARE_BITMAP(vgic_ppi_dvir, VGIC_V5_NR_PRIVATE_IRQS);
	DECLARE_BITMAP(vgic_ppi_activer, VGIC_V5_NR_PRIVATE_IRQS);
	DECLARE_BITMAP(vgic_ppi_enabler, VGIC_V5_NR_PRIVATE_IRQS);
	/* We have one byte (of which 5 bits are used) per PPI for priority */
	u64	vgic_ppi_priorityr[VGIC_V5_NR_PRIVATE_IRQS / 8];

	/*
	 * The ICSR is re-used across host and guest, and hence it needs to be
	 * saved/restored. Only one copy is required as the host should block
	 * preemption between executing GIC CDRCFG and accessing the
	 * ICC_ICSR_EL1. A guest, of course, can never guarantee this, and hence
	 * it is the hyp's responsibility to keep the state consistent.
	 */
	u64	vgic_icsr;

	struct gicv5_vpe	gicv5_vpe;
};
494
/* What PPI capabilities does a GICv5 host have */
struct vgic_v5_ppi_caps {
	/* Bitmap of the PPIs actually implemented by the host */
	DECLARE_BITMAP(impl_ppi_mask, VGIC_V5_NR_PRIVATE_IRQS);
};
499
/* Per-vCPU vgic state */
struct vgic_cpu {
	/* CPU vif control registers for world switch */
	union {
		struct vgic_v2_cpu_if	vgic_v2;
		struct vgic_v3_cpu_if	vgic_v3;
		struct vgic_v5_cpu_if	vgic_v5;
	};

	struct vgic_irq		*private_irqs;

	raw_spinlock_t		ap_list_lock;	/* Protects the ap_list */

	/*
	 * List of IRQs that this VCPU should consider because they are either
	 * Active or Pending (hence the name; AP list), or because they recently
	 * were one of the two and need to be migrated off this list to another
	 * VCPU.
	 */
	struct list_head	ap_list_head;

	/*
	 * Members below are used with GICv3 emulation only and represent
	 * parts of the redistributor.
	 */
	struct vgic_io_device	rd_iodev;
	struct vgic_redist_region *rdreg;
	u32 rdreg_index;
	atomic_t syncr_busy;

	/* Contains the attributes and gpa of the LPI pending tables. */
	u64 pendbaser;
	/* GICR_CTLR.{ENABLE_LPIS,RWP} */
	atomic_t ctlr;

	/* Cache guest priority bits */
	u32 num_pri_bits;

	/* Cache guest interrupt ID bits */
	u32 num_id_bits;
};
540
extern struct static_key_false vgic_v2_cpuif_trap;
extern struct static_key_false vgic_v3_cpuif_trap;
extern struct static_key_false vgic_v3_has_v2_compat;

/* VM-scope vgic lifecycle */
int kvm_set_legacy_vgic_v2_addr(struct kvm *kvm, struct kvm_arm_device_addr *dev_addr);
void kvm_vgic_early_init(struct kvm *kvm);
int kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu);
int kvm_vgic_vcpu_nv_init(struct kvm_vcpu *vcpu);
int kvm_vgic_create(struct kvm *kvm, u32 type);
void kvm_vgic_destroy(struct kvm *kvm);
void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu);
int kvm_vgic_map_resources(struct kvm *kvm);
void kvm_vgic_finalize_idregs(struct kvm *kvm);
int kvm_vgic_hyp_init(void);
void kvm_vgic_init_cpu_hardware(void);

/* Interrupt injection and HW interrupt mapping */
int kvm_vgic_inject_irq(struct kvm *kvm, struct kvm_vcpu *vcpu,
			unsigned int intid, bool level, void *owner);
void kvm_vgic_set_irq_ops(struct kvm_vcpu *vcpu, u32 vintid,
			  struct irq_ops *ops);
void kvm_vgic_clear_irq_ops(struct kvm_vcpu *vcpu, u32 vintid);
int kvm_vgic_map_phys_irq(struct kvm_vcpu *vcpu, unsigned int host_irq,
			  u32 vintid);
int kvm_vgic_unmap_phys_irq(struct kvm_vcpu *vcpu, unsigned int vintid);
int kvm_vgic_get_map(struct kvm_vcpu *vcpu, unsigned int vintid);
bool kvm_vgic_map_is_active(struct kvm_vcpu *vcpu, unsigned int vintid);

int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu);

/* vcpu_load()/vcpu_put() hooks */
void kvm_vgic_load(struct kvm_vcpu *vcpu);
void kvm_vgic_put(struct kvm_vcpu *vcpu);

/* GICv3 maintenance state accessors */
u16 vgic_v3_get_eisr(struct kvm_vcpu *vcpu);
u16 vgic_v3_get_elrsr(struct kvm_vcpu *vcpu);
u64 vgic_v3_get_misr(struct kvm_vcpu *vcpu);
576
/* Has an in-kernel irqchip been created for this VM? */
#define irqchip_in_kernel(k)	(!!((k)->arch.vgic.in_kernel))
/* Has the vgic been fully initialized? */
#define vgic_initialized(k)	((k)->arch.vgic.initialized)
/*
 * A valid SPI must both be in the architectural SPI range for the
 * emulated GIC model and fall within the number of SPIs configured for
 * this VM.  (i) is parenthesized everywhere, matching the sibling
 * macros above, so composite expressions expand correctly.
 */
#define vgic_valid_spi(k, i)						\
	({								\
		bool __ret = irq_is_spi(k, i);				\
									\
		switch ((k)->arch.vgic.vgic_model) {			\
		case KVM_DEV_TYPE_ARM_VGIC_V5:				\
			__ret &= FIELD_GET(GICV5_HWIRQ_ID, (i)) < (k)->arch.vgic.nr_spis; \
			break;						\
		default:						\
			__ret &= (i) < ((k)->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS); \
		}							\
									\
		__ret;							\
	})
593
/* Per-vCPU entry/exit state synchronisation */
bool kvm_vcpu_has_pending_irqs(struct kvm_vcpu *vcpu);
void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu);
void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu);
void kvm_vgic_reset_mapped_irq(struct kvm_vcpu *vcpu, u32 vintid);
void kvm_vgic_process_async_update(struct kvm_vcpu *vcpu);

void vgic_v3_dispatch_sgi(struct kvm_vcpu *vcpu, u64 reg, bool allow_group1);
601
602/**
603 * kvm_vgic_get_max_vcpus - Get the maximum number of VCPUs allowed by HW
604 *
605 * The host's GIC naturally limits the maximum amount of VCPUs a guest
606 * can use.
607 */
608static inline int kvm_vgic_get_max_vcpus(void)
609{
610 return kvm_vgic_global_state.max_gic_vcpus;
611}
612
/**
 * kvm_vgic_setup_default_irq_routing:
 * Setup a default flat gsi routing table mapping all SPIs
 */
int kvm_vgic_setup_default_irq_routing(struct kvm *kvm);

int kvm_vgic_set_owner(struct kvm_vcpu *vcpu, unsigned int intid, void *owner);

struct kvm_kernel_irq_routing_entry;

/* GICv4 direct-injection forwarding control */
int kvm_vgic_v4_set_forwarding(struct kvm *kvm, int irq,
			       struct kvm_kernel_irq_routing_entry *irq_entry);

void kvm_vgic_v4_unset_forwarding(struct kvm *kvm, int host_irq);

int vgic_v4_load(struct kvm_vcpu *vcpu);
void vgic_v4_commit(struct kvm_vcpu *vcpu);
int vgic_v4_put(struct kvm_vcpu *vcpu);

/* GICv5 PPI handling */
int vgic_v5_finalize_ppi_state(struct kvm *kvm);
bool vgic_v5_ppi_queue_irq_unlock(struct kvm *kvm, struct vgic_irq *irq,
				  unsigned long flags);
void vgic_v5_set_ppi_dvi(struct kvm_vcpu *vcpu, struct vgic_irq *irq, bool dvi);

bool vgic_state_is_nested(struct kvm_vcpu *vcpu);

/* CPU HP callbacks */
void kvm_vgic_cpu_up(void);
void kvm_vgic_cpu_down(void);
642
643#endif /* __KVM_ARM_VGIC_H */