Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

Merge branch kvm-arm64/gicv5-prologue into kvmarm-master/next

* kvm-arm64/gicv5-prologue:
: .
: Prologue to GICv5 support, courtesy of Sascha Bischoff.
:
: This is preliminary work that sets the scene for the full-blown
: support.
: .
irqchip/gic-v5: Check if impl is virt capable
KVM: arm64: gic: Set vgic_model before initing private IRQs
arm64/sysreg: Drop ICH_HFGRTR_EL2.ICC_HAPR_EL1 and make RES1
KVM: arm64: gic-v3: Switch vGIC-v3 to use generated ICH_VMCR_EL2

Signed-off-by: Marc Zyngier <maz@kernel.org>

+72 -101
-1
arch/arm64/include/asm/el2_setup.h
··· 235 235 ICH_HFGRTR_EL2_ICC_ICSR_EL1 | \ 236 236 ICH_HFGRTR_EL2_ICC_PCR_EL1 | \ 237 237 ICH_HFGRTR_EL2_ICC_HPPIR_EL1 | \ 238 - ICH_HFGRTR_EL2_ICC_HAPR_EL1 | \ 239 238 ICH_HFGRTR_EL2_ICC_CR0_EL1 | \ 240 239 ICH_HFGRTR_EL2_ICC_IDRn_EL1 | \ 241 240 ICH_HFGRTR_EL2_ICC_APR_EL1)
-21
arch/arm64/include/asm/sysreg.h
··· 560 560 #define SYS_ICC_SRE_EL2 sys_reg(3, 4, 12, 9, 5) 561 561 #define SYS_ICH_EISR_EL2 sys_reg(3, 4, 12, 11, 3) 562 562 #define SYS_ICH_ELRSR_EL2 sys_reg(3, 4, 12, 11, 5) 563 - #define SYS_ICH_VMCR_EL2 sys_reg(3, 4, 12, 11, 7) 564 563 565 564 #define __SYS__LR0_EL2(x) sys_reg(3, 4, 12, 12, x) 566 565 #define SYS_ICH_LR0_EL2 __SYS__LR0_EL2(0) ··· 986 987 #define ICH_LR_PHYS_ID_MASK (0x3ffULL << ICH_LR_PHYS_ID_SHIFT) 987 988 #define ICH_LR_PRIORITY_SHIFT 48 988 989 #define ICH_LR_PRIORITY_MASK (0xffULL << ICH_LR_PRIORITY_SHIFT) 989 - 990 - /* ICH_VMCR_EL2 bit definitions */ 991 - #define ICH_VMCR_ACK_CTL_SHIFT 2 992 - #define ICH_VMCR_ACK_CTL_MASK (1 << ICH_VMCR_ACK_CTL_SHIFT) 993 - #define ICH_VMCR_FIQ_EN_SHIFT 3 994 - #define ICH_VMCR_FIQ_EN_MASK (1 << ICH_VMCR_FIQ_EN_SHIFT) 995 - #define ICH_VMCR_CBPR_SHIFT 4 996 - #define ICH_VMCR_CBPR_MASK (1 << ICH_VMCR_CBPR_SHIFT) 997 - #define ICH_VMCR_EOIM_SHIFT 9 998 - #define ICH_VMCR_EOIM_MASK (1 << ICH_VMCR_EOIM_SHIFT) 999 - #define ICH_VMCR_BPR1_SHIFT 18 1000 - #define ICH_VMCR_BPR1_MASK (7 << ICH_VMCR_BPR1_SHIFT) 1001 - #define ICH_VMCR_BPR0_SHIFT 21 1002 - #define ICH_VMCR_BPR0_MASK (7 << ICH_VMCR_BPR0_SHIFT) 1003 - #define ICH_VMCR_PMR_SHIFT 24 1004 - #define ICH_VMCR_PMR_MASK (0xffUL << ICH_VMCR_PMR_SHIFT) 1005 - #define ICH_VMCR_ENG0_SHIFT 0 1006 - #define ICH_VMCR_ENG0_MASK (1 << ICH_VMCR_ENG0_SHIFT) 1007 - #define ICH_VMCR_ENG1_SHIFT 1 1008 - #define ICH_VMCR_ENG1_MASK (1 << ICH_VMCR_ENG1_SHIFT) 1009 990 1010 991 /* 1011 992 * Permission Indirection Extension (PIE) permission encodings.
+25 -44
arch/arm64/kvm/hyp/vgic-v3-sr.c
··· 569 569 continue; 570 570 571 571 /* Group-0 interrupt, but Group-0 disabled? */ 572 - if (!(val & ICH_LR_GROUP) && !(vmcr & ICH_VMCR_ENG0_MASK)) 572 + if (!(val & ICH_LR_GROUP) && !(vmcr & ICH_VMCR_EL2_VENG0_MASK)) 573 573 continue; 574 574 575 575 /* Group-1 interrupt, but Group-1 disabled? */ 576 - if ((val & ICH_LR_GROUP) && !(vmcr & ICH_VMCR_ENG1_MASK)) 576 + if ((val & ICH_LR_GROUP) && !(vmcr & ICH_VMCR_EL2_VENG1_MASK)) 577 577 continue; 578 578 579 579 /* Not the highest priority? */ ··· 646 646 647 647 static unsigned int __vgic_v3_get_bpr0(u32 vmcr) 648 648 { 649 - return (vmcr & ICH_VMCR_BPR0_MASK) >> ICH_VMCR_BPR0_SHIFT; 649 + return FIELD_GET(ICH_VMCR_EL2_VBPR0, vmcr); 650 650 } 651 651 652 652 static unsigned int __vgic_v3_get_bpr1(u32 vmcr) 653 653 { 654 654 unsigned int bpr; 655 655 656 - if (vmcr & ICH_VMCR_CBPR_MASK) { 656 + if (vmcr & ICH_VMCR_EL2_VCBPR_MASK) { 657 657 bpr = __vgic_v3_get_bpr0(vmcr); 658 658 if (bpr < 7) 659 659 bpr++; 660 660 } else { 661 - bpr = (vmcr & ICH_VMCR_BPR1_MASK) >> ICH_VMCR_BPR1_SHIFT; 661 + bpr = FIELD_GET(ICH_VMCR_EL2_VBPR1, vmcr); 662 662 } 663 663 664 664 return bpr; ··· 758 758 if (grp != !!(lr_val & ICH_LR_GROUP)) 759 759 goto spurious; 760 760 761 - pmr = (vmcr & ICH_VMCR_PMR_MASK) >> ICH_VMCR_PMR_SHIFT; 761 + pmr = FIELD_GET(ICH_VMCR_EL2_VPMR, vmcr); 762 762 lr_prio = (lr_val & ICH_LR_PRIORITY_MASK) >> ICH_LR_PRIORITY_SHIFT; 763 763 if (pmr <= lr_prio) 764 764 goto spurious; ··· 806 806 int lr; 807 807 808 808 /* EOImode == 0, nothing to be done here */ 809 - if (!(vmcr & ICH_VMCR_EOIM_MASK)) 809 + if (!(vmcr & ICH_VMCR_EL2_VEOIM_MASK)) 810 810 return 1; 811 811 812 812 /* No deactivate to be performed on an LPI */ ··· 849 849 } 850 850 851 851 /* EOImode == 1 and not an LPI, nothing to be done here */ 852 - if ((vmcr & ICH_VMCR_EOIM_MASK) && !(vid >= VGIC_MIN_LPI)) 852 + if ((vmcr & ICH_VMCR_EL2_VEOIM_MASK) && !(vid >= VGIC_MIN_LPI)) 853 853 return; 854 854 855 855 lr_prio = (lr_val & 
ICH_LR_PRIORITY_MASK) >> ICH_LR_PRIORITY_SHIFT; ··· 865 865 866 866 static void __vgic_v3_read_igrpen0(struct kvm_vcpu *vcpu, u32 vmcr, int rt) 867 867 { 868 - vcpu_set_reg(vcpu, rt, !!(vmcr & ICH_VMCR_ENG0_MASK)); 868 + vcpu_set_reg(vcpu, rt, FIELD_GET(ICH_VMCR_EL2_VENG0, vmcr)); 869 869 } 870 870 871 871 static void __vgic_v3_read_igrpen1(struct kvm_vcpu *vcpu, u32 vmcr, int rt) 872 872 { 873 - vcpu_set_reg(vcpu, rt, !!(vmcr & ICH_VMCR_ENG1_MASK)); 873 + vcpu_set_reg(vcpu, rt, FIELD_GET(ICH_VMCR_EL2_VENG1, vmcr)); 874 874 } 875 875 876 876 static void __vgic_v3_write_igrpen0(struct kvm_vcpu *vcpu, u32 vmcr, int rt) 877 877 { 878 878 u64 val = vcpu_get_reg(vcpu, rt); 879 879 880 - if (val & 1) 881 - vmcr |= ICH_VMCR_ENG0_MASK; 882 - else 883 - vmcr &= ~ICH_VMCR_ENG0_MASK; 880 + FIELD_MODIFY(ICH_VMCR_EL2_VENG0, &vmcr, val & 1); 884 881 885 882 __vgic_v3_write_vmcr(vmcr); 886 883 } ··· 886 889 { 887 890 u64 val = vcpu_get_reg(vcpu, rt); 888 891 889 - if (val & 1) 890 - vmcr |= ICH_VMCR_ENG1_MASK; 891 - else 892 - vmcr &= ~ICH_VMCR_ENG1_MASK; 892 + FIELD_MODIFY(ICH_VMCR_EL2_VENG1, &vmcr, val & 1); 893 893 894 894 __vgic_v3_write_vmcr(vmcr); 895 895 } ··· 910 916 if (val < bpr_min) 911 917 val = bpr_min; 912 918 913 - val <<= ICH_VMCR_BPR0_SHIFT; 914 - val &= ICH_VMCR_BPR0_MASK; 915 - vmcr &= ~ICH_VMCR_BPR0_MASK; 916 - vmcr |= val; 919 + FIELD_MODIFY(ICH_VMCR_EL2_VBPR0, &vmcr, val); 917 920 918 921 __vgic_v3_write_vmcr(vmcr); 919 922 } ··· 920 929 u64 val = vcpu_get_reg(vcpu, rt); 921 930 u8 bpr_min = __vgic_v3_bpr_min(); 922 931 923 - if (vmcr & ICH_VMCR_CBPR_MASK) 932 + if (FIELD_GET(ICH_VMCR_EL2_VCBPR, val)) 924 933 return; 925 934 926 935 /* Enforce BPR limiting */ 927 936 if (val < bpr_min) 928 937 val = bpr_min; 929 938 930 - val <<= ICH_VMCR_BPR1_SHIFT; 931 - val &= ICH_VMCR_BPR1_MASK; 932 - vmcr &= ~ICH_VMCR_BPR1_MASK; 933 - vmcr |= val; 939 + FIELD_MODIFY(ICH_VMCR_EL2_VBPR1, &vmcr, val); 934 940 935 941 __vgic_v3_write_vmcr(vmcr); 936 942 } ··· 1017 1029 1018 
1030 static void __vgic_v3_read_pmr(struct kvm_vcpu *vcpu, u32 vmcr, int rt) 1019 1031 { 1020 - vmcr &= ICH_VMCR_PMR_MASK; 1021 - vmcr >>= ICH_VMCR_PMR_SHIFT; 1022 - vcpu_set_reg(vcpu, rt, vmcr); 1032 + vcpu_set_reg(vcpu, rt, FIELD_GET(ICH_VMCR_EL2_VPMR, vmcr)); 1023 1033 } 1024 1034 1025 1035 static void __vgic_v3_write_pmr(struct kvm_vcpu *vcpu, u32 vmcr, int rt) 1026 1036 { 1027 1037 u32 val = vcpu_get_reg(vcpu, rt); 1028 1038 1029 - val <<= ICH_VMCR_PMR_SHIFT; 1030 - val &= ICH_VMCR_PMR_MASK; 1031 - vmcr &= ~ICH_VMCR_PMR_MASK; 1032 - vmcr |= val; 1039 + FIELD_MODIFY(ICH_VMCR_EL2_VPMR, &vmcr, val); 1033 1040 1034 1041 write_gicreg(vmcr, ICH_VMCR_EL2); 1035 1042 } ··· 1047 1064 /* A3V */ 1048 1065 val |= ((vtr >> 21) & 1) << ICC_CTLR_EL1_A3V_SHIFT; 1049 1066 /* EOImode */ 1050 - val |= ((vmcr & ICH_VMCR_EOIM_MASK) >> ICH_VMCR_EOIM_SHIFT) << ICC_CTLR_EL1_EOImode_SHIFT; 1067 + val |= FIELD_PREP(ICC_CTLR_EL1_EOImode_MASK, 1068 + FIELD_GET(ICH_VMCR_EL2_VEOIM, vmcr)); 1051 1069 /* CBPR */ 1052 - val |= (vmcr & ICH_VMCR_CBPR_MASK) >> ICH_VMCR_CBPR_SHIFT; 1070 + val |= FIELD_PREP(ICC_CTLR_EL1_CBPR_MASK, 1071 + FIELD_GET(ICH_VMCR_EL2_VCBPR, vmcr)); 1053 1072 1054 1073 vcpu_set_reg(vcpu, rt, val); 1055 1074 } ··· 1060 1075 { 1061 1076 u32 val = vcpu_get_reg(vcpu, rt); 1062 1077 1063 - if (val & ICC_CTLR_EL1_CBPR_MASK) 1064 - vmcr |= ICH_VMCR_CBPR_MASK; 1065 - else 1066 - vmcr &= ~ICH_VMCR_CBPR_MASK; 1078 + FIELD_MODIFY(ICH_VMCR_EL2_VCBPR, &vmcr, 1079 + FIELD_GET(ICC_CTLR_EL1_CBPR_MASK, val)); 1067 1080 1068 - if (val & ICC_CTLR_EL1_EOImode_MASK) 1069 - vmcr |= ICH_VMCR_EOIM_MASK; 1070 - else 1071 - vmcr &= ~ICH_VMCR_EOIM_MASK; 1081 + FIELD_MODIFY(ICH_VMCR_EL2_VEOIM, &vmcr, 1082 + FIELD_GET(ICC_CTLR_EL1_EOImode_MASK, val)); 1072 1083 1073 1084 write_gicreg(vmcr, ICH_VMCR_EL2); 1074 1085 }
+4 -4
arch/arm64/kvm/vgic/vgic-init.c
··· 140 140 goto out_unlock; 141 141 } 142 142 143 + kvm->arch.vgic.in_kernel = true; 144 + kvm->arch.vgic.vgic_model = type; 145 + kvm->arch.vgic.implementation_rev = KVM_VGIC_IMP_REV_LATEST; 146 + 143 147 kvm_for_each_vcpu(i, vcpu, kvm) { 144 148 ret = vgic_allocate_private_irqs_locked(vcpu, type); 145 149 if (ret) ··· 159 155 160 156 goto out_unlock; 161 157 } 162 - 163 - kvm->arch.vgic.in_kernel = true; 164 - kvm->arch.vgic.vgic_model = type; 165 - kvm->arch.vgic.implementation_rev = KVM_VGIC_IMP_REV_LATEST; 166 158 167 159 kvm->arch.vgic.vgic_dist_base = VGIC_ADDR_UNDEF; 168 160
+4 -4
arch/arm64/kvm/vgic/vgic-v3-nested.c
··· 202 202 if ((hcr & ICH_HCR_EL2_NPIE) && !mi_state.pend) 203 203 reg |= ICH_MISR_EL2_NP; 204 204 205 - if ((hcr & ICH_HCR_EL2_VGrp0EIE) && (vmcr & ICH_VMCR_ENG0_MASK)) 205 + if ((hcr & ICH_HCR_EL2_VGrp0EIE) && (vmcr & ICH_VMCR_EL2_VENG0_MASK)) 206 206 reg |= ICH_MISR_EL2_VGrp0E; 207 207 208 - if ((hcr & ICH_HCR_EL2_VGrp0DIE) && !(vmcr & ICH_VMCR_ENG0_MASK)) 208 + if ((hcr & ICH_HCR_EL2_VGrp0DIE) && !(vmcr & ICH_VMCR_EL2_VENG0_MASK)) 209 209 reg |= ICH_MISR_EL2_VGrp0D; 210 210 211 - if ((hcr & ICH_HCR_EL2_VGrp1EIE) && (vmcr & ICH_VMCR_ENG1_MASK)) 211 + if ((hcr & ICH_HCR_EL2_VGrp1EIE) && (vmcr & ICH_VMCR_EL2_VENG1_MASK)) 212 212 reg |= ICH_MISR_EL2_VGrp1E; 213 213 214 - if ((hcr & ICH_HCR_EL2_VGrp1DIE) && !(vmcr & ICH_VMCR_ENG1_MASK)) 214 + if ((hcr & ICH_HCR_EL2_VGrp1DIE) && !(vmcr & ICH_VMCR_EL2_VENG1_MASK)) 215 215 reg |= ICH_MISR_EL2_VGrp1D; 216 216 217 217 return reg;
+22 -26
arch/arm64/kvm/vgic/vgic-v3.c
··· 41 41 if (!als->nr_sgi) 42 42 cpuif->vgic_hcr |= ICH_HCR_EL2_vSGIEOICount; 43 43 44 - cpuif->vgic_hcr |= (cpuif->vgic_vmcr & ICH_VMCR_ENG0_MASK) ? 44 + cpuif->vgic_hcr |= (cpuif->vgic_vmcr & ICH_VMCR_EL2_VENG0_MASK) ? 45 45 ICH_HCR_EL2_VGrp0DIE : ICH_HCR_EL2_VGrp0EIE; 46 - cpuif->vgic_hcr |= (cpuif->vgic_vmcr & ICH_VMCR_ENG1_MASK) ? 46 + cpuif->vgic_hcr |= (cpuif->vgic_vmcr & ICH_VMCR_EL2_VENG1_MASK) ? 47 47 ICH_HCR_EL2_VGrp1DIE : ICH_HCR_EL2_VGrp1EIE; 48 48 49 49 /* ··· 215 215 * We only deal with DIR when EOIMode==1, and only for SGI, 216 216 * PPI or SPI. 217 217 */ 218 - if (!(cpuif->vgic_vmcr & ICH_VMCR_EOIM_MASK) || 218 + if (!(cpuif->vgic_vmcr & ICH_VMCR_EL2_VEOIM_MASK) || 219 219 val >= vcpu->kvm->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS) 220 220 return; 221 221 ··· 408 408 u32 vmcr; 409 409 410 410 if (model == KVM_DEV_TYPE_ARM_VGIC_V2) { 411 - vmcr = (vmcrp->ackctl << ICH_VMCR_ACK_CTL_SHIFT) & 412 - ICH_VMCR_ACK_CTL_MASK; 413 - vmcr |= (vmcrp->fiqen << ICH_VMCR_FIQ_EN_SHIFT) & 414 - ICH_VMCR_FIQ_EN_MASK; 411 + vmcr = FIELD_PREP(ICH_VMCR_EL2_VAckCtl, vmcrp->ackctl); 412 + vmcr |= FIELD_PREP(ICH_VMCR_EL2_VFIQEn, vmcrp->fiqen); 415 413 } else { 416 414 /* 417 415 * When emulating GICv3 on GICv3 with SRE=1 on the 418 416 * VFIQEn bit is RES1 and the VAckCtl bit is RES0. 
419 417 */ 420 - vmcr = ICH_VMCR_FIQ_EN_MASK; 418 + vmcr = ICH_VMCR_EL2_VFIQEn_MASK; 421 419 } 422 420 423 - vmcr |= (vmcrp->cbpr << ICH_VMCR_CBPR_SHIFT) & ICH_VMCR_CBPR_MASK; 424 - vmcr |= (vmcrp->eoim << ICH_VMCR_EOIM_SHIFT) & ICH_VMCR_EOIM_MASK; 425 - vmcr |= (vmcrp->abpr << ICH_VMCR_BPR1_SHIFT) & ICH_VMCR_BPR1_MASK; 426 - vmcr |= (vmcrp->bpr << ICH_VMCR_BPR0_SHIFT) & ICH_VMCR_BPR0_MASK; 427 - vmcr |= (vmcrp->pmr << ICH_VMCR_PMR_SHIFT) & ICH_VMCR_PMR_MASK; 428 - vmcr |= (vmcrp->grpen0 << ICH_VMCR_ENG0_SHIFT) & ICH_VMCR_ENG0_MASK; 429 - vmcr |= (vmcrp->grpen1 << ICH_VMCR_ENG1_SHIFT) & ICH_VMCR_ENG1_MASK; 421 + vmcr |= FIELD_PREP(ICH_VMCR_EL2_VCBPR, vmcrp->cbpr); 422 + vmcr |= FIELD_PREP(ICH_VMCR_EL2_VEOIM, vmcrp->eoim); 423 + vmcr |= FIELD_PREP(ICH_VMCR_EL2_VBPR1, vmcrp->abpr); 424 + vmcr |= FIELD_PREP(ICH_VMCR_EL2_VBPR0, vmcrp->bpr); 425 + vmcr |= FIELD_PREP(ICH_VMCR_EL2_VPMR, vmcrp->pmr); 426 + vmcr |= FIELD_PREP(ICH_VMCR_EL2_VENG0, vmcrp->grpen0); 427 + vmcr |= FIELD_PREP(ICH_VMCR_EL2_VENG1, vmcrp->grpen1); 430 428 431 429 cpu_if->vgic_vmcr = vmcr; 432 430 } ··· 438 440 vmcr = cpu_if->vgic_vmcr; 439 441 440 442 if (model == KVM_DEV_TYPE_ARM_VGIC_V2) { 441 - vmcrp->ackctl = (vmcr & ICH_VMCR_ACK_CTL_MASK) >> 442 - ICH_VMCR_ACK_CTL_SHIFT; 443 - vmcrp->fiqen = (vmcr & ICH_VMCR_FIQ_EN_MASK) >> 444 - ICH_VMCR_FIQ_EN_SHIFT; 443 + vmcrp->ackctl = FIELD_GET(ICH_VMCR_EL2_VAckCtl, vmcr); 444 + vmcrp->fiqen = FIELD_GET(ICH_VMCR_EL2_VFIQEn, vmcr); 445 445 } else { 446 446 /* 447 447 * When emulating GICv3 on GICv3 with SRE=1 on the ··· 449 453 vmcrp->ackctl = 0; 450 454 } 451 455 452 - vmcrp->cbpr = (vmcr & ICH_VMCR_CBPR_MASK) >> ICH_VMCR_CBPR_SHIFT; 453 - vmcrp->eoim = (vmcr & ICH_VMCR_EOIM_MASK) >> ICH_VMCR_EOIM_SHIFT; 454 - vmcrp->abpr = (vmcr & ICH_VMCR_BPR1_MASK) >> ICH_VMCR_BPR1_SHIFT; 455 - vmcrp->bpr = (vmcr & ICH_VMCR_BPR0_MASK) >> ICH_VMCR_BPR0_SHIFT; 456 - vmcrp->pmr = (vmcr & ICH_VMCR_PMR_MASK) >> ICH_VMCR_PMR_SHIFT; 457 - vmcrp->grpen0 = (vmcr & 
ICH_VMCR_ENG0_MASK) >> ICH_VMCR_ENG0_SHIFT; 458 - vmcrp->grpen1 = (vmcr & ICH_VMCR_ENG1_MASK) >> ICH_VMCR_ENG1_SHIFT; 456 + vmcrp->cbpr = FIELD_GET(ICH_VMCR_EL2_VCBPR, vmcr); 457 + vmcrp->eoim = FIELD_GET(ICH_VMCR_EL2_VEOIM, vmcr); 458 + vmcrp->abpr = FIELD_GET(ICH_VMCR_EL2_VBPR1, vmcr); 459 + vmcrp->bpr = FIELD_GET(ICH_VMCR_EL2_VBPR0, vmcr); 460 + vmcrp->pmr = FIELD_GET(ICH_VMCR_EL2_VPMR, vmcr); 461 + vmcrp->grpen0 = FIELD_GET(ICH_VMCR_EL2_VENG0, vmcr); 462 + vmcrp->grpen1 = FIELD_GET(ICH_VMCR_EL2_VENG1, vmcr); 459 463 } 460 464 461 465 #define INITIAL_PENDBASER_VALUE \
+1 -1
arch/arm64/tools/sysreg
··· 4637 4637 Field 6 ICC_ICSR_EL1 4638 4638 Field 5 ICC_PCR_EL1 4639 4639 Field 4 ICC_HPPIR_EL1 4640 - Field 3 ICC_HAPR_EL1 4640 + Res1 3 4641 4641 Field 2 ICC_CR0_EL1 4642 4642 Field 1 ICC_IDRn_EL1 4643 4643 Field 0 ICC_APR_EL1
+2
drivers/irqchip/irq-gic-v5-irs.c
··· 743 743 * be consistent across IRSes by the architecture. 744 744 */ 745 745 if (list_empty(&irs_nodes)) { 746 + idr = irs_readl_relaxed(irs_data, GICV5_IRS_IDR0); 747 + gicv5_global_data.virt_capable = !FIELD_GET(GICV5_IRS_IDR0_VIRT, idr); 746 748 747 749 idr = irs_readl_relaxed(irs_data, GICV5_IRS_IDR1); 748 750 irs_setup_pri_bits(idr);
+10
drivers/irqchip/irq-gic-v5.c
··· 1064 1064 1065 1065 static void __init gic_of_setup_kvm_info(struct device_node *node) 1066 1066 { 1067 + /* 1068 + * If we don't have native GICv5 virtualisation support, then 1069 + * we also don't have FEAT_GCIE_LEGACY - the architecture 1070 + * forbids this combination. 1071 + */ 1072 + if (!gicv5_global_data.virt_capable) { 1073 + pr_info("GIC implementation is not virtualization capable\n"); 1074 + return; 1075 + } 1076 + 1067 1077 gic_v5_kvm_info.type = GIC_V5; 1068 1078 1069 1079 /* GIC Virtual CPU interface maintenance interrupt */
+4
include/linux/irqchip/arm-gic-v5.h
··· 43 43 /* 44 44 * IRS registers and tables structures 45 45 */ 46 + #define GICV5_IRS_IDR0 0x0000 46 47 #define GICV5_IRS_IDR1 0x0004 47 48 #define GICV5_IRS_IDR2 0x0008 48 49 #define GICV5_IRS_IDR5 0x0014 ··· 63 62 #define GICV5_IRS_IST_CFGR 0x0190 64 63 #define GICV5_IRS_IST_STATUSR 0x0194 65 64 #define GICV5_IRS_MAP_L2_ISTR 0x01c0 65 + 66 + #define GICV5_IRS_IDR0_VIRT BIT(6) 66 67 67 68 #define GICV5_IRS_IDR1_PRIORITY_BITS GENMASK(22, 20) 68 69 #define GICV5_IRS_IDR1_IAFFID_BITS GENMASK(19, 16) ··· 281 278 u8 cpuif_pri_bits; 282 279 u8 cpuif_id_bits; 283 280 u8 irs_pri_bits; 281 + bool virt_capable; 284 282 struct { 285 283 __le64 *l1ist_addr; 286 284 u32 l2_size;