Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

Merge tag 'mips_6.5_1' of git://git.kernel.org/pub/scm/linux/kernel/git/mips/linux

Pull MIPS fixes from Thomas Bogendoerfer:

- fixes for KVM

- fix for Loongson build and CPU probing

- DT fixes

* tag 'mips_6.5_1' of git://git.kernel.org/pub/scm/linux/kernel/git/mips/linux:
MIPS: kvm: Fix build error with KVM_MIPS_DEBUG_COP0_COUNTERS enabled
MIPS: dts: add missing space before {
MIPS: Loongson: Fix build error when make modules_install
MIPS: KVM: Fix NULL pointer dereference
MIPS: Loongson: Fix cpu_probe_loongson() again

+46 -53
+3 -7
arch/mips/Makefile
··· 181 181 cflags-$(CONFIG_CAVIUM_CN63XXP1) += -Wa,-mfix-cn63xxp1 182 182 cflags-$(CONFIG_CPU_BMIPS) += -march=mips32 -Wa,-mips32 -Wa,--trap 183 183 184 - cflags-$(CONFIG_CPU_LOONGSON2E) += -march=loongson2e -Wa,--trap 185 - cflags-$(CONFIG_CPU_LOONGSON2F) += -march=loongson2f -Wa,--trap 184 + cflags-$(CONFIG_CPU_LOONGSON2E) += $(call cc-option,-march=loongson2e) -Wa,--trap 185 + cflags-$(CONFIG_CPU_LOONGSON2F) += $(call cc-option,-march=loongson2f) -Wa,--trap 186 + cflags-$(CONFIG_CPU_LOONGSON64) += $(call cc-option,-march=loongson3a,-march=mips64r2) -Wa,--trap 186 187 # Some -march= flags enable MMI instructions, and GCC complains about that 187 188 # support being enabled alongside -msoft-float. Thus explicitly disable MMI. 188 189 cflags-$(CONFIG_CPU_LOONGSON2EF) += $(call cc-option,-mno-loongson-mmi) 189 - ifdef CONFIG_CPU_LOONGSON64 190 - cflags-$(CONFIG_CPU_LOONGSON64) += -Wa,--trap 191 - cflags-$(CONFIG_CC_IS_GCC) += -march=loongson3a 192 - cflags-$(CONFIG_CC_IS_CLANG) += -march=mips64r2 193 - endif 194 190 cflags-$(CONFIG_CPU_LOONGSON64) += $(call cc-option,-mno-loongson-mmi) 195 191 196 192 cflags-$(CONFIG_CPU_R4000_WORKAROUNDS) += $(call cc-option,-mfix-r4000,)
+1 -1
arch/mips/boot/dts/mscc/serval_common.dtsi
··· 20 20 stdout-path = "serial0:115200n8"; 21 21 }; 22 22 23 - i2c0_imux: i2c0-imux{ 23 + i2c0_imux: i2c0-imux { 24 24 compatible = "i2c-mux-pinctrl"; 25 25 #address-cells = <1>; 26 26 #size-cells = <0>;
+1 -1
arch/mips/boot/dts/pic32/pic32mzda.dtsi
··· 75 75 microchip,external-irqs = <3 8 13 18 23>; 76 76 }; 77 77 78 - pic32_pinctrl: pinctrl@1f801400{ 78 + pic32_pinctrl: pinctrl@1f801400 { 79 79 #address-cells = <1>; 80 80 #size-cells = <1>; 81 81 compatible = "microchip,pic32mzda-pinctrl";
+3 -3
arch/mips/include/asm/kvm_host.h
··· 317 317 unsigned int aux_inuse; 318 318 319 319 /* COP0 State */ 320 - struct mips_coproc *cop0; 320 + struct mips_coproc cop0; 321 321 322 322 /* Resume PC after MMIO completion */ 323 323 unsigned long io_pc; ··· 698 698 static inline bool kvm_mips_guest_has_fpu(struct kvm_vcpu_arch *vcpu) 699 699 { 700 700 return kvm_mips_guest_can_have_fpu(vcpu) && 701 - kvm_read_c0_guest_config1(vcpu->cop0) & MIPS_CONF1_FP; 701 + kvm_read_c0_guest_config1(&vcpu->cop0) & MIPS_CONF1_FP; 702 702 } 703 703 704 704 static inline bool kvm_mips_guest_can_have_msa(struct kvm_vcpu_arch *vcpu) ··· 710 710 static inline bool kvm_mips_guest_has_msa(struct kvm_vcpu_arch *vcpu) 711 711 { 712 712 return kvm_mips_guest_can_have_msa(vcpu) && 713 - kvm_read_c0_guest_config3(vcpu->cop0) & MIPS_CONF3_MSA; 713 + kvm_read_c0_guest_config3(&vcpu->cop0) & MIPS_CONF3_MSA; 714 714 } 715 715 716 716 struct kvm_mips_callbacks {
+3 -6
arch/mips/kernel/cpu-probe.c
··· 1677 1677 1678 1678 static inline void cpu_probe_loongson(struct cpuinfo_mips *c, unsigned int cpu) 1679 1679 { 1680 + c->cputype = CPU_LOONGSON64; 1681 + 1680 1682 /* All Loongson processors covered here define ExcCode 16 as GSExc. */ 1683 + decode_configs(c); 1681 1684 c->options |= MIPS_CPU_GSEXCEX; 1682 1685 1683 1686 switch (c->processor_id & PRID_IMP_MASK) { ··· 1690 1687 case PRID_REV_LOONGSON2K_R1_1: 1691 1688 case PRID_REV_LOONGSON2K_R1_2: 1692 1689 case PRID_REV_LOONGSON2K_R1_3: 1693 - c->cputype = CPU_LOONGSON64; 1694 1690 __cpu_name[cpu] = "Loongson-2K"; 1695 1691 set_elf_platform(cpu, "gs264e"); 1696 1692 set_isa(c, MIPS_CPU_ISA_M64R2); ··· 1702 1700 switch (c->processor_id & PRID_REV_MASK) { 1703 1701 case PRID_REV_LOONGSON3A_R2_0: 1704 1702 case PRID_REV_LOONGSON3A_R2_1: 1705 - c->cputype = CPU_LOONGSON64; 1706 1703 __cpu_name[cpu] = "ICT Loongson-3"; 1707 1704 set_elf_platform(cpu, "loongson3a"); 1708 1705 set_isa(c, MIPS_CPU_ISA_M64R2); 1709 1706 break; 1710 1707 case PRID_REV_LOONGSON3A_R3_0: 1711 1708 case PRID_REV_LOONGSON3A_R3_1: 1712 - c->cputype = CPU_LOONGSON64; 1713 1709 __cpu_name[cpu] = "ICT Loongson-3"; 1714 1710 set_elf_platform(cpu, "loongson3a"); 1715 1711 set_isa(c, MIPS_CPU_ISA_M64R2); ··· 1727 1727 c->ases &= ~MIPS_ASE_VZ; /* VZ of Loongson-3A2000/3000 is incomplete */ 1728 1728 break; 1729 1729 case PRID_IMP_LOONGSON_64G: 1730 - c->cputype = CPU_LOONGSON64; 1731 1730 __cpu_name[cpu] = "ICT Loongson-3"; 1732 1731 set_elf_platform(cpu, "loongson3a"); 1733 1732 set_isa(c, MIPS_CPU_ISA_M64R2); ··· 1736 1737 panic("Unknown Loongson Processor ID!"); 1737 1738 break; 1738 1739 } 1739 - 1740 - decode_configs(c); 1741 1740 } 1742 1741 #else 1743 1742 static inline void cpu_probe_loongson(struct cpuinfo_mips *c, unsigned int cpu) { }
+11 -11
arch/mips/kvm/emulate.c
··· 312 312 */ 313 313 int kvm_mips_count_disabled(struct kvm_vcpu *vcpu) 314 314 { 315 - struct mips_coproc *cop0 = vcpu->arch.cop0; 315 + struct mips_coproc *cop0 = &vcpu->arch.cop0; 316 316 317 317 return (vcpu->arch.count_ctl & KVM_REG_MIPS_COUNT_CTL_DC) || 318 318 (kvm_read_c0_guest_cause(cop0) & CAUSEF_DC); ··· 384 384 */ 385 385 static u32 kvm_mips_read_count_running(struct kvm_vcpu *vcpu, ktime_t now) 386 386 { 387 - struct mips_coproc *cop0 = vcpu->arch.cop0; 387 + struct mips_coproc *cop0 = &vcpu->arch.cop0; 388 388 ktime_t expires, threshold; 389 389 u32 count, compare; 390 390 int running; ··· 444 444 */ 445 445 u32 kvm_mips_read_count(struct kvm_vcpu *vcpu) 446 446 { 447 - struct mips_coproc *cop0 = vcpu->arch.cop0; 447 + struct mips_coproc *cop0 = &vcpu->arch.cop0; 448 448 449 449 /* If count disabled just read static copy of count */ 450 450 if (kvm_mips_count_disabled(vcpu)) ··· 502 502 static void kvm_mips_resume_hrtimer(struct kvm_vcpu *vcpu, 503 503 ktime_t now, u32 count) 504 504 { 505 - struct mips_coproc *cop0 = vcpu->arch.cop0; 505 + struct mips_coproc *cop0 = &vcpu->arch.cop0; 506 506 u32 compare; 507 507 u64 delta; 508 508 ktime_t expire; ··· 603 603 */ 604 604 void kvm_mips_write_count(struct kvm_vcpu *vcpu, u32 count) 605 605 { 606 - struct mips_coproc *cop0 = vcpu->arch.cop0; 606 + struct mips_coproc *cop0 = &vcpu->arch.cop0; 607 607 ktime_t now; 608 608 609 609 /* Calculate bias */ ··· 649 649 */ 650 650 int kvm_mips_set_count_hz(struct kvm_vcpu *vcpu, s64 count_hz) 651 651 { 652 - struct mips_coproc *cop0 = vcpu->arch.cop0; 652 + struct mips_coproc *cop0 = &vcpu->arch.cop0; 653 653 int dc; 654 654 ktime_t now; 655 655 u32 count; ··· 696 696 */ 697 697 void kvm_mips_write_compare(struct kvm_vcpu *vcpu, u32 compare, bool ack) 698 698 { 699 - struct mips_coproc *cop0 = vcpu->arch.cop0; 699 + struct mips_coproc *cop0 = &vcpu->arch.cop0; 700 700 int dc; 701 701 u32 old_compare = kvm_read_c0_guest_compare(cop0); 702 702 s32 delta = compare - 
old_compare; ··· 779 779 */ 780 780 static ktime_t kvm_mips_count_disable(struct kvm_vcpu *vcpu) 781 781 { 782 - struct mips_coproc *cop0 = vcpu->arch.cop0; 782 + struct mips_coproc *cop0 = &vcpu->arch.cop0; 783 783 u32 count; 784 784 ktime_t now; 785 785 ··· 806 806 */ 807 807 void kvm_mips_count_disable_cause(struct kvm_vcpu *vcpu) 808 808 { 809 - struct mips_coproc *cop0 = vcpu->arch.cop0; 809 + struct mips_coproc *cop0 = &vcpu->arch.cop0; 810 810 811 811 kvm_set_c0_guest_cause(cop0, CAUSEF_DC); 812 812 if (!(vcpu->arch.count_ctl & KVM_REG_MIPS_COUNT_CTL_DC)) ··· 826 826 */ 827 827 void kvm_mips_count_enable_cause(struct kvm_vcpu *vcpu) 828 828 { 829 - struct mips_coproc *cop0 = vcpu->arch.cop0; 829 + struct mips_coproc *cop0 = &vcpu->arch.cop0; 830 830 u32 count; 831 831 832 832 kvm_clear_c0_guest_cause(cop0, CAUSEF_DC); ··· 852 852 */ 853 853 int kvm_mips_set_count_ctl(struct kvm_vcpu *vcpu, s64 count_ctl) 854 854 { 855 - struct mips_coproc *cop0 = vcpu->arch.cop0; 855 + struct mips_coproc *cop0 = &vcpu->arch.cop0; 856 856 s64 changed = count_ctl ^ vcpu->arch.count_ctl; 857 857 s64 delta; 858 858 ktime_t expire, now;
+8 -8
arch/mips/kvm/mips.c
··· 649 649 static int kvm_mips_get_reg(struct kvm_vcpu *vcpu, 650 650 const struct kvm_one_reg *reg) 651 651 { 652 - struct mips_coproc *cop0 = vcpu->arch.cop0; 652 + struct mips_coproc *cop0 = &vcpu->arch.cop0; 653 653 struct mips_fpu_struct *fpu = &vcpu->arch.fpu; 654 654 int ret; 655 655 s64 v; ··· 761 761 static int kvm_mips_set_reg(struct kvm_vcpu *vcpu, 762 762 const struct kvm_one_reg *reg) 763 763 { 764 - struct mips_coproc *cop0 = vcpu->arch.cop0; 764 + struct mips_coproc *cop0 = &vcpu->arch.cop0; 765 765 struct mips_fpu_struct *fpu = &vcpu->arch.fpu; 766 766 s64 v; 767 767 s64 vs[2]; ··· 1086 1086 int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu) 1087 1087 { 1088 1088 return kvm_mips_pending_timer(vcpu) || 1089 - kvm_read_c0_guest_cause(vcpu->arch.cop0) & C_TI; 1089 + kvm_read_c0_guest_cause(&vcpu->arch.cop0) & C_TI; 1090 1090 } 1091 1091 1092 1092 int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu) ··· 1110 1110 kvm_debug("\thi: 0x%08lx\n", vcpu->arch.hi); 1111 1111 kvm_debug("\tlo: 0x%08lx\n", vcpu->arch.lo); 1112 1112 1113 - cop0 = vcpu->arch.cop0; 1113 + cop0 = &vcpu->arch.cop0; 1114 1114 kvm_debug("\tStatus: 0x%08x, Cause: 0x%08x\n", 1115 1115 kvm_read_c0_guest_status(cop0), 1116 1116 kvm_read_c0_guest_cause(cop0)); ··· 1232 1232 1233 1233 case EXCCODE_TLBS: 1234 1234 kvm_debug("TLB ST fault: cause %#x, status %#x, PC: %p, BadVaddr: %#lx\n", 1235 - cause, kvm_read_c0_guest_status(vcpu->arch.cop0), opc, 1235 + cause, kvm_read_c0_guest_status(&vcpu->arch.cop0), opc, 1236 1236 badvaddr); 1237 1237 1238 1238 ++vcpu->stat.tlbmiss_st_exits; ··· 1304 1304 kvm_get_badinstr(opc, vcpu, &inst); 1305 1305 kvm_err("Exception Code: %d, not yet handled, @ PC: %p, inst: 0x%08x BadVaddr: %#lx Status: %#x\n", 1306 1306 exccode, opc, inst, badvaddr, 1307 - kvm_read_c0_guest_status(vcpu->arch.cop0)); 1307 + kvm_read_c0_guest_status(&vcpu->arch.cop0)); 1308 1308 kvm_arch_vcpu_dump_regs(vcpu); 1309 1309 run->exit_reason = KVM_EXIT_INTERNAL_ERROR; 1310 1310 ret = 
RESUME_HOST; ··· 1377 1377 /* Enable FPU for guest and restore context */ 1378 1378 void kvm_own_fpu(struct kvm_vcpu *vcpu) 1379 1379 { 1380 - struct mips_coproc *cop0 = vcpu->arch.cop0; 1380 + struct mips_coproc *cop0 = &vcpu->arch.cop0; 1381 1381 unsigned int sr, cfg5; 1382 1382 1383 1383 preempt_disable(); ··· 1421 1421 /* Enable MSA for guest and restore context */ 1422 1422 void kvm_own_msa(struct kvm_vcpu *vcpu) 1423 1423 { 1424 - struct mips_coproc *cop0 = vcpu->arch.cop0; 1424 + struct mips_coproc *cop0 = &vcpu->arch.cop0; 1425 1425 unsigned int sr, cfg5; 1426 1426 1427 1427 preempt_disable();
+2 -2
arch/mips/kvm/stats.c
··· 54 54 kvm_info("\nKVM VCPU[%d] COP0 Access Profile:\n", vcpu->vcpu_id); 55 55 for (i = 0; i < N_MIPS_COPROC_REGS; i++) { 56 56 for (j = 0; j < N_MIPS_COPROC_SEL; j++) { 57 - if (vcpu->arch.cop0->stat[i][j]) 57 + if (vcpu->arch.cop0.stat[i][j]) 58 58 kvm_info("%s[%d]: %lu\n", kvm_cop0_str[i], j, 59 - vcpu->arch.cop0->stat[i][j]); 59 + vcpu->arch.cop0.stat[i][j]); 60 60 } 61 61 } 62 62 #endif
+4 -4
arch/mips/kvm/trace.h
··· 322 322 ), 323 323 324 324 TP_fast_assign( 325 - __entry->epc = kvm_read_c0_guest_epc(vcpu->arch.cop0); 325 + __entry->epc = kvm_read_c0_guest_epc(&vcpu->arch.cop0); 326 326 __entry->pc = vcpu->arch.pc; 327 - __entry->badvaddr = kvm_read_c0_guest_badvaddr(vcpu->arch.cop0); 328 - __entry->status = kvm_read_c0_guest_status(vcpu->arch.cop0); 329 - __entry->cause = kvm_read_c0_guest_cause(vcpu->arch.cop0); 327 + __entry->badvaddr = kvm_read_c0_guest_badvaddr(&vcpu->arch.cop0); 328 + __entry->status = kvm_read_c0_guest_status(&vcpu->arch.cop0); 329 + __entry->cause = kvm_read_c0_guest_cause(&vcpu->arch.cop0); 330 330 ), 331 331 332 332 TP_printk("EPC: 0x%08lx PC: 0x%08lx Status: 0x%08x Cause: 0x%08x BadVAddr: 0x%08lx",
+10 -10
arch/mips/kvm/vz.c
··· 422 422 */ 423 423 static void kvm_vz_restore_timer(struct kvm_vcpu *vcpu) 424 424 { 425 - struct mips_coproc *cop0 = vcpu->arch.cop0; 425 + struct mips_coproc *cop0 = &vcpu->arch.cop0; 426 426 u32 cause, compare; 427 427 428 428 compare = kvm_read_sw_gc0_compare(cop0); ··· 517 517 */ 518 518 static void kvm_vz_save_timer(struct kvm_vcpu *vcpu) 519 519 { 520 - struct mips_coproc *cop0 = vcpu->arch.cop0; 520 + struct mips_coproc *cop0 = &vcpu->arch.cop0; 521 521 u32 gctl0, compare, cause; 522 522 523 523 gctl0 = read_c0_guestctl0(); ··· 863 863 864 864 static void kvm_write_maari(struct kvm_vcpu *vcpu, unsigned long val) 865 865 { 866 - struct mips_coproc *cop0 = vcpu->arch.cop0; 866 + struct mips_coproc *cop0 = &vcpu->arch.cop0; 867 867 868 868 val &= MIPS_MAARI_INDEX; 869 869 if (val == MIPS_MAARI_INDEX) ··· 876 876 u32 *opc, u32 cause, 877 877 struct kvm_vcpu *vcpu) 878 878 { 879 - struct mips_coproc *cop0 = vcpu->arch.cop0; 879 + struct mips_coproc *cop0 = &vcpu->arch.cop0; 880 880 enum emulation_result er = EMULATE_DONE; 881 881 u32 rt, rd, sel; 882 882 unsigned long curr_pc; ··· 1911 1911 const struct kvm_one_reg *reg, 1912 1912 s64 *v) 1913 1913 { 1914 - struct mips_coproc *cop0 = vcpu->arch.cop0; 1914 + struct mips_coproc *cop0 = &vcpu->arch.cop0; 1915 1915 unsigned int idx; 1916 1916 1917 1917 switch (reg->id) { ··· 2081 2081 case KVM_REG_MIPS_CP0_MAARI: 2082 2082 if (!cpu_guest_has_maar || cpu_guest_has_dyn_maar) 2083 2083 return -EINVAL; 2084 - *v = kvm_read_sw_gc0_maari(vcpu->arch.cop0); 2084 + *v = kvm_read_sw_gc0_maari(&vcpu->arch.cop0); 2085 2085 break; 2086 2086 #ifdef CONFIG_64BIT 2087 2087 case KVM_REG_MIPS_CP0_XCONTEXT: ··· 2135 2135 const struct kvm_one_reg *reg, 2136 2136 s64 v) 2137 2137 { 2138 - struct mips_coproc *cop0 = vcpu->arch.cop0; 2138 + struct mips_coproc *cop0 = &vcpu->arch.cop0; 2139 2139 unsigned int idx; 2140 2140 int ret = 0; 2141 2141 unsigned int cur, change; ··· 2562 2562 2563 2563 static int kvm_vz_vcpu_load(struct 
kvm_vcpu *vcpu, int cpu) 2564 2564 { 2565 - struct mips_coproc *cop0 = vcpu->arch.cop0; 2565 + struct mips_coproc *cop0 = &vcpu->arch.cop0; 2566 2566 bool migrated, all; 2567 2567 2568 2568 /* ··· 2704 2704 2705 2705 static int kvm_vz_vcpu_put(struct kvm_vcpu *vcpu, int cpu) 2706 2706 { 2707 - struct mips_coproc *cop0 = vcpu->arch.cop0; 2707 + struct mips_coproc *cop0 = &vcpu->arch.cop0; 2708 2708 2709 2709 if (current->flags & PF_VCPU) 2710 2710 kvm_vz_vcpu_save_wired(vcpu); ··· 3076 3076 3077 3077 static int kvm_vz_vcpu_setup(struct kvm_vcpu *vcpu) 3078 3078 { 3079 - struct mips_coproc *cop0 = vcpu->arch.cop0; 3079 + struct mips_coproc *cop0 = &vcpu->arch.cop0; 3080 3080 unsigned long count_hz = 100*1000*1000; /* default to 100 MHz */ 3081 3081 3082 3082 /*