Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

clocksource/drivers/arm_arch_timer_mmio: Switch over to standalone driver

Remove all the MMIO support from the per-CPU timer driver, and switch
over to the standalone driver.

Signed-off-by: Marc Zyngier <maz@kernel.org>
Signed-off-by: Daniel Lezcano <daniel.lezcano@linaro.org>
Tested-by: Sudeep Holla <sudeep.holla@arm.com>
Reviewed-by: Sudeep Holla <sudeep.holla@arm.com>
Link: https://lore.kernel.org/r/20250814154622.10193-4-maz@kernel.org

authored by

Marc Zyngier and committed by
Daniel Lezcano
0f67b56d 4891f015

+66 -626
+1
drivers/clocksource/Makefile
··· 64 64 65 65 obj-$(CONFIG_ARC_TIMERS) += arc_timer.o 66 66 obj-$(CONFIG_ARM_ARCH_TIMER) += arm_arch_timer.o 67 + obj-$(CONFIG_ARM_ARCH_TIMER) += arm_arch_timer_mmio.o 67 68 obj-$(CONFIG_ARM_GLOBAL_TIMER) += arm_global_timer.o 68 69 obj-$(CONFIG_ARMV7M_SYSTICK) += armv7m_systick.o 69 70 obj-$(CONFIG_ARM_TIMER_SP804) += timer-sp804.o
+65 -621
drivers/clocksource/arm_arch_timer.c
··· 34 34 35 35 #include <clocksource/arm_arch_timer.h> 36 36 37 - #define CNTTIDR 0x08 38 - #define CNTTIDR_VIRT(n) (BIT(1) << ((n) * 4)) 39 - 40 - #define CNTACR(n) (0x40 + ((n) * 4)) 41 - #define CNTACR_RPCT BIT(0) 42 - #define CNTACR_RVCT BIT(1) 43 - #define CNTACR_RFRQ BIT(2) 44 - #define CNTACR_RVOFF BIT(3) 45 - #define CNTACR_RWVT BIT(4) 46 - #define CNTACR_RWPT BIT(5) 47 - 48 - #define CNTPCT_LO 0x00 49 - #define CNTVCT_LO 0x08 50 - #define CNTFRQ 0x10 51 - #define CNTP_CVAL_LO 0x20 52 - #define CNTP_CTL 0x2c 53 - #define CNTV_CVAL_LO 0x30 54 - #define CNTV_CTL 0x3c 55 - 56 37 /* 57 38 * The minimum amount of time a generic counter is guaranteed to not roll over 58 39 * (40 years) 59 40 */ 60 41 #define MIN_ROLLOVER_SECS (40ULL * 365 * 24 * 3600) 61 - 62 - static unsigned arch_timers_present __initdata; 63 - 64 - struct arch_timer { 65 - void __iomem *base; 66 - struct clock_event_device evt; 67 - }; 68 - 69 - static struct arch_timer *arch_timer_mem __ro_after_init; 70 - 71 - #define to_arch_timer(e) container_of(e, struct arch_timer, evt) 72 42 73 43 static u32 arch_timer_rate __ro_after_init; 74 44 static int arch_timer_ppi[ARCH_TIMER_MAX_TIMER_PPI] __ro_after_init; ··· 55 85 56 86 static enum arch_timer_ppi_nr arch_timer_uses_ppi __ro_after_init = ARCH_TIMER_VIRT_PPI; 57 87 static bool arch_timer_c3stop __ro_after_init; 58 - static bool arch_timer_mem_use_virtual __ro_after_init; 59 88 static bool arch_counter_suspend_stop __ro_after_init; 60 89 #ifdef CONFIG_GENERIC_GETTIMEOFDAY 61 90 static enum vdso_clock_mode vdso_default = VDSO_CLOCKMODE_ARCHTIMER; ··· 90 121 /* 91 122 * Architected system timer support. 
92 123 */ 93 - 94 - static __always_inline 95 - void arch_timer_reg_write(int access, enum arch_timer_reg reg, u64 val, 96 - struct clock_event_device *clk) 97 - { 98 - if (access == ARCH_TIMER_MEM_PHYS_ACCESS) { 99 - struct arch_timer *timer = to_arch_timer(clk); 100 - switch (reg) { 101 - case ARCH_TIMER_REG_CTRL: 102 - writel_relaxed((u32)val, timer->base + CNTP_CTL); 103 - break; 104 - case ARCH_TIMER_REG_CVAL: 105 - /* 106 - * Not guaranteed to be atomic, so the timer 107 - * must be disabled at this point. 108 - */ 109 - writeq_relaxed(val, timer->base + CNTP_CVAL_LO); 110 - break; 111 - default: 112 - BUILD_BUG(); 113 - } 114 - } else if (access == ARCH_TIMER_MEM_VIRT_ACCESS) { 115 - struct arch_timer *timer = to_arch_timer(clk); 116 - switch (reg) { 117 - case ARCH_TIMER_REG_CTRL: 118 - writel_relaxed((u32)val, timer->base + CNTV_CTL); 119 - break; 120 - case ARCH_TIMER_REG_CVAL: 121 - /* Same restriction as above */ 122 - writeq_relaxed(val, timer->base + CNTV_CVAL_LO); 123 - break; 124 - default: 125 - BUILD_BUG(); 126 - } 127 - } else { 128 - arch_timer_reg_write_cp15(access, reg, val); 129 - } 130 - } 131 - 132 - static __always_inline 133 - u32 arch_timer_reg_read(int access, enum arch_timer_reg reg, 134 - struct clock_event_device *clk) 135 - { 136 - u32 val; 137 - 138 - if (access == ARCH_TIMER_MEM_PHYS_ACCESS) { 139 - struct arch_timer *timer = to_arch_timer(clk); 140 - switch (reg) { 141 - case ARCH_TIMER_REG_CTRL: 142 - val = readl_relaxed(timer->base + CNTP_CTL); 143 - break; 144 - default: 145 - BUILD_BUG(); 146 - } 147 - } else if (access == ARCH_TIMER_MEM_VIRT_ACCESS) { 148 - struct arch_timer *timer = to_arch_timer(clk); 149 - switch (reg) { 150 - case ARCH_TIMER_REG_CTRL: 151 - val = readl_relaxed(timer->base + CNTV_CTL); 152 - break; 153 - default: 154 - BUILD_BUG(); 155 - } 156 - } else { 157 - val = arch_timer_reg_read_cp15(access, reg); 158 - } 159 - 160 - return val; 161 - } 162 - 163 124 static noinstr u64 
raw_counter_get_cntpct_stable(void) 164 125 { 165 126 return __arch_counter_get_cntpct_stable(); ··· 323 424 unsigned long ctrl; 324 425 u64 cval; 325 426 326 - ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL, clk); 427 + ctrl = arch_timer_reg_read_cp15(access, ARCH_TIMER_REG_CTRL); 327 428 ctrl |= ARCH_TIMER_CTRL_ENABLE; 328 429 ctrl &= ~ARCH_TIMER_CTRL_IT_MASK; 329 430 ··· 335 436 write_sysreg(cval, cntv_cval_el0); 336 437 } 337 438 338 - arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, clk); 439 + arch_timer_reg_write_cp15(access, ARCH_TIMER_REG_CTRL, ctrl); 339 440 } 340 441 341 442 static __maybe_unused int erratum_set_next_event_virt(unsigned long evt, ··· 566 667 { 567 668 unsigned long ctrl; 568 669 569 - ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL, evt); 670 + ctrl = arch_timer_reg_read_cp15(access, ARCH_TIMER_REG_CTRL); 570 671 if (ctrl & ARCH_TIMER_CTRL_IT_STAT) { 571 672 ctrl |= ARCH_TIMER_CTRL_IT_MASK; 572 - arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, evt); 673 + arch_timer_reg_write_cp15(access, ARCH_TIMER_REG_CTRL, ctrl); 573 674 evt->event_handler(evt); 574 675 return IRQ_HANDLED; 575 676 } ··· 591 692 return timer_handler(ARCH_TIMER_PHYS_ACCESS, evt); 592 693 } 593 694 594 - static irqreturn_t arch_timer_handler_phys_mem(int irq, void *dev_id) 595 - { 596 - struct clock_event_device *evt = dev_id; 597 - 598 - return timer_handler(ARCH_TIMER_MEM_PHYS_ACCESS, evt); 599 - } 600 - 601 - static irqreturn_t arch_timer_handler_virt_mem(int irq, void *dev_id) 602 - { 603 - struct clock_event_device *evt = dev_id; 604 - 605 - return timer_handler(ARCH_TIMER_MEM_VIRT_ACCESS, evt); 606 - } 607 - 608 695 static __always_inline int arch_timer_shutdown(const int access, 609 696 struct clock_event_device *clk) 610 697 { 611 698 unsigned long ctrl; 612 699 613 - ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL, clk); 700 + ctrl = arch_timer_reg_read_cp15(access, ARCH_TIMER_REG_CTRL); 614 701 ctrl &= 
~ARCH_TIMER_CTRL_ENABLE; 615 - arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, clk); 702 + arch_timer_reg_write_cp15(access, ARCH_TIMER_REG_CTRL, ctrl); 616 703 617 704 return 0; 618 705 } ··· 613 728 return arch_timer_shutdown(ARCH_TIMER_PHYS_ACCESS, clk); 614 729 } 615 730 616 - static int arch_timer_shutdown_virt_mem(struct clock_event_device *clk) 617 - { 618 - return arch_timer_shutdown(ARCH_TIMER_MEM_VIRT_ACCESS, clk); 619 - } 620 - 621 - static int arch_timer_shutdown_phys_mem(struct clock_event_device *clk) 622 - { 623 - return arch_timer_shutdown(ARCH_TIMER_MEM_PHYS_ACCESS, clk); 624 - } 625 - 626 731 static __always_inline void set_next_event(const int access, unsigned long evt, 627 732 struct clock_event_device *clk) 628 733 { 629 734 unsigned long ctrl; 630 735 u64 cnt; 631 736 632 - ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL, clk); 737 + ctrl = arch_timer_reg_read_cp15(access, ARCH_TIMER_REG_CTRL); 633 738 ctrl |= ARCH_TIMER_CTRL_ENABLE; 634 739 ctrl &= ~ARCH_TIMER_CTRL_IT_MASK; 635 740 ··· 628 753 else 629 754 cnt = __arch_counter_get_cntvct(); 630 755 631 - arch_timer_reg_write(access, ARCH_TIMER_REG_CVAL, evt + cnt, clk); 632 - arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, clk); 756 + arch_timer_reg_write_cp15(access, ARCH_TIMER_REG_CVAL, evt + cnt); 757 + arch_timer_reg_write_cp15(access, ARCH_TIMER_REG_CTRL, ctrl); 633 758 } 634 759 635 760 static int arch_timer_set_next_event_virt(unsigned long evt, ··· 643 768 struct clock_event_device *clk) 644 769 { 645 770 set_next_event(ARCH_TIMER_PHYS_ACCESS, evt, clk); 646 - return 0; 647 - } 648 - 649 - static noinstr u64 arch_counter_get_cnt_mem(struct arch_timer *t, int offset_lo) 650 - { 651 - u32 cnt_lo, cnt_hi, tmp_hi; 652 - 653 - do { 654 - cnt_hi = __le32_to_cpu((__le32 __force)__raw_readl(t->base + offset_lo + 4)); 655 - cnt_lo = __le32_to_cpu((__le32 __force)__raw_readl(t->base + offset_lo)); 656 - tmp_hi = __le32_to_cpu((__le32 __force)__raw_readl(t->base + 
offset_lo + 4)); 657 - } while (cnt_hi != tmp_hi); 658 - 659 - return ((u64) cnt_hi << 32) | cnt_lo; 660 - } 661 - 662 - static __always_inline void set_next_event_mem(const int access, unsigned long evt, 663 - struct clock_event_device *clk) 664 - { 665 - struct arch_timer *timer = to_arch_timer(clk); 666 - unsigned long ctrl; 667 - u64 cnt; 668 - 669 - ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL, clk); 670 - 671 - /* Timer must be disabled before programming CVAL */ 672 - if (ctrl & ARCH_TIMER_CTRL_ENABLE) { 673 - ctrl &= ~ARCH_TIMER_CTRL_ENABLE; 674 - arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, clk); 675 - } 676 - 677 - ctrl |= ARCH_TIMER_CTRL_ENABLE; 678 - ctrl &= ~ARCH_TIMER_CTRL_IT_MASK; 679 - 680 - if (access == ARCH_TIMER_MEM_VIRT_ACCESS) 681 - cnt = arch_counter_get_cnt_mem(timer, CNTVCT_LO); 682 - else 683 - cnt = arch_counter_get_cnt_mem(timer, CNTPCT_LO); 684 - 685 - arch_timer_reg_write(access, ARCH_TIMER_REG_CVAL, evt + cnt, clk); 686 - arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, clk); 687 - } 688 - 689 - static int arch_timer_set_next_event_virt_mem(unsigned long evt, 690 - struct clock_event_device *clk) 691 - { 692 - set_next_event_mem(ARCH_TIMER_MEM_VIRT_ACCESS, evt, clk); 693 - return 0; 694 - } 695 - 696 - static int arch_timer_set_next_event_phys_mem(unsigned long evt, 697 - struct clock_event_device *clk) 698 - { 699 - set_next_event_mem(ARCH_TIMER_MEM_PHYS_ACCESS, evt, clk); 700 771 return 0; 701 772 } 702 773 ··· 671 850 return CLOCKSOURCE_MASK(arch_counter_get_width()); 672 851 } 673 852 674 - static void __arch_timer_setup(unsigned type, 675 - struct clock_event_device *clk) 853 + static void __arch_timer_setup(struct clock_event_device *clk) 676 854 { 855 + typeof(clk->set_next_event) sne; 677 856 u64 max_delta; 678 857 679 858 clk->features = CLOCK_EVT_FEAT_ONESHOT; 680 859 681 - if (type == ARCH_TIMER_TYPE_CP15) { 682 - typeof(clk->set_next_event) sne; 860 + 
arch_timer_check_ool_workaround(ate_match_local_cap_id, NULL); 683 861 684 - arch_timer_check_ool_workaround(ate_match_local_cap_id, NULL); 685 - 686 - if (arch_timer_c3stop) 687 - clk->features |= CLOCK_EVT_FEAT_C3STOP; 688 - clk->name = "arch_sys_timer"; 689 - clk->rating = 450; 690 - clk->cpumask = cpumask_of(smp_processor_id()); 691 - clk->irq = arch_timer_ppi[arch_timer_uses_ppi]; 692 - switch (arch_timer_uses_ppi) { 693 - case ARCH_TIMER_VIRT_PPI: 694 - clk->set_state_shutdown = arch_timer_shutdown_virt; 695 - clk->set_state_oneshot_stopped = arch_timer_shutdown_virt; 696 - sne = erratum_handler(set_next_event_virt); 697 - break; 698 - case ARCH_TIMER_PHYS_SECURE_PPI: 699 - case ARCH_TIMER_PHYS_NONSECURE_PPI: 700 - case ARCH_TIMER_HYP_PPI: 701 - clk->set_state_shutdown = arch_timer_shutdown_phys; 702 - clk->set_state_oneshot_stopped = arch_timer_shutdown_phys; 703 - sne = erratum_handler(set_next_event_phys); 704 - break; 705 - default: 706 - BUG(); 707 - } 708 - 709 - clk->set_next_event = sne; 710 - max_delta = __arch_timer_check_delta(); 711 - } else { 712 - clk->features |= CLOCK_EVT_FEAT_DYNIRQ; 713 - clk->name = "arch_mem_timer"; 714 - clk->rating = 400; 715 - clk->cpumask = cpu_possible_mask; 716 - if (arch_timer_mem_use_virtual) { 717 - clk->set_state_shutdown = arch_timer_shutdown_virt_mem; 718 - clk->set_state_oneshot_stopped = arch_timer_shutdown_virt_mem; 719 - clk->set_next_event = 720 - arch_timer_set_next_event_virt_mem; 721 - } else { 722 - clk->set_state_shutdown = arch_timer_shutdown_phys_mem; 723 - clk->set_state_oneshot_stopped = arch_timer_shutdown_phys_mem; 724 - clk->set_next_event = 725 - arch_timer_set_next_event_phys_mem; 726 - } 727 - 728 - max_delta = CLOCKSOURCE_MASK(56); 862 + if (arch_timer_c3stop) 863 + clk->features |= CLOCK_EVT_FEAT_C3STOP; 864 + clk->name = "arch_sys_timer"; 865 + clk->rating = 450; 866 + clk->cpumask = cpumask_of(smp_processor_id()); 867 + clk->irq = arch_timer_ppi[arch_timer_uses_ppi]; 868 + switch 
(arch_timer_uses_ppi) { 869 + case ARCH_TIMER_VIRT_PPI: 870 + clk->set_state_shutdown = arch_timer_shutdown_virt; 871 + clk->set_state_oneshot_stopped = arch_timer_shutdown_virt; 872 + sne = erratum_handler(set_next_event_virt); 873 + break; 874 + case ARCH_TIMER_PHYS_SECURE_PPI: 875 + case ARCH_TIMER_PHYS_NONSECURE_PPI: 876 + case ARCH_TIMER_HYP_PPI: 877 + clk->set_state_shutdown = arch_timer_shutdown_phys; 878 + clk->set_state_oneshot_stopped = arch_timer_shutdown_phys; 879 + sne = erratum_handler(set_next_event_phys); 880 + break; 881 + default: 882 + BUG(); 729 883 } 884 + 885 + clk->set_next_event = sne; 886 + max_delta = __arch_timer_check_delta(); 730 887 731 888 clk->set_state_shutdown(clk); 732 889 ··· 828 1029 struct clock_event_device *clk = this_cpu_ptr(arch_timer_evt); 829 1030 u32 flags; 830 1031 831 - __arch_timer_setup(ARCH_TIMER_TYPE_CP15, clk); 1032 + __arch_timer_setup(clk); 832 1033 833 1034 flags = check_ppi_trigger(arch_timer_ppi[arch_timer_uses_ppi]); 834 1035 enable_percpu_irq(arch_timer_ppi[arch_timer_uses_ppi], flags); ··· 874 1075 pr_warn("frequency not available\n"); 875 1076 } 876 1077 877 - static void __init arch_timer_banner(unsigned type) 1078 + static void __init arch_timer_banner(void) 878 1079 { 879 - pr_info("%s%s%s timer(s) running at %lu.%02luMHz (%s%s%s).\n", 880 - type & ARCH_TIMER_TYPE_CP15 ? "cp15" : "", 881 - type == (ARCH_TIMER_TYPE_CP15 | ARCH_TIMER_TYPE_MEM) ? 882 - " and " : "", 883 - type & ARCH_TIMER_TYPE_MEM ? "mmio" : "", 1080 + pr_info("cp15 timer running at %lu.%02luMHz (%s).\n", 884 1081 (unsigned long)arch_timer_rate / 1000000, 885 1082 (unsigned long)(arch_timer_rate / 10000) % 100, 886 - type & ARCH_TIMER_TYPE_CP15 ? 887 - (arch_timer_uses_ppi == ARCH_TIMER_VIRT_PPI) ? "virt" : "phys" : 888 - "", 889 - type == (ARCH_TIMER_TYPE_CP15 | ARCH_TIMER_TYPE_MEM) ? "/" : "", 890 - type & ARCH_TIMER_TYPE_MEM ? 891 - arch_timer_mem_use_virtual ? 
"virt" : "phys" : 892 - ""); 1083 + (arch_timer_uses_ppi == ARCH_TIMER_VIRT_PPI) ? "virt" : "phys"); 893 1084 } 894 1085 895 1086 u32 arch_timer_get_rate(void) ··· 897 1108 return cpumask_test_cpu(raw_smp_processor_id(), &evtstrm_available); 898 1109 } 899 1110 900 - static noinstr u64 arch_counter_get_cntvct_mem(void) 901 - { 902 - return arch_counter_get_cnt_mem(arch_timer_mem, CNTVCT_LO); 903 - } 904 - 905 1111 static struct arch_timer_kvm_info arch_timer_kvm_info; 906 1112 907 1113 struct arch_timer_kvm_info *arch_timer_get_kvm_info(void) ··· 904 1120 return &arch_timer_kvm_info; 905 1121 } 906 1122 907 - static void __init arch_counter_register(unsigned type) 1123 + static void __init arch_counter_register(void) 908 1124 { 909 1125 u64 (*scr)(void); 1126 + u64 (*rd)(void); 910 1127 u64 start_count; 911 1128 int width; 912 1129 913 - /* Register the CP15 based counter if we have one */ 914 - if (type & ARCH_TIMER_TYPE_CP15) { 915 - u64 (*rd)(void); 916 - 917 - if ((IS_ENABLED(CONFIG_ARM64) && !is_hyp_mode_available()) || 918 - arch_timer_uses_ppi == ARCH_TIMER_VIRT_PPI) { 919 - if (arch_timer_counter_has_wa()) { 920 - rd = arch_counter_get_cntvct_stable; 921 - scr = raw_counter_get_cntvct_stable; 922 - } else { 923 - rd = arch_counter_get_cntvct; 924 - scr = arch_counter_get_cntvct; 925 - } 1130 + if ((IS_ENABLED(CONFIG_ARM64) && !is_hyp_mode_available()) || 1131 + arch_timer_uses_ppi == ARCH_TIMER_VIRT_PPI) { 1132 + if (arch_timer_counter_has_wa()) { 1133 + rd = arch_counter_get_cntvct_stable; 1134 + scr = raw_counter_get_cntvct_stable; 926 1135 } else { 927 - if (arch_timer_counter_has_wa()) { 928 - rd = arch_counter_get_cntpct_stable; 929 - scr = raw_counter_get_cntpct_stable; 930 - } else { 931 - rd = arch_counter_get_cntpct; 932 - scr = arch_counter_get_cntpct; 933 - } 1136 + rd = arch_counter_get_cntvct; 1137 + scr = arch_counter_get_cntvct; 934 1138 } 935 - 936 - arch_timer_read_counter = rd; 937 - clocksource_counter.vdso_clock_mode = vdso_default; 938 
1139 } else { 939 - arch_timer_read_counter = arch_counter_get_cntvct_mem; 940 - scr = arch_counter_get_cntvct_mem; 1140 + if (arch_timer_counter_has_wa()) { 1141 + rd = arch_counter_get_cntpct_stable; 1142 + scr = raw_counter_get_cntpct_stable; 1143 + } else { 1144 + rd = arch_counter_get_cntpct; 1145 + scr = arch_counter_get_cntpct; 1146 + } 941 1147 } 1148 + 1149 + arch_timer_read_counter = rd; 1150 + clocksource_counter.vdso_clock_mode = vdso_default; 942 1151 943 1152 width = arch_counter_get_width(); 944 1153 clocksource_counter.mask = CLOCKSOURCE_MASK(width); ··· 1080 1303 return err; 1081 1304 } 1082 1305 1083 - static int __init arch_timer_mem_register(void __iomem *base, unsigned int irq) 1084 - { 1085 - int ret; 1086 - irq_handler_t func; 1087 - 1088 - arch_timer_mem = kzalloc(sizeof(*arch_timer_mem), GFP_KERNEL); 1089 - if (!arch_timer_mem) 1090 - return -ENOMEM; 1091 - 1092 - arch_timer_mem->base = base; 1093 - arch_timer_mem->evt.irq = irq; 1094 - __arch_timer_setup(ARCH_TIMER_TYPE_MEM, &arch_timer_mem->evt); 1095 - 1096 - if (arch_timer_mem_use_virtual) 1097 - func = arch_timer_handler_virt_mem; 1098 - else 1099 - func = arch_timer_handler_phys_mem; 1100 - 1101 - ret = request_irq(irq, func, IRQF_TIMER, "arch_mem_timer", &arch_timer_mem->evt); 1102 - if (ret) { 1103 - pr_err("Failed to request mem timer irq\n"); 1104 - kfree(arch_timer_mem); 1105 - arch_timer_mem = NULL; 1106 - } 1107 - 1108 - return ret; 1109 - } 1110 - 1111 - static const struct of_device_id arch_timer_of_match[] __initconst = { 1112 - { .compatible = "arm,armv7-timer", }, 1113 - { .compatible = "arm,armv8-timer", }, 1114 - {}, 1115 - }; 1116 - 1117 - static const struct of_device_id arch_timer_mem_of_match[] __initconst = { 1118 - { .compatible = "arm,armv7-timer-mem", }, 1119 - {}, 1120 - }; 1121 - 1122 - static bool __init arch_timer_needs_of_probing(void) 1123 - { 1124 - struct device_node *dn; 1125 - bool needs_probing = false; 1126 - unsigned int mask = ARCH_TIMER_TYPE_CP15 | 
ARCH_TIMER_TYPE_MEM; 1127 - 1128 - /* We have two timers, and both device-tree nodes are probed. */ 1129 - if ((arch_timers_present & mask) == mask) 1130 - return false; 1131 - 1132 - /* 1133 - * Only one type of timer is probed, 1134 - * check if we have another type of timer node in device-tree. 1135 - */ 1136 - if (arch_timers_present & ARCH_TIMER_TYPE_CP15) 1137 - dn = of_find_matching_node(NULL, arch_timer_mem_of_match); 1138 - else 1139 - dn = of_find_matching_node(NULL, arch_timer_of_match); 1140 - 1141 - if (dn && of_device_is_available(dn)) 1142 - needs_probing = true; 1143 - 1144 - of_node_put(dn); 1145 - 1146 - return needs_probing; 1147 - } 1148 - 1149 1306 static int __init arch_timer_common_init(void) 1150 1307 { 1151 - arch_timer_banner(arch_timers_present); 1152 - arch_counter_register(arch_timers_present); 1308 + arch_timer_banner(); 1309 + arch_counter_register(); 1153 1310 return arch_timer_arch_init(); 1154 1311 } 1155 1312 ··· 1132 1421 u32 rate; 1133 1422 bool has_names; 1134 1423 1135 - if (arch_timers_present & ARCH_TIMER_TYPE_CP15) { 1424 + if (arch_timer_evt) { 1136 1425 pr_warn("multiple nodes in dt, skipping\n"); 1137 1426 return 0; 1138 1427 } 1139 - 1140 - arch_timers_present |= ARCH_TIMER_TYPE_CP15; 1141 1428 1142 1429 has_names = of_property_present(np, "interrupt-names"); 1143 1430 ··· 1181 1472 if (ret) 1182 1473 return ret; 1183 1474 1184 - if (arch_timer_needs_of_probing()) 1185 - return 0; 1186 - 1187 1475 return arch_timer_common_init(); 1188 1476 } 1189 1477 TIMER_OF_DECLARE(armv7_arch_timer, "arm,armv7-timer", arch_timer_of_init); 1190 1478 TIMER_OF_DECLARE(armv8_arch_timer, "arm,armv8-timer", arch_timer_of_init); 1191 1479 1192 - static u32 __init 1193 - arch_timer_mem_frame_get_cntfrq(struct arch_timer_mem_frame *frame) 1194 - { 1195 - void __iomem *base; 1196 - u32 rate; 1197 - 1198 - base = ioremap(frame->cntbase, frame->size); 1199 - if (!base) { 1200 - pr_err("Unable to map frame @ %pa\n", &frame->cntbase); 1201 - 
return 0; 1202 - } 1203 - 1204 - rate = readl_relaxed(base + CNTFRQ); 1205 - 1206 - iounmap(base); 1207 - 1208 - return rate; 1209 - } 1210 - 1211 - static struct arch_timer_mem_frame * __init 1212 - arch_timer_mem_find_best_frame(struct arch_timer_mem *timer_mem) 1213 - { 1214 - struct arch_timer_mem_frame *frame, *best_frame = NULL; 1215 - void __iomem *cntctlbase; 1216 - u32 cnttidr; 1217 - int i; 1218 - 1219 - cntctlbase = ioremap(timer_mem->cntctlbase, timer_mem->size); 1220 - if (!cntctlbase) { 1221 - pr_err("Can't map CNTCTLBase @ %pa\n", 1222 - &timer_mem->cntctlbase); 1223 - return NULL; 1224 - } 1225 - 1226 - cnttidr = readl_relaxed(cntctlbase + CNTTIDR); 1227 - 1228 - /* 1229 - * Try to find a virtual capable frame. Otherwise fall back to a 1230 - * physical capable frame. 1231 - */ 1232 - for (i = 0; i < ARCH_TIMER_MEM_MAX_FRAMES; i++) { 1233 - u32 cntacr = CNTACR_RFRQ | CNTACR_RWPT | CNTACR_RPCT | 1234 - CNTACR_RWVT | CNTACR_RVOFF | CNTACR_RVCT; 1235 - 1236 - frame = &timer_mem->frame[i]; 1237 - if (!frame->valid) 1238 - continue; 1239 - 1240 - /* Try enabling everything, and see what sticks */ 1241 - writel_relaxed(cntacr, cntctlbase + CNTACR(i)); 1242 - cntacr = readl_relaxed(cntctlbase + CNTACR(i)); 1243 - 1244 - if ((cnttidr & CNTTIDR_VIRT(i)) && 1245 - !(~cntacr & (CNTACR_RWVT | CNTACR_RVCT))) { 1246 - best_frame = frame; 1247 - arch_timer_mem_use_virtual = true; 1248 - break; 1249 - } 1250 - 1251 - if (~cntacr & (CNTACR_RWPT | CNTACR_RPCT)) 1252 - continue; 1253 - 1254 - best_frame = frame; 1255 - } 1256 - 1257 - iounmap(cntctlbase); 1258 - 1259 - return best_frame; 1260 - } 1261 - 1262 - static int __init 1263 - arch_timer_mem_frame_register(struct arch_timer_mem_frame *frame) 1264 - { 1265 - void __iomem *base; 1266 - int ret, irq; 1267 - 1268 - if (arch_timer_mem_use_virtual) 1269 - irq = frame->virt_irq; 1270 - else 1271 - irq = frame->phys_irq; 1272 - 1273 - if (!irq) { 1274 - pr_err("Frame missing %s irq.\n", 1275 - 
arch_timer_mem_use_virtual ? "virt" : "phys"); 1276 - return -EINVAL; 1277 - } 1278 - 1279 - if (!request_mem_region(frame->cntbase, frame->size, 1280 - "arch_mem_timer")) 1281 - return -EBUSY; 1282 - 1283 - base = ioremap(frame->cntbase, frame->size); 1284 - if (!base) { 1285 - pr_err("Can't map frame's registers\n"); 1286 - return -ENXIO; 1287 - } 1288 - 1289 - ret = arch_timer_mem_register(base, irq); 1290 - if (ret) { 1291 - iounmap(base); 1292 - return ret; 1293 - } 1294 - 1295 - arch_timers_present |= ARCH_TIMER_TYPE_MEM; 1296 - 1297 - return 0; 1298 - } 1299 - 1300 - static int __init arch_timer_mem_of_init(struct device_node *np) 1301 - { 1302 - struct arch_timer_mem *timer_mem; 1303 - struct arch_timer_mem_frame *frame; 1304 - struct resource res; 1305 - int ret = -EINVAL; 1306 - u32 rate; 1307 - 1308 - timer_mem = kzalloc(sizeof(*timer_mem), GFP_KERNEL); 1309 - if (!timer_mem) 1310 - return -ENOMEM; 1311 - 1312 - if (of_address_to_resource(np, 0, &res)) 1313 - goto out; 1314 - timer_mem->cntctlbase = res.start; 1315 - timer_mem->size = resource_size(&res); 1316 - 1317 - for_each_available_child_of_node_scoped(np, frame_node) { 1318 - u32 n; 1319 - struct arch_timer_mem_frame *frame; 1320 - 1321 - if (of_property_read_u32(frame_node, "frame-number", &n)) { 1322 - pr_err(FW_BUG "Missing frame-number.\n"); 1323 - goto out; 1324 - } 1325 - if (n >= ARCH_TIMER_MEM_MAX_FRAMES) { 1326 - pr_err(FW_BUG "Wrong frame-number, only 0-%u are permitted.\n", 1327 - ARCH_TIMER_MEM_MAX_FRAMES - 1); 1328 - goto out; 1329 - } 1330 - frame = &timer_mem->frame[n]; 1331 - 1332 - if (frame->valid) { 1333 - pr_err(FW_BUG "Duplicated frame-number.\n"); 1334 - goto out; 1335 - } 1336 - 1337 - if (of_address_to_resource(frame_node, 0, &res)) 1338 - goto out; 1339 - 1340 - frame->cntbase = res.start; 1341 - frame->size = resource_size(&res); 1342 - 1343 - frame->virt_irq = irq_of_parse_and_map(frame_node, 1344 - ARCH_TIMER_VIRT_SPI); 1345 - frame->phys_irq = 
irq_of_parse_and_map(frame_node, 1346 - ARCH_TIMER_PHYS_SPI); 1347 - 1348 - frame->valid = true; 1349 - } 1350 - 1351 - frame = arch_timer_mem_find_best_frame(timer_mem); 1352 - if (!frame) { 1353 - pr_err("Unable to find a suitable frame in timer @ %pa\n", 1354 - &timer_mem->cntctlbase); 1355 - ret = -EINVAL; 1356 - goto out; 1357 - } 1358 - 1359 - rate = arch_timer_mem_frame_get_cntfrq(frame); 1360 - arch_timer_of_configure_rate(rate, np); 1361 - 1362 - ret = arch_timer_mem_frame_register(frame); 1363 - if (!ret && !arch_timer_needs_of_probing()) 1364 - ret = arch_timer_common_init(); 1365 - out: 1366 - kfree(timer_mem); 1367 - return ret; 1368 - } 1369 - TIMER_OF_DECLARE(armv7_arch_timer_mem, "arm,armv7-timer-mem", 1370 - arch_timer_mem_of_init); 1371 - 1372 1480 #ifdef CONFIG_ACPI_GTDT 1373 - static int __init 1374 - arch_timer_mem_verify_cntfrq(struct arch_timer_mem *timer_mem) 1375 - { 1376 - struct arch_timer_mem_frame *frame; 1377 - u32 rate; 1378 - int i; 1379 - 1380 - for (i = 0; i < ARCH_TIMER_MEM_MAX_FRAMES; i++) { 1381 - frame = &timer_mem->frame[i]; 1382 - 1383 - if (!frame->valid) 1384 - continue; 1385 - 1386 - rate = arch_timer_mem_frame_get_cntfrq(frame); 1387 - if (rate == arch_timer_rate) 1388 - continue; 1389 - 1390 - pr_err(FW_BUG "CNTFRQ mismatch: frame @ %pa: (0x%08lx), CPU: (0x%08lx)\n", 1391 - &frame->cntbase, 1392 - (unsigned long)rate, (unsigned long)arch_timer_rate); 1393 - 1394 - return -EINVAL; 1395 - } 1396 - 1397 - return 0; 1398 - } 1399 - 1400 - static int __init arch_timer_mem_acpi_init(int platform_timer_count) 1401 - { 1402 - struct arch_timer_mem *timers, *timer; 1403 - struct arch_timer_mem_frame *frame, *best_frame = NULL; 1404 - int timer_count, i, ret = 0; 1405 - 1406 - timers = kcalloc(platform_timer_count, sizeof(*timers), 1407 - GFP_KERNEL); 1408 - if (!timers) 1409 - return -ENOMEM; 1410 - 1411 - ret = acpi_arch_timer_mem_init(timers, &timer_count); 1412 - if (ret || !timer_count) 1413 - goto out; 1414 - 1415 - /* 1416 
- * While unlikely, it's theoretically possible that none of the frames 1417 - * in a timer expose the combination of feature we want. 1418 - */ 1419 - for (i = 0; i < timer_count; i++) { 1420 - timer = &timers[i]; 1421 - 1422 - frame = arch_timer_mem_find_best_frame(timer); 1423 - if (!best_frame) 1424 - best_frame = frame; 1425 - 1426 - ret = arch_timer_mem_verify_cntfrq(timer); 1427 - if (ret) { 1428 - pr_err("Disabling MMIO timers due to CNTFRQ mismatch\n"); 1429 - goto out; 1430 - } 1431 - 1432 - if (!best_frame) /* implies !frame */ 1433 - /* 1434 - * Only complain about missing suitable frames if we 1435 - * haven't already found one in a previous iteration. 1436 - */ 1437 - pr_err("Unable to find a suitable frame in timer @ %pa\n", 1438 - &timer->cntctlbase); 1439 - } 1440 - 1441 - if (best_frame) 1442 - ret = arch_timer_mem_frame_register(best_frame); 1443 - out: 1444 - kfree(timers); 1445 - return ret; 1446 - } 1447 - 1448 - /* Initialize per-processor generic timer and memory-mapped timer(if present) */ 1449 1481 static int __init arch_timer_acpi_init(struct acpi_table_header *table) 1450 1482 { 1451 - int ret, platform_timer_count; 1483 + int ret; 1452 1484 1453 - if (arch_timers_present & ARCH_TIMER_TYPE_CP15) { 1485 + if (arch_timer_evt) { 1454 1486 pr_warn("already initialized, skipping\n"); 1455 1487 return -EINVAL; 1456 1488 } 1457 1489 1458 - arch_timers_present |= ARCH_TIMER_TYPE_CP15; 1459 - 1460 - ret = acpi_gtdt_init(table, &platform_timer_count); 1490 + ret = acpi_gtdt_init(table, NULL); 1461 1491 if (ret) 1462 1492 return ret; 1463 1493 ··· 1237 1789 ret = arch_timer_register(); 1238 1790 if (ret) 1239 1791 return ret; 1240 - 1241 - if (platform_timer_count && 1242 - arch_timer_mem_acpi_init(platform_timer_count)) 1243 - pr_err("Failed to initialize memory-mapped timer.\n"); 1244 1792 1245 1793 return arch_timer_common_init(); 1246 1794 }
-5
include/clocksource/arm_arch_timer.h
··· 9 9 #include <linux/timecounter.h> 10 10 #include <linux/types.h> 11 11 12 - #define ARCH_TIMER_TYPE_CP15 BIT(0) 13 - #define ARCH_TIMER_TYPE_MEM BIT(1) 14 - 15 12 #define ARCH_TIMER_CTRL_ENABLE (1 << 0) 16 13 #define ARCH_TIMER_CTRL_IT_MASK (1 << 1) 17 14 #define ARCH_TIMER_CTRL_IT_STAT (1 << 2) ··· 48 51 49 52 #define ARCH_TIMER_PHYS_ACCESS 0 50 53 #define ARCH_TIMER_VIRT_ACCESS 1 51 - #define ARCH_TIMER_MEM_PHYS_ACCESS 2 52 - #define ARCH_TIMER_MEM_VIRT_ACCESS 3 53 54 54 55 #define ARCH_TIMER_MEM_MAX_FRAMES 8 55 56