Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

Merge tag 'riscv-for-linus-6.12-mw1' of git://git.kernel.org/pub/scm/linux/kernel/git/riscv/linux

Pull RISC-V updates from Palmer Dabbelt:

- Support using Zkr to seed KASLR

- Support IPI-triggered CPU backtracing

- Support for generic CPU vulnerabilities reporting to userspace

- A few cleanups for missing licenses

- The size limit on the XIP kernel has been removed

- Support for tracing userspace stacks

- Support for the Svvptc extension

- Various cleanups and fixes throughout the tree

* tag 'riscv-for-linus-6.12-mw1' of git://git.kernel.org/pub/scm/linux/kernel/git/riscv/linux: (47 commits)
crash: Fix riscv64 crash memory reserve dead loop
perf/riscv-sbi: Add platform specific firmware event handling
tools: Optimize ring buffer for riscv
tools: Add riscv barrier implementation
RISC-V: Don't have MAX_PHYSMEM_BITS exceed phys_addr_t
ACPI: NUMA: initialize all values of acpi_early_node_map to NUMA_NO_NODE
riscv: Enable bitops instrumentation
riscv: Omit optimized string routines when using KASAN
ACPI: RISCV: Make acpi_numa_get_nid() to be static
riscv: Randomize lower bits of stack address
selftests: riscv: Allow mmap test to compile on 32-bit
riscv: Make riscv_isa_vendor_ext_andes array static
riscv: Use LIST_HEAD() to simplify code
riscv: defconfig: Disable RZ/Five peripheral support
RISC-V: Implement kgdb_roundup_cpus() to enable future NMI Roundup
riscv: avoid Imbalance in RAS
riscv: cacheinfo: Add back init_cache_level() function
riscv: Remove unused _TIF_WORK_MASK
drivers/perf: riscv: Remove redundant macro check
riscv: define ILLEGAL_POINTER_VALUE for 64bit
...

+701 -168
+7
Documentation/devicetree/bindings/riscv/extensions.yaml
··· 171 171 memory types as ratified in the 20191213 version of the privileged 172 172 ISA specification. 173 173 174 + - const: svvptc 175 + description: 176 + The standard Svvptc supervisor-level extension for 177 + address-translation cache behaviour with respect to invalid entries 178 + as ratified at commit 4a69197e5617 ("Update to ratified state") of 179 + riscv-svvptc. 180 + 174 181 - const: zacas 175 182 description: | 176 183 The Zacas extension for Atomic Compare-and-Swap (CAS) instructions
+8
arch/riscv/Kconfig
··· 70 70 select ARCH_USE_CMPXCHG_LOCKREF if 64BIT 71 71 select ARCH_USE_MEMTEST 72 72 select ARCH_USE_QUEUED_RWLOCKS 73 + select ARCH_USE_SYM_ANNOTATIONS 73 74 select ARCH_USES_CFI_TRAPS if CFI_CLANG 74 75 select ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH if MMU 75 76 select ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT if MMU ··· 95 94 select GENERIC_ATOMIC64 if !64BIT 96 95 select GENERIC_CLOCKEVENTS_BROADCAST if SMP 97 96 select GENERIC_CPU_DEVICES 97 + select GENERIC_CPU_VULNERABILITIES 98 98 select GENERIC_EARLY_IOREMAP 99 99 select GENERIC_ENTRY 100 100 select GENERIC_GETTIMEOFDAY if HAVE_GENERIC_VDSO ··· 206 204 select THREAD_INFO_IN_TASK 207 205 select TRACE_IRQFLAGS_SUPPORT 208 206 select UACCESS_MEMCPY if !MMU 207 + select USER_STACKTRACE_SUPPORT 209 208 select ZONE_DMA32 if 64BIT 210 209 211 210 config CLANG_SUPPORTS_DYNAMIC_FTRACE ··· 325 322 326 323 config FIX_EARLYCON_MEM 327 324 def_bool MMU 325 + 326 + config ILLEGAL_POINTER_VALUE 327 + hex 328 + default 0 if 32BIT 329 + default 0xdead000000000000 if 64BIT 328 330 329 331 config PGTABLE_LEVELS 330 332 int
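For context on the new ILLEGAL_POINTER_VALUE symbol: it feeds the generic pointer-poisoning machinery. A hedged sketch of how include/linux/poison.h consumes it (offsets quoted from recent upstream, shown for illustration only): because 0xdead000000000000 is not a canonical sv39/sv48/sv57 address, dereferencing a list entry that list_del() has poisoned faults immediately instead of silently reading reusable memory.

/* Sketch of include/linux/poison.h (abridged): the arch-selected
 * ILLEGAL_POINTER_VALUE biases the poison pointers out of the mappable
 * address space. */
#ifdef CONFIG_ILLEGAL_POINTER_VALUE
# define POISON_POINTER_DELTA _AC(CONFIG_ILLEGAL_POINTER_VALUE, UL)
#else
# define POISON_POINTER_DELTA 0
#endif
#define LIST_POISON1 ((void *) 0x100 + POISON_POINTER_DELTA)
#define LIST_POISON2 ((void *) 0x122 + POISON_POINTER_DELTA)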
+1 -11
arch/riscv/configs/defconfig
··· 137 137 CONFIG_MACB=y 138 138 CONFIG_E1000E=y 139 139 CONFIG_R8169=y 140 - CONFIG_RAVB=y 141 140 CONFIG_STMMAC_ETH=m 142 141 CONFIG_MICREL_PHY=y 143 142 CONFIG_MICROSEMI_PHY=y 144 143 CONFIG_MOTORCOMM_PHY=y 145 - CONFIG_CAN_RCAR_CANFD=m 146 144 CONFIG_INPUT_MOUSEDEV=y 147 145 CONFIG_KEYBOARD_SUN4I_LRADC=m 148 146 CONFIG_SERIAL_8250=y ··· 148 150 CONFIG_SERIAL_8250_DW=y 149 151 CONFIG_SERIAL_OF_PLATFORM=y 150 152 CONFIG_SERIAL_EARLYCON_RISCV_SBI=y 151 - CONFIG_SERIAL_SH_SCI=y 152 153 CONFIG_VIRTIO_CONSOLE=y 153 154 CONFIG_HW_RANDOM=y 154 155 CONFIG_HW_RANDOM_VIRTIO=y ··· 157 160 CONFIG_I2C_DESIGNWARE_CORE=y 158 161 CONFIG_I2C_DESIGNWARE_PLATFORM=y 159 162 CONFIG_I2C_MV64XXX=m 160 - CONFIG_I2C_RIIC=y 161 163 CONFIG_SPI=y 162 164 CONFIG_SPI_CADENCE_QUADSPI=m 163 165 CONFIG_SPI_PL022=m 164 - CONFIG_SPI_RSPI=m 165 166 CONFIG_SPI_SIFIVE=y 166 167 CONFIG_SPI_SUN6I=y 167 168 # CONFIG_PTP_1588_CLOCK is not set ··· 172 177 CONFIG_SENSORS_SFCTEMP=m 173 178 CONFIG_CPU_THERMAL=y 174 179 CONFIG_DEVFREQ_THERMAL=y 175 - CONFIG_RZG2L_THERMAL=y 176 180 CONFIG_WATCHDOG=y 177 181 CONFIG_SUNXI_WATCHDOG=y 178 182 CONFIG_MFD_AXP20X_I2C=y ··· 200 206 CONFIG_USB_OTG=y 201 207 CONFIG_USB_XHCI_HCD=y 202 208 CONFIG_USB_XHCI_PLATFORM=y 209 + # CONFIG_USB_XHCI_RCAR is not set 203 210 CONFIG_USB_EHCI_HCD=y 204 211 CONFIG_USB_EHCI_HCD_PLATFORM=y 205 212 CONFIG_USB_OHCI_HCD=y 206 213 CONFIG_USB_OHCI_HCD_PLATFORM=y 207 - CONFIG_USB_RENESAS_USBHS=m 208 214 CONFIG_USB_STORAGE=y 209 215 CONFIG_USB_UAS=y 210 216 CONFIG_USB_CDNS_SUPPORT=m ··· 216 222 CONFIG_USB_MUSB_SUNXI=m 217 223 CONFIG_NOP_USB_XCEIV=m 218 224 CONFIG_USB_GADGET=y 219 - CONFIG_USB_RENESAS_USBHS_UDC=m 220 225 CONFIG_USB_CONFIGFS=m 221 226 CONFIG_USB_CONFIGFS_SERIAL=y 222 227 CONFIG_USB_CONFIGFS_ACM=y ··· 233 240 CONFIG_MMC_SDHCI_OF_DWCMSHC=y 234 241 CONFIG_MMC_SDHCI_CADENCE=y 235 242 CONFIG_MMC_SPI=y 236 - CONFIG_MMC_SDHI=y 237 243 CONFIG_MMC_DW=y 238 244 CONFIG_MMC_DW_STARFIVE=y 239 245 CONFIG_MMC_SUNXI=y ··· 250 258 CONFIG_CLK_SOPHGO_SG2042_CLKGEN=y 251 259 CONFIG_CLK_SOPHGO_SG2042_RPGATE=y 252 260 CONFIG_SUN8I_DE2_CCU=m 253 - CONFIG_RENESAS_OSTM=y 254 261 CONFIG_SUN50I_IOMMU=y 255 262 CONFIG_RPMSG_CHAR=y 256 263 CONFIG_RPMSG_CTRL=y ··· 257 266 CONFIG_PM_DEVFREQ=y 258 267 CONFIG_IIO=y 259 268 CONFIG_PHY_SUN4I_USB=m 260 - CONFIG_PHY_RCAR_GEN3_USB2=y 261 269 CONFIG_PHY_STARFIVE_JH7110_DPHY_RX=m 262 270 CONFIG_PHY_STARFIVE_JH7110_PCIE=m 263 271 CONFIG_PHY_STARFIVE_JH7110_USB=m
+4 -4
arch/riscv/errata/sifive/errata_cip_453.S
··· 21 21 1: 22 22 .endm 23 23 24 - ENTRY(sifive_cip_453_page_fault_trp) 24 + SYM_FUNC_START(sifive_cip_453_page_fault_trp) 25 25 ADD_SIGN_EXT a0, t0, t1 26 26 #ifdef CONFIG_MMU 27 27 la t0, do_page_fault ··· 29 29 la t0, do_trap_unknown 30 30 #endif 31 31 jr t0 32 - END(sifive_cip_453_page_fault_trp) 32 + SYM_FUNC_END(sifive_cip_453_page_fault_trp) 33 33 34 - ENTRY(sifive_cip_453_insn_fault_trp) 34 + SYM_FUNC_START(sifive_cip_453_insn_fault_trp) 35 35 ADD_SIGN_EXT a0, t0, t1 36 36 la t0, do_trap_insn_fault 37 37 jr t0 38 - END(sifive_cip_453_insn_fault_trp) 38 + SYM_FUNC_END(sifive_cip_453_insn_fault_trp)
-2
arch/riscv/include/asm/acpi.h
··· 91 91 #endif /* CONFIG_ACPI */ 92 92 93 93 #ifdef CONFIG_ACPI_NUMA 94 - int acpi_numa_get_nid(unsigned int cpu); 95 94 void acpi_map_cpus_to_nodes(void); 96 95 #else 97 - static inline int acpi_numa_get_nid(unsigned int cpu) { return NUMA_NO_NODE; } 98 96 static inline void acpi_map_cpus_to_nodes(void) { } 99 97 #endif /* CONFIG_ACPI_NUMA */ 100 98
+23 -20
arch/riscv/include/asm/bitops.h
··· 222 222 #define __NOT(x) (~(x)) 223 223 224 224 /** 225 - * test_and_set_bit - Set a bit and return its old value 225 + * arch_test_and_set_bit - Set a bit and return its old value 226 226 * @nr: Bit to set 227 227 * @addr: Address to count from 228 228 * 229 229 * This operation may be reordered on other architectures than x86. 230 230 */ 231 - static inline int test_and_set_bit(int nr, volatile unsigned long *addr) 231 + static inline int arch_test_and_set_bit(int nr, volatile unsigned long *addr) 232 232 { 233 233 return __test_and_op_bit(or, __NOP, nr, addr); 234 234 } 235 235 236 236 /** 237 - * test_and_clear_bit - Clear a bit and return its old value 237 + * arch_test_and_clear_bit - Clear a bit and return its old value 238 238 * @nr: Bit to clear 239 239 * @addr: Address to count from 240 240 * 241 241 * This operation can be reordered on other architectures other than x86. 242 242 */ 243 - static inline int test_and_clear_bit(int nr, volatile unsigned long *addr) 243 + static inline int arch_test_and_clear_bit(int nr, volatile unsigned long *addr) 244 244 { 245 245 return __test_and_op_bit(and, __NOT, nr, addr); 246 246 } 247 247 248 248 /** 249 - * test_and_change_bit - Change a bit and return its old value 249 + * arch_test_and_change_bit - Change a bit and return its old value 250 250 * @nr: Bit to change 251 251 * @addr: Address to count from 252 252 * 253 253 * This operation is atomic and cannot be reordered. 254 254 * It also implies a memory barrier. 255 255 */ 256 - static inline int test_and_change_bit(int nr, volatile unsigned long *addr) 256 + static inline int arch_test_and_change_bit(int nr, volatile unsigned long *addr) 257 257 { 258 258 return __test_and_op_bit(xor, __NOP, nr, addr); 259 259 } 260 260 261 261 /** 262 - * set_bit - Atomically set a bit in memory 262 + * arch_set_bit - Atomically set a bit in memory 263 263 * @nr: the bit to set 264 264 * @addr: the address to start counting from 265 265 * ··· 270 270 * Note that @nr may be almost arbitrarily large; this function is not 271 271 * restricted to acting on a single-word quantity. 272 272 */ 273 - static inline void set_bit(int nr, volatile unsigned long *addr) 273 + static inline void arch_set_bit(int nr, volatile unsigned long *addr) 274 274 { 275 275 __op_bit(or, __NOP, nr, addr); 276 276 } 277 277 278 278 /** 279 - * clear_bit - Clears a bit in memory 279 + * arch_clear_bit - Clears a bit in memory 280 280 * @nr: Bit to clear 281 281 * @addr: Address to start counting from 282 282 * ··· 284 284 * on non x86 architectures, so if you are writing portable code, 285 285 * make sure not to rely on its reordering guarantees. 286 286 */ 287 - static inline void clear_bit(int nr, volatile unsigned long *addr) 287 + static inline void arch_clear_bit(int nr, volatile unsigned long *addr) 288 288 { 289 289 __op_bit(and, __NOT, nr, addr); 290 290 } 291 291 292 292 /** 293 - * change_bit - Toggle a bit in memory 293 + * arch_change_bit - Toggle a bit in memory 294 294 * @nr: Bit to change 295 295 * @addr: Address to start counting from 296 296 * ··· 298 298 * Note that @nr may be almost arbitrarily large; this function is not 299 299 * restricted to acting on a single-word quantity. 
300 300 */ 301 - static inline void change_bit(int nr, volatile unsigned long *addr) 301 + static inline void arch_change_bit(int nr, volatile unsigned long *addr) 302 302 { 303 303 __op_bit(xor, __NOP, nr, addr); 304 304 } 305 305 306 306 /** 307 - * test_and_set_bit_lock - Set a bit and return its old value, for lock 307 + * arch_test_and_set_bit_lock - Set a bit and return its old value, for lock 308 308 * @nr: Bit to set 309 309 * @addr: Address to count from 310 310 * 311 311 * This operation is atomic and provides acquire barrier semantics. 312 312 * It can be used to implement bit locks. 313 313 */ 314 - static inline int test_and_set_bit_lock( 314 + static inline int arch_test_and_set_bit_lock( 315 315 unsigned long nr, volatile unsigned long *addr) 316 316 { 317 317 return __test_and_op_bit_ord(or, __NOP, nr, addr, .aq); 318 318 } 319 319 320 320 /** 321 - * clear_bit_unlock - Clear a bit in memory, for unlock 321 + * arch_clear_bit_unlock - Clear a bit in memory, for unlock 322 322 * @nr: the bit to set 323 323 * @addr: the address to start counting from 324 324 * 325 325 * This operation is atomic and provides release barrier semantics. 326 326 */ 327 - static inline void clear_bit_unlock( 327 + static inline void arch_clear_bit_unlock( 328 328 unsigned long nr, volatile unsigned long *addr) 329 329 { 330 330 __op_bit_ord(and, __NOT, nr, addr, .rl); 331 331 } 332 332 333 333 /** 334 - * __clear_bit_unlock - Clear a bit in memory, for unlock 334 + * arch___clear_bit_unlock - Clear a bit in memory, for unlock 335 335 * @nr: the bit to set 336 336 * @addr: the address to start counting from 337 337 * ··· 345 345 * non-atomic property here: it's a lot more instructions and we still have to 346 346 * provide release semantics anyway. 347 347 */ 348 - static inline void __clear_bit_unlock( 348 + static inline void arch___clear_bit_unlock( 349 349 unsigned long nr, volatile unsigned long *addr) 350 350 { 351 - clear_bit_unlock(nr, addr); 351 + arch_clear_bit_unlock(nr, addr); 352 352 } 353 353 354 - static inline bool xor_unlock_is_negative_byte(unsigned long mask, 354 + static inline bool arch_xor_unlock_is_negative_byte(unsigned long mask, 355 355 volatile unsigned long *addr) 356 356 { 357 357 unsigned long res; ··· 368 368 #undef __NOP 369 369 #undef __NOT 370 370 #undef __AMO 371 + 372 + #include <asm-generic/bitops/instrumented-atomic.h> 373 + #include <asm-generic/bitops/instrumented-lock.h> 371 374 372 375 #include <asm-generic/bitops/non-atomic.h> 373 376 #include <asm-generic/bitops/le.h>
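With the primitives renamed to arch_*(), the unprefixed names now come from the asm-generic instrumented wrappers, which add the KASAN/KCSAN checks that bitops instrumentation relies on. A minimal userspace sketch of that layering (names mirror the kernel's, bodies simplified; not the kernel implementation):

#include <stdio.h>
#include <stddef.h>

#define BITS_PER_LONG (8 * sizeof(unsigned long))

/* Stand-in for the KASAN/KCSAN hook the real wrappers invoke. */
static void instrument_atomic_write(const volatile void *addr, size_t size)
{
        (void)addr;
        printf("checked %zu-byte atomic write\n", size);
}

/* The arch-provided primitive (the AMO-based version in the hunk above). */
static void arch_set_bit(int nr, volatile unsigned long *addr)
{
        addr[nr / BITS_PER_LONG] |= 1UL << (nr % BITS_PER_LONG);
}

/* What asm-generic/bitops/instrumented-atomic.h effectively supplies:
 * instrument the access, then defer to the arch_*() implementation. */
static void set_bit(int nr, volatile unsigned long *addr)
{
        instrument_atomic_write(addr + nr / BITS_PER_LONG, sizeof(unsigned long));
        arch_set_bit(nr, addr);
}

int main(void)
{
        unsigned long word = 0;

        set_bit(3, &word);
        printf("word = 0x%lx\n", word); /* 0x8 */
        return 0;
}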
+17 -1
arch/riscv/include/asm/cacheflush.h
··· 46 46 } while (0) 47 47 48 48 #ifdef CONFIG_64BIT 49 - #define flush_cache_vmap(start, end) flush_tlb_kernel_range(start, end) 49 + extern u64 new_vmalloc[NR_CPUS / sizeof(u64) + 1]; 50 + extern char _end[]; 51 + #define flush_cache_vmap flush_cache_vmap 52 + static inline void flush_cache_vmap(unsigned long start, unsigned long end) 53 + { 54 + if (is_vmalloc_or_module_addr((void *)start)) { 55 + int i; 56 + 57 + /* 58 + * We don't care if concurrently a cpu resets this value since 59 + * the only place this can happen is in handle_exception() where 60 + * an sfence.vma is emitted. 61 + */ 62 + for (i = 0; i < ARRAY_SIZE(new_vmalloc); ++i) 63 + new_vmalloc[i] = -1ULL; 64 + } 65 + } 50 66 #define flush_cache_vmap_early(start, end) local_flush_tlb_kernel_range(start, end) 51 67 #endif 52 68
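The protocol in brief: every new vmalloc or module mapping marks all CPUs as potentially holding a stale translation, and the first kernel-address fault on a CPU clears its own mark and retries the access instead of taking a real fault. A userspace model of that handshake (sizing and atomics simplified; the in-kernel consumer is the new_vmalloc_check macro added to entry.S further down):

#include <stdatomic.h>
#include <stdbool.h>

#define NR_CPUS  128
#define NR_WORDS (NR_CPUS / 64 + 1)

static _Atomic unsigned long long new_vmalloc[NR_WORDS];

/* Writer side, as in flush_cache_vmap(): flag every cpu. */
static void note_new_vmalloc_mapping(void)
{
        for (int i = 0; i < NR_WORDS; i++)
                atomic_store(&new_vmalloc[i], -1ULL);
}

/* Reader side, modeling the trap entry: if this cpu's bit is set,
 * clear it (an amoxor.d in the real code) and retry the faulting
 * access rather than reporting a page fault. */
static bool fault_is_stale_vmalloc(unsigned int cpu)
{
        unsigned long long mask = 1ULL << (cpu % 64);

        if (!(atomic_load(&new_vmalloc[cpu / 64]) & mask))
                return false;
        atomic_fetch_xor(&new_vmalloc[cpu / 64], mask);
        return true;
}

int main(void)
{
        note_new_vmalloc_mapping();
        return !fault_is_stale_vmalloc(3); /* exits 0: bit was set, then cleared */
}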
+8
arch/riscv/include/asm/exec.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0-only */ 2 + 3 + #ifndef __ASM_EXEC_H 4 + #define __ASM_EXEC_H 5 + 6 + extern unsigned long arch_align_stack(unsigned long sp); 7 + 8 + #endif /* __ASM_EXEC_H */
+1
arch/riscv/include/asm/fence.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0-only */ 1 2 #ifndef _ASM_RISCV_FENCE_H 2 3 #define _ASM_RISCV_FENCE_H 3 4
+1
arch/riscv/include/asm/hwcap.h
··· 92 92 #define RISCV_ISA_EXT_ZCF 83 93 93 #define RISCV_ISA_EXT_ZCMOP 84 94 94 #define RISCV_ISA_EXT_ZAWRS 85 95 + #define RISCV_ISA_EXT_SVVPTC 86 95 96 96 97 #define RISCV_ISA_EXT_XLINUXENVCFG 127 97 98
+5
arch/riscv/include/asm/irq.h
··· 14 14 15 15 #define INVALID_CONTEXT UINT_MAX 16 16 17 + #ifdef CONFIG_SMP 18 + void arch_trigger_cpumask_backtrace(const cpumask_t *mask, int exclude_cpu); 19 + #define arch_trigger_cpumask_backtrace arch_trigger_cpumask_backtrace 20 + #endif 21 + 17 22 void riscv_set_intc_hwnode_fn(struct fwnode_handle *(*fn)(void)); 18 23 19 24 struct fwnode_handle *riscv_get_intc_hwnode(void);
+21 -8
arch/riscv/include/asm/page.h
··· 112 112 /* Offset between linear mapping virtual address and kernel load address */ 113 113 unsigned long va_pa_offset; 114 114 /* Offset between kernel mapping virtual address and kernel load address */ 115 - unsigned long va_kernel_pa_offset; 116 - unsigned long va_kernel_xip_pa_offset; 117 115 #ifdef CONFIG_XIP_KERNEL 116 + unsigned long va_kernel_xip_text_pa_offset; 117 + unsigned long va_kernel_xip_data_pa_offset; 118 118 uintptr_t xiprom; 119 119 uintptr_t xiprom_sz; 120 + #else 121 + unsigned long va_kernel_pa_offset; 120 122 #endif 121 123 }; 122 124 ··· 136 134 #else 137 135 void *linear_mapping_pa_to_va(unsigned long x); 138 136 #endif 137 + 138 + #ifdef CONFIG_XIP_KERNEL 139 139 #define kernel_mapping_pa_to_va(y) ({ \ 140 140 unsigned long _y = (unsigned long)(y); \ 141 - (IS_ENABLED(CONFIG_XIP_KERNEL) && _y < phys_ram_base) ? \ 142 - (void *)(_y + kernel_map.va_kernel_xip_pa_offset) : \ 143 - (void *)(_y + kernel_map.va_kernel_pa_offset + XIP_OFFSET); \ 141 + (_y < phys_ram_base) ? \ 142 + (void *)(_y + kernel_map.va_kernel_xip_text_pa_offset) : \ 143 + (void *)(_y + kernel_map.va_kernel_xip_data_pa_offset); \ 144 144 }) 145 + #else 146 + #define kernel_mapping_pa_to_va(y) ((void *)((unsigned long)(y) + kernel_map.va_kernel_pa_offset)) 147 + #endif 148 + 145 149 #define __pa_to_va_nodebug(x) linear_mapping_pa_to_va(x) 146 150 147 151 #ifndef CONFIG_DEBUG_VIRTUAL ··· 155 147 #else 156 148 phys_addr_t linear_mapping_va_to_pa(unsigned long x); 157 149 #endif 150 + 151 + #ifdef CONFIG_XIP_KERNEL 158 152 #define kernel_mapping_va_to_pa(y) ({ \ 159 153 unsigned long _y = (unsigned long)(y); \ 160 - (IS_ENABLED(CONFIG_XIP_KERNEL) && _y < kernel_map.virt_addr + XIP_OFFSET) ? \ 161 - (_y - kernel_map.va_kernel_xip_pa_offset) : \ 162 - (_y - kernel_map.va_kernel_pa_offset - XIP_OFFSET); \ 154 + (_y < kernel_map.virt_addr + kernel_map.xiprom_sz) ? \ 155 + (_y - kernel_map.va_kernel_xip_text_pa_offset) : \ 156 + (_y - kernel_map.va_kernel_xip_data_pa_offset); \ 163 157 }) 158 + #else 159 + #define kernel_mapping_va_to_pa(y) ((unsigned long)(y) - kernel_map.va_kernel_pa_offset) 160 + #endif 164 161 165 162 #define __va_to_pa_nodebug(x) ({ \ 166 163 unsigned long _x = x; \
+17 -11
arch/riscv/include/asm/pgtable.h
··· 107 107 108 108 #endif 109 109 110 - #ifdef CONFIG_XIP_KERNEL 111 - #define XIP_OFFSET SZ_32M 112 - #define XIP_OFFSET_MASK (SZ_32M - 1) 113 - #else 114 - #define XIP_OFFSET 0 115 - #endif 116 - 117 110 #ifndef __ASSEMBLY__ 118 111 119 112 #include <asm/page.h> ··· 135 142 136 143 #ifdef CONFIG_XIP_KERNEL 137 144 #define XIP_FIXUP(addr) ({ \ 145 + extern char _sdata[], _start[], _end[]; \ 146 + uintptr_t __rom_start_data = CONFIG_XIP_PHYS_ADDR \ 147 + + (uintptr_t)&_sdata - (uintptr_t)&_start; \ 148 + uintptr_t __rom_end_data = CONFIG_XIP_PHYS_ADDR \ 149 + + (uintptr_t)&_end - (uintptr_t)&_start; \ 138 150 uintptr_t __a = (uintptr_t)(addr); \ 139 - (__a >= CONFIG_XIP_PHYS_ADDR && \ 140 - __a < CONFIG_XIP_PHYS_ADDR + XIP_OFFSET * 2) ? \ 141 - __a - CONFIG_XIP_PHYS_ADDR + CONFIG_PHYS_RAM_BASE - XIP_OFFSET :\ 142 - __a; \ 151 + (__a >= __rom_start_data && __a < __rom_end_data) ? \ 152 + __a - __rom_start_data + CONFIG_PHYS_RAM_BASE : __a; \ 143 153 }) 144 154 #else 145 155 #define XIP_FIXUP(addr) (addr) ··· 497 501 struct vm_area_struct *vma, unsigned long address, 498 502 pte_t *ptep, unsigned int nr) 499 503 { 504 + asm goto(ALTERNATIVE("nop", "j %l[svvptc]", 0, RISCV_ISA_EXT_SVVPTC, 1) 505 + : : : : svvptc); 506 + 500 507 /* 501 508 * The kernel assumes that TLBs don't cache invalid entries, but 502 509 * in RISC-V, SFENCE.VMA specifies an ordering constraint, not a ··· 509 510 */ 510 511 while (nr--) 511 512 local_flush_tlb_page(address + nr * PAGE_SIZE); 513 + 514 + svvptc:; 515 + /* 516 + * Svvptc guarantees that the new valid pte will be visible within 517 + * a bounded timeframe, so when the uarch does not cache invalid 518 + * entries, we don't have to do anything. 519 + */ 512 520 } 513 521 #define update_mmu_cache(vma, addr, ptep) \ 514 522 update_mmu_cache_range(NULL, vma, addr, ptep, 1)
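The asm goto ALTERNATIVE() above is patched at boot: on Svvptc hardware the nop becomes a branch to the svvptc label, so the per-page sfence.vma loop disappears entirely. A plain-C rendition of the resulting control flow (the boot-time patching itself is not modeled):

#include <stdio.h>

static void update_mmu_cache_model(int hw_has_svvptc, unsigned int nr)
{
        if (hw_has_svvptc) /* the nop patched into "j %l[svvptc]" */
                goto svvptc;

        while (nr--) /* uarch may cache invalid entries: flush each page */
                printf("sfence.vma for page %u\n", nr);
        return;

svvptc:
        /* New valid PTEs become visible in bounded time: nothing to do. */
        ;
}

int main(void)
{
        update_mmu_cache_model(0, 2); /* legacy behaviour: two flushes */
        update_mmu_cache_model(1, 2); /* Svvptc: no flush at all */
        return 0;
}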
+1
arch/riscv/include/asm/sbi.h
··· 159 159 160 160 #define RISCV_PMU_RAW_EVENT_MASK GENMASK_ULL(47, 0) 161 161 #define RISCV_PMU_RAW_EVENT_IDX 0x20000 162 + #define RISCV_PLAT_FW_EVENT 0xFFFF 162 163 163 164 /** General pmu event codes specified in SBI PMU extension */ 164 165 enum sbi_pmu_hw_generic_events_t {
+1 -1
arch/riscv/include/asm/set_memory.h
··· 46 46 47 47 #endif /* __ASSEMBLY__ */ 48 48 49 - #ifdef CONFIG_STRICT_KERNEL_RWX 49 + #if defined(CONFIG_STRICT_KERNEL_RWX) || defined(CONFIG_XIP_KERNEL) 50 50 #ifdef CONFIG_64BIT 51 51 #define SECTION_ALIGN (1 << 21) 52 52 #else
+1 -1
arch/riscv/include/asm/sparsemem.h
··· 7 7 #ifdef CONFIG_64BIT 8 8 #define MAX_PHYSMEM_BITS 56 9 9 #else 10 - #define MAX_PHYSMEM_BITS 34 10 + #define MAX_PHYSMEM_BITS 32 11 11 #endif /* CONFIG_64BIT */ 12 12 #define SECTION_SIZE_BITS 27 13 13 #endif /* CONFIG_SPARSEMEM */
+2
arch/riscv/include/asm/string.h
··· 19 19 extern asmlinkage void *memmove(void *, const void *, size_t); 20 20 extern asmlinkage void *__memmove(void *, const void *, size_t); 21 21 22 + #if !(defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)) 22 23 #define __HAVE_ARCH_STRCMP 23 24 extern asmlinkage int strcmp(const char *cs, const char *ct); 24 25 ··· 28 27 29 28 #define __HAVE_ARCH_STRNCMP 30 29 extern asmlinkage int strncmp(const char *cs, const char *ct, size_t count); 30 + #endif 31 31 32 32 /* For those files which don't want to check by kasan. */ 33 33 #if defined(CONFIG_KASAN) && !defined(__SANITIZE_ADDRESS__)
+7 -4
arch/riscv/include/asm/thread_info.h
··· 61 61 void *scs_base; 62 62 void *scs_sp; 63 63 #endif 64 + #ifdef CONFIG_64BIT 65 + /* 66 + * Used in handle_exception() to save a0, a1 and a2 before knowing if we 67 + * can access the kernel stack. 68 + */ 69 + unsigned long a0, a1, a2; 70 + #endif 64 71 }; 65 72 66 73 #ifdef CONFIG_SHADOW_CALL_STACK ··· 118 111 #define _TIF_NOTIFY_SIGNAL (1 << TIF_NOTIFY_SIGNAL) 119 112 #define _TIF_UPROBE (1 << TIF_UPROBE) 120 113 #define _TIF_RISCV_V_DEFER_RESTORE (1 << TIF_RISCV_V_DEFER_RESTORE) 121 - 122 - #define _TIF_WORK_MASK \ 123 - (_TIF_NOTIFY_RESUME | _TIF_SIGPENDING | _TIF_NEED_RESCHED | \ 124 - _TIF_NOTIFY_SIGNAL | _TIF_UPROBE) 125 114 126 115 #endif /* _ASM_RISCV_THREAD_INFO_H */
+1
arch/riscv/include/asm/vmalloc.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0-only */ 1 2 #ifndef _ASM_RISCV_VMALLOC_H 2 3 #define _ASM_RISCV_VMALLOC_H 3 4
+24 -6
arch/riscv/include/asm/xip_fixup.h
··· 9 9 10 10 #ifdef CONFIG_XIP_KERNEL 11 11 .macro XIP_FIXUP_OFFSET reg 12 - REG_L t0, _xip_fixup 12 + /* Fix-up address in Flash into address in RAM early during boot before 13 + * MMU is up. Because generated code "thinks" data is in Flash, but it 14 + * is actually in RAM (actually data is also in Flash, but Flash is 15 + * read-only, thus we need to use the data residing in RAM). 16 + * 17 + * The start of data in Flash is _sdata and the start of data in RAM is 18 + * CONFIG_PHYS_RAM_BASE. So this fix-up essentially does this: 19 + * reg += CONFIG_PHYS_RAM_BASE - _start 20 + */ 21 + li t0, CONFIG_PHYS_RAM_BASE 13 22 add \reg, \reg, t0 23 + la t0, _sdata 24 + sub \reg, \reg, t0 14 25 .endm 15 26 .macro XIP_FIXUP_FLASH_OFFSET reg 27 + /* In linker script, at the transition from read-only section to 28 + * writable section, the VMA is increased while LMA remains the same. 29 + * (See in linker script how _sdata, __data_loc and LOAD_OFFSET is 30 + * changed) 31 + * 32 + * Consequently, early during boot before MMU is up, the generated code 33 + * reads the "writable" section at wrong addresses, because VMA is used 34 + * by compiler to generate code, but the data is located in Flash using 35 + * LMA. 36 + */ 37 + la t0, _sdata 38 + sub \reg, \reg, t0 16 39 la t0, __data_loc 17 - REG_L t1, _xip_phys_offset 18 - sub \reg, \reg, t1 19 40 add \reg, \reg, t0 20 41 .endm 21 - 22 - _xip_fixup: .dword CONFIG_PHYS_RAM_BASE - CONFIG_XIP_PHYS_ADDR - XIP_OFFSET 23 - _xip_phys_offset: .dword CONFIG_XIP_PHYS_ADDR + XIP_OFFSET 24 42 #else 25 43 .macro XIP_FIXUP_OFFSET reg 26 44 .endm
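Concretely, with hypothetical values CONFIG_PHYS_RAM_BASE = 0x80000000 and _sdata linked at 0x20200000 in flash, XIP_FIXUP_OFFSET redirects a generated data address of 0x20201000 to its writable RAM copy at 0x80001000:

#include <stdio.h>

int main(void)
{
        unsigned long phys_ram_base = 0x80000000UL; /* hypothetical CONFIG_PHYS_RAM_BASE */
        unsigned long sdata         = 0x20200000UL; /* hypothetical _sdata link address  */
        unsigned long reg           = 0x20201000UL; /* address the generated code used   */

        /* XIP_FIXUP_OFFSET: reg += CONFIG_PHYS_RAM_BASE - _sdata */
        reg = reg + phys_ram_base - sdata;
        printf("fixed-up address: 0x%lx\n", reg); /* 0x80001000 */
        return 0;
}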
+1 -1
arch/riscv/kernel/acpi_numa.c
··· 30 30 31 31 static int acpi_early_node_map[NR_CPUS] __initdata = { [0 ... NR_CPUS - 1] = NUMA_NO_NODE }; 32 32 33 - int __init acpi_numa_get_nid(unsigned int cpu) 33 + static int __init acpi_numa_get_nid(unsigned int cpu) 34 34 { 35 35 return acpi_early_node_map[cpu]; 36 36 }
+7
arch/riscv/kernel/asm-offsets.c
··· 36 36 OFFSET(TASK_THREAD_S9, task_struct, thread.s[9]); 37 37 OFFSET(TASK_THREAD_S10, task_struct, thread.s[10]); 38 38 OFFSET(TASK_THREAD_S11, task_struct, thread.s[11]); 39 + 40 + OFFSET(TASK_TI_CPU, task_struct, thread_info.cpu); 39 41 OFFSET(TASK_TI_FLAGS, task_struct, thread_info.flags); 40 42 OFFSET(TASK_TI_PREEMPT_COUNT, task_struct, thread_info.preempt_count); 41 43 OFFSET(TASK_TI_KERNEL_SP, task_struct, thread_info.kernel_sp); 42 44 OFFSET(TASK_TI_USER_SP, task_struct, thread_info.user_sp); 43 45 #ifdef CONFIG_SHADOW_CALL_STACK 44 46 OFFSET(TASK_TI_SCS_SP, task_struct, thread_info.scs_sp); 47 + #endif 48 + #ifdef CONFIG_64BIT 49 + OFFSET(TASK_TI_A0, task_struct, thread_info.a0); 50 + OFFSET(TASK_TI_A1, task_struct, thread_info.a1); 51 + OFFSET(TASK_TI_A2, task_struct, thread_info.a2); 45 52 #endif 46 53 47 54 OFFSET(TASK_TI_CPU_NUM, task_struct, thread_info.cpu);
+5
arch/riscv/kernel/cacheinfo.c
··· 71 71 this_leaf->type = type; 72 72 } 73 73 74 + int init_cache_level(unsigned int cpu) 75 + { 76 + return init_of_cache_level(cpu); 77 + } 78 + 74 79 int populate_cache_leaves(unsigned int cpu) 75 80 { 76 81 struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
+1
arch/riscv/kernel/cpufeature.c
··· 381 381 __RISCV_ISA_EXT_DATA(svinval, RISCV_ISA_EXT_SVINVAL), 382 382 __RISCV_ISA_EXT_DATA(svnapot, RISCV_ISA_EXT_SVNAPOT), 383 383 __RISCV_ISA_EXT_DATA(svpbmt, RISCV_ISA_EXT_SVPBMT), 384 + __RISCV_ISA_EXT_DATA(svvptc, RISCV_ISA_EXT_SVVPTC), 384 385 }; 385 386 386 387 const size_t riscv_isa_ext_count = ARRAY_SIZE(riscv_isa_ext);
+6
arch/riscv/kernel/elf_kexec.c
··· 451 451 *(u32 *)loc = CLEAN_IMM(CJTYPE, *(u32 *)loc) | 452 452 ENCODE_CJTYPE_IMM(val - addr); 453 453 break; 454 + case R_RISCV_ADD16: 455 + *(u16 *)loc += val; 456 + break; 457 + case R_RISCV_SUB16: 458 + *(u16 *)loc -= val; 459 + break; 454 460 case R_RISCV_ADD32: 455 461 *(u32 *)loc += val; 456 462 break;
+89 -2
arch/riscv/kernel/entry.S
··· 19 19 20 20 .section .irqentry.text, "ax" 21 21 22 + .macro new_vmalloc_check 23 + REG_S a0, TASK_TI_A0(tp) 24 + csrr a0, CSR_CAUSE 25 + /* Exclude IRQs */ 26 + blt a0, zero, _new_vmalloc_restore_context_a0 27 + 28 + REG_S a1, TASK_TI_A1(tp) 29 + /* Only check new_vmalloc if we are in page/protection fault */ 30 + li a1, EXC_LOAD_PAGE_FAULT 31 + beq a0, a1, _new_vmalloc_kernel_address 32 + li a1, EXC_STORE_PAGE_FAULT 33 + beq a0, a1, _new_vmalloc_kernel_address 34 + li a1, EXC_INST_PAGE_FAULT 35 + bne a0, a1, _new_vmalloc_restore_context_a1 36 + 37 + _new_vmalloc_kernel_address: 38 + /* Is it a kernel address? */ 39 + csrr a0, CSR_TVAL 40 + bge a0, zero, _new_vmalloc_restore_context_a1 41 + 42 + /* Check if a new vmalloc mapping appeared that could explain the trap */ 43 + REG_S a2, TASK_TI_A2(tp) 44 + /* 45 + * Computes: 46 + * a0 = &new_vmalloc[BIT_WORD(cpu)] 47 + * a1 = BIT_MASK(cpu) 48 + */ 49 + REG_L a2, TASK_TI_CPU(tp) 50 + /* 51 + * Compute the new_vmalloc element position: 52 + * (cpu / 64) * 8 = (cpu >> 6) << 3 53 + */ 54 + srli a1, a2, 6 55 + slli a1, a1, 3 56 + la a0, new_vmalloc 57 + add a0, a0, a1 58 + /* 59 + * Compute the bit position in the new_vmalloc element: 60 + * bit_pos = cpu % 64 = cpu - (cpu / 64) * 64 = cpu - (cpu >> 6) << 6 61 + * = cpu - ((cpu >> 6) << 3) << 3 62 + */ 63 + slli a1, a1, 3 64 + sub a1, a2, a1 65 + /* Compute the "get mask": 1 << bit_pos */ 66 + li a2, 1 67 + sll a1, a2, a1 68 + 69 + /* Check the value of new_vmalloc for this cpu */ 70 + REG_L a2, 0(a0) 71 + and a2, a2, a1 72 + beq a2, zero, _new_vmalloc_restore_context 73 + 74 + /* Atomically reset the current cpu bit in new_vmalloc */ 75 + amoxor.d a0, a1, (a0) 76 + 77 + /* Only emit a sfence.vma if the uarch caches invalid entries */ 78 + ALTERNATIVE("sfence.vma", "nop", 0, RISCV_ISA_EXT_SVVPTC, 1) 79 + 80 + REG_L a0, TASK_TI_A0(tp) 81 + REG_L a1, TASK_TI_A1(tp) 82 + REG_L a2, TASK_TI_A2(tp) 83 + csrw CSR_SCRATCH, x0 84 + sret 85 + 86 + _new_vmalloc_restore_context: 87 + REG_L a2, TASK_TI_A2(tp) 88 + _new_vmalloc_restore_context_a1: 89 + REG_L a1, TASK_TI_A1(tp) 90 + _new_vmalloc_restore_context_a0: 91 + REG_L a0, TASK_TI_A0(tp) 92 + .endm 93 + 94 + 22 95 SYM_CODE_START(handle_exception) 23 96 /* 24 97 * If coming from userspace, preserve the user thread pointer and load ··· 103 30 104 31 .Lrestore_kernel_tpsp: 105 32 csrr tp, CSR_SCRATCH 33 + 34 + #ifdef CONFIG_64BIT 35 + /* 36 + * The RISC-V kernel does not eagerly emit a sfence.vma after each 37 + * new vmalloc mapping, which may result in exceptions: 38 + * - if the uarch caches invalid entries, the new mapping would not be 39 + * observed by the page table walker and an invalidation is needed. 40 + * - if the uarch does not cache invalid entries, a reordered access 41 + * could "miss" the new mapping and traps: in that case, we only need 42 + * to retry the access, no sfence.vma is required. 43 + */ 44 + new_vmalloc_check 45 + #endif 46 + 106 47 REG_S sp, TASK_TI_KERNEL_SP(tp) 107 48 108 49 #ifdef CONFIG_VMAP_STACK ··· 326 239 jalr s0 327 240 1: 328 241 move a0, sp /* pt_regs */ 329 - la ra, ret_from_exception 330 - tail syscall_exit_to_user_mode 242 + call syscall_exit_to_user_mode 243 + j ret_from_exception 331 244 SYM_CODE_END(ret_from_fork) 332 245 333 246 #ifdef CONFIG_IRQ_STACKS
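The shift-only arithmetic in new_vmalloc_check exists because the macro runs before the kernel stack is known to be usable, with only a0-a2 spilled into thread_info. A quick userspace check that the shift forms agree with the plain divide/modulo forms (illustrative):

#include <assert.h>
#include <stdint.h>

int main(void)
{
        for (uint32_t cpu = 0; cpu < 1024; cpu++) {
                /* byte offset of new_vmalloc[cpu / 64]: (cpu >> 6) << 3 */
                uint32_t byte_off = (cpu >> 6) << 3;
                /* bit position: cpu - (((cpu >> 6) << 3) << 3) == cpu % 64 */
                uint32_t bit_pos = cpu - (byte_off << 3);

                assert(byte_off == cpu / 64 * 8);
                assert(bit_pos == cpu % 64);
        }
        return 0;
}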
+1 -3
arch/riscv/kernel/module.c
··· 787 787 int res; 788 788 unsigned int num_relocations = sechdrs[relsec].sh_size / sizeof(*rel); 789 789 struct hlist_head *relocation_hashtable; 790 - struct list_head used_buckets_list; 791 790 unsigned int hashtable_bits; 791 + LIST_HEAD(used_buckets_list); 792 792 793 793 hashtable_bits = initialize_relocation_hashtable(num_relocations, 794 794 &relocation_hashtable); 795 795 796 796 if (!relocation_hashtable) 797 797 return -ENOMEM; 798 - 799 - INIT_LIST_HEAD(&used_buckets_list); 800 798 801 799 pr_debug("Applying relocate section %u to %u\n", relsec, 802 800 sechdrs[relsec].sh_info);
+3 -43
arch/riscv/kernel/perf_callchain.c
··· 6 6 7 7 #include <asm/stacktrace.h> 8 8 9 - /* 10 - * Get the return address for a single stackframe and return a pointer to the 11 - * next frame tail. 12 - */ 13 - static unsigned long user_backtrace(struct perf_callchain_entry_ctx *entry, 14 - unsigned long fp, unsigned long reg_ra) 9 + static bool fill_callchain(void *entry, unsigned long pc) 15 10 { 16 - struct stackframe buftail; 17 - unsigned long ra = 0; 18 - unsigned long __user *user_frame_tail = 19 - (unsigned long __user *)(fp - sizeof(struct stackframe)); 20 - 21 - /* Check accessibility of one struct frame_tail beyond */ 22 - if (!access_ok(user_frame_tail, sizeof(buftail))) 23 - return 0; 24 - if (__copy_from_user_inatomic(&buftail, user_frame_tail, 25 - sizeof(buftail))) 26 - return 0; 27 - 28 - if (reg_ra != 0) 29 - ra = reg_ra; 30 - else 31 - ra = buftail.ra; 32 - 33 - fp = buftail.fp; 34 - if (ra != 0) 35 - perf_callchain_store(entry, ra); 36 - else 37 - return 0; 38 - 39 - return fp; 11 + return perf_callchain_store(entry, pc) == 0; 40 12 } 41 13 42 14 /* ··· 28 56 void perf_callchain_user(struct perf_callchain_entry_ctx *entry, 29 57 struct pt_regs *regs) 30 58 { 31 - unsigned long fp = 0; 32 - 33 - fp = regs->s0; 34 - perf_callchain_store(entry, regs->epc); 35 - 36 - fp = user_backtrace(entry, fp, regs->ra); 37 - while (fp && !(fp & 0x3) && entry->nr < entry->max_stack) 38 - fp = user_backtrace(entry, fp, 0); 39 - } 40 - 41 - static bool fill_callchain(void *entry, unsigned long pc) 42 - { 43 - return perf_callchain_store(entry, pc) == 0; 59 + arch_stack_walk_user(fill_callchain, entry, regs); 44 60 } 45 61 46 62 void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
+3 -1
arch/riscv/kernel/pi/Makefile
··· 5 5 -Os -DDISABLE_BRANCH_PROFILING $(DISABLE_STACKLEAK_PLUGIN) \ 6 6 $(call cc-option,-mbranch-protection=none) \ 7 7 -I$(srctree)/scripts/dtc/libfdt -fno-stack-protector \ 8 + -include $(srctree)/include/linux/hidden.h \ 8 9 -D__DISABLE_EXPORTS -ffreestanding \ 9 10 -fno-asynchronous-unwind-tables -fno-unwind-tables \ 10 11 $(call cc-option,-fno-addrsig) ··· 17 16 18 17 CFLAGS_cmdline_early.o += -D__NO_FORTIFY 19 18 CFLAGS_lib-fdt_ro.o += -D__NO_FORTIFY 19 + CFLAGS_fdt_early.o += -D__NO_FORTIFY 20 20 21 21 $(obj)/%.pi.o: OBJCOPYFLAGS := --prefix-symbols=__pi_ \ 22 22 --remove-section=.note.gnu.property \ ··· 34 32 $(obj)/ctype.o: $(srctree)/lib/ctype.c FORCE 35 33 $(call if_changed_rule,cc_o_c) 36 34 37 - obj-y := cmdline_early.pi.o fdt_early.pi.o string.pi.o ctype.pi.o lib-fdt.pi.o lib-fdt_ro.pi.o 35 + obj-y := cmdline_early.pi.o fdt_early.pi.o string.pi.o ctype.pi.o lib-fdt.pi.o lib-fdt_ro.pi.o archrandom_early.pi.o 38 36 extra-y := $(patsubst %.pi.o,%.o,$(obj-y))
+30
arch/riscv/kernel/pi/archrandom_early.c
··· 1 + // SPDX-License-Identifier: GPL-2.0-only 2 + 3 + #include <asm/csr.h> 4 + #include <linux/processor.h> 5 + 6 + #include "pi.h" 7 + 8 + /* 9 + * To avoid rewriting code include asm/archrandom.h and create macros 10 + * for the functions that won't be included. 11 + */ 12 + #undef riscv_has_extension_unlikely 13 + #define riscv_has_extension_likely(...) false 14 + #undef pr_err_once 15 + #define pr_err_once(...) 16 + 17 + #include <asm/archrandom.h> 18 + 19 + u64 get_kaslr_seed_zkr(const uintptr_t dtb_pa) 20 + { 21 + unsigned long seed = 0; 22 + 23 + if (!fdt_early_match_extension_isa((const void *)dtb_pa, "zkr")) 24 + return 0; 25 + 26 + if (!csr_seed_long(&seed)) 27 + return 0; 28 + 29 + return seed; 30 + }
+2 -8
arch/riscv/kernel/pi/cmdline_early.c
··· 6 6 #include <asm/pgtable.h> 7 7 #include <asm/setup.h> 8 8 9 - static char early_cmdline[COMMAND_LINE_SIZE]; 9 + #include "pi.h" 10 10 11 - /* 12 - * Declare the functions that are exported (but prefixed) here so that LLVM 13 - * does not complain it lacks the 'static' keyword (which, if added, makes 14 - * LLVM complain because the function is actually unused in this file). 15 - */ 16 - u64 set_satp_mode_from_cmdline(uintptr_t dtb_pa); 17 - bool set_nokaslr_from_cmdline(uintptr_t dtb_pa); 11 + static char early_cmdline[COMMAND_LINE_SIZE]; 18 12 19 13 static char *get_early_cmdline(uintptr_t dtb_pa) 20 14 {
+161 -6
arch/riscv/kernel/pi/fdt_early.c
··· 2 2 #include <linux/types.h> 3 3 #include <linux/init.h> 4 4 #include <linux/libfdt.h> 5 + #include <linux/ctype.h> 5 6 6 - /* 7 - * Declare the functions that are exported (but prefixed) here so that LLVM 8 - * does not complain it lacks the 'static' keyword (which, if added, makes 9 - * LLVM complain because the function is actually unused in this file). 10 - */ 11 - u64 get_kaslr_seed(uintptr_t dtb_pa); 7 + #include "pi.h" 12 8 13 9 u64 get_kaslr_seed(uintptr_t dtb_pa) 14 10 { ··· 22 26 23 27 ret = fdt64_to_cpu(*prop); 24 28 *prop = 0; 29 + return ret; 30 + } 31 + 32 + /** 33 + * fdt_device_is_available - check if a device is available for use 34 + * 35 + * @fdt: pointer to the device tree blob 36 + * @node: offset of the node whose property to find 37 + * 38 + * Returns true if the status property is absent or set to "okay" or "ok", 39 + * false otherwise 40 + */ 41 + static bool fdt_device_is_available(const void *fdt, int node) 42 + { 43 + const char *status; 44 + int statlen; 45 + 46 + status = fdt_getprop(fdt, node, "status", &statlen); 47 + if (!status) 48 + return true; 49 + 50 + if (statlen > 0) { 51 + if (!strcmp(status, "okay") || !strcmp(status, "ok")) 52 + return true; 53 + } 54 + 55 + return false; 56 + } 57 + 58 + /* Copy of fdt_nodename_eq_ */ 59 + static int fdt_node_name_eq(const void *fdt, int offset, 60 + const char *s) 61 + { 62 + int olen; 63 + int len = strlen(s); 64 + const char *p = fdt_get_name(fdt, offset, &olen); 65 + 66 + if (!p || olen < len) 67 + /* short match */ 68 + return 0; 69 + 70 + if (memcmp(p, s, len) != 0) 71 + return 0; 72 + 73 + if (p[len] == '\0') 74 + return 1; 75 + else if (!memchr(s, '@', len) && (p[len] == '@')) 76 + return 1; 77 + else 78 + return 0; 79 + } 80 + 81 + /** 82 + * isa_string_contains - check if isa string contains an extension 83 + * 84 + * @isa_str: isa string to search 85 + * @ext_name: the extension to search for 86 + * 87 + * Returns true if the extension is in the given isa string, 88 + * false otherwise 89 + */ 90 + static bool isa_string_contains(const char *isa_str, const char *ext_name) 91 + { 92 + size_t i, single_end, len = strlen(ext_name); 93 + char ext_end; 94 + 95 + /* Error must contain rv32/64 */ 96 + if (strlen(isa_str) < 4) 97 + return false; 98 + 99 + if (len == 1) { 100 + single_end = strcspn(isa_str, "sSxXzZ"); 101 + /* Search for single chars between rv32/64 and multi-letter extensions */ 102 + for (i = 4; i < single_end; i++) { 103 + if (tolower(isa_str[i]) == ext_name[0]) 104 + return true; 105 + } 106 + return false; 107 + } 108 + 109 + /* Skip to start of multi-letter extensions */ 110 + isa_str = strpbrk(isa_str, "sSxXzZ"); 111 + while (isa_str) { 112 + if (strncasecmp(isa_str, ext_name, len) == 0) { 113 + ext_end = isa_str[len]; 114 + /* Check if matches the whole extension. */ 115 + if (ext_end == '\0' || ext_end == '_') 116 + return true; 117 + } 118 + /* Multi-letter extensions must be split from other multi-letter 119 + * extensions with an "_", the end of a multi-letter extension will 120 + * either be the null character or the "_" at the start of the next 121 + * multi-letter extension. 
122 + */ 123 + isa_str = strchr(isa_str, '_'); 124 + if (isa_str) 125 + isa_str++; 126 + } 127 + 128 + return false; 129 + } 130 + 131 + /** 132 + * early_cpu_isa_ext_available - check if cpu node has an extension 133 + * 134 + * @fdt: pointer to the device tree blob 135 + * @node: offset of the cpu node 136 + * @ext_name: the extension to search for 137 + * 138 + * Returns true if the cpu node has the extension, 139 + * false otherwise 140 + */ 141 + static bool early_cpu_isa_ext_available(const void *fdt, int node, const char *ext_name) 142 + { 143 + const void *prop; 144 + int len; 145 + 146 + prop = fdt_getprop(fdt, node, "riscv,isa-extensions", &len); 147 + if (prop && fdt_stringlist_contains(prop, len, ext_name)) 148 + return true; 149 + 150 + prop = fdt_getprop(fdt, node, "riscv,isa", &len); 151 + if (prop && isa_string_contains(prop, ext_name)) 152 + return true; 153 + 154 + return false; 155 + } 156 + 157 + /** 158 + * fdt_early_match_extension_isa - check if all cpu nodes have an extension 159 + * 160 + * @fdt: pointer to the device tree blob 161 + * @ext_name: the extension to search for 162 + * 163 + * Returns true if all available cpu nodes have the extension, 164 + * false otherwise 165 + */ 166 + bool fdt_early_match_extension_isa(const void *fdt, const char *ext_name) 167 + { 168 + int node, parent; 169 + bool ret = false; 170 + 171 + parent = fdt_path_offset(fdt, "/cpus"); 172 + if (parent < 0) 173 + return false; 174 + 175 + fdt_for_each_subnode(node, fdt, parent) { 176 + if (!fdt_node_name_eq(fdt, node, "cpu")) 177 + continue; 178 + 179 + if (!fdt_device_is_available(fdt, node)) 180 + continue; 181 + 182 + if (!early_cpu_isa_ext_available(fdt, node, ext_name)) 183 + return false; 184 + 185 + ret = true; 186 + } 187 + 25 188 return ret; 26 189 }
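The matching rules are easiest to see by example: single-letter extensions are searched between the "rv32"/"rv64" prefix and the first s/S/x/X/z/Z, while multi-letter extensions must match an entire "_"-delimited token. A self-contained sketch of the same rules (simplified reimplementation for illustration, not the kernel code):

#include <assert.h>
#include <ctype.h>
#include <stdbool.h>
#include <string.h>
#include <strings.h>

static bool isa_has(const char *isa, const char *ext)
{
        size_t len = strlen(ext);

        if (len == 1) {
                size_t end = strcspn(isa, "sSxXzZ");

                for (size_t i = 4; i < end; i++) /* skip "rv32"/"rv64" */
                        if (tolower((unsigned char)isa[i]) == ext[0])
                                return true;
                return false;
        }

        for (const char *p = strpbrk(isa, "sSxXzZ"); p;) {
                if (!strncasecmp(p, ext, len) &&
                    (p[len] == '\0' || p[len] == '_'))
                        return true;
                p = strchr(p, '_');
                if (p)
                        p++;
        }
        return false;
}

int main(void)
{
        assert(isa_has("rv64imafdc_zicsr_zkr", "zkr"));
        assert(isa_has("rv64imafdc_zicsr_zkr", "c"));
        assert(!isa_has("rv64imafdc_zicsr_zkrb", "zkr")); /* no partial-token match */
        return 0;
}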
+20
arch/riscv/kernel/pi/pi.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + #ifndef _RISCV_PI_H_ 3 + #define _RISCV_PI_H_ 4 + 5 + #include <linux/types.h> 6 + 7 + /* 8 + * The following functions are exported (but prefixed). Declare them here so 9 + * that LLVM does not complain it lacks the 'static' keyword (which, if 10 + * added, makes LLVM complain because the function is unused). 11 + */ 12 + 13 + u64 get_kaslr_seed(uintptr_t dtb_pa); 14 + u64 get_kaslr_seed_zkr(const uintptr_t dtb_pa); 15 + bool set_nokaslr_from_cmdline(uintptr_t dtb_pa); 16 + u64 set_satp_mode_from_cmdline(uintptr_t dtb_pa); 17 + 18 + bool fdt_early_match_extension_isa(const void *fdt, const char *ext_name); 19 + 20 + #endif /* _RISCV_PI_H_ */
+9
arch/riscv/kernel/process.c
··· 15 15 #include <linux/tick.h> 16 16 #include <linux/ptrace.h> 17 17 #include <linux/uaccess.h> 18 + #include <linux/personality.h> 18 19 19 20 #include <asm/unistd.h> 20 21 #include <asm/processor.h> ··· 27 26 #include <asm/cpuidle.h> 28 27 #include <asm/vector.h> 29 28 #include <asm/cpufeature.h> 29 + #include <asm/exec.h> 30 30 31 31 #if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_STACKPROTECTOR_PER_TASK) 32 32 #include <linux/stackprotector.h> ··· 99 97 __show_regs(regs); 100 98 if (!user_mode(regs)) 101 99 dump_backtrace(regs, NULL, KERN_DEFAULT); 100 + } 101 + 102 + unsigned long arch_align_stack(unsigned long sp) 103 + { 104 + if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space) 105 + sp -= get_random_u32_below(PAGE_SIZE); 106 + return sp & ~0xf; 102 107 } 103 108 104 109 #ifdef CONFIG_COMPAT
-3
arch/riscv/kernel/riscv_ksyms.c
··· 12 12 EXPORT_SYMBOL(memset); 13 13 EXPORT_SYMBOL(memcpy); 14 14 EXPORT_SYMBOL(memmove); 15 - EXPORT_SYMBOL(strcmp); 16 - EXPORT_SYMBOL(strlen); 17 - EXPORT_SYMBOL(strncmp); 18 15 EXPORT_SYMBOL(__memset); 19 16 EXPORT_SYMBOL(__memcpy); 20 17 EXPORT_SYMBOL(__memmove);
+41 -2
arch/riscv/kernel/smp.c
··· 13 13 #include <linux/interrupt.h> 14 14 #include <linux/module.h> 15 15 #include <linux/kexec.h> 16 + #include <linux/kgdb.h> 16 17 #include <linux/percpu.h> 17 18 #include <linux/profile.h> 18 19 #include <linux/smp.h> ··· 22 21 #include <linux/delay.h> 23 22 #include <linux/irq.h> 24 23 #include <linux/irq_work.h> 24 + #include <linux/nmi.h> 25 25 26 26 #include <asm/tlbflush.h> 27 27 #include <asm/cacheflush.h> ··· 35 33 IPI_CPU_CRASH_STOP, 36 34 IPI_IRQ_WORK, 37 35 IPI_TIMER, 36 + IPI_CPU_BACKTRACE, 37 + IPI_KGDB_ROUNDUP, 38 38 IPI_MAX 39 39 }; 40 40 ··· 117 113 118 114 static irqreturn_t handle_IPI(int irq, void *data) 119 115 { 116 + unsigned int cpu = smp_processor_id(); 120 117 int ipi = irq - ipi_virq_base; 121 118 122 119 switch (ipi) { ··· 131 126 ipi_stop(); 132 127 break; 133 128 case IPI_CPU_CRASH_STOP: 134 - ipi_cpu_crash_stop(smp_processor_id(), get_irq_regs()); 129 + ipi_cpu_crash_stop(cpu, get_irq_regs()); 135 130 break; 136 131 case IPI_IRQ_WORK: 137 132 irq_work_run(); ··· 141 136 tick_receive_broadcast(); 142 137 break; 143 138 #endif 139 + case IPI_CPU_BACKTRACE: 140 + nmi_cpu_backtrace(get_irq_regs()); 141 + break; 142 + case IPI_KGDB_ROUNDUP: 143 + kgdb_nmicallback(cpu, get_irq_regs()); 144 + break; 144 145 default: 145 - pr_warn("CPU%d: unhandled IPI%d\n", smp_processor_id(), ipi); 146 + pr_warn("CPU%d: unhandled IPI%d\n", cpu, ipi); 146 147 break; 147 148 } 148 149 ··· 214 203 [IPI_CPU_CRASH_STOP] = "CPU stop (for crash dump) interrupts", 215 204 [IPI_IRQ_WORK] = "IRQ work interrupts", 216 205 [IPI_TIMER] = "Timer broadcast interrupts", 206 + [IPI_CPU_BACKTRACE] = "CPU backtrace interrupts", 207 + [IPI_KGDB_ROUNDUP] = "KGDB roundup interrupts", 217 208 }; 218 209 219 210 void show_ipi_stats(struct seq_file *p, int prec) ··· 336 323 send_ipi_single(cpu, IPI_RESCHEDULE); 337 324 } 338 325 EXPORT_SYMBOL_GPL(arch_smp_send_reschedule); 326 + 327 + static void riscv_backtrace_ipi(cpumask_t *mask) 328 + { 329 + send_ipi_mask(mask, IPI_CPU_BACKTRACE); 330 + } 331 + 332 + void arch_trigger_cpumask_backtrace(const cpumask_t *mask, int exclude_cpu) 333 + { 334 + nmi_trigger_cpumask_backtrace(mask, exclude_cpu, riscv_backtrace_ipi); 335 + } 336 + 337 + #ifdef CONFIG_KGDB 338 + void kgdb_roundup_cpus(void) 339 + { 340 + int this_cpu = raw_smp_processor_id(); 341 + int cpu; 342 + 343 + for_each_online_cpu(cpu) { 344 + /* No need to roundup ourselves */ 345 + if (cpu == this_cpu) 346 + continue; 347 + 348 + send_ipi_single(cpu, IPI_KGDB_ROUNDUP); 349 + } 350 + } 351 + #endif
+43
arch/riscv/kernel/stacktrace.c
··· 162 162 { 163 163 walk_stackframe(task, regs, consume_entry, cookie); 164 164 } 165 + 166 + /* 167 + * Get the return address for a single stackframe and return a pointer to the 168 + * next frame tail. 169 + */ 170 + static unsigned long unwind_user_frame(stack_trace_consume_fn consume_entry, 171 + void *cookie, unsigned long fp, 172 + unsigned long reg_ra) 173 + { 174 + struct stackframe buftail; 175 + unsigned long ra = 0; 176 + unsigned long __user *user_frame_tail = 177 + (unsigned long __user *)(fp - sizeof(struct stackframe)); 178 + 179 + /* Check accessibility of one struct frame_tail beyond */ 180 + if (!access_ok(user_frame_tail, sizeof(buftail))) 181 + return 0; 182 + if (__copy_from_user_inatomic(&buftail, user_frame_tail, 183 + sizeof(buftail))) 184 + return 0; 185 + 186 + ra = reg_ra ? : buftail.ra; 187 + 188 + fp = buftail.fp; 189 + if (!ra || !consume_entry(cookie, ra)) 190 + return 0; 191 + 192 + return fp; 193 + } 194 + 195 + void arch_stack_walk_user(stack_trace_consume_fn consume_entry, void *cookie, 196 + const struct pt_regs *regs) 197 + { 198 + unsigned long fp = 0; 199 + 200 + fp = regs->s0; 201 + if (!consume_entry(cookie, regs->epc)) 202 + return; 203 + 204 + fp = unwind_user_frame(consume_entry, cookie, fp, regs->ra); 205 + while (fp && !(fp & 0x7)) 206 + fp = unwind_user_frame(consume_entry, cookie, fp, 0); 207 + }
+1 -1
arch/riscv/kernel/vdso/Makefile
··· 45 45 # link rule for the .so file, .lds has to be first 46 46 $(obj)/vdso.so.dbg: $(obj)/vdso.lds $(obj-vdso) FORCE 47 47 $(call if_changed,vdsold) 48 - LDFLAGS_vdso.so.dbg = -shared -S -soname=linux-vdso.so.1 \ 48 + LDFLAGS_vdso.so.dbg = -shared -soname=linux-vdso.so.1 \ 49 49 --build-id=sha1 --hash-style=both --eh-frame-hdr 50 50 51 51 # strip rule for the .so file
+1 -1
arch/riscv/kernel/vendor_extensions/andes.c
··· 8 8 #include <linux/types.h> 9 9 10 10 /* All Andes vendor extensions supported in Linux */ 11 - const struct riscv_isa_ext_data riscv_isa_vendor_ext_andes[] = { 11 + static const struct riscv_isa_ext_data riscv_isa_vendor_ext_andes[] = { 12 12 __RISCV_ISA_EXT_DATA(xandespmu, RISCV_ISA_VENDOR_EXT_XANDESPMU), 13 13 }; 14 14
+7
arch/riscv/kernel/vmcore_info.c
··· 19 19 #endif 20 20 #endif 21 21 vmcoreinfo_append_str("NUMBER(KERNEL_LINK_ADDR)=0x%lx\n", KERNEL_LINK_ADDR); 22 + #ifdef CONFIG_XIP_KERNEL 23 + /* TODO: Communicate with crash-utility developers on the information to 24 + * export. The XIP case is more complicated, because the virtual-physical 25 + * address offset depends on whether the address is in ROM or in RAM. 26 + */ 27 + #else 22 28 vmcoreinfo_append_str("NUMBER(va_kernel_pa_offset)=0x%lx\n", 23 29 kernel_map.va_kernel_pa_offset); 30 + #endif 24 31 }
+3 -2
arch/riscv/kernel/vmlinux-xip.lds.S
··· 14 14 #include <asm/page.h> 15 15 #include <asm/cache.h> 16 16 #include <asm/thread_info.h> 17 + #include <asm/set_memory.h> 17 18 18 19 OUTPUT_ARCH(riscv) 19 20 ENTRY(_start) ··· 66 65 * From this point, stuff is considered writable and will be copied to RAM 67 66 */ 68 67 __data_loc = ALIGN(PAGE_SIZE); /* location in file */ 69 - . = KERNEL_LINK_ADDR + XIP_OFFSET; /* location in memory */ 68 + . = ALIGN(SECTION_ALIGN); /* location in memory */ 70 69 71 70 #undef LOAD_OFFSET 72 - #define LOAD_OFFSET (KERNEL_LINK_ADDR + XIP_OFFSET - (__data_loc & XIP_OFFSET_MASK)) 71 + #define LOAD_OFFSET (KERNEL_LINK_ADDR + _sdata - __data_loc) 73 72 74 73 _sdata = .; /* Start of data section */ 75 74 _data = .;
+2
arch/riscv/lib/Makefile
··· 3 3 lib-y += memcpy.o 4 4 lib-y += memset.o 5 5 lib-y += memmove.o 6 + ifeq ($(CONFIG_KASAN_GENERIC)$(CONFIG_KASAN_SW_TAGS),) 6 7 lib-y += strcmp.o 7 8 lib-y += strlen.o 8 9 lib-y += strncmp.o 10 + endif 9 11 lib-y += csum.o 10 12 ifeq ($(CONFIG_MMU), y) 11 13 lib-$(CONFIG_RISCV_ISA_V) += uaccess_vector.o
+2
arch/riscv/lib/memset.S
··· 111 111 ret 112 112 SYM_FUNC_END(__memset) 113 113 SYM_FUNC_ALIAS_WEAK(memset, __memset) 114 + SYM_FUNC_ALIAS(__pi_memset, __memset) 115 + SYM_FUNC_ALIAS(__pi___memset, __memset)
+2
arch/riscv/lib/strcmp.S
··· 120 120 .option pop 121 121 #endif 122 122 SYM_FUNC_END(strcmp) 123 + SYM_FUNC_ALIAS(__pi_strcmp, strcmp) 124 + EXPORT_SYMBOL(strcmp)
+1
arch/riscv/lib/strlen.S
··· 131 131 #endif 132 132 SYM_FUNC_END(strlen) 133 133 SYM_FUNC_ALIAS(__pi_strlen, strlen) 134 + EXPORT_SYMBOL(strlen)
+2
arch/riscv/lib/strncmp.S
··· 136 136 .option pop 137 137 #endif 138 138 SYM_FUNC_END(strncmp) 139 + SYM_FUNC_ALIAS(__pi_strncmp, strncmp) 140 + EXPORT_SYMBOL(strncmp)
+15 -13
arch/riscv/mm/init.c
··· 37 37 38 38 #include "../kernel/head.h" 39 39 40 + u64 new_vmalloc[NR_CPUS / sizeof(u64) + 1]; 41 + 40 42 struct kernel_mapping kernel_map __ro_after_init; 41 43 EXPORT_SYMBOL(kernel_map); 42 44 #ifdef CONFIG_XIP_KERNEL ··· 919 917 static void __init create_kernel_page_table(pgd_t *pgdir, 920 918 __always_unused bool early) 921 919 { 922 - uintptr_t va, end_va; 920 + uintptr_t va, start_va, end_va; 923 921 924 922 /* Map the flash resident part */ 925 923 end_va = kernel_map.virt_addr + kernel_map.xiprom_sz; ··· 929 927 PMD_SIZE, PAGE_KERNEL_EXEC); 930 928 931 929 /* Map the data in RAM */ 930 + start_va = kernel_map.virt_addr + (uintptr_t)&_sdata - (uintptr_t)&_start; 932 931 end_va = kernel_map.virt_addr + kernel_map.size; 933 - for (va = kernel_map.virt_addr + XIP_OFFSET; va < end_va; va += PMD_SIZE) 932 + for (va = start_va; va < end_va; va += PMD_SIZE) 934 933 create_pgd_mapping(pgdir, va, 935 - kernel_map.phys_addr + (va - (kernel_map.virt_addr + XIP_OFFSET)), 934 + kernel_map.phys_addr + (va - start_va), 936 935 PMD_SIZE, PAGE_KERNEL); 937 936 } 938 937 #else ··· 1051 1048 #ifdef CONFIG_RANDOMIZE_BASE 1052 1049 extern bool __init __pi_set_nokaslr_from_cmdline(uintptr_t dtb_pa); 1053 1050 extern u64 __init __pi_get_kaslr_seed(uintptr_t dtb_pa); 1051 + extern u64 __init __pi_get_kaslr_seed_zkr(const uintptr_t dtb_pa); 1054 1052 1055 1053 static int __init print_nokaslr(char *p) 1056 1054 { ··· 1072 1068 1073 1069 #ifdef CONFIG_RANDOMIZE_BASE 1074 1070 if (!__pi_set_nokaslr_from_cmdline(dtb_pa)) { 1075 - u64 kaslr_seed = __pi_get_kaslr_seed(dtb_pa); 1071 + u64 kaslr_seed = __pi_get_kaslr_seed_zkr(dtb_pa); 1076 1072 u32 kernel_size = (uintptr_t)(&_end) - (uintptr_t)(&_start); 1077 1073 u32 nr_pos; 1078 1074 1075 + if (kaslr_seed == 0) 1076 + kaslr_seed = __pi_get_kaslr_seed(dtb_pa); 1079 1077 /* 1080 1078 * Compute the number of positions available: we are limited 1081 1079 * by the early page table that only has one PUD and we must ··· 1104 1098 kernel_map.phys_addr = (uintptr_t)CONFIG_PHYS_RAM_BASE; 1105 1099 kernel_map.size = (uintptr_t)(&_end) - (uintptr_t)(&_start); 1106 1100 1107 - kernel_map.va_kernel_xip_pa_offset = kernel_map.virt_addr - kernel_map.xiprom; 1101 + kernel_map.va_kernel_xip_text_pa_offset = kernel_map.virt_addr - kernel_map.xiprom; 1102 + kernel_map.va_kernel_xip_data_pa_offset = kernel_map.virt_addr - kernel_map.phys_addr 1103 + + (uintptr_t)&_sdata - (uintptr_t)&_start; 1108 1104 #else 1109 1105 kernel_map.page_offset = _AC(CONFIG_PAGE_OFFSET, UL); 1110 1106 kernel_map.phys_addr = (uintptr_t)(&_start); 1111 1107 kernel_map.size = (uintptr_t)(&_end) - kernel_map.phys_addr; 1108 + kernel_map.va_kernel_pa_offset = kernel_map.virt_addr - kernel_map.phys_addr; 1112 1109 #endif 1113 1110 1114 1111 #if defined(CONFIG_64BIT) && !defined(CONFIG_XIP_KERNEL) ··· 1133 1124 */ 1134 1125 kernel_map.va_pa_offset = IS_ENABLED(CONFIG_64BIT) ? 1135 1126 0UL : PAGE_OFFSET - kernel_map.phys_addr; 1136 - kernel_map.va_kernel_pa_offset = kernel_map.virt_addr - kernel_map.phys_addr; 1137 1127 1138 - /* 1139 - * The default maximal physical memory size is KERN_VIRT_SIZE for 32-bit 1140 - * kernel, whereas for 64-bit kernel, the end of the virtual address 1141 - * space is occupied by the modules/BPF/kernel mappings which reduces 1142 - * the available size of the linear mapping. 1143 - */ 1144 - memory_limit = KERN_VIRT_SIZE - (IS_ENABLED(CONFIG_64BIT) ? 
SZ_4G : 0); 1128 + memory_limit = KERN_VIRT_SIZE; 1145 1129 1146 1130 /* Sanity check alignment and size */ 1147 1131 BUG_ON((PAGE_OFFSET % PGDIR_SIZE) != 0);
+13
arch/riscv/mm/pgtable.c
··· 9 9 unsigned long address, pte_t *ptep, 10 10 pte_t entry, int dirty) 11 11 { 12 + asm goto(ALTERNATIVE("nop", "j %l[svvptc]", 0, RISCV_ISA_EXT_SVVPTC, 1) 13 + : : : : svvptc); 14 + 12 15 if (!pte_same(ptep_get(ptep), entry)) 13 16 __set_pte_at(vma->vm_mm, ptep, entry); 14 17 /* ··· 19 16 * the case that the PTE changed and the spurious fault case. 20 17 */ 21 18 return true; 19 + 20 + svvptc: 21 + if (!pte_same(ptep_get(ptep), entry)) { 22 + __set_pte_at(vma->vm_mm, ptep, entry); 23 + /* Here only not svadu is impacted */ 24 + flush_tlb_page(vma, address); 25 + return true; 26 + } 27 + 28 + return false; 22 29 } 23 30 24 31 int ptep_test_and_clear_young(struct vm_area_struct *vma,
+2
arch/riscv/purgatory/Makefile
··· 1 1 # SPDX-License-Identifier: GPL-2.0 2 2 3 3 purgatory-y := purgatory.o sha256.o entry.o string.o ctype.o memcpy.o memset.o 4 + ifeq ($(CONFIG_KASAN_GENERIC)$(CONFIG_KASAN_SW_TAGS),) 4 5 purgatory-y += strcmp.o strlen.o strncmp.o 6 + endif 5 7 6 8 targets += $(purgatory-y) 7 9 PURGATORY_OBJS = $(addprefix $(obj)/,$(purgatory-y))
-2
drivers/perf/riscv_pmu.c
··· 39 39 userpg->cap_user_time_short = 0; 40 40 userpg->cap_user_rdpmc = riscv_perf_user_access(event); 41 41 42 - #ifdef CONFIG_RISCV_PMU 43 42 /* 44 43 * The counters are 64-bit but the priv spec doesn't mandate all the 45 44 * bits to be implemented: that's why, counter width can vary based on ··· 46 47 */ 47 48 if (userpg->cap_user_rdpmc) 48 49 userpg->pmc_width = to_riscv_pmu(event->pmu)->ctr_get_width(event->hw.idx) + 1; 49 - #endif 50 50 51 51 do { 52 52 rd = sched_clock_read_begin(&seq);
+23 -10
drivers/perf/riscv_pmu_sbi.c
··· 60 60 #define PERF_EVENT_FLAG_LEGACY BIT(SYSCTL_LEGACY) 61 61 62 62 PMU_FORMAT_ATTR(event, "config:0-47"); 63 - PMU_FORMAT_ATTR(firmware, "config:63"); 63 + PMU_FORMAT_ATTR(firmware, "config:62-63"); 64 64 65 65 static bool sbi_v2_available; 66 66 static DEFINE_STATIC_KEY_FALSE(sbi_pmu_snapshot_available); ··· 507 507 { 508 508 u32 type = event->attr.type; 509 509 u64 config = event->attr.config; 510 - int bSoftware; 511 510 u64 raw_config_val; 512 511 int ret; 513 512 ··· 527 528 break; 528 529 case PERF_TYPE_RAW: 529 530 /* 530 - * As per SBI specification, the upper 16 bits must be unused for 531 - * a raw event. Use the MSB (63b) to distinguish between hardware 532 - * raw event and firmware events. 531 + * As per SBI specification, the upper 16 bits must be unused 532 + * for a raw event. 533 + * Bits 63:62 are used to distinguish between raw events 534 + * 00 - Hardware raw event 535 + * 10 - SBI firmware events 536 + * 11 - Risc-V platform specific firmware event 533 537 */ 534 - bSoftware = config >> 63; 535 538 raw_config_val = config & RISCV_PMU_RAW_EVENT_MASK; 536 - if (bSoftware) { 537 - ret = (raw_config_val & 0xFFFF) | 538 - (SBI_PMU_EVENT_TYPE_FW << 16); 539 - } else { 539 + switch (config >> 62) { 540 + case 0: 540 541 ret = RISCV_PMU_RAW_EVENT_IDX; 541 542 *econfig = raw_config_val; 543 + break; 544 + case 2: 545 + ret = (raw_config_val & 0xFFFF) | 546 + (SBI_PMU_EVENT_TYPE_FW << 16); 547 + break; 548 + case 3: 549 + /* 550 + * For Risc-V platform specific firmware events 551 + * Event code - 0xFFFF 552 + * Event data - raw event encoding 553 + */ 554 + ret = SBI_PMU_EVENT_TYPE_FW << 16 | RISCV_PLAT_FW_EVENT; 555 + *econfig = raw_config_val; 556 + break; 542 557 } 543 558 break; 544 559 default:
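So config[63:62] now selects the event class while the low 48 bits carry the raw encoding. A small userspace decode sketch (SBI_PMU_EVENT_TYPE_FW assumed to be 0xf per the SBI PMU spec; the printed event_idx values are illustrative):

#include <stdint.h>
#include <stdio.h>

#define RISCV_PMU_RAW_EVENT_MASK ((1ULL << 48) - 1)
#define SBI_PMU_EVENT_TYPE_FW    0xf    /* assumed, per the SBI PMU spec */
#define RISCV_PLAT_FW_EVENT      0xFFFF

int main(void)
{
        /* perf_event_attr.config for a platform firmware event: selector
         * 0b11 in bits 63:62, raw encoding 0x42 in the low 48 bits. */
        uint64_t config = (3ULL << 62) | 0x42;
        uint64_t raw = config & RISCV_PMU_RAW_EVENT_MASK;

        switch (config >> 62) {
        case 0: /* hardware raw event */
                printf("event_idx=0x20000 econfig=0x%llx\n", (unsigned long long)raw);
                break;
        case 2: /* SBI firmware event */
                printf("event_idx=0x%llx\n",
                       (unsigned long long)((raw & 0xFFFF) | SBI_PMU_EVENT_TYPE_FW << 16));
                break;
        case 3: /* platform-specific firmware event */
                printf("event_idx=0x%x econfig=0x%llx\n",
                       SBI_PMU_EVENT_TYPE_FW << 16 | RISCV_PLAT_FW_EVENT,
                       (unsigned long long)raw);
                break;
        default:
                printf("reserved selector\n");
        }
        return 0;
}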
+39
tools/arch/riscv/include/asm/barrier.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0-only */ 2 + /* 3 + * Copied from the kernel sources to tools/arch/riscv: 4 + * 5 + * Copyright (C) 2012 ARM Ltd. 6 + * Copyright (C) 2013 Regents of the University of California 7 + * Copyright (C) 2017 SiFive 8 + */ 9 + 10 + #ifndef _TOOLS_LINUX_ASM_RISCV_BARRIER_H 11 + #define _TOOLS_LINUX_ASM_RISCV_BARRIER_H 12 + 13 + #include <asm/fence.h> 14 + #include <linux/compiler.h> 15 + 16 + /* These barriers need to enforce ordering on both devices and memory. */ 17 + #define mb() RISCV_FENCE(iorw, iorw) 18 + #define rmb() RISCV_FENCE(ir, ir) 19 + #define wmb() RISCV_FENCE(ow, ow) 20 + 21 + /* These barriers do not need to enforce ordering on devices, just memory. */ 22 + #define smp_mb() RISCV_FENCE(rw, rw) 23 + #define smp_rmb() RISCV_FENCE(r, r) 24 + #define smp_wmb() RISCV_FENCE(w, w) 25 + 26 + #define smp_store_release(p, v) \ 27 + do { \ 28 + RISCV_FENCE(rw, w); \ 29 + WRITE_ONCE(*p, v); \ 30 + } while (0) 31 + 32 + #define smp_load_acquire(p) \ 33 + ({ \ 34 + typeof(*p) ___p1 = READ_ONCE(*p); \ 35 + RISCV_FENCE(r, rw); \ 36 + ___p1; \ 37 + }) 38 + 39 + #endif /* _TOOLS_LINUX_ASM_RISCV_BARRIER_H */
+13
tools/arch/riscv/include/asm/fence.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0-only */ 2 + /* 3 + * Copied from the kernel sources to tools/arch/riscv: 4 + */ 5 + 6 + #ifndef _ASM_RISCV_FENCE_H 7 + #define _ASM_RISCV_FENCE_H 8 + 9 + #define RISCV_FENCE_ASM(p, s) "\tfence " #p "," #s "\n" 10 + #define RISCV_FENCE(p, s) \ 11 + ({ __asm__ __volatile__ (RISCV_FENCE_ASM(p, s) : : : "memory"); }) 12 + 13 + #endif /* _ASM_RISCV_FENCE_H */
+2
tools/include/asm/barrier.h
··· 8 8 #include "../../arch/arm64/include/asm/barrier.h" 9 9 #elif defined(__powerpc__) 10 10 #include "../../arch/powerpc/include/asm/barrier.h" 11 + #elif defined(__riscv) 12 + #include "../../arch/riscv/include/asm/barrier.h" 11 13 #elif defined(__s390__) 12 14 #include "../../arch/s390/include/asm/barrier.h" 13 15 #elif defined(__sh__)
+1 -1
tools/include/linux/ring_buffer.h
··· 55 55 * READ_ONCE() + smp_mb() pair. 56 56 */ 57 57 #if defined(__x86_64__) || defined(__aarch64__) || defined(__powerpc64__) || \ 58 - defined(__ia64__) || defined(__sparc__) && defined(__arch64__) 58 + defined(__ia64__) || defined(__sparc__) && defined(__arch64__) || defined(__riscv) 59 59 return smp_load_acquire(&base->data_head); 60 60 #else 61 61 u64 head = READ_ONCE(base->data_head);