Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

Merge tag 'riscv-for-linus-7.0-rc7' of git://git.kernel.org/pub/scm/linux/kernel/git/riscv/linux

Pull RISC-V fixes from Paul Walmsley:

- Fix a CONFIG_SPARSEMEM crash on RV32 by avoiding early phys_to_page()

- Prevent runtime const infrastructure from being used by modules,
similar to what was done for x86

- Avoid problems when shutting down ACPI systems with IOMMUs by adding
a device dependency between IOMMU and devices that use it

- Fix a bug where the CPU pointer masking state isn't properly reset
when tagged addresses aren't enabled for a task

- Fix some incorrect register assignments, and add some missing ones,
in kgdb support code

- Fix compilation of non-kernel code that uses the ptrace uapi header
by replacing BIT() with _BITUL()

- Fix compilation of the validate_v_ptrace kselftest by working around
kselftest macro expansion issues

* tag 'riscv-for-linus-7.0-rc7' of git://git.kernel.org/pub/scm/linux/kernel/git/riscv/linux:
ACPI: RIMT: Add dependency between iommu and devices
selftests: riscv: Add braces around EXPECT_EQ()
riscv: use _BITUL macro rather than BIT() in ptrace uapi and kselftests
riscv: Reset pmm when PR_TAGGED_ADDR_ENABLE is not set
riscv: make runtime const not usable by modules
riscv: patch: Avoid early phys_to_page()
riscv: kgdb: fix several debug register assignment bugs

+47 -28
+4
arch/riscv/include/asm/runtime-const.h
···
 #ifndef _ASM_RISCV_RUNTIME_CONST_H
 #define _ASM_RISCV_RUNTIME_CONST_H
 
+#ifdef MODULE
+#error "Cannot use runtime-const infrastructure from modules"
+#endif
+
 #include <asm/asm.h>
 #include <asm/alternative.h>
 #include <asm/cacheflush.h>
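
Why a hard error rather than a runtime check: runtime-const values are patched into kernel text during boot, and module text is not covered by that patching pass, so a module including this header would silently read an unpatched placeholder. A minimal sketch of the same guard pattern (header name hypothetical; kbuild defines MODULE when compiling module objects):

#ifndef _FOO_BUILTIN_ONLY_H	/* hypothetical header, illustration only */
#define _FOO_BUILTIN_ONLY_H

#ifdef MODULE
/* Fail at compile time instead of letting a module read a value the
 * boot-time patcher never fixed up. */
#error "foo_builtin_only.h cannot be used from modules"
#endif

/* ... built-in-only declarations would follow here ... */

#endif /* _FOO_BUILTIN_ONLY_H */
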
+7 -6
arch/riscv/include/uapi/asm/ptrace.h
···
 #ifndef __ASSEMBLER__
 
 #include <linux/types.h>
+#include <linux/const.h>
 
 #define PTRACE_GETFDPIC 33
 
···
 #define PTRACE_CFI_SS_LOCK_BIT		4
 #define PTRACE_CFI_SS_PTR_BIT		5
 
-#define PTRACE_CFI_LP_EN_STATE		BIT(PTRACE_CFI_LP_EN_BIT)
-#define PTRACE_CFI_LP_LOCK_STATE	BIT(PTRACE_CFI_LP_LOCK_BIT)
-#define PTRACE_CFI_ELP_STATE		BIT(PTRACE_CFI_ELP_BIT)
-#define PTRACE_CFI_SS_EN_STATE		BIT(PTRACE_CFI_SS_EN_BIT)
-#define PTRACE_CFI_SS_LOCK_STATE	BIT(PTRACE_CFI_SS_LOCK_BIT)
-#define PTRACE_CFI_SS_PTR_STATE		BIT(PTRACE_CFI_SS_PTR_BIT)
+#define PTRACE_CFI_LP_EN_STATE		_BITUL(PTRACE_CFI_LP_EN_BIT)
+#define PTRACE_CFI_LP_LOCK_STATE	_BITUL(PTRACE_CFI_LP_LOCK_BIT)
+#define PTRACE_CFI_ELP_STATE		_BITUL(PTRACE_CFI_ELP_BIT)
+#define PTRACE_CFI_SS_EN_STATE		_BITUL(PTRACE_CFI_SS_EN_BIT)
+#define PTRACE_CFI_SS_LOCK_STATE	_BITUL(PTRACE_CFI_SS_LOCK_BIT)
+#define PTRACE_CFI_SS_PTR_STATE		_BITUL(PTRACE_CFI_SS_PTR_BIT)
 
 #define PRACE_CFI_STATE_INVALID_MASK	~(PTRACE_CFI_LP_EN_STATE | \
 					  PTRACE_CFI_LP_LOCK_STATE | \
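
The substitution matters because BIT() is a kernel-internal helper from <linux/bits.h> and is not part of the exported uapi, so a userspace program including this header would hit an undefined macro; _BITUL() comes from the uapi <linux/const.h> added above. A small userspace sketch of the same idiom (macro names here are made up):

#include <stdio.h>
#include <linux/const.h>	/* uapi header providing _BITUL() */

#define MY_FEATURE_EN_BIT	0
#define MY_FEATURE_EN_STATE	_BITUL(MY_FEATURE_EN_BIT)

int main(void)
{
	/* _BITUL(n) expands to an unsigned long (1UL << n). */
	printf("mask = %#lx\n", MY_FEATURE_EN_STATE);	/* prints 0x1 */
	return 0;
}
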
+4 -3
arch/riscv/kernel/kgdb.c
···
 	{DBG_REG_T1, GDB_SIZEOF_REG, offsetof(struct pt_regs, t1)},
 	{DBG_REG_T2, GDB_SIZEOF_REG, offsetof(struct pt_regs, t2)},
 	{DBG_REG_FP, GDB_SIZEOF_REG, offsetof(struct pt_regs, s0)},
-	{DBG_REG_S1, GDB_SIZEOF_REG, offsetof(struct pt_regs, a1)},
+	{DBG_REG_S1, GDB_SIZEOF_REG, offsetof(struct pt_regs, s1)},
 	{DBG_REG_A0, GDB_SIZEOF_REG, offsetof(struct pt_regs, a0)},
 	{DBG_REG_A1, GDB_SIZEOF_REG, offsetof(struct pt_regs, a1)},
 	{DBG_REG_A2, GDB_SIZEOF_REG, offsetof(struct pt_regs, a2)},
···
 	gdb_regs[DBG_REG_S6_OFF] = task->thread.s[6];
 	gdb_regs[DBG_REG_S7_OFF] = task->thread.s[7];
 	gdb_regs[DBG_REG_S8_OFF] = task->thread.s[8];
-	gdb_regs[DBG_REG_S9_OFF] = task->thread.s[10];
-	gdb_regs[DBG_REG_S10_OFF] = task->thread.s[11];
+	gdb_regs[DBG_REG_S9_OFF] = task->thread.s[9];
+	gdb_regs[DBG_REG_S10_OFF] = task->thread.s[10];
+	gdb_regs[DBG_REG_S11_OFF] = task->thread.s[11];
 	gdb_regs[DBG_REG_EPC_OFF] = task->thread.ra;
 }
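
Each entry in this table ties a GDB register number to the byte offset of that register inside struct pt_regs, so a wrong offsetof() target silently aliases one register onto another (here, s1 onto a1) without any error. A reduced sketch of how such an entry is consumed (types cut down for illustration):

#include <stddef.h>
#include <string.h>

/* Cut-down stand-ins for the kernel structures, illustration only. */
struct pt_regs { unsigned long a1, s1; };
struct reg_def { const char *name; size_t size; size_t offset; };

static void get_one_reg(const struct reg_def *def,
			const struct pt_regs *regs, void *out)
{
	memcpy(out, (const char *)regs + def->offset, def->size);
}

int main(void)
{
	/* The fixed mapping: s1 must point at pt_regs.s1, not .a1. */
	struct reg_def s1_def = {
		"s1", sizeof(unsigned long), offsetof(struct pt_regs, s1)
	};
	struct pt_regs regs = { .a1 = 0x11, .s1 = 0x22 };
	unsigned long val;

	get_one_reg(&s1_def, &regs, &val);
	return val == 0x22 ? 0 : 1;	/* wrong offset would read a1 */
}
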
+11 -10
arch/riscv/kernel/patch.c
···
 static __always_inline void *patch_map(void *addr, const unsigned int fixmap)
 {
 	uintptr_t uintaddr = (uintptr_t) addr;
-	struct page *page;
+	phys_addr_t phys;
 
-	if (core_kernel_text(uintaddr) || is_kernel_exittext(uintaddr))
-		page = phys_to_page(__pa_symbol(addr));
-	else if (IS_ENABLED(CONFIG_STRICT_MODULE_RWX))
-		page = vmalloc_to_page(addr);
-	else
+	if (core_kernel_text(uintaddr) || is_kernel_exittext(uintaddr)) {
+		phys = __pa_symbol(addr);
+	} else if (IS_ENABLED(CONFIG_STRICT_MODULE_RWX)) {
+		struct page *page = vmalloc_to_page(addr);
+
+		BUG_ON(!page);
+		phys = page_to_phys(page) + offset_in_page(addr);
+	} else {
 		return addr;
+	}
 
-	BUG_ON(!page);
-
-	return (void *)set_fixmap_offset(fixmap, page_to_phys(page) +
-					 offset_in_page(addr));
+	return (void *)set_fixmap_offset(fixmap, phys);
 }
 
 static void patch_unmap(int fixmap)
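
The crash exists because, with CONFIG_SPARSEMEM, phys_to_page() resolves a physical address through the mem_section tables, which are not yet populated when early boot code first patches instructions; __pa_symbol() is plain address arithmetic and needs no tables, so computing the physical address directly sidesteps the lookup. A toy userspace model of that distinction (all names hypothetical):

#include <assert.h>
#include <stdint.h>

#define PAGE_SHIFT 12

/* Stand-in for SPARSEMEM's mem_section[]: NULL until "mm init" runs. */
static uintptr_t *section_table;

/* Table-based translation: dereferences the table, so calling it
 * before init is a crash -- the RV32 early-boot failure mode. */
static uintptr_t table_lookup(uintptr_t pa)
{
	assert(section_table != NULL);
	return section_table[pa >> PAGE_SHIFT];
}

/* Arithmetic translation: no state, safe at any point during boot. */
static uintptr_t arith_pa(uintptr_t va, uintptr_t va_pa_offset)
{
	return va - va_pa_offset;
}

int main(void)
{
	/* "Early boot": only the arithmetic path is usable here. */
	(void)table_lookup;	/* would assert if called now */
	return arith_pa(0xC0001000u, 0x40000000u) == 0x80001000u ? 0 : 1;
}
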
+3 -1
arch/riscv/kernel/process.c
···
 	if (arg & PR_TAGGED_ADDR_ENABLE && (tagged_addr_disabled || !pmlen))
 		return -EINVAL;
 
-	if (!(arg & PR_TAGGED_ADDR_ENABLE))
+	if (!(arg & PR_TAGGED_ADDR_ENABLE)) {
 		pmlen = PMLEN_0;
+		pmm = ENVCFG_PMM_PMLEN_0;
+	}
 
 	if (mmap_write_lock_killable(mm))
 		return -EINTR;
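
The user-visible contract here is the prctl() tagged-address API: clearing PR_TAGGED_ADDR_ENABLE must also reset the CPU's pointer-masking (pmm) state, not just the bookkeeping, or stale tag bits keep being ignored. A hedged userspace sketch of exercising that path:

#include <stdio.h>
#include <sys/prctl.h>

#ifndef PR_SET_TAGGED_ADDR_CTRL
#define PR_SET_TAGGED_ADDR_CTRL	55
#define PR_GET_TAGGED_ADDR_CTRL	56
#define PR_TAGGED_ADDR_ENABLE	(1UL << 0)
#endif

int main(void)
{
	/* Disable tagged addresses. With the fix, this also resets the
	 * hardware pointer-masking field rather than leaving it stale. */
	if (prctl(PR_SET_TAGGED_ADDR_CTRL, 0L, 0L, 0L, 0L) != 0)
		perror("PR_SET_TAGGED_ADDR_CTRL");

	long ctrl = prctl(PR_GET_TAGGED_ADDR_CTRL, 0L, 0L, 0L, 0L);
	printf("tagged-addr ctrl: %#lx\n", ctrl);	/* enable bit clear */
	return 0;
}
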
+7
drivers/acpi/riscv/rimt.c
···
 	if (!rimt_fwnode)
 		return -EPROBE_DEFER;
 
+	/*
+	 * EPROBE_DEFER ensures IOMMU is probed before the devices that
+	 * depend on them. During shutdown, however, the IOMMU may be removed
+	 * first, leading to issues. To avoid this, a device link is added
+	 * which enforces the correct removal order.
+	 */
+	device_link_add(dev, rimt_fwnode->dev, DL_FLAG_AUTOREMOVE_CONSUMER);
 	return acpi_iommu_fwspec_init(dev, deviceid, rimt_fwnode);
 }
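
device_link_add() registers dev as a consumer of the IOMMU's struct device, and the driver core unbinds and shuts down consumers before their suppliers, which is exactly the ordering the comment asks for; DL_FLAG_AUTOREMOVE_CONSUMER drops the link automatically when the consumer unbinds. A minimal sketch of the idiom in a hypothetical consumer probe:

#include <linux/device.h>

/* Hypothetical consumer probe, illustrating the device-link idiom. */
static int foo_probe(struct device *dev, struct device *iommu_dev)
{
	struct device_link *link;

	/* The core now guarantees dev is removed/shut down before
	 * iommu_dev; the link goes away when dev's driver unbinds. */
	link = device_link_add(dev, iommu_dev, DL_FLAG_AUTOREMOVE_CONSUMER);
	if (!link)
		return -ENODEV;

	return 0;
}
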
+11 -8
tools/testing/selftests/riscv/vector/validate_v_ptrace.c
···
 
 	/* verify initial vsetvli settings */
 
-	if (is_xtheadvector_supported())
+	if (is_xtheadvector_supported()) {
 		EXPECT_EQ(5UL, regset_data->vtype);
-	else
+	} else {
 		EXPECT_EQ(9UL, regset_data->vtype);
+	}
 
 	EXPECT_EQ(regset_data->vlenb, regset_data->vl);
 	EXPECT_EQ(vlenb, regset_data->vlenb);
···
 {
 }
 
-#define VECTOR_1_0		BIT(0)
-#define XTHEAD_VECTOR_0_7	BIT(1)
+#define VECTOR_1_0		_BITUL(0)
+#define XTHEAD_VECTOR_0_7	_BITUL(1)
 
 #define vector_test(x)	((x) & VECTOR_1_0)
 #define xthead_test(x)	((x) & XTHEAD_VECTOR_0_7)
···
 
 	/* verify initial vsetvli settings */
 
-	if (is_xtheadvector_supported())
+	if (is_xtheadvector_supported()) {
 		EXPECT_EQ(5UL, regset_data->vtype);
-	else
+	} else {
 		EXPECT_EQ(9UL, regset_data->vtype);
+	}
 
 	EXPECT_EQ(regset_data->vlenb, regset_data->vl);
 	EXPECT_EQ(vlenb, regset_data->vlenb);
···
 
 	/* verify initial vsetvli settings */
 
-	if (is_xtheadvector_supported())
+	if (is_xtheadvector_supported()) {
 		EXPECT_EQ(5UL, regset_data->vtype);
-	else
+	} else {
 		EXPECT_EQ(9UL, regset_data->vtype);
+	}
 
 	EXPECT_EQ(regset_data->vlenb, regset_data->vl);
 	EXPECT_EQ(vlenb, regset_data->vlenb);
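
The braces are needed because harness-style EXPECT_EQ() macros expand to more than a single statement; under a brace-less if/else the dangling else can bind to an if inside the macro expansion, which is the class of breakage the patch works around. A reduced illustration with a made-up macro (the real kselftest EXPECT_EQ() is more involved):

#include <stdio.h>

/* Made-up stand-in for a harness macro whose expansion ends in an
 * if-statement, loosely like kselftest's EXPECT_EQ() does. */
#define EXPECT_EQ(exp, seen)				\
	if ((exp) != (seen))				\
		printf("FAIL at line %d\n", __LINE__)

int main(void)
{
	int supported = 0;

	/* Without the outer braces, the `else` below would bind to the
	 * `if` inside EXPECT_EQ(), so the else-branch check would
	 * silently never run when `supported` is false. */
	if (supported) {
		EXPECT_EQ(5UL, 5UL);
	} else {
		EXPECT_EQ(9UL, 9UL);
	}
	return 0;
}
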