Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

Merge tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux

Pull arm64 fixes from Catalin Marinas:

- Avoid writing an uninitialised stack variable to POR_EL0 on sigreturn
if the poe_context record is absent

- Reserve one more page for the early 4K-page kernel mapping to cover
the extra [_text, _stext) split introduced by the non-executable
read-only mapping

- Force the arch_local_irq_*() wrappers to be __always_inline so that
noinstr entry and idle paths cannot call out-of-line, instrumentable
copies

- Fix potential sign extension in the arm64 SCS unwinder's DWARF
advance_loc4 decoding

- Tolerate arm64 ACPI platforms with only WFI and no deeper PSCI idle
states, restoring cpuidle registration on such systems

- Include the UAPI <asm/ptrace.h> header in the arm64 GCS libc test
rather than carrying a duplicate struct user_gcs definition (the
original #ifdef NT_ARM_GCS guard wrongly enclosed the structure
definition as well, so the struct would be omitted entirely whenever
the toolchain's headers already defined NT_ARM_GCS)

* tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux:
arm64: signal: Preserve POR_EL0 if poe_context is missing
arm64: Reserve an extra page for early kernel mapping
kselftest/arm64: Include <asm/ptrace.h> for user_gcs definition
ACPI: arm64: cpuidle: Tolerate platforms with no deep PSCI idle states
arm64/irqflags: __always_inline the arch_local_irq_*() helpers
arm64/scs: Fix potential sign extension issue of advance_loc4

+62 -34
+7 -7
arch/arm64/include/asm/irqflags.h
··· 40 40 barrier(); 41 41 } 42 42 43 - static inline void arch_local_irq_enable(void) 43 + static __always_inline void arch_local_irq_enable(void) 44 44 { 45 45 if (system_uses_irq_prio_masking()) { 46 46 __pmr_local_irq_enable(); ··· 68 68 barrier(); 69 69 } 70 70 71 - static inline void arch_local_irq_disable(void) 71 + static __always_inline void arch_local_irq_disable(void) 72 72 { 73 73 if (system_uses_irq_prio_masking()) { 74 74 __pmr_local_irq_disable(); ··· 90 90 /* 91 91 * Save the current interrupt enable state. 92 92 */ 93 - static inline unsigned long arch_local_save_flags(void) 93 + static __always_inline unsigned long arch_local_save_flags(void) 94 94 { 95 95 if (system_uses_irq_prio_masking()) { 96 96 return __pmr_local_save_flags(); ··· 109 109 return flags != GIC_PRIO_IRQON; 110 110 } 111 111 112 - static inline bool arch_irqs_disabled_flags(unsigned long flags) 112 + static __always_inline bool arch_irqs_disabled_flags(unsigned long flags) 113 113 { 114 114 if (system_uses_irq_prio_masking()) { 115 115 return __pmr_irqs_disabled_flags(flags); ··· 128 128 return __pmr_irqs_disabled_flags(__pmr_local_save_flags()); 129 129 } 130 130 131 - static inline bool arch_irqs_disabled(void) 131 + static __always_inline bool arch_irqs_disabled(void) 132 132 { 133 133 if (system_uses_irq_prio_masking()) { 134 134 return __pmr_irqs_disabled(); ··· 160 160 return flags; 161 161 } 162 162 163 - static inline unsigned long arch_local_irq_save(void) 163 + static __always_inline unsigned long arch_local_irq_save(void) 164 164 { 165 165 if (system_uses_irq_prio_masking()) { 166 166 return __pmr_local_irq_save(); ··· 187 187 /* 188 188 * restore saved IRQ state 189 189 */ 190 - static inline void arch_local_irq_restore(unsigned long flags) 190 + static __always_inline void arch_local_irq_restore(unsigned long flags) 191 191 { 192 192 if (system_uses_irq_prio_masking()) { 193 193 __pmr_local_irq_restore(flags);
+6 -1
arch/arm64/include/asm/kernel-pgtable.h
··· 68 68 #define KERNEL_SEGMENT_COUNT 5 69 69 70 70 #if SWAPPER_BLOCK_SIZE > SEGMENT_ALIGN 71 - #define EARLY_SEGMENT_EXTRA_PAGES (KERNEL_SEGMENT_COUNT + 1) 71 + /* 72 + * KERNEL_SEGMENT_COUNT counts the permanent kernel VMAs. The early mapping 73 + * has one additional split, [_text, _stext). Reserve one more page for the 74 + * SWAPPER_BLOCK_SIZE-unaligned boundaries. 75 + */ 76 + #define EARLY_SEGMENT_EXTRA_PAGES (KERNEL_SEGMENT_COUNT + 2) 72 77 /* 73 78 * The initial ID map consists of the kernel image, mapped as two separate 74 79 * segments, and may appear misaligned wrt the swapper block size. This means
+2 -2
arch/arm64/kernel/pi/patch-scs.c
··· 196 196 loc += *opcode++ * code_alignment_factor; 197 197 loc += (*opcode++ << 8) * code_alignment_factor; 198 198 loc += (*opcode++ << 16) * code_alignment_factor; 199 - loc += (*opcode++ << 24) * code_alignment_factor; 199 + loc += ((u64)*opcode++ << 24) * code_alignment_factor; 200 200 size -= 4; 201 - break; 201 + break; 202 202 203 203 case DW_CFA_def_cfa: 204 204 case DW_CFA_offset_extended:
+43 -11
arch/arm64/kernel/signal.c
··· 67 67 unsigned long end_offset; 68 68 }; 69 69 70 + #define TERMINATOR_SIZE round_up(sizeof(struct _aarch64_ctx), 16) 71 + #define EXTRA_CONTEXT_SIZE round_up(sizeof(struct extra_context), 16) 72 + 70 73 /* 71 74 * Holds any EL0-controlled state that influences unprivileged memory accesses. 72 75 * This includes both accesses done in userspace and uaccess done in the kernel. ··· 77 74 * This state needs to be carefully managed to ensure that it doesn't cause 78 75 * uaccess to fail when setting up the signal frame, and the signal handler 79 76 * itself also expects a well-defined state when entered. 77 + * 78 + * The struct should be zero-initialised. Its members should only be accessed 79 + * via the accessors below. __valid_fields tracks which of the fields are valid 80 + * (have been set to some value). 80 81 */ 81 82 struct user_access_state { 82 - u64 por_el0; 83 + unsigned int __valid_fields; 84 + u64 __por_el0; 83 85 }; 84 86 85 - #define TERMINATOR_SIZE round_up(sizeof(struct _aarch64_ctx), 16) 86 - #define EXTRA_CONTEXT_SIZE round_up(sizeof(struct extra_context), 16) 87 + #define UA_STATE_HAS_POR_EL0 BIT(0) 88 + 89 + static void set_ua_state_por_el0(struct user_access_state *ua_state, 90 + u64 por_el0) 91 + { 92 + ua_state->__por_el0 = por_el0; 93 + ua_state->__valid_fields |= UA_STATE_HAS_POR_EL0; 94 + } 95 + 96 + static int get_ua_state_por_el0(const struct user_access_state *ua_state, 97 + u64 *por_el0) 98 + { 99 + if (ua_state->__valid_fields & UA_STATE_HAS_POR_EL0) { 100 + *por_el0 = ua_state->__por_el0; 101 + return 0; 102 + } 103 + 104 + return -ENOENT; 105 + } 87 106 88 107 /* 89 108 * Save the user access state into ua_state and reset it to disable any ··· 119 94 for (int pkey = 0; pkey < arch_max_pkey(); pkey++) 120 95 por_enable_all |= POR_ELx_PERM_PREP(pkey, POE_RWX); 121 96 122 - ua_state->por_el0 = read_sysreg_s(SYS_POR_EL0); 97 + set_ua_state_por_el0(ua_state, read_sysreg_s(SYS_POR_EL0)); 123 98 write_sysreg_s(por_enable_all, 
SYS_POR_EL0); 124 99 /* 125 100 * No ISB required as we can tolerate spurious Overlay faults - ··· 147 122 */ 148 123 static void restore_user_access_state(const struct user_access_state *ua_state) 149 124 { 150 - if (system_supports_poe()) 151 - write_sysreg_s(ua_state->por_el0, SYS_POR_EL0); 125 + u64 por_el0; 126 + 127 + if (get_ua_state_por_el0(ua_state, &por_el0) == 0) 128 + write_sysreg_s(por_el0, SYS_POR_EL0); 152 129 } 153 130 154 131 static void init_user_layout(struct rt_sigframe_user_layout *user) ··· 360 333 static int preserve_poe_context(struct poe_context __user *ctx, 361 334 const struct user_access_state *ua_state) 362 335 { 363 - int err = 0; 336 + int err; 337 + u64 por_el0; 338 + 339 + err = get_ua_state_por_el0(ua_state, &por_el0); 340 + if (WARN_ON_ONCE(err)) 341 + return err; 364 342 365 343 __put_user_error(POE_MAGIC, &ctx->head.magic, err); 366 344 __put_user_error(sizeof(*ctx), &ctx->head.size, err); 367 - __put_user_error(ua_state->por_el0, &ctx->por_el0, err); 345 + __put_user_error(por_el0, &ctx->por_el0, err); 368 346 369 347 return err; 370 348 } ··· 385 353 386 354 __get_user_error(por_el0, &(user->poe->por_el0), err); 387 355 if (!err) 388 - ua_state->por_el0 = por_el0; 356 + set_ua_state_por_el0(ua_state, por_el0); 389 357 390 358 return err; 391 359 } ··· 1127 1095 { 1128 1096 struct pt_regs *regs = current_pt_regs(); 1129 1097 struct rt_sigframe __user *frame; 1130 - struct user_access_state ua_state; 1098 + struct user_access_state ua_state = {}; 1131 1099 1132 1100 /* Always make any pending restarted system calls return -EINTR */ 1133 1101 current->restart_block.fn = do_no_restart_syscall; ··· 1539 1507 { 1540 1508 struct rt_sigframe_user_layout user; 1541 1509 struct rt_sigframe __user *frame; 1542 - struct user_access_state ua_state; 1510 + struct user_access_state ua_state = {}; 1543 1511 int err = 0; 1544 1512 1545 1513 fpsimd_save_and_flush_current_state();
+3 -7
drivers/acpi/arm64/cpuidle.c
··· 16 16 17 17 static int psci_acpi_cpu_init_idle(unsigned int cpu) 18 18 { 19 - int i, count; 19 + int i; 20 20 struct acpi_lpi_state *lpi; 21 21 struct acpi_processor *pr = per_cpu(processors, cpu); 22 22 ··· 30 30 if (!psci_ops.cpu_suspend) 31 31 return -EOPNOTSUPP; 32 32 33 - count = pr->power.count - 1; 34 - if (count <= 0) 35 - return -ENODEV; 36 - 37 - for (i = 0; i < count; i++) { 33 + for (i = 1; i < pr->power.count; i++) { 38 34 u32 state; 39 35 40 - lpi = &pr->power.lpi_states[i + 1]; 36 + lpi = &pr->power.lpi_states[i]; 41 37 /* 42 38 * Only bits[31:0] represent a PSCI power_state while 43 39 * bits[63:32] must be 0x0 as per ARM ACPI FFH Specification
-6
tools/testing/selftests/arm64/gcs/gcs-util.h
··· 18 18 19 19 #ifndef NT_ARM_GCS 20 20 #define NT_ARM_GCS 0x410 21 - 22 - struct user_gcs { 23 - __u64 features_enabled; 24 - __u64 features_locked; 25 - __u64 gcspr_el0; 26 - }; 27 21 #endif 28 22 29 23 /* Shadow Stack/Guarded Control Stack interface */
+1
tools/testing/selftests/arm64/gcs/libc-gcs.c
··· 16 16 17 17 #include <asm/hwcap.h> 18 18 #include <asm/mman.h> 19 + #include <asm/ptrace.h> 19 20 20 21 #include <linux/compiler.h> 21 22