Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

Merge tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rmk/linux

Pull ARM fixes from Russell King:

- Fix kernel mapping for XIP kernels

- Fix SMP support for XIP kernels

- Fix compilation corner case with CFI

- Fix a typo in nommu code

- Fix cacheflush syscall when PAN is enabled on LPAE platforms

* tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rmk/linux:
ARM: fix cacheflush with PAN
ARM: 9435/1: ARM/nommu: Fix typo "absence"
ARM: 9434/1: cfi: Fix compilation corner case
ARM: 9420/1: smp: Fix SMP for xip kernels
ARM: 9419/1: mm: Fix kernel memory mapping for xip kernels

+50 -17
+10 -2
arch/arm/kernel/head.S
··· 252 252 */ 253 253 add r0, r4, #KERNEL_OFFSET >> (SECTION_SHIFT - PMD_ENTRY_ORDER) 254 254 ldr r6, =(_end - 1) 255 + 256 + /* For XIP, kernel_sec_start/kernel_sec_end are currently in RO memory */ 257 + #ifndef CONFIG_XIP_KERNEL 255 258 adr_l r5, kernel_sec_start @ _pa(kernel_sec_start) 256 259 #if defined CONFIG_CPU_ENDIAN_BE8 || defined CONFIG_CPU_ENDIAN_BE32 257 260 str r8, [r5, #4] @ Save physical start of kernel (BE) 258 261 #else 259 262 str r8, [r5] @ Save physical start of kernel (LE) 263 + #endif 260 264 #endif 261 265 orr r3, r8, r7 @ Add the MMU flags 262 266 add r6, r4, r6, lsr #(SECTION_SHIFT - PMD_ENTRY_ORDER) ··· 268 264 add r3, r3, #1 << SECTION_SHIFT 269 265 cmp r0, r6 270 266 bls 1b 267 + #ifndef CONFIG_XIP_KERNEL 271 268 eor r3, r3, r7 @ Remove the MMU flags 272 269 adr_l r5, kernel_sec_end @ _pa(kernel_sec_end) 273 270 #if defined CONFIG_CPU_ENDIAN_BE8 || defined CONFIG_CPU_ENDIAN_BE32 ··· 276 271 #else 277 272 str r3, [r5] @ Save physical end of kernel (LE) 278 273 #endif 279 - 280 - #ifdef CONFIG_XIP_KERNEL 274 + #else 281 275 /* 282 276 * Map the kernel image separately as it is not located in RAM. 283 277 */ ··· 411 407 /* 412 408 * Use the page tables supplied from __cpu_up. 413 409 */ 410 + #ifdef CONFIG_XIP_KERNEL 411 + ldr r3, =(secondary_data + PLAT_PHYS_OFFSET - PAGE_OFFSET) 412 + #else 414 413 adr_l r3, secondary_data 414 + #endif 415 415 mov_l r12, __secondary_switched 416 416 ldrd r4, r5, [r3, #0] @ get secondary_data.pgdir 417 417 ARM_BE8(eor r4, r4, r5) @ Swap r5 and r4 in BE:
+7
arch/arm/kernel/psci_smp.c
··· 45 45 static int psci_boot_secondary(unsigned int cpu, struct task_struct *idle) 46 46 { 47 47 if (psci_ops.cpu_on) 48 + #ifdef CONFIG_XIP_KERNEL 49 + return psci_ops.cpu_on(cpu_logical_map(cpu), 50 + ((phys_addr_t)(&secondary_startup) 51 + - XIP_VIRT_ADDR(CONFIG_XIP_PHYS_ADDR) 52 + + CONFIG_XIP_PHYS_ADDR)); 53 + #else 48 54 return psci_ops.cpu_on(cpu_logical_map(cpu), 49 55 virt_to_idmap(&secondary_startup)); 56 + #endif 50 57 return -ENODEV; 51 58 } 52 59
+3
arch/arm/kernel/traps.c
··· 570 570 static inline int 571 571 __do_cache_op(unsigned long start, unsigned long end) 572 572 { 573 + unsigned int ua_flags; 573 574 int ret; 574 575 575 576 do { ··· 579 578 if (fatal_signal_pending(current)) 580 579 return 0; 581 580 581 + ua_flags = uaccess_save_and_enable(); 582 582 ret = flush_icache_user_range(start, start + chunk); 583 + uaccess_restore(ua_flags); 583 584 if (ret) 584 585 return ret; 585 586
+1 -1
arch/arm/mm/dma-mapping-nommu.c
··· 39 39 /* 40 40 * Cache support for v7m is optional, so can be treated as 41 41 * coherent if no cache has been detected. Note that it is not 42 - * enough to check if MPU is in use or not since in absense of 42 + * enough to check if MPU is in use or not since in absence of 43 43 * MPU system memory map is used. 44 44 */ 45 45 dev->dma_coherent = cacheid ? coherent : true;
+7
arch/arm/mm/idmap.c
··· 84 84 unsigned long addr, end; 85 85 unsigned long next; 86 86 87 + #ifdef CONFIG_XIP_KERNEL 88 + addr = (phys_addr_t)(text_start) - XIP_VIRT_ADDR(CONFIG_XIP_PHYS_ADDR) 89 + + CONFIG_XIP_PHYS_ADDR; 90 + end = (phys_addr_t)(text_end) - XIP_VIRT_ADDR(CONFIG_XIP_PHYS_ADDR) 91 + + CONFIG_XIP_PHYS_ADDR; 92 + #else 87 93 addr = virt_to_idmap(text_start); 88 94 end = virt_to_idmap(text_end); 95 + #endif 89 96 pr_info("Setting up static identity map for 0x%lx - 0x%lx\n", addr, end); 90 97 91 98 prot |= PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_SECT_AF;
+21 -13
arch/arm/mm/mmu.c
··· 1403 1403 } 1404 1404 1405 1405 /* 1406 - * Map the kernel if it is XIP. 1407 - * It is always first in the modulearea. 1408 - */ 1409 - #ifdef CONFIG_XIP_KERNEL 1410 - map.pfn = __phys_to_pfn(CONFIG_XIP_PHYS_ADDR & SECTION_MASK); 1411 - map.virtual = MODULES_VADDR; 1412 - map.length = ((unsigned long)_exiprom - map.virtual + ~SECTION_MASK) & SECTION_MASK; 1413 - map.type = MT_ROM; 1414 - create_mapping(&map); 1415 - #endif 1416 - 1417 - /* 1418 1406 * Map the cache flushing regions. 1419 1407 */ 1420 1408 #ifdef FLUSH_BASE ··· 1591 1603 * This will only persist until we turn on proper memory management later on 1592 1604 * and we remap the whole kernel with page granularity. 1593 1605 */ 1606 + #ifdef CONFIG_XIP_KERNEL 1607 + phys_addr_t kernel_nx_start = kernel_sec_start; 1608 + #else 1594 1609 phys_addr_t kernel_x_start = kernel_sec_start; 1595 1610 phys_addr_t kernel_x_end = round_up(__pa(__init_end), SECTION_SIZE); 1596 1611 phys_addr_t kernel_nx_start = kernel_x_end; 1612 + #endif 1597 1613 phys_addr_t kernel_nx_end = kernel_sec_end; 1598 1614 struct map_desc map; 1599 1615 1616 + /* 1617 + * Map the kernel if it is XIP. 1618 + * It is always first in the modulearea. 
1619 + */ 1620 + #ifdef CONFIG_XIP_KERNEL 1621 + map.pfn = __phys_to_pfn(CONFIG_XIP_PHYS_ADDR & SECTION_MASK); 1622 + map.virtual = MODULES_VADDR; 1623 + map.length = ((unsigned long)_exiprom - map.virtual + ~SECTION_MASK) & SECTION_MASK; 1624 + map.type = MT_ROM; 1625 + create_mapping(&map); 1626 + #else 1600 1627 map.pfn = __phys_to_pfn(kernel_x_start); 1601 1628 map.virtual = __phys_to_virt(kernel_x_start); 1602 1629 map.length = kernel_x_end - kernel_x_start; ··· 1621 1618 /* If the nx part is small it may end up covered by the tail of the RWX section */ 1622 1619 if (kernel_x_end == kernel_nx_end) 1623 1620 return; 1624 - 1621 + #endif 1625 1622 map.pfn = __phys_to_pfn(kernel_nx_start); 1626 1623 map.virtual = __phys_to_virt(kernel_nx_start); 1627 1624 map.length = kernel_nx_end - kernel_nx_start; ··· 1767 1764 { 1768 1765 void *zero_page; 1769 1766 1767 + #ifdef CONFIG_XIP_KERNEL 1768 + /* Store the kernel RW RAM region start/end in these variables */ 1769 + kernel_sec_start = CONFIG_PHYS_OFFSET & SECTION_MASK; 1770 + kernel_sec_end = round_up(__pa(_end), SECTION_SIZE); 1771 + #endif 1770 1772 pr_debug("physical kernel sections: 0x%08llx-0x%08llx\n", 1771 1773 kernel_sec_start, kernel_sec_end); 1772 1774
+1 -1
arch/arm/mm/proc-v7.S
··· 94 94 ret lr 95 95 SYM_FUNC_END(cpu_v7_dcache_clean_area) 96 96 97 - #ifdef CONFIG_ARM_PSCI 97 + #if defined(CONFIG_ARM_PSCI) && defined(CONFIG_HARDEN_BRANCH_PREDICTOR) 98 98 .arch_extension sec 99 99 SYM_TYPED_FUNC_START(cpu_v7_smc_switch_mm) 100 100 stmfd sp!, {r0 - r3}