Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

Merge branch 'akpm' (patches from Andrew)

Merge misc fixes from Andrew Morton:
"11 fixes"

* emailed patches from Andrew Morton <akpm@linux-foundation.org>:
scripts/spdxcheck.py: always open files in binary mode
checkstack.pl: fix for aarch64
userfaultfd: check VM_MAYWRITE was set after verifying the uffd is registered
fs/iomap.c: get/put the page in iomap_page_create/release()
hugetlbfs: call VM_BUG_ON_PAGE earlier in free_huge_page()
memblock: annotate memblock_is_reserved() with __init_memblock
psi: fix reference to kernel commandline enable
arch/sh/include/asm/io.h: provide prototypes for PCI I/O mapping in asm/io.h
mm/sparse: add common helper to mark all memblocks present
mm: introduce common STRUCT_PAGE_MAX_SHIFT define
alpha: fix hang caused by the bootmem removal

+54 -30
+1
arch/alpha/kernel/setup.c
··· 634 634 635 635 /* Find our memory. */ 636 636 setup_memory(kernel_end); 637 + memblock_set_bottom_up(true); 637 638 638 639 /* First guess at cpu cache sizes. Do this before init_arch. */ 639 640 determine_cpu_caches(cpu->type);
+3 -3
arch/alpha/mm/numa.c
··· 144 144 if (!nid && (node_max_pfn < end_kernel_pfn || node_min_pfn > start_kernel_pfn)) 145 145 panic("kernel loaded out of ram"); 146 146 147 + memblock_add(PFN_PHYS(node_min_pfn), 148 + (node_max_pfn - node_min_pfn) << PAGE_SHIFT); 149 + 147 150 /* Zone start phys-addr must be 2^(MAX_ORDER-1) aligned. 148 151 Note that we round this down, not up - node memory 149 152 has much larger alignment than 8Mb, so it's safe. */ 150 153 node_min_pfn &= ~((1UL << (MAX_ORDER-1))-1); 151 - 152 - memblock_add(PFN_PHYS(node_min_pfn), 153 - (node_max_pfn - node_min_pfn) << PAGE_SHIFT); 154 154 155 155 NODE_DATA(nid)->node_start_pfn = node_min_pfn; 156 156 NODE_DATA(nid)->node_present_pages = node_max_pfn - node_min_pfn;
-9
arch/arm64/include/asm/memory.h
··· 35 35 #define PCI_IO_SIZE SZ_16M 36 36 37 37 /* 38 - * Log2 of the upper bound of the size of a struct page. Used for sizing 39 - * the vmemmap region only, does not affect actual memory footprint. 40 - * We don't use sizeof(struct page) directly since taking its size here 41 - * requires its definition to be available at this point in the inclusion 42 - * chain, and it may not be a power of 2 in the first place. 43 - */ 44 - #define STRUCT_PAGE_MAX_SHIFT 6 45 - 46 - /* 47 38 * VMEMMAP_SIZE - allows the whole linear region to be covered by 48 39 * a struct page array 49 40 */
-8
arch/arm64/mm/init.c
··· 610 610 BUILD_BUG_ON(TASK_SIZE_32 > TASK_SIZE_64); 611 611 #endif 612 612 613 - #ifdef CONFIG_SPARSEMEM_VMEMMAP 614 - /* 615 - * Make sure we chose the upper bound of sizeof(struct page) 616 - * correctly when sizing the VMEMMAP array. 617 - */ 618 - BUILD_BUG_ON(sizeof(struct page) > (1 << STRUCT_PAGE_MAX_SHIFT)); 619 - #endif 620 - 621 613 if (PAGE_SIZE >= 16384 && get_num_physpages() <= 128) { 622 614 extern int sysctl_overcommit_memory; 623 615 /*
+1
arch/sh/include/asm/io.h
··· 24 24 #define __IO_PREFIX generic 25 25 #include <asm/io_generic.h> 26 26 #include <asm/io_trapped.h> 27 + #include <asm-generic/pci_iomap.h> 27 28 #include <mach/mangle-port.h> 28 29 29 30 #define __raw_writeb(v,a) (__chk_io_ptr(a), *(volatile u8 __force *)(a) = (v))
+7
fs/iomap.c
··· 116 116 atomic_set(&iop->read_count, 0); 117 117 atomic_set(&iop->write_count, 0); 118 118 bitmap_zero(iop->uptodate, PAGE_SIZE / SECTOR_SIZE); 119 + 120 + /* 121 + * migrate_page_move_mapping() assumes that pages with private data have 122 + * their count elevated by 1. 123 + */ 124 + get_page(page); 119 125 set_page_private(page, (unsigned long)iop); 120 126 SetPagePrivate(page); 121 127 return iop; ··· 138 132 WARN_ON_ONCE(atomic_read(&iop->write_count)); 139 133 ClearPagePrivate(page); 140 134 set_page_private(page, 0); 135 + put_page(page); 141 136 kfree(iop); 142 137 } 143 138
+2 -1
fs/userfaultfd.c
··· 1566 1566 cond_resched(); 1567 1567 1568 1568 BUG_ON(!vma_can_userfault(vma)); 1569 - WARN_ON(!(vma->vm_flags & VM_MAYWRITE)); 1570 1569 1571 1570 /* 1572 1571 * Nothing to do: this vma is already registered into this ··· 1573 1574 */ 1574 1575 if (!vma->vm_userfaultfd_ctx.ctx) 1575 1576 goto skip; 1577 + 1578 + WARN_ON(!(vma->vm_flags & VM_MAYWRITE)); 1576 1579 1577 1580 if (vma->vm_start > start) 1578 1581 start = vma->vm_start;
+1
include/asm-generic/fixmap.h
··· 16 16 #define __ASM_GENERIC_FIXMAP_H 17 17 18 18 #include <linux/bug.h> 19 + #include <linux/mm_types.h> 19 20 20 21 #define __fix_to_virt(x) (FIXADDR_TOP - ((x) << PAGE_SHIFT)) 21 22 #define __virt_to_fix(x) ((FIXADDR_TOP - ((x)&PAGE_MASK)) >> PAGE_SHIFT)
+5
include/linux/mm_types.h
··· 206 206 #endif 207 207 } _struct_page_alignment; 208 208 209 + /* 210 + * Used for sizing the vmemmap region on some architectures 211 + */ 212 + #define STRUCT_PAGE_MAX_SHIFT (order_base_2(sizeof(struct page))) 213 + 209 214 #define PAGE_FRAG_CACHE_MAX_SIZE __ALIGN_MASK(32768, ~PAGE_MASK) 210 215 #define PAGE_FRAG_CACHE_MAX_ORDER get_order(PAGE_FRAG_CACHE_MAX_SIZE) 211 216
+6
include/linux/mmzone.h
··· 783 783 static inline void memory_present(int nid, unsigned long start, unsigned long end) {} 784 784 #endif 785 785 786 + #if defined(CONFIG_SPARSEMEM) 787 + void memblocks_present(void); 788 + #else 789 + static inline void memblocks_present(void) {} 790 + #endif 791 + 786 792 #ifdef CONFIG_HAVE_MEMORYLESS_NODES 787 793 int local_memory_node(int node_id); 788 794 #else
+2 -2
init/Kconfig
··· 515 515 depends on PSI 516 516 help 517 517 If set, pressure stall information tracking will be disabled 518 - per default but can be enabled through passing psi_enable=1 519 - on the kernel commandline during boot. 518 + per default but can be enabled through passing psi=1 on the 519 + kernel commandline during boot. 520 520 521 521 endmenu # "CPU/Task time and stats accounting" 522 522
+3 -2
mm/hugetlb.c
··· 1248 1248 (struct hugepage_subpool *)page_private(page); 1249 1249 bool restore_reserve; 1250 1250 1251 - set_page_private(page, 0); 1252 - page->mapping = NULL; 1253 1251 VM_BUG_ON_PAGE(page_count(page), page); 1254 1252 VM_BUG_ON_PAGE(page_mapcount(page), page); 1253 + 1254 + set_page_private(page, 0); 1255 + page->mapping = NULL; 1255 1256 restore_reserve = PagePrivate(page); 1256 1257 ClearPagePrivate(page); 1257 1258
+1 -1
mm/memblock.c
··· 1727 1727 return -1; 1728 1728 } 1729 1729 1730 - bool __init memblock_is_reserved(phys_addr_t addr) 1730 + bool __init_memblock memblock_is_reserved(phys_addr_t addr) 1731 1731 { 1732 1732 return memblock_search(&memblock.reserved, addr) != -1; 1733 1733 }
+16
mm/sparse.c
··· 240 240 } 241 241 242 242 /* 243 + * Mark all memblocks as present using memory_present(). This is a 244 + * convenience function that is useful for a number of arches 245 + * to mark all of the system's memory as present during initialization. 246 + */ 247 + void __init memblocks_present(void) 248 + { 249 + struct memblock_region *reg; 250 + 251 + for_each_memblock(memory, reg) { 252 + memory_present(memblock_get_region_node(reg), 253 + memblock_region_memory_base_pfn(reg), 254 + memblock_region_memory_end_pfn(reg)); 255 + } 256 + } 257 + 258 + /* 243 259 * Subtle, we encode the real pfn into the mem_map such that 244 260 * the identity pfn - section_mem_map will return the actual 245 261 * physical page frame number.
+2 -2
scripts/checkstack.pl
··· 47 47 $xs = "[0-9a-f ]"; # hex character or space 48 48 $funcre = qr/^$x* <(.*)>:$/; 49 49 if ($arch eq 'aarch64') { 50 - #ffffffc0006325cc: a9bb7bfd stp x29, x30, [sp,#-80]! 51 - $re = qr/^.*stp.*sp,\#-([0-9]{1,8})\]\!/o; 50 + #ffffffc0006325cc: a9bb7bfd stp x29, x30, [sp, #-80]! 51 + $re = qr/^.*stp.*sp, \#-([0-9]{1,8})\]\!/o; 52 52 } elsif ($arch eq 'arm') { 53 53 #c0008ffc: e24dd064 sub sp, sp, #100 ; 0x64 54 54 $re = qr/.*sub.*sp, sp, #(([0-9]{2}|[3-9])[0-9]{2})/o;
+4 -2
scripts/spdxcheck.py
··· 168 168 self.curline = 0 169 169 try: 170 170 for line in fd: 171 + line = line.decode(locale.getpreferredencoding(False), errors='ignore') 171 172 self.curline += 1 172 173 if self.curline > maxlines: 173 174 break ··· 250 249 251 250 try: 252 251 if len(args.path) and args.path[0] == '-': 253 - parser.parse_lines(sys.stdin, args.maxlines, '-') 252 + stdin = os.fdopen(sys.stdin.fileno(), 'rb') 253 + parser.parse_lines(stdin, args.maxlines, '-') 254 254 else: 255 255 if args.path: 256 256 for p in args.path: 257 257 if os.path.isfile(p): 258 - parser.parse_lines(open(p), args.maxlines, p) 258 + parser.parse_lines(open(p, 'rb'), args.maxlines, p) 259 259 elif os.path.isdir(p): 260 260 scan_git_subtree(repo.head.reference.commit.tree, p) 261 261 else: