Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
Topics: kernel, os, linux

Merge branch 'akpm' (patches from Andrew)

Merge misc fixes from Andrew Morton:
"17 fixes"

* emailed patches from Andrew Morton <akpm@linux-foundation.org>:
hexagon: define ioremap_uc
ocfs2: fix the crash due to call ocfs2_get_dlm_debug once less
ocfs2: call journal flush to mark journal as empty after journal recovery when mount
mm/hugetlb: defer freeing of huge pages if in non-task context
mm/gup: fix memory leak in __gup_benchmark_ioctl
mm/oom: fix pgtables units mismatch in Killed process message
fs/posix_acl.c: fix kernel-doc warnings
hexagon: work around compiler crash
hexagon: parenthesize registers in asm predicates
fs/namespace.c: make to_mnt_ns() static
fs/nsfs.c: include headers for missing declarations
fs/direct-io.c: include fs/internal.h for missing prototype
mm: move_pages: return valid node id in status if the page is already on the target node
memcg: account security cred as well to kmemcg
kcov: fix struct layout for kcov_remote_arg
mm/zsmalloc.c: fix the migrated zspage statistics.
mm/memory_hotplug: shrink zones when offlining memory

+166 -90
+5 -5
Documentation/dev-tools/kcov.rst
···
 .. code-block:: c

     struct kcov_remote_arg {
-        unsigned        trace_mode;
-        unsigned        area_size;
-        unsigned        num_handles;
-        uint64_t        common_handle;
-        uint64_t        handles[0];
+        __u32           trace_mode;
+        __u32           area_size;
+        __u32           num_handles;
+        __aligned_u64   common_handle;
+        __aligned_u64   handles[0];
     };

     #define KCOV_INIT_TRACE _IOR('c', 1, unsigned long)
+1 -3
arch/arm64/mm/mmu.c
···
 {
 	unsigned long start_pfn = start >> PAGE_SHIFT;
 	unsigned long nr_pages = size >> PAGE_SHIFT;
-	struct zone *zone;

 	/*
 	 * FIXME: Cleanup page tables (also in arch_add_memory() in case
···
 	 * unplug. ARCH_ENABLE_MEMORY_HOTREMOVE must not be
 	 * unlocked yet.
 	 */
-	zone = page_zone(pfn_to_page(start_pfn));
-	__remove_pages(zone, start_pfn, nr_pages, altmap);
+	__remove_pages(start_pfn, nr_pages, altmap);
 }
 #endif
+4 -4
arch/hexagon/include/asm/atomic.h
··· 91 91 "1: %0 = memw_locked(%1);\n" \ 92 92 " %0 = "#op "(%0,%2);\n" \ 93 93 " memw_locked(%1,P3)=%0;\n" \ 94 - " if !P3 jump 1b;\n" \ 94 + " if (!P3) jump 1b;\n" \ 95 95 : "=&r" (output) \ 96 96 : "r" (&v->counter), "r" (i) \ 97 97 : "memory", "p3" \ ··· 107 107 "1: %0 = memw_locked(%1);\n" \ 108 108 " %0 = "#op "(%0,%2);\n" \ 109 109 " memw_locked(%1,P3)=%0;\n" \ 110 - " if !P3 jump 1b;\n" \ 110 + " if (!P3) jump 1b;\n" \ 111 111 : "=&r" (output) \ 112 112 : "r" (&v->counter), "r" (i) \ 113 113 : "memory", "p3" \ ··· 124 124 "1: %0 = memw_locked(%2);\n" \ 125 125 " %1 = "#op "(%0,%3);\n" \ 126 126 " memw_locked(%2,P3)=%1;\n" \ 127 - " if !P3 jump 1b;\n" \ 127 + " if (!P3) jump 1b;\n" \ 128 128 : "=&r" (output), "=&r" (val) \ 129 129 : "r" (&v->counter), "r" (i) \ 130 130 : "memory", "p3" \ ··· 173 173 " }" 174 174 " memw_locked(%2, p3) = %1;" 175 175 " {" 176 - " if !p3 jump 1b;" 176 + " if (!p3) jump 1b;" 177 177 " }" 178 178 "2:" 179 179 : "=&r" (__oldval), "=&r" (tmp)
+4 -4
arch/hexagon/include/asm/bitops.h
··· 38 38 "1: R12 = memw_locked(R10);\n" 39 39 " { P0 = tstbit(R12,R11); R12 = clrbit(R12,R11); }\n" 40 40 " memw_locked(R10,P1) = R12;\n" 41 - " {if !P1 jump 1b; %0 = mux(P0,#1,#0);}\n" 41 + " {if (!P1) jump 1b; %0 = mux(P0,#1,#0);}\n" 42 42 : "=&r" (oldval) 43 43 : "r" (addr), "r" (nr) 44 44 : "r10", "r11", "r12", "p0", "p1", "memory" ··· 62 62 "1: R12 = memw_locked(R10);\n" 63 63 " { P0 = tstbit(R12,R11); R12 = setbit(R12,R11); }\n" 64 64 " memw_locked(R10,P1) = R12;\n" 65 - " {if !P1 jump 1b; %0 = mux(P0,#1,#0);}\n" 65 + " {if (!P1) jump 1b; %0 = mux(P0,#1,#0);}\n" 66 66 : "=&r" (oldval) 67 67 : "r" (addr), "r" (nr) 68 68 : "r10", "r11", "r12", "p0", "p1", "memory" ··· 88 88 "1: R12 = memw_locked(R10);\n" 89 89 " { P0 = tstbit(R12,R11); R12 = togglebit(R12,R11); }\n" 90 90 " memw_locked(R10,P1) = R12;\n" 91 - " {if !P1 jump 1b; %0 = mux(P0,#1,#0);}\n" 91 + " {if (!P1) jump 1b; %0 = mux(P0,#1,#0);}\n" 92 92 : "=&r" (oldval) 93 93 : "r" (addr), "r" (nr) 94 94 : "r10", "r11", "r12", "p0", "p1", "memory" ··· 223 223 int r; 224 224 225 225 asm("{ P0 = cmp.eq(%1,#0); %0 = ct0(%1);}\n" 226 - "{ if P0 %0 = #0; if !P0 %0 = add(%0,#1);}\n" 226 + "{ if (P0) %0 = #0; if (!P0) %0 = add(%0,#1);}\n" 227 227 : "=&r" (r) 228 228 : "r" (x) 229 229 : "p0");
+1 -1
arch/hexagon/include/asm/cmpxchg.h
···
 	__asm__ __volatile__ (
 	"1:	%0 = memw_locked(%1);\n" /* load into retval */
 	"	memw_locked(%1,P0) = %2;\n" /* store into memory */
-	"	if !P0 jump 1b;\n"
+	"	if (!P0) jump 1b;\n"
 	: "=&r" (retval)
 	: "r" (ptr), "r" (x)
 	: "memory", "p0"
+3 -3
arch/hexagon/include/asm/futex.h
···
 	/* For example: %1 = %4 */ \
 	insn \
 	"2: memw_locked(%3,p2) = %1;\n" \
-	" if !p2 jump 1b;\n" \
+	" if (!p2) jump 1b;\n" \
 	" %1 = #0;\n" \
 	"3:\n" \
 	".section .fixup,\"ax\"\n" \
···
 	"1: %1 = memw_locked(%3)\n"
 	" {\n"
 	" p2 = cmp.eq(%1,%4)\n"
-	" if !p2.new jump:NT 3f\n"
+	" if (!p2.new) jump:NT 3f\n"
 	" }\n"
 	"2: memw_locked(%3,p2) = %5\n"
-	" if !p2 jump 1b\n"
+	" if (!p2) jump 1b\n"
 	"3:\n"
 	".section .fixup,\"ax\"\n"
 	"4: %0 = #%6\n"
+1
arch/hexagon/include/asm/io.h
···

 void __iomem *ioremap(unsigned long phys_addr, unsigned long size);
 #define ioremap_nocache ioremap
+#define ioremap_uc(X, Y) ioremap((X), (Y))


 #define __raw_writel writel
+10 -10
arch/hexagon/include/asm/spinlock.h
···
 	__asm__ __volatile__(
 	"1:	R6 = memw_locked(%0);\n"
 	"	{ P3 = cmp.ge(R6,#0); R6 = add(R6,#1);}\n"
-	"	{ if !P3 jump 1b; }\n"
+	"	{ if (!P3) jump 1b; }\n"
 	"	memw_locked(%0,P3) = R6;\n"
-	"	{ if !P3 jump 1b; }\n"
+	"	{ if (!P3) jump 1b; }\n"
 	:
 	: "r" (&lock->lock)
 	: "memory", "r6", "p3"
···
 	"1:	R6 = memw_locked(%0);\n"
 	"	R6 = add(R6,#-1);\n"
 	"	memw_locked(%0,P3) = R6\n"
-	"	if !P3 jump 1b;\n"
+	"	if (!P3) jump 1b;\n"
 	:
 	: "r" (&lock->lock)
 	: "memory", "r6", "p3"
···
 	__asm__ __volatile__(
 	"	R6 = memw_locked(%1);\n"
 	"	{ %0 = #0; P3 = cmp.ge(R6,#0); R6 = add(R6,#1);}\n"
-	"	{ if !P3 jump 1f; }\n"
+	"	{ if (!P3) jump 1f; }\n"
 	"	memw_locked(%1,P3) = R6;\n"
 	"	{ %0 = P3 }\n"
 	"1:\n"
···
 	__asm__ __volatile__(
 	"1:	R6 = memw_locked(%0)\n"
 	"	{ P3 = cmp.eq(R6,#0); R6 = #-1;}\n"
-	"	{ if !P3 jump 1b; }\n"
+	"	{ if (!P3) jump 1b; }\n"
 	"	memw_locked(%0,P3) = R6;\n"
-	"	{ if !P3 jump 1b; }\n"
+	"	{ if (!P3) jump 1b; }\n"
 	:
 	: "r" (&lock->lock)
 	: "memory", "r6", "p3"
···
 	__asm__ __volatile__(
 	"	R6 = memw_locked(%1)\n"
 	"	{ %0 = #0; P3 = cmp.eq(R6,#0); R6 = #-1;}\n"
-	"	{ if !P3 jump 1f; }\n"
+	"	{ if (!P3) jump 1f; }\n"
 	"	memw_locked(%1,P3) = R6;\n"
 	"	%0 = P3;\n"
 	"1:\n"
···
 	__asm__ __volatile__(
 	"1:	R6 = memw_locked(%0);\n"
 	"	P3 = cmp.eq(R6,#0);\n"
-	"	{ if !P3 jump 1b; R6 = #1; }\n"
+	"	{ if (!P3) jump 1b; R6 = #1; }\n"
 	"	memw_locked(%0,P3) = R6;\n"
-	"	{ if !P3 jump 1b; }\n"
+	"	{ if (!P3) jump 1b; }\n"
 	:
 	: "r" (&lock->lock)
 	: "memory", "r6", "p3"
···
 	__asm__ __volatile__(
 	"	R6 = memw_locked(%1);\n"
 	"	P3 = cmp.eq(R6,#0);\n"
-	"	{ if !P3 jump 1f; R6 = #1; %0 = #0; }\n"
+	"	{ if (!P3) jump 1f; R6 = #1; %0 = #0; }\n"
 	"	memw_locked(%1,P3) = R6;\n"
 	"	%0 = P3;\n"
 	"1:\n"
+1 -3
arch/hexagon/kernel/stacktrace.c
···
 #include <linux/thread_info.h>
 #include <linux/module.h>

-register unsigned long current_frame_pointer asm("r30");
-
 struct stackframe {
 	unsigned long fp;
 	unsigned long rets;
···

 	low = (unsigned long)task_stack_page(current);
 	high = low + THREAD_SIZE;
-	fp = current_frame_pointer;
+	fp = (unsigned long)__builtin_frame_address(0);

 	while (fp >= low && fp <= (high - sizeof(*frame))) {
 		frame = (struct stackframe *)fp;
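Note: this crash fix swaps a global register variable, which newer compilers reject or miscompile, for __builtin_frame_address(0), the portable compiler intrinsic for the current frame pointer. A minimal userspace sketch of the same walk, assuming a hexagon-style frame record where each frame stores the caller's fp followed by the return address; dump_frames() and its stack bounds are hypothetical:

	#include <stdio.h>

	struct stackframe {
		unsigned long fp;	/* caller's frame pointer */
		unsigned long rets;	/* return address */
	};

	static void __attribute__((noinline)) dump_frames(unsigned long low,
							  unsigned long high)
	{
		/* What the removed global register variable used to provide: */
		unsigned long fp = (unsigned long)__builtin_frame_address(0);

		while (fp >= low && fp <= high - sizeof(struct stackframe)) {
			struct stackframe *frame = (struct stackframe *)fp;

			printf("ret: %#lx\n", frame->rets);
			fp = frame->fp;	/* step to the caller's frame */
		}
	}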
+1 -1
arch/hexagon/kernel/vm_entry.S
···
 		R26.L = #LO(do_work_pending);
 		R0 = #VM_INT_DISABLE;
 	}
-	if P0 jump check_work_pending
+	if (P0) jump check_work_pending
 	{
 		R0 = R25;
 		callr R24
+1 -3
arch/ia64/mm/init.c
···
 {
 	unsigned long start_pfn = start >> PAGE_SHIFT;
 	unsigned long nr_pages = size >> PAGE_SHIFT;
-	struct zone *zone;

-	zone = page_zone(pfn_to_page(start_pfn));
-	__remove_pages(zone, start_pfn, nr_pages, altmap);
+	__remove_pages(start_pfn, nr_pages, altmap);
 }
 #endif
+1 -2
arch/powerpc/mm/mem.c
···
 {
 	unsigned long start_pfn = start >> PAGE_SHIFT;
 	unsigned long nr_pages = size >> PAGE_SHIFT;
-	struct page *page = pfn_to_page(start_pfn) + vmem_altmap_offset(altmap);
 	int ret;

-	__remove_pages(page_zone(page), start_pfn, nr_pages, altmap);
+	__remove_pages(start_pfn, nr_pages, altmap);

 	/* Remove htab bolted mappings for this section of memory */
 	start = (unsigned long)__va(start);
+1 -3
arch/s390/mm/init.c
···
 {
 	unsigned long start_pfn = start >> PAGE_SHIFT;
 	unsigned long nr_pages = size >> PAGE_SHIFT;
-	struct zone *zone;

-	zone = page_zone(pfn_to_page(start_pfn));
-	__remove_pages(zone, start_pfn, nr_pages, altmap);
+	__remove_pages(start_pfn, nr_pages, altmap);
 	vmem_remove_mapping(start, size);
 }
 #endif /* CONFIG_MEMORY_HOTPLUG */
+1 -3
arch/sh/mm/init.c
···
 {
 	unsigned long start_pfn = PFN_DOWN(start);
 	unsigned long nr_pages = size >> PAGE_SHIFT;
-	struct zone *zone;

-	zone = page_zone(pfn_to_page(start_pfn));
-	__remove_pages(zone, start_pfn, nr_pages, altmap);
+	__remove_pages(start_pfn, nr_pages, altmap);
 }
 #endif /* CONFIG_MEMORY_HOTPLUG */
+1 -3
arch/x86/mm/init_32.c
···
 {
 	unsigned long start_pfn = start >> PAGE_SHIFT;
 	unsigned long nr_pages = size >> PAGE_SHIFT;
-	struct zone *zone;

-	zone = page_zone(pfn_to_page(start_pfn));
-	__remove_pages(zone, start_pfn, nr_pages, altmap);
+	__remove_pages(start_pfn, nr_pages, altmap);
 }
 #endif

+1 -3
arch/x86/mm/init_64.c
···
 {
 	unsigned long start_pfn = start >> PAGE_SHIFT;
 	unsigned long nr_pages = size >> PAGE_SHIFT;
-	struct page *page = pfn_to_page(start_pfn) + vmem_altmap_offset(altmap);
-	struct zone *zone = page_zone(page);

-	__remove_pages(zone, start_pfn, nr_pages, altmap);
+	__remove_pages(start_pfn, nr_pages, altmap);
 	kernel_physical_mapping_remove(start, start + size);
 }
 #endif /* CONFIG_MEMORY_HOTPLUG */
+2
fs/direct-io.c
···
 #include <linux/atomic.h>
 #include <linux/prefetch.h>

+#include "internal.h"
+
 /*
  * How many user pages to map in one call to get_user_pages(). This determines
  * the size of a structure in the slab cache
+1 -1
fs/namespace.c
···
 	       dentry->d_fsdata == &mntns_operations;
 }

-struct mnt_namespace *to_mnt_ns(struct ns_common *ns)
+static struct mnt_namespace *to_mnt_ns(struct ns_common *ns)
 {
 	return container_of(ns, struct mnt_namespace, ns);
 }
+3
fs/nsfs.c
···
 #include <linux/pseudo_fs.h>
 #include <linux/file.h>
 #include <linux/fs.h>
+#include <linux/proc_fs.h>
 #include <linux/proc_ns.h>
 #include <linux/magic.h>
 #include <linux/ktime.h>
···
 #include <linux/user_namespace.h>
 #include <linux/nsfs.h>
 #include <linux/uaccess.h>
+
+#include "internal.h"

 static struct vfsmount *nsfs_mnt;

+1
fs/ocfs2/dlmglue.c
···

 	debugfs_create_u32("locking_filter", 0600, osb->osb_debug_root,
 			   &dlm_debug->d_filter_secs);
+	ocfs2_get_dlm_debug(dlm_debug);
 }

 static void ocfs2_dlm_shutdown_debug(struct ocfs2_super *osb)
+8
fs/ocfs2/journal.c
···

 	ocfs2_clear_journal_error(osb->sb, journal->j_journal, osb->slot_num);

+	if (replayed) {
+		jbd2_journal_lock_updates(journal->j_journal);
+		status = jbd2_journal_flush(journal->j_journal);
+		jbd2_journal_unlock_updates(journal->j_journal);
+		if (status < 0)
+			mlog_errno(status);
+	}
+
 	status = ocfs2_journal_toggle_dirty(osb, 1, replayed);
 	if (status < 0) {
 		mlog_errno(status);
+5 -2
fs/posix_acl.c
···

 /**
  * posix_acl_update_mode - update mode in set_acl
+ * @inode: target inode
+ * @mode_p: mode (pointer) for update
+ * @acl: acl pointer
  *
  * Update the file mode when setting an ACL: compute the new file permission
  * bits based on the ACL. In addition, if the ACL is equivalent to the new
- * file mode, set *acl to NULL to indicate that no ACL should be set.
+ * file mode, set *@acl to NULL to indicate that no ACL should be set.
  *
- * As with chmod, clear the setgit bit if the caller is not in the owning group
+ * As with chmod, clear the setgid bit if the caller is not in the owning group
  * or capable of CAP_FSETID (see inode_change_ok).
  *
  * Called from set_acl inode operations.
+5 -2
include/linux/memory_hotplug.h
···

 extern void arch_remove_memory(int nid, u64 start, u64 size,
 			       struct vmem_altmap *altmap);
-extern void __remove_pages(struct zone *zone, unsigned long start_pfn,
-			   unsigned long nr_pages, struct vmem_altmap *altmap);
+extern void __remove_pages(unsigned long start_pfn, unsigned long nr_pages,
+			   struct vmem_altmap *altmap);

 /* reasonably generic interface to expand the physical pages */
 extern int __add_pages(int nid, unsigned long start_pfn, unsigned long nr_pages,
···
 extern int add_memory_resource(int nid, struct resource *resource);
 extern void move_pfn_range_to_zone(struct zone *zone, unsigned long start_pfn,
 				   unsigned long nr_pages, struct vmem_altmap *altmap);
+extern void remove_pfn_range_from_zone(struct zone *zone,
+				       unsigned long start_pfn,
+				       unsigned long nr_pages);
 extern bool is_memblock_offlined(struct memory_block *mem);
 extern int sparse_add_section(int nid, unsigned long pfn,
 		unsigned long nr_pages, struct vmem_altmap *altmap);
+5 -5
include/uapi/linux/kcov.h
···
  * and the comment before kcov_remote_start() for usage details.
  */
 struct kcov_remote_arg {
-	unsigned int	trace_mode;	/* KCOV_TRACE_PC or KCOV_TRACE_CMP */
-	unsigned int	area_size;	/* Length of coverage buffer in words */
-	unsigned int	num_handles;	/* Size of handles array */
-	__u64		common_handle;
-	__u64		handles[0];
+	__u32		trace_mode;	/* KCOV_TRACE_PC or KCOV_TRACE_CMP */
+	__u32		area_size;	/* Length of coverage buffer in words */
+	__u32		num_handles;	/* Size of handles array */
+	__aligned_u64	common_handle;
+	__aligned_u64	handles[0];
 };

 #define KCOV_REMOTE_MAX_HANDLES		0x100
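The kcov change matters for compat ioctls: with plain unsigned int and __u64, a 32-bit userspace compiler can place common_handle at a different offset than the 64-bit kernel, because i386 aligns 64-bit integers to only 4 bytes. Fixed-width __u32 plus __aligned_u64 pins a single layout for every ABI. A hedged userspace illustration of the fixed layout (remote_arg_fixed is a hypothetical stand-in for the uapi struct, not part of the patch):

	#include <stdint.h>
	#include <stddef.h>
	#include <assert.h>

	struct remote_arg_fixed {
		uint32_t trace_mode;
		uint32_t area_size;
		uint32_t num_handles;
		/* roughly what __aligned_u64 expands to */
		uint64_t common_handle __attribute__((aligned(8)));
		uint64_t handles[];
	};

	int main(void)
	{
		/* 4 + 4 + 4 + 4 (pad) + 8: identical on 32- and 64-bit targets.
		 * An unaligned uint64_t would land at offset 12 on i386. */
		assert(offsetof(struct remote_arg_fixed, common_handle) == 16);
		assert(sizeof(struct remote_arg_fixed) == 24);
		return 0;
	}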
+3 -3
kernel/cred.c
···
 	new->magic = CRED_MAGIC;
 #endif

-	if (security_cred_alloc_blank(new, GFP_KERNEL) < 0)
+	if (security_cred_alloc_blank(new, GFP_KERNEL_ACCOUNT) < 0)
 		goto error;

 	return new;
···
 	new->security = NULL;
 #endif

-	if (security_prepare_creds(new, old, GFP_KERNEL) < 0)
+	if (security_prepare_creds(new, old, GFP_KERNEL_ACCOUNT) < 0)
 		goto error;
 	validate_creds(new);
 	return new;
···
 #ifdef CONFIG_SECURITY
 	new->security = NULL;
 #endif
-	if (security_prepare_creds(new, old, GFP_KERNEL) < 0)
+	if (security_prepare_creds(new, old, GFP_KERNEL_ACCOUNT) < 0)
 		goto error;

 	put_cred(old);
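GFP_KERNEL_ACCOUNT is GFP_KERNEL | __GFP_ACCOUNT, so the security blobs hung off each cred are now charged to the allocating task's memory cgroup instead of escaping the kmemcg limit. A hedged sketch of the same flag on an ordinary allocation (alloc_tracked_blob() is hypothetical, not from the patch):

	#include <linux/slab.h>

	static void *alloc_tracked_blob(size_t size)
	{
		/* charged to the caller's memcg, like the cred security blobs */
		return kmalloc(size, GFP_KERNEL_ACCOUNT);
	}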
+6 -2
mm/gup_benchmark.c
···
 	unsigned long i, nr_pages, addr, next;
 	int nr;
 	struct page **pages;
+	int ret = 0;

 	if (gup->size > ULONG_MAX)
 		return -EINVAL;
···
 				    NULL);
 		break;
 	default:
-		return -1;
+		kvfree(pages);
+		ret = -EINVAL;
+		goto out;
 	}

 	if (nr <= 0)
···
 	gup->put_delta_usec = ktime_us_delta(end_time, start_time);

 	kvfree(pages);
-	return 0;
+out:
+	return ret;
 }

 static long gup_benchmark_ioctl(struct file *filep, unsigned int cmd,
+50 -1
mm/hugetlb.c
···
 #include <linux/swapops.h>
 #include <linux/jhash.h>
 #include <linux/numa.h>
+#include <linux/llist.h>

 #include <asm/page.h>
 #include <asm/pgtable.h>
···
 	page[2].mapping = NULL;
 }

-void free_huge_page(struct page *page)
+static void __free_huge_page(struct page *page)
 {
 	/*
 	 * Can't pass hstate in here because it is called from the
···
 		enqueue_huge_page(h, page);
 	}
 	spin_unlock(&hugetlb_lock);
+}
+
+/*
+ * As free_huge_page() can be called from a non-task context, we have
+ * to defer the actual freeing in a workqueue to prevent potential
+ * hugetlb_lock deadlock.
+ *
+ * free_hpage_workfn() locklessly retrieves the linked list of pages to
+ * be freed and frees them one-by-one. As the page->mapping pointer is
+ * going to be cleared in __free_huge_page() anyway, it is reused as the
+ * llist_node structure of a lockless linked list of huge pages to be freed.
+ */
+static LLIST_HEAD(hpage_freelist);
+
+static void free_hpage_workfn(struct work_struct *work)
+{
+	struct llist_node *node;
+	struct page *page;
+
+	node = llist_del_all(&hpage_freelist);
+
+	while (node) {
+		page = container_of((struct address_space **)node,
+				    struct page, mapping);
+		node = node->next;
+		__free_huge_page(page);
+	}
+}
+static DECLARE_WORK(free_hpage_work, free_hpage_workfn);
+
+void free_huge_page(struct page *page)
+{
+	/*
+	 * Defer freeing if in non-task context to avoid hugetlb_lock deadlock.
+	 */
+	if (!in_task()) {
+		/*
+		 * Only call schedule_work() if hpage_freelist is previously
+		 * empty. Otherwise, schedule_work() had been called but the
+		 * workfn hasn't retrieved the list yet.
+		 */
+		if (llist_add((struct llist_node *)&page->mapping,
+			      &hpage_freelist))
+			schedule_work(&free_hpage_work);
+		return;
+	}
+
+	__free_huge_page(page);
 }

 static void prep_new_huge_page(struct hstate *h, struct page *page, int nid)
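The hugetlb patch is an instance of a reusable pattern: free_huge_page() can run in softirq (non-task) context where taking hugetlb_lock could deadlock, so pages are pushed onto a lockless llist and a workqueue does the real freeing in task context. llist_add() returns true only when the list was previously empty, so exactly one schedule_work() fires per batch. A hedged generic sketch of the pattern using the same llist/workqueue APIs (deferred_item and defer_free() are hypothetical):

	#include <linux/kernel.h>
	#include <linux/llist.h>
	#include <linux/workqueue.h>

	struct deferred_item {
		struct llist_node node;
		int payload;
	};

	static LLIST_HEAD(deferred_list);

	static void deferred_workfn(struct work_struct *work)
	{
		struct llist_node *node = llist_del_all(&deferred_list);

		while (node) {
			struct deferred_item *item =
				container_of(node, struct deferred_item, node);
			node = node->next;
			/* free item here, in sleepable task context */
		}
	}
	static DECLARE_WORK(deferred_work, deferred_workfn);

	static void defer_free(struct deferred_item *item)
	{
		/* safe from any context: schedule only on empty -> non-empty */
		if (llist_add(&item->node, &deferred_list))
			schedule_work(&deferred_work);
	}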
+16 -15
mm/memory_hotplug.c
···
 	pgdat->node_spanned_pages = node_end_pfn - node_start_pfn;
 }

-static void __remove_zone(struct zone *zone, unsigned long start_pfn,
-		unsigned long nr_pages)
+void __ref remove_pfn_range_from_zone(struct zone *zone,
+				      unsigned long start_pfn,
+				      unsigned long nr_pages)
 {
 	struct pglist_data *pgdat = zone->zone_pgdat;
 	unsigned long flags;
···
 		return;
 #endif

+	clear_zone_contiguous(zone);
+
 	pgdat_resize_lock(zone->zone_pgdat, &flags);
 	shrink_zone_span(zone, start_pfn, start_pfn + nr_pages);
 	update_pgdat_span(pgdat);
 	pgdat_resize_unlock(zone->zone_pgdat, &flags);
+
+	set_zone_contiguous(zone);
 }

-static void __remove_section(struct zone *zone, unsigned long pfn,
-		unsigned long nr_pages, unsigned long map_offset,
-		struct vmem_altmap *altmap)
+static void __remove_section(unsigned long pfn, unsigned long nr_pages,
+			     unsigned long map_offset,
+			     struct vmem_altmap *altmap)
 {
 	struct mem_section *ms = __nr_to_section(pfn_to_section_nr(pfn));

 	if (WARN_ON_ONCE(!valid_section(ms)))
 		return;

-	__remove_zone(zone, pfn, nr_pages);
 	sparse_remove_section(ms, pfn, nr_pages, map_offset, altmap);
 }

 /**
- * __remove_pages() - remove sections of pages from a zone
- * @zone: zone from which pages need to be removed
+ * __remove_pages() - remove sections of pages
  * @pfn: starting pageframe (must be aligned to start of a section)
  * @nr_pages: number of pages to remove (must be multiple of section size)
  * @altmap: alternative device page map or %NULL if default memmap is used
···
  * sure that pages are marked reserved and zones are adjust properly by
  * calling offline_pages().
  */
-void __remove_pages(struct zone *zone, unsigned long pfn,
-		 unsigned long nr_pages, struct vmem_altmap *altmap)
+void __remove_pages(unsigned long pfn, unsigned long nr_pages,
+		    struct vmem_altmap *altmap)
 {
 	unsigned long map_offset = 0;
 	unsigned long nr, start_sec, end_sec;

 	map_offset = vmem_altmap_offset(altmap);
-
-	clear_zone_contiguous(zone);

 	if (check_pfn_span(pfn, nr_pages, "remove"))
 		return;
···
 		cond_resched();
 		pfns = min(nr_pages, PAGES_PER_SECTION
 				- (pfn & ~PAGE_SECTION_MASK));
-		__remove_section(zone, pfn, pfns, map_offset, altmap);
+		__remove_section(pfn, pfns, map_offset, altmap);
 		pfn += pfns;
 		nr_pages -= pfns;
 		map_offset = 0;
 	}
-
-	set_zone_contiguous(zone);
 }

 int set_online_page_callback(online_page_callback_t callback)
···
 		 (unsigned long long) pfn << PAGE_SHIFT,
 		 (((unsigned long long) pfn + nr_pages) << PAGE_SHIFT) - 1);
 	memory_notify(MEM_CANCEL_ONLINE, &arg);
+	remove_pfn_range_from_zone(zone, pfn, nr_pages);
 	mem_hotplug_done();
 	return ret;
 }
···
 	writeback_set_ratelimit();

 	memory_notify(MEM_OFFLINE, &arg);
+	remove_pfn_range_from_zone(zone, start_pfn, nr_pages);
 	mem_hotplug_done();
 	return 0;

+1 -1
mm/memremap.c
···

 	mem_hotplug_begin();
 	if (pgmap->type == MEMORY_DEVICE_PRIVATE) {
-		__remove_pages(page_zone(first_page), PHYS_PFN(res->start),
+		__remove_pages(PHYS_PFN(res->start),
 			       PHYS_PFN(resource_size(res)), NULL);
 	} else {
 		arch_remove_memory(nid, res->start, resource_size(res),
+17 -6
mm/migrate.c
···
 /*
  * Resolves the given address to a struct page, isolates it from the LRU and
  * puts it to the given pagelist.
- * Returns -errno if the page cannot be found/isolated or 0 when it has been
- * queued or the page doesn't need to be migrated because it is already on
- * the target node
+ * Returns:
+ *     errno - if the page cannot be found/isolated
+ *     0 - when it doesn't have to be migrated because it is already on
+ *         the target node
+ *     1 - when it has been queued
  */
 static int add_page_for_migration(struct mm_struct *mm, unsigned long addr,
 		int node, struct list_head *pagelist, bool migrate_all)
···
 	if (PageHuge(page)) {
 		if (PageHead(page)) {
 			isolate_huge_page(page, pagelist);
-			err = 0;
+			err = 1;
 		}
 	} else {
 		struct page *head;
···
 		if (err)
 			goto out_putpage;

-		err = 0;
+		err = 1;
 		list_add_tail(&head->lru, pagelist);
 		mod_node_page_state(page_pgdat(head),
 			NR_ISOLATED_ANON + page_is_file_cache(head),
···
 		 */
 		err = add_page_for_migration(mm, addr, current_node,
 				&pagelist, flags & MPOL_MF_MOVE_ALL);
-		if (!err)
+
+		if (!err) {
+			/* The page is already on the target node */
+			err = store_status(status, i, current_node, 1);
+			if (err)
+				goto out_flush;
 			continue;
+		} else if (err > 0) {
+			/* The page is successfully queued for migration */
+			continue;
+		}

 		err = store_status(status, i, err, 1);
 		if (err)
+1 -1
mm/oom_kill.c
···
 		K(get_mm_counter(mm, MM_FILEPAGES)),
 		K(get_mm_counter(mm, MM_SHMEMPAGES)),
 		from_kuid(&init_user_ns, task_uid(victim)),
-		mm_pgtables_bytes(mm), victim->signal->oom_score_adj);
+		mm_pgtables_bytes(mm) >> 10, victim->signal->oom_score_adj);
 	task_unlock(victim);

 	/*
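The one-liner above fixes a units mismatch: mm_pgtables_bytes() returns bytes, but the surrounding "Killed process" message prints every counter with a kB suffix, so the value must be shifted right by 10 before printing. A trivial hedged check of the arithmetic (the sample value is made up):

	#include <assert.h>

	int main(void)
	{
		unsigned long pgtables_bytes = 53248;	/* example value */
		assert((pgtables_bytes >> 10) == 52);	/* 53248 B == 52 kB */
		return 0;
	}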
+5
mm/zsmalloc.c
···
 			zs_pool_dec_isolated(pool);
 	}

+	if (page_zone(newpage) != page_zone(page)) {
+		dec_zone_page_state(page, NR_ZSPAGES);
+		inc_zone_page_state(newpage, NR_ZSPAGES);
+	}
+
 	reset_page(page);
 	put_page(page);
 	page = newpage;