Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

Merge tag 'mm-hotfixes-stable-2023-10-01-08-34' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm

Pull misc fixes from Andrew Morton:
"Fourteen hotfixes, eleven of which are cc:stable. The remainder
pertain to issues which were introduced after 6.5"

* tag 'mm-hotfixes-stable-2023-10-01-08-34' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm:
  crash: add lock to serialize crash hotplug handling
selftests/mm: fix awk usage in charge_reserved_hugetlb.sh and hugetlb_reparenting_test.sh that may cause error
mm: mempolicy: keep VMA walk if both MPOL_MF_STRICT and MPOL_MF_MOVE are specified
mm/damon/vaddr-test: fix memory leak in damon_do_test_apply_three_regions()
mm, memcg: reconsider kmem.limit_in_bytes deprecation
mm: zswap: fix potential memory corruption on duplicate store
arm64: hugetlb: fix set_huge_pte_at() to work with all swap entries
mm: hugetlb: add huge page size param to set_huge_pte_at()
maple_tree: add MAS_UNDERFLOW and MAS_OVERFLOW states
maple_tree: add mas_is_active() to detect in-tree walks
nilfs2: fix potential use after free in nilfs_gccache_submit_read_data()
mm: abstract moving to the next PFN
mm: report success more often from filemap_map_folio_range()
fs: binfmt_elf_efpic: fix personality for ELF-FDPIC

+458 -172
+7
Documentation/admin-guide/cgroup-v1/memory.rst
···
 memory.oom_control              set/show oom controls.
 memory.numa_stat                show the number of memory usage per numa
                                 node
+memory.kmem.limit_in_bytes      Deprecated knob to set and read the kernel
+                                memory hard limit. The kernel hard limit is
+                                not supported since 5.16. Writing any value
+                                to this file has no effect, same as if the
+                                nokmem kernel parameter was specified.
+                                Kernel memory is still charged and reported
+                                by memory.kmem.usage_in_bytes.
 memory.kmem.usage_in_bytes      show current kernel memory allocation
 memory.kmem.failcnt             show the number of kernel memory usage
                                 hits limits
+1 -1
arch/arm64/include/asm/hugetlb.h
···
 #define arch_make_huge_pte arch_make_huge_pte
 #define __HAVE_ARCH_HUGE_SET_HUGE_PTE_AT
 extern void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
-                            pte_t *ptep, pte_t pte);
+                            pte_t *ptep, pte_t pte, unsigned long sz);
 #define __HAVE_ARCH_HUGE_PTEP_SET_ACCESS_FLAGS
 extern int huge_ptep_set_access_flags(struct vm_area_struct *vma,
                                       unsigned long addr, pte_t *ptep,
+7 -16
arch/arm64/mm/hugetlbpage.c
···
         flush_tlb_range(&vma, saddr, addr);
 }
 
-static inline struct folio *hugetlb_swap_entry_to_folio(swp_entry_t entry)
-{
-        VM_BUG_ON(!is_migration_entry(entry) && !is_hwpoison_entry(entry));
-
-        return page_folio(pfn_to_page(swp_offset_pfn(entry)));
-}
-
 void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
-                     pte_t *ptep, pte_t pte)
+                     pte_t *ptep, pte_t pte, unsigned long sz)
 {
         size_t pgsize;
         int i;
···
         unsigned long pfn, dpfn;
         pgprot_t hugeprot;
 
+        ncontig = num_contig_ptes(sz, &pgsize);
+
         if (!pte_present(pte)) {
-                struct folio *folio;
-
-                folio = hugetlb_swap_entry_to_folio(pte_to_swp_entry(pte));
-                ncontig = num_contig_ptes(folio_size(folio), &pgsize);
-
-                for (i = 0; i < ncontig; i++, ptep++)
+                for (i = 0; i < ncontig; i++, ptep++, addr += pgsize)
                         set_pte_at(mm, addr, ptep, pte);
                 return;
         }
···
                 return;
         }
 
-        ncontig = find_num_contig(mm, addr, ptep, &pgsize);
         pfn = pte_pfn(pte);
         dpfn = pgsize >> PAGE_SHIFT;
         hugeprot = pte_pgprot(pte);
···
 void huge_ptep_modify_prot_commit(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep,
                                   pte_t old_pte, pte_t pte)
 {
-        set_huge_pte_at(vma->vm_mm, addr, ptep, pte);
+        unsigned long psize = huge_page_size(hstate_vma(vma));
+
+        set_huge_pte_at(vma->vm_mm, addr, ptep, pte, psize);
 }
+1 -1
arch/parisc/include/asm/hugetlb.h
···
 
 #define __HAVE_ARCH_HUGE_SET_HUGE_PTE_AT
 void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
-                     pte_t *ptep, pte_t pte);
+                     pte_t *ptep, pte_t pte, unsigned long sz);
 
 #define __HAVE_ARCH_HUGE_PTEP_GET_AND_CLEAR
 pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
+1 -1
arch/parisc/mm/hugetlbpage.c
···
 }
 
 void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
-                     pte_t *ptep, pte_t entry)
+                     pte_t *ptep, pte_t entry, unsigned long sz)
 {
         __set_huge_pte_at(mm, addr, ptep, entry);
 }
+2 -1
arch/powerpc/include/asm/nohash/32/hugetlb-8xx.h
···
 }
 
 #define __HAVE_ARCH_HUGE_SET_HUGE_PTE_AT
-void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pte);
+void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep,
+                     pte_t pte, unsigned long sz);
 
 #define __HAVE_ARCH_HUGE_PTE_CLEAR
 static inline void huge_pte_clear(struct mm_struct *mm, unsigned long addr,
+4 -1
arch/powerpc/mm/book3s64/hugetlbpage.c
···
 void huge_ptep_modify_prot_commit(struct vm_area_struct *vma, unsigned long addr,
                                   pte_t *ptep, pte_t old_pte, pte_t pte)
 {
+        unsigned long psize;
 
         if (radix_enabled())
                 return radix__huge_ptep_modify_prot_commit(vma, addr, ptep,
                                                            old_pte, pte);
-        set_huge_pte_at(vma->vm_mm, addr, ptep, pte);
+
+        psize = huge_page_size(hstate_vma(vma));
+        set_huge_pte_at(vma->vm_mm, addr, ptep, pte, psize);
 }
 
 void __init hugetlbpage_init_defaultsize(void)
+2 -1
arch/powerpc/mm/book3s64/radix_hugetlbpage.c
···
                                pte_t old_pte, pte_t pte)
 {
         struct mm_struct *mm = vma->vm_mm;
+        unsigned long psize = huge_page_size(hstate_vma(vma));
 
         /*
          * POWER9 NMMU must flush the TLB after clearing the PTE before
···
             atomic_read(&mm->context.copros) > 0)
                 radix__flush_hugetlb_page(vma, addr);
 
-        set_huge_pte_at(vma->vm_mm, addr, ptep, pte);
+        set_huge_pte_at(vma->vm_mm, addr, ptep, pte, psize);
 }
+2 -1
arch/powerpc/mm/nohash/8xx.c
···
         if (new && WARN_ON(pte_present(*ptep) && pgprot_val(prot)))
                 return -EINVAL;
 
-        set_huge_pte_at(&init_mm, va, ptep, pte_mkhuge(pfn_pte(pa >> PAGE_SHIFT, prot)));
+        set_huge_pte_at(&init_mm, va, ptep,
+                        pte_mkhuge(pfn_pte(pa >> PAGE_SHIFT, prot)), psize);
 
         return 0;
 }
+2 -1
arch/powerpc/mm/pgtable.c
···
 }
 
 #if defined(CONFIG_PPC_8xx)
-void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pte)
+void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep,
+                     pte_t pte, unsigned long sz)
 {
         pmd_t *pmd = pmd_off(mm, addr);
         pte_basic_t val;
+2 -1
arch/riscv/include/asm/hugetlb.h
···
 
 #define __HAVE_ARCH_HUGE_SET_HUGE_PTE_AT
 void set_huge_pte_at(struct mm_struct *mm,
-                     unsigned long addr, pte_t *ptep, pte_t pte);
+                     unsigned long addr, pte_t *ptep, pte_t pte,
+                     unsigned long sz);
 
 #define __HAVE_ARCH_HUGE_PTEP_GET_AND_CLEAR
 pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
+2 -1
arch/riscv/mm/hugetlbpage.c
···
 void set_huge_pte_at(struct mm_struct *mm,
                      unsigned long addr,
                      pte_t *ptep,
-                     pte_t pte)
+                     pte_t pte,
+                     unsigned long sz)
 {
         int i, pte_num;
 
+4 -2
arch/s390/include/asm/hugetlb.h
···
 #define hugepages_supported() (MACHINE_HAS_EDAT1)
 
 void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
+                     pte_t *ptep, pte_t pte, unsigned long sz);
+void __set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
                      pte_t *ptep, pte_t pte);
 pte_t huge_ptep_get(pte_t *ptep);
 pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
···
         int changed = !pte_same(huge_ptep_get(ptep), pte);
         if (changed) {
                 huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
-                set_huge_pte_at(vma->vm_mm, addr, ptep, pte);
+                __set_huge_pte_at(vma->vm_mm, addr, ptep, pte);
         }
         return changed;
 }
···
                                    unsigned long addr, pte_t *ptep)
 {
         pte_t pte = huge_ptep_get_and_clear(mm, addr, ptep);
-        set_huge_pte_at(mm, addr, ptep, pte_wrprotect(pte));
+        __set_huge_pte_at(mm, addr, ptep, pte_wrprotect(pte));
 }
 
 static inline pte_t mk_huge_pte(struct page *page, pgprot_t pgprot)
+7 -1
arch/s390/mm/hugetlbpage.c
···
         __storage_key_init_range(paddr, paddr + size - 1);
 }
 
-void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
+void __set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
                      pte_t *ptep, pte_t pte)
 {
         unsigned long rste;
···
 
         clear_huge_pte_skeys(mm, rste);
         set_pte(ptep, __pte(rste));
+}
+
+void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
+                     pte_t *ptep, pte_t pte, unsigned long sz)
+{
+        __set_huge_pte_at(mm, addr, ptep, pte);
 }
 
 pte_t huge_ptep_get(pte_t *ptep)
+4 -2
arch/sparc/include/asm/hugetlb.h
···
 
 #define __HAVE_ARCH_HUGE_SET_HUGE_PTE_AT
 void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
+                     pte_t *ptep, pte_t pte, unsigned long sz);
+void __set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
                      pte_t *ptep, pte_t pte);
 
 #define __HAVE_ARCH_HUGE_PTEP_GET_AND_CLEAR
···
                                            unsigned long addr, pte_t *ptep)
 {
         pte_t old_pte = *ptep;
-        set_huge_pte_at(mm, addr, ptep, pte_wrprotect(old_pte));
+        __set_huge_pte_at(mm, addr, ptep, pte_wrprotect(old_pte));
 }
 
 #define __HAVE_ARCH_HUGE_PTEP_SET_ACCESS_FLAGS
···
 {
         int changed = !pte_same(*ptep, pte);
         if (changed) {
-                set_huge_pte_at(vma->vm_mm, addr, ptep, pte);
+                __set_huge_pte_at(vma->vm_mm, addr, ptep, pte);
                 flush_tlb_page(vma, addr);
         }
         return changed;
+7 -1
arch/sparc/mm/hugetlbpage.c
···
         return pte_offset_huge(pmd, addr);
 }
 
-void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
+void __set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
                      pte_t *ptep, pte_t entry)
 {
         unsigned int nptes, orig_shift, shift;
···
         if (size == HPAGE_SIZE)
                 maybe_tlb_batch_add(mm, addr + REAL_HPAGE_SIZE, ptep, orig, 0,
                                     orig_shift);
+}
+
+void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
+                     pte_t *ptep, pte_t entry, unsigned long sz)
+{
+        __set_huge_pte_at(mm, addr, ptep, entry);
 }
 
 pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
+8
arch/x86/include/asm/pgtable.h
···
         return a.pte == b.pte;
 }
 
+static inline pte_t pte_next_pfn(pte_t pte)
+{
+        if (__pte_needs_invert(pte_val(pte)))
+                return __pte(pte_val(pte) - (1UL << PFN_PTE_SHIFT));
+        return __pte(pte_val(pte) + (1UL << PFN_PTE_SHIFT));
+}
+#define pte_next_pfn pte_next_pfn
+
 static inline int pte_present(pte_t a)
 {
         return pte_flags(a) & (_PAGE_PRESENT | _PAGE_PROTNONE);
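Why the x86 override matters: with PTE inversion active (the L1TF mitigation), the PFN field of a non-present PTE is stored bitwise-inverted, so advancing to the next PFN means subtracting 1 << PFN_PTE_SHIFT from the raw PTE value rather than adding it, which is what the generic fallback added to include/linux/pgtable.h further down does. A standalone user-space sketch of that arithmetic, with illustrative helper names (the kernel's real helpers are __pte_needs_invert() and PFN_PTE_SHIFT):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Toy model: for an "inverted" PTE the PFN bits are stored bitwise
 * complemented, so stepping the logical PFN forward by one steps the
 * stored field *backward* by one: ~(pfn + 1) == ~pfn - 1.
 */
static uint64_t store_inverted(uint64_t pfn, uint64_t mask)
{
        return ~pfn & mask;     /* inverted PFN field, as under L1TF */
}

int main(void)
{
        uint64_t mask = (1ULL << 40) - 1;       /* pretend 40-bit PFN field */
        uint64_t pfn = 0x12345;

        /* advancing the logical PFN decrements the stored value */
        assert(store_inverted(pfn + 1, mask) == store_inverted(pfn, mask) - 1);
        printf("inverted(%#llx) = %#llx, inverted(%#llx) = %#llx\n",
               (unsigned long long)pfn,
               (unsigned long long)store_inverted(pfn, mask),
               (unsigned long long)(pfn + 1),
               (unsigned long long)store_inverted(pfn + 1, mask));
        return 0;
}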
+2 -3
fs/binfmt_elf_fdpic.c
···
         /* there's now no turning back... the old userspace image is dead,
          * defunct, deceased, etc.
          */
+        SET_PERSONALITY(exec_params.hdr);
         if (elf_check_fdpic(&exec_params.hdr))
-                set_personality(PER_LINUX_FDPIC);
-        else
-                set_personality(PER_LINUX);
+                current->personality |= PER_LINUX_FDPIC;
         if (elf_read_implies_exec(&exec_params.hdr, executable_stack))
                 current->personality |= READ_IMPLIES_EXEC;
 
+3 -3
fs/nilfs2/gcinode.c
···
                 struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
 
                 err = nilfs_dat_translate(nilfs->ns_dat, vbn, &pbn);
-                if (unlikely(err)) { /* -EIO, -ENOMEM, -ENOENT */
-                        brelse(bh);
+                if (unlikely(err)) /* -EIO, -ENOMEM, -ENOENT */
                         goto failed;
-                }
         }
 
         lock_buffer(bh);
···
 failed:
         unlock_page(bh->b_page);
         put_page(bh->b_page);
+        if (unlikely(err))
+                brelse(bh);
         return err;
 }
 
+1 -1
include/asm-generic/hugetlb.h
···
 
 #ifndef __HAVE_ARCH_HUGE_SET_HUGE_PTE_AT
 static inline void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
-                                   pte_t *ptep, pte_t pte)
+                                   pte_t *ptep, pte_t pte, unsigned long sz)
 {
         set_pte_at(mm, addr, ptep, pte);
 }
+4 -2
include/linux/hugetlb.h
···
                                        unsigned long addr, pte_t *ptep,
                                        pte_t old_pte, pte_t pte)
 {
-        set_huge_pte_at(vma->vm_mm, addr, ptep, pte);
+        unsigned long psize = huge_page_size(hstate_vma(vma));
+
+        set_huge_pte_at(vma->vm_mm, addr, ptep, pte, psize);
 }
 #endif
 
···
 }
 
 static inline void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
-                                   pte_t *ptep, pte_t pte)
+                                   pte_t *ptep, pte_t pte, unsigned long sz)
 {
 }
 
+11
include/linux/maple_tree.h
···
 #define MAS_ROOT        ((struct maple_enode *)5UL)
 #define MAS_NONE        ((struct maple_enode *)9UL)
 #define MAS_PAUSE       ((struct maple_enode *)17UL)
+#define MAS_OVERFLOW    ((struct maple_enode *)33UL)
+#define MAS_UNDERFLOW   ((struct maple_enode *)65UL)
 #define MA_ERROR(err) \
                 ((struct maple_enode *)(((unsigned long)err << 2) | 2UL))
 
···
 static inline bool mas_is_paused(const struct ma_state *mas)
 {
         return mas->node == MAS_PAUSE;
+}
+
+/* Check if the mas is pointing to a node or not */
+static inline bool mas_is_active(struct ma_state *mas)
+{
+        if ((unsigned long)mas->node >= MAPLE_RESERVED_RANGE)
+                return true;
+
+        return false;
 }
 
 /**
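The new mas_is_active() works because every maple-state sentinel (MAS_ROOT, MAS_NONE, MAS_PAUSE and the two states added above) is a small tagged constant, while a genuine in-tree walk holds a real node pointer, so one comparison against MAPLE_RESERVED_RANGE separates the two. A toy model of that encoding (the reserved-range value of 4096 is an assumption here for illustration, standing in for the kernel's MAPLE_RESERVED_RANGE):

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

#define RESERVED_RANGE 4096UL   /* assumed stand-in for MAPLE_RESERVED_RANGE */

#define S_ROOT      5UL
#define S_NONE      9UL
#define S_PAUSE     17UL
#define S_OVERFLOW  33UL
#define S_UNDERFLOW 65UL

/* mirrors the range check in mas_is_active() */
static bool is_active(unsigned long node)
{
        return node >= RESERVED_RANGE;
}

int main(void)
{
        int on_stack;

        /* every sentinel sits below the reserved range... */
        assert(!is_active(S_ROOT) && !is_active(S_NONE) && !is_active(S_PAUSE));
        assert(!is_active(S_OVERFLOW) && !is_active(S_UNDERFLOW));
        /* ...while any real pointer does not */
        assert(is_active((unsigned long)&on_stack));
        puts("sentinels are below the reserved range; real nodes are not");
        return 0;
}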
+9 -1
include/linux/pgtable.h
···
 #endif
 
 #ifndef set_ptes
+
+#ifndef pte_next_pfn
+static inline pte_t pte_next_pfn(pte_t pte)
+{
+        return __pte(pte_val(pte) + (1UL << PFN_PTE_SHIFT));
+}
+#endif
+
 /**
  * set_ptes - Map consecutive pages to a contiguous range of addresses.
  * @mm: Address space to map the pages into.
···
                 if (--nr == 0)
                         break;
                 ptep++;
-                pte = __pte(pte_val(pte) + (1UL << PFN_PTE_SHIFT));
+                pte = pte_next_pfn(pte);
         }
         arch_leave_lazy_mmu_mode();
 }
+17
kernel/crash_core.c
···
 #define pr_fmt(fmt) "crash hp: " fmt
 
 /*
+ * Unlike kexec/kdump loading/unloading/jumping/shrinking, which rarely
+ * happen, many crash hotplug events may be notified during one short
+ * period, e.g. when one memory board is hot added and its memory
+ * regions come online. So the mutex __crash_hotplug_lock is used to
+ * serialize the crash hotplug handling specifically.
+ */
+DEFINE_MUTEX(__crash_hotplug_lock);
+#define crash_hotplug_lock() mutex_lock(&__crash_hotplug_lock)
+#define crash_hotplug_unlock() mutex_unlock(&__crash_hotplug_lock)
+
+/*
  * This routine utilized when the crash_hotplug sysfs node is read.
  * It reflects the kernel's ability/permission to update the crash
  * elfcorehdr directly.
···
 {
         int rc = 0;
 
+        crash_hotplug_lock();
         /* Obtain lock while reading crash information */
         if (!kexec_trylock()) {
                 pr_info("kexec_trylock() failed, elfcorehdr may be inaccurate\n");
+                crash_hotplug_unlock();
                 return 0;
         }
         if (kexec_crash_image) {
···
         }
         /* Release lock now that update complete */
         kexec_unlock();
+        crash_hotplug_unlock();
 
         return rc;
 }
···
 {
         struct kimage *image;
 
+        crash_hotplug_lock();
         /* Obtain lock while changing crash information */
         if (!kexec_trylock()) {
                 pr_info("kexec_trylock() failed, elfcorehdr may be inaccurate\n");
+                crash_hotplug_unlock();
                 return;
         }
 
···
 out:
         /* Release lock now that update complete */
         kexec_unlock();
+        crash_hotplug_unlock();
 }
 
 static int crash_memhp_notifier(struct notifier_block *nb, unsigned long val, void *v)
+166 -61
lib/maple_tree.c
···
         return xa_is_err(mas->node);
 }
 
+static __always_inline bool mas_is_overflow(struct ma_state *mas)
+{
+        if (unlikely(mas->node == MAS_OVERFLOW))
+                return true;
+
+        return false;
+}
+
+static __always_inline bool mas_is_underflow(struct ma_state *mas)
+{
+        if (unlikely(mas->node == MAS_UNDERFLOW))
+                return true;
+
+        return false;
+}
+
 static inline bool mas_searchable(struct ma_state *mas)
 {
         if (mas_is_none(mas))
···
  *
  * @mas: The maple state
  * @max: The minimum starting range
+ * @empty: Can be empty
+ * @set_underflow: Set the @mas->node to underflow state on limit.
  *
  * Return: The entry in the previous slot which is possibly NULL
  */
-static void *mas_prev_slot(struct ma_state *mas, unsigned long min, bool empty)
+static void *mas_prev_slot(struct ma_state *mas, unsigned long min, bool empty,
+                           bool set_underflow)
 {
         void *entry;
         void __rcu **slots;
···
         if (unlikely(mas_rewalk_if_dead(mas, node, save_point)))
                 goto retry;
 
-again:
         if (mas->min <= min) {
                 pivot = mas_safe_min(mas, pivots, mas->offset);
···
                         goto retry;
 
                 if (pivot <= min)
-                        return NULL;
+                        goto underflow;
         }
 
+again:
         if (likely(mas->offset)) {
                 mas->offset--;
                 mas->last = mas->index - 1;
···
         }
 
         if (mas_is_none(mas))
-                return NULL;
+                goto underflow;
 
         mas->last = mas->max;
         node = mas_mn(mas);
···
         if (likely(entry))
                 return entry;
 
-        if (!empty)
+        if (!empty) {
+                if (mas->index <= min)
+                        goto underflow;
+
                 goto again;
+        }
 
         return entry;
+
+underflow:
+        if (set_underflow)
+                mas->node = MAS_UNDERFLOW;
+        return NULL;
 }
 
 /*
···
  * @mas: The maple state
  * @max: The maximum starting range
  * @empty: Can be empty
+ * @set_overflow: Should @mas->node be set to overflow when the limit is
+ * reached.
  *
  * Return: The entry in the next slot which is possibly NULL
  */
-static void *mas_next_slot(struct ma_state *mas, unsigned long max, bool empty)
+static void *mas_next_slot(struct ma_state *mas, unsigned long max, bool empty,
+                           bool set_overflow)
 {
         void __rcu **slots;
         unsigned long *pivots;
···
         if (unlikely(mas_rewalk_if_dead(mas, node, save_point)))
                 goto retry;
 
-again:
         if (mas->max >= max) {
                 if (likely(mas->offset < data_end))
                         pivot = pivots[mas->offset];
                 else
-                        return NULL; /* must be mas->max */
+                        goto overflow;
 
                 if (unlikely(mas_rewalk_if_dead(mas, node, save_point)))
                         goto retry;
 
                 if (pivot >= max)
-                        return NULL;
+                        goto overflow;
         }
 
         if (likely(mas->offset < data_end)) {
                 mas->index = pivots[mas->offset] + 1;
+again:
                 mas->offset++;
                 if (likely(mas->offset < data_end))
                         mas->last = pivots[mas->offset];
···
                         goto retry;
         }
 
-        if (mas_is_none(mas))
+        if (WARN_ON_ONCE(mas_is_none(mas))) {
+                mas->node = MAS_OVERFLOW;
                 return NULL;
+                goto overflow;
+        }
 
         mas->offset = 0;
         mas->index = mas->min;
···
                 return entry;
 
         if (!empty) {
-                if (!mas->offset)
-                        data_end = 2;
+                if (mas->last >= max)
+                        goto overflow;
+
+                mas->index = mas->last + 1;
+                /* Node cannot end on NULL, so it's safe to short-cut here */
                 goto again;
         }
 
         return entry;
+
+overflow:
+        if (set_overflow)
+                mas->node = MAS_OVERFLOW;
+        return NULL;
 }
 
 /*
···
  *
  * Set the @mas->node to the next entry and the range_start to
  * the beginning value for the entry.  Does not check beyond @limit.
- * Sets @mas->index and @mas->last to the limit if it is hit.
+ * Sets @mas->index and @mas->last to the range, Does not update @mas->index and
+ * @mas->last on overflow.
  * Restarts on dead nodes.
  *
  * Return: the next entry or %NULL.
  */
 static inline void *mas_next_entry(struct ma_state *mas, unsigned long limit)
 {
-        if (mas->last >= limit)
+        if (mas->last >= limit) {
+                mas->node = MAS_OVERFLOW;
                 return NULL;
+        }
 
-        return mas_next_slot(mas, limit, false);
+        return mas_next_slot(mas, limit, false, true);
 }
 
 /*
···
 {
         void *entry;
 
-        if (mas_is_none(mas) || mas_is_paused(mas) || mas_is_ptr(mas))
+        if (!mas_is_active(mas) || !mas_is_start(mas))
                 mas->node = MAS_START;
 retry:
         entry = mas_state_walk(mas);
···
 
 static void mas_wr_store_setup(struct ma_wr_state *wr_mas)
 {
-        if (mas_is_start(wr_mas->mas))
-                return;
+        if (!mas_is_active(wr_mas->mas)) {
+                if (mas_is_start(wr_mas->mas))
+                        return;
 
-        if (unlikely(mas_is_paused(wr_mas->mas)))
-                goto reset;
+                if (unlikely(mas_is_paused(wr_mas->mas)))
+                        goto reset;
 
-        if (unlikely(mas_is_none(wr_mas->mas)))
-                goto reset;
+                if (unlikely(mas_is_none(wr_mas->mas)))
+                        goto reset;
+
+                if (unlikely(mas_is_overflow(wr_mas->mas)))
+                        goto reset;
+
+                if (unlikely(mas_is_underflow(wr_mas->mas)))
+                        goto reset;
+        }
 
         /*
          * A less strict version of mas_is_span_wr() where we allow spanning
···
 {
         bool was_none = mas_is_none(mas);
 
-        if (mas_is_none(mas) || mas_is_paused(mas))
+        if (unlikely(mas->last >= max)) {
+                mas->node = MAS_OVERFLOW;
+                return true;
+        }
+
+        if (mas_is_active(mas))
+                return false;
+
+        if (mas_is_none(mas) || mas_is_paused(mas)) {
                 mas->node = MAS_START;
+        } else if (mas_is_overflow(mas)) {
+                /* Overflowed before, but the max changed */
+                mas->node = MAS_START;
+        } else if (mas_is_underflow(mas)) {
+                mas->node = MAS_START;
+                *entry = mas_walk(mas);
+                if (*entry)
+                        return true;
+        }
 
         if (mas_is_start(mas))
                 *entry = mas_walk(mas); /* Retries on dead nodes handled by mas_walk */
···
 
         if (mas_is_none(mas))
                 return true;
+
         return false;
 }
···
                 return entry;
 
         /* Retries on dead nodes handled by mas_next_slot */
-        return mas_next_slot(mas, max, false);
+        return mas_next_slot(mas, max, false, true);
 }
 EXPORT_SYMBOL_GPL(mas_next);
···
                 return entry;
 
         /* Retries on dead nodes handled by mas_next_slot */
-        return mas_next_slot(mas, max, true);
+        return mas_next_slot(mas, max, true, true);
 }
 EXPORT_SYMBOL_GPL(mas_next_range);
···
 static inline bool mas_prev_setup(struct ma_state *mas, unsigned long min,
                 void **entry)
 {
-        if (mas->index <= min)
-                goto none;
+        if (unlikely(mas->index <= min)) {
+                mas->node = MAS_UNDERFLOW;
+                return true;
+        }
 
-        if (mas_is_none(mas) || mas_is_paused(mas))
-                mas->node = MAS_START;
+        if (mas_is_active(mas))
+                return false;
 
-        if (mas_is_start(mas)) {
-                mas_walk(mas);
-                if (!mas->index)
-                        goto none;
+        if (mas_is_overflow(mas)) {
+                mas->node = MAS_START;
+                *entry = mas_walk(mas);
+                if (*entry)
+                        return true;
         }
+
+        if (mas_is_none(mas) || mas_is_paused(mas)) {
+                mas->node = MAS_START;
+        } else if (mas_is_underflow(mas)) {
+                /* underflowed before but the min changed */
+                mas->node = MAS_START;
+        }
+
+        if (mas_is_start(mas))
+                mas_walk(mas);
 
         if (unlikely(mas_is_ptr(mas))) {
                 if (!mas->index)
···
         if (mas_prev_setup(mas, min, &entry))
                 return entry;
 
-        return mas_prev_slot(mas, min, false);
+        return mas_prev_slot(mas, min, false, true);
 }
 EXPORT_SYMBOL_GPL(mas_prev);
···
         if (mas_prev_setup(mas, min, &entry))
                 return entry;
 
-        return mas_prev_slot(mas, min, true);
+        return mas_prev_slot(mas, min, true, true);
 }
 EXPORT_SYMBOL_GPL(mas_prev_range);
···
 static inline bool mas_find_setup(struct ma_state *mas, unsigned long max,
                 void **entry)
 {
-        *entry = NULL;
+        if (mas_is_active(mas)) {
+                if (mas->last < max)
+                        return false;
 
-        if (unlikely(mas_is_none(mas))) {
+                return true;
+        }
+
+        if (mas_is_paused(mas)) {
+                if (unlikely(mas->last >= max))
+                        return true;
+
+                mas->index = ++mas->last;
+                mas->node = MAS_START;
+        } else if (mas_is_none(mas)) {
                 if (unlikely(mas->last >= max))
                         return true;
 
                 mas->index = mas->last;
                 mas->node = MAS_START;
-        } else if (unlikely(mas_is_paused(mas))) {
-                if (unlikely(mas->last >= max))
+        } else if (mas_is_overflow(mas) || mas_is_underflow(mas)) {
+                if (mas->index > max) {
+                        mas->node = MAS_OVERFLOW;
                         return true;
+                }
 
                 mas->node = MAS_START;
-                mas->index = ++mas->last;
-        } else if (unlikely(mas_is_ptr(mas)))
-                goto ptr_out_of_range;
+        }
 
-        if (unlikely(mas_is_start(mas))) {
+        if (mas_is_start(mas)) {
                 /* First run or continue */
                 if (mas->index > max)
                         return true;
···
                 return entry;
 
         /* Retries on dead nodes handled by mas_next_slot */
-        return mas_next_slot(mas, max, false);
+        return mas_next_slot(mas, max, false, false);
 }
 EXPORT_SYMBOL_GPL(mas_find);
···
 void *mas_find_range(struct ma_state *mas, unsigned long max)
 {
-        void *entry;
+        void *entry = NULL;
 
         if (mas_find_setup(mas, max, &entry))
                 return entry;
 
         /* Retries on dead nodes handled by mas_next_slot */
-        return mas_next_slot(mas, max, true);
+        return mas_next_slot(mas, max, true, false);
 }
 EXPORT_SYMBOL_GPL(mas_find_range);
···
 static inline bool mas_find_rev_setup(struct ma_state *mas, unsigned long min,
                 void **entry)
 {
-        *entry = NULL;
+        if (mas_is_active(mas)) {
+                if (mas->index > min)
+                        return false;
 
-        if (unlikely(mas_is_none(mas))) {
-                if (mas->index <= min)
-                        goto none;
-
-                mas->last = mas->index;
-                mas->node = MAS_START;
+                return true;
         }
 
-        if (unlikely(mas_is_paused(mas))) {
+        if (mas_is_paused(mas)) {
                 if (unlikely(mas->index <= min)) {
                         mas->node = MAS_NONE;
                         return true;
                 }
                 mas->node = MAS_START;
                 mas->last = --mas->index;
+        } else if (mas_is_none(mas)) {
+                if (mas->index <= min)
+                        goto none;
+
+                mas->last = mas->index;
+                mas->node = MAS_START;
+        } else if (mas_is_underflow(mas) || mas_is_overflow(mas)) {
+                if (mas->last <= min) {
+                        mas->node = MAS_UNDERFLOW;
+                        return true;
+                }
+
+                mas->node = MAS_START;
         }
 
-        if (unlikely(mas_is_start(mas))) {
+        if (mas_is_start(mas)) {
                 /* First run or continue */
                 if (mas->index < min)
                         return true;
···
 void *mas_find_rev(struct ma_state *mas, unsigned long min)
 {
-        void *entry;
+        void *entry = NULL;
 
         if (mas_find_rev_setup(mas, min, &entry))
                 return entry;
 
         /* Retries on dead nodes handled by mas_prev_slot */
-        return mas_prev_slot(mas, min, false);
+        return mas_prev_slot(mas, min, false, false);
 
 }
 EXPORT_SYMBOL_GPL(mas_find_rev);
···
 void *mas_find_range_rev(struct ma_state *mas, unsigned long min)
 {
-        void *entry;
+        void *entry = NULL;
 
         if (mas_find_rev_setup(mas, min, &entry))
                 return entry;
 
         /* Retries on dead nodes handled by mas_prev_slot */
-        return mas_prev_slot(mas, min, true);
+        return mas_prev_slot(mas, min, true, false);
 }
 EXPORT_SYMBOL_GPL(mas_find_range_rev);
+72 -15
lib/test_maple_tree.c
···
         MT_BUG_ON(mt, val != NULL);
         MT_BUG_ON(mt, mas.index != 0);
         MT_BUG_ON(mt, mas.last != 5);
-        MT_BUG_ON(mt, mas.node != MAS_NONE);
+        MT_BUG_ON(mt, mas.node != MAS_UNDERFLOW);
 
         mas.index = 0;
         mas.last = 5;
···
  *              exists  MAS_NONE        active          range
  *              exists  active          active          range
  *              DNE     active          active          set to last range
+ *              ERANGE  active          MAS_OVERFLOW    last range
  *
  * Function     ENTRY   Start           Result          index & last
  * mas_prev()
···
  *              any     MAS_ROOT        MAS_NONE        0
  *              exists  active          active          range
  *              DNE     active          active          last range
+ *              ERANGE  active          MAS_UNDERFLOW   last range
  *
  * Function     ENTRY   Start           Result          index & last
  * mas_find()
···
  *              DNE     MAS_START       MAS_NONE        0
  *              DNE     MAS_PAUSE       MAS_NONE        0
  *              DNE     MAS_ROOT        MAS_NONE        0
- *              DNE     MAS_NONE        MAS_NONE        0
+ *              DNE     MAS_NONE        MAS_NONE        1
  *                  if index == 0
  *              exists  MAS_START       MAS_ROOT        0
  *              exists  MAS_PAUSE       MAS_ROOT        0
···
  *              DNE     MAS_START       active          set to max
  *              exists  MAS_PAUSE       active          range
  *              DNE     MAS_PAUSE       active          set to max
- *              exists  MAS_NONE        active          range
+ *              exists  MAS_NONE        active          range (start at last)
  *              exists  active          active          range
  *              DNE     active          active          last range (max < last)
  *
···
  *              DNE     MAS_START       active          set to min
  *              exists  MAS_PAUSE       active          range
  *              DNE     MAS_PAUSE       active          set to min
- *              exists  MAS_NONE        active          range
+ *              exists  MAS_NONE        active          range (start at index)
  *              exists  active          active          range
  *              DNE     active          active          last range (min > index)
  *
···
         mtree_store_range(mt, 0, 0, ptr, GFP_KERNEL);
 
         mas_lock(&mas);
-        /* prev: Start -> none */
+        /* prev: Start -> underflow*/
         entry = mas_prev(&mas, 0);
         MT_BUG_ON(mt, entry != NULL);
-        MT_BUG_ON(mt, mas.node != MAS_NONE);
+        MT_BUG_ON(mt, mas.node != MAS_UNDERFLOW);
 
         /* prev: Start -> root */
         mas_set(&mas, 10);
···
         MT_BUG_ON(mt, entry != NULL);
         MT_BUG_ON(mt, mas.node != MAS_NONE);
 
-        /* next: start -> none */
+        /* next: start -> none*/
         mas_set(&mas, 10);
         entry = mas_next(&mas, ULONG_MAX);
         MT_BUG_ON(mt, mas.index != 1);
···
         MT_BUG_ON(mt, mas.last != 0x2500);
         MT_BUG_ON(mt, !mas_active(mas));
 
-        /* next:active -> active out of range*/
+        /* next:active -> active beyond data */
         entry = mas_next(&mas, 0x2999);
         MT_BUG_ON(mt, entry != NULL);
         MT_BUG_ON(mt, mas.index != 0x2501);
         MT_BUG_ON(mt, mas.last != 0x2fff);
         MT_BUG_ON(mt, !mas_active(mas));
 
-        /* Continue after out of range*/
+        /* Continue after last range ends after max */
         entry = mas_next(&mas, ULONG_MAX);
         MT_BUG_ON(mt, entry != ptr3);
         MT_BUG_ON(mt, mas.index != 0x3000);
         MT_BUG_ON(mt, mas.last != 0x3500);
         MT_BUG_ON(mt, !mas_active(mas));
 
-        /* next:active -> active out of range*/
+        /* next:active -> active continued */
         entry = mas_next(&mas, ULONG_MAX);
         MT_BUG_ON(mt, entry != NULL);
         MT_BUG_ON(mt, mas.index != 0x3501);
         MT_BUG_ON(mt, mas.last != ULONG_MAX);
+        MT_BUG_ON(mt, !mas_active(mas));
+
+        /* next:active -> overflow */
+        entry = mas_next(&mas, ULONG_MAX);
+        MT_BUG_ON(mt, entry != NULL);
+        MT_BUG_ON(mt, mas.index != 0x3501);
+        MT_BUG_ON(mt, mas.last != ULONG_MAX);
+        MT_BUG_ON(mt, mas.node != MAS_OVERFLOW);
+
+        /* next:overflow -> overflow */
+        entry = mas_next(&mas, ULONG_MAX);
+        MT_BUG_ON(mt, entry != NULL);
+        MT_BUG_ON(mt, mas.index != 0x3501);
+        MT_BUG_ON(mt, mas.last != ULONG_MAX);
+        MT_BUG_ON(mt, mas.node != MAS_OVERFLOW);
+
+        /* prev:overflow -> active */
+        entry = mas_prev(&mas, 0);
+        MT_BUG_ON(mt, entry != ptr3);
+        MT_BUG_ON(mt, mas.index != 0x3000);
+        MT_BUG_ON(mt, mas.last != 0x3500);
         MT_BUG_ON(mt, !mas_active(mas));
 
         /* next: none -> active, skip value at location */
···
         MT_BUG_ON(mt, mas.last != 0x1500);
         MT_BUG_ON(mt, !mas_active(mas));
 
-        /* prev:active -> active out of range*/
+        /* prev:active -> active spanning end range */
+        entry = mas_prev(&mas, 0x0100);
+        MT_BUG_ON(mt, entry != NULL);
+        MT_BUG_ON(mt, mas.index != 0);
+        MT_BUG_ON(mt, mas.last != 0x0FFF);
+        MT_BUG_ON(mt, !mas_active(mas));
+
+        /* prev:active -> underflow */
         entry = mas_prev(&mas, 0);
         MT_BUG_ON(mt, entry != NULL);
         MT_BUG_ON(mt, mas.index != 0);
         MT_BUG_ON(mt, mas.last != 0x0FFF);
+        MT_BUG_ON(mt, mas.node != MAS_UNDERFLOW);
+
+        /* prev:underflow -> underflow */
+        entry = mas_prev(&mas, 0);
+        MT_BUG_ON(mt, entry != NULL);
+        MT_BUG_ON(mt, mas.index != 0);
+        MT_BUG_ON(mt, mas.last != 0x0FFF);
+        MT_BUG_ON(mt, mas.node != MAS_UNDERFLOW);
+
+        /* next:underflow -> active */
+        entry = mas_next(&mas, ULONG_MAX);
+        MT_BUG_ON(mt, entry != ptr);
+        MT_BUG_ON(mt, mas.index != 0x1000);
+        MT_BUG_ON(mt, mas.last != 0x1500);
+        MT_BUG_ON(mt, !mas_active(mas));
+
+        /* prev:first value -> underflow */
+        entry = mas_prev(&mas, 0x1000);
+        MT_BUG_ON(mt, entry != NULL);
+        MT_BUG_ON(mt, mas.index != 0x1000);
+        MT_BUG_ON(mt, mas.last != 0x1500);
+        MT_BUG_ON(mt, mas.node != MAS_UNDERFLOW);
+
+        /* find:underflow -> first value */
+        entry = mas_find(&mas, ULONG_MAX);
+        MT_BUG_ON(mt, entry != ptr);
+        MT_BUG_ON(mt, mas.index != 0x1000);
+        MT_BUG_ON(mt, mas.last != 0x1500);
         MT_BUG_ON(mt, !mas_active(mas));
 
         /* prev: pause ->active */
···
         MT_BUG_ON(mt, mas.last != 0x2500);
         MT_BUG_ON(mt, !mas_active(mas));
 
-        /* prev:active -> active out of range*/
+        /* prev:active -> active spanning min */
         entry = mas_prev(&mas, 0x1600);
         MT_BUG_ON(mt, entry != NULL);
         MT_BUG_ON(mt, mas.index != 0x1501);
         MT_BUG_ON(mt, mas.last != 0x1FFF);
         MT_BUG_ON(mt, !mas_active(mas));
 
-        /* prev: active ->active, continue*/
+        /* prev: active ->active, continue */
         entry = mas_prev(&mas, 0);
         MT_BUG_ON(mt, entry != ptr);
         MT_BUG_ON(mt, mas.index != 0x1000);
···
         MT_BUG_ON(mt, mas.last != 0x2FFF);
         MT_BUG_ON(mt, !mas_active(mas));
 
-        /* find: none ->active */
+        /* find: overflow ->active */
         entry = mas_find(&mas, 0x5000);
         MT_BUG_ON(mt, entry != ptr3);
         MT_BUG_ON(mt, mas.index != 0x3000);
···
         mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE);
         check_empty_area_fill(&tree);
         mtree_destroy(&tree);
-
 
         mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE);
         check_state_handling(&tree);
+2
mm/damon/vaddr-test.h
···
                 KUNIT_EXPECT_EQ(test, r->ar.start, expected[i * 2]);
                 KUNIT_EXPECT_EQ(test, r->ar.end, expected[i * 2 + 1]);
         }
+
+        damon_destroy_target(t);
 }
 
 /*
+2 -1
mm/damon/vaddr.c
···
         bool referenced = false;
         pte_t entry = huge_ptep_get(pte);
         struct folio *folio = pfn_folio(pte_pfn(entry));
+        unsigned long psize = huge_page_size(hstate_vma(vma));
 
         folio_get(folio);
 
         if (pte_young(entry)) {
                 referenced = true;
                 entry = pte_mkold(entry);
-                set_huge_pte_at(mm, addr, pte, entry);
+                set_huge_pte_at(mm, addr, pte, entry, psize);
         }
 
 #ifdef CONFIG_MMU_NOTIFIER
+2 -2
mm/filemap.c
···
         if (count) {
                 set_pte_range(vmf, folio, page, count, addr);
                 folio_ref_add(folio, count);
-                if (in_range(vmf->address, addr, count))
+                if (in_range(vmf->address, addr, count * PAGE_SIZE))
                         ret = VM_FAULT_NOPAGE;
         }
 
···
         if (count) {
                 set_pte_range(vmf, folio, page, count, addr);
                 folio_ref_add(folio, count);
-                if (in_range(vmf->address, addr, count))
+                if (in_range(vmf->address, addr, count * PAGE_SIZE))
                         ret = VM_FAULT_NOPAGE;
         }
 
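The fix works because in_range(val, start, len) checks val against the half-open window [start, start + len), with len in the same units as val; passing the raw PTE count made the window count bytes wide instead of count pages, so the faulting address was almost never considered covered. A self-contained sketch of the semantics (in_range() is re-implemented here for illustration, matching the kernel helper's behavior):

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

#define PAGE_SIZE 4096UL

/* same semantics as the kernel's in_range(): val in [start, start + len) */
static bool in_range(unsigned long val, unsigned long start, unsigned long len)
{
        return val >= start && val < start + len;
}

int main(void)
{
        unsigned long addr = 0x10000;           /* first mapped address */
        unsigned long count = 8;                /* PTEs mapped in this batch */
        unsigned long fault = addr + 5 * PAGE_SIZE;     /* faulting address */

        /* buggy check: a len of 8 covers 8 bytes, not 8 pages */
        assert(!in_range(fault, addr, count));
        /* fixed check: scale the page count to a byte length */
        assert(in_range(fault, addr, count * PAGE_SIZE));
        printf("fault address covered only with count * PAGE_SIZE\n");
        return 0;
}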
+24 -19
mm/hugetlb.c
···
 
 static void
 hugetlb_install_folio(struct vm_area_struct *vma, pte_t *ptep, unsigned long addr,
-                      struct folio *new_folio, pte_t old)
+                      struct folio *new_folio, pte_t old, unsigned long sz)
 {
         pte_t newpte = make_huge_pte(vma, &new_folio->page, 1);
 
···
         hugepage_add_new_anon_rmap(new_folio, vma, addr);
         if (userfaultfd_wp(vma) && huge_pte_uffd_wp(old))
                 newpte = huge_pte_mkuffd_wp(newpte);
-        set_huge_pte_at(vma->vm_mm, addr, ptep, newpte);
+        set_huge_pte_at(vma->vm_mm, addr, ptep, newpte, sz);
         hugetlb_count_add(pages_per_huge_page(hstate_vma(vma)), vma->vm_mm);
         folio_set_hugetlb_migratable(new_folio);
 }
···
         } else if (unlikely(is_hugetlb_entry_hwpoisoned(entry))) {
                 if (!userfaultfd_wp(dst_vma))
                         entry = huge_pte_clear_uffd_wp(entry);
-                set_huge_pte_at(dst, addr, dst_pte, entry);
+                set_huge_pte_at(dst, addr, dst_pte, entry, sz);
         } else if (unlikely(is_hugetlb_entry_migration(entry))) {
                 swp_entry_t swp_entry = pte_to_swp_entry(entry);
                 bool uffd_wp = pte_swp_uffd_wp(entry);
···
                         entry = swp_entry_to_pte(swp_entry);
                         if (userfaultfd_wp(src_vma) && uffd_wp)
                                 entry = pte_swp_mkuffd_wp(entry);
-                        set_huge_pte_at(src, addr, src_pte, entry);
+                        set_huge_pte_at(src, addr, src_pte, entry, sz);
                 }
                 if (!userfaultfd_wp(dst_vma))
                         entry = huge_pte_clear_uffd_wp(entry);
-                set_huge_pte_at(dst, addr, dst_pte, entry);
+                set_huge_pte_at(dst, addr, dst_pte, entry, sz);
         } else if (unlikely(is_pte_marker(entry))) {
                 pte_marker marker = copy_pte_marker(
                         pte_to_swp_entry(entry), dst_vma);
 
                 if (marker)
                         set_huge_pte_at(dst, addr, dst_pte,
-                                        make_pte_marker(marker));
+                                        make_pte_marker(marker), sz);
         } else {
                 entry = huge_ptep_get(src_pte);
                 pte_folio = page_folio(pte_page(entry));
···
                                 goto again;
                         }
                         hugetlb_install_folio(dst_vma, dst_pte, addr,
-                                              new_folio, src_pte_old);
+                                              new_folio, src_pte_old, sz);
                         spin_unlock(src_ptl);
                         spin_unlock(dst_ptl);
                         continue;
···
                 if (!userfaultfd_wp(dst_vma))
                         entry = huge_pte_clear_uffd_wp(entry);
 
-                set_huge_pte_at(dst, addr, dst_pte, entry);
+                set_huge_pte_at(dst, addr, dst_pte, entry, sz);
                 hugetlb_count_add(npages, dst);
         }
         spin_unlock(src_ptl);
···
 }
 
 static void move_huge_pte(struct vm_area_struct *vma, unsigned long old_addr,
-                          unsigned long new_addr, pte_t *src_pte, pte_t *dst_pte)
+                          unsigned long new_addr, pte_t *src_pte, pte_t *dst_pte,
+                          unsigned long sz)
 {
         struct hstate *h = hstate_vma(vma);
         struct mm_struct *mm = vma->vm_mm;
···
                 spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
 
         pte = huge_ptep_get_and_clear(mm, old_addr, src_pte);
-        set_huge_pte_at(mm, new_addr, dst_pte, pte);
+        set_huge_pte_at(mm, new_addr, dst_pte, pte, sz);
 
         if (src_ptl != dst_ptl)
                 spin_unlock(src_ptl);
···
                 if (!dst_pte)
                         break;
 
-                move_huge_pte(vma, old_addr, new_addr, src_pte, dst_pte);
+                move_huge_pte(vma, old_addr, new_addr, src_pte, dst_pte, sz);
         }
 
         if (shared_pmd)
···
                         if (pte_swp_uffd_wp_any(pte) &&
                             !(zap_flags & ZAP_FLAG_DROP_MARKER))
                                 set_huge_pte_at(mm, address, ptep,
-                                                make_pte_marker(PTE_MARKER_UFFD_WP));
+                                                make_pte_marker(PTE_MARKER_UFFD_WP),
+                                                sz);
                         else
                                 huge_pte_clear(mm, address, ptep, sz);
                         spin_unlock(ptl);
···
                 if (huge_pte_uffd_wp(pte) &&
                     !(zap_flags & ZAP_FLAG_DROP_MARKER))
                         set_huge_pte_at(mm, address, ptep,
-                                        make_pte_marker(PTE_MARKER_UFFD_WP));
+                                        make_pte_marker(PTE_MARKER_UFFD_WP),
+                                        sz);
                 hugetlb_count_sub(pages_per_huge_page(h), mm);
                 page_remove_rmap(page, vma, true);
 
···
                 hugepage_add_new_anon_rmap(new_folio, vma, haddr);
                 if (huge_pte_uffd_wp(pte))
                         newpte = huge_pte_mkuffd_wp(newpte);
-                set_huge_pte_at(mm, haddr, ptep, newpte);
+                set_huge_pte_at(mm, haddr, ptep, newpte, huge_page_size(h));
                 folio_set_hugetlb_migratable(new_folio);
                 /* Make the old page be freed below */
                 new_folio = old_folio;
···
          */
         if (unlikely(pte_marker_uffd_wp(old_pte)))
                 new_pte = huge_pte_mkuffd_wp(new_pte);
-        set_huge_pte_at(mm, haddr, ptep, new_pte);
+        set_huge_pte_at(mm, haddr, ptep, new_pte, huge_page_size(h));
 
         hugetlb_count_add(pages_per_huge_page(h), mm);
         if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
···
                 }
 
                 _dst_pte = make_pte_marker(PTE_MARKER_POISONED);
-                set_huge_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte);
+                set_huge_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte,
+                                huge_page_size(h));
 
                 /* No need to invalidate - it was non-present before */
                 update_mmu_cache(dst_vma, dst_addr, dst_pte);
···
         if (wp_enabled)
                 _dst_pte = huge_pte_mkuffd_wp(_dst_pte);
 
-        set_huge_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte);
+        set_huge_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte, huge_page_size(h));
 
         hugetlb_count_add(pages_per_huge_page(h), dst_mm);
 
···
                         else if (uffd_wp_resolve)
                                 newpte = pte_swp_clear_uffd_wp(newpte);
                         if (!pte_same(pte, newpte))
-                                set_huge_pte_at(mm, address, ptep, newpte);
+                                set_huge_pte_at(mm, address, ptep, newpte, psize);
                 } else if (unlikely(is_pte_marker(pte))) {
                         /* No other markers apply for now. */
                         WARN_ON_ONCE(!pte_marker_uffd_wp(pte));
···
                         if (unlikely(uffd_wp))
                                 /* Safe to modify directly (none->non-present). */
                                 set_huge_pte_at(mm, address, ptep,
-                                                make_pte_marker(PTE_MARKER_UFFD_WP));
+                                                make_pte_marker(PTE_MARKER_UFFD_WP),
+                                                psize);
                 }
                 spin_unlock(ptl);
         }
+13
mm/memcontrol.c
···
         case _MEMSWAP:
                 ret = mem_cgroup_resize_max(memcg, nr_pages, true);
                 break;
+        case _KMEM:
+                pr_warn_once("kmem.limit_in_bytes is deprecated and will be removed. "
+                             "Writing any value to this file has no effect. "
+                             "Please report your usecase to linux-mm@kvack.org if you "
+                             "depend on this functionality.\n");
+                ret = 0;
+                break;
         case _TCP:
                 ret = memcg_update_tcp_max(memcg, nr_pages);
                 break;
···
                 .seq_show = memcg_numa_stat_show,
         },
 #endif
+        {
+                .name = "kmem.limit_in_bytes",
+                .private = MEMFILE_PRIVATE(_KMEM, RES_LIMIT),
+                .write = mem_cgroup_write,
+                .read_u64 = mem_cgroup_read_u64,
+        },
         {
                 .name = "kmem.usage_in_bytes",
                 .private = MEMFILE_PRIVATE(_KMEM, RES_USAGE),
+19 -20
mm/mempolicy.c
···
         unsigned long start;
         unsigned long end;
         struct vm_area_struct *first;
+        bool has_unmovable;
 };
 
 /*
···
 /*
  * queue_folios_pmd() has three possible return values:
  * 0 - folios are placed on the right node or queued successfully, or
- *     special page is met, i.e. huge zero page.
- * 1 - there is unmovable folio, and MPOL_MF_MOVE* & MPOL_MF_STRICT were
- *     specified.
+ *     special page is met, i.e. zero page, or unmovable page is found
+ *     but continue walking (indicated by queue_pages.has_unmovable).
  * -EIO - is migration entry or only MPOL_MF_STRICT was specified and an
  *        existing folio was already on a node that does not follow the
  *        policy.
···
         if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
                 if (!vma_migratable(walk->vma) ||
                     migrate_folio_add(folio, qp->pagelist, flags)) {
-                        ret = 1;
+                        qp->has_unmovable = true;
                         goto unlock;
                 }
         } else
···
  *
  * queue_folios_pte_range() has three possible return values:
  * 0 - folios are placed on the right node or queued successfully, or
- *     special page is met, i.e. zero page.
- * 1 - there is unmovable folio, and MPOL_MF_MOVE* & MPOL_MF_STRICT were
- *     specified.
+ *     special page is met, i.e. zero page, or unmovable page is found
+ *     but continue walking (indicated by queue_pages.has_unmovable).
  * -EIO - only MPOL_MF_STRICT was specified and an existing folio was already
  *        on a node that does not follow the policy.
  */
···
         struct folio *folio;
         struct queue_pages *qp = walk->private;
         unsigned long flags = qp->flags;
-        bool has_unmovable = false;
         pte_t *pte, *mapped_pte;
         pte_t ptent;
         spinlock_t *ptl;
···
                 if (!queue_folio_required(folio, qp))
                         continue;
                 if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
-                        /* MPOL_MF_STRICT must be specified if we get here */
-                        if (!vma_migratable(vma)) {
-                                has_unmovable = true;
-                                break;
-                        }
+                        /*
+                         * MPOL_MF_STRICT must be specified if we get here.
+                         * Continue walking vmas due to MPOL_MF_MOVE* flags.
+                         */
+                        if (!vma_migratable(vma))
+                                qp->has_unmovable = true;
 
                         /*
                          * Do not abort immediately since there may be
···
                          * need migrate other LRU pages.
                          */
                         if (migrate_folio_add(folio, qp->pagelist, flags))
-                                has_unmovable = true;
+                                qp->has_unmovable = true;
                 } else
                         break;
         }
         pte_unmap_unlock(mapped_pte, ptl);
         cond_resched();
-
-        if (has_unmovable)
-                return 1;
 
         return addr != end ? -EIO : 0;
 }
···
                  * Detecting misplaced folio but allow migrating folios which
                  * have been queued.
                  */
-                ret = 1;
+                qp->has_unmovable = true;
                 goto unlock;
         }
 
···
                          * Failed to isolate folio but allow migrating pages
                          * which have been queued.
                          */
-                        ret = 1;
+                        qp->has_unmovable = true;
         }
 unlock:
         spin_unlock(ptl);
···
                 .start = start,
                 .end = end,
                 .first = NULL,
+                .has_unmovable = false,
         };
         const struct mm_walk_ops *ops = lock_vma ?
                         &queue_pages_lock_vma_walk_ops : &queue_pages_walk_ops;
 
         err = walk_page_range(mm, start, end, ops, &qp);
 
+        if (qp.has_unmovable)
+                err = 1;
         if (!qp.first)
                 /* whole range in hole */
                 err = -EFAULT;
···
                         putback_movable_pages(&pagelist);
                 }
 
-                if ((ret > 0) || (nr_failed && (flags & MPOL_MF_STRICT)))
+                if (((ret > 0) || nr_failed) && (flags & MPOL_MF_STRICT))
                         err = -EIO;
         } else {
 up_out:
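The pattern behind this patch: instead of returning 1 from a walk callback (which aborts walk_page_range() early and skips queueing movable pages in later VMAs), the failure is recorded in the walk's private state and checked once the whole walk finishes. A toy model of that design, with all names illustrative:

#include <stdbool.h>
#include <stdio.h>

/* walk-private state, like struct queue_pages above */
struct walk_state {
        bool has_unmovable;
        int queued;
};

/* visit one "page": record unmovable pages but keep walking */
static int visit(struct walk_state *qp, bool movable)
{
        if (!movable) {
                qp->has_unmovable = true;       /* remember, don't abort */
                return 0;
        }
        qp->queued++;
        return 0;
}

int main(void)
{
        struct walk_state qp = { false, 0 };
        bool pages[] = { true, false, true };   /* one unmovable in the middle */

        for (unsigned int i = 0; i < sizeof(pages) / sizeof(pages[0]); i++)
                visit(&qp, pages[i]);

        /* strict mode turns the recorded flag into an error only at the end */
        printf("queued %d page(s), err = %d\n", qp.queued,
               qp.has_unmovable ? 1 : 0);
        return 0;
}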
+5 -2
mm/migrate.c
···
 
 #ifdef CONFIG_HUGETLB_PAGE
                 if (folio_test_hugetlb(folio)) {
-                        unsigned int shift = huge_page_shift(hstate_vma(vma));
+                        struct hstate *h = hstate_vma(vma);
+                        unsigned int shift = huge_page_shift(h);
+                        unsigned long psize = huge_page_size(h);
 
                         pte = arch_make_huge_pte(pte, shift, vma->vm_flags);
                         if (folio_test_anon(folio))
···
                                                 rmap_flags);
                         else
                                 page_dup_file_rmap(new, true);
-                        set_huge_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte);
+                        set_huge_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte,
+                                        psize);
                 } else
 #endif
                 {
+18 -5
mm/rmap.c
···
         struct mmu_notifier_range range;
         enum ttu_flags flags = (enum ttu_flags)(long)arg;
         unsigned long pfn;
+        unsigned long hsz = 0;
 
         /*
          * When racing against e.g. zap_pte_range() on another cpu,
···
                  */
                 adjust_range_if_pmd_sharing_possible(vma, &range.start,
                                                      &range.end);
+
+                /* We need the huge page size for set_huge_pte_at() */
+                hsz = huge_page_size(hstate_vma(vma));
         }
         mmu_notifier_invalidate_range_start(&range);
 
···
                         pteval = swp_entry_to_pte(make_hwpoison_entry(subpage));
                         if (folio_test_hugetlb(folio)) {
                                 hugetlb_count_sub(folio_nr_pages(folio), mm);
-                                set_huge_pte_at(mm, address, pvmw.pte, pteval);
+                                set_huge_pte_at(mm, address, pvmw.pte, pteval,
+                                                hsz);
                         } else {
                                 dec_mm_counter(mm, mm_counter(&folio->page));
                                 set_pte_at(mm, address, pvmw.pte, pteval);
···
         struct mmu_notifier_range range;
         enum ttu_flags flags = (enum ttu_flags)(long)arg;
         unsigned long pfn;
+        unsigned long hsz = 0;
 
         /*
          * When racing against e.g. zap_pte_range() on another cpu,
···
                  */
                 adjust_range_if_pmd_sharing_possible(vma, &range.start,
                                                      &range.end);
+
+                /* We need the huge page size for set_huge_pte_at() */
+                hsz = huge_page_size(hstate_vma(vma));
         }
         mmu_notifier_invalidate_range_start(&range);
 
···
                         pteval = swp_entry_to_pte(make_hwpoison_entry(subpage));
                         if (folio_test_hugetlb(folio)) {
                                 hugetlb_count_sub(folio_nr_pages(folio), mm);
-                                set_huge_pte_at(mm, address, pvmw.pte, pteval);
+                                set_huge_pte_at(mm, address, pvmw.pte, pteval,
+                                                hsz);
                         } else {
                                 dec_mm_counter(mm, mm_counter(&folio->page));
                                 set_pte_at(mm, address, pvmw.pte, pteval);
···
 
                         if (arch_unmap_one(mm, vma, address, pteval) < 0) {
                                 if (folio_test_hugetlb(folio))
-                                        set_huge_pte_at(mm, address, pvmw.pte, pteval);
+                                        set_huge_pte_at(mm, address, pvmw.pte,
+                                                        pteval, hsz);
                                 else
                                         set_pte_at(mm, address, pvmw.pte, pteval);
                                 ret = false;
···
                         if (anon_exclusive &&
                             page_try_share_anon_rmap(subpage)) {
                                 if (folio_test_hugetlb(folio))
-                                        set_huge_pte_at(mm, address, pvmw.pte, pteval);
+                                        set_huge_pte_at(mm, address, pvmw.pte,
+                                                        pteval, hsz);
                                 else
                                         set_pte_at(mm, address, pvmw.pte, pteval);
                                 ret = false;
···
                         if (pte_uffd_wp(pteval))
                                 swp_pte = pte_swp_mkuffd_wp(swp_pte);
                         if (folio_test_hugetlb(folio))
-                                set_huge_pte_at(mm, address, pvmw.pte, swp_pte);
+                                set_huge_pte_at(mm, address, pvmw.pte, swp_pte,
+                                                hsz);
                         else
                                 set_pte_at(mm, address, pvmw.pte, swp_pte);
                         trace_set_migration_pte(address, pte_val(swp_pte),
+1 -1
mm/vmalloc.c
···
                         pte_t entry = pfn_pte(pfn, prot);
 
                         entry = arch_make_huge_pte(entry, ilog2(size), 0);
-                        set_huge_pte_at(&init_mm, addr, pte, entry);
+                        set_huge_pte_at(&init_mm, addr, pte, entry, size);
                         pfn += PFN_DOWN(size);
                         continue;
                 }
+20
mm/zswap.c
···
                 return false;
 
         /*
+         * If this is a duplicate, it must be removed before attempting to store
+         * it, otherwise, if the store fails the old page won't be removed from
+         * the tree, and it might be written back overriding the new data.
+         */
+        spin_lock(&tree->lock);
+        dupentry = zswap_rb_search(&tree->rbroot, offset);
+        if (dupentry) {
+                zswap_duplicate_entry++;
+                zswap_invalidate_entry(tree, dupentry);
+        }
+        spin_unlock(&tree->lock);
+
+        /*
          * XXX: zswap reclaim does not work with cgroups yet. Without a
          * cgroup-aware entry LRU, we will push out entries system-wide based on
          * local cgroup limits.
···
 
         /* map */
         spin_lock(&tree->lock);
+        /*
+         * A duplicate entry should have been removed at the beginning of this
+         * function. Since the swap entry should be pinned, if a duplicate is
+         * found again here it means that something went wrong in the swap
+         * cache.
+         */
         while (zswap_rb_insert(&tree->rbroot, entry, &dupentry) == -EEXIST) {
+                WARN_ON(1);
                 zswap_duplicate_entry++;
                 zswap_invalidate_entry(tree, dupentry);
         }
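The ordering invariant this hunk establishes: any duplicate for the offset is invalidated before the new store is attempted, so a failed store can never leave a stale entry behind that writeback would later push out over the new data. A toy single-slot model of the pattern (all names illustrative, not zswap API):

#include <stdio.h>

/* one "tree slot" standing in for the rbtree entry at a given offset */
struct slot {
        int used;
        char data[16];
};

static int store(struct slot *s, const char *data, int alloc_fails)
{
        /* fix: drop any stale duplicate before attempting the store */
        s->used = 0;

        if (alloc_fails)
                return -1;      /* store failed, but nothing stale remains */

        snprintf(s->data, sizeof(s->data), "%s", data);
        s->used = 1;
        return 0;
}

int main(void)
{
        struct slot s = { 1, "old" };

        store(&s, "new", /* alloc_fails = */ 1);
        /* with the fix, a later writeback finds no stale "old" to write */
        printf("slot is %s\n", s.used ? s.data : "empty");
        return 0;
}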
+2 -2
tools/testing/selftests/mm/charge_reserved_hugetlb.sh
···
 fi
 
 if [[ $cgroup2 ]]; then
-  cgroup_path=$(mount -t cgroup2 | head -1 | awk -e '{print $3}')
+  cgroup_path=$(mount -t cgroup2 | head -1 | awk '{print $3}')
   if [[ -z "$cgroup_path" ]]; then
     cgroup_path=/dev/cgroup/memory
     mount -t cgroup2 none $cgroup_path
···
   fi
   echo "+hugetlb" >$cgroup_path/cgroup.subtree_control
 else
-  cgroup_path=$(mount -t cgroup | grep ",hugetlb" | awk -e '{print $3}')
+  cgroup_path=$(mount -t cgroup | grep ",hugetlb" | awk '{print $3}')
   if [[ -z "$cgroup_path" ]]; then
     cgroup_path=/dev/cgroup/memory
     mount -t cgroup memory,hugetlb $cgroup_path
+2 -2
tools/testing/selftests/mm/hugetlb_reparenting_test.sh
··· 20 20 21 21 22 22 if [[ $cgroup2 ]]; then 23 - CGROUP_ROOT=$(mount -t cgroup2 | head -1 | awk -e '{print $3}') 23 + CGROUP_ROOT=$(mount -t cgroup2 | head -1 | awk '{print $3}') 24 24 if [[ -z "$CGROUP_ROOT" ]]; then 25 25 CGROUP_ROOT=/dev/cgroup/memory 26 26 mount -t cgroup2 none $CGROUP_ROOT ··· 28 28 fi 29 29 echo "+hugetlb +memory" >$CGROUP_ROOT/cgroup.subtree_control 30 30 else 31 - CGROUP_ROOT=$(mount -t cgroup | grep ",hugetlb" | awk -e '{print $3}') 31 + CGROUP_ROOT=$(mount -t cgroup | grep ",hugetlb" | awk '{print $3}') 32 32 if [[ -z "$CGROUP_ROOT" ]]; then 33 33 CGROUP_ROOT=/dev/cgroup/memory 34 34 mount -t cgroup memory,hugetlb $CGROUP_ROOT