Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

thp: prevent hugepages during args/env copying into the user stack

Transparent hugepages can only be created if rmap is fully
functional. So we must prevent hugepages from being created while
is_vma_temporary_stack() is true.

This also optimizes away some harmless but unnecessary setting of
khugepaged_scan.address and it switches some BUG_ON to VM_BUG_ON.

Signed-off-by: Andrea Arcangeli <aarcange@redhat.com>
Acked-by: Rik van Riel <riel@redhat.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

authored by

Andrea Arcangeli and committed by
Linus Torvalds
a7d6e4ec 09f586b3

+18 -20
+2 -1
include/linux/huge_mm.h
··· 57 57 (transparent_hugepage_flags & \ 58 58 (1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG) && \ 59 59 ((__vma)->vm_flags & VM_HUGEPAGE))) && \ 60 - !((__vma)->vm_flags & VM_NOHUGEPAGE)) 60 + !((__vma)->vm_flags & VM_NOHUGEPAGE) && \ 61 + !is_vma_temporary_stack(__vma)) 61 62 #define transparent_hugepage_defrag(__vma) \ 62 63 ((transparent_hugepage_flags & \ 63 64 (1<<TRANSPARENT_HUGEPAGE_DEFRAG_FLAG)) || \
+16 -19
mm/huge_memory.c
··· 1811 1811 /* VM_PFNMAP vmas may have vm_ops null but vm_file set */ 1812 1812 if (!vma->anon_vma || vma->vm_ops || vma->vm_file) 1813 1813 goto out; 1814 + if (is_vma_temporary_stack(vma)) 1815 + goto out; 1814 1816 VM_BUG_ON(is_linear_pfn_mapping(vma) || is_pfn_mapping(vma)); 1815 1817 1816 1818 pgd = pgd_offset(mm, address); ··· 2034 2032 if ((!(vma->vm_flags & VM_HUGEPAGE) && 2035 2033 !khugepaged_always()) || 2036 2034 (vma->vm_flags & VM_NOHUGEPAGE)) { 2035 + skip: 2037 2036 progress++; 2038 2037 continue; 2039 2038 } 2040 - 2041 2039 /* VM_PFNMAP vmas may have vm_ops null but vm_file set */ 2042 - if (!vma->anon_vma || vma->vm_ops || vma->vm_file) { 2043 - khugepaged_scan.address = vma->vm_end; 2044 - progress++; 2045 - continue; 2046 - } 2040 + if (!vma->anon_vma || vma->vm_ops || vma->vm_file) 2041 + goto skip; 2042 + if (is_vma_temporary_stack(vma)) 2043 + goto skip; 2044 + 2047 2045 VM_BUG_ON(is_linear_pfn_mapping(vma) || is_pfn_mapping(vma)); 2048 2046 2049 2047 hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK; 2050 2048 hend = vma->vm_end & HPAGE_PMD_MASK; 2051 - if (hstart >= hend) { 2052 - progress++; 2053 - continue; 2054 - } 2049 + if (hstart >= hend) 2050 + goto skip; 2051 + if (khugepaged_scan.address > hend) 2052 + goto skip; 2055 2053 if (khugepaged_scan.address < hstart) 2056 2054 khugepaged_scan.address = hstart; 2057 - if (khugepaged_scan.address > hend) { 2058 - khugepaged_scan.address = hend + HPAGE_PMD_SIZE; 2059 - progress++; 2060 - continue; 2061 - } 2062 - BUG_ON(khugepaged_scan.address & ~HPAGE_PMD_MASK); 2055 + VM_BUG_ON(khugepaged_scan.address & ~HPAGE_PMD_MASK); 2063 2056 2064 2057 while (khugepaged_scan.address < hend) { 2065 2058 int ret; ··· 2083 2086 breakouterloop_mmap_sem: 2084 2087 2085 2088 spin_lock(&khugepaged_mm_lock); 2086 - BUG_ON(khugepaged_scan.mm_slot != mm_slot); 2089 + VM_BUG_ON(khugepaged_scan.mm_slot != mm_slot); 2087 2090 /* 2088 2091 * Release the current mm_slot if this mm is about to die, or 
2089 2092 * if we scanned all vmas of this mm. ··· 2238 2241 2239 2242 for (;;) { 2240 2243 mutex_unlock(&khugepaged_mutex); 2241 - BUG_ON(khugepaged_thread != current); 2244 + VM_BUG_ON(khugepaged_thread != current); 2242 2245 khugepaged_loop(); 2243 - BUG_ON(khugepaged_thread != current); 2246 + VM_BUG_ON(khugepaged_thread != current); 2244 2247 2245 2248 mutex_lock(&khugepaged_mutex); 2246 2249 if (!khugepaged_enabled())