Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

Merge tag 'mm-hotfixes-stable-2025-02-19-17-49' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm

Pull misc fixes from Andrew Morton:
"18 hotfixes. 5 are cc:stable and the remainder address post-6.13
issues or aren't considered necessary for -stable kernels.

10 are for MM and 8 are for non-MM. All are singletons, please see the
changelogs for details"

* tag 'mm-hotfixes-stable-2025-02-19-17-49' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm:
test_xarray: fix failure in check_pause when CONFIG_XARRAY_MULTI is not defined
kasan: don't call find_vm_area() in a PREEMPT_RT kernel
MAINTAINERS: update Nick's contact info
selftests/mm: fix check for running THP tests
mm: hugetlb: avoid fallback for specific node allocation of 1G pages
memcg: avoid dead loop when setting memory.max
mailmap: update Nick's entry
mm: pgtable: fix incorrect reclaim of non-empty PTE pages
taskstats: modify taskstats version
getdelays: fix error format characters
mm/migrate_device: don't add folio to be freed to LRU in migrate_device_finalize()
tools/mm: fix build warnings with musl-libc
mailmap: add entry for Feng Tang
.mailmap: add entries for Jeff Johnson
mm,madvise,hugetlb: check for 0-length range after end address adjustment
mm/zswap: fix inconsistency when zswap_store_page() fails
lib/iov_iter: fix import_iovec_ubuf iovec management
procfs: fix a locking bug in a vmcore_add_device_dump() error path

+120 -66
+4
.mailmap
···
 Felipe W Damasio <felipewd@terra.com.br>
 Felix Kuhling <fxkuehl@gmx.de>
 Felix Moeller <felix@derklecks.de>
+Feng Tang <feng.79.tang@gmail.com> <feng.tang@intel.com>
 Fenglin Wu <quic_fenglinw@quicinc.com> <fenglinw@codeaurora.org>
 Filipe Lautert <filipe@icewall.org>
 Finn Thain <fthain@linux-m68k.org> <fthain@telegraphics.com.au>
···
 Jean Tourrilhes <jt@hpl.hp.com>
 Jeevan Shriram <quic_jshriram@quicinc.com> <jshriram@codeaurora.org>
 Jeff Garzik <jgarzik@pretzel.yyz.us>
+Jeff Johnson <jeff.johnson@oss.qualcomm.com> <jjohnson@codeaurora.org>
+Jeff Johnson <jeff.johnson@oss.qualcomm.com> <quic_jjohnson@quicinc.com>
 Jeff Layton <jlayton@kernel.org> <jlayton@poochiereds.net>
 Jeff Layton <jlayton@kernel.org> <jlayton@primarydata.com>
 Jeff Layton <jlayton@kernel.org> <jlayton@redhat.com>
···
 Nicholas Piggin <npiggin@gmail.com> <npiggin@suse.de>
 Nicholas Piggin <npiggin@gmail.com> <nickpiggin@yahoo.com.au>
 Nicholas Piggin <npiggin@gmail.com> <piggin@cyberone.com.au>
+Nick Desaulniers <nick.desaulniers+lkml@gmail.com> <ndesaulniers@google.com>
 Nicolas Ferre <nicolas.ferre@microchip.com> <nicolas.ferre@atmel.com>
 Nicolas Pitre <nico@fluxnic.net> <nicolas.pitre@linaro.org>
 Nicolas Pitre <nico@fluxnic.net> <nico@linaro.org>
+1 -1
Documentation/process/embargoed-hardware-issues.rst
···
 
 Google        Kees Cook <keescook@chromium.org>
 
-LLVM          Nick Desaulniers <ndesaulniers@google.com>
+LLVM          Nick Desaulniers <nick.desaulniers+lkml@gmail.com>
 ============= ========================================================
 
 If you want your organization to be added to the ambassadors list, please
+1 -1
Documentation/translations/sp_SP/process/embargoed-hardware-issues.rst
···
 
 Google        Kees Cook <keescook@chromium.org>
 
-LLVM          Nick Desaulniers <ndesaulniers@google.com>
+LLVM          Nick Desaulniers <nick.desaulniers+lkml@gmail.com>
 ============= ========================================================
 
 Si quiere que su organización se añada a la lista de embajadores, por
+1 -1
MAINTAINERS
···
 
 CLANG/LLVM BUILD SUPPORT
 M:      Nathan Chancellor <nathan@kernel.org>
-R:      Nick Desaulniers <ndesaulniers@google.com>
+R:      Nick Desaulniers <nick.desaulniers+lkml@gmail.com>
 R:      Bill Wendling <morbo@google.com>
 R:      Justin Stitt <justinstitt@google.com>
 L:      llvm@lists.linux.dev
+4 -1
fs/proc/vmcore.c
···
         pr_warn_once("Unexpected adding of device dump\n");
         if (vmcore_open) {
                 ret = -EBUSY;
-                goto out_err;
+                goto unlock;
         }
 
         list_add_tail(&dump->list, &vmcoredd_list);
         vmcoredd_update_size(data_size);
         mutex_unlock(&vmcore_mutex);
         return 0;
+
+unlock:
+        mutex_unlock(&vmcore_mutex);
 
 out_err:
         vfree(buf);
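The bug here: the -EBUSY path jumped straight to out_err and returned with vmcore_mutex still held. A minimal userspace sketch of the same goto-unwind idiom, with pthreads and illustrative names only (not the kernel code; build with cc -pthread):

#include <errno.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

#define MAX_DUMP 4096

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static int resource_busy = 1;       /* stand-in for the vmcore_open check */

static int add_dump(size_t size)
{
    int ret = 0;
    char *buf = malloc(size);       /* set up before taking the lock */

    if (!buf)
        return -ENOMEM;

    if (size > MAX_DUMP) {
        ret = -EINVAL;
        goto out_err;               /* lock not held yet: skip the unlock */
    }

    pthread_mutex_lock(&list_lock);
    if (resource_busy) {
        ret = -EBUSY;
        goto unlock;                /* must not return with the lock held */
    }
    /* ... add buf to the list here ... */
    pthread_mutex_unlock(&list_lock);
    return 0;

unlock:
    pthread_mutex_unlock(&list_lock);
out_err:
    free(buf);                      /* common cleanup for every error path */
    return ret;
}

int main(void)
{
    printf("add_dump: %d\n", add_dump(64));  /* prints -16 (EBUSY) */
    return 0;
}

Each error label undoes exactly what has been set up at the point the goto is taken, which is why the fix adds a separate unlock: label instead of reusing out_err:.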
+1 -1
include/uapi/linux/taskstats.h
···
  */
 
 
-#define TASKSTATS_VERSION        14
+#define TASKSTATS_VERSION        15
 #define TS_COMM_LEN              32      /* should be >= TASK_COMM_LEN
                                           * in linux/sched.h */
 
+2 -1
lib/iov_iter.c
···
         struct iovec *iov = *iovp;
         ssize_t ret;
 
+        *iovp = NULL;
+
         if (compat)
                 ret = copy_compat_iovec_from_user(iov, uvec, 1);
         else
···
         ret = import_ubuf(type, iov->iov_base, iov->iov_len, i);
         if (unlikely(ret))
                 return ret;
-        *iovp = NULL;
         return i->count;
 }
 
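The underlying rule: an out-parameter that the caller may free unconditionally must be cleared before the first early return, not only on the success path. A small userspace sketch of that pattern, with hypothetical names:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* hypothetical stand-in for the fixed function: on success it hands the
 * caller a heap pointer in *out; on any failure it must leave *out NULL
 * so the caller's unconditional free(*out) can never hit foreign memory */
static int import_one(const char *src, char **out)
{
    *out = NULL;        /* before the first early return, not after the last */

    if (!src)
        return -EINVAL;

    *out = strdup(src);
    return *out ? 0 : -ENOMEM;
}

int main(void)
{
    char stack_buf[8];
    char *p = stack_buf;        /* freeing this would be a bug */
    int ret = import_one(NULL, &p);

    if (ret)
        free(p);        /* safe: p was reset to NULL by the callee */
    printf("ret=%d, p=%p\n", ret, (void *)p);
    return 0;
}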
+8 -7
lib/test_xarray.c
···
 {
         XA_STATE(xas, xa, 0);
         void *entry;
-        unsigned int order;
+        int order;
         unsigned long index = 1;
         unsigned int count = 0;
 
···
         xa_destroy(xa);
 
         index = 0;
-        for (order = XA_CHUNK_SHIFT; order > 0; order--) {
+        for (order = order_limit - 1; order >= 0; order--) {
                 XA_BUG_ON(xa, xa_store_order(xa, index, order,
                                         xa_mk_index(index), GFP_KERNEL));
                 index += 1UL << order;
···
         rcu_read_lock();
         xas_for_each(&xas, entry, ULONG_MAX) {
                 XA_BUG_ON(xa, entry != xa_mk_index(index));
-                index += 1UL << (XA_CHUNK_SHIFT - count);
+                index += 1UL << (order_limit - count - 1);
                 count++;
         }
         rcu_read_unlock();
-        XA_BUG_ON(xa, count != XA_CHUNK_SHIFT);
+        XA_BUG_ON(xa, count != order_limit);
 
         index = 0;
         count = 0;
-        xas_set(&xas, XA_CHUNK_SIZE / 2 + 1);
+        /* test unaligned index */
+        xas_set(&xas, 1 % (1UL << (order_limit - 1)));
         rcu_read_lock();
         xas_for_each(&xas, entry, ULONG_MAX) {
                 XA_BUG_ON(xa, entry != xa_mk_index(index));
-                index += 1UL << (XA_CHUNK_SHIFT - count);
+                index += 1UL << (order_limit - count - 1);
                 count++;
                 xas_pause(&xas);
         }
         rcu_read_unlock();
-        XA_BUG_ON(xa, count != XA_CHUNK_SHIFT);
+        XA_BUG_ON(xa, count != order_limit);
 
         xa_destroy(xa);
 
+1 -1
mm/hugetlb.c
···
 
         /* do node specific alloc */
         if (nid != NUMA_NO_NODE) {
-                m = memblock_alloc_try_nid_raw(huge_page_size(h), huge_page_size(h),
+                m = memblock_alloc_exact_nid_raw(huge_page_size(h), huge_page_size(h),
                                 0, MEMBLOCK_ALLOC_ACCESSIBLE, nid);
                 if (!m)
                         return 0;
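The distinction being fixed: the _try_nid variant may silently fall back to another node, while the _exact_nid variant fails instead, which is what a node-specific request wants. A toy simulation of the two policies, with hypothetical helpers and a fake two-node free-page table (not the memblock API itself):

#include <stdio.h>

#define NR_NODES 2
static long free_pages[NR_NODES] = { 0, 8 };  /* node 0 is exhausted */

/* may fall back to any node that has memory (the old behaviour) */
static int alloc_try_node(int nid)
{
    for (int n = nid, i = 0; i < NR_NODES; n = (n + 1) % NR_NODES, i++)
        if (free_pages[n] > 0) {
            free_pages[n]--;
            return n;
        }
    return -1;
}

/* fails rather than falling back (the new behaviour) */
static int alloc_exact_node(int nid)
{
    if (free_pages[nid] > 0) {
        free_pages[nid]--;
        return nid;
    }
    return -1;
}

int main(void)
{
    printf("try_nid(0)   -> node %d\n", alloc_try_node(0));   /* 1: wrong node */
    printf("exact_nid(0) -> %d\n", alloc_exact_node(0));      /* -1: honest failure */
    return 0;
}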
+33 -1
mm/kasan/report.c
···
                                 sizeof(init_thread_union.stack));
 }
 
+/*
+ * This function is invoked with report_lock (a raw_spinlock) held. A
+ * PREEMPT_RT kernel cannot call find_vm_area() as it will acquire a sleeping
+ * rt_spinlock.
+ *
+ * For !RT kernel, the PROVE_RAW_LOCK_NESTING config option will print a
+ * lockdep warning for this raw_spinlock -> spinlock dependency. This config
+ * option is enabled by default to ensure better test coverage to expose this
+ * kind of RT kernel problem. This lockdep splat, however, can be suppressed
+ * by using DEFINE_WAIT_OVERRIDE_MAP() if it serves a useful purpose and the
+ * invalid PREEMPT_RT case has been taken care of.
+ */
+static inline struct vm_struct *kasan_find_vm_area(void *addr)
+{
+        static DEFINE_WAIT_OVERRIDE_MAP(vmalloc_map, LD_WAIT_SLEEP);
+        struct vm_struct *va;
+
+        if (IS_ENABLED(CONFIG_PREEMPT_RT))
+                return NULL;
+
+        /*
+         * Suppress lockdep warning and fetch vmalloc area of the
+         * offending address.
+         */
+        lock_map_acquire_try(&vmalloc_map);
+        va = find_vm_area(addr);
+        lock_map_release(&vmalloc_map);
+        return va;
+}
+
 static void print_address_description(void *addr, u8 tag,
                                       struct kasan_report_info *info)
 {
···
         }
 
         if (is_vmalloc_addr(addr)) {
-                struct vm_struct *va = find_vm_area(addr);
+                struct vm_struct *va = kasan_find_vm_area(addr);
 
                 if (va) {
                         pr_err("The buggy address belongs to the virtual mapping at\n"
···
                         pr_err("\n");
 
                         page = vmalloc_to_page(addr);
+                } else {
+                        pr_err("The buggy address %px belongs to a vmalloc virtual mapping\n", addr);
                 }
         }
 
+10 -1
mm/madvise.c
···
                  */
                 end = vma->vm_end;
         }
-        VM_WARN_ON(start >= end);
+        /*
+         * If the memory region between start and end was
+         * originally backed by 4kB pages and then remapped to
+         * be backed by hugepages while mmap_lock was dropped,
+         * the adjustment for hugetlb vma above may have rounded
+         * end down to the start address.
+         */
+        if (start == end)
+                return 0;
+        VM_WARN_ON(start > end);
 }
 
 if (behavior == MADV_DONTNEED || behavior == MADV_DONTNEED_LOCKED)
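Why start can equal end: rounding end down to a hugepage boundary can land exactly on start. A tiny standalone demonstration of the arithmetic (a 2 MiB hugepage size is assumed for illustration):

#include <stdio.h>

#define HPAGE_SIZE (2UL << 20)
#define ALIGN_DOWN_(x, a) ((x) & ~((a) - 1))

int main(void)
{
    unsigned long start = 0x200000;          /* already 2 MiB aligned */
    unsigned long end   = 0x200000 + 4096;   /* one 4 kiB page past it */

    end = ALIGN_DOWN_(end, HPAGE_SIZE);      /* the hugetlb adjustment */
    if (start == end)
        printf("empty range after adjustment: nothing to do\n");
    else
        printf("range: %#lx-%#lx\n", start, end);
    return 0;
}

A 0-length range is a legitimate no-op after the adjustment, so the patch returns 0 for it and only warns on the truly impossible start > end.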
+1
mm/memcontrol.c
···
                 memcg_memory_event(memcg, MEMCG_OOM);
                 if (!mem_cgroup_out_of_memory(memcg, GFP_KERNEL, 0))
                         break;
+                cond_resched();
         }
 
         memcg_wb_domain_size_changed(memcg);
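The shape of the fix: a potentially long retry loop needs an explicit scheduling point, or it can monopolize the CPU that the task it is waiting on needs in order to make progress. A single-threaded userspace analogue, with sched_yield() standing in for cond_resched() and made-up numbers:

#include <sched.h>
#include <stdio.h>

static long usage = 1000, limit = 100;

/* stand-in for the OOM/reclaim step; returns 0 once usage fits the limit */
static int try_reclaim(void)
{
    usage -= 100;
    return usage > limit;
}

int main(void)
{
    /* without the yield, a loop like this can spin forever when progress
     * depends on another task getting CPU time (the dead-loop bug) */
    while (usage > limit) {
        if (!try_reclaim())
            break;
        sched_yield();      /* the added scheduling point */
    }
    printf("usage=%ld limit=%ld\n", usage, limit);
    return 0;
}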
+14 -3
mm/memory.c
···
         pmd_t pmdval;
         unsigned long start = addr;
         bool can_reclaim_pt = reclaim_pt_is_enabled(start, end, details);
-        bool direct_reclaim = false;
+        bool direct_reclaim = true;
         int nr;
 
 retry:
···
         do {
                 bool any_skipped = false;
 
-                if (need_resched())
+                if (need_resched()) {
+                        direct_reclaim = false;
                         break;
+                }
 
                 nr = do_zap_pte_range(tlb, vma, pte, addr, end, details, rss,
                                       &force_flush, &force_break, &any_skipped);
···
                         can_reclaim_pt = false;
                 if (unlikely(force_break)) {
                         addr += nr * PAGE_SIZE;
+                        direct_reclaim = false;
                         break;
                 }
         } while (pte += nr, addr += PAGE_SIZE * nr, addr != end);
 
-        if (can_reclaim_pt && addr == end)
+        /*
+         * Fast path: try to hold the pmd lock and unmap the PTE page.
+         *
+         * If the pte lock was released midway (retry case), or if the attempt
+         * to hold the pmd lock failed, then we need to recheck all pte entries
+         * to ensure they are still none, thereby preventing the pte entries
+         * from being repopulated by another thread.
+         */
+        if (can_reclaim_pt && direct_reclaim && addr == end)
                 direct_reclaim = try_get_and_clear_pmd(mm, pmd, &pmdval);
 
         add_mm_rss_vec(mm, rss);
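In miniature, the fixed control flow: assume the fast path is allowed, clear the flag on every early exit from the scan, and only take the fast path when the scan really covered the whole range. An illustrative sketch, not the kernel logic:

#include <stdbool.h>
#include <stdio.h>

int main(void)
{
    int entries[8] = { 0 };     /* all clear: a candidate for reclaim */
    bool complete = true;       /* optimistic, like direct_reclaim */
    int i;

    for (i = 0; i < 8; i++) {
        if (i == 5) {           /* simulated need_resched()/force_break */
            complete = false;
            break;
        }
        if (entries[i])
            complete = false;
    }

    if (complete && i == 8)
        printf("fast path: scan finished, safe to reclaim\n");
    else
        printf("slow path: recheck every entry first\n");
    return 0;
}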
+4 -9
mm/migrate_device.c
···
                         dst = src;
                 }
 
+                if (!folio_is_zone_device(dst))
+                        folio_add_lru(dst);
                 remove_migration_ptes(src, dst, 0);
                 folio_unlock(src);
-
-                if (folio_is_zone_device(src))
-                        folio_put(src);
-                else
-                        folio_putback_lru(src);
+                folio_put(src);
 
                 if (dst != src) {
                         folio_unlock(dst);
-                        if (folio_is_zone_device(dst))
-                                folio_put(dst);
-                        else
-                                folio_putback_lru(dst);
+                        folio_put(dst);
                 }
         }
 }
+16 -19
mm/zswap.c
···
  * main API
 **********************************/
 
-static ssize_t zswap_store_page(struct page *page,
-                                struct obj_cgroup *objcg,
-                                struct zswap_pool *pool)
+static bool zswap_store_page(struct page *page,
+                             struct obj_cgroup *objcg,
+                             struct zswap_pool *pool)
 {
         swp_entry_t page_swpentry = page_swap_entry(page);
         struct zswap_entry *entry, *old;
···
         entry = zswap_entry_cache_alloc(GFP_KERNEL, page_to_nid(page));
         if (!entry) {
                 zswap_reject_kmemcache_fail++;
-                return -EINVAL;
+                return false;
         }
 
         if (!zswap_compress(page, entry, pool))
···
 
         /*
          * The entry is successfully compressed and stored in the tree, there is
-         * no further possibility of failure. Grab refs to the pool and objcg.
-         * These refs will be dropped by zswap_entry_free() when the entry is
-         * removed from the tree.
+         * no further possibility of failure. Grab refs to the pool and objcg,
+         * charge zswap memory, and increment zswap_stored_pages.
+         * The opposite actions will be performed by zswap_entry_free()
+         * when the entry is removed from the tree.
          */
         zswap_pool_get(pool);
-        if (objcg)
+        if (objcg) {
                 obj_cgroup_get(objcg);
+                obj_cgroup_charge_zswap(objcg, entry->length);
+        }
+        atomic_long_inc(&zswap_stored_pages);
 
         /*
          * We finish initializing the entry while it's already in xarray.
···
                 zswap_lru_add(&zswap_list_lru, entry);
         }
 
-        return entry->length;
+        return true;
 
 store_failed:
         zpool_free(pool->zpool, entry->handle);
 compress_failed:
         zswap_entry_cache_free(entry);
-        return -EINVAL;
+        return false;
 }
 
 bool zswap_store(struct folio *folio)
···
         struct obj_cgroup *objcg = NULL;
         struct mem_cgroup *memcg = NULL;
         struct zswap_pool *pool;
-        size_t compressed_bytes = 0;
         bool ret = false;
         long index;
···
 
         for (index = 0; index < nr_pages; ++index) {
                 struct page *page = folio_page(folio, index);
-                ssize_t bytes;
 
-                bytes = zswap_store_page(page, objcg, pool);
-                if (bytes < 0)
+                if (!zswap_store_page(page, objcg, pool))
                         goto put_pool;
-                compressed_bytes += bytes;
         }
 
-        if (objcg) {
-                obj_cgroup_charge_zswap(objcg, compressed_bytes);
+        if (objcg)
                 count_objcg_events(objcg, ZSWPOUT, nr_pages);
-        }
 
-        atomic_long_add(nr_pages, &zswap_stored_pages);
         count_vm_events(ZSWPOUT, nr_pages);
 
         ret = true;
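The consistency rule the patch restores: account for each page at the moment it can no longer fail, rather than batching the accounting after a loop that may bail out halfway. A miniature of that idea, with hypothetical names throughout:

#include <stdbool.h>
#include <stdio.h>

static long stored_pages;       /* global stat, like zswap_stored_pages */

static bool store_page(int idx)
{
    if (idx == 2)               /* simulated compression failure */
        return false;
    /* ...insert into the tree: past this point the page is committed... */
    stored_pages++;             /* charge exactly what was committed */
    return true;
}

int main(void)
{
    int nr_pages = 4, i;

    for (i = 0; i < nr_pages; i++)
        if (!store_page(i))
            break;

    /* the stat matches the two pages actually stored, not the whole batch */
    printf("stored %d of %d pages, stat=%ld\n", i, nr_pages, stored_pages);
    return 0;
}

Batching the increment after the loop would have counted pages that were never stored whenever a mid-batch page failed, which is the inconsistency the commit title refers to.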
+16 -16
tools/accounting/getdelays.c
···
 
 static void print_delayacct(struct taskstats *t)
 {
-        printf("\n\nCPU %15s%15s%15s%15s%15s%15s\n"
-               " %15llu%15llu%15llu%15llu%15.3fms%13.6fms\n"
-               "IO %15s%15s%15s%15s\n"
-               " %15llu%15llu%15.3fms%13.6fms\n"
-               "SWAP %15s%15s%15s%15s\n"
-               " %15llu%15llu%15.3fms%13.6fms\n"
-               "RECLAIM %12s%15s%15s%15s\n"
-               " %15llu%15llu%15.3fms%13.6fms\n"
-               "THRASHING%12s%15s%15s%15s\n"
-               " %15llu%15llu%15.3fms%13.6fms\n"
-               "COMPACT %12s%15s%15s%15s\n"
-               " %15llu%15llu%15.3fms%13.6fms\n"
-               "WPCOPY %12s%15s%15s%15s\n"
-               " %15llu%15llu%15.3fms%13.6fms\n"
-               "IRQ %15s%15s%15s%15s\n"
-               " %15llu%15llu%15.3fms%13.6fms\n",
+        printf("\n\nCPU %15s%15s%15s%15s%15s%15s%15s\n"
+               " %15llu%15llu%15llu%15llu%15.3fms%13.6fms%13.6fms\n"
+               "IO %15s%15s%15s%15s%15s\n"
+               " %15llu%15llu%15.3fms%13.6fms%13.6fms\n"
+               "SWAP %15s%15s%15s%15s%15s\n"
+               " %15llu%15llu%15.3fms%13.6fms%13.6fms\n"
+               "RECLAIM %12s%15s%15s%15s%15s\n"
+               " %15llu%15llu%15.3fms%13.6fms%13.6fms\n"
+               "THRASHING%12s%15s%15s%15s%15s\n"
+               " %15llu%15llu%15.3fms%13.6fms%13.6fms\n"
+               "COMPACT %12s%15s%15s%15s%15s\n"
+               " %15llu%15llu%15.3fms%13.6fms%13.6fms\n"
+               "WPCOPY %12s%15s%15s%15s%15s\n"
+               " %15llu%15llu%15.3fms%13.6fms%13.6fms\n"
+               "IRQ %15s%15s%15s%15s%15s\n"
+               " %15llu%15llu%15.3fms%13.6fms%13.6fms\n",
                "count", "real total", "virtual total",
                "delay total", "delay average", "delay max", "delay min",
                (unsigned long long)t->cpu_count,
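The invariant behind this diff: the argument list already supplies a "delay min" value for every row, so each header row needs one more %15s and each value row one more conversion of the same total width (%13.6f plus the literal "ms" is 15 characters, matching %15s). A cut-down, runnable illustration with made-up values:

#include <stdio.h>

int main(void)
{
    unsigned long long count = 42;
    double delay_max_ms = 1.25, delay_min_ms = 0.031;

    /* one header column per value conversion, widths matched 15-for-15 */
    printf("CPU %15s%15s%15s\n"
           "    %15llu%13.6fms%13.6fms\n",
           "count", "delay max", "delay min",
           count, delay_max_ms, delay_min_ms);
    return 0;
}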
+2 -2
tools/mm/page-types.c
···
 #include <signal.h>
 #include <inttypes.h>
 #include <sys/types.h>
-#include <sys/errno.h>
-#include <sys/fcntl.h>
+#include <errno.h>
+#include <fcntl.h>
 #include <sys/mount.h>
 #include <sys/statfs.h>
 #include <sys/mman.h>
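POSIX puts errno in <errno.h> and the open(2) flags in <fcntl.h>; the <sys/...> spellings are non-standard compatibility aliases that musl-libc flags with build warnings. A minimal program using the portable headers:

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
    int fd = open("/nonexistent", O_RDONLY);    /* O_RDONLY from <fcntl.h> */

    if (fd < 0)
        printf("open failed: %s (errno=%d)\n", strerror(errno), errno);
    return 0;
}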
+1 -1
tools/testing/selftests/mm/run_vmtests.sh
···
         if test_selected ${CATEGORY}; then
                 # On memory constrainted systems some tests can fail to allocate hugepages.
                 # perform some cleanup before the test for a higher success rate.
-                if [ ${CATEGORY} == "thp" ] | [ ${CATEGORY} == "hugetlb" ]; then
+                if [ ${CATEGORY} == "thp" -o ${CATEGORY} == "hugetlb" ]; then
                         echo 3 > /proc/sys/vm/drop_caches
                         sleep 2
                         echo 1 > /proc/sys/vm/compact_memory