Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

Merge tag 'x86_urgent_for_v5.15_rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 fixes from Borislav Petkov:

- Prevent an infinite loop in the MCE recovery on return to user space,
which was caused by a second MCE queueing work for the same page and
thereby creating a circular work list.

- Make kern_addr_valid() handle existing PMD entries, which are marked
not present in the higher level page table, correctly instead of
blindly dereferencing them.

- Pass a valid address to sanitize_phys(). This was caused by the
mixture of inclusive and exclusive ranges. memtype_reserve() expects
'end' to be exclusive, but sanitize_phys() wants it inclusive. This
worked so far, but with end being the end of the physical address
space the failure is exposed.

- Increase the maximum supported GPIO numbers for 64bit. Newer SoCs
exceed the previous maximum.

* tag 'x86_urgent_for_v5.15_rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
x86/mce: Avoid infinite loop for copy from user recovery
x86/mm: Fix kern_addr_valid() to cope with existing but not present entries
x86/platform: Increase maximum GPIO number for X86_64
x86/pat: Pass valid address to sanitize_phys()

+47 -15
+5
arch/x86/Kconfig
··· 339 339 config ARCH_HIBERNATION_POSSIBLE 340 340 def_bool y 341 341 342 + config ARCH_NR_GPIO 343 + int 344 + default 1024 if X86_64 345 + default 512 346 + 342 347 config ARCH_SUSPEND_POSSIBLE 343 348 def_bool y 344 349
+32 -11
arch/x86/kernel/cpu/mce/core.c
··· 1253 1253 1254 1254 static void kill_me_now(struct callback_head *ch) 1255 1255 { 1256 + struct task_struct *p = container_of(ch, struct task_struct, mce_kill_me); 1257 + 1258 + p->mce_count = 0; 1256 1259 force_sig(SIGBUS); 1257 1260 } 1258 1261 ··· 1265 1262 int flags = MF_ACTION_REQUIRED; 1266 1263 int ret; 1267 1264 1265 + p->mce_count = 0; 1268 1266 pr_err("Uncorrected hardware memory error in user-access at %llx", p->mce_addr); 1269 1267 1270 1268 if (!p->mce_ripv) ··· 1294 1290 } 1295 1291 } 1296 1292 1297 - static void queue_task_work(struct mce *m, int kill_current_task) 1293 + static void queue_task_work(struct mce *m, char *msg, int kill_current_task) 1298 1294 { 1299 - current->mce_addr = m->addr; 1300 - current->mce_kflags = m->kflags; 1301 - current->mce_ripv = !!(m->mcgstatus & MCG_STATUS_RIPV); 1302 - current->mce_whole_page = whole_page(m); 1295 + int count = ++current->mce_count; 1303 1296 1304 - if (kill_current_task) 1305 - current->mce_kill_me.func = kill_me_now; 1306 - else 1307 - current->mce_kill_me.func = kill_me_maybe; 1297 + /* First call, save all the details */ 1298 + if (count == 1) { 1299 + current->mce_addr = m->addr; 1300 + current->mce_kflags = m->kflags; 1301 + current->mce_ripv = !!(m->mcgstatus & MCG_STATUS_RIPV); 1302 + current->mce_whole_page = whole_page(m); 1303 + 1304 + if (kill_current_task) 1305 + current->mce_kill_me.func = kill_me_now; 1306 + else 1307 + current->mce_kill_me.func = kill_me_maybe; 1308 + } 1309 + 1310 + /* Ten is likely overkill. 
Don't expect more than two faults before task_work() */ 1311 + if (count > 10) 1312 + mce_panic("Too many consecutive machine checks while accessing user data", m, msg); 1313 + 1314 + /* Second or later call, make sure page address matches the one from first call */ 1315 + if (count > 1 && (current->mce_addr >> PAGE_SHIFT) != (m->addr >> PAGE_SHIFT)) 1316 + mce_panic("Consecutive machine checks to different user pages", m, msg); 1317 + 1318 + /* Do not call task_work_add() more than once */ 1319 + if (count > 1) 1320 + return; 1308 1321 1309 1322 task_work_add(current, &current->mce_kill_me, TWA_RESUME); 1310 1323 } ··· 1459 1438 /* If this triggers there is no way to recover. Die hard. */ 1460 1439 BUG_ON(!on_thread_stack() || !user_mode(regs)); 1461 1440 1462 - queue_task_work(&m, kill_current_task); 1441 + queue_task_work(&m, msg, kill_current_task); 1463 1442 1464 1443 } else { 1465 1444 /* ··· 1477 1456 } 1478 1457 1479 1458 if (m.kflags & MCE_IN_KERNEL_COPYIN) 1480 - queue_task_work(&m, kill_current_task); 1459 + queue_task_work(&m, msg, kill_current_task); 1481 1460 } 1482 1461 out: 1483 1462 mce_wrmsrl(MSR_IA32_MCG_STATUS, 0);
+3 -3
arch/x86/mm/init_64.c
··· 1432 1432 return 0; 1433 1433 1434 1434 p4d = p4d_offset(pgd, addr); 1435 - if (p4d_none(*p4d)) 1435 + if (!p4d_present(*p4d)) 1436 1436 return 0; 1437 1437 1438 1438 pud = pud_offset(p4d, addr); 1439 - if (pud_none(*pud)) 1439 + if (!pud_present(*pud)) 1440 1440 return 0; 1441 1441 1442 1442 if (pud_large(*pud)) 1443 1443 return pfn_valid(pud_pfn(*pud)); 1444 1444 1445 1445 pmd = pmd_offset(pud, addr); 1446 - if (pmd_none(*pmd)) 1446 + if (!pmd_present(*pmd)) 1447 1447 return 0; 1448 1448 1449 1449 if (pmd_large(*pmd))
+6 -1
arch/x86/mm/pat/memtype.c
··· 583 583 int err = 0; 584 584 585 585 start = sanitize_phys(start); 586 - end = sanitize_phys(end); 586 + 587 + /* 588 + * The end address passed into this function is exclusive, but 589 + * sanitize_phys() expects an inclusive address. 590 + */ 591 + end = sanitize_phys(end - 1) + 1; 587 592 if (start >= end) { 588 593 WARN(1, "%s failed: [mem %#010Lx-%#010Lx], req %s\n", __func__, 589 594 start, end - 1, cattr_name(req_type));
+1
include/linux/sched.h
··· 1471 1471 mce_whole_page : 1, 1472 1472 __mce_reserved : 62; 1473 1473 struct callback_head mce_kill_me; 1474 + int mce_count; 1474 1475 #endif 1475 1476 1476 1477 #ifdef CONFIG_KRETPROBES