Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

ARM: move vmalloc() lazy-page table population

Split the vmalloc() lazy-page table population from
do_translation_fault() into a new vmalloc_fault() function.

Signed-off-by: Russell King (Oracle) <rmk+kernel@armlinux.org.uk>

+68 -58
+68 -58
arch/arm/mm/fault.c
··· 261 261 } 262 262 #endif 263 263 264 + /* 265 + * Handle a vmalloc fault, copying the non-leaf page table entries from 266 + * init_mm.pgd. Any kernel context can trigger this, so we must not sleep 267 + * or enable interrupts. Having two CPUs execute this for the same page is 268 + * no problem, we'll just copy the same data twice. 269 + * 270 + * Returns false on failure. 271 + */ 272 + static bool __kprobes __maybe_unused vmalloc_fault(unsigned long addr) 273 + { 274 + unsigned int index; 275 + pgd_t *pgd, *pgd_k; 276 + p4d_t *p4d, *p4d_k; 277 + pud_t *pud, *pud_k; 278 + pmd_t *pmd, *pmd_k; 279 + 280 + index = pgd_index(addr); 281 + 282 + pgd = cpu_get_pgd() + index; 283 + pgd_k = init_mm.pgd + index; 284 + 285 + p4d = p4d_offset(pgd, addr); 286 + p4d_k = p4d_offset(pgd_k, addr); 287 + 288 + if (p4d_none(*p4d_k)) 289 + return false; 290 + if (!p4d_present(*p4d)) 291 + set_p4d(p4d, *p4d_k); 292 + 293 + pud = pud_offset(p4d, addr); 294 + pud_k = pud_offset(p4d_k, addr); 295 + 296 + if (pud_none(*pud_k)) 297 + return false; 298 + if (!pud_present(*pud)) 299 + set_pud(pud, *pud_k); 300 + 301 + pmd = pmd_offset(pud, addr); 302 + pmd_k = pmd_offset(pud_k, addr); 303 + 304 + #ifdef CONFIG_ARM_LPAE 305 + /* 306 + * Only one hardware entry per PMD with LPAE. 307 + */ 308 + index = 0; 309 + #else 310 + /* 311 + * On ARM one Linux PGD entry contains two hardware entries (see page 312 + * tables layout in pgtable.h). We normally guarantee that we always 313 + * fill both L1 entries. But create_mapping() doesn't follow the rule. 314 + * It can create individual L1 entries, so here we have to call 315 + * pmd_none() check for the entry really corresponded to address, not 316 + * for the first of pair. 
317 + */ 318 + index = (addr >> SECTION_SHIFT) & 1; 319 + #endif 320 + if (pmd_none(pmd_k[index])) 321 + return false; 322 + 323 + copy_pmd(pmd, pmd_k); 324 + 325 + return true; 326 + } 327 + 264 328 static int __kprobes 265 329 do_kernel_address_page_fault(struct mm_struct *mm, unsigned long addr, 266 330 unsigned int fsr, struct pt_regs *regs) ··· 560 496 * directly to do_kernel_address_page_fault() to handle. 561 497 * 562 498 * Otherwise, we're probably faulting in the vmalloc() area, so try to fix 563 - * that up. Note that we must not take any locks or enable interrupts in 564 - * this case. 499 + * that up via vmalloc_fault(). 565 500 * 566 - * If vmalloc() fixup fails, that means the non-leaf page tables did not 501 + * If vmalloc_fault() fails, that means the non-leaf page tables did not 567 502 * contain an entry for this address, so handle this via 568 503 * do_kernel_address_page_fault(). 569 504 */ ··· 571 508 do_translation_fault(unsigned long addr, unsigned int fsr, 572 509 struct pt_regs *regs) 573 510 { 574 - unsigned int index; 575 - pgd_t *pgd, *pgd_k; 576 - p4d_t *p4d, *p4d_k; 577 - pud_t *pud, *pud_k; 578 - pmd_t *pmd, *pmd_k; 579 - 580 511 if (addr < TASK_SIZE) 581 512 return do_page_fault(addr, fsr, regs); 582 513 583 - if (user_mode(regs)) 584 - goto bad_area; 514 + if (!user_mode(regs) && vmalloc_fault(addr)) 515 + return 0; 585 516 586 - index = pgd_index(addr); 587 - 588 - pgd = cpu_get_pgd() + index; 589 - pgd_k = init_mm.pgd + index; 590 - 591 - p4d = p4d_offset(pgd, addr); 592 - p4d_k = p4d_offset(pgd_k, addr); 593 - 594 - if (p4d_none(*p4d_k)) 595 - goto bad_area; 596 - if (!p4d_present(*p4d)) 597 - set_p4d(p4d, *p4d_k); 598 - 599 - pud = pud_offset(p4d, addr); 600 - pud_k = pud_offset(p4d_k, addr); 601 - 602 - if (pud_none(*pud_k)) 603 - goto bad_area; 604 - if (!pud_present(*pud)) 605 - set_pud(pud, *pud_k); 606 - 607 - pmd = pmd_offset(pud, addr); 608 - pmd_k = pmd_offset(pud_k, addr); 609 - 610 - #ifdef CONFIG_ARM_LPAE 611 - /* 
612 - * Only one hardware entry per PMD with LPAE. 613 - */ 614 - index = 0; 615 - #else 616 - /* 617 - * On ARM one Linux PGD entry contains two hardware entries (see page 618 - * tables layout in pgtable.h). We normally guarantee that we always 619 - * fill both L1 entries. But create_mapping() doesn't follow the rule. 620 - * It can create inidividual L1 entries, so here we have to call 621 - * pmd_none() check for the entry really corresponded to address, not 622 - * for the first of pair. 623 - */ 624 - index = (addr >> SECTION_SHIFT) & 1; 625 - #endif 626 - if (pmd_none(pmd_k[index])) 627 - goto bad_area; 628 - 629 - copy_pmd(pmd, pmd_k); 630 - return 0; 631 - 632 - bad_area: 633 517 do_kernel_address_page_fault(current->mm, addr, fsr, regs); 634 518 635 519 return 0;