Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

Rework ptep_set_access_flags and fix sun4c

Some changes done a while ago to avoid pounding on ptep_set_access_flags and
update_mmu_cache in some race situations break sun4c which requires
update_mmu_cache() to always be called on minor faults.

This patch reworks ptep_set_access_flags() semantics, implementations and
callers so that it's now responsible for returning whether an update is
necessary or not (basically whether the PTE actually changed). This allows
fixing the sparc implementation to always return 1 on sun4c.

[akpm@linux-foundation.org: fixes, cleanups]
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Hugh Dickins <hugh@veritas.com>
Cc: David Miller <davem@davemloft.net>
Cc: Mark Fortescue <mark@mtfhpc.demon.co.uk>
Acked-by: William Lee Irwin III <wli@holomorphy.com>
Cc: "Luck, Tony" <tony.luck@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

Authored by Benjamin Herrenschmidt and committed by Linus Torvalds
8dab5241 679ce0ac
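Before the per-file diffs, a rough orientation: each architecture's ptep_set_access_flags() is converted from a do { } while (0) statement into a GCC statement expression ({ ... }) so the macro can yield a value — whether the PTE actually changed — and the callers in mm/ only invoke update_mmu_cache()/lazy_mmu_prot_update() when it reports a change. The toy user-space program below is only a sketch of that calling convention, not kernel code: the toy_* names, the always_report_changed flag, and main() are invented stand-ins.

/*
 * Toy user-space model of the reworked ptep_set_access_flags() contract.
 * All toy_* names are invented for illustration; build with: cc -o toy toy.c
 */
#include <stdbool.h>
#include <stdio.h>

typedef unsigned long toy_pte_t;

static toy_pte_t page_table_entry;      /* stands in for *ptep */
static bool always_report_changed;      /* models sparc_cpu_model == sun4c */

static void toy_flush_tlb_page(void)   { puts("  flush_tlb_page()"); }
static void toy_update_mmu_cache(void) { puts("  update_mmu_cache()"); }

/*
 * New contract: write the entry only if it differs from what the page table
 * already holds, and tell the caller whether anything changed so it knows
 * whether update_mmu_cache() is still needed.  A sun4c-like CPU reports a
 * change unconditionally, because it depends on update_mmu_cache() being
 * called on every minor fault.
 */
static int toy_ptep_set_access_flags(toy_pte_t *ptep, toy_pte_t entry)
{
	int changed = (*ptep != entry);

	if (changed) {
		*ptep = entry;
		toy_flush_tlb_page();
	}
	return always_report_changed || changed;
}

int main(void)
{
	toy_pte_t entry = 0x5;          /* pretend young + dirty bits are set */

	page_table_entry = 0x5;         /* PTE already up to date */

	puts("generic CPU, PTE unchanged:");
	if (toy_ptep_set_access_flags(&page_table_entry, entry))
		toy_update_mmu_cache();
	else
		puts("  nothing to do, update_mmu_cache() skipped");

	puts("sun4c-like CPU, PTE unchanged:");
	always_report_changed = true;
	if (toy_ptep_set_access_flags(&page_table_entry, entry))
		toy_update_mmu_cache();  /* always reached, as the sun4c fix requires */

	return 0;
}

In the real patch the sparc macro plays the role of always_report_changed by evaluating (sparc_cpu_model == sun4c) || __changed, which is what forces update_mmu_cache() on every minor fault on sun4c.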

+92 -46
+12 -5
include/asm-generic/pgtable.h
@@ -27,13 +27,20 @@
  * Largely same as above, but only sets the access flags (dirty,
  * accessed, and writable). Furthermore, we know it always gets set
  * to a "more permissive" setting, which allows most architectures
- * to optimize this.
+ * to optimize this. We return whether the PTE actually changed, which
+ * in turn instructs the caller to do things like update__mmu_cache.
+ * This used to be done in the caller, but sparc needs minor faults to
+ * force that call on sun4c so we changed this macro slightly
  */
 #define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \
-do { \
-	set_pte_at((__vma)->vm_mm, (__address), __ptep, __entry); \
-	flush_tlb_page(__vma, __address); \
-} while (0)
+({ \
+	int __changed = !pte_same(*(__ptep), __entry); \
+	if (__changed) { \
+		set_pte_at((__vma)->vm_mm, (__address), __ptep, __entry); \
+		flush_tlb_page(__vma, __address); \
+	} \
+	__changed; \
+})
 #endif
 
 #ifndef __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
+5 -3
include/asm-i386/pgtable.h
@@ -285,13 +285,15 @@
  */
 #define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
 #define ptep_set_access_flags(vma, address, ptep, entry, dirty) \
-do { \
-	if (dirty) { \
+({ \
+	int __changed = !pte_same(*(ptep), entry); \
+	if (__changed && dirty) { \
 		(ptep)->pte_low = (entry).pte_low; \
 		pte_update_defer((vma)->vm_mm, (address), (ptep)); \
 		flush_tlb_page(vma, address); \
 	} \
-} while (0)
+	__changed; \
+})
 
 #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY
 #define ptep_test_and_clear_dirty(vma, addr, ptep) ({ \
+16 -9
include/asm-ia64/pgtable.h
@@ -533,16 +533,23 @@
  * daccess_bit in ivt.S).
  */
 #ifdef CONFIG_SMP
-# define ptep_set_access_flags(__vma, __addr, __ptep, __entry, __safely_writable) \
-do { \
-	if (__safely_writable) { \
-		set_pte(__ptep, __entry); \
-		flush_tlb_page(__vma, __addr); \
-	} \
-} while (0)
+# define ptep_set_access_flags(__vma, __addr, __ptep, __entry, __safely_writable) \
+({ \
+	int __changed = !pte_same(*(__ptep), __entry); \
+	if (__changed && __safely_writable) { \
+		set_pte(__ptep, __entry); \
+		flush_tlb_page(__vma, __addr); \
+	} \
+	__changed; \
+})
 #else
-# define ptep_set_access_flags(__vma, __addr, __ptep, __entry, __safely_writable) \
-	ptep_establish(__vma, __addr, __ptep, __entry)
+# define ptep_set_access_flags(__vma, __addr, __ptep, __entry, __safely_writable) \
+({ \
+	int __changed = !pte_same(*(__ptep), __entry); \
+	if (__changed) \
+		ptep_establish(__vma, __addr, __ptep, __entry); \
+	__changed; \
+})
 #endif
 
 # ifdef CONFIG_VIRTUAL_MEM_MAP
+8 -4
include/asm-powerpc/pgtable-ppc32.h
@@ -673,10 +673,14 @@
 }
 
 #define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \
-	do { \
-		__ptep_set_access_flags(__ptep, __entry, __dirty); \
-		flush_tlb_page_nohash(__vma, __address); \
-	} while(0)
+	({ \
+		int __changed = !pte_same(*(__ptep), __entry); \
+		if (__changed) { \
+			__ptep_set_access_flags(__ptep, __entry, __dirty); \
+			flush_tlb_page_nohash(__vma, __address); \
+		} \
+		__changed; \
+	})
 
 /*
  * Macro to mark a page protection value as "uncacheable".
+8 -4
include/asm-powerpc/pgtable-ppc64.h
@@ -413,10 +413,14 @@
 	:"cc");
 }
 #define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \
-	do { \
-		__ptep_set_access_flags(__ptep, __entry, __dirty); \
-		flush_tlb_page_nohash(__vma, __address); \
-	} while(0)
+	({ \
+		int __changed = !pte_same(*(__ptep), __entry); \
+		if (__changed) { \
+			__ptep_set_access_flags(__ptep, __entry, __dirty); \
+			flush_tlb_page_nohash(__vma, __address); \
+		} \
+		__changed; \
+	})
 
 /*
  * Macro to mark a page protection value as "uncacheable".
+8 -4
include/asm-ppc/pgtable.h
@@ -694,10 +694,14 @@
 }
 
 #define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \
-	do { \
-		__ptep_set_access_flags(__ptep, __entry, __dirty); \
-		flush_tlb_page_nohash(__vma, __address); \
-	} while(0)
+	({ \
+		int __changed = !pte_same(*(__ptep), __entry); \
+		if (__changed) { \
+			__ptep_set_access_flags(__ptep, __entry, __dirty); \
+			flush_tlb_page_nohash(__vma, __address); \
+		} \
+		__changed; \
+	})
 
 /*
  * Macro to mark a page protection value as "uncacheable".
+6 -1
include/asm-s390/pgtable.h
@@ -744,7 +744,12 @@
 }
 
 #define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \
-	ptep_establish(__vma, __address, __ptep, __entry)
+	({ \
+		int __changed = !pte_same(*(__ptep), __entry); \
+		if (__changed) \
+			ptep_establish(__vma, __address, __ptep, __entry); \
+		__changed; \
+	})
 
 /*
  * Test and clear dirty bit in storage key.
+11
include/asm-sparc/pgtable.h
@@ -446,6 +446,17 @@
 #define GET_IOSPACE(pfn)	(pfn >> (BITS_PER_LONG - 4))
 #define GET_PFN(pfn)		(pfn & 0x0fffffffUL)
 
+#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
+#define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \
+({ \
+	int __changed = !pte_same(*(__ptep), __entry); \
+	if (__changed) { \
+		set_pte_at((__vma)->vm_mm, (__address), __ptep, __entry); \
+		flush_tlb_page(__vma, __address); \
+	} \
+	(sparc_cpu_model == sun4c) || __changed; \
+})
+
 #include <asm-generic/pgtable.h>
 
 #endif /* !(__ASSEMBLY__) */
+8 -6
include/asm-x86_64/pgtable.h
@@ -395,12 +395,14 @@
  * bit at the same time. */
 #define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
 #define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \
-	do { \
-		if (__dirty) { \
-			set_pte(__ptep, __entry); \
-			flush_tlb_page(__vma, __address); \
-		} \
-	} while (0)
+	({ \
+		int __changed = !pte_same(*(__ptep), __entry); \
+		if (__changed && __dirty) { \
+			set_pte(__ptep, __entry); \
+			flush_tlb_page(__vma, __address); \
+		} \
+		__changed; \
+	})
 
 /* Encode and de-code a swap entry */
 #define __swp_type(x)		(((x).val >> 1) & 0x3f)
+4 -3
mm/hugetlb.c
@@ -326,9 +326,10 @@
 	pte_t entry;
 
 	entry = pte_mkwrite(pte_mkdirty(*ptep));
-	ptep_set_access_flags(vma, address, ptep, entry, 1);
-	update_mmu_cache(vma, address, entry);
-	lazy_mmu_prot_update(entry);
+	if (ptep_set_access_flags(vma, address, ptep, entry, 1)) {
+		update_mmu_cache(vma, address, entry);
+		lazy_mmu_prot_update(entry);
+	}
 }
+6 -7
mm/memory.c
@@ -1691,9 +1691,10 @@
 			flush_cache_page(vma, address, pte_pfn(orig_pte));
 			entry = pte_mkyoung(orig_pte);
 			entry = maybe_mkwrite(pte_mkdirty(entry), vma);
-			ptep_set_access_flags(vma, address, page_table, entry, 1);
-			update_mmu_cache(vma, address, entry);
-			lazy_mmu_prot_update(entry);
+			if (ptep_set_access_flags(vma, address, page_table, entry,1)) {
+				update_mmu_cache(vma, address, entry);
+				lazy_mmu_prot_update(entry);
+			}
 			ret |= VM_FAULT_WRITE;
 			goto unlock;
 		}
@@ -2526,10 +2525,9 @@
 		pte_t *pte, pmd_t *pmd, int write_access)
 {
 	pte_t entry;
-	pte_t old_entry;
 	spinlock_t *ptl;
 
-	old_entry = entry = *pte;
+	entry = *pte;
 	if (!pte_present(entry)) {
 		if (pte_none(entry)) {
 			if (vma->vm_ops) {
@@ -2561,8 +2561,7 @@
 			entry = pte_mkdirty(entry);
 	}
 	entry = pte_mkyoung(entry);
-	if (!pte_same(old_entry, entry)) {
-		ptep_set_access_flags(vma, address, pte, entry, write_access);
+	if (ptep_set_access_flags(vma, address, pte, entry, write_access)) {
 		update_mmu_cache(vma, address, entry);
 		lazy_mmu_prot_update(entry);
 	} else {