Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

Merge branch 'x86-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip

* 'x86-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
x86: ce4100: Set pci ops via callback instead of module init
x86/mm: Fix pgd_lock deadlock
x86/mm: Handle mm_fault_error() in kernel space
x86: Don't check for BIOS corruption in first 64K when there's no need to
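Most of the diff below is the pgd_lock deadlock fix: every pgd_lock user is converted from spin_lock_irqsave() to plain spin_lock(). The per-mm page_table_lock (nested inside pgd_lock for Xen's sake) must never be taken with interrupts disabled, because a CPU that holds page_table_lock while waiting for TLB-flush IPIs can deadlock against a CPU spinning for that lock with IRQs off. Below is a minimal userspace sketch of that inversion, with a pthread mutex and a flag standing in for the kernel spinlock and the IPI handshake; all names are illustrative, not kernel API:

/* deadlock_sketch.c -- illustrative only; build: cc -std=c11 deadlock_sketch.c -lpthread */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t page_table_lock = PTHREAD_MUTEX_INITIALIZER;
static atomic_int ipi_acked;		/* stands in for the TLB-flush IPI ack */

static void *cpu0(void *arg)		/* holds the page-table lock, waits for the "IPI" */
{
	(void)arg;
	pthread_mutex_lock(&page_table_lock);
	/* flush_tlb_others(): spin until the other CPU acks our IPI.
	 * Unlike the kernel, give up after ~2s so the demo terminates. */
	for (int i = 0; i < 20 && !atomic_load(&ipi_acked); i++)
		usleep(100 * 1000);
	if (!atomic_load(&ipi_acked))
		puts("cpu0: IPI never acked -- in the kernel this spins forever");
	pthread_mutex_unlock(&page_table_lock);
	return NULL;
}

static void *cpu1(void *arg)		/* models a CPU with "IRQs off", blocked on the lock */
{
	(void)arg;
	/* With the old spin_lock_irqsave(&pgd_lock, ...) callers, this thread
	 * models a CPU that disabled IRQs and then needs page_table_lock: it
	 * cannot run the IPI handler until it gets the lock, and cpu0 will
	 * not release the lock until the IPI is handled. */
	pthread_mutex_lock(&page_table_lock);
	atomic_store(&ipi_acked, 1);	/* the "IRQ handler" runs too late */
	pthread_mutex_unlock(&page_table_lock);
	return NULL;
}

int main(void)
{
	pthread_t a, b;
	pthread_create(&a, NULL, cpu0, NULL);
	usleep(10 * 1000);		/* let cpu0 take the lock first */
	pthread_create(&b, NULL, cpu1, NULL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	return 0;
}

Dropping the irqsave variants is safe because, after this series, pgd_lock is only ever taken in process context.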

9 files changed: +45 -37
+6
arch/x86/include/asm/ce4100.h
+#ifndef _ASM_CE4100_H_
+#define _ASM_CE4100_H_
+
+int ce4100_pci_init(void);
+
+#endif
+4 -4
arch/x86/kernel/check.c
···
 		addr += size;
 	}
 
-	printk(KERN_INFO "Scanning %d areas for low memory corruption\n",
-	       num_scan_areas);
+	if (num_scan_areas)
+		printk(KERN_INFO "Scanning %d areas for low memory corruption\n", num_scan_areas);
 }
 
···
 {
 	check_for_bios_corruption();
 	schedule_delayed_work(&bios_check_work,
-		round_jiffies_relative(corruption_check_period*HZ));
+			round_jiffies_relative(corruption_check_period*HZ));
 }
 
 static int start_periodic_check_for_corruption(void)
 {
-	if (!memory_corruption_check || corruption_check_period == 0)
+	if (!num_scan_areas || !memory_corruption_check || corruption_check_period == 0)
 		return 0;
 
 	printk(KERN_INFO "Scanning for low memory corruption every %d seconds\n",
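For reference, the low-memory scan guarded here is driven by boot parameters; with the change above, even an enabled check no longer prints the boot message or arms the periodic timer when zero scan areas were actually reserved. The knobs (from Documentation/kernel-parameters.txt; defaults shown are the usual ones, verify against your tree):

memory_corruption_check=1		# enable/disable the low-memory corruption scan
memory_corruption_check_period=60	# seconds between rescans; 0 = check once at boot
memory_corruption_check_size=64K	# amount of low memory to reserve and scan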
+10 -4
arch/x86/mm/fault.c
···
 	for (address = VMALLOC_START & PMD_MASK;
 	     address >= TASK_SIZE && address < FIXADDR_TOP;
 	     address += PMD_SIZE) {
-
-		unsigned long flags;
 		struct page *page;
 
-		spin_lock_irqsave(&pgd_lock, flags);
+		spin_lock(&pgd_lock);
 		list_for_each_entry(page, &pgd_list, lru) {
 			spinlock_t *pgt_lock;
 			pmd_t *ret;
 
+			/* the pgt_lock only for Xen */
 			pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
 
 			spin_lock(pgt_lock);
···
 			if (!ret)
 				break;
 		}
-		spin_unlock_irqrestore(&pgd_lock, flags);
+		spin_unlock(&pgd_lock);
 	}
 }
 
···
 		unsigned long address, unsigned int fault)
 {
 	if (fault & VM_FAULT_OOM) {
+		/* Kernel mode? Handle exceptions or die: */
+		if (!(error_code & PF_USER)) {
+			up_read(&current->mm->mmap_sem);
+			no_context(regs, error_code, address);
+			return;
+		}
+
 		out_of_memory(regs, error_code, address);
 	} else {
 		if (fault & (VM_FAULT_SIGBUS|VM_FAULT_HWPOISON|
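PF_USER, tested in the new mm_fault_error() branch, is one of the hardware-defined bits of the x86 page-fault error code. For context, the era's arch/x86/mm/fault.c defines them roughly as follows (reproduced from memory, so verify against your tree):

enum x86_pf_error_code {
	PF_PROT		= 1 << 0,	/* 0: no page found, 1: protection fault */
	PF_WRITE	= 1 << 1,	/* 0: read access,   1: write access */
	PF_USER		= 1 << 2,	/* 0: kernel-mode,   1: user-mode access */
	PF_RSVD		= 1 << 3,	/* reserved bit was set in a page-table entry */
	PF_INSTR	= 1 << 4,	/* fault was an instruction fetch */
};

So for a kernel-mode fault (PF_USER clear) that ends in VM_FAULT_OOM, the fix drops mmap_sem and goes through no_context(), which consults the exception tables and otherwise oopses, instead of falling into the user-oriented OOM path.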
+3 -3
arch/x86/mm/init_64.c
···
 
 	for (address = start; address <= end; address += PGDIR_SIZE) {
 		const pgd_t *pgd_ref = pgd_offset_k(address);
-		unsigned long flags;
 		struct page *page;
 
 		if (pgd_none(*pgd_ref))
 			continue;
 
-		spin_lock_irqsave(&pgd_lock, flags);
+		spin_lock(&pgd_lock);
 		list_for_each_entry(page, &pgd_list, lru) {
 			pgd_t *pgd;
 			spinlock_t *pgt_lock;
 
 			pgd = (pgd_t *)page_address(page) + pgd_index(address);
+			/* the pgt_lock only for Xen */
 			pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
 			spin_lock(pgt_lock);
···
 
 			spin_unlock(pgt_lock);
 		}
-		spin_unlock_irqrestore(&pgd_lock, flags);
+		spin_unlock(&pgd_lock);
 	}
 }
 
+8 -10
arch/x86/mm/pageattr.c
···
 
 void update_page_count(int level, unsigned long pages)
 {
-	unsigned long flags;
-
 	/* Protect against CPA */
-	spin_lock_irqsave(&pgd_lock, flags);
+	spin_lock(&pgd_lock);
 	direct_pages_count[level] += pages;
-	spin_unlock_irqrestore(&pgd_lock, flags);
+	spin_unlock(&pgd_lock);
 }
 
 static void split_page_count(int level)
···
 try_preserve_large_page(pte_t *kpte, unsigned long address,
 			struct cpa_data *cpa)
 {
-	unsigned long nextpage_addr, numpages, pmask, psize, flags, addr, pfn;
+	unsigned long nextpage_addr, numpages, pmask, psize, addr, pfn;
 	pte_t new_pte, old_pte, *tmp;
 	pgprot_t old_prot, new_prot, req_prot;
 	int i, do_split = 1;
···
 	if (cpa->force_split)
 		return 1;
 
-	spin_lock_irqsave(&pgd_lock, flags);
+	spin_lock(&pgd_lock);
 	/*
 	 * Check for races, another CPU might have split this page
 	 * up already:
···
 	}
 
 out_unlock:
-	spin_unlock_irqrestore(&pgd_lock, flags);
+	spin_unlock(&pgd_lock);
 
 	return do_split;
 }
 
 static int split_large_page(pte_t *kpte, unsigned long address)
 {
-	unsigned long flags, pfn, pfninc = 1;
+	unsigned long pfn, pfninc = 1;
 	unsigned int i, level;
 	pte_t *pbase, *tmp;
 	pgprot_t ref_prot;
···
 	if (!base)
 		return -ENOMEM;
 
-	spin_lock_irqsave(&pgd_lock, flags);
+	spin_lock(&pgd_lock);
 	/*
 	 * Check for races, another CPU might have split this page
 	 * up for us already:
···
 	 */
 	if (base)
 		__free_page(base);
-	spin_unlock_irqrestore(&pgd_lock, flags);
+	spin_unlock(&pgd_lock);
 
 	return 0;
 }
+4 -7
arch/x86/mm/pgtable.c
···
 
 static void pgd_dtor(pgd_t *pgd)
 {
-	unsigned long flags; /* can be called from interrupt context */
-
 	if (SHARED_KERNEL_PMD)
 		return;
 
-	spin_lock_irqsave(&pgd_lock, flags);
+	spin_lock(&pgd_lock);
 	pgd_list_del(pgd);
-	spin_unlock_irqrestore(&pgd_lock, flags);
+	spin_unlock(&pgd_lock);
 }
 
 /*
···
 {
 	pgd_t *pgd;
 	pmd_t *pmds[PREALLOCATED_PMDS];
-	unsigned long flags;
 
 	pgd = (pgd_t *)__get_free_page(PGALLOC_GFP);
 
···
 	 * respect to anything walking the pgd_list, so that they
 	 * never see a partially populated pgd.
 	 */
-	spin_lock_irqsave(&pgd_lock, flags);
+	spin_lock(&pgd_lock);
 
 	pgd_ctor(mm, pgd);
 	pgd_prepopulate_pmd(mm, pgd, pmds);
 
-	spin_unlock_irqrestore(&pgd_lock, flags);
+	spin_unlock(&pgd_lock);
 
 	return pgd;
 
+4 -3
arch/x86/pci/ce4100.c
···
 #include <linux/pci.h>
 #include <linux/init.h>
 
+#include <asm/ce4100.h>
 #include <asm/pci_x86.h>
 
 struct sim_reg {
···
 	.write = ce4100_conf_write,
 };
 
-static int __init ce4100_pci_init(void)
+int __init ce4100_pci_init(void)
 {
 	init_sim_regs();
 	raw_pci_ops = &ce4100_pci_conf;
-	return 0;
+	/* Indicate caller that it should invoke pci_legacy_init() */
+	return 1;
 }
-subsys_initcall(ce4100_pci_init);
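The return-value convention the new comment refers to lives in the generic x86 PCI setup, which calls the x86_init.pci.init hook and treats a nonzero result as a request to also run the legacy probe. A simplified sketch of that caller, reconstructed from the era's arch/x86/pci/legacy.c (treat the details as approximate):

int __init pci_subsys_init(void)
{
	/* A nonzero return from the platform hook asks for pci_legacy_init(). */
	if (x86_init.pci.init())
		pci_legacy_init();

	pcibios_fixup_peer_bridges();
	x86_init.pci.init_irq();
	pcibios_init();

	return 0;
}
subsys_initcall(pci_subsys_init);

Hooking x86_init.pci.init guarantees the CE4100 raw_pci_ops are installed before the bus is scanned, rather than relying on initcall ordering against the old standalone subsys_initcall(); returning 1 still gets the generic scan from pci_legacy_init().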
+2
arch/x86/platform/ce4100/ce4100.c
···
 #include <linux/serial_reg.h>
 #include <linux/serial_8250.h>
 
+#include <asm/ce4100.h>
 #include <asm/setup.h>
 #include <asm/io.h>
 
···
 	x86_init.resources.probe_roms = x86_init_noop;
 	x86_init.mpparse.get_smp_config = x86_init_uint_noop;
 	x86_init.mpparse.find_smp_config = sdv_find_smp_config;
+	x86_init.pci.init = ce4100_pci_init;
 }
+4 -6
arch/x86/xen/mmu.c
··· 986 986 */ 987 987 void xen_mm_pin_all(void) 988 988 { 989 - unsigned long flags; 990 989 struct page *page; 991 990 992 - spin_lock_irqsave(&pgd_lock, flags); 991 + spin_lock(&pgd_lock); 993 992 994 993 list_for_each_entry(page, &pgd_list, lru) { 995 994 if (!PagePinned(page)) { ··· 997 998 } 998 999 } 999 1000 1000 - spin_unlock_irqrestore(&pgd_lock, flags); 1001 + spin_unlock(&pgd_lock); 1001 1002 } 1002 1003 1003 1004 /* ··· 1098 1099 */ 1099 1100 void xen_mm_unpin_all(void) 1100 1101 { 1101 - unsigned long flags; 1102 1102 struct page *page; 1103 1103 1104 - spin_lock_irqsave(&pgd_lock, flags); 1104 + spin_lock(&pgd_lock); 1105 1105 1106 1106 list_for_each_entry(page, &pgd_list, lru) { 1107 1107 if (PageSavePinned(page)) { ··· 1110 1112 } 1111 1113 } 1112 1114 1113 - spin_unlock_irqrestore(&pgd_lock, flags); 1115 + spin_unlock(&pgd_lock); 1114 1116 } 1115 1117 1116 1118 void xen_activate_mm(struct mm_struct *prev, struct mm_struct *next)