Linux kernel mirror (for testing) — git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

Merge branch 'x86-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/x86/linux-2.6-tip

* 'x86-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/x86/linux-2.6-tip:
x86, fpu: fix CONFIG_PREEMPT=y corruption of application's FPU stack
suspend-vs-iommu: prevent suspend if we could not resume
x86: section mismatch fix
x86: fix Xorg crash with xf86MapVidMem error
x86: fix pointer type warning in arch/x86/mm/init_64.c:early_memtest
x86: fix bad pmd ffff810000207xxx(9090909090909090)
x86: ioremap fix failing nesting check
x86: fix broken math-emu with lazy allocation of fpu area
x86: enable preemption in delay
x86: disable preemption in native_smp_prepare_cpus
x86: fix APIC warning on 32bit v2

13 files changed, +179 -65
arch/x86/kernel/acpi/boot.c (+14 -2)
···
 
 static void __cpuinit acpi_register_lapic(int id, u8 enabled)
 {
+	unsigned int ver = 0;
+
 	if (!enabled) {
 		++disabled_cpus;
 		return;
 	}
 
-	generic_processor_info(id, 0);
+#ifdef CONFIG_X86_32
+	if (boot_cpu_physical_apicid != -1U)
+		ver = apic_version[boot_cpu_physical_apicid];
+#endif
+
+	generic_processor_info(id, ver);
 }
 
 static int __init
···
 	mp_lapic_addr = address;
 
 	set_fixmap_nocache(FIX_APIC_BASE, address);
-	if (boot_cpu_physical_apicid == -1U)
+	if (boot_cpu_physical_apicid == -1U) {
 		boot_cpu_physical_apicid = GET_APIC_ID(read_apic_id());
+#ifdef CONFIG_X86_32
+		apic_version[boot_cpu_physical_apicid] =
+			GET_APIC_VERSION(apic_read(APIC_LVR));
+#endif
+	}
 }
 
 static int __init early_acpi_parse_madt_lapic_addr_ovr(void)
arch/x86/kernel/i387.c (+29 -15)
···
 
 void __init init_thread_xstate(void)
 {
+	if (!HAVE_HWFP) {
+		xstate_size = sizeof(struct i387_soft_struct);
+		return;
+	}
+
 	if (cpu_has_fxsr)
 		xstate_size = sizeof(struct i387_fxsave_struct);
 #ifdef CONFIG_X86_32
···
 int init_fpu(struct task_struct *tsk)
 {
 	if (tsk_used_math(tsk)) {
-		if (tsk == current)
+		if (HAVE_HWFP && tsk == current)
 			unlazy_fpu(tsk);
 		return 0;
 	}
···
 		if (!tsk->thread.xstate)
 			return -ENOMEM;
 	}
+
+#ifdef CONFIG_X86_32
+	if (!HAVE_HWFP) {
+		memset(tsk->thread.xstate, 0, xstate_size);
+		finit();
+		set_stopped_child_used_math(tsk);
+		return 0;
+	}
+#endif
 
 	if (cpu_has_fxsr) {
 		struct i387_fxsave_struct *fx = &tsk->thread.xstate->fxsave;
···
 	struct user_i387_ia32_struct env;
 	int ret;
 
-	if (!HAVE_HWFP)
-		return fpregs_soft_get(target, regset, pos, count, kbuf, ubuf);
-
 	ret = init_fpu(target);
 	if (ret)
 		return ret;
+
+	if (!HAVE_HWFP)
+		return fpregs_soft_get(target, regset, pos, count, kbuf, ubuf);
 
 	if (!cpu_has_fxsr) {
 		return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
···
 	struct user_i387_ia32_struct env;
 	int ret;
 
-	if (!HAVE_HWFP)
-		return fpregs_soft_set(target, regset, pos, count, kbuf, ubuf);
-
 	ret = init_fpu(target);
 	if (ret)
 		return ret;
 
 	set_stopped_child_used_math(target);
+
+	if (!HAVE_HWFP)
+		return fpregs_soft_set(target, regset, pos, count, kbuf, ubuf);
 
 	if (!cpu_has_fxsr) {
 		return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
···
 int restore_i387_ia32(struct _fpstate_ia32 __user *buf)
 {
 	int err;
+	struct task_struct *tsk = current;
 
-	if (HAVE_HWFP) {
-		struct task_struct *tsk = current;
-
+	if (HAVE_HWFP)
 		clear_fpu(tsk);
 
-		if (!used_math()) {
-			err = init_fpu(tsk);
-			if (err)
-				return err;
-		}
+	if (!used_math()) {
+		err = init_fpu(tsk);
+		if (err)
+			return err;
+	}
 
+	if (HAVE_HWFP) {
 		if (cpu_has_fxsr)
 			err = restore_i387_fxsave(buf);
 		else
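The subtle part of the i387.c change is ordering: on soft-FPU kernels, fpregs_soft_get()/fpregs_soft_set() operate on tsk->thread.xstate, which is now allocated lazily, so init_fpu() must run before either of them is called — hence the !HAVE_HWFP early returns moving below the init_fpu() call. A minimal user-space sketch of that bug class, with invented names:

/*
 * Sketch of the ordering bug fixed above (invented names, not kernel
 * code): state is allocated lazily, so any accessor that dereferences
 * it must run after the allocator, never before.
 */
#include <stdlib.h>
#include <string.h>

struct fpu_state { char regs[512]; };

static struct fpu_state *state;		/* allocated on first use */

static int init_state(void)
{
	if (!state)
		state = calloc(1, sizeof(*state));
	return state ? 0 : -1;
}

static void soft_get(char *out)
{
	/* crashes if init_state() has not run yet */
	memcpy(out, state->regs, sizeof(state->regs));
}

int read_regs(char *out)
{
	if (init_state())		/* must come first, as in the patch */
		return -1;
	soft_get(out);
	return 0;
}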
arch/x86/kernel/pci-gart_64.c (+30 -1)
···
 #include <linux/kdebug.h>
 #include <linux/scatterlist.h>
 #include <linux/iommu-helper.h>
+#include <linux/sysdev.h>
 #include <asm/atomic.h>
 #include <asm/io.h>
 #include <asm/mtrr.h>
···
 	return aper_base;
 }
 
+static int gart_resume(struct sys_device *dev)
+{
+	return 0;
+}
+
+static int gart_suspend(struct sys_device *dev, pm_message_t state)
+{
+	return -EINVAL;
+}
+
+static struct sysdev_class gart_sysdev_class = {
+	.name = "gart",
+	.suspend = gart_suspend,
+	.resume = gart_resume,
+
+};
+
+static struct sys_device device_gart = {
+	.id	= 0,
+	.cls	= &gart_sysdev_class,
+};
+
 /*
  * Private Northbridge GATT initialization in case we cannot use the
  * AGP driver for some reason.
···
 	unsigned aper_base, new_aper_base;
 	struct pci_dev *dev;
 	void *gatt;
-	int i;
+	int i, error;
 
 	printk(KERN_INFO "PCI-DMA: Disabling AGP.\n");
 	aper_size = aper_base = info->aper_size = 0;
···
 
 		pci_write_config_dword(dev, 0x90, ctl);
 	}
+
+	error = sysdev_class_register(&gart_sysdev_class);
+	if (!error)
+		error = sysdev_register(&device_gart);
+	if (error)
+		panic("Could not register gart_sysdev -- would corrupt data on next suspend");
 	flush_gart();
 
 	printk(KERN_INFO "PCI-DMA: aperture base @ %x size %u KB\n",
arch/x86/kernel/process_32.c (+4 -1)
···
 	/* If the task has used fpu the last 5 timeslices, just do a full
 	 * restore of the math state immediately to avoid the trap; the
 	 * chances of needing FPU soon are obviously high now
+	 *
+	 * tsk_used_math() checks prevent calling math_state_restore(),
+	 * which can sleep in the case of !tsk_used_math()
 	 */
-	if (next_p->fpu_counter > 5)
+	if (tsk_used_math(next_p) && next_p->fpu_counter > 5)
 		math_state_restore();
 
 	/*
arch/x86/kernel/process_64.c (+4 -1)
···
 	/* If the task has used fpu the last 5 timeslices, just do a full
 	 * restore of the math state immediately to avoid the trap; the
 	 * chances of needing FPU soon are obviously high now
+	 *
+	 * tsk_used_math() checks prevent calling math_state_restore(),
+	 * which can sleep in the case of !tsk_used_math()
 	 */
-	if (next_p->fpu_counter>5)
+	if (tsk_used_math(next_p) && next_p->fpu_counter > 5)
 		math_state_restore();
 	return prev_p;
 }
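Both process_*.c hunks fix the same bug: math_state_restore() calls init_fpu() when the incoming task has no math state yet, and that allocation can sleep, but __switch_to() runs with preemption disabled. Guarding the eager restore with tsk_used_math() keeps the sleeping path unreachable. A compilable user-space model of the bug class, with the atomic section modeled by a flag and all names invented:

/*
 * Model of the bug class (not kernel code): a routine that may block
 * must not be reachable from a section that cannot block.
 */
#include <assert.h>
#include <stdbool.h>
#include <stdlib.h>

static bool in_atomic;			/* models preemption disabled */

struct fpu_state { int regs[32]; };

static struct fpu_state *lazy_init(struct fpu_state **slot)
{
	assert(!in_atomic);		/* calloc may block: forbidden here */
	if (!*slot)
		*slot = calloc(1, sizeof(**slot));
	return *slot;
}

static void state_restore(struct fpu_state **slot)
{
	if (!*slot)
		lazy_init(slot);	/* trips the assert in atomic context */
	/* ... load registers from *slot ... */
}

void context_switch(struct fpu_state **next_state, int fpu_counter)
{
	in_atomic = true;
	/* The fix: restore eagerly only when state already exists, so
	 * state_restore() cannot reach the blocking lazy_init() path. */
	if (*next_state && fpu_counter > 5)
		state_restore(next_state);
	in_atomic = false;
}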
arch/x86/kernel/smpboot.c (+4 -1)
···
  */
 void __init native_smp_prepare_cpus(unsigned int max_cpus)
 {
+	preempt_disable();
 	nmi_watchdog_default();
 	smp_cpu_index_default();
 	current_cpu_data = boot_cpu_data;
···
 	if (smp_sanity_check(max_cpus) < 0) {
 		printk(KERN_INFO "SMP disabled\n");
 		disable_smp();
-		return;
+		goto out;
 	}
 
 	preempt_disable();
···
 	printk(KERN_INFO "CPU%d: ", 0);
 	print_cpu_info(&cpu_data(0));
 	setup_boot_clock();
+out:
+	preempt_enable();
 }
 /*
  * Early setup to make printk work.
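The smpboot.c change wraps the whole of native_smp_prepare_cpus() in a preempt-disabled section, so the early-error return had to become goto out to keep the disable/enable pair balanced on every path. The idiom in isolation (sanity_check() is an illustrative stand-in):

/*
 * Balanced enable/disable idiom from the patch: every exit path
 * funnels through one label, so the preempt count can never be left
 * unbalanced.
 */
#include <linux/preempt.h>

extern int sanity_check(void);

void prepare_cpus(void)
{
	preempt_disable();

	if (sanity_check() < 0)
		goto out;		/* early exit still re-enables */

	/* ... work that must not migrate between CPUs ... */
out:
	preempt_enable();
}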
arch/x86/lib/delay_32.c (+27 -4)
···
 static void delay_tsc(unsigned long loops)
 {
 	unsigned long bclock, now;
+	int cpu;
 
-	preempt_disable();		/* TSC's are per-cpu */
+	preempt_disable();
+	cpu = smp_processor_id();
 	rdtscl(bclock);
-	do {
-		rep_nop();
+	for (;;) {
 		rdtscl(now);
-	} while ((now-bclock) < loops);
+		if ((now - bclock) >= loops)
+			break;
+
+		/* Allow RT tasks to run */
+		preempt_enable();
+		rep_nop();
+		preempt_disable();
+
+		/*
+		 * It is possible that we moved to another CPU, and
+		 * since TSC's are per-cpu we need to calculate
+		 * that. The delay must guarantee that we wait "at
+		 * least" the amount of time. Being moved to another
+		 * CPU could make the wait longer but we just need to
+		 * make sure we waited long enough. Rebalance the
+		 * counter for this CPU.
+		 */
+		if (unlikely(cpu != smp_processor_id())) {
+			loops -= (now - bclock);
+			cpu = smp_processor_id();
+			rdtscl(bclock);
+		}
+	}
 	preempt_enable();
 }
 
arch/x86/lib/delay_64.c (+26 -4)
···
 void __delay(unsigned long loops)
 {
 	unsigned bclock, now;
+	int cpu;
 
-	preempt_disable();		/* TSC's are pre-cpu */
+	preempt_disable();
+	cpu = smp_processor_id();
 	rdtscl(bclock);
-	do {
-		rep_nop();
+	for (;;) {
 		rdtscl(now);
+		if ((now - bclock) >= loops)
+			break;
+
+		/* Allow RT tasks to run */
+		preempt_enable();
+		rep_nop();
+		preempt_disable();
+
+		/*
+		 * It is possible that we moved to another CPU, and
+		 * since TSC's are per-cpu we need to calculate
+		 * that. The delay must guarantee that we wait "at
+		 * least" the amount of time. Being moved to another
+		 * CPU could make the wait longer but we just need to
+		 * make sure we waited long enough. Rebalance the
+		 * counter for this CPU.
+		 */
+		if (unlikely(cpu != smp_processor_id())) {
+			loops -= (now - bclock);
+			cpu = smp_processor_id();
+			rdtscl(bclock);
+		}
 	}
-	while ((now-bclock) < loops);
 	preempt_enable();
 }
 EXPORT_SYMBOL(__delay);
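The two delay fixes are identical: instead of holding preemption off for the full busy-wait, the loop re-enables it briefly each iteration so RT tasks can run; since TSCs are per-CPU, a migration during that window invalidates the baseline, so the loop credits the time already waited and re-reads the TSC on the new CPU. A user-space analogue of the same loop, assuming GCC/Clang on Linux, with sched_getcpu() standing in for smp_processor_id() and __rdtsc() for rdtscl():

/*
 * User-space sketch of the migration-tolerant delay loop (not kernel
 * code). Migration cannot be disabled here, so only the rebaseline
 * logic carries over; the guarantee is the same: wait "at least" the
 * requested number of cycles.
 */
#define _GNU_SOURCE
#include <sched.h>
#include <x86intrin.h>

void delay_cycles(unsigned long long loops)
{
	unsigned long long bclock, now;
	int cpu = sched_getcpu();

	bclock = __rdtsc();
	for (;;) {
		now = __rdtsc();
		if (now - bclock >= loops)
			break;

		_mm_pause();		/* the rep_nop() equivalent */

		/* Moved to another CPU: its TSC is unrelated to bclock.
		 * Credit the time already waited, then restart the
		 * baseline on the new CPU. */
		if (cpu != sched_getcpu()) {
			loops -= now - bclock;
			cpu = sched_getcpu();
			bclock = __rdtsc();
		}
	}
}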
arch/x86/math-emu/fpu_entry.c (+8 -5)
···
 #include <asm/uaccess.h>
 #include <asm/desc.h>
 #include <asm/user.h>
+#include <asm/i387.h>
 
 #include "fpu_system.h"
 #include "fpu_emu.h"
···
 	unsigned long code_limit = 0;	/* Initialized to stop compiler warnings */
 	struct desc_struct code_descriptor;
 
+	if (!used_math()) {
+		if (init_fpu(current)) {
+			do_group_exit(SIGKILL);
+			return;
+		}
+	}
+
 #ifdef RE_ENTRANT_CHECKING
 	if (emulating) {
 		printk("ERROR: wm-FPU-emu is not RE-ENTRANT!\n");
 	}
 	RE_ENTRANT_CHECK_ON;
 #endif /* RE_ENTRANT_CHECKING */
-
-	if (!used_math()) {
-		finit();
-		set_used_math();
-	}
 
 	SETUP_DATA_AREA(arg);
 
arch/x86/mm/init_64.c (+3 -3)
···
 	pmd_t *last_pmd = pmd + PTRS_PER_PMD;
 
 	for (; pmd < last_pmd; pmd++, vaddr += PMD_SIZE) {
-		if (!pmd_present(*pmd))
+		if (pmd_none(*pmd))
 			continue;
 		if (vaddr < (unsigned long) _text || vaddr > end)
 			set_pmd(pmd, __pmd(0));
···
 
 static void __init early_memtest(unsigned long start, unsigned long end)
 {
-	unsigned long t_start, t_size;
+	u64 t_start, t_size;
 	unsigned pattern;
 
 	if (!memtest_pattern)
···
 		if (t_start + t_size > end)
 			t_size = end - t_start;
 
-		printk(KERN_CONT "\n  %016lx - %016lx pattern %d",
+		printk(KERN_CONT "\n  %016llx - %016llx pattern %d",
 		       t_start, t_start + t_size, pattern);
 
 		memtest(t_start, t_size, pattern);
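Two separate fixes here: the cleanup loop now skips only truly empty entries (pmd_none()), so a garbage pmd with the present bit clear — the "bad pmd" from the commit list — still gets wiped; and the memtest bounds are widened to u64, which the kernel defines as unsigned long long, so the printk format must switch to %llx. The format pitfall in isolation (illustrative fragment):

/*
 * u64 is 'unsigned long long' on x86, so %lx stops matching once the
 * variables are widened; %llx is the matching length modifier.
 */
#include <linux/kernel.h>

static void show_span(u64 t_start, u64 t_size)
{
	printk("%016lx\n",  t_start);			/* type mismatch for u64 */
	printk("%016llx\n", t_start + t_size);		/* correct for u64 */
}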
arch/x86/mm/ioremap.c (+3 -2)
···
 	unsigned long offset;
 	unsigned int nrpages;
 	enum fixed_addresses idx;
-	unsigned int nesting;
+	int nesting;
 
 	nesting = --early_ioremap_nested;
-	WARN_ON(nesting < 0);
+	if (WARN_ON(nesting < 0))
+		return;
 
 	if (early_ioremap_debug) {
 		printk(KERN_INFO "early_iounmap(%p, %08lx) [%d]\n", addr,
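The ioremap fix is a classic signedness bug: with nesting declared unsigned int, decrementing past zero wraps to a huge positive value and nesting < 0 can never be true, so the WARN_ON() never fired; making it int (and bailing out when the warning trips) restores the check. A self-contained demonstration:

/* Demo of why the old nesting check could never fire. */
#include <stdio.h>

int main(void)
{
	unsigned int nesting = 0;

	--nesting;		/* wraps to UINT_MAX rather than -1 */
	if (nesting < 0)	/* always false for an unsigned type */
		puts("unbalanced early_iounmap detected");
	else
		printf("check skipped, nesting = %u\n", nesting);
	return 0;
}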
arch/x86/mm/pat.c (+25 -26)
···
 	printk(KERN_INFO "%s\n", reason);
 }
 
-static int nopat(char *str)
+static int __init nopat(char *str)
 {
 	pat_disable("PAT support disabled.");
 	return 0;
···
 	unsigned long pat_type;
 	u8 mtrr_type;
 
-	mtrr_type = mtrr_type_lookup(start, end);
-	if (mtrr_type == 0xFF) {		/* MTRR not enabled */
-		*ret_prot = prot;
-		return 0;
-	}
-	if (mtrr_type == 0xFE) {		/* MTRR match error */
-		*ret_prot = _PAGE_CACHE_UC;
-		return -1;
-	}
-	if (mtrr_type != MTRR_TYPE_UNCACHABLE &&
-	    mtrr_type != MTRR_TYPE_WRBACK &&
-	    mtrr_type != MTRR_TYPE_WRCOMB) {	/* MTRR type unhandled */
-		*ret_prot = _PAGE_CACHE_UC;
-		return -1;
-	}
-
 	pat_type = prot & _PAGE_CACHE_MASK;
 	prot &= (~_PAGE_CACHE_MASK);
 
-	/* Currently doing intersection by hand. Optimize it later. */
+	/*
+	 * We return the PAT request directly for types where PAT takes
+	 * precedence with respect to MTRR and for UC_MINUS.
+	 * Consistency checks with other PAT requests is done later
+	 * while going through memtype list.
+	 */
 	if (pat_type == _PAGE_CACHE_WC) {
 		*ret_prot = prot | _PAGE_CACHE_WC;
+		return 0;
 	} else if (pat_type == _PAGE_CACHE_UC_MINUS) {
 		*ret_prot = prot | _PAGE_CACHE_UC_MINUS;
-	} else if (pat_type == _PAGE_CACHE_UC ||
-		   mtrr_type == MTRR_TYPE_UNCACHABLE) {
+		return 0;
+	} else if (pat_type == _PAGE_CACHE_UC) {
+		*ret_prot = prot | _PAGE_CACHE_UC;
+		return 0;
+	}
+
+	/*
+	 * Look for MTRR hint to get the effective type in case where PAT
+	 * request is for WB.
+	 */
+	mtrr_type = mtrr_type_lookup(start, end);
+
+	if (mtrr_type == MTRR_TYPE_UNCACHABLE) {
 		*ret_prot = prot | _PAGE_CACHE_UC;
 	} else if (mtrr_type == MTRR_TYPE_WRCOMB) {
 		*ret_prot = prot | _PAGE_CACHE_WC;
···
 
 	if (req_type == -1) {
 		/*
-		 * Special case where caller wants to inherit from mtrr or
-		 * existing pat mapping, defaulting to UC_MINUS in case of
-		 * no match.
+		 * Call mtrr_lookup to get the type hint. This is an
+		 * optimization for /dev/mem mmap'ers into WB memory (BIOS
+		 * tools and ACPI tools). Use WB request for WB memory and use
+		 * UC_MINUS otherwise.
 		 */
 		u8 mtrr_type = mtrr_type_lookup(start, end);
-		if (mtrr_type == 0xFE) {	/* MTRR match error */
-			err = -1;
-		}
 
 		if (mtrr_type == MTRR_TYPE_WRBACK) {
 			req_type = _PAGE_CACHE_WB;
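The pat.c rework inverts the old flow: rather than consulting the MTRRs first and intersecting by hand, explicit PAT requests for WC, UC_MINUS, and UC are now honored directly, and mtrr_type_lookup() is used only as a hint when the request is WB. A condensed model of that precedence (names invented; the real code manipulates _PAGE_CACHE_* bits in the pgprot):

/* An explicit PAT request wins outright; only a WB request defers to
 * the MTRR hint. */
enum ctype { CT_WB, CT_WC, CT_UC_MINUS, CT_UC };

static enum ctype effective_type(enum ctype pat_req, enum ctype mtrr_hint)
{
	if (pat_req != CT_WB)
		return pat_req;		/* WC, UC_MINUS, UC: PAT wins */

	switch (mtrr_hint) {		/* WB request: follow the hint */
	case CT_UC:
		return CT_UC;
	case CT_WC:
		return CT_WC;
	default:
		return CT_WB;
	}
}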
include/asm-x86/i387.h (+2)
···
 
 #else /* CONFIG_X86_32 */
 
+extern void finit(void);
+
 static inline void tolerant_fwait(void)
 {
 	asm volatile("fnclex ; fwait");