Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

Merge branch 'x86-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip

* 'x86-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
x86: Fix oops in identify_cpu() on CPUs without CPUID
x86: Clear incorrectly forced X86_FEATURE_LAHF_LM flag
x86, mce: therm_throt - change when we print messages
x86: Add reboot quirk for every 5 series MacBook/Pro

+48 -37
+7
arch/x86/kernel/cpu/amd.c
@@ -400,6 +400,13 @@
 		level = cpuid_eax(1);
 		if((level >= 0x0f48 && level < 0x0f50) || level >= 0x0f58)
 			set_cpu_cap(c, X86_FEATURE_REP_GOOD);
+
+		/*
+		 * Some BIOSes incorrectly force this feature, but only K8
+		 * revision D (model = 0x14) and later actually support it.
+		 */
+		if (c->x86_model < 0x14)
+			clear_cpu_cap(c, X86_FEATURE_LAHF_LM);
 	}
 	if (c->x86 == 0x10 || c->x86 == 0x11)
 		set_cpu_cap(c, X86_FEATURE_REP_GOOD);
+24 -24
arch/x86/kernel/cpu/common.c
@@ -59,7 +59,30 @@
 	alloc_bootmem_cpumask_var(&cpu_sibling_setup_mask);
 }
 
-static const struct cpu_dev *this_cpu __cpuinitdata;
+static void __cpuinit default_init(struct cpuinfo_x86 *c)
+{
+#ifdef CONFIG_X86_64
+	display_cacheinfo(c);
+#else
+	/* Not much we can do here... */
+	/* Check if at least it has cpuid */
+	if (c->cpuid_level == -1) {
+		/* No cpuid. It must be an ancient CPU */
+		if (c->x86 == 4)
+			strcpy(c->x86_model_id, "486");
+		else if (c->x86 == 3)
+			strcpy(c->x86_model_id, "386");
+	}
+#endif
+}
+
+static const struct cpu_dev __cpuinitconst default_cpu = {
+	.c_init		= default_init,
+	.c_vendor	= "Unknown",
+	.c_x86_vendor	= X86_VENDOR_UNKNOWN,
+};
+
+static const struct cpu_dev *this_cpu __cpuinitdata = &default_cpu;
 
 DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
 #ifdef CONFIG_X86_64
@@ -354,29 +331,6 @@
 }
 
 static const struct cpu_dev *__cpuinitdata cpu_devs[X86_VENDOR_NUM] = {};
-
-static void __cpuinit default_init(struct cpuinfo_x86 *c)
-{
-#ifdef CONFIG_X86_64
-	display_cacheinfo(c);
-#else
-	/* Not much we can do here... */
-	/* Check if at least it has cpuid */
-	if (c->cpuid_level == -1) {
-		/* No cpuid. It must be an ancient CPU */
-		if (c->x86 == 4)
-			strcpy(c->x86_model_id, "486");
-		else if (c->x86 == 3)
-			strcpy(c->x86_model_id, "386");
-	}
-#endif
-}
-
-static const struct cpu_dev __cpuinitconst default_cpu = {
-	.c_init		= default_init,
-	.c_vendor	= "Unknown",
-	.c_x86_vendor	= X86_VENDOR_UNKNOWN,
-};
 
 static void __cpuinit get_model_name(struct cpuinfo_x86 *c)
 {
+11 -7
arch/x86/kernel/cpu/mcheck/therm_throt.c
@@ -36,6 +36,7 @@
 
 static DEFINE_PER_CPU(__u64, next_check) = INITIAL_JIFFIES;
 static DEFINE_PER_CPU(unsigned long, thermal_throttle_count);
+static DEFINE_PER_CPU(bool, thermal_throttle_active);
 
 static atomic_t therm_throt_en = ATOMIC_INIT(0);
 
@@ -97,24 +96,27 @@
 {
 	unsigned int cpu = smp_processor_id();
 	__u64 tmp_jiffs = get_jiffies_64();
+	bool was_throttled = __get_cpu_var(thermal_throttle_active);
+	bool is_throttled = __get_cpu_var(thermal_throttle_active) = curr;
 
-	if (curr)
+	if (is_throttled)
 		__get_cpu_var(thermal_throttle_count)++;
 
-	if (time_before64(tmp_jiffs, __get_cpu_var(next_check)))
+	if (!(was_throttled ^ is_throttled) &&
+	    time_before64(tmp_jiffs, __get_cpu_var(next_check)))
 		return 0;
 
 	__get_cpu_var(next_check) = tmp_jiffs + CHECK_INTERVAL;
 
 	/* if we just entered the thermal event */
-	if (curr) {
+	if (is_throttled) {
 		printk(KERN_CRIT "CPU%d: Temperature above threshold, "
-		       "cpu clock throttled (total events = %lu)\n", cpu,
-		       __get_cpu_var(thermal_throttle_count));
+		       "cpu clock throttled (total events = %lu)\n",
+		       cpu, __get_cpu_var(thermal_throttle_count));
 
 		add_taint(TAINT_MACHINE_CHECK);
-	} else {
-		printk(KERN_CRIT "CPU%d: Temperature/speed normal\n", cpu);
+	} else if (was_throttled) {
+		printk(KERN_INFO "CPU%d: Temperature/speed normal\n", cpu);
 	}
 
 	return 1;
+6 -6
arch/x86/kernel/reboot.c
@@ -418,20 +418,20 @@
 }
 
 static struct dmi_system_id __initdata pci_reboot_dmi_table[] = {
-	{	/* Handle problems with rebooting on Apple MacBook5,2 */
+	{	/* Handle problems with rebooting on Apple MacBook5 */
 		.callback = set_pci_reboot,
-		.ident = "Apple MacBook",
+		.ident = "Apple MacBook5",
 		.matches = {
 			DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
-			DMI_MATCH(DMI_PRODUCT_NAME, "MacBook5,2"),
+			DMI_MATCH(DMI_PRODUCT_NAME, "MacBook5"),
 		},
 	},
-	{	/* Handle problems with rebooting on Apple MacBookPro5,1 */
+	{	/* Handle problems with rebooting on Apple MacBookPro5 */
 		.callback = set_pci_reboot,
-		.ident = "Apple MacBookPro5,1",
+		.ident = "Apple MacBookPro5",
 		.matches = {
 			DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
-			DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro5,1"),
+			DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro5"),
 		},
 	},
 	{ }