Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

Merge branch 'fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/davej/cpufreq

* 'fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/davej/cpufreq:
[CPUFREQ] Fix stale cpufreq_cpu_governor pointer
[CPUFREQ] Resolve time unit thinko in ondemand/conservative govs
[CPUFREQ] speedstep-ich: fix error caused by 394122ab144dae4b276d74644a2f11c44a60ac5c
[CPUFREQ] Fix use after free on governor restore
[CPUFREQ] acpi-cpufreq: blacklist Intel 0f68: Fix HT detection and put in notification message
[CPUFREQ] powernow-k8: Fix test in get_transition_latency()
[CPUFREQ] longhaul: select Longhaul version 2 for capable CPUs

+70 -32
+17 -6
arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
@@ -526,15 +526,21 @@
 
 static int acpi_cpufreq_blacklist(struct cpuinfo_x86 *c)
 {
-	/* http://www.intel.com/Assets/PDF/specupdate/314554.pdf
+	/* Intel Xeon Processor 7100 Series Specification Update
+	 * http://www.intel.com/Assets/PDF/specupdate/314554.pdf
 	 * AL30: A Machine Check Exception (MCE) Occurring during an
 	 * Enhanced Intel SpeedStep Technology Ratio Change May Cause
-	 * Both Processor Cores to Lock Up when HT is enabled*/
+	 * Both Processor Cores to Lock Up. */
 	if (c->x86_vendor == X86_VENDOR_INTEL) {
 		if ((c->x86 == 15) &&
 		    (c->x86_model == 6) &&
-		    (c->x86_mask == 8) && smt_capable())
+		    (c->x86_mask == 8)) {
+			printk(KERN_INFO "acpi-cpufreq: Intel(R) "
+				"Xeon(R) 7100 Errata AL30, processors may "
+				"lock up on frequency changes: disabling "
+				"acpi-cpufreq.\n");
 			return -ENODEV;
+		}
 	}
 	return 0;
 }
@@ -555,13 +549,18 @@
 	unsigned int result = 0;
 	struct cpuinfo_x86 *c = &cpu_data(policy->cpu);
 	struct acpi_processor_performance *perf;
+#ifdef CONFIG_SMP
+	static int blacklisted;
+#endif
 
 	dprintk("acpi_cpufreq_cpu_init\n");
 
 #ifdef CONFIG_SMP
-	result = acpi_cpufreq_blacklist(c);
-	if (result)
-		return result;
+	if (blacklisted)
+		return blacklisted;
+	blacklisted = acpi_cpufreq_blacklist(c);
+	if (blacklisted)
+		return blacklisted;
 #endif
 
 	data = kzalloc(sizeof(struct acpi_cpufreq_data), GFP_KERNEL);
+1 -1
arch/x86/kernel/cpu/cpufreq/longhaul.c
@@ -813,7 +813,7 @@
 		memcpy(eblcr, samuel2_eblcr, sizeof(samuel2_eblcr));
 		break;
 	case 1 ... 15:
-		longhaul_version = TYPE_LONGHAUL_V1;
+		longhaul_version = TYPE_LONGHAUL_V2;
 		if (c->x86_mask < 8) {
 			cpu_model = CPU_SAMUEL2;
 			cpuname = "C3 'Samuel 2' [C5B]";
+1 -1
arch/x86/kernel/cpu/cpufreq/powernow-k8.c
@@ -1022,7 +1022,7 @@
 	 * set it to 1 to avoid problems in the future.
 	 * For all others it's a BIOS bug.
 	 */
-	if (!boot_cpu_data.x86 == 0x11)
+	if (boot_cpu_data.x86 != 0x11)
 		printk(KERN_ERR FW_WARN PFX "Invalid zero transition "
 			"latency\n");
 	max_latency = 1;
+7 -12
arch/x86/kernel/cpu/cpufreq/speedstep-ich.c
@@ -232,28 +232,23 @@
 	return 0;
 }
 
-struct get_freq_data {
-	unsigned int speed;
-	unsigned int processor;
-};
-
-static void get_freq_data(void *_data)
+static void get_freq_data(void *_speed)
 {
-	struct get_freq_data *data = _data;
+	unsigned int *speed = _speed;
 
-	data->speed = speedstep_get_frequency(data->processor);
+	*speed = speedstep_get_frequency(speedstep_processor);
 }
 
 static unsigned int speedstep_get(unsigned int cpu)
 {
-	struct get_freq_data data = { .processor = cpu };
+	unsigned int speed;
 
 	/* You're supposed to ensure CPU is online. */
-	if (smp_call_function_single(cpu, get_freq_data, &data, 1) != 0)
+	if (smp_call_function_single(cpu, get_freq_data, &speed, 1) != 0)
 		BUG();
 
-	dprintk("detected %u kHz as current frequency\n", data.speed);
-	return data.speed;
+	dprintk("detected %u kHz as current frequency\n", speed);
+	return speed;
 }
 
 /**
+40 -8
drivers/cpufreq/cpufreq.c
@@ -41,7 +41,7 @@
 static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data);
 #ifdef CONFIG_HOTPLUG_CPU
 /* This one keeps track of the previously set governor of a removed CPU */
-static DEFINE_PER_CPU(struct cpufreq_governor *, cpufreq_cpu_governor);
+static DEFINE_PER_CPU(char[CPUFREQ_NAME_LEN], cpufreq_cpu_governor);
 #endif
 static DEFINE_SPINLOCK(cpufreq_driver_lock);
 
@@ -774,10 +774,12 @@
 #ifdef CONFIG_SMP
 	unsigned long flags;
 	unsigned int j;
-
 #ifdef CONFIG_HOTPLUG_CPU
-	if (per_cpu(cpufreq_cpu_governor, cpu)) {
-		policy->governor = per_cpu(cpufreq_cpu_governor, cpu);
+	struct cpufreq_governor *gov;
+
+	gov = __find_governor(per_cpu(cpufreq_cpu_governor, cpu));
+	if (gov) {
+		policy->governor = gov;
 		dprintk("Restoring governor %s for cpu %d\n",
 			policy->governor->name, cpu);
 	}
@@ -951,10 +949,13 @@
 static int cpufreq_add_dev(struct sys_device *sys_dev)
 {
 	unsigned int cpu = sys_dev->id;
-	int ret = 0;
+	int ret = 0, found = 0;
 	struct cpufreq_policy *policy;
 	unsigned long flags;
 	unsigned int j;
+#ifdef CONFIG_HOTPLUG_CPU
+	int sibling;
+#endif
 
 	if (cpu_is_offline(cpu))
 		return 0;
@@ -1004,7 +999,19 @@
 	INIT_WORK(&policy->update, handle_update);
 
 	/* Set governor before ->init, so that driver could check it */
-	policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
+#ifdef CONFIG_HOTPLUG_CPU
+	for_each_online_cpu(sibling) {
+		struct cpufreq_policy *cp = per_cpu(cpufreq_cpu_data, sibling);
+		if (cp && cp->governor &&
+		    (cpumask_test_cpu(cpu, cp->related_cpus))) {
+			policy->governor = cp->governor;
+			found = 1;
+			break;
+		}
+	}
+#endif
+	if (!found)
+		policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
 	/* call driver. From then on the cpufreq must be able
 	 * to accept all calls to ->verify and ->setpolicy for this CPU
 	 */
@@ -1128,7 +1111,8 @@
 #ifdef CONFIG_SMP
 
 #ifdef CONFIG_HOTPLUG_CPU
-	per_cpu(cpufreq_cpu_governor, cpu) = data->governor;
+	strncpy(per_cpu(cpufreq_cpu_governor, cpu), data->governor->name,
+			CPUFREQ_NAME_LEN);
 #endif
 
 	/* if we have other CPUs still registered, we need to unlink them,
@@ -1153,7 +1135,8 @@
 			continue;
 		dprintk("removing link for cpu %u\n", j);
 #ifdef CONFIG_HOTPLUG_CPU
-		per_cpu(cpufreq_cpu_governor, j) = data->governor;
+		strncpy(per_cpu(cpufreq_cpu_governor, j),
+			data->governor->name, CPUFREQ_NAME_LEN);
 #endif
 		cpu_sys_dev = get_cpu_sysdev(j);
 		sysfs_remove_link(&cpu_sys_dev->kobj, "cpufreq");
@@ -1625,8 +1606,21 @@
 
 void cpufreq_unregister_governor(struct cpufreq_governor *governor)
 {
+#ifdef CONFIG_HOTPLUG_CPU
+	int cpu;
+#endif
+
 	if (!governor)
 		return;
+
+#ifdef CONFIG_HOTPLUG_CPU
+	for_each_present_cpu(cpu) {
+		if (cpu_online(cpu))
+			continue;
+		if (!strcmp(per_cpu(cpufreq_cpu_governor, cpu), governor->name))
+			strcpy(per_cpu(cpufreq_cpu_governor, cpu), "\0");
+	}
+#endif
 
 	mutex_lock(&cpufreq_governor_mutex);
 	list_del(&governor->governor_list);
+2 -2
drivers/cpufreq/cpufreq_conservative.c
@@ -116,9 +116,9 @@
 
 	idle_time = cputime64_sub(cur_wall_time, busy_time);
 	if (wall)
-		*wall = cur_wall_time;
+		*wall = (cputime64_t)jiffies_to_usecs(cur_wall_time);
 
-	return idle_time;
+	return (cputime64_t)jiffies_to_usecs(idle_time);
 }
 
 static inline cputime64_t get_cpu_idle_time(unsigned int cpu, cputime64_t *wall)
+2 -2
drivers/cpufreq/cpufreq_ondemand.c
@@ -133,9 +133,9 @@
 
 	idle_time = cputime64_sub(cur_wall_time, busy_time);
 	if (wall)
-		*wall = (cputime64_t)jiffies_to_usecs(cur_wall_time);
+		*wall = (cputime64_t)jiffies_to_usecs(cur_wall_time);
 
-	return idle_time;
+	return (cputime64_t)jiffies_to_usecs(idle_time);
 }
 
 static inline cputime64_t get_cpu_idle_time(unsigned int cpu, cputime64_t *wall)