Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1 fork

Configure Feed

Select the types of activity you want to include in your feed.

Merge git://git.kernel.org/pub/scm/linux/kernel/git/rusty/linux-2.6-for-linus

* git://git.kernel.org/pub/scm/linux/kernel/git/rusty/linux-2.6-for-linus:
kvm: fix kvm reboot crash when MAXSMP is used
cpumask: alloc zeroed cpumask for static cpumask_var_ts
cpumask: introduce zalloc_cpumask_var

+39 -12
+1 -1
arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
··· 550 550 return -ENOMEM; 551 551 } 552 552 for_each_possible_cpu(i) { 553 - if (!alloc_cpumask_var_node( 553 + if (!zalloc_cpumask_var_node( 554 554 &per_cpu_ptr(acpi_perf_data, i)->shared_cpu_map, 555 555 GFP_KERNEL, cpu_to_node(i))) { 556 556
+1 -1
arch/x86/kernel/cpu/cpufreq/powernow-k7.c
··· 322 322 goto err0; 323 323 } 324 324 325 - if (!alloc_cpumask_var(&acpi_processor_perf->shared_cpu_map, 325 + if (!zalloc_cpumask_var(&acpi_processor_perf->shared_cpu_map, 326 326 GFP_KERNEL)) { 327 327 retval = -ENOMEM; 328 328 goto err05;
+1 -1
arch/x86/kernel/cpu/cpufreq/powernow-k8.c
··· 887 887 /* notify BIOS that we exist */ 888 888 acpi_processor_notify_smm(THIS_MODULE); 889 889 890 - if (!alloc_cpumask_var(&data->acpi_data.shared_cpu_map, GFP_KERNEL)) { 890 + if (!zalloc_cpumask_var(&data->acpi_data.shared_cpu_map, GFP_KERNEL)) { 891 891 printk(KERN_ERR PFX 892 892 "unable to alloc powernow_k8_data cpumask\n"); 893 893 ret_val = -ENOMEM;
+1 -1
arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c
··· 471 471 472 472 if (unlikely(!alloc_cpumask_var(&saved_mask, GFP_KERNEL))) 473 473 return -ENOMEM; 474 - if (unlikely(!alloc_cpumask_var(&covered_cpus, GFP_KERNEL))) { 474 + if (unlikely(!zalloc_cpumask_var(&covered_cpus, GFP_KERNEL))) { 475 475 free_cpumask_var(saved_mask); 476 476 return -ENOMEM; 477 477 }
+1 -1
arch/x86/kernel/cpu/mcheck/mce_64.c
··· 1163 1163 if (!mce_available(&boot_cpu_data)) 1164 1164 return -EIO; 1165 1165 1166 - alloc_cpumask_var(&mce_device_initialized, GFP_KERNEL); 1166 + zalloc_cpumask_var(&mce_device_initialized, GFP_KERNEL); 1167 1167 1168 1168 err = mce_init_banks(); 1169 1169 if (err)
+1 -1
arch/x86/kernel/tlb_uv.c
··· 832 832 return 0; 833 833 834 834 for_each_possible_cpu(cur_cpu) 835 - alloc_cpumask_var_node(&per_cpu(uv_flush_tlb_mask, cur_cpu), 835 + zalloc_cpumask_var_node(&per_cpu(uv_flush_tlb_mask, cur_cpu), 836 836 GFP_KERNEL, cpu_to_node(cur_cpu)); 837 837 838 838 uv_bau_retry_limit = 1;
+1 -1
drivers/acpi/processor_core.c
··· 844 844 if (!pr) 845 845 return -ENOMEM; 846 846 847 - if (!alloc_cpumask_var(&pr->throttling.shared_cpu_map, GFP_KERNEL)) { 847 + if (!zalloc_cpumask_var(&pr->throttling.shared_cpu_map, GFP_KERNEL)) { 848 848 kfree(pr); 849 849 return -ENOMEM; 850 850 }
+1 -1
drivers/cpufreq/cpufreq.c
··· 808 808 ret = -ENOMEM; 809 809 goto nomem_out; 810 810 } 811 - if (!alloc_cpumask_var(&policy->related_cpus, GFP_KERNEL)) { 811 + if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL)) { 812 812 free_cpumask_var(policy->cpus); 813 813 kfree(policy); 814 814 ret = -ENOMEM;
+15
include/linux/cpumask.h
··· 1022 1022 1023 1023 bool alloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node); 1024 1024 bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags); 1025 + bool zalloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node); 1026 + bool zalloc_cpumask_var(cpumask_var_t *mask, gfp_t flags); 1025 1027 void alloc_bootmem_cpumask_var(cpumask_var_t *mask); 1026 1028 void free_cpumask_var(cpumask_var_t mask); 1027 1029 void free_bootmem_cpumask_var(cpumask_var_t mask); ··· 1039 1037 static inline bool alloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, 1040 1038 int node) 1041 1039 { 1040 + return true; 1041 + } 1042 + 1043 + static inline bool zalloc_cpumask_var(cpumask_var_t *mask, gfp_t flags) 1044 + { 1045 + cpumask_clear(*mask); 1046 + return true; 1047 + } 1048 + 1049 + static inline bool zalloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, 1050 + int node) 1051 + { 1052 + cpumask_clear(*mask); 1042 1053 return true; 1043 1054 } 1044 1055
+1 -1
kernel/sched_cpupri.c
··· 165 165 vec->count = 0; 166 166 if (bootmem) 167 167 alloc_bootmem_cpumask_var(&vec->mask); 168 - else if (!alloc_cpumask_var(&vec->mask, GFP_KERNEL)) 168 + else if (!zalloc_cpumask_var(&vec->mask, GFP_KERNEL)) 169 169 goto cleanup; 170 170 } 171 171
+1 -1
kernel/sched_rt.c
··· 1591 1591 unsigned int i; 1592 1592 1593 1593 for_each_possible_cpu(i) 1594 - alloc_cpumask_var_node(&per_cpu(local_cpu_mask, i), 1594 + zalloc_cpumask_var_node(&per_cpu(local_cpu_mask, i), 1595 1595 GFP_KERNEL, cpu_to_node(i)); 1596 1596 } 1597 1597 #endif /* CONFIG_SMP */
+1 -1
kernel/smp.c
··· 52 52 switch (action) { 53 53 case CPU_UP_PREPARE: 54 54 case CPU_UP_PREPARE_FROZEN: 55 - if (!alloc_cpumask_var_node(&cfd->cpumask, GFP_KERNEL, 55 + if (!zalloc_cpumask_var_node(&cfd->cpumask, GFP_KERNEL, 56 56 cpu_to_node(cpu))) 57 57 return NOTIFY_BAD; 58 58 break;
+12
lib/cpumask.c
··· 119 119 } 120 120 EXPORT_SYMBOL(alloc_cpumask_var_node); 121 121 122 + bool zalloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node) 123 + { 124 + return alloc_cpumask_var_node(mask, flags | __GFP_ZERO, node); 125 + } 126 + EXPORT_SYMBOL(zalloc_cpumask_var_node); 127 + 122 128 /** 123 129 * alloc_cpumask_var - allocate a struct cpumask 124 130 * @mask: pointer to cpumask_var_t where the cpumask is returned ··· 140 134 return alloc_cpumask_var_node(mask, flags, numa_node_id()); 141 135 } 142 136 EXPORT_SYMBOL(alloc_cpumask_var); 137 + 138 + bool zalloc_cpumask_var(cpumask_var_t *mask, gfp_t flags) 139 + { 140 + return alloc_cpumask_var(mask, flags | __GFP_ZERO); 141 + } 142 + EXPORT_SYMBOL(zalloc_cpumask_var); 143 143 144 144 /** 145 145 * alloc_bootmem_cpumask_var - allocate a struct cpumask from the bootmem arena.
+1 -1
virt/kvm/kvm_main.c
··· 2301 2301 2302 2302 bad_pfn = page_to_pfn(bad_page); 2303 2303 2304 - if (!alloc_cpumask_var(&cpus_hardware_enabled, GFP_KERNEL)) { 2304 + if (!zalloc_cpumask_var(&cpus_hardware_enabled, GFP_KERNEL)) { 2305 2305 r = -ENOMEM; 2306 2306 goto out_free_0; 2307 2307 }