Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

Merge branch 'core-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip

* 'core-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
rcu: Teach RCU that idle task is not quiescent state at boot

+52 -5
+6
include/linux/rcuclassic.h
···
181 181   #define rcu_enter_nohz()  do { } while (0)
182 182   #define rcu_exit_nohz()   do { } while (0)
183 183
    184 + /* A context switch is a grace period for rcuclassic. */
    185 + static inline int rcu_blocking_is_gp(void)
    186 + {
    187 + 	return num_online_cpus() == 1;
    188 + }
    189 +
184 190   #endif /* __LINUX_RCUCLASSIC_H */
+4
include/linux/rcupdate.h
···
 52  52   	void (*func)(struct rcu_head *head);
 53  53   };
 54  54
     55 + /* Internal to kernel, but needed by rcupreempt.h. */
     56 + extern int rcu_scheduler_active;
     57 +
 55  58   #if defined(CONFIG_CLASSIC_RCU)
 56  59   #include <linux/rcuclassic.h>
 57  60   #elif defined(CONFIG_TREE_RCU)
···
268 265
269 266   /* Internal to kernel */
270 267   extern void rcu_init(void);
    268 + extern void rcu_scheduler_starting(void);
271 269   extern int rcu_needs_cpu(int cpu);
272 270
273 271   #endif /* __LINUX_RCUPDATE_H */
+15
include/linux/rcupreempt.h
···
142 142   #define rcu_exit_nohz()  do { } while (0)
143 143   #endif /* CONFIG_NO_HZ */
144 144
    145 + /*
    146 +  * A context switch is a grace period for rcupreempt synchronize_rcu()
    147 +  * only during early boot, before the scheduler has been initialized.
    148 +  * So, how the heck do we get a context switch?  Well, if the caller
    149 +  * invokes synchronize_rcu(), they are willing to accept a context
    150 +  * switch, so we simply pretend that one happened.
    151 +  *
    152 +  * After boot, there might be a blocked or preempted task in an RCU
    153 +  * read-side critical section, so we cannot then take the fastpath.
    154 +  */
    155 + static inline int rcu_blocking_is_gp(void)
    156 + {
    157 + 	return num_online_cpus() == 1 && !rcu_scheduler_active;
    158 + }
    159 +
145 160   #endif /* __LINUX_RCUPREEMPT_H */
+6
include/linux/rcutree.h
···
326 326   }
327 327   #endif /* CONFIG_NO_HZ */
328 328
    329 + /* A context switch is a grace period for rcutree. */
    330 + static inline int rcu_blocking_is_gp(void)
    331 + {
    332 + 	return num_online_cpus() == 1;
    333 + }
    334 +
329 335   #endif /* __LINUX_RCUTREE_H */
+2 -1
init/main.c
···
 97  97   extern void tc_init(void);
 98  98   #endif
 99  99
100     - enum system_states system_state;
    100 + enum system_states system_state __read_mostly;
101 101   EXPORT_SYMBOL(system_state);
102 102
103 103   /*
···
463 463   	 * at least once to get things moving:
464 464   	 */
465 465   	init_idle_bootup_task(current);
    466 + 	rcu_scheduler_starting();
466 467   	preempt_enable_no_resched();
467 468   	schedule();
468 469   	preempt_disable();
+2 -2
kernel/rcuclassic.c
···
679 679   void rcu_check_callbacks(int cpu, int user)
680 680   {
681 681   	if (user ||
682     - 	    (idle_cpu(cpu) && !in_softirq() &&
683     - 	     hardirq_count() <= (1 << HARDIRQ_SHIFT))) {
    682 + 	    (idle_cpu(cpu) && rcu_scheduler_active &&
    683 + 	     !in_softirq() && hardirq_count() <= (1 << HARDIRQ_SHIFT))) {
684 684
685 685   		/*
686 686   		 * Get here if this CPU took its interrupt from user
+12
kernel/rcupdate.c
···
 44  44   #include <linux/cpu.h>
 45  45   #include <linux/mutex.h>
 46  46   #include <linux/module.h>
     47 + #include <linux/kernel_stat.h>
 47  48
 48  49   enum rcu_barrier {
 49  50   	RCU_BARRIER_STD,
···
 56  55   static atomic_t rcu_barrier_cpu_count;
 57  56   static DEFINE_MUTEX(rcu_barrier_mutex);
 58  57   static struct completion rcu_barrier_completion;
     58 + int rcu_scheduler_active __read_mostly;
 59  59
 60  60   /*
 61  61    * Awaken the corresponding synchronize_rcu() instance now that a
···
 82  80   void synchronize_rcu(void)
 83  81   {
 84  82   	struct rcu_synchronize rcu;
     83 +
     84 + 	if (rcu_blocking_is_gp())
     85 + 		return;
     86 +
 85  87   	init_completion(&rcu.completion);
 86  88   	/* Will wake me after RCU finished. */
 87  89   	call_rcu(&rcu.head, wakeme_after_rcu);
···
181 175   	__rcu_init();
182 176   }
183 177
    178 + void rcu_scheduler_starting(void)
    179 + {
    180 + 	WARN_ON(num_online_cpus() != 1);
    181 + 	WARN_ON(nr_context_switches() > 0);
    182 + 	rcu_scheduler_active = 1;
    183 + }
+3
kernel/rcupreempt.c
···
1181 1181   {
1182 1182   	struct rcu_synchronize rcu;
1183 1183
     1184 + 	if (num_online_cpus() == 1)
     1185 + 		return;  /* blocking is gp if only one CPU! */
     1186 +
1184 1187   	init_completion(&rcu.completion);
1185 1188   	/* Will wake me after RCU finished. */
1186 1189   	call_rcu_sched(&rcu.head, wakeme_after_rcu);
+2 -2
kernel/rcutree.c
···
948 948   void rcu_check_callbacks(int cpu, int user)
949 949   {
950 950   	if (user ||
951     - 	    (idle_cpu(cpu) && !in_softirq() &&
952     - 	     hardirq_count() <= (1 << HARDIRQ_SHIFT))) {
    951 + 	    (idle_cpu(cpu) && rcu_scheduler_active &&
    952 + 	     !in_softirq() && hardirq_count() <= (1 << HARDIRQ_SHIFT))) {
953 953
954 954   		/*
955 955   		 * Get here if this CPU took its interrupt from user