Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

Merge git://git.kernel.org/pub/scm/linux/kernel/git/mingo/linux-2.6-sched

* git://git.kernel.org/pub/scm/linux/kernel/git/mingo/linux-2.6-sched:
  sched: cpu accounting controller (V2)

5 files changed: +150 -26
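The merged controller exposes a per-group usage file through the cgroup filesystem. As a hedged sketch of the userspace view, assuming the v1 cgroup interface of this era, the program below mounts a hierarchy with only the cpuacct subsystem, moves the calling process into a fresh group, and reads back the accumulated CPU time in nanoseconds. The /mnt mount point, the "demo" group name, and the busy loop are illustrative, not part of the patch.

/*
 * cpuacct_demo.c: illustrative userspace exercise of the cpuacct
 * controller. Build with: cc -o cpuacct_demo cpuacct_demo.c
 * Run as root; the paths used here are assumptions, not mandated.
 */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/mount.h>
#include <sys/stat.h>

int main(void)
{
	char buf[64];
	FILE *f;

	/* Mount a cgroup hierarchy exposing only the cpuacct subsystem. */
	if (mount("none", "/mnt", "cgroup", 0, "cpuacct") != 0)
		perror("mount (may already be mounted)");

	/* Create a child group; the kernel runs cpuacct_create() here. */
	mkdir("/mnt/demo", 0755);

	/* Attach this process; later scheduler ticks reach cpuacct_charge(). */
	f = fopen("/mnt/demo/tasks", "w");
	if (!f) {
		perror("tasks");
		return 1;
	}
	fprintf(f, "%d\n", getpid());
	fclose(f);

	/* Burn some CPU so there is something to account. */
	for (volatile long i = 0; i < 200000000L; i++)
		;

	/* cpuusage_read() sums the per-cpu counters; the value is in ns. */
	f = fopen("/mnt/demo/cpuacct.usage", "r");
	if (!f) {
		perror("cpuacct.usage");
		return 1;
	}
	if (fgets(buf, sizeof(buf), f))
		printf("group cpu usage: %s ns\n", strtok(buf, "\n"));
	fclose(f);
	return 0;
}

The cftype registered below is named "usage", but it appears to userspace as "cpuacct.usage" because the cgroup core prefixes a subsystem's control files with the subsystem name.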
include/linux/cgroup_subsys.h (+7)

@@ -30,3 +30,10 @@
 #endif
 
 /* */
+
+#ifdef CONFIG_CGROUP_CPUACCT
+SUBSYS(cpuacct)
+#endif
+
+/* */
+
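A single SUBSYS(cpuacct) line is enough to register the subsystem because cgroup_subsys.h is an X-macro list: each includer defines SUBSYS() before including it, expanding the same list into the subsys_id enum in one place and into the subsystem table in another. Below is a self-contained sketch of that pattern; the two-entry list and the name table are simplified stand-ins for the real headers, not kernel code.

#include <stdio.h>

/* Stand-in for cgroup_subsys.h: nothing but SUBSYS(name) invocations. */
#define CGROUP_SUBSYS_LIST \
	SUBSYS(cpu_cgroup) \
	SUBSYS(cpuacct)

/* First expansion: generate the subsys_id enum, as include/linux/cgroup.h
 * does by redefining SUBSYS() around its #include. */
#define SUBSYS(name) name##_subsys_id,
enum cgroup_subsys_id {
	CGROUP_SUBSYS_LIST
	CGROUP_SUBSYS_COUNT
};
#undef SUBSYS

/* Second expansion: a parallel table indexed by those ids; here plain
 * names instead of the struct cgroup_subsys pointers the kernel collects. */
#define SUBSYS(name) #name,
static const char *subsys_name[] = { CGROUP_SUBSYS_LIST };
#undef SUBSYS

int main(void)
{
	printf("cpuacct is subsystem %d of %d\n",
	       cpuacct_subsys_id, CGROUP_SUBSYS_COUNT);
	for (int i = 0; i < CGROUP_SUBSYS_COUNT; i++)
		printf("  id %d -> %s\n", i, subsys_name[i]);
	return 0;
}

Because both expansions walk the identical list, the enum value cpuacct_subsys_id and the table slot for cpuacct can never drift apart; this is why the diff only needs to touch the list once.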
init/Kconfig (+7)

@@ -354,6 +354,13 @@
 
 endchoice
 
+config CGROUP_CPUACCT
+	bool "Simple CPU accounting cgroup subsystem"
+	depends on CGROUPS
+	help
+	  Provides a simple Resource Controller for monitoring the
+	  total CPU consumed by the tasks in a cgroup
+
 config SYSFS_DEPRECATED
 	bool "Create deprecated sysfs files"
 	default y
kernel/sched.c (+129 -26)

@@ -854,6 +854,12 @@
 				   struct rq_iterator *iterator);
 #endif
 
+#ifdef CONFIG_CGROUP_CPUACCT
+static void cpuacct_charge(struct task_struct *tsk, u64 cputime);
+#else
+static inline void cpuacct_charge(struct task_struct *tsk, u64 cputime) {}
+#endif
+
 #include "sched_stats.h"
 #include "sched_idletask.c"
 #include "sched_fair.c"
@@ -7227,36 +7221,10 @@
 	return (u64) tg->shares;
 }
 
-static u64 cpu_usage_read(struct cgroup *cgrp, struct cftype *cft)
-{
-	struct task_group *tg = cgroup_tg(cgrp);
-	unsigned long flags;
-	u64 res = 0;
-	int i;
-
-	for_each_possible_cpu(i) {
-		/*
-		 * Lock to prevent races with updating 64-bit counters
-		 * on 32-bit arches.
-		 */
-		spin_lock_irqsave(&cpu_rq(i)->lock, flags);
-		res += tg->se[i]->sum_exec_runtime;
-		spin_unlock_irqrestore(&cpu_rq(i)->lock, flags);
-	}
-	/* Convert from ns to ms */
-	do_div(res, NSEC_PER_MSEC);
-
-	return res;
-}
-
 static struct cftype cpu_files[] = {
 	{
 		.name = "shares",
 		.read_uint = cpu_shares_read_uint,
 		.write_uint = cpu_shares_write_uint,
-	},
-	{
-		.name = "usage",
-		.read_uint = cpu_usage_read,
 	},
 };
@@ -7252,3 +7272,126 @@
 };
 
 #endif /* CONFIG_FAIR_CGROUP_SCHED */
+
+#ifdef CONFIG_CGROUP_CPUACCT
+
+/*
+ * CPU accounting code for task groups.
+ *
+ * Based on the work by Paul Menage (menage@google.com) and Balbir Singh
+ * (balbir@in.ibm.com).
+ */
+
+/* track cpu usage of a group of tasks */
+struct cpuacct {
+	struct cgroup_subsys_state css;
+	/* cpuusage holds pointer to a u64-type object on every cpu */
+	u64 *cpuusage;
+};
+
+struct cgroup_subsys cpuacct_subsys;
+
+/* return cpu accounting group corresponding to this container */
+static inline struct cpuacct *cgroup_ca(struct cgroup *cont)
+{
+	return container_of(cgroup_subsys_state(cont, cpuacct_subsys_id),
+			    struct cpuacct, css);
+}
+
+/* return cpu accounting group to which this task belongs */
+static inline struct cpuacct *task_ca(struct task_struct *tsk)
+{
+	return container_of(task_subsys_state(tsk, cpuacct_subsys_id),
+			    struct cpuacct, css);
+}
+
+/* create a new cpu accounting group */
+static struct cgroup_subsys_state *cpuacct_create(
+	struct cgroup_subsys *ss, struct cgroup *cont)
+{
+	struct cpuacct *ca = kzalloc(sizeof(*ca), GFP_KERNEL);
+
+	if (!ca)
+		return ERR_PTR(-ENOMEM);
+
+	ca->cpuusage = alloc_percpu(u64);
+	if (!ca->cpuusage) {
+		kfree(ca);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	return &ca->css;
+}
+
+/* destroy an existing cpu accounting group */
+static void cpuacct_destroy(struct cgroup_subsys *ss,
+			    struct cgroup *cont)
+{
+	struct cpuacct *ca = cgroup_ca(cont);
+
+	free_percpu(ca->cpuusage);
+	kfree(ca);
+}
+
+/* return total cpu usage (in nanoseconds) of a group */
+static u64 cpuusage_read(struct cgroup *cont, struct cftype *cft)
+{
+	struct cpuacct *ca = cgroup_ca(cont);
+	u64 totalcpuusage = 0;
+	int i;
+
+	for_each_possible_cpu(i) {
+		u64 *cpuusage = percpu_ptr(ca->cpuusage, i);
+
+		/*
+		 * Take rq->lock to make 64-bit addition safe on 32-bit
+		 * platforms.
+		 */
+		spin_lock_irq(&cpu_rq(i)->lock);
+		totalcpuusage += *cpuusage;
+		spin_unlock_irq(&cpu_rq(i)->lock);
+	}
+
+	return totalcpuusage;
+}
+
+static struct cftype files[] = {
+	{
+		.name = "usage",
+		.read_uint = cpuusage_read,
+	},
+};
+
+static int cpuacct_populate(struct cgroup_subsys *ss, struct cgroup *cont)
+{
+	return cgroup_add_files(cont, ss, files, ARRAY_SIZE(files));
+}
+
+/*
+ * charge this task's execution time to its accounting group.
+ *
+ * called with rq->lock held.
+ */
+static void cpuacct_charge(struct task_struct *tsk, u64 cputime)
+{
+	struct cpuacct *ca;
+
+	if (!cpuacct_subsys.active)
+		return;
+
+	ca = task_ca(tsk);
+	if (ca) {
+		u64 *cpuusage = percpu_ptr(ca->cpuusage, task_cpu(tsk));
+
+		*cpuusage += cputime;
+	}
+}
+
+struct cgroup_subsys cpuacct_subsys = {
+	.name = "cpuacct",
+	.create = cpuacct_create,
+	.destroy = cpuacct_destroy,
+	.populate = cpuacct_populate,
+	.subsys_id = cpuacct_subsys_id,
+};
+#endif /* CONFIG_CGROUP_CPUACCT */
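The accounting scheme above gives each CPU its own u64 slot: cpuacct_charge() is called with rq->lock already held and increments only the local CPU's slot, while cpuusage_read() takes each runqueue's lock in turn because an unlocked 64-bit load can tear on 32-bit platforms. The sketch below is a userspace analogue of that pattern, not kernel code: pthreads stand in for CPUs, a per-slot mutex stands in for rq->lock, and the thread count and increment are illustrative. Build with: cc -pthread -o percpu_demo percpu_demo.c

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

#define NCPU 4	/* illustrative; the kernel walks for_each_possible_cpu() */

static uint64_t cpuusage[NCPU];		/* one counter per "cpu", like ca->cpuusage */
static pthread_mutex_t lock[NCPU] = {	/* stand-in for each rq->lock */
	PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
	PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
};

/* Writer side, like cpuacct_charge(): the kernel hook already runs under
 * rq->lock; with no scheduler holding it for us, we take it explicitly. */
static void *worker(void *arg)
{
	int cpu = (int)(intptr_t)arg;

	for (int i = 0; i < 1000000; i++) {
		pthread_mutex_lock(&lock[cpu]);
		cpuusage[cpu] += 1000;	/* "charge" 1000 ns of runtime */
		pthread_mutex_unlock(&lock[cpu]);
	}
	return NULL;
}

int main(void)
{
	pthread_t tid[NCPU];
	uint64_t total = 0;

	for (int i = 0; i < NCPU; i++)
		pthread_create(&tid[i], NULL, worker, (void *)(intptr_t)i);
	for (int i = 0; i < NCPU; i++)
		pthread_join(tid[i], NULL);

	/* Reader side, like cpuusage_read(): sum every slot under its lock
	 * so a 32-bit machine cannot observe a half-updated u64. */
	for (int i = 0; i < NCPU; i++) {
		pthread_mutex_lock(&lock[i]);
		total += cpuusage[i];
		pthread_mutex_unlock(&lock[i]);
	}
	printf("total: %llu ns\n", (unsigned long long)total);
	return 0;
}

Writes stay contention-free in the common case because each CPU touches only its own slot; the lock is paid only on the rare read path, which is the same trade the patch makes by reusing rq->lock instead of adding a dedicated counter lock.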
kernel/sched_fair.c (+6)

@@ -351,6 +351,12 @@
 
 	__update_curr(cfs_rq, curr, delta_exec);
 	curr->exec_start = now;
+
+	if (entity_is_task(curr)) {
+		struct task_struct *curtask = task_of(curr);
+
+		cpuacct_charge(curtask, delta_exec);
+	}
 }
 
 static inline void
kernel/sched_rt.c (+1)

@@ -23,6 +23,7 @@
 
 	curr->se.sum_exec_runtime += delta_exec;
 	curr->se.exec_start = rq->clock;
+	cpuacct_charge(curr, delta_exec);
 }
 
 static void enqueue_task_rt(struct rq *rq, struct task_struct *p, int wakeup)