/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_CGROUP_H
#define _LINUX_CGROUP_H
/*
 * cgroup interface
 *
 * Copyright (C) 2003 BULL SA
 * Copyright (C) 2004-2006 Silicon Graphics, Inc.
 *
 */

#include <linux/sched.h>
#include <linux/nodemask.h>
#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/cgroupstats.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/kernfs.h>
#include <linux/jump_label.h>
#include <linux/types.h>
#include <linux/notifier.h>
#include <linux/ns_common.h>
#include <linux/nsproxy.h>
#include <linux/user_namespace.h>
#include <linux/refcount.h>
#include <linux/kernel_stat.h>

#include <linux/cgroup-defs.h>
#include <linux/cgroup_namespace.h>

struct kernel_clone_args;

/*
 * All weight knobs on the default hierarchy should use the following min,
 * default and max values. The default value is the logarithmic center of
 * MIN and MAX and allows 100x to be expressed in both directions.
 */
#define CGROUP_WEIGHT_MIN		1
#define CGROUP_WEIGHT_DFL		100
#define CGROUP_WEIGHT_MAX		10000
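
/*
 * Illustration of the "100x in both directions" note above:
 * CGROUP_WEIGHT_DFL / CGROUP_WEIGHT_MIN == 100 and
 * CGROUP_WEIGHT_MAX / CGROUP_WEIGHT_DFL == 100, i.e. a weight can be scaled
 * down or up by a factor of 100 relative to the default.
 */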

#ifdef CONFIG_CGROUPS

/*
 * When CGROUP_SUBSYS_COUNT == 0, arrays sized by it are zero-sized and code
 * that would index them confuses the compiler and generates warnings. Guard
 * such accesses with this constant expression.
 */
#define CGROUP_HAS_SUBSYS_CONFIG	(CGROUP_SUBSYS_COUNT > 0)

enum css_task_iter_flags {
	CSS_TASK_ITER_PROCS    = (1U << 0),  /* walk only threadgroup leaders */
	CSS_TASK_ITER_THREADED = (1U << 1),  /* walk all threaded css_sets in the domain */
	CSS_TASK_ITER_SKIPPED  = (1U << 16), /* internal flags */
};

/* a css_task_iter should be treated as an opaque object */
struct css_task_iter {
	struct cgroup_subsys		*ss;
	unsigned int			flags;

	struct list_head		*cset_pos;
	struct list_head		*cset_head;

	struct list_head		*tcset_pos;
	struct list_head		*tcset_head;

	struct list_head		*task_pos;

	struct list_head		*cur_tasks_head;
	struct css_set			*cur_cset;
	struct css_set			*cur_dcset;
	struct task_struct		*cur_task;
	struct list_head		iters_node;	/* css_set->task_iters */
};

enum cgroup_lifetime_events {
	CGROUP_LIFETIME_ONLINE,
	CGROUP_LIFETIME_OFFLINE,
};

extern struct file_system_type cgroup_fs_type;
extern struct cgroup_root cgrp_dfl_root;
extern struct css_set init_css_set;
extern struct mutex cgroup_mutex;
extern spinlock_t css_set_lock;
extern struct blocking_notifier_head cgroup_lifetime_notifier;

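/*
 * Illustrative sketch (not part of the upstream API documentation): clients
 * can subscribe to cgroup online/offline events through the notifier chain
 * above. The callback name is hypothetical and the assumption that @data
 * carries the struct cgroup should be verified against the call sites.
 *
 *	static int my_cgroup_event(struct notifier_block *nb,
 *				   unsigned long action, void *data)
 *	{
 *		struct cgroup *cgrp = data;
 *
 *		if (action == CGROUP_LIFETIME_ONLINE)
 *			pr_info("cgroup %llu online\n", cgroup_id(cgrp));
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block my_nb = {
 *		.notifier_call = my_cgroup_event,
 *	};
 *
 *	blocking_notifier_chain_register(&cgroup_lifetime_notifier, &my_nb);
 */
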
#define SUBSYS(_x) extern struct cgroup_subsys _x ## _cgrp_subsys;
#include <linux/cgroup_subsys.h>
#undef SUBSYS

#define SUBSYS(_x)							\
	extern struct static_key_true _x ## _cgrp_subsys_enabled_key;	\
	extern struct static_key_true _x ## _cgrp_subsys_on_dfl_key;
#include <linux/cgroup_subsys.h>
#undef SUBSYS

/**
 * cgroup_subsys_enabled - fast test on whether a subsys is enabled
 * @ss: subsystem in question
 */
#define cgroup_subsys_enabled(ss)					\
	static_branch_likely(&ss ## _enabled_key)

/**
 * cgroup_subsys_on_dfl - fast test on whether a subsys is on default hierarchy
 * @ss: subsystem in question
 */
#define cgroup_subsys_on_dfl(ss)					\
	static_branch_likely(&ss ## _on_dfl_key)

bool cgroup_on_dfl(const struct cgroup *cgrp);

bool css_has_online_children(struct cgroup_subsys_state *css);
struct cgroup_subsys_state *css_from_id(int id, struct cgroup_subsys *ss);
struct cgroup_subsys_state *cgroup_e_css(struct cgroup *cgroup,
					 struct cgroup_subsys *ss);
struct cgroup_subsys_state *cgroup_get_e_css(struct cgroup *cgroup,
					     struct cgroup_subsys *ss);
struct cgroup_subsys_state *css_tryget_online_from_dir(struct dentry *dentry,
						       struct cgroup_subsys *ss);

struct cgroup *cgroup_get_from_path(const char *path);
struct cgroup *cgroup_get_from_fd(int fd);
struct cgroup *cgroup_v1v2_get_from_fd(int fd);

int cgroup_attach_task_all(struct task_struct *from, struct task_struct *t);
int cgroup_transfer_tasks(struct cgroup *to, struct cgroup *from);

int cgroup_add_dfl_cftypes(struct cgroup_subsys *ss, struct cftype *cfts);
int cgroup_add_legacy_cftypes(struct cgroup_subsys *ss, struct cftype *cfts);
int cgroup_add_cftypes(struct cgroup_subsys *ss, struct cftype *cfts);
int cgroup_rm_cftypes(struct cftype *cfts);
void cgroup_file_notify(struct cgroup_file *cfile);
void cgroup_file_show(struct cgroup_file *cfile, bool show);

int cgroupstats_build(struct cgroupstats *stats, struct dentry *dentry);
int proc_cgroup_show(struct seq_file *m, struct pid_namespace *ns,
		     struct pid *pid, struct task_struct *tsk);

void cgroup_fork(struct task_struct *p);
extern int cgroup_can_fork(struct task_struct *p,
			   struct kernel_clone_args *kargs);
extern void cgroup_cancel_fork(struct task_struct *p,
			       struct kernel_clone_args *kargs);
extern void cgroup_post_fork(struct task_struct *p,
			     struct kernel_clone_args *kargs);
void cgroup_task_exit(struct task_struct *p);
void cgroup_task_dead(struct task_struct *p);
void cgroup_task_release(struct task_struct *p);
void cgroup_task_free(struct task_struct *p);

int cgroup_init_early(void);
int cgroup_init(void);

int cgroup_parse_float(const char *input, unsigned dec_shift, s64 *v);

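/*
 * Illustrative sketch of cgroup_parse_float() semantics: on success the
 * parsed value is scaled by 10^@dec_shift, e.g. with two fractional digits
 *
 *	s64 v;
 *
 *	cgroup_parse_float("12.34", 2, &v);
 *
 * leaves v == 1234 (the return value is 0 on success and a negative errno
 * otherwise).
 */
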
/*
 * Iteration helpers and macros.
 */

struct cgroup_subsys_state *css_next_child(struct cgroup_subsys_state *pos,
					   struct cgroup_subsys_state *parent);
struct cgroup_subsys_state *css_next_descendant_pre(struct cgroup_subsys_state *pos,
						    struct cgroup_subsys_state *css);
struct cgroup_subsys_state *css_rightmost_descendant(struct cgroup_subsys_state *pos);
struct cgroup_subsys_state *css_next_descendant_post(struct cgroup_subsys_state *pos,
						     struct cgroup_subsys_state *css);

struct task_struct *cgroup_taskset_first(struct cgroup_taskset *tset,
					 struct cgroup_subsys_state **dst_cssp);
struct task_struct *cgroup_taskset_next(struct cgroup_taskset *tset,
					struct cgroup_subsys_state **dst_cssp);

void css_task_iter_start(struct cgroup_subsys_state *css, unsigned int flags,
			 struct css_task_iter *it);
struct task_struct *css_task_iter_next(struct css_task_iter *it);
void css_task_iter_end(struct css_task_iter *it);

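/*
 * Illustrative sketch (hypothetical caller): walking every thread-group
 * leader attached to a css with the opaque iterator declared above.
 *
 *	struct css_task_iter it;
 *	struct task_struct *task;
 *
 *	css_task_iter_start(css, CSS_TASK_ITER_PROCS, &it);
 *	while ((task = css_task_iter_next(&it)))
 *		pr_info("leader %d\n", task_pid_nr(task));
 *	css_task_iter_end(&it);
 */
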
/**
 * css_for_each_child - iterate through children of a css
 * @pos: the css * to use as the loop cursor
 * @parent: css whose children to walk
 *
 * Walk @parent's children. Must be called under rcu_read_lock().
 *
 * If a subsystem synchronizes ->css_online() and the start of iteration, a
 * css which finished ->css_online() is guaranteed to be visible in the
 * future iterations and will stay visible until the last reference is put.
 * A css which hasn't finished ->css_online() or already finished
 * ->css_offline() may show up during traversal. It's each subsystem's
 * responsibility to synchronize against on/offlining.
 *
 * It is allowed to temporarily drop RCU read lock during iteration. The
 * caller is responsible for ensuring that @pos remains accessible until
 * the start of the next iteration by, for example, bumping the css refcnt.
 */
#define css_for_each_child(pos, parent)					\
	for ((pos) = css_next_child(NULL, (parent)); (pos);		\
	     (pos) = css_next_child((pos), (parent)))

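/*
 * Illustrative sketch (hypothetical caller): counting the children of a css;
 * the iteration itself must run under rcu_read_lock() as noted above.
 *
 *	struct cgroup_subsys_state *child;
 *	int nr = 0;
 *
 *	rcu_read_lock();
 *	css_for_each_child(child, parent_css)
 *		nr++;
 *	rcu_read_unlock();
 */
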
/**
 * css_for_each_descendant_pre - pre-order walk of a css's descendants
 * @pos: the css * to use as the loop cursor
 * @root: css whose descendants to walk
 *
 * Walk @root's descendants. @root is included in the iteration and the
 * first node to be visited. Must be called under rcu_read_lock().
 *
 * If a subsystem synchronizes ->css_online() and the start of iteration, a
 * css which finished ->css_online() is guaranteed to be visible in the
 * future iterations and will stay visible until the last reference is put.
 * A css which hasn't finished ->css_online() or already finished
 * ->css_offline() may show up during traversal. It's each subsystem's
 * responsibility to synchronize against on/offlining.
 *
 * For example, the following guarantees that a descendant can't escape
 * state updates of its ancestors.
 *
 * my_online(@css)
 * {
 *	Lock @css's parent and @css;
 *	Inherit state from the parent;
 *	Unlock both.
 * }
 *
 * my_update_state(@css)
 * {
 *	css_for_each_descendant_pre(@pos, @css) {
 *		Lock @pos;
 *		if (@pos == @css)
 *			Update @css's state;
 *		else
 *			Verify @pos is alive and inherit state from its parent;
 *		Unlock @pos;
 *	}
 * }
 *
 * As long as the inheriting step, including checking the parent state, is
 * enclosed inside @pos locking, double-locking the parent isn't necessary
 * while inheriting. The state update to the parent is guaranteed to be
 * visible by walking order and, as long as inheriting operations to the
 * same @pos are atomic to each other, multiple updates racing each other
 * still result in the correct state. It's guaranteed that at least one
 * inheritance happens for any css after the latest update to its parent.
 *
 * If checking parent's state requires locking the parent, each inheriting
 * iteration should lock and unlock both @pos->parent and @pos.
 *
 * Alternatively, a subsystem may choose to use a single global lock to
 * synchronize ->css_online() and ->css_offline() against tree-walking
 * operations.
 *
 * It is allowed to temporarily drop RCU read lock during iteration. The
 * caller is responsible for ensuring that @pos remains accessible until
 * the start of the next iteration by, for example, bumping the css refcnt.
 */
#define css_for_each_descendant_pre(pos, css)				\
	for ((pos) = css_next_descendant_pre(NULL, (css)); (pos);	\
	     (pos) = css_next_descendant_pre((pos), (css)))

/**
 * css_for_each_descendant_post - post-order walk of a css's descendants
 * @pos: the css * to use as the loop cursor
 * @css: css whose descendants to walk
 *
 * Similar to css_for_each_descendant_pre() but performs post-order
 * traversal instead. @css is included in the iteration and is the last
 * node to be visited.
 *
 * If a subsystem synchronizes ->css_online() and the start of iteration, a
 * css which finished ->css_online() is guaranteed to be visible in the
 * future iterations and will stay visible until the last reference is put.
 * A css which hasn't finished ->css_online() or already finished
 * ->css_offline() may show up during traversal. It's each subsystem's
 * responsibility to synchronize against on/offlining.
 *
 * Note that the visibility guarantee example described for the pre-order
 * walk does not apply in the same way to post-order walks.
 */
#define css_for_each_descendant_post(pos, css)				\
	for ((pos) = css_next_descendant_post(NULL, (css)); (pos);	\
	     (pos) = css_next_descendant_post((pos), (css)))

/* iterate over child cgrps, lock should be held throughout iteration */
#define cgroup_for_each_live_child(child, cgrp)				\
	list_for_each_entry((child), &(cgrp)->self.children, self.sibling) \
		if (({ lockdep_assert_held(&cgroup_mutex);		\
		       cgroup_is_dead(child); }))			\
			;						\
		else

/* walk live descendants in pre order */
#define cgroup_for_each_live_descendant_pre(dsct, d_css, cgrp)		\
	css_for_each_descendant_pre((d_css), cgroup_css((cgrp), NULL))	\
		if (({ lockdep_assert_held(&cgroup_mutex);		\
		       (dsct) = (d_css)->cgroup;			\
		       cgroup_is_dead(dsct); }))			\
			;						\
		else

/* walk live descendants in postorder */
#define cgroup_for_each_live_descendant_post(dsct, d_css, cgrp)	\
	css_for_each_descendant_post((d_css), cgroup_css((cgrp), NULL)) \
		if (({ lockdep_assert_held(&cgroup_mutex);		\
		       (dsct) = (d_css)->cgroup;			\
		       cgroup_is_dead(dsct); }))			\
			;						\
		else

/**
 * cgroup_taskset_for_each - iterate cgroup_taskset
 * @task: the loop cursor
 * @dst_css: the destination css
 * @tset: taskset to iterate
 *
 * @tset may contain multiple tasks and they may belong to multiple
 * processes.
 *
 * On the v2 hierarchy, there may be tasks from multiple processes and they
 * may not share the source or destination csses.
 *
 * On traditional hierarchies, when there are multiple tasks in @tset, if a
 * task of a process is in @tset, all tasks of the process are in @tset.
 * Also, all are guaranteed to share the same source and destination csses.
 *
 * Iteration is not in any specific order.
 */
#define cgroup_taskset_for_each(task, dst_css, tset)			\
	for ((task) = cgroup_taskset_first((tset), &(dst_css));	\
	     (task);							\
	     (task) = cgroup_taskset_next((tset), &(dst_css)))

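/*
 * Illustrative sketch (hypothetical subsystem callback): a typical
 * ->attach() implementation walks the taskset this way.
 *
 *	static void my_attach(struct cgroup_taskset *tset)
 *	{
 *		struct task_struct *task;
 *		struct cgroup_subsys_state *css;
 *
 *		cgroup_taskset_for_each(task, css, tset)
 *			pr_info("moving %d to %s\n", task_pid_nr(task),
 *				css->cgroup->kn->name);
 *	}
 */
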
/**
 * cgroup_taskset_for_each_leader - iterate group leaders in a cgroup_taskset
 * @leader: the loop cursor
 * @dst_css: the destination css
 * @tset: taskset to iterate
 *
 * Iterate threadgroup leaders of @tset. For single-task migrations, @tset
 * may not contain any.
 */
#define cgroup_taskset_for_each_leader(leader, dst_css, tset)		\
	for ((leader) = cgroup_taskset_first((tset), &(dst_css));	\
	     (leader);							\
	     (leader) = cgroup_taskset_next((tset), &(dst_css)))	\
		if ((leader) != (leader)->group_leader)			\
			;						\
		else

/*
 * Inline functions.
 */

#ifdef CONFIG_DEBUG_CGROUP_REF
void css_get(struct cgroup_subsys_state *css);
void css_get_many(struct cgroup_subsys_state *css, unsigned int n);
bool css_tryget(struct cgroup_subsys_state *css);
bool css_tryget_online(struct cgroup_subsys_state *css);
void css_put(struct cgroup_subsys_state *css);
void css_put_many(struct cgroup_subsys_state *css, unsigned int n);
#else
#define CGROUP_REF_FN_ATTRS	static inline
#define CGROUP_REF_EXPORT(fn)
#include <linux/cgroup_refcnt.h>
#endif

static inline u64 cgroup_id(const struct cgroup *cgrp)
{
	return cgrp->kn->id;
}

/**
 * cgroup_css - obtain a cgroup's css for the specified subsystem
 * @cgrp: the cgroup of interest
 * @ss: the subsystem of interest (%NULL returns @cgrp->self)
 *
 * Return @cgrp's css (cgroup_subsys_state) associated with @ss. This
 * function must be called either under cgroup_mutex or rcu_read_lock() and
 * the caller is responsible for pinning the returned css if it wants to
 * keep accessing it outside the said locks. This function may return
 * %NULL if @cgrp doesn't have @ss enabled.
 */
static inline struct cgroup_subsys_state *cgroup_css(struct cgroup *cgrp,
						     struct cgroup_subsys *ss)
{
	if (CGROUP_HAS_SUBSYS_CONFIG && ss)
		return rcu_dereference_check(cgrp->subsys[ss->id],
					lockdep_is_held(&cgroup_mutex));
	else
		return &cgrp->self;
}

/**
 * css_is_dying - test whether the specified css is dying
 * @css: target css
 *
 * Test whether @css is in the process of offlining or already offline. In
 * most cases, ->css_online() and ->css_offline() callbacks should be
 * enough; however, the actual offline operations are RCU delayed and this
 * test returns %true also when @css is scheduled to be offlined.
 *
 * This is useful, for example, when the use case requires synchronous
 * behavior with respect to cgroup removal. cgroup removal schedules css
 * offlining but the css can seem alive while the operation is being
 * delayed. If the delay affects user visible semantics, this test can be
 * used to resolve the situation.
 */
static inline bool css_is_dying(struct cgroup_subsys_state *css)
{
	return css->flags & CSS_DYING;
}

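/*
 * Illustrative sketch (hypothetical caller): reject an operation that must
 * be synchronous with cgroup removal even though the css still looks online.
 *
 *	if (css_is_dying(css))
 *		return -ENODEV;
 */
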
static inline bool css_is_online(struct cgroup_subsys_state *css)
{
	return css->flags & CSS_ONLINE;
}

static inline bool css_is_self(struct cgroup_subsys_state *css)
{
	if (css == &css->cgroup->self) {
		/* cgroup::self should not have subsystem association */
		WARN_ON(css->ss != NULL);
		return true;
	}

	return false;
}

static inline bool cgroup_is_dead(const struct cgroup *cgrp)
{
	return !(cgrp->self.flags & CSS_ONLINE);
}

static inline void cgroup_get(struct cgroup *cgrp)
{
	css_get(&cgrp->self);
}

static inline bool cgroup_tryget(struct cgroup *cgrp)
{
	return css_tryget(&cgrp->self);
}

static inline void cgroup_put(struct cgroup *cgrp)
{
	css_put(&cgrp->self);
}

static inline void cgroup_lock(void)
{
	mutex_lock(&cgroup_mutex);
}

static inline void cgroup_unlock(void)
{
	mutex_unlock(&cgroup_mutex);
}

/**
 * task_css_set_check - obtain a task's css_set with extra access conditions
 * @task: the task to obtain css_set for
 * @__c: extra condition expression to be passed to rcu_dereference_check()
 *
 * A task's css_set is RCU protected, initialized and exited while holding
 * task_lock(), and can only be modified while holding both cgroup_mutex
 * and task_lock() while the task is alive. This macro verifies that the
 * caller is inside proper critical section and returns @task's css_set.
 *
 * The caller can also specify additional allowed conditions via @__c, such
 * as locks used during the cgroup_subsys::attach() methods.
 */
#ifdef CONFIG_PROVE_RCU
#define task_css_set_check(task, __c)					\
	rcu_dereference_check((task)->cgroups,				\
		rcu_read_lock_sched_held() ||				\
		lockdep_is_held(&cgroup_mutex) ||			\
		lockdep_is_held(&css_set_lock) ||			\
		((task)->flags & PF_EXITING) || (__c))
#else
#define task_css_set_check(task, __c)					\
	rcu_dereference((task)->cgroups)
#endif

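/*
 * Illustrative sketch (hypothetical lock): a subsystem that pins the css_set
 * with its own lock during ->attach() can tell lockdep about it via @__c.
 *
 *	struct css_set *cset;
 *
 *	cset = task_css_set_check(task, lockdep_is_held(&my_subsys_lock));
 */
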
/**
 * task_css_check - obtain css for (task, subsys) w/ extra access conds
 * @task: the target task
 * @subsys_id: the target subsystem ID
 * @__c: extra condition expression to be passed to rcu_dereference_check()
 *
 * Return the cgroup_subsys_state for the (@task, @subsys_id) pair. The
 * synchronization rules are the same as task_css_set_check().
 */
#define task_css_check(task, subsys_id, __c)				\
	task_css_set_check((task), (__c))->subsys[(subsys_id)]

/**
 * task_css_set - obtain a task's css_set
 * @task: the task to obtain css_set for
 *
 * See task_css_set_check().
 */
static inline struct css_set *task_css_set(struct task_struct *task)
{
	return task_css_set_check(task, false);
}

/**
 * task_css - obtain css for (task, subsys)
 * @task: the target task
 * @subsys_id: the target subsystem ID
 *
 * See task_css_check().
 */
static inline struct cgroup_subsys_state *task_css(struct task_struct *task,
						   int subsys_id)
{
	return task_css_check(task, subsys_id, false);
}

/**
 * task_get_css - find and get the css for (task, subsys)
 * @task: the target task
 * @subsys_id: the target subsystem ID
 *
 * Find the css for the (@task, @subsys_id) combination, increment a
 * reference on and return it. This function is guaranteed to return a
 * valid css. The returned css may already have been offlined.
 */
static inline struct cgroup_subsys_state *
task_get_css(struct task_struct *task, int subsys_id)
{
	struct cgroup_subsys_state *css;

	rcu_read_lock();
	while (true) {
		css = task_css(task, subsys_id);
		/*
		 * Can't use css_tryget_online() here. A task which has
		 * PF_EXITING set may stay associated with an offline css.
		 * If such task calls this function, css_tryget_online()
		 * will keep failing.
		 */
		if (likely(css_tryget(css)))
			break;
		cpu_relax();
	}
	rcu_read_unlock();
	return css;
}

/**
 * task_css_is_root - test whether a task belongs to the root css
 * @task: the target task
 * @subsys_id: the target subsystem ID
 *
 * Test whether @task belongs to the root css on the specified subsystem.
 * May be invoked in any context.
 */
static inline bool task_css_is_root(struct task_struct *task, int subsys_id)
{
	return task_css_check(task, subsys_id, true) ==
		init_css_set.subsys[subsys_id];
}

static inline struct cgroup *task_cgroup(struct task_struct *task,
					 int subsys_id)
{
	return task_css(task, subsys_id)->cgroup;
}

static inline struct cgroup *task_dfl_cgroup(struct task_struct *task)
{
	return task_css_set(task)->dfl_cgrp;
}

static inline struct cgroup *cgroup_parent(struct cgroup *cgrp)
{
	struct cgroup_subsys_state *parent_css = cgrp->self.parent;

	if (parent_css)
		return container_of(parent_css, struct cgroup, self);
	return NULL;
}

/**
 * cgroup_is_descendant - test ancestry
 * @cgrp: the cgroup to be tested
 * @ancestor: possible ancestor of @cgrp
 *
 * Test whether @cgrp is a descendant of @ancestor. It also returns %true
 * if @cgrp == @ancestor. This function is safe to call as long as @cgrp
 * and @ancestor are accessible.
 */
static inline bool cgroup_is_descendant(struct cgroup *cgrp,
					struct cgroup *ancestor)
{
	if (cgrp->root != ancestor->root || cgrp->level < ancestor->level)
		return false;
	return cgrp->ancestors[ancestor->level] == ancestor;
}

/**
 * cgroup_ancestor - find ancestor of cgroup
 * @cgrp: cgroup to find ancestor of
 * @ancestor_level: level of ancestor to find starting from root
 *
 * Find ancestor of cgroup at specified level starting from root if it exists
 * and return pointer to it. Return NULL if @cgrp doesn't have ancestor at
 * @ancestor_level.
 *
 * This function is safe to call as long as @cgrp is accessible.
 */
static inline struct cgroup *cgroup_ancestor(struct cgroup *cgrp,
					     int ancestor_level)
{
	if (ancestor_level < 0 || ancestor_level > cgrp->level)
		return NULL;
	return cgrp->ancestors[ancestor_level];
}

/**
 * task_under_cgroup_hierarchy - test task's membership of cgroup ancestry
 * @task: the task to be tested
 * @ancestor: possible ancestor of @task's cgroup
 *
 * Tests whether @task's default cgroup hierarchy is a descendant of @ancestor.
 * It follows all the same rules as cgroup_is_descendant, and only applies
 * to the default hierarchy.
 */
static inline bool task_under_cgroup_hierarchy(struct task_struct *task,
					       struct cgroup *ancestor)
{
	struct css_set *cset = task_css_set(task);

	return cgroup_is_descendant(cset->dfl_cgrp, ancestor);
}

/* no synchronization, the result can only be used as a hint */
static inline bool cgroup_is_populated(struct cgroup *cgrp)
{
	return cgrp->nr_populated_csets + cgrp->nr_populated_domain_children +
		cgrp->nr_populated_threaded_children;
}

/* returns ino associated with a cgroup */
static inline ino_t cgroup_ino(struct cgroup *cgrp)
{
	return kernfs_ino(cgrp->kn);
}

/* cft/css accessors for cftype->write() operation */
static inline struct cftype *of_cft(struct kernfs_open_file *of)
{
	return of->kn->priv;
}

struct cgroup_subsys_state *of_css(struct kernfs_open_file *of);

/* cft/css accessors for cftype->seq_*() operations */
static inline struct cftype *seq_cft(struct seq_file *seq)
{
	return of_cft(seq->private);
}

static inline struct cgroup_subsys_state *seq_css(struct seq_file *seq)
{
	return of_css(seq->private);
}

/*
 * Name / path handling functions. All are thin wrappers around the kernfs
 * counterparts and can be called under any context.
 */

static inline int cgroup_name(struct cgroup *cgrp, char *buf, size_t buflen)
{
	return kernfs_name(cgrp->kn, buf, buflen);
}

static inline int cgroup_path(struct cgroup *cgrp, char *buf, size_t buflen)
{
	return kernfs_path(cgrp->kn, buf, buflen);
}

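/*
 * Illustrative sketch (hypothetical caller): both helpers fill a
 * caller-provided buffer and return a length like their kernfs counterparts.
 *
 *	char buf[PATH_MAX];
 *
 *	if (cgroup_path(cgrp, buf, sizeof(buf)) >= 0)
 *		pr_info("cgroup path: %s\n", buf);
 */
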
static inline void pr_cont_cgroup_name(struct cgroup *cgrp)
{
	pr_cont_kernfs_name(cgrp->kn);
}

static inline void pr_cont_cgroup_path(struct cgroup *cgrp)
{
	pr_cont_kernfs_path(cgrp->kn);
}

bool cgroup_psi_enabled(void);

static inline void cgroup_init_kthreadd(void)
{
	/*
	 * kthreadd is inherited by all kthreads, keep it in the root so
	 * that the new kthreads are guaranteed to stay in the root until
	 * initialization is finished.
	 */
	current->no_cgroup_migration = 1;
}

static inline void cgroup_kthread_ready(void)
{
	/*
	 * This kthread finished initialization. The creator should have
	 * set PF_NO_SETAFFINITY if this kthread should stay in the root.
	 */
	current->no_cgroup_migration = 0;
}

void cgroup_path_from_kernfs_id(u64 id, char *buf, size_t buflen);
struct cgroup *__cgroup_get_from_id(u64 id);
struct cgroup *cgroup_get_from_id(u64 id);
#else /* !CONFIG_CGROUPS */

struct cgroup_subsys_state;
struct cgroup;

static inline u64 cgroup_id(const struct cgroup *cgrp) { return 1; }
static inline void css_get(struct cgroup_subsys_state *css) {}
static inline void css_put(struct cgroup_subsys_state *css) {}
static inline void cgroup_lock(void) {}
static inline void cgroup_unlock(void) {}
static inline int cgroup_attach_task_all(struct task_struct *from,
					 struct task_struct *t) { return 0; }
static inline int cgroupstats_build(struct cgroupstats *stats,
				    struct dentry *dentry) { return -EINVAL; }

static inline void cgroup_fork(struct task_struct *p) {}
static inline int cgroup_can_fork(struct task_struct *p,
				  struct kernel_clone_args *kargs) { return 0; }
static inline void cgroup_cancel_fork(struct task_struct *p,
				      struct kernel_clone_args *kargs) {}
static inline void cgroup_post_fork(struct task_struct *p,
				    struct kernel_clone_args *kargs) {}
static inline void cgroup_task_exit(struct task_struct *p) {}
static inline void cgroup_task_dead(struct task_struct *p) {}
static inline void cgroup_task_release(struct task_struct *p) {}
static inline void cgroup_task_free(struct task_struct *p) {}

static inline int cgroup_init_early(void) { return 0; }
static inline int cgroup_init(void) { return 0; }
static inline void cgroup_init_kthreadd(void) {}
static inline void cgroup_kthread_ready(void) {}

static inline struct cgroup *cgroup_parent(struct cgroup *cgrp)
{
	return NULL;
}

static inline bool cgroup_psi_enabled(void)
{
	return false;
}

static inline bool task_under_cgroup_hierarchy(struct task_struct *task,
					       struct cgroup *ancestor)
{
	return true;
}

static inline void cgroup_path_from_kernfs_id(u64 id, char *buf, size_t buflen)
{}
#endif /* !CONFIG_CGROUPS */

#ifdef CONFIG_CGROUPS
/*
 * cgroup scalable recursive statistics.
 */
void css_rstat_updated(struct cgroup_subsys_state *css, int cpu);
void css_rstat_flush(struct cgroup_subsys_state *css);

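/*
 * Illustrative sketch (hypothetical subsystem): on the hot path, a writer
 * marks a css as having pending per-cpu deltas; before reading aggregated
 * state (e.g. when a stat file is shown), a reader flushes them.
 *
 *	css_rstat_updated(css, smp_processor_id());
 *	...
 *	css_rstat_flush(css);
 */
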
/*
 * Basic resource stats.
 */
#ifdef CONFIG_CGROUP_CPUACCT
void cpuacct_charge(struct task_struct *tsk, u64 cputime);
void cpuacct_account_field(struct task_struct *tsk, int index, u64 val);
#else
static inline void cpuacct_charge(struct task_struct *tsk, u64 cputime) {}
static inline void cpuacct_account_field(struct task_struct *tsk, int index,
					 u64 val) {}
#endif

void __cgroup_account_cputime(struct cgroup *cgrp, u64 delta_exec);
void __cgroup_account_cputime_field(struct cgroup *cgrp,
				    enum cpu_usage_stat index, u64 delta_exec);

static inline void cgroup_account_cputime(struct task_struct *task,
					  u64 delta_exec)
{
	struct cgroup *cgrp;

	cpuacct_charge(task, delta_exec);

	cgrp = task_dfl_cgroup(task);
	if (cgroup_parent(cgrp))
		__cgroup_account_cputime(cgrp, delta_exec);
}

static inline void cgroup_account_cputime_field(struct task_struct *task,
						enum cpu_usage_stat index,
						u64 delta_exec)
{
	struct cgroup *cgrp;

	cpuacct_account_field(task, index, delta_exec);

	cgrp = task_dfl_cgroup(task);
	if (cgroup_parent(cgrp))
		__cgroup_account_cputime_field(cgrp, index, delta_exec);
}

#else /* CONFIG_CGROUPS */

static inline void cgroup_account_cputime(struct task_struct *task,
					  u64 delta_exec) {}
static inline void cgroup_account_cputime_field(struct task_struct *task,
						enum cpu_usage_stat index,
						u64 delta_exec) {}

#endif /* CONFIG_CGROUPS */

/*
 * sock->sk_cgrp_data handling. For more info, see sock_cgroup_data
 * definition in cgroup-defs.h.
 */
#ifdef CONFIG_SOCK_CGROUP_DATA

void cgroup_sk_alloc(struct sock_cgroup_data *skcd);
void cgroup_sk_clone(struct sock_cgroup_data *skcd);
void cgroup_sk_free(struct sock_cgroup_data *skcd);

static inline struct cgroup *sock_cgroup_ptr(struct sock_cgroup_data *skcd)
{
	return skcd->cgroup;
}

#else /* CONFIG_SOCK_CGROUP_DATA */

static inline void cgroup_sk_alloc(struct sock_cgroup_data *skcd) {}
static inline void cgroup_sk_clone(struct sock_cgroup_data *skcd) {}
static inline void cgroup_sk_free(struct sock_cgroup_data *skcd) {}

#endif /* CONFIG_SOCK_CGROUP_DATA */

#ifdef CONFIG_CGROUPS

void cgroup_enter_frozen(void);
void cgroup_leave_frozen(bool always_leave);
void cgroup_update_frozen(struct cgroup *cgrp);
void cgroup_freeze(struct cgroup *cgrp, bool freeze);
void cgroup_freezer_migrate_task(struct task_struct *task, struct cgroup *src,
				 struct cgroup *dst);

static inline bool cgroup_task_frozen(struct task_struct *task)
{
	return task->frozen;
}

#else /* !CONFIG_CGROUPS */

static inline void cgroup_enter_frozen(void) { }
static inline void cgroup_leave_frozen(bool always_leave) { }
static inline bool cgroup_task_frozen(struct task_struct *task)
{
	return false;
}

#endif /* !CONFIG_CGROUPS */

#ifdef CONFIG_CGROUP_BPF
static inline void cgroup_bpf_get(struct cgroup *cgrp)
{
	percpu_ref_get(&cgrp->bpf.refcnt);
}

static inline void cgroup_bpf_put(struct cgroup *cgrp)
{
	percpu_ref_put(&cgrp->bpf.refcnt);
}

#else /* CONFIG_CGROUP_BPF */

static inline void cgroup_bpf_get(struct cgroup *cgrp) {}
static inline void cgroup_bpf_put(struct cgroup *cgrp) {}

#endif /* CONFIG_CGROUP_BPF */

struct cgroup *task_get_cgroup1(struct task_struct *tsk, int hierarchy_id);

struct cgroup_of_peak *of_peak(struct kernfs_open_file *of);

#endif /* _LINUX_CGROUP_H */