Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/mm/oom_kill.c
 *
 * Copyright (C) 1998,2000 Rik van Riel
 * Thanks go out to Claus Fischer for some serious inspiration and
 * for goading me into coding this file...
 * Copyright (C) 2010 Google, Inc.
 * Rewritten by David Rientjes
 *
 * The routines in this file are used to kill a process when
 * we're seriously out of memory. This gets called from __alloc_pages()
 * in mm/page_alloc.c when we really run out of memory.
 *
 * Since we won't call these routines often (on a well-configured
 * machine) this file will double as a 'coding guide' and a signpost
 * for newbie kernel hackers. It features several pointers to major
 * kernel subsystems and hints as to where to find out what things do.
 */

#include <linux/oom.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/gfp.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/task.h>
#include <linux/sched/debug.h>
#include <linux/swap.h>
#include <linux/syscalls.h>
#include <linux/timex.h>
#include <linux/jiffies.h>
#include <linux/cpuset.h>
#include <linux/export.h>
#include <linux/notifier.h>
#include <linux/memcontrol.h>
#include <linux/mempolicy.h>
#include <linux/security.h>
#include <linux/ptrace.h>
#include <linux/freezer.h>
#include <linux/ftrace.h>
#include <linux/ratelimit.h>
#include <linux/kthread.h>
#include <linux/init.h>
#include <linux/mmu_notifier.h>
#include <linux/cred.h>
#include <linux/nmi.h>

#include <asm/tlb.h>
#include "internal.h"
#include "slab.h"

#define CREATE_TRACE_POINTS
#include <trace/events/oom.h>

static int sysctl_panic_on_oom;
static int sysctl_oom_kill_allocating_task;
static int sysctl_oom_dump_tasks = 1;

/*
 * Serializes oom killer invocations (out_of_memory()) from all contexts to
 * prevent overly eager oom killing (e.g. when the oom killer is invoked
 * from different domains).
 *
 * oom_killer_disable() relies on this lock to stabilize oom_killer_disabled
 * and mark_oom_victim().
 */
DEFINE_MUTEX(oom_lock);
/* Serializes oom_score_adj and oom_score_adj_min updates */
DEFINE_MUTEX(oom_adj_mutex);

static inline bool is_memcg_oom(struct oom_control *oc)
{
	return oc->memcg != NULL;
}

#ifdef CONFIG_NUMA
/**
 * oom_cpuset_eligible() - check task eligibility for kill
 * @start: task struct of the task to consider
 * @oc: pointer to struct oom_control
 *
 * Task eligibility is determined by whether or not a candidate task, @tsk,
 * shares the same mempolicy nodes as current if it is bound by such a policy
 * and whether or not it has the same set of allowed cpuset nodes.
 *
 * This function assumes oom-killer context and that 'current' has triggered
 * the oom-killer.
 */
static bool oom_cpuset_eligible(struct task_struct *start,
				struct oom_control *oc)
{
	struct task_struct *tsk;
	bool ret = false;
	const nodemask_t *mask = oc->nodemask;

	rcu_read_lock();
	for_each_thread(start, tsk) {
		if (mask) {
			/*
			 * If this is a mempolicy constrained oom, tsk's
			 * cpuset is irrelevant. Only return true if its
			 * mempolicy intersects current, otherwise it may be
			 * needlessly killed.
			 */
			ret = mempolicy_in_oom_domain(tsk, mask);
		} else {
			/*
			 * This is not a mempolicy constrained oom, so only
			 * check the mems of tsk's cpuset.
			 */
			ret = cpuset_mems_allowed_intersects(current, tsk);
		}
		if (ret)
			break;
	}
	rcu_read_unlock();

	return ret;
}
#else
static bool oom_cpuset_eligible(struct task_struct *tsk, struct oom_control *oc)
{
	return true;
}
#endif /* CONFIG_NUMA */

/*
 * The process p may have detached its own ->mm while exiting or through
 * kthread_use_mm(), but one or more of its subthreads may still have a valid
 * pointer. Return p, or any of its subthreads with a valid ->mm, with
 * task_lock() held.
 */
struct task_struct *find_lock_task_mm(struct task_struct *p)
{
	struct task_struct *t;

	guard(rcu)();

	for_each_thread(p, t) {
		task_lock(t);
		if (likely(t->mm))
			return t;
		task_unlock(t);
	}

	return NULL;
}
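
/*
 * Illustrative caller pattern (a sketch, not a quote from this file): a
 * successful return must be paired with task_unlock() once ->mm is no
 * longer needed.
 *
 *	struct task_struct *t = find_lock_task_mm(p);
 *
 *	if (t) {
 *		nr = get_mm_rss_sum(t->mm);	// ->mm stable until unlock
 *		task_unlock(t);
 *	}
 */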

/*
 * order == -1 means the oom kill is required by sysrq, otherwise the order is
 * used only for display purposes.
 */
static inline bool is_sysrq_oom(struct oom_control *oc)
{
	return oc->order == -1;
}

/* Return true if the task is not suitable as an oom victim candidate. */
static bool oom_unkillable_task(struct task_struct *p)
{
	if (is_global_init(p))
		return true;
	if (p->flags & PF_KTHREAD)
		return true;
	return false;
}

/*
 * Check whether the amount of unreclaimable slab is greater than all user
 * memory (LRU pages). dump_unreclaimable_slab() helps in the case where the
 * oom is due to too much unreclaimable slab used by the kernel.
 */
static bool should_dump_unreclaim_slab(void)
{
	unsigned long nr_lru;

	nr_lru = global_node_page_state(NR_ACTIVE_ANON) +
		 global_node_page_state(NR_INACTIVE_ANON) +
		 global_node_page_state(NR_ACTIVE_FILE) +
		 global_node_page_state(NR_INACTIVE_FILE) +
		 global_node_page_state(NR_ISOLATED_ANON) +
		 global_node_page_state(NR_ISOLATED_FILE) +
		 global_node_page_state(NR_UNEVICTABLE);

	return (global_node_page_state_pages(NR_SLAB_UNRECLAIMABLE_B) > nr_lru);
}

/**
 * oom_badness - heuristic function to determine which candidate task to kill
 * @p: task struct of the task whose badness score we calculate
 * @totalpages: total present RAM allowed for page allocation
 *
 * The heuristic for determining which task to kill is made to be as simple and
 * predictable as possible. The goal is to return the highest value for the
 * task consuming the most memory to avoid subsequent oom failures.
 */
long oom_badness(struct task_struct *p, unsigned long totalpages)
{
	long points;
	long adj;

	if (oom_unkillable_task(p))
		return LONG_MIN;

	p = find_lock_task_mm(p);
	if (!p)
		return LONG_MIN;

	/*
	 * Do not even consider tasks which are explicitly marked oom
	 * unkillable, have already been oom reaped, or are in the middle
	 * of a vfork.
	 */
	adj = (long)p->signal->oom_score_adj;
	if (adj == OOM_SCORE_ADJ_MIN ||
	    mm_flags_test(MMF_OOM_SKIP, p->mm) ||
	    in_vfork(p)) {
		task_unlock(p);
		return LONG_MIN;
	}

	/*
	 * The baseline for the badness score is the proportion of RAM that each
	 * task's rss, pagetable and swap space use.
	 */
	points = get_mm_rss_sum(p->mm) + get_mm_counter_sum(p->mm, MM_SWAPENTS) +
		 mm_pgtables_bytes(p->mm) / PAGE_SIZE;
	task_unlock(p);

	/* Normalize to oom_score_adj units */
	adj *= totalpages / 1000;
	points += adj;

	return points;
}
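
/*
 * Worked example (illustrative numbers, not from the kernel source): with
 * totalpages = 1,000,000 and a task whose rss + swap entries + page tables
 * amount to 400,000 pages, the baseline score is 400,000 points. An
 * oom_score_adj of -500 then adds -500 * (1,000,000 / 1000) = -500,000
 * points, for a final score of -100,000, making the task much less likely
 * to be selected; +500 would instead raise the score to 900,000.
 */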

static const char * const oom_constraint_text[] = {
	[CONSTRAINT_NONE] = "CONSTRAINT_NONE",
	[CONSTRAINT_CPUSET] = "CONSTRAINT_CPUSET",
	[CONSTRAINT_MEMORY_POLICY] = "CONSTRAINT_MEMORY_POLICY",
	[CONSTRAINT_MEMCG] = "CONSTRAINT_MEMCG",
};

/*
 * Determine the type of allocation constraint.
 */
static enum oom_constraint constrained_alloc(struct oom_control *oc)
{
	struct zone *zone;
	struct zoneref *z;
	enum zone_type highest_zoneidx = gfp_zone(oc->gfp_mask);
	bool cpuset_limited = false;
	int nid;

	if (is_memcg_oom(oc)) {
		oc->totalpages = mem_cgroup_get_max(oc->memcg) ?: 1;
		return CONSTRAINT_MEMCG;
	}

	/* Default to all available memory */
	oc->totalpages = totalram_pages() + total_swap_pages;

	if (!IS_ENABLED(CONFIG_NUMA))
		return CONSTRAINT_NONE;

	if (!oc->zonelist)
		return CONSTRAINT_NONE;
	/*
	 * Reach here only when __GFP_NOFAIL is used, so we should avoid
	 * killing current. A random task would have to be killed in this
	 * case. Ideally this would be CONSTRAINT_THISNODE, but there is no
	 * way to handle it for now.
	 */
	if (oc->gfp_mask & __GFP_THISNODE)
		return CONSTRAINT_NONE;

	/*
	 * This is not a __GFP_THISNODE allocation, so a truncated nodemask in
	 * the page allocator means a mempolicy is in effect. Cpuset policy
	 * is enforced in get_page_from_freelist().
	 */
	if (oc->nodemask &&
	    !nodes_subset(node_states[N_MEMORY], *oc->nodemask)) {
		oc->totalpages = total_swap_pages;
		for_each_node_mask(nid, *oc->nodemask)
			oc->totalpages += node_present_pages(nid);
		return CONSTRAINT_MEMORY_POLICY;
	}

	/* Check whether this allocation failure is caused by cpuset's wall function */
	for_each_zone_zonelist_nodemask(zone, z, oc->zonelist,
			highest_zoneidx, oc->nodemask)
		if (!cpuset_zone_allowed(zone, oc->gfp_mask))
			cpuset_limited = true;

	if (cpuset_limited) {
		oc->totalpages = total_swap_pages;
		for_each_node_mask(nid, cpuset_current_mems_allowed)
			oc->totalpages += node_present_pages(nid);
		return CONSTRAINT_CPUSET;
	}
	return CONSTRAINT_NONE;
}
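
/*
 * Example (a sketch with assumed numbers): for an allocation under a
 * mempolicy bound to node 1 of a two-node machine, the nodemask branch
 * above fires, oc->totalpages becomes node 1's present pages plus
 * total_swap_pages, and the constraint is CONSTRAINT_MEMORY_POLICY, so
 * badness scores are normalized against that smaller total.
 */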

static int oom_evaluate_task(struct task_struct *task, void *arg)
{
	struct oom_control *oc = arg;
	long points;

	if (oom_unkillable_task(task))
		goto next;

	/* p may not have freeable memory in nodemask */
	if (!is_memcg_oom(oc) && !oom_cpuset_eligible(task, oc))
		goto next;

	/*
	 * This task already has access to memory reserves and is being killed.
	 * Don't allow any other task to have access to the reserves unless
	 * the task has MMF_OOM_SKIP because chances that it would release
	 * any memory are quite low.
	 */
	if (!is_sysrq_oom(oc) && tsk_is_oom_victim(task)) {
		if (mm_flags_test(MMF_OOM_SKIP, task->signal->oom_mm))
			goto next;
		goto abort;
	}

	/*
	 * If task is allocating a lot of memory and has been marked to be
	 * killed first if it triggers an oom, then select it.
	 */
	if (oom_task_origin(task)) {
		points = LONG_MAX;
		goto select;
	}

	points = oom_badness(task, oc->totalpages);
	if (points == LONG_MIN || points < oc->chosen_points)
		goto next;

select:
	if (oc->chosen)
		put_task_struct(oc->chosen);
	get_task_struct(task);
	oc->chosen = task;
	oc->chosen_points = points;
next:
	return 0;
abort:
	if (oc->chosen)
		put_task_struct(oc->chosen);
	oc->chosen = (void *)-1UL;
	return 1;
}

/*
 * Simple selection loop. We choose the process with the highest number of
 * 'points'. In case scan was aborted, oc->chosen is set to -1.
 */
static void select_bad_process(struct oom_control *oc)
{
	oc->chosen_points = LONG_MIN;

	if (is_memcg_oom(oc))
		mem_cgroup_scan_tasks(oc->memcg, oom_evaluate_task, oc);
	else {
		struct task_struct *p;

		rcu_read_lock();
		for_each_process(p)
			if (oom_evaluate_task(p, oc))
				break;
		rcu_read_unlock();
	}
}

static int dump_task(struct task_struct *p, void *arg)
{
	struct oom_control *oc = arg;
	struct task_struct *task;

	if (oom_unkillable_task(p))
		return 0;

	/* p may not have freeable memory in nodemask */
	if (!is_memcg_oom(oc) && !oom_cpuset_eligible(p, oc))
		return 0;

	task = find_lock_task_mm(p);
	if (!task) {
		/*
		 * All of p's threads have already detached their mm's. There's
		 * no need to report them; they can't be oom killed anyway.
		 */
		return 0;
	}

	pr_info("[%7d] %5d %5d %8lu %8lu %8lu %8lu %9lu %8ld %8lu %5hd %s\n",
		task->pid, from_kuid(&init_user_ns, task_uid(task)),
		task->tgid, task->mm->total_vm, get_mm_rss_sum(task->mm),
		get_mm_counter_sum(task->mm, MM_ANONPAGES), get_mm_counter_sum(task->mm, MM_FILEPAGES),
		get_mm_counter_sum(task->mm, MM_SHMEMPAGES), mm_pgtables_bytes(task->mm),
		get_mm_counter_sum(task->mm, MM_SWAPENTS),
		task->signal->oom_score_adj, task->comm);
	task_unlock(task);

	return 0;
}

/**
 * dump_tasks - dump current memory state of all system tasks
 * @oc: pointer to struct oom_control
 *
 * Dumps the current memory state of all eligible tasks. Tasks not in the same
 * memcg, not in the same cpuset, or bound to a disjoint set of mempolicy nodes
 * are not shown.
 * State information includes task's pid, uid, tgid, vm size, rss,
 * pgtables_bytes, swapents, oom_score_adj value, and name.
 */
static void dump_tasks(struct oom_control *oc)
{
	pr_info("Tasks state (memory values in pages):\n");
	pr_info("[  pid  ]   uid  tgid total_vm      rss rss_anon rss_file rss_shmem pgtables_bytes swapents oom_score_adj name\n");

	if (is_memcg_oom(oc))
		mem_cgroup_scan_tasks(oc->memcg, dump_task, oc);
	else {
		struct task_struct *p;
		int i = 0;

		rcu_read_lock();
		for_each_process(p) {
			/* Avoid potential softlockup warning */
			if ((++i & 1023) == 0)
				touch_softlockup_watchdog();
			dump_task(p, oc);
		}
		rcu_read_unlock();
	}
}

static void dump_oom_victim(struct oom_control *oc, struct task_struct *victim)
{
	/* one line summary of the oom killer context. */
	pr_info("oom-kill:constraint=%s,nodemask=%*pbl",
			oom_constraint_text[oc->constraint],
			nodemask_pr_args(oc->nodemask));
	cpuset_print_current_mems_allowed();
	mem_cgroup_print_oom_context(oc->memcg, victim);
	pr_cont(",task=%s,pid=%d,uid=%d\n", victim->comm, victim->pid,
		from_kuid(&init_user_ns, task_uid(victim)));
}

static void dump_header(struct oom_control *oc)
{
	pr_warn("%s invoked oom-killer: gfp_mask=%#x(%pGg), order=%d, oom_score_adj=%d\n",
		current->comm, oc->gfp_mask, &oc->gfp_mask, oc->order,
		current->signal->oom_score_adj);
	if (!IS_ENABLED(CONFIG_COMPACTION) && oc->order)
		pr_warn("COMPACTION is disabled!!!\n");

	dump_stack();
	if (is_memcg_oom(oc))
		mem_cgroup_print_oom_meminfo(oc->memcg);
	else {
		__show_mem(SHOW_MEM_FILTER_NODES, oc->nodemask, gfp_zone(oc->gfp_mask));
		if (should_dump_unreclaim_slab())
			dump_unreclaimable_slab();
	}
	mem_cgroup_show_protected_memory(oc->memcg);
	if (sysctl_oom_dump_tasks)
		dump_tasks(oc);
}

/*
 * Number of OOM victims in flight
 */
static atomic_t oom_victims = ATOMIC_INIT(0);
static DECLARE_WAIT_QUEUE_HEAD(oom_victims_wait);

static bool oom_killer_disabled __read_mostly;

/*
 * task->mm can be NULL if the task is the exited group leader. So to
 * determine whether the task is using a particular mm, we examine all the
 * task's threads: if one of those is using this mm then this task was also
 * using it.
 */
bool process_shares_mm(const struct task_struct *p, const struct mm_struct *mm)
{
	const struct task_struct *t;

	for_each_thread(p, t) {
		const struct mm_struct *t_mm = READ_ONCE(t->mm);
		if (t_mm)
			return t_mm == mm;
	}
	return false;
}

#ifdef CONFIG_MMU
/*
 * OOM Reaper kernel thread which tries to reap the memory used by the OOM
 * victim (if that is possible) to help the OOM killer to move on.
 */
static struct task_struct *oom_reaper_th;
static DECLARE_WAIT_QUEUE_HEAD(oom_reaper_wait);
static struct task_struct *oom_reaper_list;
static DEFINE_SPINLOCK(oom_reaper_lock);

static bool __oom_reap_task_mm(struct mm_struct *mm)
{
	struct vm_area_struct *vma;
	bool ret = true;
	MA_STATE(mas, &mm->mm_mt, ULONG_MAX, ULONG_MAX);

	/*
	 * Tell all users of get_user/copy_from_user etc... that the content
	 * is no longer stable. No barriers are really needed because unmapping
	 * should imply barriers already and the reader would hit a page fault
	 * if it stumbled over reaped memory.
	 */
	mm_flags_set(MMF_UNSTABLE, mm);

	/*
	 * It might start racing with the dying task and compete for shared
	 * resources - e.g. page table lock contention has been observed.
	 * Reduce those races by reaping the oom victim from the other end
	 * of the address space.
	 */
	mas_for_each_rev(&mas, vma, 0) {
		if (vma->vm_flags & (VM_HUGETLB|VM_PFNMAP))
			continue;

		/*
		 * Only anonymous pages have a good chance to be dropped
		 * without additional steps which we cannot afford as we
		 * are OOM already.
		 *
		 * We do not even care about fs backed pages because all
		 * which are reclaimable have already been reclaimed and
		 * we do not want to block exit_mmap by keeping mm ref
		 * count elevated without a good reason.
		 */
		if (vma_is_anonymous(vma) || !(vma->vm_flags & VM_SHARED)) {
			if (zap_vma_for_reaping(vma))
				ret = false;
		}
	}

	return ret;
}

/*
 * Reaps the address space of the given task.
 *
 * Returns true on success and false if none or only part of the address space
 * has been reclaimed and the caller should retry later.
 */
static bool oom_reap_task_mm(struct task_struct *tsk, struct mm_struct *mm)
{
	bool ret = true;

	if (!mmap_read_trylock(mm)) {
		trace_skip_task_reaping(tsk->pid);
		return false;
	}

	/*
	 * MMF_OOM_SKIP is set by exit_mmap when the OOM reaper can't
	 * work on the mm anymore. The check for MMF_OOM_SKIP must run
	 * under mmap_lock for reading because it serializes against the
	 * mmap_write_lock();mmap_write_unlock() cycle in exit_mmap().
	 */
	if (mm_flags_test(MMF_OOM_SKIP, mm)) {
		trace_skip_task_reaping(tsk->pid);
		goto out_unlock;
	}

	trace_start_task_reaping(tsk->pid);

	/* If we failed to reap part of the address space, try again later */
	ret = __oom_reap_task_mm(mm);
	if (!ret)
		goto out_finish;

	pr_info("oom_reaper: reaped process %d (%s), now anon-rss:%lukB, file-rss:%lukB, shmem-rss:%lukB\n",
			task_pid_nr(tsk), tsk->comm,
			K(get_mm_counter_sum(mm, MM_ANONPAGES)),
			K(get_mm_counter_sum(mm, MM_FILEPAGES)),
			K(get_mm_counter_sum(mm, MM_SHMEMPAGES)));
out_finish:
	trace_finish_task_reaping(tsk->pid);
out_unlock:
	mmap_read_unlock(mm);

	return ret;
}

#define MAX_OOM_REAP_RETRIES 10
static void oom_reap_task(struct task_struct *tsk)
{
	int attempts = 0;
	struct mm_struct *mm = tsk->signal->oom_mm;

	/* Retry the mmap_read_trylock(mm) a few times */
	while (attempts++ < MAX_OOM_REAP_RETRIES && !oom_reap_task_mm(tsk, mm))
		schedule_timeout_idle(HZ/10);

	if (attempts <= MAX_OOM_REAP_RETRIES ||
	    mm_flags_test(MMF_OOM_SKIP, mm))
		goto done;

	pr_info("oom_reaper: unable to reap pid:%d (%s)\n",
		task_pid_nr(tsk), tsk->comm);
	sched_show_task(tsk);
	debug_show_all_locks();

done:
	tsk->oom_reaper_list = NULL;

	/*
	 * Hide this mm from the OOM killer because it has either been reaped
	 * or somebody cannot call mmap_write_unlock(mm).
	 */
	mm_flags_set(MMF_OOM_SKIP, mm);

	/* Drop a reference taken by queue_oom_reaper */
	put_task_struct(tsk);
}

static int oom_reaper(void *unused)
{
	set_freezable();

	while (true) {
		struct task_struct *tsk = NULL;

		wait_event_freezable(oom_reaper_wait, oom_reaper_list != NULL);
		spin_lock_irq(&oom_reaper_lock);
		if (oom_reaper_list != NULL) {
			tsk = oom_reaper_list;
			oom_reaper_list = tsk->oom_reaper_list;
		}
		spin_unlock_irq(&oom_reaper_lock);

		if (tsk)
			oom_reap_task(tsk);
	}

	return 0;
}

static void wake_oom_reaper(struct timer_list *timer)
{
	struct task_struct *tsk = container_of(timer, struct task_struct,
			oom_reaper_timer);
	struct mm_struct *mm = tsk->signal->oom_mm;
	unsigned long flags;

	/* The victim managed to terminate on its own - see exit_mmap */
	if (mm_flags_test(MMF_OOM_SKIP, mm)) {
		put_task_struct(tsk);
		return;
	}

	spin_lock_irqsave(&oom_reaper_lock, flags);
	tsk->oom_reaper_list = oom_reaper_list;
	oom_reaper_list = tsk;
	spin_unlock_irqrestore(&oom_reaper_lock, flags);
	trace_wake_reaper(tsk->pid);
	wake_up(&oom_reaper_wait);
}

/*
 * Give the OOM victim time to exit naturally before invoking the oom reaper.
 * The timer's timeout is arbitrary... the longer it is, the longer the worst
 * case scenario for the OOM can take. If it is too small, the oom_reaper can
 * get in the way and release resources needed by the process exit path,
 * e.g. the futex robust list, which can sit in Anon|Private memory that gets
 * reaped before the exit path is able to wake the futex waiters.
 */
#define OOM_REAPER_DELAY (2*HZ)
static void queue_oom_reaper(struct task_struct *tsk)
{
	/* mm is already queued? */
	if (mm_flags_test_and_set(MMF_OOM_REAP_QUEUED, tsk->signal->oom_mm))
		return;

	get_task_struct(tsk);
	timer_setup(&tsk->oom_reaper_timer, wake_oom_reaper, 0);
	tsk->oom_reaper_timer.expires = jiffies + OOM_REAPER_DELAY;
	add_timer(&tsk->oom_reaper_timer);
}
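
/*
 * Putting the pieces above together: queue_oom_reaper() arms an
 * OOM_REAPER_DELAY (2 second) timer; when it fires, wake_oom_reaper()
 * links the task into oom_reaper_list and wakes the oom_reaper kthread,
 * which runs oom_reap_task(), retrying oom_reap_task_mm() up to
 * MAX_OOM_REAP_RETRIES times before giving up and setting MMF_OOM_SKIP.
 */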

#ifdef CONFIG_SYSCTL
static const struct ctl_table vm_oom_kill_table[] = {
	{
		.procname	= "panic_on_oom",
		.data		= &sysctl_panic_on_oom,
		.maxlen		= sizeof(sysctl_panic_on_oom),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= SYSCTL_ZERO,
		.extra2		= SYSCTL_TWO,
	},
	{
		.procname	= "oom_kill_allocating_task",
		.data		= &sysctl_oom_kill_allocating_task,
		.maxlen		= sizeof(sysctl_oom_kill_allocating_task),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "oom_dump_tasks",
		.data		= &sysctl_oom_dump_tasks,
		.maxlen		= sizeof(sysctl_oom_dump_tasks),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
};
#endif
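
/*
 * These knobs are exposed under /proc/sys/vm/. An illustrative root shell
 * session (the values written are examples, not defaults):
 *
 *	# echo 1 > /proc/sys/vm/panic_on_oom
 *	# echo 1 > /proc/sys/vm/oom_kill_allocating_task
 *	# cat /proc/sys/vm/oom_dump_tasks
 *	1
 */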

static int __init oom_init(void)
{
	oom_reaper_th = kthread_run(oom_reaper, NULL, "oom_reaper");
#ifdef CONFIG_SYSCTL
	register_sysctl_init("vm", vm_oom_kill_table);
#endif
	return 0;
}
subsys_initcall(oom_init)
#else
static inline void queue_oom_reaper(struct task_struct *tsk)
{
}
#endif /* CONFIG_MMU */

/**
 * mark_oom_victim - mark the given task as OOM victim
 * @tsk: task to mark
 *
 * Has to be called with oom_lock held and never after
 * oom has been disabled already.
 *
 * tsk->mm has to be non-NULL and the caller has to guarantee it is stable
 * (either under task_lock or by operating on current).
 */
static void mark_oom_victim(struct task_struct *tsk)
{
	const struct cred *cred;
	struct mm_struct *mm = tsk->mm;

	WARN_ON(oom_killer_disabled);
	/* OOM killer might race with memcg OOM */
	if (test_and_set_tsk_thread_flag(tsk, TIF_MEMDIE))
		return;

	/* oom_mm is bound to the signal struct life time. */
	if (!cmpxchg(&tsk->signal->oom_mm, NULL, mm))
		mmgrab(tsk->signal->oom_mm);

	/*
	 * Make sure that the process is woken up from uninterruptible sleep
	 * if it is frozen, because the OOM killer wouldn't be able to free
	 * any memory and would livelock. The freezer will thaw tasks that are
	 * OOM victims regardless of the PM freezing and cgroup freezing states.
	 */
	thaw_process(tsk);
	atomic_inc(&oom_victims);
	cred = get_task_cred(tsk);
	trace_mark_victim(tsk, cred->uid.val);
	put_cred(cred);
}

/**
 * exit_oom_victim - note the exit of an OOM victim
 */
void exit_oom_victim(void)
{
	clear_thread_flag(TIF_MEMDIE);

	if (!atomic_dec_return(&oom_victims))
		wake_up_all(&oom_victims_wait);
}

/**
 * oom_killer_enable - enable OOM killer
 */
void oom_killer_enable(void)
{
	oom_killer_disabled = false;
	pr_info("OOM killer enabled.\n");
}

/**
 * oom_killer_disable - disable OOM killer
 * @timeout: maximum timeout to wait for oom victims in jiffies
 *
 * Forces all page allocations to fail rather than trigger the OOM killer.
 * Will block and wait until all OOM victims are killed or the given
 * timeout expires.
 *
 * The function cannot be called when there are runnable user tasks because
 * userspace would see unexpected allocation failures as a result. Any
 * new use of this function should be discussed with the MM people.
 *
 * Returns true if successful and false if the OOM killer cannot be
 * disabled.
 */
bool oom_killer_disable(signed long timeout)
{
	signed long ret;

	/*
	 * Make sure to not race with an ongoing OOM killer. Check that the
	 * current is not killed (possibly due to sharing the victim's memory).
	 */
	if (mutex_lock_killable(&oom_lock))
		return false;
	oom_killer_disabled = true;
	mutex_unlock(&oom_lock);

	ret = wait_event_interruptible_timeout(oom_victims_wait,
			!atomic_read(&oom_victims), timeout);
	if (ret <= 0) {
		oom_killer_enable();
		return false;
	}
	pr_info("OOM killer disabled.\n");

	return true;
}
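
/*
 * Typical pairing, modeled on the PM freezing path (a hedged sketch, not
 * a quote of that code): disable the oom killer around a critical section
 * and re-enable it on failure or when done.
 *
 *	if (!oom_killer_disable(msecs_to_jiffies(20000)))
 *		return -EBUSY;
 *	...critical section that must not race with oom kills...
 *	oom_killer_enable();
 */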

static inline bool __task_will_free_mem(struct task_struct *task)
{
	struct signal_struct *sig = task->signal;

	/*
	 * A coredumping process may sleep for an extended period in
	 * coredump_task_exit(), so the oom killer cannot assume that
	 * the process will promptly exit and release memory.
	 */
	if (sig->core_state)
		return false;

	if (sig->flags & SIGNAL_GROUP_EXIT)
		return true;

	if (thread_group_empty(task) && (task->flags & PF_EXITING))
		return true;

	return false;
}

/*
 * Checks whether the given task is dying or exiting and likely to
 * release its address space. This means that all threads and processes
 * sharing the same mm have to be killed or exiting.
 * The caller has to make sure that task->mm is stable (by holding task_lock
 * or by operating on current).
 */
static bool task_will_free_mem(struct task_struct *task)
{
	struct mm_struct *mm = task->mm;
	struct task_struct *p;
	bool ret = true;

	/*
	 * Skip tasks without mm because it might have passed its exit_mm and
	 * exit_oom_victim. oom_reaper could have rescued that but do not rely
	 * on that for now. We can consider find_lock_task_mm in future.
	 */
	if (!mm)
		return false;

	if (!__task_will_free_mem(task))
		return false;

	/*
	 * This task has already been drained by the oom reaper so there are
	 * only small chances it will free some more
	 */
	if (mm_flags_test(MMF_OOM_SKIP, mm))
		return false;

	if (atomic_read(&mm->mm_users) <= 1)
		return true;

	/*
	 * Make sure that all tasks which share the mm with the given task
	 * are dying as well to make sure that a) nobody pins its mm and
	 * b) the task is also reapable by the oom reaper.
	 */
	rcu_read_lock();
	for_each_process(p) {
		if (!process_shares_mm(p, mm))
			continue;
		if (same_thread_group(task, p))
			continue;
		ret = __task_will_free_mem(p);
		if (!ret)
			break;
	}
	rcu_read_unlock();

	return ret;
}

static void __oom_kill_process(struct task_struct *victim, const char *message)
{
	struct task_struct *p;
	struct mm_struct *mm;
	bool can_oom_reap = true;

	p = find_lock_task_mm(victim);
	if (!p) {
		pr_info("%s: OOM victim %d (%s) is already exiting. Skip killing the task\n",
			message, task_pid_nr(victim), victim->comm);
		put_task_struct(victim);
		return;
	} else if (victim != p) {
		get_task_struct(p);
		put_task_struct(victim);
		victim = p;
	}

	/* Get a reference to safely compare mm after task_unlock(victim) */
	mm = victim->mm;
	mmgrab(mm);

	/* Raise event before sending signal: task reaper must see this */
	count_vm_event(OOM_KILL);
	memcg_memory_event_mm(mm, MEMCG_OOM_KILL);

	/*
	 * We should send SIGKILL before granting access to memory reserves
	 * in order to prevent the OOM victim from depleting the memory
	 * reserves from the user space under its control.
	 */
	do_send_sig_info(SIGKILL, SEND_SIG_PRIV, victim, PIDTYPE_TGID);
	mark_oom_victim(victim);
	pr_err("%s: Killed process %d (%s) total-vm:%lukB, anon-rss:%lukB, file-rss:%lukB, shmem-rss:%lukB, UID:%u pgtables:%lukB oom_score_adj:%d\n",
		message, task_pid_nr(victim), victim->comm, K(mm->total_vm),
		K(get_mm_counter_sum(mm, MM_ANONPAGES)),
		K(get_mm_counter_sum(mm, MM_FILEPAGES)),
		K(get_mm_counter_sum(mm, MM_SHMEMPAGES)),
		from_kuid(&init_user_ns, task_uid(victim)),
		mm_pgtables_bytes(mm) >> 10, victim->signal->oom_score_adj);
	task_unlock(victim);

	/*
	 * Kill all user processes sharing victim->mm in other thread groups,
	 * if any. They don't get access to memory reserves, though, to avoid
	 * depletion of all memory. This prevents mm->mmap_lock livelock when
	 * an oom killed thread cannot exit because it requires the semaphore
	 * and it is contended by another thread trying to allocate memory
	 * itself. That thread will now get access to memory reserves since
	 * it has a pending fatal signal.
	 */
	rcu_read_lock();
	for_each_process(p) {
		if (!process_shares_mm(p, mm))
			continue;
		if (same_thread_group(p, victim))
			continue;
		if (is_global_init(p)) {
			can_oom_reap = false;
			mm_flags_set(MMF_OOM_SKIP, mm);
			pr_info("oom killer %d (%s) has mm pinned by %d (%s)\n",
				task_pid_nr(victim), victim->comm,
				task_pid_nr(p), p->comm);
			continue;
		}
		/*
		 * No kthread_use_mm() user needs to read from the userspace so
		 * we are ok to reap it.
		 */
		if (unlikely(p->flags & PF_KTHREAD))
			continue;
		do_send_sig_info(SIGKILL, SEND_SIG_PRIV, p, PIDTYPE_TGID);
	}
	rcu_read_unlock();

	if (can_oom_reap)
		queue_oom_reaper(victim);

	mmdrop(mm);
	put_task_struct(victim);
}

/*
 * Kill the provided task unless it is protected by having its
 * oom_score_adj set to OOM_SCORE_ADJ_MIN.
 */
static int oom_kill_memcg_member(struct task_struct *task, void *message)
{
	if (task->signal->oom_score_adj != OOM_SCORE_ADJ_MIN &&
	    !is_global_init(task)) {
		get_task_struct(task);
		__oom_kill_process(task, message);
	}
	return 0;
}

static void oom_kill_process(struct oom_control *oc, const char *message)
{
	struct task_struct *victim = oc->chosen;
	struct mem_cgroup *oom_group;
	static DEFINE_RATELIMIT_STATE(oom_rs, DEFAULT_RATELIMIT_INTERVAL,
				      DEFAULT_RATELIMIT_BURST);

	/*
	 * If the task is already exiting, don't alarm the sysadmin or kill
	 * its children or threads, just give it access to memory reserves
	 * so it can die quickly.
	 */
	task_lock(victim);
	if (task_will_free_mem(victim)) {
		mark_oom_victim(victim);
		queue_oom_reaper(victim);
		task_unlock(victim);
		put_task_struct(victim);
		return;
	}
	task_unlock(victim);

	if (__ratelimit(&oom_rs)) {
		dump_header(oc);
		dump_oom_victim(oc, victim);
	}

	/*
	 * Do we need to kill the entire memory cgroup?
	 * Or even one of the ancestor memory cgroups?
	 * Check this out before killing the victim task.
	 */
	oom_group = mem_cgroup_get_oom_group(victim, oc->memcg);

	__oom_kill_process(victim, message);

	/*
	 * If necessary, kill all tasks in the selected memory cgroup.
	 */
	if (oom_group) {
		memcg_memory_event(oom_group, MEMCG_OOM_GROUP_KILL);
		mem_cgroup_print_oom_group(oom_group);
		mem_cgroup_scan_tasks(oom_group, oom_kill_memcg_member,
				      (void *)message);
		mem_cgroup_put(oom_group);
	}
}

/*
 * Determines whether the kernel must panic because of the panic_on_oom sysctl.
 */
static void check_panic_on_oom(struct oom_control *oc)
{
	if (likely(!sysctl_panic_on_oom))
		return;
	if (sysctl_panic_on_oom != 2) {
		/*
		 * panic_on_oom == 1 only affects CONSTRAINT_NONE, the kernel
		 * does not panic for cpuset, mempolicy, or memcg allocation
		 * failures.
		 */
		if (oc->constraint != CONSTRAINT_NONE)
			return;
	}
	/* Do not panic for oom kills triggered by sysrq */
	if (is_sysrq_oom(oc))
		return;
	dump_header(oc);
	panic("Out of memory: %s panic_on_oom is enabled\n",
		sysctl_panic_on_oom == 2 ? "compulsory" : "system-wide");
}
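
/*
 * Summary of the values handled above: panic_on_oom == 0 never panics,
 * == 1 panics only for system-wide (CONSTRAINT_NONE) ooms, and == 2
 * panics for any oom, including cpuset, mempolicy and memcg constrained
 * ones. A sysrq-triggered oom never panics.
 */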

static BLOCKING_NOTIFIER_HEAD(oom_notify_list);

int register_oom_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&oom_notify_list, nb);
}
EXPORT_SYMBOL_GPL(register_oom_notifier);

int unregister_oom_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(&oom_notify_list, nb);
}
EXPORT_SYMBOL_GPL(unregister_oom_notifier);

/**
 * out_of_memory - kill the "best" process when we run out of memory
 * @oc: pointer to struct oom_control
 *
 * If we run out of memory, we have the choice between either
 * killing a random task (bad), letting the system crash (worse)
 * or trying to be smart about which process to kill. Note that we
 * don't have to be perfect here, we just have to be good.
 */
bool out_of_memory(struct oom_control *oc)
{
	unsigned long freed = 0;

	if (oom_killer_disabled)
		return false;

	if (!is_memcg_oom(oc)) {
		blocking_notifier_call_chain(&oom_notify_list, 0, &freed);
		if (freed > 0 && !is_sysrq_oom(oc))
			/* Got some memory back in the last second. */
			return true;
	}

	/*
	 * If current has a pending SIGKILL or is exiting, then automatically
	 * select it. The goal is to allow it to allocate so that it may
	 * quickly exit and free its memory.
	 */
	if (task_will_free_mem(current)) {
		mark_oom_victim(current);
		queue_oom_reaper(current);
		return true;
	}

	/*
	 * The OOM killer does not compensate for IO-less reclaim.
	 * But mem_cgroup_oom() has to invoke the OOM killer even
	 * if it is a GFP_NOFS allocation.
	 */
	if (!(oc->gfp_mask & __GFP_FS) && !is_memcg_oom(oc))
		return true;

	/*
	 * Check if there were limitations on the allocation (only relevant for
	 * NUMA and memcg) that may require different handling.
	 */
	oc->constraint = constrained_alloc(oc);
	if (oc->constraint != CONSTRAINT_MEMORY_POLICY)
		oc->nodemask = NULL;
	check_panic_on_oom(oc);

	if (!is_memcg_oom(oc) && sysctl_oom_kill_allocating_task &&
	    current->mm && !oom_unkillable_task(current) &&
	    oom_cpuset_eligible(current, oc) &&
	    current->signal->oom_score_adj != OOM_SCORE_ADJ_MIN) {
		get_task_struct(current);
		oc->chosen = current;
		oom_kill_process(oc, "Out of memory (oom_kill_allocating_task)");
		return true;
	}

	select_bad_process(oc);
	/* Found nothing?!?! */
	if (!oc->chosen) {
		dump_header(oc);
		pr_warn("Out of memory and no killable processes...\n");
		/*
		 * If we got here due to an actual allocation at the
		 * system level, we cannot survive this and will enter
		 * an endless loop in the allocator. Bail out now.
		 */
		if (!is_sysrq_oom(oc) && !is_memcg_oom(oc))
			panic("System is deadlocked on memory\n");
	}
	if (oc->chosen && oc->chosen != (void *)-1UL)
		oom_kill_process(oc, !is_memcg_oom(oc) ? "Out of memory" :
				 "Memory cgroup out of memory");
	return !!oc->chosen;
}
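
/*
 * Note: out_of_memory() is reached from the page allocator's slow path
 * (see the __alloc_pages() reference in the header comment) and from the
 * memcg charge path, in both cases with oom_lock held as described above
 * the lock's definition.
 */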

/*
 * The pagefault handler calls here because some allocation has failed. We
 * have to take care of the memcg OOM here because this is the only safe
 * context without any locks held, but let the oom killer triggered from
 * the allocation context care about the global OOM.
 */
void pagefault_out_of_memory(void)
{
	static DEFINE_RATELIMIT_STATE(pfoom_rs, DEFAULT_RATELIMIT_INTERVAL,
				      DEFAULT_RATELIMIT_BURST);

	if (mem_cgroup_oom_synchronize(true))
		return;

	if (fatal_signal_pending(current))
		return;

	if (__ratelimit(&pfoom_rs))
		pr_warn("Huh VM_FAULT_OOM leaked out to the #PF handler. Retrying PF\n");
}

SYSCALL_DEFINE2(process_mrelease, int, pidfd, unsigned int, flags)
{
#ifdef CONFIG_MMU
	struct mm_struct *mm = NULL;
	struct task_struct *task;
	struct task_struct *p;
	unsigned int f_flags;
	bool reap = false;
	long ret = 0;

	if (flags)
		return -EINVAL;

	task = pidfd_get_task(pidfd, &f_flags);
	if (IS_ERR(task))
		return PTR_ERR(task);

	/*
	 * Make sure to choose a thread which still has a reference to mm
	 * during the group exit
	 */
	p = find_lock_task_mm(task);
	if (!p) {
		ret = -ESRCH;
		goto put_task;
	}

	mm = p->mm;
	mmgrab(mm);

	if (task_will_free_mem(p))
		reap = true;
	else {
		/* Error only if the work has not been done already */
		if (!mm_flags_test(MMF_OOM_SKIP, mm))
			ret = -EINVAL;
	}
	task_unlock(p);

	if (!reap)
		goto drop_mm;

	if (mmap_read_lock_killable(mm)) {
		ret = -EINTR;
		goto drop_mm;
	}
	/*
	 * Check MMF_OOM_SKIP again under mmap_read_lock protection to ensure
	 * possible change in exit_mmap is seen
	 */
	if (!mm_flags_test(MMF_OOM_SKIP, mm) && !__oom_reap_task_mm(mm))
		ret = -EAGAIN;
	mmap_read_unlock(mm);

drop_mm:
	mmdrop(mm);
put_task:
	put_task_struct(task);
	return ret;
#else
	return -ENOSYS;
#endif /* CONFIG_MMU */
}
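
/*
 * Illustrative userspace usage of process_mrelease() (a hedged sketch;
 * glibc provides no wrapper, so raw syscalls are used):
 *
 *	#include <sys/syscall.h>
 *	#include <signal.h>
 *	#include <unistd.h>
 *
 *	int pidfd = syscall(SYS_pidfd_open, pid, 0);
 *
 *	if (pidfd >= 0) {
 *		kill(pid, SIGKILL);
 *		syscall(SYS_process_mrelease, pidfd, 0);
 *		close(pidfd);
 *	}
 *
 * The reap succeeds only for a task that is already dying, hence the
 * SIGKILL first; otherwise -EINVAL is returned, per the checks above.
 */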