Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef LINUX_MM_INLINE_H
#define LINUX_MM_INLINE_H

#include <linux/atomic.h>
#include <linux/huge_mm.h>
#include <linux/mm_types.h>
#include <linux/swap.h>
#include <linux/string.h>
#include <linux/userfaultfd_k.h>
#include <linux/leafops.h>

/**
 * folio_is_file_lru - Should the folio be on a file LRU or anon LRU?
 * @folio: The folio to test.
 *
 * We would like to get this info without a page flag, but the state
 * needs to survive until the folio is last deleted from the LRU, which
 * could be as far down as __page_cache_release.
 *
 * Return: An integer (not a boolean!) used to sort a folio onto the
 * right LRU list and to account folios correctly.
 * 1 if @folio is a regular filesystem backed page cache folio
 * or a lazily freed anonymous folio (e.g. via MADV_FREE).
 * 0 if @folio is a normal anonymous folio, a tmpfs folio or an otherwise
 * RAM- or swap-backed folio.
 */
static inline int folio_is_file_lru(const struct folio *folio)
{
        return !folio_test_swapbacked(folio);
}
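
/*
 * A minimal sketch (not compiled) of why a lazily freed anonymous folio ends
 * up on the file LRU: the MADV_FREE path clears PG_swapbacked, which is
 * exactly what folio_is_file_lru() tests. The "folio" below is a placeholder.
 */
#if 0
        folio_clear_swapbacked(folio);                  /* lazily freed, e.g. via MADV_FREE */
        VM_WARN_ON_ONCE(!folio_is_file_lru(folio));     /* now sorted onto the file LRU */
#endif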

static __always_inline void __update_lru_size(struct lruvec *lruvec,
                                enum lru_list lru, enum zone_type zid,
                                long nr_pages)
{
        struct pglist_data *pgdat = lruvec_pgdat(lruvec);

        lockdep_assert_held(&lruvec->lru_lock);
        WARN_ON_ONCE(nr_pages != (int)nr_pages);

        mod_lruvec_state(lruvec, NR_LRU_BASE + lru, nr_pages);
        __mod_zone_page_state(&pgdat->node_zones[zid],
                                NR_ZONE_LRU_BASE + lru, nr_pages);
}

static __always_inline void update_lru_size(struct lruvec *lruvec,
                                enum lru_list lru, enum zone_type zid,
                                long nr_pages)
{
        __update_lru_size(lruvec, lru, zid, nr_pages);
#ifdef CONFIG_MEMCG
        mem_cgroup_update_lru_size(lruvec, lru, zid, nr_pages);
#endif
}

/**
 * __folio_clear_lru_flags - Clear page lru flags before releasing a page.
 * @folio: The folio that was on lru and now has a zero reference.
 */
static __always_inline void __folio_clear_lru_flags(struct folio *folio)
{
        VM_BUG_ON_FOLIO(!folio_test_lru(folio), folio);

        __folio_clear_lru(folio);

        /* this shouldn't happen, so leave the flags to bad_page() */
        if (folio_test_active(folio) && folio_test_unevictable(folio))
                return;

        __folio_clear_active(folio);
        __folio_clear_unevictable(folio);
}

/**
 * folio_lru_list - Which LRU list should a folio be on?
 * @folio: The folio to test.
 *
 * Return: The LRU list a folio should be on, as an index
 * into the array of LRU lists.
 */
static __always_inline enum lru_list folio_lru_list(const struct folio *folio)
{
        enum lru_list lru;

        VM_BUG_ON_FOLIO(folio_test_active(folio) && folio_test_unevictable(folio), folio);

        if (folio_test_unevictable(folio))
                return LRU_UNEVICTABLE;

        lru = folio_is_file_lru(folio) ? LRU_INACTIVE_FILE : LRU_INACTIVE_ANON;
        if (folio_test_active(folio))
                lru += LRU_ACTIVE;

        return lru;
}
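
/*
 * A minimal sketch (not compiled) of the enum layout the arithmetic above
 * relies on: an active list always sits LRU_ACTIVE (== 1) entries after its
 * inactive counterpart, so "lru += LRU_ACTIVE" flips inactive to active for
 * both the anon and the file pair.
 */
#if 0
        BUILD_BUG_ON(LRU_ACTIVE_ANON != LRU_INACTIVE_ANON + LRU_ACTIVE);
        BUILD_BUG_ON(LRU_ACTIVE_FILE != LRU_INACTIVE_FILE + LRU_ACTIVE);
#endif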

#ifdef CONFIG_LRU_GEN

static inline bool lru_gen_switching(void)
{
        DECLARE_STATIC_KEY_FALSE(lru_switch);

        return static_branch_unlikely(&lru_switch);
}
#ifdef CONFIG_LRU_GEN_ENABLED
static inline bool lru_gen_enabled(void)
{
        DECLARE_STATIC_KEY_TRUE(lru_gen_caps[NR_LRU_GEN_CAPS]);

        return static_branch_likely(&lru_gen_caps[LRU_GEN_CORE]);
}
#else
static inline bool lru_gen_enabled(void)
{
        DECLARE_STATIC_KEY_FALSE(lru_gen_caps[NR_LRU_GEN_CAPS]);

        return static_branch_unlikely(&lru_gen_caps[LRU_GEN_CORE]);
}
#endif

static inline bool lru_gen_in_fault(void)
{
        return current->in_lru_fault;
}

static inline int lru_gen_from_seq(unsigned long seq)
{
        return seq % MAX_NR_GENS;
}

static inline int lru_hist_from_seq(unsigned long seq)
{
        return seq % NR_HIST_GENS;
}

static inline int lru_tier_from_refs(int refs, bool workingset)
{
        VM_WARN_ON_ONCE(refs > BIT(LRU_REFS_WIDTH));

        /* see the comment on MAX_NR_TIERS */
        return workingset ? MAX_NR_TIERS - 1 : order_base_2(refs);
}
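
/*
 * A worked example (not compiled) of the two mappings above, assuming the
 * default MAX_NR_GENS == 4 and MAX_NR_TIERS == 4: generation numbers form a
 * ring, so seq 5 lands in gen 1 and seq 6 in gen 2; access counts map to
 * tiers logarithmically, so 1 access is tier 0, 2 accesses tier 1, 3 or 4
 * accesses tier 2, and PG_workingset pins a folio to the last tier.
 */
#if 0
        VM_WARN_ON_ONCE(lru_gen_from_seq(5) != 1);
        VM_WARN_ON_ONCE(lru_tier_from_refs(3, false) != 2);
        VM_WARN_ON_ONCE(lru_tier_from_refs(1, true) != MAX_NR_TIERS - 1);
#endif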

static inline int folio_lru_refs(const struct folio *folio)
{
        unsigned long flags = READ_ONCE(folio->flags.f);

        if (!(flags & BIT(PG_referenced)))
                return 0;
        /*
         * Return the total number of accesses including PG_referenced. Also see
         * the comment on LRU_REFS_FLAGS.
         */
        return ((flags & LRU_REFS_MASK) >> LRU_REFS_PGOFF) + 1;
}

static inline int folio_lru_gen(const struct folio *folio)
{
        unsigned long flags = READ_ONCE(folio->flags.f);

        return ((flags & LRU_GEN_MASK) >> LRU_GEN_PGOFF) - 1;
}

static inline bool lru_gen_is_active(const struct lruvec *lruvec, int gen)
{
        unsigned long max_seq = lruvec->lrugen.max_seq;

        VM_WARN_ON_ONCE(gen >= MAX_NR_GENS);

        /* see the comment on MIN_NR_GENS */
        return gen == lru_gen_from_seq(max_seq) || gen == lru_gen_from_seq(max_seq - 1);
}

static inline void lru_gen_update_size(struct lruvec *lruvec, struct folio *folio,
                                       int old_gen, int new_gen)
{
        int type = folio_is_file_lru(folio);
        int zone = folio_zonenum(folio);
        int delta = folio_nr_pages(folio);
        enum lru_list lru = type * LRU_INACTIVE_FILE;
        struct lru_gen_folio *lrugen = &lruvec->lrugen;

        VM_WARN_ON_ONCE(old_gen != -1 && old_gen >= MAX_NR_GENS);
        VM_WARN_ON_ONCE(new_gen != -1 && new_gen >= MAX_NR_GENS);
        VM_WARN_ON_ONCE(old_gen == -1 && new_gen == -1);

        if (old_gen >= 0)
                WRITE_ONCE(lrugen->nr_pages[old_gen][type][zone],
                           lrugen->nr_pages[old_gen][type][zone] - delta);
        if (new_gen >= 0)
                WRITE_ONCE(lrugen->nr_pages[new_gen][type][zone],
                           lrugen->nr_pages[new_gen][type][zone] + delta);

        /* addition */
        if (old_gen < 0) {
                if (lru_gen_is_active(lruvec, new_gen))
                        lru += LRU_ACTIVE;
                __update_lru_size(lruvec, lru, zone, delta);
                return;
        }

        /* deletion */
        if (new_gen < 0) {
                if (lru_gen_is_active(lruvec, old_gen))
                        lru += LRU_ACTIVE;
                __update_lru_size(lruvec, lru, zone, -delta);
                return;
        }

        /* promotion */
        if (!lru_gen_is_active(lruvec, old_gen) && lru_gen_is_active(lruvec, new_gen)) {
                __update_lru_size(lruvec, lru, zone, -delta);
                __update_lru_size(lruvec, lru + LRU_ACTIVE, zone, delta);
        }

        /* demotion requires isolation, e.g., lru_deactivate_fn() */
        VM_WARN_ON_ONCE(lru_gen_is_active(lruvec, old_gen) && !lru_gen_is_active(lruvec, new_gen));
}

static inline unsigned long lru_gen_folio_seq(const struct lruvec *lruvec,
                                              const struct folio *folio,
                                              bool reclaiming)
{
        int gen;
        int type = folio_is_file_lru(folio);
        const struct lru_gen_folio *lrugen = &lruvec->lrugen;

        /*
         * +-----------------------------------+-----------------------------------+
         * | Accessed through page tables and  | Accessed through file descriptors |
         * | promoted by folio_update_gen()    | and protected by folio_inc_gen()  |
         * +-----------------------------------+-----------------------------------+
         * | PG_active (set while isolated)    |                                   |
         * +-----------------+-----------------+-----------------+-----------------+
         * | PG_workingset   | PG_referenced   | PG_workingset   | LRU_REFS_FLAGS  |
         * +-----------------------------------+-----------------------------------+
         * |<---------- MIN_NR_GENS ---------->|                                   |
         * |<---------------------------- MAX_NR_GENS ---------------------------->|
         */
        if (folio_test_active(folio))
                gen = MIN_NR_GENS - folio_test_workingset(folio);
        else if (reclaiming)
                gen = MAX_NR_GENS;
        else if ((!folio_is_file_lru(folio) && !folio_test_swapcache(folio)) ||
                 (folio_test_reclaim(folio) &&
                  (folio_test_dirty(folio) || folio_test_writeback(folio))))
                gen = MIN_NR_GENS;
        else
                gen = MAX_NR_GENS - folio_test_workingset(folio);

        return max(READ_ONCE(lrugen->max_seq) - gen + 1, READ_ONCE(lrugen->min_seq[type]));
}
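
/*
 * A worked example of the mapping above, assuming MIN_NR_GENS == 2 and
 * MAX_NR_GENS == 4: an active folio with PG_workingset gets gen = 1, i.e.
 * seq = max_seq, the youngest generation; a cold folio being reclaimed gets
 * gen = 4, i.e. seq = max_seq - 3, which the final max() clamps to min_seq
 * if that would fall below the oldest generation still in use.
 */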

static inline bool lru_gen_add_folio(struct lruvec *lruvec, struct folio *folio, bool reclaiming)
{
        unsigned long seq;
        unsigned long flags;
        int gen = folio_lru_gen(folio);
        int type = folio_is_file_lru(folio);
        int zone = folio_zonenum(folio);
        struct lru_gen_folio *lrugen = &lruvec->lrugen;

        VM_WARN_ON_ONCE_FOLIO(gen != -1, folio);

        if (folio_test_unevictable(folio) || !lrugen->enabled)
                return false;

        seq = lru_gen_folio_seq(lruvec, folio, reclaiming);
        gen = lru_gen_from_seq(seq);
        flags = (gen + 1UL) << LRU_GEN_PGOFF;
        /* see the comment on MIN_NR_GENS about PG_active */
        set_mask_bits(&folio->flags.f, LRU_GEN_MASK | BIT(PG_active), flags);

        lru_gen_update_size(lruvec, folio, -1, gen);
        /* for folio_rotate_reclaimable() */
        if (reclaiming)
                list_add_tail(&folio->lru, &lrugen->folios[gen][type][zone]);
        else
                list_add(&folio->lru, &lrugen->folios[gen][type][zone]);

        return true;
}

static inline bool lru_gen_del_folio(struct lruvec *lruvec, struct folio *folio, bool reclaiming)
{
        unsigned long flags;
        int gen = folio_lru_gen(folio);

        if (gen < 0)
                return false;

        VM_WARN_ON_ONCE_FOLIO(folio_test_active(folio), folio);
        VM_WARN_ON_ONCE_FOLIO(folio_test_unevictable(folio), folio);

        /* for folio_migrate_flags() */
        flags = !reclaiming && lru_gen_is_active(lruvec, gen) ? BIT(PG_active) : 0;
        flags = set_mask_bits(&folio->flags.f, LRU_GEN_MASK, flags);
        gen = ((flags & LRU_GEN_MASK) >> LRU_GEN_PGOFF) - 1;

        lru_gen_update_size(lruvec, folio, gen, -1);
        list_del(&folio->lru);

        return true;
}

static inline void folio_migrate_refs(struct folio *new, const struct folio *old)
{
        unsigned long refs = READ_ONCE(old->flags.f) & LRU_REFS_MASK;

        set_mask_bits(&new->flags.f, LRU_REFS_MASK, refs);
}
#else /* !CONFIG_LRU_GEN */

static inline bool lru_gen_enabled(void)
{
        return false;
}

static inline bool lru_gen_switching(void)
{
        return false;
}

static inline bool lru_gen_in_fault(void)
{
        return false;
}

static inline bool lru_gen_add_folio(struct lruvec *lruvec, struct folio *folio, bool reclaiming)
{
        return false;
}

static inline bool lru_gen_del_folio(struct lruvec *lruvec, struct folio *folio, bool reclaiming)
{
        return false;
}

static inline void folio_migrate_refs(struct folio *new, const struct folio *old)
{
}
#endif /* CONFIG_LRU_GEN */

static __always_inline
void lruvec_add_folio(struct lruvec *lruvec, struct folio *folio)
{
        enum lru_list lru = folio_lru_list(folio);

        if (lru_gen_add_folio(lruvec, folio, false))
                return;

        update_lru_size(lruvec, lru, folio_zonenum(folio),
                        folio_nr_pages(folio));
        if (lru != LRU_UNEVICTABLE)
                list_add(&folio->lru, &lruvec->lists[lru]);
}

static __always_inline
void lruvec_add_folio_tail(struct lruvec *lruvec, struct folio *folio)
{
        enum lru_list lru = folio_lru_list(folio);

        if (lru_gen_add_folio(lruvec, folio, true))
                return;

        update_lru_size(lruvec, lru, folio_zonenum(folio),
                        folio_nr_pages(folio));
        /* This is not expected to be used on LRU_UNEVICTABLE */
        list_add_tail(&folio->lru, &lruvec->lists[lru]);
}

static __always_inline
void lruvec_del_folio(struct lruvec *lruvec, struct folio *folio)
{
        enum lru_list lru = folio_lru_list(folio);

        if (lru_gen_del_folio(lruvec, folio, false))
                return;

        if (lru != LRU_UNEVICTABLE)
                list_del(&folio->lru);
        update_lru_size(lruvec, lru, folio_zonenum(folio),
                        -folio_nr_pages(folio));
}
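
/*
 * A minimal sketch (not compiled) of how the add/del helpers pair up when a
 * folio is briefly isolated and then put back, assuming the caller holds a
 * folio reference and lruvec->lru_lock; real callers also clear and re-set
 * PG_lru via folio_test_clear_lru()/folio_set_lru(), which is omitted here.
 */
#if 0
        lruvec_del_folio(lruvec, folio);        /* take the folio off its list */
        /* ... operate on the isolated folio ... */
        lruvec_add_folio(lruvec, folio);        /* put it back on the right list */
#endif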

#ifdef CONFIG_ANON_VMA_NAME
/* mmap_lock should be read-locked */
static inline void anon_vma_name_get(struct anon_vma_name *anon_name)
{
        if (anon_name)
                kref_get(&anon_name->kref);
}

static inline void anon_vma_name_put(struct anon_vma_name *anon_name)
{
        if (anon_name)
                kref_put(&anon_name->kref, anon_vma_name_free);
}

static inline
struct anon_vma_name *anon_vma_name_reuse(struct anon_vma_name *anon_name)
{
        /* Prevent anon_name refcount saturation early on */
        if (kref_read(&anon_name->kref) < REFCOUNT_MAX) {
                anon_vma_name_get(anon_name);
                return anon_name;
        }
        return anon_vma_name_alloc(anon_name->name);
}

static inline void dup_anon_vma_name(struct vm_area_struct *orig_vma,
                                     struct vm_area_struct *new_vma)
{
        struct anon_vma_name *anon_name = anon_vma_name(orig_vma);

        if (anon_name)
                new_vma->anon_name = anon_vma_name_reuse(anon_name);
}

static inline void free_anon_vma_name(struct vm_area_struct *vma)
{
        /*
         * Not using anon_vma_name because it generates a warning if mmap_lock
         * is not held, which might be the case here.
         */
        anon_vma_name_put(vma->anon_name);
}

static inline bool anon_vma_name_eq(struct anon_vma_name *anon_name1,
                                    struct anon_vma_name *anon_name2)
{
        if (anon_name1 == anon_name2)
                return true;

        return anon_name1 && anon_name2 &&
               !strcmp(anon_name1->name, anon_name2->name);
}

#else /* CONFIG_ANON_VMA_NAME */
static inline void anon_vma_name_get(struct anon_vma_name *anon_name) {}
static inline void anon_vma_name_put(struct anon_vma_name *anon_name) {}
static inline void dup_anon_vma_name(struct vm_area_struct *orig_vma,
                                     struct vm_area_struct *new_vma) {}
static inline void free_anon_vma_name(struct vm_area_struct *vma) {}

static inline bool anon_vma_name_eq(struct anon_vma_name *anon_name1,
                                    struct anon_vma_name *anon_name2)
{
        return true;
}

#endif /* CONFIG_ANON_VMA_NAME */

void pfnmap_track_ctx_release(struct kref *ref);

static inline void init_tlb_flush_pending(struct mm_struct *mm)
{
        atomic_set(&mm->tlb_flush_pending, 0);
}

static inline void inc_tlb_flush_pending(struct mm_struct *mm)
{
        atomic_inc(&mm->tlb_flush_pending);
        /*
         * The only time this value is relevant is when there are indeed pages
         * to flush. And we'll only flush pages after changing them, which
         * requires the PTL.
         *
         * So the ordering here is:
         *
         *      atomic_inc(&mm->tlb_flush_pending);
         *      spin_lock(&ptl);
         *      ...
         *      set_pte_at();
         *      spin_unlock(&ptl);
         *
         *                              spin_lock(&ptl)
         *                              mm_tlb_flush_pending();
         *                              ....
         *                              spin_unlock(&ptl);
         *
         *                              flush_tlb_range();
         *                              atomic_dec(&mm->tlb_flush_pending);
         *
         * Where the increment is constrained by the PTL unlock, it thus
         * ensures that the increment is visible if the PTE modification is
         * visible. After all, if there is no PTE modification, nobody cares
         * about TLB flushes either.
         *
         * This very much relies on users (mm_tlb_flush_pending() and
         * mm_tlb_flush_nested()) only caring about _specific_ PTEs (and
         * therefore specific PTLs), because with SPLIT_PTE_PTLOCKS and RCpc
         * locks (PPC) the unlock of one doesn't order against the lock of
         * another PTL.
         *
         * The decrement is ordered by the flush_tlb_range(), such that
         * mm_tlb_flush_pending() will not return false unless all flushes have
         * completed.
         */
}
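
/*
 * A minimal sketch (not compiled) of the caller-side pattern the comment
 * above describes, loosely modelled on a PTE-changing path; the variables
 * (mm, vma, ptl, addr, pte, newpte, start, end) are placeholders.
 */
#if 0
        inc_tlb_flush_pending(mm);              /* before taking the PTL */
        spin_lock(ptl);
        set_pte_at(mm, addr, pte, newpte);      /* change PTEs under the PTL */
        spin_unlock(ptl);
        flush_tlb_range(vma, start, end);       /* flush ... */
        dec_tlb_flush_pending(mm);              /* ... then drop "pending" */
#endif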

static inline void dec_tlb_flush_pending(struct mm_struct *mm)
{
        /*
         * See inc_tlb_flush_pending().
         *
         * This cannot be smp_mb__before_atomic() because smp_mb() simply does
         * not order against TLB invalidate completion, which is what we need.
         *
         * Therefore we must rely on tlb_flush_*() to guarantee order.
         */
        atomic_dec(&mm->tlb_flush_pending);
}

static inline bool mm_tlb_flush_pending(const struct mm_struct *mm)
{
        /*
         * Must be called after having acquired the PTL; orders against that
         * PTL's release and therefore ensures that if we observe the modified
         * PTE we must also observe the increment from inc_tlb_flush_pending().
         *
         * That is, it only guarantees to return true if there is a flush
         * pending for _this_ PTL.
         */
        return atomic_read(&mm->tlb_flush_pending);
}

static inline bool mm_tlb_flush_nested(const struct mm_struct *mm)
{
        /*
         * Similar to mm_tlb_flush_pending(), we must have acquired the PTL
         * for which there is a TLB flush pending in order to guarantee
         * we've seen both that PTE modification and the increment.
         *
         * (no requirement on actually still holding the PTL, that is irrelevant)
         */
        return atomic_read(&mm->tlb_flush_pending) > 1;
}

#ifdef CONFIG_MMU
/*
 * Computes the pte marker to copy from the given source entry into dst_vma.
 * If no marker should be copied, returns 0.
 * The caller should insert a new pte created with make_pte_marker().
 */
static inline pte_marker copy_pte_marker(
                softleaf_t entry, struct vm_area_struct *dst_vma)
{
        const pte_marker srcm = softleaf_to_marker(entry);
        /* Always copy error entries. */
        pte_marker dstm = srcm & (PTE_MARKER_POISONED | PTE_MARKER_GUARD);

        /* Only copy PTE markers if UFFD register matches. */
        if ((srcm & PTE_MARKER_UFFD_WP) && userfaultfd_wp(dst_vma))
                dstm |= PTE_MARKER_UFFD_WP;

        return dstm;
}
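
/*
 * A minimal sketch (not compiled) of how a copy path might consume the
 * result, assuming "entry" was decoded from the source PTE; dst_pte and addr
 * are placeholders. The real users live in the fork/copy_page_range() path.
 */
#if 0
        pte_marker marker = copy_pte_marker(entry, dst_vma);

        if (marker)
                set_pte_at(dst_vma->vm_mm, addr, dst_pte,
                           make_pte_marker(marker));
#endif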

/*
 * If this pte is wr-protected by uffd-wp in any form, arm the special pte to
 * replace a none pte. NOTE! This should only be called when *pte is already
 * cleared so we will never accidentally replace something valuable. Meanwhile
 * a none pte also means we are not demoting the pte, so a TLB flush is not
 * needed; when the pte was cleared, the caller should already have taken care
 * of the TLB flush.
 *
 * Must be called with pgtable lock held so that no thread will see the none
 * pte, and if they see it, they'll fault and serialize at the pgtable lock.
 *
 * Returns true if an uffd-wp pte was installed, false otherwise.
 */
static inline bool
pte_install_uffd_wp_if_needed(struct vm_area_struct *vma, unsigned long addr,
                              pte_t *pte, pte_t pteval)
{
        bool arm_uffd_pte = false;

        if (!uffd_supports_wp_marker())
                return false;

        /* The current status of the pte should be "cleared" before calling */
        WARN_ON_ONCE(!pte_none(ptep_get(pte)));

        /*
         * NOTE: userfaultfd_wp_unpopulated() doesn't need this whole
         * thing, because when zapping either it means it's dropping the
         * page, or in TTU where the present pte will be quickly replaced
         * with a swap pte. There's no way of leaking the bit.
         */
        if (vma_is_anonymous(vma) || !userfaultfd_wp(vma))
                return false;

        /* A uffd-wp wr-protected normal pte */
        if (unlikely(pte_present(pteval) && pte_uffd_wp(pteval)))
                arm_uffd_pte = true;

        /*
         * A uffd-wp wr-protected swap pte. Note: this should even cover an
         * existing pte marker with uffd-wp bit set.
         */
        if (unlikely(pte_swp_uffd_wp_any(pteval)))
                arm_uffd_pte = true;

        if (unlikely(arm_uffd_pte)) {
                set_pte_at(vma->vm_mm, addr, pte,
                           make_pte_marker(PTE_MARKER_UFFD_WP));
                return true;
        }

        return false;
}
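
/*
 * A rough sketch (not compiled) of how a zap path might use this helper,
 * with error handling and TLB batching omitted; mm, addr and pte are
 * placeholders. See the zap code in mm/memory.c for the real sequence.
 */
#if 0
        pteval = ptep_get_and_clear(mm, addr, pte);     /* the pte is now none */
        /* ... release the page, account, batch the TLB flush ... */
        pte_install_uffd_wp_if_needed(vma, addr, pte, pteval);
#endif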

static inline bool vma_has_recency(const struct vm_area_struct *vma)
{
        if (vma->vm_flags & (VM_SEQ_READ | VM_RAND_READ))
                return false;

        if (vma->vm_file && (vma->vm_file->f_mode & FMODE_NOREUSE))
                return false;

        return true;
}
#endif

/**
 * num_pages_contiguous() - determine the number of contiguous pages
 *                          that represent contiguous PFNs
 * @pages: an array of page pointers
 * @nr_pages: length of the array, at least 1
 *
 * Determine the number of contiguous pages that represent contiguous PFNs
 * in @pages, starting from the first page.
 *
 * In some kernel configs contiguous PFNs will not have contiguous struct
 * pages. In these configurations num_pages_contiguous() may return fewer
 * pages than are actually PFN-contiguous. The caller should continue to
 * check for pfn contiguity after each call to num_pages_contiguous().
 *
 * Returns the number of contiguous pages.
 */
static inline size_t num_pages_contiguous(struct page **pages, size_t nr_pages)
{
        struct page *cur_page = pages[0];
        unsigned long section = memdesc_section(cur_page->flags);
        size_t i;

        for (i = 1; i < nr_pages; i++) {
                if (++cur_page != pages[i])
                        break;
                /*
                 * In unproblematic kernel configs, page_to_section() == 0 and
                 * the whole check will get optimized out.
                 */
                if (memdesc_section(cur_page->flags) != section)
                        break;
        }

        return i;
}
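
/*
 * A minimal sketch (not compiled) of a caller walking a pages[] array in
 * PFN-contiguous runs, re-checking after each call as required above; the
 * handle_contiguous_range() helper is a placeholder.
 */
#if 0
        size_t done = 0;

        while (done < nr_pages) {
                size_t run = num_pages_contiguous(&pages[done], nr_pages - done);

                handle_contiguous_range(&pages[done], run);
                done += run;
        }
#endif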

#endif