/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SWAP_H
#define _LINUX_SWAP_H

#include <linux/spinlock.h>
#include <linux/linkage.h>
#include <linux/mmzone.h>
#include <linux/list.h>
#include <linux/memcontrol.h>
#include <linux/sched.h>
#include <linux/node.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/atomic.h>
#include <linux/page-flags.h>
#include <uapi/linux/mempolicy.h>
#include <asm/page.h>

struct notifier_block;

struct bio;

#define SWAP_FLAG_PREFER	0x8000	/* set if swap priority specified */
#define SWAP_FLAG_PRIO_MASK	0x7fff
#define SWAP_FLAG_DISCARD	0x10000 /* enable discard for swap */
#define SWAP_FLAG_DISCARD_ONCE	0x20000 /* discard swap area at swapon-time */
#define SWAP_FLAG_DISCARD_PAGES 0x40000 /* discard page-clusters after use */

#define SWAP_FLAGS_VALID	(SWAP_FLAG_PRIO_MASK | SWAP_FLAG_PREFER | \
				 SWAP_FLAG_DISCARD | SWAP_FLAG_DISCARD_ONCE | \
				 SWAP_FLAG_DISCARD_PAGES)
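
/*
 * Illustrative sketch (assumption, not taken from this header; the device
 * path is hypothetical): userspace passes a priority through the swapon(2)
 * flags word, e.g.
 *
 *	swapon("/dev/sdb2", SWAP_FLAG_PREFER | (5 & SWAP_FLAG_PRIO_MASK));
 *
 * requests priority 5 for that area, and sys_swapon() rejects any flag
 * bits outside SWAP_FLAGS_VALID.
 */
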
#define SWAP_BATCH 64

static inline int current_is_kswapd(void)
{
	return current->flags & PF_KSWAPD;
}

/*
 * MAX_SWAPFILES defines the maximum number of swaptypes: things which can
 * be swapped to. The swap type and the offset into that swap type are
 * encoded into pte's and into pgoff_t's in the swapcache. Using five bits
 * for the type means that the maximum number of swapcache pages is 27 bits
 * on 32-bit-pgoff_t architectures. And that assumes that the architecture
 * packs the type/offset into the pte as 5/27 as well.
 */
#define MAX_SWAPFILES_SHIFT	5
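
/*
 * Worked example (sketch): with the 5/27 split above and 4 KiB pages,
 * a swap entry can address 2^27 pages, i.e. up to 512 GiB per swap area
 * on a 32-bit-pgoff_t architecture. Architectures with wider pgoff_t/pte
 * formats can encode larger offsets.
 */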

/*
 * Use some of the swap file numbers for other purposes. This
 * is a convenient way to hook into the VM to trigger special
 * actions on faults.
 */

/*
 * PTE markers are used to persist information onto PTEs that otherwise
 * should be a none pte. As its name "PTE" hints, it should only be
 * applied to the leaves of pgtables.
 */
#define SWP_PTE_MARKER_NUM 1
#define SWP_PTE_MARKER  (MAX_SWAPFILES + SWP_HWPOISON_NUM + \
			 SWP_MIGRATION_NUM + SWP_DEVICE_NUM)

/*
 * Unaddressable device memory support. See include/linux/hmm.h and
 * Documentation/mm/hmm.rst. Short description is we need struct pages for
 * device memory that is unaddressable (inaccessible) by CPU, so that we can
 * migrate part of a process memory to device memory.
 *
 * When a page is migrated from CPU to device, we set the CPU page table entry
 * to a special SWP_DEVICE_{READ|WRITE} entry.
 *
 * When a page is mapped by the device for exclusive access we set the CPU page
 * table entries to a special SWP_DEVICE_EXCLUSIVE entry.
 */
#ifdef CONFIG_DEVICE_PRIVATE
#define SWP_DEVICE_NUM 3
#define SWP_DEVICE_WRITE (MAX_SWAPFILES+SWP_HWPOISON_NUM+SWP_MIGRATION_NUM)
#define SWP_DEVICE_READ (MAX_SWAPFILES+SWP_HWPOISON_NUM+SWP_MIGRATION_NUM+1)
#define SWP_DEVICE_EXCLUSIVE (MAX_SWAPFILES+SWP_HWPOISON_NUM+SWP_MIGRATION_NUM+2)
#else
#define SWP_DEVICE_NUM 0
#endif

/*
 * Page migration support.
 *
 * SWP_MIGRATION_READ_EXCLUSIVE is only applicable to anonymous pages and
 * indicates that the referenced (part of an) anonymous page is exclusive to
 * a single process. For SWP_MIGRATION_WRITE, that information is implicit:
 * (part of) an anonymous page that is mapped writable is exclusive to a
 * single process.
 */
#ifdef CONFIG_MIGRATION
#define SWP_MIGRATION_NUM 3
#define SWP_MIGRATION_READ (MAX_SWAPFILES + SWP_HWPOISON_NUM)
#define SWP_MIGRATION_READ_EXCLUSIVE (MAX_SWAPFILES + SWP_HWPOISON_NUM + 1)
#define SWP_MIGRATION_WRITE (MAX_SWAPFILES + SWP_HWPOISON_NUM + 2)
#else
#define SWP_MIGRATION_NUM 0
#endif

/*
 * Handling of hardware poisoned pages with memory corruption.
 */
#ifdef CONFIG_MEMORY_FAILURE
#define SWP_HWPOISON_NUM 1
#define SWP_HWPOISON		MAX_SWAPFILES
#else
#define SWP_HWPOISON_NUM 0
#endif

#define MAX_SWAPFILES \
	((1 << MAX_SWAPFILES_SHIFT) - SWP_DEVICE_NUM - \
	SWP_MIGRATION_NUM - SWP_HWPOISON_NUM - \
	SWP_PTE_MARKER_NUM)
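
/*
 * Worked example (sketch): with CONFIG_DEVICE_PRIVATE (3 entries),
 * CONFIG_MIGRATION (3), CONFIG_MEMORY_FAILURE (1) and the PTE marker (1)
 * all enabled, MAX_SWAPFILES is (1 << 5) - 3 - 3 - 1 - 1 = 24, i.e. at
 * most 24 swap areas can be active at once on such a configuration.
 */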

/*
 * Magic header for a swap area. The first part of the union is
 * what the swap magic looks like for the old (limited to 128MB)
 * swap area format, the second part of the union adds - in the
 * old reserved area - some extra information. Note that the first
 * kilobyte is reserved for boot loader or disk label stuff...
 *
 * Having the magic at the end of the PAGE_SIZE makes detecting swap
 * areas somewhat tricky on machines that support multiple page sizes.
 * For 2.5 we'll probably want to move the magic to just beyond the
 * bootbits...
 */
union swap_header {
	struct {
		char reserved[PAGE_SIZE - 10];
		char magic[10];			/* SWAP-SPACE or SWAPSPACE2 */
	} magic;
	struct {
		char		bootbits[1024];	/* Space for disklabel etc. */
		__u32		version;
		__u32		last_page;
		__u32		nr_badpages;
		unsigned char	sws_uuid[16];
		unsigned char	sws_volume[16];
		__u32		padding[117];
		__u32		badpages[1];
	} info;
};
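
/*
 * Example layout (sketch, assuming 4 KiB pages): mkswap leaves the first
 * 1024 bytes untouched for a boot loader or disk label, writes the version,
 * size and bad-page list into the info part, and places the 10-byte magic
 * ("SWAPSPACE2" for the current format) at bytes 4086..4095, i.e. at the
 * very end of the page.
 */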

/*
 * current->reclaim_state points to one of these when a task is running
 * memory reclaim
 */
struct reclaim_state {
	/* pages reclaimed outside of LRU-based reclaim */
	unsigned long reclaimed;
#ifdef CONFIG_LRU_GEN
	/* per-thread mm walk data */
	struct lru_gen_mm_walk *mm_walk;
#endif
};

/*
 * mm_account_reclaimed_pages(): account reclaimed pages outside of LRU-based
 * reclaim
 * @pages: number of pages reclaimed
 *
 * If the current process is undergoing a reclaim operation, increment the
 * number of reclaimed pages by @pages.
 */
static inline void mm_account_reclaimed_pages(unsigned long pages)
{
	if (current->reclaim_state)
		current->reclaim_state->reclaimed += pages;
}
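
/*
 * Illustrative sketch (assumption, not a real caller): a shrinker that
 * frees whole pages directly, outside the LRU lists, could credit that
 * progress to the ongoing reclaim pass with:
 *
 *	freed = my_driver_drop_cached_pages();	(hypothetical helper)
 *	mm_account_reclaimed_pages(freed);
 *
 * If no reclaim is in progress (current->reclaim_state is NULL), the call
 * is a no-op.
 */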

#ifdef __KERNEL__

struct address_space;
struct sysinfo;
struct writeback_control;
struct zone;

/*
 * A swap extent maps a range of a swapfile's PAGE_SIZE pages onto a range of
 * disk blocks. An rbtree of swap extents maps the entire swapfile (where the
 * term `swapfile' refers to either a block device or an IS_REG file). Apart
 * from setup, they're handled identically.
 *
 * We always assume that blocks are of size PAGE_SIZE.
 */
struct swap_extent {
	struct rb_node rb_node;
	pgoff_t start_page;
	pgoff_t nr_pages;
	sector_t start_block;
};

/*
 * Max bad pages in the new format.
 */
#define MAX_SWAP_BADPAGES \
	((offsetof(union swap_header, magic.magic) - \
	  offsetof(union swap_header, info.badpages)) / sizeof(int))
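
/*
 * Worked example (sketch, assuming 4 KiB pages): magic.magic starts at
 * byte 4086 and info.badpages at byte 1536 (1024 bootbits + 12 bytes of
 * counters + 32 bytes of uuid/volume + 468 bytes of padding), so up to
 * (4086 - 1536) / 4 = 637 bad-page entries fit in the header.
 */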

enum {
	SWP_USED	= (1 << 0),	/* is slot in swap_info[] used? */
	SWP_WRITEOK	= (1 << 1),	/* ok to write to this swap? */
	SWP_DISCARDABLE = (1 << 2),	/* blkdev supports discard */
	SWP_DISCARDING	= (1 << 3),	/* now discarding a free cluster */
	SWP_SOLIDSTATE	= (1 << 4),	/* blkdev seeks are cheap */
	SWP_BLKDEV	= (1 << 6),	/* it's a block device */
	SWP_ACTIVATED	= (1 << 7),	/* set after swap_activate success */
	SWP_FS_OPS	= (1 << 8),	/* swapfile operations go through fs */
	SWP_AREA_DISCARD = (1 << 9),	/* single-time swap area discards */
	SWP_PAGE_DISCARD = (1 << 10),	/* freed swap page-cluster discards */
	SWP_STABLE_WRITES = (1 << 11),	/* no overwrite PG_writeback pages */
	SWP_SYNCHRONOUS_IO = (1 << 12),	/* synchronous IO is efficient */
	/* add others here before... */
};

#define SWAP_CLUSTER_MAX 32UL
#define SWAP_CLUSTER_MAX_SKIPPED (SWAP_CLUSTER_MAX << 10)
#define COMPACT_CLUSTER_MAX SWAP_CLUSTER_MAX

/*
 * The first page in the swap file is the swap header, which is always marked
 * bad to prevent it from being allocated as an entry. This also prevents the
 * cluster to which it belongs being marked free. Therefore 0 is safe to use as
 * a sentinel to indicate an entry is not valid.
 */
#define SWAP_ENTRY_INVALID	0

#ifdef CONFIG_THP_SWAP
#define SWAP_NR_ORDERS		(PMD_ORDER + 1)
#else
#define SWAP_NR_ORDERS		1
#endif
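
/*
 * Example (sketch): on a typical x86_64 configuration with 4 KiB base pages
 * and 2 MiB PMD-sized THPs, PMD_ORDER is 9, so CONFIG_THP_SWAP gives
 * SWAP_NR_ORDERS == 10 and swap allocation tracks orders 0 through 9.
 */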

/*
 * We keep using the same cluster on rotational devices so that I/O stays
 * sequential. The purpose is to optimize swap throughput on these devices.
 */
struct swap_sequential_cluster {
	unsigned int next[SWAP_NR_ORDERS]; /* Likely next allocation offset */
};

/*
 * The in-memory structure used to track swap areas.
 */
struct swap_info_struct {
	struct percpu_ref users;	/* indicate and keep swap device valid. */
	unsigned long	flags;		/* SWP_USED etc: see above */
	signed short	prio;		/* swap priority of this type */
	struct plist_node list;		/* entry in swap_active_head */
	signed char	type;		/* strange name for an index */
	unsigned int	max;		/* size of this swap device */
	unsigned long *zeromap;		/* kvmalloc'ed bitmap to track zero pages */
	struct swap_cluster_info *cluster_info; /* cluster info. Only for SSD */
	struct list_head free_clusters; /* free clusters list */
	struct list_head full_clusters; /* full clusters list */
	struct list_head nonfull_clusters[SWAP_NR_ORDERS];
					/* list of clusters that contain at least one free slot */
	struct list_head frag_clusters[SWAP_NR_ORDERS];
					/* list of clusters that are fragmented or contended */
	unsigned int pages;		/* total of usable pages of swap */
	atomic_long_t inuse_pages;	/* number of those currently in use */
	struct swap_sequential_cluster *global_cluster; /* Use one global cluster for rotating device */
	spinlock_t global_cluster_lock;	/* Serialize usage of global cluster */
	struct rb_root swap_extent_root;/* root of the swap extent rbtree */
	struct block_device *bdev;	/* swap device or bdev of swap file */
	struct file *swap_file;		/* seldom referenced */
	struct completion comp;		/* seldom referenced */
	spinlock_t lock;		/*
					 * protect map scan related fields like
					 * inuse_pages and all cluster lists.
					 * Other fields are only changed
					 * at swapon/swapoff, so are protected
					 * by swap_lock. Changing flags needs
					 * to hold this lock and swap_lock. If
					 * both locks must be held, take
					 * swap_lock first.
					 */
	struct work_struct discard_work; /* discard worker */
	struct work_struct reclaim_work; /* reclaim worker */
	struct list_head discard_clusters; /* discard clusters list */
	struct plist_node avail_list;	/* entry in swap_avail_head */
};

static inline swp_entry_t page_swap_entry(struct page *page)
{
	struct folio *folio = page_folio(page);
	swp_entry_t entry = folio->swap;

	entry.val += folio_page_idx(folio, page);
	return entry;
}
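
/*
 * Example (sketch): if @page is the third page of a large folio that is in
 * the swap cache, folio_page_idx() returns 2 and the returned entry is the
 * folio's first swap slot advanced by 2, i.e. the slot backing that
 * particular page.
 */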

/* linux/mm/workingset.c */
bool workingset_test_recent(void *shadow, bool file, bool *workingset,
				bool flush);
void workingset_age_nonresident(struct lruvec *lruvec, unsigned long nr_pages);
void *workingset_eviction(struct folio *folio, struct mem_cgroup *target_memcg);
void workingset_refault(struct folio *folio, void *shadow);
void workingset_activation(struct folio *folio);

/* linux/mm/page_alloc.c */
extern unsigned long totalreserve_pages;

/* Definition of global_zone_page_state not available yet */
#define nr_free_pages() global_zone_page_state(NR_FREE_PAGES)

/* linux/mm/swap.c */
void lru_note_cost_unlock_irq(struct lruvec *lruvec, bool file,
		unsigned int nr_io, unsigned int nr_rotated);
void lru_note_cost_refault(struct folio *);
void folio_add_lru(struct folio *);
void folio_add_lru_vma(struct folio *, struct vm_area_struct *);
void mark_page_accessed(struct page *);
void folio_mark_accessed(struct folio *);

static inline bool folio_may_be_lru_cached(struct folio *folio)
{
	/*
	 * Holding PMD-sized folios in per-CPU LRU cache unbalances accounting.
	 * Holding small numbers of low-order mTHP folios in per-CPU LRU cache
	 * may be sensible, but nobody has implemented and tested that yet.
	 */
	return !folio_test_large(folio);
}

extern atomic_t lru_disable_count;

static inline bool lru_cache_disabled(void)
{
	return atomic_read(&lru_disable_count);
}

static inline void lru_cache_enable(void)
{
	atomic_dec(&lru_disable_count);
}

extern void lru_cache_disable(void);
extern void lru_add_drain(void);
extern void lru_add_drain_cpu(int cpu);
extern void lru_add_drain_cpu_zone(struct zone *zone);
extern void lru_add_drain_all(void);
void folio_deactivate(struct folio *folio);
void folio_mark_lazyfree(struct folio *folio);
extern void swap_setup(void);

/* linux/mm/vmscan.c */
extern unsigned long zone_reclaimable_pages(struct zone *zone);
extern unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
					gfp_t gfp_mask, nodemask_t *mask);
unsigned long lruvec_lru_size(struct lruvec *lruvec, enum lru_list lru, int zone_idx);

#define MEMCG_RECLAIM_MAY_SWAP		(1 << 1)
#define MEMCG_RECLAIM_PROACTIVE		(1 << 2)
#define MIN_SWAPPINESS 0
#define MAX_SWAPPINESS 200

/* Just reclaim from anon folios in proactive memory reclaim */
#define SWAPPINESS_ANON_ONLY (MAX_SWAPPINESS + 1)

extern unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg,
						  unsigned long nr_pages,
						  gfp_t gfp_mask,
						  unsigned int reclaim_options,
						  int *swappiness);
extern unsigned long mem_cgroup_shrink_node(struct mem_cgroup *mem,
						gfp_t gfp_mask, bool noswap,
						pg_data_t *pgdat,
						unsigned long *nr_scanned);
extern unsigned long shrink_all_memory(unsigned long nr_pages);
extern int vm_swappiness;
long remove_mapping(struct address_space *mapping, struct folio *folio);

#if defined(CONFIG_SYSFS) && defined(CONFIG_NUMA)
extern int reclaim_register_node(struct node *node);
extern void reclaim_unregister_node(struct node *node);

#else

static inline int reclaim_register_node(struct node *node)
{
	return 0;
}

static inline void reclaim_unregister_node(struct node *node)
{
}
#endif /* CONFIG_SYSFS && CONFIG_NUMA */

#ifdef CONFIG_NUMA
extern int sysctl_min_unmapped_ratio;
extern int sysctl_min_slab_ratio;
#endif

void check_move_unevictable_folios(struct folio_batch *fbatch);

extern void __meminit kswapd_run(int nid);
extern void __meminit kswapd_stop(int nid);

#ifdef CONFIG_SWAP

int add_swap_extent(struct swap_info_struct *sis, unsigned long start_page,
		unsigned long nr_pages, sector_t start_block);
int generic_swapfile_activate(struct swap_info_struct *, struct file *,
		sector_t *);

static inline unsigned long total_swapcache_pages(void)
{
	return global_node_page_state(NR_SWAPCACHE);
}

void free_swap_cache(struct folio *folio);
void free_folio_and_swap_cache(struct folio *folio);
void free_pages_and_swap_cache(struct encoded_page **, int);
/* linux/mm/swapfile.c */
extern atomic_long_t nr_swap_pages;
extern long total_swap_pages;
extern atomic_t nr_rotate_swap;

/* Swap 50% full? Release swapcache more aggressively.. */
static inline bool vm_swap_full(void)
{
	return atomic_long_read(&nr_swap_pages) * 2 < total_swap_pages;
}
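
/*
 * Example (sketch): with 1,000,000 swap slots in total and fewer than
 * 500,000 still free, vm_swap_full() returns true and reclaim starts
 * dropping swap cache pages more aggressively to recycle their slots.
 */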

static inline long get_nr_swap_pages(void)
{
	return atomic_long_read(&nr_swap_pages);
}

extern void si_swapinfo(struct sysinfo *);
int swap_type_of(dev_t device, sector_t offset);
int find_first_swap(dev_t *device);
extern unsigned int count_swap_pages(int, int);
extern sector_t swapdev_block(int, pgoff_t);
extern int __swap_count(swp_entry_t entry);
extern bool swap_entry_swapped(struct swap_info_struct *si, swp_entry_t entry);
extern int swp_swapcount(swp_entry_t entry);
struct backing_dev_info;
extern struct swap_info_struct *get_swap_device(swp_entry_t entry);
sector_t swap_folio_sector(struct folio *folio);

/*
 * If there is an existing swap slot reference (swap entry) and the caller
 * guarantees that there is no racing modification of it (e.g., the PTL
 * protecting the swap entry in a page table, or shmem's cmpxchg protecting
 * the swap entry in the shmem mapping), these two helpers below can be used
 * to put/dup the entries directly.
 *
 * All entries must be allocated by folio_alloc_swap(). And they must have
 * a swap count > 1. See comments of folio_*_swap helpers for more info.
 */
int swap_dup_entry_direct(swp_entry_t entry);
void swap_put_entries_direct(swp_entry_t entry, int nr);

/*
 * folio_free_swap() tries to free the swap entries pinned by a swap cache
 * folio; it lives here so that other components can call it.
 */
bool folio_free_swap(struct folio *folio);

/* Allocate / free (hibernation) exclusive entries */
swp_entry_t swap_alloc_hibernation_slot(int type);
void swap_free_hibernation_slot(swp_entry_t entry);

static inline void put_swap_device(struct swap_info_struct *si)
{
	percpu_ref_put(&si->users);
}
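
/*
 * Typical usage pattern (illustrative sketch): pin the device before
 * dereferencing a swap entry that may be concurrently swapped off, and
 * drop the reference when done:
 *
 *	si = get_swap_device(entry);
 *	if (si) {
 *		... use the swap map / swap cache for @entry ...
 *		put_swap_device(si);
 *	}
 *
 * get_swap_device() returns NULL if the entry no longer refers to a valid
 * swap device.
 */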

#else /* CONFIG_SWAP */
static inline struct swap_info_struct *get_swap_device(swp_entry_t entry)
{
	return NULL;
}

static inline void put_swap_device(struct swap_info_struct *si)
{
}

#define get_nr_swap_pages()			0L
#define total_swap_pages			0L
#define total_swapcache_pages()			0UL
#define vm_swap_full()				0

#define si_swapinfo(val) \
	do { (val)->freeswap = (val)->totalswap = 0; } while (0)
#define free_folio_and_swap_cache(folio) \
	folio_put(folio)
#define free_pages_and_swap_cache(pages, nr) \
	release_pages((pages), (nr));

static inline void free_swap_cache(struct folio *folio)
{
}

static inline int swap_dup_entry_direct(swp_entry_t ent)
{
	return 0;
}

static inline void swap_put_entries_direct(swp_entry_t ent, int nr)
{
}

static inline int __swap_count(swp_entry_t entry)
{
	return 0;
}

static inline bool swap_entry_swapped(struct swap_info_struct *si, swp_entry_t entry)
{
	return false;
}

static inline int swp_swapcount(swp_entry_t entry)
{
	return 0;
}

static inline bool folio_free_swap(struct folio *folio)
{
	return false;
}

static inline int add_swap_extent(struct swap_info_struct *sis,
				  unsigned long start_page,
				  unsigned long nr_pages, sector_t start_block)
{
	return -EINVAL;
}
#endif /* CONFIG_SWAP */
#ifdef CONFIG_MEMCG
static inline int mem_cgroup_swappiness(struct mem_cgroup *memcg)
{
	/* Cgroup2 doesn't have per-cgroup swappiness */
	if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
		return READ_ONCE(vm_swappiness);

	/* root ? */
	if (mem_cgroup_disabled() || mem_cgroup_is_root(memcg))
		return READ_ONCE(vm_swappiness);

	return READ_ONCE(memcg->swappiness);
}

void lru_reparent_memcg(struct mem_cgroup *memcg, struct mem_cgroup *parent, int nid);
#else
static inline int mem_cgroup_swappiness(struct mem_cgroup *mem)
{
	return READ_ONCE(vm_swappiness);
}
#endif

#if defined(CONFIG_SWAP) && defined(CONFIG_MEMCG) && defined(CONFIG_BLK_CGROUP)
void __folio_throttle_swaprate(struct folio *folio, gfp_t gfp);
static inline void folio_throttle_swaprate(struct folio *folio, gfp_t gfp)
{
	if (mem_cgroup_disabled())
		return;
	__folio_throttle_swaprate(folio, gfp);
}
#else
static inline void folio_throttle_swaprate(struct folio *folio, gfp_t gfp)
{
}
#endif

#if defined(CONFIG_MEMCG) && defined(CONFIG_SWAP)
int __mem_cgroup_try_charge_swap(struct folio *folio, swp_entry_t entry);
static inline int mem_cgroup_try_charge_swap(struct folio *folio,
		swp_entry_t entry)
{
	if (mem_cgroup_disabled())
		return 0;
	return __mem_cgroup_try_charge_swap(folio, entry);
}

extern void __mem_cgroup_uncharge_swap(swp_entry_t entry, unsigned int nr_pages);
static inline void mem_cgroup_uncharge_swap(swp_entry_t entry, unsigned int nr_pages)
{
	if (mem_cgroup_disabled())
		return;
	__mem_cgroup_uncharge_swap(entry, nr_pages);
}

extern long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg);
extern bool mem_cgroup_swap_full(struct folio *folio);
#else
static inline int mem_cgroup_try_charge_swap(struct folio *folio,
					     swp_entry_t entry)
{
	return 0;
}

static inline void mem_cgroup_uncharge_swap(swp_entry_t entry,
					    unsigned int nr_pages)
{
}

static inline long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg)
{
	return get_nr_swap_pages();
}

static inline bool mem_cgroup_swap_full(struct folio *folio)
{
	return vm_swap_full();
}
#endif

/*
 * for_each_managed_zone_pgdat - helper macro to iterate over all managed
 * zones in a pgdat up to and including the specified highidx
 * @zone: The current zone in the iterator
 * @pgdat: The pgdat which node_zones are being iterated
 * @idx: The index variable
 * @highidx: The index of the highest zone to return
 *
 * This macro iterates through all managed zones up to and including the
 * specified highidx. The zone iterator enters an invalid state after the
 * macro call and must be reinitialized before it can be used again.
 */
#define for_each_managed_zone_pgdat(zone, pgdat, idx, highidx)	\
	for ((idx) = 0, (zone) = (pgdat)->node_zones;		\
	    (idx) <= (highidx);					\
	    (idx)++, (zone)++)					\
		if (!managed_zone(zone))			\
			continue;				\
		else
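
/*
 * Usage example (illustrative sketch only): sum the managed pages of all
 * zones up to ZONE_NORMAL on one node, given its pg_data_t *pgdat:
 *
 *	struct zone *zone;
 *	unsigned long pages = 0;
 *	int idx;
 *
 *	for_each_managed_zone_pgdat(zone, pgdat, idx, ZONE_NORMAL)
 *		pages += zone_managed_pages(zone);
 */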

#endif /* __KERNEL__ */
#endif /* _LINUX_SWAP_H */