Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

folio_batch: rename PAGEVEC_SIZE to FOLIO_BATCH_SIZE

struct pagevec no longer exists. Rename the macro appropriately.

Link: https://lkml.kernel.org/r/20260225-pagevec_cleanup-v2-4-716868cc2d11@columbia.edu
Signed-off-by: Tal Zussman <tz2294@columbia.edu>
Acked-by: David Hildenbrand (Arm) <david@kernel.org>
Reviewed-by: Jan Kara <jack@suse.cz>
Acked-by: Zi Yan <ziy@nvidia.com>
Reviewed-by: Lorenzo Stoakes (Oracle) <ljs@kernel.org>
Cc: Chris Li <chrisl@kernel.org>
Cc: Christian Brauner <brauner@kernel.org>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>

authored by

Tal Zussman and committed by
Andrew Morton
511f04aa 4e1d77a8

+15 -15
+2 -2
fs/btrfs/extent_io.c
··· 2095 2095 struct eb_batch { 2096 2096 unsigned int nr; 2097 2097 unsigned int cur; 2098 - struct extent_buffer *ebs[PAGEVEC_SIZE]; 2098 + struct extent_buffer *ebs[FOLIO_BATCH_SIZE]; 2099 2099 }; 2100 2100 2101 2101 static inline bool eb_batch_add(struct eb_batch *batch, struct extent_buffer *eb) 2102 2102 { 2103 2103 batch->ebs[batch->nr++] = eb; 2104 - return (batch->nr < PAGEVEC_SIZE); 2104 + return (batch->nr < FOLIO_BATCH_SIZE); 2105 2105 } 2106 2106 2107 2107 static inline void eb_batch_init(struct eb_batch *batch)
+3 -3
include/linux/folio_batch.h
··· 12 12 #include <linux/types.h> 13 13 14 14 /* 31 pointers + header align the folio_batch structure to a power of two */ 15 - #define PAGEVEC_SIZE 31 15 + #define FOLIO_BATCH_SIZE 31 16 16 17 17 struct folio; 18 18 ··· 29 29 unsigned char nr; 30 30 unsigned char i; 31 31 bool percpu_pvec_drained; 32 - struct folio *folios[PAGEVEC_SIZE]; 32 + struct folio *folios[FOLIO_BATCH_SIZE]; 33 33 }; 34 34 35 35 /** ··· 58 58 59 59 static inline unsigned int folio_batch_space(const struct folio_batch *fbatch) 60 60 { 61 - return PAGEVEC_SIZE - fbatch->nr; 61 + return FOLIO_BATCH_SIZE - fbatch->nr; 62 62 } 63 63 64 64 /**
+3 -3
include/linux/folio_queue.h
··· 29 29 */ 30 30 struct folio_queue { 31 31 struct folio_batch vec; /* Folios in the queue segment */ 32 - u8 orders[PAGEVEC_SIZE]; /* Order of each folio */ 32 + u8 orders[FOLIO_BATCH_SIZE]; /* Order of each folio */ 33 33 struct folio_queue *next; /* Next queue segment or NULL */ 34 34 struct folio_queue *prev; /* Previous queue segment or NULL */ 35 35 unsigned long marks; /* 1-bit mark per folio */ 36 36 unsigned long marks2; /* Second 1-bit mark per folio */ 37 - #if PAGEVEC_SIZE > BITS_PER_LONG 37 + #if FOLIO_BATCH_SIZE > BITS_PER_LONG 38 38 #error marks is not big enough 39 39 #endif 40 40 unsigned int rreq_id; ··· 70 70 */ 71 71 static inline unsigned int folioq_nr_slots(const struct folio_queue *folioq) 72 72 { 73 - return PAGEVEC_SIZE; 73 + return FOLIO_BATCH_SIZE; 74 74 } 75 75 76 76 /**
+2 -2
mm/shmem.c
··· 1113 1113 pgoff_t start = (lstart + PAGE_SIZE - 1) >> PAGE_SHIFT; 1114 1114 pgoff_t end = (lend + 1) >> PAGE_SHIFT; 1115 1115 struct folio_batch fbatch; 1116 - pgoff_t indices[PAGEVEC_SIZE]; 1116 + pgoff_t indices[FOLIO_BATCH_SIZE]; 1117 1117 struct folio *folio; 1118 1118 bool same_folio; 1119 1119 long nr_swaps_freed = 0; ··· 1510 1510 struct address_space *mapping = inode->i_mapping; 1511 1511 pgoff_t start = 0; 1512 1512 struct folio_batch fbatch; 1513 - pgoff_t indices[PAGEVEC_SIZE]; 1513 + pgoff_t indices[FOLIO_BATCH_SIZE]; 1514 1514 int ret = 0; 1515 1515 1516 1516 do {
+1 -1
mm/swap.c
··· 1018 1018 void release_pages(release_pages_arg arg, int nr) 1019 1019 { 1020 1020 struct folio_batch fbatch; 1021 - int refs[PAGEVEC_SIZE]; 1021 + int refs[FOLIO_BATCH_SIZE]; 1022 1022 struct encoded_page **encoded = arg.encoded_pages; 1023 1023 int i; 1024 1024
+1 -1
mm/swap_state.c
··· 385 385 void free_pages_and_swap_cache(struct encoded_page **pages, int nr) 386 386 { 387 387 struct folio_batch folios; 388 - unsigned int refs[PAGEVEC_SIZE]; 388 + unsigned int refs[FOLIO_BATCH_SIZE]; 389 389 390 390 folio_batch_init(&folios); 391 391 for (int i = 0; i < nr; i++) {
+3 -3
mm/truncate.c
··· 369 369 pgoff_t start; /* inclusive */ 370 370 pgoff_t end; /* exclusive */ 371 371 struct folio_batch fbatch; 372 - pgoff_t indices[PAGEVEC_SIZE]; 372 + pgoff_t indices[FOLIO_BATCH_SIZE]; 373 373 pgoff_t index; 374 374 int i; 375 375 struct folio *folio; ··· 534 534 unsigned long mapping_try_invalidate(struct address_space *mapping, 535 535 pgoff_t start, pgoff_t end, unsigned long *nr_failed) 536 536 { 537 - pgoff_t indices[PAGEVEC_SIZE]; 537 + pgoff_t indices[FOLIO_BATCH_SIZE]; 538 538 struct folio_batch fbatch; 539 539 pgoff_t index = start; 540 540 unsigned long ret; ··· 672 672 int invalidate_inode_pages2_range(struct address_space *mapping, 673 673 pgoff_t start, pgoff_t end) 674 674 { 675 - pgoff_t indices[PAGEVEC_SIZE]; 675 + pgoff_t indices[FOLIO_BATCH_SIZE]; 676 676 struct folio_batch fbatch; 677 677 pgoff_t index; 678 678 int i;