Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel
os
linux
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * A test of splitting PMD THPs and PTE-mapped THPs from a specified virtual
4 * address range in a process via <debugfs>/split_huge_pages interface.
5 */
6
7#define _GNU_SOURCE
8#include <assert.h>
9#include <stdio.h>
10#include <stdlib.h>
11#include <stdarg.h>
12#include <unistd.h>
13#include <inttypes.h>
14#include <string.h>
15#include <fcntl.h>
16#include <sys/mman.h>
17#include <sys/mount.h>
18#include <sys/param.h>
19#include <malloc.h>
20#include <stdbool.h>
21#include <time.h>
22#include "vm_util.h"
23#include "kselftest.h"
24#include "thp_settings.h"
25
26uint64_t pagesize;
27unsigned int pageshift;
28uint64_t pmd_pagesize;
29unsigned int pmd_order;
30int *expected_orders;
31
32#define SPLIT_DEBUGFS "/sys/kernel/debug/split_huge_pages"
33#define SMAP_PATH "/proc/self/smaps"
34#define INPUT_MAX 80
35
36#define PID_FMT "%d,0x%lx,0x%lx,%d"
37#define PID_FMT_OFFSET "%d,0x%lx,0x%lx,%d,%d"
38#define PATH_FMT "%s,0x%lx,0x%lx,%d"
39
40const char *pagemap_proc = "/proc/self/pagemap";
41const char *kpageflags_proc = "/proc/kpageflags";
42int pagemap_fd;
43int kpageflags_fd;
44
45static bool is_backed_by_folio(char *vaddr, int order, int pagemap_fd,
46 int kpageflags_fd)
47{
48 const uint64_t folio_head_flags = KPF_THP | KPF_COMPOUND_HEAD;
49 const uint64_t folio_tail_flags = KPF_THP | KPF_COMPOUND_TAIL;
50 const unsigned long nr_pages = 1UL << order;
51 unsigned long pfn_head;
52 uint64_t pfn_flags;
53 unsigned long pfn;
54 unsigned long i;
55
56 pfn = pagemap_get_pfn(pagemap_fd, vaddr);
57
58 /* non present page */
59 if (pfn == -1UL)
60 return false;
61
62 if (pageflags_get(pfn, kpageflags_fd, &pfn_flags))
63 goto fail;
64
65 /* check for order-0 pages */
66 if (!order) {
67 if (pfn_flags & (folio_head_flags | folio_tail_flags))
68 return false;
69 return true;
70 }
71
72 /* non THP folio */
73 if (!(pfn_flags & KPF_THP))
74 return false;
75
76 pfn_head = pfn & ~(nr_pages - 1);
77
78 if (pageflags_get(pfn_head, kpageflags_fd, &pfn_flags))
79 goto fail;
80
81 /* head PFN has no compound_head flag set */
82 if ((pfn_flags & folio_head_flags) != folio_head_flags)
83 return false;
84
85 /* check all tail PFN flags */
86 for (i = 1; i < nr_pages; i++) {
87 if (pageflags_get(pfn_head + i, kpageflags_fd, &pfn_flags))
88 goto fail;
89 if ((pfn_flags & folio_tail_flags) != folio_tail_flags)
90 return false;
91 }
92
93 /*
94 * check the PFN after this folio, but if its flags cannot be obtained,
95 * assume this folio has the expected order
96 */
97 if (pageflags_get(pfn_head + nr_pages, kpageflags_fd, &pfn_flags))
98 return true;
99
100 /* If we find another tail page, then the folio is larger. */
101 return (pfn_flags & folio_tail_flags) != folio_tail_flags;
102fail:
103 ksft_exit_fail_msg("Failed to get folio info\n");
104 return false;
105}
106
/*
 * Fetch the kpageflags word for the page backing @vaddr.
 *
 * Return: 0 on success (*flags filled in), 1 when the page is not present,
 * -1 when reading kpageflags fails.
 */
static int vaddr_pageflags_get(char *vaddr, int pagemap_fd, int kpageflags_fd,
			       uint64_t *flags)
{
	unsigned long pfn = pagemap_get_pfn(pagemap_fd, vaddr);

	/* no page behind this vaddr */
	if (pfn == -1UL)
		return 1;

	return pageflags_get(pfn, kpageflags_fd, flags) ? -1 : 0;
}
123
124/*
125 * gather_after_split_folio_orders - scan through [vaddr_start, len) and record
126 * folio orders
127 *
128 * @vaddr_start: start vaddr
129 * @len: range length
130 * @pagemap_fd: file descriptor to /proc/<pid>/pagemap
131 * @kpageflags_fd: file descriptor to /proc/kpageflags
132 * @orders: output folio order array
133 * @nr_orders: folio order array size
134 *
135 * gather_after_split_folio_orders() scan through [vaddr_start, len) and check
136 * all folios within the range and record their orders. All order-0 pages will
137 * be recorded. Non-present vaddr is skipped.
138 *
139 * NOTE: the function is used to check folio orders after a split is performed,
140 * so it assumes [vaddr_start, len) fully maps to after-split folios within that
141 * range.
142 *
143 * Return: 0 - no error, -1 - unhandled cases
144 */
145static int gather_after_split_folio_orders(char *vaddr_start, size_t len,
146 int pagemap_fd, int kpageflags_fd, int orders[], int nr_orders)
147{
148 uint64_t page_flags = 0;
149 int cur_order = -1;
150 char *vaddr;
151
152 if (pagemap_fd == -1 || kpageflags_fd == -1)
153 return -1;
154 if (!orders)
155 return -1;
156 if (nr_orders <= 0)
157 return -1;
158
159 for (vaddr = vaddr_start; vaddr < vaddr_start + len;) {
160 char *next_folio_vaddr;
161 int status;
162
163 status = vaddr_pageflags_get(vaddr, pagemap_fd, kpageflags_fd,
164 &page_flags);
165 if (status < 0)
166 return -1;
167
168 /* skip non present vaddr */
169 if (status == 1) {
170 vaddr += psize();
171 continue;
172 }
173
174 /* all order-0 pages with possible false postive (non folio) */
175 if (!(page_flags & (KPF_COMPOUND_HEAD | KPF_COMPOUND_TAIL))) {
176 orders[0]++;
177 vaddr += psize();
178 continue;
179 }
180
181 /* skip non thp compound pages */
182 if (!(page_flags & KPF_THP)) {
183 vaddr += psize();
184 continue;
185 }
186
187 /* vpn points to part of a THP at this point */
188 if (page_flags & KPF_COMPOUND_HEAD)
189 cur_order = 1;
190 else {
191 vaddr += psize();
192 continue;
193 }
194
195 next_folio_vaddr = vaddr + (1UL << (cur_order + pshift()));
196
197 if (next_folio_vaddr >= vaddr_start + len)
198 break;
199
200 while ((status = vaddr_pageflags_get(next_folio_vaddr,
201 pagemap_fd, kpageflags_fd,
202 &page_flags)) >= 0) {
203 /*
204 * non present vaddr, next compound head page, or
205 * order-0 page
206 */
207 if (status == 1 ||
208 (page_flags & KPF_COMPOUND_HEAD) ||
209 !(page_flags & (KPF_COMPOUND_HEAD | KPF_COMPOUND_TAIL))) {
210 if (cur_order < nr_orders) {
211 orders[cur_order]++;
212 cur_order = -1;
213 vaddr = next_folio_vaddr;
214 }
215 break;
216 }
217
218 cur_order++;
219 next_folio_vaddr = vaddr + (1UL << (cur_order + pshift()));
220 }
221
222 if (status < 0)
223 return status;
224 }
225 if (cur_order > 0 && cur_order < nr_orders)
226 orders[cur_order]++;
227 return 0;
228}
229
/*
 * Compare the folio orders found in [vaddr_start, vaddr_start + len) against
 * the expected per-order counts in @orders.
 *
 * Return: 0 when every per-order count matches, -1 otherwise. Exits the test
 * on allocation or scan failure.
 */
static int check_after_split_folio_orders(char *vaddr_start, size_t len,
		int pagemap_fd, int kpageflags_fd, int orders[], int nr_orders)
{
	int *vaddr_orders;
	int status;
	int i;

	/* calloc zero-initializes and checks the size multiplication */
	vaddr_orders = calloc(nr_orders, sizeof(int));
	if (!vaddr_orders)
		ksft_exit_fail_msg("Cannot allocate memory for vaddr_orders\n");

	status = gather_after_split_folio_orders(vaddr_start, len, pagemap_fd,
					kpageflags_fd, vaddr_orders, nr_orders);
	if (status)
		ksft_exit_fail_msg("gather folio info failed\n");

	for (i = 0; i < nr_orders; i++)
		if (vaddr_orders[i] != orders[i]) {
			ksft_print_msg("order %d: expected: %d got %d\n", i,
				       orders[i], vaddr_orders[i]);
			status = -1;
		}

	free(vaddr_orders);
	return status;
}
258
259static void write_debugfs(const char *fmt, ...)
260{
261 char input[INPUT_MAX];
262 int ret;
263 va_list argp;
264
265 va_start(argp, fmt);
266 ret = vsnprintf(input, INPUT_MAX, fmt, argp);
267 va_end(argp);
268
269 if (ret >= INPUT_MAX)
270 ksft_exit_fail_msg("%s: Debugfs input is too long\n", __func__);
271
272 write_file(SPLIT_DEBUGFS, input, ret + 1);
273}
274
275static char *allocate_zero_filled_hugepage(size_t len)
276{
277 char *result;
278 size_t i;
279
280 result = memalign(pmd_pagesize, len);
281 if (!result) {
282 printf("Fail to allocate memory\n");
283 exit(EXIT_FAILURE);
284 }
285
286 madvise(result, len, MADV_HUGEPAGE);
287
288 for (i = 0; i < len; i++)
289 result[i] = (char)0;
290
291 return result;
292}
293
/*
 * Verify that splitting anonymous THPs consisting only of zero bytes reduces
 * RssAnon: the test fails unless RssAnon after the split is strictly smaller
 * than before.
 *
 * @one_page: start of an nr_hpages * pmd_pagesize anonymous region
 * @nr_hpages: number of PMD-sized THPs expected to back the region
 * @len: length of the region in bytes
 *
 * Exits the test on any failure.
 */
static void verify_rss_anon_split_huge_page_all_zeroes(char *one_page, int nr_hpages, size_t len)
{
	unsigned long rss_anon_before, rss_anon_after;
	size_t i;

	if (!check_huge_anon(one_page, nr_hpages, pmd_pagesize))
		ksft_exit_fail_msg("No THP is allocated\n");

	/* baseline must be taken before the split for the comparison below */
	rss_anon_before = rss_anon();
	if (!rss_anon_before)
		ksft_exit_fail_msg("No RssAnon is allocated before split\n");

	/* split all THPs */
	write_debugfs(PID_FMT, getpid(), (uint64_t)one_page,
		      (uint64_t)one_page + len, 0);

	/* content must survive the split unmodified (all zeroes) */
	for (i = 0; i < len; i++)
		if (one_page[i] != (char)0)
			ksft_exit_fail_msg("%ld byte corrupted\n", i);

	if (!check_huge_anon(one_page, 0, pmd_pagesize))
		ksft_exit_fail_msg("Still AnonHugePages not split\n");

	rss_anon_after = rss_anon();
	if (rss_anon_after >= rss_anon_before)
		ksft_exit_fail_msg("Incorrect RssAnon value. Before: %ld After: %ld\n",
				   rss_anon_before, rss_anon_after);
}
322
323static void split_pmd_zero_pages(void)
324{
325 char *one_page;
326 int nr_hpages = 4;
327 size_t len = nr_hpages * pmd_pagesize;
328
329 one_page = allocate_zero_filled_hugepage(len);
330 verify_rss_anon_split_huge_page_all_zeroes(one_page, nr_hpages, len);
331 ksft_test_result_pass("Split zero filled huge pages successful\n");
332 free(one_page);
333}
334
335static void split_pmd_thp_to_order(int order)
336{
337 char *one_page;
338 size_t len = 4 * pmd_pagesize;
339 size_t i;
340
341 one_page = memalign(pmd_pagesize, len);
342 if (!one_page)
343 ksft_exit_fail_msg("Fail to allocate memory: %s\n", strerror(errno));
344
345 madvise(one_page, len, MADV_HUGEPAGE);
346
347 for (i = 0; i < len; i++)
348 one_page[i] = (char)i;
349
350 if (!check_huge_anon(one_page, 4, pmd_pagesize))
351 ksft_exit_fail_msg("No THP is allocated\n");
352
353 /* split all THPs */
354 write_debugfs(PID_FMT, getpid(), (uint64_t)one_page,
355 (uint64_t)one_page + len, order);
356
357 for (i = 0; i < len; i++)
358 if (one_page[i] != (char)i)
359 ksft_exit_fail_msg("%ld byte corrupted\n", i);
360
361 memset(expected_orders, 0, sizeof(int) * (pmd_order + 1));
362 expected_orders[order] = 4 << (pmd_order - order);
363
364 if (check_after_split_folio_orders(one_page, len, pagemap_fd,
365 kpageflags_fd, expected_orders,
366 (pmd_order + 1)))
367 ksft_exit_fail_msg("Unexpected THP split\n");
368
369 if (!check_huge_anon(one_page, 0, pmd_pagesize))
370 ksft_exit_fail_msg("Still AnonHugePages not split\n");
371
372 ksft_test_result_pass("Split huge pages to order %d successful\n", order);
373 free(one_page);
374}
375
/*
 * Test splitting of PTE-mapped THPs: allocate PMD-mapped THPs, convert them
 * to PTE mappings by mremap'ing one page per THP, split through debugfs, and
 * verify contents and per-page folio backing. Reports pass/fail/skip via
 * kselftest; never exits the process on its own.
 */
static void split_pte_mapped_thp(void)
{
	const size_t nr_thps = 4;
	const size_t thp_area_size = nr_thps * pmd_pagesize;
	const size_t page_area_size = nr_thps * pagesize;
	char *thp_area, *tmp, *page_area = MAP_FAILED;
	size_t i;

	/* hint address only; the kernel may place the mapping elsewhere */
	thp_area = mmap((void *)(1UL << 30), thp_area_size, PROT_READ | PROT_WRITE,
			MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
	if (thp_area == MAP_FAILED) {
		ksft_test_result_fail("Fail to allocate memory: %s\n", strerror(errno));
		return;
	}

	madvise(thp_area, thp_area_size, MADV_HUGEPAGE);

	/* fault in and tag every byte so later corruption checks mean something */
	for (i = 0; i < thp_area_size; i++)
		thp_area[i] = (char)i;

	if (!check_huge_anon(thp_area, nr_thps, pmd_pagesize)) {
		ksft_test_result_skip("Not all THPs allocated\n");
		goto out;
	}

	/*
	 * To challenge splitting code, we will mremap a single page of each
	 * THP (page[i] of thp[i]) in the thp_area into page_area. This will
	 * replace the PMD mappings in the thp_area by PTE mappings first,
	 * but leaving the THP unsplit, to then create a page-sized hole in
	 * the thp_area.
	 * We will then manually trigger splitting of all THPs through the
	 * single mremap'ed pages of each THP in the page_area.
	 */
	page_area = mmap(NULL, page_area_size, PROT_READ | PROT_WRITE,
			 MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
	if (page_area == MAP_FAILED) {
		ksft_test_result_fail("Fail to allocate memory: %s\n", strerror(errno));
		goto out;
	}

	/* move page[i] of thp[i] into slot i of the page_area */
	for (i = 0; i < nr_thps; i++) {
		tmp = mremap(thp_area + pmd_pagesize * i + pagesize * i,
			     pagesize, pagesize, MREMAP_MAYMOVE|MREMAP_FIXED,
			     page_area + pagesize * i);
		if (tmp != MAP_FAILED)
			continue;
		ksft_test_result_fail("mremap failed: %s\n", strerror(errno));
		goto out;
	}

	/*
	 * Verify that our THPs were not split yet. Note that
	 * check_huge_anon() cannot be used as it checks for PMD mappings.
	 */
	for (i = 0; i < nr_thps; i++) {
		if (is_backed_by_folio(page_area + i * pagesize, pmd_order,
				       pagemap_fd, kpageflags_fd))
			continue;
		ksft_test_result_fail("THP %zu missing after mremap\n", i);
		goto out;
	}

	/* Split all THPs through the remapped pages. */
	write_debugfs(PID_FMT, getpid(), (uint64_t)page_area,
		      (uint64_t)page_area + page_area_size, 0);

	/* Corruption during mremap or split? */
	for (i = 0; i < page_area_size; i++) {
		if (page_area[i] == (char)i)
			continue;
		ksft_test_result_fail("%zu byte corrupted\n", i);
		goto out;
	}

	/* Split failed? Each remapped page must now be an order-0 folio. */
	for (i = 0; i < nr_thps; i++) {
		if (is_backed_by_folio(page_area + i * pagesize, 0,
				       pagemap_fd, kpageflags_fd))
			continue;
		ksft_test_result_fail("THP %zu not split\n", i);
	}

	ksft_test_result_pass("Split PTE-mapped huge pages successful\n");
out:
	munmap(thp_area, thp_area_size);
	if (page_area != MAP_FAILED)
		munmap(page_area, page_area_size);
}
465
466static void split_file_backed_thp(int order)
467{
468 int status;
469 int fd;
470 char tmpfs_template[] = "/tmp/thp_split_XXXXXX";
471 const char *tmpfs_loc = mkdtemp(tmpfs_template);
472 char testfile[INPUT_MAX];
473 ssize_t num_written, num_read;
474 char *file_buf1, *file_buf2;
475 uint64_t pgoff_start = 0, pgoff_end = 1024;
476 int i;
477
478 ksft_print_msg("Please enable pr_debug in split_huge_pages_in_file() for more info.\n");
479
480 file_buf1 = (char *)malloc(pmd_pagesize);
481 file_buf2 = (char *)malloc(pmd_pagesize);
482
483 if (!file_buf1 || !file_buf2) {
484 ksft_print_msg("cannot allocate file buffers\n");
485 goto out;
486 }
487
488 for (i = 0; i < pmd_pagesize; i++)
489 file_buf1[i] = (char)i;
490 memset(file_buf2, 0, pmd_pagesize);
491
492 status = mount("tmpfs", tmpfs_loc, "tmpfs", 0, "huge=always,size=4m");
493
494 if (status)
495 ksft_exit_fail_msg("Unable to create a tmpfs for testing\n");
496
497 status = snprintf(testfile, INPUT_MAX, "%s/thp_file", tmpfs_loc);
498 if (status >= INPUT_MAX) {
499 ksft_print_msg("Fail to create file-backed THP split testing file\n");
500 goto cleanup;
501 }
502
503 fd = open(testfile, O_CREAT|O_RDWR, 0664);
504 if (fd == -1) {
505 ksft_perror("Cannot open testing file");
506 goto cleanup;
507 }
508
509 /* write pmd size data to the file, so a file-backed THP can be allocated */
510 num_written = write(fd, file_buf1, pmd_pagesize);
511
512 if (num_written == -1 || num_written != pmd_pagesize) {
513 ksft_perror("Failed to write data to testing file");
514 goto close_file;
515 }
516
517 /* split the file-backed THP */
518 write_debugfs(PATH_FMT, testfile, pgoff_start, pgoff_end, order);
519
520 /* check file content after split */
521 status = lseek(fd, 0, SEEK_SET);
522 if (status == -1) {
523 ksft_perror("Cannot lseek file");
524 goto close_file;
525 }
526
527 num_read = read(fd, file_buf2, num_written);
528 if (num_read == -1 || num_read != num_written) {
529 ksft_perror("Cannot read file content back");
530 goto close_file;
531 }
532
533 if (strncmp(file_buf1, file_buf2, pmd_pagesize) != 0) {
534 ksft_print_msg("File content changed\n");
535 goto close_file;
536 }
537
538 close(fd);
539 status = unlink(testfile);
540 if (status) {
541 ksft_perror("Cannot remove testing file");
542 goto cleanup;
543 }
544
545 status = umount(tmpfs_loc);
546 if (status) {
547 rmdir(tmpfs_loc);
548 ksft_exit_fail_msg("Unable to umount %s\n", tmpfs_loc);
549 }
550
551 status = rmdir(tmpfs_loc);
552 if (status)
553 ksft_exit_fail_msg("cannot remove tmp dir: %s\n", strerror(errno));
554
555 ksft_print_msg("Please check dmesg for more information\n");
556 ksft_test_result_pass("File-backed THP split to order %d test done\n", order);
557 return;
558
559close_file:
560 close(fd);
561cleanup:
562 umount(tmpfs_loc);
563 rmdir(tmpfs_loc);
564out:
565 ksft_exit_fail_msg("Error occurred\n");
566}
567
/*
 * Pick the directory used for pagecache THP tests: either the caller's
 * @xfs_path (nothing to tear down later) or a freshly created temp dir.
 *
 * Return: true when a temp dir was created and must be removed by the
 * caller, false when @xfs_path was used.
 */
static bool prepare_thp_fs(const char *xfs_path, char *thp_fs_template,
		const char **thp_fs_loc)
{
	/* an explicit path was supplied: use it as-is */
	if (xfs_path) {
		*thp_fs_loc = xfs_path;
		return false;
	}

	/* otherwise create a throwaway directory from the template */
	*thp_fs_loc = mkdtemp(thp_fs_template);
	if (!*thp_fs_loc)
		ksft_exit_fail_msg("cannot create temp folder\n");

	return true;
}
583
/* Remove the temp dir made by prepare_thp_fs(); no-op for caller-owned paths. */
static void cleanup_thp_fs(const char *thp_fs_loc, bool created_tmp)
{
	if (!created_tmp)
		return;

	if (rmdir(thp_fs_loc))
		ksft_exit_fail_msg("cannot remove tmp dir: %s\n",
				   strerror(errno));
}
596
597static int create_pagecache_thp_and_fd(const char *testfile, size_t fd_size,
598 int *fd, char **addr)
599{
600 size_t i;
601 unsigned char buf[1024];
602
603 srand(time(NULL));
604
605 *fd = open(testfile, O_CREAT | O_RDWR, 0664);
606 if (*fd == -1)
607 ksft_exit_fail_msg("Failed to create a file at %s\n", testfile);
608
609 assert(fd_size % sizeof(buf) == 0);
610 for (i = 0; i < sizeof(buf); i++)
611 buf[i] = (unsigned char)i;
612 for (i = 0; i < fd_size; i += sizeof(buf))
613 write(*fd, buf, sizeof(buf));
614
615 close(*fd);
616 sync();
617 *fd = open("/proc/sys/vm/drop_caches", O_WRONLY);
618 if (*fd == -1) {
619 ksft_perror("open drop_caches");
620 goto err_out_unlink;
621 }
622 if (write(*fd, "3", 1) != 1) {
623 ksft_perror("write to drop_caches");
624 goto err_out_unlink;
625 }
626 close(*fd);
627
628 *fd = open(testfile, O_RDWR);
629 if (*fd == -1) {
630 ksft_perror("Failed to open testfile\n");
631 goto err_out_unlink;
632 }
633
634 *addr = mmap(NULL, fd_size, PROT_READ|PROT_WRITE, MAP_SHARED, *fd, 0);
635 if (*addr == (char *)-1) {
636 ksft_perror("cannot mmap");
637 goto err_out_close;
638 }
639 madvise(*addr, fd_size, MADV_HUGEPAGE);
640
641 force_read_pages(*addr, fd_size / pmd_pagesize, pmd_pagesize);
642
643 if (!check_huge_file(*addr, fd_size / pmd_pagesize, pmd_pagesize)) {
644 ksft_print_msg("No large pagecache folio generated, please provide a filesystem supporting large folio\n");
645 munmap(*addr, fd_size);
646 close(*fd);
647 unlink(testfile);
648 ksft_test_result_skip("Pagecache folio split skipped\n");
649 return -2;
650 }
651 return 0;
652err_out_close:
653 close(*fd);
654err_out_unlink:
655 unlink(testfile);
656 ksft_exit_fail_msg("Failed to create large pagecache folios\n");
657 return -1;
658}
659
660static void split_thp_in_pagecache_to_order_at(size_t fd_size,
661 const char *fs_loc, int order, int offset)
662{
663 int fd;
664 char *split_addr;
665 char *addr;
666 size_t i;
667 char testfile[INPUT_MAX];
668 int err = 0;
669
670 err = snprintf(testfile, INPUT_MAX, "%s/test", fs_loc);
671
672 if (err < 0)
673 ksft_exit_fail_msg("cannot generate right test file name\n");
674
675 err = create_pagecache_thp_and_fd(testfile, fd_size, &fd, &addr);
676 if (err)
677 return;
678
679 err = 0;
680
681 memset(expected_orders, 0, sizeof(int) * (pmd_order + 1));
682 /*
683 * use [split_addr, split_addr + pagesize) range to split THPs, since
684 * the debugfs function always split a range with pagesize step and
685 * providing a full [addr, addr + fd_size) range can trigger multiple
686 * splits, complicating after-split result checking.
687 */
688 if (offset == -1) {
689 for (split_addr = addr; split_addr < addr + fd_size; split_addr += pmd_pagesize)
690 write_debugfs(PID_FMT, getpid(), (uint64_t)split_addr,
691 (uint64_t)split_addr + pagesize, order);
692
693 expected_orders[order] = fd_size / (pagesize << order);
694 } else {
695 int times = fd_size / pmd_pagesize;
696
697 for (split_addr = addr; split_addr < addr + fd_size; split_addr += pmd_pagesize)
698 write_debugfs(PID_FMT_OFFSET, getpid(), (uint64_t)split_addr,
699 (uint64_t)split_addr + pagesize, order, offset);
700
701 for (i = order + 1; i < pmd_order; i++)
702 expected_orders[i] = times;
703 expected_orders[order] = 2 * times;
704 }
705
706 for (i = 0; i < fd_size; i++)
707 if (*(addr + i) != (char)i) {
708 ksft_print_msg("%lu byte corrupted in the file\n", i);
709 err = EXIT_FAILURE;
710 goto out;
711 }
712
713 if (check_after_split_folio_orders(addr, fd_size, pagemap_fd,
714 kpageflags_fd, expected_orders,
715 (pmd_order + 1))) {
716 ksft_print_msg("Unexpected THP split\n");
717 err = 1;
718 goto out;
719 }
720
721 if (!check_huge_file(addr, 0, pmd_pagesize)) {
722 ksft_print_msg("Still FilePmdMapped not split\n");
723 err = EXIT_FAILURE;
724 goto out;
725 }
726
727out:
728 munmap(addr, fd_size);
729 close(fd);
730 unlink(testfile);
731 if (offset == -1) {
732 if (err)
733 ksft_exit_fail_msg("Split PMD-mapped pagecache folio to order %d failed\n", order);
734 ksft_test_result_pass("Split PMD-mapped pagecache folio to order %d passed\n", order);
735 } else {
736 if (err)
737 ksft_exit_fail_msg("Split PMD-mapped pagecache folio to order %d at in-folio offset %d failed\n", order, offset);
738 ksft_test_result_pass("Split PMD-mapped pagecache folio to order %d at in-folio offset %d passed\n", order, offset);
739 }
740}
741
int main(int argc, char **argv)
{
	int i;
	size_t fd_size;
	char *optional_xfs_path = NULL;
	char fs_loc_template[] = "/tmp/thp_fs_XXXXXX";
	const char *fs_loc;
	bool created_tmp;
	int offset;
	unsigned int nr_pages;
	unsigned int tests;

	ksft_print_header();

	/* the debugfs interface and /proc/kpageflags require root */
	if (geteuid() != 0) {
		ksft_print_msg("Please run the benchmark as root\n");
		ksft_finished();
	}

	if (!thp_is_enabled())
		ksft_exit_skip("Transparent Hugepages not available\n");

	/* optional argv[1]: existing directory to use for pagecache tests */
	if (argc > 1)
		optional_xfs_path = argv[1];

	pagesize = getpagesize();
	pageshift = ffs(pagesize) - 1;
	pmd_pagesize = read_pmd_pagesize();
	if (!pmd_pagesize)
		ksft_exit_fail_msg("Reading PMD pagesize failed\n");

	nr_pages = pmd_pagesize / pagesize;
	pmd_order = sz2ord(pmd_pagesize, pagesize);

	/* scratch array shared by the split checks, one slot per order 0..pmd_order */
	expected_orders = (int *)malloc(sizeof(int) * (pmd_order + 1));
	if (!expected_orders)
		ksft_exit_fail_msg("Fail to allocate memory: %s\n", strerror(errno));

	/*
	 * Planned results: zero-page + PTE-mapped tests (2), anon split to
	 * each order except 1 (pmd_order - 1), file-backed plus uniform
	 * pagecache splits (2 * pmd_order), and the in-folio offset pagecache
	 * splits ((pmd_order - 1) * 4 + 2, matching the offset stepping in
	 * the loop below).
	 */
	tests = 2 + (pmd_order - 1) + (2 * pmd_order) + (pmd_order - 1) * 4 + 2;
	ksft_set_plan(tests);

	pagemap_fd = open(pagemap_proc, O_RDONLY);
	if (pagemap_fd == -1)
		ksft_exit_fail_msg("read pagemap: %s\n", strerror(errno));

	kpageflags_fd = open(kpageflags_proc, O_RDONLY);
	if (kpageflags_fd == -1)
		ksft_exit_fail_msg("read kpageflags: %s\n", strerror(errno));

	/* two PMD THPs per pagecache test file */
	fd_size = 2 * pmd_pagesize;

	split_pmd_zero_pages();

	/* NOTE(review): order 1 is skipped — presumably unsupported for anon; confirm */
	for (i = 0; i < pmd_order; i++)
		if (i != 1)
			split_pmd_thp_to_order(i);

	split_pte_mapped_thp();
	for (i = 0; i < pmd_order; i++)
		split_file_backed_thp(i);

	created_tmp = prepare_thp_fs(optional_xfs_path, fs_loc_template,
				     &fs_loc);
	for (i = pmd_order - 1; i >= 0; i--)
		split_thp_in_pagecache_to_order_at(fd_size, fs_loc, i, -1);

	/* offset stepping here must stay in sync with the ksft_set_plan() math */
	for (i = 0; i < pmd_order; i++)
		for (offset = 0;
		     offset < nr_pages;
		     offset += MAX(nr_pages / 4, 1 << i))
			split_thp_in_pagecache_to_order_at(fd_size, fs_loc, i, offset);
	cleanup_thp_fs(fs_loc, created_tmp);

	close(pagemap_fd);
	close(kpageflags_fd);
	free(expected_orders);

	ksft_finished();

	return 0;
}