Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

Merge tag 'sysctl-6.15-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/sysctl/sysctl

Pull sysctl updates from Joel Granados:

- Move vm_table members out of kernel/sysctl.c

All vm_table array members have moved to their respective subsystems
leading to the removal of vm_table from kernel/sysctl.c. This
increases modularity by placing the ctl_tables closer to where they
are actually used and at the same time reducing the chances of merge
conflicts in kernel/sysctl.c.

- ctl_table range fixes

Replace the proc_handler function that checks variable ranges in
coredump_sysctls and vdso_table with the one that actually uses the
extra{1,2} pointers as min/max values. This tightens the range of the
values that users can pass into the kernel, effectively preventing
{under,over}flows.

- Misc fixes

Correct grammar errors and typos in test messages. Update sysctl
files in MAINTAINERS. Constify and remove the array size in the
declaration of alignment_tbl.

* tag 'sysctl-6.15-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/sysctl/sysctl: (22 commits)
selftests/sysctl: fix wording of help messages
selftests: fix spelling/grammar errors in sysctl/sysctl.sh
MAINTAINERS: Update sysctl file list in MAINTAINERS
sysctl: Fix underflow value setting risk in vm_table
coredump: Fixes core_pipe_limit sysctl proc_handler
sysctl: remove unneeded include
sysctl: remove the vm_table
sh: vdso: move the sysctl to arch/sh/kernel/vsyscall/vsyscall.c
x86: vdso: move the sysctl to arch/x86/entry/vdso/vdso32-setup.c
fs: dcache: move the sysctl to fs/dcache.c
sunrpc: simplify rpcauth_cache_shrink_count()
fs: drop_caches: move sysctl to fs/drop_caches.c
fs: fs-writeback: move sysctl to fs/fs-writeback.c
mm: nommu: move sysctl to mm/nommu.c
security: min_addr: move sysctl to security/min_addr.c
mm: mmap: move sysctl to mm/mmap.c
mm: util: move sysctls to mm/util.c
mm: vmscan: move vmscan sysctls to mm/vmscan.c
mm: swap: move sysctl to mm/swap.c
mm: filemap: move sysctl to mm/filemap.c
...

+350 -322
+4 -3
MAINTAINERS
··· 19081 19081 T: git git://git.kernel.org/pub/scm/linux/kernel/git/sysctl/sysctl.git sysctl-next 19082 19082 F: fs/proc/proc_sysctl.c 19083 19083 F: include/linux/sysctl.h 19084 - F: kernel/sysctl-test.c 19085 - F: kernel/sysctl.c 19086 - F: tools/testing/selftests/sysctl/ 19084 + F: kernel/sysctl* 19085 + F: tools/testing/selftests/sysctl/* 19086 + F: lib/test_sysctl.c 19087 + F: scripts/check-sysctl-docs 19087 19088 19088 19089 PS3 NETWORK SUPPORT 19089 19090 M: Geoff Levand <geoff@infradead.org>
+1 -1
arch/csky/abiv1/alignment.c
··· 300 300 force_sig_fault(SIGBUS, BUS_ADRALN, (void __user *)addr); 301 301 } 302 302 303 - static struct ctl_table alignment_tbl[5] = { 303 + static const struct ctl_table alignment_tbl[] = { 304 304 { 305 305 .procname = "kernel_enable", 306 306 .data = &align_kern_enable,
+21
arch/sh/kernel/vsyscall/vsyscall.c
··· 14 14 #include <linux/module.h> 15 15 #include <linux/elf.h> 16 16 #include <linux/sched.h> 17 + #include <linux/sysctl.h> 17 18 #include <linux/err.h> 18 19 19 20 /* ··· 30 29 return 1; 31 30 } 32 31 __setup("vdso=", vdso_setup); 32 + 33 + static const struct ctl_table vdso_table[] = { 34 + { 35 + .procname = "vdso_enabled", 36 + .data = &vdso_enabled, 37 + .maxlen = sizeof(vdso_enabled), 38 + .mode = 0644, 39 + .proc_handler = proc_dointvec_minmax, 40 + .extra1 = SYSCTL_ZERO, 41 + .extra2 = SYSCTL_ONE, 42 + }, 43 + }; 33 44 34 45 /* 35 46 * These symbols are defined by vsyscall.o to mark the bounds ··· 70 57 71 58 return 0; 72 59 } 60 + 61 + static int __init vm_sysctl_init(void) 62 + { 63 + register_sysctl_init("vm", vdso_table); 64 + return 0; 65 + } 66 + 67 + fs_initcall(vm_sysctl_init); 73 68 74 69 /* Setup a VMA at program startup for the vsyscall page */ 75 70 int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
+11 -5
arch/x86/entry/vdso/vdso32-setup.c
··· 51 51 __setup_param("vdso=", vdso_setup, vdso32_setup, 0); 52 52 #endif 53 53 54 - #ifdef CONFIG_X86_64 55 54 56 55 #ifdef CONFIG_SYSCTL 57 - /* Register vsyscall32 into the ABI table */ 58 56 #include <linux/sysctl.h> 59 57 60 - static const struct ctl_table abi_table2[] = { 58 + static const struct ctl_table vdso_table[] = { 61 59 { 60 + #ifdef CONFIG_X86_64 62 61 .procname = "vsyscall32", 62 + #else 63 + .procname = "vdso_enabled", 64 + #endif 63 65 .data = &vdso32_enabled, 64 66 .maxlen = sizeof(int), 65 67 .mode = 0644, ··· 73 71 74 72 static __init int ia32_binfmt_init(void) 75 73 { 76 - register_sysctl("abi", abi_table2); 74 + #ifdef CONFIG_X86_64 75 + /* Register vsyscall32 into the ABI table */ 76 + register_sysctl("abi", vdso_table); 77 + #else 78 + register_sysctl_init("vm", vdso_table); 79 + #endif 77 80 return 0; 78 81 } 79 82 __initcall(ia32_binfmt_init); 80 83 #endif /* CONFIG_SYSCTL */ 81 84 82 - #endif /* CONFIG_X86_64 */
+3 -1
fs/coredump.c
··· 1042 1042 .data = &core_pipe_limit, 1043 1043 .maxlen = sizeof(unsigned int), 1044 1044 .mode = 0644, 1045 - .proc_handler = proc_dointvec, 1045 + .proc_handler = proc_dointvec_minmax, 1046 + .extra1 = SYSCTL_ZERO, 1047 + .extra2 = SYSCTL_INT_MAX, 1046 1048 }, 1047 1049 { 1048 1050 .procname = "core_file_note_size_limit",
+19 -2
fs/dcache.c
··· 73 73 * If no ancestor relationship: 74 74 * arbitrary, since it's serialized on rename_lock 75 75 */ 76 - int sysctl_vfs_cache_pressure __read_mostly = 100; 77 - EXPORT_SYMBOL_GPL(sysctl_vfs_cache_pressure); 76 + static int sysctl_vfs_cache_pressure __read_mostly = 100; 77 + 78 + unsigned long vfs_pressure_ratio(unsigned long val) 79 + { 80 + return mult_frac(val, sysctl_vfs_cache_pressure, 100); 81 + } 82 + EXPORT_SYMBOL_GPL(vfs_pressure_ratio); 78 83 79 84 __cacheline_aligned_in_smp DEFINE_SEQLOCK(rename_lock); 80 85 ··· 216 211 }, 217 212 }; 218 213 214 + static const struct ctl_table vm_dcache_sysctls[] = { 215 + { 216 + .procname = "vfs_cache_pressure", 217 + .data = &sysctl_vfs_cache_pressure, 218 + .maxlen = sizeof(sysctl_vfs_cache_pressure), 219 + .mode = 0644, 220 + .proc_handler = proc_dointvec_minmax, 221 + .extra1 = SYSCTL_ZERO, 222 + }, 223 + }; 224 + 219 225 static int __init init_fs_dcache_sysctls(void) 220 226 { 227 + register_sysctl_init("vm", vm_dcache_sysctls); 221 228 register_sysctl_init("fs", fs_dcache_sysctls); 222 229 return 0; 223 230 }
+21 -2
fs/drop_caches.c
··· 14 14 #include "internal.h" 15 15 16 16 /* A global variable is a bit ugly, but it keeps the code simple */ 17 - int sysctl_drop_caches; 17 + static int sysctl_drop_caches; 18 18 19 19 static void drop_pagecache_sb(struct super_block *sb, void *unused) 20 20 { ··· 48 48 iput(toput_inode); 49 49 } 50 50 51 - int drop_caches_sysctl_handler(const struct ctl_table *table, int write, 51 + static int drop_caches_sysctl_handler(const struct ctl_table *table, int write, 52 52 void *buffer, size_t *length, loff_t *ppos) 53 53 { 54 54 int ret; ··· 77 77 } 78 78 return 0; 79 79 } 80 + 81 + static const struct ctl_table drop_caches_table[] = { 82 + { 83 + .procname = "drop_caches", 84 + .data = &sysctl_drop_caches, 85 + .maxlen = sizeof(int), 86 + .mode = 0200, 87 + .proc_handler = drop_caches_sysctl_handler, 88 + .extra1 = SYSCTL_ONE, 89 + .extra2 = SYSCTL_FOUR, 90 + }, 91 + }; 92 + 93 + static int __init init_vm_drop_caches_sysctls(void) 94 + { 95 + register_sysctl_init("vm", drop_caches_table); 96 + return 0; 97 + } 98 + fs_initcall(init_vm_drop_caches_sysctls);
+21 -9
fs/fs-writeback.c
··· 65 65 * timestamps written to disk after 12 hours, but in the worst case a 66 66 * few inodes might not their timestamps updated for 24 hours. 67 67 */ 68 - unsigned int dirtytime_expire_interval = 12 * 60 * 60; 68 + static unsigned int dirtytime_expire_interval = 12 * 60 * 60; 69 69 70 70 static inline struct inode *wb_inode(struct list_head *head) 71 71 { ··· 2435 2435 schedule_delayed_work(&dirtytime_work, dirtytime_expire_interval * HZ); 2436 2436 } 2437 2437 2438 - static int __init start_dirtytime_writeback(void) 2439 - { 2440 - schedule_delayed_work(&dirtytime_work, dirtytime_expire_interval * HZ); 2441 - return 0; 2442 - } 2443 - __initcall(start_dirtytime_writeback); 2444 - 2445 - int dirtytime_interval_handler(const struct ctl_table *table, int write, 2438 + static int dirtytime_interval_handler(const struct ctl_table *table, int write, 2446 2439 void *buffer, size_t *lenp, loff_t *ppos) 2447 2440 { 2448 2441 int ret; ··· 2445 2452 mod_delayed_work(system_wq, &dirtytime_work, 0); 2446 2453 return ret; 2447 2454 } 2455 + 2456 + static const struct ctl_table vm_fs_writeback_table[] = { 2457 + { 2458 + .procname = "dirtytime_expire_seconds", 2459 + .data = &dirtytime_expire_interval, 2460 + .maxlen = sizeof(dirtytime_expire_interval), 2461 + .mode = 0644, 2462 + .proc_handler = dirtytime_interval_handler, 2463 + .extra1 = SYSCTL_ZERO, 2464 + }, 2465 + }; 2466 + 2467 + static int __init start_dirtytime_writeback(void) 2468 + { 2469 + schedule_delayed_work(&dirtytime_work, dirtytime_expire_interval * HZ); 2470 + register_sysctl_init("vm", vm_fs_writeback_table); 2471 + return 0; 2472 + } 2473 + __initcall(start_dirtytime_writeback); 2448 2474 2449 2475 /** 2450 2476 * __mark_inode_dirty - internal function to mark an inode dirty
+1 -6
include/linux/dcache.h
··· 519 519 return d_really_is_positive(dentry) && !d_unhashed(dentry); 520 520 } 521 521 522 - extern int sysctl_vfs_cache_pressure; 523 - 524 - static inline unsigned long vfs_pressure_ratio(unsigned long val) 525 - { 526 - return mult_frac(val, sysctl_vfs_cache_pressure, 100); 527 - } 522 + unsigned long vfs_pressure_ratio(unsigned long val); 528 523 529 524 /** 530 525 * d_inode - Get the actual inode of this dentry
-23
include/linux/mm.h
··· 40 40 struct pt_regs; 41 41 struct folio_batch; 42 42 43 - extern int sysctl_page_lock_unfairness; 44 - 45 43 void mm_core_init(void); 46 44 void init_mm_internals(void); 47 45 ··· 76 78 } 77 79 78 80 extern void * high_memory; 79 - extern int page_cluster; 80 - extern const int page_cluster_max; 81 81 82 82 #ifdef CONFIG_SYSCTL 83 83 extern int sysctl_legacy_va_layout; ··· 204 208 205 209 extern unsigned long sysctl_user_reserve_kbytes; 206 210 extern unsigned long sysctl_admin_reserve_kbytes; 207 - 208 - extern int sysctl_overcommit_memory; 209 - extern int sysctl_overcommit_ratio; 210 - extern unsigned long sysctl_overcommit_kbytes; 211 - 212 - int overcommit_ratio_handler(const struct ctl_table *, int, void *, size_t *, 213 - loff_t *); 214 - int overcommit_kbytes_handler(const struct ctl_table *, int, void *, size_t *, 215 - loff_t *); 216 - int overcommit_policy_handler(const struct ctl_table *, int, void *, size_t *, 217 - loff_t *); 218 211 219 212 #if defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP) 220 213 #define nth_page(page,n) pfn_to_page(page_to_pfn((page)) + (n)) ··· 3794 3809 3795 3810 extern bool process_shares_mm(struct task_struct *p, struct mm_struct *mm); 3796 3811 3797 - #ifdef CONFIG_SYSCTL 3798 - extern int sysctl_drop_caches; 3799 - int drop_caches_sysctl_handler(const struct ctl_table *, int, void *, size_t *, 3800 - loff_t *); 3801 - #endif 3802 - 3803 3812 void drop_slab(void); 3804 3813 3805 3814 #ifndef CONFIG_MMU ··· 4064 4085 unsigned long wp_shared_mapping_range(struct address_space *mapping, 4065 4086 pgoff_t first_index, pgoff_t nr); 4066 4087 #endif 4067 - 4068 - extern int sysctl_nr_trim_pages; 4069 4088 4070 4089 #ifdef CONFIG_ANON_VMA_NAME 4071 4090 int madvise_set_anon_name(struct mm_struct *mm, unsigned long start,
-2
include/linux/mman.h
··· 59 59 | MAP_HUGE_1GB) 60 60 61 61 extern int sysctl_overcommit_memory; 62 - extern int sysctl_overcommit_ratio; 63 - extern unsigned long sysctl_overcommit_kbytes; 64 62 extern struct percpu_counter vm_committed_as; 65 63 66 64 #ifdef CONFIG_SMP
-9
include/linux/swap.h
··· 433 433 long remove_mapping(struct address_space *mapping, struct folio *folio); 434 434 435 435 #ifdef CONFIG_NUMA 436 - extern int node_reclaim_mode; 437 436 extern int sysctl_min_unmapped_ratio; 438 437 extern int sysctl_min_slab_ratio; 439 - #else 440 - #define node_reclaim_mode 0 441 438 #endif 442 - 443 - static inline bool node_reclaim_enabled(void) 444 - { 445 - /* Is any node_reclaim_mode bit set? */ 446 - return node_reclaim_mode & (RECLAIM_ZONE|RECLAIM_WRITE|RECLAIM_UNMAP); 447 - } 448 439 449 440 void check_move_unevictable_folios(struct folio_batch *fbatch); 450 441
-11
include/linux/vmstat.h
··· 10 10 #include <linux/static_key.h> 11 11 #include <linux/mmdebug.h> 12 12 13 - extern int sysctl_stat_interval; 14 - 15 13 #ifdef CONFIG_NUMA 16 - #define ENABLE_NUMA_STAT 1 17 - #define DISABLE_NUMA_STAT 0 18 - extern int sysctl_vm_numa_stat; 19 14 DECLARE_STATIC_KEY_TRUE(vm_numa_stat_key); 20 - int sysctl_vm_numa_stat_handler(const struct ctl_table *table, int write, 21 - void *buffer, size_t *length, loff_t *ppos); 22 15 #endif 23 16 24 17 struct reclaim_stat { ··· 296 303 void quiet_vmstat(void); 297 304 void cpu_vm_stats_fold(int cpu); 298 305 void refresh_zone_stat_thresholds(void); 299 - 300 - struct ctl_table; 301 - int vmstat_refresh(const struct ctl_table *, int write, void *buffer, size_t *lenp, 302 - loff_t *ppos); 303 306 304 307 void drain_zonestat(struct zone *zone, struct per_cpu_zonestat *); 305 308
-4
include/linux/writeback.h
··· 327 327 /* These are exported to sysctl. */ 328 328 extern unsigned int dirty_writeback_interval; 329 329 extern unsigned int dirty_expire_interval; 330 - extern unsigned int dirtytime_expire_interval; 331 330 extern int laptop_mode; 332 - 333 - int dirtytime_interval_handler(const struct ctl_table *table, int write, 334 - void *buffer, size_t *lenp, loff_t *ppos); 335 331 336 332 void global_dirty_limits(unsigned long *pbackground, unsigned long *pdirty); 337 333 unsigned long wb_calc_thresh(struct bdi_writeback *wb, unsigned long thresh);
-221
kernel/sysctl.c
··· 20 20 */ 21 21 22 22 #include <linux/module.h> 23 - #include <linux/mm.h> 24 - #include <linux/swap.h> 25 - #include <linux/slab.h> 26 23 #include <linux/sysctl.h> 27 24 #include <linux/bitmap.h> 28 25 #include <linux/signal.h> ··· 28 31 #include <linux/proc_fs.h> 29 32 #include <linux/security.h> 30 33 #include <linux/ctype.h> 31 - #include <linux/kmemleak.h> 32 34 #include <linux/filter.h> 33 35 #include <linux/fs.h> 34 36 #include <linux/init.h> ··· 38 42 #include <linux/highuid.h> 39 43 #include <linux/writeback.h> 40 44 #include <linux/ratelimit.h> 41 - #include <linux/hugetlb.h> 42 45 #include <linux/initrd.h> 43 46 #include <linux/key.h> 44 47 #include <linux/times.h> 45 48 #include <linux/limits.h> 46 - #include <linux/dcache.h> 47 49 #include <linux/syscalls.h> 48 - #include <linux/vmstat.h> 49 50 #include <linux/nfs_fs.h> 50 51 #include <linux/acpi.h> 51 52 #include <linux/reboot.h> 52 53 #include <linux/ftrace.h> 53 - #include <linux/oom.h> 54 54 #include <linux/kmod.h> 55 55 #include <linux/capability.h> 56 56 #include <linux/binfmts.h> 57 57 #include <linux/sched/sysctl.h> 58 58 #include <linux/mount.h> 59 - #include <linux/userfaultfd_k.h> 60 59 #include <linux/pid.h> 61 60 62 61 #include "../lib/kstrtox.h" ··· 113 122 114 123 static enum sysctl_writes_mode sysctl_writes_strict = SYSCTL_WRITES_STRICT; 115 124 #endif /* CONFIG_PROC_SYSCTL */ 116 - 117 - #if defined(HAVE_ARCH_PICK_MMAP_LAYOUT) || \ 118 - defined(CONFIG_ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT) 119 - int sysctl_legacy_va_layout; 120 - #endif 121 - 122 125 #endif /* CONFIG_SYSCTL */ 123 126 124 127 /* ··· 1886 1901 #endif 1887 1902 }; 1888 1903 1889 - static const struct ctl_table vm_table[] = { 1890 - { 1891 - .procname = "overcommit_memory", 1892 - .data = &sysctl_overcommit_memory, 1893 - .maxlen = sizeof(sysctl_overcommit_memory), 1894 - .mode = 0644, 1895 - .proc_handler = overcommit_policy_handler, 1896 - .extra1 = SYSCTL_ZERO, 1897 - .extra2 = SYSCTL_TWO, 1898 - }, 1899 - { 1900 - 
.procname = "overcommit_ratio", 1901 - .data = &sysctl_overcommit_ratio, 1902 - .maxlen = sizeof(sysctl_overcommit_ratio), 1903 - .mode = 0644, 1904 - .proc_handler = overcommit_ratio_handler, 1905 - }, 1906 - { 1907 - .procname = "overcommit_kbytes", 1908 - .data = &sysctl_overcommit_kbytes, 1909 - .maxlen = sizeof(sysctl_overcommit_kbytes), 1910 - .mode = 0644, 1911 - .proc_handler = overcommit_kbytes_handler, 1912 - }, 1913 - { 1914 - .procname = "page-cluster", 1915 - .data = &page_cluster, 1916 - .maxlen = sizeof(int), 1917 - .mode = 0644, 1918 - .proc_handler = proc_dointvec_minmax, 1919 - .extra1 = SYSCTL_ZERO, 1920 - .extra2 = (void *)&page_cluster_max, 1921 - }, 1922 - { 1923 - .procname = "dirtytime_expire_seconds", 1924 - .data = &dirtytime_expire_interval, 1925 - .maxlen = sizeof(dirtytime_expire_interval), 1926 - .mode = 0644, 1927 - .proc_handler = dirtytime_interval_handler, 1928 - .extra1 = SYSCTL_ZERO, 1929 - }, 1930 - { 1931 - .procname = "swappiness", 1932 - .data = &vm_swappiness, 1933 - .maxlen = sizeof(vm_swappiness), 1934 - .mode = 0644, 1935 - .proc_handler = proc_dointvec_minmax, 1936 - .extra1 = SYSCTL_ZERO, 1937 - .extra2 = SYSCTL_TWO_HUNDRED, 1938 - }, 1939 - #ifdef CONFIG_NUMA 1940 - { 1941 - .procname = "numa_stat", 1942 - .data = &sysctl_vm_numa_stat, 1943 - .maxlen = sizeof(int), 1944 - .mode = 0644, 1945 - .proc_handler = sysctl_vm_numa_stat_handler, 1946 - .extra1 = SYSCTL_ZERO, 1947 - .extra2 = SYSCTL_ONE, 1948 - }, 1949 - #endif 1950 - { 1951 - .procname = "drop_caches", 1952 - .data = &sysctl_drop_caches, 1953 - .maxlen = sizeof(int), 1954 - .mode = 0200, 1955 - .proc_handler = drop_caches_sysctl_handler, 1956 - .extra1 = SYSCTL_ONE, 1957 - .extra2 = SYSCTL_FOUR, 1958 - }, 1959 - { 1960 - .procname = "page_lock_unfairness", 1961 - .data = &sysctl_page_lock_unfairness, 1962 - .maxlen = sizeof(sysctl_page_lock_unfairness), 1963 - .mode = 0644, 1964 - .proc_handler = proc_dointvec_minmax, 1965 - .extra1 = SYSCTL_ZERO, 1966 - }, 
1967 - #ifdef CONFIG_MMU 1968 - { 1969 - .procname = "max_map_count", 1970 - .data = &sysctl_max_map_count, 1971 - .maxlen = sizeof(sysctl_max_map_count), 1972 - .mode = 0644, 1973 - .proc_handler = proc_dointvec_minmax, 1974 - .extra1 = SYSCTL_ZERO, 1975 - }, 1976 - #else 1977 - { 1978 - .procname = "nr_trim_pages", 1979 - .data = &sysctl_nr_trim_pages, 1980 - .maxlen = sizeof(sysctl_nr_trim_pages), 1981 - .mode = 0644, 1982 - .proc_handler = proc_dointvec_minmax, 1983 - .extra1 = SYSCTL_ZERO, 1984 - }, 1985 - #endif 1986 - { 1987 - .procname = "vfs_cache_pressure", 1988 - .data = &sysctl_vfs_cache_pressure, 1989 - .maxlen = sizeof(sysctl_vfs_cache_pressure), 1990 - .mode = 0644, 1991 - .proc_handler = proc_dointvec_minmax, 1992 - .extra1 = SYSCTL_ZERO, 1993 - }, 1994 - #if defined(HAVE_ARCH_PICK_MMAP_LAYOUT) || \ 1995 - defined(CONFIG_ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT) 1996 - { 1997 - .procname = "legacy_va_layout", 1998 - .data = &sysctl_legacy_va_layout, 1999 - .maxlen = sizeof(sysctl_legacy_va_layout), 2000 - .mode = 0644, 2001 - .proc_handler = proc_dointvec_minmax, 2002 - .extra1 = SYSCTL_ZERO, 2003 - }, 2004 - #endif 2005 - #ifdef CONFIG_NUMA 2006 - { 2007 - .procname = "zone_reclaim_mode", 2008 - .data = &node_reclaim_mode, 2009 - .maxlen = sizeof(node_reclaim_mode), 2010 - .mode = 0644, 2011 - .proc_handler = proc_dointvec_minmax, 2012 - .extra1 = SYSCTL_ZERO, 2013 - }, 2014 - #endif 2015 - #ifdef CONFIG_SMP 2016 - { 2017 - .procname = "stat_interval", 2018 - .data = &sysctl_stat_interval, 2019 - .maxlen = sizeof(sysctl_stat_interval), 2020 - .mode = 0644, 2021 - .proc_handler = proc_dointvec_jiffies, 2022 - }, 2023 - { 2024 - .procname = "stat_refresh", 2025 - .data = NULL, 2026 - .maxlen = 0, 2027 - .mode = 0600, 2028 - .proc_handler = vmstat_refresh, 2029 - }, 2030 - #endif 2031 - #ifdef CONFIG_MMU 2032 - { 2033 - .procname = "mmap_min_addr", 2034 - .data = &dac_mmap_min_addr, 2035 - .maxlen = sizeof(unsigned long), 2036 - .mode = 0644, 2037 - 
.proc_handler = mmap_min_addr_handler, 2038 - }, 2039 - #endif 2040 - #if (defined(CONFIG_X86_32) && !defined(CONFIG_UML))|| \ 2041 - (defined(CONFIG_SUPERH) && defined(CONFIG_VSYSCALL)) 2042 - { 2043 - .procname = "vdso_enabled", 2044 - #ifdef CONFIG_X86_32 2045 - .data = &vdso32_enabled, 2046 - .maxlen = sizeof(vdso32_enabled), 2047 - #else 2048 - .data = &vdso_enabled, 2049 - .maxlen = sizeof(vdso_enabled), 2050 - #endif 2051 - .mode = 0644, 2052 - .proc_handler = proc_dointvec, 2053 - .extra1 = SYSCTL_ZERO, 2054 - }, 2055 - #endif 2056 - { 2057 - .procname = "user_reserve_kbytes", 2058 - .data = &sysctl_user_reserve_kbytes, 2059 - .maxlen = sizeof(sysctl_user_reserve_kbytes), 2060 - .mode = 0644, 2061 - .proc_handler = proc_doulongvec_minmax, 2062 - }, 2063 - { 2064 - .procname = "admin_reserve_kbytes", 2065 - .data = &sysctl_admin_reserve_kbytes, 2066 - .maxlen = sizeof(sysctl_admin_reserve_kbytes), 2067 - .mode = 0644, 2068 - .proc_handler = proc_doulongvec_minmax, 2069 - }, 2070 - #ifdef CONFIG_HAVE_ARCH_MMAP_RND_BITS 2071 - { 2072 - .procname = "mmap_rnd_bits", 2073 - .data = &mmap_rnd_bits, 2074 - .maxlen = sizeof(mmap_rnd_bits), 2075 - .mode = 0600, 2076 - .proc_handler = proc_dointvec_minmax, 2077 - .extra1 = (void *)&mmap_rnd_bits_min, 2078 - .extra2 = (void *)&mmap_rnd_bits_max, 2079 - }, 2080 - #endif 2081 - #ifdef CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS 2082 - { 2083 - .procname = "mmap_rnd_compat_bits", 2084 - .data = &mmap_rnd_compat_bits, 2085 - .maxlen = sizeof(mmap_rnd_compat_bits), 2086 - .mode = 0600, 2087 - .proc_handler = proc_dointvec_minmax, 2088 - .extra1 = (void *)&mmap_rnd_compat_bits_min, 2089 - .extra2 = (void *)&mmap_rnd_compat_bits_max, 2090 - }, 2091 - #endif 2092 - }; 2093 - 2094 1904 int __init sysctl_init_bases(void) 2095 1905 { 2096 1906 register_sysctl_init("kernel", kern_table); 2097 - register_sysctl_init("vm", vm_table); 2098 1907 2099 1908 return 0; 2100 1909 }
+15 -3
mm/filemap.c
··· 47 47 #include <linux/splice.h> 48 48 #include <linux/rcupdate_wait.h> 49 49 #include <linux/sched/mm.h> 50 + #include <linux/sysctl.h> 50 51 #include <asm/pgalloc.h> 51 52 #include <asm/tlbflush.h> 52 53 #include "internal.h" ··· 1078 1077 return &folio_wait_table[hash_ptr(folio, PAGE_WAIT_TABLE_BITS)]; 1079 1078 } 1080 1079 1080 + /* How many times do we accept lock stealing from under a waiter? */ 1081 + static int sysctl_page_lock_unfairness = 5; 1082 + static const struct ctl_table filemap_sysctl_table[] = { 1083 + { 1084 + .procname = "page_lock_unfairness", 1085 + .data = &sysctl_page_lock_unfairness, 1086 + .maxlen = sizeof(sysctl_page_lock_unfairness), 1087 + .mode = 0644, 1088 + .proc_handler = proc_dointvec_minmax, 1089 + .extra1 = SYSCTL_ZERO, 1090 + } 1091 + }; 1092 + 1081 1093 void __init pagecache_init(void) 1082 1094 { 1083 1095 int i; ··· 1099 1085 init_waitqueue_head(&folio_wait_table[i]); 1100 1086 1101 1087 page_writeback_init(); 1088 + register_sysctl_init("vm", filemap_sysctl_table); 1102 1089 } 1103 1090 1104 1091 /* ··· 1246 1231 wait->flags |= WQ_FLAG_WOKEN | WQ_FLAG_DONE; 1247 1232 return true; 1248 1233 } 1249 - 1250 - /* How many times do we accept lock stealing from under a waiter? */ 1251 - int sysctl_page_lock_unfairness = 5; 1252 1234 1253 1235 static inline int folio_wait_bit_common(struct folio *folio, int bit_nr, 1254 1236 int state, enum behavior behavior)
+10
mm/internal.h
··· 1097 1097 #define NODE_RECLAIM_SUCCESS 1 1098 1098 1099 1099 #ifdef CONFIG_NUMA 1100 + extern int node_reclaim_mode; 1101 + 1100 1102 extern int node_reclaim(struct pglist_data *, gfp_t, unsigned int); 1101 1103 extern int find_next_best_node(int node, nodemask_t *used_node_mask); 1102 1104 #else 1105 + #define node_reclaim_mode 0 1106 + 1103 1107 static inline int node_reclaim(struct pglist_data *pgdat, gfp_t mask, 1104 1108 unsigned int order) 1105 1109 { ··· 1114 1110 return NUMA_NO_NODE; 1115 1111 } 1116 1112 #endif 1113 + 1114 + static inline bool node_reclaim_enabled(void) 1115 + { 1116 + /* Is any node_reclaim_mode bit set? */ 1117 + return node_reclaim_mode & (RECLAIM_ZONE|RECLAIM_WRITE|RECLAIM_UNMAP); 1118 + } 1117 1119 1118 1120 /* 1119 1121 * mm/memory-failure.c
+54
mm/mmap.c
··· 1543 1543 &special_mapping_vmops); 1544 1544 } 1545 1545 1546 + #ifdef CONFIG_SYSCTL 1547 + #if defined(HAVE_ARCH_PICK_MMAP_LAYOUT) || \ 1548 + defined(CONFIG_ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT) 1549 + int sysctl_legacy_va_layout; 1550 + #endif 1551 + 1552 + static const struct ctl_table mmap_table[] = { 1553 + { 1554 + .procname = "max_map_count", 1555 + .data = &sysctl_max_map_count, 1556 + .maxlen = sizeof(sysctl_max_map_count), 1557 + .mode = 0644, 1558 + .proc_handler = proc_dointvec_minmax, 1559 + .extra1 = SYSCTL_ZERO, 1560 + }, 1561 + #if defined(HAVE_ARCH_PICK_MMAP_LAYOUT) || \ 1562 + defined(CONFIG_ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT) 1563 + { 1564 + .procname = "legacy_va_layout", 1565 + .data = &sysctl_legacy_va_layout, 1566 + .maxlen = sizeof(sysctl_legacy_va_layout), 1567 + .mode = 0644, 1568 + .proc_handler = proc_dointvec_minmax, 1569 + .extra1 = SYSCTL_ZERO, 1570 + }, 1571 + #endif 1572 + #ifdef CONFIG_HAVE_ARCH_MMAP_RND_BITS 1573 + { 1574 + .procname = "mmap_rnd_bits", 1575 + .data = &mmap_rnd_bits, 1576 + .maxlen = sizeof(mmap_rnd_bits), 1577 + .mode = 0600, 1578 + .proc_handler = proc_dointvec_minmax, 1579 + .extra1 = (void *)&mmap_rnd_bits_min, 1580 + .extra2 = (void *)&mmap_rnd_bits_max, 1581 + }, 1582 + #endif 1583 + #ifdef CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS 1584 + { 1585 + .procname = "mmap_rnd_compat_bits", 1586 + .data = &mmap_rnd_compat_bits, 1587 + .maxlen = sizeof(mmap_rnd_compat_bits), 1588 + .mode = 0600, 1589 + .proc_handler = proc_dointvec_minmax, 1590 + .extra1 = (void *)&mmap_rnd_compat_bits_min, 1591 + .extra2 = (void *)&mmap_rnd_compat_bits_max, 1592 + }, 1593 + #endif 1594 + }; 1595 + #endif /* CONFIG_SYSCTL */ 1596 + 1546 1597 /* 1547 1598 * initialise the percpu counter for VM 1548 1599 */ ··· 1603 1552 1604 1553 ret = percpu_counter_init(&vm_committed_as, 0, GFP_KERNEL); 1605 1554 VM_BUG_ON(ret); 1555 + #ifdef CONFIG_SYSCTL 1556 + register_sysctl_init("vm", mmap_table); 1557 + #endif 1606 1558 } 1607 1559 1608 
1560 /*
+14 -1
mm/nommu.c
··· 48 48 unsigned long max_mapnr; 49 49 EXPORT_SYMBOL(max_mapnr); 50 50 unsigned long highest_memmap_pfn; 51 - int sysctl_nr_trim_pages = CONFIG_NOMMU_INITIAL_TRIM_EXCESS; 52 51 int heap_stack_gap = 0; 53 52 54 53 atomic_long_t mmap_pages_allocated; ··· 391 392 return mm->brk = brk; 392 393 } 393 394 395 + static int sysctl_nr_trim_pages = CONFIG_NOMMU_INITIAL_TRIM_EXCESS; 396 + 397 + static const struct ctl_table nommu_table[] = { 398 + { 399 + .procname = "nr_trim_pages", 400 + .data = &sysctl_nr_trim_pages, 401 + .maxlen = sizeof(sysctl_nr_trim_pages), 402 + .mode = 0644, 403 + .proc_handler = proc_dointvec_minmax, 404 + .extra1 = SYSCTL_ZERO, 405 + }, 406 + }; 407 + 394 408 /* 395 409 * initialise the percpu counter for VM and region record slabs 396 410 */ ··· 414 402 ret = percpu_counter_init(&vm_committed_as, 0, GFP_KERNEL); 415 403 VM_BUG_ON(ret); 416 404 vm_region_jar = KMEM_CACHE(vm_region, SLAB_PANIC|SLAB_ACCOUNT); 405 + register_sysctl_init("vm", nommu_table); 417 406 } 418 407 419 408 /*
+15 -1
mm/swap.c
··· 45 45 46 46 /* How many pages do we try to swap or page in/out together? As a power of 2 */ 47 47 int page_cluster; 48 - const int page_cluster_max = 31; 48 + static const int page_cluster_max = 31; 49 49 50 50 struct cpu_fbatches { 51 51 /* ··· 1076 1076 fbatch->nr = j; 1077 1077 } 1078 1078 1079 + static const struct ctl_table swap_sysctl_table[] = { 1080 + { 1081 + .procname = "page-cluster", 1082 + .data = &page_cluster, 1083 + .maxlen = sizeof(int), 1084 + .mode = 0644, 1085 + .proc_handler = proc_dointvec_minmax, 1086 + .extra1 = SYSCTL_ZERO, 1087 + .extra2 = (void *)&page_cluster_max, 1088 + } 1089 + }; 1090 + 1079 1091 /* 1080 1092 * Perform any setup for the swap system 1081 1093 */ ··· 1104 1092 * Right now other parts of the system means that we 1105 1093 * _really_ don't want to cluster much more 1106 1094 */ 1095 + 1096 + register_sysctl_init("vm", swap_sysctl_table); 1107 1097 }
+1
mm/swap.h
··· 3 3 #define _MM_SWAP_H 4 4 5 5 struct mempolicy; 6 + extern int page_cluster; 6 7 7 8 #ifdef CONFIG_SWAP 8 9 #include <linux/swapops.h> /* for swp_offset */
+59 -8
mm/util.c
··· 12 12 #include <linux/security.h> 13 13 #include <linux/swap.h> 14 14 #include <linux/swapops.h> 15 + #include <linux/sysctl.h> 15 16 #include <linux/mman.h> 16 17 #include <linux/hugetlb.h> 17 18 #include <linux/vmalloc.h> ··· 748 747 EXPORT_SYMBOL(folio_mc_copy); 749 748 750 749 int sysctl_overcommit_memory __read_mostly = OVERCOMMIT_GUESS; 751 - int sysctl_overcommit_ratio __read_mostly = 50; 752 - unsigned long sysctl_overcommit_kbytes __read_mostly; 750 + static int sysctl_overcommit_ratio __read_mostly = 50; 751 + static unsigned long sysctl_overcommit_kbytes __read_mostly; 753 752 int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT; 754 753 unsigned long sysctl_user_reserve_kbytes __read_mostly = 1UL << 17; /* 128MB */ 755 754 unsigned long sysctl_admin_reserve_kbytes __read_mostly = 1UL << 13; /* 8MB */ 756 755 757 - int overcommit_ratio_handler(const struct ctl_table *table, int write, void *buffer, 758 - size_t *lenp, loff_t *ppos) 756 + #ifdef CONFIG_SYSCTL 757 + 758 + static int overcommit_ratio_handler(const struct ctl_table *table, int write, 759 + void *buffer, size_t *lenp, loff_t *ppos) 759 760 { 760 761 int ret; 761 762 ··· 772 769 percpu_counter_sync(&vm_committed_as); 773 770 } 774 771 775 - int overcommit_policy_handler(const struct ctl_table *table, int write, void *buffer, 776 - size_t *lenp, loff_t *ppos) 772 + static int overcommit_policy_handler(const struct ctl_table *table, int write, 773 + void *buffer, size_t *lenp, loff_t *ppos) 777 774 { 778 775 struct ctl_table t; 779 776 int new_policy = -1; ··· 808 805 return ret; 809 806 } 810 807 811 - int overcommit_kbytes_handler(const struct ctl_table *table, int write, void *buffer, 812 - size_t *lenp, loff_t *ppos) 808 + static int overcommit_kbytes_handler(const struct ctl_table *table, int write, 809 + void *buffer, size_t *lenp, loff_t *ppos) 813 810 { 814 811 int ret; 815 812 ··· 818 815 sysctl_overcommit_ratio = 0; 819 816 return ret; 820 817 } 818 + 819 + static const 
struct ctl_table util_sysctl_table[] = { 820 + { 821 + .procname = "overcommit_memory", 822 + .data = &sysctl_overcommit_memory, 823 + .maxlen = sizeof(sysctl_overcommit_memory), 824 + .mode = 0644, 825 + .proc_handler = overcommit_policy_handler, 826 + .extra1 = SYSCTL_ZERO, 827 + .extra2 = SYSCTL_TWO, 828 + }, 829 + { 830 + .procname = "overcommit_ratio", 831 + .data = &sysctl_overcommit_ratio, 832 + .maxlen = sizeof(sysctl_overcommit_ratio), 833 + .mode = 0644, 834 + .proc_handler = overcommit_ratio_handler, 835 + }, 836 + { 837 + .procname = "overcommit_kbytes", 838 + .data = &sysctl_overcommit_kbytes, 839 + .maxlen = sizeof(sysctl_overcommit_kbytes), 840 + .mode = 0644, 841 + .proc_handler = overcommit_kbytes_handler, 842 + }, 843 + { 844 + .procname = "user_reserve_kbytes", 845 + .data = &sysctl_user_reserve_kbytes, 846 + .maxlen = sizeof(sysctl_user_reserve_kbytes), 847 + .mode = 0644, 848 + .proc_handler = proc_doulongvec_minmax, 849 + }, 850 + { 851 + .procname = "admin_reserve_kbytes", 852 + .data = &sysctl_admin_reserve_kbytes, 853 + .maxlen = sizeof(sysctl_admin_reserve_kbytes), 854 + .mode = 0644, 855 + .proc_handler = proc_doulongvec_minmax, 856 + }, 857 + }; 858 + 859 + static int __init init_vm_util_sysctls(void) 860 + { 861 + register_sysctl_init("vm", util_sysctl_table); 862 + return 0; 863 + } 864 + subsys_initcall(init_vm_util_sysctls); 865 + #endif /* CONFIG_SYSCTL */ 821 866 822 867 /* 823 868 * Committed memory limit enforced when OVERCOMMIT_NEVER policy is used
+23
mm/vmscan.c
··· 7404 7404 pgdat_kswapd_unlock(pgdat); 7405 7405 } 7406 7406 7407 + static const struct ctl_table vmscan_sysctl_table[] = { 7408 + { 7409 + .procname = "swappiness", 7410 + .data = &vm_swappiness, 7411 + .maxlen = sizeof(vm_swappiness), 7412 + .mode = 0644, 7413 + .proc_handler = proc_dointvec_minmax, 7414 + .extra1 = SYSCTL_ZERO, 7415 + .extra2 = SYSCTL_TWO_HUNDRED, 7416 + }, 7417 + #ifdef CONFIG_NUMA 7418 + { 7419 + .procname = "zone_reclaim_mode", 7420 + .data = &node_reclaim_mode, 7421 + .maxlen = sizeof(node_reclaim_mode), 7422 + .mode = 0644, 7423 + .proc_handler = proc_dointvec_minmax, 7424 + .extra1 = SYSCTL_ZERO, 7425 + } 7426 + #endif 7427 + }; 7428 + 7407 7429 static int __init kswapd_init(void) 7408 7430 { 7409 7431 int nid; ··· 7433 7411 swap_setup(); 7434 7412 for_each_node_state(nid, N_MEMORY) 7435 7413 kswapd_run(nid); 7414 + register_sysctl_init("vm", vmscan_sysctl_table); 7436 7415 return 0; 7437 7416 } 7438 7417
+40 -4
mm/vmstat.c
··· 31 31 32 32 #include "internal.h" 33 33 34 + #ifdef CONFIG_PROC_FS 34 35 #ifdef CONFIG_NUMA 35 - int sysctl_vm_numa_stat = ENABLE_NUMA_STAT; 36 + #define ENABLE_NUMA_STAT 1 37 + static int sysctl_vm_numa_stat = ENABLE_NUMA_STAT; 36 38 37 39 /* zero numa counters within a zone */ 38 40 static void zero_zone_numa_counters(struct zone *zone) ··· 76 74 77 75 static DEFINE_MUTEX(vm_numa_stat_lock); 78 76 79 - int sysctl_vm_numa_stat_handler(const struct ctl_table *table, int write, 77 + static int sysctl_vm_numa_stat_handler(const struct ctl_table *table, int write, 80 78 void *buffer, size_t *length, loff_t *ppos) 81 79 { 82 80 int ret, oldval; ··· 104 102 return ret; 105 103 } 106 104 #endif 105 + #endif /* CONFIG_PROC_FS */ 107 106 108 107 #ifdef CONFIG_VM_EVENT_COUNTERS 109 108 DEFINE_PER_CPU(struct vm_event_state, vm_event_states) = {{0}}; ··· 1943 1940 1944 1941 #ifdef CONFIG_SMP 1945 1942 static DEFINE_PER_CPU(struct delayed_work, vmstat_work); 1946 - int sysctl_stat_interval __read_mostly = HZ; 1943 + static int sysctl_stat_interval __read_mostly = HZ; 1947 1944 static int vmstat_late_init_done; 1948 1945 1949 1946 #ifdef CONFIG_PROC_FS ··· 1952 1949 refresh_cpu_vm_stats(true); 1953 1950 } 1954 1951 1955 - int vmstat_refresh(const struct ctl_table *table, int write, 1952 + static int vmstat_refresh(const struct ctl_table *table, int write, 1956 1953 void *buffer, size_t *lenp, loff_t *ppos) 1957 1954 { 1958 1955 long val; ··· 2201 2198 late_initcall(vmstat_late_init); 2202 2199 #endif 2203 2200 2201 + #ifdef CONFIG_PROC_FS 2202 + static const struct ctl_table vmstat_table[] = { 2203 + #ifdef CONFIG_SMP 2204 + { 2205 + .procname = "stat_interval", 2206 + .data = &sysctl_stat_interval, 2207 + .maxlen = sizeof(sysctl_stat_interval), 2208 + .mode = 0644, 2209 + .proc_handler = proc_dointvec_jiffies, 2210 + }, 2211 + { 2212 + .procname = "stat_refresh", 2213 + .data = NULL, 2214 + .maxlen = 0, 2215 + .mode = 0600, 2216 + .proc_handler = vmstat_refresh, 2217 + }, 
2218 + #endif 2219 + #ifdef CONFIG_NUMA 2220 + { 2221 + .procname = "numa_stat", 2222 + .data = &sysctl_vm_numa_stat, 2223 + .maxlen = sizeof(int), 2224 + .mode = 0644, 2225 + .proc_handler = sysctl_vm_numa_stat_handler, 2226 + .extra1 = SYSCTL_ZERO, 2227 + .extra2 = SYSCTL_ONE, 2228 + }, 2229 + #endif 2230 + }; 2231 + #endif 2232 + 2204 2233 struct workqueue_struct *mm_percpu_wq; 2205 2234 2206 2235 void __init init_mm_internals(void) ··· 2264 2229 proc_create_seq("pagetypeinfo", 0400, NULL, &pagetypeinfo_op); 2265 2230 proc_create_seq("vmstat", 0444, NULL, &vmstat_op); 2266 2231 proc_create_seq("zoneinfo", 0444, NULL, &zoneinfo_op); 2232 + register_sysctl_init("vm", vmstat_table); 2267 2233 #endif 2268 2234 } 2269 2235
+1 -1
net/sunrpc/auth.c
··· 489 489 rpcauth_cache_shrink_count(struct shrinker *shrink, struct shrink_control *sc) 490 490 491 491 { 492 - return number_cred_unused * sysctl_vfs_cache_pressure / 100; 492 + return number_cred_unused; 493 493 } 494 494 495 495 static void
+11
security/min_addr.c
··· 44 44 return ret; 45 45 } 46 46 47 + static const struct ctl_table min_addr_sysctl_table[] = { 48 + { 49 + .procname = "mmap_min_addr", 50 + .data = &dac_mmap_min_addr, 51 + .maxlen = sizeof(unsigned long), 52 + .mode = 0644, 53 + .proc_handler = mmap_min_addr_handler, 54 + }, 55 + }; 56 + 47 57 static int __init init_mmap_min_addr(void) 48 58 { 59 + register_sysctl_init("vm", min_addr_sysctl_table); 49 60 update_mmap_min_addr(); 50 61 51 62 return 0;
+5 -5
tools/testing/selftests/sysctl/sysctl.sh
··· 21 21 # ENABLED: 1 if enabled, 0 otherwise 22 22 # TARGET: test target file required on the test_sysctl module 23 23 # SKIP_NO_TARGET: 1 skip if TARGET not there 24 - # 0 run eventhough TARGET not there 24 + # 0 run even though TARGET not there 25 25 # 26 26 # Once these are enabled please leave them as-is. Write your own test, 27 27 # we have tons of space. ··· 764 764 fi 765 765 766 766 if [ ! -f /proc/cmdline ]; then 767 - echo -e "SKIPPING\nThere is no /proc/cmdline to check for paramter" 767 + echo -e "SKIPPING\nThere is no /proc/cmdline to check for parameter" 768 768 return $ksft_skip 769 769 fi 770 770 ··· 857 857 echo 858 858 echo "TEST_ID x NUM_TEST" 859 859 echo "TEST_ID: Test ID" 860 - echo "NUM_TESTS: Number of recommended times to run the test" 860 + echo "NUM_TESTS: Recommended number of times to run the test" 861 861 echo 862 862 echo "0001 x $(get_test_count 0001) - tests proc_dointvec_minmax()" 863 863 echo "0002 x $(get_test_count 0002) - tests proc_dostring()" ··· 884 884 echo "Valid tests: 0001-$MAX_TEST" 885 885 echo "" 886 886 echo " all Runs all tests (default)" 887 - echo " -t Run test ID the number amount of times is recommended" 887 + echo " -t Run test ID the recommended number of times" 888 888 echo " -w Watch test ID run until it runs into an error" 889 889 echo " -c Run test ID once" 890 890 echo " -s Run test ID x test-count number of times" ··· 898 898 echo Example uses: 899 899 echo 900 900 echo "$TEST_NAME.sh -- executes all tests" 901 - echo "$TEST_NAME.sh -t 0002 -- Executes test ID 0002 number of times is recomended" 901 + echo "$TEST_NAME.sh -t 0002 -- Executes test ID 0002 the recommended number of times" 902 902 echo "$TEST_NAME.sh -w 0002 -- Watch test ID 0002 run until an error occurs" 903 903 echo "$TEST_NAME.sh -s 0002 -- Run test ID 0002 once" 904 904 echo "$TEST_NAME.sh -c 0002 3 -- Run test ID 0002 three times"