Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'random-6.1-rc1-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/crng/random

Pull more random number generator updates from Jason Donenfeld:
"This time with some large scale treewide cleanups.

The intent of this pull is to clean up the way callers fetch random
integers. The current rules for doing this right are:

- If you want a secure or an insecure random u64, use get_random_u64()

- If you want a secure or an insecure random u32, use get_random_u32()

The old function prandom_u32() has been deprecated for a while
now and is just a wrapper around get_random_u32(). Same for
get_random_int().

- If you want a secure or an insecure random u16, use get_random_u16()

- If you want a secure or an insecure random u8, use get_random_u8()

- If you want secure or insecure random bytes, use get_random_bytes().

The old function prandom_bytes() has been deprecated for a while
now and has long been a wrapper around get_random_bytes()

- If you want a non-uniform random u32, u16, or u8 bounded by a
certain open interval maximum, use prandom_u32_max()

I say "non-uniform", because it doesn't do any rejection sampling
or divisions. Hence, it stays within the prandom_*() namespace, not
the get_random_*() namespace.

I'm currently investigating a "uniform" function for 6.2. We'll see
what comes of that.

By applying these rules uniformly, we get several benefits:

- By using prandom_u32_max() with an upper-bound that the compiler
can prove at compile-time is ≤65536 or ≤256, internally
get_random_u16() or get_random_u8() is used, which wastes fewer
batched random bytes, and hence has higher throughput.

- By using prandom_u32_max() instead of %, when the upper-bound is
not a constant, division is still avoided, because
prandom_u32_max() uses a faster multiplication-based trick instead.

- By using get_random_u16() or get_random_u8() in cases where the
return value is intended to indeed be a u16 or a u8, we waste fewer
batched random bytes, and hence have higher throughput.

This series was originally done by hand while I was on an airplane
without Internet. Later, Kees and I worked on retroactively figuring
out what could be done with Coccinelle and what had to be done
manually, and then we split things up based on that.

So while this touches a lot of files, the actual amount of code that's
hand fiddled is comfortably small"
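
The conversions in the hunks below are largely mechanical: prandom_u32() % n becomes prandom_u32_max(n), prandom_bytes() becomes get_random_bytes(), and call sites that only need 8 or 16 bits move to get_random_u8()/get_random_u16(). As a rough illustration of the multiplication-based bound the message describes, here is a small standalone C sketch. It is an approximation for exposition only, not the kernel's exact prandom_u32_max() code; rand_u32() and bounded_u32() are hypothetical stand-ins for get_random_u32() and prandom_u32_max().

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <time.h>

    /* Stand-in for the kernel's get_random_u32(); any 32-bit source will do here. */
    static uint32_t rand_u32(void)
    {
            return ((uint32_t)rand() << 16) ^ (uint32_t)rand();
    }

    /*
     * Bounded value in [0, bound) without a division: multiply a 32-bit
     * random value by the bound and keep the upper 32 bits of the 64-bit
     * product. Unless bound is a power of two the result is slightly
     * biased, which is the "non-uniform" caveat mentioned above.
     */
    static uint32_t bounded_u32(uint32_t bound)
    {
            return (uint32_t)(((uint64_t)rand_u32() * bound) >> 32);
    }

    int main(void)
    {
            srand((unsigned int)time(NULL));
            /* old call-site shape: rand_u32() % 8192; the new shape avoids the division */
            printf("stack fuzz: %u\n", (unsigned int)bounded_u32(8192));
            return 0;
    }

Because only a multiply and a shift are involved, the division is avoided even when the bound is not a compile-time constant, which is why hunks such as the arch_align_stack() changes below use prandom_u32_max(PAGE_SIZE) or prandom_u32_max(8192) rather than a mask.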

* tag 'random-6.1-rc1-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/crng/random:
prandom: remove unused functions
treewide: use get_random_bytes() when possible
treewide: use get_random_u32() when possible
treewide: use get_random_{u8,u16}() when possible, part 2
treewide: use get_random_{u8,u16}() when possible, part 1
treewide: use prandom_u32_max() when possible, part 2
treewide: use prandom_u32_max() when possible, part 1

+378 -421
+1 -1
Documentation/networking/filter.rst
··· 305 305 vlan_tci skb_vlan_tag_get(skb) 306 306 vlan_avail skb_vlan_tag_present(skb) 307 307 vlan_tpid skb->vlan_proto 308 - rand prandom_u32() 308 + rand get_random_u32() 309 309 =================================== ================================================= 310 310 311 311 These extensions can also be prefixed with '#'.
+1 -1
arch/arm/kernel/process.c
··· 371 371 372 372 slots = ((last - first) >> PAGE_SHIFT) + 1; 373 373 374 - offset = get_random_int() % slots; 374 + offset = prandom_u32_max(slots); 375 375 376 376 addr = first + (offset << PAGE_SHIFT); 377 377
+1 -1
arch/arm/kernel/signal.c
··· 655 655 PAGE_SIZE / sizeof(u32)); 656 656 657 657 /* Give the signal return code some randomness */ 658 - offset = 0x200 + (get_random_int() & 0x7fc); 658 + offset = 0x200 + (get_random_u16() & 0x7fc); 659 659 signal_return_offset = offset; 660 660 661 661 /* Copy signal return handlers into the page */
+1 -1
arch/arm64/kernel/process.c
··· 591 591 unsigned long arch_align_stack(unsigned long sp) 592 592 { 593 593 if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space) 594 - sp -= get_random_int() & ~PAGE_MASK; 594 + sp -= prandom_u32_max(PAGE_SIZE); 595 595 return sp & ~0xf; 596 596 } 597 597
+1 -1
arch/arm64/kernel/syscall.c
··· 67 67 * 68 68 * The resulting 5 bits of entropy is seen in SP[8:4]. 69 69 */ 70 - choose_random_kstack_offset(get_random_int() & 0x1FF); 70 + choose_random_kstack_offset(get_random_u16() & 0x1FF); 71 71 } 72 72 73 73 static inline bool has_syscall_work(unsigned long flags)
+1 -1
arch/loongarch/kernel/process.c
··· 293 293 unsigned long arch_align_stack(unsigned long sp) 294 294 { 295 295 if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space) 296 - sp -= get_random_int() & ~PAGE_MASK; 296 + sp -= prandom_u32_max(PAGE_SIZE); 297 297 298 298 return sp & STACK_ALIGN; 299 299 }
+1 -1
arch/loongarch/kernel/vdso.c
··· 78 78 unsigned long base = STACK_TOP; 79 79 80 80 if (current->flags & PF_RANDOMIZE) { 81 - base += get_random_int() & (VDSO_RANDOMIZE_SIZE - 1); 81 + base += prandom_u32_max(VDSO_RANDOMIZE_SIZE); 82 82 base = PAGE_ALIGN(base); 83 83 } 84 84
+1 -1
arch/mips/kernel/process.c
··· 711 711 unsigned long arch_align_stack(unsigned long sp) 712 712 { 713 713 if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space) 714 - sp -= get_random_int() & ~PAGE_MASK; 714 + sp -= prandom_u32_max(PAGE_SIZE); 715 715 716 716 return sp & ALMASK; 717 717 }
+1 -1
arch/mips/kernel/vdso.c
··· 79 79 } 80 80 81 81 if (current->flags & PF_RANDOMIZE) { 82 - base += get_random_int() & (VDSO_RANDOMIZE_SIZE - 1); 82 + base += prandom_u32_max(VDSO_RANDOMIZE_SIZE); 83 83 base = PAGE_ALIGN(base); 84 84 } 85 85
+1 -1
arch/parisc/kernel/process.c
··· 284 284 285 285 static inline unsigned long brk_rnd(void) 286 286 { 287 - return (get_random_int() & BRK_RND_MASK) << PAGE_SHIFT; 287 + return (get_random_u32() & BRK_RND_MASK) << PAGE_SHIFT; 288 288 } 289 289 290 290 unsigned long arch_randomize_brk(struct mm_struct *mm)
+2 -2
arch/parisc/kernel/sys_parisc.c
··· 239 239 unsigned long rnd = 0; 240 240 241 241 if (current->flags & PF_RANDOMIZE) 242 - rnd = get_random_int() & MMAP_RND_MASK; 242 + rnd = get_random_u32() & MMAP_RND_MASK; 243 243 244 244 return rnd << PAGE_SHIFT; 245 245 } 246 246 247 247 unsigned long arch_mmap_rnd(void) 248 248 { 249 - return (get_random_int() & MMAP_RND_MASK) << PAGE_SHIFT; 249 + return (get_random_u32() & MMAP_RND_MASK) << PAGE_SHIFT; 250 250 } 251 251 252 252 static unsigned long mmap_legacy_base(void)
+1 -1
arch/parisc/kernel/vdso.c
··· 75 75 76 76 map_base = mm->mmap_base; 77 77 if (current->flags & PF_RANDOMIZE) 78 - map_base -= (get_random_int() & 0x1f) * PAGE_SIZE; 78 + map_base -= prandom_u32_max(0x20) * PAGE_SIZE; 79 79 80 80 vdso_text_start = get_unmapped_area(NULL, map_base, vdso_text_len, 0, 0); 81 81
+1 -1
arch/powerpc/crypto/crc-vpmsum_test.c
··· 82 82 83 83 if (len <= offset) 84 84 continue; 85 - prandom_bytes(data, len); 85 + get_random_bytes(data, len); 86 86 len -= offset; 87 87 88 88 crypto_shash_update(crct10dif_shash, data+offset, len);
+1 -1
arch/powerpc/kernel/process.c
··· 2303 2303 unsigned long arch_align_stack(unsigned long sp) 2304 2304 { 2305 2305 if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space) 2306 - sp -= get_random_int() & ~PAGE_MASK; 2306 + sp -= prandom_u32_max(PAGE_SIZE); 2307 2307 return sp & ~0xf; 2308 2308 }
+2 -2
arch/s390/kernel/process.c
··· 224 224 unsigned long arch_align_stack(unsigned long sp) 225 225 { 226 226 if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space) 227 - sp -= get_random_int() & ~PAGE_MASK; 227 + sp -= prandom_u32_max(PAGE_SIZE); 228 228 return sp & ~0xf; 229 229 } 230 230 231 231 static inline unsigned long brk_rnd(void) 232 232 { 233 - return (get_random_int() & BRK_RND_MASK) << PAGE_SHIFT; 233 + return (get_random_u16() & BRK_RND_MASK) << PAGE_SHIFT; 234 234 } 235 235 236 236 unsigned long arch_randomize_brk(struct mm_struct *mm)
+1 -1
arch/s390/kernel/vdso.c
··· 227 227 end -= len; 228 228 229 229 if (end > start) { 230 - offset = get_random_int() % (((end - start) >> PAGE_SHIFT) + 1); 230 + offset = prandom_u32_max(((end - start) >> PAGE_SHIFT) + 1); 231 231 addr = start + (offset << PAGE_SHIFT); 232 232 } else { 233 233 addr = start;
+1 -1
arch/s390/mm/mmap.c
··· 37 37 38 38 unsigned long arch_mmap_rnd(void) 39 39 { 40 - return (get_random_int() & MMAP_RND_MASK) << PAGE_SHIFT; 40 + return (get_random_u32() & MMAP_RND_MASK) << PAGE_SHIFT; 41 41 } 42 42 43 43 static unsigned long mmap_base_legacy(unsigned long rnd)
+1 -1
arch/sparc/vdso/vma.c
··· 354 354 unsigned int offset; 355 355 356 356 /* This loses some more bits than a modulo, but is cheaper */ 357 - offset = get_random_int() & (PTRS_PER_PTE - 1); 357 + offset = prandom_u32_max(PTRS_PER_PTE); 358 358 return start + (offset << PAGE_SHIFT); 359 359 } 360 360
+1 -1
arch/um/kernel/process.c
··· 356 356 unsigned long arch_align_stack(unsigned long sp) 357 357 { 358 358 if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space) 359 - sp -= get_random_int() % 8192; 359 + sp -= prandom_u32_max(8192); 360 360 return sp & ~0xf; 361 361 } 362 362 #endif
+1 -1
arch/x86/entry/vdso/vma.c
··· 327 327 end -= len; 328 328 329 329 if (end > start) { 330 - offset = get_random_int() % (((end - start) >> PAGE_SHIFT) + 1); 330 + offset = prandom_u32_max(((end - start) >> PAGE_SHIFT) + 1); 331 331 addr = start + (offset << PAGE_SHIFT); 332 332 } else { 333 333 addr = start;
+1 -1
arch/x86/kernel/cpu/amd.c
··· 503 503 va_align.flags = ALIGN_VA_32 | ALIGN_VA_64; 504 504 505 505 /* A random value per boot for bit slice [12:upper_bit) */ 506 - va_align.bits = get_random_int() & va_align.mask; 506 + va_align.bits = get_random_u32() & va_align.mask; 507 507 } 508 508 509 509 if (cpu_has(c, X86_FEATURE_MWAITX))
+1 -1
arch/x86/kernel/module.c
··· 53 53 */ 54 54 if (module_load_offset == 0) 55 55 module_load_offset = 56 - (get_random_int() % 1024 + 1) * PAGE_SIZE; 56 + (prandom_u32_max(1024) + 1) * PAGE_SIZE; 57 57 mutex_unlock(&module_kaslr_mutex); 58 58 } 59 59 return module_load_offset;
+1 -1
arch/x86/kernel/process.c
··· 965 965 unsigned long arch_align_stack(unsigned long sp) 966 966 { 967 967 if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space) 968 - sp -= get_random_int() % 8192; 968 + sp -= prandom_u32_max(8192); 969 969 return sp & ~0xf; 970 970 } 971 971
+2 -2
arch/x86/mm/pat/cpa-test.c
··· 136 136 failed += print_split(&sa); 137 137 138 138 for (i = 0; i < NTEST; i++) { 139 - unsigned long pfn = prandom_u32() % max_pfn_mapped; 139 + unsigned long pfn = prandom_u32_max(max_pfn_mapped); 140 140 141 141 addr[i] = (unsigned long)__va(pfn << PAGE_SHIFT); 142 - len[i] = prandom_u32() % NPAGES; 142 + len[i] = prandom_u32_max(NPAGES); 143 143 len[i] = min_t(unsigned long, len[i], max_pfn_mapped - pfn - 1); 144 144 145 145 if (len[i] == 0)
+1 -1
block/blk-crypto-fallback.c
··· 539 539 if (blk_crypto_fallback_inited) 540 540 return 0; 541 541 542 - prandom_bytes(blank_key, BLK_CRYPTO_MAX_KEY_SIZE); 542 + get_random_bytes(blank_key, BLK_CRYPTO_MAX_KEY_SIZE); 543 543 544 544 err = bioset_init(&crypto_bio_split, 64, 0, 0); 545 545 if (err)
+1 -1
crypto/async_tx/raid6test.c
··· 37 37 int i; 38 38 39 39 for (i = 0; i < disks; i++) { 40 - prandom_bytes(page_address(data[i]), PAGE_SIZE); 40 + get_random_bytes(page_address(data[i]), PAGE_SIZE); 41 41 dataptrs[i] = data[i]; 42 42 dataoffs[i] = 0; 43 43 }
+47 -47
crypto/testmgr.c
··· 855 855 /* Generate a random length in range [0, max_len], but prefer smaller values */ 856 856 static unsigned int generate_random_length(unsigned int max_len) 857 857 { 858 - unsigned int len = prandom_u32() % (max_len + 1); 858 + unsigned int len = prandom_u32_max(max_len + 1); 859 859 860 - switch (prandom_u32() % 4) { 860 + switch (prandom_u32_max(4)) { 861 861 case 0: 862 862 return len % 64; 863 863 case 1:
··· 874 874 { 875 875 size_t bitpos; 876 876 877 - bitpos = prandom_u32() % (size * 8); 877 + bitpos = prandom_u32_max(size * 8); 878 878 buf[bitpos / 8] ^= 1 << (bitpos % 8); 879 879 } 880 880 881 881 /* Flip a random byte in the given nonempty data buffer */ 882 882 static void flip_random_byte(u8 *buf, size_t size) 883 883 { 884 - buf[prandom_u32() % size] ^= 0xff; 884 + buf[prandom_u32_max(size)] ^= 0xff; 885 885 } 886 886 887 887 /* Sometimes make some random changes to the given nonempty data buffer */
··· 891 891 size_t i; 892 892 893 893 /* Sometimes flip some bits */ 894 - if (prandom_u32() % 4 == 0) { 895 - num_flips = min_t(size_t, 1 << (prandom_u32() % 8), size * 8); 894 + if (prandom_u32_max(4) == 0) { 895 + num_flips = min_t(size_t, 1 << prandom_u32_max(8), size * 8); 896 896 for (i = 0; i < num_flips; i++) 897 897 flip_random_bit(buf, size); 898 898 } 899 899 900 900 /* Sometimes flip some bytes */ 901 - if (prandom_u32() % 4 == 0) { 902 - num_flips = min_t(size_t, 1 << (prandom_u32() % 8), size); 901 + if (prandom_u32_max(4) == 0) { 902 + num_flips = min_t(size_t, 1 << prandom_u32_max(8), size); 903 903 for (i = 0; i < num_flips; i++) 904 904 flip_random_byte(buf, size); 905 905 }
··· 915 915 if (count == 0) 916 916 return; 917 917 918 - switch (prandom_u32() % 8) { /* Choose a generation strategy */ 918 + switch (prandom_u32_max(8)) { /* Choose a generation strategy */ 919 919 case 0: 920 920 case 1: 921 921 /* All the same byte, plus optional mutations */ 922 - switch (prandom_u32() % 4) { 922 + switch (prandom_u32_max(4)) { 923 923 case 0: 924 924 b = 0x00; 925 925 break;
··· 927 927 b = 0xff; 928 928 break; 929 929 default: 930 - b = (u8)prandom_u32(); 930 + b = get_random_u8(); 931 931 break; 932 932 } 933 933 memset(buf, b, count);
··· 935 935 break; 936 936 case 2: 937 937 /* Ascending or descending bytes, plus optional mutations */ 938 - increment = (u8)prandom_u32(); 939 - b = (u8)prandom_u32(); 938 + increment = get_random_u8(); 939 + b = get_random_u8(); 940 940 for (i = 0; i < count; i++, b += increment) 941 941 buf[i] = b; 942 942 mutate_buffer(buf, count);
··· 944 944 default: 945 945 /* Fully random bytes */ 946 946 for (i = 0; i < count; i++) 947 - buf[i] = (u8)prandom_u32(); 947 + buf[i] = get_random_u8(); 948 948 } 949 949 } 950 950
··· 959 959 unsigned int this_len; 960 960 const char *flushtype_str; 961 961 962 - if (div == &divs[max_divs - 1] || prandom_u32() % 2 == 0) 962 + if (div == &divs[max_divs - 1] || prandom_u32_max(2) == 0) 963 963 this_len = remaining; 964 964 else 965 - this_len = 1 + (prandom_u32() % remaining); 965 + this_len = 1 + prandom_u32_max(remaining); 966 966 div->proportion_of_total = this_len; 967 967 968 - if (prandom_u32() % 4 == 0) 969 - div->offset = (PAGE_SIZE - 128) + (prandom_u32() % 128); 970 - else if (prandom_u32() % 2 == 0) 971 - div->offset = prandom_u32() % 32; 968 + if (prandom_u32_max(4) == 0) 969 + div->offset = (PAGE_SIZE - 128) + prandom_u32_max(128); 970 + else if (prandom_u32_max(2) == 0) 971 + div->offset = prandom_u32_max(32); 972 972 else 973 - div->offset = prandom_u32() % PAGE_SIZE; 974 - if (prandom_u32() % 8 == 0) 973 + div->offset = prandom_u32_max(PAGE_SIZE); 974 + if (prandom_u32_max(8) == 0) 975 975 div->offset_relative_to_alignmask = true; 976 976 977 977 div->flush_type = FLUSH_TYPE_NONE; 978 978 if (gen_flushes) { 979 - switch (prandom_u32() % 4) { 979 + switch (prandom_u32_max(4)) { 980 980 case 0: 981 981 div->flush_type = FLUSH_TYPE_REIMPORT; 982 982 break;
··· 988 988 989 989 if (div->flush_type != FLUSH_TYPE_NONE && 990 990 !(req_flags & CRYPTO_TFM_REQ_MAY_SLEEP) && 991 - prandom_u32() % 2 == 0) 991 + prandom_u32_max(2) == 0) 992 992 div->nosimd = true; 993 993 994 994 switch (div->flush_type) {
··· 1035 1035 1036 1036 p += scnprintf(p, end - p, "random:"); 1037 1037 1038 - switch (prandom_u32() % 4) { 1038 + switch (prandom_u32_max(4)) { 1039 1039 case 0: 1040 1040 case 1: 1041 1041 cfg->inplace_mode = OUT_OF_PLACE;
··· 1050 1050 break; 1051 1051 } 1052 1052 1053 - if (prandom_u32() % 2 == 0) { 1053 + if (prandom_u32_max(2) == 0) { 1054 1054 cfg->req_flags |= CRYPTO_TFM_REQ_MAY_SLEEP; 1055 1055 p += scnprintf(p, end - p, " may_sleep"); 1056 1056 } 1057 1057 1058 - switch (prandom_u32() % 4) { 1058 + switch (prandom_u32_max(4)) { 1059 1059 case 0: 1060 1060 cfg->finalization_type = FINALIZATION_TYPE_FINAL; 1061 1061 p += scnprintf(p, end - p, " use_final");
··· 1071 1071 } 1072 1072 1073 1073 if (!(cfg->req_flags & CRYPTO_TFM_REQ_MAY_SLEEP) && 1074 - prandom_u32() % 2 == 0) { 1074 + prandom_u32_max(2) == 0) { 1075 1075 cfg->nosimd = true; 1076 1076 p += scnprintf(p, end - p, " nosimd"); 1077 1077 }
··· 1084 1084 cfg->req_flags); 1085 1085 p += scnprintf(p, end - p, "]"); 1086 1086 1087 - if (cfg->inplace_mode == OUT_OF_PLACE && prandom_u32() % 2 == 0) { 1087 + if (cfg->inplace_mode == OUT_OF_PLACE && prandom_u32_max(2) == 0) { 1088 1088 p += scnprintf(p, end - p, " dst_divs=["); 1089 1089 p = generate_random_sgl_divisions(cfg->dst_divs, 1090 1090 ARRAY_SIZE(cfg->dst_divs),
··· 1093 1093 p += scnprintf(p, end - p, "]"); 1094 1094 } 1095 1095 1096 - if (prandom_u32() % 2 == 0) { 1097 - cfg->iv_offset = 1 + (prandom_u32() % MAX_ALGAPI_ALIGNMASK); 1096 + if (prandom_u32_max(2) == 0) { 1097 + cfg->iv_offset = 1 + prandom_u32_max(MAX_ALGAPI_ALIGNMASK); 1098 1098 p += scnprintf(p, end - p, " iv_offset=%u", cfg->iv_offset); 1099 1099 } 1100 1100 1101 - if (prandom_u32() % 2 == 0) { 1102 - cfg->key_offset = 1 + (prandom_u32() % MAX_ALGAPI_ALIGNMASK); 1101 + if (prandom_u32_max(2) == 0) { 1102 + cfg->key_offset = 1 + prandom_u32_max(MAX_ALGAPI_ALIGNMASK); 1103 1103 p += scnprintf(p, end - p, " key_offset=%u", cfg->key_offset); 1104 1104 } 1105 1105
··· 1652 1652 vec->ksize = 0; 1653 1653 if (maxkeysize) { 1654 1654 vec->ksize = maxkeysize; 1655 - if (prandom_u32() % 4 == 0) 1656 - vec->ksize = 1 + (prandom_u32() % maxkeysize); 1655 + if (prandom_u32_max(4) == 0) 1656 + vec->ksize = 1 + prandom_u32_max(maxkeysize); 1657 1657 generate_random_bytes((u8 *)vec->key, vec->ksize); 1658 1658 1659 1659 vec->setkey_error = crypto_shash_setkey(desc->tfm, vec->key,
··· 2218 2218 const unsigned int aad_tail_size = aad_iv ? ivsize : 0; 2219 2219 const unsigned int authsize = vec->clen - vec->plen; 2220 2220 2221 - if (prandom_u32() % 2 == 0 && vec->alen > aad_tail_size) { 2221 + if (prandom_u32_max(2) == 0 && vec->alen > aad_tail_size) { 2222 2222 /* Mutate the AAD */ 2223 2223 flip_random_bit((u8 *)vec->assoc, vec->alen - aad_tail_size); 2224 - if (prandom_u32() % 2 == 0) 2224 + if (prandom_u32_max(2) == 0) 2225 2225 return; 2226 2226 } 2227 - if (prandom_u32() % 2 == 0) { 2227 + if (prandom_u32_max(2) == 0) { 2228 2228 /* Mutate auth tag (assuming it's at the end of ciphertext) */ 2229 2229 flip_random_bit((u8 *)vec->ctext + vec->plen, authsize); 2230 2230 } else {
··· 2249 2249 const unsigned int ivsize = crypto_aead_ivsize(tfm); 2250 2250 const unsigned int authsize = vec->clen - vec->plen; 2251 2251 const bool inauthentic = (authsize >= MIN_COLLISION_FREE_AUTHSIZE) && 2252 - (prefer_inauthentic || prandom_u32() % 4 == 0); 2252 + (prefer_inauthentic || prandom_u32_max(4) == 0); 2253 2253 2254 2254 /* Generate the AAD. */ 2255 2255 generate_random_bytes((u8 *)vec->assoc, vec->alen);
··· 2257 2257 /* Avoid implementation-defined behavior. */ 2258 2258 memcpy((u8 *)vec->assoc + vec->alen - ivsize, vec->iv, ivsize); 2259 2259 2260 - if (inauthentic && prandom_u32() % 2 == 0) { 2260 + if (inauthentic && prandom_u32_max(2) == 0) { 2261 2261 /* Generate a random ciphertext. */ 2262 2262 generate_random_bytes((u8 *)vec->ctext, vec->clen); 2263 2263 } else {
··· 2321 2321 2322 2322 /* Key: length in [0, maxkeysize], but usually choose maxkeysize */ 2323 2323 vec->klen = maxkeysize; 2324 - if (prandom_u32() % 4 == 0) 2325 - vec->klen = prandom_u32() % (maxkeysize + 1); 2324 + if (prandom_u32_max(4) == 0) 2325 + vec->klen = prandom_u32_max(maxkeysize + 1); 2326 2326 generate_random_bytes((u8 *)vec->key, vec->klen); 2327 2327 vec->setkey_error = crypto_aead_setkey(tfm, vec->key, vec->klen); 2328 2328
··· 2331 2331 2332 2332 /* Tag length: in [0, maxauthsize], but usually choose maxauthsize */ 2333 2333 authsize = maxauthsize; 2334 - if (prandom_u32() % 4 == 0) 2335 - authsize = prandom_u32() % (maxauthsize + 1); 2334 + if (prandom_u32_max(4) == 0) 2335 + authsize = prandom_u32_max(maxauthsize + 1); 2336 2336 if (prefer_inauthentic && authsize < MIN_COLLISION_FREE_AUTHSIZE) 2337 2337 authsize = MIN_COLLISION_FREE_AUTHSIZE; 2338 2338 if (WARN_ON(authsize > maxdatasize))
··· 2342 2342 2343 2343 /* AAD, plaintext, and ciphertext lengths */ 2344 2344 total_len = generate_random_length(maxdatasize); 2345 - if (prandom_u32() % 4 == 0) 2345 + if (prandom_u32_max(4) == 0) 2346 2346 vec->alen = 0; 2347 2347 else 2348 2348 vec->alen = generate_random_length(total_len);
··· 2958 2958 2959 2959 /* Key: length in [0, maxkeysize], but usually choose maxkeysize */ 2960 2960 vec->klen = maxkeysize; 2961 - if (prandom_u32() % 4 == 0) 2962 - vec->klen = prandom_u32() % (maxkeysize + 1); 2961 + if (prandom_u32_max(4) == 0) 2962 + vec->klen = prandom_u32_max(maxkeysize + 1); 2963 2963 generate_random_bytes((u8 *)vec->key, vec->klen); 2964 2964 vec->setkey_error = crypto_skcipher_setkey(tfm, vec->key, vec->klen); 2965 2965
+2 -2
drivers/block/drbd/drbd_receiver.c
··· 781 781 782 782 timeo = connect_int * HZ; 783 783 /* 28.5% random jitter */ 784 - timeo += (prandom_u32() & 1) ? timeo / 7 : -timeo / 7; 784 + timeo += prandom_u32_max(2) ? timeo / 7 : -timeo / 7; 785 785 786 786 err = wait_for_completion_interruptible_timeout(&ad->door_bell, timeo); 787 787 if (err <= 0) ··· 1004 1004 drbd_warn(connection, "Error receiving initial packet\n"); 1005 1005 sock_release(s); 1006 1006 randomize: 1007 - if (prandom_u32() & 1) 1007 + if (prandom_u32_max(2)) 1008 1008 goto retry; 1009 1009 } 1010 1010 }
+5 -6
drivers/char/random.c
··· 97 97 * Returns whether or not the input pool has been seeded and thus guaranteed 98 98 * to supply cryptographically secure random numbers. This applies to: the 99 99 * /dev/urandom device, the get_random_bytes function, and the get_random_{u8, 100 - * u16,u32,u64,int,long} family of functions. 100 + * u16,u32,u64,long} family of functions. 101 101 * 102 102 * Returns: true if the input pool has been seeded. 103 103 * false if the input pool has not been seeded. ··· 161 161 * u16 get_random_u16() 162 162 * u32 get_random_u32() 163 163 * u64 get_random_u64() 164 - * unsigned int get_random_int() 165 164 * unsigned long get_random_long() 166 165 * 167 166 * These interfaces will return the requested number of random bytes 168 167 * into the given buffer or as a return value. This is equivalent to 169 - * a read from /dev/urandom. The u8, u16, u32, u64, int, and long 170 - * family of functions may be higher performance for one-off random 171 - * integers, because they do a bit of buffering and do not invoke 172 - * reseeding until the buffer is emptied. 168 + * a read from /dev/urandom. The u8, u16, u32, u64, long family of 169 + * functions may be higher performance for one-off random integers, 170 + * because they do a bit of buffering and do not invoke reseeding 171 + * until the buffer is emptied. 173 172 * 174 173 *********************************************************************/ 175 174
+1 -1
drivers/dma/dmatest.c
··· 312 312 { 313 313 unsigned long buf; 314 314 315 - prandom_bytes(&buf, sizeof(buf)); 315 + get_random_bytes(&buf, sizeof(buf)); 316 316 return buf; 317 317 } 318 318
+1 -1
drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
··· 2424 2424 /* Check whether the file_priv has already selected one ring. */ 2425 2425 if ((int)file_priv->bsd_engine < 0) 2426 2426 file_priv->bsd_engine = 2427 - get_random_int() % num_vcs_engines(dev_priv); 2427 + prandom_u32_max(num_vcs_engines(dev_priv)); 2428 2428 2429 2429 return file_priv->bsd_engine; 2430 2430 }
+3 -3
drivers/gpu/drm/i915/i915_gem_gtt.c
··· 137 137 range = round_down(end - len, align) - round_up(start, align); 138 138 if (range) { 139 139 if (sizeof(unsigned long) == sizeof(u64)) { 140 - addr = get_random_long(); 140 + addr = get_random_u64(); 141 141 } else { 142 - addr = get_random_int(); 142 + addr = get_random_u32(); 143 143 if (range > U32_MAX) { 144 144 addr <<= 32; 145 - addr |= get_random_int(); 145 + addr |= get_random_u32(); 146 146 } 147 147 } 148 148 div64_u64_rem(addr, range, &addr);
+1 -1
drivers/gpu/drm/i915/selftests/i915_selftest.c
··· 135 135 int err = 0; 136 136 137 137 while (!i915_selftest.random_seed) 138 - i915_selftest.random_seed = get_random_int(); 138 + i915_selftest.random_seed = get_random_u32(); 139 139 140 140 i915_selftest.timeout_jiffies = 141 141 i915_selftest.timeout_ms ?
+1 -1
drivers/gpu/drm/tests/drm_buddy_test.c
··· 729 729 static int drm_buddy_init_test(struct kunit *test) 730 730 { 731 731 while (!random_seed) 732 - random_seed = get_random_int(); 732 + random_seed = get_random_u32(); 733 733 734 734 return 0; 735 735 }
+1 -1
drivers/gpu/drm/tests/drm_mm_test.c
··· 2212 2212 static int drm_mm_init_test(struct kunit *test) 2213 2213 { 2214 2214 while (!random_seed) 2215 - random_seed = get_random_int(); 2215 + random_seed = get_random_u32(); 2216 2216 2217 2217 return 0; 2218 2218 }
+1 -1
drivers/infiniband/core/cma.c
··· 3807 3807 3808 3808 inet_get_local_port_range(net, &low, &high); 3809 3809 remaining = (high - low) + 1; 3810 - rover = prandom_u32() % remaining + low; 3810 + rover = prandom_u32_max(remaining) + low; 3811 3811 retry: 3812 3812 if (last_used_port != rover) { 3813 3813 struct rdma_bind_list *bind_list;
+2 -2
drivers/infiniband/hw/cxgb4/cm.c
··· 734 734 &ep->com.remote_addr; 735 735 int ret; 736 736 enum chip_type adapter_type = ep->com.dev->rdev.lldi.adapter_type; 737 - u32 isn = (prandom_u32() & ~7UL) - 1; 737 + u32 isn = (get_random_u32() & ~7UL) - 1; 738 738 struct net_device *netdev; 739 739 u64 params; 740 740 ··· 2469 2469 } 2470 2470 2471 2471 if (!is_t4(adapter_type)) { 2472 - u32 isn = (prandom_u32() & ~7UL) - 1; 2472 + u32 isn = (get_random_u32() & ~7UL) - 1; 2473 2473 2474 2474 skb = get_skb(skb, roundup(sizeof(*rpl5), 16), GFP_KERNEL); 2475 2475 rpl5 = __skb_put_zero(skb, roundup(sizeof(*rpl5), 16));
+2 -2
drivers/infiniband/hw/cxgb4/id_table.c
··· 54 54 55 55 if (obj < alloc->max) { 56 56 if (alloc->flags & C4IW_ID_TABLE_F_RANDOM) 57 - alloc->last += prandom_u32() % RANDOM_SKIP; 57 + alloc->last += prandom_u32_max(RANDOM_SKIP); 58 58 else 59 59 alloc->last = obj + 1; 60 60 if (alloc->last >= alloc->max) ··· 85 85 alloc->start = start; 86 86 alloc->flags = flags; 87 87 if (flags & C4IW_ID_TABLE_F_RANDOM) 88 - alloc->last = prandom_u32() % RANDOM_SKIP; 88 + alloc->last = prandom_u32_max(RANDOM_SKIP); 89 89 else 90 90 alloc->last = 0; 91 91 alloc->max = num;
+1 -1
drivers/infiniband/hw/hfi1/tid_rdma.c
··· 850 850 int i; 851 851 852 852 for (i = 0; i < RXE_NUM_TID_FLOWS; i++) { 853 - rcd->flows[i].generation = mask_generation(prandom_u32()); 853 + rcd->flows[i].generation = mask_generation(get_random_u32()); 854 854 kern_set_hw_flow(rcd, KERN_GENERATION_RESERVED, i); 855 855 } 856 856 }
+2 -3
drivers/infiniband/hw/hns/hns_roce_ah.c
··· 41 41 u16 sport; 42 42 43 43 if (!fl) 44 - sport = get_random_u32() % 45 - (IB_ROCE_UDP_ENCAP_VALID_PORT_MAX + 1 - 46 - IB_ROCE_UDP_ENCAP_VALID_PORT_MIN) + 44 + sport = prandom_u32_max(IB_ROCE_UDP_ENCAP_VALID_PORT_MAX + 1 - 45 + IB_ROCE_UDP_ENCAP_VALID_PORT_MIN) + 47 46 IB_ROCE_UDP_ENCAP_VALID_PORT_MIN; 48 47 else 49 48 sport = rdma_flow_label_to_udp_sport(fl);
+1 -1
drivers/infiniband/hw/mlx4/mad.c
··· 96 96 __be64 mlx4_ib_gen_node_guid(void) 97 97 { 98 98 #define NODE_GUID_HI ((u64) (((u64)IB_OPENIB_OUI) << 40)) 99 - return cpu_to_be64(NODE_GUID_HI | prandom_u32()); 99 + return cpu_to_be64(NODE_GUID_HI | get_random_u32()); 100 100 } 101 101 102 102 __be64 mlx4_ib_get_new_demux_tid(struct mlx4_ib_demux_ctx *ctx)
+1 -1
drivers/infiniband/ulp/ipoib/ipoib_cm.c
··· 465 465 goto err_qp; 466 466 } 467 467 468 - psn = prandom_u32() & 0xffffff; 468 + psn = get_random_u32() & 0xffffff; 469 469 ret = ipoib_cm_modify_rx_qp(dev, cm_id, p->qp, psn); 470 470 if (ret) 471 471 goto err_modify;
+1 -2
drivers/infiniband/ulp/rtrs/rtrs-clt.c
··· 1517 1517 rtrs_clt_stop_and_destroy_conns(clt_path); 1518 1518 queue_delayed_work(rtrs_wq, &clt_path->reconnect_dwork, 1519 1519 msecs_to_jiffies(delay_ms + 1520 - prandom_u32() % 1521 - RTRS_RECONNECT_SEED)); 1520 + prandom_u32_max(RTRS_RECONNECT_SEED))); 1522 1521 } 1523 1522 1524 1523 static struct rtrs_clt_path *alloc_path(struct rtrs_clt_sess *clt,
+1 -1
drivers/md/bcache/request.c
··· 401 401 } 402 402 403 403 if (bypass_torture_test(dc)) { 404 - if ((get_random_int() & 3) == 3) 404 + if (prandom_u32_max(4) == 3) 405 405 goto skip; 406 406 else 407 407 goto rescale;
+1 -1
drivers/md/raid5-cache.c
··· 2994 2994 } 2995 2995 create: 2996 2996 if (create_super) { 2997 - log->last_cp_seq = prandom_u32(); 2997 + log->last_cp_seq = get_random_u32(); 2998 2998 cp = 0; 2999 2999 r5l_log_write_empty_meta_block(log, cp, log->last_cp_seq); 3000 3000 /*
+1 -1
drivers/media/common/v4l2-tpg/v4l2-tpg-core.c
··· 870 870 g = tpg_colors[col].g; 871 871 b = tpg_colors[col].b; 872 872 } else if (tpg->pattern == TPG_PAT_NOISE) { 873 - r = g = b = prandom_u32_max(256); 873 + r = g = b = get_random_u8(); 874 874 } else if (k == TPG_COLOR_RANDOM) { 875 875 r = g = b = tpg->qual_offset + prandom_u32_max(196); 876 876 } else if (k >= TPG_COLOR_RAMP) {
+2 -2
drivers/media/test-drivers/vivid/vivid-radio-rx.c
··· 104 104 break; 105 105 case 2: 106 106 rds.block |= V4L2_RDS_BLOCK_ERROR; 107 - rds.lsb = prandom_u32_max(256); 108 - rds.msb = prandom_u32_max(256); 107 + rds.lsb = get_random_u8(); 108 + rds.msb = get_random_u8(); 109 109 break; 110 110 case 3: /* Skip block altogether */ 111 111 if (i)
+3 -3
drivers/media/test-drivers/vivid/vivid-touch-cap.c
··· 210 210 211 211 /* Fill 10% of the values within range -3 and 3, zero the others */ 212 212 for (i = 0; i < size; i++) { 213 - unsigned int rand = get_random_int(); 213 + unsigned int rand = get_random_u32(); 214 214 215 215 if (rand % 10) 216 216 tch_buf[i] = 0; ··· 221 221 222 222 static inline int get_random_pressure(void) 223 223 { 224 - return get_random_int() % VIVID_PRESSURE_LIMIT; 224 + return prandom_u32_max(VIVID_PRESSURE_LIMIT); 225 225 } 226 226 227 227 static void vivid_tch_buf_set(struct v4l2_pix_format *f, ··· 272 272 return; 273 273 274 274 if (test_pat_idx == 0) 275 - dev->tch_pat_random = get_random_int(); 275 + dev->tch_pat_random = get_random_u32(); 276 276 rand = dev->tch_pat_random; 277 277 278 278 switch (test_pattern) {
+1 -1
drivers/misc/habanalabs/gaudi2/gaudi2.c
··· 2948 2948 2949 2949 static inline int gaudi2_get_non_zero_random_int(void) 2950 2950 { 2951 - int rand = get_random_int(); 2951 + int rand = get_random_u32(); 2952 2952 2953 2953 return rand ? rand : 1; 2954 2954 }
+2 -2
drivers/mmc/core/core.c
··· 97 97 !should_fail(&host->fail_mmc_request, data->blksz * data->blocks)) 98 98 return; 99 99 100 - data->error = data_errors[prandom_u32() % ARRAY_SIZE(data_errors)]; 101 - data->bytes_xfered = (prandom_u32() % (data->bytes_xfered >> 9)) << 9; 100 + data->error = data_errors[prandom_u32_max(ARRAY_SIZE(data_errors))]; 101 + data->bytes_xfered = prandom_u32_max(data->bytes_xfered >> 9) << 9; 102 102 } 103 103 104 104 #else /* CONFIG_FAIL_MMC_REQUEST */
+1 -1
drivers/mmc/host/dw_mmc.c
··· 1858 1858 * Try to inject the error at random points during the data transfer. 1859 1859 */ 1860 1860 hrtimer_start(&host->fault_timer, 1861 - ms_to_ktime(prandom_u32() % 25), 1861 + ms_to_ktime(prandom_u32_max(25)), 1862 1862 HRTIMER_MODE_REL); 1863 1863 } 1864 1864
+4 -4
drivers/mtd/nand/raw/nandsim.c
··· 1393 1393 unsigned int page_no = ns->regs.row; 1394 1394 1395 1395 if (ns_read_error(page_no)) { 1396 - prandom_bytes(ns->buf.byte, num); 1396 + get_random_bytes(ns->buf.byte, num); 1397 1397 NS_WARN("simulating read error in page %u\n", page_no); 1398 1398 return 1; 1399 1399 } ··· 1402 1402 1403 1403 static void ns_do_bit_flips(struct nandsim *ns, int num) 1404 1404 { 1405 - if (bitflips && prandom_u32() < (1 << 22)) { 1405 + if (bitflips && get_random_u16() < (1 << 6)) { 1406 1406 int flips = 1; 1407 1407 if (bitflips > 1) 1408 - flips = (prandom_u32() % (int) bitflips) + 1; 1408 + flips = prandom_u32_max(bitflips) + 1; 1409 1409 while (flips--) { 1410 - int pos = prandom_u32() % (num * 8); 1410 + int pos = prandom_u32_max(num * 8); 1411 1411 ns->buf.byte[pos / 8] ^= (1 << (pos % 8)); 1412 1412 NS_WARN("read_page: flipping bit %d in page %d " 1413 1413 "reading from %d ecc: corrected=%u failed=%u\n",
+6 -6
drivers/mtd/tests/mtd_nandecctest.c
··· 47 47 static void single_bit_error_data(void *error_data, void *correct_data, 48 48 size_t size) 49 49 { 50 - unsigned int offset = prandom_u32() % (size * BITS_PER_BYTE); 50 + unsigned int offset = prandom_u32_max(size * BITS_PER_BYTE); 51 51 52 52 memcpy(error_data, correct_data, size); 53 53 __change_bit_le(offset, error_data); ··· 58 58 { 59 59 unsigned int offset[2]; 60 60 61 - offset[0] = prandom_u32() % (size * BITS_PER_BYTE); 61 + offset[0] = prandom_u32_max(size * BITS_PER_BYTE); 62 62 do { 63 - offset[1] = prandom_u32() % (size * BITS_PER_BYTE); 63 + offset[1] = prandom_u32_max(size * BITS_PER_BYTE); 64 64 } while (offset[0] == offset[1]); 65 65 66 66 memcpy(error_data, correct_data, size); ··· 71 71 72 72 static unsigned int random_ecc_bit(size_t size) 73 73 { 74 - unsigned int offset = prandom_u32() % (3 * BITS_PER_BYTE); 74 + unsigned int offset = prandom_u32_max(3 * BITS_PER_BYTE); 75 75 76 76 if (size == 256) { 77 77 /* ··· 79 79 * and 17th bit) in ECC code for 256 byte data block 80 80 */ 81 81 while (offset == 16 || offset == 17) 82 - offset = prandom_u32() % (3 * BITS_PER_BYTE); 82 + offset = prandom_u32_max(3 * BITS_PER_BYTE); 83 83 } 84 84 85 85 return offset; ··· 266 266 goto error; 267 267 } 268 268 269 - prandom_bytes(correct_data, size); 269 + get_random_bytes(correct_data, size); 270 270 ecc_sw_hamming_calculate(correct_data, size, correct_ecc, sm_order); 271 271 for (i = 0; i < ARRAY_SIZE(nand_ecc_test); i++) { 272 272 nand_ecc_test[i].prepare(error_data, error_ecc,
+1 -1
drivers/mtd/tests/speedtest.c
··· 223 223 if (!iobuf) 224 224 goto out; 225 225 226 - prandom_bytes(iobuf, mtd->erasesize); 226 + get_random_bytes(iobuf, mtd->erasesize); 227 227 228 228 bbt = kzalloc(ebcnt, GFP_KERNEL); 229 229 if (!bbt)
+5 -14
drivers/mtd/tests/stresstest.c
··· 45 45 unsigned int eb; 46 46 47 47 again: 48 - eb = prandom_u32(); 49 48 /* Read or write up 2 eraseblocks at a time - hence 'ebcnt - 1' */ 50 - eb %= (ebcnt - 1); 49 + eb = prandom_u32_max(ebcnt - 1); 51 50 if (bbt[eb]) 52 51 goto again; 53 52 return eb; ··· 54 55 55 56 static int rand_offs(void) 56 57 { 57 - unsigned int offs; 58 - 59 - offs = prandom_u32(); 60 - offs %= bufsize; 61 - return offs; 58 + return prandom_u32_max(bufsize); 62 59 } 63 60 64 61 static int rand_len(int offs) 65 62 { 66 - unsigned int len; 67 - 68 - len = prandom_u32(); 69 - len %= (bufsize - offs); 70 - return len; 63 + return prandom_u32_max(bufsize - offs); 71 64 } 72 65 73 66 static int do_read(void) ··· 118 127 119 128 static int do_operation(void) 120 129 { 121 - if (prandom_u32() & 1) 130 + if (prandom_u32_max(2)) 122 131 return do_read(); 123 132 else 124 133 return do_write(); ··· 183 192 goto out; 184 193 for (i = 0; i < ebcnt; i++) 185 194 offsets[i] = mtd->erasesize; 186 - prandom_bytes(writebuf, bufsize); 195 + get_random_bytes(writebuf, bufsize); 187 196 188 197 bbt = kzalloc(ebcnt, GFP_KERNEL); 189 198 if (!bbt)
+1 -1
drivers/mtd/ubi/debug.c
··· 590 590 591 591 if (ubi->dbg.power_cut_max > ubi->dbg.power_cut_min) { 592 592 range = ubi->dbg.power_cut_max - ubi->dbg.power_cut_min; 593 - ubi->dbg.power_cut_counter += prandom_u32() % range; 593 + ubi->dbg.power_cut_counter += prandom_u32_max(range); 594 594 } 595 595 return 0; 596 596 }
+3 -3
drivers/mtd/ubi/debug.h
··· 73 73 static inline int ubi_dbg_is_bitflip(const struct ubi_device *ubi) 74 74 { 75 75 if (ubi->dbg.emulate_bitflips) 76 - return !(prandom_u32() % 200); 76 + return !prandom_u32_max(200); 77 77 return 0; 78 78 } 79 79 ··· 87 87 static inline int ubi_dbg_is_write_failure(const struct ubi_device *ubi) 88 88 { 89 89 if (ubi->dbg.emulate_io_failures) 90 - return !(prandom_u32() % 500); 90 + return !prandom_u32_max(500); 91 91 return 0; 92 92 } 93 93 ··· 101 101 static inline int ubi_dbg_is_erase_failure(const struct ubi_device *ubi) 102 102 { 103 103 if (ubi->dbg.emulate_io_failures) 104 - return !(prandom_u32() % 400); 104 + return !prandom_u32_max(400); 105 105 return 0; 106 106 } 107 107
+1 -1
drivers/net/bonding/bond_main.c
··· 4806 4806 4807 4807 switch (packets_per_slave) { 4808 4808 case 0: 4809 - slave_id = prandom_u32(); 4809 + slave_id = get_random_u32(); 4810 4810 break; 4811 4811 case 1: 4812 4812 slave_id = this_cpu_inc_return(*bond->rr_tx_counter);
+1 -1
drivers/net/ethernet/broadcom/bnxt/bnxt.c
··· 3874 3874 3875 3875 if (bp->vnic_info[i].rss_hash_key) { 3876 3876 if (i == 0) 3877 - prandom_bytes(vnic->rss_hash_key, 3877 + get_random_bytes(vnic->rss_hash_key, 3878 3878 HW_HASH_KEY_SIZE); 3879 3879 else 3880 3880 memcpy(vnic->rss_hash_key,
+2 -3
drivers/net/ethernet/broadcom/cnic.c
··· 4105 4105 for (i = 0; i < MAX_CM_SK_TBL_SZ; i++) 4106 4106 atomic_set(&cp->csk_tbl[i].ref_count, 0); 4107 4107 4108 - port_id = prandom_u32(); 4109 - port_id %= CNIC_LOCAL_PORT_RANGE; 4108 + port_id = prandom_u32_max(CNIC_LOCAL_PORT_RANGE); 4110 4109 if (cnic_init_id_tbl(&cp->csk_port_tbl, CNIC_LOCAL_PORT_RANGE, 4111 4110 CNIC_LOCAL_PORT_MIN, port_id)) { 4112 4111 cnic_cm_free_mem(dev); ··· 4164 4165 { 4165 4166 u32 seed; 4166 4167 4167 - seed = prandom_u32(); 4168 + seed = get_random_u32(); 4168 4169 cnic_ctx_wr(dev, 45, 0, seed); 4169 4170 return 0; 4170 4171 }
+2 -2
drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c
··· 1063 1063 opt2 |= WND_SCALE_EN_V(WSCALE_OK(tp)); 1064 1064 rpl5->opt0 = cpu_to_be64(opt0); 1065 1065 rpl5->opt2 = cpu_to_be32(opt2); 1066 - rpl5->iss = cpu_to_be32((prandom_u32() & ~7UL) - 1); 1066 + rpl5->iss = cpu_to_be32((get_random_u32() & ~7UL) - 1); 1067 1067 set_wr_txq(skb, CPL_PRIORITY_SETUP, csk->port_id); 1068 1068 t4_set_arp_err_handler(skb, sk, chtls_accept_rpl_arp_failure); 1069 1069 cxgb4_l2t_send(csk->egress_dev, skb, csk->l2t_entry); ··· 1466 1466 tp->write_seq = snd_isn; 1467 1467 tp->snd_nxt = snd_isn; 1468 1468 tp->snd_una = snd_isn; 1469 - inet_sk(sk)->inet_id = prandom_u32(); 1469 + inet_sk(sk)->inet_id = get_random_u16(); 1470 1470 assign_rxopt(sk, opt); 1471 1471 1472 1472 if (tp->rcv_wnd > (RCV_BUFSIZ_M << 10))
+2 -2
drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_io.c
··· 919 919 current_timeo = *timeo_p; 920 920 noblock = (*timeo_p ? false : true); 921 921 if (csk_mem_free(cdev, sk)) { 922 - current_timeo = (prandom_u32() % (HZ / 5)) + 2; 923 - vm_wait = (prandom_u32() % (HZ / 5)) + 2; 922 + current_timeo = prandom_u32_max(HZ / 5) + 2; 923 + vm_wait = prandom_u32_max(HZ / 5) + 2; 924 924 } 925 925 926 926 add_wait_queue(sk_sleep(sk), &wait);
+4 -4
drivers/net/ethernet/rocker/rocker_main.c
··· 129 129 u64 test_reg; 130 130 u64 rnd; 131 131 132 - rnd = prandom_u32(); 132 + rnd = get_random_u32(); 133 133 rnd >>= 1; 134 134 rocker_write32(rocker, TEST_REG, rnd); 135 135 test_reg = rocker_read32(rocker, TEST_REG); ··· 139 139 return -EIO; 140 140 } 141 141 142 - rnd = prandom_u32(); 142 + rnd = get_random_u32(); 143 143 rnd <<= 31; 144 - rnd |= prandom_u32(); 144 + rnd |= get_random_u32(); 145 145 rocker_write64(rocker, TEST_REG64, rnd); 146 146 test_reg = rocker_read64(rocker, TEST_REG64); 147 147 if (test_reg != rnd * 2) { ··· 224 224 if (err) 225 225 goto unmap; 226 226 227 - prandom_bytes(buf, ROCKER_TEST_DMA_BUF_SIZE); 227 + get_random_bytes(buf, ROCKER_TEST_DMA_BUF_SIZE); 228 228 for (i = 0; i < ROCKER_TEST_DMA_BUF_SIZE; i++) 229 229 expect[i] = ~buf[i]; 230 230 err = rocker_dma_test_one(rocker, wait, ROCKER_TEST_DMA_CTRL_INVERT,
+1 -1
drivers/net/hamradio/baycom_epp.c
··· 438 438 if ((--bc->hdlctx.slotcnt) > 0) 439 439 return 0; 440 440 bc->hdlctx.slotcnt = bc->ch_params.slottime; 441 - if ((prandom_u32() % 256) > bc->ch_params.ppersist) 441 + if (get_random_u8() > bc->ch_params.ppersist) 442 442 return 0; 443 443 } 444 444 }
+1 -1
drivers/net/hamradio/hdlcdrv.c
··· 377 377 if ((--s->hdlctx.slotcnt) > 0) 378 378 return; 379 379 s->hdlctx.slotcnt = s->ch_params.slottime; 380 - if ((prandom_u32() % 256) > s->ch_params.ppersist) 380 + if (get_random_u8() > s->ch_params.ppersist) 381 381 return; 382 382 start_tx(dev, s); 383 383 }
+1 -1
drivers/net/hamradio/yam.c
··· 626 626 yp->slotcnt = yp->slot / 10; 627 627 628 628 /* is random > persist ? */ 629 - if ((prandom_u32() % 256) > yp->pers) 629 + if (get_random_u8() > yp->pers) 630 630 return; 631 631 632 632 yam_start_tx(dev, yp);
+1 -1
drivers/net/phy/at803x.c
··· 1758 1758 1759 1759 static int qca808x_phy_ms_random_seed_set(struct phy_device *phydev) 1760 1760 { 1761 - u16 seed_value = (prandom_u32() % QCA808X_MASTER_SLAVE_SEED_RANGE); 1761 + u16 seed_value = prandom_u32_max(QCA808X_MASTER_SLAVE_SEED_RANGE); 1762 1762 1763 1763 return at803x_debug_reg_mask(phydev, QCA808X_PHY_DEBUG_LOCAL_SEED, 1764 1764 QCA808X_MASTER_SLAVE_SEED_CFG,
+8 -8
drivers/net/wireguard/selftest/allowedips.c
··· 284 284 mutex_lock(&mutex); 285 285 286 286 for (i = 0; i < NUM_RAND_ROUTES; ++i) { 287 - prandom_bytes(ip, 4); 287 + get_random_bytes(ip, 4); 288 288 cidr = prandom_u32_max(32) + 1; 289 289 peer = peers[prandom_u32_max(NUM_PEERS)]; 290 290 if (wg_allowedips_insert_v4(&t, (struct in_addr *)ip, cidr, ··· 299 299 } 300 300 for (j = 0; j < NUM_MUTATED_ROUTES; ++j) { 301 301 memcpy(mutated, ip, 4); 302 - prandom_bytes(mutate_mask, 4); 302 + get_random_bytes(mutate_mask, 4); 303 303 mutate_amount = prandom_u32_max(32); 304 304 for (k = 0; k < mutate_amount / 8; ++k) 305 305 mutate_mask[k] = 0xff; ··· 310 310 for (k = 0; k < 4; ++k) 311 311 mutated[k] = (mutated[k] & mutate_mask[k]) | 312 312 (~mutate_mask[k] & 313 - prandom_u32_max(256)); 313 + get_random_u8()); 314 314 cidr = prandom_u32_max(32) + 1; 315 315 peer = peers[prandom_u32_max(NUM_PEERS)]; 316 316 if (wg_allowedips_insert_v4(&t, ··· 328 328 } 329 329 330 330 for (i = 0; i < NUM_RAND_ROUTES; ++i) { 331 - prandom_bytes(ip, 16); 331 + get_random_bytes(ip, 16); 332 332 cidr = prandom_u32_max(128) + 1; 333 333 peer = peers[prandom_u32_max(NUM_PEERS)]; 334 334 if (wg_allowedips_insert_v6(&t, (struct in6_addr *)ip, cidr, ··· 343 343 } 344 344 for (j = 0; j < NUM_MUTATED_ROUTES; ++j) { 345 345 memcpy(mutated, ip, 16); 346 - prandom_bytes(mutate_mask, 16); 346 + get_random_bytes(mutate_mask, 16); 347 347 mutate_amount = prandom_u32_max(128); 348 348 for (k = 0; k < mutate_amount / 8; ++k) 349 349 mutate_mask[k] = 0xff; ··· 354 354 for (k = 0; k < 4; ++k) 355 355 mutated[k] = (mutated[k] & mutate_mask[k]) | 356 356 (~mutate_mask[k] & 357 - prandom_u32_max(256)); 357 + get_random_u8()); 358 358 cidr = prandom_u32_max(128) + 1; 359 359 peer = peers[prandom_u32_max(NUM_PEERS)]; 360 360 if (wg_allowedips_insert_v6(&t, ··· 381 381 382 382 for (j = 0;; ++j) { 383 383 for (i = 0; i < NUM_QUERIES; ++i) { 384 - prandom_bytes(ip, 4); 384 + get_random_bytes(ip, 4); 385 385 if (lookup(t.root4, 32, ip) != horrible_allowedips_lookup_v4(&h, (struct in_addr *)ip)) { 386 386 horrible_allowedips_lookup_v4(&h, (struct in_addr *)ip); 387 387 pr_err("allowedips random v4 self-test: FAIL\n"); 388 388 goto free; 389 389 } 390 - prandom_bytes(ip, 16); 390 + get_random_bytes(ip, 16); 391 391 if (lookup(t.root6, 128, ip) != horrible_allowedips_lookup_v6(&h, (struct in6_addr *)ip)) { 392 392 pr_err("allowedips random v6 self-test: FAIL\n"); 393 393 goto free;
+1 -1
drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c
··· 1128 1128 if (afx_hdl->is_listen && afx_hdl->my_listen_chan) 1129 1129 /* 100ms ~ 300ms */ 1130 1130 err = brcmf_p2p_discover_listen(p2p, afx_hdl->my_listen_chan, 1131 - 100 * (1 + prandom_u32() % 3)); 1131 + 100 * (1 + prandom_u32_max(3))); 1132 1132 else 1133 1133 err = brcmf_p2p_act_frm_search(p2p, afx_hdl->peer_listen_chan); 1134 1134
+1 -1
drivers/net/wireless/broadcom/brcm80211/brcmfmac/pno.c
··· 177 177 memcpy(pfn_mac.mac, mac_addr, ETH_ALEN); 178 178 for (i = 0; i < ETH_ALEN; i++) { 179 179 pfn_mac.mac[i] &= mac_mask[i]; 180 - pfn_mac.mac[i] |= get_random_int() & ~(mac_mask[i]); 180 + pfn_mac.mac[i] |= get_random_u8() & ~(mac_mask[i]); 181 181 } 182 182 /* Clear multi bit */ 183 183 pfn_mac.mac[0] &= 0xFE;
+1 -1
drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
··· 1099 1099 iwl_mvm_mac_ap_iterator, &data); 1100 1100 1101 1101 if (data.beacon_device_ts) { 1102 - u32 rand = (prandom_u32() % (64 - 36)) + 36; 1102 + u32 rand = prandom_u32_max(64 - 36) + 36; 1103 1103 mvmvif->ap_beacon_time = data.beacon_device_ts + 1104 1104 ieee80211_tu_to_usec(data.beacon_int * rand / 1105 1105 100);
+2 -2
drivers/net/wireless/marvell/mwifiex/cfg80211.c
··· 239 239 tx_info->pkt_len = pkt_len; 240 240 241 241 mwifiex_form_mgmt_frame(skb, buf, len); 242 - *cookie = prandom_u32() | 1; 242 + *cookie = get_random_u32() | 1; 243 243 244 244 if (ieee80211_is_action(mgmt->frame_control)) 245 245 skb = mwifiex_clone_skb_for_tx_status(priv, ··· 303 303 duration); 304 304 305 305 if (!ret) { 306 - *cookie = prandom_u32() | 1; 306 + *cookie = get_random_u32() | 1; 307 307 priv->roc_cfg.cookie = *cookie; 308 308 priv->roc_cfg.chan = *chan; 309 309
+1 -1
drivers/net/wireless/microchip/wilc1000/cfg80211.c
··· 1161 1161 const u8 *vendor_ie; 1162 1162 int ret = 0; 1163 1163 1164 - *cookie = prandom_u32(); 1164 + *cookie = get_random_u32(); 1165 1165 priv->tx_cookie = *cookie; 1166 1166 mgmt = (const struct ieee80211_mgmt *)buf; 1167 1167
+1 -1
drivers/net/wireless/quantenna/qtnfmac/cfg80211.c
··· 449 449 { 450 450 struct qtnf_vif *vif = qtnf_netdev_get_priv(wdev->netdev); 451 451 const struct ieee80211_mgmt *mgmt_frame = (void *)params->buf; 452 - u32 short_cookie = prandom_u32(); 452 + u32 short_cookie = get_random_u32(); 453 453 u16 flags = 0; 454 454 u16 freq; 455 455
+1 -1
drivers/net/wireless/st/cw1200/wsm.c
··· 1594 1594 edca = &priv->edca.params[i]; 1595 1595 score = ((edca->aifns + edca->cwmin) << 16) + 1596 1596 ((edca->cwmax - edca->cwmin) * 1597 - (get_random_int() & 0xFFFF)); 1597 + get_random_u16()); 1598 1598 if (score < best && (winner < 0 || i != 3)) { 1599 1599 best = score; 1600 1600 winner = i;
+1 -1
drivers/net/wireless/ti/wlcore/main.c
··· 6100 6100 wl1271_warning("Fuse mac address is zero. using random mac"); 6101 6101 /* Use TI oui and a random nic */ 6102 6102 oui_addr = WLCORE_TI_OUI_ADDRESS; 6103 - nic_addr = get_random_int(); 6103 + nic_addr = get_random_u32(); 6104 6104 } else { 6105 6105 oui_addr = wl->fuse_oui_addr; 6106 6106 /* fuse has the BD_ADDR, the WLAN addresses are the next two */
+1 -1
drivers/nvme/common/auth.c
··· 23 23 24 24 mutex_lock(&nvme_dhchap_mutex); 25 25 if (!nvme_dhchap_seqnum) 26 - nvme_dhchap_seqnum = prandom_u32(); 26 + nvme_dhchap_seqnum = get_random_u32(); 27 27 else { 28 28 nvme_dhchap_seqnum++; 29 29 if (!nvme_dhchap_seqnum)
+2 -2
drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
··· 254 254 } else if (is_t5(lldi->adapter_type)) { 255 255 struct cpl_t5_act_open_req *req = 256 256 (struct cpl_t5_act_open_req *)skb->head; 257 - u32 isn = (prandom_u32() & ~7UL) - 1; 257 + u32 isn = (get_random_u32() & ~7UL) - 1; 258 258 259 259 INIT_TP_WR(req, 0); 260 260 OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ, ··· 282 282 } else { 283 283 struct cpl_t6_act_open_req *req = 284 284 (struct cpl_t6_act_open_req *)skb->head; 285 - u32 isn = (prandom_u32() & ~7UL) - 1; 285 + u32 isn = (get_random_u32() & ~7UL) - 1; 286 286 287 287 INIT_TP_WR(req, 0); 288 288 OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ,
+2 -2
drivers/scsi/fcoe/fcoe_ctlr.c
··· 2233 2233 2234 2234 if (fip->probe_tries < FIP_VN_RLIM_COUNT) { 2235 2235 fip->probe_tries++; 2236 - wait = prandom_u32() % FIP_VN_PROBE_WAIT; 2236 + wait = prandom_u32_max(FIP_VN_PROBE_WAIT); 2237 2237 } else 2238 2238 wait = FIP_VN_RLIM_INT; 2239 2239 mod_timer(&fip->timer, jiffies + msecs_to_jiffies(wait)); ··· 3125 3125 fcoe_all_vn2vn, 0); 3126 3126 fip->port_ka_time = jiffies + 3127 3127 msecs_to_jiffies(FIP_VN_BEACON_INT + 3128 - (prandom_u32() % FIP_VN_BEACON_FUZZ)); 3128 + prandom_u32_max(FIP_VN_BEACON_FUZZ)); 3129 3129 } 3130 3130 if (time_before(fip->port_ka_time, next_time)) 3131 3131 next_time = fip->port_ka_time;
+3 -3
drivers/scsi/lpfc/lpfc_hbadisc.c
··· 2156 2156 * This function makes an running random selection decision on FCF record to 2157 2157 * use through a sequence of @fcf_cnt eligible FCF records with equal 2158 2158 * probability. To perform integer manunipulation of random numbers with 2159 - * size unit32_t, the lower 16 bits of the 32-bit random number returned 2160 - * from prandom_u32() are taken as the random random number generated. 2159 + * size unit32_t, a 16-bit random number returned from get_random_u16() is 2160 + * taken as the random random number generated. 2161 2161 * 2162 2162 * Returns true when outcome is for the newly read FCF record should be 2163 2163 * chosen; otherwise, return false when outcome is for keeping the previously ··· 2169 2169 uint32_t rand_num; 2170 2170 2171 2171 /* Get 16-bit uniform random number */ 2172 - rand_num = 0xFFFF & prandom_u32(); 2172 + rand_num = get_random_u16(); 2173 2173 2174 2174 /* Decision with probability 1/fcf_cnt */ 2175 2175 if ((fcf_cnt * rand_num) < 0xFFFF)
+1 -1
drivers/scsi/qedi/qedi_main.c
··· 618 618 sizeof(struct qedi_endpoint *)), GFP_KERNEL); 619 619 if (!qedi->ep_tbl) 620 620 return -ENOMEM; 621 - port_id = prandom_u32() % QEDI_LOCAL_PORT_RANGE; 621 + port_id = prandom_u32_max(QEDI_LOCAL_PORT_RANGE); 622 622 if (qedi_init_id_tbl(&qedi->lcl_port_tbl, QEDI_LOCAL_PORT_RANGE, 623 623 QEDI_LOCAL_PORT_MIN, port_id)) { 624 624 qedi_cm_free_mem(qedi);
+1 -1
drivers/target/iscsi/cxgbit/cxgbit_cm.c
··· 1202 1202 opt2 |= CONG_CNTRL_V(CONG_ALG_NEWRENO); 1203 1203 1204 1204 opt2 |= T5_ISS_F; 1205 - rpl5->iss = cpu_to_be32((prandom_u32() & ~7UL) - 1); 1205 + rpl5->iss = cpu_to_be32((get_random_u32() & ~7UL) - 1); 1206 1206 1207 1207 opt2 |= T5_OPT_2_VALID_F; 1208 1208
+1 -1
drivers/thunderbolt/xdomain.c
··· 2437 2437 tb_property_add_immediate(xdomain_property_dir, "deviceid", 0x1); 2438 2438 tb_property_add_immediate(xdomain_property_dir, "devicerv", 0x80000100); 2439 2439 2440 - xdomain_property_block_gen = prandom_u32(); 2440 + xdomain_property_block_gen = get_random_u32(); 2441 2441 return 0; 2442 2442 } 2443 2443
+1 -1
drivers/video/fbdev/uvesafb.c
··· 167 167 memcpy(&m->id, &uvesafb_cn_id, sizeof(m->id)); 168 168 m->seq = seq; 169 169 m->len = len; 170 - m->ack = prandom_u32(); 170 + m->ack = get_random_u32(); 171 171 172 172 /* uvesafb_task structure */ 173 173 memcpy(m + 1, &task->t, sizeof(task->t));
+1 -1
fs/ceph/inode.c
··· 362 362 if (nsplits != ci->i_fragtree_nsplits) { 363 363 update = true; 364 364 } else if (nsplits) { 365 - i = prandom_u32() % nsplits; 365 + i = prandom_u32_max(nsplits); 366 366 id = le32_to_cpu(fragtree->splits[i].frag); 367 367 if (!__ceph_find_frag(ci, id)) 368 368 update = true;
+1 -1
fs/ceph/mdsmap.c
··· 29 29 return -1; 30 30 31 31 /* pick */ 32 - n = prandom_u32() % n; 32 + n = prandom_u32_max(n); 33 33 for (j = 0, i = 0; i < m->possible_max_rank; i++) { 34 34 if (CEPH_MDS_IS_READY(i, ignore_laggy)) 35 35 j++;
+1 -1
fs/exfat/inode.c
··· 552 552 inode->i_uid = sbi->options.fs_uid; 553 553 inode->i_gid = sbi->options.fs_gid; 554 554 inode_inc_iversion(inode); 555 - inode->i_generation = prandom_u32(); 555 + inode->i_generation = get_random_u32(); 556 556 557 557 if (info->attr & ATTR_SUBDIR) { /* directory */ 558 558 inode->i_generation &= ~1;
+1 -2
fs/ext2/ialloc.c
··· 277 277 int best_ndir = inodes_per_group; 278 278 int best_group = -1; 279 279 280 - group = prandom_u32(); 281 - parent_group = (unsigned)group % ngroups; 280 + parent_group = prandom_u32_max(ngroups); 282 281 for (i = 0; i < ngroups; i++) { 283 282 group = (parent_group + i) % ngroups; 284 283 desc = ext2_get_group_desc (sb, group, NULL);
+3 -4
fs/ext4/ialloc.c
··· 463 463 hinfo.hash_version = DX_HASH_HALF_MD4; 464 464 hinfo.seed = sbi->s_hash_seed; 465 465 ext4fs_dirhash(parent, qstr->name, qstr->len, &hinfo); 466 - grp = hinfo.hash; 466 + parent_group = hinfo.hash % ngroups; 467 467 } else 468 - grp = prandom_u32(); 469 - parent_group = (unsigned)grp % ngroups; 468 + parent_group = prandom_u32_max(ngroups); 470 469 for (i = 0; i < ngroups; i++) { 471 470 g = (parent_group + i) % ngroups; 472 471 get_orlov_stats(sb, g, flex_size, &stats); ··· 1279 1280 EXT4_GROUP_INFO_IBITMAP_CORRUPT); 1280 1281 goto out; 1281 1282 } 1282 - inode->i_generation = prandom_u32(); 1283 + inode->i_generation = get_random_u32(); 1283 1284 1284 1285 /* Precompute checksum seed for inode metadata */ 1285 1286 if (ext4_has_metadata_csum(sb)) {
+2 -2
fs/ext4/ioctl.c
··· 454 454 inode->i_ctime = inode_bl->i_ctime = current_time(inode); 455 455 inode_inc_iversion(inode); 456 456 457 - inode->i_generation = prandom_u32(); 458 - inode_bl->i_generation = prandom_u32(); 457 + inode->i_generation = get_random_u32(); 458 + inode_bl->i_generation = get_random_u32(); 459 459 ext4_reset_inode_seed(inode); 460 460 ext4_reset_inode_seed(inode_bl); 461 461
+1 -1
fs/ext4/mmp.c
··· 265 265 u32 new_seq; 266 266 267 267 do { 268 - new_seq = prandom_u32(); 268 + new_seq = get_random_u32(); 269 269 } while (new_seq > EXT4_MMP_SEQ_MAX); 270 270 271 271 return new_seq;
+3 -4
fs/ext4/super.c
··· 3782 3782 } 3783 3783 if (!progress) { 3784 3784 elr->lr_next_sched = jiffies + 3785 - (prandom_u32() 3786 - % (EXT4_DEF_LI_MAX_START_DELAY * HZ)); 3785 + prandom_u32_max(EXT4_DEF_LI_MAX_START_DELAY * HZ); 3787 3786 } 3788 3787 if (time_before(elr->lr_next_sched, next_wakeup)) 3789 3788 next_wakeup = elr->lr_next_sched; ··· 3929 3930 * spread the inode table initialization requests 3930 3931 * better. 3931 3932 */ 3932 - elr->lr_next_sched = jiffies + (prandom_u32() % 3933 - (EXT4_DEF_LI_MAX_START_DELAY * HZ)); 3933 + elr->lr_next_sched = jiffies + prandom_u32_max( 3934 + EXT4_DEF_LI_MAX_START_DELAY * HZ); 3934 3935 return elr; 3935 3936 } 3936 3937
+1 -1
fs/f2fs/gc.c
··· 282 282 283 283 /* let's select beginning hot/small space first in no_heap mode*/ 284 284 if (f2fs_need_rand_seg(sbi)) 285 - p->offset = prandom_u32() % (MAIN_SECS(sbi) * sbi->segs_per_sec); 285 + p->offset = prandom_u32_max(MAIN_SECS(sbi) * sbi->segs_per_sec); 286 286 else if (test_opt(sbi, NOHEAP) && 287 287 (type == CURSEG_HOT_DATA || IS_NODESEG(type))) 288 288 p->offset = 0;
+1 -1
fs/f2fs/namei.c
··· 50 50 inode->i_blocks = 0; 51 51 inode->i_mtime = inode->i_atime = inode->i_ctime = current_time(inode); 52 52 F2FS_I(inode)->i_crtime = inode->i_mtime; 53 - inode->i_generation = prandom_u32(); 53 + inode->i_generation = get_random_u32(); 54 54 55 55 if (S_ISDIR(inode->i_mode)) 56 56 F2FS_I(inode)->i_current_depth = 1;
+4 -4
fs/f2fs/segment.c
··· 2534 2534 2535 2535 sanity_check_seg_type(sbi, seg_type); 2536 2536 if (f2fs_need_rand_seg(sbi)) 2537 - return prandom_u32() % (MAIN_SECS(sbi) * sbi->segs_per_sec); 2537 + return prandom_u32_max(MAIN_SECS(sbi) * sbi->segs_per_sec); 2538 2538 2539 2539 /* if segs_per_sec is large than 1, we need to keep original policy. */ 2540 2540 if (__is_large_section(sbi)) ··· 2588 2588 curseg->alloc_type = LFS; 2589 2589 if (F2FS_OPTION(sbi).fs_mode == FS_MODE_FRAGMENT_BLK) 2590 2590 curseg->fragment_remained_chunk = 2591 - prandom_u32() % sbi->max_fragment_chunk + 1; 2591 + prandom_u32_max(sbi->max_fragment_chunk) + 1; 2592 2592 } 2593 2593 2594 2594 static int __next_free_blkoff(struct f2fs_sb_info *sbi, ··· 2625 2625 /* To allocate block chunks in different sizes, use random number */ 2626 2626 if (--seg->fragment_remained_chunk <= 0) { 2627 2627 seg->fragment_remained_chunk = 2628 - prandom_u32() % sbi->max_fragment_chunk + 1; 2628 + prandom_u32_max(sbi->max_fragment_chunk) + 1; 2629 2629 seg->next_blkoff += 2630 - prandom_u32() % sbi->max_fragment_hole + 1; 2630 + prandom_u32_max(sbi->max_fragment_hole) + 1; 2631 2631 } 2632 2632 } 2633 2633 }
+1 -1
fs/fat/inode.c
··· 523 523 inode->i_uid = sbi->options.fs_uid; 524 524 inode->i_gid = sbi->options.fs_gid; 525 525 inode_inc_iversion(inode); 526 - inode->i_generation = prandom_u32(); 526 + inode->i_generation = get_random_u32(); 527 527 528 528 if ((de->attr & ATTR_DIR) && !IS_FREE(de->name)) { 529 529 inode->i_generation &= ~1;
+2 -2
fs/nfsd/nfs4state.c
··· 4375 4375 nn->nfsd4_grace = 90; 4376 4376 nn->somebody_reclaimed = false; 4377 4377 nn->track_reclaim_completes = false; 4378 - nn->clverifier_counter = prandom_u32(); 4379 - nn->clientid_base = prandom_u32(); 4378 + nn->clverifier_counter = get_random_u32(); 4379 + nn->clientid_base = get_random_u32(); 4380 4380 nn->clientid_counter = nn->clientid_base + 1; 4381 4381 nn->s2s_cp_cl_id = nn->clientid_counter++; 4382 4382
+3 -3
fs/ntfs3/fslog.c
··· 3819 3819 } 3820 3820 3821 3821 log_init_pg_hdr(log, page_size, page_size, 1, 1); 3822 - log_create(log, l_size, 0, get_random_int(), false, false); 3822 + log_create(log, l_size, 0, get_random_u32(), false, false); 3823 3823 3824 3824 log->ra = ra; 3825 3825 ··· 3893 3893 3894 3894 /* Do some checks based on whether we have a valid log page. */ 3895 3895 if (!rst_info.valid_page) { 3896 - open_log_count = get_random_int(); 3896 + open_log_count = get_random_u32(); 3897 3897 goto init_log_instance; 3898 3898 } 3899 3899 open_log_count = le32_to_cpu(ra2->open_log_count); ··· 4044 4044 memcpy(ra->clients, Add2Ptr(ra2, t16), 4045 4045 le16_to_cpu(ra2->ra_len) - t16); 4046 4046 4047 - log->current_openlog_count = get_random_int(); 4047 + log->current_openlog_count = get_random_u32(); 4048 4048 ra->open_log_count = cpu_to_le32(log->current_openlog_count); 4049 4049 log->ra_size = offsetof(struct RESTART_AREA, clients) + 4050 4050 sizeof(struct CLIENT_REC);
+5 -5
fs/ubifs/debug.c
··· 2467 2467 2468 2468 static inline int chance(unsigned int n, unsigned int out_of) 2469 2469 { 2470 - return !!((prandom_u32() % out_of) + 1 <= n); 2470 + return !!(prandom_u32_max(out_of) + 1 <= n); 2471 2471 2472 2472 } 2473 2473 ··· 2485 2485 if (chance(1, 2)) { 2486 2486 d->pc_delay = 1; 2487 2487 /* Fail within 1 minute */ 2488 - delay = prandom_u32() % 60000; 2488 + delay = prandom_u32_max(60000); 2489 2489 d->pc_timeout = jiffies; 2490 2490 d->pc_timeout += msecs_to_jiffies(delay); 2491 2491 ubifs_warn(c, "failing after %lums", delay); 2492 2492 } else { 2493 2493 d->pc_delay = 2; 2494 - delay = prandom_u32() % 10000; 2494 + delay = prandom_u32_max(10000); 2495 2495 /* Fail within 10000 operations */ 2496 2496 d->pc_cnt_max = delay; 2497 2497 ubifs_warn(c, "failing after %lu calls", delay); ··· 2571 2571 unsigned int from, to, ffs = chance(1, 2); 2572 2572 unsigned char *p = (void *)buf; 2573 2573 2574 - from = prandom_u32() % len; 2574 + from = prandom_u32_max(len); 2575 2575 /* Corruption span max to end of write unit */ 2576 2576 to = min(len, ALIGN(from + 1, c->max_write_size)); 2577 2577 ··· 2581 2581 if (ffs) 2582 2582 memset(p + from, 0xFF, to - from); 2583 2583 else 2584 - prandom_bytes(p + from, to - from); 2584 + get_random_bytes(p + from, to - from); 2585 2585 2586 2586 return to; 2587 2587 }
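Editor's note: most hunks in this listing, including the ubifs debug one above, replace an open-coded prandom_u32() % bound with prandom_u32_max(bound). The helper's own body is not part of this diff; as an assumption about its shape, the sketch below implements the usual widening-multiply-and-shift form in userspace C, with rand32() standing in for the kernel's random source:

/* Userspace sketch, not taken from this diff: a bound-below helper in the
 * assumed prandom_u32_max() style, using one multiply and a shift instead
 * of a division. rand32() is a stand-in for the kernel's random source. */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

static uint32_t rand32(void)
{
	/* assumption: any roughly uniform 32-bit source is fine here */
	return ((uint32_t)rand() << 16) ^ (uint32_t)rand();
}

/* Maps a uniform 32-bit sample into [0, bound). */
static uint32_t bounded_below(uint32_t bound)
{
	return (uint32_t)(((uint64_t)rand32() * bound) >> 32);
}

int main(void)
{
	for (int i = 0; i < 5; i++)
		printf("%u\n", bounded_below(60000));	/* cf. the 1-minute delay above */
	return 0;
}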
+1 -1
fs/ubifs/journal.c
··· 503 503 static void set_dent_cookie(struct ubifs_info *c, struct ubifs_dent_node *dent) 504 504 { 505 505 if (c->double_hash) 506 - dent->cookie = (__force __le32) prandom_u32(); 506 + dent->cookie = (__force __le32) get_random_u32(); 507 507 else 508 508 dent->cookie = 0; 509 509 }
+7 -7
fs/ubifs/lpt_commit.c
··· 1970 1970 1971 1971 if (!dbg_is_chk_gen(c)) 1972 1972 return 0; 1973 - if (prandom_u32() & 3) 1973 + if (prandom_u32_max(4)) 1974 1974 return 0; 1975 1975 1976 1976 for (i = 0; i < c->lsave_cnt; i++) 1977 1977 c->lsave[i] = c->main_first; 1978 1978 1979 1979 list_for_each_entry(lprops, &c->empty_list, list) 1980 - c->lsave[prandom_u32() % c->lsave_cnt] = lprops->lnum; 1980 + c->lsave[prandom_u32_max(c->lsave_cnt)] = lprops->lnum; 1981 1981 list_for_each_entry(lprops, &c->freeable_list, list) 1982 - c->lsave[prandom_u32() % c->lsave_cnt] = lprops->lnum; 1982 + c->lsave[prandom_u32_max(c->lsave_cnt)] = lprops->lnum; 1983 1983 list_for_each_entry(lprops, &c->frdi_idx_list, list) 1984 - c->lsave[prandom_u32() % c->lsave_cnt] = lprops->lnum; 1984 + c->lsave[prandom_u32_max(c->lsave_cnt)] = lprops->lnum; 1985 1985 1986 1986 heap = &c->lpt_heap[LPROPS_DIRTY_IDX - 1]; 1987 1987 for (i = 0; i < heap->cnt; i++) 1988 - c->lsave[prandom_u32() % c->lsave_cnt] = heap->arr[i]->lnum; 1988 + c->lsave[prandom_u32_max(c->lsave_cnt)] = heap->arr[i]->lnum; 1989 1989 heap = &c->lpt_heap[LPROPS_DIRTY - 1]; 1990 1990 for (i = 0; i < heap->cnt; i++) 1991 - c->lsave[prandom_u32() % c->lsave_cnt] = heap->arr[i]->lnum; 1991 + c->lsave[prandom_u32_max(c->lsave_cnt)] = heap->arr[i]->lnum; 1992 1992 heap = &c->lpt_heap[LPROPS_FREE - 1]; 1993 1993 for (i = 0; i < heap->cnt; i++) 1994 - c->lsave[prandom_u32() % c->lsave_cnt] = heap->arr[i]->lnum; 1994 + c->lsave[prandom_u32_max(c->lsave_cnt)] = heap->arr[i]->lnum; 1995 1995 1996 1996 return 1; 1997 1997 }
+1 -1
fs/ubifs/tnc_commit.c
··· 700 700 c->ilebs[c->ileb_cnt++] = lnum; 701 701 dbg_cmt("LEB %d", lnum); 702 702 } 703 - if (dbg_is_chk_index(c) && !(prandom_u32() & 7)) 703 + if (dbg_is_chk_index(c) && !prandom_u32_max(8)) 704 704 return -ENOSPC; 705 705 return 0; 706 706 }
+1 -1
fs/xfs/libxfs/xfs_alloc.c
··· 1520 1520 1521 1521 #ifdef DEBUG 1522 1522 /* Randomly don't execute the first algorithm. */ 1523 - if (prandom_u32() & 1) 1523 + if (prandom_u32_max(2)) 1524 1524 return 0; 1525 1525 #endif 1526 1526
+2 -2
fs/xfs/libxfs/xfs_ialloc.c
··· 636 636 /* randomly do sparse inode allocations */ 637 637 if (xfs_has_sparseinodes(tp->t_mountp) && 638 638 igeo->ialloc_min_blks < igeo->ialloc_blks) 639 - do_sparse = prandom_u32() & 1; 639 + do_sparse = prandom_u32_max(2); 640 640 #endif 641 641 642 642 /* ··· 805 805 * number from being easily guessable. 806 806 */ 807 807 error = xfs_ialloc_inode_init(args.mp, tp, NULL, newlen, pag->pag_agno, 808 - args.agbno, args.len, prandom_u32()); 808 + args.agbno, args.len, get_random_u32()); 809 809 810 810 if (error) 811 811 return error;
+1 -1
fs/xfs/xfs_error.c
··· 274 274 275 275 ASSERT(error_tag < XFS_ERRTAG_MAX); 276 276 randfactor = mp->m_errortag[error_tag]; 277 - if (!randfactor || prandom_u32() % randfactor) 277 + if (!randfactor || prandom_u32_max(randfactor)) 278 278 return false; 279 279 280 280 xfs_warn_ratelimited(mp,
+1 -1
fs/xfs/xfs_icache.c
··· 596 596 */ 597 597 if (xfs_has_v3inodes(mp) && 598 598 (flags & XFS_IGET_CREATE) && !xfs_has_ikeep(mp)) { 599 - VFS_I(ip)->i_generation = prandom_u32(); 599 + VFS_I(ip)->i_generation = get_random_u32(); 600 600 } else { 601 601 struct xfs_buf *bp; 602 602
+1 -1
fs/xfs/xfs_log.c
··· 3544 3544 tic->t_curr_res = unit_res; 3545 3545 tic->t_cnt = cnt; 3546 3546 tic->t_ocnt = cnt; 3547 - tic->t_tid = prandom_u32(); 3547 + tic->t_tid = get_random_u32(); 3548 3548 if (permanent) 3549 3549 tic->t_flags |= XLOG_TIC_PERM_RESERV; 3550 3550
+1 -1
include/linux/nodemask.h
··· 516 516 bit = first_node(*maskp); 517 517 break; 518 518 default: 519 - bit = find_nth_bit(maskp->bits, MAX_NUMNODES, get_random_int() % w); 519 + bit = find_nth_bit(maskp->bits, MAX_NUMNODES, prandom_u32_max(w)); 520 520 break; 521 521 } 522 522 return bit;
-12
include/linux/prandom.h
··· 12 12 #include <linux/percpu.h> 13 13 #include <linux/random.h> 14 14 15 - /* Deprecated: use get_random_u32 instead. */ 16 - static inline u32 prandom_u32(void) 17 - { 18 - return get_random_u32(); 19 - } 20 - 21 - /* Deprecated: use get_random_bytes instead. */ 22 - static inline void prandom_bytes(void *buf, size_t nbytes) 23 - { 24 - return get_random_bytes(buf, nbytes); 25 - } 26 - 27 15 struct rnd_state { 28 16 __u32 s1, s2, s3, s4; 29 17 };
-5
include/linux/random.h
··· 42 42 u16 get_random_u16(void); 43 43 u32 get_random_u32(void); 44 44 u64 get_random_u64(void); 45 - static inline unsigned int get_random_int(void) 46 - { 47 - return get_random_u32(); 48 - } 49 45 static inline unsigned long get_random_long(void) 50 46 { 51 47 #if BITS_PER_LONG == 64 ··· 96 100 declare_get_random_var_wait(u16, u16) 97 101 declare_get_random_var_wait(u32, u32) 98 102 declare_get_random_var_wait(u64, u32) 99 - declare_get_random_var_wait(int, unsigned int) 100 103 declare_get_random_var_wait(long, unsigned long) 101 104 #undef declare_get_random_var 102 105
+1 -1
include/net/netfilter/nf_queue.h
··· 43 43 static inline void init_hashrandom(u32 *jhash_initval) 44 44 { 45 45 while (*jhash_initval == 0) 46 - *jhash_initval = prandom_u32(); 46 + *jhash_initval = get_random_u32(); 47 47 } 48 48 49 49 static inline u32 hash_v4(const struct iphdr *iph, u32 initval)
+1 -1
include/net/red.h
··· 363 363 364 364 static inline u32 red_random(const struct red_parms *p) 365 365 { 366 - return reciprocal_divide(prandom_u32(), p->max_P_reciprocal); 366 + return reciprocal_divide(get_random_u32(), p->max_P_reciprocal); 367 367 } 368 368 369 369 static inline int red_mark_probability(const struct red_parms *p,
+1 -1
include/net/sock.h
··· 2109 2109 2110 2110 static inline u32 net_tx_rndhash(void) 2111 2111 { 2112 - u32 v = prandom_u32(); 2112 + u32 v = get_random_u32(); 2113 2113 2114 2114 return v ?: 1; 2115 2115 }
+1 -1
kernel/bpf/bloom_filter.c
··· 158 158 attr->value_size / sizeof(u32); 159 159 160 160 if (!(attr->map_flags & BPF_F_ZERO_SEED)) 161 - bloom->hash_seed = get_random_int(); 161 + bloom->hash_seed = get_random_u32(); 162 162 163 163 return &bloom->map; 164 164 }
+3 -3
kernel/bpf/core.c
··· 1032 1032 hdr->size = size; 1033 1033 hole = min_t(unsigned int, size - (proglen + sizeof(*hdr)), 1034 1034 PAGE_SIZE - sizeof(*hdr)); 1035 - start = (get_random_int() % hole) & ~(alignment - 1); 1035 + start = prandom_u32_max(hole) & ~(alignment - 1); 1036 1036 1037 1037 /* Leave a random number of instructions before BPF code. */ 1038 1038 *image_ptr = &hdr->image[start]; ··· 1094 1094 1095 1095 hole = min_t(unsigned int, size - (proglen + sizeof(*ro_header)), 1096 1096 BPF_PROG_CHUNK_SIZE - sizeof(*ro_header)); 1097 - start = (get_random_int() % hole) & ~(alignment - 1); 1097 + start = prandom_u32_max(hole) & ~(alignment - 1); 1098 1098 1099 1099 *image_ptr = &ro_header->image[start]; 1100 1100 *rw_image = &(*rw_header)->image[start]; ··· 1216 1216 bool emit_zext) 1217 1217 { 1218 1218 struct bpf_insn *to = to_buff; 1219 - u32 imm_rnd = get_random_int(); 1219 + u32 imm_rnd = get_random_u32(); 1220 1220 s16 off; 1221 1221 1222 1222 BUILD_BUG_ON(BPF_REG_AX + 1 != MAX_BPF_JIT_REG);
+1 -1
kernel/bpf/hashtab.c
··· 527 527 if (htab->map.map_flags & BPF_F_ZERO_SEED) 528 528 htab->hashrnd = 0; 529 529 else 530 - htab->hashrnd = get_random_int(); 530 + htab->hashrnd = get_random_u32(); 531 531 532 532 htab_init_buckets(htab); 533 533
+1 -1
kernel/bpf/verifier.c
··· 13350 13350 aux[adj_idx].ptr_type == PTR_TO_CTX) 13351 13351 continue; 13352 13352 13353 - imm_rnd = get_random_int(); 13353 + imm_rnd = get_random_u32(); 13354 13354 rnd_hi32_patch[0] = insn; 13355 13355 rnd_hi32_patch[1].imm = imm_rnd; 13356 13356 rnd_hi32_patch[3].dst_reg = load_reg;
+2 -2
kernel/kcsan/selftest.c
··· 26 26 static bool __init test_requires(void) 27 27 { 28 28 /* random should be initialized for the below tests */ 29 - return prandom_u32() + prandom_u32() != 0; 29 + return get_random_u32() + get_random_u32() != 0; 30 30 } 31 31 32 32 /* ··· 46 46 unsigned long addr; 47 47 size_t verif_size; 48 48 49 - prandom_bytes(&addr, sizeof(addr)); 49 + get_random_bytes(&addr, sizeof(addr)); 50 50 if (addr < PAGE_SIZE) 51 51 addr = PAGE_SIZE; 52 52
+2 -2
kernel/locking/test-ww_mutex.c
··· 399 399 order[n] = n; 400 400 401 401 for (n = count - 1; n > 1; n--) { 402 - r = get_random_int() % (n + 1); 402 + r = prandom_u32_max(n + 1); 403 403 if (r != n) { 404 404 tmp = order[n]; 405 405 order[n] = order[r]; ··· 538 538 { 539 539 struct stress *stress = container_of(work, typeof(*stress), work); 540 540 const int nlocks = stress->nlocks; 541 - struct ww_mutex *lock = stress->locks + (get_random_int() % nlocks); 541 + struct ww_mutex *lock = stress->locks + prandom_u32_max(nlocks); 542 542 int err; 543 543 544 544 do {
+1 -1
kernel/time/clocksource.c
··· 310 310 * CPUs that are currently online. 311 311 */ 312 312 for (i = 1; i < n; i++) { 313 - cpu = prandom_u32() % nr_cpu_ids; 313 + cpu = prandom_u32_max(nr_cpu_ids); 314 314 cpu = cpumask_next(cpu - 1, cpu_online_mask); 315 315 if (cpu >= nr_cpu_ids) 316 316 cpu = cpumask_first(cpu_online_mask);
+2 -2
lib/cmdline_kunit.c
··· 76 76 int rc = cmdline_test_values[i]; 77 77 int offset; 78 78 79 - sprintf(in, "%u%s", get_random_int() % 256, str); 79 + sprintf(in, "%u%s", get_random_u8(), str); 80 80 /* Only first '-' after the number will advance the pointer */ 81 81 offset = strlen(in) - strlen(str) + !!(rc == 2); 82 82 cmdline_do_one_test(test, in, rc, offset); ··· 94 94 int rc = strcmp(str, "") ? (strcmp(str, "-") ? 0 : 1) : 1; 95 95 int offset; 96 96 97 - sprintf(in, "%s%u", str, get_random_int() % 256); 97 + sprintf(in, "%s%u", str, get_random_u8()); 98 98 /* 99 99 * Only first and leading '-' not followed by integer 100 100 * will advance the pointer.
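Editor's note: in the cmdline_kunit hunk, get_random_int() % 256 becomes get_random_u8(). Because 2^32 is an exact multiple of 256, the low byte of a uniform 32-bit value is itself uniform over 0..255, so the narrower call gives the same distribution. A tiny userspace check of that identity (the constant walk over u32 values is arbitrary):

/* Sketch: for any 32-bit x, x % 256 equals the low byte of x, so drawing a
 * random u8 directly is equivalent to reducing a random u32 modulo 256. */
#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint32_t x = 0xdeadbeef;

	for (int i = 0; i < 1000; i++) {
		assert((x % 256) == (uint8_t)x);
		x = x * 1664525u + 1013904223u;	/* arbitrary walk over u32 values */
	}
	return 0;
}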
+1 -1
lib/fault-inject.c
··· 139 139 return false; 140 140 } 141 141 142 - if (attr->probability <= prandom_u32() % 100) 142 + if (attr->probability <= prandom_u32_max(100)) 143 143 return false; 144 144 145 145 if (!fail_stacktrace(attr))
+2 -2
lib/find_bit_benchmark.c
··· 174 174 bitmap_zero(bitmap2, BITMAP_LEN); 175 175 176 176 while (nbits--) { 177 - __set_bit(prandom_u32() % BITMAP_LEN, bitmap); 178 - __set_bit(prandom_u32() % BITMAP_LEN, bitmap2); 177 + __set_bit(prandom_u32_max(BITMAP_LEN), bitmap); 178 + __set_bit(prandom_u32_max(BITMAP_LEN), bitmap2); 179 179 } 180 180 181 181 test_find_next_bit(bitmap, BITMAP_LEN);
+1 -1
lib/kobject.c
··· 694 694 { 695 695 struct kobject *kobj = container_of(kref, struct kobject, kref); 696 696 #ifdef CONFIG_DEBUG_KOBJECT_RELEASE 697 - unsigned long delay = HZ + HZ * (get_random_int() & 0x3); 697 + unsigned long delay = HZ + HZ * prandom_u32_max(4); 698 698 pr_info("kobject: '%s' (%p): %s, parent %p (delayed %ld)\n", 699 699 kobject_name(kobj), kobj, __func__, kobj->parent, delay); 700 700 INIT_DELAYED_WORK(&kobj->release, kobject_delayed_cleanup);
+2 -2
lib/random32.c
··· 47 47 * @state: pointer to state structure holding seeded state. 48 48 * 49 49 * This is used for pseudo-randomness with no outside seeding. 50 - * For more random results, use prandom_u32(). 50 + * For more random results, use get_random_u32(). 51 51 */ 52 52 u32 prandom_u32_state(struct rnd_state *state) 53 53 { ··· 69 69 * @bytes: the requested number of bytes 70 70 * 71 71 * This is used for pseudo-randomness with no outside seeding. 72 - * For more random results, use prandom_bytes(). 72 + * For more random results, use get_random_bytes(). 73 73 */ 74 74 void prandom_bytes_state(struct rnd_state *state, void *buf, size_t bytes) 75 75 {
+6 -6
lib/reed_solomon/test_rslib.c
··· 164 164 165 165 /* Load c with random data and encode */ 166 166 for (i = 0; i < dlen; i++) 167 - c[i] = prandom_u32() & nn; 167 + c[i] = get_random_u32() & nn; 168 168 169 169 memset(c + dlen, 0, nroots * sizeof(*c)); 170 170 encode_rs16(rs, c, dlen, c + dlen, 0); ··· 178 178 for (i = 0; i < errs; i++) { 179 179 do { 180 180 /* Error value must be nonzero */ 181 - errval = prandom_u32() & nn; 181 + errval = get_random_u32() & nn; 182 182 } while (errval == 0); 183 183 184 184 do { 185 185 /* Must not choose the same location twice */ 186 - errloc = prandom_u32() % len; 186 + errloc = prandom_u32_max(len); 187 187 } while (errlocs[errloc] != 0); 188 188 189 189 errlocs[errloc] = 1; ··· 194 194 for (i = 0; i < eras; i++) { 195 195 do { 196 196 /* Must not choose the same location twice */ 197 - errloc = prandom_u32() % len; 197 + errloc = prandom_u32_max(len); 198 198 } while (errlocs[errloc] != 0); 199 199 200 200 derrlocs[i] = errloc; 201 201 202 - if (ewsc && (prandom_u32() & 1)) { 202 + if (ewsc && prandom_u32_max(2)) { 203 203 /* Erasure with the symbol intact */ 204 204 errlocs[errloc] = 2; 205 205 } else { 206 206 /* Erasure with corrupted symbol */ 207 207 do { 208 208 /* Error value must be nonzero */ 209 - errval = prandom_u32() & nn; 209 + errval = get_random_u32() & nn; 210 210 } while (errval == 0); 211 211 212 212 errlocs[errloc] = 1;
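Editor's note: the Reed-Solomon test above keeps two small redraw loops around its random draws — an error value is redrawn until it is nonzero, and an error location is redrawn until it hits an unused slot. A compact userspace sketch of that pattern; LEN, NN and rand32() are placeholders, not the driver's real values:

/* Sketch of the redraw-until-valid loops above: reject zero symbol values
 * and reject already-used locations. Sizes and the source are placeholders. */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define LEN	16
#define NN	255	/* placeholder for the symbol mask */

static uint32_t rand32(void)
{
	return ((uint32_t)rand() << 16) ^ (uint32_t)rand();
}

int main(void)
{
	uint8_t errlocs[LEN];
	uint32_t errval, errloc;

	memset(errlocs, 0, sizeof(errlocs));

	for (int i = 0; i < 4; i++) {
		do {	/* error value must be nonzero */
			errval = rand32() & NN;
		} while (errval == 0);

		do {	/* must not choose the same location twice */
			errloc = (uint32_t)(((uint64_t)rand32() * LEN) >> 32);
		} while (errlocs[errloc] != 0);

		errlocs[errloc] = 1;
		printf("corrupt symbol %u at %u\n", errval, errloc);
	}
	return 0;
}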
+2 -2
lib/sbitmap.c
··· 21 21 int i; 22 22 23 23 for_each_possible_cpu(i) 24 - *per_cpu_ptr(sb->alloc_hint, i) = prandom_u32() % depth; 24 + *per_cpu_ptr(sb->alloc_hint, i) = prandom_u32_max(depth); 25 25 } 26 26 return 0; 27 27 } ··· 33 33 34 34 hint = this_cpu_read(*sb->alloc_hint); 35 35 if (unlikely(hint >= depth)) { 36 - hint = depth ? prandom_u32() % depth : 0; 36 + hint = depth ? prandom_u32_max(depth) : 0; 37 37 this_cpu_write(*sb->alloc_hint, hint); 38 38 } 39 39
+1 -1
lib/test-string_helpers.c
··· 587 587 for (i = 0; i < UNESCAPE_ALL_MASK + 1; i++) 588 588 test_string_unescape("unescape", i, false); 589 589 test_string_unescape("unescape inplace", 590 - get_random_int() % (UNESCAPE_ANY + 1), true); 590 + prandom_u32_max(UNESCAPE_ANY + 1), true); 591 591 592 592 /* Without dictionary */ 593 593 for (i = 0; i < ESCAPE_ALL_MASK + 1; i++)
+1 -1
lib/test_fprobe.c
··· 145 145 static int fprobe_test_init(struct kunit *test) 146 146 { 147 147 do { 148 - rand1 = prandom_u32(); 148 + rand1 = get_random_u32(); 149 149 } while (rand1 <= div_factor); 150 150 151 151 target = fprobe_selftest_target;
+5 -5
lib/test_hexdump.c
··· 149 149 static void __init test_hexdump_set(int rowsize, bool ascii) 150 150 { 151 151 size_t d = min_t(size_t, sizeof(data_b), rowsize); 152 - size_t len = get_random_int() % d + 1; 152 + size_t len = prandom_u32_max(d) + 1; 153 153 154 154 test_hexdump(len, rowsize, 4, ascii); 155 155 test_hexdump(len, rowsize, 2, ascii); ··· 208 208 static void __init test_hexdump_overflow_set(size_t buflen, bool ascii) 209 209 { 210 210 unsigned int i = 0; 211 - int rs = (get_random_int() % 2 + 1) * 16; 211 + int rs = (prandom_u32_max(2) + 1) * 16; 212 212 213 213 do { 214 214 int gs = 1 << i; 215 - size_t len = get_random_int() % rs + gs; 215 + size_t len = prandom_u32_max(rs) + gs; 216 216 217 217 test_hexdump_overflow(buflen, rounddown(len, gs), rs, gs, ascii); 218 218 } while (i++ < 3); ··· 223 223 unsigned int i; 224 224 int rowsize; 225 225 226 - rowsize = (get_random_int() % 2 + 1) * 16; 226 + rowsize = (prandom_u32_max(2) + 1) * 16; 227 227 for (i = 0; i < 16; i++) 228 228 test_hexdump_set(rowsize, false); 229 229 230 - rowsize = (get_random_int() % 2 + 1) * 16; 230 + rowsize = (prandom_u32_max(2) + 1) * 16; 231 231 for (i = 0; i < 16; i++) 232 232 test_hexdump_set(rowsize, true); 233 233
+1 -1
lib/test_kprobes.c
··· 341 341 stacktrace_driver = kprobe_stacktrace_driver; 342 342 343 343 do { 344 - rand1 = prandom_u32(); 344 + rand1 = get_random_u32(); 345 345 } while (rand1 <= div_factor); 346 346 return 0; 347 347 }
+1 -1
lib/test_list_sort.c
··· 71 71 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, el); 72 72 73 73 /* force some equivalencies */ 74 - el->value = prandom_u32() % (TEST_LIST_LEN / 3); 74 + el->value = prandom_u32_max(TEST_LIST_LEN / 3); 75 75 el->serial = i; 76 76 el->poison1 = TEST_POISON1; 77 77 el->poison2 = TEST_POISON2;
+3 -3
lib/test_min_heap.c
··· 83 83 /* Test with randomly generated values. */ 84 84 heap.nr = ARRAY_SIZE(values); 85 85 for (i = 0; i < heap.nr; i++) 86 - values[i] = get_random_int(); 86 + values[i] = get_random_u32(); 87 87 88 88 min_heapify_all(&heap, &funcs); 89 89 err += pop_verify_heap(min_heap, &heap, &funcs); ··· 116 116 117 117 /* Test with randomly generated values. */ 118 118 while (heap.nr < heap.size) { 119 - temp = get_random_int(); 119 + temp = get_random_u32(); 120 120 min_heap_push(&heap, &temp, &funcs); 121 121 } 122 122 err += pop_verify_heap(min_heap, &heap, &funcs); ··· 158 158 159 159 /* Test with randomly generated values. */ 160 160 for (i = 0; i < ARRAY_SIZE(data); i++) { 161 - temp = get_random_int(); 161 + temp = get_random_u32(); 162 162 min_heap_pop_push(&heap, &temp, &funcs); 163 163 } 164 164 err += pop_verify_heap(min_heap, &heap, &funcs);
+1 -1
lib/test_objagg.c
··· 157 157 int err; 158 158 159 159 if (should_create_root) 160 - prandom_bytes(world->next_root_buf, 160 + get_random_bytes(world->next_root_buf, 161 161 sizeof(world->next_root_buf)); 162 162 163 163 objagg_obj = world_obj_get(world, objagg, key_id);
+3 -3
lib/test_rhashtable.c
··· 291 291 if (WARN_ON(err)) 292 292 goto out_free; 293 293 294 - k = prandom_u32(); 294 + k = get_random_u32(); 295 295 ret = 0; 296 296 for (i = 0; i < entries; i++) { 297 297 rhl_test_objects[i].value.id = k; ··· 369 369 pr_info("test %d random rhlist add/delete operations\n", entries); 370 370 for (j = 0; j < entries; j++) { 371 371 u32 i = prandom_u32_max(entries); 372 - u32 prand = prandom_u32(); 372 + u32 prand = get_random_u32(); 373 373 374 374 cond_resched(); 375 375 376 376 if (prand == 0) 377 - prand = prandom_u32(); 377 + prand = get_random_u32(); 378 378 379 379 if (prand & 1) { 380 380 prand >>= 1;
+5 -14
lib/test_vmalloc.c
··· 80 80 int i; 81 81 82 82 for (i = 0; i < test_loop_count; i++) { 83 - rnd = prandom_u32(); 83 + rnd = get_random_u8(); 84 84 85 85 /* 86 86 * Maximum 1024 pages, if PAGE_SIZE is 4096. ··· 151 151 int i; 152 152 153 153 for (i = 0; i < test_loop_count; i++) { 154 - n = prandom_u32(); 155 - n = (n % 100) + 1; 156 - 154 + n = prandom_u32_max(100) + 1; 157 155 p = vmalloc(n * PAGE_SIZE); 158 156 159 157 if (!p) ··· 291 293 return -1; 292 294 293 295 for (i = 0; i < 35000; i++) { 294 - unsigned int r; 295 - 296 - r = prandom_u32(); 297 - size = (r % (PAGE_SIZE / 4)) + 1; 296 + size = prandom_u32_max(PAGE_SIZE / 4) + 1; 298 297 299 298 /* 300 299 * Maximum PAGE_SIZE 301 300 */ 302 - r = prandom_u32(); 303 - align = 1 << ((r % 11) + 1); 301 + align = 1 << (prandom_u32_max(11) + 1); 304 302 305 303 pcpu[i] = __alloc_percpu(size, align); 306 304 if (!pcpu[i]) ··· 387 393 388 394 static void shuffle_array(int *arr, int n) 389 395 { 390 - unsigned int rnd; 391 396 int i, j; 392 397 393 398 for (i = n - 1; i > 0; i--) { 394 - rnd = prandom_u32(); 395 - 396 399 /* Cut the range. */ 397 - j = rnd % i; 400 + j = prandom_u32_max(i); 398 401 399 402 /* Swap indexes. */ 400 403 swap(arr[i], arr[j]);
+1 -1
lib/uuid.c
··· 52 52 53 53 static void __uuid_gen_common(__u8 b[16]) 54 54 { 55 - prandom_bytes(b, 16); 55 + get_random_bytes(b, 16); 56 56 /* reversion 0b10 */ 57 57 b[8] = (b[8] & 0x3F) | 0x80; 58 58 }
+3 -3
mm/kasan/kasan_test.c
··· 1299 1299 KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_GENERIC); 1300 1300 1301 1301 for (i = 0; i < 256; i++) { 1302 - size = (get_random_int() % 1024) + 1; 1302 + size = prandom_u32_max(1024) + 1; 1303 1303 ptr = kmalloc(size, GFP_KERNEL); 1304 1304 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr); 1305 1305 KUNIT_EXPECT_GE(test, (u8)get_tag(ptr), (u8)KASAN_TAG_MIN); ··· 1308 1308 } 1309 1309 1310 1310 for (i = 0; i < 256; i++) { 1311 - order = (get_random_int() % 4) + 1; 1311 + order = prandom_u32_max(4) + 1; 1312 1312 pages = alloc_pages(GFP_KERNEL, order); 1313 1313 ptr = page_address(pages); 1314 1314 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr); ··· 1321 1321 return; 1322 1322 1323 1323 for (i = 0; i < 256; i++) { 1324 - size = (get_random_int() % 1024) + 1; 1324 + size = prandom_u32_max(1024) + 1; 1325 1325 ptr = vmalloc(size); 1326 1326 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr); 1327 1327 KUNIT_EXPECT_GE(test, (u8)get_tag(ptr), (u8)KASAN_TAG_MIN);
+1 -1
mm/shmem.c
··· 2332 2332 inode_init_owner(&init_user_ns, inode, dir, mode); 2333 2333 inode->i_blocks = 0; 2334 2334 inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode); 2335 - inode->i_generation = prandom_u32(); 2335 + inode->i_generation = get_random_u32(); 2336 2336 info = SHMEM_I(inode); 2337 2337 memset(info, 0, (char *)inode - (char *)info); 2338 2338 spin_lock_init(&info->lock);
+1 -1
mm/slab.c
··· 2381 2381 unsigned int rand; 2382 2382 2383 2383 /* Use best entropy available to define a random shift */ 2384 - rand = get_random_int(); 2384 + rand = get_random_u32(); 2385 2385 2386 2386 /* Use a random state if the pre-computed list is not available */ 2387 2387 if (!cachep->random_seq) {
+1 -1
mm/slub.c
··· 1881 1881 return false; 1882 1882 1883 1883 freelist_count = oo_objects(s->oo); 1884 - pos = get_random_int() % freelist_count; 1884 + pos = prandom_u32_max(freelist_count); 1885 1885 1886 1886 page_limit = slab->objects * s->size; 1887 1887 start = fixup_red_left(s, slab_address(slab));
+1 -1
net/802/garp.c
··· 407 407 { 408 408 unsigned long delay; 409 409 410 - delay = (u64)msecs_to_jiffies(garp_join_time) * prandom_u32() >> 32; 410 + delay = prandom_u32_max(msecs_to_jiffies(garp_join_time)); 411 411 mod_timer(&app->join_timer, jiffies + delay); 412 412 } 413 413
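Editor's note: the garp hunk (and the mrp one just below) is a case where the bounded draw was already open-coded at the call site: (u64)N * prandom_u32() >> 32 is floor(sample * N / 2^32), a value in [0, N). Assuming prandom_u32_max() has the usual multiply-shift shape, the two expressions agree bit for bit, as this small userspace check illustrates (n stands in for msecs_to_jiffies(garp_join_time)):

/* Sketch: the old open-coded expression and a helper-style expression
 * (an assumption about prandom_u32_max()'s shape) compute the same value,
 * and the result is always below the bound. */
#include <assert.h>
#include <stdint.h>

static uint32_t scaled_old(uint32_t sample, uint32_t n)
{
	return (uint64_t)n * sample >> 32;			/* old garp_join_timer_arm() form */
}

static uint32_t scaled_new(uint32_t sample, uint32_t n)
{
	return (uint32_t)(((uint64_t)sample * n) >> 32);	/* helper-style form */
}

int main(void)
{
	const uint32_t samples[] = { 0, 1, 0x7fffffff, 0xdeadbeef, 0xffffffff };
	const uint32_t n = 500;	/* stand-in for msecs_to_jiffies(garp_join_time) */

	for (unsigned int i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
		assert(scaled_old(samples[i], n) == scaled_new(samples[i], n));
		assert(scaled_old(samples[i], n) < n);
	}
	return 0;
}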
+1 -1
net/802/mrp.c
··· 592 592 { 593 593 unsigned long delay; 594 594 595 - delay = (u64)msecs_to_jiffies(mrp_join_time) * prandom_u32() >> 32; 595 + delay = prandom_u32_max(msecs_to_jiffies(mrp_join_time)); 596 596 mod_timer(&app->join_timer, jiffies + delay); 597 597 } 598 598
+1 -1
net/ceph/mon_client.c
··· 222 222 max--; 223 223 } 224 224 225 - n = prandom_u32() % max; 225 + n = prandom_u32_max(max); 226 226 if (o >= 0 && n >= o) 227 227 n++; 228 228
+1 -1
net/ceph/osd_client.c
··· 1479 1479 1480 1480 static int pick_random_replica(const struct ceph_osds *acting) 1481 1481 { 1482 - int i = prandom_u32() % acting->size; 1482 + int i = prandom_u32_max(acting->size); 1483 1483 1484 1484 dout("%s picked osd%d, primary osd%d\n", __func__, 1485 1485 acting->osds[i], acting->primary);
+1 -1
net/core/neighbour.c
··· 111 111 112 112 unsigned long neigh_rand_reach_time(unsigned long base) 113 113 { 114 - return base ? (prandom_u32() % base) + (base >> 1) : 0; 114 + return base ? prandom_u32_max(base) + (base >> 1) : 0; 115 115 } 116 116 EXPORT_SYMBOL(neigh_rand_reach_time); 117 117
+23 -24
net/core/pktgen.c
··· 2324 2324 pkt_dev->curfl = 0; /*reset */ 2325 2325 } 2326 2326 } else { 2327 - flow = prandom_u32() % pkt_dev->cflows; 2327 + flow = prandom_u32_max(pkt_dev->cflows); 2328 2328 pkt_dev->curfl = flow; 2329 2329 2330 2330 if (pkt_dev->flows[flow].count > pkt_dev->lflow) { ··· 2380 2380 else if (pkt_dev->queue_map_min <= pkt_dev->queue_map_max) { 2381 2381 __u16 t; 2382 2382 if (pkt_dev->flags & F_QUEUE_MAP_RND) { 2383 - t = prandom_u32() % 2384 - (pkt_dev->queue_map_max - 2385 - pkt_dev->queue_map_min + 1) 2386 - + pkt_dev->queue_map_min; 2383 + t = prandom_u32_max(pkt_dev->queue_map_max - 2384 + pkt_dev->queue_map_min + 1) + 2385 + pkt_dev->queue_map_min; 2387 2386 } else { 2388 2387 t = pkt_dev->cur_queue_map + 1; 2389 2388 if (t > pkt_dev->queue_map_max) ··· 2411 2412 __u32 tmp; 2412 2413 2413 2414 if (pkt_dev->flags & F_MACSRC_RND) 2414 - mc = prandom_u32() % pkt_dev->src_mac_count; 2415 + mc = prandom_u32_max(pkt_dev->src_mac_count); 2415 2416 else { 2416 2417 mc = pkt_dev->cur_src_mac_offset++; 2417 2418 if (pkt_dev->cur_src_mac_offset >= ··· 2437 2438 __u32 tmp; 2438 2439 2439 2440 if (pkt_dev->flags & F_MACDST_RND) 2440 - mc = prandom_u32() % pkt_dev->dst_mac_count; 2441 + mc = prandom_u32_max(pkt_dev->dst_mac_count); 2441 2442 2442 2443 else { 2443 2444 mc = pkt_dev->cur_dst_mac_offset++; ··· 2464 2465 for (i = 0; i < pkt_dev->nr_labels; i++) 2465 2466 if (pkt_dev->labels[i] & MPLS_STACK_BOTTOM) 2466 2467 pkt_dev->labels[i] = MPLS_STACK_BOTTOM | 2467 - ((__force __be32)prandom_u32() & 2468 + ((__force __be32)get_random_u32() & 2468 2469 htonl(0x000fffff)); 2469 2470 } 2470 2471 2471 2472 if ((pkt_dev->flags & F_VID_RND) && (pkt_dev->vlan_id != 0xffff)) { 2472 - pkt_dev->vlan_id = prandom_u32() & (4096 - 1); 2473 + pkt_dev->vlan_id = prandom_u32_max(4096); 2473 2474 } 2474 2475 2475 2476 if ((pkt_dev->flags & F_SVID_RND) && (pkt_dev->svlan_id != 0xffff)) { 2476 - pkt_dev->svlan_id = prandom_u32() & (4096 - 1); 2477 + pkt_dev->svlan_id = prandom_u32_max(4096); 2477 2478 } 2478 2479 2479 2480 if (pkt_dev->udp_src_min < pkt_dev->udp_src_max) { 2480 2481 if (pkt_dev->flags & F_UDPSRC_RND) 2481 - pkt_dev->cur_udp_src = prandom_u32() % 2482 - (pkt_dev->udp_src_max - pkt_dev->udp_src_min) 2483 - + pkt_dev->udp_src_min; 2482 + pkt_dev->cur_udp_src = prandom_u32_max( 2483 + pkt_dev->udp_src_max - pkt_dev->udp_src_min) + 2484 + pkt_dev->udp_src_min; 2484 2485 2485 2486 else { 2486 2487 pkt_dev->cur_udp_src++; ··· 2491 2492 2492 2493 if (pkt_dev->udp_dst_min < pkt_dev->udp_dst_max) { 2493 2494 if (pkt_dev->flags & F_UDPDST_RND) { 2494 - pkt_dev->cur_udp_dst = prandom_u32() % 2495 - (pkt_dev->udp_dst_max - pkt_dev->udp_dst_min) 2496 - + pkt_dev->udp_dst_min; 2495 + pkt_dev->cur_udp_dst = prandom_u32_max( 2496 + pkt_dev->udp_dst_max - pkt_dev->udp_dst_min) + 2497 + pkt_dev->udp_dst_min; 2497 2498 } else { 2498 2499 pkt_dev->cur_udp_dst++; 2499 2500 if (pkt_dev->cur_udp_dst >= pkt_dev->udp_dst_max) ··· 2508 2509 if (imn < imx) { 2509 2510 __u32 t; 2510 2511 if (pkt_dev->flags & F_IPSRC_RND) 2511 - t = prandom_u32() % (imx - imn) + imn; 2512 + t = prandom_u32_max(imx - imn) + imn; 2512 2513 else { 2513 2514 t = ntohl(pkt_dev->cur_saddr); 2514 2515 t++; ··· 2530 2531 if (pkt_dev->flags & F_IPDST_RND) { 2531 2532 2532 2533 do { 2533 - t = prandom_u32() % 2534 - (imx - imn) + imn; 2534 + t = prandom_u32_max(imx - imn) + 2535 + imn; 2535 2536 s = htonl(t); 2536 2537 } while (ipv4_is_loopback(s) || 2537 2538 ipv4_is_multicast(s) || ··· 2568 2569 2569 2570 for (i = 0; i < 4; i++) { 2570 2571 pkt_dev->cur_in6_daddr.s6_addr32[i] = 2571 - (((__force __be32)prandom_u32() | 2572 + (((__force __be32)get_random_u32() | 2572 2573 pkt_dev->min_in6_daddr.s6_addr32[i]) & 2573 2574 pkt_dev->max_in6_daddr.s6_addr32[i]); 2574 2575 }
+1 -1
net/core/stream.c
··· 123 123 DEFINE_WAIT_FUNC(wait, woken_wake_function); 124 124 125 125 if (sk_stream_memory_free(sk)) 126 - current_timeo = vm_wait = (prandom_u32() % (HZ / 5)) + 2; 126 + current_timeo = vm_wait = prandom_u32_max(HZ / 5) + 2; 127 127 128 128 add_wait_queue(sk_sleep(sk), &wait); 129 129
+2 -2
net/dccp/ipv4.c
··· 144 144 inet->inet_daddr, 145 145 inet->inet_sport, 146 146 inet->inet_dport); 147 - inet->inet_id = prandom_u32(); 147 + inet->inet_id = get_random_u16(); 148 148 149 149 err = dccp_connect(sk); 150 150 rt = NULL; ··· 443 443 RCU_INIT_POINTER(newinet->inet_opt, rcu_dereference(ireq->ireq_opt)); 444 444 newinet->mc_index = inet_iif(skb); 445 445 newinet->mc_ttl = ip_hdr(skb)->ttl; 446 - newinet->inet_id = prandom_u32(); 446 + newinet->inet_id = get_random_u16(); 447 447 448 448 if (dst == NULL && (dst = inet_csk_route_child_sock(sk, newsk, req)) == NULL) 449 449 goto put_and_exit;
+1 -1
net/ipv4/datagram.c
··· 73 73 reuseport_has_conns(sk, true); 74 74 sk->sk_state = TCP_ESTABLISHED; 75 75 sk_set_txhash(sk); 76 - inet->inet_id = prandom_u32(); 76 + inet->inet_id = get_random_u16(); 77 77 78 78 sk_dst_set(sk, &rt->dst); 79 79 err = 0;
+3 -3
net/ipv4/igmp.c
··· 213 213 /* It must be called with locked im->lock */ 214 214 static void igmp_start_timer(struct ip_mc_list *im, int max_delay) 215 215 { 216 - int tv = prandom_u32() % max_delay; 216 + int tv = prandom_u32_max(max_delay); 217 217 218 218 im->tm_running = 1; 219 219 if (!mod_timer(&im->timer, jiffies+tv+2)) ··· 222 222 223 223 static void igmp_gq_start_timer(struct in_device *in_dev) 224 224 { 225 - int tv = prandom_u32() % in_dev->mr_maxdelay; 225 + int tv = prandom_u32_max(in_dev->mr_maxdelay); 226 226 unsigned long exp = jiffies + tv + 2; 227 227 228 228 if (in_dev->mr_gq_running && ··· 236 236 237 237 static void igmp_ifc_start_timer(struct in_device *in_dev, int delay) 238 238 { 239 - int tv = prandom_u32() % delay; 239 + int tv = prandom_u32_max(delay); 240 240 241 241 if (!mod_timer(&in_dev->mr_ifc_timer, jiffies+tv+2)) 242 242 in_dev_hold(in_dev);
+1 -1
net/ipv4/inet_connection_sock.c
··· 314 314 if (likely(remaining > 1)) 315 315 remaining &= ~1U; 316 316 317 - offset = prandom_u32() % remaining; 317 + offset = prandom_u32_max(remaining); 318 318 /* __inet_hash_connect() favors ports having @low parity 319 319 * We do the opposite to not pollute connect() users. 320 320 */
+1 -1
net/ipv4/inet_hashtables.c
··· 1037 1037 * on low contention the randomness is maximal and on high contention 1038 1038 * it may be inexistent. 1039 1039 */ 1040 - i = max_t(int, i, (prandom_u32() & 7) * 2); 1040 + i = max_t(int, i, prandom_u32_max(8) * 2); 1041 1041 WRITE_ONCE(table_perturb[index], READ_ONCE(table_perturb[index]) + i + 2); 1042 1042 1043 1043 /* Head lock still held and bh's disabled */
+1 -1
net/ipv4/ip_output.c
··· 172 172 * Avoid using the hashed IP ident generator. 173 173 */ 174 174 if (sk->sk_protocol == IPPROTO_TCP) 175 - iph->id = (__force __be16)prandom_u32(); 175 + iph->id = (__force __be16)get_random_u16(); 176 176 else 177 177 __ip_select_ident(net, iph, 1); 178 178 }
+2 -2
net/ipv4/route.c
··· 3664 3664 { 3665 3665 atomic_set(&net->ipv4.rt_genid, 0); 3666 3666 atomic_set(&net->fnhe_genid, 0); 3667 - atomic_set(&net->ipv4.dev_addr_genid, get_random_int()); 3667 + atomic_set(&net->ipv4.dev_addr_genid, get_random_u32()); 3668 3668 return 0; 3669 3669 } 3670 3670 ··· 3719 3719 3720 3720 ip_idents = idents_hash; 3721 3721 3722 - prandom_bytes(ip_idents, (ip_idents_mask + 1) * sizeof(*ip_idents)); 3722 + get_random_bytes(ip_idents, (ip_idents_mask + 1) * sizeof(*ip_idents)); 3723 3723 3724 3724 ip_tstamps = idents_hash + (ip_idents_mask + 1) * sizeof(*ip_idents); 3725 3725
+1 -1
net/ipv4/tcp_cdg.c
··· 243 243 struct cdg *ca = inet_csk_ca(sk); 244 244 struct tcp_sock *tp = tcp_sk(sk); 245 245 246 - if (prandom_u32() <= nexp_u32(grad * backoff_factor)) 246 + if (get_random_u32() <= nexp_u32(grad * backoff_factor)) 247 247 return false; 248 248 249 249 if (use_ineff) {
+2 -2
net/ipv4/tcp_ipv4.c
··· 323 323 inet->inet_daddr); 324 324 } 325 325 326 - inet->inet_id = prandom_u32(); 326 + inet->inet_id = get_random_u16(); 327 327 328 328 if (tcp_fastopen_defer_connect(sk, &err)) 329 329 return err; ··· 1543 1543 inet_csk(newsk)->icsk_ext_hdr_len = 0; 1544 1544 if (inet_opt) 1545 1545 inet_csk(newsk)->icsk_ext_hdr_len = inet_opt->opt.optlen; 1546 - newinet->inet_id = prandom_u32(); 1546 + newinet->inet_id = get_random_u16(); 1547 1547 1548 1548 /* Set ToS of the new socket based upon the value of incoming SYN. 1549 1549 * ECT bits are set later in tcp_init_transfer().
+1 -1
net/ipv4/udp.c
··· 246 246 inet_get_local_port_range(net, &low, &high); 247 247 remaining = (high - low) + 1; 248 248 249 - rand = prandom_u32(); 249 + rand = get_random_u32(); 250 250 first = reciprocal_scale(rand, remaining) + low; 251 251 /* 252 252 * force rand to be an odd multiple of UDP_HTABLE_SIZE
+4 -4
net/ipv6/addrconf.c
··· 104 104 static inline s32 rfc3315_s14_backoff_init(s32 irt) 105 105 { 106 106 /* multiply 'initial retransmission time' by 0.9 .. 1.1 */ 107 - u64 tmp = (900000 + prandom_u32() % 200001) * (u64)irt; 107 + u64 tmp = (900000 + prandom_u32_max(200001)) * (u64)irt; 108 108 do_div(tmp, 1000000); 109 109 return (s32)tmp; 110 110 } ··· 112 112 static inline s32 rfc3315_s14_backoff_update(s32 rt, s32 mrt) 113 113 { 114 114 /* multiply 'retransmission timeout' by 1.9 .. 2.1 */ 115 - u64 tmp = (1900000 + prandom_u32() % 200001) * (u64)rt; 115 + u64 tmp = (1900000 + prandom_u32_max(200001)) * (u64)rt; 116 116 do_div(tmp, 1000000); 117 117 if ((s32)tmp > mrt) { 118 118 /* multiply 'maximum retransmission time' by 0.9 .. 1.1 */ 119 - tmp = (900000 + prandom_u32() % 200001) * (u64)mrt; 119 + tmp = (900000 + prandom_u32_max(200001)) * (u64)mrt; 120 120 do_div(tmp, 1000000); 121 121 } 122 122 return (s32)tmp; ··· 3967 3967 if (ifp->flags & IFA_F_OPTIMISTIC) 3968 3968 rand_num = 0; 3969 3969 else 3970 - rand_num = prandom_u32() % (idev->cnf.rtr_solicit_delay ? : 1); 3970 + rand_num = prandom_u32_max(idev->cnf.rtr_solicit_delay ?: 1); 3971 3971 3972 3972 nonce = 0; 3973 3973 if (idev->cnf.enhanced_dad ||
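Editor's note: the addrconf hunk carries the RFC 3315 section 14 jitter — scaling a retransmission time by a random factor between 0.9 and 1.1 is done in integer math as (900000 + r) * t / 1000000 with r drawn from 0..200000. A worked userspace sketch of the initial-backoff case, with do_div() replaced by plain 64-bit division and rand32() as a stand-in source:

/* Sketch of the RFC 3315 jitter above: scale a retransmission time by a
 * random factor in [0.9, 1.1] using only integer arithmetic. */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

static uint32_t rand32(void)
{
	return ((uint32_t)rand() << 16) ^ (uint32_t)rand();
}

static int32_t backoff_init(int32_t irt)
{
	/* multiply 'initial retransmission time' by 0.9 .. 1.1 */
	uint32_t r = (uint32_t)(((uint64_t)rand32() * 200001) >> 32);	/* 0 .. 200000 */
	uint64_t tmp = (900000ull + r) * (uint64_t)irt;

	return (int32_t)(tmp / 1000000);
}

int main(void)
{
	/* e.g. an initial retransmission time of 1000 ms jitters to 900..1100 ms */
	for (int i = 0; i < 5; i++)
		printf("%d\n", backoff_init(1000));
	return 0;
}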
+1 -1
net/ipv6/ip6_flowlabel.c
··· 220 220 spin_lock_bh(&ip6_fl_lock); 221 221 if (label == 0) { 222 222 for (;;) { 223 - fl->label = htonl(prandom_u32())&IPV6_FLOWLABEL_MASK; 223 + fl->label = htonl(get_random_u32())&IPV6_FLOWLABEL_MASK; 224 224 if (fl->label) { 225 225 lfl = __fl_lookup(net, fl->label); 226 226 if (!lfl)
+5 -5
net/ipv6/mcast.c
··· 1050 1050 /* called with mc_lock */ 1051 1051 static void mld_gq_start_work(struct inet6_dev *idev) 1052 1052 { 1053 - unsigned long tv = prandom_u32() % idev->mc_maxdelay; 1053 + unsigned long tv = prandom_u32_max(idev->mc_maxdelay); 1054 1054 1055 1055 idev->mc_gq_running = 1; 1056 1056 if (!mod_delayed_work(mld_wq, &idev->mc_gq_work, tv + 2)) ··· 1068 1068 /* called with mc_lock */ 1069 1069 static void mld_ifc_start_work(struct inet6_dev *idev, unsigned long delay) 1070 1070 { 1071 - unsigned long tv = prandom_u32() % delay; 1071 + unsigned long tv = prandom_u32_max(delay); 1072 1072 1073 1073 if (!mod_delayed_work(mld_wq, &idev->mc_ifc_work, tv + 2)) 1074 1074 in6_dev_hold(idev); ··· 1085 1085 /* called with mc_lock */ 1086 1086 static void mld_dad_start_work(struct inet6_dev *idev, unsigned long delay) 1087 1087 { 1088 - unsigned long tv = prandom_u32() % delay; 1088 + unsigned long tv = prandom_u32_max(delay); 1089 1089 1090 1090 if (!mod_delayed_work(mld_wq, &idev->mc_dad_work, tv + 2)) 1091 1091 in6_dev_hold(idev); ··· 1130 1130 } 1131 1131 1132 1132 if (delay >= resptime) 1133 - delay = prandom_u32() % resptime; 1133 + delay = prandom_u32_max(resptime); 1134 1134 1135 1135 if (!mod_delayed_work(mld_wq, &ma->mca_work, delay)) 1136 1136 refcount_inc(&ma->mca_refcnt); ··· 2574 2574 2575 2575 igmp6_send(&ma->mca_addr, ma->idev->dev, ICMPV6_MGM_REPORT); 2576 2576 2577 - delay = prandom_u32() % unsolicited_report_interval(ma->idev); 2577 + delay = prandom_u32_max(unsolicited_report_interval(ma->idev)); 2578 2578 2579 2579 if (cancel_delayed_work(&ma->mca_work)) { 2580 2580 refcount_dec(&ma->mca_refcnt);
+1 -1
net/ipv6/output_core.c
··· 18 18 u32 id; 19 19 20 20 do { 21 - id = prandom_u32(); 21 + id = get_random_u32(); 22 22 } while (!id); 23 23 24 24 return id;
+1 -1
net/mac80211/rc80211_minstrel_ht.c
··· 2036 2036 2037 2037 memset(sample_table, 0xff, sizeof(sample_table)); 2038 2038 for (col = 0; col < SAMPLE_COLUMNS; col++) { 2039 - prandom_bytes(rnd, sizeof(rnd)); 2039 + get_random_bytes(rnd, sizeof(rnd)); 2040 2040 for (i = 0; i < MCS_GROUP_RATES; i++) { 2041 2041 new_idx = (i + rnd[i]) % MCS_GROUP_RATES; 2042 2042 while (sample_table[col][new_idx] != 0xff)
+1 -1
net/mac80211/scan.c
··· 641 641 if (flags & IEEE80211_PROBE_FLAG_RANDOM_SN) { 642 642 struct ieee80211_hdr *hdr = (void *)skb->data; 643 643 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 644 - u16 sn = get_random_u32(); 644 + u16 sn = get_random_u16(); 645 645 646 646 info->control.flags |= IEEE80211_TX_CTRL_NO_SEQNO; 647 647 hdr->seq_ctrl =
+1 -1
net/netfilter/ipvs/ip_vs_conn.c
··· 1308 1308 * Randomly scan 1/32 of the whole table every second 1309 1309 */ 1310 1310 for (idx = 0; idx < (ip_vs_conn_tab_size>>5); idx++) { 1311 - unsigned int hash = prandom_u32() & ip_vs_conn_tab_mask; 1311 + unsigned int hash = get_random_u32() & ip_vs_conn_tab_mask; 1312 1312 1313 1313 hlist_for_each_entry_rcu(cp, &ip_vs_conn_tab[hash], c_list) { 1314 1314 if (cp->ipvs != ipvs)
+2 -2
net/netfilter/ipvs/ip_vs_twos.c
··· 71 71 * from 0 to total_weight 72 72 */ 73 73 total_weight += 1; 74 - rweight1 = prandom_u32() % total_weight; 75 - rweight2 = prandom_u32() % total_weight; 74 + rweight1 = prandom_u32_max(total_weight); 75 + rweight2 = prandom_u32_max(total_weight); 76 76 77 77 /* Pick two weighted servers */ 78 78 list_for_each_entry_rcu(dest, &svc->destinations, n_list) {
+2 -2
net/netfilter/nf_nat_core.c
··· 468 468 if (range->flags & NF_NAT_RANGE_PROTO_OFFSET) 469 469 off = (ntohs(*keyptr) - ntohs(range->base_proto.all)); 470 470 else 471 - off = prandom_u32(); 471 + off = get_random_u16(); 472 472 473 473 attempts = range_size; 474 474 if (attempts > max_attempts) ··· 490 490 if (attempts >= range_size || attempts < 16) 491 491 return; 492 492 attempts /= 2; 493 - off = prandom_u32(); 493 + off = get_random_u16(); 494 494 goto another_round; 495 495 } 496 496
+1 -1
net/netfilter/xt_statistic.c
··· 34 34 35 35 switch (info->mode) { 36 36 case XT_STATISTIC_MODE_RANDOM: 37 - if ((prandom_u32() & 0x7FFFFFFF) < info->u.random.probability) 37 + if ((get_random_u32() & 0x7FFFFFFF) < info->u.random.probability) 38 38 ret = !ret; 39 39 break; 40 40 case XT_STATISTIC_MODE_NTH:
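Editor's note: the xt_statistic hunk (like the sfb and cdg ones elsewhere in this listing) uses the probability-threshold pattern — mask a fresh sample to 31 bits and compare it against a stored threshold, so a threshold of p * 2^31 matches with probability roughly p. A small userspace sketch with made-up names and a stand-in random source:

/* Sketch of the threshold comparison used by the statistic match: a fresh
 * 31-bit sample is below (p * 2^31) with probability roughly p. */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

static uint32_t rand32(void)
{
	return ((uint32_t)rand() << 16) ^ (uint32_t)rand();
}

static int fires(uint32_t probability)	/* probability is a fraction of 2^31 */
{
	return (rand32() & 0x7FFFFFFF) < probability;
}

int main(void)
{
	const uint32_t p_25_percent = 0x20000000;	/* 0.25 * 2^31 */
	int hits = 0;

	for (int i = 0; i < 100000; i++)
		hits += fires(p_25_percent);
	printf("%d hits out of 100000 (expect roughly 25000)\n", hits);
	return 0;
}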
+1 -1
net/openvswitch/actions.c
··· 1033 1033 actions = nla_next(sample_arg, &rem); 1034 1034 1035 1035 if ((arg->probability != U32_MAX) && 1036 - (!arg->probability || prandom_u32() > arg->probability)) { 1036 + (!arg->probability || get_random_u32() > arg->probability)) { 1037 1037 if (last) 1038 1038 consume_skb(skb); 1039 1039 return 0;
+1 -1
net/packet/af_packet.c
··· 1350 1350 if (READ_ONCE(history[i]) == rxhash) 1351 1351 count++; 1352 1352 1353 - victim = prandom_u32() % ROLLOVER_HLEN; 1353 + victim = prandom_u32_max(ROLLOVER_HLEN); 1354 1354 1355 1355 /* Avoid dirtying the cache line if possible */ 1356 1356 if (READ_ONCE(history[victim]) != rxhash)
+1 -1
net/rds/bind.c
··· 104 104 return -EINVAL; 105 105 last = rover; 106 106 } else { 107 - rover = max_t(u16, prandom_u32(), 2); 107 + rover = max_t(u16, get_random_u16(), 2); 108 108 last = rover - 1; 109 109 } 110 110
+1 -1
net/sched/act_gact.c
··· 25 25 static int gact_net_rand(struct tcf_gact *gact) 26 26 { 27 27 smp_rmb(); /* coupled with smp_wmb() in tcf_gact_init() */ 28 - if (prandom_u32() % gact->tcfg_pval) 28 + if (prandom_u32_max(gact->tcfg_pval)) 29 29 return gact->tcf_action; 30 30 return gact->tcfg_paction; 31 31 }
+1 -1
net/sched/act_sample.c
··· 168 168 psample_group = rcu_dereference_bh(s->psample_group); 169 169 170 170 /* randomly sample packets according to rate */ 171 - if (psample_group && (prandom_u32() % s->rate == 0)) { 171 + if (psample_group && (prandom_u32_max(s->rate) == 0)) { 172 172 if (!skb_at_tc_ingress(skb)) { 173 173 md.in_ifindex = skb->skb_iif; 174 174 md.out_ifindex = skb->dev->ifindex;
+4 -4
net/sched/sch_cake.c
··· 573 573 574 574 /* Simple BLUE implementation. Lack of ECN is deliberate. */ 575 575 if (vars->p_drop) 576 - drop |= (prandom_u32() < vars->p_drop); 576 + drop |= (get_random_u32() < vars->p_drop); 577 577 578 578 /* Overload the drop_next field as an activity timeout */ 579 579 if (!vars->count) ··· 2092 2092 2093 2093 WARN_ON(host_load > CAKE_QUEUES); 2094 2094 2095 - /* The shifted prandom_u32() is a way to apply dithering to 2096 - * avoid accumulating roundoff errors 2095 + /* The get_random_u16() is a way to apply dithering to avoid 2096 + * accumulating roundoff errors 2097 2097 */ 2098 2098 flow->deficit += (b->flow_quantum * quantum_div[host_load] + 2099 - (prandom_u32() >> 16)) >> 16; 2099 + get_random_u16()) >> 16; 2100 2100 list_move_tail(&flow->flowchain, &b->old_flows); 2101 2101 2102 2102 goto retry;
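Editor's note: in the sch_cake hunk, the dither term used to be prandom_u32() >> 16 — the top 16 bits of a uniform 32-bit value, which is already a uniform 16-bit value — so get_random_u16() is a drop-in with the same distribution. The sketch below shows why the dither matters: adding a uniform 16-bit value before the final >> 16 randomly rounds the fixed-point quotient up or down instead of always truncating (field names and values are illustrative only):

/* Sketch of a dithered fixed-point scale: with dither = 0 the result
 * truncates, with dither near 0xffff it rounds up, so on average the
 * quotient is unbiased. Names and values are illustrative only. */
#include <stdint.h>
#include <stdio.h>

static uint32_t dithered_scale(uint32_t quantum, uint32_t quantum_div, uint16_t dither)
{
	return (quantum * quantum_div + dither) >> 16;
}

int main(void)
{
	printf("%u %u\n",
	       dithered_scale(1514, 300, 0),	/* truncates to 6 */
	       dithered_scale(1514, 300, 0xffff));	/* rounds up to 7 */
	return 0;
}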
+11 -11
net/sched/sch_netem.c
··· 171 171 static void init_crandom(struct crndstate *state, unsigned long rho) 172 172 { 173 173 state->rho = rho; 174 - state->last = prandom_u32(); 174 + state->last = get_random_u32(); 175 175 } 176 176 177 177 /* get_crandom - correlated random number generator ··· 184 184 unsigned long answer; 185 185 186 186 if (!state || state->rho == 0) /* no correlation */ 187 - return prandom_u32(); 187 + return get_random_u32(); 188 188 189 - value = prandom_u32(); 189 + value = get_random_u32(); 190 190 rho = (u64)state->rho + 1; 191 191 answer = (value * ((1ull<<32) - rho) + state->last * rho) >> 32; 192 192 state->last = answer; ··· 200 200 static bool loss_4state(struct netem_sched_data *q) 201 201 { 202 202 struct clgstate *clg = &q->clg; 203 - u32 rnd = prandom_u32(); 203 + u32 rnd = get_random_u32(); 204 204 205 205 /* 206 206 * Makes a comparison between rnd and the transition ··· 268 268 269 269 switch (clg->state) { 270 270 case GOOD_STATE: 271 - if (prandom_u32() < clg->a1) 271 + if (get_random_u32() < clg->a1) 272 272 clg->state = BAD_STATE; 273 - if (prandom_u32() < clg->a4) 273 + if (get_random_u32() < clg->a4) 274 274 return true; 275 275 break; 276 276 case BAD_STATE: 277 - if (prandom_u32() < clg->a2) 277 + if (get_random_u32() < clg->a2) 278 278 clg->state = GOOD_STATE; 279 - if (prandom_u32() > clg->a3) 279 + if (get_random_u32() > clg->a3) 280 280 return true; 281 281 } 282 282 ··· 513 513 goto finish_segs; 514 514 } 515 515 516 - skb->data[prandom_u32() % skb_headlen(skb)] ^= 517 - 1<<(prandom_u32() % 8); 516 + skb->data[prandom_u32_max(skb_headlen(skb))] ^= 517 + 1<<prandom_u32_max(8); 518 518 } 519 519 520 520 if (unlikely(sch->q.qlen >= sch->limit)) { ··· 632 632 633 633 if (!q->slot_dist) 634 634 next_delay = q->slot_config.min_delay + 635 - (prandom_u32() * 635 + (get_random_u32() * 636 636 (q->slot_config.max_delay - 637 637 q->slot_config.min_delay) >> 32); 638 638 else
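Editor's note: the netem hunk touches get_crandom(), which produces correlated values by taking a fixed-point weighted average of a fresh sample and the previous output, with rho / 2^32 as the correlation weight. A self-contained userspace sketch of that mixing step, with the structure and arithmetic copied from the hunk and rand32() standing in for the kernel source:

/* Sketch of netem's correlated-random mix: each output is a fixed-point
 * weighted average of a fresh sample and the previous output. */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

static uint32_t rand32(void)
{
	return ((uint32_t)rand() << 16) ^ (uint32_t)rand();
}

struct crndstate {
	uint32_t last;
	uint32_t rho;
};

static uint32_t get_crandom(struct crndstate *state)
{
	uint64_t value, rho;
	uint32_t answer;

	if (!state || state->rho == 0)	/* no correlation */
		return rand32();

	value = rand32();
	rho = (uint64_t)state->rho + 1;
	answer = (value * ((1ull << 32) - rho) + state->last * rho) >> 32;
	state->last = answer;
	return answer;
}

int main(void)
{
	struct crndstate st = { .last = 0, .rho = 0xf0000000u };	/* strong correlation */

	for (int i = 0; i < 5; i++)
		printf("%u\n", get_crandom(&st));
	return 0;
}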
+1 -1
net/sched/sch_pie.c
··· 72 72 if (vars->accu_prob >= (MAX_PROB / 2) * 17) 73 73 return true; 74 74 75 - prandom_bytes(&rnd, 8); 75 + get_random_bytes(&rnd, 8); 76 76 if ((rnd >> BITS_PER_BYTE) < local_prob) { 77 77 vars->accu_prob = 0; 78 78 return true;
+1 -1
net/sched/sch_sfb.c
··· 379 379 goto enqueue; 380 380 } 381 381 382 - r = prandom_u32() & SFB_MAX_PROB; 382 + r = get_random_u16() & SFB_MAX_PROB; 383 383 384 384 if (unlikely(r < p_min)) { 385 385 if (unlikely(p_min > SFB_MAX_PROB / 2)) {
+2 -2
net/sctp/socket.c
··· 8319 8319 8320 8320 inet_get_local_port_range(net, &low, &high); 8321 8321 remaining = (high - low) + 1; 8322 - rover = prandom_u32() % remaining + low; 8322 + rover = prandom_u32_max(remaining) + low; 8323 8323 8324 8324 do { 8325 8325 rover++; ··· 9448 9448 newinet->inet_rcv_saddr = inet->inet_rcv_saddr; 9449 9449 newinet->inet_dport = htons(asoc->peer.port); 9450 9450 newinet->pmtudisc = inet->pmtudisc; 9451 - newinet->inet_id = prandom_u32(); 9451 + newinet->inet_id = get_random_u16(); 9452 9452 9453 9453 newinet->uc_ttl = inet->uc_ttl; 9454 9454 newinet->mc_loop = 1;
+2 -2
net/sunrpc/auth_gss/gss_krb5_wrap.c
··· 130 130 131 131 /* initialize to random value */ 132 132 if (i == 0) { 133 - i = prandom_u32(); 134 - i = (i << 32) | prandom_u32(); 133 + i = get_random_u32(); 134 + i = (i << 32) | get_random_u32(); 135 135 } 136 136 137 137 switch (conflen) {
+1 -1
net/sunrpc/cache.c
··· 677 677 678 678 /* Consider removing either the first or the last */ 679 679 if (cache_defer_cnt > DFR_MAX) { 680 - if (prandom_u32() & 1) 680 + if (prandom_u32_max(2)) 681 681 discard = list_entry(cache_defer_list.next, 682 682 struct cache_deferred_req, recent); 683 683 else
+1 -1
net/sunrpc/xprt.c
··· 1865 1865 static void 1866 1866 xprt_init_xid(struct rpc_xprt *xprt) 1867 1867 { 1868 - xprt->xid = prandom_u32(); 1868 + xprt->xid = get_random_u32(); 1869 1869 } 1870 1870 1871 1871 static void
+1 -1
net/sunrpc/xprtsock.c
··· 1619 1619 if (max < min) 1620 1620 return -EADDRINUSE; 1621 1621 range = max - min + 1; 1622 - rand = (unsigned short) prandom_u32() % range; 1622 + rand = prandom_u32_max(range); 1623 1623 return rand + min; 1624 1624 } 1625 1625
+1 -1
net/tipc/socket.c
··· 3010 3010 struct net *net = sock_net(sk); 3011 3011 struct tipc_net *tn = net_generic(net, tipc_net_id); 3012 3012 u32 remaining = (TIPC_MAX_PORT - TIPC_MIN_PORT) + 1; 3013 - u32 portid = prandom_u32() % remaining + TIPC_MIN_PORT; 3013 + u32 portid = prandom_u32_max(remaining) + TIPC_MIN_PORT; 3014 3014 3015 3015 while (remaining--) { 3016 3016 portid++;
+1 -1
net/unix/af_unix.c
··· 1147 1147 addr->name->sun_family = AF_UNIX; 1148 1148 refcount_set(&addr->refcnt, 1); 1149 1149 1150 - ordernum = prandom_u32(); 1150 + ordernum = get_random_u32(); 1151 1151 lastnum = ordernum & 0xFFFFF; 1152 1152 retry: 1153 1153 ordernum = (ordernum + 1) & 0xFFFFF;
+1 -1
net/xfrm/xfrm_state.c
··· 2072 2072 } else { 2073 2073 u32 spi = 0; 2074 2074 for (h = 0; h < high-low+1; h++) { 2075 - spi = low + prandom_u32()%(high-low+1); 2075 + spi = low + prandom_u32_max(high - low + 1); 2076 2076 x0 = xfrm_state_lookup(net, mark, &x->id.daddr, htonl(spi), x->id.proto, x->props.family); 2077 2077 if (x0 == NULL) { 2078 2078 newspi = htonl(spi);