Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

Merge tag 'random_for_linus_stable' of git://git.kernel.org/pub/scm/linux/kernel/git/tytso/random

Pull randomness fixes from Ted Ts'o:
"Improve performance by using a lockless update mechanism suggested by
Linus, and make sure we refresh the per-CPU entropy returned by get_random_*
as soon as the CRNG is initialized"

* tag 'random_for_linus_stable' of git://git.kernel.org/pub/scm/linux/kernel/git/tytso/random:
random: invalidate batched entropy after crng init
random: use lockless method of accessing and updating f->reg_idx
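For illustration, the lockless f->reg_idx change in the second patch boils down to a benign-race read-modify-write: if two callers race, the worst case is that both sample the same saved register, which is harmless for entropy harvesting, so the local_irq_save()/restore() pair can be dropped. Below is a minimal userspace sketch of that pattern, assuming C11 relaxed atomics as a stand-in for the kernel's READ_ONCE()/WRITE_ONCE(); NREGS and the regs[] array are hypothetical substitutes for struct pt_regs, not anything from random.c.

/* lockless_idx.c - sketch of the lockless index-cycling pattern.
 * Build: cc -std=c11 lockless_idx.c */
#include <stdatomic.h>
#include <stdio.h>

#define NREGS 16                 /* stand-in for sizeof(struct pt_regs) / sizeof(__u32) */

static unsigned int regs[NREGS]; /* pretend register file */
static atomic_uint reg_idx;      /* shared cursor, updated without a lock */

/* Cycle through the "registers" with no lock and no irq masking.
 * A concurrent caller may observe a stale index and sample the same
 * slot twice; for entropy mixing that is an acceptable race. */
static unsigned int get_reg(void)
{
	unsigned int idx = atomic_load_explicit(&reg_idx, memory_order_relaxed);
	unsigned int val;

	if (idx >= NREGS)
		idx = 0;
	val = regs[idx++];
	atomic_store_explicit(&reg_idx, idx, memory_order_relaxed);
	return val;
}

int main(void)
{
	for (unsigned int i = 0; i < NREGS; i++)
		regs[i] = i * 0x9e3779b9u;   /* arbitrary fill */
	for (int i = 0; i < 4; i++)
		printf("sample %d: 0x%x\n", i, get_reg());
	return 0;
}

The relaxed ordering mirrors the kernel change: correctness does not depend on every CPU seeing the latest index, only on loads and stores of the index not tearing.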

+43 -6
drivers/char/random.c
···
/*
 * random.c -- A strong random number generator
 *
+ * Copyright (C) 2017 Jason A. Donenfeld <Jason@zx2c4.com>. All
+ * Rights Reserved.
+ *
 * Copyright Matt Mackall <mpm@selenic.com>, 2003, 2004, 2005
 *
 * Copyright Theodore Ts'o, 1994, 1995, 1996, 1997, 1998, 1999. All
···
static struct crng_state **crng_node_pool __read_mostly;
#endif

+static void invalidate_batched_entropy(void);
+
static void crng_initialize(struct crng_state *crng)
{
	int i;
···
		cp++; crng_init_cnt++; len--;
	}
	if (crng_init_cnt >= CRNG_INIT_CNT_THRESH) {
+		invalidate_batched_entropy();
		crng_init = 1;
		wake_up_interruptible(&crng_init_wait);
		pr_notice("random: fast init done\n");
···
	memzero_explicit(&buf, sizeof(buf));
	crng->init_time = jiffies;
	if (crng == &primary_crng && crng_init < 2) {
+		invalidate_batched_entropy();
		crng_init = 2;
		process_random_ready_list();
		wake_up_interruptible(&crng_init_wait);
···
static __u32 get_reg(struct fast_pool *f, struct pt_regs *regs)
{
	__u32 *ptr = (__u32 *) regs;
-	unsigned long flags;
+	unsigned int idx;

	if (regs == NULL)
		return 0;
-	local_irq_save(flags);
-	if (f->reg_idx >= sizeof(struct pt_regs) / sizeof(__u32))
-		f->reg_idx = 0;
-	ptr += f->reg_idx++;
-	local_irq_restore(flags);
+	idx = READ_ONCE(f->reg_idx);
+	if (idx >= sizeof(struct pt_regs) / sizeof(__u32))
+		idx = 0;
+	ptr += idx++;
+	WRITE_ONCE(f->reg_idx, idx);
	return *ptr;
}
···
	};
	unsigned int position;
};
+static rwlock_t batched_entropy_reset_lock = __RW_LOCK_UNLOCKED(batched_entropy_reset_lock);

/*
 * Get a random word for internal kernel use only. The quality of the random
···
u64 get_random_u64(void)
{
	u64 ret;
+	bool use_lock = crng_init < 2;
+	unsigned long flags;
	struct batched_entropy *batch;

#if BITS_PER_LONG == 64
···
#endif

	batch = &get_cpu_var(batched_entropy_u64);
+	if (use_lock)
+		read_lock_irqsave(&batched_entropy_reset_lock, flags);
	if (batch->position % ARRAY_SIZE(batch->entropy_u64) == 0) {
		extract_crng((u8 *)batch->entropy_u64);
		batch->position = 0;
	}
	ret = batch->entropy_u64[batch->position++];
+	if (use_lock)
+		read_unlock_irqrestore(&batched_entropy_reset_lock, flags);
	put_cpu_var(batched_entropy_u64);
	return ret;
}
···
u32 get_random_u32(void)
{
	u32 ret;
+	bool use_lock = crng_init < 2;
+	unsigned long flags;
	struct batched_entropy *batch;

	if (arch_get_random_int(&ret))
		return ret;

	batch = &get_cpu_var(batched_entropy_u32);
+	if (use_lock)
+		read_lock_irqsave(&batched_entropy_reset_lock, flags);
	if (batch->position % ARRAY_SIZE(batch->entropy_u32) == 0) {
		extract_crng((u8 *)batch->entropy_u32);
		batch->position = 0;
	}
	ret = batch->entropy_u32[batch->position++];
+	if (use_lock)
+		read_unlock_irqrestore(&batched_entropy_reset_lock, flags);
	put_cpu_var(batched_entropy_u32);
	return ret;
}
EXPORT_SYMBOL(get_random_u32);
+
+/* It's important to invalidate all potential batched entropy that might
+ * be stored before the crng is initialized, which we can do lazily by
+ * simply resetting the counter to zero so that it's re-extracted on the
+ * next usage. */
+static void invalidate_batched_entropy(void)
+{
+	int cpu;
+	unsigned long flags;
+
+	write_lock_irqsave(&batched_entropy_reset_lock, flags);
+	for_each_possible_cpu(cpu) {
+		per_cpu_ptr(&batched_entropy_u32, cpu)->position = 0;
+		per_cpu_ptr(&batched_entropy_u64, cpu)->position = 0;
+	}
+	write_unlock_irqrestore(&batched_entropy_reset_lock, flags);
+}

/**
 * randomize_page - Generate a random, page aligned address
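To make the first patch's lazy invalidation concrete, here is a minimal single-batch userspace sketch, not the kernel code: a pthread rwlock stands in for batched_entropy_reset_lock, extract_crng() is replaced by a deterministic filler, and the crng_ready flag plays the role of crng_init >= 2. Resetting position to zero is the entire invalidation; the next consumer then re-extracts fresh output.

/* batched_entropy.c - sketch of batch-and-lazily-invalidate.
 * Build: cc -std=c11 batched_entropy.c -lpthread */
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

#define BATCH_WORDS 8

struct batched_entropy {
	uint64_t entropy[BATCH_WORDS];
	unsigned int position;
};

static struct batched_entropy batch;   /* per-CPU in the kernel; one batch here */
static pthread_rwlock_t reset_lock = PTHREAD_RWLOCK_INITIALIZER;
static int crng_ready;                 /* stand-in for crng_init >= 2 */

/* Deterministic stand-in for extract_crng(): NOT random, just visible. */
static void fake_extract_crng(uint64_t *buf)
{
	static uint64_t ctr;

	for (int i = 0; i < BATCH_WORDS; i++)
		buf[i] = ++ctr * 0x2545f4914f6cdd1dULL;
}

uint64_t get_random_u64(void)
{
	uint64_t ret;
	int use_lock = !crng_ready;   /* lock only while a reseed can still race us */

	if (use_lock)
		pthread_rwlock_rdlock(&reset_lock);
	if (batch.position % BATCH_WORDS == 0) {   /* batch empty: refill */
		fake_extract_crng(batch.entropy);
		batch.position = 0;
	}
	ret = batch.entropy[batch.position++];
	if (use_lock)
		pthread_rwlock_unlock(&reset_lock);
	return ret;
}

/* Lazy invalidation: resetting position forces the next caller to
 * re-extract, discarding words produced before the CRNG was seeded. */
void invalidate_batched_entropy(void)
{
	pthread_rwlock_wrlock(&reset_lock);
	batch.position = 0;
	pthread_rwlock_unlock(&reset_lock);
}

int main(void)
{
	printf("pre-init:  0x%llx\n", (unsigned long long)get_random_u64());
	invalidate_batched_entropy();   /* e.g. on "fast init done" */
	crng_ready = 1;
	printf("post-init: 0x%llx\n", (unsigned long long)get_random_u64());
	return 0;
}

Note the design choice the use_lock flag encodes: the read lock is taken only while the generator is still unseeded, so the common post-init path stays lock-free, while invalidate_batched_entropy() takes the write lock to keep a concurrent reader from consuming a word mid-reset.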