Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

crypto: Enable context analysis

Enable context analysis for crypto subsystem.

This demonstrates a larger conversion to use Clang's context
analysis. The benefit is additional static checking of locking rules,
along with better documentation.

Note the use of the __acquire_ret macro to define an API where a
function returns a pointer to an object (struct scomp_scratch) with a
lock held. Additionally, the analysis only resolves aliases where the
analysis unambiguously sees that a variable was not reassigned after
initialization, requiring minor code changes.

Signed-off-by: Marco Elver <elver@google.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://patch.msgid.link/20251219154418.3592607-36-elver@google.com

authored by

Marco Elver and committed by
Peter Zijlstra
dc36d55d 87335b61

+35 -21
+2
crypto/Makefile
··· 3 3 # Cryptographic API 4 4 # 5 5 6 + CONTEXT_ANALYSIS := y 7 + 6 8 obj-$(CONFIG_CRYPTO) += crypto.o 7 9 crypto-y := api.o cipher.o 8 10
+3 -3
crypto/acompress.c
··· 449 449 } 450 450 EXPORT_SYMBOL_GPL(crypto_acomp_alloc_streams); 451 451 452 - struct crypto_acomp_stream *crypto_acomp_lock_stream_bh( 453 - struct crypto_acomp_streams *s) __acquires(stream) 452 + struct crypto_acomp_stream *_crypto_acomp_lock_stream_bh( 453 + struct crypto_acomp_streams *s) 454 454 { 455 455 struct crypto_acomp_stream __percpu *streams = s->streams; 456 456 int cpu = raw_smp_processor_id(); ··· 469 469 spin_lock(&ps->lock); 470 470 return ps; 471 471 } 472 - EXPORT_SYMBOL_GPL(crypto_acomp_lock_stream_bh); 472 + EXPORT_SYMBOL_GPL(_crypto_acomp_lock_stream_bh); 473 473 474 474 void acomp_walk_done_src(struct acomp_walk *walk, int used) 475 475 {
+2
crypto/algapi.c
··· 244 244 245 245 static void crypto_alg_finish_registration(struct crypto_alg *alg, 246 246 struct list_head *algs_to_put) 247 + __must_hold(&crypto_alg_sem) 247 248 { 248 249 struct crypto_alg *q; 249 250 ··· 300 299 301 300 static struct crypto_larval * 302 301 __crypto_register_alg(struct crypto_alg *alg, struct list_head *algs_to_put) 302 + __must_hold(&crypto_alg_sem) 303 303 { 304 304 struct crypto_alg *q; 305 305 struct crypto_larval *larval;
+1
crypto/api.c
··· 57 57 58 58 static struct crypto_alg *__crypto_alg_lookup(const char *name, u32 type, 59 59 u32 mask) 60 + __must_hold_shared(&crypto_alg_sem) 60 61 { 61 62 struct crypto_alg *q, *alg = NULL; 62 63 int best = -2;
+1 -1
crypto/crypto_engine.c
··· 453 453 snprintf(engine->name, sizeof(engine->name), 454 454 "%s-engine", dev_name(dev)); 455 455 456 - crypto_init_queue(&engine->queue, qlen); 457 456 spin_lock_init(&engine->queue_lock); 457 + crypto_init_queue(&engine->queue, qlen); 458 458 459 459 engine->kworker = kthread_run_worker(0, "%s", engine->name); 460 460 if (IS_ERR(engine->kworker)) {
+5
crypto/drbg.c
··· 232 232 */ 233 233 static int drbg_fips_continuous_test(struct drbg_state *drbg, 234 234 const unsigned char *entropy) 235 + __must_hold(&drbg->drbg_mutex) 235 236 { 236 237 unsigned short entropylen = drbg_sec_strength(drbg->core->flags); 237 238 int ret = 0; ··· 849 848 static inline int drbg_get_random_bytes(struct drbg_state *drbg, 850 849 unsigned char *entropy, 851 850 unsigned int entropylen) 851 + __must_hold(&drbg->drbg_mutex) 852 852 { 853 853 int ret; 854 854 ··· 864 862 } 865 863 866 864 static int drbg_seed_from_random(struct drbg_state *drbg) 865 + __must_hold(&drbg->drbg_mutex) 867 866 { 868 867 struct drbg_string data; 869 868 LIST_HEAD(seedlist); ··· 922 919 */ 923 920 static int drbg_seed(struct drbg_state *drbg, struct drbg_string *pers, 924 921 bool reseed) 922 + __must_hold(&drbg->drbg_mutex) 925 923 { 926 924 int ret; 927 925 unsigned char entropy[((32 + 16) * 2)]; ··· 1157 1153 static int drbg_generate(struct drbg_state *drbg, 1158 1154 unsigned char *buf, unsigned int buflen, 1159 1155 struct drbg_string *addtl) 1156 + __must_hold(&drbg->drbg_mutex) 1160 1157 { 1161 1158 int len = 0; 1162 1159 LIST_HEAD(addtllist);
+1 -1
crypto/internal.h
··· 61 61 /* Maximum number of (rtattr) parameters for each template. */ 62 62 #define CRYPTO_MAX_ATTRS 32 63 63 64 - extern struct list_head crypto_alg_list; 65 64 extern struct rw_semaphore crypto_alg_sem; 65 + extern struct list_head crypto_alg_list __guarded_by(&crypto_alg_sem); 66 66 extern struct blocking_notifier_head crypto_chain; 67 67 68 68 int alg_test(const char *driver, const char *alg, u32 type, u32 mask);
+3
crypto/proc.c
··· 19 19 #include "internal.h" 20 20 21 21 static void *c_start(struct seq_file *m, loff_t *pos) 22 + __acquires_shared(&crypto_alg_sem) 22 23 { 23 24 down_read(&crypto_alg_sem); 24 25 return seq_list_start(&crypto_alg_list, *pos); 25 26 } 26 27 27 28 static void *c_next(struct seq_file *m, void *p, loff_t *pos) 29 + __must_hold_shared(&crypto_alg_sem) 28 30 { 29 31 return seq_list_next(p, &crypto_alg_list, pos); 30 32 } 31 33 32 34 static void c_stop(struct seq_file *m, void *p) 35 + __releases_shared(&crypto_alg_sem) 33 36 { 34 37 up_read(&crypto_alg_sem); 35 38 }
+12 -12
crypto/scompress.c
··· 28 28 struct scomp_scratch { 29 29 spinlock_t lock; 30 30 union { 31 - void *src; 32 - unsigned long saddr; 31 + void *src __guarded_by(&lock); 32 + unsigned long saddr __guarded_by(&lock); 33 33 }; 34 34 }; 35 35 ··· 38 38 }; 39 39 40 40 static const struct crypto_type crypto_scomp_type; 41 - static int scomp_scratch_users; 42 41 static DEFINE_MUTEX(scomp_lock); 42 + static int scomp_scratch_users __guarded_by(&scomp_lock); 43 43 44 44 static cpumask_t scomp_scratch_want; 45 45 static void scomp_scratch_workfn(struct work_struct *work); ··· 67 67 } 68 68 69 69 static void crypto_scomp_free_scratches(void) 70 + __context_unsafe(/* frees @scratch */) 70 71 { 71 72 struct scomp_scratch *scratch; 72 73 int i; ··· 102 101 struct scomp_scratch *scratch; 103 102 104 103 scratch = per_cpu_ptr(&scomp_scratch, cpu); 105 - if (scratch->src) 104 + if (context_unsafe(scratch->src)) 106 105 continue; 107 106 if (scomp_alloc_scratch(scratch, cpu)) 108 107 break; ··· 112 111 } 113 112 114 113 static int crypto_scomp_alloc_scratches(void) 114 + __context_unsafe(/* allocates @scratch */) 115 115 { 116 116 unsigned int i = cpumask_first(cpu_possible_mask); 117 117 struct scomp_scratch *scratch; ··· 141 139 return ret; 142 140 } 143 141 144 - static struct scomp_scratch *scomp_lock_scratch(void) __acquires(scratch) 142 + #define scomp_lock_scratch(...) 
__acquire_ret(_scomp_lock_scratch(__VA_ARGS__), &__ret->lock) 143 + static struct scomp_scratch *_scomp_lock_scratch(void) __acquires_ret 145 144 { 146 145 int cpu = raw_smp_processor_id(); 147 146 struct scomp_scratch *scratch; ··· 162 159 } 163 160 164 161 static inline void scomp_unlock_scratch(struct scomp_scratch *scratch) 165 - __releases(scratch) 162 + __releases(&scratch->lock) 166 163 { 167 164 spin_unlock(&scratch->lock); 168 165 } ··· 174 171 bool src_isvirt = acomp_request_src_isvirt(req); 175 172 bool dst_isvirt = acomp_request_dst_isvirt(req); 176 173 struct crypto_scomp *scomp = *tfm_ctx; 177 - struct crypto_acomp_stream *stream; 178 - struct scomp_scratch *scratch; 179 174 unsigned int slen = req->slen; 180 175 unsigned int dlen = req->dlen; 181 176 struct page *spage, *dpage; ··· 233 232 } while (0); 234 233 } 235 234 236 - stream = crypto_acomp_lock_stream_bh(&crypto_scomp_alg(scomp)->streams); 235 + struct crypto_acomp_stream *stream = crypto_acomp_lock_stream_bh(&crypto_scomp_alg(scomp)->streams); 237 236 238 237 if (!src_isvirt && !src) { 239 - const u8 *src; 238 + struct scomp_scratch *scratch = scomp_lock_scratch(); 239 + const u8 *src = scratch->src; 240 240 241 - scratch = scomp_lock_scratch(); 242 - src = scratch->src; 243 241 memcpy_from_sglist(scratch->src, req->src, 0, slen); 244 242 245 243 if (dir)
+4 -3
include/crypto/internal/acompress.h
··· 191 191 void crypto_acomp_free_streams(struct crypto_acomp_streams *s); 192 192 int crypto_acomp_alloc_streams(struct crypto_acomp_streams *s); 193 193 194 - struct crypto_acomp_stream *crypto_acomp_lock_stream_bh( 195 - struct crypto_acomp_streams *s) __acquires(stream); 194 + #define crypto_acomp_lock_stream_bh(...) __acquire_ret(_crypto_acomp_lock_stream_bh(__VA_ARGS__), &__ret->lock); 195 + struct crypto_acomp_stream *_crypto_acomp_lock_stream_bh( 196 + struct crypto_acomp_streams *s) __acquires_ret; 196 197 197 198 static inline void crypto_acomp_unlock_stream_bh( 198 - struct crypto_acomp_stream *stream) __releases(stream) 199 + struct crypto_acomp_stream *stream) __releases(&stream->lock) 199 200 { 200 201 spin_unlock_bh(&stream->lock); 201 202 }
+1 -1
include/crypto/internal/engine.h
··· 45 45 46 46 struct list_head list; 47 47 spinlock_t queue_lock; 48 - struct crypto_queue queue; 48 + struct crypto_queue queue __guarded_by(&queue_lock); 49 49 struct device *dev; 50 50 51 51 struct kthread_worker *kworker;