Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

locking/lockdep: Avoid struct return in lock_stats()

Returning a large structure from the lock_stats() function causes clang
to have multiple copies of it on the stack and copy between them, which
can end up exceeding the frame size warning limit:

kernel/locking/lockdep.c:300:25: error: stack frame size (1464) exceeds limit (1280) in 'lock_stats' [-Werror,-Wframe-larger-than]
300 | struct lock_class_stats lock_stats(struct lock_class *class)

Change the calling conventions to directly operate on the caller's copy,
which apparently is what gcc does already.

Signed-off-by: Arnd Bergmann <arnd@arndb.de>
Signed-off-by: Boqun Feng <boqun.feng@gmail.com>
Link: https://lore.kernel.org/r/20250610092941.2642847-1-arnd@kernel.org

Authored by Arnd Bergmann; committed by Boqun Feng.
d7c36d63 f84a15b9

+14 -17
+1 -1
include/linux/lockdep_types.h
@@ -175,7 +175,7 @@
 	unsigned long bounces[nr_bounce_types];
 };
 
-struct lock_class_stats lock_stats(struct lock_class *class);
+void lock_stats(struct lock_class *class, struct lock_class_stats *stats);
 void clear_lock_stats(struct lock_class *class);
 #endif
 
+12 -15
kernel/locking/lockdep.c
@@ -297,33 +297,30 @@
 	dst->nr += src->nr;
 }
 
-struct lock_class_stats lock_stats(struct lock_class *class)
+void lock_stats(struct lock_class *class, struct lock_class_stats *stats)
 {
-	struct lock_class_stats stats;
 	int cpu, i;
 
-	memset(&stats, 0, sizeof(struct lock_class_stats));
+	memset(stats, 0, sizeof(struct lock_class_stats));
 	for_each_possible_cpu(cpu) {
 		struct lock_class_stats *pcs =
 			&per_cpu(cpu_lock_stats, cpu)[class - lock_classes];
 
-		for (i = 0; i < ARRAY_SIZE(stats.contention_point); i++)
-			stats.contention_point[i] += pcs->contention_point[i];
+		for (i = 0; i < ARRAY_SIZE(stats->contention_point); i++)
+			stats->contention_point[i] += pcs->contention_point[i];
 
-		for (i = 0; i < ARRAY_SIZE(stats.contending_point); i++)
-			stats.contending_point[i] += pcs->contending_point[i];
+		for (i = 0; i < ARRAY_SIZE(stats->contending_point); i++)
+			stats->contending_point[i] += pcs->contending_point[i];
 
-		lock_time_add(&pcs->read_waittime, &stats.read_waittime);
-		lock_time_add(&pcs->write_waittime, &stats.write_waittime);
+		lock_time_add(&pcs->read_waittime, &stats->read_waittime);
+		lock_time_add(&pcs->write_waittime, &stats->write_waittime);
 
-		lock_time_add(&pcs->read_holdtime, &stats.read_holdtime);
-		lock_time_add(&pcs->write_holdtime, &stats.write_holdtime);
+		lock_time_add(&pcs->read_holdtime, &stats->read_holdtime);
+		lock_time_add(&pcs->write_holdtime, &stats->write_holdtime);
 
-		for (i = 0; i < ARRAY_SIZE(stats.bounces); i++)
-			stats.bounces[i] += pcs->bounces[i];
+		for (i = 0; i < ARRAY_SIZE(stats->bounces); i++)
+			stats->bounces[i] += pcs->bounces[i];
 	}
-
-	return stats;
 }
 
 void clear_lock_stats(struct lock_class *class)
+1 -1
kernel/locking/lockdep_proc.c
@@ -657,7 +657,7 @@
 		if (!test_bit(idx, lock_classes_in_use))
 			continue;
 		iter->class = class;
-		iter->stats = lock_stats(class);
+		lock_stats(class, &iter->stats);
 		iter++;
 	}
 