Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

Merge branch 'bpf-fix-u32-s32-bounds-when-ranges-cross-min-max-boundary'

Eduard Zingerman says:

====================
bpf: Fix u32/s32 bounds when ranges cross min/max boundary

Cover the following cases in range refinement logic for 32-bit ranges:
- s32 range crosses U32_MAX/0 boundary, positive part of the s32 range
overlaps with u32 range.
- s32 range crosses U32_MAX/0 boundary, negative part of the s32 range
overlaps with u32 range.

These cases are already handled for 64-bit range refinement.

Without the fix the test in patch 2 is rejected by the verifier.
The test was reduced from a sched-ext program.

Changelog:
- v2 -> v3:
- Reverted da653de268d3 (Paul)
- Removed !BPF_F_TEST_REG_INVARIANTS flag from
crossing_32_bit_signed_boundary_2() (Paul)
- v1 -> v2:
- Extended commit message and comments (Emil)
- Targeting 'bpf' tree instead of bpf-next (Alexei)

v1: https://lore.kernel.org/bpf/9a23fbacdc6d33ec8fcb3f6988395b5129f75369.camel@gmail.com/T
v2: https://lore.kernel.org/bpf/20260305-bpf-32-bit-range-overflow-v2-0-7169206a3041@gmail.com/
---
====================

Link: https://patch.msgid.link/20260306-bpf-32-bit-range-overflow-v3-0-f7f67e060a6b@gmail.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>

+120 -19
+24
kernel/bpf/verifier.c
··· 2511 2511 if ((u32)reg->s32_min_value <= (u32)reg->s32_max_value) { 2512 2512 reg->u32_min_value = max_t(u32, reg->s32_min_value, reg->u32_min_value); 2513 2513 reg->u32_max_value = min_t(u32, reg->s32_max_value, reg->u32_max_value); 2514 + } else { 2515 + if (reg->u32_max_value < (u32)reg->s32_min_value) { 2516 + /* See __reg64_deduce_bounds() for detailed explanation. 2517 + * Refine ranges in the following situation: 2518 + * 2519 + * 0 U32_MAX 2520 + * | [xxxxxxxxxxxxxx u32 range xxxxxxxxxxxxxx] | 2521 + * |----------------------------|----------------------------| 2522 + * |xxxxx s32 range xxxxxxxxx] [xxxxxxx| 2523 + * 0 S32_MAX S32_MIN -1 2524 + */ 2525 + reg->s32_min_value = (s32)reg->u32_min_value; 2526 + reg->u32_max_value = min_t(u32, reg->u32_max_value, reg->s32_max_value); 2527 + } else if ((u32)reg->s32_max_value < reg->u32_min_value) { 2528 + /* 2529 + * 0 U32_MAX 2530 + * | [xxxxxxxxxxxxxx u32 range xxxxxxxxxxxxxx] | 2531 + * |----------------------------|----------------------------| 2532 + * |xxxxxxxxx] [xxxxxxxxxxxx s32 range | 2533 + * 0 S32_MAX S32_MIN -1 2534 + */ 2535 + reg->s32_max_value = (s32)reg->u32_max_value; 2536 + reg->u32_min_value = max_t(u32, reg->u32_min_value, reg->s32_min_value); 2537 + } 2514 2538 } 2515 2539 } 2516 2540
+58 -18
tools/testing/selftests/bpf/prog_tests/reg_bounds.c
··· 422 422 } 423 423 } 424 424 425 - static struct range range_improve(enum num_t t, struct range old, struct range new) 425 + static struct range range_intersection(enum num_t t, struct range old, struct range new) 426 426 { 427 427 return range(t, max_t(t, old.a, new.a), min_t(t, old.b, new.b)); 428 + } 429 + 430 + /* 431 + * Result is precise when 'x' and 'y' overlap or form a continuous range, 432 + * result is an over-approximation if 'x' and 'y' do not overlap. 433 + */ 434 + static struct range range_union(enum num_t t, struct range x, struct range y) 435 + { 436 + if (!is_valid_range(t, x)) 437 + return y; 438 + if (!is_valid_range(t, y)) 439 + return x; 440 + return range(t, min_t(t, x.a, y.a), max_t(t, x.b, y.b)); 441 + } 442 + 443 + /* 444 + * This function attempts to improve the x range by intersecting it with y. 445 + * range_cast(... to_t ...) loses precision for ranges that pass to_t 446 + * min/max boundaries. To avoid such precision losses this function 447 + * splits both x and y into halves corresponding to non-overflowing 448 + * sub-ranges: [0, smax] and [smin, -1]. 449 + * Final result is computed as follows: 450 + * 451 + * ((x ∩ [0, smax]) ∩ (y ∩ [0, smax])) ∪ 452 + * ((x ∩ [smin,-1]) ∩ (y ∩ [smin,-1])) 453 + * 454 + * Precision might still be lost if final union is not a continuous range. 
455 + */ 456 + static struct range range_refine_in_halves(enum num_t x_t, struct range x, 457 + enum num_t y_t, struct range y) 458 + { 459 + struct range x_pos, x_neg, y_pos, y_neg, r_pos, r_neg; 460 + u64 smax, smin, neg_one; 461 + 462 + if (t_is_32(x_t)) { 463 + smax = (u64)(u32)S32_MAX; 464 + smin = (u64)(u32)S32_MIN; 465 + neg_one = (u64)(u32)(s32)(-1); 466 + } else { 467 + smax = (u64)S64_MAX; 468 + smin = (u64)S64_MIN; 469 + neg_one = U64_MAX; 470 + } 471 + x_pos = range_intersection(x_t, x, range(x_t, 0, smax)); 472 + x_neg = range_intersection(x_t, x, range(x_t, smin, neg_one)); 473 + y_pos = range_intersection(y_t, y, range(x_t, 0, smax)); 474 + y_neg = range_intersection(y_t, y, range(y_t, smin, neg_one)); 475 + r_pos = range_intersection(x_t, x_pos, range_cast(y_t, x_t, y_pos)); 476 + r_neg = range_intersection(x_t, x_neg, range_cast(y_t, x_t, y_neg)); 477 + return range_union(x_t, r_pos, r_neg); 478 + 428 479 } 429 480 430 481 static struct range range_refine(enum num_t x_t, struct range x, enum num_t y_t, struct range y) 431 482 { 432 483 struct range y_cast; 484 + 485 + if (t_is_32(x_t) == t_is_32(y_t)) 486 + x = range_refine_in_halves(x_t, x, y_t, y); 433 487 434 488 y_cast = range_cast(y_t, x_t, y); 435 489 ··· 498 444 */ 499 445 if (x_t == S64 && y_t == S32 && y_cast.a <= S32_MAX && y_cast.b <= S32_MAX && 500 446 (s64)x.a >= S32_MIN && (s64)x.b <= S32_MAX) 501 - return range_improve(x_t, x, y_cast); 447 + return range_intersection(x_t, x, y_cast); 502 448 503 449 /* the case when new range knowledge, *y*, is a 32-bit subregister 504 450 * range, while previous range knowledge, *x*, is a full register ··· 516 462 x_swap = range(x_t, swap_low32(x.a, y_cast.a), swap_low32(x.b, y_cast.b)); 517 463 if (!is_valid_range(x_t, x_swap)) 518 464 return x; 519 - return range_improve(x_t, x, x_swap); 520 - } 521 - 522 - if (!t_is_32(x_t) && !t_is_32(y_t) && x_t != y_t) { 523 - if (x_t == S64 && x.a > x.b) { 524 - if (x.b < y.a && x.a <= y.b) 525 - return 
range(x_t, x.a, y.b); 526 - if (x.a > y.b && x.b >= y.a) 527 - return range(x_t, y.a, x.b); 528 - } else if (x_t == U64 && y.a > y.b) { 529 - if (y.b < x.a && y.a <= x.b) 530 - return range(x_t, y.a, x.b); 531 - if (y.a > x.b && y.b >= x.a) 532 - return range(x_t, x.a, y.b); 533 - } 465 + return range_intersection(x_t, x, x_swap); 534 466 } 535 467 536 468 /* otherwise, plain range cast and intersection works */ 537 - return range_improve(x_t, x, y_cast); 469 + return range_intersection(x_t, x, y_cast); 538 470 } 539 471 540 472 /* =======================
+38 -1
tools/testing/selftests/bpf/progs/verifier_bounds.c
··· 1148 1148 SEC("xdp") 1149 1149 __description("bound check with JMP32_JSLT for crossing 32-bit signed boundary") 1150 1150 __success __retval(0) 1151 - __flag(!BPF_F_TEST_REG_INVARIANTS) /* known invariants violation */ 1151 + __flag(BPF_F_TEST_REG_INVARIANTS) 1152 1152 __naked void crossing_32_bit_signed_boundary_2(void) 1153 1153 { 1154 1154 asm volatile (" \ ··· 1995 1995 if r0 == 0x10 goto +1; \ 1996 1996 r10 = 0; \ 1997 1997 exit; \ 1998 + " : 1999 + : __imm(bpf_get_prandom_u32) 2000 + : __clobber_all); 2001 + } 2002 + 2003 + SEC("socket") 2004 + __success 2005 + __flag(BPF_F_TEST_REG_INVARIANTS) 2006 + __naked void signed_unsigned_intersection32_case1(void *ctx) 2007 + { 2008 + asm volatile(" \ 2009 + call %[bpf_get_prandom_u32]; \ 2010 + w0 &= 0xffffffff; \ 2011 + if w0 < 0x3 goto 1f; /* on fall-through u32 range [3..U32_MAX] */ \ 2012 + if w0 s> 0x1 goto 1f; /* on fall-through s32 range [S32_MIN..1] */ \ 2013 + if w0 s< 0x0 goto 1f; /* range can be narrowed to [S32_MIN..-1] */ \ 2014 + r10 = 0; /* thus predicting the jump. */ \ 2015 + 1: exit; \ 2016 + " : 2017 + : __imm(bpf_get_prandom_u32) 2018 + : __clobber_all); 2019 + } 2020 + 2021 + SEC("socket") 2022 + __success 2023 + __flag(BPF_F_TEST_REG_INVARIANTS) 2024 + __naked void signed_unsigned_intersection32_case2(void *ctx) 2025 + { 2026 + asm volatile(" \ 2027 + call %[bpf_get_prandom_u32]; \ 2028 + w0 &= 0xffffffff; \ 2029 + if w0 > 0x80000003 goto 1f; /* on fall-through u32 range [0..S32_MIN+3] */ \ 2030 + if w0 s< -3 goto 1f; /* on fall-through s32 range [-3..S32_MAX] */ \ 2031 + if w0 s> 5 goto 1f; /* on fall-through s32 range [-3..5] */ \ 2032 + if w0 <= 5 goto 1f; /* range can be narrowed to [0..5] */ \ 2033 + r10 = 0; /* thus predicting the jump */ \ 2034 + 1: exit; \ 1998 2035 " : 1999 2036 : __imm(bpf_get_prandom_u32) 2000 2037 : __clobber_all);