Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

lib/crc32: Remove unused combination support

Remove crc32_le_combine() and crc32_le_shift(), since they are no longer
used.

Although combination is an interesting thing that can be done with CRCs,
it turned out that none of the users of it in the kernel were even close
to being worthwhile. All were much better off simply chaining the CRCs
or processing zeroes.

Let's remove the CRC32 combination code for now. It can come back
(potentially optimized with carryless multiplication instructions) if
there is ever a case where it would actually be worthwhile.

Link: https://lore.kernel.org/r/20250607032228.27868-1-ebiggers@kernel.org
Signed-off-by: Eric Biggers <ebiggers@kernel.org>

+1 -130
-25
include/linux/crc32.h
··· 48 48 static inline u32 crc32_optimizations(void) { return 0; } 49 49 #endif 50 50 51 - /** 52 - * crc32_le_combine - Combine two crc32 check values into one. For two 53 - * sequences of bytes, seq1 and seq2 with lengths len1 54 - * and len2, crc32_le() check values were calculated 55 - * for each, crc1 and crc2. 56 - * 57 - * @crc1: crc32 of the first block 58 - * @crc2: crc32 of the second block 59 - * @len2: length of the second block 60 - * 61 - * Return: The crc32_le() check value of seq1 and seq2 concatenated, 62 - * requiring only crc1, crc2, and len2. Note: If seq_full denotes 63 - * the concatenated memory area of seq1 with seq2, and crc_full 64 - * the crc32_le() value of seq_full, then crc_full == 65 - * crc32_le_combine(crc1, crc2, len2) when crc_full was seeded 66 - * with the same initializer as crc1, and crc2 seed was 0. See 67 - * also crc32_combine_test(). 68 - */ 69 - u32 crc32_le_shift(u32 crc, size_t len); 70 - 71 - static inline u32 crc32_le_combine(u32 crc1, u32 crc2, size_t len2) 72 - { 73 - return crc32_le_shift(crc1, len2) ^ crc2; 74 - } 75 - 76 51 #define crc32(seed, data, length) crc32_le(seed, (unsigned char const *)(data), length) 77 52 78 53 /*
-67
lib/crc32.c
··· 25 25 /* see: Documentation/staging/crc32.rst for a description of algorithms */ 26 26 27 27 #include <linux/crc32.h> 28 - #include <linux/crc32poly.h> 29 28 #include <linux/module.h> 30 29 #include <linux/types.h> 31 30 ··· 49 50 return crc; 50 51 } 51 52 EXPORT_SYMBOL(crc32c_base); 52 - 53 - /* 54 - * This multiplies the polynomials x and y modulo the given modulus. 55 - * This follows the "little-endian" CRC convention that the lsbit 56 - * represents the highest power of x, and the msbit represents x^0. 57 - */ 58 - static u32 gf2_multiply(u32 x, u32 y, u32 modulus) 59 - { 60 - u32 product = x & 1 ? y : 0; 61 - int i; 62 - 63 - for (i = 0; i < 31; i++) { 64 - product = (product >> 1) ^ (product & 1 ? modulus : 0); 65 - x >>= 1; 66 - product ^= x & 1 ? y : 0; 67 - } 68 - 69 - return product; 70 - } 71 - 72 - /** 73 - * crc32_generic_shift - Append @len 0 bytes to crc, in logarithmic time 74 - * @crc: The original little-endian CRC (i.e. lsbit is x^31 coefficient) 75 - * @len: The number of bytes. @crc is multiplied by x^(8*@len) 76 - * @polynomial: The modulus used to reduce the result to 32 bits. 77 - * 78 - * It's possible to parallelize CRC computations by computing a CRC 79 - * over separate ranges of a buffer, then summing them. 80 - * This shifts the given CRC by 8*len bits (i.e. produces the same effect 81 - * as appending len bytes of zero to the data), in time proportional 82 - * to log(len). 83 - */ 84 - static u32 crc32_generic_shift(u32 crc, size_t len, u32 polynomial) 85 - { 86 - u32 power = polynomial; /* CRC of x^32 */ 87 - int i; 88 - 89 - /* Shift up to 32 bits in the simple linear way */ 90 - for (i = 0; i < 8 * (int)(len & 3); i++) 91 - crc = (crc >> 1) ^ (crc & 1 ? polynomial : 0); 92 - 93 - len >>= 2; 94 - if (!len) 95 - return crc; 96 - 97 - for (;;) { 98 - /* "power" is x^(2^i), modulo the polynomial */ 99 - if (len & 1) 100 - crc = gf2_multiply(crc, power, polynomial); 101 - 102 - len >>= 1; 103 - if (!len) 104 - break; 105 - 106 - /* Square power, advancing to x^(2^(i+1)) */ 107 - power = gf2_multiply(power, power, polynomial); 108 - } 109 - 110 - return crc; 111 - } 112 - 113 - u32 crc32_le_shift(u32 crc, size_t len) 114 - { 115 - return crc32_generic_shift(crc, len, CRC32_POLY_LE); 116 - } 117 - EXPORT_SYMBOL(crc32_le_shift); 118 53 119 54 u32 crc32_be_base(u32 crc, const u8 *p, size_t len) 120 55 {
+1 -38
lib/tests/crc_kunit.c
··· 36 36 * can fit any CRC up to CRC-64. The CRC is passed in, and is expected 37 37 * to be returned in, the least significant bits of the u64. The 38 38 * function is expected to *not* invert the CRC at the beginning and end. 39 - * @combine_func: Optional function to combine two CRCs. 40 39 */ 41 40 struct crc_variant { 42 41 int bits; 43 42 bool le; 44 43 u64 poly; 45 44 u64 (*func)(u64 crc, const u8 *p, size_t len); 46 - u64 (*combine_func)(u64 crc1, u64 crc2, size_t len2); 47 45 }; 48 46 49 47 static u32 rand32(void) ··· 142 144 } 143 145 144 146 /* Test that v->func gives the same CRCs as a reference implementation. */ 145 - static void crc_main_test(struct kunit *test, const struct crc_variant *v) 147 + static void crc_test(struct kunit *test, const struct crc_variant *v) 146 148 { 147 149 size_t i; 148 150 ··· 184 186 "Wrong result with len=%zu offset=%zu nosimd=%d", 185 187 len, offset, nosimd); 186 188 } 187 - } 188 - 189 - /* Test that CRC(concat(A, B)) == combine_CRCs(CRC(A), CRC(B), len(B)). */ 190 - static void crc_combine_test(struct kunit *test, const struct crc_variant *v) 191 - { 192 - int i; 193 - 194 - for (i = 0; i < 100; i++) { 195 - u64 init_crc = generate_random_initial_crc(v); 196 - size_t len1 = generate_random_length(CRC_KUNIT_MAX_LEN); 197 - size_t len2 = generate_random_length(CRC_KUNIT_MAX_LEN - len1); 198 - u64 crc1, crc2, expected_crc, actual_crc; 199 - 200 - prandom_bytes_state(&rng, test_buffer, len1 + len2); 201 - crc1 = v->func(init_crc, test_buffer, len1); 202 - crc2 = v->func(0, &test_buffer[len1], len2); 203 - expected_crc = v->func(init_crc, test_buffer, len1 + len2); 204 - actual_crc = v->combine_func(crc1, crc2, len2); 205 - KUNIT_EXPECT_EQ_MSG(test, expected_crc, actual_crc, 206 - "CRC combination gave wrong result with len1=%zu len2=%zu\n", 207 - len1, len2); 208 - } 209 - } 210 - 211 - static void crc_test(struct kunit *test, const struct crc_variant *v) 212 - { 213 - crc_main_test(test, v); 214 - if (v->combine_func) 215 - crc_combine_test(test, v); 216 189 }