Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

Merge tag 'v6.1-p1' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6

Pull crypto updates from Herbert Xu:
"API:
- Feed untrusted RNGs into /dev/random
- Allow HWRNG sleeping to be more interruptible
- Create lib/utils module
- Setting private keys no longer required for akcipher
- Remove tcrypt mode=1000
- Reorganised Kconfig entries

Algorithms:
- Load x86/sha512 based on CPU features
- Add AES-NI/AVX/x86_64/GFNI assembler implementation of aria cipher

Drivers:
- Add Aspeed HACE crypto driver"

* tag 'v6.1-p1' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6: (124 commits)
crypto: aspeed - Remove redundant dev_err call
crypto: scatterwalk - Remove unused inline function scatterwalk_aligned()
crypto: aead - Remove unused inline functions from aead
crypto: bcm - Simplify obtain the name for cipher
crypto: marvell/octeontx - use sysfs_emit() to instead of scnprintf()
hwrng: core - start hwrng kthread also for untrusted sources
crypto: zip - remove the unneeded result variable
crypto: qat - add limit to linked list parsing
crypto: octeontx2 - Remove the unneeded result variable
crypto: ccp - Remove the unneeded result variable
crypto: aspeed - Fix check for platform_get_irq() errors
crypto: virtio - fix memory-leak
crypto: cavium - prevent integer overflow loading firmware
crypto: marvell/octeontx - prevent integer overflows
crypto: aspeed - fix build error when only CRYPTO_DEV_ASPEED is enabled
crypto: hisilicon/qm - fix the qos value initialization
crypto: sun4i-ss - use DEFINE_SHOW_ATTRIBUTE to simplify sun4i_ss_debugfs
crypto: tcrypt - add async speed test for aria cipher
crypto: aria-avx - add AES-NI/AVX/x86_64/GFNI assembler implementation of aria cipher
crypto: aria - prepare generic module for optimized implementations
...
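
The ARIA work in this pull adds an accelerated implementation behind the existing crypto API, so nothing changes for callers. As a minimal illustrative sketch (not code from this pull request; the function name and the choice of ECB mode are made up for the example), an in-kernel user requests the cipher by name and the priority system transparently picks the AVX/GFNI implementation when the CPU supports it:

#include <crypto/skcipher.h>
#include <linux/scatterlist.h>
#include <linux/err.h>

/*
 * Encrypt one 16-byte block with "ecb(aria)"; illustrative only.
 * buf must be linear kernel memory (not on the stack).
 */
static int aria_encrypt_block(u8 *buf, const u8 *key, unsigned int keylen)
{
	struct crypto_skcipher *tfm;
	struct skcipher_request *req;
	struct scatterlist sg;
	DECLARE_CRYPTO_WAIT(wait);
	int err;

	/* Picks the highest-priority "ecb(aria)" provider, e.g. the AVX one. */
	tfm = crypto_alloc_skcipher("ecb(aria)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_skcipher_setkey(tfm, key, keylen);
	if (err)
		goto out_tfm;

	req = skcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		err = -ENOMEM;
		goto out_tfm;
	}

	sg_init_one(&sg, buf, 16);	/* one 128-bit ARIA block */
	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP,
				      crypto_req_done, &wait);
	skcipher_request_set_crypt(req, &sg, &sg, 16, NULL);
	err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);

	skcipher_request_free(req);
out_tfm:
	crypto_free_skcipher(tfm);
	return err;
}

The tcrypt change in the list above exercises this same interface for throughput measurement.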

+9140 -3057
+53
Documentation/devicetree/bindings/crypto/aspeed,ast2500-hace.yaml
+ # SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+ %YAML 1.2
+ ---
+ $id: http://devicetree.org/schemas/crypto/aspeed,ast2500-hace.yaml#
+ $schema: http://devicetree.org/meta-schemas/core.yaml#
+
+ title: ASPEED HACE hash and crypto Hardware Accelerator Engines
+
+ maintainers:
+   - Neal Liu <neal_liu@aspeedtech.com>
+
+ description: |
+   The Hash and Crypto Engine (HACE) is designed to accelerate the throughput
+   of hash data digest, encryption, and decryption. Basically, HACE can be
+   divided into two independently engines - Hash Engine and Crypto Engine.
+
+ properties:
+   compatible:
+     enum:
+       - aspeed,ast2500-hace
+       - aspeed,ast2600-hace
+
+   reg:
+     maxItems: 1
+
+   clocks:
+     maxItems: 1
+
+   interrupts:
+     maxItems: 1
+
+   resets:
+     maxItems: 1
+
+ required:
+   - compatible
+   - reg
+   - clocks
+   - interrupts
+   - resets
+
+ additionalProperties: false
+
+ examples:
+   - |
+     #include <dt-bindings/clock/ast2600-clock.h>
+     hace: crypto@1e6d0000 {
+         compatible = "aspeed,ast2600-hace";
+         reg = <0x1e6d0000 0x200>;
+         interrupts = <4>;
+         clocks = <&syscon ASPEED_CLK_GATE_YCLK>;
+         resets = <&syscon ASPEED_RESET_HACE>;
+     };
+2 -3
Documentation/virt/kvm/x86/amd-memory-encryption.rst
···

  The firmware can be initialized either by using its own non-volatile storage or
  the OS can manage the NV storage for the firmware using the module parameter
- ``init_ex_path``. The file specified by ``init_ex_path`` must exist. To create
- a new NV storage file allocate the file with 32KB bytes of 0xFF as required by
- the SEV spec.
+ ``init_ex_path``. If the file specified by ``init_ex_path`` does not exist or
+ is invalid, the OS will create or override the file with output from PSP.

  Returns: 0 on success, -negative on error
+7
MAINTAINERS
···
  F: Documentation/devicetree/bindings/usb/aspeed,ast2600-udc.yaml
  F: drivers/usb/gadget/udc/aspeed_udc.c

+ ASPEED CRYPTO DRIVER
+ M: Neal Liu <neal_liu@aspeedtech.com>
+ L: linux-aspeed@lists.ozlabs.org (moderated for non-subscribers)
+ S: Maintained
+ F: Documentation/devicetree/bindings/crypto/aspeed,ast2500-hace.yaml
+ F: drivers/crypto/aspeed/
+
  ASUS NOTEBOOKS AND EEEPC ACPI/WMI EXTRAS DRIVERS
  M: Corentin Chary <corentin.chary@gmail.com>
  L: acpi4asus-user@lists.sourceforge.net
-4
arch/arm/Kconfig
···

  endmenu

- if CRYPTO
- source "arch/arm/crypto/Kconfig"
- endif
-
  source "arch/arm/Kconfig.assembler"
+8
arch/arm/boot/dts/aspeed-g5.dtsi
···
      quality = <100>;
  };

+ hace: crypto@1e6e3000 {
+     compatible = "aspeed,ast2500-hace";
+     reg = <0x1e6e3000 0x100>;
+     interrupts = <4>;
+     clocks = <&syscon ASPEED_CLK_GATE_YCLK>;
+     resets = <&syscon ASPEED_RESET_HACE>;
+ };
+
  gfx: display@1e6e6000 {
      compatible = "aspeed,ast2500-gfx", "syscon";
      reg = <0x1e6e6000 0x1000>;
+8
arch/arm/boot/dts/aspeed-g6.dtsi
···
  #size-cells = <1>;
  ranges;

+ hace: crypto@1e6d0000 {
+     compatible = "aspeed,ast2600-hace";
+     reg = <0x1e6d0000 0x200>;
+     interrupts = <GIC_SPI 4 IRQ_TYPE_LEVEL_HIGH>;
+     clocks = <&syscon ASPEED_CLK_GATE_YCLK>;
+     resets = <&syscon ASPEED_RESET_HACE>;
+ };
+
  syscon: syscon@1e6e2000 {
      compatible = "aspeed,ast2600-scu", "syscon", "simple-mfd";
      reg = <0x1e6e2000 0x1000>;
-1
arch/arm/configs/exynos_defconfig
···
  CONFIG_PM_DEBUG=y
  CONFIG_PM_ADVANCED_DEBUG=y
  CONFIG_ENERGY_MODEL=y
- CONFIG_ARM_CRYPTO=y
  CONFIG_CRYPTO_SHA1_ARM_NEON=m
  CONFIG_CRYPTO_SHA256_ARM=m
  CONFIG_CRYPTO_SHA512_ARM=m
-1
arch/arm/configs/milbeaut_m10v_defconfig
···
  CONFIG_VFP=y
  CONFIG_NEON=y
  CONFIG_KERNEL_MODE_NEON=y
- CONFIG_ARM_CRYPTO=y
  CONFIG_CRYPTO_SHA1_ARM_NEON=m
  CONFIG_CRYPTO_SHA1_ARM_CE=m
  CONFIG_CRYPTO_SHA2_ARM_CE=m
-1
arch/arm/configs/multi_v7_defconfig
···
  CONFIG_ARM_TEGRA_CPUIDLE=y
  CONFIG_ARM_QCOM_SPM_CPUIDLE=y
  CONFIG_KERNEL_MODE_NEON=y
- CONFIG_ARM_CRYPTO=y
  CONFIG_CRYPTO_SHA1_ARM_NEON=m
  CONFIG_CRYPTO_SHA1_ARM_CE=m
  CONFIG_CRYPTO_SHA2_ARM_CE=m
-1
arch/arm/configs/omap2plus_defconfig
···
  CONFIG_ARM_CPUIDLE=y
  CONFIG_KERNEL_MODE_NEON=y
  CONFIG_PM_DEBUG=y
- CONFIG_ARM_CRYPTO=y
  CONFIG_CRYPTO_SHA1_ARM_NEON=m
  CONFIG_CRYPTO_SHA256_ARM=m
  CONFIG_CRYPTO_SHA512_ARM=m
-1
arch/arm/configs/pxa_defconfig
···
  CONFIG_ARM_PXA2xx_CPUFREQ=m
  CONFIG_CPU_IDLE=y
  CONFIG_ARM_CPUIDLE=y
- CONFIG_ARM_CRYPTO=y
  CONFIG_CRYPTO_SHA1_ARM=m
  CONFIG_CRYPTO_SHA256_ARM=m
  CONFIG_CRYPTO_SHA512_ARM=m
+155 -85
arch/arm/crypto/Kconfig
  # SPDX-License-Identifier: GPL-2.0

- menuconfig ARM_CRYPTO
-   bool "ARM Accelerated Cryptographic Algorithms"
-   depends on ARM
-   help
-     Say Y here to choose from a selection of cryptographic algorithms
-     implemented using ARM specific CPU features or instructions.
+ menu "Accelerated Cryptographic Algorithms for CPU (arm)"

- if ARM_CRYPTO
+ config CRYPTO_CURVE25519_NEON
+   tristate "Public key crypto: Curve25519 (NEON)"
+   depends on KERNEL_MODE_NEON
+   select CRYPTO_LIB_CURVE25519_GENERIC
+   select CRYPTO_ARCH_HAVE_LIB_CURVE25519
+   help
+     Curve25519 algorithm
+
+     Architecture: arm with
+     - NEON (Advanced SIMD) extensions
+
+ config CRYPTO_GHASH_ARM_CE
+   tristate "Hash functions: GHASH (PMULL/NEON/ARMv8 Crypto Extensions)"
+   depends on KERNEL_MODE_NEON
+   select CRYPTO_HASH
+   select CRYPTO_CRYPTD
+   select CRYPTO_GF128MUL
+   help
+     GCM GHASH function (NIST SP800-38D)
+
+     Architecture: arm using
+     - PMULL (Polynomial Multiply Long) instructions
+     - NEON (Advanced SIMD) extensions
+     - ARMv8 Crypto Extensions
+
+     Use an implementation of GHASH (used by the GCM AEAD chaining mode)
+     that uses the 64x64 to 128 bit polynomial multiplication (vmull.p64)
+     that is part of the ARMv8 Crypto Extensions, or a slower variant that
+     uses the vmull.p8 instruction that is part of the basic NEON ISA.
+
+ config CRYPTO_NHPOLY1305_NEON
+   tristate "Hash functions: NHPoly1305 (NEON)"
+   depends on KERNEL_MODE_NEON
+   select CRYPTO_NHPOLY1305
+   help
+     NHPoly1305 hash function (Adiantum)
+
+     Architecture: arm using:
+     - NEON (Advanced SIMD) extensions
+
+ config CRYPTO_POLY1305_ARM
+   tristate "Hash functions: Poly1305 (NEON)"
+   select CRYPTO_HASH
+   select CRYPTO_ARCH_HAVE_LIB_POLY1305
+   help
+     Poly1305 authenticator algorithm (RFC7539)
+
+     Architecture: arm optionally using
+     - NEON (Advanced SIMD) extensions
+
+ config CRYPTO_BLAKE2S_ARM
+   bool "Hash functions: BLAKE2s"
+   select CRYPTO_ARCH_HAVE_LIB_BLAKE2S
+   help
+     BLAKE2s cryptographic hash function (RFC 7693)
+
+     Architecture: arm
+
+     This is faster than the generic implementations of BLAKE2s and
+     BLAKE2b, but slower than the NEON implementation of BLAKE2b.
+     There is no NEON implementation of BLAKE2s, since NEON doesn't
+     really help with it.
+
+ config CRYPTO_BLAKE2B_NEON
+   tristate "Hash functions: BLAKE2b (NEON)"
+   depends on KERNEL_MODE_NEON
+   select CRYPTO_BLAKE2B
+   help
+     BLAKE2b cryptographic hash function (RFC 7693)
+
+     Architecture: arm using
+     - NEON (Advanced SIMD) extensions
+
+     BLAKE2b digest algorithm optimized with ARM NEON instructions.
+     On ARM processors that have NEON support but not the ARMv8
+     Crypto Extensions, typically this BLAKE2b implementation is
+     much faster than the SHA-2 family and slightly faster than
+     SHA-1.

  config CRYPTO_SHA1_ARM
-   tristate "SHA1 digest algorithm (ARM-asm)"
+   tristate "Hash functions: SHA-1"
    select CRYPTO_SHA1
    select CRYPTO_HASH
    help
-     SHA-1 secure hash standard (FIPS 180-1/DFIPS 180-2) implemented
-     using optimized ARM assembler.
+     SHA-1 secure hash algorithm (FIPS 180)
+
+     Architecture: arm

  config CRYPTO_SHA1_ARM_NEON
-   tristate "SHA1 digest algorithm (ARM NEON)"
+   tristate "Hash functions: SHA-1 (NEON)"
    depends on KERNEL_MODE_NEON
    select CRYPTO_SHA1_ARM
    select CRYPTO_SHA1
    select CRYPTO_HASH
    help
-     SHA-1 secure hash standard (FIPS 180-1/DFIPS 180-2) implemented
-     using optimized ARM NEON assembly, when NEON instructions are
-     available.
+     SHA-1 secure hash algorithm (FIPS 180)
+
+     Architecture: arm using
+     - NEON (Advanced SIMD) extensions

  config CRYPTO_SHA1_ARM_CE
-   tristate "SHA1 digest algorithm (ARM v8 Crypto Extensions)"
+   tristate "Hash functions: SHA-1 (ARMv8 Crypto Extensions)"
    depends on KERNEL_MODE_NEON
    select CRYPTO_SHA1_ARM
    select CRYPTO_HASH
    help
-     SHA-1 secure hash standard (FIPS 180-1/DFIPS 180-2) implemented
-     using special ARMv8 Crypto Extensions.
+     SHA-1 secure hash algorithm (FIPS 180)
+
+     Architecture: arm using ARMv8 Crypto Extensions

  config CRYPTO_SHA2_ARM_CE
-   tristate "SHA-224/256 digest algorithm (ARM v8 Crypto Extensions)"
+   tristate "Hash functions: SHA-224 and SHA-256 (ARMv8 Crypto Extensions)"
    depends on KERNEL_MODE_NEON
    select CRYPTO_SHA256_ARM
    select CRYPTO_HASH
    help
-     SHA-256 secure hash standard (DFIPS 180-2) implemented
-     using special ARMv8 Crypto Extensions.
+     SHA-224 and SHA-256 secure hash algorithms (FIPS 180)
+
+     Architecture: arm using
+     - ARMv8 Crypto Extensions

  config CRYPTO_SHA256_ARM
-   tristate "SHA-224/256 digest algorithm (ARM-asm and NEON)"
+   tristate "Hash functions: SHA-224 and SHA-256 (NEON)"
    select CRYPTO_HASH
    depends on !CPU_V7M
    help
-     SHA-256 secure hash standard (DFIPS 180-2) implemented
-     using optimized ARM assembler and NEON, when available.
+     SHA-224 and SHA-256 secure hash algorithms (FIPS 180)
+
+     Architecture: arm using
+     - NEON (Advanced SIMD) extensions

  config CRYPTO_SHA512_ARM
-   tristate "SHA-384/512 digest algorithm (ARM-asm and NEON)"
+   tristate "Hash functions: SHA-384 and SHA-512 (NEON)"
    select CRYPTO_HASH
    depends on !CPU_V7M
    help
-     SHA-512 secure hash standard (DFIPS 180-2) implemented
-     using optimized ARM assembler and NEON, when available.
+     SHA-384 and SHA-512 secure hash algorithms (FIPS 180)

- config CRYPTO_BLAKE2S_ARM
-   bool "BLAKE2s digest algorithm (ARM)"
-   select CRYPTO_ARCH_HAVE_LIB_BLAKE2S
-   help
-     BLAKE2s digest algorithm optimized with ARM scalar instructions. This
-     is faster than the generic implementations of BLAKE2s and BLAKE2b, but
-     slower than the NEON implementation of BLAKE2b. (There is no NEON
-     implementation of BLAKE2s, since NEON doesn't really help with it.)
-
- config CRYPTO_BLAKE2B_NEON
-   tristate "BLAKE2b digest algorithm (ARM NEON)"
-   depends on KERNEL_MODE_NEON
-   select CRYPTO_BLAKE2B
-   help
-     BLAKE2b digest algorithm optimized with ARM NEON instructions.
-     On ARM processors that have NEON support but not the ARMv8
-     Crypto Extensions, typically this BLAKE2b implementation is
-     much faster than SHA-2 and slightly faster than SHA-1.
+     Architecture: arm using
+     - NEON (Advanced SIMD) extensions

  config CRYPTO_AES_ARM
-   tristate "Scalar AES cipher for ARM"
+   tristate "Ciphers: AES"
    select CRYPTO_ALGAPI
    select CRYPTO_AES
    help
-     Use optimized AES assembler routines for ARM platforms.
+     Block ciphers: AES cipher algorithms (FIPS-197)
+
+     Architecture: arm

      On ARM processors without the Crypto Extensions, this is the
      fastest AES implementation for single blocks. For multiple
···
      such attacks very difficult.

  config CRYPTO_AES_ARM_BS
-   tristate "Bit sliced AES using NEON instructions"
+   tristate "Ciphers: AES, modes: ECB/CBC/CTR/XTS (bit-sliced NEON)"
    depends on KERNEL_MODE_NEON
    select CRYPTO_SKCIPHER
    select CRYPTO_LIB_AES
···
    select CRYPTO_CBC
    select CRYPTO_SIMD
    help
-     Use a faster and more secure NEON based implementation of AES in CBC,
-     CTR and XTS modes
+     Length-preserving ciphers: AES cipher algorithms (FIPS-197)
+     with block cipher modes:
+     - ECB (Electronic Codebook) mode (NIST SP800-38A)
+     - CBC (Cipher Block Chaining) mode (NIST SP800-38A)
+     - CTR (Counter) mode (NIST SP800-38A)
+     - XTS (XOR Encrypt XOR with ciphertext stealing) mode (NIST SP800-38E
+       and IEEE 1619)

      Bit sliced AES gives around 45% speedup on Cortex-A15 for CTR mode
      and for XTS mode encryption, CBC and XTS mode decryption speedup is
···
      believed to be invulnerable to cache timing attacks.

  config CRYPTO_AES_ARM_CE
-   tristate "Accelerated AES using ARMv8 Crypto Extensions"
+   tristate "Ciphers: AES, modes: ECB/CBC/CTS/CTR/XTS (ARMv8 Crypto Extensions)"
    depends on KERNEL_MODE_NEON
    select CRYPTO_SKCIPHER
    select CRYPTO_LIB_AES
    select CRYPTO_SIMD
    help
-     Use an implementation of AES in CBC, CTR and XTS modes that uses
-     ARMv8 Crypto Extensions
+     Length-preserving ciphers: AES cipher algorithms (FIPS-197)
+     with block cipher modes:
+     - ECB (Electronic Codebook) mode (NIST SP800-38A)
+     - CBC (Cipher Block Chaining) mode (NIST SP800-38A)
+     - CTR (Counter) mode (NIST SP800-38A)
+     - CTS (Cipher Text Stealing) mode (NIST SP800-38A)
+     - XTS (XOR Encrypt XOR with ciphertext stealing) mode (NIST SP800-38E
+       and IEEE 1619)

- config CRYPTO_GHASH_ARM_CE
-   tristate "PMULL-accelerated GHASH using NEON/ARMv8 Crypto Extensions"
-   depends on KERNEL_MODE_NEON
-   select CRYPTO_HASH
-   select CRYPTO_CRYPTD
-   select CRYPTO_GF128MUL
+     Architecture: arm using:
+     - ARMv8 Crypto Extensions
+
+ config CRYPTO_CHACHA20_NEON
+   tristate "Ciphers: ChaCha20, XChaCha20, XChaCha12 (NEON)"
+   select CRYPTO_SKCIPHER
+   select CRYPTO_ARCH_HAVE_LIB_CHACHA
    help
-     Use an implementation of GHASH (used by the GCM AEAD chaining mode)
-     that uses the 64x64 to 128 bit polynomial multiplication (vmull.p64)
-     that is part of the ARMv8 Crypto Extensions, or a slower variant that
-     uses the vmull.p8 instruction that is part of the basic NEON ISA.
+     Length-preserving ciphers: ChaCha20, XChaCha20, and XChaCha12
+     stream cipher algorithms

- config CRYPTO_CRCT10DIF_ARM_CE
-   tristate "CRCT10DIF digest algorithm using PMULL instructions"
-   depends on KERNEL_MODE_NEON
-   depends on CRC_T10DIF
-   select CRYPTO_HASH
+     Architecture: arm using:
+     - NEON (Advanced SIMD) extensions

  config CRYPTO_CRC32_ARM_CE
-   tristate "CRC32(C) digest algorithm using CRC and/or PMULL instructions"
+   tristate "CRC32C and CRC32"
    depends on KERNEL_MODE_NEON
    depends on CRC32
    select CRYPTO_HASH
+   help
+     CRC32c CRC algorithm with the iSCSI polynomial (RFC 3385 and RFC 3720)
+     and CRC32 CRC algorithm (IEEE 802.3)

- config CRYPTO_CHACHA20_NEON
-   tristate "NEON and scalar accelerated ChaCha stream cipher algorithms"
-   select CRYPTO_SKCIPHER
-   select CRYPTO_ARCH_HAVE_LIB_CHACHA
+     Architecture: arm using:
+     - CRC and/or PMULL instructions

- config CRYPTO_POLY1305_ARM
-   tristate "Accelerated scalar and SIMD Poly1305 hash implementations"
+     Drivers: crc32-arm-ce and crc32c-arm-ce
+
+ config CRYPTO_CRCT10DIF_ARM_CE
+   tristate "CRCT10DIF"
+   depends on KERNEL_MODE_NEON
+   depends on CRC_T10DIF
    select CRYPTO_HASH
-   select CRYPTO_ARCH_HAVE_LIB_POLY1305
+   help
+     CRC16 CRC algorithm used for the T10 (SCSI) Data Integrity Field (DIF)

- config CRYPTO_NHPOLY1305_NEON
-   tristate "NEON accelerated NHPoly1305 hash function (for Adiantum)"
-   depends on KERNEL_MODE_NEON
-   select CRYPTO_NHPOLY1305
+     Architecture: arm using:
+     - PMULL (Polynomial Multiply Long) instructions

- config CRYPTO_CURVE25519_NEON
-   tristate "NEON accelerated Curve25519 scalar multiplication library"
-   depends on KERNEL_MODE_NEON
-   select CRYPTO_LIB_CURVE25519_GENERIC
-   select CRYPTO_ARCH_HAVE_LIB_CURVE25519
+ endmenu

- endif
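
The Kconfig rework above only renames and regroups the entries; each option still registers its algorithm under the usual generic name, so in-kernel consumers are unaffected. A minimal illustrative sketch (not code from this series; the helper name is made up) of how any of the SHA-256 variants listed above gets used, with the highest-priority registered implementation chosen automatically:

#include <crypto/hash.h>
#include <linux/err.h>

/* Digest a buffer with whichever "sha256" implementation won on priority. */
static int sha256_example(const u8 *data, unsigned int len, u8 out[32])
{
	struct crypto_shash *tfm;
	int err;

	tfm = crypto_alloc_shash("sha256", 0, 0);	/* e.g. sha256-ce */
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	{
		SHASH_DESC_ON_STACK(desc, tfm);

		desc->tfm = tfm;
		err = crypto_shash_digest(desc, data, len, out);
	}

	crypto_free_shash(tfm);
	return err;
}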
-3
arch/arm64/Kconfig
···

  source "arch/arm64/kvm/Kconfig"

- if CRYPTO
- source "arch/arm64/crypto/Kconfig"
- endif # CRYPTO
-1
arch/arm64/configs/defconfig
···
  CONFIG_ACPI_APEI_EINJ=y
  CONFIG_VIRTUALIZATION=y
  CONFIG_KVM=y
- CONFIG_ARM64_CRYPTO=y
  CONFIG_CRYPTO_SHA1_ARM64_CE=y
  CONFIG_CRYPTO_SHA2_ARM64_CE=y
  CONFIG_CRYPTO_SHA512_ARM64_CE=m
+247 -106
arch/arm64/crypto/Kconfig
  # SPDX-License-Identifier: GPL-2.0

- menuconfig ARM64_CRYPTO
-   bool "ARM64 Accelerated Cryptographic Algorithms"
-   depends on ARM64
-   help
-     Say Y here to choose from a selection of cryptographic algorithms
-     implemented using ARM64 specific CPU features or instructions.
-
- if ARM64_CRYPTO
-
- config CRYPTO_SHA256_ARM64
-   tristate "SHA-224/SHA-256 digest algorithm for arm64"
-   select CRYPTO_HASH
-
- config CRYPTO_SHA512_ARM64
-   tristate "SHA-384/SHA-512 digest algorithm for arm64"
-   select CRYPTO_HASH
-
- config CRYPTO_SHA1_ARM64_CE
-   tristate "SHA-1 digest algorithm (ARMv8 Crypto Extensions)"
-   depends on KERNEL_MODE_NEON
-   select CRYPTO_HASH
-   select CRYPTO_SHA1
-
- config CRYPTO_SHA2_ARM64_CE
-   tristate "SHA-224/SHA-256 digest algorithm (ARMv8 Crypto Extensions)"
-   depends on KERNEL_MODE_NEON
-   select CRYPTO_HASH
-   select CRYPTO_SHA256_ARM64
-
- config CRYPTO_SHA512_ARM64_CE
-   tristate "SHA-384/SHA-512 digest algorithm (ARMv8 Crypto Extensions)"
-   depends on KERNEL_MODE_NEON
-   select CRYPTO_HASH
-   select CRYPTO_SHA512_ARM64
-
- config CRYPTO_SHA3_ARM64
-   tristate "SHA3 digest algorithm (ARMv8.2 Crypto Extensions)"
-   depends on KERNEL_MODE_NEON
-   select CRYPTO_HASH
-   select CRYPTO_SHA3
-
- config CRYPTO_SM3_ARM64_CE
-   tristate "SM3 digest algorithm (ARMv8.2 Crypto Extensions)"
-   depends on KERNEL_MODE_NEON
-   select CRYPTO_HASH
-   select CRYPTO_SM3
-
- config CRYPTO_SM4_ARM64_CE
-   tristate "SM4 symmetric cipher (ARMv8.2 Crypto Extensions)"
-   depends on KERNEL_MODE_NEON
-   select CRYPTO_ALGAPI
-   select CRYPTO_SM4
-
- config CRYPTO_SM4_ARM64_CE_BLK
-   tristate "SM4 in ECB/CBC/CFB/CTR modes using ARMv8 Crypto Extensions"
-   depends on KERNEL_MODE_NEON
-   select CRYPTO_SKCIPHER
-   select CRYPTO_SM4
-
- config CRYPTO_SM4_ARM64_NEON_BLK
-   tristate "SM4 in ECB/CBC/CFB/CTR modes using NEON instructions"
-   depends on KERNEL_MODE_NEON
-   select CRYPTO_SKCIPHER
-   select CRYPTO_SM4
+ menu "Accelerated Cryptographic Algorithms for CPU (arm64)"

  config CRYPTO_GHASH_ARM64_CE
-   tristate "GHASH/AES-GCM using ARMv8 Crypto Extensions"
+   tristate "Hash functions: GHASH (ARMv8 Crypto Extensions)"
    depends on KERNEL_MODE_NEON
    select CRYPTO_HASH
    select CRYPTO_GF128MUL
    select CRYPTO_LIB_AES
    select CRYPTO_AEAD
+   help
+     GCM GHASH function (NIST SP800-38D)
+
+     Architecture: arm64 using:
+     - ARMv8 Crypto Extensions
+
+ config CRYPTO_NHPOLY1305_NEON
+   tristate "Hash functions: NHPoly1305 (NEON)"
+   depends on KERNEL_MODE_NEON
+   select CRYPTO_NHPOLY1305
+   help
+     NHPoly1305 hash function (Adiantum)
+
+     Architecture: arm64 using:
+     - NEON (Advanced SIMD) extensions
+
+ config CRYPTO_POLY1305_NEON
+   tristate "Hash functions: Poly1305 (NEON)"
+   depends on KERNEL_MODE_NEON
+   select CRYPTO_HASH
+   select CRYPTO_ARCH_HAVE_LIB_POLY1305
+   help
+     Poly1305 authenticator algorithm (RFC7539)
+
+     Architecture: arm64 using:
+     - NEON (Advanced SIMD) extensions
+
+ config CRYPTO_SHA1_ARM64_CE
+   tristate "Hash functions: SHA-1 (ARMv8 Crypto Extensions)"
+   depends on KERNEL_MODE_NEON
+   select CRYPTO_HASH
+   select CRYPTO_SHA1
+   help
+     SHA-1 secure hash algorithm (FIPS 180)
+
+     Architecture: arm64 using:
+     - ARMv8 Crypto Extensions
+
+ config CRYPTO_SHA256_ARM64
+   tristate "Hash functions: SHA-224 and SHA-256"
+   select CRYPTO_HASH
+   help
+     SHA-224 and SHA-256 secure hash algorithms (FIPS 180)
+
+     Architecture: arm64
+
+ config CRYPTO_SHA2_ARM64_CE
+   tristate "Hash functions: SHA-224 and SHA-256 (ARMv8 Crypto Extensions)"
+   depends on KERNEL_MODE_NEON
+   select CRYPTO_HASH
+   select CRYPTO_SHA256_ARM64
+   help
+     SHA-224 and SHA-256 secure hash algorithms (FIPS 180)
+
+     Architecture: arm64 using:
+     - ARMv8 Crypto Extensions
+
+ config CRYPTO_SHA512_ARM64
+   tristate "Hash functions: SHA-384 and SHA-512"
+   select CRYPTO_HASH
+   help
+     SHA-384 and SHA-512 secure hash algorithms (FIPS 180)
+
+     Architecture: arm64
+
+ config CRYPTO_SHA512_ARM64_CE
+   tristate "Hash functions: SHA-384 and SHA-512 (ARMv8 Crypto Extensions)"
+   depends on KERNEL_MODE_NEON
+   select CRYPTO_HASH
+   select CRYPTO_SHA512_ARM64
+   help
+     SHA-384 and SHA-512 secure hash algorithms (FIPS 180)
+
+     Architecture: arm64 using:
+     - ARMv8 Crypto Extensions
+
+ config CRYPTO_SHA3_ARM64
+   tristate "Hash functions: SHA-3 (ARMv8.2 Crypto Extensions)"
+   depends on KERNEL_MODE_NEON
+   select CRYPTO_HASH
+   select CRYPTO_SHA3
+   help
+     SHA-3 secure hash algorithms (FIPS 202)
+
+     Architecture: arm64 using:
+     - ARMv8.2 Crypto Extensions
+
+ config CRYPTO_SM3_ARM64_CE
+   tristate "Hash functions: SM3 (ARMv8.2 Crypto Extensions)"
+   depends on KERNEL_MODE_NEON
+   select CRYPTO_HASH
+   select CRYPTO_SM3
+   help
+     SM3 (ShangMi 3) secure hash function (OSCCA GM/T 0004-2012)
+
+     Architecture: arm64 using:
+     - ARMv8.2 Crypto Extensions

  config CRYPTO_POLYVAL_ARM64_CE
-   tristate "POLYVAL using ARMv8 Crypto Extensions (for HCTR2)"
+   tristate "Hash functions: POLYVAL (ARMv8 Crypto Extensions)"
    depends on KERNEL_MODE_NEON
    select CRYPTO_POLYVAL
+   help
+     POLYVAL hash function for HCTR2

- config CRYPTO_CRCT10DIF_ARM64_CE
-   tristate "CRCT10DIF digest algorithm using PMULL instructions"
-   depends on KERNEL_MODE_NEON && CRC_T10DIF
-   select CRYPTO_HASH
+     Architecture: arm64 using:
+     - ARMv8 Crypto Extensions

  config CRYPTO_AES_ARM64
-   tristate "AES core cipher using scalar instructions"
+   tristate "Ciphers: AES, modes: ECB, CBC, CTR, CTS, XCTR, XTS"
    select CRYPTO_AES
+   help
+     Block ciphers: AES cipher algorithms (FIPS-197)
+     Length-preserving ciphers: AES with ECB, CBC, CTR, CTS,
+     XCTR, and XTS modes
+     AEAD cipher: AES with CBC, ESSIV, and SHA-256
+     for fscrypt and dm-crypt
+
+     Architecture: arm64

  config CRYPTO_AES_ARM64_CE
-   tristate "AES core cipher using ARMv8 Crypto Extensions"
+   tristate "Ciphers: AES (ARMv8 Crypto Extensions)"
    depends on ARM64 && KERNEL_MODE_NEON
    select CRYPTO_ALGAPI
    select CRYPTO_LIB_AES
+   help
+     Block ciphers: AES cipher algorithms (FIPS-197)
+
+     Architecture: arm64 using:
+     - ARMv8 Crypto Extensions
+
+ config CRYPTO_AES_ARM64_CE_BLK
+   tristate "Ciphers: AES, modes: ECB/CBC/CTR/XTS (ARMv8 Crypto Extensions)"
+   depends on KERNEL_MODE_NEON
+   select CRYPTO_SKCIPHER
+   select CRYPTO_AES_ARM64_CE
+   help
+     Length-preserving ciphers: AES cipher algorithms (FIPS-197)
+     with block cipher modes:
+     - ECB (Electronic Codebook) mode (NIST SP800-38A)
+     - CBC (Cipher Block Chaining) mode (NIST SP800-38A)
+     - CTR (Counter) mode (NIST SP800-38A)
+     - XTS (XOR Encrypt XOR with ciphertext stealing) mode (NIST SP800-38E
+       and IEEE 1619)
+
+     Architecture: arm64 using:
+     - ARMv8 Crypto Extensions
+
+ config CRYPTO_AES_ARM64_NEON_BLK
+   tristate "Ciphers: AES, modes: ECB/CBC/CTR/XTS (NEON)"
+   depends on KERNEL_MODE_NEON
+   select CRYPTO_SKCIPHER
+   select CRYPTO_LIB_AES
+   help
+     Length-preserving ciphers: AES cipher algorithms (FIPS-197)
+     with block cipher modes:
+     - ECB (Electronic Codebook) mode (NIST SP800-38A)
+     - CBC (Cipher Block Chaining) mode (NIST SP800-38A)
+     - CTR (Counter) mode (NIST SP800-38A)
+     - XTS (XOR Encrypt XOR with ciphertext stealing) mode (NIST SP800-38E
+       and IEEE 1619)
+
+     Architecture: arm64 using:
+     - NEON (Advanced SIMD) extensions
+
+ config CRYPTO_CHACHA20_NEON
+   tristate "Ciphers: ChaCha (NEON)"
+   depends on KERNEL_MODE_NEON
+   select CRYPTO_SKCIPHER
+   select CRYPTO_LIB_CHACHA_GENERIC
+   select CRYPTO_ARCH_HAVE_LIB_CHACHA
+   help
+     Length-preserving ciphers: ChaCha20, XChaCha20, and XChaCha12
+     stream cipher algorithms
+
+     Architecture: arm64 using:
+     - NEON (Advanced SIMD) extensions
+
+ config CRYPTO_AES_ARM64_BS
+   tristate "Ciphers: AES, modes: ECB/CBC/CTR/XCTR/XTS modes (bit-sliced NEON)"
+   depends on KERNEL_MODE_NEON
+   select CRYPTO_SKCIPHER
+   select CRYPTO_AES_ARM64_NEON_BLK
+   select CRYPTO_LIB_AES
+   help
+     Length-preserving ciphers: AES cipher algorithms (FIPS-197)
+     with block cipher modes:
+     - ECB (Electronic Codebook) mode (NIST SP800-38A)
+     - CBC (Cipher Block Chaining) mode (NIST SP800-38A)
+     - CTR (Counter) mode (NIST SP800-38A)
+     - XCTR mode for HCTR2
+     - XTS (XOR Encrypt XOR with ciphertext stealing) mode (NIST SP800-38E
+       and IEEE 1619)
+
+     Architecture: arm64 using:
+     - bit-sliced algorithm
+     - NEON (Advanced SIMD) extensions
+
+ config CRYPTO_SM4_ARM64_CE
+   tristate "Ciphers: SM4 (ARMv8.2 Crypto Extensions)"
+   depends on KERNEL_MODE_NEON
+   select CRYPTO_ALGAPI
+   select CRYPTO_SM4
+   help
+     Block ciphers: SM4 cipher algorithms (OSCCA GB/T 32907-2016)
+
+     Architecture: arm64 using:
+     - ARMv8.2 Crypto Extensions
+     - NEON (Advanced SIMD) extensions
+
+ config CRYPTO_SM4_ARM64_CE_BLK
+   tristate "Ciphers: SM4, modes: ECB/CBC/CFB/CTR (ARMv8 Crypto Extensions)"
+   depends on KERNEL_MODE_NEON
+   select CRYPTO_SKCIPHER
+   select CRYPTO_SM4
+   help
+     Length-preserving ciphers: SM4 cipher algorithms (OSCCA GB/T 32907-2016)
+     with block cipher modes:
+     - ECB (Electronic Codebook) mode (NIST SP800-38A)
+     - CBC (Cipher Block Chaining) mode (NIST SP800-38A)
+     - CFB (Cipher Feedback) mode (NIST SP800-38A)
+     - CTR (Counter) mode (NIST SP800-38A)
+
+     Architecture: arm64 using:
+     - ARMv8 Crypto Extensions
+     - NEON (Advanced SIMD) extensions
+
+ config CRYPTO_SM4_ARM64_NEON_BLK
+   tristate "Ciphers: SM4, modes: ECB/CBC/CFB/CTR (NEON)"
+   depends on KERNEL_MODE_NEON
+   select CRYPTO_SKCIPHER
+   select CRYPTO_SM4
+   help
+     Length-preserving ciphers: SM4 cipher algorithms (OSCCA GB/T 32907-2016)
+     with block cipher modes:
+     - ECB (Electronic Codebook) mode (NIST SP800-38A)
+     - CBC (Cipher Block Chaining) mode (NIST SP800-38A)
+     - CFB (Cipher Feedback) mode (NIST SP800-38A)
+     - CTR (Counter) mode (NIST SP800-38A)
+
+     Architecture: arm64 using:
+     - NEON (Advanced SIMD) extensions

  config CRYPTO_AES_ARM64_CE_CCM
-   tristate "AES in CCM mode using ARMv8 Crypto Extensions"
+   tristate "AEAD cipher: AES in CCM mode (ARMv8 Crypto Extensions)"
    depends on ARM64 && KERNEL_MODE_NEON
    select CRYPTO_ALGAPI
    select CRYPTO_AES_ARM64_CE
    select CRYPTO_AEAD
    select CRYPTO_LIB_AES
+   help
+     AEAD cipher: AES cipher algorithms (FIPS-197) with
+     CCM (Counter with Cipher Block Chaining-Message Authentication Code)
+     authenticated encryption mode (NIST SP800-38C)

- config CRYPTO_AES_ARM64_CE_BLK
-   tristate "AES in ECB/CBC/CTR/XTS/XCTR modes using ARMv8 Crypto Extensions"
-   depends on KERNEL_MODE_NEON
-   select CRYPTO_SKCIPHER
-   select CRYPTO_AES_ARM64_CE
+     Architecture: arm64 using:
+     - ARMv8 Crypto Extensions
+     - NEON (Advanced SIMD) extensions

- config CRYPTO_AES_ARM64_NEON_BLK
-   tristate "AES in ECB/CBC/CTR/XTS/XCTR modes using NEON instructions"
-   depends on KERNEL_MODE_NEON
-   select CRYPTO_SKCIPHER
-   select CRYPTO_LIB_AES
-
- config CRYPTO_CHACHA20_NEON
-   tristate "ChaCha20, XChaCha20, and XChaCha12 stream ciphers using NEON instructions"
-   depends on KERNEL_MODE_NEON
-   select CRYPTO_SKCIPHER
-   select CRYPTO_LIB_CHACHA_GENERIC
-   select CRYPTO_ARCH_HAVE_LIB_CHACHA
-
- config CRYPTO_POLY1305_NEON
-   tristate "Poly1305 hash function using scalar or NEON instructions"
-   depends on KERNEL_MODE_NEON
+ config CRYPTO_CRCT10DIF_ARM64_CE
+   tristate "CRCT10DIF (PMULL)"
+   depends on KERNEL_MODE_NEON && CRC_T10DIF
    select CRYPTO_HASH
-   select CRYPTO_ARCH_HAVE_LIB_POLY1305
+   help
+     CRC16 CRC algorithm used for the T10 (SCSI) Data Integrity Field (DIF)

- config CRYPTO_NHPOLY1305_NEON
-   tristate "NHPoly1305 hash function using NEON instructions (for Adiantum)"
-   depends on KERNEL_MODE_NEON
-   select CRYPTO_NHPOLY1305
+     Architecture: arm64 using
+     - PMULL (Polynomial Multiply Long) instructions

- config CRYPTO_AES_ARM64_BS
-   tristate "AES in ECB/CBC/CTR/XTS modes using bit-sliced NEON algorithm"
-   depends on KERNEL_MODE_NEON
-   select CRYPTO_SKCIPHER
-   select CRYPTO_AES_ARM64_NEON_BLK
-   select CRYPTO_LIB_AES
+ endmenu

- endif
+74
arch/mips/crypto/Kconfig
+ # SPDX-License-Identifier: GPL-2.0
+
+ menu "Accelerated Cryptographic Algorithms for CPU (mips)"
+
+ config CRYPTO_CRC32_MIPS
+   tristate "CRC32c and CRC32"
+   depends on MIPS_CRC_SUPPORT
+   select CRYPTO_HASH
+   help
+     CRC32c and CRC32 CRC algorithms
+
+     Architecture: mips
+
+ config CRYPTO_POLY1305_MIPS
+   tristate "Hash functions: Poly1305"
+   depends on MIPS
+   select CRYPTO_ARCH_HAVE_LIB_POLY1305
+   help
+     Poly1305 authenticator algorithm (RFC7539)
+
+     Architecture: mips
+
+ config CRYPTO_MD5_OCTEON
+   tristate "Digests: MD5 (OCTEON)"
+   depends on CPU_CAVIUM_OCTEON
+   select CRYPTO_MD5
+   select CRYPTO_HASH
+   help
+     MD5 message digest algorithm (RFC1321)
+
+     Architecture: mips OCTEON using crypto instructions, when available
+
+ config CRYPTO_SHA1_OCTEON
+   tristate "Hash functions: SHA-1 (OCTEON)"
+   depends on CPU_CAVIUM_OCTEON
+   select CRYPTO_SHA1
+   select CRYPTO_HASH
+   help
+     SHA-1 secure hash algorithm (FIPS 180)
+
+     Architecture: mips OCTEON
+
+ config CRYPTO_SHA256_OCTEON
+   tristate "Hash functions: SHA-224 and SHA-256 (OCTEON)"
+   depends on CPU_CAVIUM_OCTEON
+   select CRYPTO_SHA256
+   select CRYPTO_HASH
+   help
+     SHA-224 and SHA-256 secure hash algorithms (FIPS 180)
+
+     Architecture: mips OCTEON using crypto instructions, when available
+
+ config CRYPTO_SHA512_OCTEON
+   tristate "Hash functions: SHA-384 and SHA-512 (OCTEON)"
+   depends on CPU_CAVIUM_OCTEON
+   select CRYPTO_SHA512
+   select CRYPTO_HASH
+   help
+     SHA-384 and SHA-512 secure hash algorithms (FIPS 180)
+
+     Architecture: mips OCTEON using crypto instructions, when available
+
+ config CRYPTO_CHACHA_MIPS
+   tristate "Ciphers: ChaCha20, XChaCha20, XChaCha12 (MIPS32r2)"
+   depends on CPU_MIPS32_R2
+   select CRYPTO_SKCIPHER
+   select CRYPTO_ARCH_HAVE_LIB_CHACHA
+   help
+     Length-preserving ciphers: ChaCha20, XChaCha20, and XChaCha12
+     stream cipher algorithms
+
+     Architecture: MIPS32r2
+
+ endmenu
+97
arch/powerpc/crypto/Kconfig
+ # SPDX-License-Identifier: GPL-2.0
+
+ menu "Accelerated Cryptographic Algorithms for CPU (powerpc)"
+
+ config CRYPTO_CRC32C_VPMSUM
+   tristate "CRC32c"
+   depends on PPC64 && ALTIVEC
+   select CRYPTO_HASH
+   select CRC32
+   help
+     CRC32c CRC algorithm with the iSCSI polynomial (RFC 3385 and RFC 3720)
+
+     Architecture: powerpc64 using
+     - AltiVec extensions
+
+     Enable on POWER8 and newer processors for improved performance.
+
+ config CRYPTO_CRCT10DIF_VPMSUM
+   tristate "CRC32T10DIF"
+   depends on PPC64 && ALTIVEC && CRC_T10DIF
+   select CRYPTO_HASH
+   help
+     CRC16 CRC algorithm used for the T10 (SCSI) Data Integrity Field (DIF)
+
+     Architecture: powerpc64 using
+     - AltiVec extensions
+
+     Enable on POWER8 and newer processors for improved performance.
+
+ config CRYPTO_VPMSUM_TESTER
+   tristate "CRC32c and CRC32T10DIF hardware acceleration tester"
+   depends on CRYPTO_CRCT10DIF_VPMSUM && CRYPTO_CRC32C_VPMSUM
+   help
+     Stress test for CRC32c and CRCT10DIF algorithms implemented with
+     powerpc64 AltiVec extensions (POWER8 vpmsum instructions).
+     Unless you are testing these algorithms, you don't need this.
+
+ config CRYPTO_MD5_PPC
+   tristate "Digests: MD5"
+   depends on PPC
+   select CRYPTO_HASH
+   help
+     MD5 message digest algorithm (RFC1321)
+
+     Architecture: powerpc
+
+ config CRYPTO_SHA1_PPC
+   tristate "Hash functions: SHA-1"
+   depends on PPC
+   help
+     SHA-1 secure hash algorithm (FIPS 180)
+
+     Architecture: powerpc
+
+ config CRYPTO_SHA1_PPC_SPE
+   tristate "Hash functions: SHA-1 (SPE)"
+   depends on PPC && SPE
+   help
+     SHA-1 secure hash algorithm (FIPS 180)
+
+     Architecture: powerpc using
+     - SPE (Signal Processing Engine) extensions
+
+ config CRYPTO_SHA256_PPC_SPE
+   tristate "Hash functions: SHA-224 and SHA-256 (SPE)"
+   depends on PPC && SPE
+   select CRYPTO_SHA256
+   select CRYPTO_HASH
+   help
+     SHA-224 and SHA-256 secure hash algorithms (FIPS 180)
+
+     Architecture: powerpc using
+     - SPE (Signal Processing Engine) extensions
+
+ config CRYPTO_AES_PPC_SPE
+   tristate "Ciphers: AES, modes: ECB/CBC/CTR/XTS (SPE)"
+   depends on PPC && SPE
+   select CRYPTO_SKCIPHER
+   help
+     Block ciphers: AES cipher algorithms (FIPS-197)
+     Length-preserving ciphers: AES with ECB, CBC, CTR, and XTS modes
+
+     Architecture: powerpc using:
+     - SPE (Signal Processing Engine) extensions
+
+     SPE is available for:
+     - Processor Type: Freescale 8500
+     - CPU selection: e500 (8540)
+
+     This module should only be used for low power (router) devices
+     without hardware AES acceleration (e.g. caam crypto). It reduces the
+     size of the AES tables from 16KB to 8KB + 256 bytes and mitigates
+     timining attacks. Nevertheless it might be not as secure as other
+     architecture specific assembler implementations that work on 1KB
+     tables or 256 bytes S-boxes.
+
+ endmenu
+135
arch/s390/crypto/Kconfig
+ # SPDX-License-Identifier: GPL-2.0
+
+ menu "Accelerated Cryptographic Algorithms for CPU (s390)"
+
+ config CRYPTO_CRC32_S390
+   tristate "CRC32c and CRC32"
+   depends on S390
+   select CRYPTO_HASH
+   select CRC32
+   help
+     CRC32c and CRC32 CRC algorithms
+
+     Architecture: s390
+
+     It is available with IBM z13 or later.
+
+ config CRYPTO_SHA512_S390
+   tristate "Hash functions: SHA-384 and SHA-512"
+   depends on S390
+   select CRYPTO_HASH
+   help
+     SHA-384 and SHA-512 secure hash algorithms (FIPS 180)
+
+     Architecture: s390
+
+     It is available as of z10.
+
+ config CRYPTO_SHA1_S390
+   tristate "Hash functions: SHA-1"
+   depends on S390
+   select CRYPTO_HASH
+   help
+     SHA-1 secure hash algorithm (FIPS 180)
+
+     Architecture: s390
+
+     It is available as of z990.
+
+ config CRYPTO_SHA256_S390
+   tristate "Hash functions: SHA-224 and SHA-256"
+   depends on S390
+   select CRYPTO_HASH
+   help
+     SHA-224 and SHA-256 secure hash algorithms (FIPS 180)
+
+     Architecture: s390
+
+     It is available as of z9.
+
+ config CRYPTO_SHA3_256_S390
+   tristate "Hash functions: SHA3-224 and SHA3-256"
+   depends on S390
+   select CRYPTO_HASH
+   help
+     SHA3-224 and SHA3-256 secure hash algorithms (FIPS 202)
+
+     Architecture: s390
+
+     It is available as of z14.
+
+ config CRYPTO_SHA3_512_S390
+   tristate "Hash functions: SHA3-384 and SHA3-512"
+   depends on S390
+   select CRYPTO_HASH
+   help
+     SHA3-384 and SHA3-512 secure hash algorithms (FIPS 202)
+
+     Architecture: s390
+
+     It is available as of z14.
+
+ config CRYPTO_GHASH_S390
+   tristate "Hash functions: GHASH"
+   depends on S390
+   select CRYPTO_HASH
+   help
+     GCM GHASH hash function (NIST SP800-38D)
+
+     Architecture: s390
+
+     It is available as of z196.
+
+ config CRYPTO_AES_S390
+   tristate "Ciphers: AES, modes: ECB, CBC, CTR, XTS, GCM"
+   depends on S390
+   select CRYPTO_ALGAPI
+   select CRYPTO_SKCIPHER
+   help
+     Block cipher: AES cipher algorithms (FIPS 197)
+     AEAD cipher: AES with GCM
+     Length-preserving ciphers: AES with ECB, CBC, XTS, and CTR modes
+
+     Architecture: s390
+
+     As of z9 the ECB and CBC modes are hardware accelerated
+     for 128 bit keys.
+
+     As of z10 the ECB and CBC modes are hardware accelerated
+     for all AES key sizes.
+
+     As of z196 the CTR mode is hardware accelerated for all AES
+     key sizes and XTS mode is hardware accelerated for 256 and
+     512 bit keys.
+
+ config CRYPTO_DES_S390
+   tristate "Ciphers: DES and Triple DES EDE, modes: ECB, CBC, CTR"
+   depends on S390
+   select CRYPTO_ALGAPI
+   select CRYPTO_SKCIPHER
+   select CRYPTO_LIB_DES
+   help
+     Block ciphers: DES (FIPS 46-2) cipher algorithm
+     Block ciphers: Triple DES EDE (FIPS 46-3) cipher algorithm
+     Length-preserving ciphers: DES with ECB, CBC, and CTR modes
+     Length-preserving ciphers: Triple DES EDED with ECB, CBC, and CTR modes
+
+     Architecture: s390
+
+     As of z990 the ECB and CBC mode are hardware accelerated.
+     As of z196 the CTR mode is hardware accelerated.
+
+ config CRYPTO_CHACHA_S390
+   tristate "Ciphers: ChaCha20"
+   depends on S390
+   select CRYPTO_SKCIPHER
+   select CRYPTO_LIB_CHACHA_GENERIC
+   select CRYPTO_ARCH_HAVE_LIB_CHACHA
+   help
+     Length-preserving cipher: ChaCha20 stream cipher (RFC 7539)
+
+     Architecture: s390
+
+     It is available as of z13.
+
+ endmenu
+90
arch/sparc/crypto/Kconfig
+ # SPDX-License-Identifier: GPL-2.0
+
+ menu "Accelerated Cryptographic Algorithms for CPU (sparc64)"
+
+ config CRYPTO_DES_SPARC64
+   tristate "Ciphers: DES and Triple DES EDE, modes: ECB/CBC"
+   depends on SPARC64
+   select CRYPTO_ALGAPI
+   select CRYPTO_LIB_DES
+   select CRYPTO_SKCIPHER
+   help
+     Block cipher: DES (FIPS 46-2) cipher algorithm
+     Block cipher: Triple DES EDE (FIPS 46-3) cipher algorithm
+     Length-preserving ciphers: DES with ECB and CBC modes
+     Length-preserving ciphers: Tripe DES EDE with ECB and CBC modes
+
+     Architecture: sparc64
+
+ config CRYPTO_CRC32C_SPARC64
+   tristate "CRC32c"
+   depends on SPARC64
+   select CRYPTO_HASH
+   select CRC32
+   help
+     CRC32c CRC algorithm with the iSCSI polynomial (RFC 3385 and RFC 3720)
+
+     Architecture: sparc64
+
+ config CRYPTO_MD5_SPARC64
+   tristate "Digests: MD5"
+   depends on SPARC64
+   select CRYPTO_MD5
+   select CRYPTO_HASH
+   help
+     MD5 message digest algorithm (RFC1321)
+
+     Architecture: sparc64 using crypto instructions, when available
+
+ config CRYPTO_SHA1_SPARC64
+   tristate "Hash functions: SHA-1"
+   depends on SPARC64
+   select CRYPTO_SHA1
+   select CRYPTO_HASH
+   help
+     SHA-1 secure hash algorithm (FIPS 180)
+
+     Architecture: sparc64
+
+ config CRYPTO_SHA256_SPARC64
+   tristate "Hash functions: SHA-224 and SHA-256"
+   depends on SPARC64
+   select CRYPTO_SHA256
+   select CRYPTO_HASH
+   help
+     SHA-224 and SHA-256 secure hash algorithms (FIPS 180)
+
+     Architecture: sparc64 using crypto instructions, when available
+
+ config CRYPTO_SHA512_SPARC64
+   tristate "Hash functions: SHA-384 and SHA-512"
+   depends on SPARC64
+   select CRYPTO_SHA512
+   select CRYPTO_HASH
+   help
+     SHA-384 and SHA-512 secure hash algorithms (FIPS 180)
+
+     Architecture: sparc64 using crypto instructions, when available
+
+ config CRYPTO_AES_SPARC64
+   tristate "Ciphers: AES, modes: ECB, CBC, CTR"
+   depends on SPARC64
+   select CRYPTO_SKCIPHER
+   help
+     Block ciphers: AES cipher algorithms (FIPS-197)
+     Length-preseving ciphers: AES with ECB, CBC, and CTR modes
+
+     Architecture: sparc64 using crypto instructions
+
+ config CRYPTO_CAMELLIA_SPARC64
+   tristate "Ciphers: Camellia, modes: ECB, CBC"
+   depends on SPARC64
+   select CRYPTO_ALGAPI
+   select CRYPTO_SKCIPHER
+   help
+     Block ciphers: Camellia cipher algorithms
+     Length-preserving ciphers: Camellia with ECB and CBC modes
+
+     Architecture: sparc64
+
+ endmenu
+484
arch/x86/crypto/Kconfig
··· 1 + # SPDX-License-Identifier: GPL-2.0 2 + 3 + menu "Accelerated Cryptographic Algorithms for CPU (x86)" 4 + 5 + config CRYPTO_CURVE25519_X86 6 + tristate "Public key crypto: Curve25519 (ADX)" 7 + depends on X86 && 64BIT 8 + select CRYPTO_LIB_CURVE25519_GENERIC 9 + select CRYPTO_ARCH_HAVE_LIB_CURVE25519 10 + help 11 + Curve25519 algorithm 12 + 13 + Architecture: x86_64 using: 14 + - ADX (large integer arithmetic) 15 + 16 + config CRYPTO_AES_NI_INTEL 17 + tristate "Ciphers: AES, modes: ECB, CBC, CTS, CTR, XTR, XTS, GCM (AES-NI)" 18 + depends on X86 19 + select CRYPTO_AEAD 20 + select CRYPTO_LIB_AES 21 + select CRYPTO_ALGAPI 22 + select CRYPTO_SKCIPHER 23 + select CRYPTO_SIMD 24 + help 25 + Block cipher: AES cipher algorithms 26 + AEAD cipher: AES with GCM 27 + Length-preserving ciphers: AES with ECB, CBC, CTS, CTR, XTR, XTS 28 + 29 + Architecture: x86 (32-bit and 64-bit) using: 30 + - AES-NI (AES new instructions) 31 + 32 + config CRYPTO_BLOWFISH_X86_64 33 + tristate "Ciphers: Blowfish, modes: ECB, CBC" 34 + depends on X86 && 64BIT 35 + select CRYPTO_SKCIPHER 36 + select CRYPTO_BLOWFISH_COMMON 37 + imply CRYPTO_CTR 38 + help 39 + Block cipher: Blowfish cipher algorithm 40 + Length-preserving ciphers: Blowfish with ECB and CBC modes 41 + 42 + Architecture: x86_64 43 + 44 + config CRYPTO_CAMELLIA_X86_64 45 + tristate "Ciphers: Camellia with modes: ECB, CBC" 46 + depends on X86 && 64BIT 47 + select CRYPTO_SKCIPHER 48 + imply CRYPTO_CTR 49 + help 50 + Block cipher: Camellia cipher algorithms 51 + Length-preserving ciphers: Camellia with ECB and CBC modes 52 + 53 + Architecture: x86_64 54 + 55 + config CRYPTO_CAMELLIA_AESNI_AVX_X86_64 56 + tristate "Ciphers: Camellia with modes: ECB, CBC (AES-NI/AVX)" 57 + depends on X86 && 64BIT 58 + select CRYPTO_SKCIPHER 59 + select CRYPTO_CAMELLIA_X86_64 60 + select CRYPTO_SIMD 61 + imply CRYPTO_XTS 62 + help 63 + Length-preserving ciphers: Camellia with ECB and CBC modes 64 + 65 + Architecture: x86_64 using: 66 + - AES-NI (AES New Instructions) 67 + - AVX (Advanced Vector Extensions) 68 + 69 + config CRYPTO_CAMELLIA_AESNI_AVX2_X86_64 70 + tristate "Ciphers: Camellia with modes: ECB, CBC (AES-NI/AVX2)" 71 + depends on X86 && 64BIT 72 + select CRYPTO_CAMELLIA_AESNI_AVX_X86_64 73 + help 74 + Length-preserving ciphers: Camellia with ECB and CBC modes 75 + 76 + Architecture: x86_64 using: 77 + - AES-NI (AES New Instructions) 78 + - AVX2 (Advanced Vector Extensions 2) 79 + 80 + config CRYPTO_CAST5_AVX_X86_64 81 + tristate "Ciphers: CAST5 with modes: ECB, CBC (AVX)" 82 + depends on X86 && 64BIT 83 + select CRYPTO_SKCIPHER 84 + select CRYPTO_CAST5 85 + select CRYPTO_CAST_COMMON 86 + select CRYPTO_SIMD 87 + imply CRYPTO_CTR 88 + help 89 + Length-preserving ciphers: CAST5 (CAST-128) cipher algorithm 90 + (RFC2144) with ECB and CBC modes 91 + 92 + Architecture: x86_64 using: 93 + - AVX (Advanced Vector Extensions) 94 + 95 + Processes 16 blocks in parallel. 96 + 97 + config CRYPTO_CAST6_AVX_X86_64 98 + tristate "Ciphers: CAST6 with modes: ECB, CBC (AVX)" 99 + depends on X86 && 64BIT 100 + select CRYPTO_SKCIPHER 101 + select CRYPTO_CAST6 102 + select CRYPTO_CAST_COMMON 103 + select CRYPTO_SIMD 104 + imply CRYPTO_XTS 105 + imply CRYPTO_CTR 106 + help 107 + Length-preserving ciphers: CAST6 (CAST-256) cipher algorithm 108 + (RFC2612) with ECB and CBC modes 109 + 110 + Architecture: x86_64 using: 111 + - AVX (Advanced Vector Extensions) 112 + 113 + Processes eight blocks in parallel. 
114 + 115 + config CRYPTO_DES3_EDE_X86_64 116 + tristate "Ciphers: Triple DES EDE with modes: ECB, CBC" 117 + depends on X86 && 64BIT 118 + select CRYPTO_SKCIPHER 119 + select CRYPTO_LIB_DES 120 + imply CRYPTO_CTR 121 + help 122 + Block cipher: Triple DES EDE (FIPS 46-3) cipher algorithm 123 + Length-preserving ciphers: Triple DES EDE with ECB and CBC modes 124 + 125 + Architecture: x86_64 126 + 127 + Processes one or three blocks in parallel. 128 + 129 + config CRYPTO_SERPENT_SSE2_X86_64 130 + tristate "Ciphers: Serpent with modes: ECB, CBC (SSE2)" 131 + depends on X86 && 64BIT 132 + select CRYPTO_SKCIPHER 133 + select CRYPTO_SERPENT 134 + select CRYPTO_SIMD 135 + imply CRYPTO_CTR 136 + help 137 + Length-preserving ciphers: Serpent cipher algorithm 138 + with ECB and CBC modes 139 + 140 + Architecture: x86_64 using: 141 + - SSE2 (Streaming SIMD Extensions 2) 142 + 143 + Processes eight blocks in parallel. 144 + 145 + config CRYPTO_SERPENT_SSE2_586 146 + tristate "Ciphers: Serpent with modes: ECB, CBC (32-bit with SSE2)" 147 + depends on X86 && !64BIT 148 + select CRYPTO_SKCIPHER 149 + select CRYPTO_SERPENT 150 + select CRYPTO_SIMD 151 + imply CRYPTO_CTR 152 + help 153 + Length-preserving ciphers: Serpent cipher algorithm 154 + with ECB and CBC modes 155 + 156 + Architecture: x86 (32-bit) using: 157 + - SSE2 (Streaming SIMD Extensions 2) 158 + 159 + Processes four blocks in parallel. 160 + 161 + config CRYPTO_SERPENT_AVX_X86_64 162 + tristate "Ciphers: Serpent with modes: ECB, CBC (AVX)" 163 + depends on X86 && 64BIT 164 + select CRYPTO_SKCIPHER 165 + select CRYPTO_SERPENT 166 + select CRYPTO_SIMD 167 + imply CRYPTO_XTS 168 + imply CRYPTO_CTR 169 + help 170 + Length-preserving ciphers: Serpent cipher algorithm 171 + with ECB and CBC modes 172 + 173 + Architecture: x86_64 using: 174 + - AVX (Advanced Vector Extensions) 175 + 176 + Processes eight blocks in parallel. 177 + 178 + config CRYPTO_SERPENT_AVX2_X86_64 179 + tristate "Ciphers: Serpent with modes: ECB, CBC (AVX2)" 180 + depends on X86 && 64BIT 181 + select CRYPTO_SERPENT_AVX_X86_64 182 + help 183 + Length-preserving ciphers: Serpent cipher algorithm 184 + with ECB and CBC modes 185 + 186 + Architecture: x86_64 using: 187 + - AVX2 (Advanced Vector Extensions 2) 188 + 189 + Processes 16 blocks in parallel. 190 + 191 + config CRYPTO_SM4_AESNI_AVX_X86_64 192 + tristate "Ciphers: SM4 with modes: ECB, CBC, CFB, CTR (AES-NI/AVX)" 193 + depends on X86 && 64BIT 194 + select CRYPTO_SKCIPHER 195 + select CRYPTO_SIMD 196 + select CRYPTO_ALGAPI 197 + select CRYPTO_SM4 198 + help 199 + Length-preserving ciphers: SM4 cipher algorithms 200 + (OSCCA GB/T 32907-2016) with ECB, CBC, CFB, and CTR modes 201 + 202 + Architecture: x86_64 using: 203 + - AES-NI (AES New Instructions) 204 + - AVX (Advanced Vector Extensions) 205 + 206 + Through two affine transforms, 207 + we can use the AES S-Box to simulate the SM4 S-Box to achieve the 208 + effect of instruction acceleration. 209 + 210 + If unsure, say N. 
211 + 212 + config CRYPTO_SM4_AESNI_AVX2_X86_64 213 + tristate "Ciphers: SM4 with modes: ECB, CBC, CFB, CTR (AES-NI/AVX2)" 214 + depends on X86 && 64BIT 215 + select CRYPTO_SKCIPHER 216 + select CRYPTO_SIMD 217 + select CRYPTO_ALGAPI 218 + select CRYPTO_SM4 219 + select CRYPTO_SM4_AESNI_AVX_X86_64 220 + help 221 + Length-preserving ciphers: SM4 cipher algorithms 222 + (OSCCA GB/T 32907-2016) with ECB, CBC, CFB, and CTR modes 223 + 224 + Architecture: x86_64 using: 225 + - AES-NI (AES New Instructions) 226 + - AVX2 (Advanced Vector Extensions 2) 227 + 228 + Through two affine transforms, 229 + we can use the AES S-Box to simulate the SM4 S-Box to achieve the 230 + effect of instruction acceleration. 231 + 232 + If unsure, say N. 233 + 234 + config CRYPTO_TWOFISH_586 235 + tristate "Ciphers: Twofish (32-bit)" 236 + depends on (X86 || UML_X86) && !64BIT 237 + select CRYPTO_ALGAPI 238 + select CRYPTO_TWOFISH_COMMON 239 + imply CRYPTO_CTR 240 + help 241 + Block cipher: Twofish cipher algorithm 242 + 243 + Architecture: x86 (32-bit) 244 + 245 + config CRYPTO_TWOFISH_X86_64 246 + tristate "Ciphers: Twofish" 247 + depends on (X86 || UML_X86) && 64BIT 248 + select CRYPTO_ALGAPI 249 + select CRYPTO_TWOFISH_COMMON 250 + imply CRYPTO_CTR 251 + help 252 + Block cipher: Twofish cipher algorithm 253 + 254 + Architecture: x86_64 255 + 256 + config CRYPTO_TWOFISH_X86_64_3WAY 257 + tristate "Ciphers: Twofish with modes: ECB, CBC (3-way parallel)" 258 + depends on X86 && 64BIT 259 + select CRYPTO_SKCIPHER 260 + select CRYPTO_TWOFISH_COMMON 261 + select CRYPTO_TWOFISH_X86_64 262 + help 263 + Length-preserving cipher: Twofish cipher algorithm 264 + with ECB and CBC modes 265 + 266 + Architecture: x86_64 267 + 268 + Processes three blocks in parallel, better utilizing resources of 269 + out-of-order CPUs. 270 + 271 + config CRYPTO_TWOFISH_AVX_X86_64 272 + tristate "Ciphers: Twofish with modes: ECB, CBC (AVX)" 273 + depends on X86 && 64BIT 274 + select CRYPTO_SKCIPHER 275 + select CRYPTO_SIMD 276 + select CRYPTO_TWOFISH_COMMON 277 + select CRYPTO_TWOFISH_X86_64 278 + select CRYPTO_TWOFISH_X86_64_3WAY 279 + imply CRYPTO_XTS 280 + help 281 + Length-preserving cipher: Twofish cipher algorithm 282 + with ECB and CBC modes 283 + 284 + Architecture: x86_64 using: 285 + - AVX (Advanced Vector Extensions) 286 + 287 + Processes eight blocks in parallel. 288 + 289 + config CRYPTO_ARIA_AESNI_AVX_X86_64 290 + tristate "Ciphers: ARIA with modes: ECB, CTR (AES-NI/AVX/GFNI)" 291 + depends on X86 && 64BIT 292 + select CRYPTO_SKCIPHER 293 + select CRYPTO_SIMD 294 + select CRYPTO_ALGAPI 295 + select CRYPTO_ARIA 296 + help 297 + Length-preserving cipher: ARIA cipher algorithms 298 + (RFC 5794) with ECB and CTR modes 299 + 300 + Architecture: x86_64 using: 301 + - AES-NI (AES New Instructions) 302 + - AVX (Advanced Vector Extensions) 303 + - GFNI (Galois Field New Instructions) 304 + 305 + Processes 16 blocks in parallel. 
306 +
307 + config CRYPTO_CHACHA20_X86_64
308 + tristate "Ciphers: ChaCha20, XChaCha20, XChaCha12 (SSSE3/AVX2/AVX-512VL)"
309 + depends on X86 && 64BIT
310 + select CRYPTO_SKCIPHER
311 + select CRYPTO_LIB_CHACHA_GENERIC
312 + select CRYPTO_ARCH_HAVE_LIB_CHACHA
313 + help
314 + Length-preserving ciphers: ChaCha20, XChaCha20, and XChaCha12
315 + stream cipher algorithms
316 +
317 + Architecture: x86_64 using:
318 + - SSSE3 (Supplemental SSE3)
319 + - AVX2 (Advanced Vector Extensions 2)
320 + - AVX-512VL (Advanced Vector Extensions-512VL)
321 +
322 + config CRYPTO_AEGIS128_AESNI_SSE2
323 + tristate "AEAD ciphers: AEGIS-128 (AES-NI/SSE2)"
324 + depends on X86 && 64BIT
325 + select CRYPTO_AEAD
326 + select CRYPTO_SIMD
327 + help
328 + AEGIS-128 AEAD algorithm
329 +
330 + Architecture: x86_64 using:
331 + - AES-NI (AES New Instructions)
332 + - SSE2 (Streaming SIMD Extensions 2)
333 +
334 + config CRYPTO_NHPOLY1305_SSE2
335 + tristate "Hash functions: NHPoly1305 (SSE2)"
336 + depends on X86 && 64BIT
337 + select CRYPTO_NHPOLY1305
338 + help
339 + NHPoly1305 hash function for Adiantum
340 +
341 + Architecture: x86_64 using:
342 + - SSE2 (Streaming SIMD Extensions 2)
343 +
344 + config CRYPTO_NHPOLY1305_AVX2
345 + tristate "Hash functions: NHPoly1305 (AVX2)"
346 + depends on X86 && 64BIT
347 + select CRYPTO_NHPOLY1305
348 + help
349 + NHPoly1305 hash function for Adiantum
350 +
351 + Architecture: x86_64 using:
352 + - AVX2 (Advanced Vector Extensions 2)
353 +
354 + config CRYPTO_BLAKE2S_X86
355 + bool "Hash functions: BLAKE2s (SSSE3/AVX-512)"
356 + depends on X86 && 64BIT
357 + select CRYPTO_LIB_BLAKE2S_GENERIC
358 + select CRYPTO_ARCH_HAVE_LIB_BLAKE2S
359 + help
360 + BLAKE2s cryptographic hash function (RFC 7693)
361 +
362 + Architecture: x86_64 using:
363 + - SSSE3 (Supplemental SSE3)
364 + - AVX-512 (Advanced Vector Extensions-512)
365 +
366 + config CRYPTO_POLYVAL_CLMUL_NI
367 + tristate "Hash functions: POLYVAL (CLMUL-NI)"
368 + depends on X86 && 64BIT
369 + select CRYPTO_POLYVAL
370 + help
371 + POLYVAL hash function for HCTR2
372 +
373 + Architecture: x86_64 using:
374 + - CLMUL-NI (carry-less multiplication new instructions)
375 +
376 + config CRYPTO_POLY1305_X86_64
377 + tristate "Hash functions: Poly1305 (SSE2/AVX2)"
378 + depends on X86 && 64BIT
379 + select CRYPTO_LIB_POLY1305_GENERIC
380 + select CRYPTO_ARCH_HAVE_LIB_POLY1305
381 + help
382 + Poly1305 authenticator algorithm (RFC7539)
383 +
384 + Architecture: x86_64 using:
385 + - SSE2 (Streaming SIMD Extensions 2)
386 + - AVX2 (Advanced Vector Extensions 2)
387 +
388 + config CRYPTO_SHA1_SSSE3
389 + tristate "Hash functions: SHA-1 (SSSE3/AVX/AVX2/SHA-NI)"
390 + depends on X86 && 64BIT
391 + select CRYPTO_SHA1
392 + select CRYPTO_HASH
393 + help
394 + SHA-1 secure hash algorithm (FIPS 180)
395 +
396 + Architecture: x86_64 using:
397 + - SSSE3 (Supplemental SSE3)
398 + - AVX (Advanced Vector Extensions)
399 + - AVX2 (Advanced Vector Extensions 2)
400 + - SHA-NI (SHA Extensions New Instructions)
401 +
402 + config CRYPTO_SHA256_SSSE3
403 + tristate "Hash functions: SHA-224 and SHA-256 (SSSE3/AVX/AVX2/SHA-NI)"
404 + depends on X86 && 64BIT
405 + select CRYPTO_SHA256
406 + select CRYPTO_HASH
407 + help
408 + SHA-224 and SHA-256 secure hash algorithms (FIPS 180)
409 +
410 + Architecture: x86_64 using:
411 + - SSSE3 (Supplemental SSE3)
412 + - AVX (Advanced Vector Extensions)
413 + - AVX2 (Advanced Vector Extensions 2)
414 + - SHA-NI (SHA Extensions New Instructions)
415 +
416 + config CRYPTO_SHA512_SSSE3
417 + tristate "Hash functions: SHA-384 and SHA-512 (SSSE3/AVX/AVX2)"
418 + depends on X86 && 64BIT
419 + select CRYPTO_SHA512
420 + select CRYPTO_HASH
421 + help
422 + SHA-384 and SHA-512 secure hash algorithms (FIPS 180)
423 +
424 + Architecture: x86_64 using:
425 + - SSSE3 (Supplemental SSE3)
426 + - AVX (Advanced Vector Extensions)
427 + - AVX2 (Advanced Vector Extensions 2)
428 +
429 + config CRYPTO_SM3_AVX_X86_64
430 + tristate "Hash functions: SM3 (AVX)"
431 + depends on X86 && 64BIT
432 + select CRYPTO_HASH
433 + select CRYPTO_SM3
434 + help
435 + SM3 secure hash function as defined by OSCCA GM/T 0004-2012 SM3
436 +
437 + Architecture: x86_64 using:
438 + - AVX (Advanced Vector Extensions)
439 +
440 + If unsure, say N.
441 +
442 + config CRYPTO_GHASH_CLMUL_NI_INTEL
443 + tristate "Hash functions: GHASH (CLMUL-NI)"
444 + depends on X86 && 64BIT
445 + select CRYPTO_CRYPTD
446 + help
447 + GCM GHASH hash function (NIST SP800-38D)
448 +
449 + Architecture: x86_64 using:
450 + - CLMUL-NI (carry-less multiplication new instructions)
451 +
452 + config CRYPTO_CRC32C_INTEL
453 + tristate "CRC32c (SSE4.2/PCLMULQDQ)"
454 + depends on X86
455 + select CRYPTO_HASH
456 + help
457 + CRC32c CRC algorithm with the iSCSI polynomial (RFC 3385 and RFC 3720)
458 +
459 + Architecture: x86 (32-bit and 64-bit) using:
460 + - SSE4.2 (Streaming SIMD Extensions 4.2) CRC32 instruction
461 + - PCLMULQDQ (carry-less multiplication)
462 +
463 + config CRYPTO_CRC32_PCLMUL
464 + tristate "CRC32 (PCLMULQDQ)"
465 + depends on X86
466 + select CRYPTO_HASH
467 + select CRC32
468 + help
469 + CRC32 CRC algorithm (IEEE 802.3)
470 +
471 + Architecture: x86 (32-bit and 64-bit) using:
472 + - PCLMULQDQ (carry-less multiplication)
473 +
474 + config CRYPTO_CRCT10DIF_PCLMUL
475 + tristate "CRCT10DIF (PCLMULQDQ)"
476 + depends on X86 && 64BIT && CRC_T10DIF
477 + select CRYPTO_HASH
478 + help
479 + CRC16 CRC algorithm used for the T10 (SCSI) Data Integrity Field (DIF)
480 +
481 + Architecture: x86_64 using:
482 + - PCLMULQDQ (carry-less multiplication)
483 +
484 + endmenu
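The CRYPTO_CRC32C_INTEL entry above is built on the SSE4.2 CRC32 instruction. As a hedged userspace illustration (not part of this commit), the same instruction is reachable through compiler intrinsics; the crc32c_sse42 name and the ~0 seed/final-xor convention are assumptions of the sketch, not taken from the kernel driver:

#include <stddef.h>
#include <stdint.h>
#include <nmmintrin.h>			/* SSE4.2 intrinsics; compile with -msse4.2 */

/* The SSE4.2 CRC32 instruction hard-wires the Castagnoli polynomial
 * (0x1EDC6F41), which is exactly the iSCSI polynomial named above. */
static uint32_t crc32c_sse42(uint32_t seed, const uint8_t *p, size_t len)
{
	uint64_t crc = seed ^ 0xffffffffu;	/* assumed ~0 init convention */

	while (len >= 8) {			/* 8 bytes per instruction */
		uint64_t v;

		__builtin_memcpy(&v, p, 8);	/* unaligned-safe load */
		crc = _mm_crc32_u64(crc, v);
		p += 8;
		len -= 8;
	}
	while (len--)				/* byte-wise tail */
		crc = _mm_crc32_u8((uint32_t)crc, *p++);

	return (uint32_t)crc ^ 0xffffffffu;	/* assumed final inversion */
}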
+3
arch/x86/crypto/Makefile
··· 100 100 obj-$(CONFIG_CRYPTO_SM4_AESNI_AVX2_X86_64) += sm4-aesni-avx2-x86_64.o
101 101 sm4-aesni-avx2-x86_64-y := sm4-aesni-avx2-asm_64.o sm4_aesni_avx2_glue.o
102 102
103 + obj-$(CONFIG_CRYPTO_ARIA_AESNI_AVX_X86_64) += aria-aesni-avx-x86_64.o
104 + aria-aesni-avx-x86_64-y := aria-aesni-avx-asm_64.o aria_aesni_avx_glue.o
105 +
103 106 quiet_cmd_perlasm = PERLASM $@
104 107 cmd_perlasm = $(PERL) $< > $@
105 108 $(obj)/%.S: $(src)/%.pl FORCE
+1303
arch/x86/crypto/aria-aesni-avx-asm_64.S
··· 1 + /* SPDX-License-Identifier: GPL-2.0-or-later */ 2 + /* 3 + * ARIA Cipher 16-way parallel algorithm (AVX) 4 + * 5 + * Copyright (c) 2022 Taehee Yoo <ap420073@gmail.com> 6 + * 7 + */ 8 + 9 + #include <linux/linkage.h> 10 + #include <asm/frame.h> 11 + 12 + /* struct aria_ctx: */ 13 + #define enc_key 0 14 + #define dec_key 272 15 + #define rounds 544 16 + 17 + /* register macros */ 18 + #define CTX %rdi 19 + 20 + 21 + #define BV8(a0, a1, a2, a3, a4, a5, a6, a7) \ 22 + ( (((a0) & 1) << 0) | \ 23 + (((a1) & 1) << 1) | \ 24 + (((a2) & 1) << 2) | \ 25 + (((a3) & 1) << 3) | \ 26 + (((a4) & 1) << 4) | \ 27 + (((a5) & 1) << 5) | \ 28 + (((a6) & 1) << 6) | \ 29 + (((a7) & 1) << 7) ) 30 + 31 + #define BM8X8(l0, l1, l2, l3, l4, l5, l6, l7) \ 32 + ( ((l7) << (0 * 8)) | \ 33 + ((l6) << (1 * 8)) | \ 34 + ((l5) << (2 * 8)) | \ 35 + ((l4) << (3 * 8)) | \ 36 + ((l3) << (4 * 8)) | \ 37 + ((l2) << (5 * 8)) | \ 38 + ((l1) << (6 * 8)) | \ 39 + ((l0) << (7 * 8)) ) 40 + 41 + #define inc_le128(x, minus_one, tmp) \ 42 + vpcmpeqq minus_one, x, tmp; \ 43 + vpsubq minus_one, x, x; \ 44 + vpslldq $8, tmp, tmp; \ 45 + vpsubq tmp, x, x; 46 + 47 + #define filter_8bit(x, lo_t, hi_t, mask4bit, tmp0) \ 48 + vpand x, mask4bit, tmp0; \ 49 + vpandn x, mask4bit, x; \ 50 + vpsrld $4, x, x; \ 51 + \ 52 + vpshufb tmp0, lo_t, tmp0; \ 53 + vpshufb x, hi_t, x; \ 54 + vpxor tmp0, x, x; 55 + 56 + #define transpose_4x4(x0, x1, x2, x3, t1, t2) \ 57 + vpunpckhdq x1, x0, t2; \ 58 + vpunpckldq x1, x0, x0; \ 59 + \ 60 + vpunpckldq x3, x2, t1; \ 61 + vpunpckhdq x3, x2, x2; \ 62 + \ 63 + vpunpckhqdq t1, x0, x1; \ 64 + vpunpcklqdq t1, x0, x0; \ 65 + \ 66 + vpunpckhqdq x2, t2, x3; \ 67 + vpunpcklqdq x2, t2, x2; 68 + 69 + #define byteslice_16x16b(a0, b0, c0, d0, \ 70 + a1, b1, c1, d1, \ 71 + a2, b2, c2, d2, \ 72 + a3, b3, c3, d3, \ 73 + st0, st1) \ 74 + vmovdqu d2, st0; \ 75 + vmovdqu d3, st1; \ 76 + transpose_4x4(a0, a1, a2, a3, d2, d3); \ 77 + transpose_4x4(b0, b1, b2, b3, d2, d3); \ 78 + vmovdqu st0, d2; \ 79 + vmovdqu st1, d3; \ 80 + \ 81 + vmovdqu a0, st0; \ 82 + vmovdqu a1, st1; \ 83 + transpose_4x4(c0, c1, c2, c3, a0, a1); \ 84 + transpose_4x4(d0, d1, d2, d3, a0, a1); \ 85 + \ 86 + vmovdqu .Lshufb_16x16b, a0; \ 87 + vmovdqu st1, a1; \ 88 + vpshufb a0, a2, a2; \ 89 + vpshufb a0, a3, a3; \ 90 + vpshufb a0, b0, b0; \ 91 + vpshufb a0, b1, b1; \ 92 + vpshufb a0, b2, b2; \ 93 + vpshufb a0, b3, b3; \ 94 + vpshufb a0, a1, a1; \ 95 + vpshufb a0, c0, c0; \ 96 + vpshufb a0, c1, c1; \ 97 + vpshufb a0, c2, c2; \ 98 + vpshufb a0, c3, c3; \ 99 + vpshufb a0, d0, d0; \ 100 + vpshufb a0, d1, d1; \ 101 + vpshufb a0, d2, d2; \ 102 + vpshufb a0, d3, d3; \ 103 + vmovdqu d3, st1; \ 104 + vmovdqu st0, d3; \ 105 + vpshufb a0, d3, a0; \ 106 + vmovdqu d2, st0; \ 107 + \ 108 + transpose_4x4(a0, b0, c0, d0, d2, d3); \ 109 + transpose_4x4(a1, b1, c1, d1, d2, d3); \ 110 + vmovdqu st0, d2; \ 111 + vmovdqu st1, d3; \ 112 + \ 113 + vmovdqu b0, st0; \ 114 + vmovdqu b1, st1; \ 115 + transpose_4x4(a2, b2, c2, d2, b0, b1); \ 116 + transpose_4x4(a3, b3, c3, d3, b0, b1); \ 117 + vmovdqu st0, b0; \ 118 + vmovdqu st1, b1; \ 119 + /* does not adjust output bytes inside vectors */ 120 + 121 + #define debyteslice_16x16b(a0, b0, c0, d0, \ 122 + a1, b1, c1, d1, \ 123 + a2, b2, c2, d2, \ 124 + a3, b3, c3, d3, \ 125 + st0, st1) \ 126 + vmovdqu d2, st0; \ 127 + vmovdqu d3, st1; \ 128 + transpose_4x4(a0, a1, a2, a3, d2, d3); \ 129 + transpose_4x4(b0, b1, b2, b3, d2, d3); \ 130 + vmovdqu st0, d2; \ 131 + vmovdqu st1, d3; \ 132 + \ 133 + vmovdqu a0, st0; \ 134 + vmovdqu a1, st1; \ 135 + 
transpose_4x4(c0, c1, c2, c3, a0, a1); \ 136 + transpose_4x4(d0, d1, d2, d3, a0, a1); \ 137 + \ 138 + vmovdqu .Lshufb_16x16b, a0; \ 139 + vmovdqu st1, a1; \ 140 + vpshufb a0, a2, a2; \ 141 + vpshufb a0, a3, a3; \ 142 + vpshufb a0, b0, b0; \ 143 + vpshufb a0, b1, b1; \ 144 + vpshufb a0, b2, b2; \ 145 + vpshufb a0, b3, b3; \ 146 + vpshufb a0, a1, a1; \ 147 + vpshufb a0, c0, c0; \ 148 + vpshufb a0, c1, c1; \ 149 + vpshufb a0, c2, c2; \ 150 + vpshufb a0, c3, c3; \ 151 + vpshufb a0, d0, d0; \ 152 + vpshufb a0, d1, d1; \ 153 + vpshufb a0, d2, d2; \ 154 + vpshufb a0, d3, d3; \ 155 + vmovdqu d3, st1; \ 156 + vmovdqu st0, d3; \ 157 + vpshufb a0, d3, a0; \ 158 + vmovdqu d2, st0; \ 159 + \ 160 + transpose_4x4(c0, d0, a0, b0, d2, d3); \ 161 + transpose_4x4(c1, d1, a1, b1, d2, d3); \ 162 + vmovdqu st0, d2; \ 163 + vmovdqu st1, d3; \ 164 + \ 165 + vmovdqu b0, st0; \ 166 + vmovdqu b1, st1; \ 167 + transpose_4x4(c2, d2, a2, b2, b0, b1); \ 168 + transpose_4x4(c3, d3, a3, b3, b0, b1); \ 169 + vmovdqu st0, b0; \ 170 + vmovdqu st1, b1; \ 171 + /* does not adjust output bytes inside vectors */ 172 + 173 + /* load blocks to registers and apply pre-whitening */ 174 + #define inpack16_pre(x0, x1, x2, x3, \ 175 + x4, x5, x6, x7, \ 176 + y0, y1, y2, y3, \ 177 + y4, y5, y6, y7, \ 178 + rio) \ 179 + vmovdqu (0 * 16)(rio), x0; \ 180 + vmovdqu (1 * 16)(rio), x1; \ 181 + vmovdqu (2 * 16)(rio), x2; \ 182 + vmovdqu (3 * 16)(rio), x3; \ 183 + vmovdqu (4 * 16)(rio), x4; \ 184 + vmovdqu (5 * 16)(rio), x5; \ 185 + vmovdqu (6 * 16)(rio), x6; \ 186 + vmovdqu (7 * 16)(rio), x7; \ 187 + vmovdqu (8 * 16)(rio), y0; \ 188 + vmovdqu (9 * 16)(rio), y1; \ 189 + vmovdqu (10 * 16)(rio), y2; \ 190 + vmovdqu (11 * 16)(rio), y3; \ 191 + vmovdqu (12 * 16)(rio), y4; \ 192 + vmovdqu (13 * 16)(rio), y5; \ 193 + vmovdqu (14 * 16)(rio), y6; \ 194 + vmovdqu (15 * 16)(rio), y7; 195 + 196 + /* byteslice pre-whitened blocks and store to temporary memory */ 197 + #define inpack16_post(x0, x1, x2, x3, \ 198 + x4, x5, x6, x7, \ 199 + y0, y1, y2, y3, \ 200 + y4, y5, y6, y7, \ 201 + mem_ab, mem_cd) \ 202 + byteslice_16x16b(x0, x1, x2, x3, \ 203 + x4, x5, x6, x7, \ 204 + y0, y1, y2, y3, \ 205 + y4, y5, y6, y7, \ 206 + (mem_ab), (mem_cd)); \ 207 + \ 208 + vmovdqu x0, 0 * 16(mem_ab); \ 209 + vmovdqu x1, 1 * 16(mem_ab); \ 210 + vmovdqu x2, 2 * 16(mem_ab); \ 211 + vmovdqu x3, 3 * 16(mem_ab); \ 212 + vmovdqu x4, 4 * 16(mem_ab); \ 213 + vmovdqu x5, 5 * 16(mem_ab); \ 214 + vmovdqu x6, 6 * 16(mem_ab); \ 215 + vmovdqu x7, 7 * 16(mem_ab); \ 216 + vmovdqu y0, 0 * 16(mem_cd); \ 217 + vmovdqu y1, 1 * 16(mem_cd); \ 218 + vmovdqu y2, 2 * 16(mem_cd); \ 219 + vmovdqu y3, 3 * 16(mem_cd); \ 220 + vmovdqu y4, 4 * 16(mem_cd); \ 221 + vmovdqu y5, 5 * 16(mem_cd); \ 222 + vmovdqu y6, 6 * 16(mem_cd); \ 223 + vmovdqu y7, 7 * 16(mem_cd); 224 + 225 + #define write_output(x0, x1, x2, x3, \ 226 + x4, x5, x6, x7, \ 227 + y0, y1, y2, y3, \ 228 + y4, y5, y6, y7, \ 229 + mem) \ 230 + vmovdqu x0, 0 * 16(mem); \ 231 + vmovdqu x1, 1 * 16(mem); \ 232 + vmovdqu x2, 2 * 16(mem); \ 233 + vmovdqu x3, 3 * 16(mem); \ 234 + vmovdqu x4, 4 * 16(mem); \ 235 + vmovdqu x5, 5 * 16(mem); \ 236 + vmovdqu x6, 6 * 16(mem); \ 237 + vmovdqu x7, 7 * 16(mem); \ 238 + vmovdqu y0, 8 * 16(mem); \ 239 + vmovdqu y1, 9 * 16(mem); \ 240 + vmovdqu y2, 10 * 16(mem); \ 241 + vmovdqu y3, 11 * 16(mem); \ 242 + vmovdqu y4, 12 * 16(mem); \ 243 + vmovdqu y5, 13 * 16(mem); \ 244 + vmovdqu y6, 14 * 16(mem); \ 245 + vmovdqu y7, 15 * 16(mem); \ 246 + 247 + #define aria_store_state_8way(x0, x1, x2, x3, \ 248 + x4, x5, x6, x7, \ 249 + 
mem_tmp, idx) \ 250 + vmovdqu x0, ((idx + 0) * 16)(mem_tmp); \ 251 + vmovdqu x1, ((idx + 1) * 16)(mem_tmp); \ 252 + vmovdqu x2, ((idx + 2) * 16)(mem_tmp); \ 253 + vmovdqu x3, ((idx + 3) * 16)(mem_tmp); \ 254 + vmovdqu x4, ((idx + 4) * 16)(mem_tmp); \ 255 + vmovdqu x5, ((idx + 5) * 16)(mem_tmp); \ 256 + vmovdqu x6, ((idx + 6) * 16)(mem_tmp); \ 257 + vmovdqu x7, ((idx + 7) * 16)(mem_tmp); 258 + 259 + #define aria_load_state_8way(x0, x1, x2, x3, \ 260 + x4, x5, x6, x7, \ 261 + mem_tmp, idx) \ 262 + vmovdqu ((idx + 0) * 16)(mem_tmp), x0; \ 263 + vmovdqu ((idx + 1) * 16)(mem_tmp), x1; \ 264 + vmovdqu ((idx + 2) * 16)(mem_tmp), x2; \ 265 + vmovdqu ((idx + 3) * 16)(mem_tmp), x3; \ 266 + vmovdqu ((idx + 4) * 16)(mem_tmp), x4; \ 267 + vmovdqu ((idx + 5) * 16)(mem_tmp), x5; \ 268 + vmovdqu ((idx + 6) * 16)(mem_tmp), x6; \ 269 + vmovdqu ((idx + 7) * 16)(mem_tmp), x7; 270 + 271 + #define aria_ark_8way(x0, x1, x2, x3, \ 272 + x4, x5, x6, x7, \ 273 + t0, rk, idx, round) \ 274 + /* AddRoundKey */ \ 275 + vpbroadcastb ((round * 16) + idx + 3)(rk), t0; \ 276 + vpxor t0, x0, x0; \ 277 + vpbroadcastb ((round * 16) + idx + 2)(rk), t0; \ 278 + vpxor t0, x1, x1; \ 279 + vpbroadcastb ((round * 16) + idx + 1)(rk), t0; \ 280 + vpxor t0, x2, x2; \ 281 + vpbroadcastb ((round * 16) + idx + 0)(rk), t0; \ 282 + vpxor t0, x3, x3; \ 283 + vpbroadcastb ((round * 16) + idx + 7)(rk), t0; \ 284 + vpxor t0, x4, x4; \ 285 + vpbroadcastb ((round * 16) + idx + 6)(rk), t0; \ 286 + vpxor t0, x5, x5; \ 287 + vpbroadcastb ((round * 16) + idx + 5)(rk), t0; \ 288 + vpxor t0, x6, x6; \ 289 + vpbroadcastb ((round * 16) + idx + 4)(rk), t0; \ 290 + vpxor t0, x7, x7; 291 + 292 + #define aria_sbox_8way_gfni(x0, x1, x2, x3, \ 293 + x4, x5, x6, x7, \ 294 + t0, t1, t2, t3, \ 295 + t4, t5, t6, t7) \ 296 + vpbroadcastq .Ltf_s2_bitmatrix, t0; \ 297 + vpbroadcastq .Ltf_inv_bitmatrix, t1; \ 298 + vpbroadcastq .Ltf_id_bitmatrix, t2; \ 299 + vpbroadcastq .Ltf_aff_bitmatrix, t3; \ 300 + vpbroadcastq .Ltf_x2_bitmatrix, t4; \ 301 + vgf2p8affineinvqb $(tf_s2_const), t0, x1, x1; \ 302 + vgf2p8affineinvqb $(tf_s2_const), t0, x5, x5; \ 303 + vgf2p8affineqb $(tf_inv_const), t1, x2, x2; \ 304 + vgf2p8affineqb $(tf_inv_const), t1, x6, x6; \ 305 + vgf2p8affineinvqb $0, t2, x2, x2; \ 306 + vgf2p8affineinvqb $0, t2, x6, x6; \ 307 + vgf2p8affineinvqb $(tf_aff_const), t3, x0, x0; \ 308 + vgf2p8affineinvqb $(tf_aff_const), t3, x4, x4; \ 309 + vgf2p8affineqb $(tf_x2_const), t4, x3, x3; \ 310 + vgf2p8affineqb $(tf_x2_const), t4, x7, x7; \ 311 + vgf2p8affineinvqb $0, t2, x3, x3; \ 312 + vgf2p8affineinvqb $0, t2, x7, x7 313 + 314 + #define aria_sbox_8way(x0, x1, x2, x3, \ 315 + x4, x5, x6, x7, \ 316 + t0, t1, t2, t3, \ 317 + t4, t5, t6, t7) \ 318 + vpxor t7, t7, t7; \ 319 + vmovdqa .Linv_shift_row, t0; \ 320 + vmovdqa .Lshift_row, t1; \ 321 + vpbroadcastd .L0f0f0f0f, t6; \ 322 + vmovdqa .Ltf_lo__inv_aff__and__s2, t2; \ 323 + vmovdqa .Ltf_hi__inv_aff__and__s2, t3; \ 324 + vmovdqa .Ltf_lo__x2__and__fwd_aff, t4; \ 325 + vmovdqa .Ltf_hi__x2__and__fwd_aff, t5; \ 326 + \ 327 + vaesenclast t7, x0, x0; \ 328 + vaesenclast t7, x4, x4; \ 329 + vaesenclast t7, x1, x1; \ 330 + vaesenclast t7, x5, x5; \ 331 + vaesdeclast t7, x2, x2; \ 332 + vaesdeclast t7, x6, x6; \ 333 + \ 334 + /* AES inverse shift rows */ \ 335 + vpshufb t0, x0, x0; \ 336 + vpshufb t0, x4, x4; \ 337 + vpshufb t0, x1, x1; \ 338 + vpshufb t0, x5, x5; \ 339 + vpshufb t1, x3, x3; \ 340 + vpshufb t1, x7, x7; \ 341 + vpshufb t1, x2, x2; \ 342 + vpshufb t1, x6, x6; \ 343 + \ 344 + /* affine transformation for S2 */ \ 
345 + filter_8bit(x1, t2, t3, t6, t0); \ 346 + /* affine transformation for S2 */ \ 347 + filter_8bit(x5, t2, t3, t6, t0); \ 348 + \ 349 + /* affine transformation for X2 */ \ 350 + filter_8bit(x3, t4, t5, t6, t0); \ 351 + /* affine transformation for X2 */ \ 352 + filter_8bit(x7, t4, t5, t6, t0); \ 353 + vaesdeclast t7, x3, x3; \ 354 + vaesdeclast t7, x7, x7; 355 + 356 + #define aria_diff_m(x0, x1, x2, x3, \ 357 + t0, t1, t2, t3) \ 358 + /* T = rotr32(X, 8); */ \ 359 + /* X ^= T */ \ 360 + vpxor x0, x3, t0; \ 361 + vpxor x1, x0, t1; \ 362 + vpxor x2, x1, t2; \ 363 + vpxor x3, x2, t3; \ 364 + /* X = T ^ rotr(X, 16); */ \ 365 + vpxor t2, x0, x0; \ 366 + vpxor x1, t3, t3; \ 367 + vpxor t0, x2, x2; \ 368 + vpxor t1, x3, x1; \ 369 + vmovdqu t3, x3; 370 + 371 + #define aria_diff_word(x0, x1, x2, x3, \ 372 + x4, x5, x6, x7, \ 373 + y0, y1, y2, y3, \ 374 + y4, y5, y6, y7) \ 375 + /* t1 ^= t2; */ \ 376 + vpxor y0, x4, x4; \ 377 + vpxor y1, x5, x5; \ 378 + vpxor y2, x6, x6; \ 379 + vpxor y3, x7, x7; \ 380 + \ 381 + /* t2 ^= t3; */ \ 382 + vpxor y4, y0, y0; \ 383 + vpxor y5, y1, y1; \ 384 + vpxor y6, y2, y2; \ 385 + vpxor y7, y3, y3; \ 386 + \ 387 + /* t0 ^= t1; */ \ 388 + vpxor x4, x0, x0; \ 389 + vpxor x5, x1, x1; \ 390 + vpxor x6, x2, x2; \ 391 + vpxor x7, x3, x3; \ 392 + \ 393 + /* t3 ^= t1; */ \ 394 + vpxor x4, y4, y4; \ 395 + vpxor x5, y5, y5; \ 396 + vpxor x6, y6, y6; \ 397 + vpxor x7, y7, y7; \ 398 + \ 399 + /* t2 ^= t0; */ \ 400 + vpxor x0, y0, y0; \ 401 + vpxor x1, y1, y1; \ 402 + vpxor x2, y2, y2; \ 403 + vpxor x3, y3, y3; \ 404 + \ 405 + /* t1 ^= t2; */ \ 406 + vpxor y0, x4, x4; \ 407 + vpxor y1, x5, x5; \ 408 + vpxor y2, x6, x6; \ 409 + vpxor y3, x7, x7; 410 + 411 + #define aria_fe(x0, x1, x2, x3, \ 412 + x4, x5, x6, x7, \ 413 + y0, y1, y2, y3, \ 414 + y4, y5, y6, y7, \ 415 + mem_tmp, rk, round) \ 416 + aria_ark_8way(x0, x1, x2, x3, x4, x5, x6, x7, \ 417 + y0, rk, 8, round); \ 418 + \ 419 + aria_sbox_8way(x2, x3, x0, x1, x6, x7, x4, x5, \ 420 + y0, y1, y2, y3, y4, y5, y6, y7); \ 421 + \ 422 + aria_diff_m(x0, x1, x2, x3, y0, y1, y2, y3); \ 423 + aria_diff_m(x4, x5, x6, x7, y0, y1, y2, y3); \ 424 + aria_store_state_8way(x0, x1, x2, x3, \ 425 + x4, x5, x6, x7, \ 426 + mem_tmp, 8); \ 427 + \ 428 + aria_load_state_8way(x0, x1, x2, x3, \ 429 + x4, x5, x6, x7, \ 430 + mem_tmp, 0); \ 431 + aria_ark_8way(x0, x1, x2, x3, x4, x5, x6, x7, \ 432 + y0, rk, 0, round); \ 433 + \ 434 + aria_sbox_8way(x2, x3, x0, x1, x6, x7, x4, x5, \ 435 + y0, y1, y2, y3, y4, y5, y6, y7); \ 436 + \ 437 + aria_diff_m(x0, x1, x2, x3, y0, y1, y2, y3); \ 438 + aria_diff_m(x4, x5, x6, x7, y0, y1, y2, y3); \ 439 + aria_store_state_8way(x0, x1, x2, x3, \ 440 + x4, x5, x6, x7, \ 441 + mem_tmp, 0); \ 442 + aria_load_state_8way(y0, y1, y2, y3, \ 443 + y4, y5, y6, y7, \ 444 + mem_tmp, 8); \ 445 + aria_diff_word(x0, x1, x2, x3, \ 446 + x4, x5, x6, x7, \ 447 + y0, y1, y2, y3, \ 448 + y4, y5, y6, y7); \ 449 + /* aria_diff_byte() \ 450 + * T3 = ABCD -> BADC \ 451 + * T3 = y4, y5, y6, y7 -> y5, y4, y7, y6 \ 452 + * T0 = ABCD -> CDAB \ 453 + * T0 = x0, x1, x2, x3 -> x2, x3, x0, x1 \ 454 + * T1 = ABCD -> DCBA \ 455 + * T1 = x4, x5, x6, x7 -> x7, x6, x5, x4 \ 456 + */ \ 457 + aria_diff_word(x2, x3, x0, x1, \ 458 + x7, x6, x5, x4, \ 459 + y0, y1, y2, y3, \ 460 + y5, y4, y7, y6); \ 461 + aria_store_state_8way(x3, x2, x1, x0, \ 462 + x6, x7, x4, x5, \ 463 + mem_tmp, 0); 464 + 465 + #define aria_fo(x0, x1, x2, x3, \ 466 + x4, x5, x6, x7, \ 467 + y0, y1, y2, y3, \ 468 + y4, y5, y6, y7, \ 469 + mem_tmp, rk, round) \ 470 + aria_ark_8way(x0, x1, 
x2, x3, x4, x5, x6, x7, \ 471 + y0, rk, 8, round); \ 472 + \ 473 + aria_sbox_8way(x0, x1, x2, x3, x4, x5, x6, x7, \ 474 + y0, y1, y2, y3, y4, y5, y6, y7); \ 475 + \ 476 + aria_diff_m(x0, x1, x2, x3, y0, y1, y2, y3); \ 477 + aria_diff_m(x4, x5, x6, x7, y0, y1, y2, y3); \ 478 + aria_store_state_8way(x0, x1, x2, x3, \ 479 + x4, x5, x6, x7, \ 480 + mem_tmp, 8); \ 481 + \ 482 + aria_load_state_8way(x0, x1, x2, x3, \ 483 + x4, x5, x6, x7, \ 484 + mem_tmp, 0); \ 485 + aria_ark_8way(x0, x1, x2, x3, x4, x5, x6, x7, \ 486 + y0, rk, 0, round); \ 487 + \ 488 + aria_sbox_8way(x0, x1, x2, x3, x4, x5, x6, x7, \ 489 + y0, y1, y2, y3, y4, y5, y6, y7); \ 490 + \ 491 + aria_diff_m(x0, x1, x2, x3, y0, y1, y2, y3); \ 492 + aria_diff_m(x4, x5, x6, x7, y0, y1, y2, y3); \ 493 + aria_store_state_8way(x0, x1, x2, x3, \ 494 + x4, x5, x6, x7, \ 495 + mem_tmp, 0); \ 496 + aria_load_state_8way(y0, y1, y2, y3, \ 497 + y4, y5, y6, y7, \ 498 + mem_tmp, 8); \ 499 + aria_diff_word(x0, x1, x2, x3, \ 500 + x4, x5, x6, x7, \ 501 + y0, y1, y2, y3, \ 502 + y4, y5, y6, y7); \ 503 + /* aria_diff_byte() \ 504 + * T1 = ABCD -> BADC \ 505 + * T1 = x4, x5, x6, x7 -> x5, x4, x7, x6 \ 506 + * T2 = ABCD -> CDAB \ 507 + * T2 = y0, y1, y2, y3, -> y2, y3, y0, y1 \ 508 + * T3 = ABCD -> DCBA \ 509 + * T3 = y4, y5, y6, y7 -> y7, y6, y5, y4 \ 510 + */ \ 511 + aria_diff_word(x0, x1, x2, x3, \ 512 + x5, x4, x7, x6, \ 513 + y2, y3, y0, y1, \ 514 + y7, y6, y5, y4); \ 515 + aria_store_state_8way(x3, x2, x1, x0, \ 516 + x6, x7, x4, x5, \ 517 + mem_tmp, 0); 518 + 519 + #define aria_ff(x0, x1, x2, x3, \ 520 + x4, x5, x6, x7, \ 521 + y0, y1, y2, y3, \ 522 + y4, y5, y6, y7, \ 523 + mem_tmp, rk, round, last_round) \ 524 + aria_ark_8way(x0, x1, x2, x3, x4, x5, x6, x7, \ 525 + y0, rk, 8, round); \ 526 + \ 527 + aria_sbox_8way(x2, x3, x0, x1, x6, x7, x4, x5, \ 528 + y0, y1, y2, y3, y4, y5, y6, y7); \ 529 + \ 530 + aria_ark_8way(x0, x1, x2, x3, x4, x5, x6, x7, \ 531 + y0, rk, 8, last_round); \ 532 + \ 533 + aria_store_state_8way(x0, x1, x2, x3, \ 534 + x4, x5, x6, x7, \ 535 + mem_tmp, 8); \ 536 + \ 537 + aria_load_state_8way(x0, x1, x2, x3, \ 538 + x4, x5, x6, x7, \ 539 + mem_tmp, 0); \ 540 + aria_ark_8way(x0, x1, x2, x3, x4, x5, x6, x7, \ 541 + y0, rk, 0, round); \ 542 + \ 543 + aria_sbox_8way(x2, x3, x0, x1, x6, x7, x4, x5, \ 544 + y0, y1, y2, y3, y4, y5, y6, y7); \ 545 + \ 546 + aria_ark_8way(x0, x1, x2, x3, x4, x5, x6, x7, \ 547 + y0, rk, 0, last_round); \ 548 + \ 549 + aria_load_state_8way(y0, y1, y2, y3, \ 550 + y4, y5, y6, y7, \ 551 + mem_tmp, 8); 552 + 553 + #define aria_fe_gfni(x0, x1, x2, x3, \ 554 + x4, x5, x6, x7, \ 555 + y0, y1, y2, y3, \ 556 + y4, y5, y6, y7, \ 557 + mem_tmp, rk, round) \ 558 + aria_ark_8way(x0, x1, x2, x3, x4, x5, x6, x7, \ 559 + y0, rk, 8, round); \ 560 + \ 561 + aria_sbox_8way_gfni(x2, x3, x0, x1, \ 562 + x6, x7, x4, x5, \ 563 + y0, y1, y2, y3, \ 564 + y4, y5, y6, y7); \ 565 + \ 566 + aria_diff_m(x0, x1, x2, x3, y0, y1, y2, y3); \ 567 + aria_diff_m(x4, x5, x6, x7, y0, y1, y2, y3); \ 568 + aria_store_state_8way(x0, x1, x2, x3, \ 569 + x4, x5, x6, x7, \ 570 + mem_tmp, 8); \ 571 + \ 572 + aria_load_state_8way(x0, x1, x2, x3, \ 573 + x4, x5, x6, x7, \ 574 + mem_tmp, 0); \ 575 + aria_ark_8way(x0, x1, x2, x3, x4, x5, x6, x7, \ 576 + y0, rk, 0, round); \ 577 + \ 578 + aria_sbox_8way_gfni(x2, x3, x0, x1, \ 579 + x6, x7, x4, x5, \ 580 + y0, y1, y2, y3, \ 581 + y4, y5, y6, y7); \ 582 + \ 583 + aria_diff_m(x0, x1, x2, x3, y0, y1, y2, y3); \ 584 + aria_diff_m(x4, x5, x6, x7, y0, y1, y2, y3); \ 585 + aria_store_state_8way(x0, x1, x2, x3, 
\ 586 + x4, x5, x6, x7, \ 587 + mem_tmp, 0); \ 588 + aria_load_state_8way(y0, y1, y2, y3, \ 589 + y4, y5, y6, y7, \ 590 + mem_tmp, 8); \ 591 + aria_diff_word(x0, x1, x2, x3, \ 592 + x4, x5, x6, x7, \ 593 + y0, y1, y2, y3, \ 594 + y4, y5, y6, y7); \ 595 + /* aria_diff_byte() \ 596 + * T3 = ABCD -> BADC \ 597 + * T3 = y4, y5, y6, y7 -> y5, y4, y7, y6 \ 598 + * T0 = ABCD -> CDAB \ 599 + * T0 = x0, x1, x2, x3 -> x2, x3, x0, x1 \ 600 + * T1 = ABCD -> DCBA \ 601 + * T1 = x4, x5, x6, x7 -> x7, x6, x5, x4 \ 602 + */ \ 603 + aria_diff_word(x2, x3, x0, x1, \ 604 + x7, x6, x5, x4, \ 605 + y0, y1, y2, y3, \ 606 + y5, y4, y7, y6); \ 607 + aria_store_state_8way(x3, x2, x1, x0, \ 608 + x6, x7, x4, x5, \ 609 + mem_tmp, 0); 610 + 611 + #define aria_fo_gfni(x0, x1, x2, x3, \ 612 + x4, x5, x6, x7, \ 613 + y0, y1, y2, y3, \ 614 + y4, y5, y6, y7, \ 615 + mem_tmp, rk, round) \ 616 + aria_ark_8way(x0, x1, x2, x3, x4, x5, x6, x7, \ 617 + y0, rk, 8, round); \ 618 + \ 619 + aria_sbox_8way_gfni(x0, x1, x2, x3, \ 620 + x4, x5, x6, x7, \ 621 + y0, y1, y2, y3, \ 622 + y4, y5, y6, y7); \ 623 + \ 624 + aria_diff_m(x0, x1, x2, x3, y0, y1, y2, y3); \ 625 + aria_diff_m(x4, x5, x6, x7, y0, y1, y2, y3); \ 626 + aria_store_state_8way(x0, x1, x2, x3, \ 627 + x4, x5, x6, x7, \ 628 + mem_tmp, 8); \ 629 + \ 630 + aria_load_state_8way(x0, x1, x2, x3, \ 631 + x4, x5, x6, x7, \ 632 + mem_tmp, 0); \ 633 + aria_ark_8way(x0, x1, x2, x3, x4, x5, x6, x7, \ 634 + y0, rk, 0, round); \ 635 + \ 636 + aria_sbox_8way_gfni(x0, x1, x2, x3, \ 637 + x4, x5, x6, x7, \ 638 + y0, y1, y2, y3, \ 639 + y4, y5, y6, y7); \ 640 + \ 641 + aria_diff_m(x0, x1, x2, x3, y0, y1, y2, y3); \ 642 + aria_diff_m(x4, x5, x6, x7, y0, y1, y2, y3); \ 643 + aria_store_state_8way(x0, x1, x2, x3, \ 644 + x4, x5, x6, x7, \ 645 + mem_tmp, 0); \ 646 + aria_load_state_8way(y0, y1, y2, y3, \ 647 + y4, y5, y6, y7, \ 648 + mem_tmp, 8); \ 649 + aria_diff_word(x0, x1, x2, x3, \ 650 + x4, x5, x6, x7, \ 651 + y0, y1, y2, y3, \ 652 + y4, y5, y6, y7); \ 653 + /* aria_diff_byte() \ 654 + * T1 = ABCD -> BADC \ 655 + * T1 = x4, x5, x6, x7 -> x5, x4, x7, x6 \ 656 + * T2 = ABCD -> CDAB \ 657 + * T2 = y0, y1, y2, y3, -> y2, y3, y0, y1 \ 658 + * T3 = ABCD -> DCBA \ 659 + * T3 = y4, y5, y6, y7 -> y7, y6, y5, y4 \ 660 + */ \ 661 + aria_diff_word(x0, x1, x2, x3, \ 662 + x5, x4, x7, x6, \ 663 + y2, y3, y0, y1, \ 664 + y7, y6, y5, y4); \ 665 + aria_store_state_8way(x3, x2, x1, x0, \ 666 + x6, x7, x4, x5, \ 667 + mem_tmp, 0); 668 + 669 + #define aria_ff_gfni(x0, x1, x2, x3, \ 670 + x4, x5, x6, x7, \ 671 + y0, y1, y2, y3, \ 672 + y4, y5, y6, y7, \ 673 + mem_tmp, rk, round, last_round) \ 674 + aria_ark_8way(x0, x1, x2, x3, x4, x5, x6, x7, \ 675 + y0, rk, 8, round); \ 676 + \ 677 + aria_sbox_8way_gfni(x2, x3, x0, x1, \ 678 + x6, x7, x4, x5, \ 679 + y0, y1, y2, y3, \ 680 + y4, y5, y6, y7); \ 681 + \ 682 + aria_ark_8way(x0, x1, x2, x3, x4, x5, x6, x7, \ 683 + y0, rk, 8, last_round); \ 684 + \ 685 + aria_store_state_8way(x0, x1, x2, x3, \ 686 + x4, x5, x6, x7, \ 687 + mem_tmp, 8); \ 688 + \ 689 + aria_load_state_8way(x0, x1, x2, x3, \ 690 + x4, x5, x6, x7, \ 691 + mem_tmp, 0); \ 692 + aria_ark_8way(x0, x1, x2, x3, x4, x5, x6, x7, \ 693 + y0, rk, 0, round); \ 694 + \ 695 + aria_sbox_8way_gfni(x2, x3, x0, x1, \ 696 + x6, x7, x4, x5, \ 697 + y0, y1, y2, y3, \ 698 + y4, y5, y6, y7); \ 699 + \ 700 + aria_ark_8way(x0, x1, x2, x3, x4, x5, x6, x7, \ 701 + y0, rk, 0, last_round); \ 702 + \ 703 + aria_load_state_8way(y0, y1, y2, y3, \ 704 + y4, y5, y6, y7, \ 705 + mem_tmp, 8); 706 + 707 + /* NB: section is 
mergeable, all elements must be aligned 16-byte blocks */ 708 + .section .rodata.cst16, "aM", @progbits, 16 709 + .align 16 710 + 711 + #define SHUFB_BYTES(idx) \ 712 + 0 + (idx), 4 + (idx), 8 + (idx), 12 + (idx) 713 + 714 + .Lshufb_16x16b: 715 + .byte SHUFB_BYTES(0), SHUFB_BYTES(1), SHUFB_BYTES(2), SHUFB_BYTES(3); 716 + /* For isolating SubBytes from AESENCLAST, inverse shift row */ 717 + .Linv_shift_row: 718 + .byte 0x00, 0x0d, 0x0a, 0x07, 0x04, 0x01, 0x0e, 0x0b 719 + .byte 0x08, 0x05, 0x02, 0x0f, 0x0c, 0x09, 0x06, 0x03 720 + .Lshift_row: 721 + .byte 0x00, 0x05, 0x0a, 0x0f, 0x04, 0x09, 0x0e, 0x03 722 + .byte 0x08, 0x0d, 0x02, 0x07, 0x0c, 0x01, 0x06, 0x0b 723 + /* For CTR-mode IV byteswap */ 724 + .Lbswap128_mask: 725 + .byte 0x0f, 0x0e, 0x0d, 0x0c, 0x0b, 0x0a, 0x09, 0x08 726 + .byte 0x07, 0x06, 0x05, 0x04, 0x03, 0x02, 0x01, 0x00 727 + 728 + /* AES inverse affine and S2 combined: 729 + * 1 1 0 0 0 0 0 1 x0 0 730 + * 0 1 0 0 1 0 0 0 x1 0 731 + * 1 1 0 0 1 1 1 1 x2 0 732 + * 0 1 1 0 1 0 0 1 x3 1 733 + * 0 1 0 0 1 1 0 0 * x4 + 0 734 + * 0 1 0 1 1 0 0 0 x5 0 735 + * 0 0 0 0 0 1 0 1 x6 0 736 + * 1 1 1 0 0 1 1 1 x7 1 737 + */ 738 + .Ltf_lo__inv_aff__and__s2: 739 + .octa 0x92172DA81A9FA520B2370D883ABF8500 740 + .Ltf_hi__inv_aff__and__s2: 741 + .octa 0x2B15FFC1AF917B45E6D8320C625CB688 742 + 743 + /* X2 and AES forward affine combined: 744 + * 1 0 1 1 0 0 0 1 x0 0 745 + * 0 1 1 1 1 0 1 1 x1 0 746 + * 0 0 0 1 1 0 1 0 x2 1 747 + * 0 1 0 0 0 1 0 0 x3 0 748 + * 0 0 1 1 1 0 1 1 * x4 + 0 749 + * 0 1 0 0 1 0 0 0 x5 0 750 + * 1 1 0 1 0 0 1 1 x6 0 751 + * 0 1 0 0 1 0 1 0 x7 0 752 + */ 753 + .Ltf_lo__x2__and__fwd_aff: 754 + .octa 0xEFAE0544FCBD1657B8F95213ABEA4100 755 + .Ltf_hi__x2__and__fwd_aff: 756 + .octa 0x3F893781E95FE1576CDA64D2BA0CB204 757 + 758 + .section .rodata.cst8, "aM", @progbits, 8 759 + .align 8 760 + /* AES affine: */ 761 + #define tf_aff_const BV8(1, 1, 0, 0, 0, 1, 1, 0) 762 + .Ltf_aff_bitmatrix: 763 + .quad BM8X8(BV8(1, 0, 0, 0, 1, 1, 1, 1), 764 + BV8(1, 1, 0, 0, 0, 1, 1, 1), 765 + BV8(1, 1, 1, 0, 0, 0, 1, 1), 766 + BV8(1, 1, 1, 1, 0, 0, 0, 1), 767 + BV8(1, 1, 1, 1, 1, 0, 0, 0), 768 + BV8(0, 1, 1, 1, 1, 1, 0, 0), 769 + BV8(0, 0, 1, 1, 1, 1, 1, 0), 770 + BV8(0, 0, 0, 1, 1, 1, 1, 1)) 771 + 772 + /* AES inverse affine: */ 773 + #define tf_inv_const BV8(1, 0, 1, 0, 0, 0, 0, 0) 774 + .Ltf_inv_bitmatrix: 775 + .quad BM8X8(BV8(0, 0, 1, 0, 0, 1, 0, 1), 776 + BV8(1, 0, 0, 1, 0, 0, 1, 0), 777 + BV8(0, 1, 0, 0, 1, 0, 0, 1), 778 + BV8(1, 0, 1, 0, 0, 1, 0, 0), 779 + BV8(0, 1, 0, 1, 0, 0, 1, 0), 780 + BV8(0, 0, 1, 0, 1, 0, 0, 1), 781 + BV8(1, 0, 0, 1, 0, 1, 0, 0), 782 + BV8(0, 1, 0, 0, 1, 0, 1, 0)) 783 + 784 + /* S2: */ 785 + #define tf_s2_const BV8(0, 1, 0, 0, 0, 1, 1, 1) 786 + .Ltf_s2_bitmatrix: 787 + .quad BM8X8(BV8(0, 1, 0, 1, 0, 1, 1, 1), 788 + BV8(0, 0, 1, 1, 1, 1, 1, 1), 789 + BV8(1, 1, 1, 0, 1, 1, 0, 1), 790 + BV8(1, 1, 0, 0, 0, 0, 1, 1), 791 + BV8(0, 1, 0, 0, 0, 0, 1, 1), 792 + BV8(1, 1, 0, 0, 1, 1, 1, 0), 793 + BV8(0, 1, 1, 0, 0, 0, 1, 1), 794 + BV8(1, 1, 1, 1, 0, 1, 1, 0)) 795 + 796 + /* X2: */ 797 + #define tf_x2_const BV8(0, 0, 1, 1, 0, 1, 0, 0) 798 + .Ltf_x2_bitmatrix: 799 + .quad BM8X8(BV8(0, 0, 0, 1, 1, 0, 0, 0), 800 + BV8(0, 0, 1, 0, 0, 1, 1, 0), 801 + BV8(0, 0, 0, 0, 1, 0, 1, 0), 802 + BV8(1, 1, 1, 0, 0, 0, 1, 1), 803 + BV8(1, 1, 1, 0, 1, 1, 0, 0), 804 + BV8(0, 1, 1, 0, 1, 0, 1, 1), 805 + BV8(1, 0, 1, 1, 1, 1, 0, 1), 806 + BV8(1, 0, 0, 1, 0, 0, 1, 1)) 807 + 808 + /* Identity matrix: */ 809 + .Ltf_id_bitmatrix: 810 + .quad BM8X8(BV8(1, 0, 0, 0, 0, 0, 0, 0), 811 + BV8(0, 1, 0, 0, 0, 0, 
0, 0), 812 + BV8(0, 0, 1, 0, 0, 0, 0, 0), 813 + BV8(0, 0, 0, 1, 0, 0, 0, 0), 814 + BV8(0, 0, 0, 0, 1, 0, 0, 0), 815 + BV8(0, 0, 0, 0, 0, 1, 0, 0), 816 + BV8(0, 0, 0, 0, 0, 0, 1, 0), 817 + BV8(0, 0, 0, 0, 0, 0, 0, 1)) 818 + 819 + /* 4-bit mask */ 820 + .section .rodata.cst4.L0f0f0f0f, "aM", @progbits, 4 821 + .align 4 822 + .L0f0f0f0f: 823 + .long 0x0f0f0f0f 824 + 825 + .text 826 + 827 + SYM_FUNC_START_LOCAL(__aria_aesni_avx_crypt_16way) 828 + /* input: 829 + * %r9: rk 830 + * %rsi: dst 831 + * %rdx: src 832 + * %xmm0..%xmm15: 16 byte-sliced blocks 833 + */ 834 + 835 + FRAME_BEGIN 836 + 837 + movq %rsi, %rax; 838 + leaq 8 * 16(%rax), %r8; 839 + 840 + inpack16_post(%xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, 841 + %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, 842 + %xmm15, %rax, %r8); 843 + aria_fo(%xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, %xmm15, 844 + %xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, 845 + %rax, %r9, 0); 846 + aria_fe(%xmm1, %xmm0, %xmm3, %xmm2, %xmm4, %xmm5, %xmm6, %xmm7, 847 + %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, 848 + %xmm15, %rax, %r9, 1); 849 + aria_fo(%xmm9, %xmm8, %xmm11, %xmm10, %xmm12, %xmm13, %xmm14, %xmm15, 850 + %xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, 851 + %rax, %r9, 2); 852 + aria_fe(%xmm1, %xmm0, %xmm3, %xmm2, %xmm4, %xmm5, %xmm6, %xmm7, 853 + %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, 854 + %xmm15, %rax, %r9, 3); 855 + aria_fo(%xmm9, %xmm8, %xmm11, %xmm10, %xmm12, %xmm13, %xmm14, %xmm15, 856 + %xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, 857 + %rax, %r9, 4); 858 + aria_fe(%xmm1, %xmm0, %xmm3, %xmm2, %xmm4, %xmm5, %xmm6, %xmm7, 859 + %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, 860 + %xmm15, %rax, %r9, 5); 861 + aria_fo(%xmm9, %xmm8, %xmm11, %xmm10, %xmm12, %xmm13, %xmm14, %xmm15, 862 + %xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, 863 + %rax, %r9, 6); 864 + aria_fe(%xmm1, %xmm0, %xmm3, %xmm2, %xmm4, %xmm5, %xmm6, %xmm7, 865 + %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, 866 + %xmm15, %rax, %r9, 7); 867 + aria_fo(%xmm9, %xmm8, %xmm11, %xmm10, %xmm12, %xmm13, %xmm14, %xmm15, 868 + %xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, 869 + %rax, %r9, 8); 870 + aria_fe(%xmm1, %xmm0, %xmm3, %xmm2, %xmm4, %xmm5, %xmm6, %xmm7, 871 + %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, 872 + %xmm15, %rax, %r9, 9); 873 + aria_fo(%xmm9, %xmm8, %xmm11, %xmm10, %xmm12, %xmm13, %xmm14, %xmm15, 874 + %xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, 875 + %rax, %r9, 10); 876 + cmpl $12, rounds(CTX); 877 + jne .Laria_192; 878 + aria_ff(%xmm1, %xmm0, %xmm3, %xmm2, %xmm4, %xmm5, %xmm6, %xmm7, 879 + %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, 880 + %xmm15, %rax, %r9, 11, 12); 881 + jmp .Laria_end; 882 + .Laria_192: 883 + aria_fe(%xmm1, %xmm0, %xmm3, %xmm2, %xmm4, %xmm5, %xmm6, %xmm7, 884 + %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, 885 + %xmm15, %rax, %r9, 11); 886 + aria_fo(%xmm9, %xmm8, %xmm11, %xmm10, %xmm12, %xmm13, %xmm14, %xmm15, 887 + %xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, 888 + %rax, %r9, 12); 889 + cmpl $14, rounds(CTX); 890 + jne .Laria_256; 891 + aria_ff(%xmm1, %xmm0, %xmm3, %xmm2, %xmm4, %xmm5, %xmm6, %xmm7, 892 + %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, 893 + %xmm15, %rax, %r9, 13, 14); 894 + jmp .Laria_end; 895 + .Laria_256: 896 + aria_fe(%xmm1, %xmm0, %xmm3, %xmm2, %xmm4, %xmm5, %xmm6, %xmm7, 897 + %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, 898 + %xmm15, %rax, %r9, 13); 
899 + aria_fo(%xmm9, %xmm8, %xmm11, %xmm10, %xmm12, %xmm13, %xmm14, %xmm15, 900 + %xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, 901 + %rax, %r9, 14); 902 + aria_ff(%xmm1, %xmm0, %xmm3, %xmm2, %xmm4, %xmm5, %xmm6, %xmm7, 903 + %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, 904 + %xmm15, %rax, %r9, 15, 16); 905 + .Laria_end: 906 + debyteslice_16x16b(%xmm8, %xmm12, %xmm1, %xmm4, 907 + %xmm9, %xmm13, %xmm0, %xmm5, 908 + %xmm10, %xmm14, %xmm3, %xmm6, 909 + %xmm11, %xmm15, %xmm2, %xmm7, 910 + (%rax), (%r8)); 911 + 912 + FRAME_END 913 + RET; 914 + SYM_FUNC_END(__aria_aesni_avx_crypt_16way) 915 + 916 + SYM_FUNC_START(aria_aesni_avx_encrypt_16way) 917 + /* input: 918 + * %rdi: ctx, CTX 919 + * %rsi: dst 920 + * %rdx: src 921 + */ 922 + 923 + FRAME_BEGIN 924 + 925 + leaq enc_key(CTX), %r9; 926 + 927 + inpack16_pre(%xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, 928 + %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, 929 + %xmm15, %rdx); 930 + 931 + call __aria_aesni_avx_crypt_16way; 932 + 933 + write_output(%xmm1, %xmm0, %xmm3, %xmm2, %xmm4, %xmm5, %xmm6, %xmm7, 934 + %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, 935 + %xmm15, %rax); 936 + 937 + FRAME_END 938 + RET; 939 + SYM_FUNC_END(aria_aesni_avx_encrypt_16way) 940 + 941 + SYM_FUNC_START(aria_aesni_avx_decrypt_16way) 942 + /* input: 943 + * %rdi: ctx, CTX 944 + * %rsi: dst 945 + * %rdx: src 946 + */ 947 + 948 + FRAME_BEGIN 949 + 950 + leaq dec_key(CTX), %r9; 951 + 952 + inpack16_pre(%xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, 953 + %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, 954 + %xmm15, %rdx); 955 + 956 + call __aria_aesni_avx_crypt_16way; 957 + 958 + write_output(%xmm1, %xmm0, %xmm3, %xmm2, %xmm4, %xmm5, %xmm6, %xmm7, 959 + %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, 960 + %xmm15, %rax); 961 + 962 + FRAME_END 963 + RET; 964 + SYM_FUNC_END(aria_aesni_avx_decrypt_16way) 965 + 966 + SYM_FUNC_START_LOCAL(__aria_aesni_avx_ctr_gen_keystream_16way) 967 + /* input: 968 + * %rdi: ctx 969 + * %rsi: dst 970 + * %rdx: src 971 + * %rcx: keystream 972 + * %r8: iv (big endian, 128bit) 973 + */ 974 + 975 + FRAME_BEGIN 976 + /* load IV and byteswap */ 977 + vmovdqu (%r8), %xmm8; 978 + 979 + vmovdqa .Lbswap128_mask (%rip), %xmm1; 980 + vpshufb %xmm1, %xmm8, %xmm3; /* be => le */ 981 + 982 + vpcmpeqd %xmm0, %xmm0, %xmm0; 983 + vpsrldq $8, %xmm0, %xmm0; /* low: -1, high: 0 */ 984 + 985 + /* construct IVs */ 986 + inc_le128(%xmm3, %xmm0, %xmm5); /* +1 */ 987 + vpshufb %xmm1, %xmm3, %xmm9; 988 + inc_le128(%xmm3, %xmm0, %xmm5); /* +1 */ 989 + vpshufb %xmm1, %xmm3, %xmm10; 990 + inc_le128(%xmm3, %xmm0, %xmm5); /* +1 */ 991 + vpshufb %xmm1, %xmm3, %xmm11; 992 + inc_le128(%xmm3, %xmm0, %xmm5); /* +1 */ 993 + vpshufb %xmm1, %xmm3, %xmm12; 994 + inc_le128(%xmm3, %xmm0, %xmm5); /* +1 */ 995 + vpshufb %xmm1, %xmm3, %xmm13; 996 + inc_le128(%xmm3, %xmm0, %xmm5); /* +1 */ 997 + vpshufb %xmm1, %xmm3, %xmm14; 998 + inc_le128(%xmm3, %xmm0, %xmm5); /* +1 */ 999 + vpshufb %xmm1, %xmm3, %xmm15; 1000 + vmovdqu %xmm8, (0 * 16)(%rcx); 1001 + vmovdqu %xmm9, (1 * 16)(%rcx); 1002 + vmovdqu %xmm10, (2 * 16)(%rcx); 1003 + vmovdqu %xmm11, (3 * 16)(%rcx); 1004 + vmovdqu %xmm12, (4 * 16)(%rcx); 1005 + vmovdqu %xmm13, (5 * 16)(%rcx); 1006 + vmovdqu %xmm14, (6 * 16)(%rcx); 1007 + vmovdqu %xmm15, (7 * 16)(%rcx); 1008 + 1009 + inc_le128(%xmm3, %xmm0, %xmm5); /* +1 */ 1010 + vpshufb %xmm1, %xmm3, %xmm8; 1011 + inc_le128(%xmm3, %xmm0, %xmm5); /* +1 */ 1012 + vpshufb %xmm1, %xmm3, %xmm9; 1013 + inc_le128(%xmm3, %xmm0, %xmm5); /* +1 
*/ 1014 + vpshufb %xmm1, %xmm3, %xmm10; 1015 + inc_le128(%xmm3, %xmm0, %xmm5); /* +1 */ 1016 + vpshufb %xmm1, %xmm3, %xmm11; 1017 + inc_le128(%xmm3, %xmm0, %xmm5); /* +1 */ 1018 + vpshufb %xmm1, %xmm3, %xmm12; 1019 + inc_le128(%xmm3, %xmm0, %xmm5); /* +1 */ 1020 + vpshufb %xmm1, %xmm3, %xmm13; 1021 + inc_le128(%xmm3, %xmm0, %xmm5); /* +1 */ 1022 + vpshufb %xmm1, %xmm3, %xmm14; 1023 + inc_le128(%xmm3, %xmm0, %xmm5); /* +1 */ 1024 + vpshufb %xmm1, %xmm3, %xmm15; 1025 + inc_le128(%xmm3, %xmm0, %xmm5); /* +1 */ 1026 + vpshufb %xmm1, %xmm3, %xmm4; 1027 + vmovdqu %xmm4, (%r8); 1028 + 1029 + vmovdqu (0 * 16)(%rcx), %xmm0; 1030 + vmovdqu (1 * 16)(%rcx), %xmm1; 1031 + vmovdqu (2 * 16)(%rcx), %xmm2; 1032 + vmovdqu (3 * 16)(%rcx), %xmm3; 1033 + vmovdqu (4 * 16)(%rcx), %xmm4; 1034 + vmovdqu (5 * 16)(%rcx), %xmm5; 1035 + vmovdqu (6 * 16)(%rcx), %xmm6; 1036 + vmovdqu (7 * 16)(%rcx), %xmm7; 1037 + 1038 + FRAME_END 1039 + RET; 1040 + SYM_FUNC_END(__aria_aesni_avx_ctr_gen_keystream_16way) 1041 + 1042 + SYM_FUNC_START(aria_aesni_avx_ctr_crypt_16way) 1043 + /* input: 1044 + * %rdi: ctx 1045 + * %rsi: dst 1046 + * %rdx: src 1047 + * %rcx: keystream 1048 + * %r8: iv (big endian, 128bit) 1049 + */ 1050 + FRAME_BEGIN 1051 + 1052 + call __aria_aesni_avx_ctr_gen_keystream_16way; 1053 + 1054 + leaq (%rsi), %r10; 1055 + leaq (%rdx), %r11; 1056 + leaq (%rcx), %rsi; 1057 + leaq (%rcx), %rdx; 1058 + leaq enc_key(CTX), %r9; 1059 + 1060 + call __aria_aesni_avx_crypt_16way; 1061 + 1062 + vpxor (0 * 16)(%r11), %xmm1, %xmm1; 1063 + vpxor (1 * 16)(%r11), %xmm0, %xmm0; 1064 + vpxor (2 * 16)(%r11), %xmm3, %xmm3; 1065 + vpxor (3 * 16)(%r11), %xmm2, %xmm2; 1066 + vpxor (4 * 16)(%r11), %xmm4, %xmm4; 1067 + vpxor (5 * 16)(%r11), %xmm5, %xmm5; 1068 + vpxor (6 * 16)(%r11), %xmm6, %xmm6; 1069 + vpxor (7 * 16)(%r11), %xmm7, %xmm7; 1070 + vpxor (8 * 16)(%r11), %xmm8, %xmm8; 1071 + vpxor (9 * 16)(%r11), %xmm9, %xmm9; 1072 + vpxor (10 * 16)(%r11), %xmm10, %xmm10; 1073 + vpxor (11 * 16)(%r11), %xmm11, %xmm11; 1074 + vpxor (12 * 16)(%r11), %xmm12, %xmm12; 1075 + vpxor (13 * 16)(%r11), %xmm13, %xmm13; 1076 + vpxor (14 * 16)(%r11), %xmm14, %xmm14; 1077 + vpxor (15 * 16)(%r11), %xmm15, %xmm15; 1078 + write_output(%xmm1, %xmm0, %xmm3, %xmm2, %xmm4, %xmm5, %xmm6, %xmm7, 1079 + %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, 1080 + %xmm15, %r10); 1081 + 1082 + FRAME_END 1083 + RET; 1084 + SYM_FUNC_END(aria_aesni_avx_ctr_crypt_16way) 1085 + 1086 + SYM_FUNC_START_LOCAL(__aria_aesni_avx_gfni_crypt_16way) 1087 + /* input: 1088 + * %r9: rk 1089 + * %rsi: dst 1090 + * %rdx: src 1091 + * %xmm0..%xmm15: 16 byte-sliced blocks 1092 + */ 1093 + 1094 + FRAME_BEGIN 1095 + 1096 + movq %rsi, %rax; 1097 + leaq 8 * 16(%rax), %r8; 1098 + 1099 + inpack16_post(%xmm0, %xmm1, %xmm2, %xmm3, 1100 + %xmm4, %xmm5, %xmm6, %xmm7, 1101 + %xmm8, %xmm9, %xmm10, %xmm11, 1102 + %xmm12, %xmm13, %xmm14, 1103 + %xmm15, %rax, %r8); 1104 + aria_fo_gfni(%xmm8, %xmm9, %xmm10, %xmm11, 1105 + %xmm12, %xmm13, %xmm14, %xmm15, 1106 + %xmm0, %xmm1, %xmm2, %xmm3, 1107 + %xmm4, %xmm5, %xmm6, %xmm7, 1108 + %rax, %r9, 0); 1109 + aria_fe_gfni(%xmm1, %xmm0, %xmm3, %xmm2, 1110 + %xmm4, %xmm5, %xmm6, %xmm7, 1111 + %xmm8, %xmm9, %xmm10, %xmm11, 1112 + %xmm12, %xmm13, %xmm14, 1113 + %xmm15, %rax, %r9, 1); 1114 + aria_fo_gfni(%xmm9, %xmm8, %xmm11, %xmm10, 1115 + %xmm12, %xmm13, %xmm14, %xmm15, 1116 + %xmm0, %xmm1, %xmm2, %xmm3, 1117 + %xmm4, %xmm5, %xmm6, %xmm7, 1118 + %rax, %r9, 2); 1119 + aria_fe_gfni(%xmm1, %xmm0, %xmm3, %xmm2, 1120 + %xmm4, %xmm5, %xmm6, %xmm7, 1121 + %xmm8, %xmm9, %xmm10, 
%xmm11, 1122 + %xmm12, %xmm13, %xmm14, 1123 + %xmm15, %rax, %r9, 3); 1124 + aria_fo_gfni(%xmm9, %xmm8, %xmm11, %xmm10, 1125 + %xmm12, %xmm13, %xmm14, %xmm15, 1126 + %xmm0, %xmm1, %xmm2, %xmm3, 1127 + %xmm4, %xmm5, %xmm6, %xmm7, 1128 + %rax, %r9, 4); 1129 + aria_fe_gfni(%xmm1, %xmm0, %xmm3, %xmm2, 1130 + %xmm4, %xmm5, %xmm6, %xmm7, 1131 + %xmm8, %xmm9, %xmm10, %xmm11, 1132 + %xmm12, %xmm13, %xmm14, 1133 + %xmm15, %rax, %r9, 5); 1134 + aria_fo_gfni(%xmm9, %xmm8, %xmm11, %xmm10, 1135 + %xmm12, %xmm13, %xmm14, %xmm15, 1136 + %xmm0, %xmm1, %xmm2, %xmm3, 1137 + %xmm4, %xmm5, %xmm6, %xmm7, 1138 + %rax, %r9, 6); 1139 + aria_fe_gfni(%xmm1, %xmm0, %xmm3, %xmm2, 1140 + %xmm4, %xmm5, %xmm6, %xmm7, 1141 + %xmm8, %xmm9, %xmm10, %xmm11, 1142 + %xmm12, %xmm13, %xmm14, 1143 + %xmm15, %rax, %r9, 7); 1144 + aria_fo_gfni(%xmm9, %xmm8, %xmm11, %xmm10, 1145 + %xmm12, %xmm13, %xmm14, %xmm15, 1146 + %xmm0, %xmm1, %xmm2, %xmm3, 1147 + %xmm4, %xmm5, %xmm6, %xmm7, 1148 + %rax, %r9, 8); 1149 + aria_fe_gfni(%xmm1, %xmm0, %xmm3, %xmm2, 1150 + %xmm4, %xmm5, %xmm6, %xmm7, 1151 + %xmm8, %xmm9, %xmm10, %xmm11, 1152 + %xmm12, %xmm13, %xmm14, 1153 + %xmm15, %rax, %r9, 9); 1154 + aria_fo_gfni(%xmm9, %xmm8, %xmm11, %xmm10, 1155 + %xmm12, %xmm13, %xmm14, %xmm15, 1156 + %xmm0, %xmm1, %xmm2, %xmm3, 1157 + %xmm4, %xmm5, %xmm6, %xmm7, 1158 + %rax, %r9, 10); 1159 + cmpl $12, rounds(CTX); 1160 + jne .Laria_gfni_192; 1161 + aria_ff_gfni(%xmm1, %xmm0, %xmm3, %xmm2, %xmm4, %xmm5, %xmm6, %xmm7, 1162 + %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, 1163 + %xmm15, %rax, %r9, 11, 12); 1164 + jmp .Laria_gfni_end; 1165 + .Laria_gfni_192: 1166 + aria_fe_gfni(%xmm1, %xmm0, %xmm3, %xmm2, 1167 + %xmm4, %xmm5, %xmm6, %xmm7, 1168 + %xmm8, %xmm9, %xmm10, %xmm11, 1169 + %xmm12, %xmm13, %xmm14, 1170 + %xmm15, %rax, %r9, 11); 1171 + aria_fo_gfni(%xmm9, %xmm8, %xmm11, %xmm10, 1172 + %xmm12, %xmm13, %xmm14, %xmm15, 1173 + %xmm0, %xmm1, %xmm2, %xmm3, 1174 + %xmm4, %xmm5, %xmm6, %xmm7, 1175 + %rax, %r9, 12); 1176 + cmpl $14, rounds(CTX); 1177 + jne .Laria_gfni_256; 1178 + aria_ff_gfni(%xmm1, %xmm0, %xmm3, %xmm2, 1179 + %xmm4, %xmm5, %xmm6, %xmm7, 1180 + %xmm8, %xmm9, %xmm10, %xmm11, 1181 + %xmm12, %xmm13, %xmm14, 1182 + %xmm15, %rax, %r9, 13, 14); 1183 + jmp .Laria_gfni_end; 1184 + .Laria_gfni_256: 1185 + aria_fe_gfni(%xmm1, %xmm0, %xmm3, %xmm2, 1186 + %xmm4, %xmm5, %xmm6, %xmm7, 1187 + %xmm8, %xmm9, %xmm10, %xmm11, 1188 + %xmm12, %xmm13, %xmm14, 1189 + %xmm15, %rax, %r9, 13); 1190 + aria_fo_gfni(%xmm9, %xmm8, %xmm11, %xmm10, 1191 + %xmm12, %xmm13, %xmm14, %xmm15, 1192 + %xmm0, %xmm1, %xmm2, %xmm3, 1193 + %xmm4, %xmm5, %xmm6, %xmm7, 1194 + %rax, %r9, 14); 1195 + aria_ff_gfni(%xmm1, %xmm0, %xmm3, %xmm2, 1196 + %xmm4, %xmm5, %xmm6, %xmm7, 1197 + %xmm8, %xmm9, %xmm10, %xmm11, 1198 + %xmm12, %xmm13, %xmm14, 1199 + %xmm15, %rax, %r9, 15, 16); 1200 + .Laria_gfni_end: 1201 + debyteslice_16x16b(%xmm8, %xmm12, %xmm1, %xmm4, 1202 + %xmm9, %xmm13, %xmm0, %xmm5, 1203 + %xmm10, %xmm14, %xmm3, %xmm6, 1204 + %xmm11, %xmm15, %xmm2, %xmm7, 1205 + (%rax), (%r8)); 1206 + 1207 + FRAME_END 1208 + RET; 1209 + SYM_FUNC_END(__aria_aesni_avx_gfni_crypt_16way) 1210 + 1211 + SYM_FUNC_START(aria_aesni_avx_gfni_encrypt_16way) 1212 + /* input: 1213 + * %rdi: ctx, CTX 1214 + * %rsi: dst 1215 + * %rdx: src 1216 + */ 1217 + 1218 + FRAME_BEGIN 1219 + 1220 + leaq enc_key(CTX), %r9; 1221 + 1222 + inpack16_pre(%xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, 1223 + %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, 1224 + %xmm15, %rdx); 1225 + 1226 + call 
__aria_aesni_avx_gfni_crypt_16way; 1227 + 1228 + write_output(%xmm1, %xmm0, %xmm3, %xmm2, %xmm4, %xmm5, %xmm6, %xmm7, 1229 + %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, 1230 + %xmm15, %rax); 1231 + 1232 + FRAME_END 1233 + RET; 1234 + SYM_FUNC_END(aria_aesni_avx_gfni_encrypt_16way) 1235 + 1236 + SYM_FUNC_START(aria_aesni_avx_gfni_decrypt_16way) 1237 + /* input: 1238 + * %rdi: ctx, CTX 1239 + * %rsi: dst 1240 + * %rdx: src 1241 + */ 1242 + 1243 + FRAME_BEGIN 1244 + 1245 + leaq dec_key(CTX), %r9; 1246 + 1247 + inpack16_pre(%xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, 1248 + %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, 1249 + %xmm15, %rdx); 1250 + 1251 + call __aria_aesni_avx_gfni_crypt_16way; 1252 + 1253 + write_output(%xmm1, %xmm0, %xmm3, %xmm2, %xmm4, %xmm5, %xmm6, %xmm7, 1254 + %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, 1255 + %xmm15, %rax); 1256 + 1257 + FRAME_END 1258 + RET; 1259 + SYM_FUNC_END(aria_aesni_avx_gfni_decrypt_16way) 1260 + 1261 + SYM_FUNC_START(aria_aesni_avx_gfni_ctr_crypt_16way) 1262 + /* input: 1263 + * %rdi: ctx 1264 + * %rsi: dst 1265 + * %rdx: src 1266 + * %rcx: keystream 1267 + * %r8: iv (big endian, 128bit) 1268 + */ 1269 + FRAME_BEGIN 1270 + 1271 + call __aria_aesni_avx_ctr_gen_keystream_16way 1272 + 1273 + leaq (%rsi), %r10; 1274 + leaq (%rdx), %r11; 1275 + leaq (%rcx), %rsi; 1276 + leaq (%rcx), %rdx; 1277 + leaq enc_key(CTX), %r9; 1278 + 1279 + call __aria_aesni_avx_gfni_crypt_16way; 1280 + 1281 + vpxor (0 * 16)(%r11), %xmm1, %xmm1; 1282 + vpxor (1 * 16)(%r11), %xmm0, %xmm0; 1283 + vpxor (2 * 16)(%r11), %xmm3, %xmm3; 1284 + vpxor (3 * 16)(%r11), %xmm2, %xmm2; 1285 + vpxor (4 * 16)(%r11), %xmm4, %xmm4; 1286 + vpxor (5 * 16)(%r11), %xmm5, %xmm5; 1287 + vpxor (6 * 16)(%r11), %xmm6, %xmm6; 1288 + vpxor (7 * 16)(%r11), %xmm7, %xmm7; 1289 + vpxor (8 * 16)(%r11), %xmm8, %xmm8; 1290 + vpxor (9 * 16)(%r11), %xmm9, %xmm9; 1291 + vpxor (10 * 16)(%r11), %xmm10, %xmm10; 1292 + vpxor (11 * 16)(%r11), %xmm11, %xmm11; 1293 + vpxor (12 * 16)(%r11), %xmm12, %xmm12; 1294 + vpxor (13 * 16)(%r11), %xmm13, %xmm13; 1295 + vpxor (14 * 16)(%r11), %xmm14, %xmm14; 1296 + vpxor (15 * 16)(%r11), %xmm15, %xmm15; 1297 + write_output(%xmm1, %xmm0, %xmm3, %xmm2, %xmm4, %xmm5, %xmm6, %xmm7, 1298 + %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, 1299 + %xmm15, %r10); 1300 + 1301 + FRAME_END 1302 + RET; 1303 + SYM_FUNC_END(aria_aesni_avx_gfni_ctr_crypt_16way)
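Two building blocks of the assembly above are easy to sanity-check in plain C. BV8()/BM8X8() pack GF(2) bit-vectors and bit-matrices into the operands consumed by the GFNI vgf2p8affine*qb instructions, and inc_le128() is a branchless 128-bit counter increment used by the CTR keystream generator. A hedged restatement (the bv8/inc128 helper names are invented for the sketch):

#include <stdint.h>
#include <stdio.h>

/* Same packing as the BV8 macro in the .S file: bit a0 lands in bit 0. */
static uint8_t bv8(const int a[8])
{
	uint8_t v = 0;

	for (int i = 0; i < 8; i++)
		v |= (uint8_t)((a[i] & 1) << i);
	return v;
}

/* What inc_le128 computes with vpcmpeqq/vpsubq/vpslldq: add 1 to a
 * 128-bit little-endian counter, carrying from the low quadword into
 * the high one (the asm derives the carry branchlessly from a compare
 * mask instead of this if). */
static void inc128(uint64_t ctr[2])
{
	if (++ctr[0] == 0)
		ctr[1]++;
}

int main(void)
{
	int tf_aff[8] = { 1, 1, 0, 0, 0, 1, 1, 0 };	/* tf_aff_const above */
	uint64_t ctr[2] = { ~0ULL, 0 };

	printf("0x%02x\n", bv8(tf_aff));	/* 0x63: the AES affine constant */

	inc128(ctr);				/* low wraps to 0, high becomes 1 */
	printf("%llu %llu\n", (unsigned long long)ctr[0],
	       (unsigned long long)ctr[1]);
	return 0;
}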
+16
arch/x86/crypto/aria-avx.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0-or-later */
2 + #ifndef ASM_X86_ARIA_AVX_H
3 + #define ASM_X86_ARIA_AVX_H
4 +
5 + #include <linux/types.h>
6 +
7 + #define ARIA_AESNI_PARALLEL_BLOCKS 16
8 + #define ARIA_AESNI_PARALLEL_BLOCK_SIZE (ARIA_BLOCK_SIZE * 16)
9 +
10 + struct aria_avx_ops {
11 + void (*aria_encrypt_16way)(const void *ctx, u8 *dst, const u8 *src);
12 + void (*aria_decrypt_16way)(const void *ctx, u8 *dst, const u8 *src);
13 + void (*aria_ctr_crypt_16way)(const void *ctx, u8 *dst, const u8 *src,
14 + u8 *keystream, u8 *iv);
15 + };
16 + #endif
+213
arch/x86/crypto/aria_aesni_avx_glue.c
··· 1 + /* SPDX-License-Identifier: GPL-2.0-or-later */ 2 + /* 3 + * Glue Code for the AVX/AES-NI/GFNI assembler implementation of the ARIA Cipher 4 + * 5 + * Copyright (c) 2022 Taehee Yoo <ap420073@gmail.com> 6 + */ 7 + 8 + #include <crypto/algapi.h> 9 + #include <crypto/internal/simd.h> 10 + #include <crypto/aria.h> 11 + #include <linux/crypto.h> 12 + #include <linux/err.h> 13 + #include <linux/module.h> 14 + #include <linux/types.h> 15 + 16 + #include "ecb_cbc_helpers.h" 17 + #include "aria-avx.h" 18 + 19 + asmlinkage void aria_aesni_avx_encrypt_16way(const void *ctx, u8 *dst, 20 + const u8 *src); 21 + asmlinkage void aria_aesni_avx_decrypt_16way(const void *ctx, u8 *dst, 22 + const u8 *src); 23 + asmlinkage void aria_aesni_avx_ctr_crypt_16way(const void *ctx, u8 *dst, 24 + const u8 *src, 25 + u8 *keystream, u8 *iv); 26 + asmlinkage void aria_aesni_avx_gfni_encrypt_16way(const void *ctx, u8 *dst, 27 + const u8 *src); 28 + asmlinkage void aria_aesni_avx_gfni_decrypt_16way(const void *ctx, u8 *dst, 29 + const u8 *src); 30 + asmlinkage void aria_aesni_avx_gfni_ctr_crypt_16way(const void *ctx, u8 *dst, 31 + const u8 *src, 32 + u8 *keystream, u8 *iv); 33 + 34 + static struct aria_avx_ops aria_ops; 35 + 36 + static int ecb_do_encrypt(struct skcipher_request *req, const u32 *rkey) 37 + { 38 + ECB_WALK_START(req, ARIA_BLOCK_SIZE, ARIA_AESNI_PARALLEL_BLOCKS); 39 + ECB_BLOCK(ARIA_AESNI_PARALLEL_BLOCKS, aria_ops.aria_encrypt_16way); 40 + ECB_BLOCK(1, aria_encrypt); 41 + ECB_WALK_END(); 42 + } 43 + 44 + static int ecb_do_decrypt(struct skcipher_request *req, const u32 *rkey) 45 + { 46 + ECB_WALK_START(req, ARIA_BLOCK_SIZE, ARIA_AESNI_PARALLEL_BLOCKS); 47 + ECB_BLOCK(ARIA_AESNI_PARALLEL_BLOCKS, aria_ops.aria_decrypt_16way); 48 + ECB_BLOCK(1, aria_decrypt); 49 + ECB_WALK_END(); 50 + } 51 + 52 + static int aria_avx_ecb_encrypt(struct skcipher_request *req) 53 + { 54 + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); 55 + struct aria_ctx *ctx = crypto_skcipher_ctx(tfm); 56 + 57 + return ecb_do_encrypt(req, ctx->enc_key[0]); 58 + } 59 + 60 + static int aria_avx_ecb_decrypt(struct skcipher_request *req) 61 + { 62 + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); 63 + struct aria_ctx *ctx = crypto_skcipher_ctx(tfm); 64 + 65 + return ecb_do_decrypt(req, ctx->dec_key[0]); 66 + } 67 + 68 + static int aria_avx_set_key(struct crypto_skcipher *tfm, const u8 *key, 69 + unsigned int keylen) 70 + { 71 + return aria_set_key(&tfm->base, key, keylen); 72 + } 73 + 74 + static int aria_avx_ctr_encrypt(struct skcipher_request *req) 75 + { 76 + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); 77 + struct aria_ctx *ctx = crypto_skcipher_ctx(tfm); 78 + struct skcipher_walk walk; 79 + unsigned int nbytes; 80 + int err; 81 + 82 + err = skcipher_walk_virt(&walk, req, false); 83 + 84 + while ((nbytes = walk.nbytes) > 0) { 85 + const u8 *src = walk.src.virt.addr; 86 + u8 *dst = walk.dst.virt.addr; 87 + 88 + while (nbytes >= ARIA_AESNI_PARALLEL_BLOCK_SIZE) { 89 + u8 keystream[ARIA_AESNI_PARALLEL_BLOCK_SIZE]; 90 + 91 + kernel_fpu_begin(); 92 + aria_ops.aria_ctr_crypt_16way(ctx, dst, src, keystream, 93 + walk.iv); 94 + kernel_fpu_end(); 95 + dst += ARIA_AESNI_PARALLEL_BLOCK_SIZE; 96 + src += ARIA_AESNI_PARALLEL_BLOCK_SIZE; 97 + nbytes -= ARIA_AESNI_PARALLEL_BLOCK_SIZE; 98 + } 99 + 100 + while (nbytes >= ARIA_BLOCK_SIZE) { 101 + u8 keystream[ARIA_BLOCK_SIZE]; 102 + 103 + memcpy(keystream, walk.iv, ARIA_BLOCK_SIZE); 104 + crypto_inc(walk.iv, ARIA_BLOCK_SIZE); 105 + 106 + aria_encrypt(ctx, keystream, 
keystream); 107 + 108 + crypto_xor_cpy(dst, src, keystream, ARIA_BLOCK_SIZE); 109 + dst += ARIA_BLOCK_SIZE; 110 + src += ARIA_BLOCK_SIZE; 111 + nbytes -= ARIA_BLOCK_SIZE; 112 + } 113 + 114 + if (walk.nbytes == walk.total && nbytes > 0) { 115 + u8 keystream[ARIA_BLOCK_SIZE]; 116 + 117 + memcpy(keystream, walk.iv, ARIA_BLOCK_SIZE); 118 + crypto_inc(walk.iv, ARIA_BLOCK_SIZE); 119 + 120 + aria_encrypt(ctx, keystream, keystream); 121 + 122 + crypto_xor_cpy(dst, src, keystream, nbytes); 123 + dst += nbytes; 124 + src += nbytes; 125 + nbytes = 0; 126 + } 127 + err = skcipher_walk_done(&walk, nbytes); 128 + } 129 + 130 + return err; 131 + } 132 + 133 + static struct skcipher_alg aria_algs[] = { 134 + { 135 + .base.cra_name = "__ecb(aria)", 136 + .base.cra_driver_name = "__ecb-aria-avx", 137 + .base.cra_priority = 400, 138 + .base.cra_flags = CRYPTO_ALG_INTERNAL, 139 + .base.cra_blocksize = ARIA_BLOCK_SIZE, 140 + .base.cra_ctxsize = sizeof(struct aria_ctx), 141 + .base.cra_module = THIS_MODULE, 142 + .min_keysize = ARIA_MIN_KEY_SIZE, 143 + .max_keysize = ARIA_MAX_KEY_SIZE, 144 + .setkey = aria_avx_set_key, 145 + .encrypt = aria_avx_ecb_encrypt, 146 + .decrypt = aria_avx_ecb_decrypt, 147 + }, { 148 + .base.cra_name = "__ctr(aria)", 149 + .base.cra_driver_name = "__ctr-aria-avx", 150 + .base.cra_priority = 400, 151 + .base.cra_flags = CRYPTO_ALG_INTERNAL, 152 + .base.cra_blocksize = 1, 153 + .base.cra_ctxsize = sizeof(struct aria_ctx), 154 + .base.cra_module = THIS_MODULE, 155 + .min_keysize = ARIA_MIN_KEY_SIZE, 156 + .max_keysize = ARIA_MAX_KEY_SIZE, 157 + .ivsize = ARIA_BLOCK_SIZE, 158 + .chunksize = ARIA_BLOCK_SIZE, 159 + .walksize = 16 * ARIA_BLOCK_SIZE, 160 + .setkey = aria_avx_set_key, 161 + .encrypt = aria_avx_ctr_encrypt, 162 + .decrypt = aria_avx_ctr_encrypt, 163 + } 164 + }; 165 + 166 + static struct simd_skcipher_alg *aria_simd_algs[ARRAY_SIZE(aria_algs)]; 167 + 168 + static int __init aria_avx_init(void) 169 + { 170 + const char *feature_name; 171 + 172 + if (!boot_cpu_has(X86_FEATURE_AVX) || 173 + !boot_cpu_has(X86_FEATURE_AES) || 174 + !boot_cpu_has(X86_FEATURE_OSXSAVE)) { 175 + pr_info("AVX or AES-NI instructions are not detected.\n"); 176 + return -ENODEV; 177 + } 178 + 179 + if (!cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM, 180 + &feature_name)) { 181 + pr_info("CPU feature '%s' is not supported.\n", feature_name); 182 + return -ENODEV; 183 + } 184 + 185 + if (boot_cpu_has(X86_FEATURE_GFNI)) { 186 + aria_ops.aria_encrypt_16way = aria_aesni_avx_gfni_encrypt_16way; 187 + aria_ops.aria_decrypt_16way = aria_aesni_avx_gfni_decrypt_16way; 188 + aria_ops.aria_ctr_crypt_16way = aria_aesni_avx_gfni_ctr_crypt_16way; 189 + } else { 190 + aria_ops.aria_encrypt_16way = aria_aesni_avx_encrypt_16way; 191 + aria_ops.aria_decrypt_16way = aria_aesni_avx_decrypt_16way; 192 + aria_ops.aria_ctr_crypt_16way = aria_aesni_avx_ctr_crypt_16way; 193 + } 194 + 195 + return simd_register_skciphers_compat(aria_algs, 196 + ARRAY_SIZE(aria_algs), 197 + aria_simd_algs); 198 + } 199 + 200 + static void __exit aria_avx_exit(void) 201 + { 202 + simd_unregister_skciphers(aria_algs, ARRAY_SIZE(aria_algs), 203 + aria_simd_algs); 204 + } 205 + 206 + module_init(aria_avx_init); 207 + module_exit(aria_avx_exit); 208 + 209 + MODULE_LICENSE("GPL"); 210 + MODULE_AUTHOR("Taehee Yoo <ap420073@gmail.com>"); 211 + MODULE_DESCRIPTION("ARIA Cipher Algorithm, AVX/AES-NI/GFNI optimized"); 212 + MODULE_ALIAS_CRYPTO("aria"); 213 + MODULE_ALIAS_CRYPTO("aria-aesni-avx");
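Because the glue code registers the internal "__ecb(aria)"/"__ctr(aria)" algorithms behind the SIMD wrapper, the accelerated code should be reachable from userspace under the plain names, for example through AF_ALG. A hedged sketch (error handling elided; key, IV, and data are placeholders; on a capable CPU the request should be served by the driver registered above):

#include <stddef.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <sys/uio.h>
#include <linux/if_alg.h>

int main(void)
{
	struct sockaddr_alg sa = {
		.salg_family = AF_ALG,
		.salg_type = "skcipher",
		.salg_name = "ctr(aria)",
	};
	unsigned char key[16] = { 0 };	/* placeholder 128-bit key */
	unsigned char buf[256] = { 0 };	/* 16 blocks: matches the walksize,
					 * so it exercises the 16-way path */
	char cbuf[CMSG_SPACE(sizeof(__u32)) +
		  CMSG_SPACE(offsetof(struct af_alg_iv, iv) + 16)] = { 0 };
	struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };
	struct msghdr msg = {
		.msg_control = cbuf, .msg_controllen = sizeof(cbuf),
		.msg_iov = &iov, .msg_iovlen = 1,
	};
	struct cmsghdr *cmsg;
	struct af_alg_iv *ivp;
	int tfmfd, opfd;

	tfmfd = socket(AF_ALG, SOCK_SEQPACKET, 0);
	bind(tfmfd, (struct sockaddr *)&sa, sizeof(sa));
	setsockopt(tfmfd, SOL_ALG, ALG_SET_KEY, key, sizeof(key));
	opfd = accept(tfmfd, NULL, 0);

	cmsg = CMSG_FIRSTHDR(&msg);		/* ALG_SET_OP: encrypt */
	cmsg->cmsg_level = SOL_ALG;
	cmsg->cmsg_type = ALG_SET_OP;
	cmsg->cmsg_len = CMSG_LEN(sizeof(__u32));
	*(__u32 *)CMSG_DATA(cmsg) = ALG_OP_ENCRYPT;

	cmsg = CMSG_NXTHDR(&msg, cmsg);		/* ALG_SET_IV: counter block */
	cmsg->cmsg_level = SOL_ALG;
	cmsg->cmsg_type = ALG_SET_IV;
	cmsg->cmsg_len = CMSG_LEN(offsetof(struct af_alg_iv, iv) + 16);
	ivp = (struct af_alg_iv *)CMSG_DATA(cmsg);
	ivp->ivlen = 16;			/* ARIA_BLOCK_SIZE */
	memset(ivp->iv, 0, 16);

	sendmsg(opfd, &msg, 0);			/* submit plaintext + op + IV */
	read(opfd, buf, sizeof(buf));		/* ciphertext comes back */

	close(opfd);
	close(tfmfd);
	return 0;
}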
+10
arch/x86/crypto/sha512_ssse3_glue.c
··· 36 36 #include <linux/types.h> 37 37 #include <crypto/sha2.h> 38 38 #include <crypto/sha512_base.h> 39 + #include <asm/cpu_device_id.h> 39 40 #include <asm/simd.h> 40 41 41 42 asmlinkage void sha512_transform_ssse3(struct sha512_state *state, ··· 285 284 ARRAY_SIZE(sha512_avx2_algs)); 286 285 return 0; 287 286 } 287 + static const struct x86_cpu_id module_cpu_ids[] = { 288 + X86_MATCH_FEATURE(X86_FEATURE_AVX2, NULL), 289 + X86_MATCH_FEATURE(X86_FEATURE_AVX, NULL), 290 + X86_MATCH_FEATURE(X86_FEATURE_SSSE3, NULL), 291 + {} 292 + }; 293 + MODULE_DEVICE_TABLE(x86cpu, module_cpu_ids); 288 294 289 295 static void unregister_sha512_avx2(void) 290 296 { ··· 302 294 303 295 static int __init sha512_ssse3_mod_init(void) 304 296 { 297 + if (!x86_match_cpu(module_cpu_ids)) 298 + return -ENODEV; 305 299 306 300 if (register_sha512_ssse3()) 307 301 goto fail;
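This hunk is the changelog's "Load x86/sha512 based on CPU features": the device table is exported as a modalias so udev can auto-load the module only on CPUs that advertise one of the listed features, and the x86_match_cpu() gate makes a manual load fail cleanly elsewhere. A hedged, minimal version of the same pattern (the demo_* names are invented):

#include <linux/module.h>
#include <asm/cpu_device_id.h>

/* Entries are OR'd: the table matches if the CPU has any listed feature. */
static const struct x86_cpu_id demo_cpu_ids[] = {
	X86_MATCH_FEATURE(X86_FEATURE_SSSE3, NULL),
	{}
};
/* Emits a "cpu:..." modalias used for udev-driven autoloading. */
MODULE_DEVICE_TABLE(x86cpu, demo_cpu_ids);

static int __init demo_init(void)
{
	if (!x86_match_cpu(demo_cpu_ids))
		return -ENODEV;	/* no matching feature: refuse to load */
	return 0;
}

static void __exit demo_exit(void)
{
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");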
+798 -1513
crypto/Kconfig
··· 15 15 # 16 16 menuconfig CRYPTO 17 17 tristate "Cryptographic API" 18 - select LIB_MEMNEQ 18 + select CRYPTO_LIB_UTILS 19 19 help 20 20 This option provides the core Cryptographic API. 21 21 22 22 if CRYPTO 23 23 24 - comment "Crypto core or helper" 24 + menu "Crypto core or helper" 25 25 26 26 config CRYPTO_FIPS 27 27 bool "FIPS 200 compliance" ··· 219 219 select CRYPTO_NULL 220 220 help 221 221 Authenc: Combined mode wrapper for IPsec. 222 - This is required for IPSec. 222 + 223 + This is required for IPSec ESP (XFRM_ESP). 223 224 224 225 config CRYPTO_TEST 225 226 tristate "Testing module" ··· 236 235 config CRYPTO_ENGINE 237 236 tristate 238 237 239 - comment "Public-key cryptography" 238 + endmenu 239 + 240 + menu "Public-key cryptography" 240 241 241 242 config CRYPTO_RSA 242 - tristate "RSA algorithm" 243 + tristate "RSA (Rivest-Shamir-Adleman)" 243 244 select CRYPTO_AKCIPHER 244 245 select CRYPTO_MANAGER 245 246 select MPILIB 246 247 select ASN1 247 248 help 248 - Generic implementation of the RSA public key algorithm. 249 + RSA (Rivest-Shamir-Adleman) public key algorithm (RFC8017) 249 250 250 251 config CRYPTO_DH 251 - tristate "Diffie-Hellman algorithm" 252 + tristate "DH (Diffie-Hellman)" 252 253 select CRYPTO_KPP 253 254 select MPILIB 254 255 help 255 - Generic implementation of the Diffie-Hellman algorithm. 256 + DH (Diffie-Hellman) key exchange algorithm 256 257 257 258 config CRYPTO_DH_RFC7919_GROUPS 258 - bool "Support for RFC 7919 FFDHE group parameters" 259 + bool "RFC 7919 FFDHE groups" 259 260 depends on CRYPTO_DH 260 261 select CRYPTO_RNG_DEFAULT 261 262 help 262 - Provide support for RFC 7919 FFDHE group parameters. If unsure, say N. 263 + FFDHE (Finite-Field-based Diffie-Hellman Ephemeral) groups 264 + defined in RFC7919. 265 + 266 + Support these finite-field groups in DH key exchanges: 267 + - ffdhe2048, ffdhe3072, ffdhe4096, ffdhe6144, ffdhe8192 268 + 269 + If unsure, say N. 263 270 264 271 config CRYPTO_ECC 265 272 tristate 266 273 select CRYPTO_RNG_DEFAULT 267 274 268 275 config CRYPTO_ECDH 269 - tristate "ECDH algorithm" 276 + tristate "ECDH (Elliptic Curve Diffie-Hellman)" 270 277 select CRYPTO_ECC 271 278 select CRYPTO_KPP 272 279 help 273 - Generic implementation of the ECDH algorithm 280 + ECDH (Elliptic Curve Diffie-Hellman) key exchange algorithm 281 + using curves P-192, P-256, and P-384 (FIPS 186) 274 282 275 283 config CRYPTO_ECDSA 276 - tristate "ECDSA (NIST P192, P256 etc.) algorithm" 284 + tristate "ECDSA (Elliptic Curve Digital Signature Algorithm)" 277 285 select CRYPTO_ECC 278 286 select CRYPTO_AKCIPHER 279 287 select ASN1 280 288 help 281 - Elliptic Curve Digital Signature Algorithm (NIST P192, P256 etc.) 282 - is A NIST cryptographic standard algorithm. Only signature verification 283 - is implemented. 289 + ECDSA (Elliptic Curve Digital Signature Algorithm) (FIPS 186, 290 + ISO/IEC 14888-3) 291 + using curves P-192, P-256, and P-384 292 + 293 + Only signature verification is implemented. 284 294 285 295 config CRYPTO_ECRDSA 286 - tristate "EC-RDSA (GOST 34.10) algorithm" 296 + tristate "EC-RDSA (Elliptic Curve Russian Digital Signature Algorithm)" 287 297 select CRYPTO_ECC 288 298 select CRYPTO_AKCIPHER 289 299 select CRYPTO_STREEBOG ··· 302 290 select ASN1 303 291 help 304 292 Elliptic Curve Russian Digital Signature Algorithm (GOST R 34.10-2012, 305 - RFC 7091, ISO/IEC 14888-3:2018) is one of the Russian cryptographic 306 - standard algorithms (called GOST algorithms). Only signature verification 307 - is implemented. 
293 + RFC 7091, ISO/IEC 14888-3) 294 + 295 + One of the Russian cryptographic standard algorithms (called GOST 296 + algorithms). Only signature verification is implemented. 308 297 309 298 config CRYPTO_SM2 310 - tristate "SM2 algorithm" 299 + tristate "SM2 (ShangMi 2)" 311 300 select CRYPTO_SM3 312 301 select CRYPTO_AKCIPHER 313 302 select CRYPTO_MANAGER 314 303 select MPILIB 315 304 select ASN1 316 305 help 317 - Generic implementation of the SM2 public key algorithm. It was 318 - published by State Encryption Management Bureau, China. 306 + SM2 (ShangMi 2) public key algorithm 307 + 308 + Published by State Encryption Management Bureau, China, 319 309 as specified by OSCCA GM/T 0003.1-2012 -- 0003.5-2012. 320 310 321 311 References: 322 - https://tools.ietf.org/html/draft-shen-sm2-ecdsa-02 312 + https://datatracker.ietf.org/doc/draft-shen-sm2-ecdsa/ 323 313 http://www.oscca.gov.cn/sca/xxgk/2010-12/17/content_1002386.shtml 324 314 http://www.gmbz.org.cn/main/bzlb.html 325 315 326 316 config CRYPTO_CURVE25519 327 - tristate "Curve25519 algorithm" 317 + tristate "Curve25519" 328 318 select CRYPTO_KPP 329 319 select CRYPTO_LIB_CURVE25519_GENERIC 330 - 331 - config CRYPTO_CURVE25519_X86 332 - tristate "x86_64 accelerated Curve25519 scalar multiplication library" 333 - depends on X86 && 64BIT 334 - select CRYPTO_LIB_CURVE25519_GENERIC 335 - select CRYPTO_ARCH_HAVE_LIB_CURVE25519 336 - 337 - comment "Authenticated Encryption with Associated Data" 338 - 339 - config CRYPTO_CCM 340 - tristate "CCM support" 341 - select CRYPTO_CTR 342 - select CRYPTO_HASH 343 - select CRYPTO_AEAD 344 - select CRYPTO_MANAGER 345 320 help 346 - Support for Counter with CBC MAC. Required for IPsec. 321 + Curve25519 elliptic curve (RFC7748) 347 322 348 - config CRYPTO_GCM 349 - tristate "GCM/GMAC support" 350 - select CRYPTO_CTR 351 - select CRYPTO_AEAD 352 - select CRYPTO_GHASH 353 - select CRYPTO_NULL 354 - select CRYPTO_MANAGER 323 + endmenu 324 + 325 + menu "Block ciphers" 326 + 327 + config CRYPTO_AES 328 + tristate "AES (Advanced Encryption Standard)" 329 + select CRYPTO_ALGAPI 330 + select CRYPTO_LIB_AES 355 331 help 356 - Support for Galois/Counter Mode (GCM) and Galois Message 357 - Authentication Code (GMAC). Required for IPSec. 332 + AES cipher algorithms (Rijndael)(FIPS-197, ISO/IEC 18033-3) 358 333 359 - config CRYPTO_CHACHA20POLY1305 360 - tristate "ChaCha20-Poly1305 AEAD support" 361 - select CRYPTO_CHACHA20 362 - select CRYPTO_POLY1305 363 - select CRYPTO_AEAD 364 - select CRYPTO_MANAGER 334 + Rijndael appears to be consistently a very good performer in 335 + both hardware and software across a wide range of computing 336 + environments regardless of its use in feedback or non-feedback 337 + modes. Its key setup time is excellent, and its key agility is 338 + good. Rijndael's very low memory requirements make it very well 339 + suited for restricted-space environments, in which it also 340 + demonstrates excellent performance. Rijndael's operations are 341 + among the easiest to defend against power and timing attacks. 342 + 343 + The AES specifies three key sizes: 128, 192 and 256 bits 344 + 345 + config CRYPTO_AES_TI 346 + tristate "AES (Advanced Encryption Standard) (fixed time)" 347 + select CRYPTO_ALGAPI 348 + select CRYPTO_LIB_AES 365 349 help 366 - ChaCha20-Poly1305 AEAD support, RFC7539. 350 + AES cipher algorithms (Rijndael)(FIPS-197, ISO/IEC 18033-3) 367 351 368 - Support for the AEAD wrapper using the ChaCha20 stream cipher combined 369 - with the Poly1305 authenticator. 
It is defined in RFC7539 for use in 370 - IETF protocols. 352 + This is a generic implementation of AES that attempts to eliminate 353 + data dependent latencies as much as possible without affecting 354 + performance too much. It is intended for use by the generic CCM 355 + and GCM drivers, and other CTR or CMAC/XCBC based modes that rely 356 + solely on encryption (although decryption is supported as well, but 357 + with a more dramatic performance hit). 371 358 372 - config CRYPTO_AEGIS128 373 - tristate "AEGIS-128 AEAD algorithm" 374 - select CRYPTO_AEAD 375 - select CRYPTO_AES # for AES S-box tables 359 + Instead of using 16 lookup tables of 1 KB each (8 for encryption and 360 + 8 for decryption), this implementation uses just two S-boxes of 361 + 256 bytes each, and attempts to eliminate data dependent latencies by 362 + prefetching the entire table into the cache at the start of each 363 + block. Interrupts are also disabled to avoid races where cachelines 364 + are evicted when the CPU is interrupted to do something else. 365 + 366 + config CRYPTO_ANUBIS 367 + tristate "Anubis" 368 + depends on CRYPTO_USER_API_ENABLE_OBSOLETE 369 + select CRYPTO_ALGAPI 376 370 help 377 - Support for the AEGIS-128 dedicated AEAD algorithm. 371 + Anubis cipher algorithm 378 372 379 - config CRYPTO_AEGIS128_SIMD 380 - bool "Support SIMD acceleration for AEGIS-128" 381 - depends on CRYPTO_AEGIS128 && ((ARM || ARM64) && KERNEL_MODE_NEON) 382 - default y 373 + Anubis is a variable key length cipher which can use keys from 374 + 128 bits to 320 bits in length. It was evaluated as an entrant 375 + in the NESSIE competition. 383 376 384 - config CRYPTO_AEGIS128_AESNI_SSE2 385 - tristate "AEGIS-128 AEAD algorithm (x86_64 AESNI+SSE2 implementation)" 386 - depends on X86 && 64BIT 387 - select CRYPTO_AEAD 388 - select CRYPTO_SIMD 377 + See https://web.archive.org/web/20160606112246/http://www.larc.usp.br/~pbarreto/AnubisPage.html 378 + for further information. 379 + 380 + config CRYPTO_ARIA 381 + tristate "ARIA" 382 + select CRYPTO_ALGAPI 389 383 help 390 - AESNI+SSE2 implementation of the AEGIS-128 dedicated AEAD algorithm. 384 + ARIA cipher algorithm (RFC5794) 391 385 392 - config CRYPTO_SEQIV 393 - tristate "Sequence Number IV Generator" 394 - select CRYPTO_AEAD 395 - select CRYPTO_SKCIPHER 396 - select CRYPTO_NULL 397 - select CRYPTO_RNG_DEFAULT 398 - select CRYPTO_MANAGER 386 + ARIA is a standard encryption algorithm of the Republic of Korea. 387 + ARIA specifies three key sizes, each with its own round count. 388 + 128-bit: 12 rounds. 389 + 192-bit: 14 rounds. 390 + 256-bit: 16 rounds. 391 + 392 + See: 393 + https://seed.kisa.or.kr/kisa/algorithm/EgovAriaInfo.do 394 + 395 + config CRYPTO_BLOWFISH 396 + tristate "Blowfish" 397 + select CRYPTO_ALGAPI 398 + select CRYPTO_BLOWFISH_COMMON 399 399 help 400 - This IV generator generates an IV based on a sequence number by 401 - xoring it with a salt. This algorithm is mainly useful for CTR 400 + Blowfish cipher algorithm, by Bruce Schneier 402 401 403 - config CRYPTO_ECHAINIV 404 - tristate "Encrypted Chain IV Generator" 405 - select CRYPTO_AEAD 406 - select CRYPTO_NULL 407 - select CRYPTO_RNG_DEFAULT 408 - select CRYPTO_MANAGER 409 - help 410 - This IV generator generates an IV based on the encryption of 411 - a sequence number xored with a salt. This is the default 412 - algorithm for CBC. 402 + This is a variable key length cipher which can use keys from 32 403 + bits to 448 bits in length.
It's fast, simple and specifically 404 + designed for use on "large microprocessors". 413 405 414 - comment "Block modes" 406 + See https://www.schneier.com/blowfish.html for further information. 415 407 416 - config CRYPTO_CBC 417 - tristate "CBC support" 418 - select CRYPTO_SKCIPHER 419 - select CRYPTO_MANAGER 420 - help 421 - CBC: Cipher Block Chaining mode 422 - This block cipher algorithm is required for IPSec. 423 - 424 - config CRYPTO_CFB 425 - tristate "CFB support" 426 - select CRYPTO_SKCIPHER 427 - select CRYPTO_MANAGER 428 - help 429 - CFB: Cipher FeedBack mode 430 - This block cipher algorithm is required for TPM2 Cryptography. 431 - 432 - config CRYPTO_CTR 433 - tristate "CTR support" 434 - select CRYPTO_SKCIPHER 435 - select CRYPTO_MANAGER 436 - help 437 - CTR: Counter mode 438 - This block cipher algorithm is required for IPSec. 439 - 440 - config CRYPTO_CTS 441 - tristate "CTS support" 442 - select CRYPTO_SKCIPHER 443 - select CRYPTO_MANAGER 444 - help 445 - CTS: Cipher Text Stealing 446 - This is the Cipher Text Stealing mode as described by 447 - Section 8 of rfc2040 and referenced by rfc3962 448 - (rfc3962 includes errata information in its Appendix A) or 449 - CBC-CS3 as defined by NIST in Sp800-38A addendum from Oct 2010. 450 - This mode is required for Kerberos gss mechanism support 451 - for AES encryption. 452 - 453 - See: https://csrc.nist.gov/publications/detail/sp/800-38a/addendum/final 454 - 455 - config CRYPTO_ECB 456 - tristate "ECB support" 457 - select CRYPTO_SKCIPHER 458 - select CRYPTO_MANAGER 459 - help 460 - ECB: Electronic CodeBook mode 461 - This is the simplest block cipher algorithm. It simply encrypts 462 - the input block by block. 463 - 464 - config CRYPTO_LRW 465 - tristate "LRW support" 466 - select CRYPTO_SKCIPHER 467 - select CRYPTO_MANAGER 468 - select CRYPTO_GF128MUL 469 - select CRYPTO_ECB 470 - help 471 - LRW: Liskov Rivest Wagner, a tweakable, non malleable, non movable 472 - narrow block cipher mode for dm-crypt. Use it with cipher 473 - specification string aes-lrw-benbi, the key must be 256, 320 or 384. 474 - The first 128, 192 or 256 bits in the key are used for AES and the 475 - rest is used to tie each cipher block to its logical position. 476 - 477 - config CRYPTO_OFB 478 - tristate "OFB support" 479 - select CRYPTO_SKCIPHER 480 - select CRYPTO_MANAGER 481 - help 482 - OFB: the Output Feedback mode makes a block cipher into a synchronous 483 - stream cipher. It generates keystream blocks, which are then XORed 484 - with the plaintext blocks to get the ciphertext. Flipping a bit in the 485 - ciphertext produces a flipped bit in the plaintext at the same 486 - location. This property allows many error correcting codes to function 487 - normally even when applied before encryption. 488 - 489 - config CRYPTO_PCBC 490 - tristate "PCBC support" 491 - select CRYPTO_SKCIPHER 492 - select CRYPTO_MANAGER 493 - help 494 - PCBC: Propagating Cipher Block Chaining mode 495 - This block cipher algorithm is required for RxRPC. 496 - 497 - config CRYPTO_XCTR 408 + config CRYPTO_BLOWFISH_COMMON 498 409 tristate 499 - select CRYPTO_SKCIPHER 500 - select CRYPTO_MANAGER 501 410 help 502 - XCTR: XOR Counter mode. This blockcipher mode is a variant of CTR mode 503 - using XORs and little-endian addition rather than big-endian arithmetic. 504 - XCTR mode is used to implement HCTR2. 411 + Common parts of the Blowfish cipher algorithm shared by the 412 + generic C and the assembler implementations.
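A note on how the entries above are consumed: each tristate registers a generic cipher implementation ("aes", "aria", "blowfish", ...) with the crypto API, and in-kernel users reach it through a mode template via the skcipher interface. A minimal sketch in the style of Documentation/crypto/api-samples.rst (not code from this commit; error handling abbreviated), encrypting one block with "ecb(aes)", which additionally requires CRYPTO_ECB:

    #include <crypto/skcipher.h>
    #include <linux/scatterlist.h>
    #include <linux/slab.h>

    static int demo_ecb_aes(void)
    {
            static const u8 key[16];        /* all-zero 128-bit demo key */
            struct crypto_skcipher *tfm;
            struct skcipher_request *req = NULL;
            struct scatterlist sg;
            DECLARE_CRYPTO_WAIT(wait);
            u8 *buf = NULL;
            int err;

            tfm = crypto_alloc_skcipher("ecb(aes)", 0, 0);
            if (IS_ERR(tfm))
                    return PTR_ERR(tfm);

            err = crypto_skcipher_setkey(tfm, key, sizeof(key));
            if (err)
                    goto out;

            buf = kzalloc(16, GFP_KERNEL);  /* one AES block */
            req = skcipher_request_alloc(tfm, GFP_KERNEL);
            if (!buf || !req) {
                    err = -ENOMEM;
                    goto out;
            }

            sg_init_one(&sg, buf, 16);
            skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP,
                                          crypto_req_done, &wait);
            skcipher_request_set_crypt(req, &sg, &sg, 16, NULL);

            /* crypto_wait_req() also copes with async implementations. */
            err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
    out:
            skcipher_request_free(req);
            kfree(buf);
            crypto_free_skcipher(tfm);
            return err;
    }

The same pattern works for any cipher name registered by the options in this menu, e.g. "ecb(camellia)" or "ecb(aria)".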
505 413 506 - config CRYPTO_XTS 507 - tristate "XTS support" 508 - select CRYPTO_SKCIPHER 509 - select CRYPTO_MANAGER 510 - select CRYPTO_ECB 414 + config CRYPTO_CAMELLIA 415 + tristate "Camellia" 416 + select CRYPTO_ALGAPI 511 417 help 512 - XTS: IEEE1619/D16 narrow block cipher use with aes-xts-plain, 513 - key size 256, 384 or 512 bits. This implementation currently 514 - can't handle a sectorsize which is not a multiple of 16 bytes. 418 + Camellia cipher algorithms (ISO/IEC 18033-3) 515 419 516 - config CRYPTO_KEYWRAP 517 - tristate "Key wrapping support" 518 - select CRYPTO_SKCIPHER 519 - select CRYPTO_MANAGER 520 - help 521 - Support for key wrapping (NIST SP800-38F / RFC3394) without 522 - padding. 420 + Camellia is a symmetric key block cipher developed jointly 421 + at NTT and Mitsubishi Electric Corporation. 523 422 524 - config CRYPTO_NHPOLY1305 423 + Camellia specifies three key sizes: 128, 192 and 256 bits. 424 + 425 + See https://info.isl.ntt.co.jp/crypt/eng/camellia/ for further information. 426 + 427 + config CRYPTO_CAST_COMMON 525 428 tristate 526 - select CRYPTO_HASH 527 - select CRYPTO_LIB_POLY1305_GENERIC 528 529 - config CRYPTO_NHPOLY1305_SSE2 530 - tristate "NHPoly1305 hash function (x86_64 SSE2 implementation)" 531 - depends on X86 && 64BIT 532 - select CRYPTO_NHPOLY1305 533 429 help 534 - SSE2 optimized implementation of the hash function used by the 535 - Adiantum encryption mode. 430 + Common parts of the CAST cipher algorithms shared by the 431 + generic C and the assembler implementations. 536 432 537 - config CRYPTO_NHPOLY1305_AVX2 538 - tristate "NHPoly1305 hash function (x86_64 AVX2 implementation)" 539 - depends on X86 && 64BIT 540 - select CRYPTO_NHPOLY1305 433 + config CRYPTO_CAST5 434 + tristate "CAST5 (CAST-128)" 435 + select CRYPTO_ALGAPI 436 + select CRYPTO_CAST_COMMON 541 437 help 542 - AVX2 optimized implementation of the hash function used by the 543 - Adiantum encryption mode. 438 + CAST5 (CAST-128) cipher algorithm (RFC2144, ISO/IEC 18033-3) 439 440 + config CRYPTO_CAST6 441 + tristate "CAST6 (CAST-256)" 442 + select CRYPTO_ALGAPI 443 + select CRYPTO_CAST_COMMON 444 + help 445 + CAST6 (CAST-256) encryption algorithm (RFC2612) 446 447 + config CRYPTO_DES 448 + tristate "DES and Triple DES EDE" 449 + select CRYPTO_ALGAPI 450 + select CRYPTO_LIB_DES 451 + help 452 + DES (Data Encryption Standard) (FIPS 46-2, ISO/IEC 18033-3) and 453 + Triple DES EDE (Encrypt/Decrypt/Encrypt) (FIPS 46-3, ISO/IEC 18033-3) 454 + cipher algorithms 455 456 + config CRYPTO_FCRYPT 457 + tristate "FCrypt" 458 + select CRYPTO_ALGAPI 459 + select CRYPTO_SKCIPHER 460 + help 461 + FCrypt algorithm used by RxRPC 462 463 + See https://ota.polyonymo.us/fcrypt-paper.txt 464 465 + config CRYPTO_KHAZAD 466 + tristate "Khazad" 467 + depends on CRYPTO_USER_API_ENABLE_OBSOLETE 468 + select CRYPTO_ALGAPI 469 + help 470 + Khazad cipher algorithm 471 472 + Khazad was a finalist in the initial NESSIE competition. It is 473 + an algorithm optimized for 64-bit processors with good performance 474 + on 32-bit processors. Khazad uses a 128-bit key size. 475 476 + See https://web.archive.org/web/20171011071731/http://www.larc.usp.br/~pbarreto/KhazadPage.html 477 + for further information.
478 + 479 + config CRYPTO_SEED 480 + tristate "SEED" 481 + depends on CRYPTO_USER_API_ENABLE_OBSOLETE 482 + select CRYPTO_ALGAPI 483 + help 484 + SEED cipher algorithm (RFC4269, ISO/IEC 18033-3) 485 486 + SEED is a 128-bit symmetric key block cipher that has been 487 + developed by KISA (Korea Information Security Agency) as a 488 + national standard encryption algorithm of the Republic of Korea. 489 + It is a 16 round block cipher with a key size of 128 bits. 490 491 + See https://seed.kisa.or.kr/kisa/algorithm/EgovSeedInfo.do 492 + for further information. 493 494 + config CRYPTO_SERPENT 495 + tristate "Serpent" 496 + select CRYPTO_ALGAPI 497 + help 498 + Serpent cipher algorithm, by Anderson, Biham & Knudsen 499 500 + Keys are allowed to be from 0 to 256 bits in length, in steps 501 + of 8 bits. 502 503 + See https://www.cl.cam.ac.uk/~rja14/serpent.html for further information. 504 505 + config CRYPTO_SM4 506 + tristate 507 508 + config CRYPTO_SM4_GENERIC 509 + tristate "SM4 (ShangMi 4)" 510 + select CRYPTO_ALGAPI 511 + select CRYPTO_SM4 512 + help 513 + SM4 cipher algorithms (OSCCA GB/T 32907-2016, 514 + ISO/IEC 18033-3:2010/Amd 1:2021) 515 516 + SM4 (GBT.32907-2016) is a cryptographic standard issued by the 517 + Organization of State Commercial Administration of China (OSCCA) 518 + as an authorized cryptographic algorithm for use within China. 519 520 + SMS4 was originally created for use in protecting wireless 521 + networks, and is mandated in the Chinese National Standard for 522 + Wireless LAN WAPI (Wired Authentication and Privacy Infrastructure) 523 + (GB.15629.11-2003). 524 525 + The latest SM4 standard (GBT.32907-2016) was proposed by OSCCA and 526 + standardized through TC 260 of the Standardization Administration 527 + of the People's Republic of China (SAC). 528 529 + The input, output, and key of SMS4 are each 128 bits. 530 531 + See https://eprint.iacr.org/2008/329.pdf for further information. 532 533 + If unsure, say N. 534 535 + config CRYPTO_TEA 536 + tristate "TEA, XTEA and XETA" 537 + depends on CRYPTO_USER_API_ENABLE_OBSOLETE 538 + select CRYPTO_ALGAPI 539 + help 540 + TEA (Tiny Encryption Algorithm) cipher algorithms 541 542 + Tiny Encryption Algorithm is a simple cipher that uses 543 + many rounds for security. It is very fast and uses 544 + little memory. 545 546 + Extended Tiny Encryption Algorithm is a modification to 547 + the TEA algorithm to address a potential key weakness 548 + in the TEA algorithm. 549 550 + Extended Encryption Tiny Algorithm is a mis-implementation 551 + of the XTEA algorithm for compatibility purposes. 552 553 + config CRYPTO_TWOFISH 554 + tristate "Twofish" 555 + select CRYPTO_ALGAPI 556 + select CRYPTO_TWOFISH_COMMON 557 + help 558 + Twofish cipher algorithm 559 560 + Twofish was submitted as an AES (Advanced Encryption Standard) 561 + candidate cipher by researchers at CounterPane Systems. It is a 562 + 16 round block cipher supporting key sizes of 128, 192, and 256 563 + bits. 564 565 + See https://www.schneier.com/twofish.html for further information. 566 567 + config CRYPTO_TWOFISH_COMMON 568 + tristate 569 + help 570 + Common parts of the Twofish cipher algorithm shared by the 571 + generic C and the assembler implementations.
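Whether built in or modular, every option in this menu registers its algorithms with the crypto API, and the result is visible in /proc/crypto on a running system, which is the quickest way to confirm what a given .config actually provides. A small, self-contained userspace sketch (a hypothetical helper, not part of this commit) that prints the matching registrations:

    #include <stdio.h>
    #include <string.h>

    int main(int argc, char **argv)
    {
            const char *want = argc > 1 ? argv[1] : "aes";
            char line[256];
            FILE *f = fopen("/proc/crypto", "r");

            if (!f) {
                    perror("/proc/crypto");
                    return 1;
            }
            /* Entries look like "name : twofish" / "driver : twofish-generic". */
            while (fgets(line, sizeof(line), f))
                    if (strstr(line, want) && (!strncmp(line, "name", 4) ||
                                               !strncmp(line, "driver", 6)))
                            fputs(line, stdout);
            fclose(f);
            return 0;
    }

With CRYPTO_TWOFISH=m, for instance, a "twofish-generic" driver line appears once the module is loaded.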
572 + 573 + endmenu 574 + 575 + menu "Length-preserving ciphers and modes" 544 576 545 577 config CRYPTO_ADIANTUM 546 - tristate "Adiantum support" 578 + tristate "Adiantum" 547 579 select CRYPTO_CHACHA20 548 580 select CRYPTO_LIB_POLY1305_GENERIC 549 581 select CRYPTO_NHPOLY1305 550 582 select CRYPTO_MANAGER 551 583 help 552 - Adiantum is a tweakable, length-preserving encryption mode 553 - designed for fast and secure disk encryption, especially on 584 + Adiantum tweakable, length-preserving encryption mode 585 586 + Designed for fast and secure disk encryption, especially on 554 587 CPUs without dedicated crypto instructions. It encrypts 555 588 each sector using the XChaCha12 stream cipher, two passes of 556 589 an ε-almost-∆-universal hash function, and an invocation of ··· 611 554 612 555 If unsure, say N. 557 + config CRYPTO_ARC4 558 + tristate "ARC4 (Alleged Rivest Cipher 4)" 559 + depends on CRYPTO_USER_API_ENABLE_OBSOLETE 560 + select CRYPTO_SKCIPHER 561 + select CRYPTO_LIB_ARC4 562 + help 563 + ARC4 cipher algorithm 564 565 + ARC4 is a stream cipher using keys ranging from 8 bits to 2048 566 + bits in length. This algorithm is required for driver-based 567 + WEP, but it should not be used for other purposes because of the 568 + weakness of the algorithm. 569 570 + config CRYPTO_CHACHA20 571 + tristate "ChaCha" 572 + select CRYPTO_LIB_CHACHA_GENERIC 573 + select CRYPTO_SKCIPHER 574 + help 575 + The ChaCha20, XChaCha20, and XChaCha12 stream cipher algorithms 576 577 + ChaCha20 is a 256-bit high-speed stream cipher designed by Daniel J. 578 + Bernstein and further specified in RFC7539 for use in IETF protocols. 579 + This is the portable C implementation of ChaCha20. See 580 + https://cr.yp.to/chacha/chacha-20080128.pdf for further information. 581 582 + XChaCha20 is the application of the XSalsa20 construction to ChaCha20 583 + rather than to Salsa20. XChaCha20 extends ChaCha20's nonce length 584 + from 64 bits (or 96 bits using the RFC7539 convention) to 192 bits, 585 + while provably retaining ChaCha20's security. See 586 + https://cr.yp.to/snuffle/xsalsa-20081128.pdf for further information. 587 588 + XChaCha12 is XChaCha20 reduced to 12 rounds, with correspondingly 589 + reduced security margin but increased performance. It can be useful 590 + in some performance-sensitive scenarios. 591 592 + config CRYPTO_CBC 593 + tristate "CBC (Cipher Block Chaining)" 594 + select CRYPTO_SKCIPHER 595 + select CRYPTO_MANAGER 596 + help 597 + CBC (Cipher Block Chaining) mode (NIST SP800-38A) 598 599 + This block cipher mode is required for IPSec ESP (XFRM_ESP). 600 601 + config CRYPTO_CFB 602 + tristate "CFB (Cipher Feedback)" 603 + select CRYPTO_SKCIPHER 604 + select CRYPTO_MANAGER 605 + help 606 + CFB (Cipher Feedback) mode (NIST SP800-38A) 607 608 + This block cipher mode is required for TPM2 Cryptography. 609 610 + config CRYPTO_CTR 611 + tristate "CTR (Counter)" 612 + select CRYPTO_SKCIPHER 613 + select CRYPTO_MANAGER 614 + help 615 + CTR (Counter) mode (NIST SP800-38A) 616 617 + config CRYPTO_CTS 618 + tristate "CTS (Cipher Text Stealing)" 619 + select CRYPTO_SKCIPHER 620 + select CRYPTO_MANAGER 621 + help 622 + CBC-CS3 variant of CTS (Cipher Text Stealing) (NIST 623 + Addendum to SP800-38A (October 2010)) 624 625 + This mode is required for Kerberos gss mechanism support 626 + for AES encryption.
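The Adiantum entry above composes existing primitives rather than adding a new one: the template registers a composed instance name, takes a single 32-byte key (the hash and block cipher subkeys are derived from it), and treats its 32-byte IV as the per-sector tweak. A sketch of just the allocation, with the request plumbing identical to the earlier ecb(aes) example:

    #include <crypto/skcipher.h>

    struct crypto_skcipher *tfm;
    u8 key[32];     /* demo key material, normally from the keyring */
    u8 iv[32];      /* tweak, e.g. the sector number, zero padded */

    tfm = crypto_alloc_skcipher("adiantum(xchacha12,aes)", 0, 0);
    if (!IS_ERR(tfm))
            crypto_skcipher_setkey(tfm, key, sizeof(key));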
627 + 628 + config CRYPTO_ECB 629 + tristate "ECB (Electronic Codebook)" 630 + select CRYPTO_SKCIPHER 631 + select CRYPTO_MANAGER 632 + help 633 + ECB (Electronic Codebook) mode (NIST SP800-38A) 634 + 614 635 config CRYPTO_HCTR2 615 - tristate "HCTR2 support" 636 + tristate "HCTR2" 616 637 select CRYPTO_XCTR 617 638 select CRYPTO_POLYVAL 618 639 select CRYPTO_MANAGER 619 640 help 620 - HCTR2 is a length-preserving encryption mode for storage encryption that 621 - is efficient on processors with instructions to accelerate AES and 622 - carryless multiplication, e.g. x86 processors with AES-NI and CLMUL, and 623 - ARM processors with the ARMv8 crypto extensions. 641 + HCTR2 length-preserving encryption mode 642 643 + A mode for storage encryption that is efficient on processors with 644 + instructions to accelerate AES and carryless multiplication, e.g. 645 + x86 processors with AES-NI and CLMUL, and ARM processors with the 646 + ARMv8 crypto extensions. 647 648 + See https://eprint.iacr.org/2021/1441 649 650 + config CRYPTO_KEYWRAP 651 + tristate "KW (AES Key Wrap)" 652 + select CRYPTO_SKCIPHER 653 + select CRYPTO_MANAGER 654 + help 655 + KW (AES Key Wrap) authenticated encryption mode (NIST SP800-38F 656 + and RFC3394) without padding. 657 658 + config CRYPTO_LRW 659 + tristate "LRW (Liskov Rivest Wagner)" 660 + select CRYPTO_SKCIPHER 661 + select CRYPTO_MANAGER 662 + select CRYPTO_GF128MUL 663 + select CRYPTO_ECB 664 + help 665 + LRW (Liskov Rivest Wagner) mode 666 667 + A tweakable, non-malleable, non-movable 668 + narrow block cipher mode for dm-crypt. Use it with cipher 669 + specification string aes-lrw-benbi, the key must be 256, 320 or 384 bits. 670 + The first 128, 192 or 256 bits in the key are used for AES and the 671 + rest is used to tie each cipher block to its logical position. 672 673 + See https://people.csail.mit.edu/rivest/pubs/LRW02.pdf 674 675 + config CRYPTO_OFB 676 + tristate "OFB (Output Feedback)" 677 + select CRYPTO_SKCIPHER 678 + select CRYPTO_MANAGER 679 + help 680 + OFB (Output Feedback) mode (NIST SP800-38A) 681 682 + This mode makes a block cipher into a synchronous 683 + stream cipher. It generates keystream blocks, which are then XORed 684 + with the plaintext blocks to get the ciphertext. Flipping a bit in the 685 + ciphertext produces a flipped bit in the plaintext at the same 686 + location. This property allows many error correcting codes to function 687 + normally even when applied before encryption. 688 689 + config CRYPTO_PCBC 690 + tristate "PCBC (Propagating Cipher Block Chaining)" 691 + select CRYPTO_SKCIPHER 692 + select CRYPTO_MANAGER 693 + help 694 + PCBC (Propagating Cipher Block Chaining) mode 695 696 + This block cipher mode is required for RxRPC. 697 698 + config CRYPTO_XCTR 699 + tristate 700 + select CRYPTO_SKCIPHER 701 + select CRYPTO_MANAGER 702 + help 703 + XCTR (XOR Counter) mode for HCTR2 704 705 + This block cipher mode is a variant of CTR mode using XORs and little-endian 706 + addition rather than big-endian arithmetic. 707 708 + XCTR mode is used to implement HCTR2. 709 710 + config CRYPTO_XTS 711 + tristate "XTS (XOR Encrypt XOR with ciphertext stealing)" 712 + select CRYPTO_SKCIPHER 713 + select CRYPTO_MANAGER 714 + select CRYPTO_ECB 715 + help 716 + XTS (XOR Encrypt XOR with ciphertext stealing) mode (NIST SP800-38E 717 + and IEEE 1619) 718 719 + Use with aes-xts-plain, key size 256, 384 or 512 bits. This 720 + implementation currently can't handle a sector size which is not a 721 + multiple of 16 bytes.
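To make the key-layout remarks in the LRW and XTS help texts concrete: a dm-crypt specification string such as aes-xts-plain64 with a 512-bit key reduces, at this layer, to an "xts(aes)" skcipher keyed with two concatenated AES-256 keys, plus a 16-byte IV holding the little-endian sector number. A sketch under those assumptions (request plumbing as in the ecb(aes) example above):

    #include <crypto/skcipher.h>
    #include <asm/unaligned.h>

    struct crypto_skcipher *tfm;
    u8 key[64];             /* data key || tweak key, 32 bytes each */
    u8 iv[16] = { 0 };
    u64 sector = 42;        /* hypothetical sector number */

    tfm = crypto_alloc_skcipher("xts(aes)", 0, 0);
    if (!IS_ERR(tfm)) {
            crypto_skcipher_setkey(tfm, key, sizeof(key));
            put_unaligned_le64(sector, iv);  /* the "plain64" IV convention */
    }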
722 + 723 + config CRYPTO_NHPOLY1305 724 + tristate 725 + select CRYPTO_HASH 726 + select CRYPTO_LIB_POLY1305_GENERIC 727 + 728 + endmenu 729 + 730 + menu "AEAD (authenticated encryption with associated data) ciphers" 731 + 732 + config CRYPTO_AEGIS128 733 + tristate "AEGIS-128" 734 + select CRYPTO_AEAD 735 + select CRYPTO_AES # for AES S-box tables 736 + help 737 + AEGIS-128 AEAD algorithm 738 + 739 + config CRYPTO_AEGIS128_SIMD 740 + bool "AEGIS-128 (arm NEON, arm64 NEON)" 741 + depends on CRYPTO_AEGIS128 && ((ARM || ARM64) && KERNEL_MODE_NEON) 742 + default y 743 + help 744 + AEGIS-128 AEAD algorithm 745 + 746 + Architecture: arm or arm64 using: 747 + - NEON (Advanced SIMD) extension 748 + 749 + config CRYPTO_CHACHA20POLY1305 750 + tristate "ChaCha20-Poly1305" 751 + select CRYPTO_CHACHA20 752 + select CRYPTO_POLY1305 753 + select CRYPTO_AEAD 754 + select CRYPTO_MANAGER 755 + help 756 + ChaCha20 stream cipher and Poly1305 authenticator combined 757 + mode (RFC8439) 758 + 759 + config CRYPTO_CCM 760 + tristate "CCM (Counter with Cipher Block Chaining-MAC)" 761 + select CRYPTO_CTR 762 + select CRYPTO_HASH 763 + select CRYPTO_AEAD 764 + select CRYPTO_MANAGER 765 + help 766 + CCM (Counter with Cipher Block Chaining-Message Authentication Code) 767 + authenticated encryption mode (NIST SP800-38C) 768 + 769 + config CRYPTO_GCM 770 + tristate "GCM (Galois/Counter Mode) and GMAC (GCM MAC)" 771 + select CRYPTO_CTR 772 + select CRYPTO_AEAD 773 + select CRYPTO_GHASH 774 + select CRYPTO_NULL 775 + select CRYPTO_MANAGER 776 + help 777 + GCM (Galois/Counter Mode) authenticated encryption mode and GMAC 778 + (GCM Message Authentication Code) (NIST SP800-38D) 779 + 780 + This is required for IPSec ESP (XFRM_ESP). 781 + 782 + config CRYPTO_SEQIV 783 + tristate "Sequence Number IV Generator" 784 + select CRYPTO_AEAD 785 + select CRYPTO_SKCIPHER 786 + select CRYPTO_NULL 787 + select CRYPTO_RNG_DEFAULT 788 + select CRYPTO_MANAGER 789 + help 790 + Sequence Number IV generator 791 + 792 + This IV generator generates an IV based on a sequence number by 793 + xoring it with a salt. This algorithm is mainly useful for CTR. 794 + 795 + This is required for IPsec ESP (XFRM_ESP). 796 + 797 + config CRYPTO_ECHAINIV 798 + tristate "Encrypted Chain IV Generator" 799 + select CRYPTO_AEAD 800 + select CRYPTO_NULL 801 + select CRYPTO_RNG_DEFAULT 802 + select CRYPTO_MANAGER 803 + help 804 + Encrypted Chain IV generator 805 + 806 + This IV generator generates an IV based on the encryption of 807 + a sequence number xored with a salt. This is the default 808 + algorithm for CBC. 624 809 625 810 config CRYPTO_ESSIV 626 - tristate "ESSIV support for block encryption" 811 + tristate "Encrypted Salt-Sector IV Generator" 627 812 select CRYPTO_AUTHENC 628 813 help 629 - Encrypted salt-sector initialization vector (ESSIV) is an IV 630 - generation method that is used in some cases by fscrypt and/or 814 + Encrypted Salt-Sector IV generator 815 + 816 + This IV generator is used in some cases by fscrypt and/or 631 817 dm-crypt. 
It uses the hash of the block encryption key as the 632 818 symmetric key for a block encryption pass applied to the input 633 819 IV, making low entropy IV sources more suitable for block ··· 893 593 combined with ESSIV the only feasible mode for h/w accelerated 894 594 block encryption) 895 595 896 - comment "Hash modes" 596 + endmenu 897 597 898 - config CRYPTO_CMAC 899 - tristate "CMAC support" 900 - select CRYPTO_HASH 901 - select CRYPTO_MANAGER 902 - help 903 - Cipher-based Message Authentication Code (CMAC) specified by 904 - The National Institute of Standards and Technology (NIST). 905 - 906 - https://tools.ietf.org/html/rfc4493 907 - http://csrc.nist.gov/publications/nistpubs/800-38B/SP_800-38B.pdf 908 - 909 - config CRYPTO_HMAC 910 - tristate "HMAC support" 911 - select CRYPTO_HASH 912 - select CRYPTO_MANAGER 913 - help 914 - HMAC: Keyed-Hashing for Message Authentication (RFC2104). 915 - This is required for IPSec. 916 - 917 - config CRYPTO_XCBC 918 - tristate "XCBC support" 919 - select CRYPTO_HASH 920 - select CRYPTO_MANAGER 921 - help 922 - XCBC: Keyed-Hashing with encryption algorithm 923 - https://www.ietf.org/rfc/rfc3566.txt 924 - http://csrc.nist.gov/encryption/modes/proposedmodes/ 925 - xcbc-mac/xcbc-mac-spec.pdf 926 - 927 - config CRYPTO_VMAC 928 - tristate "VMAC support" 929 - select CRYPTO_HASH 930 - select CRYPTO_MANAGER 931 - help 932 - VMAC is a message authentication algorithm designed for 933 - very high speed on 64-bit architectures. 934 - 935 - See also: 936 - <https://fastcrypto.org/vmac> 937 - 938 - comment "Digest" 939 - 940 - config CRYPTO_CRC32C 941 - tristate "CRC32c CRC algorithm" 942 - select CRYPTO_HASH 943 - select CRC32 944 - help 945 - Castagnoli, et al Cyclic Redundancy-Check Algorithm. Used 946 - by iSCSI for header and data digests and by others. 947 - See Castagnoli93. Module will be crc32c. 948 - 949 - config CRYPTO_CRC32C_INTEL 950 - tristate "CRC32c INTEL hardware acceleration" 951 - depends on X86 952 - select CRYPTO_HASH 953 - help 954 - In Intel processor with SSE4.2 supported, the processor will 955 - support CRC32C implementation using hardware accelerated CRC32 956 - instruction. This option will create 'crc32c-intel' module, 957 - which will enable any routine to use the CRC32 instruction to 958 - gain performance compared with software implementation. 959 - Module will be crc32c-intel. 960 - 961 - config CRYPTO_CRC32C_VPMSUM 962 - tristate "CRC32c CRC algorithm (powerpc64)" 963 - depends on PPC64 && ALTIVEC 964 - select CRYPTO_HASH 965 - select CRC32 966 - help 967 - CRC32c algorithm implemented using vector polynomial multiply-sum 968 - (vpmsum) instructions, introduced in POWER8. Enable on POWER8 969 - and newer processors for improved performance. 970 - 971 - 972 - config CRYPTO_CRC32C_SPARC64 973 - tristate "CRC32c CRC algorithm (SPARC64)" 974 - depends on SPARC64 975 - select CRYPTO_HASH 976 - select CRC32 977 - help 978 - CRC32c CRC algorithm implemented using sparc64 crypto instructions, 979 - when available. 980 - 981 - config CRYPTO_CRC32 982 - tristate "CRC32 CRC algorithm" 983 - select CRYPTO_HASH 984 - select CRC32 985 - help 986 - CRC-32-IEEE 802.3 cyclic redundancy-check algorithm. 987 - Shash crypto api wrappers to crc32_le function. 
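For the AEAD entries above (CCM, GCM, ChaCha20-Poly1305), associated data and payload travel in one scatterlist: the first assoclen bytes are authenticated only, the rest is encrypted, and the tag is appended to the output. A minimal sketch against the in-kernel AEAD API (not code from this commit), sealing a buffer laid out as [assoc | plaintext | room for a 16-byte tag] with "gcm(aes)":

    #include <crypto/aead.h>
    #include <linux/scatterlist.h>
    #include <linux/slab.h>

    static int demo_gcm_seal(const u8 *key, unsigned int keylen, u8 *buf,
                             unsigned int assoclen, unsigned int textlen)
    {
            struct crypto_aead *tfm;
            struct aead_request *req = NULL;
            struct scatterlist sg;
            DECLARE_CRYPTO_WAIT(wait);
            u8 iv[12] = { 0 };      /* 96-bit nonce: must never repeat per key */
            int err;

            tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
            if (IS_ERR(tfm))
                    return PTR_ERR(tfm);

            err = crypto_aead_setkey(tfm, key, keylen);
            if (!err)
                    err = crypto_aead_setauthsize(tfm, 16);   /* full tag */
            if (err)
                    goto out;

            req = aead_request_alloc(tfm, GFP_KERNEL);
            if (!req) {
                    err = -ENOMEM;
                    goto out;
            }

            sg_init_one(&sg, buf, assoclen + textlen + 16);
            aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP,
                                      crypto_req_done, &wait);
            aead_request_set_ad(req, assoclen);
            aead_request_set_crypt(req, &sg, &sg, textlen, iv);
            err = crypto_wait_req(crypto_aead_encrypt(req), &wait);
    out:
            aead_request_free(req);
            crypto_free_aead(tfm);
            return err;
    }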
988 - 989 - config CRYPTO_CRC32_PCLMUL 990 - tristate "CRC32 PCLMULQDQ hardware acceleration" 991 - depends on X86 992 - select CRYPTO_HASH 993 - select CRC32 994 - help 995 - From Intel Westmere and AMD Bulldozer processor with SSE4.2 996 - and PCLMULQDQ supported, the processor will support 997 - CRC32 PCLMULQDQ implementation using hardware accelerated PCLMULQDQ 998 - instruction. This option will create 'crc32-pclmul' module, 999 - which will enable any routine to use the CRC-32-IEEE 802.3 checksum 1000 - and gain better performance as compared with the table implementation. 1001 - 1002 - config CRYPTO_CRC32_MIPS 1003 - tristate "CRC32c and CRC32 CRC algorithm (MIPS)" 1004 - depends on MIPS_CRC_SUPPORT 1005 - select CRYPTO_HASH 1006 - help 1007 - CRC32c and CRC32 CRC algorithms implemented using mips crypto 1008 - instructions, when available. 1009 - 1010 - config CRYPTO_CRC32_S390 1011 - tristate "CRC-32 algorithms" 1012 - depends on S390 1013 - select CRYPTO_HASH 1014 - select CRC32 1015 - help 1016 - Select this option if you want to use hardware accelerated 1017 - implementations of CRC algorithms. With this option, you 1018 - can optimize the computation of CRC-32 (IEEE 802.3 Ethernet) 1019 - and CRC-32C (Castagnoli). 1020 - 1021 - It is available with IBM z13 or later. 1022 - 1023 - config CRYPTO_XXHASH 1024 - tristate "xxHash hash algorithm" 1025 - select CRYPTO_HASH 1026 - select XXHASH 1027 - help 1028 - xxHash non-cryptographic hash algorithm. Extremely fast, working at 1029 - speeds close to RAM limits. 598 + menu "Hashes, digests, and MACs" 1030 599 1031 600 config CRYPTO_BLAKE2B 1032 - tristate "BLAKE2b digest algorithm" 601 + tristate "BLAKE2b" 1033 602 select CRYPTO_HASH 1034 603 help 1035 - Implementation of cryptographic hash function BLAKE2b (or just BLAKE2), 1036 - optimized for 64bit platforms and can produce digests of any size 1037 - between 1 to 64. The keyed hash is also implemented. 604 + BLAKE2b cryptographic hash function (RFC 7693) 605 + 606 + BLAKE2b is optimized for 64-bit platforms and can produce digests 607 + of any size between 1 and 64 bytes. The keyed hash is also implemented. 1038 608 1039 609 This module provides the following algorithms: 1040 - 1041 610 - blake2b-160 1042 611 - blake2b-256 1043 612 - blake2b-384 1044 613 - blake2b-512 1045 614 615 + Used by the btrfs filesystem. 616 + 1046 617 See https://blake2.net for further information. 1047 618 1048 - config CRYPTO_BLAKE2S_X86 1049 - bool "BLAKE2s digest algorithm (x86 accelerated version)" 1050 - depends on X86 && 64BIT 1051 - select CRYPTO_LIB_BLAKE2S_GENERIC 1052 - select CRYPTO_ARCH_HAVE_LIB_BLAKE2S 1053 - 1054 - config CRYPTO_CRCT10DIF 1055 - tristate "CRCT10DIF algorithm" 619 + config CRYPTO_CMAC 620 + tristate "CMAC (Cipher-based MAC)" 1056 621 select CRYPTO_HASH 622 + select CRYPTO_MANAGER 1057 623 help 1058 - CRC T10 Data Integrity Field computation is being cast as 1059 - a crypto transform. This allows for faster crc t10 diff 1060 - transforms to be used if they are available. 1061 - 1062 - config CRYPTO_CRCT10DIF_PCLMUL 1063 - tristate "CRCT10DIF PCLMULQDQ hardware acceleration" 1064 - depends on X86 && 64BIT && CRC_T10DIF 1065 - select CRYPTO_HASH 1066 - help 1067 - For x86_64 processors with SSE4.2 and PCLMULQDQ supported, 1068 - CRC T10 DIF PCLMULQDQ computation can be hardware 1069 - accelerated PCLMULQDQ instruction. 
This option will create 1070 - 'crct10dif-pclmul' module, which is faster when computing the 1071 - crct10dif checksum as compared with the generic table implementation. 1072 1073 - config CRYPTO_CRCT10DIF_VPMSUM 1074 - tristate "CRC32T10DIF powerpc64 hardware acceleration" 1075 - depends on PPC64 && ALTIVEC && CRC_T10DIF 1076 - select CRYPTO_HASH 1077 - help 1078 - CRC10T10DIF algorithm implemented using vector polynomial 1079 - multiply-sum (vpmsum) instructions, introduced in POWER8. Enable on 1080 - POWER8 and newer processors for improved performance. 1081 1082 - config CRYPTO_CRC64_ROCKSOFT 1083 - tristate "Rocksoft Model CRC64 algorithm" 1084 - depends on CRC64 1085 - select CRYPTO_HASH 1086 1087 - config CRYPTO_VPMSUM_TESTER 1088 - tristate "Powerpc64 vpmsum hardware acceleration tester" 1089 - depends on CRYPTO_CRCT10DIF_VPMSUM && CRYPTO_CRC32C_VPMSUM 1090 - help 1091 - Stress test for CRC32c and CRC-T10DIF algorithms implemented with 1092 - POWER8 vpmsum instructions. 1093 - Unless you are testing these algorithms, you don't need this. 624 + CMAC (Cipher-based Message Authentication Code) authentication 625 + mode (NIST SP800-38B and IETF RFC4493) 1094 626 1095 627 config CRYPTO_GHASH 1096 - tristate "GHASH hash function" 628 + tristate "GHASH" 1097 629 select CRYPTO_GF128MUL 1098 630 select CRYPTO_HASH 1099 631 help 1100 - GHASH is the hash function used in GCM (Galois/Counter Mode). 1101 - It is not a general-purpose cryptographic hash function. 632 + GCM GHASH function (NIST SP800-38D) 633 634 + config CRYPTO_HMAC 635 + tristate "HMAC (Keyed-Hash MAC)" 636 + select CRYPTO_HASH 637 + select CRYPTO_MANAGER 638 + help 639 + HMAC (Keyed-Hash Message Authentication Code) (FIPS 198 and 640 + RFC2104) 641 642 + This is required for IPsec AH (XFRM_AH) and IPsec ESP (XFRM_ESP). 643 644 + config CRYPTO_MD4 645 + tristate "MD4" 646 + select CRYPTO_HASH 647 + help 648 + MD4 message digest algorithm (RFC1320) 649 650 + config CRYPTO_MD5 651 + tristate "MD5" 652 + select CRYPTO_HASH 653 + help 654 + MD5 message digest algorithm (RFC1321) 655 656 + config CRYPTO_MICHAEL_MIC 657 + tristate "Michael MIC" 658 + select CRYPTO_HASH 659 + help 660 + Michael MIC (Message Integrity Code) (IEEE 802.11i) 661 662 + Defined by the IEEE 802.11i TKIP (Temporal Key Integrity Protocol), 663 + known as WPA (Wi-Fi Protected Access). 664 665 + This algorithm is required for TKIP, but it should not be used for 666 + other purposes because of the weakness of the algorithm. 1102 667 1103 668 config CRYPTO_POLYVAL 1104 669 tristate 1105 670 select CRYPTO_GF128MUL 1106 671 select CRYPTO_HASH 1107 672 help 1108 - POLYVAL is the hash function used in HCTR2. It is not a general-purpose 673 + POLYVAL hash function for HCTR2 674 675 + This is used in HCTR2. It is not a general-purpose 1109 676 cryptographic hash function. 1110 677 1111 - config CRYPTO_POLYVAL_CLMUL_NI 1112 - tristate "POLYVAL hash function (CLMUL-NI accelerated)" 1113 - depends on X86 && 64BIT 1114 - select CRYPTO_POLYVAL 1115 - help 1116 - This is the x86_64 CLMUL-NI accelerated implementation of POLYVAL. It is 1117 - used to efficiently implement HCTR2 on x86-64 processors that support 1118 - carry-less multiplication instructions. 1119 1120 678 config CRYPTO_POLY1305 1121 - tristate "Poly1305 authenticator algorithm" 679 + tristate "Poly1305" 1122 680 select CRYPTO_HASH 1123 681 select CRYPTO_LIB_POLY1305_GENERIC 1124 682 help 1125 - Poly1305 authenticator algorithm, RFC7539.
683 + Poly1305 authenticator algorithm (RFC7539) 1126 684 1127 685 Poly1305 is an authenticator algorithm designed by Daniel J. Bernstein. 1128 686 It is used for the ChaCha20-Poly1305 AEAD, specified in RFC7539 for use 1129 687 in IETF protocols. This is the portable C implementation of Poly1305. 1130 688 1131 - config CRYPTO_POLY1305_X86_64 1132 - tristate "Poly1305 authenticator algorithm (x86_64/SSE2/AVX2)" 1133 - depends on X86 && 64BIT 1134 - select CRYPTO_LIB_POLY1305_GENERIC 1135 - select CRYPTO_ARCH_HAVE_LIB_POLY1305 1136 - help 1137 - Poly1305 authenticator algorithm, RFC7539. 1138 - 1139 - Poly1305 is an authenticator algorithm designed by Daniel J. Bernstein. 1140 - It is used for the ChaCha20-Poly1305 AEAD, specified in RFC7539 for use 1141 - in IETF protocols. This is the x86_64 assembler implementation using SIMD 1142 - instructions. 1143 - 1144 - config CRYPTO_POLY1305_MIPS 1145 - tristate "Poly1305 authenticator algorithm (MIPS optimized)" 1146 - depends on MIPS 1147 - select CRYPTO_ARCH_HAVE_LIB_POLY1305 1148 - 1149 - config CRYPTO_MD4 1150 - tristate "MD4 digest algorithm" 1151 - select CRYPTO_HASH 1152 - help 1153 - MD4 message digest algorithm (RFC1320). 1154 - 1155 - config CRYPTO_MD5 1156 - tristate "MD5 digest algorithm" 1157 - select CRYPTO_HASH 1158 - help 1159 - MD5 message digest algorithm (RFC1321). 1160 - 1161 - config CRYPTO_MD5_OCTEON 1162 - tristate "MD5 digest algorithm (OCTEON)" 1163 - depends on CPU_CAVIUM_OCTEON 1164 - select CRYPTO_MD5 1165 - select CRYPTO_HASH 1166 - help 1167 - MD5 message digest algorithm (RFC1321) implemented 1168 - using OCTEON crypto instructions, when available. 1169 - 1170 - config CRYPTO_MD5_PPC 1171 - tristate "MD5 digest algorithm (PPC)" 1172 - depends on PPC 1173 - select CRYPTO_HASH 1174 - help 1175 - MD5 message digest algorithm (RFC1321) implemented 1176 - in PPC assembler. 1177 - 1178 - config CRYPTO_MD5_SPARC64 1179 - tristate "MD5 digest algorithm (SPARC64)" 1180 - depends on SPARC64 1181 - select CRYPTO_MD5 1182 - select CRYPTO_HASH 1183 - help 1184 - MD5 message digest algorithm (RFC1321) implemented 1185 - using sparc64 crypto instructions, when available. 1186 - 1187 - config CRYPTO_MICHAEL_MIC 1188 - tristate "Michael MIC keyed digest algorithm" 1189 - select CRYPTO_HASH 1190 - help 1191 - Michael MIC is used for message integrity protection in TKIP 1192 - (IEEE 802.11i). This algorithm is required for TKIP, but it 1193 - should not be used for other purposes because of the weakness 1194 - of the algorithm. 1195 - 1196 689 config CRYPTO_RMD160 1197 - tristate "RIPEMD-160 digest algorithm" 690 + tristate "RIPEMD-160" 1198 691 select CRYPTO_HASH 1199 692 help 1200 - RIPEMD-160 (ISO/IEC 10118-3:2004). 693 + RIPEMD-160 hash function (ISO/IEC 10118-3) 1201 694 1202 695 RIPEMD-160 is a 160-bit cryptographic hash function. It is intended 1203 696 to be used as a secure replacement for the 128-bit hash functions 1204 697 MD4, MD5 and its predecessor RIPEMD 1205 698 (not to be confused with RIPEMD-128). 1206 699 1207 - It's speed is comparable to SHA1 and there are no known attacks 700 + Its speed is comparable to SHA-1 and there are no known attacks 1208 701 against RIPEMD-160. 1209 702 1210 703 Developed by Hans Dobbertin, Antoon Bosselaers and Bart Preneel. 1211 - See <https://homes.esat.kuleuven.be/~bosselae/ripemd160.html> 704 + See https://homes.esat.kuleuven.be/~bosselae/ripemd160.html 705 + for further information. 
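Keyed hashes such as the CMAC and HMAC entries above are driven through the synchronous shash interface; unkeyed digests simply skip the setkey step. A minimal sketch (not from this commit; assumes CRYPTO_HMAC and CRYPTO_SHA256 are enabled) computing HMAC-SHA256 in one call:

    #include <crypto/hash.h>

    static int demo_hmac_sha256(const u8 *key, unsigned int keylen,
                                const u8 *data, unsigned int len,
                                u8 out[32])
    {
            struct crypto_shash *tfm;
            int err;

            tfm = crypto_alloc_shash("hmac(sha256)", 0, 0);
            if (IS_ERR(tfm))
                    return PTR_ERR(tfm);

            err = crypto_shash_setkey(tfm, key, keylen);
            if (!err)
                    /* One-shot helper; longer inputs can use init/update/final. */
                    err = crypto_shash_tfm_digest(tfm, data, len, out);

            crypto_free_shash(tfm);
            return err;
    }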
1212 706 1213 707 config CRYPTO_SHA1 1214 - tristate "SHA1 digest algorithm" 708 + tristate "SHA-1" 1215 709 select CRYPTO_HASH 1216 710 select CRYPTO_LIB_SHA1 1217 711 help 1218 - SHA-1 secure hash standard (FIPS 180-1/DFIPS 180-2). 1219 - 1220 - config CRYPTO_SHA1_SSSE3 1221 - tristate "SHA1 digest algorithm (SSSE3/AVX/AVX2/SHA-NI)" 1222 - depends on X86 && 64BIT 1223 - select CRYPTO_SHA1 1224 - select CRYPTO_HASH 1225 - help 1226 - SHA-1 secure hash standard (FIPS 180-1/DFIPS 180-2) implemented 1227 - using Supplemental SSE3 (SSSE3) instructions or Advanced Vector 1228 - Extensions (AVX/AVX2) or SHA-NI(SHA Extensions New Instructions), 1229 - when available. 1230 - 1231 - config CRYPTO_SHA256_SSSE3 1232 - tristate "SHA256 digest algorithm (SSSE3/AVX/AVX2/SHA-NI)" 1233 - depends on X86 && 64BIT 1234 - select CRYPTO_SHA256 1235 - select CRYPTO_HASH 1236 - help 1237 - SHA-256 secure hash standard (DFIPS 180-2) implemented 1238 - using Supplemental SSE3 (SSSE3) instructions, or Advanced Vector 1239 - Extensions version 1 (AVX1), or Advanced Vector Extensions 1240 - version 2 (AVX2) instructions, or SHA-NI (SHA Extensions New 1241 - Instructions) when available. 1242 - 1243 - config CRYPTO_SHA512_SSSE3 1244 - tristate "SHA512 digest algorithm (SSSE3/AVX/AVX2)" 1245 - depends on X86 && 64BIT 1246 - select CRYPTO_SHA512 1247 - select CRYPTO_HASH 1248 - help 1249 - SHA-512 secure hash standard (DFIPS 180-2) implemented 1250 - using Supplemental SSE3 (SSSE3) instructions, or Advanced Vector 1251 - Extensions version 1 (AVX1), or Advanced Vector Extensions 1252 - version 2 (AVX2) instructions, when available. 1253 - 1254 - config CRYPTO_SHA512_S390 1255 - tristate "SHA384 and SHA512 digest algorithm" 1256 - depends on S390 1257 - select CRYPTO_HASH 1258 - help 1259 - This is the s390 hardware accelerated implementation of the 1260 - SHA512 secure hash standard. 1261 - 1262 - It is available as of z10. 1263 - 1264 - config CRYPTO_SHA1_OCTEON 1265 - tristate "SHA1 digest algorithm (OCTEON)" 1266 - depends on CPU_CAVIUM_OCTEON 1267 - select CRYPTO_SHA1 1268 - select CRYPTO_HASH 1269 - help 1270 - SHA-1 secure hash standard (FIPS 180-1/DFIPS 180-2) implemented 1271 - using OCTEON crypto instructions, when available. 1272 - 1273 - config CRYPTO_SHA1_SPARC64 1274 - tristate "SHA1 digest algorithm (SPARC64)" 1275 - depends on SPARC64 1276 - select CRYPTO_SHA1 1277 - select CRYPTO_HASH 1278 - help 1279 - SHA-1 secure hash standard (FIPS 180-1/DFIPS 180-2) implemented 1280 - using sparc64 crypto instructions, when available. 1281 - 1282 - config CRYPTO_SHA1_PPC 1283 - tristate "SHA1 digest algorithm (powerpc)" 1284 - depends on PPC 1285 - help 1286 - This is the powerpc hardware accelerated implementation of the 1287 - SHA-1 secure hash standard (FIPS 180-1/DFIPS 180-2). 1288 - 1289 - config CRYPTO_SHA1_PPC_SPE 1290 - tristate "SHA1 digest algorithm (PPC SPE)" 1291 - depends on PPC && SPE 1292 - help 1293 - SHA-1 secure hash standard (DFIPS 180-4) implemented 1294 - using powerpc SPE SIMD instruction set. 1295 - 1296 - config CRYPTO_SHA1_S390 1297 - tristate "SHA1 digest algorithm" 1298 - depends on S390 1299 - select CRYPTO_HASH 1300 - help 1301 - This is the s390 hardware accelerated implementation of the 1302 - SHA-1 secure hash standard (FIPS 180-1/DFIPS 180-2). 1303 - 1304 - It is available as of z990. 
712 + SHA-1 secure hash algorithm (FIPS 180, ISO/IEC 10118-3) 1305 713 1306 714 config CRYPTO_SHA256 1307 - tristate "SHA224 and SHA256 digest algorithm" 715 + tristate "SHA-224 and SHA-256" 1308 716 select CRYPTO_HASH 1309 717 select CRYPTO_LIB_SHA256 1310 718 help 1311 - SHA256 secure hash standard (DFIPS 180-2). 719 + SHA-224 and SHA-256 secure hash algorithms (FIPS 180, ISO/IEC 10118-3) 1312 720 1313 - This version of SHA implements a 256 bit hash with 128 bits of 1314 - security against collision attacks. 1315 - 1316 - This code also includes SHA-224, a 224 bit hash with 112 bits 1317 - of security against collision attacks. 1318 - 1319 - config CRYPTO_SHA256_PPC_SPE 1320 - tristate "SHA224 and SHA256 digest algorithm (PPC SPE)" 1321 - depends on PPC && SPE 1322 - select CRYPTO_SHA256 1323 - select CRYPTO_HASH 1324 - help 1325 - SHA224 and SHA256 secure hash standard (DFIPS 180-2) 1326 - implemented using powerpc SPE SIMD instruction set. 1327 - 1328 - config CRYPTO_SHA256_OCTEON 1329 - tristate "SHA224 and SHA256 digest algorithm (OCTEON)" 1330 - depends on CPU_CAVIUM_OCTEON 1331 - select CRYPTO_SHA256 1332 - select CRYPTO_HASH 1333 - help 1334 - SHA-256 secure hash standard (DFIPS 180-2) implemented 1335 - using OCTEON crypto instructions, when available. 1336 - 1337 - config CRYPTO_SHA256_SPARC64 1338 - tristate "SHA224 and SHA256 digest algorithm (SPARC64)" 1339 - depends on SPARC64 1340 - select CRYPTO_SHA256 1341 - select CRYPTO_HASH 1342 - help 1343 - SHA-256 secure hash standard (DFIPS 180-2) implemented 1344 - using sparc64 crypto instructions, when available. 1345 - 1346 - config CRYPTO_SHA256_S390 1347 - tristate "SHA256 digest algorithm" 1348 - depends on S390 1349 - select CRYPTO_HASH 1350 - help 1351 - This is the s390 hardware accelerated implementation of the 1352 - SHA256 secure hash standard (DFIPS 180-2). 1353 - 1354 - It is available as of z9. 721 + This is required for IPsec AH (XFRM_AH) and IPsec ESP (XFRM_ESP). 722 + Used by the btrfs filesystem, Ceph, NFS, and SMB. 1355 723 1356 724 config CRYPTO_SHA512 1357 - tristate "SHA384 and SHA512 digest algorithms" 725 + tristate "SHA-384 and SHA-512" 1358 726 select CRYPTO_HASH 1359 727 help 1360 - SHA512 secure hash standard (DFIPS 180-2). 1361 - 1362 - This version of SHA implements a 512 bit hash with 256 bits of 1363 - security against collision attacks. 1364 - 1365 - This code also includes SHA-384, a 384 bit hash with 192 bits 1366 - of security against collision attacks. 1367 - 1368 - config CRYPTO_SHA512_OCTEON 1369 - tristate "SHA384 and SHA512 digest algorithms (OCTEON)" 1370 - depends on CPU_CAVIUM_OCTEON 1371 - select CRYPTO_SHA512 1372 - select CRYPTO_HASH 1373 - help 1374 - SHA-512 secure hash standard (DFIPS 180-2) implemented 1375 - using OCTEON crypto instructions, when available. 1376 - 1377 - config CRYPTO_SHA512_SPARC64 1378 - tristate "SHA384 and SHA512 digest algorithm (SPARC64)" 1379 - depends on SPARC64 1380 - select CRYPTO_SHA512 1381 - select CRYPTO_HASH 1382 - help 1383 - SHA-512 secure hash standard (DFIPS 180-2) implemented 1384 - using sparc64 crypto instructions, when available. 728 + SHA-384 and SHA-512 secure hash algorithms (FIPS 180, ISO/IEC 10118-3) 1385 729 1386 730 config CRYPTO_SHA3 1387 - tristate "SHA3 digest algorithm" 731 + tristate "SHA-3" 1388 732 select CRYPTO_HASH 1389 733 help 1390 - SHA-3 secure hash standard (DFIPS 202). It's based on 1391 - cryptographic sponge function family called Keccak. 
1392 - 1393 - References: 1394 - http://keccak.noekeon.org/ 1395 - 1396 - config CRYPTO_SHA3_256_S390 1397 - tristate "SHA3_224 and SHA3_256 digest algorithm" 1398 - depends on S390 1399 - select CRYPTO_HASH 1400 - help 1401 - This is the s390 hardware accelerated implementation of the 1402 - SHA3_256 secure hash standard. 1403 - 1404 - It is available as of z14. 1405 - 1406 - config CRYPTO_SHA3_512_S390 1407 - tristate "SHA3_384 and SHA3_512 digest algorithm" 1408 - depends on S390 1409 - select CRYPTO_HASH 1410 - help 1411 - This is the s390 hardware accelerated implementation of the 1412 - SHA3_512 secure hash standard. 1413 - 1414 - It is available as of z14. 734 + SHA-3 secure hash algorithms (FIPS 202, ISO/IEC 10118-3) 1415 735 1416 736 config CRYPTO_SM3 1417 737 tristate 1418 738 1419 739 config CRYPTO_SM3_GENERIC 1420 - tristate "SM3 digest algorithm" 740 + tristate "SM3 (ShangMi 3)" 1421 741 select CRYPTO_HASH 1422 742 select CRYPTO_SM3 1423 743 help 1424 - SM3 secure hash function as defined by OSCCA GM/T 0004-2012 SM3). 1425 - It is part of the Chinese Commercial Cryptography suite. 744 + SM3 (ShangMi 3) secure hash function (OSCCA GM/T 0004-2012, ISO/IEC 10118-3) 745 + 746 + This is part of the Chinese Commercial Cryptography suite. 1426 747 1427 748 References: 1428 749 http://www.oscca.gov.cn/UpFile/20101222141857786.pdf 1429 750 https://datatracker.ietf.org/doc/html/draft-shen-sm3-hash 1430 751 1431 - config CRYPTO_SM3_AVX_X86_64 1432 - tristate "SM3 digest algorithm (x86_64/AVX)" 1433 - depends on X86 && 64BIT 1434 - select CRYPTO_HASH 1435 - select CRYPTO_SM3 1436 - help 1437 - SM3 secure hash function as defined by OSCCA GM/T 0004-2012 SM3). 1438 - It is part of the Chinese Commercial Cryptography suite. This is 1439 - SM3 optimized implementation using Advanced Vector Extensions (AVX) 1440 - when available. 1441 - 1442 - If unsure, say N. 1443 - 1444 752 config CRYPTO_STREEBOG 1445 - tristate "Streebog Hash Function" 753 + tristate "Streebog" 1446 754 select CRYPTO_HASH 1447 755 help 1448 - Streebog Hash Function (GOST R 34.11-2012, RFC 6986) is one of the Russian 1449 - cryptographic standard algorithms (called GOST algorithms). 1450 - This setting enables two hash algorithms with 256 and 512 bits output. 756 + Streebog Hash Function (GOST R 34.11-2012, RFC 6986, ISO/IEC 10118-3) 757 + 758 + This is one of the Russian cryptographic standard algorithms (called 759 + GOST algorithms). This setting enables two hash algorithms with 760 + 256 and 512 bits output. 1451 761 1452 762 References: 1453 763 https://tc26.ru/upload/iblock/fed/feddbb4d26b685903faa2ba11aea43f6.pdf 1454 764 https://tools.ietf.org/html/rfc6986 1455 765 766 + config CRYPTO_VMAC 767 + tristate "VMAC" 768 + select CRYPTO_HASH 769 + select CRYPTO_MANAGER 770 + help 771 + VMAC is a message authentication algorithm designed for 772 + very high speed on 64-bit architectures. 773 + 774 + See https://fastcrypto.org/vmac for further information. 775 + 1456 776 config CRYPTO_WP512 1457 - tristate "Whirlpool digest algorithms" 777 + tristate "Whirlpool" 1458 778 select CRYPTO_HASH 1459 779 help 1460 - Whirlpool hash algorithm 512, 384 and 256-bit hashes 780 + Whirlpool hash function (ISO/IEC 10118-3) 781 + 782 + 512, 384 and 256-bit hashes. 1461 783 1462 784 Whirlpool-512 is part of the NESSIE cryptographic primitives. 
1463 - Whirlpool will be part of the ISO/IEC 10118-3:2003(E) standard 1464 785 1465 - See also: 1466 - <http://www.larc.usp.br/~pbarreto/WhirlpoolPage.html> 786 + See https://web.archive.org/web/20171129084214/http://www.larc.usp.br/~pbarreto/WhirlpoolPage.html 787 + for further information. 1467 788 1468 - config CRYPTO_GHASH_CLMUL_NI_INTEL 1469 - tristate "GHASH hash function (CLMUL-NI accelerated)" 1470 - depends on X86 && 64BIT 1471 - select CRYPTO_CRYPTD 789 + config CRYPTO_XCBC 790 + tristate "XCBC-MAC (Extended Cipher Block Chaining MAC)" 791 + select CRYPTO_HASH 792 + select CRYPTO_MANAGER 1472 793 help 1473 - This is the x86_64 CLMUL-NI accelerated implementation of 1474 - GHASH, the hash function used in GCM (Galois/Counter mode). 794 + XCBC-MAC (Extended Cipher Block Chaining Message Authentication 795 + Code) (RFC3566) 1475 796 1476 - config CRYPTO_GHASH_S390 1477 - tristate "GHASH hash function" 1478 - depends on S390 797 + config CRYPTO_XXHASH 798 + tristate "xxHash" 799 + select CRYPTO_HASH 800 + select XXHASH 801 + help 802 + xxHash non-cryptographic hash algorithm 803 + 804 + Extremely fast, working at speeds close to RAM limits. 805 + 806 + Used by the btrfs filesystem. 807 + 808 + endmenu 809 + 810 + menu "CRCs (cyclic redundancy checks)" 811 + 812 + config CRYPTO_CRC32C 813 + tristate "CRC32c" 814 + select CRYPTO_HASH 815 + select CRC32 816 + help 817 + CRC32c CRC algorithm with the iSCSI polynomial (RFC 3385 and RFC 3720) 818 + 819 + A 32-bit CRC (cyclic redundancy check) with a polynomial defined 820 + by G. Castagnoli, S. Braeuer and M. Herrman in "Optimization of Cyclic 821 + Redundancy-Check Codes with 24 and 32 Parity Bits", IEEE Transactions 822 + on Communications, Vol. 41, No. 6, June 1993, selected for use with 823 + iSCSI. 824 + 825 + Used by btrfs, ext4, jbd2, NVMeoF/TCP, and iSCSI. 826 + 827 + config CRYPTO_CRC32 828 + tristate "CRC32" 829 + select CRYPTO_HASH 830 + select CRC32 831 + help 832 + CRC32 CRC algorithm (IEEE 802.3) 833 + 834 + Used by RoCEv2 and f2fs. 835 + 836 + config CRYPTO_CRCT10DIF 837 + tristate "CRCT10DIF" 1479 838 select CRYPTO_HASH 1480 839 help 1481 - This is the s390 hardware accelerated implementation of GHASH, 1482 - the hash function used in GCM (Galois/Counter mode). 840 + CRC16 CRC algorithm used for the T10 (SCSI) Data Integrity Field (DIF) 1483 841 1484 - It is available as of z196. 842 + CRC algorithm used by the SCSI Block Commands standard. 1485 843 1486 - comment "Ciphers" 1487 - 1488 - config CRYPTO_AES 1489 - tristate "AES cipher algorithms" 1490 - select CRYPTO_ALGAPI 1491 - select CRYPTO_LIB_AES 844 + config CRYPTO_CRC64_ROCKSOFT 845 + tristate "CRC64 based on Rocksoft Model algorithm" 846 + depends on CRC64 847 + select CRYPTO_HASH 1492 848 help 1493 - AES cipher algorithms (FIPS-197). AES uses the Rijndael 1494 - algorithm. 849 + CRC64 CRC algorithm based on the Rocksoft Model CRC Algorithm 1495 850 1496 - Rijndael appears to be consistently a very good performer in 1497 - both hardware and software across a wide range of computing 1498 - environments regardless of its use in feedback or non-feedback 1499 - modes. Its key setup time is excellent, and its key agility is 1500 - good. Rijndael's very low memory requirements make it very well 1501 - suited for restricted-space environments, in which it also 1502 - demonstrates excellent performance. Rijndael's operations are 1503 - among the easiest to defend against power and timing attacks. 
851 + Used by the NVMe implementation of T10 DIF (BLK_DEV_INTEGRITY) 1504 852 1505 - The AES specifies three key sizes: 128, 192 and 256 bits 853 + See https://zlib.net/crc_v3.txt 1506 854 1507 - See <http://csrc.nist.gov/CryptoToolkit/aes/> for more information. 855 + endmenu 1508 856 1509 - config CRYPTO_AES_TI 1510 - tristate "Fixed time AES cipher" 1511 - select CRYPTO_ALGAPI 1512 - select CRYPTO_LIB_AES 1513 - help 1514 - This is a generic implementation of AES that attempts to eliminate 1515 - data dependent latencies as much as possible without affecting 1516 - performance too much. It is intended for use by the generic CCM 1517 - and GCM drivers, and other CTR or CMAC/XCBC based modes that rely 1518 - solely on encryption (although decryption is supported as well, but 1519 - with a more dramatic performance hit) 1520 - 1521 - Instead of using 16 lookup tables of 1 KB each, (8 for encryption and 1522 - 8 for decryption), this implementation only uses just two S-boxes of 1523 - 256 bytes each, and attempts to eliminate data dependent latencies by 1524 - prefetching the entire table into the cache at the start of each 1525 - block. Interrupts are also disabled to avoid races where cachelines 1526 - are evicted when the CPU is interrupted to do something else. 1527 - 1528 - config CRYPTO_AES_NI_INTEL 1529 - tristate "AES cipher algorithms (AES-NI)" 1530 - depends on X86 1531 - select CRYPTO_AEAD 1532 - select CRYPTO_LIB_AES 1533 - select CRYPTO_ALGAPI 1534 - select CRYPTO_SKCIPHER 1535 - select CRYPTO_SIMD 1536 - help 1537 - Use Intel AES-NI instructions for AES algorithm. 1538 - 1539 - AES cipher algorithms (FIPS-197). AES uses the Rijndael 1540 - algorithm. 1541 - 1542 - Rijndael appears to be consistently a very good performer in 1543 - both hardware and software across a wide range of computing 1544 - environments regardless of its use in feedback or non-feedback 1545 - modes. Its key setup time is excellent, and its key agility is 1546 - good. Rijndael's very low memory requirements make it very well 1547 - suited for restricted-space environments, in which it also 1548 - demonstrates excellent performance. Rijndael's operations are 1549 - among the easiest to defend against power and timing attacks. 1550 - 1551 - The AES specifies three key sizes: 128, 192 and 256 bits 1552 - 1553 - See <http://csrc.nist.gov/encryption/aes/> for more information. 1554 - 1555 - In addition to AES cipher algorithm support, the acceleration 1556 - for some popular block cipher mode is supported too, including 1557 - ECB, CBC, LRW, XTS. The 64 bit version has additional 1558 - acceleration for CTR and XCTR. 1559 - 1560 - config CRYPTO_AES_SPARC64 1561 - tristate "AES cipher algorithms (SPARC64)" 1562 - depends on SPARC64 1563 - select CRYPTO_SKCIPHER 1564 - help 1565 - Use SPARC64 crypto opcodes for AES algorithm. 1566 - 1567 - AES cipher algorithms (FIPS-197). AES uses the Rijndael 1568 - algorithm. 1569 - 1570 - Rijndael appears to be consistently a very good performer in 1571 - both hardware and software across a wide range of computing 1572 - environments regardless of its use in feedback or non-feedback 1573 - modes. Its key setup time is excellent, and its key agility is 1574 - good. Rijndael's very low memory requirements make it very well 1575 - suited for restricted-space environments, in which it also 1576 - demonstrates excellent performance. Rijndael's operations are 1577 - among the easiest to defend against power and timing attacks. 
1578 - 1579 - The AES specifies three key sizes: 128, 192 and 256 bits 1580 - 1581 - See <http://csrc.nist.gov/encryption/aes/> for more information. 1582 - 1583 - In addition to AES cipher algorithm support, the acceleration 1584 - for some popular block cipher mode is supported too, including 1585 - ECB and CBC. 1586 - 1587 - config CRYPTO_AES_PPC_SPE 1588 - tristate "AES cipher algorithms (PPC SPE)" 1589 - depends on PPC && SPE 1590 - select CRYPTO_SKCIPHER 1591 - help 1592 - AES cipher algorithms (FIPS-197). Additionally the acceleration 1593 - for popular block cipher modes ECB, CBC, CTR and XTS is supported. 1594 - This module should only be used for low power (router) devices 1595 - without hardware AES acceleration (e.g. caam crypto). It reduces the 1596 - size of the AES tables from 16KB to 8KB + 256 bytes and mitigates 1597 - timining attacks. Nevertheless it might be not as secure as other 1598 - architecture specific assembler implementations that work on 1KB 1599 - tables or 256 bytes S-boxes. 1600 - 1601 - config CRYPTO_AES_S390 1602 - tristate "AES cipher algorithms" 1603 - depends on S390 1604 - select CRYPTO_ALGAPI 1605 - select CRYPTO_SKCIPHER 1606 - help 1607 - This is the s390 hardware accelerated implementation of the 1608 - AES cipher algorithms (FIPS-197). 1609 - 1610 - As of z9 the ECB and CBC modes are hardware accelerated 1611 - for 128 bit keys. 1612 - As of z10 the ECB and CBC modes are hardware accelerated 1613 - for all AES key sizes. 1614 - As of z196 the CTR mode is hardware accelerated for all AES 1615 - key sizes and XTS mode is hardware accelerated for 256 and 1616 - 512 bit keys. 1617 - 1618 - config CRYPTO_ANUBIS 1619 - tristate "Anubis cipher algorithm" 1620 - depends on CRYPTO_USER_API_ENABLE_OBSOLETE 1621 - select CRYPTO_ALGAPI 1622 - help 1623 - Anubis cipher algorithm. 1624 - 1625 - Anubis is a variable key length cipher which can use keys from 1626 - 128 bits to 320 bits in length. It was evaluated as a entrant 1627 - in the NESSIE competition. 1628 - 1629 - See also: 1630 - <https://www.cosic.esat.kuleuven.be/nessie/reports/> 1631 - <http://www.larc.usp.br/~pbarreto/AnubisPage.html> 1632 - 1633 - config CRYPTO_ARC4 1634 - tristate "ARC4 cipher algorithm" 1635 - depends on CRYPTO_USER_API_ENABLE_OBSOLETE 1636 - select CRYPTO_SKCIPHER 1637 - select CRYPTO_LIB_ARC4 1638 - help 1639 - ARC4 cipher algorithm. 1640 - 1641 - ARC4 is a stream cipher using keys ranging from 8 bits to 2048 1642 - bits in length. This algorithm is required for driver-based 1643 - WEP, but it should not be for other purposes because of the 1644 - weakness of the algorithm. 1645 - 1646 - config CRYPTO_BLOWFISH 1647 - tristate "Blowfish cipher algorithm" 1648 - select CRYPTO_ALGAPI 1649 - select CRYPTO_BLOWFISH_COMMON 1650 - help 1651 - Blowfish cipher algorithm, by Bruce Schneier. 1652 - 1653 - This is a variable key length cipher which can use keys from 32 1654 - bits to 448 bits in length. It's fast, simple and specifically 1655 - designed for use on "large microprocessors". 1656 - 1657 - See also: 1658 - <https://www.schneier.com/blowfish.html> 1659 - 1660 - config CRYPTO_BLOWFISH_COMMON 1661 - tristate 1662 - help 1663 - Common parts of the Blowfish cipher algorithm shared by the 1664 - generic c and the assembler implementations. 
1665 - 1666 - See also: 1667 - <https://www.schneier.com/blowfish.html> 1668 - 1669 - config CRYPTO_BLOWFISH_X86_64 1670 - tristate "Blowfish cipher algorithm (x86_64)" 1671 - depends on X86 && 64BIT 1672 - select CRYPTO_SKCIPHER 1673 - select CRYPTO_BLOWFISH_COMMON 1674 - imply CRYPTO_CTR 1675 - help 1676 - Blowfish cipher algorithm (x86_64), by Bruce Schneier. 1677 - 1678 - This is a variable key length cipher which can use keys from 32 1679 - bits to 448 bits in length. It's fast, simple and specifically 1680 - designed for use on "large microprocessors". 1681 - 1682 - See also: 1683 - <https://www.schneier.com/blowfish.html> 1684 - 1685 - config CRYPTO_CAMELLIA 1686 - tristate "Camellia cipher algorithms" 1687 - select CRYPTO_ALGAPI 1688 - help 1689 - Camellia cipher algorithms module. 1690 - 1691 - Camellia is a symmetric key block cipher developed jointly 1692 - at NTT and Mitsubishi Electric Corporation. 1693 - 1694 - The Camellia specifies three key sizes: 128, 192 and 256 bits. 1695 - 1696 - See also: 1697 - <https://info.isl.ntt.co.jp/crypt/eng/camellia/index_s.html> 1698 - 1699 - config CRYPTO_CAMELLIA_X86_64 1700 - tristate "Camellia cipher algorithm (x86_64)" 1701 - depends on X86 && 64BIT 1702 - select CRYPTO_SKCIPHER 1703 - imply CRYPTO_CTR 1704 - help 1705 - Camellia cipher algorithm module (x86_64). 1706 - 1707 - Camellia is a symmetric key block cipher developed jointly 1708 - at NTT and Mitsubishi Electric Corporation. 1709 - 1710 - The Camellia specifies three key sizes: 128, 192 and 256 bits. 1711 - 1712 - See also: 1713 - <https://info.isl.ntt.co.jp/crypt/eng/camellia/index_s.html> 1714 - 1715 - config CRYPTO_CAMELLIA_AESNI_AVX_X86_64 1716 - tristate "Camellia cipher algorithm (x86_64/AES-NI/AVX)" 1717 - depends on X86 && 64BIT 1718 - select CRYPTO_SKCIPHER 1719 - select CRYPTO_CAMELLIA_X86_64 1720 - select CRYPTO_SIMD 1721 - imply CRYPTO_XTS 1722 - help 1723 - Camellia cipher algorithm module (x86_64/AES-NI/AVX). 1724 - 1725 - Camellia is a symmetric key block cipher developed jointly 1726 - at NTT and Mitsubishi Electric Corporation. 1727 - 1728 - The Camellia specifies three key sizes: 128, 192 and 256 bits. 1729 - 1730 - See also: 1731 - <https://info.isl.ntt.co.jp/crypt/eng/camellia/index_s.html> 1732 - 1733 - config CRYPTO_CAMELLIA_AESNI_AVX2_X86_64 1734 - tristate "Camellia cipher algorithm (x86_64/AES-NI/AVX2)" 1735 - depends on X86 && 64BIT 1736 - select CRYPTO_CAMELLIA_AESNI_AVX_X86_64 1737 - help 1738 - Camellia cipher algorithm module (x86_64/AES-NI/AVX2). 1739 - 1740 - Camellia is a symmetric key block cipher developed jointly 1741 - at NTT and Mitsubishi Electric Corporation. 1742 - 1743 - The Camellia specifies three key sizes: 128, 192 and 256 bits. 1744 - 1745 - See also: 1746 - <https://info.isl.ntt.co.jp/crypt/eng/camellia/index_s.html> 1747 - 1748 - config CRYPTO_CAMELLIA_SPARC64 1749 - tristate "Camellia cipher algorithm (SPARC64)" 1750 - depends on SPARC64 1751 - select CRYPTO_ALGAPI 1752 - select CRYPTO_SKCIPHER 1753 - help 1754 - Camellia cipher algorithm module (SPARC64). 1755 - 1756 - Camellia is a symmetric key block cipher developed jointly 1757 - at NTT and Mitsubishi Electric Corporation. 1758 - 1759 - The Camellia specifies three key sizes: 128, 192 and 256 bits. 1760 - 1761 - See also: 1762 - <https://info.isl.ntt.co.jp/crypt/eng/camellia/index_s.html> 1763 - 1764 - config CRYPTO_CAST_COMMON 1765 - tristate 1766 - help 1767 - Common parts of the CAST cipher algorithms shared by the 1768 - generic c and the assembler implementations. 
1769 - 1770 - config CRYPTO_CAST5 1771 - tristate "CAST5 (CAST-128) cipher algorithm" 1772 - select CRYPTO_ALGAPI 1773 - select CRYPTO_CAST_COMMON 1774 - help 1775 - The CAST5 encryption algorithm (synonymous with CAST-128) is 1776 - described in RFC2144. 1777 - 1778 - config CRYPTO_CAST5_AVX_X86_64 1779 - tristate "CAST5 (CAST-128) cipher algorithm (x86_64/AVX)" 1780 - depends on X86 && 64BIT 1781 - select CRYPTO_SKCIPHER 1782 - select CRYPTO_CAST5 1783 - select CRYPTO_CAST_COMMON 1784 - select CRYPTO_SIMD 1785 - imply CRYPTO_CTR 1786 - help 1787 - The CAST5 encryption algorithm (synonymous with CAST-128) is 1788 - described in RFC2144. 1789 - 1790 - This module provides the Cast5 cipher algorithm that processes 1791 - sixteen blocks parallel using the AVX instruction set. 1792 - 1793 - config CRYPTO_CAST6 1794 - tristate "CAST6 (CAST-256) cipher algorithm" 1795 - select CRYPTO_ALGAPI 1796 - select CRYPTO_CAST_COMMON 1797 - help 1798 - The CAST6 encryption algorithm (synonymous with CAST-256) is 1799 - described in RFC2612. 1800 - 1801 - config CRYPTO_CAST6_AVX_X86_64 1802 - tristate "CAST6 (CAST-256) cipher algorithm (x86_64/AVX)" 1803 - depends on X86 && 64BIT 1804 - select CRYPTO_SKCIPHER 1805 - select CRYPTO_CAST6 1806 - select CRYPTO_CAST_COMMON 1807 - select CRYPTO_SIMD 1808 - imply CRYPTO_XTS 1809 - imply CRYPTO_CTR 1810 - help 1811 - The CAST6 encryption algorithm (synonymous with CAST-256) is 1812 - described in RFC2612. 1813 - 1814 - This module provides the Cast6 cipher algorithm that processes 1815 - eight blocks parallel using the AVX instruction set. 1816 - 1817 - config CRYPTO_DES 1818 - tristate "DES and Triple DES EDE cipher algorithms" 1819 - select CRYPTO_ALGAPI 1820 - select CRYPTO_LIB_DES 1821 - help 1822 - DES cipher algorithm (FIPS 46-2), and Triple DES EDE (FIPS 46-3). 1823 - 1824 - config CRYPTO_DES_SPARC64 1825 - tristate "DES and Triple DES EDE cipher algorithms (SPARC64)" 1826 - depends on SPARC64 1827 - select CRYPTO_ALGAPI 1828 - select CRYPTO_LIB_DES 1829 - select CRYPTO_SKCIPHER 1830 - help 1831 - DES cipher algorithm (FIPS 46-2), and Triple DES EDE (FIPS 46-3), 1832 - optimized using SPARC64 crypto opcodes. 1833 - 1834 - config CRYPTO_DES3_EDE_X86_64 1835 - tristate "Triple DES EDE cipher algorithm (x86-64)" 1836 - depends on X86 && 64BIT 1837 - select CRYPTO_SKCIPHER 1838 - select CRYPTO_LIB_DES 1839 - imply CRYPTO_CTR 1840 - help 1841 - Triple DES EDE (FIPS 46-3) algorithm. 1842 - 1843 - This module provides implementation of the Triple DES EDE cipher 1844 - algorithm that is optimized for x86-64 processors. Two versions of 1845 - algorithm are provided; regular processing one input block and 1846 - one that processes three blocks parallel. 1847 - 1848 - config CRYPTO_DES_S390 1849 - tristate "DES and Triple DES cipher algorithms" 1850 - depends on S390 1851 - select CRYPTO_ALGAPI 1852 - select CRYPTO_SKCIPHER 1853 - select CRYPTO_LIB_DES 1854 - help 1855 - This is the s390 hardware accelerated implementation of the 1856 - DES cipher algorithm (FIPS 46-2), and Triple DES EDE (FIPS 46-3). 1857 - 1858 - As of z990 the ECB and CBC mode are hardware accelerated. 1859 - As of z196 the CTR mode is hardware accelerated. 1860 - 1861 - config CRYPTO_FCRYPT 1862 - tristate "FCrypt cipher algorithm" 1863 - select CRYPTO_ALGAPI 1864 - select CRYPTO_SKCIPHER 1865 - help 1866 - FCrypt algorithm used by RxRPC. 
1867 - 1868 - config CRYPTO_KHAZAD 1869 - tristate "Khazad cipher algorithm" 1870 - depends on CRYPTO_USER_API_ENABLE_OBSOLETE 1871 - select CRYPTO_ALGAPI 1872 - help 1873 - Khazad cipher algorithm. 1874 - 1875 - Khazad was a finalist in the initial NESSIE competition. It is 1876 - an algorithm optimized for 64-bit processors with good performance 1877 - on 32-bit processors. Khazad uses an 128 bit key size. 1878 - 1879 - See also: 1880 - <http://www.larc.usp.br/~pbarreto/KhazadPage.html> 1881 - 1882 - config CRYPTO_CHACHA20 1883 - tristate "ChaCha stream cipher algorithms" 1884 - select CRYPTO_LIB_CHACHA_GENERIC 1885 - select CRYPTO_SKCIPHER 1886 - help 1887 - The ChaCha20, XChaCha20, and XChaCha12 stream cipher algorithms. 1888 - 1889 - ChaCha20 is a 256-bit high-speed stream cipher designed by Daniel J. 1890 - Bernstein and further specified in RFC7539 for use in IETF protocols. 1891 - This is the portable C implementation of ChaCha20. See also: 1892 - <https://cr.yp.to/chacha/chacha-20080128.pdf> 1893 - 1894 - XChaCha20 is the application of the XSalsa20 construction to ChaCha20 1895 - rather than to Salsa20. XChaCha20 extends ChaCha20's nonce length 1896 - from 64 bits (or 96 bits using the RFC7539 convention) to 192 bits, 1897 - while provably retaining ChaCha20's security. See also: 1898 - <https://cr.yp.to/snuffle/xsalsa-20081128.pdf> 1899 - 1900 - XChaCha12 is XChaCha20 reduced to 12 rounds, with correspondingly 1901 - reduced security margin but increased performance. It can be needed 1902 - in some performance-sensitive scenarios. 1903 - 1904 - config CRYPTO_CHACHA20_X86_64 1905 - tristate "ChaCha stream cipher algorithms (x86_64/SSSE3/AVX2/AVX-512VL)" 1906 - depends on X86 && 64BIT 1907 - select CRYPTO_SKCIPHER 1908 - select CRYPTO_LIB_CHACHA_GENERIC 1909 - select CRYPTO_ARCH_HAVE_LIB_CHACHA 1910 - help 1911 - SSSE3, AVX2, and AVX-512VL optimized implementations of the ChaCha20, 1912 - XChaCha20, and XChaCha12 stream ciphers. 1913 - 1914 - config CRYPTO_CHACHA_MIPS 1915 - tristate "ChaCha stream cipher algorithms (MIPS 32r2 optimized)" 1916 - depends on CPU_MIPS32_R2 1917 - select CRYPTO_SKCIPHER 1918 - select CRYPTO_ARCH_HAVE_LIB_CHACHA 1919 - 1920 - config CRYPTO_CHACHA_S390 1921 - tristate "ChaCha20 stream cipher" 1922 - depends on S390 1923 - select CRYPTO_SKCIPHER 1924 - select CRYPTO_LIB_CHACHA_GENERIC 1925 - select CRYPTO_ARCH_HAVE_LIB_CHACHA 1926 - help 1927 - This is the s390 SIMD implementation of the ChaCha20 stream 1928 - cipher (RFC 7539). 1929 - 1930 - It is available as of z13. 1931 - 1932 - config CRYPTO_SEED 1933 - tristate "SEED cipher algorithm" 1934 - depends on CRYPTO_USER_API_ENABLE_OBSOLETE 1935 - select CRYPTO_ALGAPI 1936 - help 1937 - SEED cipher algorithm (RFC4269). 1938 - 1939 - SEED is a 128-bit symmetric key block cipher that has been 1940 - developed by KISA (Korea Information Security Agency) as a 1941 - national standard encryption algorithm of the Republic of Korea. 1942 - It is a 16 round block cipher with the key size of 128 bit. 1943 - 1944 - See also: 1945 - <http://www.kisa.or.kr/kisa/seed/jsp/seed_eng.jsp> 1946 - 1947 - config CRYPTO_ARIA 1948 - tristate "ARIA cipher algorithm" 1949 - select CRYPTO_ALGAPI 1950 - help 1951 - ARIA cipher algorithm (RFC5794). 1952 - 1953 - ARIA is a standard encryption algorithm of the Republic of Korea. 1954 - The ARIA specifies three key sizes and rounds. 1955 - 128-bit: 12 rounds. 1956 - 192-bit: 14 rounds. 1957 - 256-bit: 16 rounds. 
1958 - 1959 - See also: 1960 - <https://seed.kisa.or.kr/kisa/algorithm/EgovAriaInfo.do> 1961 - 1962 - config CRYPTO_SERPENT 1963 - tristate "Serpent cipher algorithm" 1964 - select CRYPTO_ALGAPI 1965 - help 1966 - Serpent cipher algorithm, by Anderson, Biham & Knudsen. 1967 - 1968 - Keys are allowed to be from 0 to 256 bits in length, in steps 1969 - of 8 bits. 1970 - 1971 - See also: 1972 - <https://www.cl.cam.ac.uk/~rja14/serpent.html> 1973 - 1974 - config CRYPTO_SERPENT_SSE2_X86_64 1975 - tristate "Serpent cipher algorithm (x86_64/SSE2)" 1976 - depends on X86 && 64BIT 1977 - select CRYPTO_SKCIPHER 1978 - select CRYPTO_SERPENT 1979 - select CRYPTO_SIMD 1980 - imply CRYPTO_CTR 1981 - help 1982 - Serpent cipher algorithm, by Anderson, Biham & Knudsen. 1983 - 1984 - Keys are allowed to be from 0 to 256 bits in length, in steps 1985 - of 8 bits. 1986 - 1987 - This module provides Serpent cipher algorithm that processes eight 1988 - blocks parallel using SSE2 instruction set. 1989 - 1990 - See also: 1991 - <https://www.cl.cam.ac.uk/~rja14/serpent.html> 1992 - 1993 - config CRYPTO_SERPENT_SSE2_586 1994 - tristate "Serpent cipher algorithm (i586/SSE2)" 1995 - depends on X86 && !64BIT 1996 - select CRYPTO_SKCIPHER 1997 - select CRYPTO_SERPENT 1998 - select CRYPTO_SIMD 1999 - imply CRYPTO_CTR 2000 - help 2001 - Serpent cipher algorithm, by Anderson, Biham & Knudsen. 2002 - 2003 - Keys are allowed to be from 0 to 256 bits in length, in steps 2004 - of 8 bits. 2005 - 2006 - This module provides Serpent cipher algorithm that processes four 2007 - blocks parallel using SSE2 instruction set. 2008 - 2009 - See also: 2010 - <https://www.cl.cam.ac.uk/~rja14/serpent.html> 2011 - 2012 - config CRYPTO_SERPENT_AVX_X86_64 2013 - tristate "Serpent cipher algorithm (x86_64/AVX)" 2014 - depends on X86 && 64BIT 2015 - select CRYPTO_SKCIPHER 2016 - select CRYPTO_SERPENT 2017 - select CRYPTO_SIMD 2018 - imply CRYPTO_XTS 2019 - imply CRYPTO_CTR 2020 - help 2021 - Serpent cipher algorithm, by Anderson, Biham & Knudsen. 2022 - 2023 - Keys are allowed to be from 0 to 256 bits in length, in steps 2024 - of 8 bits. 2025 - 2026 - This module provides the Serpent cipher algorithm that processes 2027 - eight blocks parallel using the AVX instruction set. 2028 - 2029 - See also: 2030 - <https://www.cl.cam.ac.uk/~rja14/serpent.html> 2031 - 2032 - config CRYPTO_SERPENT_AVX2_X86_64 2033 - tristate "Serpent cipher algorithm (x86_64/AVX2)" 2034 - depends on X86 && 64BIT 2035 - select CRYPTO_SERPENT_AVX_X86_64 2036 - help 2037 - Serpent cipher algorithm, by Anderson, Biham & Knudsen. 2038 - 2039 - Keys are allowed to be from 0 to 256 bits in length, in steps 2040 - of 8 bits. 2041 - 2042 - This module provides Serpent cipher algorithm that processes 16 2043 - blocks parallel using AVX2 instruction set. 2044 - 2045 - See also: 2046 - <https://www.cl.cam.ac.uk/~rja14/serpent.html> 2047 - 2048 - config CRYPTO_SM4 2049 - tristate 2050 - 2051 - config CRYPTO_SM4_GENERIC 2052 - tristate "SM4 cipher algorithm" 2053 - select CRYPTO_ALGAPI 2054 - select CRYPTO_SM4 2055 - help 2056 - SM4 cipher algorithms (OSCCA GB/T 32907-2016). 2057 - 2058 - SM4 (GBT.32907-2016) is a cryptographic standard issued by the 2059 - Organization of State Commercial Administration of China (OSCCA) 2060 - as an authorized cryptographic algorithms for the use within China. 
2061 - 2062 - SMS4 was originally created for use in protecting wireless 2063 - networks, and is mandated in the Chinese National Standard for 2064 - Wireless LAN WAPI (Wired Authentication and Privacy Infrastructure) 2065 - (GB.15629.11-2003). 2066 - 2067 - The latest SM4 standard (GBT.32907-2016) was proposed by OSCCA and 2068 - standardized through TC 260 of the Standardization Administration 2069 - of the People's Republic of China (SAC). 2070 - 2071 - The input, output, and key of SMS4 are each 128 bits. 2072 - 2073 - See also: <https://eprint.iacr.org/2008/329.pdf> 2074 - 2075 - If unsure, say N. 2076 - 2077 - config CRYPTO_SM4_AESNI_AVX_X86_64 2078 - tristate "SM4 cipher algorithm (x86_64/AES-NI/AVX)" 2079 - depends on X86 && 64BIT 2080 - select CRYPTO_SKCIPHER 2081 - select CRYPTO_SIMD 2082 - select CRYPTO_ALGAPI 2083 - select CRYPTO_SM4 2084 - help 2085 - SM4 cipher algorithms (OSCCA GB/T 32907-2016) (x86_64/AES-NI/AVX). 2086 - 2087 - SM4 (GBT.32907-2016) is a cryptographic standard issued by the 2088 - Organization of State Commercial Administration of China (OSCCA) 2089 - as an authorized cryptographic algorithms for the use within China. 2090 - 2091 - This is SM4 optimized implementation using AES-NI/AVX/x86_64 2092 - instruction set for block cipher. Through two affine transforms, 2093 - we can use the AES S-Box to simulate the SM4 S-Box to achieve the 2094 - effect of instruction acceleration. 2095 - 2096 - If unsure, say N. 2097 - 2098 - config CRYPTO_SM4_AESNI_AVX2_X86_64 2099 - tristate "SM4 cipher algorithm (x86_64/AES-NI/AVX2)" 2100 - depends on X86 && 64BIT 2101 - select CRYPTO_SKCIPHER 2102 - select CRYPTO_SIMD 2103 - select CRYPTO_ALGAPI 2104 - select CRYPTO_SM4 2105 - select CRYPTO_SM4_AESNI_AVX_X86_64 2106 - help 2107 - SM4 cipher algorithms (OSCCA GB/T 32907-2016) (x86_64/AES-NI/AVX2). 2108 - 2109 - SM4 (GBT.32907-2016) is a cryptographic standard issued by the 2110 - Organization of State Commercial Administration of China (OSCCA) 2111 - as an authorized cryptographic algorithms for the use within China. 2112 - 2113 - This is SM4 optimized implementation using AES-NI/AVX2/x86_64 2114 - instruction set for block cipher. Through two affine transforms, 2115 - we can use the AES S-Box to simulate the SM4 S-Box to achieve the 2116 - effect of instruction acceleration. 2117 - 2118 - If unsure, say N. 2119 - 2120 - config CRYPTO_TEA 2121 - tristate "TEA, XTEA and XETA cipher algorithms" 2122 - depends on CRYPTO_USER_API_ENABLE_OBSOLETE 2123 - select CRYPTO_ALGAPI 2124 - help 2125 - TEA cipher algorithm. 2126 - 2127 - Tiny Encryption Algorithm is a simple cipher that uses 2128 - many rounds for security. It is very fast and uses 2129 - little memory. 2130 - 2131 - Xtendend Tiny Encryption Algorithm is a modification to 2132 - the TEA algorithm to address a potential key weakness 2133 - in the TEA algorithm. 2134 - 2135 - Xtendend Encryption Tiny Algorithm is a mis-implementation 2136 - of the XTEA algorithm for compatibility purposes. 2137 - 2138 - config CRYPTO_TWOFISH 2139 - tristate "Twofish cipher algorithm" 2140 - select CRYPTO_ALGAPI 2141 - select CRYPTO_TWOFISH_COMMON 2142 - help 2143 - Twofish cipher algorithm. 2144 - 2145 - Twofish was submitted as an AES (Advanced Encryption Standard) 2146 - candidate cipher by researchers at CounterPane Systems. It is a 2147 - 16 round block cipher supporting key sizes of 128, 192, and 256 2148 - bits. 
2149 - 2150 - See also: 2151 - <https://www.schneier.com/twofish.html> 2152 - 2153 - config CRYPTO_TWOFISH_COMMON 2154 - tristate 2155 - help 2156 - Common parts of the Twofish cipher algorithm shared by the 2157 - generic c and the assembler implementations. 2158 - 2159 - config CRYPTO_TWOFISH_586 2160 - tristate "Twofish cipher algorithms (i586)" 2161 - depends on (X86 || UML_X86) && !64BIT 2162 - select CRYPTO_ALGAPI 2163 - select CRYPTO_TWOFISH_COMMON 2164 - imply CRYPTO_CTR 2165 - help 2166 - Twofish cipher algorithm. 2167 - 2168 - Twofish was submitted as an AES (Advanced Encryption Standard) 2169 - candidate cipher by researchers at CounterPane Systems. It is a 2170 - 16 round block cipher supporting key sizes of 128, 192, and 256 2171 - bits. 2172 - 2173 - See also: 2174 - <https://www.schneier.com/twofish.html> 2175 - 2176 - config CRYPTO_TWOFISH_X86_64 2177 - tristate "Twofish cipher algorithm (x86_64)" 2178 - depends on (X86 || UML_X86) && 64BIT 2179 - select CRYPTO_ALGAPI 2180 - select CRYPTO_TWOFISH_COMMON 2181 - imply CRYPTO_CTR 2182 - help 2183 - Twofish cipher algorithm (x86_64). 2184 - 2185 - Twofish was submitted as an AES (Advanced Encryption Standard) 2186 - candidate cipher by researchers at CounterPane Systems. It is a 2187 - 16 round block cipher supporting key sizes of 128, 192, and 256 2188 - bits. 2189 - 2190 - See also: 2191 - <https://www.schneier.com/twofish.html> 2192 - 2193 - config CRYPTO_TWOFISH_X86_64_3WAY 2194 - tristate "Twofish cipher algorithm (x86_64, 3-way parallel)" 2195 - depends on X86 && 64BIT 2196 - select CRYPTO_SKCIPHER 2197 - select CRYPTO_TWOFISH_COMMON 2198 - select CRYPTO_TWOFISH_X86_64 2199 - help 2200 - Twofish cipher algorithm (x86_64, 3-way parallel). 2201 - 2202 - Twofish was submitted as an AES (Advanced Encryption Standard) 2203 - candidate cipher by researchers at CounterPane Systems. It is a 2204 - 16 round block cipher supporting key sizes of 128, 192, and 256 2205 - bits. 2206 - 2207 - This module provides Twofish cipher algorithm that processes three 2208 - blocks parallel, utilizing resources of out-of-order CPUs better. 2209 - 2210 - See also: 2211 - <https://www.schneier.com/twofish.html> 2212 - 2213 - config CRYPTO_TWOFISH_AVX_X86_64 2214 - tristate "Twofish cipher algorithm (x86_64/AVX)" 2215 - depends on X86 && 64BIT 2216 - select CRYPTO_SKCIPHER 2217 - select CRYPTO_SIMD 2218 - select CRYPTO_TWOFISH_COMMON 2219 - select CRYPTO_TWOFISH_X86_64 2220 - select CRYPTO_TWOFISH_X86_64_3WAY 2221 - imply CRYPTO_XTS 2222 - help 2223 - Twofish cipher algorithm (x86_64/AVX). 2224 - 2225 - Twofish was submitted as an AES (Advanced Encryption Standard) 2226 - candidate cipher by researchers at CounterPane Systems. It is a 2227 - 16 round block cipher supporting key sizes of 128, 192, and 256 2228 - bits. 2229 - 2230 - This module provides the Twofish cipher algorithm that processes 2231 - eight blocks parallel using the AVX Instruction Set. 2232 - 2233 - See also: 2234 - <https://www.schneier.com/twofish.html> 2235 - 2236 - comment "Compression" 857 + menu "Compression" 2237 858 2238 859 config CRYPTO_DEFLATE 2239 - tristate "Deflate compression algorithm" 860 + tristate "Deflate" 2240 861 select CRYPTO_ALGAPI 2241 862 select CRYPTO_ACOMP2 2242 863 select ZLIB_INFLATE 2243 864 select ZLIB_DEFLATE 2244 865 help 2245 - This is the Deflate algorithm (RFC1951), specified for use in 2246 - IPSec with the IPCOMP protocol (RFC3173, RFC2394). 
866 + Deflate compression algorithm (RFC1951) 2247 867 2248 - You will most probably want this if using IPSec. 868 + Used by IPSec with the IPCOMP protocol (RFC3173, RFC2394) 2249 869 2250 870 config CRYPTO_LZO 2251 - tristate "LZO compression algorithm" 871 + tristate "LZO" 2252 872 select CRYPTO_ALGAPI 2253 873 select CRYPTO_ACOMP2 2254 874 select LZO_COMPRESS 2255 875 select LZO_DECOMPRESS 2256 876 help 2257 - This is the LZO algorithm. 877 + LZO compression algorithm 878 + 879 + See https://www.oberhumer.com/opensource/lzo/ for further information. 2258 880 2259 881 config CRYPTO_842 2260 - tristate "842 compression algorithm" 882 + tristate "842" 2261 883 select CRYPTO_ALGAPI 2262 884 select CRYPTO_ACOMP2 2263 885 select 842_COMPRESS 2264 886 select 842_DECOMPRESS 2265 887 help 2266 - This is the 842 algorithm. 888 + 842 compression algorithm by IBM 889 + 890 + See https://github.com/plauth/lib842 for further information. 2267 891 2268 892 config CRYPTO_LZ4 2269 - tristate "LZ4 compression algorithm" 893 + tristate "LZ4" 2270 894 select CRYPTO_ALGAPI 2271 895 select CRYPTO_ACOMP2 2272 896 select LZ4_COMPRESS 2273 897 select LZ4_DECOMPRESS 2274 898 help 2275 - This is the LZ4 algorithm. 899 + LZ4 compression algorithm 900 + 901 + See https://github.com/lz4/lz4 for further information. 2276 902 2277 903 config CRYPTO_LZ4HC 2278 - tristate "LZ4HC compression algorithm" 904 + tristate "LZ4HC" 2279 905 select CRYPTO_ALGAPI 2280 906 select CRYPTO_ACOMP2 2281 907 select LZ4HC_COMPRESS 2282 908 select LZ4_DECOMPRESS 2283 909 help 2284 - This is the LZ4 high compression mode algorithm. 910 + LZ4 high compression mode algorithm 911 + 912 + See https://github.com/lz4/lz4 for further information. 2285 913 2286 914 config CRYPTO_ZSTD 2287 - tristate "Zstd compression algorithm" 915 + tristate "Zstd" 2288 916 select CRYPTO_ALGAPI 2289 917 select CRYPTO_ACOMP2 2290 918 select ZSTD_COMPRESS 2291 919 select ZSTD_DECOMPRESS 2292 920 help 2293 - This is the zstd algorithm. 921 + zstd compression algorithm 2294 922 2295 - comment "Random Number Generation" 923 + See https://github.com/facebook/zstd for further information. 924 + 925 + endmenu 926 + 927 + menu "Random number generation" 2296 928 2297 929 config CRYPTO_ANSI_CPRNG 2298 - tristate "Pseudo Random Number Generation for Cryptographic modules" 930 + tristate "ANSI PRNG (Pseudo Random Number Generator)" 2299 931 select CRYPTO_AES 2300 932 select CRYPTO_RNG 2301 933 help 2302 - This option enables the generic pseudo random number generator 2303 - for cryptographic modules. Uses the Algorithm specified in 2304 - ANSI X9.31 A.2.4. Note that this option must be enabled if 2305 - CRYPTO_FIPS is selected 934 + Pseudo RNG (random number generator) (ANSI X9.31 Appendix A.2.4) 935 + 936 + This uses the AES cipher algorithm. 937 + 938 + Note that this option must be enabled if CRYPTO_FIPS is selected 2306 939 2307 940 menuconfig CRYPTO_DRBG_MENU 2308 - tristate "NIST SP800-90A DRBG" 941 + tristate "NIST SP800-90A DRBG (Deterministic Random Bit Generator)" 2309 942 help 2310 - NIST SP800-90A compliant DRBG. In the following submenu, one or 2311 - more of the DRBG types must be selected. 943 + DRBG (Deterministic Random Bit Generator) (NIST SP800-90A) 944 + 945 + In the following submenu, one or more of the DRBG types must be selected. 
2312 946
2313 947 if CRYPTO_DRBG_MENU
2314 948
··· 1253 2019 select CRYPTO_SHA512
1254 2020
1255 2021 config CRYPTO_DRBG_HASH
1256 - bool "Enable Hash DRBG"
2022 + bool "Hash_DRBG"
1257 2023 select CRYPTO_SHA256
1258 2024 help
1259 - Enable the Hash DRBG variant as defined in NIST SP800-90A.
2025 + Hash_DRBG variant as defined in NIST SP800-90A.
2026 +
2027 + This uses the SHA-1, SHA-256, SHA-384, or SHA-512 hash algorithms.
1260 2028
1261 2029 config CRYPTO_DRBG_CTR
1262 - bool "Enable CTR DRBG"
2030 + bool "CTR_DRBG"
1263 2031 select CRYPTO_AES
1264 2032 select CRYPTO_CTR
1265 2033 help
1266 - Enable the CTR DRBG variant as defined in NIST SP800-90A.
2034 + CTR_DRBG variant as defined in NIST SP800-90A.
2035 +
2036 + This uses the AES cipher algorithm with the counter block mode.
1267 2037
1268 2038 config CRYPTO_DRBG
1269 2039 tristate
··· 1278 2040 endif # if CRYPTO_DRBG_MENU
1279 2041
1280 2042 config CRYPTO_JITTERENTROPY
1281 - tristate "Jitterentropy Non-Deterministic Random Number Generator"
2043 + tristate "CPU Jitter Non-Deterministic RNG (Random Number Generator)"
1282 2044 select CRYPTO_RNG
1283 2045 help
1284 - The Jitterentropy RNG is a noise that is intended
1285 - to provide seed to another RNG. The RNG does not
1286 - perform any cryptographic whitening of the generated
1287 - random numbers. This Jitterentropy RNG registers with
1288 - the kernel crypto API and can be used by any caller.
2046 + CPU Jitter RNG (Random Number Generator) from the Jitterentropy library
2047 +
2048 + A non-physical non-deterministic ("true") RNG (e.g., an entropy source
2049 + compliant with NIST SP800-90B) intended to provide a seed to a
2050 + deterministic RNG (e.g. per NIST SP800-90C).
2051 + This RNG does not perform any cryptographic whitening of the generated
2052 + random numbers.
2053 + See https://www.chronox.de/jent.html
1289 2054
1290 2055 config CRYPTO_KDF800108_CTR
1291 2056 tristate
1292 2057 select CRYPTO_HMAC
1293 2058 select CRYPTO_SHA256
1294 2059
2060 + endmenu
2061 + menu "Userspace interface"
2062 +
1295 2063 config CRYPTO_USER_API
1296 2064 tristate
1297 2065
1298 2066 config CRYPTO_USER_API_HASH
1299 - tristate "User-space interface for hash algorithms"
2067 + tristate "Hash algorithms"
1300 2068 depends on NET
1301 2069 select CRYPTO_HASH
1302 2070 select CRYPTO_USER_API
1303 2071 help
1304 - This option enables the user-spaces interface for hash
1305 - algorithms.
2072 + Enable the userspace interface for hash algorithms.
2073 +
2074 + See Documentation/crypto/userspace-if.rst and
2075 + https://www.chronox.de/libkcapi/html/index.html
1306 2076
1307 2077 config CRYPTO_USER_API_SKCIPHER
1308 - tristate "User-space interface for symmetric key cipher algorithms"
2078 + tristate "Symmetric key cipher algorithms"
1309 2079 depends on NET
1310 2080 select CRYPTO_SKCIPHER
1311 2081 select CRYPTO_USER_API
1312 2082 help
1313 - This option enables the user-spaces interface for symmetric
1314 - key cipher algorithms.
2083 + Enable the userspace interface for symmetric key cipher algorithms.
2084 +
2085 + See Documentation/crypto/userspace-if.rst and
2086 + https://www.chronox.de/libkcapi/html/index.html
1315 2087
1316 2088 config CRYPTO_USER_API_RNG
1317 - tristate "User-space interface for random number generator algorithms"
2089 + tristate "RNG (random number generator) algorithms"
1318 2090 depends on NET
1319 2091 select CRYPTO_RNG
1320 2092 select CRYPTO_USER_API
1321 2093 help
1322 - This option enables the user-spaces interface for random
1323 - number generator algorithms.
2094 + Enable the userspace interface for RNG (random number generator) 2095 + algorithms. 2096 + 2097 + See Documentation/crypto/userspace-if.rst and 2098 + https://www.chronox.de/libkcapi/html/index.html 1324 2099 1325 2100 config CRYPTO_USER_API_RNG_CAVP 1326 2101 bool "Enable CAVP testing of DRBG" 1327 2102 depends on CRYPTO_USER_API_RNG && CRYPTO_DRBG 1328 2103 help 1329 - This option enables extra API for CAVP testing via the user-space 1330 - interface: resetting of DRBG entropy, and providing Additional Data. 2104 + Enable extra APIs in the userspace interface for NIST CAVP 2105 + (Cryptographic Algorithm Validation Program) testing: 2106 + - resetting DRBG entropy 2107 + - providing Additional Data 2108 + 1331 2109 This should only be enabled for CAVP testing. You should say 1332 2110 no unless you know what this is. 1333 2111 1334 2112 config CRYPTO_USER_API_AEAD 1335 - tristate "User-space interface for AEAD cipher algorithms" 2113 + tristate "AEAD cipher algorithms" 1336 2114 depends on NET 1337 2115 select CRYPTO_AEAD 1338 2116 select CRYPTO_SKCIPHER 1339 2117 select CRYPTO_NULL 1340 2118 select CRYPTO_USER_API 1341 2119 help 1342 - This option enables the user-spaces interface for AEAD 1343 - cipher algorithms. 2120 + Enable the userspace interface for AEAD cipher algorithms. 2121 + 2122 + See Documentation/crypto/userspace-if.rst and 2123 + https://www.chronox.de/libkcapi/html/index.html 1344 2124 1345 2125 config CRYPTO_USER_API_ENABLE_OBSOLETE 1346 - bool "Enable obsolete cryptographic algorithms for userspace" 2126 + bool "Obsolete cryptographic algorithms" 1347 2127 depends on CRYPTO_USER_API 1348 2128 default y 1349 2129 help ··· 1370 2114 only useful for userspace clients that still rely on them. 1371 2115 1372 2116 config CRYPTO_STATS 1373 - bool "Crypto usage statistics for User-space" 2117 + bool "Crypto usage statistics" 1374 2118 depends on CRYPTO_USER 1375 2119 help 1376 - This option enables the gathering of crypto stats. 1377 - This will collect: 1378 - - encrypt/decrypt size and numbers of symmeric operations 1379 - - compress/decompress size and numbers of compress operations 1380 - - size and numbers of hash operations 1381 - - encrypt/decrypt/sign/verify numbers for asymmetric operations 1382 - - generate/seed numbers for rng operations 2120 + Enable the gathering of crypto stats. 2121 + 2122 + This collects data sizes, numbers of requests, and numbers 2123 + of errors processed by: 2124 + - AEAD ciphers (encrypt, decrypt) 2125 + - asymmetric key ciphers (encrypt, decrypt, verify, sign) 2126 + - symmetric key ciphers (encrypt, decrypt) 2127 + - compression algorithms (compress, decompress) 2128 + - hash algorithms (hash) 2129 + - key-agreement protocol primitives (setsecret, generate 2130 + public key, compute shared secret) 2131 + - RNG (generate, seed) 2132 + 2133 + endmenu 1383 2134 1384 2135 config CRYPTO_HASH_INFO 1385 2136 bool 2137 + 2138 + if ARM 2139 + source "arch/arm/crypto/Kconfig" 2140 + endif 2141 + if ARM64 2142 + source "arch/arm64/crypto/Kconfig" 2143 + endif 2144 + if MIPS 2145 + source "arch/mips/crypto/Kconfig" 2146 + endif 2147 + if PPC 2148 + source "arch/powerpc/crypto/Kconfig" 2149 + endif 2150 + if S390 2151 + source "arch/s390/crypto/Kconfig" 2152 + endif 2153 + if SPARC 2154 + source "arch/sparc/crypto/Kconfig" 2155 + endif 2156 + if X86 2157 + source "arch/x86/crypto/Kconfig" 2158 + endif 1386 2159 1387 2160 source "drivers/crypto/Kconfig" 1388 2161 source "crypto/asymmetric_keys/Kconfig"
+1 -1
crypto/Makefile
··· 149 149 obj-$(CONFIG_CRYPTO_KHAZAD) += khazad.o 150 150 obj-$(CONFIG_CRYPTO_ANUBIS) += anubis.o 151 151 obj-$(CONFIG_CRYPTO_SEED) += seed.o 152 - obj-$(CONFIG_CRYPTO_ARIA) += aria.o 152 + obj-$(CONFIG_CRYPTO_ARIA) += aria_generic.o 153 153 obj-$(CONFIG_CRYPTO_CHACHA20) += chacha_generic.o 154 154 obj-$(CONFIG_CRYPTO_POLY1305) += poly1305_generic.o 155 155 obj-$(CONFIG_CRYPTO_DEFLATE) += deflate.o
+8
crypto/akcipher.c
··· 120 120 return -ENOSYS; 121 121 } 122 122 123 + static int akcipher_default_set_key(struct crypto_akcipher *tfm, 124 + const void *key, unsigned int keylen) 125 + { 126 + return -ENOSYS; 127 + } 128 + 123 129 int crypto_register_akcipher(struct akcipher_alg *alg) 124 130 { 125 131 struct crypto_alg *base = &alg->base; ··· 138 132 alg->encrypt = akcipher_default_op; 139 133 if (!alg->decrypt) 140 134 alg->decrypt = akcipher_default_op; 135 + if (!alg->set_priv_key) 136 + alg->set_priv_key = akcipher_default_set_key; 141 137 142 138 akcipher_prepare_alg(alg); 143 139 return crypto_register_alg(base);
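Note on the hunk above: with the default in place, akcipher implementations that cannot set a private key (for example verify-only algorithms) no longer need their own stub; crypto_register_akcipher() now substitutes -ENOSYS handlers for encrypt, decrypt, and set_priv_key when they are left NULL. A minimal registration sketch relying on those defaults (all foo_* names are hypothetical):

    static struct akcipher_alg foo_alg = {
            .verify      = foo_verify,      /* the only op actually implemented */
            .set_pub_key = foo_set_pub_key,
            .max_size    = foo_max_size,
            /* .encrypt, .decrypt and .set_priv_key left NULL on purpose:
             * the core fills in -ENOSYS defaults at registration time */
            .base = {
                    .cra_name        = "foo",
                    .cra_driver_name = "foo-generic",
                    .cra_module      = THIS_MODULE,
            },
    };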
-71
crypto/algapi.c
··· 997 997 } 998 998 EXPORT_SYMBOL_GPL(crypto_inc); 999 999 1000 - void __crypto_xor(u8 *dst, const u8 *src1, const u8 *src2, unsigned int len) 1001 - { 1002 - int relalign = 0; 1003 - 1004 - if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)) { 1005 - int size = sizeof(unsigned long); 1006 - int d = (((unsigned long)dst ^ (unsigned long)src1) | 1007 - ((unsigned long)dst ^ (unsigned long)src2)) & 1008 - (size - 1); 1009 - 1010 - relalign = d ? 1 << __ffs(d) : size; 1011 - 1012 - /* 1013 - * If we care about alignment, process as many bytes as 1014 - * needed to advance dst and src to values whose alignments 1015 - * equal their relative alignment. This will allow us to 1016 - * process the remainder of the input using optimal strides. 1017 - */ 1018 - while (((unsigned long)dst & (relalign - 1)) && len > 0) { 1019 - *dst++ = *src1++ ^ *src2++; 1020 - len--; 1021 - } 1022 - } 1023 - 1024 - while (IS_ENABLED(CONFIG_64BIT) && len >= 8 && !(relalign & 7)) { 1025 - if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)) { 1026 - u64 l = get_unaligned((u64 *)src1) ^ 1027 - get_unaligned((u64 *)src2); 1028 - put_unaligned(l, (u64 *)dst); 1029 - } else { 1030 - *(u64 *)dst = *(u64 *)src1 ^ *(u64 *)src2; 1031 - } 1032 - dst += 8; 1033 - src1 += 8; 1034 - src2 += 8; 1035 - len -= 8; 1036 - } 1037 - 1038 - while (len >= 4 && !(relalign & 3)) { 1039 - if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)) { 1040 - u32 l = get_unaligned((u32 *)src1) ^ 1041 - get_unaligned((u32 *)src2); 1042 - put_unaligned(l, (u32 *)dst); 1043 - } else { 1044 - *(u32 *)dst = *(u32 *)src1 ^ *(u32 *)src2; 1045 - } 1046 - dst += 4; 1047 - src1 += 4; 1048 - src2 += 4; 1049 - len -= 4; 1050 - } 1051 - 1052 - while (len >= 2 && !(relalign & 1)) { 1053 - if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)) { 1054 - u16 l = get_unaligned((u16 *)src1) ^ 1055 - get_unaligned((u16 *)src2); 1056 - put_unaligned(l, (u16 *)dst); 1057 - } else { 1058 - *(u16 *)dst = *(u16 *)src1 ^ *(u16 *)src2; 1059 - } 1060 - dst += 2; 1061 - src1 += 2; 1062 - src2 += 2; 1063 - len -= 2; 1064 - } 1065 - 1066 - while (len--) 1067 - *dst++ = *src1++ ^ *src2++; 1068 - } 1069 - EXPORT_SYMBOL_GPL(__crypto_xor); 1070 - 1071 1000 unsigned int crypto_alg_extsize(struct crypto_alg *alg) 1072 1001 { 1073 1002 return alg->cra_ctxsize +
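This hunk is pure code motion: per the "Create lib/utils module" item in the merge message, the generic XOR helper leaves crypto/algapi.c, and callers keep using the crypto_xor()/crypto_xor_cpy() wrappers unchanged. Usage sketch (buffer names hypothetical):

    #include <crypto/algapi.h>

    /* XOR a keystream into a buffer in place; len is in bytes */
    crypto_xor(buf, keystream, len);

    /* Same, but write the result to a third buffer */
    crypto_xor_cpy(dst, src1, src2, len);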
+2 -2
crypto/api.c
··· 114 114 larval->alg.cra_priority = -1; 115 115 larval->alg.cra_destroy = crypto_larval_destroy; 116 116 117 - strlcpy(larval->alg.cra_name, name, CRYPTO_MAX_ALG_NAME); 117 + strscpy(larval->alg.cra_name, name, CRYPTO_MAX_ALG_NAME); 118 118 init_completion(&larval->completion); 119 119 120 120 return larval; ··· 321 321 322 322 /* 323 323 * If the internal flag is set for a cipher, require a caller to 324 - * to invoke the cipher with the internal flag to use that cipher. 324 + * invoke the cipher with the internal flag to use that cipher. 325 325 * Also, if a caller wants to allocate a cipher that may or may 326 326 * not be an internal cipher, use type | CRYPTO_ALG_INTERNAL and 327 327 * !(mask & CRYPTO_ALG_INTERNAL).
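strscpy() is the preferred replacement for the deprecated strlcpy(): it never reads the source beyond what fits in the destination, and it returns the number of bytes copied, or -E2BIG on truncation, rather than strlen(src). Sketch:

    char name[CRYPTO_MAX_ALG_NAME];
    ssize_t n = strscpy(name, src, sizeof(name));

    if (n == -E2BIG) {
            /* src did not fit; name holds a truncated, NUL-terminated copy */
    }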
+32 -7
crypto/aria.c crypto/aria_generic.c
··· 16 16 17 17 #include <crypto/aria.h> 18 18 19 + static const u32 key_rc[20] = { 20 + 0x517cc1b7, 0x27220a94, 0xfe13abe8, 0xfa9a6ee0, 21 + 0x6db14acc, 0x9e21c820, 0xff28b1d5, 0xef5de2b0, 22 + 0xdb92371d, 0x2126e970, 0x03249775, 0x04e8c90e, 23 + 0x517cc1b7, 0x27220a94, 0xfe13abe8, 0xfa9a6ee0, 24 + 0x6db14acc, 0x9e21c820, 0xff28b1d5, 0xef5de2b0 25 + }; 26 + 19 27 static void aria_set_encrypt_key(struct aria_ctx *ctx, const u8 *in_key, 20 28 unsigned int key_len) 21 29 { ··· 33 25 const u32 *ck; 34 26 int rkidx = 0; 35 27 36 - ck = &key_rc[(key_len - 16) / 8][0]; 28 + ck = &key_rc[(key_len - 16) / 2]; 37 29 38 30 w0[0] = be32_to_cpu(key[0]); 39 31 w0[1] = be32_to_cpu(key[1]); ··· 171 163 } 172 164 } 173 165 174 - static int aria_set_key(struct crypto_tfm *tfm, const u8 *in_key, 175 - unsigned int key_len) 166 + int aria_set_key(struct crypto_tfm *tfm, const u8 *in_key, unsigned int key_len) 176 167 { 177 168 struct aria_ctx *ctx = crypto_tfm_ctx(tfm); 178 169 ··· 186 179 187 180 return 0; 188 181 } 182 + EXPORT_SYMBOL_GPL(aria_set_key); 189 183 190 184 static void __aria_crypt(struct aria_ctx *ctx, u8 *out, const u8 *in, 191 185 u32 key[][ARIA_RD_KEY_WORDS]) ··· 243 235 dst[3] = cpu_to_be32(reg3); 244 236 } 245 237 246 - static void aria_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) 238 + void aria_encrypt(void *_ctx, u8 *out, const u8 *in) 239 + { 240 + struct aria_ctx *ctx = (struct aria_ctx *)_ctx; 241 + 242 + __aria_crypt(ctx, out, in, ctx->enc_key); 243 + } 244 + EXPORT_SYMBOL_GPL(aria_encrypt); 245 + 246 + void aria_decrypt(void *_ctx, u8 *out, const u8 *in) 247 + { 248 + struct aria_ctx *ctx = (struct aria_ctx *)_ctx; 249 + 250 + __aria_crypt(ctx, out, in, ctx->dec_key); 251 + } 252 + EXPORT_SYMBOL_GPL(aria_decrypt); 253 + 254 + static void __aria_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) 247 255 { 248 256 struct aria_ctx *ctx = crypto_tfm_ctx(tfm); 249 257 250 258 __aria_crypt(ctx, out, in, ctx->enc_key); 251 259 } 252 260 253 - static void aria_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) 261 + static void __aria_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) 254 262 { 255 263 struct aria_ctx *ctx = crypto_tfm_ctx(tfm); 256 264 ··· 287 263 .cia_min_keysize = ARIA_MIN_KEY_SIZE, 288 264 .cia_max_keysize = ARIA_MAX_KEY_SIZE, 289 265 .cia_setkey = aria_set_key, 290 - .cia_encrypt = aria_encrypt, 291 - .cia_decrypt = aria_decrypt 266 + .cia_encrypt = __aria_encrypt, 267 + .cia_decrypt = __aria_decrypt 292 268 } 293 269 } 294 270 }; ··· 310 286 MODULE_LICENSE("GPL"); 311 287 MODULE_AUTHOR("Taehee Yoo <ap420073@gmail.com>"); 312 288 MODULE_ALIAS_CRYPTO("aria"); 289 + MODULE_ALIAS_CRYPTO("aria-generic");
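Two details of this hunk are worth spelling out. First, the key_rc table is flattened from u32[5][4] to u32[20], so the old row lookup key_rc[(key_len - 16) / 8][0] becomes the flat offset (key_len - 16) / 2: each row is four words, giving offsets 0, 4, and 8 for 128-, 192-, and 256-bit keys. Second, the exported aria_encrypt()/aria_decrypt() take the context as void * so that optimized modules such as the new aria-avx code can plug them in as fallback callbacks. A hedged calling sketch, assuming a struct aria_ctx already keyed via aria_set_key():

    u8 out[ARIA_BLOCK_SIZE];

    /* ctx is a struct aria_ctx *, passed through void * per the new API */
    aria_encrypt(ctx, out, in);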
+2 -2
crypto/async_tx/raid6test.c
··· 189 189 } 190 190 191 191 192 - static int raid6_test(void) 192 + static int __init raid6_test(void) 193 193 { 194 194 int err = 0; 195 195 int tests = 0; ··· 236 236 return 0; 237 237 } 238 238 239 - static void raid6_test_exit(void) 239 + static void __exit raid6_test_exit(void) 240 240 { 241 241 } 242 242
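The same __init/__exit annotation fix recurs for curve25519, dh, ecdh, ecdsa, rsa, and sm2 below: marking the module constructor __init lets the kernel free it once loading completes, and marking the destructor __exit lets it be dropped entirely when the code is built in and can never unload. The canonical pattern:

    static int __init foo_mod_init(void)       /* discarded after init */
    {
            return crypto_register_kpp(&foo_alg);
    }

    static void __exit foo_mod_exit(void)      /* omitted for built-in code */
    {
            crypto_unregister_kpp(&foo_alg);
    }

    module_init(foo_mod_init);
    module_exit(foo_mod_exit);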
+2 -2
crypto/curve25519-generic.c
··· 72 72 .max_size = curve25519_max_size, 73 73 }; 74 74 75 - static int curve25519_init(void) 75 + static int __init curve25519_init(void) 76 76 { 77 77 return crypto_register_kpp(&curve25519_alg); 78 78 } 79 79 80 - static void curve25519_exit(void) 80 + static void __exit curve25519_exit(void) 81 81 { 82 82 crypto_unregister_kpp(&curve25519_alg); 83 83 }
+2 -2
crypto/dh.c
··· 893 893 #endif /* CONFIG_CRYPTO_DH_RFC7919_GROUPS */ 894 894 895 895 896 - static int dh_init(void) 896 + static int __init dh_init(void) 897 897 { 898 898 int err; 899 899 ··· 911 911 return 0; 912 912 } 913 913 914 - static void dh_exit(void) 914 + static void __exit dh_exit(void) 915 915 { 916 916 crypto_unregister_templates(crypto_ffdhe_templates, 917 917 ARRAY_SIZE(crypto_ffdhe_templates));
+5 -7
crypto/drbg.c
··· 1703 1703 1704 1704 static int drbg_fini_hash_kernel(struct drbg_state *drbg) 1705 1705 { 1706 - struct sdesc *sdesc = (struct sdesc *)drbg->priv_data; 1706 + struct sdesc *sdesc = drbg->priv_data; 1707 1707 if (sdesc) { 1708 1708 crypto_free_shash(sdesc->shash.tfm); 1709 1709 kfree_sensitive(sdesc); ··· 1715 1715 static void drbg_kcapi_hmacsetkey(struct drbg_state *drbg, 1716 1716 const unsigned char *key) 1717 1717 { 1718 - struct sdesc *sdesc = (struct sdesc *)drbg->priv_data; 1718 + struct sdesc *sdesc = drbg->priv_data; 1719 1719 1720 1720 crypto_shash_setkey(sdesc->shash.tfm, key, drbg_statelen(drbg)); 1721 1721 } ··· 1723 1723 static int drbg_kcapi_hash(struct drbg_state *drbg, unsigned char *outval, 1724 1724 const struct list_head *in) 1725 1725 { 1726 - struct sdesc *sdesc = (struct sdesc *)drbg->priv_data; 1726 + struct sdesc *sdesc = drbg->priv_data; 1727 1727 struct drbg_string *input = NULL; 1728 1728 1729 1729 crypto_shash_init(&sdesc->shash); ··· 1818 1818 static void drbg_kcapi_symsetkey(struct drbg_state *drbg, 1819 1819 const unsigned char *key) 1820 1820 { 1821 - struct crypto_cipher *tfm = 1822 - (struct crypto_cipher *)drbg->priv_data; 1821 + struct crypto_cipher *tfm = drbg->priv_data; 1823 1822 1824 1823 crypto_cipher_setkey(tfm, key, (drbg_keylen(drbg))); 1825 1824 } ··· 1826 1827 static int drbg_kcapi_sym(struct drbg_state *drbg, unsigned char *outval, 1827 1828 const struct drbg_string *in) 1828 1829 { 1829 - struct crypto_cipher *tfm = 1830 - (struct crypto_cipher *)drbg->priv_data; 1830 + struct crypto_cipher *tfm = drbg->priv_data; 1831 1831 1832 1832 /* there is only component in *in */ 1833 1833 BUG_ON(in->len < drbg_blocklen(drbg));
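The drbg changes are cast removals only: priv_data is a void *, which converts implicitly to any object pointer type in C, so casts like (struct sdesc *) were redundant noise. For example:

    struct sdesc *sdesc = drbg->priv_data;   /* no cast needed from void * */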
+2 -2
crypto/ecdh.c
··· 200 200 201 201 static bool ecdh_nist_p192_registered; 202 202 203 - static int ecdh_init(void) 203 + static int __init ecdh_init(void) 204 204 { 205 205 int ret; 206 206 ··· 227 227 return ret; 228 228 } 229 229 230 - static void ecdh_exit(void) 230 + static void __exit ecdh_exit(void) 231 231 { 232 232 if (ecdh_nist_p192_registered) 233 233 crypto_unregister_kpp(&ecdh_nist_p192);
+2 -2
crypto/ecdsa.c
··· 332 332 }; 333 333 static bool ecdsa_nist_p192_registered; 334 334 335 - static int ecdsa_init(void) 335 + static int __init ecdsa_init(void) 336 336 { 337 337 int ret; 338 338 ··· 359 359 return ret; 360 360 } 361 361 362 - static void ecdsa_exit(void) 362 + static void __exit ecdsa_exit(void) 363 363 { 364 364 if (ecdsa_nist_p192_registered) 365 365 crypto_unregister_akcipher(&ecdsa_nist_p192);
+1 -1
crypto/essiv.c
··· 543 543 } 544 544 545 545 /* record the driver name so we can instantiate this exact algo later */ 546 - strlcpy(ictx->shash_driver_name, hash_alg->base.cra_driver_name, 546 + strscpy(ictx->shash_driver_name, hash_alg->base.cra_driver_name, 547 547 CRYPTO_MAX_ALG_NAME); 548 548 549 549 /* Instance fields */
+2 -2
crypto/rsa.c
··· 327 327 }, 328 328 }; 329 329 330 - static int rsa_init(void) 330 + static int __init rsa_init(void) 331 331 { 332 332 int err; 333 333 ··· 344 344 return 0; 345 345 } 346 346 347 - static void rsa_exit(void) 347 + static void __exit rsa_exit(void) 348 348 { 349 349 crypto_unregister_template(&rsa_pkcs1pad_tmpl); 350 350 crypto_unregister_akcipher(&rsa);
+2 -2
crypto/sm2.c
··· 441 441 }, 442 442 }; 443 443 444 - static int sm2_init(void) 444 + static int __init sm2_init(void) 445 445 { 446 446 return crypto_register_akcipher(&sm2); 447 447 } 448 448 449 - static void sm2_exit(void) 449 + static void __exit sm2_exit(void) 450 450 { 451 451 crypto_unregister_akcipher(&sm2); 452 452 }
+28 -25
crypto/tcrypt.c
··· 66 66 static unsigned int klen; 67 67 static char *tvmem[TVMEMSIZE]; 68 68 69 - static const char *check[] = { 70 - "des", "md5", "des3_ede", "rot13", "sha1", "sha224", "sha256", "sm3", 71 - "blowfish", "twofish", "serpent", "sha384", "sha512", "md4", "aes", 72 - "cast6", "arc4", "michael_mic", "deflate", "crc32c", "tea", "xtea", 73 - "khazad", "wp512", "wp384", "wp256", "xeta", "fcrypt", 74 - "camellia", "seed", "rmd160", "aria", 75 - "lzo", "lzo-rle", "cts", "sha3-224", "sha3-256", "sha3-384", 76 - "sha3-512", "streebog256", "streebog512", 77 - NULL 78 - }; 79 - 80 69 static const int block_sizes[] = { 16, 64, 128, 256, 1024, 1420, 4096, 0 }; 81 70 static const int aead_sizes[] = { 16, 64, 256, 512, 1024, 1420, 4096, 8192, 0 }; 82 71 ··· 1443 1454 false); 1444 1455 } 1445 1456 1446 - static void test_available(void) 1447 - { 1448 - const char **name = check; 1449 - 1450 - while (*name) { 1451 - printk("alg %s ", *name); 1452 - printk(crypto_has_alg(*name, 0, 0) ? 1453 - "found\n" : "not found\n"); 1454 - name++; 1455 - } 1456 - } 1457 - 1458 1457 static inline int tcrypt_test(const char *alg) 1459 1458 { 1460 1459 int ret; ··· 2205 2228 NULL, 0, 16, 8, speed_template_16_24_32); 2206 2229 break; 2207 2230 2231 + case 229: 2232 + test_mb_aead_speed("gcm(aria)", ENCRYPT, sec, NULL, 0, 16, 8, 2233 + speed_template_16, num_mb); 2234 + test_mb_aead_speed("gcm(aria)", DECRYPT, sec, NULL, 0, 16, 8, 2235 + speed_template_16, num_mb); 2236 + break; 2237 + 2208 2238 case 300: 2209 2239 if (alg) { 2210 2240 test_hash_speed(alg, sec, generic_hash_speed_template); ··· 2632 2648 speed_template_16); 2633 2649 break; 2634 2650 2651 + case 519: 2652 + test_acipher_speed("ecb(aria)", ENCRYPT, sec, NULL, 0, 2653 + speed_template_16_24_32); 2654 + test_acipher_speed("ecb(aria)", DECRYPT, sec, NULL, 0, 2655 + speed_template_16_24_32); 2656 + test_acipher_speed("ctr(aria)", ENCRYPT, sec, NULL, 0, 2657 + speed_template_16_24_32); 2658 + test_acipher_speed("ctr(aria)", DECRYPT, sec, NULL, 0, 2659 + speed_template_16_24_32); 2660 + break; 2661 + 2635 2662 case 600: 2636 2663 test_mb_skcipher_speed("ecb(aes)", ENCRYPT, sec, NULL, 0, 2637 2664 speed_template_16_24_32, num_mb); ··· 2855 2860 speed_template_8_32, num_mb); 2856 2861 break; 2857 2862 2858 - case 1000: 2859 - test_available(); 2863 + case 610: 2864 + test_mb_skcipher_speed("ecb(aria)", ENCRYPT, sec, NULL, 0, 2865 + speed_template_16_32, num_mb); 2866 + test_mb_skcipher_speed("ecb(aria)", DECRYPT, sec, NULL, 0, 2867 + speed_template_16_32, num_mb); 2868 + test_mb_skcipher_speed("ctr(aria)", ENCRYPT, sec, NULL, 0, 2869 + speed_template_16_32, num_mb); 2870 + test_mb_skcipher_speed("ctr(aria)", DECRYPT, sec, NULL, 0, 2871 + speed_template_16_32, num_mb); 2860 2872 break; 2873 + 2861 2874 } 2862 2875 2863 2876 return ret;
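tcrypt is driven by loading the module with a mode number. The deleted check[]/test_available() pair was mode=1000, dropped per the merge message (algorithm presence can be checked via /proc/crypto instead), while the new ARIA tests slot into the existing numbering. Usage sketch (run as root; tcrypt intentionally fails to stay loaded after running its tests):

    modprobe tcrypt mode=519 sec=1      # ecb(aria)/ctr(aria) skcipher speed tests
    modprobe tcrypt mode=229 num_mb=8   # multi-buffer gcm(aria) AEAD speed tests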
+35 -3
crypto/testmgr.c
··· 3322 3322 } 3323 3323 3324 3324 static int test_acomp(struct crypto_acomp *tfm, 3325 - const struct comp_testvec *ctemplate, 3325 + const struct comp_testvec *ctemplate, 3326 3326 const struct comp_testvec *dtemplate, 3327 3327 int ctcount, int dtcount) 3328 3328 { ··· 3417 3417 goto out; 3418 3418 } 3419 3419 3420 + #ifdef CONFIG_CRYPTO_MANAGER_EXTRA_TESTS 3421 + crypto_init_wait(&wait); 3422 + sg_init_one(&src, input_vec, ilen); 3423 + acomp_request_set_params(req, &src, NULL, ilen, 0); 3424 + 3425 + ret = crypto_wait_req(crypto_acomp_compress(req), &wait); 3426 + if (ret) { 3427 + pr_err("alg: acomp: compression failed on NULL dst buffer test %d for %s: ret=%d\n", 3428 + i + 1, algo, -ret); 3429 + kfree(input_vec); 3430 + acomp_request_free(req); 3431 + goto out; 3432 + } 3433 + #endif 3434 + 3420 3435 kfree(input_vec); 3421 3436 acomp_request_free(req); 3422 3437 } ··· 3492 3477 acomp_request_free(req); 3493 3478 goto out; 3494 3479 } 3480 + 3481 + #ifdef CONFIG_CRYPTO_MANAGER_EXTRA_TESTS 3482 + crypto_init_wait(&wait); 3483 + acomp_request_set_params(req, &src, NULL, ilen, 0); 3484 + 3485 + ret = crypto_wait_req(crypto_acomp_decompress(req), &wait); 3486 + if (ret) { 3487 + pr_err("alg: acomp: decompression failed on NULL dst buffer test %d for %s: ret=%d\n", 3488 + i + 1, algo, -ret); 3489 + kfree(input_vec); 3490 + acomp_request_free(req); 3491 + goto out; 3492 + } 3493 + #endif 3495 3494 3496 3495 kfree(input_vec); 3497 3496 acomp_request_free(req); ··· 5830 5801 driver, alg, 5831 5802 fips_enabled ? "fips" : "panic_on_fail"); 5832 5803 } 5833 - WARN(1, "alg: self-tests for %s (%s) failed (rc=%d)", 5834 - driver, alg, rc); 5804 + pr_warn("alg: self-tests for %s using %s failed (rc=%d)", 5805 + alg, driver, rc); 5806 + WARN(rc != -ENOENT, 5807 + "alg: self-tests for %s using %s failed (rc=%d)", 5808 + alg, driver, rc); 5835 5809 } else { 5836 5810 if (fips_enabled) 5837 5811 pr_info("alg: self-tests for %s (%s) passed\n",
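The blocks added under CONFIG_CRYPTO_MANAGER_EXTRA_TESTS exercise the acomp "NULL destination" mode: per the calls above, passing a NULL dst scatterlist with dlen 0 asks the implementation to allocate the output buffer itself, and the test verifies that both compress and decompress survive it. The pattern being driven:

    acomp_request_set_params(req, &src, NULL, ilen, 0);  /* NULL dst: alg allocates */
    ret = crypto_wait_req(crypto_acomp_compress(req), &wait);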
+2 -2
drivers/char/hw_random/arm_smccc_trng.c
··· 71 71 MAX_BITS_PER_CALL); 72 72 73 73 arm_smccc_1_1_invoke(ARM_SMCCC_TRNG_RND, bits, &res); 74 - if ((int)res.a0 < 0) 75 - return (int)res.a0; 76 74 77 75 switch ((int)res.a0) { 78 76 case SMCCC_RET_SUCCESS: ··· 86 88 return copied; 87 89 cond_resched(); 88 90 break; 91 + default: 92 + return -EIO; 89 93 } 90 94 } 91 95
+25 -30
drivers/char/hw_random/core.c
··· 52 52 53 53 static void drop_current_rng(void); 54 54 static int hwrng_init(struct hwrng *rng); 55 - static void hwrng_manage_rngd(struct hwrng *rng); 55 + static int hwrng_fillfn(void *unused); 56 56 57 57 static inline int rng_get_data(struct hwrng *rng, u8 *buffer, size_t size, 58 58 int wait); ··· 95 95 96 96 drop_current_rng(); 97 97 current_rng = rng; 98 + 99 + /* if necessary, start hwrng thread */ 100 + if (!hwrng_fill) { 101 + hwrng_fill = kthread_run(hwrng_fillfn, NULL, "hwrng"); 102 + if (IS_ERR(hwrng_fill)) { 103 + pr_err("hwrng_fill thread creation failed\n"); 104 + hwrng_fill = NULL; 105 + } 106 + } 98 107 99 108 return 0; 100 109 } ··· 175 166 if (rng->quality > 1024) 176 167 rng->quality = 1024; 177 168 current_quality = rng->quality; /* obsolete */ 178 - 179 - hwrng_manage_rngd(rng); 180 169 181 170 return 0; 182 171 } ··· 461 454 /* the best available RNG may have changed */ 462 455 ret = enable_best_rng(); 463 456 464 - /* start/stop rngd if necessary */ 465 - if (current_rng) 466 - hwrng_manage_rngd(current_rng); 467 - 468 457 out: 469 458 mutex_unlock(&rng_mutex); 470 459 return ret ? ret : len; ··· 510 507 rng->quality = current_quality; /* obsolete */ 511 508 quality = rng->quality; 512 509 mutex_unlock(&reading_mutex); 510 + 511 + if (rc <= 0) 512 + hwrng_msleep(rng, 10000); 513 + 513 514 put_rng(rng); 514 515 515 - if (!quality) 516 - break; 517 - 518 - if (rc <= 0) { 519 - pr_warn("hwrng: no data available\n"); 520 - msleep_interruptible(10000); 516 + if (rc <= 0) 521 517 continue; 522 - } 523 518 524 519 /* If we cannot credit at least one bit of entropy, 525 520 * keep track of the remainder for the next iteration ··· 532 531 } 533 532 hwrng_fill = NULL; 534 533 return 0; 535 - } 536 - 537 - static void hwrng_manage_rngd(struct hwrng *rng) 538 - { 539 - if (WARN_ON(!mutex_is_locked(&rng_mutex))) 540 - return; 541 - 542 - if (rng->quality == 0 && hwrng_fill) 543 - kthread_stop(hwrng_fill); 544 - if (rng->quality > 0 && !hwrng_fill) { 545 - hwrng_fill = kthread_run(hwrng_fillfn, NULL, "hwrng"); 546 - if (IS_ERR(hwrng_fill)) { 547 - pr_err("hwrng_fill thread creation failed\n"); 548 - hwrng_fill = NULL; 549 - } 550 - } 551 534 } 552 535 553 536 int hwrng_register(struct hwrng *rng) ··· 555 570 556 571 init_completion(&rng->cleanup_done); 557 572 complete(&rng->cleanup_done); 573 + init_completion(&rng->dying); 558 574 559 575 if (!current_rng || 560 576 (!cur_rng_set_by_user && rng->quality > current_rng->quality)) { ··· 603 617 604 618 old_rng = current_rng; 605 619 list_del(&rng->list); 620 + complete_all(&rng->dying); 606 621 if (current_rng == rng) { 607 622 err = enable_best_rng(); 608 623 if (err) { ··· 671 684 devres_release(dev, devm_hwrng_release, devm_hwrng_match, rng); 672 685 } 673 686 EXPORT_SYMBOL_GPL(devm_hwrng_unregister); 687 + 688 + long hwrng_msleep(struct hwrng *rng, unsigned int msecs) 689 + { 690 + unsigned long timeout = msecs_to_jiffies(msecs) + 1; 691 + 692 + return wait_for_completion_interruptible_timeout(&rng->dying, timeout); 693 + } 694 + EXPORT_SYMBOL_GPL(hwrng_msleep); 674 695 675 696 static int __init hwrng_modinit(void) 676 697 {
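hwrng_msleep() gives drivers an interruptible sleep that is also completed by the new rng->dying completion at unregister time, which is what lets the fill thread back off for 10 seconds on empty reads without pinning teardown. A hedged sketch of a polling driver read hook using it (foo_* names hypothetical):

    static int foo_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait)
    {
            while (!foo_data_ready()) {             /* hypothetical hw poll */
                    if (!wait)
                            return 0;
                    /* non-zero: interrupted by a signal or rng going away */
                    if (hwrng_msleep(rng, 100))
                            return -ENODEV;
            }
            return foo_copy_data(buf, max);          /* hypothetical */
    }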
+14 -37
drivers/char/hw_random/imx-rngc.c
··· 245 245 if (IS_ERR(rngc->base)) 246 246 return PTR_ERR(rngc->base); 247 247 248 - rngc->clk = devm_clk_get(&pdev->dev, NULL); 248 + rngc->clk = devm_clk_get_enabled(&pdev->dev, NULL); 249 249 if (IS_ERR(rngc->clk)) { 250 250 dev_err(&pdev->dev, "Can not get rng_clk\n"); 251 251 return PTR_ERR(rngc->clk); ··· 255 255 if (irq < 0) 256 256 return irq; 257 257 258 - ret = clk_prepare_enable(rngc->clk); 259 - if (ret) 260 - return ret; 261 - 262 258 ver_id = readl(rngc->base + RNGC_VER_ID); 263 259 rng_type = ver_id >> RNGC_TYPE_SHIFT; 264 260 /* 265 261 * This driver supports only RNGC and RNGB. (There's a different 266 262 * driver for RNGA.) 267 263 */ 268 - if (rng_type != RNGC_TYPE_RNGC && rng_type != RNGC_TYPE_RNGB) { 269 - ret = -ENODEV; 270 - goto err; 271 - } 272 - 273 - ret = devm_request_irq(&pdev->dev, 274 - irq, imx_rngc_irq, 0, pdev->name, (void *)rngc); 275 - if (ret) { 276 - dev_err(rngc->dev, "Can't get interrupt working.\n"); 277 - goto err; 278 - } 264 + if (rng_type != RNGC_TYPE_RNGC && rng_type != RNGC_TYPE_RNGB) 265 + return -ENODEV; 279 266 280 267 init_completion(&rngc->rng_op_done); 281 268 ··· 277 290 278 291 imx_rngc_irq_mask_clear(rngc); 279 292 293 + ret = devm_request_irq(&pdev->dev, 294 + irq, imx_rngc_irq, 0, pdev->name, (void *)rngc); 295 + if (ret) { 296 + dev_err(rngc->dev, "Can't get interrupt working.\n"); 297 + return ret; 298 + } 299 + 280 300 if (self_test) { 281 301 ret = imx_rngc_self_test(rngc); 282 302 if (ret) { 283 303 dev_err(rngc->dev, "self test failed\n"); 284 - goto err; 304 + return ret; 285 305 } 286 306 } 287 307 288 - ret = hwrng_register(&rngc->rng); 308 + ret = devm_hwrng_register(&pdev->dev, &rngc->rng); 289 309 if (ret) { 290 310 dev_err(&pdev->dev, "hwrng registration failed\n"); 291 - goto err; 311 + return ret; 292 312 } 293 313 294 314 dev_info(&pdev->dev, 295 315 "Freescale RNG%c registered (HW revision %d.%02d)\n", 296 316 rng_type == RNGC_TYPE_RNGB ? 'B' : 'C', 297 317 (ver_id >> RNGC_VER_MAJ_SHIFT) & 0xff, ver_id & 0xff); 298 - return 0; 299 - 300 - err: 301 - clk_disable_unprepare(rngc->clk); 302 - 303 - return ret; 304 - } 305 - 306 - static int __exit imx_rngc_remove(struct platform_device *pdev) 307 - { 308 - struct imx_rngc *rngc = platform_get_drvdata(pdev); 309 - 310 - hwrng_unregister(&rngc->rng); 311 - 312 - clk_disable_unprepare(rngc->clk); 313 - 314 318 return 0; 315 319 } 316 320 ··· 333 355 334 356 static struct platform_driver imx_rngc_driver = { 335 357 .driver = { 336 - .name = "imx_rngc", 358 + .name = KBUILD_MODNAME, 337 359 .pm = &imx_rngc_pm_ops, 338 360 .of_match_table = imx_rngc_dt_ids, 339 361 }, 340 - .remove = __exit_p(imx_rngc_remove), 341 362 }; 342 363 343 364 module_platform_driver_probe(imx_rngc_driver, imx_rngc_probe);
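The probe simplification leans on two device-managed helpers: devm_clk_get_enabled(), which gets and enables the clock and automatically disables/unprepares it on probe failure or unbind, and devm_hwrng_register(), which unregisters on unbind. That is what makes the err: unwind label and the remove() callback removable. Pattern sketch:

    clk = devm_clk_get_enabled(&pdev->dev, NULL);   /* auto clk_disable_unprepare() */
    if (IS_ERR(clk))
            return PTR_ERR(clk);

    ret = devm_hwrng_register(&pdev->dev, &rng);    /* auto hwrng_unregister() */
    if (ret)
            return ret;
    /* no .remove needed: devres tears everything down in reverse order */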
+1 -2
drivers/crypto/Kconfig
··· 802 802 config CRYPTO_DEV_SA2UL 803 803 tristate "Support for TI security accelerator" 804 804 depends on ARCH_K3 || COMPILE_TEST 805 - select ARM64_CRYPTO 806 805 select CRYPTO_AES 807 - select CRYPTO_AES_ARM64 808 806 select CRYPTO_ALGAPI 809 807 select CRYPTO_AUTHENC 810 808 select CRYPTO_SHA1 ··· 816 818 acceleration for cryptographic algorithms on these devices. 817 819 818 820 source "drivers/crypto/keembay/Kconfig" 821 + source "drivers/crypto/aspeed/Kconfig" 819 822 820 823 endif # CRYPTO_HW
+1
drivers/crypto/Makefile
··· 1 1 # SPDX-License-Identifier: GPL-2.0 2 2 obj-$(CONFIG_CRYPTO_DEV_ALLWINNER) += allwinner/ 3 + obj-$(CONFIG_CRYPTO_DEV_ASPEED) += aspeed/ 3 4 obj-$(CONFIG_CRYPTO_DEV_ATMEL_AES) += atmel-aes.o 4 5 obj-$(CONFIG_CRYPTO_DEV_ATMEL_SHA) += atmel-sha.o 5 6 obj-$(CONFIG_CRYPTO_DEV_ATMEL_TDES) += atmel-tdes.o
+2 -14
drivers/crypto/allwinner/sun4i-ss/sun4i-ss-core.c
··· 235 235 #endif 236 236 }; 237 237 238 - static int sun4i_ss_dbgfs_read(struct seq_file *seq, void *v) 238 + static int sun4i_ss_debugfs_show(struct seq_file *seq, void *v) 239 239 { 240 240 unsigned int i; 241 241 ··· 266 266 } 267 267 return 0; 268 268 } 269 - 270 - static int sun4i_ss_dbgfs_open(struct inode *inode, struct file *file) 271 - { 272 - return single_open(file, sun4i_ss_dbgfs_read, inode->i_private); 273 - } 274 - 275 - static const struct file_operations sun4i_ss_debugfs_fops = { 276 - .owner = THIS_MODULE, 277 - .open = sun4i_ss_dbgfs_open, 278 - .read = seq_read, 279 - .llseek = seq_lseek, 280 - .release = single_release, 281 - }; 269 + DEFINE_SHOW_ATTRIBUTE(sun4i_ss_debugfs); 282 270 283 271 /* 284 272 * Power management strategy: The device is suspended unless a TFM exists for
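DEFINE_SHOW_ATTRIBUTE(name) from <linux/seq_file.h> expands to exactly the single_open() boilerplate deleted above: given a name##_show() function it generates name##_open() and name##_fops, which is why the show function is renamed to sun4i_ss_debugfs_show to match. Minimal usage sketch (names hypothetical):

    static int foo_show(struct seq_file *seq, void *v)
    {
            seq_puts(seq, "hello\n");
            return 0;
    }
    DEFINE_SHOW_ATTRIBUTE(foo);   /* generates foo_open() and foo_fops */

    /* in probe(): */
    debugfs_create_file("foo", 0444, NULL, NULL, &foo_fops);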
+2 -4
drivers/crypto/allwinner/sun8i-ce/sun8i-ce-trng.c
··· 54 54 goto err_dst; 55 55 } 56 56 57 - err = pm_runtime_get_sync(ce->dev); 58 - if (err < 0) { 59 - pm_runtime_put_noidle(ce->dev); 57 + err = pm_runtime_resume_and_get(ce->dev); 58 + if (err < 0) 60 59 goto err_pm; 61 - } 62 60 63 61 mutex_lock(&ce->rnglock); 64 62 chan = &ce->chanlist[flow];
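pm_runtime_resume_and_get() folds the pm_runtime_get_sync() plus pm_runtime_put_noidle() error dance into one call: on failure it drops the usage count itself and returns the error. Sketch:

    err = pm_runtime_resume_and_get(dev);
    if (err < 0)
            return err;     /* usage count already dropped on failure */

    /* ... use the device ... */
    pm_runtime_put(dev);    /* pairs with the successful get */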
+3 -3
drivers/crypto/amlogic/amlogic-gxl-cipher.c
··· 177 177 if (areq->src == areq->dst) { 178 178 nr_sgs = dma_map_sg(mc->dev, areq->src, sg_nents(areq->src), 179 179 DMA_BIDIRECTIONAL); 180 - if (nr_sgs < 0) { 180 + if (!nr_sgs) { 181 181 dev_err(mc->dev, "Invalid SG count %d\n", nr_sgs); 182 182 err = -EINVAL; 183 183 goto theend; ··· 186 186 } else { 187 187 nr_sgs = dma_map_sg(mc->dev, areq->src, sg_nents(areq->src), 188 188 DMA_TO_DEVICE); 189 - if (nr_sgs < 0 || nr_sgs > MAXDESC - 3) { 189 + if (!nr_sgs || nr_sgs > MAXDESC - 3) { 190 190 dev_err(mc->dev, "Invalid SG count %d\n", nr_sgs); 191 191 err = -EINVAL; 192 192 goto theend; 193 193 } 194 194 nr_sgd = dma_map_sg(mc->dev, areq->dst, sg_nents(areq->dst), 195 195 DMA_FROM_DEVICE); 196 - if (nr_sgd < 0 || nr_sgd > MAXDESC - 3) { 196 + if (!nr_sgd || nr_sgd > MAXDESC - 3) { 197 197 dev_err(mc->dev, "Invalid SG count %d\n", nr_sgd); 198 198 err = -EINVAL; 199 199 goto theend;
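The fix here is the error check, not the mapping: dma_map_sg() returns the number of entries mapped and 0 on failure, never a negative value, so the old < 0 comparisons could not fire. Correct pattern:

    nr_sgs = dma_map_sg(dev, sgl, nents, DMA_TO_DEVICE);
    if (!nr_sgs)    /* 0 means failure; negative values are never returned */
            return -EINVAL;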
+48
drivers/crypto/aspeed/Kconfig
··· 1 + config CRYPTO_DEV_ASPEED 2 + tristate "Support for Aspeed cryptographic engine driver" 3 + depends on ARCH_ASPEED || COMPILE_TEST 4 + select CRYPTO_ENGINE 5 + help 6 + Hash and Crypto Engine (HACE) is designed to accelerate the 7 + throughput of hash data digest, encryption and decryption. 8 + 9 + Select y here to have support for the cryptographic driver 10 + available on Aspeed SoC. 11 + 12 + config CRYPTO_DEV_ASPEED_DEBUG 13 + bool "Enable Aspeed crypto debug messages" 14 + depends on CRYPTO_DEV_ASPEED 15 + help 16 + Print Aspeed crypto debugging messages if you use this 17 + option to ask for those messages. 18 + Avoid enabling this option for production build to 19 + minimize driver timing. 20 + 21 + config CRYPTO_DEV_ASPEED_HACE_HASH 22 + bool "Enable Aspeed Hash & Crypto Engine (HACE) hash" 23 + depends on CRYPTO_DEV_ASPEED 24 + select CRYPTO_SHA1 25 + select CRYPTO_SHA256 26 + select CRYPTO_SHA512 27 + select CRYPTO_HMAC 28 + help 29 + Select here to enable Aspeed Hash & Crypto Engine (HACE) 30 + hash driver. 31 + Supports multiple message digest standards, including 32 + SHA-1, SHA-224, SHA-256, SHA-384, SHA-512, and so on. 33 + 34 + config CRYPTO_DEV_ASPEED_HACE_CRYPTO 35 + bool "Enable Aspeed Hash & Crypto Engine (HACE) crypto" 36 + depends on CRYPTO_DEV_ASPEED 37 + select CRYPTO_AES 38 + select CRYPTO_DES 39 + select CRYPTO_ECB 40 + select CRYPTO_CBC 41 + select CRYPTO_CFB 42 + select CRYPTO_OFB 43 + select CRYPTO_CTR 44 + help 45 + Select here to enable Aspeed Hash & Crypto Engine (HACE) 46 + crypto driver. 47 + Supports AES/DES symmetric-key encryption and decryption 48 + with ECB/CBC/CFB/OFB/CTR options.
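A configuration fragment enabling everything this Kconfig offers might look like the following (assuming an AST2600 kernel; the two HACE sub-options are bool, so they toggle code inside the single aspeed module rather than building separate modules):

CONFIG_CRYPTO_DEV_ASPEED=y
CONFIG_CRYPTO_DEV_ASPEED_HACE_HASH=y
CONFIG_CRYPTO_DEV_ASPEED_HACE_CRYPTO=y
# CONFIG_CRYPTO_DEV_ASPEED_DEBUG is not set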
+7
drivers/crypto/aspeed/Makefile
··· 1 + hace-hash-$(CONFIG_CRYPTO_DEV_ASPEED_HACE_HASH) := aspeed-hace-hash.o 2 + hace-crypto-$(CONFIG_CRYPTO_DEV_ASPEED_HACE_CRYPTO) := aspeed-hace-crypto.o 3 + 4 + obj-$(CONFIG_CRYPTO_DEV_ASPEED) += aspeed_crypto.o 5 + aspeed_crypto-objs := aspeed-hace.o \ 6 + $(hace-hash-y) \ 7 + $(hace-crypto-y)
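This Makefile builds a single module, aspeed_crypto.ko, whose object list grows with the bool sub-options: "hace-hash-$(CONFIG_...)" expands to the object name only when the symbol is y, and "$(hace-hash-y)" then splices it into aspeed_crypto-objs. The same composite-module pattern for a hypothetical driver:

# Hypothetical composite module "foo_crypto.ko" built from a core file
# plus optional units selected by bool Kconfig symbols.
foo-hash-$(CONFIG_FOO_HASH)     := foo-hash.o
foo-cipher-$(CONFIG_FOO_CIPHER) := foo-cipher.o

obj-$(CONFIG_FOO) += foo_crypto.o
foo_crypto-objs := foo-core.o $(foo-hash-y) $(foo-cipher-y)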
+1133
drivers/crypto/aspeed/aspeed-hace-crypto.c
··· 1 + // SPDX-License-Identifier: GPL-2.0+ 2 + /* 3 + * Copyright (c) 2021 Aspeed Technology Inc. 4 + */ 5 + 6 + #include "aspeed-hace.h" 7 + 8 + #ifdef CONFIG_CRYPTO_DEV_ASPEED_HACE_CRYPTO_DEBUG 9 + #define CIPHER_DBG(h, fmt, ...) \ 10 + dev_info((h)->dev, "%s() " fmt, __func__, ##__VA_ARGS__) 11 + #else 12 + #define CIPHER_DBG(h, fmt, ...) \ 13 + dev_dbg((h)->dev, "%s() " fmt, __func__, ##__VA_ARGS__) 14 + #endif 15 + 16 + static int aspeed_crypto_do_fallback(struct skcipher_request *areq) 17 + { 18 + struct aspeed_cipher_reqctx *rctx = skcipher_request_ctx(areq); 19 + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq); 20 + struct aspeed_cipher_ctx *ctx = crypto_skcipher_ctx(tfm); 21 + int err; 22 + 23 + skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm); 24 + skcipher_request_set_callback(&rctx->fallback_req, areq->base.flags, 25 + areq->base.complete, areq->base.data); 26 + skcipher_request_set_crypt(&rctx->fallback_req, areq->src, areq->dst, 27 + areq->cryptlen, areq->iv); 28 + 29 + if (rctx->enc_cmd & HACE_CMD_ENCRYPT) 30 + err = crypto_skcipher_encrypt(&rctx->fallback_req); 31 + else 32 + err = crypto_skcipher_decrypt(&rctx->fallback_req); 33 + 34 + return err; 35 + } 36 + 37 + static bool aspeed_crypto_need_fallback(struct skcipher_request *areq) 38 + { 39 + struct aspeed_cipher_reqctx *rctx = skcipher_request_ctx(areq); 40 + 41 + if (areq->cryptlen == 0) 42 + return true; 43 + 44 + if ((rctx->enc_cmd & HACE_CMD_DES_SELECT) && 45 + !IS_ALIGNED(areq->cryptlen, DES_BLOCK_SIZE)) 46 + return true; 47 + 48 + if ((!(rctx->enc_cmd & HACE_CMD_DES_SELECT)) && 49 + !IS_ALIGNED(areq->cryptlen, AES_BLOCK_SIZE)) 50 + return true; 51 + 52 + return false; 53 + } 54 + 55 + static int aspeed_hace_crypto_handle_queue(struct aspeed_hace_dev *hace_dev, 56 + struct skcipher_request *req) 57 + { 58 + if (hace_dev->version == AST2500_VERSION && 59 + aspeed_crypto_need_fallback(req)) { 60 + CIPHER_DBG(hace_dev, "SW fallback\n"); 61 + return aspeed_crypto_do_fallback(req); 62 + } 63 + 64 + return crypto_transfer_skcipher_request_to_engine( 65 + hace_dev->crypt_engine_crypto, req); 66 + } 67 + 68 + static int aspeed_crypto_do_request(struct crypto_engine *engine, void *areq) 69 + { 70 + struct skcipher_request *req = skcipher_request_cast(areq); 71 + struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(req); 72 + struct aspeed_cipher_ctx *ctx = crypto_skcipher_ctx(cipher); 73 + struct aspeed_hace_dev *hace_dev = ctx->hace_dev; 74 + struct aspeed_engine_crypto *crypto_engine; 75 + int rc; 76 + 77 + crypto_engine = &hace_dev->crypto_engine; 78 + crypto_engine->req = req; 79 + crypto_engine->flags |= CRYPTO_FLAGS_BUSY; 80 + 81 + rc = ctx->start(hace_dev); 82 + 83 + if (rc != -EINPROGRESS) 84 + return -EIO; 85 + 86 + return 0; 87 + } 88 + 89 + static int aspeed_sk_complete(struct aspeed_hace_dev *hace_dev, int err) 90 + { 91 + struct aspeed_engine_crypto *crypto_engine = &hace_dev->crypto_engine; 92 + struct aspeed_cipher_reqctx *rctx; 93 + struct skcipher_request *req; 94 + 95 + CIPHER_DBG(hace_dev, "\n"); 96 + 97 + req = crypto_engine->req; 98 + rctx = skcipher_request_ctx(req); 99 + 100 + if (rctx->enc_cmd & HACE_CMD_IV_REQUIRE) { 101 + if (rctx->enc_cmd & HACE_CMD_DES_SELECT) 102 + memcpy(req->iv, crypto_engine->cipher_ctx + 103 + DES_KEY_SIZE, DES_KEY_SIZE); 104 + else 105 + memcpy(req->iv, crypto_engine->cipher_ctx, 106 + AES_BLOCK_SIZE); 107 + } 108 + 109 + crypto_engine->flags &= ~CRYPTO_FLAGS_BUSY; 110 + 111 + crypto_finalize_skcipher_request(hace_dev->crypt_engine_crypto, 
req, 112 + err); 113 + 114 + return err; 115 + } 116 + 117 + static int aspeed_sk_transfer_sg(struct aspeed_hace_dev *hace_dev) 118 + { 119 + struct aspeed_engine_crypto *crypto_engine = &hace_dev->crypto_engine; 120 + struct device *dev = hace_dev->dev; 121 + struct aspeed_cipher_reqctx *rctx; 122 + struct skcipher_request *req; 123 + 124 + CIPHER_DBG(hace_dev, "\n"); 125 + 126 + req = crypto_engine->req; 127 + rctx = skcipher_request_ctx(req); 128 + 129 + if (req->src == req->dst) { 130 + dma_unmap_sg(dev, req->src, rctx->src_nents, DMA_BIDIRECTIONAL); 131 + } else { 132 + dma_unmap_sg(dev, req->src, rctx->src_nents, DMA_TO_DEVICE); 133 + dma_unmap_sg(dev, req->dst, rctx->dst_nents, DMA_FROM_DEVICE); 134 + } 135 + 136 + return aspeed_sk_complete(hace_dev, 0); 137 + } 138 + 139 + static int aspeed_sk_transfer(struct aspeed_hace_dev *hace_dev) 140 + { 141 + struct aspeed_engine_crypto *crypto_engine = &hace_dev->crypto_engine; 142 + struct aspeed_cipher_reqctx *rctx; 143 + struct skcipher_request *req; 144 + struct scatterlist *out_sg; 145 + int nbytes = 0; 146 + int rc = 0; 147 + 148 + req = crypto_engine->req; 149 + rctx = skcipher_request_ctx(req); 150 + out_sg = req->dst; 151 + 152 + /* Copy output buffer to dst scatter-gather lists */ 153 + nbytes = sg_copy_from_buffer(out_sg, rctx->dst_nents, 154 + crypto_engine->cipher_addr, req->cryptlen); 155 + if (!nbytes) { 156 + dev_warn(hace_dev->dev, "invalid sg copy, %s:0x%x, %s:0x%x\n", 157 + "nbytes", nbytes, "cryptlen", req->cryptlen); 158 + rc = -EINVAL; 159 + } 160 + 161 + CIPHER_DBG(hace_dev, "%s:%d, %s:%d, %s:%d, %s:%p\n", 162 + "nbytes", nbytes, "req->cryptlen", req->cryptlen, 163 + "nb_out_sg", rctx->dst_nents, 164 + "cipher addr", crypto_engine->cipher_addr); 165 + 166 + return aspeed_sk_complete(hace_dev, rc); 167 + } 168 + 169 + static int aspeed_sk_start(struct aspeed_hace_dev *hace_dev) 170 + { 171 + struct aspeed_engine_crypto *crypto_engine = &hace_dev->crypto_engine; 172 + struct aspeed_cipher_reqctx *rctx; 173 + struct skcipher_request *req; 174 + struct scatterlist *in_sg; 175 + int nbytes; 176 + 177 + req = crypto_engine->req; 178 + rctx = skcipher_request_ctx(req); 179 + in_sg = req->src; 180 + 181 + nbytes = sg_copy_to_buffer(in_sg, rctx->src_nents, 182 + crypto_engine->cipher_addr, req->cryptlen); 183 + 184 + CIPHER_DBG(hace_dev, "%s:%d, %s:%d, %s:%d, %s:%p\n", 185 + "nbytes", nbytes, "req->cryptlen", req->cryptlen, 186 + "nb_in_sg", rctx->src_nents, 187 + "cipher addr", crypto_engine->cipher_addr); 188 + 189 + if (!nbytes) { 190 + dev_warn(hace_dev->dev, "invalid sg copy, %s:0x%x, %s:0x%x\n", 191 + "nbytes", nbytes, "cryptlen", req->cryptlen); 192 + return -EINVAL; 193 + } 194 + 195 + crypto_engine->resume = aspeed_sk_transfer; 196 + 197 + /* Trigger engines */ 198 + ast_hace_write(hace_dev, crypto_engine->cipher_dma_addr, 199 + ASPEED_HACE_SRC); 200 + ast_hace_write(hace_dev, crypto_engine->cipher_dma_addr, 201 + ASPEED_HACE_DEST); 202 + ast_hace_write(hace_dev, req->cryptlen, ASPEED_HACE_DATA_LEN); 203 + ast_hace_write(hace_dev, rctx->enc_cmd, ASPEED_HACE_CMD); 204 + 205 + return -EINPROGRESS; 206 + } 207 + 208 + static int aspeed_sk_start_sg(struct aspeed_hace_dev *hace_dev) 209 + { 210 + struct aspeed_engine_crypto *crypto_engine = &hace_dev->crypto_engine; 211 + struct aspeed_sg_list *src_list, *dst_list; 212 + dma_addr_t src_dma_addr, dst_dma_addr; 213 + struct aspeed_cipher_reqctx *rctx; 214 + struct skcipher_request *req; 215 + struct scatterlist *s; 216 + int src_sg_len; 217 + int dst_sg_len; 218 + int total, 
i; 219 + int rc; 220 + 221 + CIPHER_DBG(hace_dev, "\n"); 222 + 223 + req = crypto_engine->req; 224 + rctx = skcipher_request_ctx(req); 225 + 226 + rctx->enc_cmd |= HACE_CMD_DES_SG_CTRL | HACE_CMD_SRC_SG_CTRL | 227 + HACE_CMD_AES_KEY_HW_EXP | HACE_CMD_MBUS_REQ_SYNC_EN; 228 + 229 + /* BIDIRECTIONAL */ 230 + if (req->dst == req->src) { 231 + src_sg_len = dma_map_sg(hace_dev->dev, req->src, 232 + rctx->src_nents, DMA_BIDIRECTIONAL); 233 + dst_sg_len = src_sg_len; 234 + if (!src_sg_len) { 235 + dev_warn(hace_dev->dev, "dma_map_sg() src error\n"); 236 + return -EINVAL; 237 + } 238 + 239 + } else { 240 + src_sg_len = dma_map_sg(hace_dev->dev, req->src, 241 + rctx->src_nents, DMA_TO_DEVICE); 242 + if (!src_sg_len) { 243 + dev_warn(hace_dev->dev, "dma_map_sg() src error\n"); 244 + return -EINVAL; 245 + } 246 + 247 + dst_sg_len = dma_map_sg(hace_dev->dev, req->dst, 248 + rctx->dst_nents, DMA_FROM_DEVICE); 249 + if (!dst_sg_len) { 250 + dev_warn(hace_dev->dev, "dma_map_sg() dst error\n"); 251 + rc = -EINVAL; 252 + goto free_req_src; 253 + } 254 + } 255 + 256 + src_list = (struct aspeed_sg_list *)crypto_engine->cipher_addr; 257 + src_dma_addr = crypto_engine->cipher_dma_addr; 258 + total = req->cryptlen; 259 + 260 + for_each_sg(req->src, s, src_sg_len, i) { 261 + u32 phy_addr = sg_dma_address(s); 262 + u32 len = sg_dma_len(s); 263 + 264 + if (total > len) 265 + total -= len; 266 + else { 267 + /* last sg list */ 268 + len = total; 269 + len |= BIT(31); 270 + total = 0; 271 + } 272 + 273 + src_list[i].phy_addr = cpu_to_le32(phy_addr); 274 + src_list[i].len = cpu_to_le32(len); 275 + } 276 + 277 + if (total != 0) { 278 + rc = -EINVAL; 279 + goto free_req; 280 + } 281 + 282 + if (req->dst == req->src) { 283 + dst_list = src_list; 284 + dst_dma_addr = src_dma_addr; 285 + 286 + } else { 287 + dst_list = (struct aspeed_sg_list *)crypto_engine->dst_sg_addr; 288 + dst_dma_addr = crypto_engine->dst_sg_dma_addr; 289 + total = req->cryptlen; 290 + 291 + for_each_sg(req->dst, s, dst_sg_len, i) { 292 + u32 phy_addr = sg_dma_address(s); 293 + u32 len = sg_dma_len(s); 294 + 295 + if (total > len) 296 + total -= len; 297 + else { 298 + /* last sg list */ 299 + len = total; 300 + len |= BIT(31); 301 + total = 0; 302 + } 303 + 304 + dst_list[i].phy_addr = cpu_to_le32(phy_addr); 305 + dst_list[i].len = cpu_to_le32(len); 306 + 307 + } 308 + 309 + dst_list[dst_sg_len].phy_addr = 0; 310 + dst_list[dst_sg_len].len = 0; 311 + } 312 + 313 + if (total != 0) { 314 + rc = -EINVAL; 315 + goto free_req; 316 + } 317 + 318 + crypto_engine->resume = aspeed_sk_transfer_sg; 319 + 320 + /* Memory barrier to ensure all data setup before engine starts */ 321 + mb(); 322 + 323 + /* Trigger engines */ 324 + ast_hace_write(hace_dev, src_dma_addr, ASPEED_HACE_SRC); 325 + ast_hace_write(hace_dev, dst_dma_addr, ASPEED_HACE_DEST); 326 + ast_hace_write(hace_dev, req->cryptlen, ASPEED_HACE_DATA_LEN); 327 + ast_hace_write(hace_dev, rctx->enc_cmd, ASPEED_HACE_CMD); 328 + 329 + return -EINPROGRESS; 330 + 331 + free_req: 332 + if (req->dst == req->src) { 333 + dma_unmap_sg(hace_dev->dev, req->src, rctx->src_nents, 334 + DMA_BIDIRECTIONAL); 335 + 336 + } else { 337 + dma_unmap_sg(hace_dev->dev, req->dst, rctx->dst_nents, 338 + DMA_TO_DEVICE); 339 + dma_unmap_sg(hace_dev->dev, req->src, rctx->src_nents, 340 + DMA_TO_DEVICE); 341 + } 342 + 343 + return rc; 344 + 345 + free_req_src: 346 + dma_unmap_sg(hace_dev->dev, req->src, rctx->src_nents, DMA_TO_DEVICE); 347 + 348 + return rc; 349 + } 350 + 351 + static int aspeed_hace_skcipher_trigger(struct 
aspeed_hace_dev *hace_dev) 352 + { 353 + struct aspeed_engine_crypto *crypto_engine = &hace_dev->crypto_engine; 354 + struct aspeed_cipher_reqctx *rctx; 355 + struct crypto_skcipher *cipher; 356 + struct aspeed_cipher_ctx *ctx; 357 + struct skcipher_request *req; 358 + 359 + CIPHER_DBG(hace_dev, "\n"); 360 + 361 + req = crypto_engine->req; 362 + rctx = skcipher_request_ctx(req); 363 + cipher = crypto_skcipher_reqtfm(req); 364 + ctx = crypto_skcipher_ctx(cipher); 365 + 366 + /* enable interrupt */ 367 + rctx->enc_cmd |= HACE_CMD_ISR_EN; 368 + 369 + rctx->dst_nents = sg_nents(req->dst); 370 + rctx->src_nents = sg_nents(req->src); 371 + 372 + ast_hace_write(hace_dev, crypto_engine->cipher_ctx_dma, 373 + ASPEED_HACE_CONTEXT); 374 + 375 + if (rctx->enc_cmd & HACE_CMD_IV_REQUIRE) { 376 + if (rctx->enc_cmd & HACE_CMD_DES_SELECT) 377 + memcpy(crypto_engine->cipher_ctx + DES_BLOCK_SIZE, 378 + req->iv, DES_BLOCK_SIZE); 379 + else 380 + memcpy(crypto_engine->cipher_ctx, req->iv, 381 + AES_BLOCK_SIZE); 382 + } 383 + 384 + if (hace_dev->version == AST2600_VERSION) { 385 + memcpy(crypto_engine->cipher_ctx + 16, ctx->key, ctx->key_len); 386 + 387 + return aspeed_sk_start_sg(hace_dev); 388 + } 389 + 390 + memcpy(crypto_engine->cipher_ctx + 16, ctx->key, AES_MAX_KEYLENGTH); 391 + 392 + return aspeed_sk_start(hace_dev); 393 + } 394 + 395 + static int aspeed_des_crypt(struct skcipher_request *req, u32 cmd) 396 + { 397 + struct aspeed_cipher_reqctx *rctx = skcipher_request_ctx(req); 398 + struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(req); 399 + struct aspeed_cipher_ctx *ctx = crypto_skcipher_ctx(cipher); 400 + struct aspeed_hace_dev *hace_dev = ctx->hace_dev; 401 + u32 crypto_alg = cmd & HACE_CMD_OP_MODE_MASK; 402 + 403 + CIPHER_DBG(hace_dev, "\n"); 404 + 405 + if (crypto_alg == HACE_CMD_CBC || crypto_alg == HACE_CMD_ECB) { 406 + if (!IS_ALIGNED(req->cryptlen, DES_BLOCK_SIZE)) 407 + return -EINVAL; 408 + } 409 + 410 + rctx->enc_cmd = cmd | HACE_CMD_DES_SELECT | HACE_CMD_RI_WO_DATA_ENABLE | 411 + HACE_CMD_DES | HACE_CMD_CONTEXT_LOAD_ENABLE | 412 + HACE_CMD_CONTEXT_SAVE_ENABLE; 413 + 414 + return aspeed_hace_crypto_handle_queue(hace_dev, req); 415 + } 416 + 417 + static int aspeed_des_setkey(struct crypto_skcipher *cipher, const u8 *key, 418 + unsigned int keylen) 419 + { 420 + struct aspeed_cipher_ctx *ctx = crypto_skcipher_ctx(cipher); 421 + struct crypto_tfm *tfm = crypto_skcipher_tfm(cipher); 422 + struct aspeed_hace_dev *hace_dev = ctx->hace_dev; 423 + int rc; 424 + 425 + CIPHER_DBG(hace_dev, "keylen: %d bits\n", keylen); 426 + 427 + if (keylen != DES_KEY_SIZE && keylen != DES3_EDE_KEY_SIZE) { 428 + dev_warn(hace_dev->dev, "invalid keylen: %d bits\n", keylen); 429 + return -EINVAL; 430 + } 431 + 432 + if (keylen == DES_KEY_SIZE) { 433 + rc = crypto_des_verify_key(tfm, key); 434 + if (rc) 435 + return rc; 436 + 437 + } else if (keylen == DES3_EDE_KEY_SIZE) { 438 + rc = crypto_des3_ede_verify_key(tfm, key); 439 + if (rc) 440 + return rc; 441 + } 442 + 443 + memcpy(ctx->key, key, keylen); 444 + ctx->key_len = keylen; 445 + 446 + crypto_skcipher_clear_flags(ctx->fallback_tfm, CRYPTO_TFM_REQ_MASK); 447 + crypto_skcipher_set_flags(ctx->fallback_tfm, cipher->base.crt_flags & 448 + CRYPTO_TFM_REQ_MASK); 449 + 450 + return crypto_skcipher_setkey(ctx->fallback_tfm, key, keylen); 451 + } 452 + 453 + static int aspeed_tdes_ctr_decrypt(struct skcipher_request *req) 454 + { 455 + return aspeed_des_crypt(req, HACE_CMD_DECRYPT | HACE_CMD_CTR | 456 + HACE_CMD_TRIPLE_DES); 457 + } 458 + 459 + static int 
aspeed_tdes_ctr_encrypt(struct skcipher_request *req) 460 + { 461 + return aspeed_des_crypt(req, HACE_CMD_ENCRYPT | HACE_CMD_CTR | 462 + HACE_CMD_TRIPLE_DES); 463 + } 464 + 465 + static int aspeed_tdes_ofb_decrypt(struct skcipher_request *req) 466 + { 467 + return aspeed_des_crypt(req, HACE_CMD_DECRYPT | HACE_CMD_OFB | 468 + HACE_CMD_TRIPLE_DES); 469 + } 470 + 471 + static int aspeed_tdes_ofb_encrypt(struct skcipher_request *req) 472 + { 473 + return aspeed_des_crypt(req, HACE_CMD_ENCRYPT | HACE_CMD_OFB | 474 + HACE_CMD_TRIPLE_DES); 475 + } 476 + 477 + static int aspeed_tdes_cfb_decrypt(struct skcipher_request *req) 478 + { 479 + return aspeed_des_crypt(req, HACE_CMD_DECRYPT | HACE_CMD_CFB | 480 + HACE_CMD_TRIPLE_DES); 481 + } 482 + 483 + static int aspeed_tdes_cfb_encrypt(struct skcipher_request *req) 484 + { 485 + return aspeed_des_crypt(req, HACE_CMD_ENCRYPT | HACE_CMD_CFB | 486 + HACE_CMD_TRIPLE_DES); 487 + } 488 + 489 + static int aspeed_tdes_cbc_decrypt(struct skcipher_request *req) 490 + { 491 + return aspeed_des_crypt(req, HACE_CMD_DECRYPT | HACE_CMD_CBC | 492 + HACE_CMD_TRIPLE_DES); 493 + } 494 + 495 + static int aspeed_tdes_cbc_encrypt(struct skcipher_request *req) 496 + { 497 + return aspeed_des_crypt(req, HACE_CMD_ENCRYPT | HACE_CMD_CBC | 498 + HACE_CMD_TRIPLE_DES); 499 + } 500 + 501 + static int aspeed_tdes_ecb_decrypt(struct skcipher_request *req) 502 + { 503 + return aspeed_des_crypt(req, HACE_CMD_DECRYPT | HACE_CMD_ECB | 504 + HACE_CMD_TRIPLE_DES); 505 + } 506 + 507 + static int aspeed_tdes_ecb_encrypt(struct skcipher_request *req) 508 + { 509 + return aspeed_des_crypt(req, HACE_CMD_ENCRYPT | HACE_CMD_ECB | 510 + HACE_CMD_TRIPLE_DES); 511 + } 512 + 513 + static int aspeed_des_ctr_decrypt(struct skcipher_request *req) 514 + { 515 + return aspeed_des_crypt(req, HACE_CMD_DECRYPT | HACE_CMD_CTR | 516 + HACE_CMD_SINGLE_DES); 517 + } 518 + 519 + static int aspeed_des_ctr_encrypt(struct skcipher_request *req) 520 + { 521 + return aspeed_des_crypt(req, HACE_CMD_ENCRYPT | HACE_CMD_CTR | 522 + HACE_CMD_SINGLE_DES); 523 + } 524 + 525 + static int aspeed_des_ofb_decrypt(struct skcipher_request *req) 526 + { 527 + return aspeed_des_crypt(req, HACE_CMD_DECRYPT | HACE_CMD_OFB | 528 + HACE_CMD_SINGLE_DES); 529 + } 530 + 531 + static int aspeed_des_ofb_encrypt(struct skcipher_request *req) 532 + { 533 + return aspeed_des_crypt(req, HACE_CMD_ENCRYPT | HACE_CMD_OFB | 534 + HACE_CMD_SINGLE_DES); 535 + } 536 + 537 + static int aspeed_des_cfb_decrypt(struct skcipher_request *req) 538 + { 539 + return aspeed_des_crypt(req, HACE_CMD_DECRYPT | HACE_CMD_CFB | 540 + HACE_CMD_SINGLE_DES); 541 + } 542 + 543 + static int aspeed_des_cfb_encrypt(struct skcipher_request *req) 544 + { 545 + return aspeed_des_crypt(req, HACE_CMD_ENCRYPT | HACE_CMD_CFB | 546 + HACE_CMD_SINGLE_DES); 547 + } 548 + 549 + static int aspeed_des_cbc_decrypt(struct skcipher_request *req) 550 + { 551 + return aspeed_des_crypt(req, HACE_CMD_DECRYPT | HACE_CMD_CBC | 552 + HACE_CMD_SINGLE_DES); 553 + } 554 + 555 + static int aspeed_des_cbc_encrypt(struct skcipher_request *req) 556 + { 557 + return aspeed_des_crypt(req, HACE_CMD_ENCRYPT | HACE_CMD_CBC | 558 + HACE_CMD_SINGLE_DES); 559 + } 560 + 561 + static int aspeed_des_ecb_decrypt(struct skcipher_request *req) 562 + { 563 + return aspeed_des_crypt(req, HACE_CMD_DECRYPT | HACE_CMD_ECB | 564 + HACE_CMD_SINGLE_DES); 565 + } 566 + 567 + static int aspeed_des_ecb_encrypt(struct skcipher_request *req) 568 + { 569 + return aspeed_des_crypt(req, HACE_CMD_ENCRYPT | HACE_CMD_ECB | 570 + 
HACE_CMD_SINGLE_DES); 571 + } 572 + 573 + static int aspeed_aes_crypt(struct skcipher_request *req, u32 cmd) 574 + { 575 + struct aspeed_cipher_reqctx *rctx = skcipher_request_ctx(req); 576 + struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(req); 577 + struct aspeed_cipher_ctx *ctx = crypto_skcipher_ctx(cipher); 578 + struct aspeed_hace_dev *hace_dev = ctx->hace_dev; 579 + u32 crypto_alg = cmd & HACE_CMD_OP_MODE_MASK; 580 + 581 + if (crypto_alg == HACE_CMD_CBC || crypto_alg == HACE_CMD_ECB) { 582 + if (!IS_ALIGNED(req->cryptlen, AES_BLOCK_SIZE)) 583 + return -EINVAL; 584 + } 585 + 586 + CIPHER_DBG(hace_dev, "%s\n", 587 + (cmd & HACE_CMD_ENCRYPT) ? "encrypt" : "decrypt"); 588 + 589 + cmd |= HACE_CMD_AES_SELECT | HACE_CMD_RI_WO_DATA_ENABLE | 590 + HACE_CMD_CONTEXT_LOAD_ENABLE | HACE_CMD_CONTEXT_SAVE_ENABLE; 591 + 592 + switch (ctx->key_len) { 593 + case AES_KEYSIZE_128: 594 + cmd |= HACE_CMD_AES128; 595 + break; 596 + case AES_KEYSIZE_192: 597 + cmd |= HACE_CMD_AES192; 598 + break; 599 + case AES_KEYSIZE_256: 600 + cmd |= HACE_CMD_AES256; 601 + break; 602 + default: 603 + return -EINVAL; 604 + } 605 + 606 + rctx->enc_cmd = cmd; 607 + 608 + return aspeed_hace_crypto_handle_queue(hace_dev, req); 609 + } 610 + 611 + static int aspeed_aes_setkey(struct crypto_skcipher *cipher, const u8 *key, 612 + unsigned int keylen) 613 + { 614 + struct aspeed_cipher_ctx *ctx = crypto_skcipher_ctx(cipher); 615 + struct aspeed_hace_dev *hace_dev = ctx->hace_dev; 616 + struct crypto_aes_ctx gen_aes_key; 617 + 618 + CIPHER_DBG(hace_dev, "keylen: %d bits\n", (keylen * 8)); 619 + 620 + if (keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_192 && 621 + keylen != AES_KEYSIZE_256) 622 + return -EINVAL; 623 + 624 + if (ctx->hace_dev->version == AST2500_VERSION) { 625 + aes_expandkey(&gen_aes_key, key, keylen); 626 + memcpy(ctx->key, gen_aes_key.key_enc, AES_MAX_KEYLENGTH); 627 + 628 + } else { 629 + memcpy(ctx->key, key, keylen); 630 + } 631 + 632 + ctx->key_len = keylen; 633 + 634 + crypto_skcipher_clear_flags(ctx->fallback_tfm, CRYPTO_TFM_REQ_MASK); 635 + crypto_skcipher_set_flags(ctx->fallback_tfm, cipher->base.crt_flags & 636 + CRYPTO_TFM_REQ_MASK); 637 + 638 + return crypto_skcipher_setkey(ctx->fallback_tfm, key, keylen); 639 + } 640 + 641 + static int aspeed_aes_ctr_decrypt(struct skcipher_request *req) 642 + { 643 + return aspeed_aes_crypt(req, HACE_CMD_DECRYPT | HACE_CMD_CTR); 644 + } 645 + 646 + static int aspeed_aes_ctr_encrypt(struct skcipher_request *req) 647 + { 648 + return aspeed_aes_crypt(req, HACE_CMD_ENCRYPT | HACE_CMD_CTR); 649 + } 650 + 651 + static int aspeed_aes_ofb_decrypt(struct skcipher_request *req) 652 + { 653 + return aspeed_aes_crypt(req, HACE_CMD_DECRYPT | HACE_CMD_OFB); 654 + } 655 + 656 + static int aspeed_aes_ofb_encrypt(struct skcipher_request *req) 657 + { 658 + return aspeed_aes_crypt(req, HACE_CMD_ENCRYPT | HACE_CMD_OFB); 659 + } 660 + 661 + static int aspeed_aes_cfb_decrypt(struct skcipher_request *req) 662 + { 663 + return aspeed_aes_crypt(req, HACE_CMD_DECRYPT | HACE_CMD_CFB); 664 + } 665 + 666 + static int aspeed_aes_cfb_encrypt(struct skcipher_request *req) 667 + { 668 + return aspeed_aes_crypt(req, HACE_CMD_ENCRYPT | HACE_CMD_CFB); 669 + } 670 + 671 + static int aspeed_aes_cbc_decrypt(struct skcipher_request *req) 672 + { 673 + return aspeed_aes_crypt(req, HACE_CMD_DECRYPT | HACE_CMD_CBC); 674 + } 675 + 676 + static int aspeed_aes_cbc_encrypt(struct skcipher_request *req) 677 + { 678 + return aspeed_aes_crypt(req, HACE_CMD_ENCRYPT | HACE_CMD_CBC); 679 + } 680 + 681 + 
static int aspeed_aes_ecb_decrypt(struct skcipher_request *req) 682 + { 683 + return aspeed_aes_crypt(req, HACE_CMD_DECRYPT | HACE_CMD_ECB); 684 + } 685 + 686 + static int aspeed_aes_ecb_encrypt(struct skcipher_request *req) 687 + { 688 + return aspeed_aes_crypt(req, HACE_CMD_ENCRYPT | HACE_CMD_ECB); 689 + } 690 + 691 + static int aspeed_crypto_cra_init(struct crypto_skcipher *tfm) 692 + { 693 + struct aspeed_cipher_ctx *ctx = crypto_skcipher_ctx(tfm); 694 + struct skcipher_alg *alg = crypto_skcipher_alg(tfm); 695 + const char *name = crypto_tfm_alg_name(&tfm->base); 696 + struct aspeed_hace_alg *crypto_alg; 697 + 698 + 699 + crypto_alg = container_of(alg, struct aspeed_hace_alg, alg.skcipher); 700 + ctx->hace_dev = crypto_alg->hace_dev; 701 + ctx->start = aspeed_hace_skcipher_trigger; 702 + 703 + CIPHER_DBG(ctx->hace_dev, "%s\n", name); 704 + 705 + ctx->fallback_tfm = crypto_alloc_skcipher(name, 0, CRYPTO_ALG_ASYNC | 706 + CRYPTO_ALG_NEED_FALLBACK); 707 + if (IS_ERR(ctx->fallback_tfm)) { 708 + dev_err(ctx->hace_dev->dev, "ERROR: Cannot allocate fallback for %s %ld\n", 709 + name, PTR_ERR(ctx->fallback_tfm)); 710 + return PTR_ERR(ctx->fallback_tfm); 711 + } 712 + 713 + crypto_skcipher_set_reqsize(tfm, sizeof(struct aspeed_cipher_reqctx) + 714 + crypto_skcipher_reqsize(ctx->fallback_tfm)); 715 + 716 + ctx->enginectx.op.do_one_request = aspeed_crypto_do_request; 717 + ctx->enginectx.op.prepare_request = NULL; 718 + ctx->enginectx.op.unprepare_request = NULL; 719 + 720 + return 0; 721 + } 722 + 723 + static void aspeed_crypto_cra_exit(struct crypto_skcipher *tfm) 724 + { 725 + struct aspeed_cipher_ctx *ctx = crypto_skcipher_ctx(tfm); 726 + struct aspeed_hace_dev *hace_dev = ctx->hace_dev; 727 + 728 + CIPHER_DBG(hace_dev, "%s\n", crypto_tfm_alg_name(&tfm->base)); 729 + crypto_free_skcipher(ctx->fallback_tfm); 730 + } 731 + 732 + static struct aspeed_hace_alg aspeed_crypto_algs[] = { 733 + { 734 + .alg.skcipher = { 735 + .min_keysize = AES_MIN_KEY_SIZE, 736 + .max_keysize = AES_MAX_KEY_SIZE, 737 + .setkey = aspeed_aes_setkey, 738 + .encrypt = aspeed_aes_ecb_encrypt, 739 + .decrypt = aspeed_aes_ecb_decrypt, 740 + .init = aspeed_crypto_cra_init, 741 + .exit = aspeed_crypto_cra_exit, 742 + .base = { 743 + .cra_name = "ecb(aes)", 744 + .cra_driver_name = "aspeed-ecb-aes", 745 + .cra_priority = 300, 746 + .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | 747 + CRYPTO_ALG_ASYNC | 748 + CRYPTO_ALG_NEED_FALLBACK, 749 + .cra_blocksize = AES_BLOCK_SIZE, 750 + .cra_ctxsize = sizeof(struct aspeed_cipher_ctx), 751 + .cra_alignmask = 0x0f, 752 + .cra_module = THIS_MODULE, 753 + } 754 + } 755 + }, 756 + { 757 + .alg.skcipher = { 758 + .ivsize = AES_BLOCK_SIZE, 759 + .min_keysize = AES_MIN_KEY_SIZE, 760 + .max_keysize = AES_MAX_KEY_SIZE, 761 + .setkey = aspeed_aes_setkey, 762 + .encrypt = aspeed_aes_cbc_encrypt, 763 + .decrypt = aspeed_aes_cbc_decrypt, 764 + .init = aspeed_crypto_cra_init, 765 + .exit = aspeed_crypto_cra_exit, 766 + .base = { 767 + .cra_name = "cbc(aes)", 768 + .cra_driver_name = "aspeed-cbc-aes", 769 + .cra_priority = 300, 770 + .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | 771 + CRYPTO_ALG_ASYNC | 772 + CRYPTO_ALG_NEED_FALLBACK, 773 + .cra_blocksize = AES_BLOCK_SIZE, 774 + .cra_ctxsize = sizeof(struct aspeed_cipher_ctx), 775 + .cra_alignmask = 0x0f, 776 + .cra_module = THIS_MODULE, 777 + } 778 + } 779 + }, 780 + { 781 + .alg.skcipher = { 782 + .ivsize = AES_BLOCK_SIZE, 783 + .min_keysize = AES_MIN_KEY_SIZE, 784 + .max_keysize = AES_MAX_KEY_SIZE, 785 + .setkey = aspeed_aes_setkey, 786 + .encrypt = 
aspeed_aes_cfb_encrypt, 787 + .decrypt = aspeed_aes_cfb_decrypt, 788 + .init = aspeed_crypto_cra_init, 789 + .exit = aspeed_crypto_cra_exit, 790 + .base = { 791 + .cra_name = "cfb(aes)", 792 + .cra_driver_name = "aspeed-cfb-aes", 793 + .cra_priority = 300, 794 + .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | 795 + CRYPTO_ALG_ASYNC | 796 + CRYPTO_ALG_NEED_FALLBACK, 797 + .cra_blocksize = 1, 798 + .cra_ctxsize = sizeof(struct aspeed_cipher_ctx), 799 + .cra_alignmask = 0x0f, 800 + .cra_module = THIS_MODULE, 801 + } 802 + } 803 + }, 804 + { 805 + .alg.skcipher = { 806 + .ivsize = AES_BLOCK_SIZE, 807 + .min_keysize = AES_MIN_KEY_SIZE, 808 + .max_keysize = AES_MAX_KEY_SIZE, 809 + .setkey = aspeed_aes_setkey, 810 + .encrypt = aspeed_aes_ofb_encrypt, 811 + .decrypt = aspeed_aes_ofb_decrypt, 812 + .init = aspeed_crypto_cra_init, 813 + .exit = aspeed_crypto_cra_exit, 814 + .base = { 815 + .cra_name = "ofb(aes)", 816 + .cra_driver_name = "aspeed-ofb-aes", 817 + .cra_priority = 300, 818 + .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | 819 + CRYPTO_ALG_ASYNC | 820 + CRYPTO_ALG_NEED_FALLBACK, 821 + .cra_blocksize = 1, 822 + .cra_ctxsize = sizeof(struct aspeed_cipher_ctx), 823 + .cra_alignmask = 0x0f, 824 + .cra_module = THIS_MODULE, 825 + } 826 + } 827 + }, 828 + { 829 + .alg.skcipher = { 830 + .min_keysize = DES_KEY_SIZE, 831 + .max_keysize = DES_KEY_SIZE, 832 + .setkey = aspeed_des_setkey, 833 + .encrypt = aspeed_des_ecb_encrypt, 834 + .decrypt = aspeed_des_ecb_decrypt, 835 + .init = aspeed_crypto_cra_init, 836 + .exit = aspeed_crypto_cra_exit, 837 + .base = { 838 + .cra_name = "ecb(des)", 839 + .cra_driver_name = "aspeed-ecb-des", 840 + .cra_priority = 300, 841 + .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | 842 + CRYPTO_ALG_ASYNC | 843 + CRYPTO_ALG_NEED_FALLBACK, 844 + .cra_blocksize = DES_BLOCK_SIZE, 845 + .cra_ctxsize = sizeof(struct aspeed_cipher_ctx), 846 + .cra_alignmask = 0x0f, 847 + .cra_module = THIS_MODULE, 848 + } 849 + } 850 + }, 851 + { 852 + .alg.skcipher = { 853 + .ivsize = DES_BLOCK_SIZE, 854 + .min_keysize = DES_KEY_SIZE, 855 + .max_keysize = DES_KEY_SIZE, 856 + .setkey = aspeed_des_setkey, 857 + .encrypt = aspeed_des_cbc_encrypt, 858 + .decrypt = aspeed_des_cbc_decrypt, 859 + .init = aspeed_crypto_cra_init, 860 + .exit = aspeed_crypto_cra_exit, 861 + .base = { 862 + .cra_name = "cbc(des)", 863 + .cra_driver_name = "aspeed-cbc-des", 864 + .cra_priority = 300, 865 + .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | 866 + CRYPTO_ALG_ASYNC | 867 + CRYPTO_ALG_NEED_FALLBACK, 868 + .cra_blocksize = DES_BLOCK_SIZE, 869 + .cra_ctxsize = sizeof(struct aspeed_cipher_ctx), 870 + .cra_alignmask = 0x0f, 871 + .cra_module = THIS_MODULE, 872 + } 873 + } 874 + }, 875 + { 876 + .alg.skcipher = { 877 + .ivsize = DES_BLOCK_SIZE, 878 + .min_keysize = DES_KEY_SIZE, 879 + .max_keysize = DES_KEY_SIZE, 880 + .setkey = aspeed_des_setkey, 881 + .encrypt = aspeed_des_cfb_encrypt, 882 + .decrypt = aspeed_des_cfb_decrypt, 883 + .init = aspeed_crypto_cra_init, 884 + .exit = aspeed_crypto_cra_exit, 885 + .base = { 886 + .cra_name = "cfb(des)", 887 + .cra_driver_name = "aspeed-cfb-des", 888 + .cra_priority = 300, 889 + .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | 890 + CRYPTO_ALG_ASYNC | 891 + CRYPTO_ALG_NEED_FALLBACK, 892 + .cra_blocksize = DES_BLOCK_SIZE, 893 + .cra_ctxsize = sizeof(struct aspeed_cipher_ctx), 894 + .cra_alignmask = 0x0f, 895 + .cra_module = THIS_MODULE, 896 + } 897 + } 898 + }, 899 + { 900 + .alg.skcipher = { 901 + .ivsize = DES_BLOCK_SIZE, 902 + .min_keysize = DES_KEY_SIZE, 903 + .max_keysize = DES_KEY_SIZE, 904 + 
.setkey = aspeed_des_setkey, 905 + .encrypt = aspeed_des_ofb_encrypt, 906 + .decrypt = aspeed_des_ofb_decrypt, 907 + .init = aspeed_crypto_cra_init, 908 + .exit = aspeed_crypto_cra_exit, 909 + .base = { 910 + .cra_name = "ofb(des)", 911 + .cra_driver_name = "aspeed-ofb-des", 912 + .cra_priority = 300, 913 + .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | 914 + CRYPTO_ALG_ASYNC | 915 + CRYPTO_ALG_NEED_FALLBACK, 916 + .cra_blocksize = DES_BLOCK_SIZE, 917 + .cra_ctxsize = sizeof(struct aspeed_cipher_ctx), 918 + .cra_alignmask = 0x0f, 919 + .cra_module = THIS_MODULE, 920 + } 921 + } 922 + }, 923 + { 924 + .alg.skcipher = { 925 + .min_keysize = DES3_EDE_KEY_SIZE, 926 + .max_keysize = DES3_EDE_KEY_SIZE, 927 + .setkey = aspeed_des_setkey, 928 + .encrypt = aspeed_tdes_ecb_encrypt, 929 + .decrypt = aspeed_tdes_ecb_decrypt, 930 + .init = aspeed_crypto_cra_init, 931 + .exit = aspeed_crypto_cra_exit, 932 + .base = { 933 + .cra_name = "ecb(des3_ede)", 934 + .cra_driver_name = "aspeed-ecb-tdes", 935 + .cra_priority = 300, 936 + .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | 937 + CRYPTO_ALG_ASYNC | 938 + CRYPTO_ALG_NEED_FALLBACK, 939 + .cra_blocksize = DES_BLOCK_SIZE, 940 + .cra_ctxsize = sizeof(struct aspeed_cipher_ctx), 941 + .cra_alignmask = 0x0f, 942 + .cra_module = THIS_MODULE, 943 + } 944 + } 945 + }, 946 + { 947 + .alg.skcipher = { 948 + .ivsize = DES_BLOCK_SIZE, 949 + .min_keysize = DES3_EDE_KEY_SIZE, 950 + .max_keysize = DES3_EDE_KEY_SIZE, 951 + .setkey = aspeed_des_setkey, 952 + .encrypt = aspeed_tdes_cbc_encrypt, 953 + .decrypt = aspeed_tdes_cbc_decrypt, 954 + .init = aspeed_crypto_cra_init, 955 + .exit = aspeed_crypto_cra_exit, 956 + .base = { 957 + .cra_name = "cbc(des3_ede)", 958 + .cra_driver_name = "aspeed-cbc-tdes", 959 + .cra_priority = 300, 960 + .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | 961 + CRYPTO_ALG_ASYNC | 962 + CRYPTO_ALG_NEED_FALLBACK, 963 + .cra_blocksize = DES_BLOCK_SIZE, 964 + .cra_ctxsize = sizeof(struct aspeed_cipher_ctx), 965 + .cra_alignmask = 0x0f, 966 + .cra_module = THIS_MODULE, 967 + } 968 + } 969 + }, 970 + { 971 + .alg.skcipher = { 972 + .ivsize = DES_BLOCK_SIZE, 973 + .min_keysize = DES3_EDE_KEY_SIZE, 974 + .max_keysize = DES3_EDE_KEY_SIZE, 975 + .setkey = aspeed_des_setkey, 976 + .encrypt = aspeed_tdes_cfb_encrypt, 977 + .decrypt = aspeed_tdes_cfb_decrypt, 978 + .init = aspeed_crypto_cra_init, 979 + .exit = aspeed_crypto_cra_exit, 980 + .base = { 981 + .cra_name = "cfb(des3_ede)", 982 + .cra_driver_name = "aspeed-cfb-tdes", 983 + .cra_priority = 300, 984 + .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | 985 + CRYPTO_ALG_ASYNC | 986 + CRYPTO_ALG_NEED_FALLBACK, 987 + .cra_blocksize = DES_BLOCK_SIZE, 988 + .cra_ctxsize = sizeof(struct aspeed_cipher_ctx), 989 + .cra_alignmask = 0x0f, 990 + .cra_module = THIS_MODULE, 991 + } 992 + } 993 + }, 994 + { 995 + .alg.skcipher = { 996 + .ivsize = DES_BLOCK_SIZE, 997 + .min_keysize = DES3_EDE_KEY_SIZE, 998 + .max_keysize = DES3_EDE_KEY_SIZE, 999 + .setkey = aspeed_des_setkey, 1000 + .encrypt = aspeed_tdes_ofb_encrypt, 1001 + .decrypt = aspeed_tdes_ofb_decrypt, 1002 + .init = aspeed_crypto_cra_init, 1003 + .exit = aspeed_crypto_cra_exit, 1004 + .base = { 1005 + .cra_name = "ofb(des3_ede)", 1006 + .cra_driver_name = "aspeed-ofb-tdes", 1007 + .cra_priority = 300, 1008 + .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | 1009 + CRYPTO_ALG_ASYNC | 1010 + CRYPTO_ALG_NEED_FALLBACK, 1011 + .cra_blocksize = DES_BLOCK_SIZE, 1012 + .cra_ctxsize = sizeof(struct aspeed_cipher_ctx), 1013 + .cra_alignmask = 0x0f, 1014 + .cra_module = THIS_MODULE, 1015 + } 1016 
+ } 1017 + }, 1018 + }; 1019 + 1020 + static struct aspeed_hace_alg aspeed_crypto_algs_g6[] = { 1021 + { 1022 + .alg.skcipher = { 1023 + .ivsize = AES_BLOCK_SIZE, 1024 + .min_keysize = AES_MIN_KEY_SIZE, 1025 + .max_keysize = AES_MAX_KEY_SIZE, 1026 + .setkey = aspeed_aes_setkey, 1027 + .encrypt = aspeed_aes_ctr_encrypt, 1028 + .decrypt = aspeed_aes_ctr_decrypt, 1029 + .init = aspeed_crypto_cra_init, 1030 + .exit = aspeed_crypto_cra_exit, 1031 + .base = { 1032 + .cra_name = "ctr(aes)", 1033 + .cra_driver_name = "aspeed-ctr-aes", 1034 + .cra_priority = 300, 1035 + .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | 1036 + CRYPTO_ALG_ASYNC, 1037 + .cra_blocksize = 1, 1038 + .cra_ctxsize = sizeof(struct aspeed_cipher_ctx), 1039 + .cra_alignmask = 0x0f, 1040 + .cra_module = THIS_MODULE, 1041 + } 1042 + } 1043 + }, 1044 + { 1045 + .alg.skcipher = { 1046 + .ivsize = DES_BLOCK_SIZE, 1047 + .min_keysize = DES_KEY_SIZE, 1048 + .max_keysize = DES_KEY_SIZE, 1049 + .setkey = aspeed_des_setkey, 1050 + .encrypt = aspeed_des_ctr_encrypt, 1051 + .decrypt = aspeed_des_ctr_decrypt, 1052 + .init = aspeed_crypto_cra_init, 1053 + .exit = aspeed_crypto_cra_exit, 1054 + .base = { 1055 + .cra_name = "ctr(des)", 1056 + .cra_driver_name = "aspeed-ctr-des", 1057 + .cra_priority = 300, 1058 + .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | 1059 + CRYPTO_ALG_ASYNC, 1060 + .cra_blocksize = 1, 1061 + .cra_ctxsize = sizeof(struct aspeed_cipher_ctx), 1062 + .cra_alignmask = 0x0f, 1063 + .cra_module = THIS_MODULE, 1064 + } 1065 + } 1066 + }, 1067 + { 1068 + .alg.skcipher = { 1069 + .ivsize = DES_BLOCK_SIZE, 1070 + .min_keysize = DES3_EDE_KEY_SIZE, 1071 + .max_keysize = DES3_EDE_KEY_SIZE, 1072 + .setkey = aspeed_des_setkey, 1073 + .encrypt = aspeed_tdes_ctr_encrypt, 1074 + .decrypt = aspeed_tdes_ctr_decrypt, 1075 + .init = aspeed_crypto_cra_init, 1076 + .exit = aspeed_crypto_cra_exit, 1077 + .base = { 1078 + .cra_name = "ctr(des3_ede)", 1079 + .cra_driver_name = "aspeed-ctr-tdes", 1080 + .cra_priority = 300, 1081 + .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | 1082 + CRYPTO_ALG_ASYNC, 1083 + .cra_blocksize = 1, 1084 + .cra_ctxsize = sizeof(struct aspeed_cipher_ctx), 1085 + .cra_alignmask = 0x0f, 1086 + .cra_module = THIS_MODULE, 1087 + } 1088 + } 1089 + }, 1090 + 1091 + }; 1092 + 1093 + void aspeed_unregister_hace_crypto_algs(struct aspeed_hace_dev *hace_dev) 1094 + { 1095 + int i; 1096 + 1097 + for (i = 0; i < ARRAY_SIZE(aspeed_crypto_algs); i++) 1098 + crypto_unregister_skcipher(&aspeed_crypto_algs[i].alg.skcipher); 1099 + 1100 + if (hace_dev->version != AST2600_VERSION) 1101 + return; 1102 + 1103 + for (i = 0; i < ARRAY_SIZE(aspeed_crypto_algs_g6); i++) 1104 + crypto_unregister_skcipher(&aspeed_crypto_algs_g6[i].alg.skcipher); 1105 + } 1106 + 1107 + void aspeed_register_hace_crypto_algs(struct aspeed_hace_dev *hace_dev) 1108 + { 1109 + int rc, i; 1110 + 1111 + CIPHER_DBG(hace_dev, "\n"); 1112 + 1113 + for (i = 0; i < ARRAY_SIZE(aspeed_crypto_algs); i++) { 1114 + aspeed_crypto_algs[i].hace_dev = hace_dev; 1115 + rc = crypto_register_skcipher(&aspeed_crypto_algs[i].alg.skcipher); 1116 + if (rc) { 1117 + CIPHER_DBG(hace_dev, "Failed to register %s\n", 1118 + aspeed_crypto_algs[i].alg.skcipher.base.cra_name); 1119 + } 1120 + } 1121 + 1122 + if (hace_dev->version != AST2600_VERSION) 1123 + return; 1124 + 1125 + for (i = 0; i < ARRAY_SIZE(aspeed_crypto_algs_g6); i++) { 1126 + aspeed_crypto_algs_g6[i].hace_dev = hace_dev; 1127 + rc = crypto_register_skcipher(&aspeed_crypto_algs_g6[i].alg.skcipher); 1128 + if (rc) { 1129 + CIPHER_DBG(hace_dev, 
"Failed to register %s\n", 1130 + aspeed_crypto_algs_g6[i].alg.skcipher.base.cra_name); 1131 + } 1132 + } 1133 + }
+1391
drivers/crypto/aspeed/aspeed-hace-hash.c
··· 1 + // SPDX-License-Identifier: GPL-2.0+ 2 + /* 3 + * Copyright (c) 2021 Aspeed Technology Inc. 4 + */ 5 + 6 + #include "aspeed-hace.h" 7 + 8 + #ifdef CONFIG_CRYPTO_DEV_ASPEED_DEBUG 9 + #define AHASH_DBG(h, fmt, ...) \ 10 + dev_info((h)->dev, "%s() " fmt, __func__, ##__VA_ARGS__) 11 + #else 12 + #define AHASH_DBG(h, fmt, ...) \ 13 + dev_dbg((h)->dev, "%s() " fmt, __func__, ##__VA_ARGS__) 14 + #endif 15 + 16 + /* Initialization Vectors for SHA-family */ 17 + static const __be32 sha1_iv[8] = { 18 + cpu_to_be32(SHA1_H0), cpu_to_be32(SHA1_H1), 19 + cpu_to_be32(SHA1_H2), cpu_to_be32(SHA1_H3), 20 + cpu_to_be32(SHA1_H4), 0, 0, 0 21 + }; 22 + 23 + static const __be32 sha224_iv[8] = { 24 + cpu_to_be32(SHA224_H0), cpu_to_be32(SHA224_H1), 25 + cpu_to_be32(SHA224_H2), cpu_to_be32(SHA224_H3), 26 + cpu_to_be32(SHA224_H4), cpu_to_be32(SHA224_H5), 27 + cpu_to_be32(SHA224_H6), cpu_to_be32(SHA224_H7), 28 + }; 29 + 30 + static const __be32 sha256_iv[8] = { 31 + cpu_to_be32(SHA256_H0), cpu_to_be32(SHA256_H1), 32 + cpu_to_be32(SHA256_H2), cpu_to_be32(SHA256_H3), 33 + cpu_to_be32(SHA256_H4), cpu_to_be32(SHA256_H5), 34 + cpu_to_be32(SHA256_H6), cpu_to_be32(SHA256_H7), 35 + }; 36 + 37 + static const __be64 sha384_iv[8] = { 38 + cpu_to_be64(SHA384_H0), cpu_to_be64(SHA384_H1), 39 + cpu_to_be64(SHA384_H2), cpu_to_be64(SHA384_H3), 40 + cpu_to_be64(SHA384_H4), cpu_to_be64(SHA384_H5), 41 + cpu_to_be64(SHA384_H6), cpu_to_be64(SHA384_H7) 42 + }; 43 + 44 + static const __be64 sha512_iv[8] = { 45 + cpu_to_be64(SHA512_H0), cpu_to_be64(SHA512_H1), 46 + cpu_to_be64(SHA512_H2), cpu_to_be64(SHA512_H3), 47 + cpu_to_be64(SHA512_H4), cpu_to_be64(SHA512_H5), 48 + cpu_to_be64(SHA512_H6), cpu_to_be64(SHA512_H7) 49 + }; 50 + 51 + static const __be32 sha512_224_iv[16] = { 52 + cpu_to_be32(0xC8373D8CUL), cpu_to_be32(0xA24D5419UL), 53 + cpu_to_be32(0x6699E173UL), cpu_to_be32(0xD6D4DC89UL), 54 + cpu_to_be32(0xAEB7FA1DUL), cpu_to_be32(0x829CFF32UL), 55 + cpu_to_be32(0x14D59D67UL), cpu_to_be32(0xCF9F2F58UL), 56 + cpu_to_be32(0x692B6D0FUL), cpu_to_be32(0xA84DD47BUL), 57 + cpu_to_be32(0x736FE377UL), cpu_to_be32(0x4289C404UL), 58 + cpu_to_be32(0xA8859D3FUL), cpu_to_be32(0xC8361D6AUL), 59 + cpu_to_be32(0xADE61211UL), cpu_to_be32(0xA192D691UL) 60 + }; 61 + 62 + static const __be32 sha512_256_iv[16] = { 63 + cpu_to_be32(0x94213122UL), cpu_to_be32(0x2CF72BFCUL), 64 + cpu_to_be32(0xA35F559FUL), cpu_to_be32(0xC2644CC8UL), 65 + cpu_to_be32(0x6BB89323UL), cpu_to_be32(0x51B1536FUL), 66 + cpu_to_be32(0x19773896UL), cpu_to_be32(0xBDEA4059UL), 67 + cpu_to_be32(0xE23E2896UL), cpu_to_be32(0xE3FF8EA8UL), 68 + cpu_to_be32(0x251E5EBEUL), cpu_to_be32(0x92398653UL), 69 + cpu_to_be32(0xFC99012BUL), cpu_to_be32(0xAAB8852CUL), 70 + cpu_to_be32(0xDC2DB70EUL), cpu_to_be32(0xA22CC581UL) 71 + }; 72 + 73 + /* The purpose of this padding is to ensure that the padded message is a 74 + * multiple of 512 bits (SHA1/SHA224/SHA256) or 1024 bits (SHA384/SHA512). 75 + * The bit "1" is appended at the end of the message followed by 76 + * "padlen-1" zero bits. Then a 64 bits block (SHA1/SHA224/SHA256) or 77 + * 128 bits block (SHA384/SHA512) equals to the message length in bits 78 + * is appended. 
79 + * 80 + * For SHA1/SHA224/SHA256, padlen is calculated as followed: 81 + * - if message length < 56 bytes then padlen = 56 - message length 82 + * - else padlen = 64 + 56 - message length 83 + * 84 + * For SHA384/SHA512, padlen is calculated as followed: 85 + * - if message length < 112 bytes then padlen = 112 - message length 86 + * - else padlen = 128 + 112 - message length 87 + */ 88 + static void aspeed_ahash_fill_padding(struct aspeed_hace_dev *hace_dev, 89 + struct aspeed_sham_reqctx *rctx) 90 + { 91 + unsigned int index, padlen; 92 + __be64 bits[2]; 93 + 94 + AHASH_DBG(hace_dev, "rctx flags:0x%x\n", (u32)rctx->flags); 95 + 96 + switch (rctx->flags & SHA_FLAGS_MASK) { 97 + case SHA_FLAGS_SHA1: 98 + case SHA_FLAGS_SHA224: 99 + case SHA_FLAGS_SHA256: 100 + bits[0] = cpu_to_be64(rctx->digcnt[0] << 3); 101 + index = rctx->bufcnt & 0x3f; 102 + padlen = (index < 56) ? (56 - index) : ((64 + 56) - index); 103 + *(rctx->buffer + rctx->bufcnt) = 0x80; 104 + memset(rctx->buffer + rctx->bufcnt + 1, 0, padlen - 1); 105 + memcpy(rctx->buffer + rctx->bufcnt + padlen, bits, 8); 106 + rctx->bufcnt += padlen + 8; 107 + break; 108 + default: 109 + bits[1] = cpu_to_be64(rctx->digcnt[0] << 3); 110 + bits[0] = cpu_to_be64(rctx->digcnt[1] << 3 | 111 + rctx->digcnt[0] >> 61); 112 + index = rctx->bufcnt & 0x7f; 113 + padlen = (index < 112) ? (112 - index) : ((128 + 112) - index); 114 + *(rctx->buffer + rctx->bufcnt) = 0x80; 115 + memset(rctx->buffer + rctx->bufcnt + 1, 0, padlen - 1); 116 + memcpy(rctx->buffer + rctx->bufcnt + padlen, bits, 16); 117 + rctx->bufcnt += padlen + 16; 118 + break; 119 + } 120 + } 121 + 122 + /* 123 + * Prepare DMA buffer before hardware engine 124 + * processing. 125 + */ 126 + static int aspeed_ahash_dma_prepare(struct aspeed_hace_dev *hace_dev) 127 + { 128 + struct aspeed_engine_hash *hash_engine = &hace_dev->hash_engine; 129 + struct ahash_request *req = hash_engine->req; 130 + struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req); 131 + int length, remain; 132 + 133 + length = rctx->total + rctx->bufcnt; 134 + remain = length % rctx->block_size; 135 + 136 + AHASH_DBG(hace_dev, "length:0x%x, remain:0x%x\n", length, remain); 137 + 138 + if (rctx->bufcnt) 139 + memcpy(hash_engine->ahash_src_addr, rctx->buffer, rctx->bufcnt); 140 + 141 + if (rctx->total + rctx->bufcnt < ASPEED_CRYPTO_SRC_DMA_BUF_LEN) { 142 + scatterwalk_map_and_copy(hash_engine->ahash_src_addr + 143 + rctx->bufcnt, rctx->src_sg, 144 + rctx->offset, rctx->total - remain, 0); 145 + rctx->offset += rctx->total - remain; 146 + 147 + } else { 148 + dev_warn(hace_dev->dev, "Hash data length is too large\n"); 149 + return -EINVAL; 150 + } 151 + 152 + scatterwalk_map_and_copy(rctx->buffer, rctx->src_sg, 153 + rctx->offset, remain, 0); 154 + 155 + rctx->bufcnt = remain; 156 + rctx->digest_dma_addr = dma_map_single(hace_dev->dev, rctx->digest, 157 + SHA512_DIGEST_SIZE, 158 + DMA_BIDIRECTIONAL); 159 + if (dma_mapping_error(hace_dev->dev, rctx->digest_dma_addr)) { 160 + dev_warn(hace_dev->dev, "dma_map() rctx digest error\n"); 161 + return -ENOMEM; 162 + } 163 + 164 + hash_engine->src_length = length - remain; 165 + hash_engine->src_dma = hash_engine->ahash_src_dma_addr; 166 + hash_engine->digest_dma = rctx->digest_dma_addr; 167 + 168 + return 0; 169 + } 170 + 171 + /* 172 + * Prepare DMA buffer as SG list buffer before 173 + * hardware engine processing. 
174 + */ 175 + static int aspeed_ahash_dma_prepare_sg(struct aspeed_hace_dev *hace_dev) 176 + { 177 + struct aspeed_engine_hash *hash_engine = &hace_dev->hash_engine; 178 + struct ahash_request *req = hash_engine->req; 179 + struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req); 180 + struct aspeed_sg_list *src_list; 181 + struct scatterlist *s; 182 + int length, remain, sg_len, i; 183 + int rc = 0; 184 + 185 + remain = (rctx->total + rctx->bufcnt) % rctx->block_size; 186 + length = rctx->total + rctx->bufcnt - remain; 187 + 188 + AHASH_DBG(hace_dev, "%s:0x%x, %s:%zu, %s:0x%x, %s:0x%x\n", 189 + "rctx total", rctx->total, "bufcnt", rctx->bufcnt, 190 + "length", length, "remain", remain); 191 + 192 + sg_len = dma_map_sg(hace_dev->dev, rctx->src_sg, rctx->src_nents, 193 + DMA_TO_DEVICE); 194 + if (!sg_len) { 195 + dev_warn(hace_dev->dev, "dma_map_sg() src error\n"); 196 + rc = -ENOMEM; 197 + goto end; 198 + } 199 + 200 + src_list = (struct aspeed_sg_list *)hash_engine->ahash_src_addr; 201 + rctx->digest_dma_addr = dma_map_single(hace_dev->dev, rctx->digest, 202 + SHA512_DIGEST_SIZE, 203 + DMA_BIDIRECTIONAL); 204 + if (dma_mapping_error(hace_dev->dev, rctx->digest_dma_addr)) { 205 + dev_warn(hace_dev->dev, "dma_map() rctx digest error\n"); 206 + rc = -ENOMEM; 207 + goto free_src_sg; 208 + } 209 + 210 + if (rctx->bufcnt != 0) { 211 + u32 phy_addr; 212 + u32 len; 213 + 214 + rctx->buffer_dma_addr = dma_map_single(hace_dev->dev, 215 + rctx->buffer, 216 + rctx->block_size * 2, 217 + DMA_TO_DEVICE); 218 + if (dma_mapping_error(hace_dev->dev, rctx->buffer_dma_addr)) { 219 + dev_warn(hace_dev->dev, "dma_map() rctx buffer error\n"); 220 + rc = -ENOMEM; 221 + goto free_rctx_digest; 222 + } 223 + 224 + phy_addr = rctx->buffer_dma_addr; 225 + len = rctx->bufcnt; 226 + length -= len; 227 + 228 + /* Last sg list */ 229 + if (length == 0) 230 + len |= HASH_SG_LAST_LIST; 231 + 232 + src_list[0].phy_addr = cpu_to_le32(phy_addr); 233 + src_list[0].len = cpu_to_le32(len); 234 + src_list++; 235 + } 236 + 237 + if (length != 0) { 238 + for_each_sg(rctx->src_sg, s, sg_len, i) { 239 + u32 phy_addr = sg_dma_address(s); 240 + u32 len = sg_dma_len(s); 241 + 242 + if (length > len) 243 + length -= len; 244 + else { 245 + /* Last sg list */ 246 + len = length; 247 + len |= HASH_SG_LAST_LIST; 248 + length = 0; 249 + } 250 + 251 + src_list[i].phy_addr = cpu_to_le32(phy_addr); 252 + src_list[i].len = cpu_to_le32(len); 253 + } 254 + } 255 + 256 + if (length != 0) { 257 + rc = -EINVAL; 258 + goto free_rctx_buffer; 259 + } 260 + 261 + rctx->offset = rctx->total - remain; 262 + hash_engine->src_length = rctx->total + rctx->bufcnt - remain; 263 + hash_engine->src_dma = hash_engine->ahash_src_dma_addr; 264 + hash_engine->digest_dma = rctx->digest_dma_addr; 265 + 266 + return 0; 267 + 268 + free_rctx_buffer: 269 + if (rctx->bufcnt != 0) 270 + dma_unmap_single(hace_dev->dev, rctx->buffer_dma_addr, 271 + rctx->block_size * 2, DMA_TO_DEVICE); 272 + free_rctx_digest: 273 + dma_unmap_single(hace_dev->dev, rctx->digest_dma_addr, 274 + SHA512_DIGEST_SIZE, DMA_BIDIRECTIONAL); 275 + free_src_sg: 276 + dma_unmap_sg(hace_dev->dev, rctx->src_sg, rctx->src_nents, 277 + DMA_TO_DEVICE); 278 + end: 279 + return rc; 280 + } 281 + 282 + static int aspeed_ahash_complete(struct aspeed_hace_dev *hace_dev) 283 + { 284 + struct aspeed_engine_hash *hash_engine = &hace_dev->hash_engine; 285 + struct ahash_request *req = hash_engine->req; 286 + 287 + AHASH_DBG(hace_dev, "\n"); 288 + 289 + hash_engine->flags &= ~CRYPTO_FLAGS_BUSY; 290 + 291 + 
crypto_finalize_hash_request(hace_dev->crypt_engine_hash, req, 0); 292 + 293 + return 0; 294 + } 295 + 296 + /* 297 + * Copy digest to the corresponding request result. 298 + * This function will be called at final() stage. 299 + */ 300 + static int aspeed_ahash_transfer(struct aspeed_hace_dev *hace_dev) 301 + { 302 + struct aspeed_engine_hash *hash_engine = &hace_dev->hash_engine; 303 + struct ahash_request *req = hash_engine->req; 304 + struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req); 305 + 306 + AHASH_DBG(hace_dev, "\n"); 307 + 308 + dma_unmap_single(hace_dev->dev, rctx->digest_dma_addr, 309 + SHA512_DIGEST_SIZE, DMA_BIDIRECTIONAL); 310 + 311 + dma_unmap_single(hace_dev->dev, rctx->buffer_dma_addr, 312 + rctx->block_size * 2, DMA_TO_DEVICE); 313 + 314 + memcpy(req->result, rctx->digest, rctx->digsize); 315 + 316 + return aspeed_ahash_complete(hace_dev); 317 + } 318 + 319 + /* 320 + * Trigger hardware engines to do the math. 321 + */ 322 + static int aspeed_hace_ahash_trigger(struct aspeed_hace_dev *hace_dev, 323 + aspeed_hace_fn_t resume) 324 + { 325 + struct aspeed_engine_hash *hash_engine = &hace_dev->hash_engine; 326 + struct ahash_request *req = hash_engine->req; 327 + struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req); 328 + 329 + AHASH_DBG(hace_dev, "src_dma:%pad, digest_dma:%pad, length:%zu\n", 330 + &hash_engine->src_dma, &hash_engine->digest_dma, 331 + hash_engine->src_length); 332 + 333 + rctx->cmd |= HASH_CMD_INT_ENABLE; 334 + hash_engine->resume = resume; 335 + 336 + ast_hace_write(hace_dev, hash_engine->src_dma, ASPEED_HACE_HASH_SRC); 337 + ast_hace_write(hace_dev, hash_engine->digest_dma, 338 + ASPEED_HACE_HASH_DIGEST_BUFF); 339 + ast_hace_write(hace_dev, hash_engine->digest_dma, 340 + ASPEED_HACE_HASH_KEY_BUFF); 341 + ast_hace_write(hace_dev, hash_engine->src_length, 342 + ASPEED_HACE_HASH_DATA_LEN); 343 + 344 + /* Memory barrier to ensure all data setup before engine starts */ 345 + mb(); 346 + 347 + ast_hace_write(hace_dev, rctx->cmd, ASPEED_HACE_HASH_CMD); 348 + 349 + return -EINPROGRESS; 350 + } 351 + 352 + /* 353 + * HMAC resume aims to do the second pass produces 354 + * the final HMAC code derived from the inner hash 355 + * result and the outer key. 
356 + */ 357 + static int aspeed_ahash_hmac_resume(struct aspeed_hace_dev *hace_dev) 358 + { 359 + struct aspeed_engine_hash *hash_engine = &hace_dev->hash_engine; 360 + struct ahash_request *req = hash_engine->req; 361 + struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req); 362 + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); 363 + struct aspeed_sham_ctx *tctx = crypto_ahash_ctx(tfm); 364 + struct aspeed_sha_hmac_ctx *bctx = tctx->base; 365 + int rc = 0; 366 + 367 + AHASH_DBG(hace_dev, "\n"); 368 + 369 + dma_unmap_single(hace_dev->dev, rctx->digest_dma_addr, 370 + SHA512_DIGEST_SIZE, DMA_BIDIRECTIONAL); 371 + 372 + dma_unmap_single(hace_dev->dev, rctx->buffer_dma_addr, 373 + rctx->block_size * 2, DMA_TO_DEVICE); 374 + 375 + /* o key pad + hash sum 1 */ 376 + memcpy(rctx->buffer, bctx->opad, rctx->block_size); 377 + memcpy(rctx->buffer + rctx->block_size, rctx->digest, rctx->digsize); 378 + 379 + rctx->bufcnt = rctx->block_size + rctx->digsize; 380 + rctx->digcnt[0] = rctx->block_size + rctx->digsize; 381 + 382 + aspeed_ahash_fill_padding(hace_dev, rctx); 383 + memcpy(rctx->digest, rctx->sha_iv, rctx->ivsize); 384 + 385 + rctx->digest_dma_addr = dma_map_single(hace_dev->dev, rctx->digest, 386 + SHA512_DIGEST_SIZE, 387 + DMA_BIDIRECTIONAL); 388 + if (dma_mapping_error(hace_dev->dev, rctx->digest_dma_addr)) { 389 + dev_warn(hace_dev->dev, "dma_map() rctx digest error\n"); 390 + rc = -ENOMEM; 391 + goto end; 392 + } 393 + 394 + rctx->buffer_dma_addr = dma_map_single(hace_dev->dev, rctx->buffer, 395 + rctx->block_size * 2, 396 + DMA_TO_DEVICE); 397 + if (dma_mapping_error(hace_dev->dev, rctx->buffer_dma_addr)) { 398 + dev_warn(hace_dev->dev, "dma_map() rctx buffer error\n"); 399 + rc = -ENOMEM; 400 + goto free_rctx_digest; 401 + } 402 + 403 + hash_engine->src_dma = rctx->buffer_dma_addr; 404 + hash_engine->src_length = rctx->bufcnt; 405 + hash_engine->digest_dma = rctx->digest_dma_addr; 406 + 407 + return aspeed_hace_ahash_trigger(hace_dev, aspeed_ahash_transfer); 408 + 409 + free_rctx_digest: 410 + dma_unmap_single(hace_dev->dev, rctx->digest_dma_addr, 411 + SHA512_DIGEST_SIZE, DMA_BIDIRECTIONAL); 412 + end: 413 + return rc; 414 + } 415 + 416 + static int aspeed_ahash_req_final(struct aspeed_hace_dev *hace_dev) 417 + { 418 + struct aspeed_engine_hash *hash_engine = &hace_dev->hash_engine; 419 + struct ahash_request *req = hash_engine->req; 420 + struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req); 421 + int rc = 0; 422 + 423 + AHASH_DBG(hace_dev, "\n"); 424 + 425 + aspeed_ahash_fill_padding(hace_dev, rctx); 426 + 427 + rctx->digest_dma_addr = dma_map_single(hace_dev->dev, 428 + rctx->digest, 429 + SHA512_DIGEST_SIZE, 430 + DMA_BIDIRECTIONAL); 431 + if (dma_mapping_error(hace_dev->dev, rctx->digest_dma_addr)) { 432 + dev_warn(hace_dev->dev, "dma_map() rctx digest error\n"); 433 + rc = -ENOMEM; 434 + goto end; 435 + } 436 + 437 + rctx->buffer_dma_addr = dma_map_single(hace_dev->dev, 438 + rctx->buffer, 439 + rctx->block_size * 2, 440 + DMA_TO_DEVICE); 441 + if (dma_mapping_error(hace_dev->dev, rctx->buffer_dma_addr)) { 442 + dev_warn(hace_dev->dev, "dma_map() rctx buffer error\n"); 443 + rc = -ENOMEM; 444 + goto free_rctx_digest; 445 + } 446 + 447 + hash_engine->src_dma = rctx->buffer_dma_addr; 448 + hash_engine->src_length = rctx->bufcnt; 449 + hash_engine->digest_dma = rctx->digest_dma_addr; 450 + 451 + if (rctx->flags & SHA_FLAGS_HMAC) 452 + return aspeed_hace_ahash_trigger(hace_dev, 453 + aspeed_ahash_hmac_resume); 454 + 455 + return aspeed_hace_ahash_trigger(hace_dev, 
aspeed_ahash_transfer); 456 + 457 + free_rctx_digest: 458 + dma_unmap_single(hace_dev->dev, rctx->digest_dma_addr, 459 + SHA512_DIGEST_SIZE, DMA_BIDIRECTIONAL); 460 + end: 461 + return rc; 462 + } 463 + 464 + static int aspeed_ahash_update_resume_sg(struct aspeed_hace_dev *hace_dev) 465 + { 466 + struct aspeed_engine_hash *hash_engine = &hace_dev->hash_engine; 467 + struct ahash_request *req = hash_engine->req; 468 + struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req); 469 + 470 + AHASH_DBG(hace_dev, "\n"); 471 + 472 + dma_unmap_sg(hace_dev->dev, rctx->src_sg, rctx->src_nents, 473 + DMA_TO_DEVICE); 474 + 475 + if (rctx->bufcnt != 0) 476 + dma_unmap_single(hace_dev->dev, rctx->buffer_dma_addr, 477 + rctx->block_size * 2, 478 + DMA_TO_DEVICE); 479 + 480 + dma_unmap_single(hace_dev->dev, rctx->digest_dma_addr, 481 + SHA512_DIGEST_SIZE, DMA_BIDIRECTIONAL); 482 + 483 + scatterwalk_map_and_copy(rctx->buffer, rctx->src_sg, rctx->offset, 484 + rctx->total - rctx->offset, 0); 485 + 486 + rctx->bufcnt = rctx->total - rctx->offset; 487 + rctx->cmd &= ~HASH_CMD_HASH_SRC_SG_CTRL; 488 + 489 + if (rctx->flags & SHA_FLAGS_FINUP) 490 + return aspeed_ahash_req_final(hace_dev); 491 + 492 + return aspeed_ahash_complete(hace_dev); 493 + } 494 + 495 + static int aspeed_ahash_update_resume(struct aspeed_hace_dev *hace_dev) 496 + { 497 + struct aspeed_engine_hash *hash_engine = &hace_dev->hash_engine; 498 + struct ahash_request *req = hash_engine->req; 499 + struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req); 500 + 501 + AHASH_DBG(hace_dev, "\n"); 502 + 503 + dma_unmap_single(hace_dev->dev, rctx->digest_dma_addr, 504 + SHA512_DIGEST_SIZE, DMA_BIDIRECTIONAL); 505 + 506 + if (rctx->flags & SHA_FLAGS_FINUP) 507 + return aspeed_ahash_req_final(hace_dev); 508 + 509 + return aspeed_ahash_complete(hace_dev); 510 + } 511 + 512 + static int aspeed_ahash_req_update(struct aspeed_hace_dev *hace_dev) 513 + { 514 + struct aspeed_engine_hash *hash_engine = &hace_dev->hash_engine; 515 + struct ahash_request *req = hash_engine->req; 516 + struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req); 517 + aspeed_hace_fn_t resume; 518 + int ret; 519 + 520 + AHASH_DBG(hace_dev, "\n"); 521 + 522 + if (hace_dev->version == AST2600_VERSION) { 523 + rctx->cmd |= HASH_CMD_HASH_SRC_SG_CTRL; 524 + resume = aspeed_ahash_update_resume_sg; 525 + 526 + } else { 527 + resume = aspeed_ahash_update_resume; 528 + } 529 + 530 + ret = hash_engine->dma_prepare(hace_dev); 531 + if (ret) 532 + return ret; 533 + 534 + return aspeed_hace_ahash_trigger(hace_dev, resume); 535 + } 536 + 537 + static int aspeed_hace_hash_handle_queue(struct aspeed_hace_dev *hace_dev, 538 + struct ahash_request *req) 539 + { 540 + return crypto_transfer_hash_request_to_engine( 541 + hace_dev->crypt_engine_hash, req); 542 + } 543 + 544 + static int aspeed_ahash_do_request(struct crypto_engine *engine, void *areq) 545 + { 546 + struct ahash_request *req = ahash_request_cast(areq); 547 + struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req); 548 + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); 549 + struct aspeed_sham_ctx *tctx = crypto_ahash_ctx(tfm); 550 + struct aspeed_hace_dev *hace_dev = tctx->hace_dev; 551 + struct aspeed_engine_hash *hash_engine; 552 + int ret = 0; 553 + 554 + hash_engine = &hace_dev->hash_engine; 555 + hash_engine->flags |= CRYPTO_FLAGS_BUSY; 556 + 557 + if (rctx->op == SHA_OP_UPDATE) 558 + ret = aspeed_ahash_req_update(hace_dev); 559 + else if (rctx->op == SHA_OP_FINAL) 560 + ret = aspeed_ahash_req_final(hace_dev); 561 + 562 + if (ret 
!= -EINPROGRESS) 563 + return ret; 564 + 565 + return 0; 566 + } 567 + 568 + static int aspeed_ahash_prepare_request(struct crypto_engine *engine, 569 + void *areq) 570 + { 571 + struct ahash_request *req = ahash_request_cast(areq); 572 + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); 573 + struct aspeed_sham_ctx *tctx = crypto_ahash_ctx(tfm); 574 + struct aspeed_hace_dev *hace_dev = tctx->hace_dev; 575 + struct aspeed_engine_hash *hash_engine; 576 + 577 + hash_engine = &hace_dev->hash_engine; 578 + hash_engine->req = req; 579 + 580 + if (hace_dev->version == AST2600_VERSION) 581 + hash_engine->dma_prepare = aspeed_ahash_dma_prepare_sg; 582 + else 583 + hash_engine->dma_prepare = aspeed_ahash_dma_prepare; 584 + 585 + return 0; 586 + } 587 + 588 + static int aspeed_sham_update(struct ahash_request *req) 589 + { 590 + struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req); 591 + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); 592 + struct aspeed_sham_ctx *tctx = crypto_ahash_ctx(tfm); 593 + struct aspeed_hace_dev *hace_dev = tctx->hace_dev; 594 + 595 + AHASH_DBG(hace_dev, "req->nbytes: %d\n", req->nbytes); 596 + 597 + rctx->total = req->nbytes; 598 + rctx->src_sg = req->src; 599 + rctx->offset = 0; 600 + rctx->src_nents = sg_nents(req->src); 601 + rctx->op = SHA_OP_UPDATE; 602 + 603 + rctx->digcnt[0] += rctx->total; 604 + if (rctx->digcnt[0] < rctx->total) 605 + rctx->digcnt[1]++; 606 + 607 + if (rctx->bufcnt + rctx->total < rctx->block_size) { 608 + scatterwalk_map_and_copy(rctx->buffer + rctx->bufcnt, 609 + rctx->src_sg, rctx->offset, 610 + rctx->total, 0); 611 + rctx->bufcnt += rctx->total; 612 + 613 + return 0; 614 + } 615 + 616 + return aspeed_hace_hash_handle_queue(hace_dev, req); 617 + } 618 + 619 + static int aspeed_sham_shash_digest(struct crypto_shash *tfm, u32 flags, 620 + const u8 *data, unsigned int len, u8 *out) 621 + { 622 + SHASH_DESC_ON_STACK(shash, tfm); 623 + 624 + shash->tfm = tfm; 625 + 626 + return crypto_shash_digest(shash, data, len, out); 627 + } 628 + 629 + static int aspeed_sham_final(struct ahash_request *req) 630 + { 631 + struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req); 632 + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); 633 + struct aspeed_sham_ctx *tctx = crypto_ahash_ctx(tfm); 634 + struct aspeed_hace_dev *hace_dev = tctx->hace_dev; 635 + 636 + AHASH_DBG(hace_dev, "req->nbytes:%d, rctx->total:%d\n", 637 + req->nbytes, rctx->total); 638 + rctx->op = SHA_OP_FINAL; 639 + 640 + return aspeed_hace_hash_handle_queue(hace_dev, req); 641 + } 642 + 643 + static int aspeed_sham_finup(struct ahash_request *req) 644 + { 645 + struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req); 646 + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); 647 + struct aspeed_sham_ctx *tctx = crypto_ahash_ctx(tfm); 648 + struct aspeed_hace_dev *hace_dev = tctx->hace_dev; 649 + int rc1, rc2; 650 + 651 + AHASH_DBG(hace_dev, "req->nbytes: %d\n", req->nbytes); 652 + 653 + rctx->flags |= SHA_FLAGS_FINUP; 654 + 655 + rc1 = aspeed_sham_update(req); 656 + if (rc1 == -EINPROGRESS || rc1 == -EBUSY) 657 + return rc1; 658 + 659 + /* 660 + * final() has to be always called to cleanup resources 661 + * even if update() failed, except EINPROGRESS 662 + */ 663 + rc2 = aspeed_sham_final(req); 664 + 665 + return rc1 ? 
: rc2; 666 + } 667 + 668 + static int aspeed_sham_init(struct ahash_request *req) 669 + { 670 + struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req); 671 + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); 672 + struct aspeed_sham_ctx *tctx = crypto_ahash_ctx(tfm); 673 + struct aspeed_hace_dev *hace_dev = tctx->hace_dev; 674 + struct aspeed_sha_hmac_ctx *bctx = tctx->base; 675 + 676 + AHASH_DBG(hace_dev, "%s: digest size:%d\n", 677 + crypto_tfm_alg_name(&tfm->base), 678 + crypto_ahash_digestsize(tfm)); 679 + 680 + rctx->cmd = HASH_CMD_ACC_MODE; 681 + rctx->flags = 0; 682 + 683 + switch (crypto_ahash_digestsize(tfm)) { 684 + case SHA1_DIGEST_SIZE: 685 + rctx->cmd |= HASH_CMD_SHA1 | HASH_CMD_SHA_SWAP; 686 + rctx->flags |= SHA_FLAGS_SHA1; 687 + rctx->digsize = SHA1_DIGEST_SIZE; 688 + rctx->block_size = SHA1_BLOCK_SIZE; 689 + rctx->sha_iv = sha1_iv; 690 + rctx->ivsize = 32; 691 + memcpy(rctx->digest, sha1_iv, rctx->ivsize); 692 + break; 693 + case SHA224_DIGEST_SIZE: 694 + rctx->cmd |= HASH_CMD_SHA224 | HASH_CMD_SHA_SWAP; 695 + rctx->flags |= SHA_FLAGS_SHA224; 696 + rctx->digsize = SHA224_DIGEST_SIZE; 697 + rctx->block_size = SHA224_BLOCK_SIZE; 698 + rctx->sha_iv = sha224_iv; 699 + rctx->ivsize = 32; 700 + memcpy(rctx->digest, sha224_iv, rctx->ivsize); 701 + break; 702 + case SHA256_DIGEST_SIZE: 703 + rctx->cmd |= HASH_CMD_SHA256 | HASH_CMD_SHA_SWAP; 704 + rctx->flags |= SHA_FLAGS_SHA256; 705 + rctx->digsize = SHA256_DIGEST_SIZE; 706 + rctx->block_size = SHA256_BLOCK_SIZE; 707 + rctx->sha_iv = sha256_iv; 708 + rctx->ivsize = 32; 709 + memcpy(rctx->digest, sha256_iv, rctx->ivsize); 710 + break; 711 + case SHA384_DIGEST_SIZE: 712 + rctx->cmd |= HASH_CMD_SHA512_SER | HASH_CMD_SHA384 | 713 + HASH_CMD_SHA_SWAP; 714 + rctx->flags |= SHA_FLAGS_SHA384; 715 + rctx->digsize = SHA384_DIGEST_SIZE; 716 + rctx->block_size = SHA384_BLOCK_SIZE; 717 + rctx->sha_iv = (const __be32 *)sha384_iv; 718 + rctx->ivsize = 64; 719 + memcpy(rctx->digest, sha384_iv, rctx->ivsize); 720 + break; 721 + case SHA512_DIGEST_SIZE: 722 + rctx->cmd |= HASH_CMD_SHA512_SER | HASH_CMD_SHA512 | 723 + HASH_CMD_SHA_SWAP; 724 + rctx->flags |= SHA_FLAGS_SHA512; 725 + rctx->digsize = SHA512_DIGEST_SIZE; 726 + rctx->block_size = SHA512_BLOCK_SIZE; 727 + rctx->sha_iv = (const __be32 *)sha512_iv; 728 + rctx->ivsize = 64; 729 + memcpy(rctx->digest, sha512_iv, rctx->ivsize); 730 + break; 731 + default: 732 + dev_warn(tctx->hace_dev->dev, "digest size %d not support\n", 733 + crypto_ahash_digestsize(tfm)); 734 + return -EINVAL; 735 + } 736 + 737 + rctx->bufcnt = 0; 738 + rctx->total = 0; 739 + rctx->digcnt[0] = 0; 740 + rctx->digcnt[1] = 0; 741 + 742 + /* HMAC init */ 743 + if (tctx->flags & SHA_FLAGS_HMAC) { 744 + rctx->digcnt[0] = rctx->block_size; 745 + rctx->bufcnt = rctx->block_size; 746 + memcpy(rctx->buffer, bctx->ipad, rctx->block_size); 747 + rctx->flags |= SHA_FLAGS_HMAC; 748 + } 749 + 750 + return 0; 751 + } 752 + 753 + static int aspeed_sha512s_init(struct ahash_request *req) 754 + { 755 + struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req); 756 + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); 757 + struct aspeed_sham_ctx *tctx = crypto_ahash_ctx(tfm); 758 + struct aspeed_hace_dev *hace_dev = tctx->hace_dev; 759 + struct aspeed_sha_hmac_ctx *bctx = tctx->base; 760 + 761 + AHASH_DBG(hace_dev, "digest size: %d\n", crypto_ahash_digestsize(tfm)); 762 + 763 + rctx->cmd = HASH_CMD_ACC_MODE; 764 + rctx->flags = 0; 765 + 766 + switch (crypto_ahash_digestsize(tfm)) { 767 + case SHA224_DIGEST_SIZE: 768 + rctx->cmd |= 
HASH_CMD_SHA512_SER | HASH_CMD_SHA512_224 | 769 + HASH_CMD_SHA_SWAP; 770 + rctx->flags |= SHA_FLAGS_SHA512_224; 771 + rctx->digsize = SHA224_DIGEST_SIZE; 772 + rctx->block_size = SHA512_BLOCK_SIZE; 773 + rctx->sha_iv = sha512_224_iv; 774 + rctx->ivsize = 64; 775 + memcpy(rctx->digest, sha512_224_iv, rctx->ivsize); 776 + break; 777 + case SHA256_DIGEST_SIZE: 778 + rctx->cmd |= HASH_CMD_SHA512_SER | HASH_CMD_SHA512_256 | 779 + HASH_CMD_SHA_SWAP; 780 + rctx->flags |= SHA_FLAGS_SHA512_256; 781 + rctx->digsize = SHA256_DIGEST_SIZE; 782 + rctx->block_size = SHA512_BLOCK_SIZE; 783 + rctx->sha_iv = sha512_256_iv; 784 + rctx->ivsize = 64; 785 + memcpy(rctx->digest, sha512_256_iv, rctx->ivsize); 786 + break; 787 + default: 788 + dev_warn(tctx->hace_dev->dev, "digest size %d not support\n", 789 + crypto_ahash_digestsize(tfm)); 790 + return -EINVAL; 791 + } 792 + 793 + rctx->bufcnt = 0; 794 + rctx->total = 0; 795 + rctx->digcnt[0] = 0; 796 + rctx->digcnt[1] = 0; 797 + 798 + /* HMAC init */ 799 + if (tctx->flags & SHA_FLAGS_HMAC) { 800 + rctx->digcnt[0] = rctx->block_size; 801 + rctx->bufcnt = rctx->block_size; 802 + memcpy(rctx->buffer, bctx->ipad, rctx->block_size); 803 + rctx->flags |= SHA_FLAGS_HMAC; 804 + } 805 + 806 + return 0; 807 + } 808 + 809 + static int aspeed_sham_digest(struct ahash_request *req) 810 + { 811 + return aspeed_sham_init(req) ? : aspeed_sham_finup(req); 812 + } 813 + 814 + static int aspeed_sham_setkey(struct crypto_ahash *tfm, const u8 *key, 815 + unsigned int keylen) 816 + { 817 + struct aspeed_sham_ctx *tctx = crypto_ahash_ctx(tfm); 818 + struct aspeed_hace_dev *hace_dev = tctx->hace_dev; 819 + struct aspeed_sha_hmac_ctx *bctx = tctx->base; 820 + int ds = crypto_shash_digestsize(bctx->shash); 821 + int bs = crypto_shash_blocksize(bctx->shash); 822 + int err = 0; 823 + int i; 824 + 825 + AHASH_DBG(hace_dev, "%s: keylen:%d\n", crypto_tfm_alg_name(&tfm->base), 826 + keylen); 827 + 828 + if (keylen > bs) { 829 + err = aspeed_sham_shash_digest(bctx->shash, 830 + crypto_shash_get_flags(bctx->shash), 831 + key, keylen, bctx->ipad); 832 + if (err) 833 + return err; 834 + keylen = ds; 835 + 836 + } else { 837 + memcpy(bctx->ipad, key, keylen); 838 + } 839 + 840 + memset(bctx->ipad + keylen, 0, bs - keylen); 841 + memcpy(bctx->opad, bctx->ipad, bs); 842 + 843 + for (i = 0; i < bs; i++) { 844 + bctx->ipad[i] ^= HMAC_IPAD_VALUE; 845 + bctx->opad[i] ^= HMAC_OPAD_VALUE; 846 + } 847 + 848 + return err; 849 + } 850 + 851 + static int aspeed_sham_cra_init(struct crypto_tfm *tfm) 852 + { 853 + struct ahash_alg *alg = __crypto_ahash_alg(tfm->__crt_alg); 854 + struct aspeed_sham_ctx *tctx = crypto_tfm_ctx(tfm); 855 + struct aspeed_hace_alg *ast_alg; 856 + 857 + ast_alg = container_of(alg, struct aspeed_hace_alg, alg.ahash); 858 + tctx->hace_dev = ast_alg->hace_dev; 859 + tctx->flags = 0; 860 + 861 + crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), 862 + sizeof(struct aspeed_sham_reqctx)); 863 + 864 + if (ast_alg->alg_base) { 865 + /* hmac related */ 866 + struct aspeed_sha_hmac_ctx *bctx = tctx->base; 867 + 868 + tctx->flags |= SHA_FLAGS_HMAC; 869 + bctx->shash = crypto_alloc_shash(ast_alg->alg_base, 0, 870 + CRYPTO_ALG_NEED_FALLBACK); 871 + if (IS_ERR(bctx->shash)) { 872 + dev_warn(ast_alg->hace_dev->dev, 873 + "base driver '%s' could not be loaded.\n", 874 + ast_alg->alg_base); 875 + return PTR_ERR(bctx->shash); 876 + } 877 + } 878 + 879 + tctx->enginectx.op.do_one_request = aspeed_ahash_do_request; 880 + tctx->enginectx.op.prepare_request = aspeed_ahash_prepare_request; 881 + 
tctx->enginectx.op.unprepare_request = NULL; 882 + 883 + return 0; 884 + } 885 + 886 + static void aspeed_sham_cra_exit(struct crypto_tfm *tfm) 887 + { 888 + struct aspeed_sham_ctx *tctx = crypto_tfm_ctx(tfm); 889 + struct aspeed_hace_dev *hace_dev = tctx->hace_dev; 890 + 891 + AHASH_DBG(hace_dev, "%s\n", crypto_tfm_alg_name(tfm)); 892 + 893 + if (tctx->flags & SHA_FLAGS_HMAC) { 894 + struct aspeed_sha_hmac_ctx *bctx = tctx->base; 895 + 896 + crypto_free_shash(bctx->shash); 897 + } 898 + } 899 + 900 + static int aspeed_sham_export(struct ahash_request *req, void *out) 901 + { 902 + struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req); 903 + 904 + memcpy(out, rctx, sizeof(*rctx)); 905 + 906 + return 0; 907 + } 908 + 909 + static int aspeed_sham_import(struct ahash_request *req, const void *in) 910 + { 911 + struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req); 912 + 913 + memcpy(rctx, in, sizeof(*rctx)); 914 + 915 + return 0; 916 + } 917 + 918 + static struct aspeed_hace_alg aspeed_ahash_algs[] = { 919 + { 920 + .alg.ahash = { 921 + .init = aspeed_sham_init, 922 + .update = aspeed_sham_update, 923 + .final = aspeed_sham_final, 924 + .finup = aspeed_sham_finup, 925 + .digest = aspeed_sham_digest, 926 + .export = aspeed_sham_export, 927 + .import = aspeed_sham_import, 928 + .halg = { 929 + .digestsize = SHA1_DIGEST_SIZE, 930 + .statesize = sizeof(struct aspeed_sham_reqctx), 931 + .base = { 932 + .cra_name = "sha1", 933 + .cra_driver_name = "aspeed-sha1", 934 + .cra_priority = 300, 935 + .cra_flags = CRYPTO_ALG_TYPE_AHASH | 936 + CRYPTO_ALG_ASYNC | 937 + CRYPTO_ALG_KERN_DRIVER_ONLY, 938 + .cra_blocksize = SHA1_BLOCK_SIZE, 939 + .cra_ctxsize = sizeof(struct aspeed_sham_ctx), 940 + .cra_alignmask = 0, 941 + .cra_module = THIS_MODULE, 942 + .cra_init = aspeed_sham_cra_init, 943 + .cra_exit = aspeed_sham_cra_exit, 944 + } 945 + } 946 + }, 947 + }, 948 + { 949 + .alg.ahash = { 950 + .init = aspeed_sham_init, 951 + .update = aspeed_sham_update, 952 + .final = aspeed_sham_final, 953 + .finup = aspeed_sham_finup, 954 + .digest = aspeed_sham_digest, 955 + .export = aspeed_sham_export, 956 + .import = aspeed_sham_import, 957 + .halg = { 958 + .digestsize = SHA256_DIGEST_SIZE, 959 + .statesize = sizeof(struct aspeed_sham_reqctx), 960 + .base = { 961 + .cra_name = "sha256", 962 + .cra_driver_name = "aspeed-sha256", 963 + .cra_priority = 300, 964 + .cra_flags = CRYPTO_ALG_TYPE_AHASH | 965 + CRYPTO_ALG_ASYNC | 966 + CRYPTO_ALG_KERN_DRIVER_ONLY, 967 + .cra_blocksize = SHA256_BLOCK_SIZE, 968 + .cra_ctxsize = sizeof(struct aspeed_sham_ctx), 969 + .cra_alignmask = 0, 970 + .cra_module = THIS_MODULE, 971 + .cra_init = aspeed_sham_cra_init, 972 + .cra_exit = aspeed_sham_cra_exit, 973 + } 974 + } 975 + }, 976 + }, 977 + { 978 + .alg.ahash = { 979 + .init = aspeed_sham_init, 980 + .update = aspeed_sham_update, 981 + .final = aspeed_sham_final, 982 + .finup = aspeed_sham_finup, 983 + .digest = aspeed_sham_digest, 984 + .export = aspeed_sham_export, 985 + .import = aspeed_sham_import, 986 + .halg = { 987 + .digestsize = SHA224_DIGEST_SIZE, 988 + .statesize = sizeof(struct aspeed_sham_reqctx), 989 + .base = { 990 + .cra_name = "sha224", 991 + .cra_driver_name = "aspeed-sha224", 992 + .cra_priority = 300, 993 + .cra_flags = CRYPTO_ALG_TYPE_AHASH | 994 + CRYPTO_ALG_ASYNC | 995 + CRYPTO_ALG_KERN_DRIVER_ONLY, 996 + .cra_blocksize = SHA224_BLOCK_SIZE, 997 + .cra_ctxsize = sizeof(struct aspeed_sham_ctx), 998 + .cra_alignmask = 0, 999 + .cra_module = THIS_MODULE, 1000 + .cra_init = aspeed_sham_cra_init, 1001 + 
.cra_exit = aspeed_sham_cra_exit, 1002 + } 1003 + } 1004 + }, 1005 + }, 1006 + { 1007 + .alg_base = "sha1", 1008 + .alg.ahash = { 1009 + .init = aspeed_sham_init, 1010 + .update = aspeed_sham_update, 1011 + .final = aspeed_sham_final, 1012 + .finup = aspeed_sham_finup, 1013 + .digest = aspeed_sham_digest, 1014 + .setkey = aspeed_sham_setkey, 1015 + .export = aspeed_sham_export, 1016 + .import = aspeed_sham_import, 1017 + .halg = { 1018 + .digestsize = SHA1_DIGEST_SIZE, 1019 + .statesize = sizeof(struct aspeed_sham_reqctx), 1020 + .base = { 1021 + .cra_name = "hmac(sha1)", 1022 + .cra_driver_name = "aspeed-hmac-sha1", 1023 + .cra_priority = 300, 1024 + .cra_flags = CRYPTO_ALG_TYPE_AHASH | 1025 + CRYPTO_ALG_ASYNC | 1026 + CRYPTO_ALG_KERN_DRIVER_ONLY, 1027 + .cra_blocksize = SHA1_BLOCK_SIZE, 1028 + .cra_ctxsize = sizeof(struct aspeed_sham_ctx) + 1029 + sizeof(struct aspeed_sha_hmac_ctx), 1030 + .cra_alignmask = 0, 1031 + .cra_module = THIS_MODULE, 1032 + .cra_init = aspeed_sham_cra_init, 1033 + .cra_exit = aspeed_sham_cra_exit, 1034 + } 1035 + } 1036 + }, 1037 + }, 1038 + { 1039 + .alg_base = "sha224", 1040 + .alg.ahash = { 1041 + .init = aspeed_sham_init, 1042 + .update = aspeed_sham_update, 1043 + .final = aspeed_sham_final, 1044 + .finup = aspeed_sham_finup, 1045 + .digest = aspeed_sham_digest, 1046 + .setkey = aspeed_sham_setkey, 1047 + .export = aspeed_sham_export, 1048 + .import = aspeed_sham_import, 1049 + .halg = { 1050 + .digestsize = SHA224_DIGEST_SIZE, 1051 + .statesize = sizeof(struct aspeed_sham_reqctx), 1052 + .base = { 1053 + .cra_name = "hmac(sha224)", 1054 + .cra_driver_name = "aspeed-hmac-sha224", 1055 + .cra_priority = 300, 1056 + .cra_flags = CRYPTO_ALG_TYPE_AHASH | 1057 + CRYPTO_ALG_ASYNC | 1058 + CRYPTO_ALG_KERN_DRIVER_ONLY, 1059 + .cra_blocksize = SHA224_BLOCK_SIZE, 1060 + .cra_ctxsize = sizeof(struct aspeed_sham_ctx) + 1061 + sizeof(struct aspeed_sha_hmac_ctx), 1062 + .cra_alignmask = 0, 1063 + .cra_module = THIS_MODULE, 1064 + .cra_init = aspeed_sham_cra_init, 1065 + .cra_exit = aspeed_sham_cra_exit, 1066 + } 1067 + } 1068 + }, 1069 + }, 1070 + { 1071 + .alg_base = "sha256", 1072 + .alg.ahash = { 1073 + .init = aspeed_sham_init, 1074 + .update = aspeed_sham_update, 1075 + .final = aspeed_sham_final, 1076 + .finup = aspeed_sham_finup, 1077 + .digest = aspeed_sham_digest, 1078 + .setkey = aspeed_sham_setkey, 1079 + .export = aspeed_sham_export, 1080 + .import = aspeed_sham_import, 1081 + .halg = { 1082 + .digestsize = SHA256_DIGEST_SIZE, 1083 + .statesize = sizeof(struct aspeed_sham_reqctx), 1084 + .base = { 1085 + .cra_name = "hmac(sha256)", 1086 + .cra_driver_name = "aspeed-hmac-sha256", 1087 + .cra_priority = 300, 1088 + .cra_flags = CRYPTO_ALG_TYPE_AHASH | 1089 + CRYPTO_ALG_ASYNC | 1090 + CRYPTO_ALG_KERN_DRIVER_ONLY, 1091 + .cra_blocksize = SHA256_BLOCK_SIZE, 1092 + .cra_ctxsize = sizeof(struct aspeed_sham_ctx) + 1093 + sizeof(struct aspeed_sha_hmac_ctx), 1094 + .cra_alignmask = 0, 1095 + .cra_module = THIS_MODULE, 1096 + .cra_init = aspeed_sham_cra_init, 1097 + .cra_exit = aspeed_sham_cra_exit, 1098 + } 1099 + } 1100 + }, 1101 + }, 1102 + }; 1103 + 1104 + static struct aspeed_hace_alg aspeed_ahash_algs_g6[] = { 1105 + { 1106 + .alg.ahash = { 1107 + .init = aspeed_sham_init, 1108 + .update = aspeed_sham_update, 1109 + .final = aspeed_sham_final, 1110 + .finup = aspeed_sham_finup, 1111 + .digest = aspeed_sham_digest, 1112 + .export = aspeed_sham_export, 1113 + .import = aspeed_sham_import, 1114 + .halg = { 1115 + .digestsize = SHA384_DIGEST_SIZE, 1116 + .statesize = 
sizeof(struct aspeed_sham_reqctx), 1117 + .base = { 1118 + .cra_name = "sha384", 1119 + .cra_driver_name = "aspeed-sha384", 1120 + .cra_priority = 300, 1121 + .cra_flags = CRYPTO_ALG_TYPE_AHASH | 1122 + CRYPTO_ALG_ASYNC | 1123 + CRYPTO_ALG_KERN_DRIVER_ONLY, 1124 + .cra_blocksize = SHA384_BLOCK_SIZE, 1125 + .cra_ctxsize = sizeof(struct aspeed_sham_ctx), 1126 + .cra_alignmask = 0, 1127 + .cra_module = THIS_MODULE, 1128 + .cra_init = aspeed_sham_cra_init, 1129 + .cra_exit = aspeed_sham_cra_exit, 1130 + } 1131 + } 1132 + }, 1133 + }, 1134 + { 1135 + .alg.ahash = { 1136 + .init = aspeed_sham_init, 1137 + .update = aspeed_sham_update, 1138 + .final = aspeed_sham_final, 1139 + .finup = aspeed_sham_finup, 1140 + .digest = aspeed_sham_digest, 1141 + .export = aspeed_sham_export, 1142 + .import = aspeed_sham_import, 1143 + .halg = { 1144 + .digestsize = SHA512_DIGEST_SIZE, 1145 + .statesize = sizeof(struct aspeed_sham_reqctx), 1146 + .base = { 1147 + .cra_name = "sha512", 1148 + .cra_driver_name = "aspeed-sha512", 1149 + .cra_priority = 300, 1150 + .cra_flags = CRYPTO_ALG_TYPE_AHASH | 1151 + CRYPTO_ALG_ASYNC | 1152 + CRYPTO_ALG_KERN_DRIVER_ONLY, 1153 + .cra_blocksize = SHA512_BLOCK_SIZE, 1154 + .cra_ctxsize = sizeof(struct aspeed_sham_ctx), 1155 + .cra_alignmask = 0, 1156 + .cra_module = THIS_MODULE, 1157 + .cra_init = aspeed_sham_cra_init, 1158 + .cra_exit = aspeed_sham_cra_exit, 1159 + } 1160 + } 1161 + }, 1162 + }, 1163 + { 1164 + .alg.ahash = { 1165 + .init = aspeed_sha512s_init, 1166 + .update = aspeed_sham_update, 1167 + .final = aspeed_sham_final, 1168 + .finup = aspeed_sham_finup, 1169 + .digest = aspeed_sham_digest, 1170 + .export = aspeed_sham_export, 1171 + .import = aspeed_sham_import, 1172 + .halg = { 1173 + .digestsize = SHA224_DIGEST_SIZE, 1174 + .statesize = sizeof(struct aspeed_sham_reqctx), 1175 + .base = { 1176 + .cra_name = "sha512_224", 1177 + .cra_driver_name = "aspeed-sha512_224", 1178 + .cra_priority = 300, 1179 + .cra_flags = CRYPTO_ALG_TYPE_AHASH | 1180 + CRYPTO_ALG_ASYNC | 1181 + CRYPTO_ALG_KERN_DRIVER_ONLY, 1182 + .cra_blocksize = SHA512_BLOCK_SIZE, 1183 + .cra_ctxsize = sizeof(struct aspeed_sham_ctx), 1184 + .cra_alignmask = 0, 1185 + .cra_module = THIS_MODULE, 1186 + .cra_init = aspeed_sham_cra_init, 1187 + .cra_exit = aspeed_sham_cra_exit, 1188 + } 1189 + } 1190 + }, 1191 + }, 1192 + { 1193 + .alg.ahash = { 1194 + .init = aspeed_sha512s_init, 1195 + .update = aspeed_sham_update, 1196 + .final = aspeed_sham_final, 1197 + .finup = aspeed_sham_finup, 1198 + .digest = aspeed_sham_digest, 1199 + .export = aspeed_sham_export, 1200 + .import = aspeed_sham_import, 1201 + .halg = { 1202 + .digestsize = SHA256_DIGEST_SIZE, 1203 + .statesize = sizeof(struct aspeed_sham_reqctx), 1204 + .base = { 1205 + .cra_name = "sha512_256", 1206 + .cra_driver_name = "aspeed-sha512_256", 1207 + .cra_priority = 300, 1208 + .cra_flags = CRYPTO_ALG_TYPE_AHASH | 1209 + CRYPTO_ALG_ASYNC | 1210 + CRYPTO_ALG_KERN_DRIVER_ONLY, 1211 + .cra_blocksize = SHA512_BLOCK_SIZE, 1212 + .cra_ctxsize = sizeof(struct aspeed_sham_ctx), 1213 + .cra_alignmask = 0, 1214 + .cra_module = THIS_MODULE, 1215 + .cra_init = aspeed_sham_cra_init, 1216 + .cra_exit = aspeed_sham_cra_exit, 1217 + } 1218 + } 1219 + }, 1220 + }, 1221 + { 1222 + .alg_base = "sha384", 1223 + .alg.ahash = { 1224 + .init = aspeed_sham_init, 1225 + .update = aspeed_sham_update, 1226 + .final = aspeed_sham_final, 1227 + .finup = aspeed_sham_finup, 1228 + .digest = aspeed_sham_digest, 1229 + .setkey = aspeed_sham_setkey, 1230 + .export = 
aspeed_sham_export, 1231 + .import = aspeed_sham_import, 1232 + .halg = { 1233 + .digestsize = SHA384_DIGEST_SIZE, 1234 + .statesize = sizeof(struct aspeed_sham_reqctx), 1235 + .base = { 1236 + .cra_name = "hmac(sha384)", 1237 + .cra_driver_name = "aspeed-hmac-sha384", 1238 + .cra_priority = 300, 1239 + .cra_flags = CRYPTO_ALG_TYPE_AHASH | 1240 + CRYPTO_ALG_ASYNC | 1241 + CRYPTO_ALG_KERN_DRIVER_ONLY, 1242 + .cra_blocksize = SHA384_BLOCK_SIZE, 1243 + .cra_ctxsize = sizeof(struct aspeed_sham_ctx) + 1244 + sizeof(struct aspeed_sha_hmac_ctx), 1245 + .cra_alignmask = 0, 1246 + .cra_module = THIS_MODULE, 1247 + .cra_init = aspeed_sham_cra_init, 1248 + .cra_exit = aspeed_sham_cra_exit, 1249 + } 1250 + } 1251 + }, 1252 + }, 1253 + { 1254 + .alg_base = "sha512", 1255 + .alg.ahash = { 1256 + .init = aspeed_sham_init, 1257 + .update = aspeed_sham_update, 1258 + .final = aspeed_sham_final, 1259 + .finup = aspeed_sham_finup, 1260 + .digest = aspeed_sham_digest, 1261 + .setkey = aspeed_sham_setkey, 1262 + .export = aspeed_sham_export, 1263 + .import = aspeed_sham_import, 1264 + .halg = { 1265 + .digestsize = SHA512_DIGEST_SIZE, 1266 + .statesize = sizeof(struct aspeed_sham_reqctx), 1267 + .base = { 1268 + .cra_name = "hmac(sha512)", 1269 + .cra_driver_name = "aspeed-hmac-sha512", 1270 + .cra_priority = 300, 1271 + .cra_flags = CRYPTO_ALG_TYPE_AHASH | 1272 + CRYPTO_ALG_ASYNC | 1273 + CRYPTO_ALG_KERN_DRIVER_ONLY, 1274 + .cra_blocksize = SHA512_BLOCK_SIZE, 1275 + .cra_ctxsize = sizeof(struct aspeed_sham_ctx) + 1276 + sizeof(struct aspeed_sha_hmac_ctx), 1277 + .cra_alignmask = 0, 1278 + .cra_module = THIS_MODULE, 1279 + .cra_init = aspeed_sham_cra_init, 1280 + .cra_exit = aspeed_sham_cra_exit, 1281 + } 1282 + } 1283 + }, 1284 + }, 1285 + { 1286 + .alg_base = "sha512_224", 1287 + .alg.ahash = { 1288 + .init = aspeed_sha512s_init, 1289 + .update = aspeed_sham_update, 1290 + .final = aspeed_sham_final, 1291 + .finup = aspeed_sham_finup, 1292 + .digest = aspeed_sham_digest, 1293 + .setkey = aspeed_sham_setkey, 1294 + .export = aspeed_sham_export, 1295 + .import = aspeed_sham_import, 1296 + .halg = { 1297 + .digestsize = SHA224_DIGEST_SIZE, 1298 + .statesize = sizeof(struct aspeed_sham_reqctx), 1299 + .base = { 1300 + .cra_name = "hmac(sha512_224)", 1301 + .cra_driver_name = "aspeed-hmac-sha512_224", 1302 + .cra_priority = 300, 1303 + .cra_flags = CRYPTO_ALG_TYPE_AHASH | 1304 + CRYPTO_ALG_ASYNC | 1305 + CRYPTO_ALG_KERN_DRIVER_ONLY, 1306 + .cra_blocksize = SHA512_BLOCK_SIZE, 1307 + .cra_ctxsize = sizeof(struct aspeed_sham_ctx) + 1308 + sizeof(struct aspeed_sha_hmac_ctx), 1309 + .cra_alignmask = 0, 1310 + .cra_module = THIS_MODULE, 1311 + .cra_init = aspeed_sham_cra_init, 1312 + .cra_exit = aspeed_sham_cra_exit, 1313 + } 1314 + } 1315 + }, 1316 + }, 1317 + { 1318 + .alg_base = "sha512_256", 1319 + .alg.ahash = { 1320 + .init = aspeed_sha512s_init, 1321 + .update = aspeed_sham_update, 1322 + .final = aspeed_sham_final, 1323 + .finup = aspeed_sham_finup, 1324 + .digest = aspeed_sham_digest, 1325 + .setkey = aspeed_sham_setkey, 1326 + .export = aspeed_sham_export, 1327 + .import = aspeed_sham_import, 1328 + .halg = { 1329 + .digestsize = SHA256_DIGEST_SIZE, 1330 + .statesize = sizeof(struct aspeed_sham_reqctx), 1331 + .base = { 1332 + .cra_name = "hmac(sha512_256)", 1333 + .cra_driver_name = "aspeed-hmac-sha512_256", 1334 + .cra_priority = 300, 1335 + .cra_flags = CRYPTO_ALG_TYPE_AHASH | 1336 + CRYPTO_ALG_ASYNC | 1337 + CRYPTO_ALG_KERN_DRIVER_ONLY, 1338 + .cra_blocksize = SHA512_BLOCK_SIZE, 1339 + .cra_ctxsize = 
sizeof(struct aspeed_sham_ctx) + 1340 + sizeof(struct aspeed_sha_hmac_ctx), 1341 + .cra_alignmask = 0, 1342 + .cra_module = THIS_MODULE, 1343 + .cra_init = aspeed_sham_cra_init, 1344 + .cra_exit = aspeed_sham_cra_exit, 1345 + } 1346 + } 1347 + }, 1348 + }, 1349 + }; 1350 + 1351 + void aspeed_unregister_hace_hash_algs(struct aspeed_hace_dev *hace_dev) 1352 + { 1353 + int i; 1354 + 1355 + for (i = 0; i < ARRAY_SIZE(aspeed_ahash_algs); i++) 1356 + crypto_unregister_ahash(&aspeed_ahash_algs[i].alg.ahash); 1357 + 1358 + if (hace_dev->version != AST2600_VERSION) 1359 + return; 1360 + 1361 + for (i = 0; i < ARRAY_SIZE(aspeed_ahash_algs_g6); i++) 1362 + crypto_unregister_ahash(&aspeed_ahash_algs_g6[i].alg.ahash); 1363 + } 1364 + 1365 + void aspeed_register_hace_hash_algs(struct aspeed_hace_dev *hace_dev) 1366 + { 1367 + int rc, i; 1368 + 1369 + AHASH_DBG(hace_dev, "\n"); 1370 + 1371 + for (i = 0; i < ARRAY_SIZE(aspeed_ahash_algs); i++) { 1372 + aspeed_ahash_algs[i].hace_dev = hace_dev; 1373 + rc = crypto_register_ahash(&aspeed_ahash_algs[i].alg.ahash); 1374 + if (rc) { 1375 + AHASH_DBG(hace_dev, "Failed to register %s\n", 1376 + aspeed_ahash_algs[i].alg.ahash.halg.base.cra_name); 1377 + } 1378 + } 1379 + 1380 + if (hace_dev->version != AST2600_VERSION) 1381 + return; 1382 + 1383 + for (i = 0; i < ARRAY_SIZE(aspeed_ahash_algs_g6); i++) { 1384 + aspeed_ahash_algs_g6[i].hace_dev = hace_dev; 1385 + rc = crypto_register_ahash(&aspeed_ahash_algs_g6[i].alg.ahash); 1386 + if (rc) { 1387 + AHASH_DBG(hace_dev, "Failed to register %s\n", 1388 + aspeed_ahash_algs_g6[i].alg.ahash.halg.base.cra_name); 1389 + } 1390 + } 1391 + }
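Once crypto_register_ahash() succeeds for the tables above, the algorithms are selectable by cra_name through the kernel's usual resolution machinery, including the AF_ALG userspace interface. As a rough userspace sketch of exercising one of them (this assumes CONFIG_CRYPTO_USER_API_HASH is enabled and that "aspeed-sha256" wins the "sha256" lookup on cra_priority; error checking is omitted):

#include <stdio.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/if_alg.h>

int main(void)
{
	struct sockaddr_alg sa = {
		.salg_family = AF_ALG,
		.salg_type   = "hash",
		.salg_name   = "sha256",  /* resolved to "aspeed-sha256" if it has top priority */
	};
	unsigned char digest[32];
	int tfmfd, opfd, i;

	tfmfd = socket(AF_ALG, SOCK_SEQPACKET, 0);
	bind(tfmfd, (struct sockaddr *)&sa, sizeof(sa));
	opfd = accept(tfmfd, NULL, 0);

	write(opfd, "abc", 3);              /* one-shot hash: send the data ... */
	read(opfd, digest, sizeof(digest)); /* ... then read back the digest */

	for (i = 0; i < 32; i++)
		printf("%02x", digest[i]);
	printf("\n");

	close(opfd);
	close(tfmfd);
	return 0;
}

Passing the cra_driver_name ("aspeed-sha256") as salg_name instead of "sha256" pins the request to this driver rather than to whichever implementation currently has the highest priority.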
+284
drivers/crypto/aspeed/aspeed-hace.c
··· 1 + // SPDX-License-Identifier: GPL-2.0+ 2 + /* 3 + * Copyright (c) 2021 Aspeed Technology Inc. 4 + */ 5 + 6 + #include <linux/clk.h> 7 + #include <linux/module.h> 8 + #include <linux/of_address.h> 9 + #include <linux/of_device.h> 10 + #include <linux/of_irq.h> 11 + #include <linux/of.h> 12 + #include <linux/platform_device.h> 13 + 14 + #include "aspeed-hace.h" 15 + 16 + #ifdef CONFIG_CRYPTO_DEV_ASPEED_DEBUG 17 + #define HACE_DBG(d, fmt, ...) \ 18 + dev_info((d)->dev, "%s() " fmt, __func__, ##__VA_ARGS__) 19 + #else 20 + #define HACE_DBG(d, fmt, ...) \ 21 + dev_dbg((d)->dev, "%s() " fmt, __func__, ##__VA_ARGS__) 22 + #endif 23 + 24 + /* HACE interrupt service routine */ 25 + static irqreturn_t aspeed_hace_irq(int irq, void *dev) 26 + { 27 + struct aspeed_hace_dev *hace_dev = (struct aspeed_hace_dev *)dev; 28 + struct aspeed_engine_crypto *crypto_engine = &hace_dev->crypto_engine; 29 + struct aspeed_engine_hash *hash_engine = &hace_dev->hash_engine; 30 + u32 sts; 31 + 32 + sts = ast_hace_read(hace_dev, ASPEED_HACE_STS); 33 + ast_hace_write(hace_dev, sts, ASPEED_HACE_STS); 34 + 35 + HACE_DBG(hace_dev, "irq status: 0x%x\n", sts); 36 + 37 + if (sts & HACE_HASH_ISR) { 38 + if (hash_engine->flags & CRYPTO_FLAGS_BUSY) 39 + tasklet_schedule(&hash_engine->done_task); 40 + else 41 + dev_warn(hace_dev->dev, "HASH no active requests.\n"); 42 + } 43 + 44 + if (sts & HACE_CRYPTO_ISR) { 45 + if (crypto_engine->flags & CRYPTO_FLAGS_BUSY) 46 + tasklet_schedule(&crypto_engine->done_task); 47 + else 48 + dev_warn(hace_dev->dev, "CRYPTO no active requests.\n"); 49 + } 50 + 51 + return IRQ_HANDLED; 52 + } 53 + 54 + static void aspeed_hace_crypto_done_task(unsigned long data) 55 + { 56 + struct aspeed_hace_dev *hace_dev = (struct aspeed_hace_dev *)data; 57 + struct aspeed_engine_crypto *crypto_engine = &hace_dev->crypto_engine; 58 + 59 + crypto_engine->resume(hace_dev); 60 + } 61 + 62 + static void aspeed_hace_hash_done_task(unsigned long data) 63 + { 64 + struct aspeed_hace_dev *hace_dev = (struct aspeed_hace_dev *)data; 65 + struct aspeed_engine_hash *hash_engine = &hace_dev->hash_engine; 66 + 67 + hash_engine->resume(hace_dev); 68 + } 69 + 70 + static void aspeed_hace_register(struct aspeed_hace_dev *hace_dev) 71 + { 72 + #ifdef CONFIG_CRYPTO_DEV_ASPEED_HACE_HASH 73 + aspeed_register_hace_hash_algs(hace_dev); 74 + #endif 75 + #ifdef CONFIG_CRYPTO_DEV_ASPEED_HACE_CRYPTO 76 + aspeed_register_hace_crypto_algs(hace_dev); 77 + #endif 78 + } 79 + 80 + static void aspeed_hace_unregister(struct aspeed_hace_dev *hace_dev) 81 + { 82 + #ifdef CONFIG_CRYPTO_DEV_ASPEED_HACE_HASH 83 + aspeed_unregister_hace_hash_algs(hace_dev); 84 + #endif 85 + #ifdef CONFIG_CRYPTO_DEV_ASPEED_HACE_CRYPTO 86 + aspeed_unregister_hace_crypto_algs(hace_dev); 87 + #endif 88 + } 89 + 90 + static const struct of_device_id aspeed_hace_of_matches[] = { 91 + { .compatible = "aspeed,ast2500-hace", .data = (void *)5, }, 92 + { .compatible = "aspeed,ast2600-hace", .data = (void *)6, }, 93 + {}, 94 + }; 95 + 96 + static int aspeed_hace_probe(struct platform_device *pdev) 97 + { 98 + struct aspeed_engine_crypto *crypto_engine; 99 + const struct of_device_id *hace_dev_id; 100 + struct aspeed_engine_hash *hash_engine; 101 + struct aspeed_hace_dev *hace_dev; 102 + struct resource *res; 103 + int rc; 104 + 105 + hace_dev = devm_kzalloc(&pdev->dev, sizeof(struct aspeed_hace_dev), 106 + GFP_KERNEL); 107 + if (!hace_dev) 108 + return -ENOMEM; 109 + 110 + hace_dev_id = of_match_device(aspeed_hace_of_matches, &pdev->dev); 111 + if (!hace_dev_id) { 112 + 
dev_err(&pdev->dev, "Failed to match hace dev id\n"); 113 + return -EINVAL; 114 + } 115 + 116 + hace_dev->dev = &pdev->dev; 117 + hace_dev->version = (unsigned long)hace_dev_id->data; 118 + hash_engine = &hace_dev->hash_engine; 119 + crypto_engine = &hace_dev->crypto_engine; 120 + 121 + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 122 + 123 + platform_set_drvdata(pdev, hace_dev); 124 + 125 + hace_dev->regs = devm_ioremap_resource(&pdev->dev, res); 126 + if (IS_ERR(hace_dev->regs)) 127 + return PTR_ERR(hace_dev->regs); 128 + 129 + /* Get irq number and register it */ 130 + hace_dev->irq = platform_get_irq(pdev, 0); 131 + if (hace_dev->irq < 0) 132 + return -ENXIO; 133 + 134 + rc = devm_request_irq(&pdev->dev, hace_dev->irq, aspeed_hace_irq, 0, 135 + dev_name(&pdev->dev), hace_dev); 136 + if (rc) { 137 + dev_err(&pdev->dev, "Failed to request interrupt\n"); 138 + return rc; 139 + } 140 + 141 + /* Get clk and enable it */ 142 + hace_dev->clk = devm_clk_get(&pdev->dev, NULL); 143 + if (IS_ERR(hace_dev->clk)) { 144 + dev_err(&pdev->dev, "Failed to get clk\n"); 145 + return -ENODEV; 146 + } 147 + 148 + rc = clk_prepare_enable(hace_dev->clk); 149 + if (rc) { 150 + dev_err(&pdev->dev, "Failed to enable clock 0x%x\n", rc); 151 + return rc; 152 + } 153 + 154 + /* Initialize crypto hardware engine structure for hash */ 155 + hace_dev->crypt_engine_hash = crypto_engine_alloc_init(hace_dev->dev, 156 + true); 157 + if (!hace_dev->crypt_engine_hash) { 158 + rc = -ENOMEM; 159 + goto clk_exit; 160 + } 161 + 162 + rc = crypto_engine_start(hace_dev->crypt_engine_hash); 163 + if (rc) 164 + goto err_engine_hash_start; 165 + 166 + tasklet_init(&hash_engine->done_task, aspeed_hace_hash_done_task, 167 + (unsigned long)hace_dev); 168 + 169 + /* Initialize crypto hardware engine structure for crypto */ 170 + hace_dev->crypt_engine_crypto = crypto_engine_alloc_init(hace_dev->dev, 171 + true); 172 + if (!hace_dev->crypt_engine_crypto) { 173 + rc = -ENOMEM; 174 + goto err_engine_hash_start; 175 + } 176 + 177 + rc = crypto_engine_start(hace_dev->crypt_engine_crypto); 178 + if (rc) 179 + goto err_engine_crypto_start; 180 + 181 + tasklet_init(&crypto_engine->done_task, aspeed_hace_crypto_done_task, 182 + (unsigned long)hace_dev); 183 + 184 + /* Allocate DMA buffer for hash engine input used */ 185 + hash_engine->ahash_src_addr = 186 + dmam_alloc_coherent(&pdev->dev, 187 + ASPEED_HASH_SRC_DMA_BUF_LEN, 188 + &hash_engine->ahash_src_dma_addr, 189 + GFP_KERNEL); 190 + if (!hash_engine->ahash_src_addr) { 191 + dev_err(&pdev->dev, "Failed to allocate dma buffer\n"); 192 + rc = -ENOMEM; 193 + goto err_engine_crypto_start; 194 + } 195 + 196 + /* Allocate DMA buffer for crypto engine context used */ 197 + crypto_engine->cipher_ctx = 198 + dmam_alloc_coherent(&pdev->dev, 199 + PAGE_SIZE, 200 + &crypto_engine->cipher_ctx_dma, 201 + GFP_KERNEL); 202 + if (!crypto_engine->cipher_ctx) { 203 + dev_err(&pdev->dev, "Failed to allocate cipher ctx dma\n"); 204 + rc = -ENOMEM; 205 + goto err_engine_crypto_start; 206 + } 207 + 208 + /* Allocate DMA buffer for crypto engine input used */ 209 + crypto_engine->cipher_addr = 210 + dmam_alloc_coherent(&pdev->dev, 211 + ASPEED_CRYPTO_SRC_DMA_BUF_LEN, 212 + &crypto_engine->cipher_dma_addr, 213 + GFP_KERNEL); 214 + if (!crypto_engine->cipher_addr) { 215 + dev_err(&pdev->dev, "Failed to allocate cipher addr dma\n"); 216 + rc = -ENOMEM; 217 + goto err_engine_crypto_start; 218 + } 219 + 220 + /* Allocate DMA buffer for crypto engine output used */ 221 + if (hace_dev->version == 
AST2600_VERSION) { 222 + crypto_engine->dst_sg_addr = 223 + dmam_alloc_coherent(&pdev->dev, 224 + ASPEED_CRYPTO_DST_DMA_BUF_LEN, 225 + &crypto_engine->dst_sg_dma_addr, 226 + GFP_KERNEL); 227 + if (!crypto_engine->dst_sg_addr) { 228 + dev_err(&pdev->dev, "Failed to allocate dst_sg dma\n"); 229 + rc = -ENOMEM; 230 + goto err_engine_crypto_start; 231 + } 232 + } 233 + 234 + aspeed_hace_register(hace_dev); 235 + 236 + dev_info(&pdev->dev, "Aspeed Crypto Accelerator successfully registered\n"); 237 + 238 + return 0; 239 + 240 + err_engine_crypto_start: 241 + crypto_engine_exit(hace_dev->crypt_engine_crypto); 242 + err_engine_hash_start: 243 + crypto_engine_exit(hace_dev->crypt_engine_hash); 244 + clk_exit: 245 + clk_disable_unprepare(hace_dev->clk); 246 + 247 + return rc; 248 + } 249 + 250 + static int aspeed_hace_remove(struct platform_device *pdev) 251 + { 252 + struct aspeed_hace_dev *hace_dev = platform_get_drvdata(pdev); 253 + struct aspeed_engine_crypto *crypto_engine = &hace_dev->crypto_engine; 254 + struct aspeed_engine_hash *hash_engine = &hace_dev->hash_engine; 255 + 256 + aspeed_hace_unregister(hace_dev); 257 + 258 + crypto_engine_exit(hace_dev->crypt_engine_hash); 259 + crypto_engine_exit(hace_dev->crypt_engine_crypto); 260 + 261 + tasklet_kill(&hash_engine->done_task); 262 + tasklet_kill(&crypto_engine->done_task); 263 + 264 + clk_disable_unprepare(hace_dev->clk); 265 + 266 + return 0; 267 + } 268 + 269 + MODULE_DEVICE_TABLE(of, aspeed_hace_of_matches); 270 + 271 + static struct platform_driver aspeed_hace_driver = { 272 + .probe = aspeed_hace_probe, 273 + .remove = aspeed_hace_remove, 274 + .driver = { 275 + .name = KBUILD_MODNAME, 276 + .of_match_table = aspeed_hace_of_matches, 277 + }, 278 + }; 279 + 280 + module_platform_driver(aspeed_hace_driver); 281 + 282 + MODULE_AUTHOR("Neal Liu <neal_liu@aspeedtech.com>"); 283 + MODULE_DESCRIPTION("Aspeed HACE driver Crypto Accelerator"); 284 + MODULE_LICENSE("GPL");
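The probe/ISR structure above is the common hard-IRQ/bottom-half split: aspeed_hace_irq() only reads and acknowledges the status register, then defers the heavy completion work to a tasklet, which invokes the engine's resume callback in softirq context. A minimal sketch of that pattern, with hypothetical names rather than the driver's own:

#include <linux/interrupt.h>

struct my_engine {
	struct tasklet_struct done_task;
	void (*resume)(struct my_engine *eng);
};

/* bottom half: runs later in softirq context, free to do the longer work */
static void my_done_task(unsigned long data)
{
	struct my_engine *eng = (struct my_engine *)data;

	eng->resume(eng);	/* complete the finished request, start the next */
}

/* top half: acknowledge the hardware and get out quickly */
static irqreturn_t my_irq(int irq, void *dev_id)
{
	struct my_engine *eng = dev_id;

	tasklet_schedule(&eng->done_task);
	return IRQ_HANDLED;
}

/*
 * probe:  tasklet_init(&eng->done_task, my_done_task, (unsigned long)eng);
 * remove: tasklet_kill(&eng->done_task);
 */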
+298
drivers/crypto/aspeed/aspeed-hace.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0+ */ 2 + #ifndef __ASPEED_HACE_H__ 3 + #define __ASPEED_HACE_H__ 4 + 5 + #include <linux/interrupt.h> 6 + #include <linux/delay.h> 7 + #include <linux/err.h> 8 + #include <linux/fips.h> 9 + #include <linux/dma-mapping.h> 10 + #include <crypto/aes.h> 11 + #include <crypto/des.h> 12 + #include <crypto/scatterwalk.h> 13 + #include <crypto/internal/aead.h> 14 + #include <crypto/internal/akcipher.h> 15 + #include <crypto/internal/des.h> 16 + #include <crypto/internal/hash.h> 17 + #include <crypto/internal/kpp.h> 18 + #include <crypto/internal/skcipher.h> 19 + #include <crypto/algapi.h> 20 + #include <crypto/engine.h> 21 + #include <crypto/hmac.h> 22 + #include <crypto/sha1.h> 23 + #include <crypto/sha2.h> 24 + 25 + /***************************** 26 + * * 27 + * HACE register definitions * 28 + * * 29 + * ***************************/ 30 + #define ASPEED_HACE_SRC 0x00 /* Crypto Data Source Base Address Register */ 31 + #define ASPEED_HACE_DEST 0x04 /* Crypto Data Destination Base Address Register */ 32 + #define ASPEED_HACE_CONTEXT 0x08 /* Crypto Context Buffer Base Address Register */ 33 + #define ASPEED_HACE_DATA_LEN 0x0C /* Crypto Data Length Register */ 34 + #define ASPEED_HACE_CMD 0x10 /* Crypto Engine Command Register */ 35 + 36 + /* G5 */ 37 + #define ASPEED_HACE_TAG 0x18 /* HACE Tag Register */ 38 + /* G6 */ 39 + #define ASPEED_HACE_GCM_ADD_LEN 0x14 /* Crypto AES-GCM Additional Data Length Register */ 40 + #define ASPEED_HACE_GCM_TAG_BASE_ADDR 0x18 /* Crypto AES-GCM Tag Write Buff Base Address Reg */ 41 + 42 + #define ASPEED_HACE_STS 0x1C /* HACE Status Register */ 43 + 44 + #define ASPEED_HACE_HASH_SRC 0x20 /* Hash Data Source Base Address Register */ 45 + #define ASPEED_HACE_HASH_DIGEST_BUFF 0x24 /* Hash Digest Write Buffer Base Address Register */ 46 + #define ASPEED_HACE_HASH_KEY_BUFF 0x28 /* Hash HMAC Key Buffer Base Address Register */ 47 + #define ASPEED_HACE_HASH_DATA_LEN 0x2C /* Hash Data Length Register */ 48 + #define ASPEED_HACE_HASH_CMD 0x30 /* Hash Engine Command Register */ 49 + 50 + /* crypto cmd */ 51 + #define HACE_CMD_SINGLE_DES 0 52 + #define HACE_CMD_TRIPLE_DES BIT(17) 53 + #define HACE_CMD_AES_SELECT 0 54 + #define HACE_CMD_DES_SELECT BIT(16) 55 + #define HACE_CMD_ISR_EN BIT(12) 56 + #define HACE_CMD_CONTEXT_SAVE_ENABLE (0) 57 + #define HACE_CMD_CONTEXT_SAVE_DISABLE BIT(9) 58 + #define HACE_CMD_AES (0) 59 + #define HACE_CMD_DES (0) 60 + #define HACE_CMD_RC4 BIT(8) 61 + #define HACE_CMD_DECRYPT (0) 62 + #define HACE_CMD_ENCRYPT BIT(7) 63 + 64 + #define HACE_CMD_ECB (0x0 << 4) 65 + #define HACE_CMD_CBC (0x1 << 4) 66 + #define HACE_CMD_CFB (0x2 << 4) 67 + #define HACE_CMD_OFB (0x3 << 4) 68 + #define HACE_CMD_CTR (0x4 << 4) 69 + #define HACE_CMD_OP_MODE_MASK (0x7 << 4) 70 + 71 + #define HACE_CMD_AES128 (0x0 << 2) 72 + #define HACE_CMD_AES192 (0x1 << 2) 73 + #define HACE_CMD_AES256 (0x2 << 2) 74 + #define HACE_CMD_OP_CASCADE (0x3) 75 + #define HACE_CMD_OP_INDEPENDENT (0x1) 76 + 77 + /* G5 */ 78 + #define HACE_CMD_RI_WO_DATA_ENABLE (0) 79 + #define HACE_CMD_RI_WO_DATA_DISABLE BIT(11) 80 + #define HACE_CMD_CONTEXT_LOAD_ENABLE (0) 81 + #define HACE_CMD_CONTEXT_LOAD_DISABLE BIT(10) 82 + /* G6 */ 83 + #define HACE_CMD_AES_KEY_FROM_OTP BIT(24) 84 + #define HACE_CMD_GHASH_TAG_XOR_EN BIT(23) 85 + #define HACE_CMD_GHASH_PAD_LEN_INV BIT(22) 86 + #define HACE_CMD_GCM_TAG_ADDR_SEL BIT(21) 87 + #define HACE_CMD_MBUS_REQ_SYNC_EN BIT(20) 88 + #define HACE_CMD_DES_SG_CTRL BIT(19) 89 + #define HACE_CMD_SRC_SG_CTRL BIT(18) 90 + #define 
HACE_CMD_CTR_IV_AES_96 (0x1 << 14) 91 + #define HACE_CMD_CTR_IV_DES_32 (0x1 << 14) 92 + #define HACE_CMD_CTR_IV_AES_64 (0x2 << 14) 93 + #define HACE_CMD_CTR_IV_AES_32 (0x3 << 14) 94 + #define HACE_CMD_AES_KEY_HW_EXP BIT(13) 95 + #define HACE_CMD_GCM (0x5 << 4) 96 + 97 + /* interrupt status reg */ 98 + #define HACE_CRYPTO_ISR BIT(12) 99 + #define HACE_HASH_ISR BIT(9) 100 + #define HACE_HASH_BUSY BIT(0) 101 + 102 + /* hash cmd reg */ 103 + #define HASH_CMD_MBUS_REQ_SYNC_EN BIT(20) 104 + #define HASH_CMD_HASH_SRC_SG_CTRL BIT(18) 105 + #define HASH_CMD_SHA512_224 (0x3 << 10) 106 + #define HASH_CMD_SHA512_256 (0x2 << 10) 107 + #define HASH_CMD_SHA384 (0x1 << 10) 108 + #define HASH_CMD_SHA512 (0) 109 + #define HASH_CMD_INT_ENABLE BIT(9) 110 + #define HASH_CMD_HMAC (0x1 << 7) 111 + #define HASH_CMD_ACC_MODE (0x2 << 7) 112 + #define HASH_CMD_HMAC_KEY (0x3 << 7) 113 + #define HASH_CMD_SHA1 (0x2 << 4) 114 + #define HASH_CMD_SHA224 (0x4 << 4) 115 + #define HASH_CMD_SHA256 (0x5 << 4) 116 + #define HASH_CMD_SHA512_SER (0x6 << 4) 117 + #define HASH_CMD_SHA_SWAP (0x2 << 2) 118 + 119 + #define HASH_SG_LAST_LIST BIT(31) 120 + 121 + #define CRYPTO_FLAGS_BUSY BIT(1) 122 + 123 + #define SHA_OP_UPDATE 1 124 + #define SHA_OP_FINAL 2 125 + 126 + #define SHA_FLAGS_SHA1 BIT(0) 127 + #define SHA_FLAGS_SHA224 BIT(1) 128 + #define SHA_FLAGS_SHA256 BIT(2) 129 + #define SHA_FLAGS_SHA384 BIT(3) 130 + #define SHA_FLAGS_SHA512 BIT(4) 131 + #define SHA_FLAGS_SHA512_224 BIT(5) 132 + #define SHA_FLAGS_SHA512_256 BIT(6) 133 + #define SHA_FLAGS_HMAC BIT(8) 134 + #define SHA_FLAGS_FINUP BIT(9) 135 + #define SHA_FLAGS_MASK (0xff) 136 + 137 + #define ASPEED_CRYPTO_SRC_DMA_BUF_LEN 0xa000 138 + #define ASPEED_CRYPTO_DST_DMA_BUF_LEN 0xa000 139 + #define ASPEED_CRYPTO_GCM_TAG_OFFSET 0x9ff0 140 + #define ASPEED_HASH_SRC_DMA_BUF_LEN 0xa000 141 + #define ASPEED_HASH_QUEUE_LENGTH 50 142 + 143 + #define HACE_CMD_IV_REQUIRE (HACE_CMD_CBC | HACE_CMD_CFB | \ 144 + HACE_CMD_OFB | HACE_CMD_CTR) 145 + 146 + struct aspeed_hace_dev; 147 + 148 + typedef int (*aspeed_hace_fn_t)(struct aspeed_hace_dev *); 149 + 150 + struct aspeed_sg_list { 151 + __le32 len; 152 + __le32 phy_addr; 153 + }; 154 + 155 + struct aspeed_engine_hash { 156 + struct tasklet_struct done_task; 157 + unsigned long flags; 158 + struct ahash_request *req; 159 + 160 + /* input buffer */ 161 + void *ahash_src_addr; 162 + dma_addr_t ahash_src_dma_addr; 163 + 164 + dma_addr_t src_dma; 165 + dma_addr_t digest_dma; 166 + 167 + size_t src_length; 168 + 169 + /* callback func */ 170 + aspeed_hace_fn_t resume; 171 + aspeed_hace_fn_t dma_prepare; 172 + }; 173 + 174 + struct aspeed_sha_hmac_ctx { 175 + struct crypto_shash *shash; 176 + u8 ipad[SHA512_BLOCK_SIZE]; 177 + u8 opad[SHA512_BLOCK_SIZE]; 178 + }; 179 + 180 + struct aspeed_sham_ctx { 181 + struct crypto_engine_ctx enginectx; 182 + 183 + struct aspeed_hace_dev *hace_dev; 184 + unsigned long flags; /* hmac flag */ 185 + 186 + struct aspeed_sha_hmac_ctx base[0]; 187 + }; 188 + 189 + struct aspeed_sham_reqctx { 190 + unsigned long flags; /* final update flag should no use*/ 191 + unsigned long op; /* final or update */ 192 + u32 cmd; /* trigger cmd */ 193 + 194 + /* walk state */ 195 + struct scatterlist *src_sg; 196 + int src_nents; 197 + unsigned int offset; /* offset in current sg */ 198 + unsigned int total; /* per update length */ 199 + 200 + size_t digsize; 201 + size_t block_size; 202 + size_t ivsize; 203 + const __be32 *sha_iv; 204 + 205 + /* remain data buffer */ 206 + u8 buffer[SHA512_BLOCK_SIZE * 2]; 207 + dma_addr_t 
buffer_dma_addr; 208 + size_t bufcnt; /* buffer counter */ 209 + 210 + /* output buffer */ 211 + u8 digest[SHA512_DIGEST_SIZE] __aligned(64); 212 + dma_addr_t digest_dma_addr; 213 + u64 digcnt[2]; 214 + }; 215 + 216 + struct aspeed_engine_crypto { 217 + struct tasklet_struct done_task; 218 + unsigned long flags; 219 + struct skcipher_request *req; 220 + 221 + /* context buffer */ 222 + void *cipher_ctx; 223 + dma_addr_t cipher_ctx_dma; 224 + 225 + /* input buffer, could be single/scatter-gather lists */ 226 + void *cipher_addr; 227 + dma_addr_t cipher_dma_addr; 228 + 229 + /* output buffer, only used in scatter-gather lists */ 230 + void *dst_sg_addr; 231 + dma_addr_t dst_sg_dma_addr; 232 + 233 + /* callback func */ 234 + aspeed_hace_fn_t resume; 235 + }; 236 + 237 + struct aspeed_cipher_ctx { 238 + struct crypto_engine_ctx enginectx; 239 + 240 + struct aspeed_hace_dev *hace_dev; 241 + int key_len; 242 + u8 key[AES_MAX_KEYLENGTH]; 243 + 244 + /* callback func */ 245 + aspeed_hace_fn_t start; 246 + 247 + struct crypto_skcipher *fallback_tfm; 248 + }; 249 + 250 + struct aspeed_cipher_reqctx { 251 + int enc_cmd; 252 + int src_nents; 253 + int dst_nents; 254 + 255 + struct skcipher_request fallback_req; /* keep at the end */ 256 + }; 257 + 258 + struct aspeed_hace_dev { 259 + void __iomem *regs; 260 + struct device *dev; 261 + int irq; 262 + struct clk *clk; 263 + unsigned long version; 264 + 265 + struct crypto_engine *crypt_engine_hash; 266 + struct crypto_engine *crypt_engine_crypto; 267 + 268 + struct aspeed_engine_hash hash_engine; 269 + struct aspeed_engine_crypto crypto_engine; 270 + }; 271 + 272 + struct aspeed_hace_alg { 273 + struct aspeed_hace_dev *hace_dev; 274 + 275 + const char *alg_base; 276 + 277 + union { 278 + struct skcipher_alg skcipher; 279 + struct ahash_alg ahash; 280 + } alg; 281 + }; 282 + 283 + enum aspeed_version { 284 + AST2500_VERSION = 5, 285 + AST2600_VERSION 286 + }; 287 + 288 + #define ast_hace_write(hace, val, offset) \ 289 + writel((val), (hace)->regs + (offset)) 290 + #define ast_hace_read(hace, offset) \ 291 + readl((hace)->regs + (offset)) 292 + 293 + void aspeed_register_hace_hash_algs(struct aspeed_hace_dev *hace_dev); 294 + void aspeed_unregister_hace_hash_algs(struct aspeed_hace_dev *hace_dev); 295 + void aspeed_register_hace_crypto_algs(struct aspeed_hace_dev *hace_dev); 296 + void aspeed_unregister_hace_crypto_algs(struct aspeed_hace_dev *hace_dev); 297 + 298 + #endif
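These field macros are consumed by OR-ing them into a single command word; aspeed_sham_init() above, for instance, starts from HASH_CMD_ACC_MODE and folds in an algorithm select plus the byte-swap flag. A minimal sketch, reusing the defines from this header, of the resulting SHA-256 accumulate-mode word:

/*
 * Illustrative only: how a hash command word is composed from the
 * field macros above, matching what aspeed_sham_init() stores in
 * rctx->cmd for SHA-256.
 */
static inline u32 hace_sha256_acc_cmd(void)
{
	return HASH_CMD_ACC_MODE |   /* accumulative (multi-update) hashing */
	       HASH_CMD_SHA256   |   /* algorithm select */
	       HASH_CMD_SHA_SWAP;    /* byte-swap the digest words on write-out */
}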
+3 -3
drivers/crypto/axis/artpec6_crypto.c
··· 1712 1712 cipher_len = regk_crypto_key_256; 1713 1713 break; 1714 1714 default: 1715 - pr_err("%s: Invalid key length %d!\n", 1715 + pr_err("%s: Invalid key length %zu!\n", 1716 1716 MODULE_NAME, ctx->key_length); 1717 1717 return -EINVAL; 1718 1718 } ··· 2091 2091 return; 2092 2092 } 2093 2093 2094 - spin_lock_bh(&ac->queue_lock); 2094 + spin_lock(&ac->queue_lock); 2095 2095 2096 2096 list_for_each_entry_safe(req, n, &ac->pending, list) { 2097 2097 struct artpec6_crypto_dma_descriptors *dma = req->dma; ··· 2128 2128 2129 2129 artpec6_crypto_process_queue(ac, &complete_in_progress); 2130 2130 2131 - spin_unlock_bh(&ac->queue_lock); 2131 + spin_unlock(&ac->queue_lock); 2132 2132 2133 2133 /* Perform the completion callbacks without holding the queue lock 2134 2134 * to allow new request submissions from the callbacks.
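Two fixes are folded into the artpec6 hunk: the pr_err() format now matches its size_t argument (%zu instead of %d), and the completion callbacks are moved outside the queue lock, with plain spin_lock() sufficing because this path already runs in tasklet context. The unlocked-completion idiom generalizes; a self-contained sketch under hypothetical names:

#include <linux/list.h>
#include <linux/spinlock.h>

struct my_req {
	struct list_head list;
	void (*complete)(struct my_req *req);
};

struct my_ctx {
	spinlock_t queue_lock;
	struct list_head completed;
};

static void my_process_done(struct my_ctx *ctx)
{
	struct my_req *req, *n;
	LIST_HEAD(done);

	/* already in tasklet context, so plain spin_lock() is enough */
	spin_lock(&ctx->queue_lock);
	list_splice_init(&ctx->completed, &done);  /* steal the finished work */
	spin_unlock(&ctx->queue_lock);

	/* callbacks run unlocked: they may legally submit new requests */
	list_for_each_entry_safe(req, n, &done, list)
		req->complete(req);
}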
+2 -2
drivers/crypto/bcm/cipher.c
··· 1928 1928 /* SPU2 hardware does not compute hash of zero length data */ 1929 1929 if ((rctx->is_final == 1) && (rctx->total_todo == 0) && 1930 1930 (iproc_priv.spu.spu_type == SPU_TYPE_SPU2)) { 1931 - alg_name = crypto_tfm_alg_name(crypto_ahash_tfm(tfm)); 1931 + alg_name = crypto_ahash_alg_name(tfm); 1932 1932 flow_log("Doing %sfinal %s zero-len hash request in software\n", 1933 1933 rctx->is_final ? "" : "non-", alg_name); 1934 1934 err = do_shash((unsigned char *)alg_name, req->result, ··· 2029 2029 * supported by the hardware, we need to handle it in software 2030 2030 * by calling synchronous hash functions. 2031 2031 */ 2032 - alg_name = crypto_tfm_alg_name(crypto_ahash_tfm(tfm)); 2032 + alg_name = crypto_ahash_alg_name(tfm); 2033 2033 hash = crypto_alloc_shash(alg_name, 0, 0); 2034 2034 if (IS_ERR(hash)) { 2035 2035 ret = PTR_ERR(hash);
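crypto_ahash_alg_name() is the type-safe shorthand the bcm driver switches to here; it performs the same tfm unwrap internally. From memory (worth verifying against include/crypto/hash.h), the helper amounts to:

static inline const char *crypto_ahash_alg_name(struct crypto_ahash *tfm)
{
	return crypto_tfm_alg_name(crypto_ahash_tfm(tfm));
}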
+1 -1
drivers/crypto/bcm/cipher.h
··· 231 231 232 232 /* 233 233 * shash descriptor - needed to perform incremental hashing in 234 - * in software, when hw doesn't support it. 234 + * software, when hw doesn't support it. 235 235 */ 236 236 struct shash_desc *shash; 237 237
+1 -1
drivers/crypto/cavium/cpt/cpt_hw_types.h
··· 396 396 * Word0 397 397 * reserved_20_63:44 [63:20] Reserved. 398 398 * dbell_cnt:20 [19:0](R/W/H) Number of instruction queue 64-bit words to add 399 - * to the CPT instruction doorbell count. Readback value is the the 399 + * to the CPT instruction doorbell count. Readback value is the 400 400 * current number of pending doorbell requests. If counter overflows 401 401 * CPT()_VQ()_MISC_INT[DBELL_DOVF] is set. To reset the count back to 402 402 * zero, write one to clear CPT()_VQ()_MISC_INT_ENA_W1C[DBELL_DOVF],
+4 -2
drivers/crypto/cavium/cpt/cptpf_main.c
··· 253 253 const struct firmware *fw_entry; 254 254 struct device *dev = &cpt->pdev->dev; 255 255 struct ucode_header *ucode; 256 + unsigned int code_length; 256 257 struct microcode *mcode; 257 258 int j, ret = 0; 258 259 ··· 264 263 ucode = (struct ucode_header *)fw_entry->data; 265 264 mcode = &cpt->mcode[cpt->next_mc_idx]; 266 265 memcpy(mcode->version, (u8 *)fw_entry->data, CPT_UCODE_VERSION_SZ); 267 - mcode->code_size = ntohl(ucode->code_length) * 2; 268 - if (!mcode->code_size) { 266 + code_length = ntohl(ucode->code_length); 267 + if (code_length == 0 || code_length >= INT_MAX / 2) { 269 268 ret = -EINVAL; 270 269 goto fw_release; 271 270 } 271 + mcode->code_size = code_length * 2; 272 272 273 273 mcode->is_ae = is_ae; 274 274 mcode->core_mask = 0ULL;
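The cptpf_main.c fix bounds a length field taken from the firmware image before scaling it: ntohl(ucode->code_length) * 2 feeds later allocation and DMA setup, so a corrupt or hostile value could overflow the int-sized code_size. The guard, reduced to a standalone helper with hypothetical names:

#include <limits.h>
#include <stddef.h>
#include <stdint.h>

/* Reject an untrusted 32-bit count before doubling it into an int-sized field. */
static int checked_code_size(uint32_t code_length, size_t *out)
{
	if (code_length == 0 || code_length >= INT_MAX / 2)
		return -1;               /* zero or would overflow: refuse */
	*out = (size_t)code_length * 2;  /* now provably safe */
	return 0;
}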
+6 -24
drivers/crypto/cavium/zip/zip_crypto.c
··· 198 198 /* Legacy Compress framework start */ 199 199 int zip_alloc_comp_ctx_deflate(struct crypto_tfm *tfm) 200 200 { 201 - int ret; 202 201 struct zip_kernel_ctx *zip_ctx = crypto_tfm_ctx(tfm); 203 202 204 - ret = zip_ctx_init(zip_ctx, 0); 205 - 206 - return ret; 203 + return zip_ctx_init(zip_ctx, 0); 207 204 } 208 205 209 206 int zip_alloc_comp_ctx_lzs(struct crypto_tfm *tfm) 210 207 { 211 - int ret; 212 208 struct zip_kernel_ctx *zip_ctx = crypto_tfm_ctx(tfm); 213 209 214 - ret = zip_ctx_init(zip_ctx, 1); 215 - 216 - return ret; 210 + return zip_ctx_init(zip_ctx, 1); 217 211 } 218 212 219 213 void zip_free_comp_ctx(struct crypto_tfm *tfm) ··· 221 227 const u8 *src, unsigned int slen, 222 228 u8 *dst, unsigned int *dlen) 223 229 { 224 - int ret; 225 230 struct zip_kernel_ctx *zip_ctx = crypto_tfm_ctx(tfm); 226 231 227 - ret = zip_compress(src, slen, dst, dlen, zip_ctx); 228 - 229 - return ret; 232 + return zip_compress(src, slen, dst, dlen, zip_ctx); 230 233 } 231 234 232 235 int zip_comp_decompress(struct crypto_tfm *tfm, 233 236 const u8 *src, unsigned int slen, 234 237 u8 *dst, unsigned int *dlen) 235 238 { 236 - int ret; 237 239 struct zip_kernel_ctx *zip_ctx = crypto_tfm_ctx(tfm); 238 240 239 - ret = zip_decompress(src, slen, dst, dlen, zip_ctx); 240 - 241 - return ret; 241 + return zip_decompress(src, slen, dst, dlen, zip_ctx); 242 242 } /* Legacy compress framework end */ 243 243 244 244 /* SCOMP framework start */ ··· 286 298 const u8 *src, unsigned int slen, 287 299 u8 *dst, unsigned int *dlen, void *ctx) 288 300 { 289 - int ret; 290 301 struct zip_kernel_ctx *zip_ctx = ctx; 291 302 292 - ret = zip_compress(src, slen, dst, dlen, zip_ctx); 293 - 294 - return ret; 303 + return zip_compress(src, slen, dst, dlen, zip_ctx); 295 304 } 296 305 297 306 int zip_scomp_decompress(struct crypto_scomp *tfm, 298 307 const u8 *src, unsigned int slen, 299 308 u8 *dst, unsigned int *dlen, void *ctx) 300 309 { 301 - int ret; 302 310 struct zip_kernel_ctx *zip_ctx = ctx; 303 311 304 - ret = zip_decompress(src, slen, dst, dlen, zip_ctx); 305 - 306 - return ret; 312 + return zip_decompress(src, slen, dst, dlen, zip_ctx); 307 313 } /* SCOMP framework end */
+1 -4
drivers/crypto/ccp/ccp-crypto-des3.c
··· 64 64 struct ccp_des3_req_ctx *rctx = skcipher_request_ctx(req); 65 65 struct scatterlist *iv_sg = NULL; 66 66 unsigned int iv_len = 0; 67 - int ret; 68 67 69 68 if (!ctx->u.des3.key_len) 70 69 return -EINVAL; ··· 99 100 rctx->cmd.u.des3.src_len = req->cryptlen; 100 101 rctx->cmd.u.des3.dst = req->dst; 101 102 102 - ret = ccp_crypto_enqueue_request(&req->base, &rctx->cmd); 103 - 104 - return ret; 103 + return ccp_crypto_enqueue_request(&req->base, &rctx->cmd); 105 104 } 106 105 107 106 static int ccp_des3_encrypt(struct skcipher_request *req)
+5 -1
drivers/crypto/ccp/ccp-dmaengine.c
··· 641 641 for (i = 0; i < ccp->cmd_q_count; i++) { 642 642 chan = ccp->ccp_dma_chan + i; 643 643 dma_chan = &chan->dma_chan; 644 + 645 + if (dma_chan->client_count) 646 + dma_release_channel(dma_chan); 647 + 644 648 tasklet_kill(&chan->cleanup_tasklet); 645 649 list_del_rcu(&dma_chan->device_node); 646 650 } ··· 770 766 if (!dmaengine) 771 767 return; 772 768 773 - dma_async_device_unregister(dma_dev); 774 769 ccp_dma_release(ccp); 770 + dma_async_device_unregister(dma_dev); 775 771 776 772 kmem_cache_destroy(ccp->dma_desc_cache); 777 773 kmem_cache_destroy(ccp->dma_cmd_cache);
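The ccp-dmaengine change is a teardown-ordering fix: channels still held by clients are released first, and ccp_dma_release() now runs before dma_async_device_unregister() so that no live channel outlasts its parent device. A compressed sketch of the per-channel ordering (hypothetical wrapper; the fields match struct dma_chan):

#include <linux/dmaengine.h>
#include <linux/interrupt.h>

static void my_chan_cleanup(struct dma_chan *chan,
			    struct tasklet_struct *cleanup_tasklet)
{
	/* 1. drop any client hold so the channel is truly idle */
	if (chan->client_count)
		dma_release_channel(chan);

	/* 2. stop deferred work that could still touch the channel */
	tasklet_kill(cleanup_tasklet);

	/* 3. unlink it; only after every channel is quiesced may the
	 *    caller run dma_async_device_unregister() on the device
	 */
	list_del_rcu(&chan->device_node);
}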
+51 -27
drivers/crypto/ccp/sev-dev.c
··· 211 211 if (IS_ERR(fp)) { 212 212 int ret = PTR_ERR(fp); 213 213 214 - dev_err(sev->dev, 215 - "SEV: could not open %s for read, error %d\n", 216 - init_ex_path, ret); 214 + if (ret == -ENOENT) { 215 + dev_info(sev->dev, 216 + "SEV: %s does not exist and will be created later.\n", 217 + init_ex_path); 218 + ret = 0; 219 + } else { 220 + dev_err(sev->dev, 221 + "SEV: could not open %s for read, error %d\n", 222 + init_ex_path, ret); 223 + } 217 224 return ret; 218 225 } 219 226 220 227 nread = kernel_read(fp, sev_init_ex_buffer, NV_LENGTH, NULL); 221 228 if (nread != NV_LENGTH) { 222 - dev_err(sev->dev, 223 - "SEV: failed to read %u bytes to non volatile memory area, ret %ld\n", 229 + dev_info(sev->dev, 230 + "SEV: could not read %u bytes to non volatile memory area, ret %ld\n", 224 231 NV_LENGTH, nread); 225 - return -EIO; 226 232 } 227 233 228 234 dev_dbg(sev->dev, "SEV: read %ld bytes from NV file\n", nread); ··· 237 231 return 0; 238 232 } 239 233 240 - static void sev_write_init_ex_file(void) 234 + static int sev_write_init_ex_file(void) 241 235 { 242 236 struct sev_device *sev = psp_master->sev_data; 243 237 struct file *fp; ··· 247 241 lockdep_assert_held(&sev_cmd_mutex); 248 242 249 243 if (!sev_init_ex_buffer) 250 - return; 244 + return 0; 251 245 252 246 fp = open_file_as_root(init_ex_path, O_CREAT | O_WRONLY, 0600); 253 247 if (IS_ERR(fp)) { 248 + int ret = PTR_ERR(fp); 249 + 254 250 dev_err(sev->dev, 255 - "SEV: could not open file for write, error %ld\n", 256 - PTR_ERR(fp)); 257 - return; 251 + "SEV: could not open file for write, error %d\n", 252 + ret); 253 + return ret; 258 254 } 259 255 260 256 nwrite = kernel_write(fp, sev_init_ex_buffer, NV_LENGTH, &offset); ··· 267 259 dev_err(sev->dev, 268 260 "SEV: failed to write %u bytes to non volatile memory area, ret %ld\n", 269 261 NV_LENGTH, nwrite); 270 - return; 262 + return -EIO; 271 263 } 272 264 273 265 dev_dbg(sev->dev, "SEV: write successful to NV file\n"); 266 + 267 + return 0; 274 268 } 275 269 276 - static void sev_write_init_ex_file_if_required(int cmd_id) 270 + static int sev_write_init_ex_file_if_required(int cmd_id) 277 271 { 278 272 lockdep_assert_held(&sev_cmd_mutex); 279 273 280 274 if (!sev_init_ex_buffer) 281 - return; 275 + return 0; 282 276 283 277 /* 284 278 * Only a few platform commands modify the SPI/NV area, but none of the ··· 295 285 case SEV_CMD_PEK_GEN: 296 286 break; 297 287 default: 298 - return; 288 + return 0; 299 289 } 300 290 301 - sev_write_init_ex_file(); 291 + return sev_write_init_ex_file(); 302 292 } 303 293 304 294 static int __sev_do_cmd_locked(int cmd, void *data, int *psp_ret) ··· 371 361 cmd, reg & PSP_CMDRESP_ERR_MASK); 372 362 ret = -EIO; 373 363 } else { 374 - sev_write_init_ex_file_if_required(cmd); 364 + ret = sev_write_init_ex_file_if_required(cmd); 375 365 } 376 366 377 367 print_hex_dump_debug("(out): ", DUMP_PREFIX_OFFSET, 16, 2, data, ··· 420 410 static int __sev_init_ex_locked(int *error) 421 411 { 422 412 struct sev_data_init_ex data; 423 - int ret; 424 413 425 414 memset(&data, 0, sizeof(data)); 426 415 data.length = sizeof(data); 427 416 data.nv_address = __psp_pa(sev_init_ex_buffer); 428 417 data.nv_len = NV_LENGTH; 429 - 430 - ret = sev_read_init_ex_file(); 431 - if (ret) 432 - return ret; 433 418 434 419 if (sev_es_tmr) { 435 420 /* ··· 444 439 { 445 440 struct psp_device *psp = psp_master; 446 441 struct sev_device *sev; 447 - int rc, psp_ret = -1; 442 + int rc = 0, psp_ret = -1; 448 443 int (*init_function)(int *error); 449 444 450 445 if (!psp || 
!psp->sev_data) ··· 455 450 if (sev->state == SEV_STATE_INIT) 456 451 return 0; 457 452 458 - init_function = sev_init_ex_buffer ? __sev_init_ex_locked : 459 - __sev_init_locked; 453 + if (sev_init_ex_buffer) { 454 + init_function = __sev_init_ex_locked; 455 + rc = sev_read_init_ex_file(); 456 + if (rc) 457 + return rc; 458 + } else { 459 + init_function = __sev_init_locked; 460 + } 461 + 460 462 rc = init_function(&psp_ret); 461 463 if (rc && psp_ret == SEV_RET_SECURE_DATA_INVALID) { 462 464 /* ··· 756 744 struct page *p; 757 745 u64 data_size; 758 746 747 + if (!sev_version_greater_or_equal(0, 15)) { 748 + dev_dbg(dev, "DOWNLOAD_FIRMWARE not supported\n"); 749 + return -1; 750 + } 751 + 759 752 if (sev_get_firmware(dev, &firmware) == -ENOENT) { 760 753 dev_dbg(dev, "No SEV firmware file present\n"); 761 754 return -1; ··· 793 776 data->len = firmware->size; 794 777 795 778 ret = sev_do_cmd(SEV_CMD_DOWNLOAD_FIRMWARE, data, &error); 779 + 780 + /* 781 + * A quirk for fixing the committed TCB version, when upgrading from 782 + * earlier firmware version than 1.50. 783 + */ 784 + if (!ret && !sev_version_greater_or_equal(1, 50)) 785 + ret = sev_do_cmd(SEV_CMD_DOWNLOAD_FIRMWARE, data, &error); 786 + 796 787 if (ret) 797 788 dev_dbg(dev, "Failed to update SEV firmware: %#x\n", error); 798 789 else ··· 1310 1285 if (sev_get_api_version()) 1311 1286 goto err; 1312 1287 1313 - if (sev_version_greater_or_equal(0, 15) && 1314 - sev_update_firmware(sev->dev) == 0) 1288 + if (sev_update_firmware(sev->dev) == 0) 1315 1289 sev_get_api_version(); 1316 1290 1317 1291 /* If an init_ex_path is provided rely on INIT_EX for PSP initialization
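The sev-dev rework changes the NV-file contract to match the documented behavior: a missing init_ex file (-ENOENT on open) or a short read is now tolerated, since the PSP output recreates the contents, and the write helpers were converted from void to int so their failures propagate out of __sev_do_cmd_locked(). The tolerant-open step, reduced to a hypothetical wrapper (the real logic is inline in sev_read_init_ex_file(), whose open flags are elided from this hunk):

#include <linux/err.h>
#include <linux/fs.h>

static int sev_nv_open_for_read(const char *path, struct file **fpp)
{
	struct file *fp = open_file_as_root(path, O_RDONLY, 0);

	if (IS_ERR(fp)) {
		int ret = PTR_ERR(fp);

		/* no file yet: fine, it is created on the first NV write */
		return (ret == -ENOENT) ? 0 : ret;
	}

	*fpp = fp;	/* caller reads; a short read is logged, not fatal */
	return 0;
}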
+1 -1
drivers/crypto/ccree/cc_buffer_mgr.c
··· 274 274 } 275 275 276 276 ret = dma_map_sg(dev, sg, *nents, direction); 277 - if (dma_mapping_error(dev, ret)) { 277 + if (!ret) { 278 278 *nents = 0; 279 279 dev_err(dev, "dma_map_sg() sg buffer failed %d\n", ret); 280 280 return -ENOMEM;
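This one-line ccree fix corrects a mismatched error convention: dma_map_sg() returns the number of mapped entries, with 0 meaning failure, whereas dma_mapping_error() is only defined for the dma_addr_t handle returned by dma_map_single() or dma_map_page(). A side-by-side sketch of the two conventions (dev, sg, nents, buf and len are assumed to be supplied by the caller):

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>
#include <linux/errno.h>

/* Illustrative sketch of the two DMA-mapping error conventions. */
static int dma_map_examples(struct device *dev, struct scatterlist *sg,
			    int nents, void *buf, size_t len)
{
	dma_addr_t addr;
	int mapped;

	/* dma_map_single(): validate the returned handle */
	addr = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, addr))
		return -ENOMEM;

	/* dma_map_sg(): returns the mapped entry count, 0 on failure */
	mapped = dma_map_sg(dev, sg, nents, DMA_TO_DEVICE);
	if (!mapped) {
		dma_unmap_single(dev, addr, len, DMA_TO_DEVICE);
		return -ENOMEM;
	}

	return 0;
}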
+3 -5
drivers/crypto/hisilicon/hpre/hpre.h
··· 22 22 HPRE_CLUSTER0, 23 23 HPRE_CLUSTER1, 24 24 HPRE_CLUSTER2, 25 - HPRE_CLUSTER3 25 + HPRE_CLUSTER3, 26 + HPRE_CLUSTERS_NUM_MAX 26 27 }; 27 28 28 29 enum hpre_ctrl_dbgfs_file { ··· 43 42 HPRE_DFX_FILE_NUM 44 43 }; 45 44 46 - #define HPRE_CLUSTERS_NUM_V2 (HPRE_CLUSTER3 + 1) 47 - #define HPRE_CLUSTERS_NUM_V3 1 48 - #define HPRE_CLUSTERS_NUM_MAX HPRE_CLUSTERS_NUM_V2 49 45 #define HPRE_DEBUGFS_FILE_NUM (HPRE_DEBUG_FILE_NUM + HPRE_CLUSTERS_NUM_MAX - 1) 50 46 51 47 struct hpre_debugfs_file { ··· 103 105 struct hisi_qp *hpre_create_qp(u8 type); 104 106 int hpre_algs_register(struct hisi_qm *qm); 105 107 void hpre_algs_unregister(struct hisi_qm *qm); 106 - 108 + bool hpre_check_alg_support(struct hisi_qm *qm, u32 alg); 107 109 #endif
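Folding HPRE_CLUSTERS_NUM_MAX into the enum is the usual sentinel-count idiom: the trailing enumerator always equals the number of real entries, so the per-version HPRE_CLUSTERS_NUM_V2/V3 defines can go while HPRE_DEBUGFS_FILE_NUM keeps working unchanged. A standalone sketch of the idiom with hypothetical names:

#include <stdio.h>

/* The trailing enumerator doubles as the element count; adding a
 * CLUSTER4 before it would automatically grow every array sized
 * with CLUSTERS_NUM_MAX. */
enum cluster_id {
	CLUSTER0,
	CLUSTER1,
	CLUSTER2,
	CLUSTER3,
	CLUSTERS_NUM_MAX	/* == 4, one past the last valid id */
};

int main(void)
{
	int cluster_ctrl[CLUSTERS_NUM_MAX] = { 0 };

	printf("clusters: %d, array slots: %zu\n", CLUSTERS_NUM_MAX,
	       sizeof(cluster_ctrl) / sizeof(cluster_ctrl[0]));
	return 0;
}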
+165 -95
drivers/crypto/hisilicon/hpre/hpre_crypto.c
··· 51 51 #define HPRE_ECC_HW256_KSZ_B 32 52 52 #define HPRE_ECC_HW384_KSZ_B 48 53 53 54 + /* capability register mask of driver */ 55 + #define HPRE_DRV_RSA_MASK_CAP BIT(0) 56 + #define HPRE_DRV_DH_MASK_CAP BIT(1) 57 + #define HPRE_DRV_ECDH_MASK_CAP BIT(2) 58 + #define HPRE_DRV_X25519_MASK_CAP BIT(5) 59 + 54 60 typedef void (*hpre_cb)(struct hpre_ctx *ctx, void *sqe); 55 61 56 62 struct hpre_rsa_ctx { ··· 153 147 int id; 154 148 155 149 spin_lock_irqsave(&ctx->req_lock, flags); 156 - id = idr_alloc(&ctx->req_idr, NULL, 0, QM_Q_DEPTH, GFP_ATOMIC); 150 + id = idr_alloc(&ctx->req_idr, NULL, 0, ctx->qp->sq_depth, GFP_ATOMIC); 157 151 spin_unlock_irqrestore(&ctx->req_lock, flags); 158 152 159 153 return id; ··· 494 488 qp->qp_ctx = ctx; 495 489 qp->req_cb = hpre_alg_cb; 496 490 497 - ret = hpre_ctx_set(ctx, qp, QM_Q_DEPTH); 491 + ret = hpre_ctx_set(ctx, qp, qp->sq_depth); 498 492 if (ret) 499 493 hpre_stop_qp_and_put(qp); 500 494 ··· 2008 2002 }, 2009 2003 }; 2010 2004 2011 - static struct kpp_alg ecdh_nist_p192 = { 2012 - .set_secret = hpre_ecdh_set_secret, 2013 - .generate_public_key = hpre_ecdh_compute_value, 2014 - .compute_shared_secret = hpre_ecdh_compute_value, 2015 - .max_size = hpre_ecdh_max_size, 2016 - .init = hpre_ecdh_nist_p192_init_tfm, 2017 - .exit = hpre_ecdh_exit_tfm, 2018 - .reqsize = sizeof(struct hpre_asym_request) + HPRE_ALIGN_SZ, 2019 - .base = { 2020 - .cra_ctxsize = sizeof(struct hpre_ctx), 2021 - .cra_priority = HPRE_CRYPTO_ALG_PRI, 2022 - .cra_name = "ecdh-nist-p192", 2023 - .cra_driver_name = "hpre-ecdh-nist-p192", 2024 - .cra_module = THIS_MODULE, 2025 - }, 2026 - }; 2027 - 2028 - static struct kpp_alg ecdh_nist_p256 = { 2029 - .set_secret = hpre_ecdh_set_secret, 2030 - .generate_public_key = hpre_ecdh_compute_value, 2031 - .compute_shared_secret = hpre_ecdh_compute_value, 2032 - .max_size = hpre_ecdh_max_size, 2033 - .init = hpre_ecdh_nist_p256_init_tfm, 2034 - .exit = hpre_ecdh_exit_tfm, 2035 - .reqsize = sizeof(struct hpre_asym_request) + HPRE_ALIGN_SZ, 2036 - .base = { 2037 - .cra_ctxsize = sizeof(struct hpre_ctx), 2038 - .cra_priority = HPRE_CRYPTO_ALG_PRI, 2039 - .cra_name = "ecdh-nist-p256", 2040 - .cra_driver_name = "hpre-ecdh-nist-p256", 2041 - .cra_module = THIS_MODULE, 2042 - }, 2043 - }; 2044 - 2045 - static struct kpp_alg ecdh_nist_p384 = { 2046 - .set_secret = hpre_ecdh_set_secret, 2047 - .generate_public_key = hpre_ecdh_compute_value, 2048 - .compute_shared_secret = hpre_ecdh_compute_value, 2049 - .max_size = hpre_ecdh_max_size, 2050 - .init = hpre_ecdh_nist_p384_init_tfm, 2051 - .exit = hpre_ecdh_exit_tfm, 2052 - .reqsize = sizeof(struct hpre_asym_request) + HPRE_ALIGN_SZ, 2053 - .base = { 2054 - .cra_ctxsize = sizeof(struct hpre_ctx), 2055 - .cra_priority = HPRE_CRYPTO_ALG_PRI, 2056 - .cra_name = "ecdh-nist-p384", 2057 - .cra_driver_name = "hpre-ecdh-nist-p384", 2058 - .cra_module = THIS_MODULE, 2059 - }, 2005 + static struct kpp_alg ecdh_curves[] = { 2006 + { 2007 + .set_secret = hpre_ecdh_set_secret, 2008 + .generate_public_key = hpre_ecdh_compute_value, 2009 + .compute_shared_secret = hpre_ecdh_compute_value, 2010 + .max_size = hpre_ecdh_max_size, 2011 + .init = hpre_ecdh_nist_p192_init_tfm, 2012 + .exit = hpre_ecdh_exit_tfm, 2013 + .reqsize = sizeof(struct hpre_asym_request) + HPRE_ALIGN_SZ, 2014 + .base = { 2015 + .cra_ctxsize = sizeof(struct hpre_ctx), 2016 + .cra_priority = HPRE_CRYPTO_ALG_PRI, 2017 + .cra_name = "ecdh-nist-p192", 2018 + .cra_driver_name = "hpre-ecdh-nist-p192", 2019 + .cra_module = THIS_MODULE, 2020 + }, 2021 + }, { 2022 + 
.set_secret = hpre_ecdh_set_secret, 2023 + .generate_public_key = hpre_ecdh_compute_value, 2024 + .compute_shared_secret = hpre_ecdh_compute_value, 2025 + .max_size = hpre_ecdh_max_size, 2026 + .init = hpre_ecdh_nist_p256_init_tfm, 2027 + .exit = hpre_ecdh_exit_tfm, 2028 + .reqsize = sizeof(struct hpre_asym_request) + HPRE_ALIGN_SZ, 2029 + .base = { 2030 + .cra_ctxsize = sizeof(struct hpre_ctx), 2031 + .cra_priority = HPRE_CRYPTO_ALG_PRI, 2032 + .cra_name = "ecdh-nist-p256", 2033 + .cra_driver_name = "hpre-ecdh-nist-p256", 2034 + .cra_module = THIS_MODULE, 2035 + }, 2036 + }, { 2037 + .set_secret = hpre_ecdh_set_secret, 2038 + .generate_public_key = hpre_ecdh_compute_value, 2039 + .compute_shared_secret = hpre_ecdh_compute_value, 2040 + .max_size = hpre_ecdh_max_size, 2041 + .init = hpre_ecdh_nist_p384_init_tfm, 2042 + .exit = hpre_ecdh_exit_tfm, 2043 + .reqsize = sizeof(struct hpre_asym_request) + HPRE_ALIGN_SZ, 2044 + .base = { 2045 + .cra_ctxsize = sizeof(struct hpre_ctx), 2046 + .cra_priority = HPRE_CRYPTO_ALG_PRI, 2047 + .cra_name = "ecdh-nist-p384", 2048 + .cra_driver_name = "hpre-ecdh-nist-p384", 2049 + .cra_module = THIS_MODULE, 2050 + }, 2051 + } 2060 2052 }; 2061 2053 2062 2054 static struct kpp_alg curve25519_alg = { ··· 2074 2070 }, 2075 2071 }; 2076 2072 2077 - 2078 - static int hpre_register_ecdh(void) 2073 + static int hpre_register_rsa(struct hisi_qm *qm) 2079 2074 { 2080 2075 int ret; 2081 2076 2082 - ret = crypto_register_kpp(&ecdh_nist_p192); 2077 + if (!hpre_check_alg_support(qm, HPRE_DRV_RSA_MASK_CAP)) 2078 + return 0; 2079 + 2080 + rsa.base.cra_flags = 0; 2081 + ret = crypto_register_akcipher(&rsa); 2083 2082 if (ret) 2084 - return ret; 2083 + dev_err(&qm->pdev->dev, "failed to register rsa (%d)!\n", ret); 2085 2084 2086 - ret = crypto_register_kpp(&ecdh_nist_p256); 2087 - if (ret) 2088 - goto unregister_ecdh_p192; 2089 - 2090 - ret = crypto_register_kpp(&ecdh_nist_p384); 2091 - if (ret) 2092 - goto unregister_ecdh_p256; 2093 - 2094 - return 0; 2095 - 2096 - unregister_ecdh_p256: 2097 - crypto_unregister_kpp(&ecdh_nist_p256); 2098 - unregister_ecdh_p192: 2099 - crypto_unregister_kpp(&ecdh_nist_p192); 2100 2085 return ret; 2101 2086 } 2102 2087 2103 - static void hpre_unregister_ecdh(void) 2088 + static void hpre_unregister_rsa(struct hisi_qm *qm) 2104 2089 { 2105 - crypto_unregister_kpp(&ecdh_nist_p384); 2106 - crypto_unregister_kpp(&ecdh_nist_p256); 2107 - crypto_unregister_kpp(&ecdh_nist_p192); 2090 + if (!hpre_check_alg_support(qm, HPRE_DRV_RSA_MASK_CAP)) 2091 + return; 2092 + 2093 + crypto_unregister_akcipher(&rsa); 2094 + } 2095 + 2096 + static int hpre_register_dh(struct hisi_qm *qm) 2097 + { 2098 + int ret; 2099 + 2100 + if (!hpre_check_alg_support(qm, HPRE_DRV_DH_MASK_CAP)) 2101 + return 0; 2102 + 2103 + ret = crypto_register_kpp(&dh); 2104 + if (ret) 2105 + dev_err(&qm->pdev->dev, "failed to register dh (%d)!\n", ret); 2106 + 2107 + return ret; 2108 + } 2109 + 2110 + static void hpre_unregister_dh(struct hisi_qm *qm) 2111 + { 2112 + if (!hpre_check_alg_support(qm, HPRE_DRV_DH_MASK_CAP)) 2113 + return; 2114 + 2115 + crypto_unregister_kpp(&dh); 2116 + } 2117 + 2118 + static int hpre_register_ecdh(struct hisi_qm *qm) 2119 + { 2120 + int ret, i; 2121 + 2122 + if (!hpre_check_alg_support(qm, HPRE_DRV_ECDH_MASK_CAP)) 2123 + return 0; 2124 + 2125 + for (i = 0; i < ARRAY_SIZE(ecdh_curves); i++) { 2126 + ret = crypto_register_kpp(&ecdh_curves[i]); 2127 + if (ret) { 2128 + dev_err(&qm->pdev->dev, "failed to register %s (%d)!\n", 2129 + ecdh_curves[i].base.cra_name, 
ret); 2130 + goto unreg_kpp; 2131 + } 2132 + } 2133 + 2134 + return 0; 2135 + 2136 + unreg_kpp: 2137 + for (--i; i >= 0; --i) 2138 + crypto_unregister_kpp(&ecdh_curves[i]); 2139 + 2140 + return ret; 2141 + } 2142 + 2143 + static void hpre_unregister_ecdh(struct hisi_qm *qm) 2144 + { 2145 + int i; 2146 + 2147 + if (!hpre_check_alg_support(qm, HPRE_DRV_ECDH_MASK_CAP)) 2148 + return; 2149 + 2150 + for (i = ARRAY_SIZE(ecdh_curves) - 1; i >= 0; --i) 2151 + crypto_unregister_kpp(&ecdh_curves[i]); 2152 + } 2153 + 2154 + static int hpre_register_x25519(struct hisi_qm *qm) 2155 + { 2156 + int ret; 2157 + 2158 + if (!hpre_check_alg_support(qm, HPRE_DRV_X25519_MASK_CAP)) 2159 + return 0; 2160 + 2161 + ret = crypto_register_kpp(&curve25519_alg); 2162 + if (ret) 2163 + dev_err(&qm->pdev->dev, "failed to register x25519 (%d)!\n", ret); 2164 + 2165 + return ret; 2166 + } 2167 + 2168 + static void hpre_unregister_x25519(struct hisi_qm *qm) 2169 + { 2170 + if (!hpre_check_alg_support(qm, HPRE_DRV_X25519_MASK_CAP)) 2171 + return; 2172 + 2173 + crypto_unregister_kpp(&curve25519_alg); 2108 2174 } 2109 2175 2110 2176 int hpre_algs_register(struct hisi_qm *qm) 2111 2177 { 2112 2178 int ret; 2113 2179 2114 - rsa.base.cra_flags = 0; 2115 - ret = crypto_register_akcipher(&rsa); 2180 + ret = hpre_register_rsa(qm); 2116 2181 if (ret) 2117 2182 return ret; 2118 2183 2119 - ret = crypto_register_kpp(&dh); 2184 + ret = hpre_register_dh(qm); 2120 2185 if (ret) 2121 2186 goto unreg_rsa; 2122 2187 2123 - if (qm->ver >= QM_HW_V3) { 2124 - ret = hpre_register_ecdh(); 2125 - if (ret) 2126 - goto unreg_dh; 2127 - ret = crypto_register_kpp(&curve25519_alg); 2128 - if (ret) 2129 - goto unreg_ecdh; 2130 - } 2131 - return 0; 2188 + ret = hpre_register_ecdh(qm); 2189 + if (ret) 2190 + goto unreg_dh; 2191 + 2192 + ret = hpre_register_x25519(qm); 2193 + if (ret) 2194 + goto unreg_ecdh; 2195 + 2196 + return ret; 2132 2197 2133 2198 unreg_ecdh: 2134 - hpre_unregister_ecdh(); 2199 + hpre_unregister_ecdh(qm); 2135 2200 unreg_dh: 2136 - crypto_unregister_kpp(&dh); 2201 + hpre_unregister_dh(qm); 2137 2202 unreg_rsa: 2138 - crypto_unregister_akcipher(&rsa); 2203 + hpre_unregister_rsa(qm); 2139 2204 return ret; 2140 2205 } 2141 2206 2142 2207 void hpre_algs_unregister(struct hisi_qm *qm) 2143 2208 { 2144 - if (qm->ver >= QM_HW_V3) { 2145 - crypto_unregister_kpp(&curve25519_alg); 2146 - hpre_unregister_ecdh(); 2147 - } 2148 - 2149 - crypto_unregister_kpp(&dh); 2150 - crypto_unregister_akcipher(&rsa); 2209 + hpre_unregister_x25519(qm); 2210 + hpre_unregister_ecdh(qm); 2211 + hpre_unregister_dh(qm); 2212 + hpre_unregister_rsa(qm); 2151 2213 }
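The crypto.c rewrite collapses three hand-unrolled kpp registrations into the ecdh_curves[] array walked by hpre_register_ecdh(), gates each algorithm family on its capability bit via hpre_check_alg_support(), and unwinds in reverse order on failure. A standalone sketch of that register-loop shape, with register_one()/unregister_one() as hypothetical stand-ins for crypto_register_kpp()/crypto_unregister_kpp():

#include <stdio.h>

/* Hypothetical stand-ins for the crypto API registration calls. */
static int register_one(int i)
{
	printf("register %d\n", i);
	return i == 2 ? -1 : 0;		/* simulate failure on entry 2 */
}

static void unregister_one(int i)
{
	printf("unregister %d\n", i);
}

/* Register n entries; on the first failure, unregister the ones
 * already registered in reverse order - the same shape as
 * hpre_register_ecdh()/hpre_unregister_ecdh() above. */
static int register_all(int n)
{
	int i, ret;

	for (i = 0; i < n; i++) {
		ret = register_one(i);
		if (ret)
			goto unwind;
	}
	return 0;

unwind:
	for (--i; i >= 0; --i)
		unregister_one(i);
	return ret;
}

int main(void)
{
	return register_all(4) ? 1 : 0;
}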
+183 -33
drivers/crypto/hisilicon/hpre/hpre_main.c
··· 53 53 #define HPRE_CORE_IS_SCHD_OFFSET 0x90 54 54 55 55 #define HPRE_RAS_CE_ENB 0x301410 56 - #define HPRE_HAC_RAS_CE_ENABLE (BIT(0) | BIT(22) | BIT(23)) 57 56 #define HPRE_RAS_NFE_ENB 0x301414 58 - #define HPRE_HAC_RAS_NFE_ENABLE 0x3ffffe 59 57 #define HPRE_RAS_FE_ENB 0x301418 60 58 #define HPRE_OOO_SHUTDOWN_SEL 0x301a3c 61 59 #define HPRE_HAC_RAS_FE_ENABLE 0 ··· 77 79 #define HPRE_QM_AXI_CFG_MASK GENMASK(15, 0) 78 80 #define HPRE_QM_VFG_AX_MASK GENMASK(7, 0) 79 81 #define HPRE_BD_USR_MASK GENMASK(1, 0) 80 - #define HPRE_CLUSTER_CORE_MASK_V2 GENMASK(3, 0) 81 - #define HPRE_CLUSTER_CORE_MASK_V3 GENMASK(7, 0) 82 82 #define HPRE_PREFETCH_CFG 0x301130 83 83 #define HPRE_SVA_PREFTCH_DFX 0x30115C 84 84 #define HPRE_PREFETCH_ENABLE (~(BIT(0) | BIT(30))) ··· 118 122 #define HPRE_DFX_COMMON2_LEN 0xE 119 123 #define HPRE_DFX_CORE_LEN 0x43 120 124 125 + #define HPRE_DEV_ALG_MAX_LEN 256 126 + 121 127 static const char hpre_name[] = "hisi_hpre"; 122 128 static struct dentry *hpre_debugfs_root; 123 129 static const struct pci_device_id hpre_dev_ids[] = { ··· 135 137 const char *msg; 136 138 }; 137 139 140 + struct hpre_dev_alg { 141 + u32 alg_msk; 142 + const char *alg; 143 + }; 144 + 145 + static const struct hpre_dev_alg hpre_dev_algs[] = { 146 + { 147 + .alg_msk = BIT(0), 148 + .alg = "rsa\n" 149 + }, { 150 + .alg_msk = BIT(1), 151 + .alg = "dh\n" 152 + }, { 153 + .alg_msk = BIT(2), 154 + .alg = "ecdh\n" 155 + }, { 156 + .alg_msk = BIT(3), 157 + .alg = "ecdsa\n" 158 + }, { 159 + .alg_msk = BIT(4), 160 + .alg = "sm2\n" 161 + }, { 162 + .alg_msk = BIT(5), 163 + .alg = "x25519\n" 164 + }, { 165 + .alg_msk = BIT(6), 166 + .alg = "x448\n" 167 + }, { 168 + /* sentinel */ 169 + } 170 + }; 171 + 138 172 static struct hisi_qm_list hpre_devices = { 139 173 .register_to_crypto = hpre_algs_register, 140 174 .unregister_from_crypto = hpre_algs_unregister, ··· 175 145 static const char * const hpre_debug_file_name[] = { 176 146 [HPRE_CLEAR_ENABLE] = "rdclr_en", 177 147 [HPRE_CLUSTER_CTRL] = "cluster_ctrl", 148 + }; 149 + 150 + enum hpre_cap_type { 151 + HPRE_QM_NFE_MASK_CAP, 152 + HPRE_QM_RESET_MASK_CAP, 153 + HPRE_QM_OOO_SHUTDOWN_MASK_CAP, 154 + HPRE_QM_CE_MASK_CAP, 155 + HPRE_NFE_MASK_CAP, 156 + HPRE_RESET_MASK_CAP, 157 + HPRE_OOO_SHUTDOWN_MASK_CAP, 158 + HPRE_CE_MASK_CAP, 159 + HPRE_CLUSTER_NUM_CAP, 160 + HPRE_CORE_TYPE_NUM_CAP, 161 + HPRE_CORE_NUM_CAP, 162 + HPRE_CLUSTER_CORE_NUM_CAP, 163 + HPRE_CORE_ENABLE_BITMAP_CAP, 164 + HPRE_DRV_ALG_BITMAP_CAP, 165 + HPRE_DEV_ALG_BITMAP_CAP, 166 + HPRE_CORE1_ALG_BITMAP_CAP, 167 + HPRE_CORE2_ALG_BITMAP_CAP, 168 + HPRE_CORE3_ALG_BITMAP_CAP, 169 + HPRE_CORE4_ALG_BITMAP_CAP, 170 + HPRE_CORE5_ALG_BITMAP_CAP, 171 + HPRE_CORE6_ALG_BITMAP_CAP, 172 + HPRE_CORE7_ALG_BITMAP_CAP, 173 + HPRE_CORE8_ALG_BITMAP_CAP, 174 + HPRE_CORE9_ALG_BITMAP_CAP, 175 + HPRE_CORE10_ALG_BITMAP_CAP 176 + }; 177 + 178 + static const struct hisi_qm_cap_info hpre_basic_info[] = { 179 + {HPRE_QM_NFE_MASK_CAP, 0x3124, 0, GENMASK(31, 0), 0x0, 0x1C37, 0x7C37}, 180 + {HPRE_QM_RESET_MASK_CAP, 0x3128, 0, GENMASK(31, 0), 0x0, 0xC37, 0x6C37}, 181 + {HPRE_QM_OOO_SHUTDOWN_MASK_CAP, 0x3128, 0, GENMASK(31, 0), 0x0, 0x4, 0x6C37}, 182 + {HPRE_QM_CE_MASK_CAP, 0x312C, 0, GENMASK(31, 0), 0x0, 0x8, 0x8}, 183 + {HPRE_NFE_MASK_CAP, 0x3130, 0, GENMASK(31, 0), 0x0, 0x3FFFFE, 0xFFFFFE}, 184 + {HPRE_RESET_MASK_CAP, 0x3134, 0, GENMASK(31, 0), 0x0, 0x3FFFFE, 0xBFFFFE}, 185 + {HPRE_OOO_SHUTDOWN_MASK_CAP, 0x3134, 0, GENMASK(31, 0), 0x0, 0x22, 0xBFFFFE}, 186 + {HPRE_CE_MASK_CAP, 0x3138, 0, GENMASK(31, 0), 0x0, 0x1, 0x1}, 187 + 
{HPRE_CLUSTER_NUM_CAP, 0x313c, 20, GENMASK(3, 0), 0x0, 0x4, 0x1}, 188 + {HPRE_CORE_TYPE_NUM_CAP, 0x313c, 16, GENMASK(3, 0), 0x0, 0x2, 0x2}, 189 + {HPRE_CORE_NUM_CAP, 0x313c, 8, GENMASK(7, 0), 0x0, 0x8, 0xA}, 190 + {HPRE_CLUSTER_CORE_NUM_CAP, 0x313c, 0, GENMASK(7, 0), 0x0, 0x2, 0xA}, 191 + {HPRE_CORE_ENABLE_BITMAP_CAP, 0x3140, 0, GENMASK(31, 0), 0x0, 0xF, 0x3FF}, 192 + {HPRE_DRV_ALG_BITMAP_CAP, 0x3144, 0, GENMASK(31, 0), 0x0, 0x03, 0x27}, 193 + {HPRE_DEV_ALG_BITMAP_CAP, 0x3148, 0, GENMASK(31, 0), 0x0, 0x03, 0x7F}, 194 + {HPRE_CORE1_ALG_BITMAP_CAP, 0x314c, 0, GENMASK(31, 0), 0x0, 0x7F, 0x7F}, 195 + {HPRE_CORE2_ALG_BITMAP_CAP, 0x3150, 0, GENMASK(31, 0), 0x0, 0x7F, 0x7F}, 196 + {HPRE_CORE3_ALG_BITMAP_CAP, 0x3154, 0, GENMASK(31, 0), 0x0, 0x7F, 0x7F}, 197 + {HPRE_CORE4_ALG_BITMAP_CAP, 0x3158, 0, GENMASK(31, 0), 0x0, 0x7F, 0x7F}, 198 + {HPRE_CORE5_ALG_BITMAP_CAP, 0x315c, 0, GENMASK(31, 0), 0x0, 0x7F, 0x7F}, 199 + {HPRE_CORE6_ALG_BITMAP_CAP, 0x3160, 0, GENMASK(31, 0), 0x0, 0x7F, 0x7F}, 200 + {HPRE_CORE7_ALG_BITMAP_CAP, 0x3164, 0, GENMASK(31, 0), 0x0, 0x7F, 0x7F}, 201 + {HPRE_CORE8_ALG_BITMAP_CAP, 0x3168, 0, GENMASK(31, 0), 0x0, 0x7F, 0x7F}, 202 + {HPRE_CORE9_ALG_BITMAP_CAP, 0x316c, 0, GENMASK(31, 0), 0x0, 0x10, 0x10}, 203 + {HPRE_CORE10_ALG_BITMAP_CAP, 0x3170, 0, GENMASK(31, 0), 0x0, 0x10, 0x10} 178 204 }; 179 205 180 206 static const struct hpre_hw_error hpre_hw_errors[] = { ··· 348 262 }, 349 263 }; 350 264 265 + bool hpre_check_alg_support(struct hisi_qm *qm, u32 alg) 266 + { 267 + u32 cap_val; 268 + 269 + cap_val = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_DRV_ALG_BITMAP_CAP, qm->cap_ver); 270 + if (alg & cap_val) 271 + return true; 272 + 273 + return false; 274 + } 275 + 276 + static int hpre_set_qm_algs(struct hisi_qm *qm) 277 + { 278 + struct device *dev = &qm->pdev->dev; 279 + char *algs, *ptr; 280 + u32 alg_msk; 281 + int i; 282 + 283 + if (!qm->use_sva) 284 + return 0; 285 + 286 + algs = devm_kzalloc(dev, HPRE_DEV_ALG_MAX_LEN * sizeof(char), GFP_KERNEL); 287 + if (!algs) 288 + return -ENOMEM; 289 + 290 + alg_msk = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_DEV_ALG_BITMAP_CAP, qm->cap_ver); 291 + 292 + for (i = 0; i < ARRAY_SIZE(hpre_dev_algs); i++) 293 + if (alg_msk & hpre_dev_algs[i].alg_msk) 294 + strcat(algs, hpre_dev_algs[i].alg); 295 + 296 + ptr = strrchr(algs, '\n'); 297 + if (ptr) 298 + *ptr = '\0'; 299 + 300 + qm->uacce->algs = algs; 301 + 302 + return 0; 303 + } 304 + 351 305 static int hpre_diff_regs_show(struct seq_file *s, void *unused) 352 306 { 353 307 struct hisi_qm *qm = s->private; ··· 456 330 457 331 static inline int hpre_cluster_num(struct hisi_qm *qm) 458 332 { 459 - return (qm->ver >= QM_HW_V3) ? HPRE_CLUSTERS_NUM_V3 : 460 - HPRE_CLUSTERS_NUM_V2; 333 + return hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_CLUSTER_NUM_CAP, qm->cap_ver); 461 334 } 462 335 463 336 static inline int hpre_cluster_core_mask(struct hisi_qm *qm) 464 337 { 465 - return (qm->ver >= QM_HW_V3) ? 
466 - HPRE_CLUSTER_CORE_MASK_V3 : HPRE_CLUSTER_CORE_MASK_V2; 338 + return hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_CORE_ENABLE_BITMAP_CAP, qm->cap_ver); 467 339 } 468 340 469 341 struct hisi_qp *hpre_create_qp(u8 type) ··· 581 457 u32 val; 582 458 int ret; 583 459 584 - if (qm->ver < QM_HW_V3) 460 + if (!test_bit(QM_SUPPORT_SVA_PREFETCH, &qm->caps)) 585 461 return; 586 462 587 463 /* Enable prefetch */ ··· 602 478 u32 val; 603 479 int ret; 604 480 605 - if (qm->ver < QM_HW_V3) 481 + if (!test_bit(QM_SUPPORT_SVA_PREFETCH, &qm->caps)) 606 482 return; 607 483 608 484 val = readl_relaxed(qm->io_base + HPRE_PREFETCH_CFG); ··· 754 630 val1 = readl(qm->io_base + HPRE_AM_OOO_SHUTDOWN_ENB); 755 631 if (enable) { 756 632 val1 |= HPRE_AM_OOO_SHUTDOWN_ENABLE; 757 - val2 = HPRE_HAC_RAS_NFE_ENABLE; 633 + val2 = hisi_qm_get_hw_info(qm, hpre_basic_info, 634 + HPRE_OOO_SHUTDOWN_MASK_CAP, qm->cap_ver); 758 635 } else { 759 636 val1 &= ~HPRE_AM_OOO_SHUTDOWN_ENABLE; 760 637 val2 = 0x0; ··· 769 644 770 645 static void hpre_hw_error_disable(struct hisi_qm *qm) 771 646 { 772 - /* disable hpre hw error interrupts */ 773 - writel(HPRE_CORE_INT_DISABLE, qm->io_base + HPRE_INT_MASK); 647 + u32 ce, nfe; 774 648 649 + ce = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_CE_MASK_CAP, qm->cap_ver); 650 + nfe = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_NFE_MASK_CAP, qm->cap_ver); 651 + 652 + /* disable hpre hw error interrupts */ 653 + writel(ce | nfe | HPRE_HAC_RAS_FE_ENABLE, qm->io_base + HPRE_INT_MASK); 775 654 /* disable HPRE block master OOO when nfe occurs on Kunpeng930 */ 776 655 hpre_master_ooo_ctrl(qm, false); 777 656 } 778 657 779 658 static void hpre_hw_error_enable(struct hisi_qm *qm) 780 659 { 660 + u32 ce, nfe; 661 + 662 + ce = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_CE_MASK_CAP, qm->cap_ver); 663 + nfe = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_NFE_MASK_CAP, qm->cap_ver); 664 + 781 665 /* clear HPRE hw error source if having */ 782 - writel(HPRE_CORE_INT_DISABLE, qm->io_base + HPRE_HAC_SOURCE_INT); 666 + writel(ce | nfe | HPRE_HAC_RAS_FE_ENABLE, qm->io_base + HPRE_HAC_SOURCE_INT); 783 667 784 668 /* configure error type */ 785 - writel(HPRE_HAC_RAS_CE_ENABLE, qm->io_base + HPRE_RAS_CE_ENB); 786 - writel(HPRE_HAC_RAS_NFE_ENABLE, qm->io_base + HPRE_RAS_NFE_ENB); 669 + writel(ce, qm->io_base + HPRE_RAS_CE_ENB); 670 + writel(nfe, qm->io_base + HPRE_RAS_NFE_ENB); 787 671 writel(HPRE_HAC_RAS_FE_ENABLE, qm->io_base + HPRE_RAS_FE_ENB); 788 672 789 673 /* enable HPRE block master OOO when nfe occurs on Kunpeng930 */ ··· 842 708 return readl(qm->io_base + offset + HPRE_CLSTR_ADDR_INQRY_RSLT); 843 709 } 844 710 845 - static int hpre_cluster_inqry_write(struct hpre_debugfs_file *file, u32 val) 711 + static void hpre_cluster_inqry_write(struct hpre_debugfs_file *file, u32 val) 846 712 { 847 713 struct hisi_qm *qm = hpre_file_to_qm(file); 848 714 int cluster_index = file->index - HPRE_CLUSTER_CTRL; ··· 850 716 HPRE_CLSTR_ADDR_INTRVL; 851 717 852 718 writel(val, qm->io_base + offset + HPRE_CLUSTER_INQURY); 853 - 854 - return 0; 855 719 } 856 720 857 721 static ssize_t hpre_ctrl_debug_read(struct file *filp, char __user *buf, ··· 924 792 goto err_input; 925 793 break; 926 794 case HPRE_CLUSTER_CTRL: 927 - ret = hpre_cluster_inqry_write(file, val); 928 - if (ret) 929 - goto err_input; 795 + hpre_cluster_inqry_write(file, val); 930 796 break; 931 797 default: 932 798 ret = -EINVAL; ··· 1136 1006 1137 1007 static int hpre_qm_init(struct hisi_qm *qm, struct pci_dev *pdev) 1138 1008 { 1009 + int ret; 1010 + 
1139 1011 if (pdev->revision == QM_HW_V1) { 1140 1012 pci_warn(pdev, "HPRE version 1 is not supported!\n"); 1141 1013 return -EINVAL; 1142 1014 } 1143 1015 1144 - if (pdev->revision >= QM_HW_V3) 1145 - qm->algs = "rsa\ndh\necdh\nx25519\nx448\necdsa\nsm2"; 1146 - else 1147 - qm->algs = "rsa\ndh"; 1148 1016 qm->mode = uacce_mode; 1149 1017 qm->pdev = pdev; 1150 1018 qm->ver = pdev->revision; ··· 1158 1030 qm->qm_list = &hpre_devices; 1159 1031 } 1160 1032 1161 - return hisi_qm_init(qm); 1033 + ret = hisi_qm_init(qm); 1034 + if (ret) { 1035 + pci_err(pdev, "Failed to init hpre qm configures!\n"); 1036 + return ret; 1037 + } 1038 + 1039 + ret = hpre_set_qm_algs(qm); 1040 + if (ret) { 1041 + pci_err(pdev, "Failed to set hpre algs!\n"); 1042 + hisi_qm_uninit(qm); 1043 + } 1044 + 1045 + return ret; 1162 1046 } 1163 1047 1164 1048 static int hpre_show_last_regs_init(struct hisi_qm *qm) ··· 1269 1129 1270 1130 static void hpre_clear_hw_err_status(struct hisi_qm *qm, u32 err_sts) 1271 1131 { 1132 + u32 nfe; 1133 + 1272 1134 writel(err_sts, qm->io_base + HPRE_HAC_SOURCE_INT); 1135 + nfe = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_NFE_MASK_CAP, qm->cap_ver); 1136 + writel(nfe, qm->io_base + HPRE_RAS_NFE_ENB); 1273 1137 } 1274 1138 1275 1139 static void hpre_open_axi_master_ooo(struct hisi_qm *qm) ··· 1291 1147 { 1292 1148 struct hisi_qm_err_info *err_info = &qm->err_info; 1293 1149 1294 - err_info->ce = QM_BASE_CE; 1295 - err_info->fe = 0; 1296 - err_info->ecc_2bits_mask = HPRE_CORE_ECC_2BIT_ERR | 1297 - HPRE_OOO_ECC_2BIT_ERR; 1298 - err_info->dev_ce_mask = HPRE_HAC_RAS_CE_ENABLE; 1150 + err_info->fe = HPRE_HAC_RAS_FE_ENABLE; 1151 + err_info->ce = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_QM_CE_MASK_CAP, qm->cap_ver); 1152 + err_info->nfe = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_QM_NFE_MASK_CAP, qm->cap_ver); 1153 + err_info->ecc_2bits_mask = HPRE_CORE_ECC_2BIT_ERR | HPRE_OOO_ECC_2BIT_ERR; 1154 + err_info->dev_shutdown_mask = hisi_qm_get_hw_info(qm, hpre_basic_info, 1155 + HPRE_OOO_SHUTDOWN_MASK_CAP, qm->cap_ver); 1156 + err_info->qm_shutdown_mask = hisi_qm_get_hw_info(qm, hpre_basic_info, 1157 + HPRE_QM_OOO_SHUTDOWN_MASK_CAP, qm->cap_ver); 1158 + err_info->qm_reset_mask = hisi_qm_get_hw_info(qm, hpre_basic_info, 1159 + HPRE_QM_RESET_MASK_CAP, qm->cap_ver); 1160 + err_info->dev_reset_mask = hisi_qm_get_hw_info(qm, hpre_basic_info, 1161 + HPRE_RESET_MASK_CAP, qm->cap_ver); 1299 1162 err_info->msi_wr_port = HPRE_WR_MSI_PORT; 1300 1163 err_info->acpi_rst = "HRST"; 1301 - err_info->nfe = QM_BASE_NFE | QM_ACC_DO_TASK_TIMEOUT; 1302 1164 } 1303 1165 1304 1166 static const struct hisi_qm_err_ini hpre_err_ini = {
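The main.c changes swap hardware-version comparisons (qm->ver >= QM_HW_V3) for table-driven lookups: each hpre_basic_info row names a capability register offset, a bit-field position and width, and per-generation fallback values for silicon that predates the capability registers. A simplified standalone sketch of the lookup, with read_reg() as a hypothetical stand-in for the MMIO read that hisi_qm_get_hw_info() performs (the real function additionally honours qm->cap_ver):

#include <stdio.h>
#include <stdint.h>

/* Same shape as the hisi_qm_cap_info rows used above. */
struct cap_info {
	uint32_t offset;	/* capability register offset */
	uint32_t shift;		/* bit-field position in the register */
	uint32_t mask;		/* bit-field width */
	uint32_t v1_val;	/* fallbacks for pre-capability hardware */
	uint32_t v2_val;
	uint32_t v3_val;
};

/* Hypothetical MMIO read: pretend the cluster-number field holds 4. */
static uint32_t read_reg(uint32_t offset)
{
	(void)offset;
	return 4u << 20;
}

static uint32_t get_hw_info(int hw_ver, const struct cap_info *cap)
{
	switch (hw_ver) {
	case 1:
		return cap->v1_val;	/* no capability registers */
	case 2:
		return cap->v2_val;
	default:			/* v3 and later: ask the hardware */
		return (read_reg(cap->offset) >> cap->shift) & cap->mask;
	}
}

int main(void)
{
	/* mirrors the HPRE_CLUSTER_NUM_CAP row: offset 0x313c, shift 20, 4 bits */
	const struct cap_info cluster_num = { 0x313c, 20, 0xf, 0, 4, 1 };

	printf("clusters reported by the hardware: %u\n",
	       get_hw_info(3, &cluster_num));
	return 0;
}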
+550 -362
drivers/crypto/hisilicon/qm.c
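The largest diff of the pull follows: qm.c drops the fixed QM_Q_DEPTH/QM_EQ_DEPTH and per-version IRQ macros and derives queue depths, IRQ vectors, and QM_SUPPORT_* feature bits from the qm_basic_info and qm_cap_info_* tables instead. One detail worth isolating is how two 16-bit queue depths travel in a single 32-bit capability word and are split by qm_get_xqc_depth(); a standalone sketch of that unpacking, using the 0x4000800 default from the QM_XEQ_DEPTH_CAP row and hypothetical names:

#include <stdio.h>
#include <stdint.h>

#define XQ_DEPTH_SHIFT	16
#define XQ_DEPTH_MASK	0xffffu

/* Split one 32-bit capability word into two 16-bit queue depths,
 * low half first - the same unpacking as qm_get_xqc_depth(). */
static void get_xqc_depth(uint32_t cap, uint16_t *low, uint16_t *high)
{
	*low = cap & XQ_DEPTH_MASK;
	*high = (cap >> XQ_DEPTH_SHIFT) & XQ_DEPTH_MASK;
}

int main(void)
{
	uint16_t eq_depth, aeq_depth;

	/* 0x4000800 packs an EQ depth of 0x800 (2048) and an AEQ depth of 0x400 (1024) */
	get_xqc_depth(0x4000800u, &eq_depth, &aeq_depth);
	printf("eq %u, aeq %u\n", eq_depth, aeq_depth);
	return 0;
}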
··· 22 22 #define QM_VF_AEQ_INT_MASK 0x4 23 23 #define QM_VF_EQ_INT_SOURCE 0x8 24 24 #define QM_VF_EQ_INT_MASK 0xc 25 - #define QM_IRQ_NUM_V1 1 26 - #define QM_IRQ_NUM_PF_V2 4 27 - #define QM_IRQ_NUM_VF_V2 2 28 - #define QM_IRQ_NUM_VF_V3 3 29 25 30 - #define QM_EQ_EVENT_IRQ_VECTOR 0 31 - #define QM_AEQ_EVENT_IRQ_VECTOR 1 32 - #define QM_CMD_EVENT_IRQ_VECTOR 2 33 - #define QM_ABNORMAL_EVENT_IRQ_VECTOR 3 26 + #define QM_IRQ_VECTOR_MASK GENMASK(15, 0) 27 + #define QM_IRQ_TYPE_MASK GENMASK(15, 0) 28 + #define QM_IRQ_TYPE_SHIFT 16 29 + #define QM_ABN_IRQ_TYPE_MASK GENMASK(7, 0) 34 30 35 31 /* mailbox */ 36 32 #define QM_MB_PING_ALL_VFS 0xffff 37 33 #define QM_MB_CMD_DATA_SHIFT 32 38 34 #define QM_MB_CMD_DATA_MASK GENMASK(31, 0) 35 + #define QM_MB_STATUS_MASK GENMASK(12, 9) 39 36 40 37 /* sqc shift */ 41 38 #define QM_SQ_HOP_NUM_SHIFT 0 ··· 74 77 #define QM_EQ_OVERFLOW 1 75 78 #define QM_CQE_ERROR 2 76 79 80 + #define QM_XQ_DEPTH_SHIFT 16 81 + #define QM_XQ_DEPTH_MASK GENMASK(15, 0) 82 + 77 83 #define QM_DOORBELL_CMD_SQ 0 78 84 #define QM_DOORBELL_CMD_CQ 1 79 85 #define QM_DOORBELL_CMD_EQ 2 ··· 86 86 #define QM_DB_CMD_SHIFT_V1 16 87 87 #define QM_DB_INDEX_SHIFT_V1 32 88 88 #define QM_DB_PRIORITY_SHIFT_V1 48 89 - #define QM_QUE_ISO_CFG_V 0x0030 90 89 #define QM_PAGE_SIZE 0x0034 91 - #define QM_QUE_ISO_EN 0x100154 92 - #define QM_CAPBILITY 0x100158 93 - #define QM_QP_NUN_MASK GENMASK(10, 0) 94 90 #define QM_QP_DB_INTERVAL 0x10000 95 91 96 92 #define QM_MEM_START_INIT 0x100040 ··· 122 126 #define QM_DFX_CNT_CLR_CE 0x100118 123 127 124 128 #define QM_ABNORMAL_INT_SOURCE 0x100000 125 - #define QM_ABNORMAL_INT_SOURCE_CLR GENMASK(14, 0) 126 129 #define QM_ABNORMAL_INT_MASK 0x100004 127 130 #define QM_ABNORMAL_INT_MASK_VALUE 0x7fff 128 131 #define QM_ABNORMAL_INT_STATUS 0x100008 ··· 139 144 #define QM_RAS_NFE_ENABLE 0x1000f4 140 145 #define QM_RAS_CE_THRESHOLD 0x1000f8 141 146 #define QM_RAS_CE_TIMES_PER_IRQ 1 142 - #define QM_RAS_MSI_INT_SEL 0x1040f4 143 147 #define QM_OOO_SHUTDOWN_SEL 0x1040f8 148 + #define QM_ECC_MBIT BIT(2) 149 + #define QM_DB_TIMEOUT BIT(10) 150 + #define QM_OF_FIFO_OF BIT(11) 144 151 145 152 #define QM_RESET_WAIT_TIMEOUT 400 146 153 #define QM_PEH_VENDOR_ID 0x1000d8 ··· 202 205 #define MAX_WAIT_COUNTS 1000 203 206 #define QM_CACHE_WB_START 0x204 204 207 #define QM_CACHE_WB_DONE 0x208 208 + #define QM_FUNC_CAPS_REG 0x3100 209 + #define QM_CAPBILITY_VERSION GENMASK(7, 0) 205 210 206 211 #define PCI_BAR_2 2 207 212 #define PCI_BAR_4 4 ··· 220 221 #define WAIT_PERIOD 20 221 222 #define REMOVE_WAIT_DELAY 10 222 223 #define QM_SQE_ADDR_MASK GENMASK(7, 0) 223 - #define QM_EQ_DEPTH (1024 * 2) 224 224 225 225 #define QM_DRIVER_REMOVING 0 226 226 #define QM_RST_SCHED 1 ··· 268 270 ((buf_sz) << QM_CQ_BUF_SIZE_SHIFT) | \ 269 271 ((cqe_sz) << QM_CQ_CQE_SIZE_SHIFT)) 270 272 271 - #define QM_MK_CQC_DW3_V2(cqe_sz) \ 272 - ((QM_Q_DEPTH - 1) | ((cqe_sz) << QM_CQ_CQE_SIZE_SHIFT)) 273 + #define QM_MK_CQC_DW3_V2(cqe_sz, cq_depth) \ 274 + ((((u32)cq_depth) - 1) | ((cqe_sz) << QM_CQ_CQE_SIZE_SHIFT)) 273 275 274 276 #define QM_MK_SQC_W13(priority, orders, alg_type) \ 275 277 (((priority) << QM_SQ_PRIORITY_SHIFT) | \ ··· 282 284 ((buf_sz) << QM_SQ_BUF_SIZE_SHIFT) | \ 283 285 ((u32)ilog2(sqe_sz) << QM_SQ_SQE_SIZE_SHIFT)) 284 286 285 - #define QM_MK_SQC_DW3_V2(sqe_sz) \ 286 - ((QM_Q_DEPTH - 1) | ((u32)ilog2(sqe_sz) << QM_SQ_SQE_SIZE_SHIFT)) 287 + #define QM_MK_SQC_DW3_V2(sqe_sz, sq_depth) \ 288 + ((((u32)sq_depth) - 1) | ((u32)ilog2(sqe_sz) << QM_SQ_SQE_SIZE_SHIFT)) 287 289 288 290 #define 
INIT_QC_COMMON(qc, base, pasid) do { \ 289 291 (qc)->head = 0; \ ··· 325 327 QM_VF_START_FAIL, 326 328 QM_PF_SET_QOS, 327 329 QM_VF_GET_QOS, 330 + }; 331 + 332 + enum qm_basic_type { 333 + QM_TOTAL_QP_NUM_CAP = 0x0, 334 + QM_FUNC_MAX_QP_CAP, 335 + QM_XEQ_DEPTH_CAP, 336 + QM_QP_DEPTH_CAP, 337 + QM_EQ_IRQ_TYPE_CAP, 338 + QM_AEQ_IRQ_TYPE_CAP, 339 + QM_ABN_IRQ_TYPE_CAP, 340 + QM_PF2VF_IRQ_TYPE_CAP, 341 + QM_PF_IRQ_NUM_CAP, 342 + QM_VF_IRQ_NUM_CAP, 343 + }; 344 + 345 + static const struct hisi_qm_cap_info qm_cap_info_comm[] = { 346 + {QM_SUPPORT_DB_ISOLATION, 0x30, 0, BIT(0), 0x0, 0x0, 0x0}, 347 + {QM_SUPPORT_FUNC_QOS, 0x3100, 0, BIT(8), 0x0, 0x0, 0x1}, 348 + {QM_SUPPORT_STOP_QP, 0x3100, 0, BIT(9), 0x0, 0x0, 0x1}, 349 + {QM_SUPPORT_MB_COMMAND, 0x3100, 0, BIT(11), 0x0, 0x0, 0x1}, 350 + {QM_SUPPORT_SVA_PREFETCH, 0x3100, 0, BIT(14), 0x0, 0x0, 0x1}, 351 + }; 352 + 353 + static const struct hisi_qm_cap_info qm_cap_info_pf[] = { 354 + {QM_SUPPORT_RPM, 0x3100, 0, BIT(13), 0x0, 0x0, 0x1}, 355 + }; 356 + 357 + static const struct hisi_qm_cap_info qm_cap_info_vf[] = { 358 + {QM_SUPPORT_RPM, 0x3100, 0, BIT(12), 0x0, 0x0, 0x0}, 359 + }; 360 + 361 + static const struct hisi_qm_cap_info qm_basic_info[] = { 362 + {QM_TOTAL_QP_NUM_CAP, 0x100158, 0, GENMASK(10, 0), 0x1000, 0x400, 0x400}, 363 + {QM_FUNC_MAX_QP_CAP, 0x100158, 11, GENMASK(10, 0), 0x1000, 0x400, 0x400}, 364 + {QM_XEQ_DEPTH_CAP, 0x3104, 0, GENMASK(15, 0), 0x800, 0x4000800, 0x4000800}, 365 + {QM_QP_DEPTH_CAP, 0x3108, 0, GENMASK(31, 0), 0x4000400, 0x4000400, 0x4000400}, 366 + {QM_EQ_IRQ_TYPE_CAP, 0x310c, 0, GENMASK(31, 0), 0x10000, 0x10000, 0x10000}, 367 + {QM_AEQ_IRQ_TYPE_CAP, 0x3110, 0, GENMASK(31, 0), 0x0, 0x10001, 0x10001}, 368 + {QM_ABN_IRQ_TYPE_CAP, 0x3114, 0, GENMASK(31, 0), 0x0, 0x10003, 0x10003}, 369 + {QM_PF2VF_IRQ_TYPE_CAP, 0x3118, 0, GENMASK(31, 0), 0x0, 0x0, 0x10002}, 370 + {QM_PF_IRQ_NUM_CAP, 0x311c, 16, GENMASK(15, 0), 0x1, 0x4, 0x4}, 371 + {QM_VF_IRQ_NUM_CAP, 0x311c, 0, GENMASK(15, 0), 0x1, 0x2, 0x3}, 328 372 }; 329 373 330 374 struct qm_cqe { ··· 461 421 int (*get_vft)(struct hisi_qm *qm, u32 *base, u32 *number); 462 422 void (*qm_db)(struct hisi_qm *qm, u16 qn, 463 423 u8 cmd, u16 index, u8 priority); 464 - u32 (*get_irq_num)(struct hisi_qm *qm); 465 424 int (*debug_init)(struct hisi_qm *qm); 466 - void (*hw_error_init)(struct hisi_qm *qm, u32 ce, u32 nfe, u32 fe); 425 + void (*hw_error_init)(struct hisi_qm *qm); 467 426 void (*hw_error_uninit)(struct hisi_qm *qm); 468 427 enum acc_err_result (*hw_error_handle)(struct hisi_qm *qm); 469 - int (*stop_qp)(struct hisi_qp *qp); 470 428 int (*set_msi)(struct hisi_qm *qm, bool set); 471 - int (*ping_all_vfs)(struct hisi_qm *qm, u64 cmd); 472 - int (*ping_pf)(struct hisi_qm *qm, u64 cmd); 473 429 }; 474 430 475 431 struct qm_dfx_item { ··· 569 533 {50100, 100000, 19} 570 534 }; 571 535 536 + static void qm_irqs_unregister(struct hisi_qm *qm); 537 + 572 538 static bool qm_avail_state(struct hisi_qm *qm, enum qm_state new) 573 539 { 574 540 enum qm_state curr = atomic_read(&qm->status.flags); ··· 661 623 } 662 624 663 625 /* Check if the error causes the master ooo block */ 664 - static int qm_check_dev_error(struct hisi_qm *qm) 626 + static bool qm_check_dev_error(struct hisi_qm *qm) 665 627 { 666 628 u32 val, dev_val; 667 629 668 630 if (qm->fun_type == QM_HW_VF) 669 - return 0; 631 + return false; 670 632 671 - val = qm_get_hw_error_status(qm); 672 - dev_val = qm_get_dev_err_status(qm); 633 + val = qm_get_hw_error_status(qm) & qm->err_info.qm_shutdown_mask; 634 + dev_val = 
qm_get_dev_err_status(qm) & qm->err_info.dev_shutdown_mask; 673 635 674 - if (qm->ver < QM_HW_V3) 675 - return (val & QM_ECC_MBIT) || 676 - (dev_val & qm->err_info.ecc_2bits_mask); 677 - 678 - return (val & readl(qm->io_base + QM_OOO_SHUTDOWN_SEL)) || 679 - (dev_val & (~qm->err_info.dev_ce_mask)); 636 + return val || dev_val; 680 637 } 681 638 682 639 static int qm_wait_reset_finish(struct hisi_qm *qm) ··· 761 728 762 729 static int qm_mb_nolock(struct hisi_qm *qm, struct qm_mailbox *mailbox) 763 730 { 731 + int ret; 732 + u32 val; 733 + 764 734 if (unlikely(hisi_qm_wait_mb_ready(qm))) { 765 735 dev_err(&qm->pdev->dev, "QM mailbox is busy to start!\n"); 736 + ret = -EBUSY; 766 737 goto mb_busy; 767 738 } 768 739 ··· 774 737 775 738 if (unlikely(hisi_qm_wait_mb_ready(qm))) { 776 739 dev_err(&qm->pdev->dev, "QM mailbox operation timeout!\n"); 740 + ret = -ETIMEDOUT; 741 + goto mb_busy; 742 + } 743 + 744 + val = readl(qm->io_base + QM_MB_CMD_SEND_BASE); 745 + if (val & QM_MB_STATUS_MASK) { 746 + dev_err(&qm->pdev->dev, "QM mailbox operation failed!\n"); 747 + ret = -EIO; 777 748 goto mb_busy; 778 749 } 779 750 ··· 789 744 790 745 mb_busy: 791 746 atomic64_inc(&qm->debug.dfx.mb_err_cnt); 792 - return -EBUSY; 747 + return ret; 793 748 } 794 749 795 750 int hisi_qm_mb(struct hisi_qm *qm, u8 cmd, dma_addr_t dma_addr, u16 queue, ··· 873 828 POLL_TIMEOUT); 874 829 } 875 830 876 - static u32 qm_get_irq_num_v1(struct hisi_qm *qm) 831 + /** 832 + * hisi_qm_get_hw_info() - Get device information. 833 + * @qm: The qm which want to get information. 834 + * @info_table: Array for storing device information. 835 + * @index: Index in info_table. 836 + * @is_read: Whether read from reg, 0: not support read from reg. 837 + * 838 + * This function returns device information the caller needs. 
 839 + */ 840 + u32 hisi_qm_get_hw_info(struct hisi_qm *qm, 841 + const struct hisi_qm_cap_info *info_table, 842 + u32 index, bool is_read) 877 843 { 878 - return QM_IRQ_NUM_V1; 844 + u32 val; 845 + 846 + switch (qm->ver) { 847 + case QM_HW_V1: 848 + return info_table[index].v1_val; 849 + case QM_HW_V2: 850 + return info_table[index].v2_val; 851 + default: 852 + if (!is_read) 853 + return info_table[index].v3_val; 854 + 855 + val = readl(qm->io_base + info_table[index].offset); 856 + return (val >> info_table[index].shift) & info_table[index].mask; 857 + } 858 + } 859 + EXPORT_SYMBOL_GPL(hisi_qm_get_hw_info); 860 + 861 + static void qm_get_xqc_depth(struct hisi_qm *qm, u16 *low_bits, 862 + u16 *high_bits, enum qm_basic_type type) 863 + { 864 + u32 depth; 865 + 866 + depth = hisi_qm_get_hw_info(qm, qm_basic_info, type, qm->cap_ver); 867 + *low_bits = depth & QM_XQ_DEPTH_MASK; 868 + *high_bits = (depth >> QM_XQ_DEPTH_SHIFT) & QM_XQ_DEPTH_MASK; 879 869 } 880 870 881 - static u32 qm_get_irq_num_v2(struct hisi_qm *qm) 871 + static u32 qm_get_irq_num(struct hisi_qm *qm) 882 872 { 883 873 if (qm->fun_type == QM_HW_PF) 884 - return QM_IRQ_NUM_PF_V2; 885 - else 886 - return QM_IRQ_NUM_VF_V2; 887 - } 874 + return hisi_qm_get_hw_info(qm, qm_basic_info, QM_PF_IRQ_NUM_CAP, qm->cap_ver); 888 875 889 - static u32 qm_get_irq_num_v3(struct hisi_qm *qm) 890 - { 891 - if (qm->fun_type == QM_HW_PF) 892 - return QM_IRQ_NUM_PF_V2; 893 - 894 - return QM_IRQ_NUM_VF_V3; 876 + return hisi_qm_get_hw_info(qm, qm_basic_info, QM_VF_IRQ_NUM_CAP, qm->cap_ver); 895 877 } 896 878 897 879 static int qm_pm_get_sync(struct hisi_qm *qm) ··· 926 854 struct device *dev = &qm->pdev->dev; 927 855 int ret; 928 856 929 - if (qm->fun_type == QM_HW_VF || qm->ver < QM_HW_V3) 857 + if (!test_bit(QM_SUPPORT_RPM, &qm->caps)) 930 858 return 0; 931 859 932 860 ret = pm_runtime_resume_and_get(dev); ··· 942 870 { 943 871 struct device *dev = &qm->pdev->dev; 944 872 945 - if (qm->fun_type == QM_HW_VF || qm->ver < QM_HW_V3) 873 + if (!test_bit(QM_SUPPORT_RPM, &qm->caps)) 946 874 return; 947 875 948 876 pm_runtime_mark_last_busy(dev); ··· 951 879 static void qm_cq_head_update(struct hisi_qp *qp) 952 880 { 953 - if (qp->qp_status.cq_head == QM_Q_DEPTH - 1) { 881 + if (qp->qp_status.cq_head == qp->cq_depth - 1) { 954 882 qp->qp_status.cqc_phase = !qp->qp_status.cqc_phase; 955 883 qp->qp_status.cq_head = 0; 956 884 } else { ··· 983 911 { 984 912 struct hisi_qm *qm = poll_data->qm; 985 913 struct qm_eqe *eqe = qm->eqe + qm->status.eq_head; 914 + u16 eq_depth = qm->eq_depth; 986 915 int eqe_num = 0; 987 916 u16 cqn; 988 917 ··· 992 919 poll_data->qp_finish_id[eqe_num] = cqn; 993 920 eqe_num++; 994 921 995 - if (qm->status.eq_head == QM_EQ_DEPTH - 1) { 922 + if (qm->status.eq_head == eq_depth - 1) { 996 923 qm->status.eqc_phase = !qm->status.eqc_phase; 997 924 eqe = qm->eqe; 998 925 qm->status.eq_head = 0; ··· 1001 928 qm->status.eq_head++; 1002 929 } 1003 930 1004 - if (eqe_num == (QM_EQ_DEPTH >> 1) - 1) 931 + if (eqe_num == (eq_depth >> 1) - 1) 1005 932 break; 1006 933 } 1007 934 ··· 1141 1068 { 1142 1069 struct hisi_qm *qm = data; 1143 1070 struct qm_aeqe *aeqe = qm->aeqe + qm->status.aeq_head; 1071 + u16 aeq_depth = qm->aeq_depth; 1144 1072 u32 type, qp_id; 1145 1073 1146 1074 while (QM_AEQE_PHASE(aeqe) == qm->status.aeqc_phase) { ··· 1166 1092 break; 1167 1093 } 1168 1094 1169 - if (qm->status.aeq_head == QM_Q_DEPTH - 1) { 1095 + if (qm->status.aeq_head == aeq_depth - 1) { 1170 1096 qm->status.aeqc_phase = !qm->status.aeqc_phase; 1171 1097 
aeqe = qm->aeqe; 1172 1098 qm->status.aeq_head = 0; ··· 1192 1118 return IRQ_WAKE_THREAD; 1193 1119 } 1194 1120 1195 - static void qm_irq_unregister(struct hisi_qm *qm) 1196 - { 1197 - struct pci_dev *pdev = qm->pdev; 1198 - 1199 - free_irq(pci_irq_vector(pdev, QM_EQ_EVENT_IRQ_VECTOR), qm); 1200 - 1201 - if (qm->ver > QM_HW_V1) { 1202 - free_irq(pci_irq_vector(pdev, QM_AEQ_EVENT_IRQ_VECTOR), qm); 1203 - 1204 - if (qm->fun_type == QM_HW_PF) 1205 - free_irq(pci_irq_vector(pdev, 1206 - QM_ABNORMAL_EVENT_IRQ_VECTOR), qm); 1207 - } 1208 - 1209 - if (qm->ver > QM_HW_V2) 1210 - free_irq(pci_irq_vector(pdev, QM_CMD_EVENT_IRQ_VECTOR), qm); 1211 - } 1212 - 1213 1121 static void qm_init_qp_status(struct hisi_qp *qp) 1214 1122 { 1215 1123 struct hisi_qp_status *qp_status = &qp->qp_status; ··· 1207 1151 struct device *dev = &qm->pdev->dev; 1208 1152 u32 page_type = 0x0; 1209 1153 1210 - if (qm->ver < QM_HW_V3) 1154 + if (!test_bit(QM_SUPPORT_SVA_PREFETCH, &qm->caps)) 1211 1155 return; 1212 1156 1213 1157 switch (PAGE_SIZE) { ··· 1326 1270 } 1327 1271 break; 1328 1272 case SHAPER_VFT: 1329 - if (qm->ver >= QM_HW_V3) { 1273 + if (factor) { 1330 1274 tmp = factor->cir_b | 1331 1275 (factor->cir_u << QM_SHAPER_FACTOR_CIR_U_SHIFT) | 1332 1276 (factor->cir_s << QM_SHAPER_FACTOR_CIR_S_SHIFT) | ··· 1344 1288 static int qm_set_vft_common(struct hisi_qm *qm, enum vft_type type, 1345 1289 u32 fun_num, u32 base, u32 number) 1346 1290 { 1347 - struct qm_shaper_factor *factor = &qm->factor[fun_num]; 1291 + struct qm_shaper_factor *factor = NULL; 1348 1292 unsigned int val; 1349 1293 int ret; 1294 + 1295 + if (type == SHAPER_VFT && test_bit(QM_SUPPORT_FUNC_QOS, &qm->caps)) 1296 + factor = &qm->factor[fun_num]; 1350 1297 1351 1298 ret = readl_relaxed_poll_timeout(qm->io_base + QM_VFT_CFG_RDY, val, 1352 1299 val & BIT(0), POLL_PERIOD, ··· 1408 1349 } 1409 1350 1410 1351 /* init default shaper qos val */ 1411 - if (qm->ver >= QM_HW_V3) { 1352 + if (test_bit(QM_SUPPORT_FUNC_QOS, &qm->caps)) { 1412 1353 ret = qm_shaper_init_vft(qm, fun_num); 1413 1354 if (ret) 1414 1355 goto back_sqc_cqc; ··· 1416 1357 1417 1358 return 0; 1418 1359 back_sqc_cqc: 1419 - for (i = SQC_VFT; i <= CQC_VFT; i++) { 1420 - ret = qm_set_vft_common(qm, i, fun_num, 0, 0); 1421 - if (ret) 1422 - return ret; 1423 - } 1360 + for (i = SQC_VFT; i <= CQC_VFT; i++) 1361 + qm_set_vft_common(qm, i, fun_num, 0, 0); 1362 + 1424 1363 return ret; 1425 1364 } 1426 1365 ··· 1914 1857 kfree(ctx_addr); 1915 1858 } 1916 1859 1917 - static int dump_show(struct hisi_qm *qm, void *info, 1860 + static void dump_show(struct hisi_qm *qm, void *info, 1918 1861 unsigned int info_size, char *info_name) 1919 1862 { 1920 1863 struct device *dev = &qm->pdev->dev; 1921 - u8 *info_buf, *info_curr = info; 1864 + u8 *info_curr = info; 1922 1865 u32 i; 1923 1866 #define BYTE_PER_DW 4 1924 1867 1925 - info_buf = kzalloc(info_size, GFP_KERNEL); 1926 - if (!info_buf) 1927 - return -ENOMEM; 1928 - 1929 - for (i = 0; i < info_size; i++, info_curr++) { 1930 - if (i % BYTE_PER_DW == 0) 1931 - info_buf[i + 3UL] = *info_curr; 1932 - else if (i % BYTE_PER_DW == 1) 1933 - info_buf[i + 1UL] = *info_curr; 1934 - else if (i % BYTE_PER_DW == 2) 1935 - info_buf[i - 1] = *info_curr; 1936 - else if (i % BYTE_PER_DW == 3) 1937 - info_buf[i - 3] = *info_curr; 1938 - } 1939 - 1940 1868 dev_info(dev, "%s DUMP\n", info_name); 1941 - for (i = 0; i < info_size; i += BYTE_PER_DW) { 1869 + for (i = 0; i < info_size; i += BYTE_PER_DW, info_curr += BYTE_PER_DW) { 1942 1870 pr_info("DW%u: %02X%02X %02X%02X\n", i / 
BYTE_PER_DW, 1943 - info_buf[i], info_buf[i + 1UL], 1944 - info_buf[i + 2UL], info_buf[i + 3UL]); 1871 + *(info_curr + 3), *(info_curr + 2), *(info_curr + 1), *(info_curr)); 1945 1872 } 1946 - 1947 - kfree(info_buf); 1948 - 1949 - return 0; 1950 1873 } 1951 1874 1952 1875 static int qm_dump_sqc_raw(struct hisi_qm *qm, dma_addr_t dma_addr, u16 qp_id) ··· 1966 1929 if (qm->sqc) { 1967 1930 sqc_curr = qm->sqc + qp_id; 1968 1931 1969 - ret = dump_show(qm, sqc_curr, sizeof(*sqc), 1970 - "SOFT SQC"); 1971 - if (ret) 1972 - dev_info(dev, "Show soft sqc failed!\n"); 1932 + dump_show(qm, sqc_curr, sizeof(*sqc), "SOFT SQC"); 1973 1933 } 1974 1934 up_read(&qm->qps_lock); 1975 1935 1976 - goto err_free_ctx; 1936 + goto free_ctx; 1977 1937 } 1978 1938 1979 - ret = dump_show(qm, sqc, sizeof(*sqc), "SQC"); 1980 - if (ret) 1981 - dev_info(dev, "Show hw sqc failed!\n"); 1939 + dump_show(qm, sqc, sizeof(*sqc), "SQC"); 1982 1940 1983 - err_free_ctx: 1941 + free_ctx: 1984 1942 qm_ctx_free(qm, sizeof(*sqc), sqc, &sqc_dma); 1985 - return ret; 1943 + return 0; 1986 1944 } 1987 1945 1988 1946 static int qm_cqc_dump(struct hisi_qm *qm, const char *s) ··· 2007 1975 if (qm->cqc) { 2008 1976 cqc_curr = qm->cqc + qp_id; 2009 1977 2010 - ret = dump_show(qm, cqc_curr, sizeof(*cqc), 2011 - "SOFT CQC"); 2012 - if (ret) 2013 - dev_info(dev, "Show soft cqc failed!\n"); 1978 + dump_show(qm, cqc_curr, sizeof(*cqc), "SOFT CQC"); 2014 1979 } 2015 1980 up_read(&qm->qps_lock); 2016 1981 2017 - goto err_free_ctx; 1982 + goto free_ctx; 2018 1983 } 2019 1984 2020 - ret = dump_show(qm, cqc, sizeof(*cqc), "CQC"); 2021 - if (ret) 2022 - dev_info(dev, "Show hw cqc failed!\n"); 1985 + dump_show(qm, cqc, sizeof(*cqc), "CQC"); 2023 1986 2024 - err_free_ctx: 1987 + free_ctx: 2025 1988 qm_ctx_free(qm, sizeof(*cqc), cqc, &cqc_dma); 2026 - return ret; 1989 + return 0; 2027 1990 } 2028 1991 2029 1992 static int qm_eqc_aeqc_dump(struct hisi_qm *qm, char *s, size_t size, ··· 2042 2015 if (ret) 2043 2016 goto err_free_ctx; 2044 2017 2045 - ret = dump_show(qm, xeqc, size, name); 2046 - if (ret) 2047 - dev_info(dev, "Show hw %s failed!\n", name); 2018 + dump_show(qm, xeqc, size, name); 2048 2019 2049 2020 err_free_ctx: 2050 2021 qm_ctx_free(qm, size, xeqc, &xeqc_dma); ··· 2050 2025 } 2051 2026 2052 2027 static int q_dump_param_parse(struct hisi_qm *qm, char *s, 2053 - u32 *e_id, u32 *q_id) 2028 + u32 *e_id, u32 *q_id, u16 q_depth) 2054 2029 { 2055 2030 struct device *dev = &qm->pdev->dev; 2056 2031 unsigned int qp_num = qm->qp_num; ··· 2076 2051 } 2077 2052 2078 2053 ret = kstrtou32(presult, 0, e_id); 2079 - if (ret || *e_id >= QM_Q_DEPTH) { 2080 - dev_err(dev, "Please input sqe num (0-%d)", QM_Q_DEPTH - 1); 2054 + if (ret || *e_id >= q_depth) { 2055 + dev_err(dev, "Please input sqe num (0-%u)", q_depth - 1); 2081 2056 return -EINVAL; 2082 2057 } 2083 2058 ··· 2091 2066 2092 2067 static int qm_sq_dump(struct hisi_qm *qm, char *s) 2093 2068 { 2094 - struct device *dev = &qm->pdev->dev; 2069 + u16 sq_depth = qm->qp_array->cq_depth; 2095 2070 void *sqe, *sqe_curr; 2096 2071 struct hisi_qp *qp; 2097 2072 u32 qp_id, sqe_id; 2098 2073 int ret; 2099 2074 2100 - ret = q_dump_param_parse(qm, s, &sqe_id, &qp_id); 2075 + ret = q_dump_param_parse(qm, s, &sqe_id, &qp_id, sq_depth); 2101 2076 if (ret) 2102 2077 return ret; 2103 2078 2104 - sqe = kzalloc(qm->sqe_size * QM_Q_DEPTH, GFP_KERNEL); 2079 + sqe = kzalloc(qm->sqe_size * sq_depth, GFP_KERNEL); 2105 2080 if (!sqe) 2106 2081 return -ENOMEM; 2107 2082 2108 2083 qp = &qm->qp_array[qp_id]; 2109 - memcpy(sqe, 
qp->sqe, qm->sqe_size * QM_Q_DEPTH); 2084 + memcpy(sqe, qp->sqe, qm->sqe_size * sq_depth); 2110 2085 sqe_curr = sqe + (u32)(sqe_id * qm->sqe_size); 2111 2086 memset(sqe_curr + qm->debug.sqe_mask_offset, QM_SQE_ADDR_MASK, 2112 2087 qm->debug.sqe_mask_len); 2113 2088 2114 - ret = dump_show(qm, sqe_curr, qm->sqe_size, "SQE"); 2115 - if (ret) 2116 - dev_info(dev, "Show sqe failed!\n"); 2089 + dump_show(qm, sqe_curr, qm->sqe_size, "SQE"); 2117 2090 2118 2091 kfree(sqe); 2119 2092 2120 - return ret; 2093 + return 0; 2121 2094 } 2122 2095 2123 2096 static int qm_cq_dump(struct hisi_qm *qm, char *s) 2124 2097 { 2125 - struct device *dev = &qm->pdev->dev; 2126 2098 struct qm_cqe *cqe_curr; 2127 2099 struct hisi_qp *qp; 2128 2100 u32 qp_id, cqe_id; 2129 2101 int ret; 2130 2102 2131 - ret = q_dump_param_parse(qm, s, &cqe_id, &qp_id); 2103 + ret = q_dump_param_parse(qm, s, &cqe_id, &qp_id, qm->qp_array->cq_depth); 2132 2104 if (ret) 2133 2105 return ret; 2134 2106 2135 2107 qp = &qm->qp_array[qp_id]; 2136 2108 cqe_curr = qp->cqe + cqe_id; 2137 - ret = dump_show(qm, cqe_curr, sizeof(struct qm_cqe), "CQE"); 2138 - if (ret) 2139 - dev_info(dev, "Show cqe failed!\n"); 2109 + dump_show(qm, cqe_curr, sizeof(struct qm_cqe), "CQE"); 2140 2110 2141 - return ret; 2111 + return 0; 2142 2112 } 2143 2113 2144 2114 static int qm_eq_aeq_dump(struct hisi_qm *qm, const char *s, ··· 2151 2131 if (ret) 2152 2132 return -EINVAL; 2153 2133 2154 - if (!strcmp(name, "EQE") && xeqe_id >= QM_EQ_DEPTH) { 2155 - dev_err(dev, "Please input eqe num (0-%d)", QM_EQ_DEPTH - 1); 2134 + if (!strcmp(name, "EQE") && xeqe_id >= qm->eq_depth) { 2135 + dev_err(dev, "Please input eqe num (0-%u)", qm->eq_depth - 1); 2156 2136 return -EINVAL; 2157 - } else if (!strcmp(name, "AEQE") && xeqe_id >= QM_Q_DEPTH) { 2158 - dev_err(dev, "Please input aeqe num (0-%d)", QM_Q_DEPTH - 1); 2137 + } else if (!strcmp(name, "AEQE") && xeqe_id >= qm->aeq_depth) { 2138 + dev_err(dev, "Please input aeqe num (0-%u)", qm->eq_depth - 1); 2159 2139 return -EINVAL; 2160 2140 } 2161 2141 ··· 2170 2150 goto err_unlock; 2171 2151 } 2172 2152 2173 - ret = dump_show(qm, xeqe, size, name); 2174 - if (ret) 2175 - dev_info(dev, "Show %s failed!\n", name); 2153 + dump_show(qm, xeqe, size, name); 2176 2154 2177 2155 err_unlock: 2178 2156 up_read(&qm->qps_lock); ··· 2263 2245 return ret; 2264 2246 2265 2247 /* Judge if the instance is being reset. 
*/ 2266 - if (unlikely(atomic_read(&qm->status.flags) == QM_STOP)) 2267 - return 0; 2248 + if (unlikely(atomic_read(&qm->status.flags) == QM_STOP)) { 2249 + ret = 0; 2250 + goto put_dfx_access; 2251 + } 2268 2252 2269 2253 if (count > QM_DBG_WRITE_LEN) { 2270 2254 ret = -ENOSPC; ··· 2320 2300 file->debug = &qm->debug; 2321 2301 } 2322 2302 2323 - static void qm_hw_error_init_v1(struct hisi_qm *qm, u32 ce, u32 nfe, u32 fe) 2303 + static void qm_hw_error_init_v1(struct hisi_qm *qm) 2324 2304 { 2325 2305 writel(QM_ABNORMAL_INT_MASK_VALUE, qm->io_base + QM_ABNORMAL_INT_MASK); 2326 2306 } 2327 2307 2328 - static void qm_hw_error_cfg(struct hisi_qm *qm, u32 ce, u32 nfe, u32 fe) 2308 + static void qm_hw_error_cfg(struct hisi_qm *qm) 2329 2309 { 2330 - qm->error_mask = ce | nfe | fe; 2310 + struct hisi_qm_err_info *err_info = &qm->err_info; 2311 + 2312 + qm->error_mask = err_info->nfe | err_info->ce | err_info->fe; 2331 2313 /* clear QM hw residual error source */ 2332 - writel(QM_ABNORMAL_INT_SOURCE_CLR, 2333 - qm->io_base + QM_ABNORMAL_INT_SOURCE); 2314 + writel(qm->error_mask, qm->io_base + QM_ABNORMAL_INT_SOURCE); 2334 2315 2335 2316 /* configure error type */ 2336 - writel(ce, qm->io_base + QM_RAS_CE_ENABLE); 2317 + writel(err_info->ce, qm->io_base + QM_RAS_CE_ENABLE); 2337 2318 writel(QM_RAS_CE_TIMES_PER_IRQ, qm->io_base + QM_RAS_CE_THRESHOLD); 2338 - writel(nfe, qm->io_base + QM_RAS_NFE_ENABLE); 2339 - writel(fe, qm->io_base + QM_RAS_FE_ENABLE); 2319 + writel(err_info->nfe, qm->io_base + QM_RAS_NFE_ENABLE); 2320 + writel(err_info->fe, qm->io_base + QM_RAS_FE_ENABLE); 2340 2321 } 2341 2322 2342 - static void qm_hw_error_init_v2(struct hisi_qm *qm, u32 ce, u32 nfe, u32 fe) 2323 + static void qm_hw_error_init_v2(struct hisi_qm *qm) 2343 2324 { 2344 - u32 irq_enable = ce | nfe | fe; 2345 - u32 irq_unmask = ~irq_enable; 2325 + u32 irq_unmask; 2346 2326 2347 - qm_hw_error_cfg(qm, ce, nfe, fe); 2327 + qm_hw_error_cfg(qm); 2348 2328 2329 + irq_unmask = ~qm->error_mask; 2349 2330 irq_unmask &= readl(qm->io_base + QM_ABNORMAL_INT_MASK); 2350 2331 writel(irq_unmask, qm->io_base + QM_ABNORMAL_INT_MASK); 2351 2332 } 2352 2333 2353 2334 static void qm_hw_error_uninit_v2(struct hisi_qm *qm) 2354 2335 { 2355 - writel(QM_ABNORMAL_INT_MASK_VALUE, qm->io_base + QM_ABNORMAL_INT_MASK); 2336 + u32 irq_mask = qm->error_mask; 2337 + 2338 + irq_mask |= readl(qm->io_base + QM_ABNORMAL_INT_MASK); 2339 + writel(irq_mask, qm->io_base + QM_ABNORMAL_INT_MASK); 2356 2340 } 2357 2341 2358 - static void qm_hw_error_init_v3(struct hisi_qm *qm, u32 ce, u32 nfe, u32 fe) 2342 + static void qm_hw_error_init_v3(struct hisi_qm *qm) 2359 2343 { 2360 - u32 irq_enable = ce | nfe | fe; 2361 - u32 irq_unmask = ~irq_enable; 2344 + u32 irq_unmask; 2362 2345 2363 - qm_hw_error_cfg(qm, ce, nfe, fe); 2346 + qm_hw_error_cfg(qm); 2364 2347 2365 2348 /* enable close master ooo when hardware error happened */ 2366 - writel(nfe & (~QM_DB_RANDOM_INVALID), qm->io_base + QM_OOO_SHUTDOWN_SEL); 2349 + writel(qm->err_info.qm_shutdown_mask, qm->io_base + QM_OOO_SHUTDOWN_SEL); 2367 2350 2351 + irq_unmask = ~qm->error_mask; 2368 2352 irq_unmask &= readl(qm->io_base + QM_ABNORMAL_INT_MASK); 2369 2353 writel(irq_unmask, qm->io_base + QM_ABNORMAL_INT_MASK); 2370 2354 } 2371 2355 2372 2356 static void qm_hw_error_uninit_v3(struct hisi_qm *qm) 2373 2357 { 2374 - writel(QM_ABNORMAL_INT_MASK_VALUE, qm->io_base + QM_ABNORMAL_INT_MASK); 2358 + u32 irq_mask = qm->error_mask; 2359 + 2360 + irq_mask |= readl(qm->io_base + QM_ABNORMAL_INT_MASK); 2361 + 
writel(irq_mask, qm->io_base + QM_ABNORMAL_INT_MASK); 2375 2362 2376 2363 /* disable close master ooo when hardware error happened */ 2377 2364 writel(0x0, qm->io_base + QM_OOO_SHUTDOWN_SEL); ··· 2423 2396 2424 2397 static enum acc_err_result qm_hw_error_handle_v2(struct hisi_qm *qm) 2425 2398 { 2426 - u32 error_status, tmp, val; 2399 + u32 error_status, tmp; 2427 2400 2428 2401 /* read err sts */ 2429 2402 tmp = readl(qm->io_base + QM_ABNORMAL_INT_STATUS); ··· 2434 2407 qm->err_status.is_qm_ecc_mbit = true; 2435 2408 2436 2409 qm_log_hw_error(qm, error_status); 2437 - val = error_status | QM_DB_RANDOM_INVALID | QM_BASE_CE; 2438 - /* ce error does not need to be reset */ 2439 - if (val == (QM_DB_RANDOM_INVALID | QM_BASE_CE)) { 2440 - writel(error_status, qm->io_base + 2441 - QM_ABNORMAL_INT_SOURCE); 2442 - writel(qm->err_info.nfe, 2443 - qm->io_base + QM_RAS_NFE_ENABLE); 2444 - return ACC_ERR_RECOVERED; 2445 - } 2410 + if (error_status & qm->err_info.qm_reset_mask) 2411 + return ACC_ERR_NEED_RESET; 2446 2412 2447 - return ACC_ERR_NEED_RESET; 2413 + writel(error_status, qm->io_base + QM_ABNORMAL_INT_SOURCE); 2414 + writel(qm->err_info.nfe, qm->io_base + QM_RAS_NFE_ENABLE); 2448 2415 } 2449 2416 2450 2417 return ACC_ERR_RECOVERED; ··· 2514 2493 u64 val; 2515 2494 u32 i; 2516 2495 2517 - if (!qm->vfs_num || qm->ver < QM_HW_V3) 2496 + if (!qm->vfs_num || !test_bit(QM_SUPPORT_MB_COMMAND, &qm->caps)) 2518 2497 return 0; 2519 2498 2520 2499 while (true) { ··· 2777 2756 2778 2757 static const struct hisi_qm_hw_ops qm_hw_ops_v1 = { 2779 2758 .qm_db = qm_db_v1, 2780 - .get_irq_num = qm_get_irq_num_v1, 2781 2759 .hw_error_init = qm_hw_error_init_v1, 2782 2760 .set_msi = qm_set_msi, 2783 2761 }; ··· 2784 2764 static const struct hisi_qm_hw_ops qm_hw_ops_v2 = { 2785 2765 .get_vft = qm_get_vft_v2, 2786 2766 .qm_db = qm_db_v2, 2787 - .get_irq_num = qm_get_irq_num_v2, 2788 2767 .hw_error_init = qm_hw_error_init_v2, 2789 2768 .hw_error_uninit = qm_hw_error_uninit_v2, 2790 2769 .hw_error_handle = qm_hw_error_handle_v2, ··· 2793 2774 static const struct hisi_qm_hw_ops qm_hw_ops_v3 = { 2794 2775 .get_vft = qm_get_vft_v2, 2795 2776 .qm_db = qm_db_v2, 2796 - .get_irq_num = qm_get_irq_num_v3, 2797 2777 .hw_error_init = qm_hw_error_init_v3, 2798 2778 .hw_error_uninit = qm_hw_error_uninit_v3, 2799 2779 .hw_error_handle = qm_hw_error_handle_v2, 2800 - .stop_qp = qm_stop_qp, 2801 2780 .set_msi = qm_set_msi_v3, 2802 - .ping_all_vfs = qm_ping_all_vfs, 2803 - .ping_pf = qm_ping_pf, 2804 2781 }; 2805 2782 2806 2783 static void *qm_get_avail_sqe(struct hisi_qp *qp) ··· 2804 2789 struct hisi_qp_status *qp_status = &qp->qp_status; 2805 2790 u16 sq_tail = qp_status->sq_tail; 2806 2791 2807 - if (unlikely(atomic_read(&qp->qp_status.used) == QM_Q_DEPTH - 1)) 2792 + if (unlikely(atomic_read(&qp->qp_status.used) == qp->sq_depth - 1)) 2808 2793 return NULL; 2809 2794 2810 2795 return qp->sqe + sq_tail * qp->qm->sqe_size; ··· 2845 2830 2846 2831 qp = &qm->qp_array[qp_id]; 2847 2832 hisi_qm_unset_hw_reset(qp); 2848 - memset(qp->cqe, 0, sizeof(struct qm_cqe) * QM_Q_DEPTH); 2833 + memset(qp->cqe, 0, sizeof(struct qm_cqe) * qp->cq_depth); 2849 2834 2850 2835 qp->event_cb = NULL; 2851 2836 qp->req_cb = NULL; ··· 2926 2911 INIT_QC_COMMON(sqc, qp->sqe_dma, pasid); 2927 2912 if (ver == QM_HW_V1) { 2928 2913 sqc->dw3 = cpu_to_le32(QM_MK_SQC_DW3_V1(0, 0, 0, qm->sqe_size)); 2929 - sqc->w8 = cpu_to_le16(QM_Q_DEPTH - 1); 2914 + sqc->w8 = cpu_to_le16(qp->sq_depth - 1); 2930 2915 } else { 2931 - sqc->dw3 = 
cpu_to_le32(QM_MK_SQC_DW3_V2(qm->sqe_size)); 2916 + sqc->dw3 = cpu_to_le32(QM_MK_SQC_DW3_V2(qm->sqe_size, qp->sq_depth)); 2932 2917 sqc->w8 = 0; /* rand_qc */ 2933 2918 } 2934 2919 sqc->cq_num = cpu_to_le16(qp_id); ··· 2969 2954 if (ver == QM_HW_V1) { 2970 2955 cqc->dw3 = cpu_to_le32(QM_MK_CQC_DW3_V1(0, 0, 0, 2971 2956 QM_QC_CQE_SIZE)); 2972 - cqc->w8 = cpu_to_le16(QM_Q_DEPTH - 1); 2957 + cqc->w8 = cpu_to_le16(qp->cq_depth - 1); 2973 2958 } else { 2974 - cqc->dw3 = cpu_to_le32(QM_MK_CQC_DW3_V2(QM_QC_CQE_SIZE)); 2959 + cqc->dw3 = cpu_to_le32(QM_MK_CQC_DW3_V2(QM_QC_CQE_SIZE, qp->cq_depth)); 2975 2960 cqc->w8 = 0; /* rand_qc */ 2976 2961 } 2977 2962 cqc->dw6 = cpu_to_le32(1 << QM_CQ_PHASE_SHIFT | 1 << QM_CQ_FLAG_SHIFT); ··· 3058 3043 { 3059 3044 int qp_used = atomic_read(&qp->qp_status.used); 3060 3045 u16 cur_tail = qp->qp_status.sq_tail; 3061 - u16 cur_head = (cur_tail + QM_Q_DEPTH - qp_used) % QM_Q_DEPTH; 3046 + u16 sq_depth = qp->sq_depth; 3047 + u16 cur_head = (cur_tail + sq_depth - qp_used) % sq_depth; 3062 3048 struct hisi_qm *qm = qp->qm; 3063 3049 u16 pos; 3064 3050 int i; 3065 3051 3066 3052 for (i = 0; i < qp_used; i++) { 3067 - pos = (i + cur_head) % QM_Q_DEPTH; 3053 + pos = (i + cur_head) % sq_depth; 3068 3054 qp->req_cb(qp, qp->sqe + (u32)(qm->sqe_size * pos)); 3069 3055 atomic_dec(&qp->qp_status.used); 3070 3056 } ··· 3094 3078 return 0; 3095 3079 3096 3080 /* Kunpeng930 supports drain qp by device */ 3097 - if (qm->ops->stop_qp) { 3098 - ret = qm->ops->stop_qp(qp); 3081 + if (test_bit(QM_SUPPORT_STOP_QP, &qm->caps)) { 3082 + ret = qm_stop_qp(qp); 3099 3083 if (ret) 3100 3084 dev_err(dev, "Failed to stop qp(%u)!\n", qp->qp_id); 3101 3085 return ret; ··· 3213 3197 { 3214 3198 struct hisi_qp_status *qp_status = &qp->qp_status; 3215 3199 u16 sq_tail = qp_status->sq_tail; 3216 - u16 sq_tail_next = (sq_tail + 1) % QM_Q_DEPTH; 3200 + u16 sq_tail_next = (sq_tail + 1) % qp->sq_depth; 3217 3201 void *sqe = qm_get_avail_sqe(qp); 3218 3202 3219 3203 if (unlikely(atomic_read(&qp->qp_status.flags) == QP_STOP || ··· 3302 3286 { 3303 3287 struct hisi_qp *qp = q->priv; 3304 3288 3305 - hisi_qm_cache_wb(qp->qm); 3306 3289 hisi_qm_release_qp(qp); 3307 3290 } 3308 3291 ··· 3325 3310 if (qm->ver == QM_HW_V1) { 3326 3311 if (sz > PAGE_SIZE * QM_DOORBELL_PAGE_NR) 3327 3312 return -EINVAL; 3328 - } else if (qm->ver == QM_HW_V2 || !qm->use_db_isolation) { 3313 + } else if (!test_bit(QM_SUPPORT_DB_ISOLATION, &qm->caps)) { 3329 3314 if (sz > PAGE_SIZE * (QM_DOORBELL_PAGE_NR + 3330 3315 QM_DOORBELL_SQ_CQ_BASE_V2 / PAGE_SIZE)) 3331 3316 return -EINVAL; ··· 3402 3387 unsigned long arg) 3403 3388 { 3404 3389 struct hisi_qp *qp = q->priv; 3390 + struct hisi_qp_info qp_info; 3405 3391 struct hisi_qp_ctx qp_ctx; 3406 3392 3407 3393 if (cmd == UACCE_CMD_QM_SET_QP_CTX) { ··· 3419 3403 if (copy_to_user((void __user *)arg, &qp_ctx, 3420 3404 sizeof(struct hisi_qp_ctx))) 3421 3405 return -EFAULT; 3422 - } else { 3423 - return -EINVAL; 3406 + 3407 + return 0; 3408 + } else if (cmd == UACCE_CMD_QM_SET_QP_INFO) { 3409 + if (copy_from_user(&qp_info, (void __user *)arg, 3410 + sizeof(struct hisi_qp_info))) 3411 + return -EFAULT; 3412 + 3413 + qp_info.sqe_size = qp->qm->sqe_size; 3414 + qp_info.sq_depth = qp->sq_depth; 3415 + qp_info.cq_depth = qp->cq_depth; 3416 + 3417 + if (copy_to_user((void __user *)arg, &qp_info, 3418 + sizeof(struct hisi_qp_info))) 3419 + return -EFAULT; 3420 + 3421 + return 0; 3424 3422 } 3425 3423 3426 - return 0; 3424 + return -EINVAL; 3427 3425 } 3428 3426 3429 3427 static const struct 
uacce_ops uacce_qm_ops = { ··· 3457 3427 struct uacce_device *uacce; 3458 3428 unsigned long mmio_page_nr; 3459 3429 unsigned long dus_page_nr; 3430 + u16 sq_depth, cq_depth; 3460 3431 struct uacce_interface interface = { 3461 3432 .flags = UACCE_DEV_SVA, 3462 3433 .ops = &uacce_qm_ops, ··· 3484 3453 3485 3454 uacce->is_vf = pdev->is_virtfn; 3486 3455 uacce->priv = qm; 3487 - uacce->algs = qm->algs; 3488 3456 3489 3457 if (qm->ver == QM_HW_V1) 3490 3458 uacce->api_ver = HISI_QM_API_VER_BASE; ··· 3494 3464 3495 3465 if (qm->ver == QM_HW_V1) 3496 3466 mmio_page_nr = QM_DOORBELL_PAGE_NR; 3497 - else if (qm->ver == QM_HW_V2 || !qm->use_db_isolation) 3467 + else if (!test_bit(QM_SUPPORT_DB_ISOLATION, &qm->caps)) 3498 3468 mmio_page_nr = QM_DOORBELL_PAGE_NR + 3499 3469 QM_DOORBELL_SQ_CQ_BASE_V2 / PAGE_SIZE; 3500 3470 else 3501 3471 mmio_page_nr = qm->db_interval / PAGE_SIZE; 3502 3472 3473 + qm_get_xqc_depth(qm, &sq_depth, &cq_depth, QM_QP_DEPTH_CAP); 3474 + 3503 3475 /* Add one more page for device or qp status */ 3504 - dus_page_nr = (PAGE_SIZE - 1 + qm->sqe_size * QM_Q_DEPTH + 3505 - sizeof(struct qm_cqe) * QM_Q_DEPTH + PAGE_SIZE) >> 3476 + dus_page_nr = (PAGE_SIZE - 1 + qm->sqe_size * sq_depth + 3477 + sizeof(struct qm_cqe) * cq_depth + PAGE_SIZE) >> 3506 3478 PAGE_SHIFT; 3507 3479 3508 3480 uacce->qf_pg_num[UACCE_QFRT_MMIO] = mmio_page_nr; ··· 3609 3577 kfree(qm->qp_array); 3610 3578 } 3611 3579 3612 - static int hisi_qp_memory_init(struct hisi_qm *qm, size_t dma_size, int id) 3580 + static int hisi_qp_memory_init(struct hisi_qm *qm, size_t dma_size, int id, 3581 + u16 sq_depth, u16 cq_depth) 3613 3582 { 3614 3583 struct device *dev = &qm->pdev->dev; 3615 - size_t off = qm->sqe_size * QM_Q_DEPTH; 3584 + size_t off = qm->sqe_size * sq_depth; 3616 3585 struct hisi_qp *qp; 3617 3586 int ret = -ENOMEM; 3618 3587 ··· 3633 3600 qp->cqe = qp->qdma.va + off; 3634 3601 qp->cqe_dma = qp->qdma.dma + off; 3635 3602 qp->qdma.size = dma_size; 3603 + qp->sq_depth = sq_depth; 3604 + qp->cq_depth = cq_depth; 3636 3605 qp->qm = qm; 3637 3606 qp->qp_id = id; 3638 3607 ··· 3661 3626 init_rwsem(&qm->qps_lock); 3662 3627 qm->qp_in_used = 0; 3663 3628 qm->misc_ctl = false; 3664 - if (qm->fun_type == QM_HW_PF && qm->ver > QM_HW_V2) { 3629 + if (test_bit(QM_SUPPORT_RPM, &qm->caps)) { 3665 3630 if (!acpi_device_power_manageable(ACPI_COMPANION(&pdev->dev))) 3666 3631 dev_info(&pdev->dev, "_PS0 and _PR0 are not defined"); 3667 3632 } ··· 3671 3636 { 3672 3637 u32 val; 3673 3638 3674 - if (qm->ver < QM_HW_V3) 3639 + if (!test_bit(QM_SUPPORT_MB_COMMAND, &qm->caps)) 3675 3640 return; 3676 3641 3677 3642 val = readl(qm->io_base + QM_IFC_INT_MASK); ··· 3683 3648 { 3684 3649 u32 val; 3685 3650 3686 - if (qm->ver < QM_HW_V3) 3651 + if (!test_bit(QM_SUPPORT_MB_COMMAND, &qm->caps)) 3687 3652 return; 3688 3653 3689 3654 /* Clear communication interrupt source */ ··· 3699 3664 { 3700 3665 struct pci_dev *pdev = qm->pdev; 3701 3666 3702 - if (qm->use_db_isolation) 3667 + if (test_bit(QM_SUPPORT_DB_ISOLATION, &qm->caps)) 3703 3668 iounmap(qm->db_io_base); 3704 3669 3705 3670 iounmap(qm->io_base); ··· 3749 3714 } 3750 3715 3751 3716 idr_destroy(&qm->qp_idr); 3752 - kfree(qm->factor); 3717 + 3718 + if (test_bit(QM_SUPPORT_FUNC_QOS, &qm->caps)) 3719 + kfree(qm->factor); 3753 3720 } 3754 3721 3755 3722 /** ··· 3777 3740 hisi_qm_set_state(qm, QM_NOT_READY); 3778 3741 up_write(&qm->qps_lock); 3779 3742 3780 - qm_irq_unregister(qm); 3743 + qm_irqs_unregister(qm); 3781 3744 hisi_qm_pci_uninit(qm); 3782 3745 if (qm->use_sva) { 3783 3746 
uacce_remove(qm->uacce); ··· 3878 3841 eqc->base_h = cpu_to_le32(upper_32_bits(qm->eqe_dma)); 3879 3842 if (qm->ver == QM_HW_V1) 3880 3843 eqc->dw3 = cpu_to_le32(QM_EQE_AEQE_SIZE); 3881 - eqc->dw6 = cpu_to_le32((QM_EQ_DEPTH - 1) | (1 << QM_EQC_PHASE_SHIFT)); 3844 + eqc->dw6 = cpu_to_le32(((u32)qm->eq_depth - 1) | (1 << QM_EQC_PHASE_SHIFT)); 3882 3845 3883 3846 eqc_dma = dma_map_single(dev, eqc, sizeof(struct qm_eqc), 3884 3847 DMA_TO_DEVICE); ··· 3907 3870 3908 3871 aeqc->base_l = cpu_to_le32(lower_32_bits(qm->aeqe_dma)); 3909 3872 aeqc->base_h = cpu_to_le32(upper_32_bits(qm->aeqe_dma)); 3910 - aeqc->dw6 = cpu_to_le32((QM_Q_DEPTH - 1) | (1 << QM_EQC_PHASE_SHIFT)); 3873 + aeqc->dw6 = cpu_to_le32(((u32)qm->aeq_depth - 1) | (1 << QM_EQC_PHASE_SHIFT)); 3911 3874 3912 3875 aeqc_dma = dma_map_single(dev, aeqc, sizeof(struct qm_aeqc), 3913 3876 DMA_TO_DEVICE); ··· 4173 4136 4174 4137 static void qm_hw_error_init(struct hisi_qm *qm) 4175 4138 { 4176 - struct hisi_qm_err_info *err_info = &qm->err_info; 4177 - 4178 4139 if (!qm->ops->hw_error_init) { 4179 4140 dev_err(&qm->pdev->dev, "QM doesn't support hw error handling!\n"); 4180 4141 return; 4181 4142 } 4182 4143 4183 - qm->ops->hw_error_init(qm, err_info->ce, err_info->nfe, err_info->fe); 4144 + qm->ops->hw_error_init(qm); 4184 4145 } 4185 4146 4186 4147 static void qm_hw_error_uninit(struct hisi_qm *qm) ··· 4532 4497 qm->mb_qos = 0; 4533 4498 4534 4499 /* vf ping pf to get function qos */ 4535 - if (qm->ops->ping_pf) { 4536 - ret = qm->ops->ping_pf(qm, QM_VF_GET_QOS); 4537 - if (ret) { 4538 - pci_err(qm->pdev, "failed to send cmd to PF to get qos!\n"); 4539 - return ret; 4540 - } 4500 + ret = qm_ping_pf(qm, QM_VF_GET_QOS); 4501 + if (ret) { 4502 + pci_err(qm->pdev, "failed to send cmd to PF to get qos!\n"); 4503 + return ret; 4541 4504 } 4542 4505 4543 4506 while (true) { ··· 4707 4674 * hisi_qm_set_algqos_init() - Initialize function qos debugfs files. 4708 4675 * @qm: The qm for which we want to add debugfs files. 4709 4676 * 4710 - * Create function qos debugfs files. 4677 + * Create function qos debugfs files, VF ping PF to get function qos. 
4711 4678 */ 4712 4679 static void hisi_qm_set_algqos_init(struct hisi_qm *qm) 4713 4680 { 4714 4681 if (qm->fun_type == QM_HW_PF) 4715 4682 debugfs_create_file("alg_qos", 0644, qm->debug.debug_root, 4716 4683 qm, &qm_algqos_fops); 4717 - else 4684 + else if (test_bit(QM_SUPPORT_MB_COMMAND, &qm->caps)) 4718 4685 debugfs_create_file("alg_qos", 0444, qm->debug.debug_root, 4719 4686 qm, &qm_algqos_fops); 4720 4687 } ··· 4762 4729 &qm_atomic64_ops); 4763 4730 } 4764 4731 4765 - if (qm->ver >= QM_HW_V3) 4732 + if (test_bit(QM_SUPPORT_FUNC_QOS, &qm->caps)) 4766 4733 hisi_qm_set_algqos_init(qm); 4767 4734 } 4768 4735 EXPORT_SYMBOL_GPL(hisi_qm_debug_init); ··· 4801 4768 } 4802 4769 EXPORT_SYMBOL_GPL(hisi_qm_debug_regs_clear); 4803 4770 4771 + static void hisi_qm_init_vf_qos(struct hisi_qm *qm, int total_func) 4772 + { 4773 + int i; 4774 + 4775 + for (i = 1; i <= total_func; i++) 4776 + qm->factor[i].func_qos = QM_QOS_MAX_VAL; 4777 + } 4778 + 4804 4779 /** 4805 4780 * hisi_qm_sriov_enable() - enable virtual functions 4806 4781 * @pdev: the PCIe device ··· 4835 4794 goto err_put_sync; 4836 4795 } 4837 4796 4838 - num_vfs = min_t(int, max_vfs, total_vfs); 4797 + if (max_vfs > total_vfs) { 4798 + pci_err(pdev, "%d VFs is more than total VFs %d!\n", max_vfs, total_vfs); 4799 + ret = -ERANGE; 4800 + goto err_put_sync; 4801 + } 4802 + 4803 + num_vfs = max_vfs; 4804 + 4805 + if (test_bit(QM_SUPPORT_FUNC_QOS, &qm->caps)) 4806 + hisi_qm_init_vf_qos(qm, num_vfs); 4807 + 4839 4808 ret = qm_vf_q_assign(qm, num_vfs); 4840 4809 if (ret) { 4841 4810 pci_err(pdev, "Can't assign queues for VF!\n"); ··· 4881 4830 int hisi_qm_sriov_disable(struct pci_dev *pdev, bool is_frozen) 4882 4831 { 4883 4832 struct hisi_qm *qm = pci_get_drvdata(pdev); 4884 - int total_vfs = pci_sriov_get_totalvfs(qm->pdev); 4885 4833 int ret; 4886 4834 4887 4835 if (pci_vfs_assigned(pdev)) { ··· 4895 4845 } 4896 4846 4897 4847 pci_disable_sriov(pdev); 4898 - /* clear vf function shaper configure array */ 4899 - memset(qm->factor + 1, 0, sizeof(struct qm_shaper_factor) * total_vfs); 4848 + 4900 4849 ret = qm_clear_vft_config(qm); 4901 4850 if (ret) 4902 4851 return ret; ··· 4940 4891 if (qm->err_ini->log_dev_hw_err) 4941 4892 qm->err_ini->log_dev_hw_err(qm, err_sts); 4942 4893 4943 - /* ce error does not need to be reset */ 4944 - if ((err_sts | qm->err_info.dev_ce_mask) == 4945 - qm->err_info.dev_ce_mask) { 4946 - if (qm->err_ini->clear_dev_hw_err_status) 4947 - qm->err_ini->clear_dev_hw_err_status(qm, 4948 - err_sts); 4894 + if (err_sts & qm->err_info.dev_reset_mask) 4895 + return ACC_ERR_NEED_RESET; 4949 4896 4950 - return ACC_ERR_RECOVERED; 4951 - } 4952 - 4953 - return ACC_ERR_NEED_RESET; 4897 + if (qm->err_ini->clear_dev_hw_err_status) 4898 + qm->err_ini->clear_dev_hw_err_status(qm, err_sts); 4954 4899 } 4955 4900 4956 4901 return ACC_ERR_RECOVERED; ··· 5113 5070 return 0; 5114 5071 5115 5072 /* Kunpeng930 supports to notify VFs to stop before PF reset */ 5116 - if (qm->ops->ping_all_vfs) { 5117 - ret = qm->ops->ping_all_vfs(qm, cmd); 5073 + if (test_bit(QM_SUPPORT_MB_COMMAND, &qm->caps)) { 5074 + ret = qm_ping_all_vfs(qm, cmd); 5118 5075 if (ret) 5119 5076 pci_err(pdev, "failed to send cmd to all VFs before PF reset!\n"); 5120 5077 } else { ··· 5305 5262 } 5306 5263 5307 5264 /* Kunpeng930 supports to notify VFs to start after PF reset. 
*/ 5308 - if (qm->ops->ping_all_vfs) { 5309 - ret = qm->ops->ping_all_vfs(qm, cmd); 5265 + if (test_bit(QM_SUPPORT_MB_COMMAND, &qm->caps)) { 5266 + ret = qm_ping_all_vfs(qm, cmd); 5310 5267 if (ret) 5311 5268 pci_warn(pdev, "failed to send cmd to all VFs after PF reset!\n"); 5312 5269 } else { ··· 5509 5466 if (pdev->is_virtfn) 5510 5467 return PCI_ERS_RESULT_RECOVERED; 5511 5468 5512 - pci_aer_clear_nonfatal_status(pdev); 5513 - 5514 5469 /* reset pcie device controller */ 5515 5470 ret = qm_controller_reset(qm); 5516 5471 if (ret) { ··· 5640 5599 return IRQ_HANDLED; 5641 5600 } 5642 5601 5643 - static int qm_irq_register(struct hisi_qm *qm) 5644 - { 5645 - struct pci_dev *pdev = qm->pdev; 5646 - int ret; 5647 - 5648 - ret = request_irq(pci_irq_vector(pdev, QM_EQ_EVENT_IRQ_VECTOR), 5649 - qm_irq, 0, qm->dev_name, qm); 5650 - if (ret) 5651 - return ret; 5652 - 5653 - if (qm->ver > QM_HW_V1) { 5654 - ret = request_threaded_irq(pci_irq_vector(pdev, 5655 - QM_AEQ_EVENT_IRQ_VECTOR), 5656 - qm_aeq_irq, qm_aeq_thread, 5657 - 0, qm->dev_name, qm); 5658 - if (ret) 5659 - goto err_aeq_irq; 5660 - 5661 - if (qm->fun_type == QM_HW_PF) { 5662 - ret = request_irq(pci_irq_vector(pdev, 5663 - QM_ABNORMAL_EVENT_IRQ_VECTOR), 5664 - qm_abnormal_irq, 0, qm->dev_name, qm); 5665 - if (ret) 5666 - goto err_abonormal_irq; 5667 - } 5668 - } 5669 - 5670 - if (qm->ver > QM_HW_V2) { 5671 - ret = request_irq(pci_irq_vector(pdev, QM_CMD_EVENT_IRQ_VECTOR), 5672 - qm_mb_cmd_irq, 0, qm->dev_name, qm); 5673 - if (ret) 5674 - goto err_mb_cmd_irq; 5675 - } 5676 - 5677 - return 0; 5678 - 5679 - err_mb_cmd_irq: 5680 - if (qm->fun_type == QM_HW_PF) 5681 - free_irq(pci_irq_vector(pdev, QM_ABNORMAL_EVENT_IRQ_VECTOR), qm); 5682 - err_abonormal_irq: 5683 - free_irq(pci_irq_vector(pdev, QM_AEQ_EVENT_IRQ_VECTOR), qm); 5684 - err_aeq_irq: 5685 - free_irq(pci_irq_vector(pdev, QM_EQ_EVENT_IRQ_VECTOR), qm); 5686 - return ret; 5687 - } 5688 5602 5689 5603 /** 5690 5604 * hisi_qm_dev_shutdown() - Shutdown device. 
··· 5707 5711 hisi_qm_set_hw_reset(qm, QM_RESET_STOP_RX_OFFSET); 5708 5712 out: 5709 5713 pci_save_state(pdev); 5710 - ret = qm->ops->ping_pf(qm, cmd); 5714 + ret = qm_ping_pf(qm, cmd); 5711 5715 if (ret) 5712 5716 dev_warn(&pdev->dev, "PF responds timeout in reset prepare!\n"); 5713 5717 } ··· 5725 5729 cmd = QM_VF_START_FAIL; 5726 5730 } 5727 5731 5728 - ret = qm->ops->ping_pf(qm, cmd); 5732 + ret = qm_ping_pf(qm, cmd); 5729 5733 if (ret) 5730 5734 dev_warn(&pdev->dev, "PF responds timeout in reset done!\n"); 5731 5735 ··· 5920 5924 } 5921 5925 EXPORT_SYMBOL_GPL(hisi_qm_alg_unregister); 5922 5926 5927 + static void qm_unregister_abnormal_irq(struct hisi_qm *qm) 5928 + { 5929 + struct pci_dev *pdev = qm->pdev; 5930 + u32 irq_vector, val; 5931 + 5932 + if (qm->fun_type == QM_HW_VF) 5933 + return; 5934 + 5935 + val = hisi_qm_get_hw_info(qm, qm_basic_info, QM_ABN_IRQ_TYPE_CAP, qm->cap_ver); 5936 + if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_ABN_IRQ_TYPE_MASK)) 5937 + return; 5938 + 5939 + irq_vector = val & QM_IRQ_VECTOR_MASK; 5940 + free_irq(pci_irq_vector(pdev, irq_vector), qm); 5941 + } 5942 + 5943 + static int qm_register_abnormal_irq(struct hisi_qm *qm) 5944 + { 5945 + struct pci_dev *pdev = qm->pdev; 5946 + u32 irq_vector, val; 5947 + int ret; 5948 + 5949 + if (qm->fun_type == QM_HW_VF) 5950 + return 0; 5951 + 5952 + val = hisi_qm_get_hw_info(qm, qm_basic_info, QM_ABN_IRQ_TYPE_CAP, qm->cap_ver); 5953 + if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_ABN_IRQ_TYPE_MASK)) 5954 + return 0; 5955 + 5956 + irq_vector = val & QM_IRQ_VECTOR_MASK; 5957 + ret = request_irq(pci_irq_vector(pdev, irq_vector), qm_abnormal_irq, 0, qm->dev_name, qm); 5958 + if (ret) 5959 + dev_err(&qm->pdev->dev, "failed to request abnormal irq, ret = %d", ret); 5960 + 5961 + return ret; 5962 + } 5963 + 5964 + static void qm_unregister_mb_cmd_irq(struct hisi_qm *qm) 5965 + { 5966 + struct pci_dev *pdev = qm->pdev; 5967 + u32 irq_vector, val; 5968 + 5969 + val = hisi_qm_get_hw_info(qm, qm_basic_info, QM_PF2VF_IRQ_TYPE_CAP, qm->cap_ver); 5970 + if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_IRQ_TYPE_MASK)) 5971 + return; 5972 + 5973 + irq_vector = val & QM_IRQ_VECTOR_MASK; 5974 + free_irq(pci_irq_vector(pdev, irq_vector), qm); 5975 + } 5976 + 5977 + static int qm_register_mb_cmd_irq(struct hisi_qm *qm) 5978 + { 5979 + struct pci_dev *pdev = qm->pdev; 5980 + u32 irq_vector, val; 5981 + int ret; 5982 + 5983 + val = hisi_qm_get_hw_info(qm, qm_basic_info, QM_PF2VF_IRQ_TYPE_CAP, qm->cap_ver); 5984 + if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_IRQ_TYPE_MASK)) 5985 + return 0; 5986 + 5987 + irq_vector = val & QM_IRQ_VECTOR_MASK; 5988 + ret = request_irq(pci_irq_vector(pdev, irq_vector), qm_mb_cmd_irq, 0, qm->dev_name, qm); 5989 + if (ret) 5990 + dev_err(&pdev->dev, "failed to request function communication irq, ret = %d", ret); 5991 + 5992 + return ret; 5993 + } 5994 + 5995 + static void qm_unregister_aeq_irq(struct hisi_qm *qm) 5996 + { 5997 + struct pci_dev *pdev = qm->pdev; 5998 + u32 irq_vector, val; 5999 + 6000 + val = hisi_qm_get_hw_info(qm, qm_basic_info, QM_AEQ_IRQ_TYPE_CAP, qm->cap_ver); 6001 + if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_IRQ_TYPE_MASK)) 6002 + return; 6003 + 6004 + irq_vector = val & QM_IRQ_VECTOR_MASK; 6005 + free_irq(pci_irq_vector(pdev, irq_vector), qm); 6006 + } 6007 + 6008 + static int qm_register_aeq_irq(struct hisi_qm *qm) 6009 + { 6010 + struct pci_dev *pdev = qm->pdev; 6011 + u32 irq_vector, val; 6012 + int ret; 6013 + 6014 + val = hisi_qm_get_hw_info(qm, qm_basic_info, QM_AEQ_IRQ_TYPE_CAP, qm->cap_ver); 6015 + if (!((val >> 
QM_IRQ_TYPE_SHIFT) & QM_IRQ_TYPE_MASK)) 6016 + return 0; 6017 + 6018 + irq_vector = val & QM_IRQ_VECTOR_MASK; 6019 + ret = request_threaded_irq(pci_irq_vector(pdev, irq_vector), qm_aeq_irq, 6020 + qm_aeq_thread, 0, qm->dev_name, qm); 6021 + if (ret) 6022 + dev_err(&pdev->dev, "failed to request aeq irq, ret = %d", ret); 6023 + 6024 + return ret; 6025 + } 6026 + 6027 + static void qm_unregister_eq_irq(struct hisi_qm *qm) 6028 + { 6029 + struct pci_dev *pdev = qm->pdev; 6030 + u32 irq_vector, val; 6031 + 6032 + val = hisi_qm_get_hw_info(qm, qm_basic_info, QM_EQ_IRQ_TYPE_CAP, qm->cap_ver); 6033 + if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_IRQ_TYPE_MASK)) 6034 + return; 6035 + 6036 + irq_vector = val & QM_IRQ_VECTOR_MASK; 6037 + free_irq(pci_irq_vector(pdev, irq_vector), qm); 6038 + } 6039 + 6040 + static int qm_register_eq_irq(struct hisi_qm *qm) 6041 + { 6042 + struct pci_dev *pdev = qm->pdev; 6043 + u32 irq_vector, val; 6044 + int ret; 6045 + 6046 + val = hisi_qm_get_hw_info(qm, qm_basic_info, QM_EQ_IRQ_TYPE_CAP, qm->cap_ver); 6047 + if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_IRQ_TYPE_MASK)) 6048 + return 0; 6049 + 6050 + irq_vector = val & QM_IRQ_VECTOR_MASK; 6051 + ret = request_irq(pci_irq_vector(pdev, irq_vector), qm_irq, 0, qm->dev_name, qm); 6052 + if (ret) 6053 + dev_err(&pdev->dev, "failed to request eq irq, ret = %d", ret); 6054 + 6055 + return ret; 6056 + } 6057 + 6058 + static void qm_irqs_unregister(struct hisi_qm *qm) 6059 + { 6060 + qm_unregister_mb_cmd_irq(qm); 6061 + qm_unregister_abnormal_irq(qm); 6062 + qm_unregister_aeq_irq(qm); 6063 + qm_unregister_eq_irq(qm); 6064 + } 6065 + 6066 + static int qm_irqs_register(struct hisi_qm *qm) 6067 + { 6068 + int ret; 6069 + 6070 + ret = qm_register_eq_irq(qm); 6071 + if (ret) 6072 + return ret; 6073 + 6074 + ret = qm_register_aeq_irq(qm); 6075 + if (ret) 6076 + goto free_eq_irq; 6077 + 6078 + ret = qm_register_abnormal_irq(qm); 6079 + if (ret) 6080 + goto free_aeq_irq; 6081 + 6082 + ret = qm_register_mb_cmd_irq(qm); 6083 + if (ret) 6084 + goto free_abnormal_irq; 6085 + 6086 + return 0; 6087 + 6088 + free_abnormal_irq: 6089 + qm_unregister_abnormal_irq(qm); 6090 + free_aeq_irq: 6091 + qm_unregister_aeq_irq(qm); 6092 + free_eq_irq: 6093 + qm_unregister_eq_irq(qm); 6094 + return ret; 6095 + } 6096 + 5923 6097 static int qm_get_qp_num(struct hisi_qm *qm) 5924 6098 { 5925 - if (qm->ver == QM_HW_V1) 5926 - qm->ctrl_qp_num = QM_QNUM_V1; 5927 - else if (qm->ver == QM_HW_V2) 5928 - qm->ctrl_qp_num = QM_QNUM_V2; 5929 - else 5930 - qm->ctrl_qp_num = readl(qm->io_base + QM_CAPBILITY) & 5931 - QM_QP_NUN_MASK; 6099 + bool is_db_isolation; 5932 6100 5933 - if (qm->use_db_isolation) 5934 - qm->max_qp_num = (readl(qm->io_base + QM_CAPBILITY) >> 5935 - QM_QP_MAX_NUM_SHIFT) & QM_QP_NUN_MASK; 5936 - else 5937 - qm->max_qp_num = qm->ctrl_qp_num; 6101 + /* VF's qp_num is assigned by PF in v2, and VF can get qp_num by vft.
*/ 6102 + if (qm->fun_type == QM_HW_VF) { 6103 + if (qm->ver != QM_HW_V1) 6104 + /* v2 starts to support getting vft by mailbox */ 6105 + return hisi_qm_get_vft(qm, &qm->qp_base, &qm->qp_num); 6106 + 6107 + return 0; 6108 + } 6109 + 6110 + is_db_isolation = test_bit(QM_SUPPORT_DB_ISOLATION, &qm->caps); 6111 + qm->ctrl_qp_num = hisi_qm_get_hw_info(qm, qm_basic_info, QM_TOTAL_QP_NUM_CAP, true); 6112 + qm->max_qp_num = hisi_qm_get_hw_info(qm, qm_basic_info, 6113 + QM_FUNC_MAX_QP_CAP, is_db_isolation); 5938 6114 5939 6115 /* check if qp number is valid */ 5940 6116 if (qm->qp_num > qm->max_qp_num) { ··· 6116 5948 } 6117 5949 6118 5950 return 0; 5951 + } 5952 + 5953 + static void qm_get_hw_caps(struct hisi_qm *qm) 5954 + { 5955 + const struct hisi_qm_cap_info *cap_info = qm->fun_type == QM_HW_PF ? 5956 + qm_cap_info_pf : qm_cap_info_vf; 5957 + u32 size = qm->fun_type == QM_HW_PF ? ARRAY_SIZE(qm_cap_info_pf) : 5958 + ARRAY_SIZE(qm_cap_info_vf); 5959 + u32 val, i; 5960 + 5961 + /* Doorbell isolate register is an independent register. */ 5962 + val = hisi_qm_get_hw_info(qm, qm_cap_info_comm, QM_SUPPORT_DB_ISOLATION, true); 5963 + if (val) 5964 + set_bit(QM_SUPPORT_DB_ISOLATION, &qm->caps); 5965 + 5966 + if (qm->ver >= QM_HW_V3) { 5967 + val = readl(qm->io_base + QM_FUNC_CAPS_REG); 5968 + qm->cap_ver = val & QM_CAPBILITY_VERSION; 5969 + } 5970 + 5971 + /* Get PF/VF common capability */ 5972 + for (i = 1; i < ARRAY_SIZE(qm_cap_info_comm); i++) { 5973 + val = hisi_qm_get_hw_info(qm, qm_cap_info_comm, i, qm->cap_ver); 5974 + if (val) 5975 + set_bit(qm_cap_info_comm[i].type, &qm->caps); 5976 + } 5977 + 5978 + /* Get PF/VF different capability */ 5979 + for (i = 0; i < size; i++) { 5980 + val = hisi_qm_get_hw_info(qm, cap_info, i, qm->cap_ver); 5981 + if (val) 5982 + set_bit(cap_info[i].type, &qm->caps); 5983 + } 6119 5984 } 6120 5985 6121 5986 static int qm_get_pci_res(struct hisi_qm *qm) ··· 6170 5969 goto err_request_mem_regions; 6171 5970 } 6172 5971 6173 - if (qm->ver > QM_HW_V2) { 6174 - if (qm->fun_type == QM_HW_PF) 6175 - qm->use_db_isolation = readl(qm->io_base + 6176 - QM_QUE_ISO_EN) & BIT(0); 6177 - else 6178 - qm->use_db_isolation = readl(qm->io_base + 6179 - QM_QUE_ISO_CFG_V) & BIT(0); 6180 - } 6181 - 6182 - if (qm->use_db_isolation) { 5972 + qm_get_hw_caps(qm); 5973 + if (test_bit(QM_SUPPORT_DB_ISOLATION, &qm->caps)) { 6183 5974 qm->db_interval = QM_QP_DB_INTERVAL; 6184 5975 qm->db_phys_base = pci_resource_start(pdev, PCI_BAR_4); 6185 5976 qm->db_io_base = ioremap(qm->db_phys_base, ··· 6186 5993 qm->db_interval = 0; 6187 5994 } 6188 5995 6189 - if (qm->fun_type == QM_HW_PF) { 6190 - ret = qm_get_qp_num(qm); 6191 - if (ret) 6192 - goto err_db_ioremap; 6193 - } 5996 + ret = qm_get_qp_num(qm); 5997 + if (ret) 5998 + goto err_db_ioremap; 6194 5999 6195 6000 return 0; 6196 6001 6197 6002 err_db_ioremap: 6198 - if (qm->use_db_isolation) 6003 + if (test_bit(QM_SUPPORT_DB_ISOLATION, &qm->caps)) 6199 6004 iounmap(qm->db_io_base); 6200 6005 err_ioremap: 6201 6006 iounmap(qm->io_base); ··· 6224 6033 goto err_get_pci_res; 6225 6034 pci_set_master(pdev); 6226 6035 6227 - if (!qm->ops->get_irq_num) { 6228 - ret = -EOPNOTSUPP; 6229 - goto err_get_pci_res; 6230 - } 6231 - num_vec = qm->ops->get_irq_num(qm); 6036 + num_vec = qm_get_irq_num(qm); 6232 6037 ret = pci_alloc_irq_vectors(pdev, num_vec, num_vec, PCI_IRQ_MSI); 6233 6038 if (ret < 0) { 6234 6039 dev_err(dev, "Failed to enable MSI vectors!\n"); ··· 6267 6080 static int hisi_qp_alloc_memory(struct hisi_qm *qm) 6268 6081 { 6269 6082 struct device *dev = 
&qm->pdev->dev; 6083 + u16 sq_depth, cq_depth; 6270 6084 size_t qp_dma_size; 6271 6085 int i, ret; 6272 6086 ··· 6281 6093 return -ENOMEM; 6282 6094 } 6283 6095 6096 + qm_get_xqc_depth(qm, &sq_depth, &cq_depth, QM_QP_DEPTH_CAP); 6097 + 6284 6098 /* one more page for device or qp statuses */ 6285 - qp_dma_size = qm->sqe_size * QM_Q_DEPTH + 6286 - sizeof(struct qm_cqe) * QM_Q_DEPTH; 6099 + qp_dma_size = qm->sqe_size * sq_depth + sizeof(struct qm_cqe) * cq_depth; 6287 6100 qp_dma_size = PAGE_ALIGN(qp_dma_size) + PAGE_SIZE; 6288 6101 for (i = 0; i < qm->qp_num; i++) { 6289 6102 qm->poll_data[i].qm = qm; 6290 - ret = hisi_qp_memory_init(qm, qp_dma_size, i); 6103 + ret = hisi_qp_memory_init(qm, qp_dma_size, i, sq_depth, cq_depth); 6291 6104 if (ret) 6292 6105 goto err_init_qp_mem; 6293 6106 ··· 6305 6116 static int hisi_qm_memory_init(struct hisi_qm *qm) 6306 6117 { 6307 6118 struct device *dev = &qm->pdev->dev; 6308 - int ret, total_func, i; 6119 + int ret, total_func; 6309 6120 size_t off = 0; 6310 6121 6311 - total_func = pci_sriov_get_totalvfs(qm->pdev) + 1; 6312 - qm->factor = kcalloc(total_func, sizeof(struct qm_shaper_factor), GFP_KERNEL); 6313 - if (!qm->factor) 6314 - return -ENOMEM; 6315 - for (i = 0; i < total_func; i++) 6316 - qm->factor[i].func_qos = QM_QOS_MAX_VAL; 6122 + if (test_bit(QM_SUPPORT_FUNC_QOS, &qm->caps)) { 6123 + total_func = pci_sriov_get_totalvfs(qm->pdev) + 1; 6124 + qm->factor = kcalloc(total_func, sizeof(struct qm_shaper_factor), GFP_KERNEL); 6125 + if (!qm->factor) 6126 + return -ENOMEM; 6127 + 6128 + /* Only the PF value needs to be initialized */ 6129 + qm->factor[0].func_qos = QM_QOS_MAX_VAL; 6130 + } 6317 6131 6318 6132 #define QM_INIT_BUF(qm, type, num) do { \ 6319 6133 (qm)->type = ((qm)->qdma.va + (off)); \ ··· 6325 6133 } while (0) 6326 6134 6327 6135 idr_init(&qm->qp_idr); 6328 - qm->qdma.size = QMC_ALIGN(sizeof(struct qm_eqe) * QM_EQ_DEPTH) + 6329 - QMC_ALIGN(sizeof(struct qm_aeqe) * QM_Q_DEPTH) + 6136 + qm_get_xqc_depth(qm, &qm->eq_depth, &qm->aeq_depth, QM_XEQ_DEPTH_CAP); 6137 + qm->qdma.size = QMC_ALIGN(sizeof(struct qm_eqe) * qm->eq_depth) + 6138 + QMC_ALIGN(sizeof(struct qm_aeqe) * qm->aeq_depth) + 6330 6139 QMC_ALIGN(sizeof(struct qm_sqc) * qm->qp_num) + 6331 6140 QMC_ALIGN(sizeof(struct qm_cqc) * qm->qp_num); 6332 6141 qm->qdma.va = dma_alloc_coherent(dev, qm->qdma.size, &qm->qdma.dma, 6333 6142 GFP_ATOMIC); 6334 6143 dev_dbg(dev, "allocate qm dma buf size=%zx)\n", qm->qdma.size); 6335 6144 if (!qm->qdma.va) { 6336 - ret = -ENOMEM; 6337 - goto err_alloc_qdma; 6145 + ret = -ENOMEM; 6146 + goto err_destroy_idr; 6338 6147 } 6339 6148 6340 - QM_INIT_BUF(qm, eqe, QM_EQ_DEPTH); 6341 - QM_INIT_BUF(qm, aeqe, QM_Q_DEPTH); 6149 + QM_INIT_BUF(qm, eqe, qm->eq_depth); 6150 + QM_INIT_BUF(qm, aeqe, qm->aeq_depth); 6342 6151 QM_INIT_BUF(qm, sqc, qm->qp_num); 6343 6152 QM_INIT_BUF(qm, cqc, qm->qp_num); 6344 6153 ··· 6351 6158 6352 6159 err_alloc_qp_array: 6353 6160 dma_free_coherent(dev, qm->qdma.size, qm->qdma.va, qm->qdma.dma); 6354 - err_alloc_qdma: 6355 - kfree(qm->factor); 6161 + err_destroy_idr: 6162 + idr_destroy(&qm->qp_idr); 6163 + if (test_bit(QM_SUPPORT_FUNC_QOS, &qm->caps)) 6164 + kfree(qm->factor); 6356 6165 6357 6166 return ret; 6358 6167 } ··· 6397 6202 if (ret) 6398 6203 return ret; 6399 6204 6400 - ret = qm_irq_register(qm); 6205 + ret = qm_irqs_register(qm); 6401 6206 if (ret) 6402 6207 goto err_pci_init; 6403 - 6404 - if (qm->fun_type == QM_HW_VF && qm->ver != QM_HW_V1) { 6405 - /* v2 starts to support get vft by mailbox */ 6406 - ret = 
hisi_qm_get_vft(qm, &qm->qp_base, &qm->qp_num); 6407 - if (ret) 6408 - goto err_irq_register; 6409 - } 6410 6208 6411 6209 if (qm->fun_type == QM_HW_PF) { 6412 6210 qm_disable_clock_gate(qm); ··· 6439 6251 qm->uacce = NULL; 6440 6252 } 6441 6253 err_irq_register: 6442 - qm_irq_unregister(qm); 6254 + qm_irqs_unregister(qm); 6443 6255 err_pci_init: 6444 6256 hisi_qm_pci_uninit(qm); 6445 6257 return ret; ··· 6490 6302 { 6491 6303 struct device *dev = &qm->pdev->dev; 6492 6304 6493 - if (qm->fun_type == QM_HW_VF || qm->ver < QM_HW_V3) 6305 + if (!test_bit(QM_SUPPORT_RPM, &qm->caps)) 6494 6306 return; 6495 6307 6496 6308 pm_runtime_set_autosuspend_delay(dev, QM_AUTOSUSPEND_DELAY); ··· 6509 6321 { 6510 6322 struct device *dev = &qm->pdev->dev; 6511 6323 6512 - if (qm->fun_type == QM_HW_VF || qm->ver < QM_HW_V3) 6324 + if (!test_bit(QM_SUPPORT_RPM, &qm->caps)) 6513 6325 return; 6514 6326 6515 6327 pm_runtime_get_noresume(dev);
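The qm.c hunks above converge on one pattern: instead of branching on qm->ver (and on the fixed QM_Q_DEPTH), the driver reads capability registers once and caches feature bits in qm->caps, with per-revision fallbacks for silicon that predates the registers. Below is a minimal sketch of that lookup in isolation; the row layout is an assumption inferred from the sec_basic_info initializers further down, not a copy of the real struct hisi_qm_cap_info in the shared QM header.

#include <linux/io.h>
#include <linux/types.h>

/* Assumed row layout (type, register offset, bit shift, mask, then
 * per-revision fallback values), mirroring the sec_basic_info rows below. */
struct qm_cap_row {
	u32 type;    /* capability id, e.g. QM_SUPPORT_DB_ISOLATION */
	u32 offset;  /* capability register offset */
	u32 shift;   /* field position inside the register */
	u32 mask;    /* field width mask */
	u32 v1_val;  /* baked-in defaults for silicon without the register */
	u32 v2_val;
	u32 v3_val;
};

/* Hedged restatement of the hisi_qm_get_hw_info() lookup pattern. */
static u32 qm_cap_read(struct hisi_qm *qm, const struct qm_cap_row *row,
		       bool from_hw)
{
	u32 val;

	/* Older parts have no capability registers: use the default. */
	if (!from_hw)
		return qm->ver == QM_HW_V1 ? row->v1_val :
		       qm->ver == QM_HW_V2 ? row->v2_val : row->v3_val;

	val = readl(qm->io_base + row->offset);
	return (val >> row->shift) & row->mask;
}

qm_get_hw_caps() performs this walk once at probe time, so the hot paths above pay only a test_bit() on qm->caps. Queue depths move the same way: qm_get_xqc_depth() fills sq_depth/cq_depth (and eq_depth/aeq_depth) from the QM_QP_DEPTH_CAP and QM_XEQ_DEPTH_CAP entries instead of the old QM_Q_DEPTH constant.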
+32 -2
drivers/crypto/hisilicon/sec2/sec.h
··· 17 17 dma_addr_t a_ivin_dma; 18 18 u8 *out_mac; 19 19 dma_addr_t out_mac_dma; 20 + u16 depth; 20 21 }; 21 22 22 23 /* Cipher request of SEC private */ ··· 116 115 /* SEC queue context which defines queue's relatives */ 117 116 struct sec_qp_ctx { 118 117 struct hisi_qp *qp; 119 - struct sec_req *req_list[QM_Q_DEPTH]; 118 + struct sec_req **req_list; 120 119 struct idr req_idr; 121 - struct sec_alg_res res[QM_Q_DEPTH]; 120 + struct sec_alg_res *res; 122 121 struct sec_ctx *ctx; 123 122 spinlock_t req_lock; 124 123 struct list_head backlog; ··· 192 191 bool iommu_used; 193 192 }; 194 193 194 + enum sec_cap_type { 195 + SEC_QM_NFE_MASK_CAP = 0x0, 196 + SEC_QM_RESET_MASK_CAP, 197 + SEC_QM_OOO_SHUTDOWN_MASK_CAP, 198 + SEC_QM_CE_MASK_CAP, 199 + SEC_NFE_MASK_CAP, 200 + SEC_RESET_MASK_CAP, 201 + SEC_OOO_SHUTDOWN_MASK_CAP, 202 + SEC_CE_MASK_CAP, 203 + SEC_CLUSTER_NUM_CAP, 204 + SEC_CORE_TYPE_NUM_CAP, 205 + SEC_CORE_NUM_CAP, 206 + SEC_CORES_PER_CLUSTER_NUM_CAP, 207 + SEC_CORE_ENABLE_BITMAP, 208 + SEC_DRV_ALG_BITMAP_LOW, 209 + SEC_DRV_ALG_BITMAP_HIGH, 210 + SEC_DEV_ALG_BITMAP_LOW, 211 + SEC_DEV_ALG_BITMAP_HIGH, 212 + SEC_CORE1_ALG_BITMAP_LOW, 213 + SEC_CORE1_ALG_BITMAP_HIGH, 214 + SEC_CORE2_ALG_BITMAP_LOW, 215 + SEC_CORE2_ALG_BITMAP_HIGH, 216 + SEC_CORE3_ALG_BITMAP_LOW, 217 + SEC_CORE3_ALG_BITMAP_HIGH, 218 + SEC_CORE4_ALG_BITMAP_LOW, 219 + SEC_CORE4_ALG_BITMAP_HIGH, 220 + }; 221 + 195 222 void sec_destroy_qps(struct hisi_qp **qps, int qp_num); 196 223 struct hisi_qp **sec_create_qps(void); 197 224 int sec_register_to_crypto(struct hisi_qm *qm); 198 225 void sec_unregister_from_crypto(struct hisi_qm *qm); 226 + u64 sec_get_alg_bitmap(struct hisi_qm *qm, u32 high, u32 low); 199 227 #endif
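Two structural points in this header: req_list and res become pointers sized at run time from the queue depth (allocated in sec_crypto.c below) instead of fixed QM_Q_DEPTH arrays, and the new sec_get_alg_bitmap() accessor splices two 32-bit capability words into one 64-bit algorithm mask. A minimal, hypothetical caller sketch of the accessor (sec_alg_supported() and alg_bit are illustrative names, not part of the patch):

#include <linux/bits.h>
#include <linux/types.h>

/* Hypothetical helper: alg_bit is one of the BIT() positions used by the
 * algorithm tables in sec_crypto.c. */
static bool sec_alg_supported(struct hisi_qm *qm, u32 alg_bit)
{
	u64 mask = sec_get_alg_bitmap(qm, SEC_DRV_ALG_BITMAP_HIGH,
				      SEC_DRV_ALG_BITMAP_LOW);

	return !!(mask & BIT_ULL(alg_bit));
}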
+281 -181
drivers/crypto/hisilicon/sec2/sec_crypto.c
··· 59 59 #define SEC_ICV_MASK 0x000E 60 60 #define SEC_SQE_LEN_RATE_MASK 0x3 61 61 62 - #define SEC_TOTAL_IV_SZ (SEC_IV_SIZE * QM_Q_DEPTH) 62 + #define SEC_TOTAL_IV_SZ(depth) (SEC_IV_SIZE * (depth)) 63 63 #define SEC_SGL_SGE_NR 128 64 64 #define SEC_CIPHER_AUTH 0xfe 65 65 #define SEC_AUTH_CIPHER 0x1 66 66 #define SEC_MAX_MAC_LEN 64 67 67 #define SEC_MAX_AAD_LEN 65535 68 68 #define SEC_MAX_CCM_AAD_LEN 65279 69 - #define SEC_TOTAL_MAC_SZ (SEC_MAX_MAC_LEN * QM_Q_DEPTH) 69 + #define SEC_TOTAL_MAC_SZ(depth) (SEC_MAX_MAC_LEN * (depth)) 70 70 71 71 #define SEC_PBUF_SZ 512 72 72 #define SEC_PBUF_IV_OFFSET SEC_PBUF_SZ ··· 74 74 #define SEC_PBUF_PKG (SEC_PBUF_SZ + SEC_IV_SIZE + \ 75 75 SEC_MAX_MAC_LEN * 2) 76 76 #define SEC_PBUF_NUM (PAGE_SIZE / SEC_PBUF_PKG) 77 - #define SEC_PBUF_PAGE_NUM (QM_Q_DEPTH / SEC_PBUF_NUM) 78 - #define SEC_PBUF_LEFT_SZ (SEC_PBUF_PKG * (QM_Q_DEPTH - \ 79 - SEC_PBUF_PAGE_NUM * SEC_PBUF_NUM)) 80 - #define SEC_TOTAL_PBUF_SZ (PAGE_SIZE * SEC_PBUF_PAGE_NUM + \ 81 - SEC_PBUF_LEFT_SZ) 77 + #define SEC_PBUF_PAGE_NUM(depth) ((depth) / SEC_PBUF_NUM) 78 + #define SEC_PBUF_LEFT_SZ(depth) (SEC_PBUF_PKG * ((depth) - \ 79 + SEC_PBUF_PAGE_NUM(depth) * SEC_PBUF_NUM)) 80 + #define SEC_TOTAL_PBUF_SZ(depth) (PAGE_SIZE * SEC_PBUF_PAGE_NUM(depth) + \ 81 + SEC_PBUF_LEFT_SZ(depth)) 82 82 83 83 #define SEC_SQE_LEN_RATE 4 84 84 #define SEC_SQE_CFLAG 2 ··· 104 104 #define IV_CTR_INIT 0x1 105 105 #define IV_BYTE_OFFSET 0x8 106 106 107 + struct sec_skcipher { 108 + u64 alg_msk; 109 + struct skcipher_alg alg; 110 + }; 111 + 112 + struct sec_aead { 113 + u64 alg_msk; 114 + struct aead_alg alg; 115 + }; 116 + 107 117 /* Get an en/de-cipher queue cyclically to balance load over queues of TFM */ 108 118 static inline int sec_alloc_queue_id(struct sec_ctx *ctx, struct sec_req *req) 109 119 { ··· 138 128 int req_id; 139 129 140 130 spin_lock_bh(&qp_ctx->req_lock); 141 - 142 - req_id = idr_alloc_cyclic(&qp_ctx->req_idr, NULL, 143 - 0, QM_Q_DEPTH, GFP_ATOMIC); 131 + req_id = idr_alloc_cyclic(&qp_ctx->req_idr, NULL, 0, qp_ctx->qp->sq_depth, GFP_ATOMIC); 144 132 spin_unlock_bh(&qp_ctx->req_lock); 145 133 if (unlikely(req_id < 0)) { 146 134 dev_err(req->ctx->dev, "alloc req id fail!\n"); ··· 156 148 struct sec_qp_ctx *qp_ctx = req->qp_ctx; 157 149 int req_id = req->req_id; 158 150 159 - if (unlikely(req_id < 0 || req_id >= QM_Q_DEPTH)) { 151 + if (unlikely(req_id < 0 || req_id >= qp_ctx->qp->sq_depth)) { 160 152 dev_err(req->ctx->dev, "free request id invalid!\n"); 161 153 return; 162 154 } ··· 308 300 /* Get DMA memory resources */ 309 301 static int sec_alloc_civ_resource(struct device *dev, struct sec_alg_res *res) 310 302 { 303 + u16 q_depth = res->depth; 311 304 int i; 312 305 313 - res->c_ivin = dma_alloc_coherent(dev, SEC_TOTAL_IV_SZ, 306 + res->c_ivin = dma_alloc_coherent(dev, SEC_TOTAL_IV_SZ(q_depth), 314 307 &res->c_ivin_dma, GFP_KERNEL); 315 308 if (!res->c_ivin) 316 309 return -ENOMEM; 317 310 318 - for (i = 1; i < QM_Q_DEPTH; i++) { 311 + for (i = 1; i < q_depth; i++) { 319 312 res[i].c_ivin_dma = res->c_ivin_dma + i * SEC_IV_SIZE; 320 313 res[i].c_ivin = res->c_ivin + i * SEC_IV_SIZE; 321 314 } ··· 327 318 static void sec_free_civ_resource(struct device *dev, struct sec_alg_res *res) 328 319 { 329 320 if (res->c_ivin) 330 - dma_free_coherent(dev, SEC_TOTAL_IV_SZ, 321 + dma_free_coherent(dev, SEC_TOTAL_IV_SZ(res->depth), 331 322 res->c_ivin, res->c_ivin_dma); 332 323 } 333 324 334 325 static int sec_alloc_aiv_resource(struct device *dev, struct sec_alg_res *res) 335 326 { 327 + u16 q_depth = 
res->depth; 336 328 int i; 337 329 338 - res->a_ivin = dma_alloc_coherent(dev, SEC_TOTAL_IV_SZ, 330 + res->a_ivin = dma_alloc_coherent(dev, SEC_TOTAL_IV_SZ(q_depth), 339 331 &res->a_ivin_dma, GFP_KERNEL); 340 332 if (!res->a_ivin) 341 333 return -ENOMEM; 342 334 343 - for (i = 1; i < QM_Q_DEPTH; i++) { 335 + for (i = 1; i < q_depth; i++) { 344 336 res[i].a_ivin_dma = res->a_ivin_dma + i * SEC_IV_SIZE; 345 337 res[i].a_ivin = res->a_ivin + i * SEC_IV_SIZE; 346 338 } ··· 352 342 static void sec_free_aiv_resource(struct device *dev, struct sec_alg_res *res) 353 343 { 354 344 if (res->a_ivin) 355 - dma_free_coherent(dev, SEC_TOTAL_IV_SZ, 345 + dma_free_coherent(dev, SEC_TOTAL_IV_SZ(res->depth), 356 346 res->a_ivin, res->a_ivin_dma); 357 347 } 358 348 359 349 static int sec_alloc_mac_resource(struct device *dev, struct sec_alg_res *res) 360 350 { 351 + u16 q_depth = res->depth; 361 352 int i; 362 353 363 - res->out_mac = dma_alloc_coherent(dev, SEC_TOTAL_MAC_SZ << 1, 354 + res->out_mac = dma_alloc_coherent(dev, SEC_TOTAL_MAC_SZ(q_depth) << 1, 364 355 &res->out_mac_dma, GFP_KERNEL); 365 356 if (!res->out_mac) 366 357 return -ENOMEM; 367 358 368 - for (i = 1; i < QM_Q_DEPTH; i++) { 359 + for (i = 1; i < q_depth; i++) { 369 360 res[i].out_mac_dma = res->out_mac_dma + 370 361 i * (SEC_MAX_MAC_LEN << 1); 371 362 res[i].out_mac = res->out_mac + i * (SEC_MAX_MAC_LEN << 1); ··· 378 367 static void sec_free_mac_resource(struct device *dev, struct sec_alg_res *res) 379 368 { 380 369 if (res->out_mac) 381 - dma_free_coherent(dev, SEC_TOTAL_MAC_SZ << 1, 370 + dma_free_coherent(dev, SEC_TOTAL_MAC_SZ(res->depth) << 1, 382 371 res->out_mac, res->out_mac_dma); 383 372 } 384 373 385 374 static void sec_free_pbuf_resource(struct device *dev, struct sec_alg_res *res) 386 375 { 387 376 if (res->pbuf) 388 - dma_free_coherent(dev, SEC_TOTAL_PBUF_SZ, 377 + dma_free_coherent(dev, SEC_TOTAL_PBUF_SZ(res->depth), 389 378 res->pbuf, res->pbuf_dma); 390 379 } 391 380 ··· 395 384 */ 396 385 static int sec_alloc_pbuf_resource(struct device *dev, struct sec_alg_res *res) 397 386 { 387 + u16 q_depth = res->depth; 388 + int size = SEC_PBUF_PAGE_NUM(q_depth); 398 389 int pbuf_page_offset; 399 390 int i, j, k; 400 391 401 - res->pbuf = dma_alloc_coherent(dev, SEC_TOTAL_PBUF_SZ, 392 + res->pbuf = dma_alloc_coherent(dev, SEC_TOTAL_PBUF_SZ(q_depth), 402 393 &res->pbuf_dma, GFP_KERNEL); 403 394 if (!res->pbuf) 404 395 return -ENOMEM; ··· 413 400 * So we need SEC_PBUF_PAGE_NUM numbers of PAGE 414 401 * for the SEC_TOTAL_PBUF_SZ 415 402 */ 416 - for (i = 0; i <= SEC_PBUF_PAGE_NUM; i++) { 403 + for (i = 0; i <= size; i++) { 417 404 pbuf_page_offset = PAGE_SIZE * i; 418 405 for (j = 0; j < SEC_PBUF_NUM; j++) { 419 406 k = i * SEC_PBUF_NUM + j; 420 - if (k == QM_Q_DEPTH) 407 + if (k == q_depth) 421 408 break; 422 409 res[k].pbuf = res->pbuf + 423 410 j * SEC_PBUF_PKG + pbuf_page_offset; ··· 483 470 sec_free_mac_resource(dev, qp_ctx->res); 484 471 } 485 472 473 + static int sec_alloc_qp_ctx_resource(struct hisi_qm *qm, struct sec_ctx *ctx, 474 + struct sec_qp_ctx *qp_ctx) 475 + { 476 + u16 q_depth = qp_ctx->qp->sq_depth; 477 + struct device *dev = ctx->dev; 478 + int ret = -ENOMEM; 479 + 480 + qp_ctx->req_list = kcalloc(q_depth, sizeof(struct sec_req *), GFP_KERNEL); 481 + if (!qp_ctx->req_list) 482 + return ret; 483 + 484 + qp_ctx->res = kcalloc(q_depth, sizeof(struct sec_alg_res), GFP_KERNEL); 485 + if (!qp_ctx->res) 486 + goto err_free_req_list; 487 + qp_ctx->res->depth = q_depth; 488 + 489 + qp_ctx->c_in_pool = 
hisi_acc_create_sgl_pool(dev, q_depth, SEC_SGL_SGE_NR); 490 + if (IS_ERR(qp_ctx->c_in_pool)) { 491 + dev_err(dev, "fail to create sgl pool for input!\n"); 492 + goto err_free_res; 493 + } 494 + 495 + qp_ctx->c_out_pool = hisi_acc_create_sgl_pool(dev, q_depth, SEC_SGL_SGE_NR); 496 + if (IS_ERR(qp_ctx->c_out_pool)) { 497 + dev_err(dev, "fail to create sgl pool for output!\n"); 498 + goto err_free_c_in_pool; 499 + } 500 + 501 + ret = sec_alg_resource_alloc(ctx, qp_ctx); 502 + if (ret) 503 + goto err_free_c_out_pool; 504 + 505 + return 0; 506 + 507 + err_free_c_out_pool: 508 + hisi_acc_free_sgl_pool(dev, qp_ctx->c_out_pool); 509 + err_free_c_in_pool: 510 + hisi_acc_free_sgl_pool(dev, qp_ctx->c_in_pool); 511 + err_free_res: 512 + kfree(qp_ctx->res); 513 + err_free_req_list: 514 + kfree(qp_ctx->req_list); 515 + return ret; 516 + } 517 + 518 + static void sec_free_qp_ctx_resource(struct sec_ctx *ctx, struct sec_qp_ctx *qp_ctx) 519 + { 520 + struct device *dev = ctx->dev; 521 + 522 + sec_alg_resource_free(ctx, qp_ctx); 523 + hisi_acc_free_sgl_pool(dev, qp_ctx->c_out_pool); 524 + hisi_acc_free_sgl_pool(dev, qp_ctx->c_in_pool); 525 + kfree(qp_ctx->res); 526 + kfree(qp_ctx->req_list); 527 + } 528 + 486 529 static int sec_create_qp_ctx(struct hisi_qm *qm, struct sec_ctx *ctx, 487 530 int qp_ctx_id, int alg_type) 488 531 { 489 - struct device *dev = ctx->dev; 490 532 struct sec_qp_ctx *qp_ctx; 491 533 struct hisi_qp *qp; 492 - int ret = -ENOMEM; 534 + int ret; 493 535 494 536 qp_ctx = &ctx->qp_ctx[qp_ctx_id]; 495 537 qp = ctx->qps[qp_ctx_id]; ··· 559 491 idr_init(&qp_ctx->req_idr); 560 492 INIT_LIST_HEAD(&qp_ctx->backlog); 561 493 562 - qp_ctx->c_in_pool = hisi_acc_create_sgl_pool(dev, QM_Q_DEPTH, 563 - SEC_SGL_SGE_NR); 564 - if (IS_ERR(qp_ctx->c_in_pool)) { 565 - dev_err(dev, "fail to create sgl pool for input!\n"); 566 - goto err_destroy_idr; 567 - } 568 - 569 - qp_ctx->c_out_pool = hisi_acc_create_sgl_pool(dev, QM_Q_DEPTH, 570 - SEC_SGL_SGE_NR); 571 - if (IS_ERR(qp_ctx->c_out_pool)) { 572 - dev_err(dev, "fail to create sgl pool for output!\n"); 573 - goto err_free_c_in_pool; 574 - } 575 - 576 - ret = sec_alg_resource_alloc(ctx, qp_ctx); 494 + ret = sec_alloc_qp_ctx_resource(qm, ctx, qp_ctx); 577 495 if (ret) 578 - goto err_free_c_out_pool; 496 + goto err_destroy_idr; 579 497 580 498 ret = hisi_qm_start_qp(qp, 0); 581 499 if (ret < 0) 582 - goto err_queue_free; 500 + goto err_resource_free; 583 501 584 502 return 0; 585 503 586 - err_queue_free: 587 - sec_alg_resource_free(ctx, qp_ctx); 588 - err_free_c_out_pool: 589 - hisi_acc_free_sgl_pool(dev, qp_ctx->c_out_pool); 590 - err_free_c_in_pool: 591 - hisi_acc_free_sgl_pool(dev, qp_ctx->c_in_pool); 504 + err_resource_free: 505 + sec_free_qp_ctx_resource(ctx, qp_ctx); 592 506 err_destroy_idr: 593 507 idr_destroy(&qp_ctx->req_idr); 594 508 return ret; ··· 579 529 static void sec_release_qp_ctx(struct sec_ctx *ctx, 580 530 struct sec_qp_ctx *qp_ctx) 581 531 { 582 - struct device *dev = ctx->dev; 583 - 584 532 hisi_qm_stop_qp(qp_ctx->qp); 585 - sec_alg_resource_free(ctx, qp_ctx); 586 - 587 - hisi_acc_free_sgl_pool(dev, qp_ctx->c_out_pool); 588 - hisi_acc_free_sgl_pool(dev, qp_ctx->c_in_pool); 589 - 533 + sec_free_qp_ctx_resource(ctx, qp_ctx); 590 534 idr_destroy(&qp_ctx->req_idr); 591 535 } 592 536 ··· 603 559 ctx->pbuf_supported = ctx->sec->iommu_used; 604 560 605 561 /* Half of queue depth is taken as fake requests limit in the queue. 
*/ 606 - ctx->fake_req_limit = QM_Q_DEPTH >> 1; 562 + ctx->fake_req_limit = ctx->qps[0]->sq_depth >> 1; 607 563 ctx->qp_ctx = kcalloc(sec->ctx_q_num, sizeof(struct sec_qp_ctx), 608 564 GFP_KERNEL); 609 565 if (!ctx->qp_ctx) { ··· 1723 1679 aead_req->out_mac, 1724 1680 authsize, a_req->cryptlen + 1725 1681 a_req->assoclen); 1726 - 1727 1682 if (unlikely(sz != authsize)) { 1728 1683 dev_err(c->dev, "copy out mac err!\n"); 1729 1684 err = -EINVAL; ··· 2009 1966 return sec_aead_ctx_init(tfm, "sha512"); 2010 1967 } 2011 1968 2012 - 2013 1969 static int sec_skcipher_cryptlen_ckeck(struct sec_ctx *ctx, 2014 1970 struct sec_req *sreq) 2015 1971 { ··· 2168 2126 .min_keysize = sec_min_key_size,\ 2169 2127 .max_keysize = sec_max_key_size,\ 2170 2128 .ivsize = iv_size,\ 2171 - }, 2129 + } 2172 2130 2173 2131 #define SEC_SKCIPHER_ALG(name, key_func, min_key_size, \ 2174 2132 max_key_size, blk_size, iv_size) \ 2175 2133 SEC_SKCIPHER_GEN_ALG(name, key_func, min_key_size, max_key_size, \ 2176 2134 sec_skcipher_ctx_init, sec_skcipher_ctx_exit, blk_size, iv_size) 2177 2135 2178 - static struct skcipher_alg sec_skciphers[] = { 2179 - SEC_SKCIPHER_ALG("ecb(aes)", sec_setkey_aes_ecb, 2180 - AES_MIN_KEY_SIZE, AES_MAX_KEY_SIZE, 2181 - AES_BLOCK_SIZE, 0) 2182 - 2183 - SEC_SKCIPHER_ALG("cbc(aes)", sec_setkey_aes_cbc, 2184 - AES_MIN_KEY_SIZE, AES_MAX_KEY_SIZE, 2185 - AES_BLOCK_SIZE, AES_BLOCK_SIZE) 2186 - 2187 - SEC_SKCIPHER_ALG("xts(aes)", sec_setkey_aes_xts, 2188 - SEC_XTS_MIN_KEY_SIZE, SEC_XTS_MAX_KEY_SIZE, 2189 - AES_BLOCK_SIZE, AES_BLOCK_SIZE) 2190 - 2191 - SEC_SKCIPHER_ALG("ecb(des3_ede)", sec_setkey_3des_ecb, 2192 - SEC_DES3_3KEY_SIZE, SEC_DES3_3KEY_SIZE, 2193 - DES3_EDE_BLOCK_SIZE, 0) 2194 - 2195 - SEC_SKCIPHER_ALG("cbc(des3_ede)", sec_setkey_3des_cbc, 2196 - SEC_DES3_3KEY_SIZE, SEC_DES3_3KEY_SIZE, 2197 - DES3_EDE_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE) 2198 - 2199 - SEC_SKCIPHER_ALG("xts(sm4)", sec_setkey_sm4_xts, 2200 - SEC_XTS_MIN_KEY_SIZE, SEC_XTS_MIN_KEY_SIZE, 2201 - AES_BLOCK_SIZE, AES_BLOCK_SIZE) 2202 - 2203 - SEC_SKCIPHER_ALG("cbc(sm4)", sec_setkey_sm4_cbc, 2204 - AES_MIN_KEY_SIZE, AES_MIN_KEY_SIZE, 2205 - AES_BLOCK_SIZE, AES_BLOCK_SIZE) 2206 - }; 2207 - 2208 - static struct skcipher_alg sec_skciphers_v3[] = { 2209 - SEC_SKCIPHER_ALG("ofb(aes)", sec_setkey_aes_ofb, 2210 - AES_MIN_KEY_SIZE, AES_MAX_KEY_SIZE, 2211 - SEC_MIN_BLOCK_SZ, AES_BLOCK_SIZE) 2212 - 2213 - SEC_SKCIPHER_ALG("cfb(aes)", sec_setkey_aes_cfb, 2214 - AES_MIN_KEY_SIZE, AES_MAX_KEY_SIZE, 2215 - SEC_MIN_BLOCK_SZ, AES_BLOCK_SIZE) 2216 - 2217 - SEC_SKCIPHER_ALG("ctr(aes)", sec_setkey_aes_ctr, 2218 - AES_MIN_KEY_SIZE, AES_MAX_KEY_SIZE, 2219 - SEC_MIN_BLOCK_SZ, AES_BLOCK_SIZE) 2220 - 2221 - SEC_SKCIPHER_ALG("ofb(sm4)", sec_setkey_sm4_ofb, 2222 - AES_MIN_KEY_SIZE, AES_MIN_KEY_SIZE, 2223 - SEC_MIN_BLOCK_SZ, AES_BLOCK_SIZE) 2224 - 2225 - SEC_SKCIPHER_ALG("cfb(sm4)", sec_setkey_sm4_cfb, 2226 - AES_MIN_KEY_SIZE, AES_MIN_KEY_SIZE, 2227 - SEC_MIN_BLOCK_SZ, AES_BLOCK_SIZE) 2228 - 2229 - SEC_SKCIPHER_ALG("ctr(sm4)", sec_setkey_sm4_ctr, 2230 - AES_MIN_KEY_SIZE, AES_MIN_KEY_SIZE, 2231 - SEC_MIN_BLOCK_SZ, AES_BLOCK_SIZE) 2136 + static struct sec_skcipher sec_skciphers[] = { 2137 + { 2138 + .alg_msk = BIT(0), 2139 + .alg = SEC_SKCIPHER_ALG("ecb(aes)", sec_setkey_aes_ecb, AES_MIN_KEY_SIZE, 2140 + AES_MAX_KEY_SIZE, AES_BLOCK_SIZE, 0), 2141 + }, 2142 + { 2143 + .alg_msk = BIT(1), 2144 + .alg = SEC_SKCIPHER_ALG("cbc(aes)", sec_setkey_aes_cbc, AES_MIN_KEY_SIZE, 2145 + AES_MAX_KEY_SIZE, AES_BLOCK_SIZE, AES_BLOCK_SIZE), 2146 + }, 2147 + { 2148 + .alg_msk = BIT(2), 
2149 + .alg = SEC_SKCIPHER_ALG("ctr(aes)", sec_setkey_aes_ctr, AES_MIN_KEY_SIZE, 2150 + AES_MAX_KEY_SIZE, SEC_MIN_BLOCK_SZ, AES_BLOCK_SIZE), 2151 + }, 2152 + { 2153 + .alg_msk = BIT(3), 2154 + .alg = SEC_SKCIPHER_ALG("xts(aes)", sec_setkey_aes_xts, SEC_XTS_MIN_KEY_SIZE, 2155 + SEC_XTS_MAX_KEY_SIZE, AES_BLOCK_SIZE, AES_BLOCK_SIZE), 2156 + }, 2157 + { 2158 + .alg_msk = BIT(4), 2159 + .alg = SEC_SKCIPHER_ALG("ofb(aes)", sec_setkey_aes_ofb, AES_MIN_KEY_SIZE, 2160 + AES_MAX_KEY_SIZE, SEC_MIN_BLOCK_SZ, AES_BLOCK_SIZE), 2161 + }, 2162 + { 2163 + .alg_msk = BIT(5), 2164 + .alg = SEC_SKCIPHER_ALG("cfb(aes)", sec_setkey_aes_cfb, AES_MIN_KEY_SIZE, 2165 + AES_MAX_KEY_SIZE, SEC_MIN_BLOCK_SZ, AES_BLOCK_SIZE), 2166 + }, 2167 + { 2168 + .alg_msk = BIT(12), 2169 + .alg = SEC_SKCIPHER_ALG("cbc(sm4)", sec_setkey_sm4_cbc, AES_MIN_KEY_SIZE, 2170 + AES_MIN_KEY_SIZE, AES_BLOCK_SIZE, AES_BLOCK_SIZE), 2171 + }, 2172 + { 2173 + .alg_msk = BIT(13), 2174 + .alg = SEC_SKCIPHER_ALG("ctr(sm4)", sec_setkey_sm4_ctr, AES_MIN_KEY_SIZE, 2175 + AES_MIN_KEY_SIZE, SEC_MIN_BLOCK_SZ, AES_BLOCK_SIZE), 2176 + }, 2177 + { 2178 + .alg_msk = BIT(14), 2179 + .alg = SEC_SKCIPHER_ALG("xts(sm4)", sec_setkey_sm4_xts, SEC_XTS_MIN_KEY_SIZE, 2180 + SEC_XTS_MIN_KEY_SIZE, AES_BLOCK_SIZE, AES_BLOCK_SIZE), 2181 + }, 2182 + { 2183 + .alg_msk = BIT(15), 2184 + .alg = SEC_SKCIPHER_ALG("ofb(sm4)", sec_setkey_sm4_ofb, AES_MIN_KEY_SIZE, 2185 + AES_MIN_KEY_SIZE, SEC_MIN_BLOCK_SZ, AES_BLOCK_SIZE), 2186 + }, 2187 + { 2188 + .alg_msk = BIT(16), 2189 + .alg = SEC_SKCIPHER_ALG("cfb(sm4)", sec_setkey_sm4_cfb, AES_MIN_KEY_SIZE, 2190 + AES_MIN_KEY_SIZE, SEC_MIN_BLOCK_SZ, AES_BLOCK_SIZE), 2191 + }, 2192 + { 2193 + .alg_msk = BIT(23), 2194 + .alg = SEC_SKCIPHER_ALG("ecb(des3_ede)", sec_setkey_3des_ecb, SEC_DES3_3KEY_SIZE, 2195 + SEC_DES3_3KEY_SIZE, DES3_EDE_BLOCK_SIZE, 0), 2196 + }, 2197 + { 2198 + .alg_msk = BIT(24), 2199 + .alg = SEC_SKCIPHER_ALG("cbc(des3_ede)", sec_setkey_3des_cbc, SEC_DES3_3KEY_SIZE, 2200 + SEC_DES3_3KEY_SIZE, DES3_EDE_BLOCK_SIZE, 2201 + DES3_EDE_BLOCK_SIZE), 2202 + }, 2232 2203 }; 2233 2204 2234 2205 static int aead_iv_demension_check(struct aead_request *aead_req) ··· 2435 2380 .maxauthsize = max_authsize,\ 2436 2381 } 2437 2382 2438 - static struct aead_alg sec_aeads[] = { 2439 - SEC_AEAD_ALG("authenc(hmac(sha1),cbc(aes))", 2440 - sec_setkey_aes_cbc_sha1, sec_aead_sha1_ctx_init, 2441 - sec_aead_ctx_exit, AES_BLOCK_SIZE, 2442 - AES_BLOCK_SIZE, SHA1_DIGEST_SIZE), 2443 - 2444 - SEC_AEAD_ALG("authenc(hmac(sha256),cbc(aes))", 2445 - sec_setkey_aes_cbc_sha256, sec_aead_sha256_ctx_init, 2446 - sec_aead_ctx_exit, AES_BLOCK_SIZE, 2447 - AES_BLOCK_SIZE, SHA256_DIGEST_SIZE), 2448 - 2449 - SEC_AEAD_ALG("authenc(hmac(sha512),cbc(aes))", 2450 - sec_setkey_aes_cbc_sha512, sec_aead_sha512_ctx_init, 2451 - sec_aead_ctx_exit, AES_BLOCK_SIZE, 2452 - AES_BLOCK_SIZE, SHA512_DIGEST_SIZE), 2453 - 2454 - SEC_AEAD_ALG("ccm(aes)", sec_setkey_aes_ccm, sec_aead_xcm_ctx_init, 2455 - sec_aead_xcm_ctx_exit, SEC_MIN_BLOCK_SZ, 2456 - AES_BLOCK_SIZE, AES_BLOCK_SIZE), 2457 - 2458 - SEC_AEAD_ALG("gcm(aes)", sec_setkey_aes_gcm, sec_aead_xcm_ctx_init, 2459 - sec_aead_xcm_ctx_exit, SEC_MIN_BLOCK_SZ, 2460 - SEC_AIV_SIZE, AES_BLOCK_SIZE) 2383 + static struct sec_aead sec_aeads[] = { 2384 + { 2385 + .alg_msk = BIT(6), 2386 + .alg = SEC_AEAD_ALG("ccm(aes)", sec_setkey_aes_ccm, sec_aead_xcm_ctx_init, 2387 + sec_aead_xcm_ctx_exit, SEC_MIN_BLOCK_SZ, AES_BLOCK_SIZE, 2388 + AES_BLOCK_SIZE), 2389 + }, 2390 + { 2391 + .alg_msk = BIT(7), 2392 + .alg = SEC_AEAD_ALG("gcm(aes)", 
sec_setkey_aes_gcm, sec_aead_xcm_ctx_init, 2393 + sec_aead_xcm_ctx_exit, SEC_MIN_BLOCK_SZ, SEC_AIV_SIZE, 2394 + AES_BLOCK_SIZE), 2395 + }, 2396 + { 2397 + .alg_msk = BIT(17), 2398 + .alg = SEC_AEAD_ALG("ccm(sm4)", sec_setkey_sm4_ccm, sec_aead_xcm_ctx_init, 2399 + sec_aead_xcm_ctx_exit, SEC_MIN_BLOCK_SZ, AES_BLOCK_SIZE, 2400 + AES_BLOCK_SIZE), 2401 + }, 2402 + { 2403 + .alg_msk = BIT(18), 2404 + .alg = SEC_AEAD_ALG("gcm(sm4)", sec_setkey_sm4_gcm, sec_aead_xcm_ctx_init, 2405 + sec_aead_xcm_ctx_exit, SEC_MIN_BLOCK_SZ, SEC_AIV_SIZE, 2406 + AES_BLOCK_SIZE), 2407 + }, 2408 + { 2409 + .alg_msk = BIT(43), 2410 + .alg = SEC_AEAD_ALG("authenc(hmac(sha1),cbc(aes))", sec_setkey_aes_cbc_sha1, 2411 + sec_aead_sha1_ctx_init, sec_aead_ctx_exit, AES_BLOCK_SIZE, 2412 + AES_BLOCK_SIZE, SHA1_DIGEST_SIZE), 2413 + }, 2414 + { 2415 + .alg_msk = BIT(44), 2416 + .alg = SEC_AEAD_ALG("authenc(hmac(sha256),cbc(aes))", sec_setkey_aes_cbc_sha256, 2417 + sec_aead_sha256_ctx_init, sec_aead_ctx_exit, AES_BLOCK_SIZE, 2418 + AES_BLOCK_SIZE, SHA256_DIGEST_SIZE), 2419 + }, 2420 + { 2421 + .alg_msk = BIT(45), 2422 + .alg = SEC_AEAD_ALG("authenc(hmac(sha512),cbc(aes))", sec_setkey_aes_cbc_sha512, 2423 + sec_aead_sha512_ctx_init, sec_aead_ctx_exit, AES_BLOCK_SIZE, 2424 + AES_BLOCK_SIZE, SHA512_DIGEST_SIZE), 2425 + }, 2461 2426 }; 2462 2427 2463 - static struct aead_alg sec_aeads_v3[] = { 2464 - SEC_AEAD_ALG("ccm(sm4)", sec_setkey_sm4_ccm, sec_aead_xcm_ctx_init, 2465 - sec_aead_xcm_ctx_exit, SEC_MIN_BLOCK_SZ, 2466 - AES_BLOCK_SIZE, AES_BLOCK_SIZE), 2428 + static void sec_unregister_skcipher(u64 alg_mask, int end) 2429 + { 2430 + int i; 2467 2431 2468 - SEC_AEAD_ALG("gcm(sm4)", sec_setkey_sm4_gcm, sec_aead_xcm_ctx_init, 2469 - sec_aead_xcm_ctx_exit, SEC_MIN_BLOCK_SZ, 2470 - SEC_AIV_SIZE, AES_BLOCK_SIZE) 2471 - }; 2432 + for (i = 0; i < end; i++) 2433 + if (sec_skciphers[i].alg_msk & alg_mask) 2434 + crypto_unregister_skcipher(&sec_skciphers[i].alg); 2435 + } 2436 + 2437 + static int sec_register_skcipher(u64 alg_mask) 2438 + { 2439 + int i, ret, count; 2440 + 2441 + count = ARRAY_SIZE(sec_skciphers); 2442 + 2443 + for (i = 0; i < count; i++) { 2444 + if (!(sec_skciphers[i].alg_msk & alg_mask)) 2445 + continue; 2446 + 2447 + ret = crypto_register_skcipher(&sec_skciphers[i].alg); 2448 + if (ret) 2449 + goto err; 2450 + } 2451 + 2452 + return 0; 2453 + 2454 + err: 2455 + sec_unregister_skcipher(alg_mask, i); 2456 + 2457 + return ret; 2458 + } 2459 + 2460 + static void sec_unregister_aead(u64 alg_mask, int end) 2461 + { 2462 + int i; 2463 + 2464 + for (i = 0; i < end; i++) 2465 + if (sec_aeads[i].alg_msk & alg_mask) 2466 + crypto_unregister_aead(&sec_aeads[i].alg); 2467 + } 2468 + 2469 + static int sec_register_aead(u64 alg_mask) 2470 + { 2471 + int i, ret, count; 2472 + 2473 + count = ARRAY_SIZE(sec_aeads); 2474 + 2475 + for (i = 0; i < count; i++) { 2476 + if (!(sec_aeads[i].alg_msk & alg_mask)) 2477 + continue; 2478 + 2479 + ret = crypto_register_aead(&sec_aeads[i].alg); 2480 + if (ret) 2481 + goto err; 2482 + } 2483 + 2484 + return 0; 2485 + 2486 + err: 2487 + sec_unregister_aead(alg_mask, i); 2488 + 2489 + return ret; 2490 + } 2472 2491 2473 2492 int sec_register_to_crypto(struct hisi_qm *qm) 2474 2493 { 2494 + u64 alg_mask = sec_get_alg_bitmap(qm, SEC_DRV_ALG_BITMAP_HIGH, SEC_DRV_ALG_BITMAP_LOW); 2475 2495 int ret; 2476 2496 2477 - /* To avoid repeat register */ 2478 - ret = crypto_register_skciphers(sec_skciphers, 2479 - ARRAY_SIZE(sec_skciphers)); 2497 + ret = sec_register_skcipher(alg_mask); 2480 2498 if (ret) 2481 2499 
return ret; 2482 2500 2483 - if (qm->ver > QM_HW_V2) { 2484 - ret = crypto_register_skciphers(sec_skciphers_v3, 2485 - ARRAY_SIZE(sec_skciphers_v3)); 2486 - if (ret) 2487 - goto reg_skcipher_fail; 2488 - } 2489 - 2490 - ret = crypto_register_aeads(sec_aeads, ARRAY_SIZE(sec_aeads)); 2501 + ret = sec_register_aead(alg_mask); 2491 2502 if (ret) 2492 - goto reg_aead_fail; 2493 - if (qm->ver > QM_HW_V2) { 2494 - ret = crypto_register_aeads(sec_aeads_v3, ARRAY_SIZE(sec_aeads_v3)); 2495 - if (ret) 2496 - goto reg_aead_v3_fail; 2497 - } 2498 - return ret; 2503 + sec_unregister_skcipher(alg_mask, ARRAY_SIZE(sec_skciphers)); 2499 2504 2500 - reg_aead_v3_fail: 2501 - crypto_unregister_aeads(sec_aeads, ARRAY_SIZE(sec_aeads)); 2502 - reg_aead_fail: 2503 - if (qm->ver > QM_HW_V2) 2504 - crypto_unregister_skciphers(sec_skciphers_v3, 2505 - ARRAY_SIZE(sec_skciphers_v3)); 2506 - reg_skcipher_fail: 2507 - crypto_unregister_skciphers(sec_skciphers, 2508 - ARRAY_SIZE(sec_skciphers)); 2509 2505 return ret; 2510 2506 } 2511 2507 2512 2508 void sec_unregister_from_crypto(struct hisi_qm *qm) 2513 2509 { 2514 - if (qm->ver > QM_HW_V2) 2515 - crypto_unregister_aeads(sec_aeads_v3, 2516 - ARRAY_SIZE(sec_aeads_v3)); 2517 - crypto_unregister_aeads(sec_aeads, ARRAY_SIZE(sec_aeads)); 2510 + u64 alg_mask = sec_get_alg_bitmap(qm, SEC_DRV_ALG_BITMAP_HIGH, SEC_DRV_ALG_BITMAP_LOW); 2518 2511 2519 - if (qm->ver > QM_HW_V2) 2520 - crypto_unregister_skciphers(sec_skciphers_v3, 2521 - ARRAY_SIZE(sec_skciphers_v3)); 2522 - crypto_unregister_skciphers(sec_skciphers, 2523 - ARRAY_SIZE(sec_skciphers)); 2512 + sec_unregister_aead(alg_mask, ARRAY_SIZE(sec_aeads)); 2513 + sec_unregister_skcipher(alg_mask, ARRAY_SIZE(sec_skciphers)); 2524 2514 }
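The table-driven registration above skips entries whose alg_msk bit is clear and, on failure, unwinds only the prefix already registered by passing the loop index as end to the unregister helper. A condensed, self-contained restatement of the same idiom, under assumed names (masked_skcipher, register_masked_skciphers):

#include <crypto/internal/skcipher.h>
#include <linux/types.h>

struct masked_skcipher {	/* assumed mirror of struct sec_skcipher */
	u64 alg_msk;
	struct skcipher_alg alg;
};

static int register_masked_skciphers(struct masked_skcipher *tbl, int n,
				     u64 mask)
{
	int i, ret;

	for (i = 0; i < n; i++) {
		if (!(tbl[i].alg_msk & mask))
			continue;

		ret = crypto_register_skcipher(&tbl[i].alg);
		if (ret)
			goto unwind;
	}

	return 0;

unwind:
	/* unregister only the entries in [0, i) that matched the mask */
	while (--i >= 0)
		if (tbl[i].alg_msk & mask)
			crypto_unregister_skcipher(&tbl[i].alg);

	return ret;
}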
+139 -21
drivers/crypto/hisilicon/sec2/sec_main.c
··· 27 27 #define SEC_BD_ERR_CHK_EN3 0xffffbfff 28 28 29 29 #define SEC_SQE_SIZE 128 30 - #define SEC_SQ_SIZE (SEC_SQE_SIZE * QM_Q_DEPTH) 31 30 #define SEC_PF_DEF_Q_NUM 256 32 31 #define SEC_PF_DEF_Q_BASE 0 33 32 #define SEC_CTX_Q_NUM_DEF 2 ··· 41 42 #define SEC_ECC_NUM 16 42 43 #define SEC_ECC_MASH 0xFF 43 44 #define SEC_CORE_INT_DISABLE 0x0 44 - #define SEC_CORE_INT_ENABLE 0x7c1ff 45 - #define SEC_CORE_INT_CLEAR 0x7c1ff 46 - #define SEC_SAA_ENABLE 0x17f 47 45 48 46 #define SEC_RAS_CE_REG 0x301050 49 47 #define SEC_RAS_FE_REG 0x301054 50 48 #define SEC_RAS_NFE_REG 0x301058 51 - #define SEC_RAS_CE_ENB_MSK 0x88 52 49 #define SEC_RAS_FE_ENB_MSK 0x0 53 - #define SEC_RAS_NFE_ENB_MSK 0x7c177 54 50 #define SEC_OOO_SHUTDOWN_SEL 0x301014 55 51 #define SEC_RAS_DISABLE 0x0 56 52 #define SEC_MEM_START_INIT_REG 0x301100 ··· 113 119 #define SEC_DFX_COMMON1_LEN 0x45 114 120 #define SEC_DFX_COMMON2_LEN 0xBA 115 121 122 + #define SEC_ALG_BITMAP_SHIFT 32 123 + 124 + #define SEC_CIPHER_BITMAP (GENMASK_ULL(5, 0) | GENMASK_ULL(16, 12) | \ 125 + GENMASK(24, 21)) 126 + #define SEC_DIGEST_BITMAP (GENMASK_ULL(11, 8) | GENMASK_ULL(20, 19) | \ 127 + GENMASK_ULL(42, 25)) 128 + #define SEC_AEAD_BITMAP (GENMASK_ULL(7, 6) | GENMASK_ULL(18, 17) | \ 129 + GENMASK_ULL(45, 43)) 130 + #define SEC_DEV_ALG_MAX_LEN 256 131 + 116 132 struct sec_hw_error { 117 133 u32 int_msk; 118 134 const char *msg; ··· 133 129 u32 offset; 134 130 }; 135 131 132 + struct sec_dev_alg { 133 + u64 alg_msk; 134 + const char *algs; 135 + }; 136 + 136 137 static const char sec_name[] = "hisi_sec2"; 137 138 static struct dentry *sec_debugfs_root; 138 139 139 140 static struct hisi_qm_list sec_devices = { 140 141 .register_to_crypto = sec_register_to_crypto, 141 142 .unregister_from_crypto = sec_unregister_from_crypto, 143 + }; 144 + 145 + static const struct hisi_qm_cap_info sec_basic_info[] = { 146 + {SEC_QM_NFE_MASK_CAP, 0x3124, 0, GENMASK(31, 0), 0x0, 0x1C77, 0x7C77}, 147 + {SEC_QM_RESET_MASK_CAP, 0x3128, 0, GENMASK(31, 0), 0x0, 0xC77, 0x6C77}, 148 + {SEC_QM_OOO_SHUTDOWN_MASK_CAP, 0x3128, 0, GENMASK(31, 0), 0x0, 0x4, 0x6C77}, 149 + {SEC_QM_CE_MASK_CAP, 0x312C, 0, GENMASK(31, 0), 0x0, 0x8, 0x8}, 150 + {SEC_NFE_MASK_CAP, 0x3130, 0, GENMASK(31, 0), 0x0, 0x177, 0x60177}, 151 + {SEC_RESET_MASK_CAP, 0x3134, 0, GENMASK(31, 0), 0x0, 0x177, 0x177}, 152 + {SEC_OOO_SHUTDOWN_MASK_CAP, 0x3134, 0, GENMASK(31, 0), 0x0, 0x4, 0x177}, 153 + {SEC_CE_MASK_CAP, 0x3138, 0, GENMASK(31, 0), 0x0, 0x88, 0xC088}, 154 + {SEC_CLUSTER_NUM_CAP, 0x313c, 20, GENMASK(3, 0), 0x1, 0x1, 0x1}, 155 + {SEC_CORE_TYPE_NUM_CAP, 0x313c, 16, GENMASK(3, 0), 0x1, 0x1, 0x1}, 156 + {SEC_CORE_NUM_CAP, 0x313c, 8, GENMASK(7, 0), 0x4, 0x4, 0x4}, 157 + {SEC_CORES_PER_CLUSTER_NUM_CAP, 0x313c, 0, GENMASK(7, 0), 0x4, 0x4, 0x4}, 158 + {SEC_CORE_ENABLE_BITMAP, 0x3140, 32, GENMASK(31, 0), 0x17F, 0x17F, 0xF}, 159 + {SEC_DRV_ALG_BITMAP_LOW, 0x3144, 0, GENMASK(31, 0), 0x18050CB, 0x18050CB, 0x187F0FF}, 160 + {SEC_DRV_ALG_BITMAP_HIGH, 0x3148, 0, GENMASK(31, 0), 0x395C, 0x395C, 0x395C}, 161 + {SEC_DEV_ALG_BITMAP_LOW, 0x314c, 0, GENMASK(31, 0), 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF}, 162 + {SEC_DEV_ALG_BITMAP_HIGH, 0x3150, 0, GENMASK(31, 0), 0x3FFF, 0x3FFF, 0x3FFF}, 163 + {SEC_CORE1_ALG_BITMAP_LOW, 0x3154, 0, GENMASK(31, 0), 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF}, 164 + {SEC_CORE1_ALG_BITMAP_HIGH, 0x3158, 0, GENMASK(31, 0), 0x3FFF, 0x3FFF, 0x3FFF}, 165 + {SEC_CORE2_ALG_BITMAP_LOW, 0x315c, 0, GENMASK(31, 0), 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF}, 166 + {SEC_CORE2_ALG_BITMAP_HIGH, 0x3160, 0, GENMASK(31, 0), 0x3FFF, 0x3FFF, 
0x3FFF}, 167 + {SEC_CORE3_ALG_BITMAP_LOW, 0x3164, 0, GENMASK(31, 0), 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF}, 168 + {SEC_CORE3_ALG_BITMAP_HIGH, 0x3168, 0, GENMASK(31, 0), 0x3FFF, 0x3FFF, 0x3FFF}, 169 + {SEC_CORE4_ALG_BITMAP_LOW, 0x316c, 0, GENMASK(31, 0), 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF}, 170 + {SEC_CORE4_ALG_BITMAP_HIGH, 0x3170, 0, GENMASK(31, 0), 0x3FFF, 0x3FFF, 0x3FFF}, 171 + }; 172 + 173 + static const struct sec_dev_alg sec_dev_algs[] = { { 174 + .alg_msk = SEC_CIPHER_BITMAP, 175 + .algs = "cipher\n", 176 + }, { 177 + .alg_msk = SEC_DIGEST_BITMAP, 178 + .algs = "digest\n", 179 + }, { 180 + .alg_msk = SEC_AEAD_BITMAP, 181 + .algs = "aead\n", 182 + }, 142 183 }; 143 184 144 185 static const struct sec_hw_error sec_hw_errors[] = { ··· 388 339 return NULL; 389 340 } 390 341 342 + u64 sec_get_alg_bitmap(struct hisi_qm *qm, u32 high, u32 low) 343 + { 344 + u32 cap_val_h, cap_val_l; 345 + 346 + cap_val_h = hisi_qm_get_hw_info(qm, sec_basic_info, high, qm->cap_ver); 347 + cap_val_l = hisi_qm_get_hw_info(qm, sec_basic_info, low, qm->cap_ver); 348 + 349 + return ((u64)cap_val_h << SEC_ALG_BITMAP_SHIFT) | (u64)cap_val_l; 350 + } 351 + 391 352 static const struct kernel_param_ops sec_uacce_mode_ops = { 392 353 .set = uacce_mode_set, 393 354 .get = param_get_int, ··· 474 415 u32 val; 475 416 int ret; 476 417 477 - if (qm->ver < QM_HW_V3) 418 + if (!test_bit(QM_SUPPORT_SVA_PREFETCH, &qm->caps)) 478 419 return; 479 420 480 421 /* Enable prefetch */ ··· 494 435 u32 val; 495 436 int ret; 496 437 497 - if (qm->ver < QM_HW_V3) 438 + if (!test_bit(QM_SUPPORT_SVA_PREFETCH, &qm->caps)) 498 439 return; 499 440 500 441 val = readl_relaxed(qm->io_base + SEC_PREFETCH_CFG); ··· 565 506 writel(SEC_SINGLE_PORT_MAX_TRANS, 566 507 qm->io_base + AM_CFG_SINGLE_PORT_MAX_TRANS); 567 508 568 - writel(SEC_SAA_ENABLE, qm->io_base + SEC_SAA_EN_REG); 509 + reg = hisi_qm_get_hw_info(qm, sec_basic_info, SEC_CORE_ENABLE_BITMAP, qm->cap_ver); 510 + writel(reg, qm->io_base + SEC_SAA_EN_REG); 569 511 570 512 if (qm->ver < QM_HW_V3) { 571 513 /* HW V2 enable sm4 extra mode, as ctr/ecb */ ··· 636 576 val1 = readl(qm->io_base + SEC_CONTROL_REG); 637 577 if (enable) { 638 578 val1 |= SEC_AXI_SHUTDOWN_ENABLE; 639 - val2 = SEC_RAS_NFE_ENB_MSK; 579 + val2 = hisi_qm_get_hw_info(qm, sec_basic_info, 580 + SEC_OOO_SHUTDOWN_MASK_CAP, qm->cap_ver); 640 581 } else { 641 582 val1 &= SEC_AXI_SHUTDOWN_DISABLE; 642 583 val2 = 0x0; ··· 651 590 652 591 static void sec_hw_error_enable(struct hisi_qm *qm) 653 592 { 593 + u32 ce, nfe; 594 + 654 595 if (qm->ver == QM_HW_V1) { 655 596 writel(SEC_CORE_INT_DISABLE, qm->io_base + SEC_CORE_INT_MASK); 656 597 pci_info(qm->pdev, "V1 not support hw error handle\n"); 657 598 return; 658 599 } 659 600 601 + ce = hisi_qm_get_hw_info(qm, sec_basic_info, SEC_CE_MASK_CAP, qm->cap_ver); 602 + nfe = hisi_qm_get_hw_info(qm, sec_basic_info, SEC_NFE_MASK_CAP, qm->cap_ver); 603 + 660 604 /* clear SEC hw error source if having */ 661 - writel(SEC_CORE_INT_CLEAR, qm->io_base + SEC_CORE_INT_SOURCE); 605 + writel(ce | nfe | SEC_RAS_FE_ENB_MSK, qm->io_base + SEC_CORE_INT_SOURCE); 662 606 663 607 /* enable RAS int */ 664 - writel(SEC_RAS_CE_ENB_MSK, qm->io_base + SEC_RAS_CE_REG); 608 + writel(ce, qm->io_base + SEC_RAS_CE_REG); 665 609 writel(SEC_RAS_FE_ENB_MSK, qm->io_base + SEC_RAS_FE_REG); 666 - writel(SEC_RAS_NFE_ENB_MSK, qm->io_base + SEC_RAS_NFE_REG); 610 + writel(nfe, qm->io_base + SEC_RAS_NFE_REG); 667 611 668 612 /* enable SEC block master OOO when nfe occurs on Kunpeng930 */ 669 613 sec_master_ooo_ctrl(qm, true); 670 614 
671 615 /* enable SEC hw error interrupts */ 672 - writel(SEC_CORE_INT_ENABLE, qm->io_base + SEC_CORE_INT_MASK); 616 + writel(ce | nfe | SEC_RAS_FE_ENB_MSK, qm->io_base + SEC_CORE_INT_MASK); 673 617 } 674 618 675 619 static void sec_hw_error_disable(struct hisi_qm *qm) ··· 1005 939 1006 940 static void sec_clear_hw_err_status(struct hisi_qm *qm, u32 err_sts) 1007 941 { 942 + u32 nfe; 943 + 1008 944 writel(err_sts, qm->io_base + SEC_CORE_INT_SOURCE); 945 + nfe = hisi_qm_get_hw_info(qm, sec_basic_info, SEC_NFE_MASK_CAP, qm->cap_ver); 946 + writel(nfe, qm->io_base + SEC_RAS_NFE_REG); 1009 947 } 1010 948 1011 949 static void sec_open_axi_master_ooo(struct hisi_qm *qm) ··· 1025 955 { 1026 956 struct hisi_qm_err_info *err_info = &qm->err_info; 1027 957 1028 - err_info->ce = QM_BASE_CE; 1029 - err_info->fe = 0; 958 + err_info->fe = SEC_RAS_FE_ENB_MSK; 959 + err_info->ce = hisi_qm_get_hw_info(qm, sec_basic_info, SEC_QM_CE_MASK_CAP, qm->cap_ver); 960 + err_info->nfe = hisi_qm_get_hw_info(qm, sec_basic_info, SEC_QM_NFE_MASK_CAP, qm->cap_ver); 1030 961 err_info->ecc_2bits_mask = SEC_CORE_INT_STATUS_M_ECC; 1031 - err_info->dev_ce_mask = SEC_RAS_CE_ENB_MSK; 962 + err_info->qm_shutdown_mask = hisi_qm_get_hw_info(qm, sec_basic_info, 963 + SEC_QM_OOO_SHUTDOWN_MASK_CAP, qm->cap_ver); 964 + err_info->dev_shutdown_mask = hisi_qm_get_hw_info(qm, sec_basic_info, 965 + SEC_OOO_SHUTDOWN_MASK_CAP, qm->cap_ver); 966 + err_info->qm_reset_mask = hisi_qm_get_hw_info(qm, sec_basic_info, 967 + SEC_QM_RESET_MASK_CAP, qm->cap_ver); 968 + err_info->dev_reset_mask = hisi_qm_get_hw_info(qm, sec_basic_info, 969 + SEC_RESET_MASK_CAP, qm->cap_ver); 1032 970 err_info->msi_wr_port = BIT(0); 1033 971 err_info->acpi_rst = "SRST"; 1034 - err_info->nfe = QM_BASE_NFE | QM_ACC_DO_TASK_TIMEOUT | 1035 - QM_ACC_WB_NOT_READY_TIMEOUT; 1036 972 } 1037 973 1038 974 static const struct hisi_qm_err_ini sec_err_ini = { ··· 1077 1001 return ret; 1078 1002 } 1079 1003 1004 + static int sec_set_qm_algs(struct hisi_qm *qm) 1005 + { 1006 + struct device *dev = &qm->pdev->dev; 1007 + char *algs, *ptr; 1008 + u64 alg_mask; 1009 + int i; 1010 + 1011 + if (!qm->use_sva) 1012 + return 0; 1013 + 1014 + algs = devm_kzalloc(dev, SEC_DEV_ALG_MAX_LEN * sizeof(char), GFP_KERNEL); 1015 + if (!algs) 1016 + return -ENOMEM; 1017 + 1018 + alg_mask = sec_get_alg_bitmap(qm, SEC_DEV_ALG_BITMAP_HIGH, SEC_DEV_ALG_BITMAP_LOW); 1019 + 1020 + for (i = 0; i < ARRAY_SIZE(sec_dev_algs); i++) 1021 + if (alg_mask & sec_dev_algs[i].alg_msk) 1022 + strcat(algs, sec_dev_algs[i].algs); 1023 + 1024 + ptr = strrchr(algs, '\n'); 1025 + if (ptr) 1026 + *ptr = '\0'; 1027 + 1028 + qm->uacce->algs = algs; 1029 + 1030 + return 0; 1031 + } 1032 + 1080 1033 static int sec_qm_init(struct hisi_qm *qm, struct pci_dev *pdev) 1081 1034 { 1035 + int ret; 1036 + 1082 1037 qm->pdev = pdev; 1083 1038 qm->ver = pdev->revision; 1084 - qm->algs = "cipher\ndigest\naead"; 1085 1039 qm->mode = uacce_mode; 1086 1040 qm->sqe_size = SEC_SQE_SIZE; 1087 1041 qm->dev_name = sec_name; ··· 1134 1028 qm->qp_num = SEC_QUEUE_NUM_V1 - SEC_PF_DEF_Q_NUM; 1135 1029 } 1136 1030 1137 - return hisi_qm_init(qm); 1031 + ret = hisi_qm_init(qm); 1032 + if (ret) { 1033 + pci_err(qm->pdev, "Failed to init sec qm configures!\n"); 1034 + return ret; 1035 + } 1036 + 1037 + ret = sec_set_qm_algs(qm); 1038 + if (ret) { 1039 + pci_err(qm->pdev, "Failed to set sec algs!\n"); 1040 + hisi_qm_uninit(qm); 1041 + } 1042 + 1043 + return ret; 1138 1044 } 1139 1045 1140 1046 static void sec_qm_uninit(struct hisi_qm *qm)
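A coherence check between the new sec_basic_info defaults and the BIT() positions used by the tables in sec_crypto.c (editorial arithmetic, not part of the patch): the HW v1/v2 default of SEC_DRV_ALG_BITMAP_LOW, 0x18050CB, is exactly the union of the bits for ecb/cbc/xts(aes), ccm/gcm(aes), cbc/xts(sm4) and ecb/cbc(des3_ede), i.e. the algorithms the old code registered on every hardware revision, while the v3 default 0x187F0FF adds the bits for the former sec_skciphers_v3/sec_aeads_v3 entries (ctr/ofb/cfb for AES and SM4, plus ccm/gcm(sm4)). Spelled out:

#include <linux/bits.h>
#include <linux/build_bug.h>

#define SEC_V2_DRV_ALGS \
	(BIT_ULL(0) | BIT_ULL(1) | BIT_ULL(3) |		/* ecb/cbc/xts(aes) */ \
	 BIT_ULL(6) | BIT_ULL(7) |			/* ccm/gcm(aes) */ \
	 BIT_ULL(12) | BIT_ULL(14) |			/* cbc/xts(sm4) */ \
	 BIT_ULL(23) | BIT_ULL(24))			/* ecb/cbc(des3_ede) */

#define SEC_V3_EXTRA_ALGS \
	(BIT_ULL(2) | BIT_ULL(4) | BIT_ULL(5) |		/* ctr/ofb/cfb(aes) */ \
	 BIT_ULL(13) | BIT_ULL(15) | BIT_ULL(16) |	/* ctr/ofb/cfb(sm4) */ \
	 BIT_ULL(17) | BIT_ULL(18))			/* ccm/gcm(sm4) */

static_assert(SEC_V2_DRV_ALGS == 0x18050CBULL);
static_assert((SEC_V2_DRV_ALGS | SEC_V3_EXTRA_ALGS) == 0x187F0FFULL);

The authenc(hmac(...)) AEADs sit in the high word (bits 43-45, covered by the 0x395C default of SEC_DRV_ALG_BITMAP_HIGH), which is why sec_get_alg_bitmap() has to combine both registers.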
+2 -1
drivers/crypto/hisilicon/zip/zip.h
··· 81 81 u32 rsvd1[4]; 82 82 }; 83 83 84 - int zip_create_qps(struct hisi_qp **qps, int ctx_num, int node); 84 + int zip_create_qps(struct hisi_qp **qps, int qp_num, int node); 85 85 int hisi_zip_register_to_crypto(struct hisi_qm *qm); 86 86 void hisi_zip_unregister_from_crypto(struct hisi_qm *qm); 87 + bool hisi_zip_alg_support(struct hisi_qm *qm, u32 alg); 87 88 #endif
+89 -45
drivers/crypto/hisilicon/zip/zip_crypto.c
··· 39 39 #define HZIP_ALG_PRIORITY 300 40 40 #define HZIP_SGL_SGE_NR 10 41 41 42 + #define HZIP_ALG_ZLIB GENMASK(1, 0) 43 + #define HZIP_ALG_GZIP GENMASK(3, 2) 44 + 42 45 static const u8 zlib_head[HZIP_ZLIB_HEAD_SIZE] = {0x78, 0x9c}; 43 46 static const u8 gzip_head[HZIP_GZIP_HEAD_SIZE] = { 44 47 0x1f, 0x8b, 0x08, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x03 ··· 126 123 if (ret || n == 0 || n > HISI_ACC_SGL_SGE_NR_MAX) 127 124 return -EINVAL; 128 125 129 - return param_set_int(val, kp); 126 + return param_set_ushort(val, kp); 130 127 } 131 128 132 129 static const struct kernel_param_ops sgl_sge_nr_ops = { 133 130 .set = sgl_sge_nr_set, 134 - .get = param_get_int, 131 + .get = param_get_ushort, 135 132 }; 136 133 137 134 static u16 sgl_sge_nr = HZIP_SGL_SGE_NR; 138 135 module_param_cb(sgl_sge_nr, &sgl_sge_nr_ops, &sgl_sge_nr, 0444); 139 136 MODULE_PARM_DESC(sgl_sge_nr, "Number of sge in sgl(1-255)"); 140 137 141 - static u16 get_extra_field_size(const u8 *start) 138 + static u32 get_extra_field_size(const u8 *start) 142 139 { 143 140 return *((u16 *)start) + GZIP_HEAD_FEXTRA_XLEN; 144 141 } ··· 170 167 return size; 171 168 } 172 169 173 - static size_t __maybe_unused get_gzip_head_size(struct scatterlist *sgl) 170 + static u32 __maybe_unused get_gzip_head_size(struct scatterlist *sgl) 174 171 { 175 172 char buf[HZIP_GZIP_HEAD_BUF]; 176 173 ··· 186 183 int ret; 187 184 188 185 ret = sg_copy_from_buffer(dst, sg_nents(dst), head, head_size); 189 - if (ret != head_size) { 186 + if (unlikely(ret != head_size)) { 190 187 pr_err("the head size of buffer is wrong (%d)!\n", ret); 191 188 return -ENOMEM; 192 189 } ··· 196 193 197 194 static int get_comp_head_size(struct acomp_req *acomp_req, u8 req_type) 198 195 { 199 - if (!acomp_req->src || !acomp_req->slen) 196 + if (unlikely(!acomp_req->src || !acomp_req->slen)) 200 197 return -EINVAL; 201 198 202 - if (req_type == HZIP_ALG_TYPE_GZIP && 203 - acomp_req->slen < GZIP_HEAD_FEXTRA_SHIFT) 199 + if (unlikely(req_type == HZIP_ALG_TYPE_GZIP && 200 + acomp_req->slen < GZIP_HEAD_FEXTRA_SHIFT)) 204 201 return -EINVAL; 205 202 206 203 switch (req_type) { ··· 233 230 } 234 231 set_bit(req_id, req_q->req_bitmap); 235 232 233 + write_unlock(&req_q->req_lock); 234 + 236 235 req_cache = q + req_id; 237 236 req_cache->req_id = req_id; 238 237 req_cache->req = req; ··· 247 242 req_cache->dskip = 0; 248 243 } 249 244 250 - write_unlock(&req_q->req_lock); 251 - 252 245 return req_cache; 253 246 } 254 247 ··· 257 254 258 255 write_lock(&req_q->req_lock); 259 256 clear_bit(req->req_id, req_q->req_bitmap); 260 - memset(req, 0, sizeof(struct hisi_zip_req)); 261 257 write_unlock(&req_q->req_lock); 262 258 } 263 259 ··· 341 339 struct hisi_zip_sqe zip_sqe; 342 340 int ret; 343 341 344 - if (!a_req->src || !a_req->slen || !a_req->dst || !a_req->dlen) 342 + if (unlikely(!a_req->src || !a_req->slen || !a_req->dst || !a_req->dlen)) 345 343 return -EINVAL; 346 344 347 345 req->hw_src = hisi_acc_sg_buf_map_to_hw_sgl(dev, a_req->src, pool, ··· 367 365 /* send command to start a task */ 368 366 atomic64_inc(&dfx->send_cnt); 369 367 ret = hisi_qp_send(qp, &zip_sqe); 370 - if (ret < 0) { 368 + if (unlikely(ret < 0)) { 371 369 atomic64_inc(&dfx->send_busy_cnt); 372 370 ret = -EAGAIN; 373 371 dev_dbg_ratelimited(dev, "failed to send request!\n"); ··· 419 417 420 418 atomic64_inc(&dfx->recv_cnt); 421 419 status = ops->get_status(sqe); 422 - if (status != 0 && status != HZIP_NC_ERR) { 420 + if (unlikely(status != 0 && status != HZIP_NC_ERR)) { 423 421 dev_err(dev, "%scompress fail in qp%u: %u, 
output: %u\n", 424 422 (qp->alg_type == 0) ? "" : "de", qp->qp_id, status, 425 423 sqe->produced); ··· 452 450 453 451 /* let's output compression head now */ 454 452 head_size = add_comp_head(acomp_req->dst, qp_ctx->qp->req_type); 455 - if (head_size < 0) { 453 + if (unlikely(head_size < 0)) { 456 454 dev_err_ratelimited(dev, "failed to add comp head (%d)!\n", 457 455 head_size); 458 456 return head_size; ··· 463 461 return PTR_ERR(req); 464 462 465 463 ret = hisi_zip_do_work(req, qp_ctx); 466 - if (ret != -EINPROGRESS) { 464 + if (unlikely(ret != -EINPROGRESS)) { 467 465 dev_info_ratelimited(dev, "failed to do compress (%d)!\n", ret); 468 466 hisi_zip_remove_req(qp_ctx, req); 469 467 } ··· 480 478 int head_size, ret; 481 479 482 480 head_size = get_comp_head_size(acomp_req, qp_ctx->qp->req_type); 483 - if (head_size < 0) { 481 + if (unlikely(head_size < 0)) { 484 482 dev_err_ratelimited(dev, "failed to get comp head size (%d)!\n", 485 483 head_size); 486 484 return head_size; ··· 491 489 return PTR_ERR(req); 492 490 493 491 ret = hisi_zip_do_work(req, qp_ctx); 494 - if (ret != -EINPROGRESS) { 492 + if (unlikely(ret != -EINPROGRESS)) { 495 493 dev_info_ratelimited(dev, "failed to do decompress (%d)!\n", 496 494 ret); 497 495 hisi_zip_remove_req(qp_ctx, req); ··· 500 498 return ret; 501 499 } 502 500 503 - static int hisi_zip_start_qp(struct hisi_qp *qp, struct hisi_zip_qp_ctx *ctx, 501 + static int hisi_zip_start_qp(struct hisi_qp *qp, struct hisi_zip_qp_ctx *qp_ctx, 504 502 int alg_type, int req_type) 505 503 { 506 504 struct device *dev = &qp->qm->pdev->dev; ··· 508 506 509 507 qp->req_type = req_type; 510 508 qp->alg_type = alg_type; 511 - qp->qp_ctx = ctx; 509 + qp->qp_ctx = qp_ctx; 512 510 513 511 ret = hisi_qm_start_qp(qp, 0); 514 512 if (ret < 0) { ··· 516 514 return ret; 517 515 } 518 516 519 - ctx->qp = qp; 517 + qp_ctx->qp = qp; 520 518 521 519 return 0; 522 520 } 523 521 524 - static void hisi_zip_release_qp(struct hisi_zip_qp_ctx *ctx) 522 + static void hisi_zip_release_qp(struct hisi_zip_qp_ctx *qp_ctx) 525 523 { 526 - hisi_qm_stop_qp(ctx->qp); 527 - hisi_qm_free_qps(&ctx->qp, 1); 524 + hisi_qm_stop_qp(qp_ctx->qp); 525 + hisi_qm_free_qps(&qp_ctx->qp, 1); 528 526 } 529 527 530 528 static const struct hisi_zip_sqe_ops hisi_zip_ops_v1 = { ··· 596 594 { 597 595 int i; 598 596 599 - for (i = 1; i >= 0; i--) 597 + for (i = 0; i < HZIP_CTX_Q_NUM; i++) 600 598 hisi_zip_release_qp(&hisi_zip_ctx->qp_ctx[i]); 601 599 } 602 600 603 601 static int hisi_zip_create_req_q(struct hisi_zip_ctx *ctx) 604 602 { 603 + u16 q_depth = ctx->qp_ctx[0].qp->sq_depth; 605 604 struct hisi_zip_req_q *req_q; 606 605 int i, ret; 607 606 608 607 for (i = 0; i < HZIP_CTX_Q_NUM; i++) { 609 608 req_q = &ctx->qp_ctx[i].req_q; 610 - req_q->size = QM_Q_DEPTH; 609 + req_q->size = q_depth; 611 610 612 611 req_q->req_bitmap = bitmap_zalloc(req_q->size, GFP_KERNEL); 613 612 if (!req_q->req_bitmap) { ··· 616 613 if (i == 0) 617 614 return ret; 618 615 619 - goto err_free_loop0; 616 + goto err_free_comp_q; 620 617 } 621 618 rwlock_init(&req_q->req_lock); 622 619 ··· 625 622 if (!req_q->q) { 626 623 ret = -ENOMEM; 627 624 if (i == 0) 628 - goto err_free_bitmap; 625 + goto err_free_comp_bitmap; 629 626 else 630 - goto err_free_loop1; 627 + goto err_free_decomp_bitmap; 631 628 } 632 629 } 633 630 634 631 return 0; 635 632 636 - err_free_loop1: 633 + err_free_decomp_bitmap: 637 634 bitmap_free(ctx->qp_ctx[HZIP_QPC_DECOMP].req_q.req_bitmap); 638 - err_free_loop0: 635 + err_free_comp_q: 639 636 
kfree(ctx->qp_ctx[HZIP_QPC_COMP].req_q.q); 640 - err_free_bitmap: 637 + err_free_comp_bitmap: 641 638 bitmap_free(ctx->qp_ctx[HZIP_QPC_COMP].req_q.req_bitmap); 642 639 return ret; 643 640 } ··· 654 651 655 652 static int hisi_zip_create_sgl_pool(struct hisi_zip_ctx *ctx) 656 653 { 654 + u16 q_depth = ctx->qp_ctx[0].qp->sq_depth; 657 655 struct hisi_zip_qp_ctx *tmp; 658 656 struct device *dev; 659 657 int i; ··· 662 658 for (i = 0; i < HZIP_CTX_Q_NUM; i++) { 663 659 tmp = &ctx->qp_ctx[i]; 664 660 dev = &tmp->qp->qm->pdev->dev; 665 - tmp->sgl_pool = hisi_acc_create_sgl_pool(dev, QM_Q_DEPTH << 1, 661 + tmp->sgl_pool = hisi_acc_create_sgl_pool(dev, q_depth << 1, 666 662 sgl_sge_nr); 667 663 if (IS_ERR(tmp->sgl_pool)) { 668 664 if (i == 1) ··· 759 755 } 760 756 }; 761 757 758 + static int hisi_zip_register_zlib(struct hisi_qm *qm) 759 + { 760 + int ret; 761 + 762 + if (!hisi_zip_alg_support(qm, HZIP_ALG_ZLIB)) 763 + return 0; 764 + 765 + ret = crypto_register_acomp(&hisi_zip_acomp_zlib); 766 + if (ret) 767 + dev_err(&qm->pdev->dev, "failed to register to zlib (%d)!\n", ret); 768 + 769 + return ret; 770 + } 771 + 772 + static void hisi_zip_unregister_zlib(struct hisi_qm *qm) 773 + { 774 + if (!hisi_zip_alg_support(qm, HZIP_ALG_ZLIB)) 775 + return; 776 + 777 + crypto_unregister_acomp(&hisi_zip_acomp_zlib); 778 + } 779 + 762 780 static struct acomp_alg hisi_zip_acomp_gzip = { 763 781 .init = hisi_zip_acomp_init, 764 782 .exit = hisi_zip_acomp_exit, ··· 795 769 } 796 770 }; 797 771 798 - int hisi_zip_register_to_crypto(struct hisi_qm *qm) 772 + static int hisi_zip_register_gzip(struct hisi_qm *qm) 799 773 { 800 774 int ret; 801 775 802 - ret = crypto_register_acomp(&hisi_zip_acomp_zlib); 803 - if (ret) { 804 - pr_err("failed to register to zlib (%d)!\n", ret); 805 - return ret; 806 - } 776 + if (!hisi_zip_alg_support(qm, HZIP_ALG_GZIP)) 777 + return 0; 807 778 808 779 ret = crypto_register_acomp(&hisi_zip_acomp_gzip); 809 - if (ret) { 810 - pr_err("failed to register to gzip (%d)!\n", ret); 811 - crypto_unregister_acomp(&hisi_zip_acomp_zlib); 812 - } 780 + if (ret) 781 + dev_err(&qm->pdev->dev, "failed to register to gzip (%d)!\n", ret); 782 + 783 + return ret; 784 + } 785 + 786 + static void hisi_zip_unregister_gzip(struct hisi_qm *qm) 787 + { 788 + if (!hisi_zip_alg_support(qm, HZIP_ALG_GZIP)) 789 + return; 790 + 791 + crypto_unregister_acomp(&hisi_zip_acomp_gzip); 792 + } 793 + 794 + int hisi_zip_register_to_crypto(struct hisi_qm *qm) 795 + { 796 + int ret = 0; 797 + 798 + ret = hisi_zip_register_zlib(qm); 799 + if (ret) 800 + return ret; 801 + 802 + ret = hisi_zip_register_gzip(qm); 803 + if (ret) 804 + hisi_zip_unregister_zlib(qm); 813 805 814 806 return ret; 815 807 } 816 808 817 809 void hisi_zip_unregister_from_crypto(struct hisi_qm *qm) 818 810 { 819 - crypto_unregister_acomp(&hisi_zip_acomp_gzip); 820 - crypto_unregister_acomp(&hisi_zip_acomp_zlib); 811 + hisi_zip_unregister_zlib(qm); 812 + hisi_zip_unregister_gzip(qm); 821 813 }
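Note: the zip_crypto.c hunks split registration into per-algorithm helpers gated on a capability test, with the zlib registration unwound if gzip registration fails. A generic sketch of that register-with-rollback pattern, assuming a hypothetical alg_supported() predicate and acomp_alg definitions elsewhere:

#include <linux/bits.h>
#include <crypto/internal/acompress.h>

#define MY_ALG_ZLIB	BIT(0)
#define MY_ALG_GZIP	BIT(1)

/* alg_supported() stands in for a capability-bitmap test like
 * hisi_zip_alg_support(); the acomp_alg objects are assumed to
 * be defined elsewhere.
 */
extern bool alg_supported(u32 alg_bit);
extern struct acomp_alg my_zlib_alg, my_gzip_alg;

static int register_all(void)
{
	int ret = 0;

	if (alg_supported(MY_ALG_ZLIB)) {
		ret = crypto_register_acomp(&my_zlib_alg);
		if (ret)
			return ret;
	}

	if (alg_supported(MY_ALG_GZIP)) {
		ret = crypto_register_acomp(&my_gzip_alg);
		if (ret && alg_supported(MY_ALG_ZLIB))
			/* unwind the earlier registration on failure */
			crypto_unregister_acomp(&my_zlib_alg);
	}

	return ret;
}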
+207 -61
drivers/crypto/hisilicon/zip/zip_main.c
··· 20 20 #define HZIP_QUEUE_NUM_V1 4096 21 21 22 22 #define HZIP_CLOCK_GATE_CTRL 0x301004 23 - #define COMP0_ENABLE BIT(0) 24 - #define COMP1_ENABLE BIT(1) 25 - #define DECOMP0_ENABLE BIT(2) 26 - #define DECOMP1_ENABLE BIT(3) 27 - #define DECOMP2_ENABLE BIT(4) 28 - #define DECOMP3_ENABLE BIT(5) 29 - #define DECOMP4_ENABLE BIT(6) 30 - #define DECOMP5_ENABLE BIT(7) 31 - #define HZIP_ALL_COMP_DECOMP_EN (COMP0_ENABLE | COMP1_ENABLE | \ 32 - DECOMP0_ENABLE | DECOMP1_ENABLE | \ 33 - DECOMP2_ENABLE | DECOMP3_ENABLE | \ 34 - DECOMP4_ENABLE | DECOMP5_ENABLE) 35 23 #define HZIP_DECOMP_CHECK_ENABLE BIT(16) 36 24 #define HZIP_FSM_MAX_CNT 0x301008 37 25 ··· 57 69 #define HZIP_CORE_INT_STATUS_M_ECC BIT(1) 58 70 #define HZIP_CORE_SRAM_ECC_ERR_INFO 0x301148 59 71 #define HZIP_CORE_INT_RAS_CE_ENB 0x301160 60 - #define HZIP_CORE_INT_RAS_CE_ENABLE 0x1 61 72 #define HZIP_CORE_INT_RAS_NFE_ENB 0x301164 62 73 #define HZIP_CORE_INT_RAS_FE_ENB 0x301168 74 + #define HZIP_CORE_INT_RAS_FE_ENB_MASK 0x0 63 75 #define HZIP_OOO_SHUTDOWN_SEL 0x30120C 64 - #define HZIP_CORE_INT_RAS_NFE_ENABLE 0x1FFE 65 76 #define HZIP_SRAM_ECC_ERR_NUM_SHIFT 16 66 77 #define HZIP_SRAM_ECC_ERR_ADDR_SHIFT 24 67 78 #define HZIP_CORE_INT_MASK_ALL GENMASK(12, 0) 68 - #define HZIP_COMP_CORE_NUM 2 69 - #define HZIP_DECOMP_CORE_NUM 6 70 - #define HZIP_CORE_NUM (HZIP_COMP_CORE_NUM + \ 71 - HZIP_DECOMP_CORE_NUM) 72 79 #define HZIP_SQE_SIZE 128 73 - #define HZIP_SQ_SIZE (HZIP_SQE_SIZE * QM_Q_DEPTH) 74 80 #define HZIP_PF_DEF_Q_NUM 64 75 81 #define HZIP_PF_DEF_Q_BASE 0 76 82 ··· 73 91 #define HZIP_SOFT_CTRL_ZIP_CONTROL 0x30100C 74 92 #define HZIP_AXI_SHUTDOWN_ENABLE BIT(14) 75 93 #define HZIP_WR_PORT BIT(11) 94 + 95 + #define HZIP_DEV_ALG_MAX_LEN 256 96 + #define HZIP_ALG_ZLIB_BIT GENMASK(1, 0) 97 + #define HZIP_ALG_GZIP_BIT GENMASK(3, 2) 98 + #define HZIP_ALG_DEFLATE_BIT GENMASK(5, 4) 99 + #define HZIP_ALG_LZ77_BIT GENMASK(7, 6) 76 100 77 101 #define HZIP_BUF_SIZE 22 78 102 #define HZIP_SQE_MASK_OFFSET 64 ··· 118 130 struct zip_dfx_item { 119 131 const char *name; 120 132 u32 offset; 133 + }; 134 + 135 + struct zip_dev_alg { 136 + u32 alg_msk; 137 + const char *algs; 138 + }; 139 + 140 + static const struct zip_dev_alg zip_dev_algs[] = { { 141 + .alg_msk = HZIP_ALG_ZLIB_BIT, 142 + .algs = "zlib\n", 143 + }, { 144 + .alg_msk = HZIP_ALG_GZIP_BIT, 145 + .algs = "gzip\n", 146 + }, { 147 + .alg_msk = HZIP_ALG_DEFLATE_BIT, 148 + .algs = "deflate\n", 149 + }, { 150 + .alg_msk = HZIP_ALG_LZ77_BIT, 151 + .algs = "lz77_zstd\n", 152 + }, 121 153 }; 122 154 123 155 static struct hisi_qm_list zip_devices = { ··· 193 185 struct hisi_zip_ctrl { 194 186 struct hisi_zip *hisi_zip; 195 187 struct ctrl_debug_file files[HZIP_DEBUG_FILE_NUM]; 188 + }; 189 + 190 + enum zip_cap_type { 191 + ZIP_QM_NFE_MASK_CAP = 0x0, 192 + ZIP_QM_RESET_MASK_CAP, 193 + ZIP_QM_OOO_SHUTDOWN_MASK_CAP, 194 + ZIP_QM_CE_MASK_CAP, 195 + ZIP_NFE_MASK_CAP, 196 + ZIP_RESET_MASK_CAP, 197 + ZIP_OOO_SHUTDOWN_MASK_CAP, 198 + ZIP_CE_MASK_CAP, 199 + ZIP_CLUSTER_NUM_CAP, 200 + ZIP_CORE_TYPE_NUM_CAP, 201 + ZIP_CORE_NUM_CAP, 202 + ZIP_CLUSTER_COMP_NUM_CAP, 203 + ZIP_CLUSTER_DECOMP_NUM_CAP, 204 + ZIP_DECOMP_ENABLE_BITMAP, 205 + ZIP_COMP_ENABLE_BITMAP, 206 + ZIP_DRV_ALG_BITMAP, 207 + ZIP_DEV_ALG_BITMAP, 208 + ZIP_CORE1_ALG_BITMAP, 209 + ZIP_CORE2_ALG_BITMAP, 210 + ZIP_CORE3_ALG_BITMAP, 211 + ZIP_CORE4_ALG_BITMAP, 212 + ZIP_CORE5_ALG_BITMAP, 213 + ZIP_CAP_MAX 214 + }; 215 + 216 + static struct hisi_qm_cap_info zip_basic_cap_info[] = { 217 + {ZIP_QM_NFE_MASK_CAP, 0x3124, 0, GENMASK(31, 0), 0x0, 0x1C57, 0x7C77}, 
218 + {ZIP_QM_RESET_MASK_CAP, 0x3128, 0, GENMASK(31, 0), 0x0, 0xC57, 0x6C77}, 219 + {ZIP_QM_OOO_SHUTDOWN_MASK_CAP, 0x3128, 0, GENMASK(31, 0), 0x0, 0x4, 0x6C77}, 220 + {ZIP_QM_CE_MASK_CAP, 0x312C, 0, GENMASK(31, 0), 0x0, 0x8, 0x8}, 221 + {ZIP_NFE_MASK_CAP, 0x3130, 0, GENMASK(31, 0), 0x0, 0x7FE, 0x1FFE}, 222 + {ZIP_RESET_MASK_CAP, 0x3134, 0, GENMASK(31, 0), 0x0, 0x7FE, 0x7FE}, 223 + {ZIP_OOO_SHUTDOWN_MASK_CAP, 0x3134, 0, GENMASK(31, 0), 0x0, 0x2, 0x7FE}, 224 + {ZIP_CE_MASK_CAP, 0x3138, 0, GENMASK(31, 0), 0x0, 0x1, 0x1}, 225 + {ZIP_CLUSTER_NUM_CAP, 0x313C, 28, GENMASK(3, 0), 0x1, 0x1, 0x1}, 226 + {ZIP_CORE_TYPE_NUM_CAP, 0x313C, 24, GENMASK(3, 0), 0x2, 0x2, 0x2}, 227 + {ZIP_CORE_NUM_CAP, 0x313C, 16, GENMASK(7, 0), 0x8, 0x8, 0x5}, 228 + {ZIP_CLUSTER_COMP_NUM_CAP, 0x313C, 8, GENMASK(7, 0), 0x2, 0x2, 0x2}, 229 + {ZIP_CLUSTER_DECOMP_NUM_CAP, 0x313C, 0, GENMASK(7, 0), 0x6, 0x6, 0x3}, 230 + {ZIP_DECOMP_ENABLE_BITMAP, 0x3140, 16, GENMASK(15, 0), 0xFC, 0xFC, 0x1C}, 231 + {ZIP_COMP_ENABLE_BITMAP, 0x3140, 0, GENMASK(15, 0), 0x3, 0x3, 0x3}, 232 + {ZIP_DRV_ALG_BITMAP, 0x3144, 0, GENMASK(31, 0), 0xF, 0xF, 0xF}, 233 + {ZIP_DEV_ALG_BITMAP, 0x3148, 0, GENMASK(31, 0), 0xF, 0xF, 0xFF}, 234 + {ZIP_CORE1_ALG_BITMAP, 0x314C, 0, GENMASK(31, 0), 0x5, 0x5, 0xD5}, 235 + {ZIP_CORE2_ALG_BITMAP, 0x3150, 0, GENMASK(31, 0), 0x5, 0x5, 0xD5}, 236 + {ZIP_CORE3_ALG_BITMAP, 0x3154, 0, GENMASK(31, 0), 0xA, 0xA, 0x2A}, 237 + {ZIP_CORE4_ALG_BITMAP, 0x3158, 0, GENMASK(31, 0), 0xA, 0xA, 0x2A}, 238 + {ZIP_CORE5_ALG_BITMAP, 0x315C, 0, GENMASK(31, 0), 0xA, 0xA, 0x2A}, 239 + {ZIP_CAP_MAX, 0x317c, 0, GENMASK(0, 0), 0x0, 0x0, 0x0} 196 240 }; 197 241 198 242 enum { ··· 403 343 return hisi_qm_alloc_qps_node(&zip_devices, qp_num, 0, node, qps); 404 344 } 405 345 346 + bool hisi_zip_alg_support(struct hisi_qm *qm, u32 alg) 347 + { 348 + u32 cap_val; 349 + 350 + cap_val = hisi_qm_get_hw_info(qm, zip_basic_cap_info, ZIP_DRV_ALG_BITMAP, qm->cap_ver); 351 + if ((alg & cap_val) == alg) 352 + return true; 353 + 354 + return false; 355 + } 356 + 357 + static int hisi_zip_set_qm_algs(struct hisi_qm *qm) 358 + { 359 + struct device *dev = &qm->pdev->dev; 360 + char *algs, *ptr; 361 + u32 alg_mask; 362 + int i; 363 + 364 + if (!qm->use_sva) 365 + return 0; 366 + 367 + algs = devm_kzalloc(dev, HZIP_DEV_ALG_MAX_LEN * sizeof(char), GFP_KERNEL); 368 + if (!algs) 369 + return -ENOMEM; 370 + 371 + alg_mask = hisi_qm_get_hw_info(qm, zip_basic_cap_info, ZIP_DEV_ALG_BITMAP, qm->cap_ver); 372 + 373 + for (i = 0; i < ARRAY_SIZE(zip_dev_algs); i++) 374 + if (alg_mask & zip_dev_algs[i].alg_msk) 375 + strcat(algs, zip_dev_algs[i].algs); 376 + 377 + ptr = strrchr(algs, '\n'); 378 + if (ptr) 379 + *ptr = '\0'; 380 + 381 + qm->uacce->algs = algs; 382 + 383 + return 0; 384 + } 385 + 406 386 static void hisi_zip_open_sva_prefetch(struct hisi_qm *qm) 407 387 { 408 388 u32 val; 409 389 int ret; 410 390 411 - if (qm->ver < QM_HW_V3) 391 + if (!test_bit(QM_SUPPORT_SVA_PREFETCH, &qm->caps)) 412 392 return; 413 393 414 394 /* Enable prefetch */ ··· 468 368 u32 val; 469 369 int ret; 470 370 471 - if (qm->ver < QM_HW_V3) 371 + if (!test_bit(QM_SUPPORT_SVA_PREFETCH, &qm->caps)) 472 372 return; 473 373 474 374 val = readl_relaxed(qm->io_base + HZIP_PREFETCH_CFG); ··· 501 401 static int hisi_zip_set_user_domain_and_cache(struct hisi_qm *qm) 502 402 { 503 403 void __iomem *base = qm->io_base; 404 + u32 dcomp_bm, comp_bm; 504 405 505 406 /* qm user domain */ 506 407 writel(AXUSER_BASE, base + QM_ARUSER_M_CFG_1); ··· 539 438 } 540 439 541 440 /* let's open all 
compression/decompression cores */ 542 - writel(HZIP_DECOMP_CHECK_ENABLE | HZIP_ALL_COMP_DECOMP_EN, 543 - base + HZIP_CLOCK_GATE_CTRL); 441 + dcomp_bm = hisi_qm_get_hw_info(qm, zip_basic_cap_info, 442 + ZIP_DECOMP_ENABLE_BITMAP, qm->cap_ver); 443 + comp_bm = hisi_qm_get_hw_info(qm, zip_basic_cap_info, 444 + ZIP_COMP_ENABLE_BITMAP, qm->cap_ver); 445 + writel(HZIP_DECOMP_CHECK_ENABLE | dcomp_bm | comp_bm, base + HZIP_CLOCK_GATE_CTRL); 544 446 545 447 /* enable sqc,cqc writeback */ 546 448 writel(SQC_CACHE_ENABLE | CQC_CACHE_ENABLE | SQC_CACHE_WB_ENABLE | ··· 562 458 val1 = readl(qm->io_base + HZIP_SOFT_CTRL_ZIP_CONTROL); 563 459 if (enable) { 564 460 val1 |= HZIP_AXI_SHUTDOWN_ENABLE; 565 - val2 = HZIP_CORE_INT_RAS_NFE_ENABLE; 461 + val2 = hisi_qm_get_hw_info(qm, zip_basic_cap_info, 462 + ZIP_OOO_SHUTDOWN_MASK_CAP, qm->cap_ver); 566 463 } else { 567 464 val1 &= ~HZIP_AXI_SHUTDOWN_ENABLE; 568 465 val2 = 0x0; ··· 577 472 578 473 static void hisi_zip_hw_error_enable(struct hisi_qm *qm) 579 474 { 475 + u32 nfe, ce; 476 + 580 477 if (qm->ver == QM_HW_V1) { 581 478 writel(HZIP_CORE_INT_MASK_ALL, 582 479 qm->io_base + HZIP_CORE_INT_MASK_REG); ··· 586 479 return; 587 480 } 588 481 482 + nfe = hisi_qm_get_hw_info(qm, zip_basic_cap_info, ZIP_NFE_MASK_CAP, qm->cap_ver); 483 + ce = hisi_qm_get_hw_info(qm, zip_basic_cap_info, ZIP_CE_MASK_CAP, qm->cap_ver); 484 + 589 485 /* clear ZIP hw error source if having */ 590 - writel(HZIP_CORE_INT_MASK_ALL, qm->io_base + HZIP_CORE_INT_SOURCE); 486 + writel(ce | nfe | HZIP_CORE_INT_RAS_FE_ENB_MASK, qm->io_base + HZIP_CORE_INT_SOURCE); 591 487 592 488 /* configure error type */ 593 - writel(HZIP_CORE_INT_RAS_CE_ENABLE, 594 - qm->io_base + HZIP_CORE_INT_RAS_CE_ENB); 595 - writel(0x0, qm->io_base + HZIP_CORE_INT_RAS_FE_ENB); 596 - writel(HZIP_CORE_INT_RAS_NFE_ENABLE, 597 - qm->io_base + HZIP_CORE_INT_RAS_NFE_ENB); 489 + writel(ce, qm->io_base + HZIP_CORE_INT_RAS_CE_ENB); 490 + writel(HZIP_CORE_INT_RAS_FE_ENB_MASK, qm->io_base + HZIP_CORE_INT_RAS_FE_ENB); 491 + writel(nfe, qm->io_base + HZIP_CORE_INT_RAS_NFE_ENB); 598 492 599 - /* enable ZIP block master OOO when nfe occurs on Kunpeng930 */ 600 493 hisi_zip_master_ooo_ctrl(qm, true); 601 494 602 495 /* enable ZIP hw error interrupts */ ··· 605 498 606 499 static void hisi_zip_hw_error_disable(struct hisi_qm *qm) 607 500 { 608 - /* disable ZIP hw error interrupts */ 609 - writel(HZIP_CORE_INT_MASK_ALL, qm->io_base + HZIP_CORE_INT_MASK_REG); 501 + u32 nfe, ce; 610 502 611 - /* disable ZIP block master OOO when nfe occurs on Kunpeng930 */ 503 + /* disable ZIP hw error interrupts */ 504 + nfe = hisi_qm_get_hw_info(qm, zip_basic_cap_info, ZIP_NFE_MASK_CAP, qm->cap_ver); 505 + ce = hisi_qm_get_hw_info(qm, zip_basic_cap_info, ZIP_CE_MASK_CAP, qm->cap_ver); 506 + writel(ce | nfe | HZIP_CORE_INT_RAS_FE_ENB_MASK, qm->io_base + HZIP_CORE_INT_MASK_REG); 507 + 612 508 hisi_zip_master_ooo_ctrl(qm, false); 613 509 } 614 510 ··· 696 586 return len; 697 587 698 588 tbuf[len] = '\0'; 699 - if (kstrtoul(tbuf, 0, &val)) 700 - return -EFAULT; 589 + ret = kstrtoul(tbuf, 0, &val); 590 + if (ret) 591 + return ret; 701 592 702 593 ret = hisi_qm_get_dfx_access(qm); 703 594 if (ret) ··· 762 651 763 652 static int hisi_zip_core_debug_init(struct hisi_qm *qm) 764 653 { 654 + u32 zip_core_num, zip_comp_core_num; 765 655 struct device *dev = &qm->pdev->dev; 766 656 struct debugfs_regset32 *regset; 767 657 struct dentry *tmp_d; 768 658 char buf[HZIP_BUF_SIZE]; 769 659 int i; 770 660 771 - for (i = 0; i < HZIP_CORE_NUM; i++) { 772 - if (i < 
HZIP_COMP_CORE_NUM) 661 + zip_core_num = hisi_qm_get_hw_info(qm, zip_basic_cap_info, ZIP_CORE_NUM_CAP, qm->cap_ver); 662 + zip_comp_core_num = hisi_qm_get_hw_info(qm, zip_basic_cap_info, ZIP_CLUSTER_COMP_NUM_CAP, 663 + qm->cap_ver); 664 + 665 + for (i = 0; i < zip_core_num; i++) { 666 + if (i < zip_comp_core_num) 773 667 scnprintf(buf, sizeof(buf), "comp_core%d", i); 774 668 else 775 669 scnprintf(buf, sizeof(buf), "decomp_core%d", 776 - i - HZIP_COMP_CORE_NUM); 670 + i - zip_comp_core_num); 777 671 778 672 regset = devm_kzalloc(dev, sizeof(*regset), GFP_KERNEL); 779 673 if (!regset) ··· 791 675 792 676 tmp_d = debugfs_create_dir(buf, qm->debug.debug_root); 793 677 debugfs_create_file("regs", 0444, tmp_d, regset, 794 - &hisi_zip_regs_fops); 678 + &hisi_zip_regs_fops); 795 679 } 796 680 797 681 return 0; ··· 911 795 int com_dfx_regs_num = ARRAY_SIZE(hzip_com_dfx_regs); 912 796 struct qm_debug *debug = &qm->debug; 913 797 void __iomem *io_base; 798 + u32 zip_core_num; 914 799 int i, j, idx; 915 800 916 - debug->last_words = kcalloc(core_dfx_regs_num * HZIP_CORE_NUM + 917 - com_dfx_regs_num, sizeof(unsigned int), GFP_KERNEL); 801 + zip_core_num = hisi_qm_get_hw_info(qm, zip_basic_cap_info, ZIP_CORE_NUM_CAP, qm->cap_ver); 802 + 803 + debug->last_words = kcalloc(core_dfx_regs_num * zip_core_num + com_dfx_regs_num, 804 + sizeof(unsigned int), GFP_KERNEL); 918 805 if (!debug->last_words) 919 806 return -ENOMEM; 920 807 ··· 926 807 debug->last_words[i] = readl_relaxed(io_base); 927 808 } 928 809 929 - for (i = 0; i < HZIP_CORE_NUM; i++) { 810 + for (i = 0; i < zip_core_num; i++) { 930 811 io_base = qm->io_base + core_offsets[i]; 931 812 for (j = 0; j < core_dfx_regs_num; j++) { 932 813 idx = com_dfx_regs_num + i * core_dfx_regs_num + j; ··· 953 834 { 954 835 int core_dfx_regs_num = ARRAY_SIZE(hzip_dump_dfx_regs); 955 836 int com_dfx_regs_num = ARRAY_SIZE(hzip_com_dfx_regs); 837 + u32 zip_core_num, zip_comp_core_num; 956 838 struct qm_debug *debug = &qm->debug; 957 839 char buf[HZIP_BUF_SIZE]; 958 840 void __iomem *base; ··· 967 847 val = readl_relaxed(qm->io_base + hzip_com_dfx_regs[i].offset); 968 848 if (debug->last_words[i] != val) 969 849 pci_info(qm->pdev, "com_dfx: %s \t= 0x%08x => 0x%08x\n", 970 - hzip_com_dfx_regs[i].name, debug->last_words[i], val); 850 + hzip_com_dfx_regs[i].name, debug->last_words[i], val); 971 851 } 972 852 973 - for (i = 0; i < HZIP_CORE_NUM; i++) { 974 - if (i < HZIP_COMP_CORE_NUM) 853 + zip_core_num = hisi_qm_get_hw_info(qm, zip_basic_cap_info, ZIP_CORE_NUM_CAP, qm->cap_ver); 854 + zip_comp_core_num = hisi_qm_get_hw_info(qm, zip_basic_cap_info, ZIP_CLUSTER_COMP_NUM_CAP, 855 + qm->cap_ver); 856 + for (i = 0; i < zip_core_num; i++) { 857 + if (i < zip_comp_core_num) 975 858 scnprintf(buf, sizeof(buf), "Comp_core-%d", i); 976 859 else 977 860 scnprintf(buf, sizeof(buf), "Decomp_core-%d", 978 - i - HZIP_COMP_CORE_NUM); 861 + i - zip_comp_core_num); 979 862 base = qm->io_base + core_offsets[i]; 980 863 981 864 pci_info(qm->pdev, "==>%s:\n", buf); ··· 988 865 val = readl_relaxed(base + hzip_dump_dfx_regs[j].offset); 989 866 if (debug->last_words[idx] != val) 990 867 pci_info(qm->pdev, "%s \t= 0x%08x => 0x%08x\n", 991 - hzip_dump_dfx_regs[j].name, debug->last_words[idx], val); 868 + hzip_dump_dfx_regs[j].name, 869 + debug->last_words[idx], val); 992 870 } 993 871 } 994 872 } ··· 1024 900 1025 901 static void hisi_zip_clear_hw_err_status(struct hisi_qm *qm, u32 err_sts) 1026 902 { 903 + u32 nfe; 904 + 1027 905 writel(err_sts, qm->io_base + HZIP_CORE_INT_SOURCE); 906 + nfe = 
hisi_qm_get_hw_info(qm, zip_basic_cap_info, ZIP_NFE_MASK_CAP, qm->cap_ver); 907 + writel(nfe, qm->io_base + HZIP_CORE_INT_RAS_NFE_ENB); 1028 908 } 1029 909 1030 910 static void hisi_zip_open_axi_master_ooo(struct hisi_qm *qm) ··· 1062 934 { 1063 935 struct hisi_qm_err_info *err_info = &qm->err_info; 1064 936 1065 - err_info->ce = QM_BASE_CE; 1066 - err_info->fe = 0; 937 + err_info->fe = HZIP_CORE_INT_RAS_FE_ENB_MASK; 938 + err_info->ce = hisi_qm_get_hw_info(qm, zip_basic_cap_info, ZIP_QM_CE_MASK_CAP, qm->cap_ver); 939 + err_info->nfe = hisi_qm_get_hw_info(qm, zip_basic_cap_info, 940 + ZIP_QM_NFE_MASK_CAP, qm->cap_ver); 1067 941 err_info->ecc_2bits_mask = HZIP_CORE_INT_STATUS_M_ECC; 1068 - err_info->dev_ce_mask = HZIP_CORE_INT_RAS_CE_ENABLE; 942 + err_info->qm_shutdown_mask = hisi_qm_get_hw_info(qm, zip_basic_cap_info, 943 + ZIP_QM_OOO_SHUTDOWN_MASK_CAP, qm->cap_ver); 944 + err_info->dev_shutdown_mask = hisi_qm_get_hw_info(qm, zip_basic_cap_info, 945 + ZIP_OOO_SHUTDOWN_MASK_CAP, qm->cap_ver); 946 + err_info->qm_reset_mask = hisi_qm_get_hw_info(qm, zip_basic_cap_info, 947 + ZIP_QM_RESET_MASK_CAP, qm->cap_ver); 948 + err_info->dev_reset_mask = hisi_qm_get_hw_info(qm, zip_basic_cap_info, 949 + ZIP_RESET_MASK_CAP, qm->cap_ver); 1069 950 err_info->msi_wr_port = HZIP_WR_PORT; 1070 951 err_info->acpi_rst = "ZRST"; 1071 - err_info->nfe = QM_BASE_NFE | QM_ACC_WB_NOT_READY_TIMEOUT; 1072 - 1073 - if (qm->ver >= QM_HW_V3) 1074 - err_info->nfe |= QM_ACC_DO_TASK_TIMEOUT; 1075 952 } 1076 953 1077 954 static const struct hisi_qm_err_ini hisi_zip_err_ini = { ··· 1109 976 qm->err_ini = &hisi_zip_err_ini; 1110 977 qm->err_ini->err_info_init(qm); 1111 978 1112 - hisi_zip_set_user_domain_and_cache(qm); 979 + ret = hisi_zip_set_user_domain_and_cache(qm); 980 + if (ret) 981 + return ret; 982 + 1113 983 hisi_zip_open_sva_prefetch(qm); 1114 984 hisi_qm_dev_err_init(qm); 1115 985 hisi_zip_debug_regs_clear(qm); ··· 1126 990 1127 991 static int hisi_zip_qm_init(struct hisi_qm *qm, struct pci_dev *pdev) 1128 992 { 993 + int ret; 994 + 1129 995 qm->pdev = pdev; 1130 996 qm->ver = pdev->revision; 1131 - if (pdev->revision >= QM_HW_V3) 1132 - qm->algs = "zlib\ngzip\ndeflate\nlz77_zstd"; 1133 - else 1134 - qm->algs = "zlib\ngzip"; 1135 997 qm->mode = uacce_mode; 1136 998 qm->sqe_size = HZIP_SQE_SIZE; 1137 999 qm->dev_name = hisi_zip_name; ··· 1153 1019 qm->qp_num = HZIP_QUEUE_NUM_V1 - HZIP_PF_DEF_Q_NUM; 1154 1020 } 1155 1021 1156 - return hisi_qm_init(qm); 1022 + ret = hisi_qm_init(qm); 1023 + if (ret) { 1024 + pci_err(qm->pdev, "Failed to init zip qm configures!\n"); 1025 + return ret; 1026 + } 1027 + 1028 + ret = hisi_zip_set_qm_algs(qm); 1029 + if (ret) { 1030 + pci_err(qm->pdev, "Failed to set zip algs!\n"); 1031 + hisi_qm_uninit(qm); 1032 + } 1033 + 1034 + return ret; 1157 1035 } 1158 1036 1159 1037 static void hisi_zip_qm_uninit(struct hisi_qm *qm)
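Note: each zip_basic_cap_info row above carries a register offset, a bit field, and a default per hardware generation; hisi_qm_get_hw_info() resolves one row to a value. A sketch of that table-driven lookup under the assumption that the row layout mirrors the entries shown (the real struct lives in the shared qm driver), with get_cap() and has_cap_regs as hypothetical names:

#include <linux/io.h>
#include <linux/types.h>

/* Assumed row layout, mirroring the zip_basic_cap_info entries:
 * type, register offset, field shift, field mask, per-version defaults.
 */
struct cap_info {
	u32 type;
	u32 offset;
	u32 shift;
	u32 mask;
	u32 v1_val;
	u32 v2_val;
	u32 v3_val;
};

static u32 get_cap(void __iomem *base, const struct cap_info *info,
		   u32 index, u8 hw_ver, bool has_cap_regs)
{
	const struct cap_info *cap = &info[index];
	u32 val;

	if (has_cap_regs) {
		/* newer parts report the value in a register field */
		val = readl(base + cap->offset);
		return (val >> cap->shift) & cap->mask;
	}

	/* older parts fall back to a fixed per-version default */
	switch (hw_ver) {
	case 1:
		return cap->v1_val;
	case 2:
		return cap->v2_val;
	default:
		return cap->v3_val;
	}
}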
+33 -27
drivers/crypto/inside-secure/safexcel_cipher.c
··· 63 63 u32 hash_alg; 64 64 u32 state_sz; 65 65 66 - struct crypto_cipher *hkaes; 67 66 struct crypto_aead *fback; 68 67 }; 69 68 ··· 641 642 safexcel_complete(priv, ring); 642 643 643 644 if (src == dst) { 644 - dma_unmap_sg(priv->dev, src, sreq->nr_src, DMA_BIDIRECTIONAL); 645 + if (sreq->nr_src > 0) 646 + dma_unmap_sg(priv->dev, src, sreq->nr_src, 647 + DMA_BIDIRECTIONAL); 645 648 } else { 646 - dma_unmap_sg(priv->dev, src, sreq->nr_src, DMA_TO_DEVICE); 647 - dma_unmap_sg(priv->dev, dst, sreq->nr_dst, DMA_FROM_DEVICE); 649 + if (sreq->nr_src > 0) 650 + dma_unmap_sg(priv->dev, src, sreq->nr_src, 651 + DMA_TO_DEVICE); 652 + if (sreq->nr_dst > 0) 653 + dma_unmap_sg(priv->dev, dst, sreq->nr_dst, 654 + DMA_FROM_DEVICE); 648 655 } 649 656 650 657 /* ··· 742 737 max(totlen_src, totlen_dst)); 743 738 return -EINVAL; 744 739 } 745 - dma_map_sg(priv->dev, src, sreq->nr_src, DMA_BIDIRECTIONAL); 740 + if (sreq->nr_src > 0) 741 + dma_map_sg(priv->dev, src, sreq->nr_src, 742 + DMA_BIDIRECTIONAL); 746 743 } else { 747 744 if (unlikely(totlen_src && (sreq->nr_src <= 0))) { 748 745 dev_err(priv->dev, "Source buffer not large enough (need %d bytes)!", 749 746 totlen_src); 750 747 return -EINVAL; 751 748 } 752 - dma_map_sg(priv->dev, src, sreq->nr_src, DMA_TO_DEVICE); 749 + 750 + if (sreq->nr_src > 0) 751 + dma_map_sg(priv->dev, src, sreq->nr_src, DMA_TO_DEVICE); 753 752 754 753 if (unlikely(totlen_dst && (sreq->nr_dst <= 0))) { 755 754 dev_err(priv->dev, "Dest buffer not large enough (need %d bytes)!", 756 755 totlen_dst); 757 - dma_unmap_sg(priv->dev, src, sreq->nr_src, 758 - DMA_TO_DEVICE); 759 - return -EINVAL; 756 + ret = -EINVAL; 757 + goto unmap; 760 758 } 761 - dma_map_sg(priv->dev, dst, sreq->nr_dst, DMA_FROM_DEVICE); 759 + 760 + if (sreq->nr_dst > 0) 761 + dma_map_sg(priv->dev, dst, sreq->nr_dst, 762 + DMA_FROM_DEVICE); 762 763 } 763 764 764 765 memcpy(ctx->base.ctxr->data, ctx->key, ctx->key_len); ··· 894 883 cdesc_rollback: 895 884 for (i = 0; i < n_cdesc; i++) 896 885 safexcel_ring_rollback_wptr(priv, &priv->ring[ring].cdr); 897 - 886 + unmap: 898 887 if (src == dst) { 899 - dma_unmap_sg(priv->dev, src, sreq->nr_src, DMA_BIDIRECTIONAL); 888 + if (sreq->nr_src > 0) 889 + dma_unmap_sg(priv->dev, src, sreq->nr_src, 890 + DMA_BIDIRECTIONAL); 900 891 } else { 901 - dma_unmap_sg(priv->dev, src, sreq->nr_src, DMA_TO_DEVICE); 902 - dma_unmap_sg(priv->dev, dst, sreq->nr_dst, DMA_FROM_DEVICE); 892 + if (sreq->nr_src > 0) 893 + dma_unmap_sg(priv->dev, src, sreq->nr_src, 894 + DMA_TO_DEVICE); 895 + if (sreq->nr_dst > 0) 896 + dma_unmap_sg(priv->dev, dst, sreq->nr_dst, 897 + DMA_FROM_DEVICE); 903 898 } 904 899 905 900 return ret; ··· 2606 2589 ctx->key_len = len; 2607 2590 2608 2591 /* Compute hash key by encrypting zeroes with cipher key */ 2609 - crypto_cipher_clear_flags(ctx->hkaes, CRYPTO_TFM_REQ_MASK); 2610 - crypto_cipher_set_flags(ctx->hkaes, crypto_aead_get_flags(ctfm) & 2611 - CRYPTO_TFM_REQ_MASK); 2612 - ret = crypto_cipher_setkey(ctx->hkaes, key, len); 2613 - if (ret) 2614 - return ret; 2615 - 2616 2592 memset(hashkey, 0, AES_BLOCK_SIZE); 2617 - crypto_cipher_encrypt_one(ctx->hkaes, (u8 *)hashkey, (u8 *)hashkey); 2593 + aes_encrypt(&aes, (u8 *)hashkey, (u8 *)hashkey); 2618 2594 2619 2595 if (priv->flags & EIP197_TRC_CACHE && ctx->base.ctxr_dma) { 2620 2596 for (i = 0; i < AES_BLOCK_SIZE / sizeof(u32); i++) { ··· 2636 2626 ctx->xcm = EIP197_XCM_MODE_GCM; 2637 2627 ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_XCM; /* override default */ 2638 2628 2639 - ctx->hkaes = crypto_alloc_cipher("aes", 0, 0); 
2640 - return PTR_ERR_OR_ZERO(ctx->hkaes); 2629 + return 0; 2641 2630 } 2642 2631 2643 2632 static void safexcel_aead_gcm_cra_exit(struct crypto_tfm *tfm) 2644 2633 { 2645 - struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm); 2646 - 2647 - crypto_free_cipher(ctx->hkaes); 2648 2634 safexcel_aead_cra_exit(tfm); 2649 2635 } 2650 2636
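Note: the safexcel_cipher.c hunks drop the crypto_cipher fallback used to derive the GCM hash key and call the AES library directly. A sketch of that derivation using the <crypto/aes.h> API the diff switches to; derive_ghash_key() is a hypothetical wrapper, not a driver function:

#include <crypto/aes.h>
#include <linux/string.h>

/* Derive the GHASH subkey H = AES_K(0^128) without allocating a
 * crypto_cipher transform.
 */
static int derive_ghash_key(const u8 *key, unsigned int keylen,
			    u8 hashkey[AES_BLOCK_SIZE])
{
	struct crypto_aes_ctx aes;
	int ret;

	ret = aes_expandkey(&aes, key, keylen);
	if (ret)
		return ret;

	memset(hashkey, 0, AES_BLOCK_SIZE);
	aes_encrypt(&aes, hashkey, hashkey);	/* in-place, as in the diff */

	memzero_explicit(&aes, sizeof(aes));	/* scrub the expanded key */
	return 0;
}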
+22 -43
drivers/crypto/inside-secure/safexcel_hash.c
··· 30 30 bool fb_init_done; 31 31 bool fb_do_setkey; 32 32 33 - struct crypto_cipher *kaes; 33 + struct crypto_aes_ctx *aes; 34 34 struct crypto_ahash *fback; 35 35 struct crypto_shash *shpre; 36 36 struct shash_desc *shdesc; ··· 383 383 u32 x; 384 384 385 385 x = ipad[i] ^ ipad[i + 4]; 386 - cache[i] ^= swab(x); 386 + cache[i] ^= swab32(x); 387 387 } 388 388 } 389 389 cache_len = AES_BLOCK_SIZE; ··· 821 821 u32 *result = (void *)areq->result; 822 822 823 823 /* K3 */ 824 - result[i] = swab(ctx->base.ipad.word[i + 4]); 824 + result[i] = swab32(ctx->base.ipad.word[i + 4]); 825 825 } 826 826 areq->result[0] ^= 0x80; // 10- padding 827 - crypto_cipher_encrypt_one(ctx->kaes, areq->result, areq->result); 827 + aes_encrypt(ctx->aes, areq->result, areq->result); 828 828 return 0; 829 829 } else if (unlikely(req->hmac && 830 830 (req->len == req->block_sz) && ··· 2083 2083 unsigned int len) 2084 2084 { 2085 2085 struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm)); 2086 - struct crypto_aes_ctx aes; 2087 2086 u32 key_tmp[3 * AES_BLOCK_SIZE / sizeof(u32)]; 2088 2087 int ret, i; 2089 2088 2090 - ret = aes_expandkey(&aes, key, len); 2089 + ret = aes_expandkey(ctx->aes, key, len); 2091 2090 if (ret) 2092 2091 return ret; 2093 2092 2094 2093 /* precompute the XCBC key material */ 2095 - crypto_cipher_clear_flags(ctx->kaes, CRYPTO_TFM_REQ_MASK); 2096 - crypto_cipher_set_flags(ctx->kaes, crypto_ahash_get_flags(tfm) & 2097 - CRYPTO_TFM_REQ_MASK); 2098 - ret = crypto_cipher_setkey(ctx->kaes, key, len); 2099 - if (ret) 2100 - return ret; 2101 - 2102 - crypto_cipher_encrypt_one(ctx->kaes, (u8 *)key_tmp + 2 * AES_BLOCK_SIZE, 2103 - "\x1\x1\x1\x1\x1\x1\x1\x1\x1\x1\x1\x1\x1\x1\x1\x1"); 2104 - crypto_cipher_encrypt_one(ctx->kaes, (u8 *)key_tmp, 2105 - "\x2\x2\x2\x2\x2\x2\x2\x2\x2\x2\x2\x2\x2\x2\x2\x2"); 2106 - crypto_cipher_encrypt_one(ctx->kaes, (u8 *)key_tmp + AES_BLOCK_SIZE, 2107 - "\x3\x3\x3\x3\x3\x3\x3\x3\x3\x3\x3\x3\x3\x3\x3\x3"); 2094 + aes_encrypt(ctx->aes, (u8 *)key_tmp + 2 * AES_BLOCK_SIZE, 2095 + "\x1\x1\x1\x1\x1\x1\x1\x1\x1\x1\x1\x1\x1\x1\x1\x1"); 2096 + aes_encrypt(ctx->aes, (u8 *)key_tmp, 2097 + "\x2\x2\x2\x2\x2\x2\x2\x2\x2\x2\x2\x2\x2\x2\x2\x2"); 2098 + aes_encrypt(ctx->aes, (u8 *)key_tmp + AES_BLOCK_SIZE, 2099 + "\x3\x3\x3\x3\x3\x3\x3\x3\x3\x3\x3\x3\x3\x3\x3\x3"); 2108 2100 for (i = 0; i < 3 * AES_BLOCK_SIZE / sizeof(u32); i++) 2109 - ctx->base.ipad.word[i] = swab(key_tmp[i]); 2101 + ctx->base.ipad.word[i] = swab32(key_tmp[i]); 2110 2102 2111 - crypto_cipher_clear_flags(ctx->kaes, CRYPTO_TFM_REQ_MASK); 2112 - crypto_cipher_set_flags(ctx->kaes, crypto_ahash_get_flags(tfm) & 2113 - CRYPTO_TFM_REQ_MASK); 2114 - ret = crypto_cipher_setkey(ctx->kaes, 2115 - (u8 *)key_tmp + 2 * AES_BLOCK_SIZE, 2116 - AES_MIN_KEY_SIZE); 2103 + ret = aes_expandkey(ctx->aes, 2104 + (u8 *)key_tmp + 2 * AES_BLOCK_SIZE, 2105 + AES_MIN_KEY_SIZE); 2117 2106 if (ret) 2118 2107 return ret; 2119 2108 ··· 2110 2121 ctx->key_sz = AES_MIN_KEY_SIZE + 2 * AES_BLOCK_SIZE; 2111 2122 ctx->cbcmac = false; 2112 2123 2113 - memzero_explicit(&aes, sizeof(aes)); 2114 2124 return 0; 2115 2125 } 2116 2126 ··· 2118 2130 struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(tfm); 2119 2131 2120 2132 safexcel_ahash_cra_init(tfm); 2121 - ctx->kaes = crypto_alloc_cipher("aes", 0, 0); 2122 - return PTR_ERR_OR_ZERO(ctx->kaes); 2133 + ctx->aes = kmalloc(sizeof(*ctx->aes), GFP_KERNEL); 2134 + return PTR_ERR_OR_ZERO(ctx->aes); 2123 2135 } 2124 2136 2125 2137 static void safexcel_xcbcmac_cra_exit(struct crypto_tfm *tfm) 2126 2138 { 2127 2139 
struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(tfm); 2128 2140 2129 - crypto_free_cipher(ctx->kaes); 2141 + kfree(ctx->aes); 2130 2142 safexcel_ahash_cra_exit(tfm); 2131 2143 } 2132 2144 ··· 2166 2178 unsigned int len) 2167 2179 { 2168 2180 struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm)); 2169 - struct crypto_aes_ctx aes; 2170 2181 __be64 consts[4]; 2171 2182 u64 _const[2]; 2172 2183 u8 msb_mask, gfmask; 2173 2184 int ret, i; 2174 2185 2175 - ret = aes_expandkey(&aes, key, len); 2186 + /* precompute the CMAC key material */ 2187 + ret = aes_expandkey(ctx->aes, key, len); 2176 2188 if (ret) 2177 2189 return ret; 2178 2190 2179 2191 for (i = 0; i < len / sizeof(u32); i++) 2180 - ctx->base.ipad.word[i + 8] = swab(aes.key_enc[i]); 2181 - 2182 - /* precompute the CMAC key material */ 2183 - crypto_cipher_clear_flags(ctx->kaes, CRYPTO_TFM_REQ_MASK); 2184 - crypto_cipher_set_flags(ctx->kaes, crypto_ahash_get_flags(tfm) & 2185 - CRYPTO_TFM_REQ_MASK); 2186 - ret = crypto_cipher_setkey(ctx->kaes, key, len); 2187 - if (ret) 2188 - return ret; 2192 + ctx->base.ipad.word[i + 8] = swab32(ctx->aes->key_enc[i]); 2189 2193 2190 2194 /* code below borrowed from crypto/cmac.c */ 2191 2195 /* encrypt the zero block */ 2192 2196 memset(consts, 0, AES_BLOCK_SIZE); 2193 - crypto_cipher_encrypt_one(ctx->kaes, (u8 *)consts, (u8 *)consts); 2197 + aes_encrypt(ctx->aes, (u8 *)consts, (u8 *)consts); 2194 2198 2195 2199 gfmask = 0x87; 2196 2200 _const[0] = be64_to_cpu(consts[1]); ··· 2214 2234 } 2215 2235 ctx->cbcmac = false; 2216 2236 2217 - memzero_explicit(&aes, sizeof(aes)); 2218 2237 return 0; 2219 2238 } 2220 2239
+2 -2
drivers/crypto/keembay/Kconfig
··· 42 42 config CRYPTO_DEV_KEEMBAY_OCS_ECC 43 43 tristate "Support for Intel Keem Bay OCS ECC HW acceleration" 44 44 depends on ARCH_KEEMBAY || COMPILE_TEST 45 - depends on OF || COMPILE_TEST 45 + depends on OF 46 46 depends on HAS_IOMEM 47 47 select CRYPTO_ECDH 48 48 select CRYPTO_ENGINE ··· 64 64 select CRYPTO_ENGINE 65 65 depends on HAS_IOMEM 66 66 depends on ARCH_KEEMBAY || COMPILE_TEST 67 - depends on OF || COMPILE_TEST 67 + depends on OF 68 68 help 69 69 Support for Intel Keem Bay Offload and Crypto Subsystem (OCS) Hash 70 70 Control Unit (HCU) hardware acceleration for use with Crypto API.
+1 -1
drivers/crypto/marvell/octeontx/otx_cpt_hw_types.h
··· 403 403 * big-endian format in memory. 404 404 * iqb_ldwb:1 [7:7](R/W) Instruction load don't write back. 405 405 * 0 = The hardware issues NCB transient load (LDT) towards the cache, 406 - * which if the line hits and is is dirty will cause the line to be 406 + * which if the line hits and is dirty will cause the line to be 407 407 * written back before being replaced. 408 408 * 1 = The hardware issues NCB LDWB read-and-invalidate command towards 409 409 * the cache when fetching the last word of instructions; as a result the
+19 -5
drivers/crypto/marvell/octeontx/otx_cptpf_ucode.c
··· 97 97 static void set_ucode_filename(struct otx_cpt_ucode *ucode, 98 98 const char *filename) 99 99 { 100 - strlcpy(ucode->filename, filename, OTX_CPT_UCODE_NAME_LENGTH); 100 + strscpy(ucode->filename, filename, OTX_CPT_UCODE_NAME_LENGTH); 101 101 } 102 102 103 103 static char *get_eng_type_str(int eng_type) ··· 138 138 u32 i, val = 0; 139 139 u8 nn; 140 140 141 - strlcpy(tmp_ver_str, ucode_hdr->ver_str, OTX_CPT_UCODE_VER_STR_SZ); 141 + strscpy(tmp_ver_str, ucode_hdr->ver_str, OTX_CPT_UCODE_VER_STR_SZ); 142 142 for (i = 0; i < strlen(tmp_ver_str); i++) 143 143 tmp_ver_str[i] = tolower(tmp_ver_str[i]); 144 144 ··· 286 286 struct tar_ucode_info_t *tar_info; 287 287 struct otx_cpt_ucode_hdr *ucode_hdr; 288 288 int ucode_type, ucode_size; 289 + unsigned int code_length; 289 290 290 291 /* 291 292 * If size is less than microcode header size then don't report ··· 304 303 if (get_ucode_type(ucode_hdr, &ucode_type)) 305 304 return 0; 306 305 307 - ucode_size = ntohl(ucode_hdr->code_length) * 2; 306 + code_length = ntohl(ucode_hdr->code_length); 307 + if (code_length >= INT_MAX / 2) { 308 + dev_err(dev, "Invalid code_length %u\n", code_length); 309 + return -EINVAL; 310 + } 311 + 312 + ucode_size = code_length * 2; 308 313 if (!ucode_size || (size < round_up(ucode_size, 16) + 309 314 sizeof(struct otx_cpt_ucode_hdr) + OTX_CPT_UCODE_SIGN_LEN)) { 310 315 dev_err(dev, "Ucode %s invalid size\n", filename); ··· 893 886 { 894 887 struct otx_cpt_ucode_hdr *ucode_hdr; 895 888 const struct firmware *fw; 889 + unsigned int code_length; 896 890 int ret; 897 891 898 892 set_ucode_filename(ucode, ucode_filename); ··· 904 896 ucode_hdr = (struct otx_cpt_ucode_hdr *) fw->data; 905 897 memcpy(ucode->ver_str, ucode_hdr->ver_str, OTX_CPT_UCODE_VER_STR_SZ); 906 898 ucode->ver_num = ucode_hdr->ver_num; 907 - ucode->size = ntohl(ucode_hdr->code_length) * 2; 899 + code_length = ntohl(ucode_hdr->code_length); 900 + if (code_length >= INT_MAX / 2) { 901 + dev_err(dev, "Ucode invalid code_length %u\n", code_length); 902 + ret = -EINVAL; 903 + goto release_fw; 904 + } 905 + ucode->size = code_length * 2; 908 906 if (!ucode->size || (fw->size < round_up(ucode->size, 16) 909 907 + sizeof(struct otx_cpt_ucode_hdr) + OTX_CPT_UCODE_SIGN_LEN)) { 910 908 dev_err(dev, "Ucode %s invalid size\n", ucode_filename); ··· 1342 1328 1343 1329 eng_grps = container_of(attr, struct otx_cpt_eng_grps, ucode_load_attr); 1344 1330 err_msg = "Invalid engine group format"; 1345 - strlcpy(tmp_buf, buf, OTX_CPT_UCODE_NAME_LENGTH); 1331 + strscpy(tmp_buf, buf, OTX_CPT_UCODE_NAME_LENGTH); 1346 1332 start = tmp_buf; 1347 1333 1348 1334 has_se = has_ie = has_ae = false;
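Note: the otx_cptpf_ucode.c hunks bound the big-endian code_length header field against INT_MAX / 2 before doubling it into an int, closing an integer overflow. A reduced sketch of the same validation; ucode_bytes() is a hypothetical helper:

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/types.h>

/* Validate an on-wire big-endian halfword count before converting it
 * to a byte count held in an int. Returns the byte size or -EINVAL.
 */
static int ucode_bytes(__be32 wire_code_length)
{
	u32 code_length = be32_to_cpu(wire_code_length);

	/* reject anything where code_length * 2 would not fit in an int */
	if (code_length >= INT_MAX / 2)
		return -EINVAL;

	return code_length * 2;
}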
+4 -4
drivers/crypto/marvell/octeontx/otx_cptvf_main.c
··· 661 661 msg = "Invalid"; 662 662 } 663 663 664 - return scnprintf(buf, PAGE_SIZE, "%s\n", msg); 664 + return sysfs_emit(buf, "%s\n", msg); 665 665 } 666 666 667 667 static ssize_t vf_engine_group_show(struct device *dev, ··· 670 670 { 671 671 struct otx_cptvf *cptvf = dev_get_drvdata(dev); 672 672 673 - return scnprintf(buf, PAGE_SIZE, "%d\n", cptvf->vfgrp); 673 + return sysfs_emit(buf, "%d\n", cptvf->vfgrp); 674 674 } 675 675 676 676 static ssize_t vf_engine_group_store(struct device *dev, ··· 706 706 { 707 707 struct otx_cptvf *cptvf = dev_get_drvdata(dev); 708 708 709 - return scnprintf(buf, PAGE_SIZE, "%d\n", 709 + return sysfs_emit(buf, "%d\n", 710 710 cptvf_read_vq_done_timewait(cptvf)); 711 711 } 712 712 ··· 716 716 { 717 717 struct otx_cptvf *cptvf = dev_get_drvdata(dev); 718 718 719 - return scnprintf(buf, PAGE_SIZE, "%d\n", 719 + return sysfs_emit(buf, "%d\n", 720 720 cptvf_read_vq_done_numwait(cptvf)); 721 721 } 722 722
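Note: sysfs_emit() replaces scnprintf(buf, PAGE_SIZE, ...) in show() callbacks; it knows the sysfs buffer is a single page and warns if handed anything else, so the bound cannot be mis-stated. A minimal sketch of a converted attribute; read_vq_len() is a placeholder getter:

#include <linux/device.h>
#include <linux/sysfs.h>

/* placeholder for the driver's actual value getter */
extern int read_vq_len(struct device *dev);

static ssize_t vq_len_show(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	/* sysfs_emit() bounds the write to the one-page sysfs buffer */
	return sysfs_emit(buf, "%d\n", read_vq_len(dev));
}
static DEVICE_ATTR_RO(vq_len);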
+5 -15
drivers/crypto/marvell/octeontx/otx_cptvf_mbox.c
··· 159 159 int otx_cptvf_check_pf_ready(struct otx_cptvf *cptvf) 160 160 { 161 161 struct otx_cpt_mbox mbx = {}; 162 - int ret; 163 162 164 163 mbx.msg = OTX_CPT_MSG_READY; 165 - ret = cptvf_send_msg_to_pf_timeout(cptvf, &mbx); 166 164 167 - return ret; 165 + return cptvf_send_msg_to_pf_timeout(cptvf, &mbx); 168 166 } 169 167 170 168 /* ··· 172 174 int otx_cptvf_send_vq_size_msg(struct otx_cptvf *cptvf) 173 175 { 174 176 struct otx_cpt_mbox mbx = {}; 175 - int ret; 176 177 177 178 mbx.msg = OTX_CPT_MSG_QLEN; 178 179 mbx.data = cptvf->qsize; 179 - ret = cptvf_send_msg_to_pf_timeout(cptvf, &mbx); 180 180 181 - return ret; 181 + return cptvf_send_msg_to_pf_timeout(cptvf, &mbx); 182 182 } 183 183 184 184 /* ··· 204 208 int otx_cptvf_send_vf_priority_msg(struct otx_cptvf *cptvf) 205 209 { 206 210 struct otx_cpt_mbox mbx = {}; 207 - int ret; 208 211 209 212 mbx.msg = OTX_CPT_MSG_VQ_PRIORITY; 210 213 /* Convey group of the VF */ 211 214 mbx.data = cptvf->priority; 212 - ret = cptvf_send_msg_to_pf_timeout(cptvf, &mbx); 213 215 214 - return ret; 216 + return cptvf_send_msg_to_pf_timeout(cptvf, &mbx); 215 217 } 216 218 217 219 /* ··· 218 224 int otx_cptvf_send_vf_up(struct otx_cptvf *cptvf) 219 225 { 220 226 struct otx_cpt_mbox mbx = {}; 221 - int ret; 222 227 223 228 mbx.msg = OTX_CPT_MSG_VF_UP; 224 - ret = cptvf_send_msg_to_pf_timeout(cptvf, &mbx); 225 229 226 - return ret; 230 + return cptvf_send_msg_to_pf_timeout(cptvf, &mbx); 227 231 } 228 232 229 233 /* ··· 230 238 int otx_cptvf_send_vf_down(struct otx_cptvf *cptvf) 231 239 { 232 240 struct otx_cpt_mbox mbx = {}; 233 - int ret; 234 241 235 242 mbx.msg = OTX_CPT_MSG_VF_DOWN; 236 - ret = cptvf_send_msg_to_pf_timeout(cptvf, &mbx); 237 243 238 - return ret; 244 + return cptvf_send_msg_to_pf_timeout(cptvf, &mbx); 239 245 }
+2 -2
drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c
··· 68 68 static void set_ucode_filename(struct otx2_cpt_ucode *ucode, 69 69 const char *filename) 70 70 { 71 - strlcpy(ucode->filename, filename, OTX2_CPT_NAME_LENGTH); 71 + strscpy(ucode->filename, filename, OTX2_CPT_NAME_LENGTH); 72 72 } 73 73 74 74 static char *get_eng_type_str(int eng_type) ··· 126 126 int i, val = 0; 127 127 u8 nn; 128 128 129 - strlcpy(tmp_ver_str, ucode_hdr->ver_str, OTX2_CPT_UCODE_VER_STR_SZ); 129 + strscpy(tmp_ver_str, ucode_hdr->ver_str, OTX2_CPT_UCODE_VER_STR_SZ); 130 130 for (i = 0; i < strlen(tmp_ver_str); i++) 131 131 tmp_ver_str[i] = tolower(tmp_ver_str[i]); 132 132
+1 -4
drivers/crypto/marvell/octeontx2/otx2_cptvf_mbox.c
··· 191 191 struct otx2_mbox *mbox = &cptvf->pfvf_mbox; 192 192 struct pci_dev *pdev = cptvf->pdev; 193 193 struct mbox_msghdr *req; 194 - int ret; 195 194 196 195 req = (struct mbox_msghdr *) 197 196 otx2_mbox_alloc_msg_rsp(mbox, 0, sizeof(*req), ··· 203 204 req->sig = OTX2_MBOX_REQ_SIG; 204 205 req->pcifunc = OTX2_CPT_RVU_PFFUNC(cptvf->vf_id, 0); 205 206 206 - ret = otx2_cpt_send_mbox_msg(mbox, pdev); 207 - 208 - return ret; 207 + return otx2_cpt_send_mbox_msg(mbox, pdev); 209 208 }
+1 -1
drivers/crypto/n2_core.c
··· 1494 1494 * 1495 1495 * So we have to back-translate, going through the 'intr' and 'ino' 1496 1496 * property tables of the n2cp MDESC node, matching it with the OF 1497 - * 'interrupts' property entries, in order to to figure out which 1497 + * 'interrupts' property entries, in order to figure out which 1498 1498 * devino goes to which already-translated IRQ. 1499 1499 */ 1500 1500 static int find_devino_index(struct platform_device *dev, struct spu_mdesc_info *ip,
+1 -4
drivers/crypto/nx/nx-aes-ccm.c
··· 134 134 unsigned int cryptlen, u8 *b0) 135 135 { 136 136 unsigned int l, lp, m = authsize; 137 - int rc; 138 137 139 138 memcpy(b0, iv, 16); 140 139 ··· 147 148 if (assoclen) 148 149 *b0 |= 64; 149 150 150 - rc = set_msg_len(b0 + 16 - l, cryptlen, l); 151 - 152 - return rc; 151 + return set_msg_len(b0 + 16 - l, cryptlen, l); 153 152 } 154 153 155 154 static int generate_pat(u8 *iv,
+3 -3
drivers/crypto/qat/qat_common/adf_cfg.c
··· 251 251 return -ENOMEM; 252 252 253 253 INIT_LIST_HEAD(&key_val->list); 254 - strlcpy(key_val->key, key, sizeof(key_val->key)); 254 + strscpy(key_val->key, key, sizeof(key_val->key)); 255 255 256 256 if (type == ADF_DEC) { 257 257 snprintf(key_val->val, ADF_CFG_MAX_VAL_LEN_IN_BYTES, 258 258 "%ld", (*((long *)val))); 259 259 } else if (type == ADF_STR) { 260 - strlcpy(key_val->val, (char *)val, sizeof(key_val->val)); 260 + strscpy(key_val->val, (char *)val, sizeof(key_val->val)); 261 261 } else if (type == ADF_HEX) { 262 262 snprintf(key_val->val, ADF_CFG_MAX_VAL_LEN_IN_BYTES, 263 263 "0x%lx", (unsigned long)val); ··· 315 315 if (!sec) 316 316 return -ENOMEM; 317 317 318 - strlcpy(sec->name, name, sizeof(sec->name)); 318 + strscpy(sec->name, name, sizeof(sec->name)); 319 319 INIT_LIST_HEAD(&sec->param_head); 320 320 down_write(&cfg->lock); 321 321 list_add_tail(&sec->list, &cfg->sec_list);
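Note: the strlcpy() to strscpy() conversions above swap in the preferred bounded copy: strlcpy() returns strlen(src) and so must read the whole source even past the destination size, while strscpy() stops at the bound and reports truncation as -E2BIG. A sketch of detecting truncation instead of silently accepting it; set_key_name() and KEY_MAX are hypothetical:

#include <linux/errno.h>
#include <linux/string.h>

#define KEY_MAX 64

/* Copy a key name, rejecting (rather than silently truncating)
 * anything that does not fit.
 */
static int set_key_name(char dst[KEY_MAX], const char *src)
{
	ssize_t len = strscpy(dst, src, KEY_MAX);

	/* dst is still NUL-terminated on -E2BIG, but the name is cut */
	if (len == -E2BIG)
		return -EINVAL;

	return 0;
}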
+7 -3
drivers/crypto/qat/qat_common/adf_ctl_drv.c
··· 16 16 #include "adf_cfg_common.h" 17 17 #include "adf_cfg_user.h" 18 18 19 + #define ADF_CFG_MAX_SECTION 512 20 + #define ADF_CFG_MAX_KEY_VAL 256 21 + 19 22 #define DEVICE_NAME "qat_adf_ctl" 20 23 21 24 static DEFINE_MUTEX(adf_ctl_lock); ··· 140 137 struct adf_user_cfg_key_val key_val; 141 138 struct adf_user_cfg_key_val *params_head; 142 139 struct adf_user_cfg_section section, *section_head; 140 + int i, j; 143 141 144 142 section_head = ctl_data->config_section; 145 143 146 - while (section_head) { 144 + for (i = 0; section_head && i < ADF_CFG_MAX_SECTION; i++) { 147 145 if (copy_from_user(&section, (void __user *)section_head, 148 146 sizeof(*section_head))) { 149 147 dev_err(&GET_DEV(accel_dev), ··· 160 156 161 157 params_head = section.params; 162 158 163 - while (params_head) { 159 + for (j = 0; params_head && j < ADF_CFG_MAX_KEY_VAL; j++) { 164 160 if (copy_from_user(&key_val, (void __user *)params_head, 165 161 sizeof(key_val))) { 166 162 dev_err(&GET_DEV(accel_dev), ··· 367 363 dev_info.num_logical_accel = hw_data->num_logical_accel; 368 364 dev_info.banks_per_accel = hw_data->num_banks 369 365 / hw_data->num_logical_accel; 370 - strlcpy(dev_info.name, hw_data->dev_class->name, sizeof(dev_info.name)); 366 + strscpy(dev_info.name, hw_data->dev_class->name, sizeof(dev_info.name)); 371 367 dev_info.instance_id = hw_data->instance_id; 372 368 dev_info.type = hw_data->dev_class->type; 373 369 dev_info.bus = accel_to_pci_dev(accel_dev)->bus->number;
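Note: the adf_ctl_drv.c hunks cap both loops that walk user-supplied linked lists of sections and key-value pairs, so a cyclic or absurdly long list cannot pin the kernel in the ioctl. A reduced sketch of bounded user-list parsing; struct unode and MAX_NODES are hypothetical stand-ins for the adf_user_cfg_* layouts:

#include <linux/errno.h>
#include <linux/uaccess.h>

#define MAX_NODES 512

/* Hypothetical user-space node: payload plus a user pointer to the
 * next node, as in adf_user_cfg_section.
 */
struct unode {
	char payload[32];
	struct unode __user *next;
};

static int walk_user_list(struct unode __user *head)
{
	struct unode cur;
	int i;

	/* bounded walk: a cycle or oversized list stops at MAX_NODES */
	for (i = 0; head && i < MAX_NODES; i++) {
		if (copy_from_user(&cur, head, sizeof(cur)))
			return -EFAULT;

		/* ... consume cur.payload here ... */

		head = cur.next;
	}

	return 0;
}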
+1 -1
drivers/crypto/qat/qat_common/adf_gen4_hw_data.h
··· 107 107 * Timeout is in cycles. Clock speed may vary across products but this 108 108 * value should be a few milli-seconds. 109 109 */ 110 - #define ADF_SSM_WDT_DEFAULT_VALUE 0x200000 110 + #define ADF_SSM_WDT_DEFAULT_VALUE 0x7000000ULL 111 111 #define ADF_SSM_WDT_PKE_DEFAULT_VALUE 0x8000000 112 112 #define ADF_SSMWDTL_OFFSET 0x54 113 113 #define ADF_SSMWDTH_OFFSET 0x5C
+1 -1
drivers/crypto/qat/qat_common/adf_transport_debug.c
··· 96 96 if (!ring_debug) 97 97 return -ENOMEM; 98 98 99 - strlcpy(ring_debug->ring_name, name, sizeof(ring_debug->ring_name)); 99 + strscpy(ring_debug->ring_name, name, sizeof(ring_debug->ring_name)); 100 100 snprintf(entry_name, sizeof(entry_name), "ring_%02d", 101 101 ring->ring_number); 102 102
+2 -1
drivers/crypto/qat/qat_common/icp_qat_uclo.h
··· 86 86 ICP_QAT_CSS_FWSK_MODULUS_LEN(handle) + \ 87 87 ICP_QAT_CSS_FWSK_EXPONENT_LEN(handle) + \ 88 88 ICP_QAT_CSS_SIGNATURE_LEN(handle)) 89 - #define ICP_QAT_CSS_MAX_IMAGE_LEN 0x40000 89 + #define ICP_QAT_CSS_RSA4K_MAX_IMAGE_LEN 0x40000 90 + #define ICP_QAT_CSS_RSA3K_MAX_IMAGE_LEN 0x30000 90 91 91 92 #define ICP_QAT_CTX_MODE(ae_mode) ((ae_mode) & 0xf) 92 93 #define ICP_QAT_NN_MODE(ae_mode) (((ae_mode) >> 0x4) & 0xf)
+12 -6
drivers/crypto/qat/qat_common/qat_algs.c
··· 673 673 dma_addr_t blpout = qat_req->buf.bloutp; 674 674 size_t sz = qat_req->buf.sz; 675 675 size_t sz_out = qat_req->buf.sz_out; 676 + int bl_dma_dir; 676 677 int i; 678 + 679 + bl_dma_dir = blp != blpout ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL; 677 680 678 681 for (i = 0; i < bl->num_bufs; i++) 679 682 dma_unmap_single(dev, bl->bufers[i].addr, 680 - bl->bufers[i].len, DMA_BIDIRECTIONAL); 683 + bl->bufers[i].len, bl_dma_dir); 681 684 682 685 dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE); 683 686 ··· 694 691 for (i = bufless; i < blout->num_bufs; i++) { 695 692 dma_unmap_single(dev, blout->bufers[i].addr, 696 693 blout->bufers[i].len, 697 - DMA_BIDIRECTIONAL); 694 + DMA_FROM_DEVICE); 698 695 } 699 696 dma_unmap_single(dev, blpout, sz_out, DMA_TO_DEVICE); 700 697 ··· 719 716 struct scatterlist *sg; 720 717 size_t sz_out, sz = struct_size(bufl, bufers, n); 721 718 int node = dev_to_node(&GET_DEV(inst->accel_dev)); 719 + int bufl_dma_dir; 722 720 723 721 if (unlikely(!n)) 724 722 return -EINVAL; ··· 737 733 qat_req->buf.sgl_src_valid = true; 738 734 } 739 735 736 + bufl_dma_dir = sgl != sglout ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL; 737 + 740 738 for_each_sg(sgl, sg, n, i) 741 739 bufl->bufers[i].addr = DMA_MAPPING_ERROR; 742 740 ··· 750 744 751 745 bufl->bufers[y].addr = dma_map_single(dev, sg_virt(sg), 752 746 sg->length, 753 - DMA_BIDIRECTIONAL); 747 + bufl_dma_dir); 754 748 bufl->bufers[y].len = sg->length; 755 749 if (unlikely(dma_mapping_error(dev, bufl->bufers[y].addr))) 756 750 goto err_in; ··· 793 787 794 788 bufers[y].addr = dma_map_single(dev, sg_virt(sg), 795 789 sg->length, 796 - DMA_BIDIRECTIONAL); 790 + DMA_FROM_DEVICE); 797 791 if (unlikely(dma_mapping_error(dev, bufers[y].addr))) 798 792 goto err_out; 799 793 bufers[y].len = sg->length; ··· 823 817 if (!dma_mapping_error(dev, buflout->bufers[i].addr)) 824 818 dma_unmap_single(dev, buflout->bufers[i].addr, 825 819 buflout->bufers[i].len, 826 - DMA_BIDIRECTIONAL); 820 + DMA_FROM_DEVICE); 827 821 828 822 if (!qat_req->buf.sgl_dst_valid) 829 823 kfree(buflout); ··· 837 831 if (!dma_mapping_error(dev, bufl->bufers[i].addr)) 838 832 dma_unmap_single(dev, bufl->bufers[i].addr, 839 833 bufl->bufers[i].len, 840 - DMA_BIDIRECTIONAL); 834 + bufl_dma_dir); 841 835 842 836 if (!qat_req->buf.sgl_src_valid) 843 837 kfree(bufl);
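Note: the qat_algs.c hunks stop mapping every buffer DMA_BIDIRECTIONAL: sources become DMA_TO_DEVICE and destinations DMA_FROM_DEVICE, with BIDIRECTIONAL kept only for in-place requests. A small sketch of that direction choice; map_src() is a hypothetical helper, and the matching unmap must reuse the same direction:

#include <linux/dma-mapping.h>

/* Pick the source mapping direction the way the qat fix does:
 * in-place requests (src == dst) still need DMA_BIDIRECTIONAL.
 */
static dma_addr_t map_src(struct device *dev, void *src_buf, size_t len,
			  bool in_place)
{
	enum dma_data_direction dir =
		in_place ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE;

	return dma_map_single(dev, src_buf, len, dir);
}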
+12 -12
drivers/crypto/qat/qat_common/qat_asym_algs.c
··· 332 332 qat_req->in.dh.in_tab[n_input_params] = 0; 333 333 qat_req->out.dh.out_tab[1] = 0; 334 334 /* Mapping in.in.b or in.in_g2.xa is the same */ 335 - qat_req->phy_in = dma_map_single(dev, &qat_req->in.dh.in.b, 336 - sizeof(qat_req->in.dh.in.b), 335 + qat_req->phy_in = dma_map_single(dev, &qat_req->in.dh, 336 + sizeof(struct qat_dh_input_params), 337 337 DMA_TO_DEVICE); 338 338 if (unlikely(dma_mapping_error(dev, qat_req->phy_in))) 339 339 goto unmap_dst; 340 340 341 - qat_req->phy_out = dma_map_single(dev, &qat_req->out.dh.r, 342 - sizeof(qat_req->out.dh.r), 341 + qat_req->phy_out = dma_map_single(dev, &qat_req->out.dh, 342 + sizeof(struct qat_dh_output_params), 343 343 DMA_TO_DEVICE); 344 344 if (unlikely(dma_mapping_error(dev, qat_req->phy_out))) 345 345 goto unmap_in_params; ··· 729 729 730 730 qat_req->in.rsa.in_tab[3] = 0; 731 731 qat_req->out.rsa.out_tab[1] = 0; 732 - qat_req->phy_in = dma_map_single(dev, &qat_req->in.rsa.enc.m, 733 - sizeof(qat_req->in.rsa.enc.m), 732 + qat_req->phy_in = dma_map_single(dev, &qat_req->in.rsa, 733 + sizeof(struct qat_rsa_input_params), 734 734 DMA_TO_DEVICE); 735 735 if (unlikely(dma_mapping_error(dev, qat_req->phy_in))) 736 736 goto unmap_dst; 737 737 738 - qat_req->phy_out = dma_map_single(dev, &qat_req->out.rsa.enc.c, 739 - sizeof(qat_req->out.rsa.enc.c), 738 + qat_req->phy_out = dma_map_single(dev, &qat_req->out.rsa, 739 + sizeof(struct qat_rsa_output_params), 740 740 DMA_TO_DEVICE); 741 741 if (unlikely(dma_mapping_error(dev, qat_req->phy_out))) 742 742 goto unmap_in_params; ··· 875 875 else 876 876 qat_req->in.rsa.in_tab[3] = 0; 877 877 qat_req->out.rsa.out_tab[1] = 0; 878 - qat_req->phy_in = dma_map_single(dev, &qat_req->in.rsa.dec.c, 879 - sizeof(qat_req->in.rsa.dec.c), 878 + qat_req->phy_in = dma_map_single(dev, &qat_req->in.rsa, 879 + sizeof(struct qat_rsa_input_params), 880 880 DMA_TO_DEVICE); 881 881 if (unlikely(dma_mapping_error(dev, qat_req->phy_in))) 882 882 goto unmap_dst; 883 883 884 - qat_req->phy_out = dma_map_single(dev, &qat_req->out.rsa.dec.m, 885 - sizeof(qat_req->out.rsa.dec.m), 884 + qat_req->phy_out = dma_map_single(dev, &qat_req->out.rsa, 885 + sizeof(struct qat_rsa_output_params), 886 886 DMA_TO_DEVICE); 887 887 if (unlikely(dma_mapping_error(dev, qat_req->phy_out))) 888 888 goto unmap_in_params;
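Note: the qat_asym_algs.c hunks fix mappings that started at a struct's first member but covered only that member's size, even though the device consumes the whole parameter block; mapping the struct itself with sizeof(*p) makes the extent explicit. A sketch with a hypothetical parameter layout:

#include <linux/dma-mapping.h>
#include <linux/types.h>

/* Hypothetical parameter block: the device reads every member, so the
 * mapping must span the whole struct, not just its first field.
 */
struct in_params {
	u64 m;		/* message */
	u64 e;		/* exponent */
	u64 n;		/* modulus */
	u64 in_tab[4];
};

static dma_addr_t map_in_params(struct device *dev, struct in_params *p)
{
	return dma_map_single(dev, p, sizeof(*p), DMA_TO_DEVICE);
}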
+55 -1
drivers/crypto/qat/qat_common/qat_uclo.c
··· 1367 1367 } 1368 1368 } 1369 1369 1370 + static int qat_uclo_check_image(struct icp_qat_fw_loader_handle *handle, 1371 + char *image, unsigned int size, 1372 + unsigned int fw_type) 1373 + { 1374 + char *fw_type_name = fw_type ? "MMP" : "AE"; 1375 + unsigned int css_dword_size = sizeof(u32); 1376 + 1377 + if (handle->chip_info->fw_auth) { 1378 + struct icp_qat_css_hdr *css_hdr = (struct icp_qat_css_hdr *)image; 1379 + unsigned int header_len = ICP_QAT_AE_IMG_OFFSET(handle); 1380 + 1381 + if ((css_hdr->header_len * css_dword_size) != header_len) 1382 + goto err; 1383 + if ((css_hdr->size * css_dword_size) != size) 1384 + goto err; 1385 + if (fw_type != css_hdr->fw_type) 1386 + goto err; 1387 + if (size <= header_len) 1388 + goto err; 1389 + size -= header_len; 1390 + } 1391 + 1392 + if (fw_type == CSS_AE_FIRMWARE) { 1393 + if (size < sizeof(struct icp_qat_simg_ae_mode *) + 1394 + ICP_QAT_SIMG_AE_INIT_SEQ_LEN) 1395 + goto err; 1396 + if (size > ICP_QAT_CSS_RSA4K_MAX_IMAGE_LEN) 1397 + goto err; 1398 + } else if (fw_type == CSS_MMP_FIRMWARE) { 1399 + if (size > ICP_QAT_CSS_RSA3K_MAX_IMAGE_LEN) 1400 + goto err; 1401 + } else { 1402 + pr_err("QAT: Unsupported firmware type\n"); 1403 + return -EINVAL; 1404 + } 1405 + return 0; 1406 + 1407 + err: 1408 + pr_err("QAT: Invalid %s firmware image\n", fw_type_name); 1409 + return -EINVAL; 1410 + } 1411 + 1370 1412 static int qat_uclo_map_auth_fw(struct icp_qat_fw_loader_handle *handle, 1371 1413 char *image, unsigned int size, 1372 1414 struct icp_qat_fw_auth_desc **desc) ··· 1421 1379 struct icp_qat_simg_ae_mode *simg_ae_mode; 1422 1380 struct icp_firml_dram_desc img_desc; 1423 1381 1424 - if (size > (ICP_QAT_AE_IMG_OFFSET(handle) + ICP_QAT_CSS_MAX_IMAGE_LEN)) { 1382 + if (size > (ICP_QAT_AE_IMG_OFFSET(handle) + ICP_QAT_CSS_RSA4K_MAX_IMAGE_LEN)) { 1425 1383 pr_err("QAT: error, input image size overflow %d\n", size); 1426 1384 return -EINVAL; 1427 1385 } ··· 1589 1547 { 1590 1548 struct icp_qat_fw_auth_desc *desc = NULL; 1591 1549 int status = 0; 1550 + int ret; 1551 + 1552 + ret = qat_uclo_check_image(handle, addr_ptr, mem_size, CSS_MMP_FIRMWARE); 1553 + if (ret) 1554 + return ret; 1592 1555 1593 1556 if (handle->chip_info->fw_auth) { 1594 1557 status = qat_uclo_map_auth_fw(handle, addr_ptr, mem_size, &desc); ··· 2065 2018 struct icp_qat_fw_auth_desc *desc = NULL; 2066 2019 struct icp_qat_suof_handle *sobj_handle = handle->sobj_handle; 2067 2020 struct icp_qat_suof_img_hdr *simg_hdr = sobj_handle->img_table.simg_hdr; 2021 + int ret; 2068 2022 2069 2023 for (i = 0; i < sobj_handle->img_table.num_simgs; i++) { 2024 + ret = qat_uclo_check_image(handle, simg_hdr[i].simg_buf, 2025 + simg_hdr[i].simg_len, 2026 + CSS_AE_FIRMWARE); 2027 + if (ret) 2028 + return ret; 2029 + 2070 2030 if (qat_uclo_map_auth_fw(handle, 2071 2031 (char *)simg_hdr[i].simg_buf, 2072 2032 (unsigned int)
+2 -2
drivers/crypto/qce/aead.c
··· 450 450 if (ret) 451 451 return ret; 452 452 dst_nents = dma_map_sg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst); 453 - if (dst_nents < 0) { 454 - ret = dst_nents; 453 + if (!dst_nents) { 454 + ret = -EIO; 455 455 goto error_free; 456 456 } 457 457
+5 -3
drivers/crypto/qce/sha.c
··· 97 97 } 98 98 99 99 ret = dma_map_sg(qce->dev, req->src, rctx->src_nents, DMA_TO_DEVICE); 100 - if (ret < 0) 101 - return ret; 100 + if (!ret) 101 + return -EIO; 102 102 103 103 sg_init_one(&rctx->result_sg, qce->dma.result_buf, QCE_RESULT_BUF_SZ); 104 104 105 105 ret = dma_map_sg(qce->dev, &rctx->result_sg, 1, DMA_FROM_DEVICE); 106 - if (ret < 0) 106 + if (!ret) { 107 + ret = -EIO; 107 108 goto error_unmap_src; 109 + } 108 110 109 111 ret = qce_dma_prep_sgs(&qce->dma, req->src, rctx->src_nents, 110 112 &rctx->result_sg, 1, qce_ahash_done, async_req);
+4 -4
drivers/crypto/qce/skcipher.c
··· 124 124 rctx->dst_sg = rctx->dst_tbl.sgl; 125 125 126 126 dst_nents = dma_map_sg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst); 127 - if (dst_nents < 0) { 128 - ret = dst_nents; 127 + if (!dst_nents) { 128 + ret = -EIO; 129 129 goto error_free; 130 130 } 131 131 132 132 if (diff_dst) { 133 133 src_nents = dma_map_sg(qce->dev, req->src, rctx->src_nents, dir_src); 134 - if (src_nents < 0) { 135 - ret = src_nents; 134 + if (!src_nents) { 135 + ret = -EIO; 136 136 goto error_unmap_dst; 137 137 } 138 138 rctx->src_sg = req->src;
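The three qce fixes above (aead.c, sha.c, skcipher.c) share one root cause: dma_map_sg() returns the number of mapped entries, or 0 on failure, and never a negative value, so the old "< 0" checks were dead code. A minimal sketch of the corrected convention; dev, sgl and sg_count are placeholders:

#include <linux/dma-mapping.h>

static int map_src(struct device *dev, struct scatterlist *sgl, int sg_count)
{
        int nents;

        nents = dma_map_sg(dev, sgl, sg_count, DMA_TO_DEVICE);
        if (!nents)             /* 0 means failure, never negative */
                return -EIO;    /* the API supplies no errno; pick one */

        return nents;           /* may be fewer than sg_count after merging */
}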
+3 -4
drivers/crypto/qcom-rng.c
··· 9 9 #include <linux/crypto.h> 10 10 #include <linux/io.h> 11 11 #include <linux/iopoll.h> 12 + #include <linux/kernel.h> 12 13 #include <linux/module.h> 13 14 #include <linux/of.h> 14 15 #include <linux/platform_device.h> ··· 202 201 return 0; 203 202 } 204 203 205 - #if IS_ENABLED(CONFIG_ACPI) 206 - static const struct acpi_device_id qcom_rng_acpi_match[] = { 204 + static const struct acpi_device_id __maybe_unused qcom_rng_acpi_match[] = { 207 205 { .id = "QCOM8160", .driver_data = 1 }, 208 206 {} 209 207 }; 210 208 MODULE_DEVICE_TABLE(acpi, qcom_rng_acpi_match); 211 - #endif 212 209 213 - static const struct of_device_id qcom_rng_of_match[] = { 210 + static const struct of_device_id __maybe_unused qcom_rng_of_match[] = { 214 211 { .compatible = "qcom,prng", .data = (void *)0}, 215 212 { .compatible = "qcom,prng-ee", .data = (void *)1}, 216 213 {}
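The qcom-rng change trades the CONFIG_ACPI preprocessor guard for __maybe_unused: both match tables are now compiled in every configuration (so they always get type-checked) and the compiler simply discards one when the macro referencing it expands to NULL. A sketch of the idiom as it is commonly paired with ACPI_PTR(); the driver name and ACPI HID are invented:

#include <linux/acpi.h>
#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>

static const struct acpi_device_id __maybe_unused my_acpi_match[] = {
        { .id = "ABCD0001" },   /* hypothetical ACPI HID */
        {}
};

static struct platform_driver my_driver = {
        .driver = {
                .name = "my-driver",
                /* ACPI_PTR() is NULL when !CONFIG_ACPI; __maybe_unused
                 * then silences the defined-but-not-used warning. */
                .acpi_match_table = ACPI_PTR(my_acpi_match),
        },
};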
+11 -11
drivers/crypto/sahara.c
··· 26 26 #include <linux/kernel.h> 27 27 #include <linux/kthread.h> 28 28 #include <linux/module.h> 29 - #include <linux/mutex.h> 30 29 #include <linux/of.h> 31 30 #include <linux/of_device.h> 32 31 #include <linux/platform_device.h> 32 + #include <linux/spinlock.h> 33 33 34 34 #define SHA_BUFFER_LEN PAGE_SIZE 35 35 #define SAHARA_MAX_SHA_BLOCK_SIZE SHA256_BLOCK_SIZE ··· 196 196 void __iomem *regs_base; 197 197 struct clk *clk_ipg; 198 198 struct clk *clk_ahb; 199 - struct mutex queue_mutex; 199 + spinlock_t queue_spinlock; 200 200 struct task_struct *kthread; 201 201 struct completion dma_completion; 202 202 ··· 487 487 488 488 ret = dma_map_sg(dev->device, dev->in_sg, dev->nb_in_sg, 489 489 DMA_TO_DEVICE); 490 - if (ret != dev->nb_in_sg) { 490 + if (!ret) { 491 491 dev_err(dev->device, "couldn't map in sg\n"); 492 492 goto unmap_in; 493 493 } 494 494 ret = dma_map_sg(dev->device, dev->out_sg, dev->nb_out_sg, 495 495 DMA_FROM_DEVICE); 496 - if (ret != dev->nb_out_sg) { 496 + if (!ret) { 497 497 dev_err(dev->device, "couldn't map out sg\n"); 498 498 goto unmap_out; 499 499 } ··· 642 642 643 643 rctx->mode = mode; 644 644 645 - mutex_lock(&dev->queue_mutex); 645 + spin_lock_bh(&dev->queue_spinlock); 646 646 err = crypto_enqueue_request(&dev->queue, &req->base); 647 - mutex_unlock(&dev->queue_mutex); 647 + spin_unlock_bh(&dev->queue_spinlock); 648 648 649 649 wake_up_process(dev->kthread); 650 650 ··· 1043 1043 do { 1044 1044 __set_current_state(TASK_INTERRUPTIBLE); 1045 1045 1046 - mutex_lock(&dev->queue_mutex); 1046 + spin_lock_bh(&dev->queue_spinlock); 1047 1047 backlog = crypto_get_backlog(&dev->queue); 1048 1048 async_req = crypto_dequeue_request(&dev->queue); 1049 - mutex_unlock(&dev->queue_mutex); 1049 + spin_unlock_bh(&dev->queue_spinlock); 1050 1050 1051 1051 if (backlog) 1052 1052 backlog->complete(backlog, -EINPROGRESS); ··· 1092 1092 rctx->first = 1; 1093 1093 } 1094 1094 1095 - mutex_lock(&dev->queue_mutex); 1095 + spin_lock_bh(&dev->queue_spinlock); 1096 1096 ret = crypto_enqueue_request(&dev->queue, &req->base); 1097 - mutex_unlock(&dev->queue_mutex); 1097 + spin_unlock_bh(&dev->queue_spinlock); 1098 1098 1099 1099 wake_up_process(dev->kthread); 1100 1100 ··· 1449 1449 1450 1450 crypto_init_queue(&dev->queue, SAHARA_QUEUE_LENGTH); 1451 1451 1452 - mutex_init(&dev->queue_mutex); 1452 + spin_lock_init(&dev->queue_spinlock); 1453 1453 1454 1454 dev_ptr = dev; 1455 1455
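sahara.c swaps its queue mutex for a spinlock: crypto enqueue paths can be reached from contexts that must not sleep, and the _bh variants additionally fence off softirq users of the queue. A condensed sketch of the pattern; struct my_dev is invented:

#include <crypto/algapi.h>
#include <linux/spinlock.h>

struct my_dev {
        spinlock_t queue_lock;          /* was a mutex; must not sleep */
        struct crypto_queue queue;
};

static int my_enqueue(struct my_dev *dev, struct crypto_async_request *req)
{
        int err;

        /* _bh variant: the queue is also touched from softirq context. */
        spin_lock_bh(&dev->queue_lock);
        err = crypto_enqueue_request(&dev->queue, req);
        spin_unlock_bh(&dev->queue_lock);

        return err;
}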
-2
drivers/net/Kconfig
··· 85 85 select CRYPTO_POLY1305_X86_64 if X86 && 64BIT 86 86 select CRYPTO_BLAKE2S_X86 if X86 && 64BIT 87 87 select CRYPTO_CURVE25519_X86 if X86 && 64BIT 88 - select ARM_CRYPTO if ARM 89 - select ARM64_CRYPTO if ARM64 90 88 select CRYPTO_CHACHA20_NEON if ARM || (ARM64 && KERNEL_MODE_NEON) 91 89 select CRYPTO_POLY1305_NEON if ARM64 && KERNEL_MODE_NEON 92 90 select CRYPTO_POLY1305_ARM if ARM
+2 -1
drivers/net/wireless/ath/ath9k/rng.c
··· 83 83 if (!wait || !max || likely(bytes_read) || fail_stats > 110) 84 84 break; 85 85 86 - msleep_interruptible(ath9k_rng_delay_get(++fail_stats)); 86 + if (hwrng_msleep(rng, ath9k_rng_delay_get(++fail_stats))) 87 + break; 87 88 } 88 89 89 90 if (wait && !bytes_read && max)
+7 -10
include/crypto/aria.h
··· 32 32 #define ARIA_RD_KEY_WORDS (ARIA_BLOCK_SIZE / sizeof(u32)) 33 33 34 34 struct aria_ctx { 35 - int key_length; 36 - int rounds; 37 35 u32 enc_key[ARIA_MAX_RD_KEYS][ARIA_RD_KEY_WORDS]; 38 36 u32 dec_key[ARIA_MAX_RD_KEYS][ARIA_RD_KEY_WORDS]; 39 - }; 40 - 41 - static const u32 key_rc[5][4] = { 42 - { 0x517cc1b7, 0x27220a94, 0xfe13abe8, 0xfa9a6ee0 }, 43 - { 0x6db14acc, 0x9e21c820, 0xff28b1d5, 0xef5de2b0 }, 44 - { 0xdb92371d, 0x2126e970, 0x03249775, 0x04e8c90e }, 45 - { 0x517cc1b7, 0x27220a94, 0xfe13abe8, 0xfa9a6ee0 }, 46 - { 0x6db14acc, 0x9e21c820, 0xff28b1d5, 0xef5de2b0 } 37 + int rounds; 38 + int key_length; 47 39 }; 48 40 49 41 static const u32 s1[256] = { ··· 449 457 ((y[(q + 3) % 4]) >> r) ^ 450 458 ((y[(q + 2) % 4]) << (32 - r)); 451 459 } 460 + 461 + void aria_encrypt(void *ctx, u8 *out, const u8 *in); 462 + void aria_decrypt(void *ctx, u8 *out, const u8 *in); 463 + int aria_set_key(struct crypto_tfm *tfm, const u8 *in_key, 464 + unsigned int key_len); 452 465 453 466 #endif
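The aria.h diff drops the header-local key_rc[] round-key constants, reorders the context fields, and exports aria_set_key()/aria_encrypt()/aria_decrypt() so optimized modules such as the new aria-avx code can reuse the generic routines. A hedged sketch of such a fallback; my_aria_encrypt_tail is invented:

#include <crypto/aria.h>

/* Sketch: an optimized ARIA module handling leftover blocks with the
 * generic single-block routine exported above. */
static void my_aria_encrypt_tail(struct aria_ctx *ctx, u8 *dst,
                                 const u8 *src, unsigned int nblocks)
{
        while (nblocks--) {
                aria_encrypt(ctx, dst, src);
                dst += ARIA_BLOCK_SIZE;
                src += ARIA_BLOCK_SIZE;
        }
}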
-25
include/crypto/internal/aead.h
··· 114 114 crypto_init_queue(&queue->base, max_qlen); 115 115 } 116 116 117 - static inline int aead_enqueue_request(struct aead_queue *queue, 118 - struct aead_request *request) 119 - { 120 - return crypto_enqueue_request(&queue->base, &request->base); 121 - } 122 - 123 - static inline struct aead_request *aead_dequeue_request( 124 - struct aead_queue *queue) 125 - { 126 - struct crypto_async_request *req; 127 - 128 - req = crypto_dequeue_request(&queue->base); 129 - 130 - return req ? container_of(req, struct aead_request, base) : NULL; 131 - } 132 - 133 - static inline struct aead_request *aead_get_backlog(struct aead_queue *queue) 134 - { 135 - struct crypto_async_request *req; 136 - 137 - req = crypto_get_backlog(&queue->base); 138 - 139 - return req ? container_of(req, struct aead_request, base) : NULL; 140 - } 141 - 142 117 static inline unsigned int crypto_aead_alg_chunksize(struct aead_alg *alg) 143 118 { 144 119 return alg->chunksize;
-6
include/crypto/scatterwalk.h
··· 46 46 walk->offset += nbytes; 47 47 } 48 48 49 - static inline unsigned int scatterwalk_aligned(struct scatter_walk *walk, 50 - unsigned int alignmask) 51 - { 52 - return !(walk->offset & alignmask); 53 - } 54 - 55 49 static inline struct page *scatterwalk_page(struct scatter_walk *walk) 56 50 { 57 51 return sg_page(walk->sg) + (walk->offset >> PAGE_SHIFT);
+1
include/dt-bindings/clock/aspeed-clock.h
··· 52 52 #define ASPEED_RESET_I2C 7 53 53 #define ASPEED_RESET_AHB 8 54 54 #define ASPEED_RESET_CRT1 9 55 + #define ASPEED_RESET_HACE 10 55 56 56 57 #endif
+1
include/dt-bindings/clock/ast2600-clock.h
··· 111 111 #define ASPEED_RESET_PCIE_RC_O 19 112 112 #define ASPEED_RESET_PCIE_RC_OEN 18 113 113 #define ASPEED_RESET_PCI_DP 5 114 + #define ASPEED_RESET_HACE 4 114 115 #define ASPEED_RESET_AHB 1 115 116 #define ASPEED_RESET_SDRAM 0 116 117
+37 -26
include/linux/hisi_acc_qm.h
··· 87 87 #define PEH_AXUSER_CFG 0x401001 88 88 #define PEH_AXUSER_CFG_ENABLE 0xffffffff 89 89 90 - #define QM_AXI_RRESP BIT(0) 91 - #define QM_AXI_BRESP BIT(1) 92 - #define QM_ECC_MBIT BIT(2) 93 - #define QM_ECC_1BIT BIT(3) 94 - #define QM_ACC_GET_TASK_TIMEOUT BIT(4) 95 - #define QM_ACC_DO_TASK_TIMEOUT BIT(5) 96 - #define QM_ACC_WB_NOT_READY_TIMEOUT BIT(6) 97 - #define QM_SQ_CQ_VF_INVALID BIT(7) 98 - #define QM_CQ_VF_INVALID BIT(8) 99 - #define QM_SQ_VF_INVALID BIT(9) 100 - #define QM_DB_TIMEOUT BIT(10) 101 - #define QM_OF_FIFO_OF BIT(11) 102 - #define QM_DB_RANDOM_INVALID BIT(12) 103 - #define QM_MAILBOX_TIMEOUT BIT(13) 104 - #define QM_FLR_TIMEOUT BIT(14) 105 - 106 - #define QM_BASE_NFE (QM_AXI_RRESP | QM_AXI_BRESP | QM_ECC_MBIT | \ 107 - QM_ACC_GET_TASK_TIMEOUT | QM_DB_TIMEOUT | \ 108 - QM_OF_FIFO_OF | QM_DB_RANDOM_INVALID | \ 109 - QM_MAILBOX_TIMEOUT | QM_FLR_TIMEOUT) 110 - #define QM_BASE_CE QM_ECC_1BIT 111 - 112 - #define QM_Q_DEPTH 1024 113 90 #define QM_MIN_QNUM 2 114 91 #define HISI_ACC_SGL_SGE_NR_MAX 255 115 92 #define QM_SHAPER_CFG 0x100164 ··· 143 166 enum qm_vf_state { 144 167 QM_READY = 0, 145 168 QM_NOT_READY, 169 + }; 170 + 171 + enum qm_cap_bits { 172 + QM_SUPPORT_DB_ISOLATION = 0x0, 173 + QM_SUPPORT_FUNC_QOS, 174 + QM_SUPPORT_STOP_QP, 175 + QM_SUPPORT_MB_COMMAND, 176 + QM_SUPPORT_SVA_PREFETCH, 177 + QM_SUPPORT_RPM, 146 178 }; 147 179 148 180 struct dfx_diff_registers { ··· 218 232 char *acpi_rst; 219 233 u32 msi_wr_port; 220 234 u32 ecc_2bits_mask; 221 - u32 dev_ce_mask; 235 + u32 qm_shutdown_mask; 236 + u32 dev_shutdown_mask; 237 + u32 qm_reset_mask; 238 + u32 dev_reset_mask; 222 239 u32 ce; 223 240 u32 nfe; 224 241 u32 fe; ··· 247 258 void (*err_info_init)(struct hisi_qm *qm); 248 259 }; 249 260 261 + struct hisi_qm_cap_info { 262 + u32 type; 263 + /* Register offset */ 264 + u32 offset; 265 + /* Bit offset in register */ 266 + u32 shift; 267 + u32 mask; 268 + u32 v1_val; 269 + u32 v2_val; 270 + u32 v3_val; 271 + }; 272 + 250 273 struct hisi_qm_list { 251 274 struct mutex lock; 252 275 struct list_head list; ··· 279 278 struct pci_dev *pdev; 280 279 void __iomem *io_base; 281 280 void __iomem *db_io_base; 281 + 282 + /* Capbility version, 0: not supports */ 283 + u32 cap_ver; 282 284 u32 sqe_size; 283 285 u32 qp_base; 284 286 u32 qp_num; ··· 290 286 u32 max_qp_num; 291 287 u32 vfs_num; 292 288 u32 db_interval; 289 + u16 eq_depth; 290 + u16 aeq_depth; 293 291 struct list_head list; 294 292 struct hisi_qm_list *qm_list; 295 293 ··· 310 304 struct hisi_qm_err_info err_info; 311 305 struct hisi_qm_err_status err_status; 312 306 unsigned long misc_ctl; /* driver removing and reset sched */ 307 + /* Device capability bit */ 308 + unsigned long caps; 313 309 314 310 struct rw_semaphore qps_lock; 315 311 struct idr qp_idr; ··· 334 326 bool use_sva; 335 327 bool is_frozen; 336 328 337 - /* doorbell isolation enable */ 338 - bool use_db_isolation; 339 329 resource_size_t phys_base; 340 330 resource_size_t db_phys_base; 341 331 struct uacce_device *uacce; ··· 357 351 358 352 struct hisi_qp { 359 353 u32 qp_id; 354 + u16 sq_depth; 355 + u16 cq_depth; 360 356 u8 alg_type; 361 357 u8 req_type; 362 358 ··· 509 501 int hisi_qm_get_dfx_access(struct hisi_qm *qm); 510 502 void hisi_qm_put_dfx_access(struct hisi_qm *qm); 511 503 void hisi_qm_regs_dump(struct seq_file *s, struct debugfs_regset32 *regset); 504 + u32 hisi_qm_get_hw_info(struct hisi_qm *qm, 505 + const struct hisi_qm_cap_info *info_table, 506 + u32 index, bool is_read); 512 507 /* Used by VFIO ACC live migration driver */ 514 509 struct pci_driver *hisi_sec_get_pf_driver(void);
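hisi_acc_qm.h gains a capability framework: a hisi_qm_cap_info table describes where each capability field lives in the hardware, with per-version fallback values, and hisi_qm_get_hw_info() resolves it; drivers can then cache results in qm->caps. A hedged sketch of declaring and querying such a table; the register offset, mask and defaults below are invented, and the is_read semantics are inferred from the declaration above:

#include <linux/bits.h>
#include <linux/hisi_acc_qm.h>

enum { MY_CAP_QP_NUM };         /* hypothetical capability index */

static const struct hisi_qm_cap_info my_cap_info[] = {
        /* type, reg offset, bit shift, mask, v1/v2/v3 defaults */
        [MY_CAP_QP_NUM] = { MY_CAP_QP_NUM, 0x100158, 0, GENMASK(9, 0),
                            256, 512, 1024 },
};

static u32 my_get_qp_num(struct hisi_qm *qm)
{
        /* is_read = true: read the hardware register; false would
         * fall back to the per-version vN_val default instead. */
        return hisi_qm_get_hw_info(qm, my_cap_info, MY_CAP_QP_NUM, true);
}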
+3
include/linux/hw_random.h
··· 50 50 struct list_head list; 51 51 struct kref ref; 52 52 struct completion cleanup_done; 53 + struct completion dying; 53 54 }; 54 55 55 56 struct device; ··· 61 60 /** Unregister a Hardware Random Number Generator driver. */ 62 61 extern void hwrng_unregister(struct hwrng *rng); 63 62 extern void devm_hwrng_unregister(struct device *dve, struct hwrng *rng); 63 + 64 + extern long hwrng_msleep(struct hwrng *rng, unsigned int msecs); 64 65 65 66 #endif /* LINUX_HWRANDOM_H_ */
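hwrng_msleep() is the new interruptible back-off for driver read loops: it waits on the rng's dying completion instead of sleeping unconditionally, so the delay collapses as soon as the core starts unregistering the device (the ath9k hunk above is a first user). A sketch of a read callback using it; my_hw_fifo_read is a hypothetical device accessor:

#include <linux/hw_random.h>

static size_t my_hw_fifo_read(void *buf, size_t max);   /* hypothetical */

static int my_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait)
{
        int retries = 10;

        while (retries--) {
                size_t n = my_hw_fifo_read(buf, max);

                if (n || !wait)
                        return n;
                /* Nonzero means the sleep ended early (teardown or a
                 * signal); give up instead of finishing the interval. */
                if (hwrng_msleep(rng, 10))
                        return -ENODEV;
        }

        return 0;
}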
+16 -1
include/uapi/misc/uacce/hisi_qm.h
··· 14 14 __u16 qc_type; 15 15 }; 16 16 17 + /** 18 + * struct hisi_qp_info - User data for hisi qp. 19 + * @sqe_size: Submission queue element size 20 + * @sq_depth: The number of sqe 21 + * @cq_depth: The number of cqe 22 + * @reserved: Reserved data 23 + */ 24 + struct hisi_qp_info { 25 + __u32 sqe_size; 26 + __u16 sq_depth; 27 + __u16 cq_depth; 28 + __u64 reserved; 29 + }; 30 + 17 31 #define HISI_QM_API_VER_BASE "hisi_qm_v1" 18 32 #define HISI_QM_API_VER2_BASE "hisi_qm_v2" 19 33 #define HISI_QM_API_VER3_BASE "hisi_qm_v3" 20 34 21 35 /* UACCE_CMD_QM_SET_QP_CTX: Set qp algorithm type */ 22 36 #define UACCE_CMD_QM_SET_QP_CTX _IOWR('H', 10, struct hisi_qp_ctx) 23 - 37 + /* UACCE_CMD_QM_SET_QP_INFO: Set qp depth and BD size */ 38 + #define UACCE_CMD_QM_SET_QP_INFO _IOWR('H', 11, struct hisi_qp_info) 24 39 #endif
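UACCE_CMD_QM_SET_QP_INFO lets userspace negotiate queue geometry on an open uacce queue before use; since it is declared _IOWR, the kernel may write the struct back. A hedged userspace sketch; whether and how a given device adjusts the requested values is driver-specific, and the sqe_size chosen here is only an assumption:

#include <stdio.h>
#include <sys/ioctl.h>
#include <misc/uacce/hisi_qm.h>

/* fd: an already-opened uacce queue file descriptor. */
static int set_qp_info(int fd)
{
        struct hisi_qp_info info = {
                .sqe_size = 128,        /* assumed BD size for this device */
                .sq_depth = 1024,
                .cq_depth = 1024,
        };

        if (ioctl(fd, UACCE_CMD_QM_SET_QP_INFO, &info) < 0) {
                perror("UACCE_CMD_QM_SET_QP_INFO");
                return -1;
        }
        printf("sq_depth=%u cq_depth=%u\n", info.sq_depth, info.cq_depth);
        return 0;
}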
-3
lib/Kconfig
··· 127 127 128 128 source "lib/crypto/Kconfig" 129 129 130 - config LIB_MEMNEQ 131 - bool 132 - 133 130 config CRC_CCITT 134 131 tristate "CRC-CCITT functions" 135 132 help
-1
lib/Makefile
··· 255 255 obj-$(CONFIG_SIGNATURE) += digsig.o 256 256 257 257 lib-$(CONFIG_CLZ_TAB) += clz_tab.o 258 - lib-$(CONFIG_LIB_MEMNEQ) += memneq.o 259 258 260 259 obj-$(CONFIG_GENERIC_STRNCPY_FROM_USER) += strncpy_from_user.o 261 260 obj-$(CONFIG_GENERIC_STRNLEN_USER) += strnlen_user.o
+5 -2
lib/crypto/Kconfig
··· 2 2 3 3 menu "Crypto library routines" 4 4 5 + config CRYPTO_LIB_UTILS 6 + tristate 7 + 5 8 config CRYPTO_LIB_AES 6 9 tristate 7 10 ··· 36 33 37 34 config CRYPTO_LIB_CHACHA_GENERIC 38 35 tristate 36 + select CRYPTO_LIB_UTILS 39 37 help 40 38 This symbol can be depended upon by arch implementations of the 41 39 ChaCha library interface that require the generic code as a ··· 46 42 47 43 config CRYPTO_LIB_CHACHA 48 44 tristate "ChaCha library interface" 49 - depends on CRYPTO 50 45 depends on CRYPTO_ARCH_HAVE_LIB_CHACHA || !CRYPTO_ARCH_HAVE_LIB_CHACHA 51 46 select CRYPTO_LIB_CHACHA_GENERIC if CRYPTO_ARCH_HAVE_LIB_CHACHA=n 52 47 help ··· 73 70 tristate "Curve25519 scalar multiplication library" 74 71 depends on CRYPTO_ARCH_HAVE_LIB_CURVE25519 || !CRYPTO_ARCH_HAVE_LIB_CURVE25519 75 72 select CRYPTO_LIB_CURVE25519_GENERIC if CRYPTO_ARCH_HAVE_LIB_CURVE25519=n 76 - select LIB_MEMNEQ 73 + select CRYPTO_LIB_UTILS 77 74 help 78 75 Enable the Curve25519 library interface. This interface may be 79 76 fulfilled by either the generic implementation or an arch-specific
+3
lib/crypto/Makefile
··· 1 1 # SPDX-License-Identifier: GPL-2.0 2 2 3 + obj-$(CONFIG_CRYPTO_LIB_UTILS) += libcryptoutils.o 4 + libcryptoutils-y := memneq.o utils.o 5 + 3 6 # chacha is used by the /dev/random driver which is always builtin 4 7 obj-y += chacha.o 5 8 obj-$(CONFIG_CRYPTO_LIB_CHACHA_GENERIC) += libchacha.o
+88
lib/crypto/utils.c
··· 1 + // SPDX-License-Identifier: GPL-2.0-or-later 2 + /* 3 + * Crypto library utility functions 4 + * 5 + * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au> 6 + */ 7 + 8 + #include <asm/unaligned.h> 9 + #include <crypto/algapi.h> 10 + #include <linux/module.h> 11 + 12 + /* 13 + * XOR @len bytes from @src1 and @src2 together, writing the result to @dst 14 + * (which may alias one of the sources). Don't call this directly; call 15 + * crypto_xor() or crypto_xor_cpy() instead. 16 + */ 17 + void __crypto_xor(u8 *dst, const u8 *src1, const u8 *src2, unsigned int len) 18 + { 19 + int relalign = 0; 20 + 21 + if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)) { 22 + int size = sizeof(unsigned long); 23 + int d = (((unsigned long)dst ^ (unsigned long)src1) | 24 + ((unsigned long)dst ^ (unsigned long)src2)) & 25 + (size - 1); 26 + 27 + relalign = d ? 1 << __ffs(d) : size; 28 + 29 + /* 30 + * If we care about alignment, process as many bytes as 31 + * needed to advance dst and src to values whose alignments 32 + * equal their relative alignment. This will allow us to 33 + * process the remainder of the input using optimal strides. 34 + */ 35 + while (((unsigned long)dst & (relalign - 1)) && len > 0) { 36 + *dst++ = *src1++ ^ *src2++; 37 + len--; 38 + } 39 + } 40 + 41 + while (IS_ENABLED(CONFIG_64BIT) && len >= 8 && !(relalign & 7)) { 42 + if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)) { 43 + u64 l = get_unaligned((u64 *)src1) ^ 44 + get_unaligned((u64 *)src2); 45 + put_unaligned(l, (u64 *)dst); 46 + } else { 47 + *(u64 *)dst = *(u64 *)src1 ^ *(u64 *)src2; 48 + } 49 + dst += 8; 50 + src1 += 8; 51 + src2 += 8; 52 + len -= 8; 53 + } 54 + 55 + while (len >= 4 && !(relalign & 3)) { 56 + if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)) { 57 + u32 l = get_unaligned((u32 *)src1) ^ 58 + get_unaligned((u32 *)src2); 59 + put_unaligned(l, (u32 *)dst); 60 + } else { 61 + *(u32 *)dst = *(u32 *)src1 ^ *(u32 *)src2; 62 + } 63 + dst += 4; 64 + src1 += 4; 65 + src2 += 4; 66 + len -= 4; 67 + } 68 + 69 + while (len >= 2 && !(relalign & 1)) { 70 + if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)) { 71 + u16 l = get_unaligned((u16 *)src1) ^ 72 + get_unaligned((u16 *)src2); 73 + put_unaligned(l, (u16 *)dst); 74 + } else { 75 + *(u16 *)dst = *(u16 *)src1 ^ *(u16 *)src2; 76 + } 77 + dst += 2; 78 + src1 += 2; 79 + src2 += 2; 80 + len -= 2; 81 + } 82 + 83 + while (len--) 84 + *dst++ = *src1++ ^ *src2++; 85 + } 86 + EXPORT_SYMBOL_GPL(__crypto_xor); 87 + 88 + MODULE_LICENSE("GPL");
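__crypto_xor() above is the out-of-line worker behind crypto_xor() and crypto_xor_cpy() in <crypto/algapi.h>, which handle small constant sizes inline and fall back here otherwise. A minimal sketch of the intended entry points:

#include <crypto/algapi.h>

/* Sketch: fold a keystream into a buffer in place, then into a copy. */
static void xor_demo(u8 *buf, const u8 *keystream, u8 *out, unsigned int len)
{
        crypto_xor(buf, keystream, len);                /* buf ^= keystream */
        crypto_xor_cpy(out, buf, keystream, len);       /* out = buf ^ keystream */
}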
+2 -5
lib/memneq.c → lib/crypto/memneq.c
··· 59 59 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 60 60 */ 61 61 62 - #include <crypto/algapi.h> 63 62 #include <asm/unaligned.h> 64 - 65 - #ifndef __HAVE_ARCH_CRYPTO_MEMNEQ 63 + #include <crypto/algapi.h> 64 + #include <linux/module.h> 66 65 67 66 /* Generic path for arbitrary size */ 68 67 static inline unsigned long ··· 171 172 } 172 173 } 173 174 EXPORT_SYMBOL(__crypto_memneq); 174 - 175 - #endif /* __HAVE_ARCH_CRYPTO_MEMNEQ */
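__crypto_memneq() backs crypto_memneq() in <crypto/algapi.h>: a constant-time inequality test whose runtime does not depend on where the buffers differ, which is what MAC/tag verification should use instead of memcmp(). A short sketch:

#include <crypto/algapi.h>

/* Sketch: verify an authentication tag without leaking timing. */
static bool tag_ok(const u8 *expected, const u8 *received, unsigned int len)
{
        /* crypto_memneq() returns 0 when equal, nonzero otherwise. */
        return crypto_memneq(expected, received, len) == 0;
}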