Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

spi: axi-spi-engine: support SPI_MULTI_LANE_MODE_STRIPE

Add support for SPI_MULTI_LANE_MODE_STRIPE to the AXI SPI engine driver.

The v2.0.0 version of the AXI SPI Engine IP core supports multiple
lanes. This can be used with SPI_MULTI_LANE_MODE_STRIPE to support
reading from simultaneous sampling ADCs that have a separate SDO line
for each analog channel. This allows reading all channels at the same
time to increase throughput.

Reviewed-by: Marcelo Schmitt <marcelo.schmitt@analog.com>
Reviewed-by: Jonathan Cameron <jonathan.cameron@huawei.com>
Signed-off-by: David Lechner <dlechner@baylibre.com>
Link: https://patch.msgid.link/20260123-spi-add-multi-bus-support-v6-7-12af183c06eb@baylibre.com
Signed-off-by: Mark Brown <broonie@kernel.org>

Authored by David Lechner and committed by Mark Brown
0ec5ed7c 2e706f86

+141 -4
drivers/spi/spi-axi-spi-engine.c
··· 23 23 #include <linux/spi/spi.h> 24 24 #include <trace/events/spi.h> 25 25 26 + #define SPI_ENGINE_REG_DATA_WIDTH 0x0C 27 + #define SPI_ENGINE_REG_DATA_WIDTH_NUM_OF_SDIO_MASK GENMASK(23, 16) 28 + #define SPI_ENGINE_REG_DATA_WIDTH_MASK GENMASK(15, 0) 26 29 #define SPI_ENGINE_REG_OFFLOAD_MEM_ADDR_WIDTH 0x10 27 30 #define SPI_ENGINE_REG_RESET 0x40 28 31 ··· 78 75 #define SPI_ENGINE_CMD_REG_CLK_DIV 0x0 79 76 #define SPI_ENGINE_CMD_REG_CONFIG 0x1 80 77 #define SPI_ENGINE_CMD_REG_XFER_BITS 0x2 78 + #define SPI_ENGINE_CMD_REG_SDI_MASK 0x3 79 + #define SPI_ENGINE_CMD_REG_SDO_MASK 0x4 81 80 82 81 #define SPI_ENGINE_MISC_SYNC 0x0 83 82 #define SPI_ENGINE_MISC_SLEEP 0x1 ··· 109 104 /* default sizes - can be changed when SPI Engine firmware is compiled */ 110 105 #define SPI_ENGINE_OFFLOAD_CMD_FIFO_SIZE 16 111 106 #define SPI_ENGINE_OFFLOAD_SDO_FIFO_SIZE 16 107 + 108 + /* Extending SPI_MULTI_LANE_MODE values for optimizing messages. */ 109 + #define SPI_ENGINE_MULTI_BUS_MODE_UNKNOWN -1 110 + #define SPI_ENGINE_MULTI_BUS_MODE_CONFLICTING -2 112 111 113 112 struct spi_engine_program { 114 113 unsigned int length; ··· 151 142 unsigned long flags; 152 143 unsigned int offload_num; 153 144 unsigned int spi_mode_config; 145 + unsigned int multi_lane_mode; 146 + u8 rx_primary_lane_mask; 147 + u8 tx_primary_lane_mask; 148 + u8 rx_all_lanes_mask; 149 + u8 tx_all_lanes_mask; 154 150 u8 bits_per_word; 155 151 }; 156 152 ··· 178 164 u32 offload_caps; 179 165 bool offload_requires_sync; 180 166 }; 167 + 168 + static void spi_engine_primary_lane_flag(struct spi_device *spi, 169 + u8 *rx_lane_flags, u8 *tx_lane_flags) 170 + { 171 + *rx_lane_flags = BIT(spi->rx_lane_map[0]); 172 + *tx_lane_flags = BIT(spi->tx_lane_map[0]); 173 + } 174 + 175 + static void spi_engine_all_lanes_flags(struct spi_device *spi, 176 + u8 *rx_lane_flags, u8 *tx_lane_flags) 177 + { 178 + int i; 179 + 180 + for (i = 0; i < spi->num_rx_lanes; i++) 181 + *rx_lane_flags |= BIT(spi->rx_lane_map[i]); 182 + 183 + for (i = 
0; i < spi->num_tx_lanes; i++) 184 + *tx_lane_flags |= BIT(spi->tx_lane_map[i]); 185 + } 181 186 182 187 static void spi_engine_program_add_cmd(struct spi_engine_program *p, 183 188 bool dry, uint16_t cmd) ··· 226 193 } 227 194 228 195 static void spi_engine_gen_xfer(struct spi_engine_program *p, bool dry, 229 - struct spi_transfer *xfer) 196 + struct spi_transfer *xfer, u32 num_lanes) 230 197 { 231 198 unsigned int len; 232 199 ··· 236 203 len = xfer->len / 2; 237 204 else 238 205 len = xfer->len / 4; 206 + 207 + if (xfer->multi_lane_mode == SPI_MULTI_LANE_MODE_STRIPE) 208 + len /= num_lanes; 239 209 240 210 while (len) { 241 211 unsigned int n = min(len, 256U); ··· 305 269 { 306 270 unsigned int clk_div, max_hz = msg->spi->controller->max_speed_hz; 307 271 struct spi_transfer *xfer; 272 + int multi_lane_mode = SPI_ENGINE_MULTI_BUS_MODE_UNKNOWN; 308 273 u8 min_bits_per_word = U8_MAX; 309 274 u8 max_bits_per_word = 0; 310 275 ··· 321 284 min_bits_per_word = min(min_bits_per_word, xfer->bits_per_word); 322 285 max_bits_per_word = max(max_bits_per_word, xfer->bits_per_word); 323 286 } 287 + 288 + if (xfer->rx_buf || xfer->offload_flags & SPI_OFFLOAD_XFER_RX_STREAM || 289 + xfer->tx_buf || xfer->offload_flags & SPI_OFFLOAD_XFER_TX_STREAM) { 290 + switch (xfer->multi_lane_mode) { 291 + case SPI_MULTI_LANE_MODE_SINGLE: 292 + case SPI_MULTI_LANE_MODE_STRIPE: 293 + break; 294 + default: 295 + /* Other modes, like mirror not supported */ 296 + return -EINVAL; 297 + } 298 + 299 + /* If all xfers have the same multi-lane mode, we can optimize. 
*/ 300 + if (multi_lane_mode == SPI_ENGINE_MULTI_BUS_MODE_UNKNOWN) 301 + multi_lane_mode = xfer->multi_lane_mode; 302 + else if (multi_lane_mode != xfer->multi_lane_mode) 303 + multi_lane_mode = SPI_ENGINE_MULTI_BUS_MODE_CONFLICTING; 304 + } 324 305 } 325 306 326 307 /* ··· 352 297 priv->bits_per_word = min_bits_per_word; 353 298 else 354 299 priv->bits_per_word = 0; 300 + 301 + priv->multi_lane_mode = multi_lane_mode; 302 + spi_engine_primary_lane_flag(msg->spi, 303 + &priv->rx_primary_lane_mask, 304 + &priv->tx_primary_lane_mask); 305 + spi_engine_all_lanes_flags(msg->spi, 306 + &priv->rx_all_lanes_mask, 307 + &priv->tx_all_lanes_mask); 355 308 } 356 309 357 310 return 0; ··· 373 310 struct spi_engine_offload *priv; 374 311 struct spi_transfer *xfer; 375 312 int clk_div, new_clk_div, inst_ns; 313 + int prev_multi_lane_mode = SPI_MULTI_LANE_MODE_SINGLE; 376 314 bool keep_cs = false; 377 315 u8 bits_per_word = 0; 378 316 ··· 398 334 * in the same way. 399 335 */ 400 336 bits_per_word = priv->bits_per_word; 337 + prev_multi_lane_mode = priv->multi_lane_mode; 401 338 } else { 402 339 spi_engine_program_add_cmd(p, dry, 403 340 SPI_ENGINE_CMD_WRITE(SPI_ENGINE_CMD_REG_CONFIG, ··· 409 344 spi_engine_gen_cs(p, dry, spi, !xfer->cs_off); 410 345 411 346 list_for_each_entry(xfer, &msg->transfers, transfer_list) { 347 + if (xfer->rx_buf || xfer->offload_flags & SPI_OFFLOAD_XFER_RX_STREAM || 348 + xfer->tx_buf || xfer->offload_flags & SPI_OFFLOAD_XFER_TX_STREAM) { 349 + if (xfer->multi_lane_mode != prev_multi_lane_mode) { 350 + u8 tx_lane_flags, rx_lane_flags; 351 + 352 + if (xfer->multi_lane_mode == SPI_MULTI_LANE_MODE_STRIPE) 353 + spi_engine_all_lanes_flags(spi, &rx_lane_flags, 354 + &tx_lane_flags); 355 + else 356 + spi_engine_primary_lane_flag(spi, &rx_lane_flags, 357 + &tx_lane_flags); 358 + 359 + spi_engine_program_add_cmd(p, dry, 360 + SPI_ENGINE_CMD_WRITE(SPI_ENGINE_CMD_REG_SDI_MASK, 361 + rx_lane_flags)); 362 + spi_engine_program_add_cmd(p, dry, 363 + 
SPI_ENGINE_CMD_WRITE(SPI_ENGINE_CMD_REG_SDO_MASK, 364 + tx_lane_flags)); 365 + } 366 + prev_multi_lane_mode = xfer->multi_lane_mode; 367 + } 368 + 412 369 new_clk_div = host->max_speed_hz / xfer->effective_speed_hz; 413 370 if (new_clk_div != clk_div) { 414 371 clk_div = new_clk_div; ··· 447 360 bits_per_word)); 448 361 } 449 362 450 - spi_engine_gen_xfer(p, dry, xfer); 363 + spi_engine_gen_xfer(p, dry, xfer, spi->num_rx_lanes); 451 364 spi_engine_gen_sleep(p, dry, spi_delay_to_ns(&xfer->delay, xfer), 452 365 inst_ns, xfer->effective_speed_hz); 453 366 ··· 481 394 if (clk_div != 1) 482 395 spi_engine_program_add_cmd(p, dry, 483 396 SPI_ENGINE_CMD_WRITE(SPI_ENGINE_CMD_REG_CLK_DIV, 0)); 397 + 398 + /* Restore single lane mode unless offload disable will restore it later. */ 399 + if (prev_multi_lane_mode == SPI_MULTI_LANE_MODE_STRIPE && 400 + (!msg->offload || priv->multi_lane_mode != SPI_MULTI_LANE_MODE_STRIPE)) { 401 + u8 rx_lane_flags, tx_lane_flags; 402 + 403 + spi_engine_primary_lane_flag(spi, &rx_lane_flags, &tx_lane_flags); 404 + 405 + spi_engine_program_add_cmd(p, dry, 406 + SPI_ENGINE_CMD_WRITE(SPI_ENGINE_CMD_REG_SDI_MASK, rx_lane_flags)); 407 + spi_engine_program_add_cmd(p, dry, 408 + SPI_ENGINE_CMD_WRITE(SPI_ENGINE_CMD_REG_SDO_MASK, tx_lane_flags)); 409 + } 484 410 } 485 411 486 412 static void spi_engine_xfer_next(struct spi_message *msg, ··· 899 799 writel_relaxed(SPI_ENGINE_CMD_CS_INV(spi_engine->cs_inv), 900 800 spi_engine->base + SPI_ENGINE_REG_CMD_FIFO); 901 801 802 + if (host->num_data_lanes > 1) { 803 + u8 rx_lane_flags, tx_lane_flags; 804 + 805 + spi_engine_primary_lane_flag(device, &rx_lane_flags, &tx_lane_flags); 806 + 807 + writel_relaxed(SPI_ENGINE_CMD_WRITE(SPI_ENGINE_CMD_REG_SDI_MASK, 808 + rx_lane_flags), 809 + spi_engine->base + SPI_ENGINE_REG_CMD_FIFO); 810 + writel_relaxed(SPI_ENGINE_CMD_WRITE(SPI_ENGINE_CMD_REG_SDO_MASK, 811 + tx_lane_flags), 812 + spi_engine->base + SPI_ENGINE_REG_CMD_FIFO); 813 + } 814 + 902 815 /* 903 816 * In 
addition to setting the flags, we have to do a CS assert command 904 817 * to make the new setting actually take effect. ··· 1015 902 priv->bits_per_word), 1016 903 spi_engine->base + SPI_ENGINE_REG_CMD_FIFO); 1017 904 905 + if (priv->multi_lane_mode == SPI_MULTI_LANE_MODE_STRIPE) { 906 + writel_relaxed(SPI_ENGINE_CMD_WRITE(SPI_ENGINE_CMD_REG_SDI_MASK, 907 + priv->rx_all_lanes_mask), 908 + spi_engine->base + SPI_ENGINE_REG_CMD_FIFO); 909 + writel_relaxed(SPI_ENGINE_CMD_WRITE(SPI_ENGINE_CMD_REG_SDO_MASK, 910 + priv->tx_all_lanes_mask), 911 + spi_engine->base + SPI_ENGINE_REG_CMD_FIFO); 912 + } 913 + 1018 914 writel_relaxed(SPI_ENGINE_CMD_SYNC(1), 1019 915 spi_engine->base + SPI_ENGINE_REG_CMD_FIFO); 1020 916 ··· 1051 929 reg &= ~SPI_ENGINE_OFFLOAD_CTRL_ENABLE; 1052 930 writel_relaxed(reg, spi_engine->base + 1053 931 SPI_ENGINE_REG_OFFLOAD_CTRL(priv->offload_num)); 932 + 933 + /* Restore single-lane mode. */ 934 + if (priv->multi_lane_mode == SPI_MULTI_LANE_MODE_STRIPE) { 935 + writel_relaxed(SPI_ENGINE_CMD_WRITE(SPI_ENGINE_CMD_REG_SDI_MASK, 936 + priv->rx_primary_lane_mask), 937 + spi_engine->base + SPI_ENGINE_REG_CMD_FIFO); 938 + writel_relaxed(SPI_ENGINE_CMD_WRITE(SPI_ENGINE_CMD_REG_SDO_MASK, 939 + priv->tx_primary_lane_mask), 940 + spi_engine->base + SPI_ENGINE_REG_CMD_FIFO); 941 + } 1054 942 } 1055 943 1056 944 static struct dma_chan ··· 1105 973 { 1106 974 struct spi_engine *spi_engine; 1107 975 struct spi_controller *host; 1108 - unsigned int version; 976 + unsigned int version, data_width_reg_val; 1109 977 int irq, ret; 1110 978 1111 979 irq = platform_get_irq(pdev, 0); ··· 1174 1042 return PTR_ERR(spi_engine->base); 1175 1043 1176 1044 version = readl(spi_engine->base + ADI_AXI_REG_VERSION); 1177 - if (ADI_AXI_PCORE_VER_MAJOR(version) != 1) { 1045 + if (ADI_AXI_PCORE_VER_MAJOR(version) > 2) { 1178 1046 dev_err(&pdev->dev, "Unsupported peripheral version %u.%u.%u\n", 1179 1047 ADI_AXI_PCORE_VER_MAJOR(version), 1180 1048 ADI_AXI_PCORE_VER_MINOR(version), 1181 
1049 ADI_AXI_PCORE_VER_PATCH(version)); 1182 1050 return -ENODEV; 1183 1051 } 1052 + 1053 + data_width_reg_val = readl(spi_engine->base + SPI_ENGINE_REG_DATA_WIDTH); 1184 1054 1185 1055 if (adi_axi_pcore_ver_gteq(version, 1, 1)) { 1186 1056 unsigned int sizes = readl(spi_engine->base + ··· 1231 1097 } 1232 1098 if (adi_axi_pcore_ver_gteq(version, 1, 3)) 1233 1099 host->mode_bits |= SPI_MOSI_IDLE_LOW | SPI_MOSI_IDLE_HIGH; 1100 + if (adi_axi_pcore_ver_gteq(version, 2, 0)) 1101 + host->num_data_lanes = FIELD_GET(SPI_ENGINE_REG_DATA_WIDTH_NUM_OF_SDIO_MASK, 1102 + data_width_reg_val); 1234 1103 1235 1104 if (host->max_speed_hz == 0) 1236 1105 return dev_err_probe(&pdev->dev, -EINVAL, "spi_clk rate is 0");