
net: enetc: fix NTMP DMA use-after-free issue

The AI-generated review reported a potential DMA use-after-free issue
[1]. If netc_xmit_ntmp_cmd() times out and returns an error, the pending
command is not explicitly aborted, yet ntmp_free_data_mem()
unconditionally frees the DMA buffer. The hardware eventually processes
the pending command and performs a DMA write of the response to the
physical address of the freed buffer, so if the buffer has been
reallocated elsewhere by then, this leads to silent memory corruption.
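
In the pre-patch code, the failing sequence looks roughly like this (a
paraphrased sketch of the old ntmp_maft_add_entry() path shown in the
diff below, not a verbatim excerpt):

    err = netc_xmit_ntmp_cmd(user, &cbd);  /* times out; the command is
                                            * still pending in hardware
                                            */
    if (err)
            dev_err(user->dev, "Failed to add MAFT entry ...\n");

    ntmp_free_data_mem(&data);   /* frees the DMA buffer unconditionally */

    /* Later, the hardware completes the pending command and DMA-writes
     * the response into the freed (and possibly reallocated) buffer.
     */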

To resolve this issue, this patch makes the following changes:

1. Convert cbdr->ring_lock from a spinlock to a mutex

The lock was originally a spinlock in case NTMP operations were ever
invoked from atomic context, but after downstream support for all NTMP
tables was added, no such usage materialized. A mutex is now required
because the driver must reclaim used BDs and release the associated DMA
memory while holding the lock, and dma_free_coherent() may sleep.
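
A minimal sketch of the new locking constraint (the actual lock/unlock
calls live in the helpers introduced in item 3 below):

    mutex_lock(&cbdr->ring_lock);
    ntmp_clean_cbdr(cbdr);   /* may call dma_free_coherent(), which can
                              * sleep, so spin_lock_bh() is not an option
                              */
    mutex_unlock(&cbdr->ring_lock);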

2. Introduce software command BD (struct netc_swcbd)

The hardware write-back overwrites the addr and len fields of the BD,
so the driver cannot rely on the hardware BD to free the associated DMA
memory. The driver now maintains a software shadow BD that stores the
DMA buffer pointer, DMA address, and size, which enables correct DMA
memory release. netc_xmit_ntmp_cmd() now only reclaims older BDs once
the number of used BDs reaches NETC_CBDR_CLEAN_WORK (16). With this,
struct ntmp_dma_buf is no longer needed and is removed, and
ntmp_free_data_mem() is reworked to operate on the shadow BD.
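
The shadow BD is the new struct netc_swcbd added to
include/linux/fsl/ntmp.h in this patch (field comments added here for
illustration):

    struct netc_swcbd {
            void *buf;       /* CPU pointer to the DMA-coherent buffer */
            dma_addr_t dma;  /* bus address programmed into the hardware BD */
            size_t size;     /* requested buffer size, before alignment padding */
    };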

3. Require callers to hold ring_lock across netc_xmit_ntmp_cmd()

Previously, netc_xmit_ntmp_cmd() released the ring_lock before the
caller finished consuming the response, so a concurrent thread
submitting a new command could trigger ntmp_clean_cbdr() and free the
DMA buffer while it was still in use. Move ring_lock ownership to the
caller to ensure the response buffer cannot be reclaimed prematurely;
the helpers ntmp_select_and_lock_cbdr() and ntmp_unlock_cbdr() are
added for this purpose.
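
Callers now follow this pattern, abridged from the updated
ntmp_maft_query_entry() in the diff below:

    ntmp_select_and_lock_cbdr(user, &cbdr);
    err = ntmp_query_entry_by_id(cbdr, NTMP_MAFT_ID, req, &swcbd, true);
    if (!err) {
            /* Consume the response while the lock is still held so a
             * concurrent command cannot reclaim the buffer underneath us.
             */
            resp = (struct maft_resp_query *)req;
            maft->keye = resp->keye;
            maft->cfge = resp->cfge;
    }
    ntmp_unlock_cbdr(cbdr);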

These changes eliminate the DMA use-after-free condition and ensure safe
and consistent BD reclamation and DMA buffer lifecycle management.

Fixes: 4701073c3deb ("net: enetc: add initial netc-lib driver to support NTMP")
Link: https://lore.kernel.org/netdev/20260403011729.1795413-1-kuba@kernel.org/ # [1]
Signed-off-by: Wei Fang <wei.fang@nxp.com>
Link: https://patch.msgid.link/20260415060833.2303846-3-wei.fang@nxp.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>

Authored by Wei Fang, committed by Jakub Kicinski
Commit 3cade698 (parent 759a3290)

Total: +141 -104

drivers/net/ethernet/freescale/enetc/ntmp.c: +132 -96
···
 #include <linux/dma-mapping.h>
 #include <linux/fsl/netc_global.h>
 #include <linux/iopoll.h>
+#include <linux/vmalloc.h>
 
 #include "ntmp_private.h"
···
 	if (!cbdr->addr_base)
 		return -ENOMEM;
 
+	cbdr->swcbd = vcalloc(cbd_num, sizeof(struct netc_swcbd));
+	if (!cbdr->swcbd) {
+		dma_free_coherent(dev, size, cbdr->addr_base, cbdr->dma_base);
+		return -ENOMEM;
+	}
+
 	cbdr->dma_size = size;
 	cbdr->bd_num = cbd_num;
 	cbdr->regs = *regs;
···
 	cbdr->addr_base_align = PTR_ALIGN(cbdr->addr_base,
 					  NTMP_BASE_ADDR_ALIGN);
 
-	spin_lock_init(&cbdr->ring_lock);
+	mutex_init(&cbdr->ring_lock);
 
 	cbdr->next_to_use = netc_read(cbdr->regs.pir);
 	cbdr->next_to_clean = netc_read(cbdr->regs.cir) & NETC_CBDRCIR_INDEX;
···
 }
 EXPORT_SYMBOL_GPL(ntmp_init_cbdr);
 
+static void ntmp_free_data_mem(struct device *dev, struct netc_swcbd *swcbd)
+{
+	if (unlikely(!swcbd->buf))
+		return;
+
+	dma_free_coherent(dev, swcbd->size + NTMP_DATA_ADDR_ALIGN,
+			  swcbd->buf, swcbd->dma);
+}
+
 void ntmp_free_cbdr(struct netc_cbdr *cbdr)
 {
 	/* Disable the Control BD Ring */
 	netc_write(cbdr->regs.mr, 0);
+
+	for (int i = 0; i < cbdr->bd_num; i++)
+		ntmp_free_data_mem(cbdr->dev, &cbdr->swcbd[i]);
+
+	vfree(cbdr->swcbd);
 	dma_free_coherent(cbdr->dev, cbdr->dma_size, cbdr->addr_base,
 			  cbdr->dma_base);
 	memset(cbdr, 0, sizeof(*cbdr));
···
 
 static void ntmp_clean_cbdr(struct netc_cbdr *cbdr)
 {
-	union netc_cbd *cbd;
-	int i;
+	int i = cbdr->next_to_clean;
 
-	i = cbdr->next_to_clean;
 	while ((netc_read(cbdr->regs.cir) & NETC_CBDRCIR_INDEX) != i) {
-		cbd = ntmp_get_cbd(cbdr, i);
+		union netc_cbd *cbd = ntmp_get_cbd(cbdr, i);
+		struct netc_swcbd *swcbd = &cbdr->swcbd[i];
+
+		ntmp_free_data_mem(cbdr->dev, swcbd);
+		memset(swcbd, 0, sizeof(*swcbd));
 		memset(cbd, 0, sizeof(*cbd));
 		i = (i + 1) % cbdr->bd_num;
 	}
 
+	dma_wmb();
 	cbdr->next_to_clean = i;
 }
 
-static int netc_xmit_ntmp_cmd(struct ntmp_user *user, union netc_cbd *cbd)
+static void ntmp_select_and_lock_cbdr(struct ntmp_user *user,
+				      struct netc_cbdr **cbdr)
+{
+	/* Currently only ENETC is supported, and it has only one command
+	 * BD ring.
+	 */
+	*cbdr = &user->ring[0];
+
+	mutex_lock(&(*cbdr)->ring_lock);
+}
+
+static void ntmp_unlock_cbdr(struct netc_cbdr *cbdr)
+{
+	mutex_unlock(&cbdr->ring_lock);
+}
+
+static int netc_xmit_ntmp_cmd(struct netc_cbdr *cbdr, union netc_cbd *cbd,
+			      struct netc_swcbd *swcbd)
 {
 	union netc_cbd *cur_cbd;
-	struct netc_cbdr *cbdr;
-	int i, err;
+	int i, err, used_bds;
 	u16 status;
 	u32 val;
 
-	/* Currently only i.MX95 ENETC is supported, and it only has one
-	 * command BD ring
-	 */
-	cbdr = &user->ring[0];
-
-	spin_lock_bh(&cbdr->ring_lock);
-
-	if (unlikely(!ntmp_get_free_cbd_num(cbdr)))
+	used_bds = cbdr->bd_num - ntmp_get_free_cbd_num(cbdr);
+	if (unlikely(used_bds >= NETC_CBDR_CLEAN_WORK)) {
 		ntmp_clean_cbdr(cbdr);
+		if (unlikely(!ntmp_get_free_cbd_num(cbdr))) {
+			ntmp_free_data_mem(cbdr->dev, swcbd);
+			return -EBUSY;
+		}
+	}
 
 	i = cbdr->next_to_use;
 	cur_cbd = ntmp_get_cbd(cbdr, i);
 	*cur_cbd = *cbd;
+	cbdr->swcbd[i] = *swcbd;
 	dma_wmb();
 
 	/* Update producer index of both software and hardware */
···
 	cbdr->next_to_use = i;
 	netc_write(cbdr->regs.pir, i);
 
-	err = read_poll_timeout_atomic(netc_read, val,
-				       (val & NETC_CBDRCIR_INDEX) == i,
-				       NETC_CBDR_DELAY_US, NETC_CBDR_TIMEOUT,
-				       true, cbdr->regs.cir);
+	err = read_poll_timeout(netc_read, val,
+				(val & NETC_CBDRCIR_INDEX) == i,
+				NETC_CBDR_DELAY_US, NETC_CBDR_TIMEOUT,
+				true, cbdr->regs.cir);
 	if (unlikely(err))
-		goto cbdr_unlock;
+		return err;
 
 	if (unlikely(val & NETC_CBDRCIR_SBE)) {
-		dev_err(user->dev, "Command BD system bus error\n");
-		err = -EIO;
-		goto cbdr_unlock;
+		dev_err(cbdr->dev, "Command BD system bus error\n");
+		return -EIO;
 	}
 
 	dma_rmb();
···
 	/* Check the writeback error status */
 	status = le16_to_cpu(cbd->resp_hdr.error_rr) & NTMP_RESP_ERROR;
 	if (unlikely(status)) {
-		err = -EIO;
-		dev_err(user->dev, "Command BD error: 0x%04x\n", status);
+		dev_err(cbdr->dev, "Command BD error: 0x%04x\n", status);
+		return -EIO;
 	}
-
-	ntmp_clean_cbdr(cbdr);
-	dma_wmb();
-
-cbdr_unlock:
-	spin_unlock_bh(&cbdr->ring_lock);
-
-	return err;
-}
-
-static int ntmp_alloc_data_mem(struct ntmp_dma_buf *data, void **buf_align)
-{
-	void *buf;
-
-	buf = dma_alloc_coherent(data->dev, data->size + NTMP_DATA_ADDR_ALIGN,
-				 &data->dma, GFP_KERNEL);
-	if (!buf)
-		return -ENOMEM;
-
-	data->buf = buf;
-	*buf_align = PTR_ALIGN(buf, NTMP_DATA_ADDR_ALIGN);
 
 	return 0;
 }
 
-static void ntmp_free_data_mem(struct ntmp_dma_buf *data)
+static int ntmp_alloc_data_mem(struct device *dev, struct netc_swcbd *swcbd,
+			       void **buf_align)
 {
-	dma_free_coherent(data->dev, data->size + NTMP_DATA_ADDR_ALIGN,
-			  data->buf, data->dma);
+	void *buf;
+
+	buf = dma_alloc_coherent(dev, swcbd->size + NTMP_DATA_ADDR_ALIGN,
+				 &swcbd->dma, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	swcbd->buf = buf;
+	*buf_align = PTR_ALIGN(buf, NTMP_DATA_ADDR_ALIGN);
+
+	return 0;
 }
 
 static void ntmp_fill_request_hdr(union netc_cbd *cbd, dma_addr_t dma,
···
 				   u8 tbl_ver, u32 entry_id, u32 req_len,
 				   u32 resp_len)
 {
-	struct ntmp_dma_buf data = {
-		.dev = user->dev,
+	struct netc_swcbd swcbd = {
 		.size = max(req_len, resp_len),
 	};
 	struct ntmp_req_by_eid *req;
+	struct netc_cbdr *cbdr;
 	union netc_cbd cbd;
 	int err;
 
-	err = ntmp_alloc_data_mem(&data, (void **)&req);
+	err = ntmp_alloc_data_mem(user->dev, &swcbd, (void **)&req);
 	if (err)
 		return err;
 
 	ntmp_fill_crd_eid(req, tbl_ver, 0, 0, entry_id);
-	ntmp_fill_request_hdr(&cbd, data.dma, NTMP_LEN(req_len, resp_len),
+	ntmp_fill_request_hdr(&cbd, swcbd.dma, NTMP_LEN(req_len, resp_len),
 			      tbl_id, NTMP_CMD_DELETE, NTMP_AM_ENTRY_ID);
 
-	err = netc_xmit_ntmp_cmd(user, &cbd);
+	ntmp_select_and_lock_cbdr(user, &cbdr);
+	err = netc_xmit_ntmp_cmd(cbdr, &cbd, &swcbd);
 	if (err)
 		dev_err(user->dev,
 			"Failed to delete entry 0x%x of %s, err: %pe",
 			entry_id, ntmp_table_name(tbl_id), ERR_PTR(err));
-
-	ntmp_free_data_mem(&data);
+	ntmp_unlock_cbdr(cbdr);
 
 	return err;
 }
 
-static int ntmp_query_entry_by_id(struct ntmp_user *user, int tbl_id,
-				  u32 len, struct ntmp_req_by_eid *req,
-				  dma_addr_t dma, bool compare_eid)
+static int ntmp_query_entry_by_id(struct netc_cbdr *cbdr, int tbl_id,
+				  struct ntmp_req_by_eid *req,
+				  struct netc_swcbd *swcbd,
+				  bool compare_eid)
 {
+	u32 len = NTMP_LEN(sizeof(*req), swcbd->size);
 	struct ntmp_cmn_resp_query *resp;
 	int cmd = NTMP_CMD_QUERY;
 	union netc_cbd cbd;
···
 		cmd = NTMP_CMD_QU;
 
 	/* Request header */
-	ntmp_fill_request_hdr(&cbd, dma, len, tbl_id, cmd, NTMP_AM_ENTRY_ID);
-	err = netc_xmit_ntmp_cmd(user, &cbd);
+	ntmp_fill_request_hdr(&cbd, swcbd->dma, len, tbl_id, cmd,
+			      NTMP_AM_ENTRY_ID);
+	err = netc_xmit_ntmp_cmd(cbdr, &cbd, swcbd);
 	if (err) {
-		dev_err(user->dev,
+		dev_err(cbdr->dev,
 			"Failed to query entry 0x%x of %s, err: %pe\n",
 			entry_id, ntmp_table_name(tbl_id), ERR_PTR(err));
 		return err;
···
 	resp = (struct ntmp_cmn_resp_query *)req;
 	if (unlikely(le32_to_cpu(resp->entry_id) != entry_id)) {
-		dev_err(user->dev,
+		dev_err(cbdr->dev,
 			"%s: query EID 0x%x doesn't match response EID 0x%x\n",
 			ntmp_table_name(tbl_id), entry_id, le32_to_cpu(resp->entry_id));
 		return -EIO;
···
 int ntmp_maft_add_entry(struct ntmp_user *user, u32 entry_id,
 			struct maft_entry_data *maft)
 {
-	struct ntmp_dma_buf data = {
-		.dev = user->dev,
+	struct netc_swcbd swcbd = {
 		.size = sizeof(struct maft_req_add),
 	};
 	struct maft_req_add *req;
+	struct netc_cbdr *cbdr;
 	union netc_cbd cbd;
 	int err;
 
-	err = ntmp_alloc_data_mem(&data, (void **)&req);
+	err = ntmp_alloc_data_mem(user->dev, &swcbd, (void **)&req);
 	if (err)
 		return err;
···
 	req->keye = maft->keye;
 	req->cfge = maft->cfge;
 
-	ntmp_fill_request_hdr(&cbd, data.dma, NTMP_LEN(data.size, 0),
+	ntmp_fill_request_hdr(&cbd, swcbd.dma, NTMP_LEN(swcbd.size, 0),
 			      NTMP_MAFT_ID, NTMP_CMD_ADD, NTMP_AM_ENTRY_ID);
-	err = netc_xmit_ntmp_cmd(user, &cbd);
+
+	ntmp_select_and_lock_cbdr(user, &cbdr);
+	err = netc_xmit_ntmp_cmd(cbdr, &cbd, &swcbd);
 	if (err)
 		dev_err(user->dev, "Failed to add MAFT entry 0x%x, err: %pe\n",
 			entry_id, ERR_PTR(err));
-
-	ntmp_free_data_mem(&data);
+	ntmp_unlock_cbdr(cbdr);
 
 	return err;
 }
···
 int ntmp_maft_query_entry(struct ntmp_user *user, u32 entry_id,
 			  struct maft_entry_data *maft)
 {
-	struct ntmp_dma_buf data = {
-		.dev = user->dev,
+	struct netc_swcbd swcbd = {
 		.size = sizeof(struct maft_resp_query),
 	};
 	struct maft_resp_query *resp;
 	struct ntmp_req_by_eid *req;
+	struct netc_cbdr *cbdr;
 	int err;
 
-	err = ntmp_alloc_data_mem(&data, (void **)&req);
+	err = ntmp_alloc_data_mem(user->dev, &swcbd, (void **)&req);
 	if (err)
 		return err;
 
 	ntmp_fill_crd_eid(req, user->tbl.maft_ver, 0, 0, entry_id);
-	err = ntmp_query_entry_by_id(user, NTMP_MAFT_ID,
-				     NTMP_LEN(sizeof(*req), data.size),
-				     req, data.dma, true);
+
+	ntmp_select_and_lock_cbdr(user, &cbdr);
+	err = ntmp_query_entry_by_id(cbdr, NTMP_MAFT_ID, req, &swcbd, true);
 	if (err)
-		goto end;
+		goto unlock_cbdr;
 
 	resp = (struct maft_resp_query *)req;
 	maft->keye = resp->keye;
 	maft->cfge = resp->cfge;
 
-end:
-	ntmp_free_data_mem(&data);
+unlock_cbdr:
+	ntmp_unlock_cbdr(cbdr);
 
 	return err;
 }
···
 int ntmp_rsst_update_entry(struct ntmp_user *user, const u32 *table,
 			   int count)
 {
-	struct ntmp_dma_buf data = {.dev = user->dev};
 	struct rsst_req_update *req;
+	struct netc_swcbd swcbd;
+	struct netc_cbdr *cbdr;
 	union netc_cbd cbd;
 	int err, i;
···
 		/* HW only takes in a full 64 entry table */
 		return -EINVAL;
 
-	data.size = struct_size(req, groups, count);
-	err = ntmp_alloc_data_mem(&data, (void **)&req);
+	swcbd.size = struct_size(req, groups, count);
+	err = ntmp_alloc_data_mem(user->dev, &swcbd, (void **)&req);
 	if (err)
 		return err;
···
 	for (i = 0; i < count; i++)
 		req->groups[i] = (u8)(table[i]);
 
-	ntmp_fill_request_hdr(&cbd, data.dma, NTMP_LEN(data.size, 0),
+	ntmp_fill_request_hdr(&cbd, swcbd.dma, NTMP_LEN(swcbd.size, 0),
 			      NTMP_RSST_ID, NTMP_CMD_UPDATE, NTMP_AM_ENTRY_ID);
 
-	err = netc_xmit_ntmp_cmd(user, &cbd);
+	ntmp_select_and_lock_cbdr(user, &cbdr);
+	err = netc_xmit_ntmp_cmd(cbdr, &cbd, &swcbd);
 	if (err)
 		dev_err(user->dev, "Failed to update RSST entry, err: %pe\n",
 			ERR_PTR(err));
-
-	ntmp_free_data_mem(&data);
+	ntmp_unlock_cbdr(cbdr);
 
 	return err;
 }
···
 int ntmp_rsst_query_entry(struct ntmp_user *user, u32 *table, int count)
 {
-	struct ntmp_dma_buf data = {.dev = user->dev};
 	struct ntmp_req_by_eid *req;
+	struct netc_swcbd swcbd;
+	struct netc_cbdr *cbdr;
 	union netc_cbd cbd;
 	int err, i;
 	u8 *group;
···
 		/* HW only takes in a full 64 entry table */
 		return -EINVAL;
 
-	data.size = NTMP_ENTRY_ID_SIZE + RSST_STSE_DATA_SIZE(count) +
-		    RSST_CFGE_DATA_SIZE(count);
-	err = ntmp_alloc_data_mem(&data, (void **)&req);
+	swcbd.size = NTMP_ENTRY_ID_SIZE + RSST_STSE_DATA_SIZE(count) +
+		     RSST_CFGE_DATA_SIZE(count);
+	err = ntmp_alloc_data_mem(user->dev, &swcbd, (void **)&req);
 	if (err)
 		return err;
 
 	/* Set the request data buffer */
 	ntmp_fill_crd_eid(req, user->tbl.rsst_ver, 0, 0, 0);
-	ntmp_fill_request_hdr(&cbd, data.dma, NTMP_LEN(sizeof(*req), data.size),
+	ntmp_fill_request_hdr(&cbd, swcbd.dma, NTMP_LEN(sizeof(*req), swcbd.size),
 			      NTMP_RSST_ID, NTMP_CMD_QUERY, NTMP_AM_ENTRY_ID);
-	err = netc_xmit_ntmp_cmd(user, &cbd);
+
+	ntmp_select_and_lock_cbdr(user, &cbdr);
+	err = netc_xmit_ntmp_cmd(cbdr, &cbd, &swcbd);
 	if (err) {
 		dev_err(user->dev, "Failed to query RSST entry, err: %pe\n",
 			ERR_PTR(err));
-		goto end;
+		goto unlock_cbdr;
 	}
 
 	group = (u8 *)req;
···
 	for (i = 0; i < count; i++)
 		table[i] = group[i];
 
-end:
-	ntmp_free_data_mem(&data);
+unlock_cbdr:
+	ntmp_unlock_cbdr(cbdr);
 
 	return err;
 }
drivers/net/ethernet/freescale/enetc/ntmp_private.h: +1 -7
···
 #define NETC_CBDR_BD_NUM		256
 #define NETC_CBDRCIR_INDEX		GENMASK(9, 0)
 #define NETC_CBDRCIR_SBE		BIT(31)
+#define NETC_CBDR_CLEAN_WORK		16
 
 union netc_cbd {
 	struct {
···
 #define NTMP_RESP_RR			BIT(15)
 		__le32 resv1[4];
 	} resp_hdr; /* NTMP Response Message Header Format */
-};
-
-struct ntmp_dma_buf {
-	struct device *dev;
-	size_t size;
-	void *buf;
-	dma_addr_t dma;
 };
 
 struct ntmp_cmn_req_data {
include/linux/fsl/ntmp.h: +8 -1
···
 	u8 rsst_ver;
 };
 
+struct netc_swcbd {
+	void *buf;
+	dma_addr_t dma;
+	size_t size;
+};
+
 struct netc_cbdr {
 	struct device *dev;
 	struct netc_cbdr_regs regs;
···
 	void *addr_base_align;
 	dma_addr_t dma_base;
 	dma_addr_t dma_base_align;
+	struct netc_swcbd *swcbd;
 
 	/* Serialize the order of command BD ring */
-	spinlock_t ring_lock;
+	struct mutex ring_lock;
 };
 
 struct ntmp_user {