Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

misc: fastrpc: Rename phys to dma_addr for clarity

The fields buf->phys and map->phys currently store DMA addresses
returned by the dma_map_*() APIs, not physical addresses. This
naming is misleading and may lead to incorrect assumptions about
the address type and its translation.

Rename these fields from phys to dma_addr to improve code clarity
and align with kernel conventions for dma_addr_t usage.

Signed-off-by: Kumari Pallavi <kumari.pallavi@oss.qualcomm.com>
Reviewed-by: Dmitry Baryshkov <dmitry.baryshkov@oss.qualcomm.com>
Link: https://patch.msgid.link/20251226070534.602021-3-kumari.pallavi@oss.qualcomm.com
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

authored by

Kumari Pallavi and committed by
Greg Kroah-Hartman
428b2f2b 53da3f51

+41 -36
+41 -36
drivers/misc/fastrpc.c
··· 106 106 #define miscdev_to_fdevice(d) container_of(d, struct fastrpc_device, miscdev) 107 107 108 108 struct fastrpc_phy_page { 109 - u64 addr; /* physical address */ 109 + dma_addr_t addr; /* dma address */ 110 110 u64 size; /* size of contiguous region */ 111 111 }; 112 112 ··· 171 171 u64 ctx; /* invoke caller context */ 172 172 u32 handle; /* handle to invoke */ 173 173 u32 sc; /* scalars structure describing the data */ 174 - u64 addr; /* physical address */ 174 + dma_addr_t addr; /* dma address */ 175 175 u64 size; /* size of contiguous region */ 176 176 }; 177 177 ··· 194 194 struct dma_buf *dmabuf; 195 195 struct device *dev; 196 196 void *virt; 197 - u64 phys; 197 + dma_addr_t dma_addr; 198 198 u64 size; 199 199 /* Lock for dma buf attachments */ 200 200 struct mutex lock; ··· 217 217 struct dma_buf *buf; 218 218 struct sg_table *table; 219 219 struct dma_buf_attachment *attach; 220 - u64 phys; 220 + dma_addr_t dma_addr; 221 221 u64 size; 222 222 void *va; 223 223 u64 len; ··· 320 320 321 321 perm.vmid = QCOM_SCM_VMID_HLOS; 322 322 perm.perm = QCOM_SCM_PERM_RWX; 323 - err = qcom_scm_assign_mem(map->phys, map->len, 323 + err = qcom_scm_assign_mem(map->dma_addr, map->len, 324 324 &src_perms, &perm, 1); 325 325 if (err) { 326 - dev_err(map->fl->sctx->dev, "Failed to assign memory phys 0x%llx size 0x%llx err %d\n", 327 - map->phys, map->len, err); 326 + dev_err(map->fl->sctx->dev, 327 + "Failed to assign memory dma_addr %pad size 0x%llx err %d\n", 328 + &map->dma_addr, map->len, err); 328 329 return; 329 330 } 330 331 } ··· 390 389 static void fastrpc_buf_free(struct fastrpc_buf *buf) 391 390 { 392 391 dma_free_coherent(buf->dev, buf->size, buf->virt, 393 - FASTRPC_PHYS(buf->phys)); 392 + FASTRPC_PHYS(buf->dma_addr)); 394 393 kfree(buf); 395 394 } 396 395 ··· 409 408 410 409 buf->fl = fl; 411 410 buf->virt = NULL; 412 - buf->phys = 0; 411 + buf->dma_addr = 0; 413 412 buf->size = size; 414 413 buf->dev = dev; 415 414 buf->raddr = 0; 416 415 417 - buf->virt 
= dma_alloc_coherent(dev, buf->size, (dma_addr_t *)&buf->phys, 416 + buf->virt = dma_alloc_coherent(dev, buf->size, &buf->dma_addr, 418 417 GFP_KERNEL); 419 418 if (!buf->virt) { 420 419 mutex_destroy(&buf->lock); ··· 440 439 buf = *obuf; 441 440 442 441 if (fl->sctx && fl->sctx->sid) 443 - buf->phys += ((u64)fl->sctx->sid << 32); 442 + buf->dma_addr += ((u64)fl->sctx->sid << 32); 444 443 445 444 return 0; 446 445 } ··· 685 684 return -ENOMEM; 686 685 687 686 ret = dma_get_sgtable(buffer->dev, &a->sgt, buffer->virt, 688 - FASTRPC_PHYS(buffer->phys), buffer->size); 687 + FASTRPC_PHYS(buffer->dma_addr), buffer->size); 689 688 if (ret < 0) { 690 689 dev_err(buffer->dev, "failed to get scatterlist from DMA API\n"); 691 690 kfree(a); ··· 734 733 dma_resv_assert_held(dmabuf->resv); 735 734 736 735 return dma_mmap_coherent(buf->dev, vma, buf->virt, 737 - FASTRPC_PHYS(buf->phys), size); 736 + FASTRPC_PHYS(buf->dma_addr), size); 738 737 } 739 738 740 739 static const struct dma_buf_ops fastrpc_dma_buf_ops = { ··· 786 785 map->table = table; 787 786 788 787 if (attr & FASTRPC_ATTR_SECUREMAP) { 789 - map->phys = sg_phys(map->table->sgl); 788 + map->dma_addr = sg_phys(map->table->sgl); 790 789 } else { 791 - map->phys = sg_dma_address(map->table->sgl); 792 - map->phys += ((u64)fl->sctx->sid << 32); 790 + map->dma_addr = sg_dma_address(map->table->sgl); 791 + map->dma_addr += ((u64)fl->sctx->sid << 32); 793 792 } 794 793 for_each_sg(map->table->sgl, sgl, map->table->nents, 795 794 sgl_index) ··· 816 815 dst_perms[1].vmid = fl->cctx->vmperms[0].vmid; 817 816 dst_perms[1].perm = QCOM_SCM_PERM_RWX; 818 817 map->attr = attr; 819 - err = qcom_scm_assign_mem(map->phys, (u64)map->len, &src_perms, dst_perms, 2); 818 + err = qcom_scm_assign_mem(map->dma_addr, (u64)map->len, &src_perms, dst_perms, 2); 820 819 if (err) { 821 - dev_err(sess->dev, "Failed to assign memory with phys 0x%llx size 0x%llx err %d\n", 822 - map->phys, map->len, err); 820 + dev_err(sess->dev, 821 + "Failed to 
assign memory with dma_addr %pad size 0x%llx err %d\n", 822 + &map->dma_addr, map->len, err); 823 823 goto map_err; 824 824 } 825 825 } ··· 1011 1009 struct vm_area_struct *vma = NULL; 1012 1010 1013 1011 rpra[i].buf.pv = (u64) ctx->args[i].ptr; 1014 - pages[i].addr = ctx->maps[i]->phys; 1012 + pages[i].addr = ctx->maps[i]->dma_addr; 1015 1013 1016 1014 mmap_read_lock(current->mm); 1017 1015 vma = find_vma(current->mm, ctx->args[i].ptr); ··· 1038 1036 goto bail; 1039 1037 1040 1038 rpra[i].buf.pv = args - ctx->olaps[oix].offset; 1041 - pages[i].addr = ctx->buf->phys - 1039 + pages[i].addr = ctx->buf->dma_addr - 1042 1040 ctx->olaps[oix].offset + 1043 1041 (pkt_size - rlen); 1044 1042 pages[i].addr = pages[i].addr & PAGE_MASK; ··· 1070 1068 list[i].num = ctx->args[i].length ? 1 : 0; 1071 1069 list[i].pgidx = i; 1072 1070 if (ctx->maps[i]) { 1073 - pages[i].addr = ctx->maps[i]->phys; 1071 + pages[i].addr = ctx->maps[i]->dma_addr; 1074 1072 pages[i].size = ctx->maps[i]->size; 1075 1073 } 1076 1074 rpra[i].dma.fd = ctx->args[i].fd; ··· 1152 1150 msg->ctx = ctx->ctxid | fl->pd; 1153 1151 msg->handle = handle; 1154 1152 msg->sc = ctx->sc; 1155 - msg->addr = ctx->buf ? ctx->buf->phys : 0; 1153 + msg->addr = ctx->buf ? 
ctx->buf->dma_addr : 0; 1156 1154 msg->size = roundup(ctx->msg_sz, PAGE_SIZE); 1157 1155 fastrpc_context_get(ctx); 1158 1156 ··· 1308 1306 if (fl->cctx->vmcount) { 1309 1307 u64 src_perms = BIT(QCOM_SCM_VMID_HLOS); 1310 1308 1311 - err = qcom_scm_assign_mem(fl->cctx->remote_heap->phys, 1309 + err = qcom_scm_assign_mem(fl->cctx->remote_heap->dma_addr, 1312 1310 (u64)fl->cctx->remote_heap->size, 1313 1311 &src_perms, 1314 1312 fl->cctx->vmperms, fl->cctx->vmcount); 1315 1313 if (err) { 1316 - dev_err(fl->sctx->dev, "Failed to assign memory with phys 0x%llx size 0x%llx err %d\n", 1317 - fl->cctx->remote_heap->phys, fl->cctx->remote_heap->size, err); 1314 + dev_err(fl->sctx->dev, 1315 + "Failed to assign memory with dma_addr %pad size 0x%llx err %d\n", 1316 + &fl->cctx->remote_heap->dma_addr, 1317 + fl->cctx->remote_heap->size, err); 1318 1318 goto err_map; 1319 1319 } 1320 1320 scm_done = true; ··· 1336 1332 args[1].length = inbuf.namelen; 1337 1333 args[1].fd = -1; 1338 1334 1339 - pages[0].addr = fl->cctx->remote_heap->phys; 1335 + pages[0].addr = fl->cctx->remote_heap->dma_addr; 1340 1336 pages[0].size = fl->cctx->remote_heap->size; 1341 1337 1342 1338 args[2].ptr = (u64)(uintptr_t) pages; ··· 1365 1361 1366 1362 dst_perms.vmid = QCOM_SCM_VMID_HLOS; 1367 1363 dst_perms.perm = QCOM_SCM_PERM_RWX; 1368 - err = qcom_scm_assign_mem(fl->cctx->remote_heap->phys, 1364 + err = qcom_scm_assign_mem(fl->cctx->remote_heap->dma_addr, 1369 1365 (u64)fl->cctx->remote_heap->size, 1370 1366 &src_perms, &dst_perms, 1); 1371 1367 if (err) 1372 - dev_err(fl->sctx->dev, "Failed to assign memory phys 0x%llx size 0x%llx err %d\n", 1373 - fl->cctx->remote_heap->phys, fl->cctx->remote_heap->size, err); 1368 + dev_err(fl->sctx->dev, "Failed to assign memory dma_addr %pad size 0x%llx err %d\n", 1369 + &fl->cctx->remote_heap->dma_addr, fl->cctx->remote_heap->size, err); 1374 1370 } 1375 1371 err_map: 1376 1372 fastrpc_buf_free(fl->cctx->remote_heap); ··· 1459 1455 args[2].length = 
inbuf.filelen; 1460 1456 args[2].fd = init.filefd; 1461 1457 1462 - pages[0].addr = imem->phys; 1458 + pages[0].addr = imem->dma_addr; 1463 1459 pages[0].size = imem->size; 1464 1460 1465 1461 args[3].ptr = (u64)(uintptr_t) pages; ··· 1917 1913 args[0].ptr = (u64) (uintptr_t) &req_msg; 1918 1914 args[0].length = sizeof(req_msg); 1919 1915 1920 - pages.addr = buf->phys; 1916 + pages.addr = buf->dma_addr; 1921 1917 pages.size = buf->size; 1922 1918 1923 1919 args[1].ptr = (u64) (uintptr_t) &pages; ··· 1945 1941 if (req.flags == ADSP_MMAP_REMOTE_HEAP_ADDR && fl->cctx->vmcount) { 1946 1942 u64 src_perms = BIT(QCOM_SCM_VMID_HLOS); 1947 1943 1948 - err = qcom_scm_assign_mem(buf->phys, (u64)buf->size, 1944 + err = qcom_scm_assign_mem(buf->dma_addr, (u64)buf->size, 1949 1945 &src_perms, fl->cctx->vmperms, fl->cctx->vmcount); 1950 1946 if (err) { 1951 - dev_err(fl->sctx->dev, "Failed to assign memory phys 0x%llx size 0x%llx err %d", 1952 - buf->phys, buf->size, err); 1947 + dev_err(fl->sctx->dev, 1948 + "Failed to assign memory dma_addr %pad size 0x%llx err %d", 1949 + &buf->dma_addr, buf->size, err); 1953 1950 goto err_assign; 1954 1951 } 1955 1952 } ··· 2064 2059 args[0].ptr = (u64) (uintptr_t) &req_msg; 2065 2060 args[0].length = sizeof(req_msg); 2066 2061 2067 - pages.addr = map->phys; 2062 + pages.addr = map->dma_addr; 2068 2063 pages.size = map->len; 2069 2064 2070 2065 args[1].ptr = (u64) (uintptr_t) &pages;