Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

RDMA/bnxt_re: Support application specific CQs

This patch adds support for application-allocated memory for CQs.

The application allocates and manages the CQ memory directly. To support
this, the driver exports a new comp_mask bit to indicate direct control
of the CQ. When this bit is set in the ureq, the driver maps the
application-allocated CQ memory into hardware. Because the application
manages this memory, the CQ depth ('cqe') it passes must be used as-is
and the driver must not update it.
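
To make the new ureq handshake concrete, here is a minimal sketch of how
a userspace provider might fill the request for an application-managed
CQ. It relies only on the ABI added to include/uapi/rdma/bnxt_re-abi.h
below; the fill_cq_req() helper and its arguments are illustrative, not
part of this patch.

	/* Sketch: populate bnxt_re_cq_req for an application-managed CQ.
	 * Assumes the patched uapi header, which adds comp_mask and
	 * BNXT_RE_CQ_FIXED_NUM_CQE_ENABLE.
	 */
	#include <stdint.h>
	#include <rdma/bnxt_re-abi.h>

	static void fill_cq_req(struct bnxt_re_cq_req *req,
				void *app_cq_buf, uint64_t handle)
	{
		/* CQ ring allocated and owned by the application */
		req->cq_va = (uintptr_t)app_cq_buf;
		req->cq_handle = handle;
		/* Direct control: the driver must use the passed CQ
		 * depth ('cqe') as-is and must not update it.
		 */
		req->comp_mask = BNXT_RE_CQ_FIXED_NUM_CQE_ENABLE;
	}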

For CQs, ib_core supports pinning dmabuf-based application memory,
specified through provider attributes. This umem is managed by ib_core
and is available in ib_cq. Register the 'create_user_cq' devop to
process this umem. The driver also supports the legacy interface that
allocates the umem internally; the condensed flow is sketched below.
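
The sketch below condenses the control flow of bnxt_re_create_user_cq()
from the ib_verbs.c hunk that follows; it is an excerpt, not a complete
function, and the error path is simplified to a direct return.

	/* Application-managed CQ: honor the requested depth ('cqe') as-is. */
	if (req.comp_mask & BNXT_RE_CQ_FIXED_NUM_CQE_ENABLE)
		entries = cqe;

	/* If ib_core has already pinned the application's dmabuf memory
	 * into ibcq->umem, reuse it; otherwise take the legacy path and
	 * pin the user VA here.
	 */
	if (!ibcq->umem) {
		ibcq->umem = ib_umem_get(&rdev->ibdev, req.cq_va,
					 entries * sizeof(struct cq_base),
					 IB_ACCESS_LOCAL_WRITE);
		if (IS_ERR(ibcq->umem))
			return PTR_ERR(ibcq->umem);
	}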

Link: https://patch.msgid.link/r/20260302110036.36387-7-sriharsha.basavapatna@broadcom.com
Signed-off-by: Sriharsha Basavapatna <sriharsha.basavapatna@broadcom.com>
Reviewed-by: Selvin Xavier <selvin.xavier@broadcom.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>

Authored by Sriharsha Basavapatna, committed by Jason Gunthorpe
(a06165a7, cec5157b)

+28 -19 (4 files)

drivers/infiniband/hw/bnxt_re/ib_verbs.c (+19 -17)

 ···
 	bnxt_qplib_destroy_cq(&rdev->qplib_res, &cq->qplib_cq);

 	bnxt_re_put_nq(rdev, nq);
-	ib_umem_release(cq->umem);

 	atomic_dec(&rdev->stats.res.cq_count);
 	kfree(cq->cql);
 ···
 	return 0;
 }

-static int bnxt_re_create_user_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
-				  struct uverbs_attr_bundle *attrs)
+int bnxt_re_create_user_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
+			   struct uverbs_attr_bundle *attrs)
 {
 	struct bnxt_re_cq *cq = container_of(ibcq, struct bnxt_re_cq, ib_cq);
 	struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibcq->device, ibdev);
 ···
 	if (entries > dev_attr->max_cq_wqes + 1)
 		entries = dev_attr->max_cq_wqes + 1;

-	rc = ib_copy_validate_udata_in(udata, req, cq_handle);
+	rc = ib_copy_validate_udata_in_cm(udata, req, cq_handle,
+					  BNXT_RE_CQ_FIXED_NUM_CQE_ENABLE);
 	if (rc)
 		return rc;

-	cq->umem = ib_umem_get(&rdev->ibdev, req.cq_va,
-			       entries * sizeof(struct cq_base),
-			       IB_ACCESS_LOCAL_WRITE);
-	if (IS_ERR(cq->umem)) {
-		rc = PTR_ERR(cq->umem);
-		return rc;
+	if (req.comp_mask & BNXT_RE_CQ_FIXED_NUM_CQE_ENABLE)
+		entries = cqe;
+
+	if (!ibcq->umem) {
+		ibcq->umem = ib_umem_get(&rdev->ibdev, req.cq_va,
+					 entries * sizeof(struct cq_base),
+					 IB_ACCESS_LOCAL_WRITE);
+		if (IS_ERR(ibcq->umem)) {
+			rc = PTR_ERR(ibcq->umem);
+			goto fail;
+		}
 	}

-	rc = bnxt_re_setup_sginfo(rdev, cq->umem, &cq->qplib_cq.sg_info);
+	rc = bnxt_re_setup_sginfo(rdev, ibcq->umem, &cq->qplib_cq.sg_info);
 	if (rc)
 		goto fail;
 ···
 free_mem:
 	free_page((unsigned long)cq->uctx_cq_page);
 fail:
-	ib_umem_release(cq->umem);
 	return rc;
 }
 ···
 	struct bnxt_re_ucontext *uctx =
 		rdma_udata_to_drv_context(udata, struct bnxt_re_ucontext, ib_uctx);
 	struct bnxt_qplib_dev_attr *dev_attr = rdev->dev_attr;
-	struct bnxt_qplib_chip_ctx *cctx;
 	int cqe = attr->cqe;
 	int rc, entries;
 	u32 active_cqs;
 ···
 	}

 	cq->rdev = rdev;
-	cctx = rdev->chip_ctx;
 	cq->qplib_cq.cq_handle = (u64)(unsigned long)(&cq->qplib_cq);

 	entries = bnxt_re_init_depth(cqe + 1, uctx);
 ···
 	cq->qplib_cq.max_wqe = cq->resize_cqe;
 	if (cq->resize_umem) {
-		ib_umem_release(cq->umem);
-		cq->umem = cq->resize_umem;
+		ib_umem_release(cq->ib_cq.umem);
+		cq->ib_cq.umem = cq->resize_umem;
 		cq->resize_umem = NULL;
 		cq->resize_cqe = 0;
 	}
 ···
 	/* User CQ; the only processing we do is to
 	 * complete any pending CQ resize operation.
 	 */
-	if (cq->umem) {
+	if (cq->ib_cq.umem) {
 		if (cq->resize_umem)
 			bnxt_re_resize_cq_complete(cq);
 		return 0;
drivers/infiniband/hw/bnxt_re/ib_verbs.h (+2 -1)

 ···
 	struct bnxt_qplib_cqe *cql;
 #define MAX_CQL_PER_POLL 1024
 	u32 max_cql;
-	struct ib_umem *umem;
 	struct ib_umem *resize_umem;
 	int resize_cqe;
 	void *uctx_cq_page;
 ···
 		       const struct ib_recv_wr **bad_recv_wr);
 int bnxt_re_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
 		      struct uverbs_attr_bundle *attrs);
+int bnxt_re_create_user_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
+			   struct uverbs_attr_bundle *attrs);
 int bnxt_re_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata);
 int bnxt_re_destroy_cq(struct ib_cq *cq, struct ib_udata *udata);
 int bnxt_re_poll_cq(struct ib_cq *cq, int num_entries, struct ib_wc *wc);
drivers/infiniband/hw/bnxt_re/main.c (+1)

 ···
 	.alloc_ucontext = bnxt_re_alloc_ucontext,
 	.create_ah = bnxt_re_create_ah,
 	.create_cq = bnxt_re_create_cq,
+	.create_user_cq = bnxt_re_create_user_cq,
 	.create_qp = bnxt_re_create_qp,
 	.create_srq = bnxt_re_create_srq,
 	.create_user_ah = bnxt_re_create_ah,
include/uapi/rdma/bnxt_re-abi.h (+6 -1)

 ···
 struct bnxt_re_cq_req {
 	__aligned_u64 cq_va;
 	__aligned_u64 cq_handle;
+	__aligned_u64 comp_mask;
 };

-enum bnxt_re_cq_mask {
+enum bnxt_re_resp_cq_mask {
 	BNXT_RE_CQ_TOGGLE_PAGE_SUPPORT = 0x1,
+};
+
+enum bnxt_re_req_cq_mask {
+	BNXT_RE_CQ_FIXED_NUM_CQE_ENABLE = 0x1,
 };

 struct bnxt_re_cq_resp {
 ···