Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

RDMA/bnxt_re: Separate kernel and user CQ creation paths

Refactor the kernel and user CQ creation logic into two separate
code paths. The split will be used to support dmabuf-based user CQ
memory in the next patch. There is no functional change in this
patch.

Link: https://patch.msgid.link/r/20260302110036.36387-6-sriharsha.basavapatna@broadcom.com
Signed-off-by: Sriharsha Basavapatna <sriharsha.basavapatna@broadcom.com>
Reviewed-by: Selvin Xavier <selvin.xavier@broadcom.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
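
For reviewers skimming the diff, the control flow after this patch reduces to a dispatch on the presence of driver udata: userspace verbs calls carry udata and take the user path, kernel callers do not and take the kernel path. Below is a minimal standalone sketch of that shape; the struct and helper names are illustrative stand-ins, not the driver's actual types.

#include <stdio.h>
#include <stddef.h>

/* Illustrative stand-ins for the driver types; only the dispatch
 * shape mirrors bnxt_re_create_cq() after this patch.
 */
struct udata { const void *inbuf; };
struct cq { int is_user; };

/* Stand-in for the user path (in the driver: pins and maps the memory
 * described by udata; this is the function that later grows dmabuf
 * support).
 */
static int create_user_cq(struct cq *cq, const struct udata *udata)
{
	cq->is_user = 1;
	return 0;
}

/* Stand-in for the kernel path (in the driver: privileged DPI,
 * 4K-page sg_info, driver-allocated CQE list).
 */
static int create_kernel_cq(struct cq *cq)
{
	cq->is_user = 0;
	return 0;
}

static int create_cq(struct cq *cq, const struct udata *udata)
{
	/* Userspace verbs calls carry udata; kernel callers do not. */
	if (udata)
		return create_user_cq(cq, udata);
	return create_kernel_cq(cq);
}

int main(void)
{
	struct cq a, b;
	struct udata u = { .inbuf = "user request" };

	create_cq(&a, &u);   /* user path */
	create_cq(&b, NULL); /* kernel path */
	printf("a.is_user=%d b.is_user=%d\n", a.is_user, b.is_user);
	return 0;
}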

authored by Sriharsha Basavapatna, committed by Jason Gunthorpe
cec5157b 3d4a4236
+103 -54
drivers/infiniband/hw/bnxt_re/ib_verbs.c
···
 	return 0;
 }
 
-int bnxt_re_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
-		      struct uverbs_attr_bundle *attrs)
+static int bnxt_re_create_user_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
+				  struct uverbs_attr_bundle *attrs)
 {
 	struct bnxt_re_cq *cq = container_of(ibcq, struct bnxt_re_cq, ib_cq);
 	struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibcq->device, ibdev);
···
 		rdma_udata_to_drv_context(udata, struct bnxt_re_ucontext, ib_uctx);
 	struct bnxt_qplib_dev_attr *dev_attr = rdev->dev_attr;
 	struct bnxt_qplib_chip_ctx *cctx;
+	struct bnxt_re_cq_resp resp = {};
+	struct bnxt_re_cq_req req;
 	int cqe = attr->cqe;
 	int rc, entries;
 	u32 active_cqs;
···
 	if (entries > dev_attr->max_cq_wqes + 1)
 		entries = dev_attr->max_cq_wqes + 1;
 
-	if (udata) {
-		struct bnxt_re_cq_req req;
+	rc = ib_copy_validate_udata_in(udata, req, cq_handle);
+	if (rc)
+		return rc;
 
-		rc = ib_copy_validate_udata_in(udata, req, cq_handle);
-		if (rc)
-			goto fail;
-
-		cq->umem = ib_umem_get(&rdev->ibdev, req.cq_va,
-				       entries * sizeof(struct cq_base),
-				       IB_ACCESS_LOCAL_WRITE);
-		if (IS_ERR(cq->umem)) {
-			rc = PTR_ERR(cq->umem);
-			goto fail;
-		}
-		rc = bnxt_re_setup_sginfo(rdev, cq->umem, &cq->qplib_cq.sg_info);
-		if (rc)
-			goto fail;
-
-		cq->qplib_cq.dpi = &uctx->dpi;
-	} else {
-		cq->max_cql = min_t(u32, entries, MAX_CQL_PER_POLL);
-		cq->cql = kzalloc_objs(struct bnxt_qplib_cqe, cq->max_cql);
-		if (!cq->cql) {
-			rc = -ENOMEM;
-			goto fail;
-		}
-
-		cq->qplib_cq.sg_info.pgsize = SZ_4K;
-		cq->qplib_cq.sg_info.pgshft = __builtin_ctz(SZ_4K);
-		cq->qplib_cq.dpi = &rdev->dpi_privileged;
+	cq->umem = ib_umem_get(&rdev->ibdev, req.cq_va,
+			       entries * sizeof(struct cq_base),
+			       IB_ACCESS_LOCAL_WRITE);
+	if (IS_ERR(cq->umem)) {
+		rc = PTR_ERR(cq->umem);
+		return rc;
 	}
+
+	rc = bnxt_re_setup_sginfo(rdev, cq->umem, &cq->qplib_cq.sg_info);
+	if (rc)
+		goto fail;
+
+	cq->qplib_cq.dpi = &uctx->dpi;
 	cq->qplib_cq.max_wqe = entries;
 	cq->qplib_cq.coalescing = &rdev->cq_coalescing;
 	cq->qplib_cq.nq = bnxt_re_get_nq(rdev);
···
 
 	cq->ib_cq.cqe = entries;
 	cq->cq_period = cq->qplib_cq.period;
-
 	active_cqs = atomic_inc_return(&rdev->stats.res.cq_count);
 	if (active_cqs > rdev->stats.res.cq_watermark)
 		rdev->stats.res.cq_watermark = active_cqs;
 	spin_lock_init(&cq->cq_lock);
 
-	if (udata) {
-		struct bnxt_re_cq_resp resp = {};
-
-		if (cctx->modes.toggle_bits & BNXT_QPLIB_CQ_TOGGLE_BIT) {
-			hash_add(rdev->cq_hash, &cq->hash_entry, cq->qplib_cq.id);
-			/* Allocate a page */
-			cq->uctx_cq_page = (void *)get_zeroed_page(GFP_KERNEL);
-			if (!cq->uctx_cq_page) {
-				rc = -ENOMEM;
-				goto c2fail;
-			}
-			resp.comp_mask |= BNXT_RE_CQ_TOGGLE_PAGE_SUPPORT;
+	if (cctx->modes.toggle_bits & BNXT_QPLIB_CQ_TOGGLE_BIT) {
+		hash_add(rdev->cq_hash, &cq->hash_entry, cq->qplib_cq.id);
+		/* Allocate a page */
+		cq->uctx_cq_page = (void *)get_zeroed_page(GFP_KERNEL);
+		if (!cq->uctx_cq_page) {
+			rc = -ENOMEM;
+			goto fail;
 		}
-		resp.cqid = cq->qplib_cq.id;
-		resp.tail = cq->qplib_cq.hwq.cons;
-		resp.phase = cq->qplib_cq.period;
-		resp.rsvd = 0;
-		rc = ib_respond_udata(udata, resp);
-		if (rc) {
-			bnxt_qplib_destroy_cq(&rdev->qplib_res, &cq->qplib_cq);
-			goto free_mem;
-		}
-	}
+		resp.comp_mask |= BNXT_RE_CQ_TOGGLE_PAGE_SUPPORT;
+	}
+	resp.cqid = cq->qplib_cq.id;
+	resp.tail = cq->qplib_cq.hwq.cons;
+	resp.phase = cq->qplib_cq.period;
+	resp.rsvd = 0;
+	rc = ib_respond_udata(udata, resp);
+	if (rc) {
+		bnxt_qplib_destroy_cq(&rdev->qplib_res, &cq->qplib_cq);
+		goto free_mem;
+	}
 
 	return 0;
 
 free_mem:
 	free_page((unsigned long)cq->uctx_cq_page);
-c2fail:
+fail:
 	ib_umem_release(cq->umem);
+	return rc;
+}
+
+int bnxt_re_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
+		      struct uverbs_attr_bundle *attrs)
+{
+	struct bnxt_re_cq *cq = container_of(ibcq, struct bnxt_re_cq, ib_cq);
+	struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibcq->device, ibdev);
+	struct ib_udata *udata = &attrs->driver_udata;
+	struct bnxt_re_ucontext *uctx =
+		rdma_udata_to_drv_context(udata, struct bnxt_re_ucontext, ib_uctx);
+	struct bnxt_qplib_dev_attr *dev_attr = rdev->dev_attr;
+	struct bnxt_qplib_chip_ctx *cctx;
+	int cqe = attr->cqe;
+	int rc, entries;
+	u32 active_cqs;
+
+	if (udata)
+		return bnxt_re_create_user_cq(ibcq, attr, attrs);
+
+	if (attr->flags)
+		return -EOPNOTSUPP;
+
+	/* Validate CQ fields */
+	if (cqe < 1 || cqe > dev_attr->max_cq_wqes) {
+		ibdev_err(&rdev->ibdev, "Failed to create CQ -max exceeded");
+		return -EINVAL;
+	}
+
+	cq->rdev = rdev;
+	cctx = rdev->chip_ctx;
+	cq->qplib_cq.cq_handle = (u64)(unsigned long)(&cq->qplib_cq);
+
+	entries = bnxt_re_init_depth(cqe + 1, uctx);
+	if (entries > dev_attr->max_cq_wqes + 1)
+		entries = dev_attr->max_cq_wqes + 1;
+
+	cq->max_cql = min_t(u32, entries, MAX_CQL_PER_POLL);
+	cq->cql = kcalloc(cq->max_cql, sizeof(struct bnxt_qplib_cqe),
+			  GFP_KERNEL);
+	if (!cq->cql)
+		return -ENOMEM;
+
+	cq->qplib_cq.sg_info.pgsize = SZ_4K;
+	cq->qplib_cq.sg_info.pgshft = __builtin_ctz(SZ_4K);
+	cq->qplib_cq.dpi = &rdev->dpi_privileged;
+	cq->qplib_cq.max_wqe = entries;
+	cq->qplib_cq.coalescing = &rdev->cq_coalescing;
+	cq->qplib_cq.nq = bnxt_re_get_nq(rdev);
+	cq->qplib_cq.cnq_hw_ring_id = cq->qplib_cq.nq->ring_id;
+
+	rc = bnxt_qplib_create_cq(&rdev->qplib_res, &cq->qplib_cq);
+	if (rc) {
+		ibdev_err(&rdev->ibdev, "Failed to create HW CQ");
+		goto fail;
+	}
+
+	cq->ib_cq.cqe = entries;
+	cq->cq_period = cq->qplib_cq.period;
+	active_cqs = atomic_inc_return(&rdev->stats.res.cq_count);
+	if (active_cqs > rdev->stats.res.cq_watermark)
+		rdev->stats.res.cq_watermark = active_cqs;
+	spin_lock_init(&cq->cq_lock);
+
+	return 0;
+
 fail:
 	kfree(cq->cql);
 	return rc;
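
For context on how the two paths are reached: in-kernel ULPs call the core verbs API, which invokes the driver's create_cq verb with no udata and thus takes the new kernel-only branch, while userspace requests arrive through uverbs with driver_udata populated and land in bnxt_re_create_user_cq(). A sketch of an in-kernel caller follows; 'ibdev' and the sizing values are placeholders, not taken from this patch.

#include <rdma/ib_verbs.h>

/* Illustrative only: a kernel ULP allocating a CQ on the device. The
 * core ib_alloc_cq() helper reaches the driver's create_cq verb
 * without udata, i.e. the kernel-only path introduced above.
 */
static struct ib_cq *example_alloc_cq(struct ib_device *ibdev)
{
	return ib_alloc_cq(ibdev, NULL /* private ctx */,
			   256 /* nr_cqe */, 0 /* comp_vector */,
			   IB_POLL_SOFTIRQ);
}

Callers check the result with IS_ERR() and release the CQ with ib_free_cq().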