Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git


Merge branch 'for-linus' of master.kernel.org:/pub/scm/linux/kernel/git/roland/infiniband

* 'for-linus' of master.kernel.org:/pub/scm/linux/kernel/git/roland/infiniband:
IPoIB/cm: Drain cq in ipoib_cm_dev_stop()
IPoIB/cm: Fix timeout check in ipoib_cm_dev_stop()
IB/ehca: Fix number of send WRs reported for new QP
IB/mlx4: Initialize send queue entry ownership bits
IB/mlx4: Don't allocate RQ doorbell if using SRQ

5 changed files: +60 -36
drivers/infiniband/hw/ehca/hcp_if.c (+1 -1)
···
 					0);
 	qp->ipz_qp_handle.handle = outs[0];
 	qp->real_qp_num = (u32)outs[1];
-	parms->act_nr_send_sges =
+	parms->act_nr_send_wqes =
 		(u16)EHCA_BMASK_GET(H_ALL_RES_QP_ACT_OUTST_SEND_WR, outs[2]);
 	parms->act_nr_recv_wqes =
 		(u16)EHCA_BMASK_GET(H_ALL_RES_QP_ACT_OUTST_RECV_WR, outs[2]);
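The one-line fix above matters because the hypervisor call packs both actual work-request counts into a single output word: the old code unpacked the send count into act_nr_send_sges, clobbering the SGE field and leaving act_nr_send_wqes unset, so a new QP misreported its send-queue depth. A minimal userspace sketch of the pack/unpack idea, with a made-up field layout and a simplified stand-in for EHCA_BMASK_GET (the real ehca macros are more involved):

#include <stdint.h>
#include <stdio.h>

/* Hypothetical, simplified stand-in for the ehca bitmask macros:
 * a field is described here by an explicit shift and width. */
#define GET_FIELD(val, shift, width) \
	(((val) >> (shift)) & ((1ULL << (width)) - 1))

int main(void)
{
	/* Suppose firmware packs the actual send WR count in bits 48..63
	 * and the recv WR count in bits 32..47 of one 64-bit output
	 * (made-up layout, purely for illustration). */
	uint64_t outs2 = ((uint64_t)128 << 48) | ((uint64_t)256 << 32);

	uint16_t act_nr_send_wqes = (uint16_t)GET_FIELD(outs2, 48, 16);
	uint16_t act_nr_recv_wqes = (uint16_t)GET_FIELD(outs2, 32, 16);

	/* Storing the send count into the SGE field, as the old code did,
	 * leaves the WQE count stale and misreports QP capacity. */
	printf("send WQEs: %u, recv WQEs: %u\n",
	       act_nr_send_wqes, act_nr_recv_wqes);
	return 0;
}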
drivers/infiniband/hw/mlx4/qp.c (+37 -22)
···
 			    struct ib_qp_init_attr *init_attr,
 			    struct ib_udata *udata, int sqpn, struct mlx4_ib_qp *qp)
 {
-	struct mlx4_wqe_ctrl_seg *ctrl;
 	int err;
-	int i;
 
 	mutex_init(&qp->mutex);
 	spin_lock_init(&qp->sq.lock);
···
 		if (err)
 			goto err_mtt;
 
-		err = mlx4_ib_db_map_user(to_mucontext(pd->uobject->context),
-					  ucmd.db_addr, &qp->db);
-		if (err)
-			goto err_mtt;
+		if (!init_attr->srq) {
+			err = mlx4_ib_db_map_user(to_mucontext(pd->uobject->context),
+						  ucmd.db_addr, &qp->db);
+			if (err)
+				goto err_mtt;
+		}
 	} else {
 		err = set_kernel_sq_size(dev, &init_attr->cap, init_attr->qp_type, qp);
 		if (err)
 			goto err;
 
-		err = mlx4_ib_db_alloc(dev, &qp->db, 0);
-		if (err)
-			goto err;
+		if (!init_attr->srq) {
+			err = mlx4_ib_db_alloc(dev, &qp->db, 0);
+			if (err)
+				goto err;
 
-		*qp->db.db = 0;
+			*qp->db.db = 0;
+		}
 
 		if (mlx4_buf_alloc(dev->dev, qp->buf_size, PAGE_SIZE * 2, &qp->buf)) {
 			err = -ENOMEM;
···
 	err = mlx4_buf_write_mtt(dev->dev, &qp->mtt, &qp->buf);
 	if (err)
 		goto err_mtt;
-
-	for (i = 0; i < qp->sq.max; ++i) {
-		ctrl = get_send_wqe(qp, i);
-		ctrl->owner_opcode = cpu_to_be32(1 << 31);
-	}
 
 	qp->sq.wrid = kmalloc(qp->sq.max * sizeof (u64), GFP_KERNEL);
 	qp->rq.wrid = kmalloc(qp->rq.max * sizeof (u64), GFP_KERNEL);
···
 	return 0;
 
 err_wrid:
-	if (pd->uobject)
+	if (pd->uobject && !init_attr->srq)
 		mlx4_ib_db_unmap_user(to_mucontext(pd->uobject->context), &qp->db);
 	else {
 		kfree(qp->sq.wrid);
···
 	mlx4_buf_free(dev->dev, qp->buf_size, &qp->buf);
 
 err_db:
-	if (!pd->uobject)
+	if (!pd->uobject && !init_attr->srq)
 		mlx4_ib_db_free(dev, &qp->db);
 
 err:
···
 	mlx4_mtt_cleanup(dev->dev, &qp->mtt);
 
 	if (is_user) {
-		mlx4_ib_db_unmap_user(to_mucontext(qp->ibqp.uobject->context),
-				      &qp->db);
+		if (!qp->ibqp.srq)
+			mlx4_ib_db_unmap_user(to_mucontext(qp->ibqp.uobject->context),
+					      &qp->db);
 		ib_umem_release(qp->umem);
 	} else {
 		kfree(qp->sq.wrid);
 		kfree(qp->rq.wrid);
 		mlx4_buf_free(dev->dev, qp->buf_size, &qp->buf);
-		mlx4_ib_db_free(dev, &qp->db);
+		if (!qp->ibqp.srq)
+			mlx4_ib_db_free(dev, &qp->db);
 	}
 }
···
 	if (ibqp->srq)
 		context->srqn = cpu_to_be32(1 << 24 | to_msrq(ibqp->srq)->msrq.srqn);
 
-	if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT)
+	if (!ibqp->srq && cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT)
 		context->db_rec_addr = cpu_to_be64(qp->db.dma);
 
 	if (cur_state == IB_QPS_INIT &&
···
 		sqd_event = 1;
 	else
 		sqd_event = 0;
 
+	/*
+	 * Before passing a kernel QP to the HW, make sure that the
+	 * ownership bits of the send queue are set so that the
+	 * hardware doesn't start processing stale work requests.
+	 */
+	if (!ibqp->uobject && cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
+		struct mlx4_wqe_ctrl_seg *ctrl;
+		int i;
+
+		for (i = 0; i < qp->sq.max; ++i) {
+			ctrl = get_send_wqe(qp, i);
+			ctrl->owner_opcode = cpu_to_be32(1 << 31);
+		}
+	}
 
 	err = mlx4_qp_modify(dev->dev, &qp->mtt, to_mlx4_state(cur_state),
 			     to_mlx4_state(new_state), context, optpar,
···
 		qp->rq.tail = 0;
 		qp->sq.head = 0;
 		qp->sq.tail = 0;
-		*qp->db.db = 0;
+		if (!ibqp->srq)
+			*qp->db.db = 0;
 	}
 
 out:
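Two fixes land in this file. First, a QP attached to an SRQ never posts receive work requests of its own, so its RQ doorbell record is dead weight; the diff skips allocating, mapping, resetting, and freeing qp->db whenever init_attr->srq (or ibqp->srq) is set. Second, stamping of send-queue ownership bits moves from QP creation to the RESET-to-INIT transition, so the stamp is reapplied every time a kernel QP cycles back through RESET rather than only once at creation. A standalone sketch of the ownership-bit idea, with a hypothetical trimmed-down WQE control segment (the real driver byte-swaps the word with cpu_to_be32()):

#include <stdint.h>

/* Hypothetical, simplified control segment: each send WQE begins with
 * a word whose top bit says whether hardware may consume the entry. */
struct wqe_ctrl {
	uint32_t owner_opcode;	/* bit 31 = ownership, low bits = opcode */
	/* ... the rest of the WQE follows in the real layout ... */
};

#define WQE_OWNER_BIT	(1U << 31)

/* Stamp every entry as not-yet-posted before handing the ring to the
 * device; otherwise leftover memory contents could be mistaken for
 * valid work requests and executed. */
static void stamp_send_queue(struct wqe_ctrl *sq, int sq_max)
{
	int i;

	for (i = 0; i < sq_max; ++i)
		sq[i].owner_opcode = WQE_OWNER_BIT;
}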
drivers/infiniband/ulp/ipoib/ipoib.h (+1)
···
 
 void ipoib_pkey_poll(struct work_struct *work);
 int ipoib_pkey_dev_delay_open(struct net_device *dev);
+void ipoib_drain_cq(struct net_device *dev);
 
 #ifdef CONFIG_INFINIBAND_IPOIB_CM
 
drivers/infiniband/ulp/ipoib/ipoib_cm.c (+2 -1)
···
 	while (!list_empty(&priv->cm.rx_error_list) ||
 	       !list_empty(&priv->cm.rx_flush_list) ||
 	       !list_empty(&priv->cm.rx_drain_list)) {
-		if (!time_after(jiffies, begin + 5 * HZ)) {
+		if (time_after(jiffies, begin + 5 * HZ)) {
 			ipoib_warn(priv, "RX drain timing out\n");
 
 			/*
···
 		}
 		spin_unlock_irq(&priv->lock);
 		msleep(1);
+		ipoib_drain_cq(dev);
 		spin_lock_irq(&priv->lock);
 	}
 
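The first hunk fixes an inverted timeout test: with the stray `!`, the "RX drain timing out" path ran on the very first pass through the loop instead of after the five-second budget had actually elapsed. The second hunk makes the wait loop actively drain the CQ on each iteration, so the connected-mode receive lists can empty instead of waiting on completions nobody is reaping. time_after(a, b) from <linux/jiffies.h> is true when a is chronologically later than b, and it stays correct across counter wraparound because the difference is evaluated as a signed quantity. A userspace sketch of that idiom (simplified; the kernel macro also type-checks its arguments):

#include <stdio.h>

/* Userspace sketch of the kernel's wraparound-safe time_after():
 * casting the difference to a signed type keeps the comparison
 * correct even when the tick counter wraps past zero. */
typedef unsigned long jiffies_t;

#define time_after(a, b) ((long)((b) - (a)) < 0)

int main(void)
{
	jiffies_t begin = (jiffies_t)-3;	/* counter about to wrap */
	jiffies_t now   = begin + 10;		/* wraps past zero */

	/* Correct check: true only once the deadline has passed. */
	if (time_after(now, begin + 5))
		printf("timed out (as expected)\n");

	/* The buggy `!time_after(...)` form is true while still inside
	 * the window, so it warned immediately on the first loop. */
	if (!time_after(begin + 1, begin + 5))
		printf("buggy check fires right away\n");

	return 0;
}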
drivers/infiniband/ulp/ipoib/ipoib_ib.c (+19 -12)
···
 	return pending;
 }
 
+void ipoib_drain_cq(struct net_device *dev)
+{
+	struct ipoib_dev_priv *priv = netdev_priv(dev);
+	int i, n;
+	do {
+		n = ib_poll_cq(priv->cq, IPOIB_NUM_WC, priv->ibwc);
+		for (i = 0; i < n; ++i) {
+			if (priv->ibwc[i].wr_id & IPOIB_CM_OP_SRQ)
+				ipoib_cm_handle_rx_wc(dev, priv->ibwc + i);
+			else if (priv->ibwc[i].wr_id & IPOIB_OP_RECV)
+				ipoib_ib_handle_rx_wc(dev, priv->ibwc + i);
+			else
+				ipoib_ib_handle_tx_wc(dev, priv->ibwc + i);
+		}
+	} while (n == IPOIB_NUM_WC);
+}
+
 int ipoib_ib_dev_stop(struct net_device *dev, int flush)
 {
 	struct ipoib_dev_priv *priv = netdev_priv(dev);
 	struct ib_qp_attr qp_attr;
 	unsigned long begin;
 	struct ipoib_tx_buf *tx_req;
-	int i, n;
+	int i;
 
 	clear_bit(IPOIB_FLAG_INITIALIZED, &priv->flags);
 	netif_poll_disable(dev);
···
 			goto timeout;
 		}
 
-		do {
-			n = ib_poll_cq(priv->cq, IPOIB_NUM_WC, priv->ibwc);
-			for (i = 0; i < n; ++i) {
-				if (priv->ibwc[i].wr_id & IPOIB_CM_OP_SRQ)
-					ipoib_cm_handle_rx_wc(dev, priv->ibwc + i);
-				else if (priv->ibwc[i].wr_id & IPOIB_OP_RECV)
-					ipoib_ib_handle_rx_wc(dev, priv->ibwc + i);
-				else
-					ipoib_ib_handle_tx_wc(dev, priv->ibwc + i);
-			}
-		} while (n == IPOIB_NUM_WC);
+		ipoib_drain_cq(dev);
 
 		msleep(1);
 	}
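Here the poll loop that ipoib_ib_dev_stop() used inline is hoisted into ipoib_drain_cq() so that ipoib_cm_dev_stop() can reuse it (see the ipoib_cm.c hunk above). The loop is the standard drain idiom for ib_poll_cq(): poll in batches, dispatch each completion by its wr_id tag, and repeat while a full batch came back, since a short return means the CQ is empty. A generic sketch of that pattern, with hypothetical poll_batch() and dispatch() standing in for ib_poll_cq() and the per-completion handlers:

/* Generic drain-until-empty idiom. poll_batch() and dispatch() are
 * hypothetical stand-ins; poll_batch() returns how many completions
 * it wrote into wc, at most max. */
#define BATCH 16

struct completion_rec { unsigned long wr_id; };

extern int poll_batch(struct completion_rec *wc, int max);	/* hypothetical */
extern void dispatch(struct completion_rec *wc);		/* hypothetical */

static void drain(void)
{
	struct completion_rec wc[BATCH];
	int i, n;

	do {
		n = poll_batch(wc, BATCH);
		for (i = 0; i < n; ++i)
			dispatch(&wc[i]);
		/* A short batch means the queue was emptied; a full batch
		 * may mean more completions are pending, so poll again. */
	} while (n == BATCH);
}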