Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

Merge tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/dledford/rdma

Pull rdma fixes from Doug Ledford:

- Smattering of miscellaneous fixes

- A five patch series for i40iw that had a patch (5/5) that was larger
than I would like, but I took it because it's needed for large-scale
users

- An 8 patch series for bnxt_re that landed right as I was leaving on
PTO and so had to wait until now...they are all appropriate fixes for
-rc IMO

* tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/dledford/rdma: (22 commits)
bnxt_re: Don't issue cmd to delete GID for QP1 GID entry before the QP is destroyed
bnxt_re: Fix memory leak in FRMR path
bnxt_re: Remove RTNL lock dependency in bnxt_re_query_port
bnxt_re: Fix race between the netdev register and unregister events
bnxt_re: Free up devices in module_exit path
bnxt_re: Fix compare and swap atomic operands
bnxt_re: Stop issuing further cmds to FW once a cmd times out
bnxt_re: Fix update of qplib_qp.mtu when modified
i40iw: Add support for port reuse on active side connections
i40iw: Add missing VLAN priority
i40iw: Call i40iw_cm_disconn on modify QP to disconnect
i40iw: Prevent multiple netdev event notifier registrations
i40iw: Fail open if there are no available MSI-X vectors
RDMA/vmw_pvrdma: Fix reporting correct opcodes for completion
IB/bnxt_re: Fix frame stack compilation warning
IB/mlx5: fix debugfs cleanup
IB/ocrdma: fix incorrect fall-through on switch statement
IB/ipoib: Suppress the retry related completion errors
iw_cxgb4: remove the stid on listen create failure
iw_cxgb4: drop listen destroy replies if no ep found
...

+280 -160
+2 -2
drivers/infiniband/core/verbs.c
··· 1646 1646 */ 1647 1647 if (!ib_query_qp(qp, &attr, IB_QP_STATE | IB_QP_PORT, &init_attr)) { 1648 1648 if (attr.qp_state >= IB_QPS_INIT) { 1649 - if (qp->device->get_link_layer(qp->device, attr.port_num) != 1649 + if (rdma_port_get_link_layer(qp->device, attr.port_num) != 1650 1650 IB_LINK_LAYER_INFINIBAND) 1651 1651 return true; 1652 1652 goto lid_check; ··· 1655 1655 1656 1656 /* Can't get a quick answer, iterate over all ports */ 1657 1657 for (port = 0; port < qp->device->phys_port_cnt; port++) 1658 - if (qp->device->get_link_layer(qp->device, port) != 1658 + if (rdma_port_get_link_layer(qp->device, port) != 1659 1659 IB_LINK_LAYER_INFINIBAND) 1660 1660 num_eth_ports++; 1661 1661
+9 -5
drivers/infiniband/hw/bnxt_re/bnxt_re.h
··· 93 93 struct ib_device ibdev; 94 94 struct list_head list; 95 95 unsigned long flags; 96 - #define BNXT_RE_FLAG_NETDEV_REGISTERED 0 97 - #define BNXT_RE_FLAG_IBDEV_REGISTERED 1 98 - #define BNXT_RE_FLAG_GOT_MSIX 2 99 - #define BNXT_RE_FLAG_RCFW_CHANNEL_EN 8 100 - #define BNXT_RE_FLAG_QOS_WORK_REG 16 96 + #define BNXT_RE_FLAG_NETDEV_REGISTERED 0 97 + #define BNXT_RE_FLAG_IBDEV_REGISTERED 1 98 + #define BNXT_RE_FLAG_GOT_MSIX 2 99 + #define BNXT_RE_FLAG_HAVE_L2_REF 3 100 + #define BNXT_RE_FLAG_RCFW_CHANNEL_EN 4 101 + #define BNXT_RE_FLAG_QOS_WORK_REG 5 102 + #define BNXT_RE_FLAG_TASK_IN_PROG 6 101 103 struct net_device *netdev; 102 104 unsigned int version, major, minor; 103 105 struct bnxt_en_dev *en_dev; ··· 110 108 111 109 struct delayed_work worker; 112 110 u8 cur_prio_map; 111 + u8 active_speed; 112 + u8 active_width; 113 113 114 114 /* FP Notification Queue (CQ & SRQ) */ 115 115 struct tasklet_struct nq_task;
+64 -43
drivers/infiniband/hw/bnxt_re/ib_verbs.c
··· 259 259 port_attr->sm_sl = 0; 260 260 port_attr->subnet_timeout = 0; 261 261 port_attr->init_type_reply = 0; 262 - /* call the underlying netdev's ethtool hooks to query speed settings 263 - * for which we acquire rtnl_lock _only_ if it's registered with 264 - * IB stack to avoid race in the NETDEV_UNREG path 265 - */ 266 - if (test_bit(BNXT_RE_FLAG_IBDEV_REGISTERED, &rdev->flags)) 267 - if (ib_get_eth_speed(ibdev, port_num, &port_attr->active_speed, 268 - &port_attr->active_width)) 269 - return -EINVAL; 262 + port_attr->active_speed = rdev->active_speed; 263 + port_attr->active_width = rdev->active_width; 264 + 270 265 return 0; 271 266 } 272 267 ··· 314 319 struct bnxt_re_gid_ctx *ctx, **ctx_tbl; 315 320 struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev); 316 321 struct bnxt_qplib_sgid_tbl *sgid_tbl = &rdev->qplib_res.sgid_tbl; 322 + struct bnxt_qplib_gid *gid_to_del; 317 323 318 324 /* Delete the entry from the hardware */ 319 325 ctx = *context; ··· 324 328 if (sgid_tbl && sgid_tbl->active) { 325 329 if (ctx->idx >= sgid_tbl->max) 326 330 return -EINVAL; 331 + gid_to_del = &sgid_tbl->tbl[ctx->idx]; 332 + /* DEL_GID is called in WQ context(netdevice_event_work_handler) 333 + * or via the ib_unregister_device path. In the former case QP1 334 + * may not be destroyed yet, in which case just return as FW 335 + * needs that entry to be present and will fail it's deletion. 
336 + * We could get invoked again after QP1 is destroyed OR get an 337 + * ADD_GID call with a different GID value for the same index 338 + * where we issue MODIFY_GID cmd to update the GID entry -- TBD 339 + */ 340 + if (ctx->idx == 0 && 341 + rdma_link_local_addr((struct in6_addr *)gid_to_del) && 342 + ctx->refcnt == 1 && rdev->qp1_sqp) { 343 + dev_dbg(rdev_to_dev(rdev), 344 + "Trying to delete GID0 while QP1 is alive\n"); 345 + return -EFAULT; 346 + } 327 347 ctx->refcnt--; 328 348 if (!ctx->refcnt) { 329 - rc = bnxt_qplib_del_sgid(sgid_tbl, 330 - &sgid_tbl->tbl[ctx->idx], 331 - true); 349 + rc = bnxt_qplib_del_sgid(sgid_tbl, gid_to_del, true); 332 350 if (rc) { 333 351 dev_err(rdev_to_dev(rdev), 334 352 "Failed to remove GID: %#x", rc); ··· 826 816 827 817 kfree(rdev->sqp_ah); 828 818 kfree(rdev->qp1_sqp); 819 + rdev->qp1_sqp = NULL; 820 + rdev->sqp_ah = NULL; 829 821 } 830 822 831 823 if (!IS_ERR_OR_NULL(qp->rumem)) ··· 1448 1436 qp->qplib_qp.modify_flags |= 1449 1437 CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU; 1450 1438 qp->qplib_qp.path_mtu = __from_ib_mtu(qp_attr->path_mtu); 1439 + qp->qplib_qp.mtu = ib_mtu_enum_to_int(qp_attr->path_mtu); 1451 1440 } else if (qp_attr->qp_state == IB_QPS_RTR) { 1452 1441 qp->qplib_qp.modify_flags |= 1453 1442 CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU; 1454 1443 qp->qplib_qp.path_mtu = 1455 1444 __from_ib_mtu(iboe_get_mtu(rdev->netdev->mtu)); 1445 + qp->qplib_qp.mtu = 1446 + ib_mtu_enum_to_int(iboe_get_mtu(rdev->netdev->mtu)); 1456 1447 } 1457 1448 1458 1449 if (qp_attr_mask & IB_QP_TIMEOUT) { ··· 1566 1551 { 1567 1552 struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp); 1568 1553 struct bnxt_re_dev *rdev = qp->rdev; 1569 - struct bnxt_qplib_qp qplib_qp; 1554 + struct bnxt_qplib_qp *qplib_qp; 1570 1555 int rc; 1571 1556 1572 - memset(&qplib_qp, 0, sizeof(struct bnxt_qplib_qp)); 1573 - qplib_qp.id = qp->qplib_qp.id; 1574 - qplib_qp.ah.host_sgid_index = qp->qplib_qp.ah.host_sgid_index; 1557 + qplib_qp = 
kzalloc(sizeof(*qplib_qp), GFP_KERNEL); 1558 + if (!qplib_qp) 1559 + return -ENOMEM; 1575 1560 1576 - rc = bnxt_qplib_query_qp(&rdev->qplib_res, &qplib_qp); 1561 + qplib_qp->id = qp->qplib_qp.id; 1562 + qplib_qp->ah.host_sgid_index = qp->qplib_qp.ah.host_sgid_index; 1563 + 1564 + rc = bnxt_qplib_query_qp(&rdev->qplib_res, qplib_qp); 1577 1565 if (rc) { 1578 1566 dev_err(rdev_to_dev(rdev), "Failed to query HW QP"); 1579 - return rc; 1567 + goto out; 1580 1568 } 1581 - qp_attr->qp_state = __to_ib_qp_state(qplib_qp.state); 1582 - qp_attr->en_sqd_async_notify = qplib_qp.en_sqd_async_notify ? 1 : 0; 1583 - qp_attr->qp_access_flags = __to_ib_access_flags(qplib_qp.access); 1584 - qp_attr->pkey_index = qplib_qp.pkey_index; 1585 - qp_attr->qkey = qplib_qp.qkey; 1569 + qp_attr->qp_state = __to_ib_qp_state(qplib_qp->state); 1570 + qp_attr->en_sqd_async_notify = qplib_qp->en_sqd_async_notify ? 1 : 0; 1571 + qp_attr->qp_access_flags = __to_ib_access_flags(qplib_qp->access); 1572 + qp_attr->pkey_index = qplib_qp->pkey_index; 1573 + qp_attr->qkey = qplib_qp->qkey; 1586 1574 qp_attr->ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE; 1587 - rdma_ah_set_grh(&qp_attr->ah_attr, NULL, qplib_qp.ah.flow_label, 1588 - qplib_qp.ah.host_sgid_index, 1589 - qplib_qp.ah.hop_limit, 1590 - qplib_qp.ah.traffic_class); 1591 - rdma_ah_set_dgid_raw(&qp_attr->ah_attr, qplib_qp.ah.dgid.data); 1592 - rdma_ah_set_sl(&qp_attr->ah_attr, qplib_qp.ah.sl); 1593 - ether_addr_copy(qp_attr->ah_attr.roce.dmac, qplib_qp.ah.dmac); 1594 - qp_attr->path_mtu = __to_ib_mtu(qplib_qp.path_mtu); 1595 - qp_attr->timeout = qplib_qp.timeout; 1596 - qp_attr->retry_cnt = qplib_qp.retry_cnt; 1597 - qp_attr->rnr_retry = qplib_qp.rnr_retry; 1598 - qp_attr->min_rnr_timer = qplib_qp.min_rnr_timer; 1599 - qp_attr->rq_psn = qplib_qp.rq.psn; 1600 - qp_attr->max_rd_atomic = qplib_qp.max_rd_atomic; 1601 - qp_attr->sq_psn = qplib_qp.sq.psn; 1602 - qp_attr->max_dest_rd_atomic = qplib_qp.max_dest_rd_atomic; 1603 - qp_init_attr->sq_sig_type = 
qplib_qp.sig_type ? IB_SIGNAL_ALL_WR : 1604 - IB_SIGNAL_REQ_WR; 1605 - qp_attr->dest_qp_num = qplib_qp.dest_qpn; 1575 + rdma_ah_set_grh(&qp_attr->ah_attr, NULL, qplib_qp->ah.flow_label, 1576 + qplib_qp->ah.host_sgid_index, 1577 + qplib_qp->ah.hop_limit, 1578 + qplib_qp->ah.traffic_class); 1579 + rdma_ah_set_dgid_raw(&qp_attr->ah_attr, qplib_qp->ah.dgid.data); 1580 + rdma_ah_set_sl(&qp_attr->ah_attr, qplib_qp->ah.sl); 1581 + ether_addr_copy(qp_attr->ah_attr.roce.dmac, qplib_qp->ah.dmac); 1582 + qp_attr->path_mtu = __to_ib_mtu(qplib_qp->path_mtu); 1583 + qp_attr->timeout = qplib_qp->timeout; 1584 + qp_attr->retry_cnt = qplib_qp->retry_cnt; 1585 + qp_attr->rnr_retry = qplib_qp->rnr_retry; 1586 + qp_attr->min_rnr_timer = qplib_qp->min_rnr_timer; 1587 + qp_attr->rq_psn = qplib_qp->rq.psn; 1588 + qp_attr->max_rd_atomic = qplib_qp->max_rd_atomic; 1589 + qp_attr->sq_psn = qplib_qp->sq.psn; 1590 + qp_attr->max_dest_rd_atomic = qplib_qp->max_dest_rd_atomic; 1591 + qp_init_attr->sq_sig_type = qplib_qp->sig_type ? 
IB_SIGNAL_ALL_WR : 1592 + IB_SIGNAL_REQ_WR; 1593 + qp_attr->dest_qp_num = qplib_qp->dest_qpn; 1606 1594 1607 1595 qp_attr->cap.max_send_wr = qp->qplib_qp.sq.max_wqe; 1608 1596 qp_attr->cap.max_send_sge = qp->qplib_qp.sq.max_sge; ··· 1614 1596 qp_attr->cap.max_inline_data = qp->qplib_qp.max_inline_data; 1615 1597 qp_init_attr->cap = qp_attr->cap; 1616 1598 1617 - return 0; 1599 + out: 1600 + kfree(qplib_qp); 1601 + return rc; 1618 1602 } 1619 1603 1620 1604 /* Routine for sending QP1 packets for RoCE V1 an V2 ··· 1928 1908 switch (wr->opcode) { 1929 1909 case IB_WR_ATOMIC_CMP_AND_SWP: 1930 1910 wqe->type = BNXT_QPLIB_SWQE_TYPE_ATOMIC_CMP_AND_SWP; 1911 + wqe->atomic.cmp_data = atomic_wr(wr)->compare_add; 1931 1912 wqe->atomic.swap_data = atomic_wr(wr)->swap; 1932 1913 break; 1933 1914 case IB_WR_ATOMIC_FETCH_AND_ADD: ··· 3083 3062 return rc; 3084 3063 } 3085 3064 3086 - if (mr->npages && mr->pages) { 3065 + if (mr->pages) { 3087 3066 rc = bnxt_qplib_free_fast_reg_page_list(&rdev->qplib_res, 3088 3067 &mr->qplib_frpl); 3089 3068 kfree(mr->pages);
+28
drivers/infiniband/hw/bnxt_re/main.c
··· 1161 1161 } 1162 1162 } 1163 1163 set_bit(BNXT_RE_FLAG_IBDEV_REGISTERED, &rdev->flags); 1164 + ib_get_eth_speed(&rdev->ibdev, 1, &rdev->active_speed, 1165 + &rdev->active_width); 1164 1166 bnxt_re_dispatch_event(&rdev->ibdev, NULL, 1, IB_EVENT_PORT_ACTIVE); 1165 1167 bnxt_re_dispatch_event(&rdev->ibdev, NULL, 1, IB_EVENT_GID_CHANGE); 1166 1168 ··· 1257 1255 else if (netif_carrier_ok(rdev->netdev)) 1258 1256 bnxt_re_dispatch_event(&rdev->ibdev, NULL, 1, 1259 1257 IB_EVENT_PORT_ACTIVE); 1258 + ib_get_eth_speed(&rdev->ibdev, 1, &rdev->active_speed, 1259 + &rdev->active_width); 1260 1260 break; 1261 1261 default: 1262 1262 break; 1263 1263 } 1264 + smp_mb__before_atomic(); 1265 + clear_bit(BNXT_RE_FLAG_TASK_IN_PROG, &rdev->flags); 1264 1266 kfree(re_work); 1265 1267 } 1266 1268 ··· 1323 1317 break; 1324 1318 1325 1319 case NETDEV_UNREGISTER: 1320 + /* netdev notifier will call NETDEV_UNREGISTER again later since 1321 + * we are still holding the reference to the netdev 1322 + */ 1323 + if (test_bit(BNXT_RE_FLAG_TASK_IN_PROG, &rdev->flags)) 1324 + goto exit; 1326 1325 bnxt_re_ib_unreg(rdev, false); 1327 1326 bnxt_re_remove_one(rdev); 1328 1327 bnxt_re_dev_unreg(rdev); ··· 1346 1335 re_work->vlan_dev = (real_dev == netdev ? 
1347 1336 NULL : netdev); 1348 1337 INIT_WORK(&re_work->work, bnxt_re_task); 1338 + set_bit(BNXT_RE_FLAG_TASK_IN_PROG, &rdev->flags); 1349 1339 queue_work(bnxt_re_wq, &re_work->work); 1350 1340 } 1351 1341 } ··· 1387 1375 1388 1376 static void __exit bnxt_re_mod_exit(void) 1389 1377 { 1378 + struct bnxt_re_dev *rdev; 1379 + LIST_HEAD(to_be_deleted); 1380 + 1381 + mutex_lock(&bnxt_re_dev_lock); 1382 + /* Free all adapter allocated resources */ 1383 + if (!list_empty(&bnxt_re_dev_list)) 1384 + list_splice_init(&bnxt_re_dev_list, &to_be_deleted); 1385 + mutex_unlock(&bnxt_re_dev_lock); 1386 + 1387 + list_for_each_entry(rdev, &to_be_deleted, list) { 1388 + dev_info(rdev_to_dev(rdev), "Unregistering Device"); 1389 + bnxt_re_dev_stop(rdev); 1390 + bnxt_re_ib_unreg(rdev, true); 1391 + bnxt_re_remove_one(rdev); 1392 + bnxt_re_dev_unreg(rdev); 1393 + } 1390 1394 unregister_netdevice_notifier(&bnxt_re_netdev_notifier); 1391 1395 if (bnxt_re_wq) 1392 1396 destroy_workqueue(bnxt_re_wq);
+4
drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
··· 107 107 return -EINVAL; 108 108 } 109 109 110 + if (test_bit(FIRMWARE_TIMED_OUT, &rcfw->flags)) 111 + return -ETIMEDOUT; 112 + 110 113 /* Cmdq are in 16-byte units, each request can consume 1 or more 111 114 * cmdqe 112 115 */ ··· 229 226 /* timed out */ 230 227 dev_err(&rcfw->pdev->dev, "QPLIB: cmdq[%#x]=%#x timedout (%d)msec", 231 228 cookie, opcode, RCFW_CMD_WAIT_TIME_MS); 229 + set_bit(FIRMWARE_TIMED_OUT, &rcfw->flags); 232 230 return rc; 233 231 } 234 232
+2 -1
drivers/infiniband/hw/bnxt_re/qplib_rcfw.h
··· 162 162 unsigned long *cmdq_bitmap; 163 163 u32 bmap_size; 164 164 unsigned long flags; 165 - #define FIRMWARE_INITIALIZED_FLAG 1 165 + #define FIRMWARE_INITIALIZED_FLAG BIT(0) 166 166 #define FIRMWARE_FIRST_FLAG BIT(31) 167 + #define FIRMWARE_TIMED_OUT BIT(3) 167 168 wait_queue_head_t waitq; 168 169 int (*aeq_handler)(struct bnxt_qplib_rcfw *, 169 170 struct creq_func_event *);
+7 -2
drivers/infiniband/hw/cxgb4/cm.c
··· 2333 2333 unsigned int stid = GET_TID(rpl); 2334 2334 struct c4iw_listen_ep *ep = get_ep_from_stid(dev, stid); 2335 2335 2336 + if (!ep) { 2337 + pr_debug("%s stid %d lookup failure!\n", __func__, stid); 2338 + goto out; 2339 + } 2336 2340 pr_debug("%s ep %p\n", __func__, ep); 2337 2341 c4iw_wake_up(&ep->com.wr_wait, status2errno(rpl->status)); 2338 2342 c4iw_put_ep(&ep->com); 2343 + out: 2339 2344 return 0; 2340 2345 } 2341 2346 ··· 2599 2594 c4iw_put_ep(&child_ep->com); 2600 2595 reject: 2601 2596 reject_cr(dev, hwtid, skb); 2597 + out: 2602 2598 if (parent_ep) 2603 2599 c4iw_put_ep(&parent_ep->com); 2604 - out: 2605 2600 return 0; 2606 2601 } 2607 2602 ··· 3462 3457 cm_id->provider_data = ep; 3463 3458 goto out; 3464 3459 } 3465 - 3460 + remove_handle(ep->com.dev, &ep->com.dev->stid_idr, ep->stid); 3466 3461 cxgb4_free_stid(ep->com.dev->rdev.lldi.tids, ep->stid, 3467 3462 ep->com.local_addr.ss_family); 3468 3463 fail2:
-1
drivers/infiniband/hw/i40iw/i40iw.h
··· 201 201 CEQ_CREATED, 202 202 ILQ_CREATED, 203 203 IEQ_CREATED, 204 - INET_NOTIFIER, 205 204 IP_ADDR_REGISTERED, 206 205 RDMA_DEV_REGISTERED 207 206 };
+77 -77
drivers/infiniband/hw/i40iw/i40iw_cm.c
··· 1504 1504 } 1505 1505 1506 1506 /** 1507 - * listen_port_in_use - determine if port is in use 1508 - * @port: Listen port number 1507 + * i40iw_port_in_use - determine if port is in use 1508 + * @port: port number 1509 + * @active_side: flag for listener side vs active side 1509 1510 */ 1510 - static bool i40iw_listen_port_in_use(struct i40iw_cm_core *cm_core, u16 port) 1511 + static bool i40iw_port_in_use(struct i40iw_cm_core *cm_core, u16 port, bool active_side) 1511 1512 { 1512 1513 struct i40iw_cm_listener *listen_node; 1514 + struct i40iw_cm_node *cm_node; 1513 1515 unsigned long flags; 1514 1516 bool ret = false; 1515 1517 1516 - spin_lock_irqsave(&cm_core->listen_list_lock, flags); 1517 - list_for_each_entry(listen_node, &cm_core->listen_nodes, list) { 1518 - if (listen_node->loc_port == port) { 1519 - ret = true; 1520 - break; 1518 + if (active_side) { 1519 + /* search connected node list */ 1520 + spin_lock_irqsave(&cm_core->ht_lock, flags); 1521 + list_for_each_entry(cm_node, &cm_core->connected_nodes, list) { 1522 + if (cm_node->loc_port == port) { 1523 + ret = true; 1524 + break; 1525 + } 1521 1526 } 1527 + if (!ret) 1528 + clear_bit(port, cm_core->active_side_ports); 1529 + spin_unlock_irqrestore(&cm_core->ht_lock, flags); 1530 + } else { 1531 + spin_lock_irqsave(&cm_core->listen_list_lock, flags); 1532 + list_for_each_entry(listen_node, &cm_core->listen_nodes, list) { 1533 + if (listen_node->loc_port == port) { 1534 + ret = true; 1535 + break; 1536 + } 1537 + } 1538 + spin_unlock_irqrestore(&cm_core->listen_list_lock, flags); 1522 1539 } 1523 - spin_unlock_irqrestore(&cm_core->listen_list_lock, flags); 1540 + 1524 1541 return ret; 1525 1542 } 1526 1543 ··· 1885 1868 spin_unlock_irqrestore(&cm_core->listen_list_lock, flags); 1886 1869 1887 1870 if (listener->iwdev) { 1888 - if (apbvt_del && !i40iw_listen_port_in_use(cm_core, listener->loc_port)) 1871 + if (apbvt_del && !i40iw_port_in_use(cm_core, listener->loc_port, false)) 1889 1872 
i40iw_manage_apbvt(listener->iwdev, 1890 1873 listener->loc_port, 1891 1874 I40IW_MANAGE_APBVT_DEL); ··· 2264 2247 if (cm_node->listener) { 2265 2248 i40iw_dec_refcnt_listen(cm_core, cm_node->listener, 0, true); 2266 2249 } else { 2267 - if (!i40iw_listen_port_in_use(cm_core, cm_node->loc_port) && 2268 - cm_node->apbvt_set) { 2250 + if (!i40iw_port_in_use(cm_core, cm_node->loc_port, true) && cm_node->apbvt_set) { 2269 2251 i40iw_manage_apbvt(cm_node->iwdev, 2270 2252 cm_node->loc_port, 2271 2253 I40IW_MANAGE_APBVT_DEL); 2272 - i40iw_get_addr_info(cm_node, &nfo); 2273 - if (cm_node->qhash_set) { 2274 - i40iw_manage_qhash(cm_node->iwdev, 2275 - &nfo, 2276 - I40IW_QHASH_TYPE_TCP_ESTABLISHED, 2277 - I40IW_QHASH_MANAGE_TYPE_DELETE, 2278 - NULL, 2279 - false); 2280 - cm_node->qhash_set = 0; 2281 - } 2254 + cm_node->apbvt_set = 0; 2255 + } 2256 + i40iw_get_addr_info(cm_node, &nfo); 2257 + if (cm_node->qhash_set) { 2258 + i40iw_manage_qhash(cm_node->iwdev, 2259 + &nfo, 2260 + I40IW_QHASH_TYPE_TCP_ESTABLISHED, 2261 + I40IW_QHASH_MANAGE_TYPE_DELETE, 2262 + NULL, 2263 + false); 2264 + cm_node->qhash_set = 0; 2282 2265 } 2283 2266 } 2284 2267 ··· 3272 3255 tcp_info->snd_mss = cpu_to_le32(((u32)cm_node->tcp_cntxt.mss)); 3273 3256 if (cm_node->vlan_id < VLAN_TAG_PRESENT) { 3274 3257 tcp_info->insert_vlan_tag = true; 3275 - tcp_info->vlan_tag = cpu_to_le16(cm_node->vlan_id); 3258 + tcp_info->vlan_tag = cpu_to_le16(((u16)cm_node->user_pri << I40IW_VLAN_PRIO_SHIFT) | 3259 + cm_node->vlan_id); 3276 3260 } 3277 3261 if (cm_node->ipv4) { 3278 3262 tcp_info->src_port = cpu_to_le16(cm_node->loc_port); ··· 3755 3737 struct sockaddr_in *raddr; 3756 3738 struct sockaddr_in6 *laddr6; 3757 3739 struct sockaddr_in6 *raddr6; 3758 - bool qhash_set = false; 3759 - int apbvt_set = 0; 3760 - int err = 0; 3761 - enum i40iw_status_code status; 3740 + int ret = 0; 3741 + unsigned long flags; 3762 3742 3763 3743 ibqp = i40iw_get_qp(cm_id->device, conn_param->qpn); 3764 3744 if (!ibqp) ··· 3805 3789 
cm_info.user_pri = rt_tos2priority(cm_id->tos); 3806 3790 i40iw_debug(&iwdev->sc_dev, I40IW_DEBUG_DCB, "%s TOS:[%d] UP:[%d]\n", 3807 3791 __func__, cm_id->tos, cm_info.user_pri); 3808 - if ((cm_info.ipv4 && (laddr->sin_addr.s_addr != raddr->sin_addr.s_addr)) || 3809 - (!cm_info.ipv4 && memcmp(laddr6->sin6_addr.in6_u.u6_addr32, 3810 - raddr6->sin6_addr.in6_u.u6_addr32, 3811 - sizeof(laddr6->sin6_addr.in6_u.u6_addr32)))) { 3812 - status = i40iw_manage_qhash(iwdev, 3813 - &cm_info, 3814 - I40IW_QHASH_TYPE_TCP_ESTABLISHED, 3815 - I40IW_QHASH_MANAGE_TYPE_ADD, 3816 - NULL, 3817 - true); 3818 - if (status) 3819 - return -EINVAL; 3820 - qhash_set = true; 3821 - } 3822 - status = i40iw_manage_apbvt(iwdev, cm_info.loc_port, I40IW_MANAGE_APBVT_ADD); 3823 - if (status) { 3824 - i40iw_manage_qhash(iwdev, 3825 - &cm_info, 3826 - I40IW_QHASH_TYPE_TCP_ESTABLISHED, 3827 - I40IW_QHASH_MANAGE_TYPE_DELETE, 3828 - NULL, 3829 - false); 3830 - return -EINVAL; 3831 - } 3832 - 3833 - apbvt_set = 1; 3834 3792 cm_id->add_ref(cm_id); 3835 3793 cm_node = i40iw_create_cm_node(&iwdev->cm_core, iwdev, 3836 3794 conn_param->private_data_len, ··· 3812 3822 &cm_info); 3813 3823 3814 3824 if (IS_ERR(cm_node)) { 3815 - err = PTR_ERR(cm_node); 3816 - goto err_out; 3825 + ret = PTR_ERR(cm_node); 3826 + cm_id->rem_ref(cm_id); 3827 + return ret; 3817 3828 } 3818 3829 3830 + if ((cm_info.ipv4 && (laddr->sin_addr.s_addr != raddr->sin_addr.s_addr)) || 3831 + (!cm_info.ipv4 && memcmp(laddr6->sin6_addr.in6_u.u6_addr32, 3832 + raddr6->sin6_addr.in6_u.u6_addr32, 3833 + sizeof(laddr6->sin6_addr.in6_u.u6_addr32)))) { 3834 + if (i40iw_manage_qhash(iwdev, &cm_info, I40IW_QHASH_TYPE_TCP_ESTABLISHED, 3835 + I40IW_QHASH_MANAGE_TYPE_ADD, NULL, true)) { 3836 + ret = -EINVAL; 3837 + goto err; 3838 + } 3839 + cm_node->qhash_set = true; 3840 + } 3841 + 3842 + spin_lock_irqsave(&iwdev->cm_core.ht_lock, flags); 3843 + if (!test_and_set_bit(cm_info.loc_port, iwdev->cm_core.active_side_ports)) { 3844 + 
spin_unlock_irqrestore(&iwdev->cm_core.ht_lock, flags); 3845 + if (i40iw_manage_apbvt(iwdev, cm_info.loc_port, I40IW_MANAGE_APBVT_ADD)) { 3846 + ret = -EINVAL; 3847 + goto err; 3848 + } 3849 + } else { 3850 + spin_unlock_irqrestore(&iwdev->cm_core.ht_lock, flags); 3851 + } 3852 + 3853 + cm_node->apbvt_set = true; 3819 3854 i40iw_record_ird_ord(cm_node, (u16)conn_param->ird, (u16)conn_param->ord); 3820 3855 if (cm_node->send_rdma0_op == SEND_RDMA_READ_ZERO && 3821 3856 !cm_node->ord_size) 3822 3857 cm_node->ord_size = 1; 3823 3858 3824 - cm_node->apbvt_set = apbvt_set; 3825 - cm_node->qhash_set = qhash_set; 3826 3859 iwqp->cm_node = cm_node; 3827 3860 cm_node->iwqp = iwqp; 3828 3861 iwqp->cm_id = cm_id; ··· 3853 3840 3854 3841 if (cm_node->state != I40IW_CM_STATE_OFFLOADED) { 3855 3842 cm_node->state = I40IW_CM_STATE_SYN_SENT; 3856 - err = i40iw_send_syn(cm_node, 0); 3857 - if (err) { 3858 - i40iw_rem_ref_cm_node(cm_node); 3859 - goto err_out; 3860 - } 3843 + ret = i40iw_send_syn(cm_node, 0); 3844 + if (ret) 3845 + goto err; 3861 3846 } 3862 3847 3863 3848 i40iw_debug(cm_node->dev, ··· 3864 3853 cm_node->rem_port, 3865 3854 cm_node, 3866 3855 cm_node->cm_id); 3856 + 3867 3857 return 0; 3868 3858 3869 - err_out: 3859 + err: 3870 3860 if (cm_info.ipv4) 3871 3861 i40iw_debug(&iwdev->sc_dev, 3872 3862 I40IW_DEBUG_CM, ··· 3879 3867 "Api - connect() FAILED: dest addr=%pI6", 3880 3868 cm_info.rem_addr); 3881 3869 3882 - if (qhash_set) 3883 - i40iw_manage_qhash(iwdev, 3884 - &cm_info, 3885 - I40IW_QHASH_TYPE_TCP_ESTABLISHED, 3886 - I40IW_QHASH_MANAGE_TYPE_DELETE, 3887 - NULL, 3888 - false); 3889 - 3890 - if (apbvt_set && !i40iw_listen_port_in_use(&iwdev->cm_core, 3891 - cm_info.loc_port)) 3892 - i40iw_manage_apbvt(iwdev, 3893 - cm_info.loc_port, 3894 - I40IW_MANAGE_APBVT_DEL); 3870 + i40iw_rem_ref_cm_node(cm_node); 3895 3871 cm_id->rem_ref(cm_id); 3896 3872 iwdev->cm_core.stats_connect_errs++; 3897 - return err; 3873 + return ret; 3898 3874 } 3899 3875 3900 3876 /**
+5
drivers/infiniband/hw/i40iw/i40iw_cm.h
··· 71 71 #define I40IW_HW_IRD_SETTING_32 32 72 72 #define I40IW_HW_IRD_SETTING_64 64 73 73 74 + #define MAX_PORTS 65536 75 + #define I40IW_VLAN_PRIO_SHIFT 13 76 + 74 77 enum ietf_mpa_flags { 75 78 IETF_MPA_FLAGS_MARKERS = 0x80, /* receive Markers */ 76 79 IETF_MPA_FLAGS_CRC = 0x40, /* receive Markers */ ··· 413 410 414 411 spinlock_t ht_lock; /* manage hash table */ 415 412 spinlock_t listen_list_lock; /* listen list */ 413 + 414 + unsigned long active_side_ports[BITS_TO_LONGS(MAX_PORTS)]; 416 415 417 416 u64 stats_nodes_created; 418 417 u64 stats_nodes_destroyed;
+23 -16
drivers/infiniband/hw/i40iw/i40iw_main.c
··· 99 99 .notifier_call = i40iw_net_event 100 100 }; 101 101 102 - static atomic_t i40iw_notifiers_registered; 103 - 104 102 /** 105 103 * i40iw_find_i40e_handler - find a handler given a client info 106 104 * @ldev: pointer to a client info ··· 1374 1376 */ 1375 1377 static void i40iw_register_notifiers(void) 1376 1378 { 1377 - if (atomic_inc_return(&i40iw_notifiers_registered) == 1) { 1378 - register_inetaddr_notifier(&i40iw_inetaddr_notifier); 1379 - register_inet6addr_notifier(&i40iw_inetaddr6_notifier); 1380 - register_netevent_notifier(&i40iw_net_notifier); 1381 - } 1379 + register_inetaddr_notifier(&i40iw_inetaddr_notifier); 1380 + register_inet6addr_notifier(&i40iw_inetaddr6_notifier); 1381 + register_netevent_notifier(&i40iw_net_notifier); 1382 + } 1383 + 1384 + /** 1385 + * i40iw_unregister_notifiers - unregister tcp ip notifiers 1386 + */ 1387 + 1388 + static void i40iw_unregister_notifiers(void) 1389 + { 1390 + unregister_netevent_notifier(&i40iw_net_notifier); 1391 + unregister_inetaddr_notifier(&i40iw_inetaddr_notifier); 1392 + unregister_inet6addr_notifier(&i40iw_inetaddr6_notifier); 1382 1393 } 1383 1394 1384 1395 /** ··· 1406 1399 u32 ceq_idx; 1407 1400 u32 i; 1408 1401 u32 size; 1402 + 1403 + if (!ldev->msix_count) { 1404 + i40iw_pr_err("No MSI-X vectors\n"); 1405 + return I40IW_ERR_CONFIG; 1406 + } 1409 1407 1410 1408 iwdev->msix_count = ldev->msix_count; 1411 1409 ··· 1474 1462 if (!iwdev->reset) 1475 1463 i40iw_del_macip_entry(iwdev, (u8)iwdev->mac_ip_table_idx); 1476 1464 /* fallthrough */ 1477 - case INET_NOTIFIER: 1478 - if (!atomic_dec_return(&i40iw_notifiers_registered)) { 1479 - unregister_netevent_notifier(&i40iw_net_notifier); 1480 - unregister_inetaddr_notifier(&i40iw_inetaddr_notifier); 1481 - unregister_inet6addr_notifier(&i40iw_inetaddr6_notifier); 1482 - } 1483 1465 /* fallthrough */ 1484 1466 case PBLE_CHUNK_MEM: 1485 1467 i40iw_destroy_pble_pool(dev, iwdev->pble_rsrc); ··· 1556 1550 1557 1551 status = i40iw_save_msix_info(iwdev, 
ldev); 1558 1552 if (status) 1559 - goto exit; 1553 + return status; 1560 1554 iwdev->hw.dev_context = (void *)ldev->pcidev; 1561 1555 iwdev->hw.hw_addr = ldev->hw_addr; 1562 1556 status = i40iw_allocate_dma_mem(&iwdev->hw, ··· 1673 1667 break; 1674 1668 iwdev->init_state = PBLE_CHUNK_MEM; 1675 1669 iwdev->virtchnl_wq = alloc_ordered_workqueue("iwvch", WQ_MEM_RECLAIM); 1676 - i40iw_register_notifiers(); 1677 - iwdev->init_state = INET_NOTIFIER; 1678 1670 status = i40iw_add_mac_ip(iwdev); 1679 1671 if (status) 1680 1672 break; ··· 2022 2018 i40iw_client.type = I40E_CLIENT_IWARP; 2023 2019 spin_lock_init(&i40iw_handler_lock); 2024 2020 ret = i40e_register_client(&i40iw_client); 2021 + i40iw_register_notifiers(); 2022 + 2025 2023 return ret; 2026 2024 } 2027 2025 ··· 2035 2029 */ 2036 2030 static void __exit i40iw_exit_module(void) 2037 2031 { 2032 + i40iw_unregister_notifiers(); 2038 2033 i40e_unregister_client(&i40iw_client); 2039 2034 } 2040 2035
+3 -3
drivers/infiniband/hw/i40iw/i40iw_utils.c
··· 160 160 return NOTIFY_DONE; 161 161 162 162 iwdev = &hdl->device; 163 - if (iwdev->init_state < INET_NOTIFIER) 163 + if (iwdev->init_state < IP_ADDR_REGISTERED || iwdev->closing) 164 164 return NOTIFY_DONE; 165 165 166 166 netdev = iwdev->ldev->netdev; ··· 217 217 return NOTIFY_DONE; 218 218 219 219 iwdev = &hdl->device; 220 - if (iwdev->init_state < INET_NOTIFIER) 220 + if (iwdev->init_state < IP_ADDR_REGISTERED || iwdev->closing) 221 221 return NOTIFY_DONE; 222 222 223 223 netdev = iwdev->ldev->netdev; ··· 266 266 if (!iwhdl) 267 267 return NOTIFY_DONE; 268 268 iwdev = &iwhdl->device; 269 - if (iwdev->init_state < INET_NOTIFIER) 269 + if (iwdev->init_state < IP_ADDR_REGISTERED || iwdev->closing) 270 270 return NOTIFY_DONE; 271 271 p = (__be32 *)neigh->primary_key; 272 272 i40iw_copy_ip_ntohl(local_ipaddr, p);
+12
drivers/infiniband/hw/i40iw/i40iw_verbs.c
··· 1027 1027 iwqp->hw_tcp_state = I40IW_TCP_STATE_CLOSED; 1028 1028 iwqp->last_aeq = I40IW_AE_RESET_SENT; 1029 1029 spin_unlock_irqrestore(&iwqp->lock, flags); 1030 + i40iw_cm_disconn(iwqp); 1030 1031 } 1032 + } else { 1033 + spin_lock_irqsave(&iwqp->lock, flags); 1034 + if (iwqp->cm_id) { 1035 + if (atomic_inc_return(&iwqp->close_timer_started) == 1) { 1036 + iwqp->cm_id->add_ref(iwqp->cm_id); 1037 + i40iw_schedule_cm_timer(iwqp->cm_node, 1038 + (struct i40iw_puda_buf *)iwqp, 1039 + I40IW_TIMER_TYPE_CLOSE, 1, 0); 1040 + } 1041 + } 1042 + spin_unlock_irqrestore(&iwqp->lock, flags); 1031 1043 } 1032 1044 } 1033 1045 return 0;
+3 -3
drivers/infiniband/hw/mlx5/main.c
··· 3837 3837 if (!dbg) 3838 3838 return -ENOMEM; 3839 3839 3840 + dev->delay_drop.dbg = dbg; 3841 + 3840 3842 dbg->dir_debugfs = 3841 3843 debugfs_create_dir("delay_drop", 3842 3844 dev->mdev->priv.dbg_root); 3843 3845 if (!dbg->dir_debugfs) 3844 - return -ENOMEM; 3846 + goto out_debugfs; 3845 3847 3846 3848 dbg->events_cnt_debugfs = 3847 3849 debugfs_create_atomic_t("num_timeout_events", 0400, ··· 3866 3864 &fops_delay_drop_timeout); 3867 3865 if (!dbg->timeout_debugfs) 3868 3866 goto out_debugfs; 3869 - 3870 - dev->delay_drop.dbg = dbg; 3871 3867 3872 3868 return 0; 3873 3869
+3
drivers/infiniband/hw/ocrdma/ocrdma_hw.c
··· 252 252 case OCRDMA_MBX_ADDI_STATUS_INSUFFICIENT_RESOURCES: 253 253 err_num = -EAGAIN; 254 254 break; 255 + default: 256 + err_num = -EFAULT; 255 257 } 258 + break; 256 259 default: 257 260 err_num = -EFAULT; 258 261 }
+27 -2
drivers/infiniband/hw/vmw_pvrdma/pvrdma.h
··· 416 416 return (enum ib_wc_status)status; 417 417 } 418 418 419 - static inline int pvrdma_wc_opcode_to_ib(int opcode) 419 + static inline int pvrdma_wc_opcode_to_ib(unsigned int opcode) 420 420 { 421 - return opcode; 421 + switch (opcode) { 422 + case PVRDMA_WC_SEND: 423 + return IB_WC_SEND; 424 + case PVRDMA_WC_RDMA_WRITE: 425 + return IB_WC_RDMA_WRITE; 426 + case PVRDMA_WC_RDMA_READ: 427 + return IB_WC_RDMA_READ; 428 + case PVRDMA_WC_COMP_SWAP: 429 + return IB_WC_COMP_SWAP; 430 + case PVRDMA_WC_FETCH_ADD: 431 + return IB_WC_FETCH_ADD; 432 + case PVRDMA_WC_LOCAL_INV: 433 + return IB_WC_LOCAL_INV; 434 + case PVRDMA_WC_FAST_REG_MR: 435 + return IB_WC_REG_MR; 436 + case PVRDMA_WC_MASKED_COMP_SWAP: 437 + return IB_WC_MASKED_COMP_SWAP; 438 + case PVRDMA_WC_MASKED_FETCH_ADD: 439 + return IB_WC_MASKED_FETCH_ADD; 440 + case PVRDMA_WC_RECV: 441 + return IB_WC_RECV; 442 + case PVRDMA_WC_RECV_RDMA_WITH_IMM: 443 + return IB_WC_RECV_RDMA_WITH_IMM; 444 + default: 445 + return IB_WC_SEND; 446 + } 422 447 } 423 448 424 449 static inline int pvrdma_wc_flags_to_ib(int flags)
+11 -5
drivers/infiniband/ulp/ipoib/ipoib_cm.c
··· 823 823 wc->status != IB_WC_WR_FLUSH_ERR) { 824 824 struct ipoib_neigh *neigh; 825 825 826 - if (wc->status != IB_WC_RNR_RETRY_EXC_ERR) 827 - ipoib_warn(priv, "failed cm send event (status=%d, wrid=%d vend_err %x)\n", 828 - wc->status, wr_id, wc->vendor_err); 826 + /* IB_WC[_RNR]_RETRY_EXC_ERR error is part of the life cycle, 827 + * so don't make waves. 828 + */ 829 + if (wc->status == IB_WC_RNR_RETRY_EXC_ERR || 830 + wc->status == IB_WC_RETRY_EXC_ERR) 831 + ipoib_dbg(priv, 832 + "%s: failed cm send event (status=%d, wrid=%d vend_err 0x%x)\n", 833 + __func__, wc->status, wr_id, wc->vendor_err); 829 834 else 830 - ipoib_dbg(priv, "failed cm send event (status=%d, wrid=%d vend_err %x)\n", 831 - wc->status, wr_id, wc->vendor_err); 835 + ipoib_warn(priv, 836 + "%s: failed cm send event (status=%d, wrid=%d vend_err 0x%x)\n", 837 + __func__, wc->status, wr_id, wc->vendor_err); 832 838 833 839 spin_lock_irqsave(&priv->lock, flags); 834 840 neigh = tx->neigh;