Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

Merge tag 'rdma-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband

Pull InfiniBand/RDMA fixes from Roland Dreier:
- Fixes to new ocrdma driver
- Typo in test in CMA

* tag 'rdma-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband:
RDMA/cma: QP type check on received REQs should be AND not OR
RDMA/ocrdma: Fix off by one in ocrdma_query_gid()
RDMA/ocrdma: Fixed RQ error CQE polling
RDMA/ocrdma: Correct queue SGE calculation
RDMA/ocrdma: Correct reported max queue sizes
RDMA/ocrdma: Fixed GID table for vlan and events

+55 -46
+1 -1
drivers/infiniband/core/cma.c
··· 1184 1184 1185 1185 static int cma_check_req_qp_type(struct rdma_cm_id *id, struct ib_cm_event *ib_event) 1186 1186 { 1187 - return (((ib_event->event == IB_CM_REQ_RECEIVED) || 1187 + return (((ib_event->event == IB_CM_REQ_RECEIVED) && 1188 1188 (ib_event->param.req_rcvd.qp_type == id->qp_type)) || 1189 1189 ((ib_event->event == IB_CM_SIDR_REQ_RECEIVED) && 1190 1190 (id->qp_type == IB_QPT_UD)) ||
+1
drivers/infiniband/hw/ocrdma/ocrdma.h
··· 61 61 u32 max_inline_data; 62 62 int max_send_sge; 63 63 int max_recv_sge; 64 + int max_srq_sge; 64 65 int max_mr; 65 66 u64 max_mr_size; 66 67 u32 max_num_mr_pbl;
+8 -10
drivers/infiniband/hw/ocrdma/ocrdma_hw.c
··· 990 990 struct ocrdma_dev_attr *attr, 991 991 struct ocrdma_mbx_query_config *rsp) 992 992 { 993 - int max_q_mem; 994 - 995 993 attr->max_pd = 996 994 (rsp->max_pd_ca_ack_delay & OCRDMA_MBX_QUERY_CFG_MAX_PD_MASK) >> 997 995 OCRDMA_MBX_QUERY_CFG_MAX_PD_SHIFT; ··· 1002 1004 attr->max_recv_sge = (rsp->max_write_send_sge & 1003 1005 OCRDMA_MBX_QUERY_CFG_MAX_SEND_SGE_MASK) >> 1004 1006 OCRDMA_MBX_QUERY_CFG_MAX_SEND_SGE_SHIFT; 1007 + attr->max_srq_sge = (rsp->max_srq_rqe_sge & 1008 + OCRDMA_MBX_QUERY_CFG_MAX_SRQ_SGE_MASK) >> 1009 + OCRDMA_MBX_QUERY_CFG_MAX_SRQ_SGE_OFFSET; 1005 1010 attr->max_ord_per_qp = (rsp->max_ird_ord_per_qp & 1006 1011 OCRDMA_MBX_QUERY_CFG_MAX_ORD_PER_QP_MASK) >> 1007 1012 OCRDMA_MBX_QUERY_CFG_MAX_ORD_PER_QP_SHIFT; ··· 1038 1037 attr->max_inline_data = 1039 1038 attr->wqe_size - (sizeof(struct ocrdma_hdr_wqe) + 1040 1039 sizeof(struct ocrdma_sge)); 1041 - max_q_mem = OCRDMA_Q_PAGE_BASE_SIZE << (OCRDMA_MAX_Q_PAGE_SIZE_CNT - 1); 1042 - /* hw can queue one less then the configured size, 1043 - * so publish less by one to stack. 1044 - */ 1045 1040 if (dev->nic_info.dev_family == OCRDMA_GEN2_FAMILY) { 1046 - dev->attr.max_wqe = max_q_mem / dev->attr.wqe_size; 1047 1041 attr->ird = 1; 1048 1042 attr->ird_page_size = OCRDMA_MIN_Q_PAGE_SIZE; 1049 1043 attr->num_ird_pages = MAX_OCRDMA_IRD_PAGES; 1050 - } else 1051 - dev->attr.max_wqe = (max_q_mem / dev->attr.wqe_size) - 1; 1052 - dev->attr.max_rqe = (max_q_mem / dev->attr.rqe_size) - 1; 1044 + } 1045 + dev->attr.max_wqe = rsp->max_wqes_rqes_per_q >> 1046 + OCRDMA_MBX_QUERY_CFG_MAX_WQES_PER_WQ_OFFSET; 1047 + dev->attr.max_rqe = rsp->max_wqes_rqes_per_q & 1048 + OCRDMA_MBX_QUERY_CFG_MAX_RQES_PER_RQ_MASK; 1053 1049 } 1054 1050 1055 1051 static int ocrdma_check_fw_config(struct ocrdma_dev *dev,
+34 -29
drivers/infiniband/hw/ocrdma/ocrdma_main.c
··· 97 97 sgid->raw[15] = mac_addr[5]; 98 98 } 99 99 100 - static void ocrdma_add_sgid(struct ocrdma_dev *dev, unsigned char *mac_addr, 100 + static bool ocrdma_add_sgid(struct ocrdma_dev *dev, unsigned char *mac_addr, 101 101 bool is_vlan, u16 vlan_id) 102 102 { 103 103 int i; 104 - bool found = false; 105 104 union ib_gid new_sgid; 106 - int free_idx = OCRDMA_MAX_SGID; 107 105 unsigned long flags; 108 106 109 107 memset(&ocrdma_zero_sgid, 0, sizeof(union ib_gid)); ··· 113 115 if (!memcmp(&dev->sgid_tbl[i], &ocrdma_zero_sgid, 114 116 sizeof(union ib_gid))) { 115 117 /* found free entry */ 116 - if (!found) { 117 - free_idx = i; 118 - found = true; 119 - break; 120 - } 118 + memcpy(&dev->sgid_tbl[i], &new_sgid, 119 + sizeof(union ib_gid)); 120 + spin_unlock_irqrestore(&dev->sgid_lock, flags); 121 + return true; 121 122 } else if (!memcmp(&dev->sgid_tbl[i], &new_sgid, 122 123 sizeof(union ib_gid))) { 123 124 /* entry already present, no addition is required. */ 124 125 spin_unlock_irqrestore(&dev->sgid_lock, flags); 125 - return; 126 + return false; 126 127 } 127 128 } 128 - /* if entry doesn't exist and if table has some space, add entry */ 129 - if (found) 130 - memcpy(&dev->sgid_tbl[free_idx], &new_sgid, 131 - sizeof(union ib_gid)); 132 129 spin_unlock_irqrestore(&dev->sgid_lock, flags); 130 + return false; 133 131 } 134 132 135 133 static bool ocrdma_del_sgid(struct ocrdma_dev *dev, unsigned char *mac_addr, ··· 161 167 ocrdma_get_guid(dev, &sgid->raw[8]); 162 168 } 163 169 164 - static int ocrdma_build_sgid_tbl(struct ocrdma_dev *dev) 170 + #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE) 171 + static void ocrdma_add_vlan_sgids(struct ocrdma_dev *dev) 165 172 { 166 173 struct net_device *netdev, *tmp; 167 174 u16 vlan_id; 168 175 bool is_vlan; 169 176 170 177 netdev = dev->nic_info.netdev; 171 - 172 - ocrdma_add_default_sgid(dev); 173 178 174 179 rcu_read_lock(); 175 180 for_each_netdev_rcu(&init_net, tmp) { ··· 187 194 } 188 195 } 189 196 
rcu_read_unlock(); 197 + } 198 + #else 199 + static void ocrdma_add_vlan_sgids(struct ocrdma_dev *dev) 200 + { 201 + 202 + } 203 + #endif /* VLAN */ 204 + 205 + static int ocrdma_build_sgid_tbl(struct ocrdma_dev *dev) 206 + { 207 + ocrdma_add_default_sgid(dev); 208 + ocrdma_add_vlan_sgids(dev); 190 209 return 0; 191 210 } 192 211 193 - #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) 212 + #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) || \ 213 + defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE) 194 214 195 215 static int ocrdma_inet6addr_event(struct notifier_block *notifier, 196 216 unsigned long event, void *ptr) ··· 214 208 struct ib_event gid_event; 215 209 struct ocrdma_dev *dev; 216 210 bool found = false; 211 + bool updated = false; 217 212 bool is_vlan = false; 218 213 u16 vid = 0; 219 214 ··· 240 233 mutex_lock(&dev->dev_lock); 241 234 switch (event) { 242 235 case NETDEV_UP: 243 - ocrdma_add_sgid(dev, netdev->dev_addr, is_vlan, vid); 236 + updated = ocrdma_add_sgid(dev, netdev->dev_addr, is_vlan, vid); 244 237 break; 245 238 case NETDEV_DOWN: 246 - found = ocrdma_del_sgid(dev, netdev->dev_addr, is_vlan, vid); 247 - if (found) { 248 - /* found the matching entry, notify 249 - * the consumers about it 250 - */ 251 - gid_event.device = &dev->ibdev; 252 - gid_event.element.port_num = 1; 253 - gid_event.event = IB_EVENT_GID_CHANGE; 254 - ib_dispatch_event(&gid_event); 255 - } 239 + updated = ocrdma_del_sgid(dev, netdev->dev_addr, is_vlan, vid); 256 240 break; 257 241 default: 258 242 break; 243 + } 244 + if (updated) { 245 + /* GID table updated, notify the consumers about it */ 246 + gid_event.device = &dev->ibdev; 247 + gid_event.element.port_num = 1; 248 + gid_event.event = IB_EVENT_GID_CHANGE; 249 + ib_dispatch_event(&gid_event); 259 250 } 260 251 mutex_unlock(&dev->dev_lock); 261 252 return NOTIFY_OK; ··· 263 258 .notifier_call = ocrdma_inet6addr_event 264 259 }; 265 260 266 - #endif /* IPV6 */ 261 + #endif /* IPV6 and VLAN */ 267 262 268 263 static enum rdma_link_layer ocrdma_link_layer(struct ib_device *device, 269 264 u8 port_num)
+4 -1
drivers/infiniband/hw/ocrdma/ocrdma_sli.h
··· 418 418 419 419 OCRDMA_MBX_QUERY_CFG_MAX_SEND_SGE_SHIFT = 0, 420 420 OCRDMA_MBX_QUERY_CFG_MAX_SEND_SGE_MASK = 0xFFFF, 421 + OCRDMA_MBX_QUERY_CFG_MAX_WRITE_SGE_SHIFT = 16, 422 + OCRDMA_MBX_QUERY_CFG_MAX_WRITE_SGE_MASK = 0xFFFF << 423 + OCRDMA_MBX_QUERY_CFG_MAX_WRITE_SGE_SHIFT, 421 424 422 425 OCRDMA_MBX_QUERY_CFG_MAX_ORD_PER_QP_SHIFT = 0, 423 426 OCRDMA_MBX_QUERY_CFG_MAX_ORD_PER_QP_MASK = 0xFFFF, ··· 461 458 OCRDMA_MBX_QUERY_CFG_MAX_WQES_PER_WQ_OFFSET, 462 459 OCRDMA_MBX_QUERY_CFG_MAX_RQES_PER_RQ_OFFSET = 0, 463 460 OCRDMA_MBX_QUERY_CFG_MAX_RQES_PER_RQ_MASK = 0xFFFF << 464 - OCRDMA_MBX_QUERY_CFG_MAX_WQES_PER_WQ_OFFSET, 461 + OCRDMA_MBX_QUERY_CFG_MAX_RQES_PER_RQ_OFFSET, 465 462 466 463 OCRDMA_MBX_QUERY_CFG_MAX_CQ_OFFSET = 16, 467 464 OCRDMA_MBX_QUERY_CFG_MAX_CQ_MASK = 0xFFFF <<
+7 -5
drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
··· 53 53 54 54 dev = get_ocrdma_dev(ibdev); 55 55 memset(sgid, 0, sizeof(*sgid)); 56 - if (index > OCRDMA_MAX_SGID) 56 + if (index >= OCRDMA_MAX_SGID) 57 57 return -EINVAL; 58 58 59 59 memcpy(sgid, &dev->sgid_tbl[index], sizeof(*sgid)); ··· 83 83 IB_DEVICE_SHUTDOWN_PORT | 84 84 IB_DEVICE_SYS_IMAGE_GUID | 85 85 IB_DEVICE_LOCAL_DMA_LKEY; 86 - attr->max_sge = dev->attr.max_send_sge; 87 - attr->max_sge_rd = dev->attr.max_send_sge; 86 + attr->max_sge = min(dev->attr.max_send_sge, dev->attr.max_srq_sge); 87 + attr->max_sge_rd = 0; 88 88 attr->max_cq = dev->attr.max_cq; 89 89 attr->max_cqe = dev->attr.max_cqe; 90 90 attr->max_mr = dev->attr.max_mr; ··· 97 97 min(dev->attr.max_ord_per_qp, dev->attr.max_ird_per_qp); 98 98 attr->max_qp_init_rd_atom = dev->attr.max_ord_per_qp; 99 99 attr->max_srq = (dev->attr.max_qp - 1); 100 - attr->max_srq_sge = attr->max_sge; 100 + attr->max_srq_sge = attr->max_srq_sge; 101 101 attr->max_srq_wr = dev->attr.max_rqe; 102 102 attr->local_ca_ack_delay = dev->attr.local_ca_ack_delay; 103 103 attr->max_fast_reg_page_list_len = 0; ··· 2301 2301 *stop = true; 2302 2302 expand = false; 2303 2303 } 2304 - } else 2304 + } else { 2305 + *polled = true; 2305 2306 expand = ocrdma_update_err_rcqe(ibwc, cqe, qp, status); 2307 + } 2306 2308 return expand; 2307 2309 } 2308 2310