Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

Merge branch 'for-linus' of master.kernel.org:/pub/scm/linux/kernel/git/roland/infiniband

* 'for-linus' of master.kernel.org:/pub/scm/linux/kernel/git/roland/infiniband:
IB/ipath: deref correct pointer when using kernel SMA
IB/ipath: fix null deref during rdma ops
IB/ipath: register as IB device owner
IB/ipath: enable PE800 receive interrupts on user ports
IB/ipath: enable GPIO interrupt on HT-460
IB/ipath: fix NULL dereference during cleanup
IB/ipath: replace uses of LIST_POISON
IB/ipath: fix reporting of driver version to userspace
IB/ipath: don't modify QP if changes fail
IB/ipath: fix spinlock recursion bug

+92 -75
+11 -11
drivers/infiniband/hw/ipath/ipath_driver.c
··· 1905 1905 } else 1906 1906 ipath_dbg("irq is 0, not doing free_irq " 1907 1907 "for unit %u\n", dd->ipath_unit); 1908 + 1909 + /* 1910 + * we check for NULL here, because it's outside 1911 + * the kregbase check, and we need to call it 1912 + * after the free_irq. Thus it's possible that 1913 + * the function pointers were never initialized. 1914 + */ 1915 + if (dd->ipath_f_cleanup) 1916 + /* clean up chip-specific stuff */ 1917 + dd->ipath_f_cleanup(dd); 1918 + 1908 1919 dd->pcidev = NULL; 1909 1920 } 1910 - 1911 - /* 1912 - * we check for NULL here, because it's outside the kregbase 1913 - * check, and we need to call it after the free_irq. Thus 1914 - * it's possible that the function pointers were never 1915 - * initialized. 1916 - */ 1917 - if (dd->ipath_f_cleanup) 1918 - /* clean up chip-specific stuff */ 1919 - dd->ipath_f_cleanup(dd); 1920 - 1921 1921 spin_lock_irqsave(&ipath_devs_lock, flags); 1922 1922 } 1923 1923
+3 -4
drivers/infiniband/hw/ipath/ipath_eeprom.c
··· 505 505 * ipath_get_guid - get the GUID from the i2c device 506 506 * @dd: the infinipath device 507 507 * 508 - * When we add the multi-chip support, we will probably have to add 509 - * the ability to use the number of guids field, and get the guid from 510 - * the first chip's flash, to use for all of them. 508 + * We have the capability to use the ipath_nguid field, and get 509 + * the guid from the first chip's flash, to use for all of them. 511 510 */ 512 - void ipath_get_guid(struct ipath_devdata *dd) 511 + void ipath_get_eeprom_info(struct ipath_devdata *dd) 513 512 { 514 513 void *buf; 515 514 struct ipath_flash *ifp;
+5 -1
drivers/infiniband/hw/ipath/ipath_file_ops.c
··· 139 139 kinfo->spi_piosize = dd->ipath_ibmaxlen; 140 140 kinfo->spi_mtu = dd->ipath_ibmaxlen; /* maxlen, not ibmtu */ 141 141 kinfo->spi_port = pd->port_port; 142 - kinfo->spi_sw_version = IPATH_USER_SWVERSION; 142 + kinfo->spi_sw_version = IPATH_KERN_SWVERSION; 143 143 kinfo->spi_hw_version = dd->ipath_revision; 144 144 145 145 if (copy_to_user(ubase, kinfo, sizeof(*kinfo))) ··· 1224 1224 1225 1225 if (tail == head) { 1226 1226 set_bit(IPATH_PORT_WAITING_RCV, &pd->port_flag); 1227 + if(dd->ipath_rhdrhead_intr_off) /* arm rcv interrupt */ 1228 + (void)ipath_write_ureg(dd, ur_rcvhdrhead, 1229 + dd->ipath_rhdrhead_intr_off 1230 + | head, pd->port_port); 1227 1231 poll_wait(fp, &pd->port_wait, pt); 1228 1232 1229 1233 if (test_bit(IPATH_PORT_WAITING_RCV, &pd->port_flag)) {
+19 -2
drivers/infiniband/hw/ipath/ipath_ht400.c
··· 607 607 case 4: /* Ponderosa is one of the bringup boards */ 608 608 n = "Ponderosa"; 609 609 break; 610 - case 5: /* HT-460 original production board */ 610 + case 5: 611 + /* 612 + * HT-460 original production board; two production levels, with 613 + * different serial number ranges. See ipath_ht_early_init() for 614 + * case where we enable IPATH_GPIO_INTR for later serial # range. 615 + */ 611 616 n = "InfiniPath_HT-460"; 612 617 break; 613 618 case 6: ··· 647 642 if (n) 648 643 snprintf(name, namelen, "%s", n); 649 644 650 - if (dd->ipath_majrev != 3 || dd->ipath_minrev != 2) { 645 + if (dd->ipath_majrev != 3 || (dd->ipath_minrev < 2 || dd->ipath_minrev > 3)) { 651 646 /* 652 647 * This version of the driver only supports the HT-400 653 648 * Rev 3.2 ··· 1525 1520 */ 1526 1521 ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, 1527 1522 INFINIPATH_S_ABORT); 1523 + 1524 + ipath_get_eeprom_info(dd); 1525 + if(dd->ipath_boardrev == 5 && dd->ipath_serial[0] == '1' && 1526 + dd->ipath_serial[1] == '2' && dd->ipath_serial[2] == '8') { 1527 + /* 1528 + * Later production HT-460 has same changes as HT-465, so 1529 + * can use GPIO interrupts. They have serial #'s starting 1530 + * with 128, rather than 112. 1531 + */ 1532 + dd->ipath_flags |= IPATH_GPIO_INTR; 1533 + dd->ipath_flags &= ~IPATH_POLL_RX_INTR; 1534 + } 1528 1535 return 0; 1529 1536 } 1530 1537
-1
drivers/infiniband/hw/ipath/ipath_init_chip.c
··· 879 879 880 880 done: 881 881 if (!ret) { 882 - ipath_get_guid(dd); 883 882 *dd->ipath_statusp |= IPATH_STATUS_CHIP_PRESENT; 884 883 if (!dd->ipath_f_intrsetup(dd)) { 885 884 /* now we can enable all interrupts from the chip */
+1 -1
drivers/infiniband/hw/ipath/ipath_kernel.h
··· 650 650 void ipath_init_pe800_funcs(struct ipath_devdata *); 651 651 /* init HT-400-specific func */ 652 652 void ipath_init_ht400_funcs(struct ipath_devdata *); 653 - void ipath_get_guid(struct ipath_devdata *); 653 + void ipath_get_eeprom_info(struct ipath_devdata *); 654 654 u64 ipath_snap_cntr(struct ipath_devdata *, ipath_creg); 655 655 656 656 /*
-6
drivers/infiniband/hw/ipath/ipath_keys.c
··· 136 136 ret = 1; 137 137 goto bail; 138 138 } 139 - spin_lock(&rkt->lock); 140 139 mr = rkt->table[(sge->lkey >> (32 - ib_ipath_lkey_table_size))]; 141 - spin_unlock(&rkt->lock); 142 140 if (unlikely(mr == NULL || mr->lkey != sge->lkey)) { 143 141 ret = 0; 144 142 goto bail; ··· 182 184 * @acc: access flags 183 185 * 184 186 * Return 1 if successful, otherwise 0. 185 - * 186 - * The QP r_rq.lock should be held. 187 187 */ 188 188 int ipath_rkey_ok(struct ipath_ibdev *dev, struct ipath_sge_state *ss, 189 189 u32 len, u64 vaddr, u32 rkey, int acc) ··· 192 196 size_t off; 193 197 int ret; 194 198 195 - spin_lock(&rkt->lock); 196 199 mr = rkt->table[(rkey >> (32 - ib_ipath_lkey_table_size))]; 197 - spin_unlock(&rkt->lock); 198 200 if (unlikely(mr == NULL || mr->lkey != rkey)) { 199 201 ret = 0; 200 202 goto bail;
+7 -5
drivers/infiniband/hw/ipath/ipath_layer.c
··· 872 872 update_sge(ss, len); 873 873 length -= len; 874 874 } 875 + /* Update address before sending packet. */ 876 + update_sge(ss, length); 875 877 /* must flush early everything before trigger word */ 876 878 ipath_flush_wc(); 877 879 __raw_writel(last, piobuf); 878 880 /* be sure trigger word is written */ 879 881 ipath_flush_wc(); 880 - update_sge(ss, length); 881 882 } 882 883 883 884 /** ··· 944 943 if (likely(ss->num_sge == 1 && len <= ss->sge.length && 945 944 !((unsigned long)ss->sge.vaddr & (sizeof(u32) - 1)))) { 946 945 u32 w; 946 + u32 *addr = (u32 *) ss->sge.vaddr; 947 947 948 + /* Update address before sending packet. */ 949 + update_sge(ss, len); 948 950 /* Need to round up for the last dword in the packet. */ 949 951 w = (len + 3) >> 2; 950 - __iowrite32_copy(piobuf, ss->sge.vaddr, w - 1); 952 + __iowrite32_copy(piobuf, addr, w - 1); 951 953 /* must flush early everything before trigger word */ 952 954 ipath_flush_wc(); 953 - __raw_writel(((u32 *) ss->sge.vaddr)[w - 1], 954 - piobuf + w - 1); 955 + __raw_writel(addr[w - 1], piobuf + w - 1); 955 956 /* be sure trigger word is written */ 956 957 ipath_flush_wc(); 957 - update_sge(ss, len); 958 958 ret = 0; 959 959 goto bail; 960 960 }
+2
drivers/infiniband/hw/ipath/ipath_pe800.c
··· 1180 1180 */ 1181 1181 dd->ipath_rhdrhead_intr_off = 1ULL<<32; 1182 1182 1183 + ipath_get_eeprom_info(dd); 1184 + 1183 1185 return 0; 1184 1186 } 1185 1187
+32 -32
drivers/infiniband/hw/ipath/ipath_qp.c
··· 375 375 376 376 spin_lock(&dev->pending_lock); 377 377 /* XXX What if its already removed by the timeout code? */ 378 - if (qp->timerwait.next != LIST_POISON1) 379 - list_del(&qp->timerwait); 380 - if (qp->piowait.next != LIST_POISON1) 381 - list_del(&qp->piowait); 378 + if (!list_empty(&qp->timerwait)) 379 + list_del_init(&qp->timerwait); 380 + if (!list_empty(&qp->piowait)) 381 + list_del_init(&qp->piowait); 382 382 spin_unlock(&dev->pending_lock); 383 383 384 384 wc.status = IB_WC_WR_FLUSH_ERR; ··· 427 427 int ipath_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, 428 428 int attr_mask) 429 429 { 430 + struct ipath_ibdev *dev = to_idev(ibqp->device); 430 431 struct ipath_qp *qp = to_iqp(ibqp); 431 432 enum ib_qp_state cur_state, new_state; 432 433 unsigned long flags; ··· 444 443 attr_mask)) 445 444 goto inval; 446 445 446 + if (attr_mask & IB_QP_AV) 447 + if (attr->ah_attr.dlid == 0 || 448 + attr->ah_attr.dlid >= IPS_MULTICAST_LID_BASE) 449 + goto inval; 450 + 451 + if (attr_mask & IB_QP_PKEY_INDEX) 452 + if (attr->pkey_index >= ipath_layer_get_npkeys(dev->dd)) 453 + goto inval; 454 + 455 + if (attr_mask & IB_QP_MIN_RNR_TIMER) 456 + if (attr->min_rnr_timer > 31) 457 + goto inval; 458 + 447 459 switch (new_state) { 448 460 case IB_QPS_RESET: 449 461 ipath_reset_qp(qp); ··· 471 457 472 458 } 473 459 474 - if (attr_mask & IB_QP_PKEY_INDEX) { 475 - struct ipath_ibdev *dev = to_idev(ibqp->device); 476 - 477 - if (attr->pkey_index >= ipath_layer_get_npkeys(dev->dd)) 478 - goto inval; 460 + if (attr_mask & IB_QP_PKEY_INDEX) 479 461 qp->s_pkey_index = attr->pkey_index; 480 - } 481 462 482 463 if (attr_mask & IB_QP_DEST_QPN) 483 464 qp->remote_qpn = attr->dest_qp_num; ··· 488 479 if (attr_mask & IB_QP_ACCESS_FLAGS) 489 480 qp->qp_access_flags = attr->qp_access_flags; 490 481 491 - if (attr_mask & IB_QP_AV) { 492 - if (attr->ah_attr.dlid == 0 || 493 - attr->ah_attr.dlid >= IPS_MULTICAST_LID_BASE) 494 - goto inval; 482 + if (attr_mask & IB_QP_AV) 495 483 
qp->remote_ah_attr = attr->ah_attr; 496 - } 497 484 498 485 if (attr_mask & IB_QP_PATH_MTU) 499 486 qp->path_mtu = attr->path_mtu; ··· 504 499 qp->s_rnr_retry_cnt = qp->s_rnr_retry; 505 500 } 506 501 507 - if (attr_mask & IB_QP_MIN_RNR_TIMER) { 508 - if (attr->min_rnr_timer > 31) 509 - goto inval; 502 + if (attr_mask & IB_QP_MIN_RNR_TIMER) 510 503 qp->s_min_rnr_timer = attr->min_rnr_timer; 511 - } 512 504 513 505 if (attr_mask & IB_QP_QKEY) 514 506 qp->qkey = attr->qkey; ··· 712 710 init_attr->qp_type == IB_QPT_RC ? 713 711 ipath_do_rc_send : ipath_do_uc_send, 714 712 (unsigned long)qp); 715 - qp->piowait.next = LIST_POISON1; 716 - qp->piowait.prev = LIST_POISON2; 717 - qp->timerwait.next = LIST_POISON1; 718 - qp->timerwait.prev = LIST_POISON2; 713 + INIT_LIST_HEAD(&qp->piowait); 714 + INIT_LIST_HEAD(&qp->timerwait); 719 715 qp->state = IB_QPS_RESET; 720 716 qp->s_wq = swq; 721 717 qp->s_size = init_attr->cap.max_send_wr + 1; ··· 734 734 ipath_reset_qp(qp); 735 735 736 736 /* Tell the core driver that the kernel SMA is present. */ 737 - if (qp->ibqp.qp_type == IB_QPT_SMI) 737 + if (init_attr->qp_type == IB_QPT_SMI) 738 738 ipath_layer_set_verbs_flags(dev->dd, 739 739 IPATH_VERBS_KERNEL_SMA); 740 740 break; ··· 783 783 784 784 /* Make sure the QP isn't on the timeout list. */ 785 785 spin_lock_irqsave(&dev->pending_lock, flags); 786 - if (qp->timerwait.next != LIST_POISON1) 787 - list_del(&qp->timerwait); 788 - if (qp->piowait.next != LIST_POISON1) 789 - list_del(&qp->piowait); 786 + if (!list_empty(&qp->timerwait)) 787 + list_del_init(&qp->timerwait); 788 + if (!list_empty(&qp->piowait)) 789 + list_del_init(&qp->piowait); 790 790 spin_unlock_irqrestore(&dev->pending_lock, flags); 791 791 792 792 /* ··· 855 855 856 856 spin_lock(&dev->pending_lock); 857 857 /* XXX What if its already removed by the timeout code? 
*/ 858 - if (qp->timerwait.next != LIST_POISON1) 859 - list_del(&qp->timerwait); 860 - if (qp->piowait.next != LIST_POISON1) 861 - list_del(&qp->piowait); 858 + if (!list_empty(&qp->timerwait)) 859 + list_del_init(&qp->timerwait); 860 + if (!list_empty(&qp->piowait)) 861 + list_del_init(&qp->piowait); 862 862 spin_unlock(&dev->pending_lock); 863 863 864 864 ipath_cq_enter(to_icq(qp->ibqp.send_cq), wc, 1);
+7 -8
drivers/infiniband/hw/ipath/ipath_rc.c
··· 57 57 qp->s_len = wqe->length - len; 58 58 dev = to_idev(qp->ibqp.device); 59 59 spin_lock(&dev->pending_lock); 60 - if (qp->timerwait.next == LIST_POISON1) 60 + if (list_empty(&qp->timerwait)) 61 61 list_add_tail(&qp->timerwait, 62 62 &dev->pending[dev->pending_index]); 63 63 spin_unlock(&dev->pending_lock); ··· 356 356 if ((int)(qp->s_psn - qp->s_next_psn) > 0) 357 357 qp->s_next_psn = qp->s_psn; 358 358 spin_lock(&dev->pending_lock); 359 - if (qp->timerwait.next == LIST_POISON1) 359 + if (list_empty(&qp->timerwait)) 360 360 list_add_tail(&qp->timerwait, 361 361 &dev->pending[dev->pending_index]); 362 362 spin_unlock(&dev->pending_lock); ··· 726 726 */ 727 727 dev = to_idev(qp->ibqp.device); 728 728 spin_lock(&dev->pending_lock); 729 - if (qp->timerwait.next != LIST_POISON1) 730 - list_del(&qp->timerwait); 729 + if (!list_empty(&qp->timerwait)) 730 + list_del_init(&qp->timerwait); 731 731 spin_unlock(&dev->pending_lock); 732 732 733 733 if (wqe->wr.opcode == IB_WR_RDMA_READ) ··· 886 886 * just won't find anything to restart if we ACK everything. 887 887 */ 888 888 spin_lock(&dev->pending_lock); 889 - if (qp->timerwait.next != LIST_POISON1) 890 - list_del(&qp->timerwait); 889 + if (!list_empty(&qp->timerwait)) 890 + list_del_init(&qp->timerwait); 891 891 spin_unlock(&dev->pending_lock); 892 892 893 893 /* ··· 1194 1194 IB_WR_RDMA_READ)) 1195 1195 goto ack_done; 1196 1196 spin_lock(&dev->pending_lock); 1197 - if (qp->s_rnr_timeout == 0 && 1198 - qp->timerwait.next != LIST_POISON1) 1197 + if (qp->s_rnr_timeout == 0 && !list_empty(&qp->timerwait)) 1199 1198 list_move_tail(&qp->timerwait, 1200 1199 &dev->pending[dev->pending_index]); 1201 1200 spin_unlock(&dev->pending_lock);
+1 -1
drivers/infiniband/hw/ipath/ipath_ruc.c
··· 435 435 unsigned long flags; 436 436 437 437 spin_lock_irqsave(&dev->pending_lock, flags); 438 - if (qp->piowait.next == LIST_POISON1) 438 + if (list_empty(&qp->piowait)) 439 439 list_add_tail(&qp->piowait, &dev->piowait); 440 440 spin_unlock_irqrestore(&dev->pending_lock, flags); 441 441 /*
+4 -3
drivers/infiniband/hw/ipath/ipath_verbs.c
··· 464 464 last = &dev->pending[dev->pending_index]; 465 465 while (!list_empty(last)) { 466 466 qp = list_entry(last->next, struct ipath_qp, timerwait); 467 - list_del(&qp->timerwait); 467 + list_del_init(&qp->timerwait); 468 468 qp->timer_next = resend; 469 469 resend = qp; 470 470 atomic_inc(&qp->refcount); ··· 474 474 qp = list_entry(last->next, struct ipath_qp, timerwait); 475 475 if (--qp->s_rnr_timeout == 0) { 476 476 do { 477 - list_del(&qp->timerwait); 477 + list_del_init(&qp->timerwait); 478 478 tasklet_hi_schedule(&qp->s_task); 479 479 if (list_empty(last)) 480 480 break; ··· 554 554 while (!list_empty(&dev->piowait)) { 555 555 qp = list_entry(dev->piowait.next, struct ipath_qp, 556 556 piowait); 557 - list_del(&qp->piowait); 557 + list_del_init(&qp->piowait); 558 558 tasklet_hi_schedule(&qp->s_task); 559 559 } 560 560 spin_unlock_irqrestore(&dev->pending_lock, flags); ··· 951 951 idev->dd = dd; 952 952 953 953 strlcpy(dev->name, "ipath%d", IB_DEVICE_NAME_MAX); 954 + dev->owner = THIS_MODULE; 954 955 dev->node_guid = ipath_layer_get_guid(dd); 955 956 dev->uverbs_abi_ver = IPATH_UVERBS_ABI_VERSION; 956 957 dev->uverbs_cmd_mask =