Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Pull networking updates from David Miller:

1) Make syn floods consume significantly less resources by

a) Not pre-COW'ing routing metrics for SYN/ACKs
b) Mirroring the device queue mapping of the SYN for the SYN/ACK
reply.

Both from Eric Dumazet.

2) Fix calculation errors in Byte Queue Limiting, from Hiroaki SHIMODA.

3) Validate the length requested when building a paged SKB for a
socket, so we don't overrun the page vector accidentally. From Jason
Wang.

4) When netlabel is disabled, we abort all IP option processing when we
see a CIPSO option. This isn't the right thing to do, we should
simply skip over it and continue processing the remaining options
(if any). Fix from Paul Moore.

5) SRIOV fixes for the mellanox driver from Jack Morgenstein and Marcel
Apfelbaum.

6) 8139cp enables the receiver before the ring address is properly
programmed, which potentially lets the device crap over random
memory. Fix from Jason Wang.

7) e1000/e1000e fixes for i217 RST handling, and an improper buffer
address reference in jumbo RX frame processing from Bruce Allan and
Sebastian Andrzej Siewior, respectively.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net:
fec_mpc52xx: fix timestamp filtering
mcs7830: Implement link state detection
e1000e: fix Rapid Start Technology support for i217
e1000: look into the page instead of skb->data for e1000_tbi_adjust_stats()
r8169: call netif_napi_del at errpaths and at driver unload
tcp: reflect SYN queue_mapping into SYNACK packets
tcp: do not create inetpeer on SYNACK message
8139cp/8139too: terminate the eeprom access with the right opmode
8139cp: set ring address before enabling receiver
cipso: handle CIPSO options correctly when NetLabel is disabled
net: sock: validate data_len before allocating skb in sock_alloc_send_pskb()
bql: Avoid possible inconsistent calculation.
bql: Avoid unneeded limit decrement.
bql: Fix POSDIFF() to integer overflow aware.
net/mlx4_core: Fix obscure mlx4_cmd_box parameter in QUERY_DEV_CAP
net/mlx4_core: Check port out-of-range before using in mlx4_slave_cap
net/mlx4_core: Fixes for VF / Guest startup flow
net/mlx4_en: Fix improper use of "port" parameter in mlx4_en_event
net/mlx4_core: Fix number of EQs used in ICM initialisation
net/mlx4_core: Fix the slave_id out-of-range test in mlx4_eq_int

+201 -84
+1 -1
drivers/net/ethernet/freescale/fec_mpc52xx.c
··· 437 437 length = status & BCOM_FEC_RX_BD_LEN_MASK; 438 438 skb_put(rskb, length - 4); /* length without CRC32 */ 439 439 rskb->protocol = eth_type_trans(rskb, dev); 440 - if (!skb_defer_rx_timestamp(skb)) 440 + if (!skb_defer_rx_timestamp(rskb)) 441 441 netif_rx(rskb); 442 442 443 443 spin_lock(&priv->lock);
+1 -1
drivers/net/ethernet/intel/e1000/e1000_main.c
··· 4080 4080 spin_lock_irqsave(&adapter->stats_lock, 4081 4081 irq_flags); 4082 4082 e1000_tbi_adjust_stats(hw, &adapter->stats, 4083 - length, skb->data); 4083 + length, mapped); 4084 4084 spin_unlock_irqrestore(&adapter->stats_lock, 4085 4085 irq_flags); 4086 4086 length--;
+9 -9
drivers/net/ethernet/intel/e1000e/ich8lan.c
··· 165 165 #define I217_EEE_100_SUPPORTED (1 << 1) /* 100BaseTx EEE supported */ 166 166 167 167 /* Intel Rapid Start Technology Support */ 168 - #define I217_PROXY_CTRL PHY_REG(BM_WUC_PAGE, 70) 168 + #define I217_PROXY_CTRL BM_PHY_REG(BM_WUC_PAGE, 70) 169 169 #define I217_PROXY_CTRL_AUTO_DISABLE 0x0080 170 170 #define I217_SxCTRL PHY_REG(BM_PORT_CTRL_PAGE, 28) 171 - #define I217_SxCTRL_MASK 0x1000 171 + #define I217_SxCTRL_ENABLE_LPI_RESET 0x1000 172 172 #define I217_CGFREG PHY_REG(772, 29) 173 - #define I217_CGFREG_MASK 0x0002 173 + #define I217_CGFREG_ENABLE_MTA_RESET 0x0002 174 174 #define I217_MEMPWR PHY_REG(772, 26) 175 - #define I217_MEMPWR_MASK 0x0010 175 + #define I217_MEMPWR_DISABLE_SMB_RELEASE 0x0010 176 176 177 177 /* Strapping Option Register - RO */ 178 178 #define E1000_STRAP 0x0000C ··· 4089 4089 * power good. 4090 4090 */ 4091 4091 e1e_rphy_locked(hw, I217_SxCTRL, &phy_reg); 4092 - phy_reg |= I217_SxCTRL_MASK; 4092 + phy_reg |= I217_SxCTRL_ENABLE_LPI_RESET; 4093 4093 e1e_wphy_locked(hw, I217_SxCTRL, phy_reg); 4094 4094 4095 4095 /* Disable the SMB release on LCD reset. 
*/ 4096 4096 e1e_rphy_locked(hw, I217_MEMPWR, &phy_reg); 4097 - phy_reg &= ~I217_MEMPWR; 4097 + phy_reg &= ~I217_MEMPWR_DISABLE_SMB_RELEASE; 4098 4098 e1e_wphy_locked(hw, I217_MEMPWR, phy_reg); 4099 4099 } 4100 4100 ··· 4103 4103 * Support 4104 4104 */ 4105 4105 e1e_rphy_locked(hw, I217_CGFREG, &phy_reg); 4106 - phy_reg |= I217_CGFREG_MASK; 4106 + phy_reg |= I217_CGFREG_ENABLE_MTA_RESET; 4107 4107 e1e_wphy_locked(hw, I217_CGFREG, phy_reg); 4108 4108 4109 4109 release: ··· 4176 4176 ret_val = e1e_rphy_locked(hw, I217_MEMPWR, &phy_reg); 4177 4177 if (ret_val) 4178 4178 goto release; 4179 - phy_reg |= I217_MEMPWR_MASK; 4179 + phy_reg |= I217_MEMPWR_DISABLE_SMB_RELEASE; 4180 4180 e1e_wphy_locked(hw, I217_MEMPWR, phy_reg); 4181 4181 4182 4182 /* Disable Proxy */ ··· 4186 4186 ret_val = e1e_rphy_locked(hw, I217_CGFREG, &phy_reg); 4187 4187 if (ret_val) 4188 4188 goto release; 4189 - phy_reg &= ~I217_CGFREG_MASK; 4189 + phy_reg &= ~I217_CGFREG_ENABLE_MTA_RESET; 4190 4190 e1e_wphy_locked(hw, I217_CGFREG, phy_reg); 4191 4191 release: 4192 4192 if (ret_val)
+2 -2
drivers/net/ethernet/mellanox/mlx4/cmd.c
··· 617 617 .out_is_imm = false, 618 618 .encode_slave_id = false, 619 619 .verify = NULL, 620 - .wrapper = NULL 620 + .wrapper = mlx4_QUERY_FW_wrapper 621 621 }, 622 622 { 623 623 .opcode = MLX4_CMD_QUERY_HCA, ··· 635 635 .out_is_imm = false, 636 636 .encode_slave_id = false, 637 637 .verify = NULL, 638 - .wrapper = NULL 638 + .wrapper = mlx4_QUERY_DEV_CAP_wrapper 639 639 }, 640 640 { 641 641 .opcode = MLX4_CMD_QUERY_FUNC_CAP,
+7 -5
drivers/net/ethernet/mellanox/mlx4/en_main.c
··· 136 136 struct mlx4_en_dev *mdev = (struct mlx4_en_dev *) endev_ptr; 137 137 struct mlx4_en_priv *priv; 138 138 139 - if (!mdev->pndev[port]) 140 - return; 141 - 142 - priv = netdev_priv(mdev->pndev[port]); 143 139 switch (event) { 144 140 case MLX4_DEV_EVENT_PORT_UP: 145 141 case MLX4_DEV_EVENT_PORT_DOWN: 142 + if (!mdev->pndev[port]) 143 + return; 144 + priv = netdev_priv(mdev->pndev[port]); 146 145 /* To prevent races, we poll the link state in a separate 147 146 task rather than changing it here */ 148 147 priv->link_state = event; ··· 153 154 break; 154 155 155 156 default: 156 - mlx4_warn(mdev, "Unhandled event: %d\n", event); 157 + if (port < 1 || port > dev->caps.num_ports || 158 + !mdev->pndev[port]) 159 + return; 160 + mlx4_warn(mdev, "Unhandled event %d for port %d\n", event, port); 157 161 } 158 162 } 159 163
+1 -1
drivers/net/ethernet/mellanox/mlx4/eq.c
··· 426 426 427 427 mlx4_dbg(dev, "FLR event for slave: %d\n", flr_slave); 428 428 429 - if (flr_slave > dev->num_slaves) { 429 + if (flr_slave >= dev->num_slaves) { 430 430 mlx4_warn(dev, 431 431 "Got FLR for unknown function: %d\n", 432 432 flr_slave);
+48 -3
drivers/net/ethernet/mellanox/mlx4/fw.c
··· 412 412 outbox = mailbox->buf; 413 413 414 414 err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, MLX4_CMD_QUERY_DEV_CAP, 415 - MLX4_CMD_TIME_CLASS_A, !mlx4_is_slave(dev)); 415 + MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); 416 416 if (err) 417 417 goto out; 418 418 ··· 590 590 591 591 for (i = 1; i <= dev_cap->num_ports; ++i) { 592 592 err = mlx4_cmd_box(dev, 0, mailbox->dma, i, 0, MLX4_CMD_QUERY_PORT, 593 - MLX4_CMD_TIME_CLASS_B, 594 - !mlx4_is_slave(dev)); 593 + MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE); 595 594 if (err) 596 595 goto out; 597 596 ··· 666 667 out: 667 668 mlx4_free_cmd_mailbox(dev, mailbox); 668 669 return err; 670 + } 671 + 672 + int mlx4_QUERY_DEV_CAP_wrapper(struct mlx4_dev *dev, int slave, 673 + struct mlx4_vhcr *vhcr, 674 + struct mlx4_cmd_mailbox *inbox, 675 + struct mlx4_cmd_mailbox *outbox, 676 + struct mlx4_cmd_info *cmd) 677 + { 678 + int err = 0; 679 + u8 field; 680 + 681 + err = mlx4_cmd_box(dev, 0, outbox->dma, 0, 0, MLX4_CMD_QUERY_DEV_CAP, 682 + MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); 683 + if (err) 684 + return err; 685 + 686 + /* For guests, report Blueflame disabled */ 687 + MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_BF_OFFSET); 688 + field &= 0x7f; 689 + MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_BF_OFFSET); 690 + 691 + return 0; 669 692 } 670 693 671 694 int mlx4_QUERY_PORT_wrapper(struct mlx4_dev *dev, int slave, ··· 881 860 ((fw_ver & 0xffff0000ull) >> 16) | 882 861 ((fw_ver & 0x0000ffffull) << 16); 883 862 863 + if (mlx4_is_slave(dev)) 864 + goto out; 865 + 884 866 MLX4_GET(lg, outbox, QUERY_FW_PPF_ID); 885 867 dev->caps.function = lg; 886 868 ··· 949 925 out: 950 926 mlx4_free_cmd_mailbox(dev, mailbox); 951 927 return err; 928 + } 929 + 930 + int mlx4_QUERY_FW_wrapper(struct mlx4_dev *dev, int slave, 931 + struct mlx4_vhcr *vhcr, 932 + struct mlx4_cmd_mailbox *inbox, 933 + struct mlx4_cmd_mailbox *outbox, 934 + struct mlx4_cmd_info *cmd) 935 + { 936 + u8 *outbuf; 937 + int err; 938 + 939 + outbuf = outbox->buf; 940 + err = 
mlx4_cmd_box(dev, 0, outbox->dma, 0, 0, MLX4_CMD_QUERY_FW, 941 + MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); 942 + if (err) 943 + return err; 944 + 945 + /* for slaves, zero out everything except FW version */ 946 + outbuf[0] = outbuf[1] = 0; 947 + memset(&outbuf[8], 0, QUERY_FW_OUT_SIZE - 8); 948 + return 0; 952 949 } 953 950 954 951 static void get_board_id(void *vsd, char *board_id)
+13 -27
drivers/net/ethernet/mellanox/mlx4/main.c
··· 142 142 struct pci_dev *pdev; 143 143 }; 144 144 145 - static inline int mlx4_master_get_num_eqs(struct mlx4_dev *dev) 146 - { 147 - return dev->caps.reserved_eqs + 148 - MLX4_MFUNC_EQ_NUM * (dev->num_slaves + 1); 149 - } 150 - 151 145 int mlx4_check_port_params(struct mlx4_dev *dev, 152 146 enum mlx4_port_type *port_type) 153 147 { ··· 211 217 } 212 218 213 219 dev->caps.num_ports = dev_cap->num_ports; 220 + dev->phys_caps.num_phys_eqs = MLX4_MAX_EQ_NUM; 214 221 for (i = 1; i <= dev->caps.num_ports; ++i) { 215 222 dev->caps.vl_cap[i] = dev_cap->max_vl[i]; 216 223 dev->caps.ib_mtu_cap[i] = dev_cap->ib_mtu[i]; ··· 430 435 mlx4_log_num_mgm_entry_size = hca_param.log_mc_entry_sz; 431 436 432 437 memset(&dev_cap, 0, sizeof(dev_cap)); 438 + dev->caps.max_qp_dest_rdma = 1 << hca_param.log_rd_per_qp; 433 439 err = mlx4_dev_cap(dev, &dev_cap); 434 440 if (err) { 435 441 mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n"); 436 442 return err; 437 443 } 444 + 445 + err = mlx4_QUERY_FW(dev); 446 + if (err) 447 + mlx4_err(dev, "QUERY_FW command failed: could not get FW version.\n"); 438 448 439 449 page_size = ~dev->caps.page_size_cap + 1; 440 450 mlx4_warn(dev, "HCA minimum page size:%d\n", page_size); ··· 485 485 dev->caps.num_mgms = 0; 486 486 dev->caps.num_amgms = 0; 487 487 488 - for (i = 1; i <= dev->caps.num_ports; ++i) 489 - dev->caps.port_mask[i] = dev->caps.port_type[i]; 490 - 491 488 if (dev->caps.num_ports > MLX4_MAX_PORTS) { 492 489 mlx4_err(dev, "HCA has %d ports, but we only support %d, " 493 490 "aborting.\n", dev->caps.num_ports, MLX4_MAX_PORTS); 494 491 return -ENODEV; 495 492 } 493 + 494 + for (i = 1; i <= dev->caps.num_ports; ++i) 495 + dev->caps.port_mask[i] = dev->caps.port_type[i]; 496 496 497 497 if (dev->caps.uar_page_size * (dev->caps.num_uars - 498 498 dev->caps.reserved_uars) > ··· 504 504 return -ENODEV; 505 505 } 506 506 507 - #if 0 508 - mlx4_warn(dev, "sqp_demux:%d\n", dev->caps.sqp_demux); 509 - mlx4_warn(dev, "num_uars:%d 
reserved_uars:%d uar region:0x%x bar2:0x%llx\n", 510 - dev->caps.num_uars, dev->caps.reserved_uars, 511 - dev->caps.uar_page_size * dev->caps.num_uars, 512 - pci_resource_len(dev->pdev, 2)); 513 - mlx4_warn(dev, "num_eqs:%d reserved_eqs:%d\n", dev->caps.num_eqs, 514 - dev->caps.reserved_eqs); 515 - mlx4_warn(dev, "num_pds:%d reserved_pds:%d slave_pd_shift:%d pd_base:%d\n", 516 - dev->caps.num_pds, dev->caps.reserved_pds, 517 - dev->caps.slave_pd_shift, dev->caps.pd_base); 518 - #endif 519 507 return 0; 520 508 } 521 509 ··· 798 810 if (err) 799 811 goto err_srq; 800 812 801 - num_eqs = (mlx4_is_master(dev)) ? 802 - roundup_pow_of_two(mlx4_master_get_num_eqs(dev)) : 803 - dev->caps.num_eqs; 813 + num_eqs = (mlx4_is_master(dev)) ? dev->phys_caps.num_phys_eqs : 814 + dev->caps.num_eqs; 804 815 err = mlx4_init_icm_table(dev, &priv->eq_table.cmpt_table, 805 816 cmpt_base + 806 817 ((u64) (MLX4_CMPT_TYPE_EQ * ··· 861 874 } 862 875 863 876 864 - num_eqs = (mlx4_is_master(dev)) ? 865 - roundup_pow_of_two(mlx4_master_get_num_eqs(dev)) : 866 - dev->caps.num_eqs; 877 + num_eqs = (mlx4_is_master(dev)) ? dev->phys_caps.num_phys_eqs : 878 + dev->caps.num_eqs; 867 879 err = mlx4_init_icm_table(dev, &priv->eq_table.table, 868 880 init_hca->eqc_base, dev_cap->eqc_entry_sz, 869 881 num_eqs, num_eqs, 0, 0);
+10
drivers/net/ethernet/mellanox/mlx4/mlx4.h
··· 1039 1039 void mlx4_free_resource_tracker(struct mlx4_dev *dev, 1040 1040 enum mlx4_res_tracker_free_type type); 1041 1041 1042 + int mlx4_QUERY_FW_wrapper(struct mlx4_dev *dev, int slave, 1043 + struct mlx4_vhcr *vhcr, 1044 + struct mlx4_cmd_mailbox *inbox, 1045 + struct mlx4_cmd_mailbox *outbox, 1046 + struct mlx4_cmd_info *cmd); 1042 1047 int mlx4_SET_PORT_wrapper(struct mlx4_dev *dev, int slave, 1043 1048 struct mlx4_vhcr *vhcr, 1044 1049 struct mlx4_cmd_mailbox *inbox, ··· 1059 1054 struct mlx4_cmd_mailbox *inbox, 1060 1055 struct mlx4_cmd_mailbox *outbox, 1061 1056 struct mlx4_cmd_info *cmd); 1057 + int mlx4_QUERY_DEV_CAP_wrapper(struct mlx4_dev *dev, int slave, 1058 + struct mlx4_vhcr *vhcr, 1059 + struct mlx4_cmd_mailbox *inbox, 1060 + struct mlx4_cmd_mailbox *outbox, 1061 + struct mlx4_cmd_info *cmd); 1062 1062 int mlx4_QUERY_PORT_wrapper(struct mlx4_dev *dev, int slave, 1063 1063 struct mlx4_vhcr *vhcr, 1064 1064 struct mlx4_cmd_mailbox *inbox,
+6 -3
drivers/net/ethernet/mellanox/mlx4/profile.c
··· 126 126 profile[MLX4_RES_AUXC].num = request->num_qp; 127 127 profile[MLX4_RES_SRQ].num = request->num_srq; 128 128 profile[MLX4_RES_CQ].num = request->num_cq; 129 - profile[MLX4_RES_EQ].num = min_t(unsigned, dev_cap->max_eqs, MAX_MSIX); 129 + profile[MLX4_RES_EQ].num = mlx4_is_mfunc(dev) ? 130 + dev->phys_caps.num_phys_eqs : 131 + min_t(unsigned, dev_cap->max_eqs, MAX_MSIX); 130 132 profile[MLX4_RES_DMPT].num = request->num_mpt; 131 133 profile[MLX4_RES_CMPT].num = MLX4_NUM_CMPTS; 132 134 profile[MLX4_RES_MTT].num = request->num_mtt * (1 << log_mtts_per_seg); ··· 217 215 init_hca->log_num_cqs = profile[i].log_num; 218 216 break; 219 217 case MLX4_RES_EQ: 220 - dev->caps.num_eqs = profile[i].num; 218 + dev->caps.num_eqs = roundup_pow_of_two(min_t(unsigned, dev_cap->max_eqs, 219 + MAX_MSIX)); 221 220 init_hca->eqc_base = profile[i].start; 222 - init_hca->log_num_eqs = profile[i].log_num; 221 + init_hca->log_num_eqs = ilog2(dev->caps.num_eqs); 223 222 break; 224 223 case MLX4_RES_DMPT: 225 224 dev->caps.num_mpts = profile[i].num;
+12 -12
drivers/net/ethernet/realtek/8139cp.c
··· 979 979 cpw32_f (MAC0 + 0, le32_to_cpu (*(__le32 *) (dev->dev_addr + 0))); 980 980 cpw32_f (MAC0 + 4, le32_to_cpu (*(__le32 *) (dev->dev_addr + 4))); 981 981 982 + cpw32_f(HiTxRingAddr, 0); 983 + cpw32_f(HiTxRingAddr + 4, 0); 984 + 985 + ring_dma = cp->ring_dma; 986 + cpw32_f(RxRingAddr, ring_dma & 0xffffffff); 987 + cpw32_f(RxRingAddr + 4, (ring_dma >> 16) >> 16); 988 + 989 + ring_dma += sizeof(struct cp_desc) * CP_RX_RING_SIZE; 990 + cpw32_f(TxRingAddr, ring_dma & 0xffffffff); 991 + cpw32_f(TxRingAddr + 4, (ring_dma >> 16) >> 16); 992 + 982 993 cp_start_hw(cp); 983 994 cpw8(TxThresh, 0x06); /* XXX convert magic num to a constant */ 984 995 ··· 1002 991 cp->wol_enabled = 0; 1003 992 1004 993 cpw8(Config5, cpr8(Config5) & PMEStatus); 1005 - 1006 - cpw32_f(HiTxRingAddr, 0); 1007 - cpw32_f(HiTxRingAddr + 4, 0); 1008 - 1009 - ring_dma = cp->ring_dma; 1010 - cpw32_f(RxRingAddr, ring_dma & 0xffffffff); 1011 - cpw32_f(RxRingAddr + 4, (ring_dma >> 16) >> 16); 1012 - 1013 - ring_dma += sizeof(struct cp_desc) * CP_RX_RING_SIZE; 1014 - cpw32_f(TxRingAddr, ring_dma & 0xffffffff); 1015 - cpw32_f(TxRingAddr + 4, (ring_dma >> 16) >> 16); 1016 994 1017 995 cpw16(MultiIntr, 0); 1018 996 ··· 1636 1636 1637 1637 static void eeprom_cmd_end(void __iomem *ee_addr) 1638 1638 { 1639 - writeb (~EE_CS, ee_addr); 1639 + writeb(0, ee_addr); 1640 1640 eeprom_delay (); 1641 1641 } 1642 1642
+1 -1
drivers/net/ethernet/realtek/8139too.c
··· 1173 1173 } 1174 1174 1175 1175 /* Terminate the EEPROM access. */ 1176 - RTL_W8 (Cfg9346, ~EE_CS); 1176 + RTL_W8(Cfg9346, 0); 1177 1177 eeprom_delay (); 1178 1178 1179 1179 return retval;
+3
drivers/net/ethernet/realtek/r8169.c
··· 6345 6345 6346 6346 cancel_work_sync(&tp->wk.work); 6347 6347 6348 + netif_napi_del(&tp->napi); 6349 + 6348 6350 unregister_netdev(dev); 6349 6351 6350 6352 rtl_release_firmware(tp); ··· 6670 6668 return rc; 6671 6669 6672 6670 err_out_msi_4: 6671 + netif_napi_del(&tp->napi); 6673 6672 rtl_disable_msi(pdev, tp); 6674 6673 iounmap(ioaddr); 6675 6674 err_out_free_res_3:
+23 -2
drivers/net/usb/mcs7830.c
··· 629 629 return skb->len > 0; 630 630 } 631 631 632 + static void mcs7830_status(struct usbnet *dev, struct urb *urb) 633 + { 634 + u8 *buf = urb->transfer_buffer; 635 + bool link; 636 + 637 + if (urb->actual_length < 16) 638 + return; 639 + 640 + link = !(buf[1] & 0x20); 641 + if (netif_carrier_ok(dev->net) != link) { 642 + if (link) { 643 + netif_carrier_on(dev->net); 644 + usbnet_defer_kevent(dev, EVENT_LINK_RESET); 645 + } else 646 + netif_carrier_off(dev->net); 647 + netdev_dbg(dev->net, "Link Status is: %d\n", link); 648 + } 649 + } 650 + 632 651 static const struct driver_info moschip_info = { 633 652 .description = "MOSCHIP 7830/7832/7730 usb-NET adapter", 634 653 .bind = mcs7830_bind, 635 654 .rx_fixup = mcs7830_rx_fixup, 636 - .flags = FLAG_ETHER, 655 + .flags = FLAG_ETHER | FLAG_LINK_INTR, 656 + .status = mcs7830_status, 637 657 .in = 1, 638 658 .out = 2, 639 659 }; ··· 662 642 .description = "Sitecom LN-30 usb-NET adapter", 663 643 .bind = mcs7830_bind, 664 644 .rx_fixup = mcs7830_rx_fixup, 665 - .flags = FLAG_ETHER, 645 + .flags = FLAG_ETHER | FLAG_LINK_INTR, 646 + .status = mcs7830_status, 666 647 .in = 1, 667 648 .out = 2, 668 649 };
+6
include/linux/mlx4/device.h
··· 64 64 MLX4_MAX_NUM_PF = 16, 65 65 MLX4_MAX_NUM_VF = 64, 66 66 MLX4_MFUNC_MAX = 80, 67 + MLX4_MAX_EQ_NUM = 1024, 67 68 MLX4_MFUNC_EQ_NUM = 4, 68 69 MLX4_MFUNC_MAX_EQES = 8, 69 70 MLX4_MFUNC_EQE_MASK = (MLX4_MFUNC_MAX_EQES - 1) ··· 239 238 { 240 239 return (major << 32) | (minor << 16) | subminor; 241 240 } 241 + 242 + struct mlx4_phys_caps { 243 + u32 num_phys_eqs; 244 + }; 242 245 243 246 struct mlx4_caps { 244 247 u64 fw_ver; ··· 504 499 unsigned long flags; 505 500 unsigned long num_slaves; 506 501 struct mlx4_caps caps; 502 + struct mlx4_phys_caps phys_caps; 507 503 struct radix_tree_root qp_table_tree; 508 504 u8 rev_id; 509 505 char board_id[MLX4_BOARD_ID_LEN];
+28 -1
include/net/cipso_ipv4.h
··· 42 42 #include <net/netlabel.h> 43 43 #include <net/request_sock.h> 44 44 #include <linux/atomic.h> 45 + #include <asm/unaligned.h> 45 46 46 47 /* known doi values */ 47 48 #define CIPSO_V4_DOI_UNKNOWN 0x00000000 ··· 286 285 static inline int cipso_v4_validate(const struct sk_buff *skb, 287 286 unsigned char **option) 288 287 { 289 - return -ENOSYS; 288 + unsigned char *opt = *option; 289 + unsigned char err_offset = 0; 290 + u8 opt_len = opt[1]; 291 + u8 opt_iter; 292 + 293 + if (opt_len < 8) { 294 + err_offset = 1; 295 + goto out; 296 + } 297 + 298 + if (get_unaligned_be32(&opt[2]) == 0) { 299 + err_offset = 2; 300 + goto out; 301 + } 302 + 303 + for (opt_iter = 6; opt_iter < opt_len;) { 304 + if (opt[opt_iter + 1] > (opt_len - opt_iter)) { 305 + err_offset = opt_iter + 1; 306 + goto out; 307 + } 308 + opt_iter += opt[opt_iter + 1]; 309 + } 310 + 311 + out: 312 + *option = opt + err_offset; 313 + return err_offset; 314 + 290 315 } 291 316 #endif /* CONFIG_NETLABEL */ 292 317
+11 -7
lib/dynamic_queue_limits.c
··· 10 10 #include <linux/jiffies.h> 11 11 #include <linux/dynamic_queue_limits.h> 12 12 13 - #define POSDIFF(A, B) ((A) > (B) ? (A) - (B) : 0) 13 + #define POSDIFF(A, B) ((int)((A) - (B)) > 0 ? (A) - (B) : 0) 14 + #define AFTER_EQ(A, B) ((int)((A) - (B)) >= 0) 14 15 15 16 /* Records completed count and recalculates the queue limit */ 16 17 void dql_completed(struct dql *dql, unsigned int count) 17 18 { 18 19 unsigned int inprogress, prev_inprogress, limit; 19 - unsigned int ovlimit, all_prev_completed, completed; 20 + unsigned int ovlimit, completed, num_queued; 21 + bool all_prev_completed; 22 + 23 + num_queued = ACCESS_ONCE(dql->num_queued); 20 24 21 25 /* Can't complete more than what's in queue */ 22 - BUG_ON(count > dql->num_queued - dql->num_completed); 26 + BUG_ON(count > num_queued - dql->num_completed); 23 27 24 28 completed = dql->num_completed + count; 25 29 limit = dql->limit; 26 - ovlimit = POSDIFF(dql->num_queued - dql->num_completed, limit); 27 - inprogress = dql->num_queued - completed; 30 + ovlimit = POSDIFF(num_queued - dql->num_completed, limit); 31 + inprogress = num_queued - completed; 28 32 prev_inprogress = dql->prev_num_queued - dql->num_completed; 29 - all_prev_completed = POSDIFF(completed, dql->prev_num_queued); 33 + all_prev_completed = AFTER_EQ(completed, dql->prev_num_queued); 30 34 31 35 if ((ovlimit && !inprogress) || 32 36 (dql->prev_ovlimit && all_prev_completed)) { ··· 108 104 dql->prev_ovlimit = ovlimit; 109 105 dql->prev_last_obj_cnt = dql->last_obj_cnt; 110 106 dql->num_completed = completed; 111 - dql->prev_num_queued = dql->num_queued; 107 + dql->prev_num_queued = num_queued; 112 108 } 113 109 EXPORT_SYMBOL(dql_completed); 114 110
+5 -2
net/core/sock.c
··· 1592 1592 gfp_t gfp_mask; 1593 1593 long timeo; 1594 1594 int err; 1595 + int npages = (data_len + (PAGE_SIZE - 1)) >> PAGE_SHIFT; 1596 + 1597 + err = -EMSGSIZE; 1598 + if (npages > MAX_SKB_FRAGS) 1599 + goto failure; 1595 1600 1596 1601 gfp_mask = sk->sk_allocation; 1597 1602 if (gfp_mask & __GFP_WAIT) ··· 1615 1610 if (atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) { 1616 1611 skb = alloc_skb(header_len, gfp_mask); 1617 1612 if (skb) { 1618 - int npages; 1619 1613 int i; 1620 1614 1621 1615 /* No pages, we're done... */ 1622 1616 if (!data_len) 1623 1617 break; 1624 1618 1625 - npages = (data_len + (PAGE_SIZE - 1)) >> PAGE_SHIFT; 1626 1619 skb->truesize += data_len; 1627 1620 skb_shinfo(skb)->nr_frags = npages; 1628 1621 for (i = 0; i < npages; i++) {
+2 -1
net/ipv4/inet_connection_sock.c
··· 377 377 378 378 flowi4_init_output(fl4, sk->sk_bound_dev_if, sk->sk_mark, 379 379 RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE, 380 - sk->sk_protocol, inet_sk_flowi_flags(sk), 380 + sk->sk_protocol, 381 + inet_sk_flowi_flags(sk) & ~FLOWI_FLAG_PRECOW_METRICS, 381 382 (opt && opt->opt.srr) ? opt->opt.faddr : ireq->rmt_addr, 382 383 ireq->loc_addr, ireq->rmt_port, inet_sk(sk)->inet_sport); 383 384 security_req_classify_flow(req, flowi4_to_flowi(fl4));
+6 -3
net/ipv4/tcp_ipv4.c
··· 824 824 */ 825 825 static int tcp_v4_send_synack(struct sock *sk, struct dst_entry *dst, 826 826 struct request_sock *req, 827 - struct request_values *rvp) 827 + struct request_values *rvp, 828 + u16 queue_mapping) 828 829 { 829 830 const struct inet_request_sock *ireq = inet_rsk(req); 830 831 struct flowi4 fl4; ··· 841 840 if (skb) { 842 841 __tcp_v4_send_check(skb, ireq->loc_addr, ireq->rmt_addr); 843 842 843 + skb_set_queue_mapping(skb, queue_mapping); 844 844 err = ip_build_and_send_pkt(skb, sk, ireq->loc_addr, 845 845 ireq->rmt_addr, 846 846 ireq->opt); ··· 856 854 struct request_values *rvp) 857 855 { 858 856 TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS); 859 - return tcp_v4_send_synack(sk, NULL, req, rvp); 857 + return tcp_v4_send_synack(sk, NULL, req, rvp, 0); 860 858 } 861 859 862 860 /* ··· 1424 1422 tcp_rsk(req)->snt_synack = tcp_time_stamp; 1425 1423 1426 1424 if (tcp_v4_send_synack(sk, dst, req, 1427 - (struct request_values *)&tmp_ext) || 1425 + (struct request_values *)&tmp_ext, 1426 + skb_get_queue_mapping(skb)) || 1428 1427 want_cookie) 1429 1428 goto drop_and_free; 1430 1429
+6 -3
net/ipv6/tcp_ipv6.c
··· 476 476 477 477 478 478 static int tcp_v6_send_synack(struct sock *sk, struct request_sock *req, 479 - struct request_values *rvp) 479 + struct request_values *rvp, 480 + u16 queue_mapping) 480 481 { 481 482 struct inet6_request_sock *treq = inet6_rsk(req); 482 483 struct ipv6_pinfo *np = inet6_sk(sk); ··· 514 513 __tcp_v6_send_check(skb, &treq->loc_addr, &treq->rmt_addr); 515 514 516 515 fl6.daddr = treq->rmt_addr; 516 + skb_set_queue_mapping(skb, queue_mapping); 517 517 err = ip6_xmit(sk, skb, &fl6, opt, np->tclass); 518 518 err = net_xmit_eval(err); 519 519 } ··· 530 528 struct request_values *rvp) 531 529 { 532 530 TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS); 533 - return tcp_v6_send_synack(sk, req, rvp); 531 + return tcp_v6_send_synack(sk, req, rvp, 0); 534 532 } 535 533 536 534 static void tcp_v6_reqsk_destructor(struct request_sock *req) ··· 1215 1213 security_inet_conn_request(sk, skb, req); 1216 1214 1217 1215 if (tcp_v6_send_synack(sk, req, 1218 - (struct request_values *)&tmp_ext) || 1216 + (struct request_values *)&tmp_ext, 1217 + skb_get_queue_mapping(skb)) || 1219 1218 want_cookie) 1220 1219 goto drop_and_free; 1221 1220