Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Pull networking fixes from David S. Miller:

1) Since we do RCU lookups on ipv4 FIB entries, we have to test if the
entry is dead before returning it to our caller.

2) openvswitch locking and packet validation fixes from Ansis Atteka,
Jesse Gross, and Pravin B Shelar.

3) Fix PM resume locking in IGB driver, from Benjamin Poirier.

4) Fix VLAN header handling in vhost-net and macvtap, from Basil Gor.

5) Revert a bogus network namespace isolation change that was causing
regressions on S390 networking devices.

6) If bonding decides to process and handle a LACPDU frame, we
shouldn't bump the rx_dropped counter. From Jiri Bohac.

7) Fix mis-calculation of available TX space in r8169 driver when doing
TSO, which can lead to crashes and/or hung device. From Julien
Ducourthial.

8) SCTP does not validate cached routes properly in all cases, from
Nicolas Dichtel.

9) Link status interrupt needs to be handled in ks8851 driver, from
Stephen Boyd.

10) Use capable(), not cap_raised(), in connector/userns netlink code.
From Eric W. Biederman via Andrew Morton.

11) Fix pktgen OOPS on module unload, from Eric Dumazet.

12) iwlwifi under-estimates SKB truesizes, also from Eric Dumazet.

13) Cure division by zero in SFC driver, from Ben Hutchings.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (26 commits)
ks8851: Update link status during link change interrupt
macvtap: restore vlan header on user read
vhost-net: fix handle_rx buffer size
bonding: don't increase rx_dropped after processing LACPDUs
connector/userns: replace netlink uses of cap_raised() with capable()
sctp: check cached dst before using it
pktgen: fix crash at module unload
Revert "net: maintain namespace isolation between vlan and real device"
ehea: fix losing of NEQ events when one event occurred early
igb: fix rtnl race in PM resume path
ipv4: Do not use dead fib_info entries.
r8169: fix unsigned int wraparound with TSO
sfc: Fix division by zero when using one RX channel and no SR-IOV
openvswitch: Validation of IPv6 set port action uses IPv4 header
net: compare_ether_addr[_64bits]() has no ordering
cdc_ether: Ignore bogus union descriptor for RNDIS devices
bnx2x: bug fix when loading after SAN boot
e1000: Silence sparse warnings by correcting type
igb, ixgbe: netdev_tx_reset_queue incorrectly called from tx init path
openvswitch: Release rtnl_lock if ovs_vport_cmd_build_info() failed.
...

37 files changed, +235 -171
+1 -1
drivers/block/drbd/drbd_nl.c
 		return;
 	}

-	if (!cap_raised(current_cap(), CAP_SYS_ADMIN)) {
+	if (!capable(CAP_SYS_ADMIN)) {
 		retcode = ERR_PERM;
 		goto fail;
 	}
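The cap_raised() to capable() conversions in this pull (item 10 above) matter because cap_raised(current_cap(), X) only tests the raw bit in the task's effective capability set, while capable() runs the full permission check (LSM hooks, user-namespace handling, superuser accounting). A minimal sketch of the intended pattern, not taken from any of the patched files:

```c
#include <linux/capability.h>
#include <linux/errno.h>

static int sketch_privileged_op(void)
{
	/* capable() performs the complete check for the current task;
	 * testing the raw bit with cap_raised() would bypass LSMs and
	 * user-namespace handling. */
	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	/* ... privileged work would go here ... */
	return 0;
}
```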
+1 -1
drivers/md/dm-log-userspace-transfer.c
 {
 	struct dm_ulog_request *tfr = (struct dm_ulog_request *)(msg + 1);

-	if (!cap_raised(current_cap(), CAP_SYS_ADMIN))
+	if (!capable(CAP_SYS_ADMIN))
 		return;

 	spin_lock(&receiving_list_lock);
+12 -6
drivers/net/bonding/bond_3ad.c
  * received frames (loopback). Since only the payload is given to this
  * function, it check for loopback.
  */
-static void bond_3ad_rx_indication(struct lacpdu *lacpdu, struct slave *slave, u16 length)
+static int bond_3ad_rx_indication(struct lacpdu *lacpdu, struct slave *slave, u16 length)
 {
 	struct port *port;
+	int ret = RX_HANDLER_ANOTHER;

 	if (length >= sizeof(struct lacpdu)) {

···
 		if (!port->slave) {
 			pr_warning("%s: Warning: port of slave %s is uninitialized\n",
 				   slave->dev->name, slave->dev->master->name);
-			return;
+			return ret;
 		}

 		switch (lacpdu->subtype) {
 		case AD_TYPE_LACPDU:
+			ret = RX_HANDLER_CONSUMED;
 			pr_debug("Received LACPDU on port %d\n",
 				 port->actor_port_number);
 			/* Protect against concurrent state machines */
···
 			break;

 		case AD_TYPE_MARKER:
+			ret = RX_HANDLER_CONSUMED;
 			// No need to convert fields to Little Endian since we don't use the marker's fields.

 			switch (((struct bond_marker *)lacpdu)->tlv_type) {
···
 			}
 		}
 	}
+	return ret;
 }

 /**
···
 	return NETDEV_TX_OK;
 }

-void bond_3ad_lacpdu_recv(struct sk_buff *skb, struct bonding *bond,
+int bond_3ad_lacpdu_recv(struct sk_buff *skb, struct bonding *bond,
 			  struct slave *slave)
 {
+	int ret = RX_HANDLER_ANOTHER;
 	if (skb->protocol != PKT_TYPE_LACPDU)
-		return;
+		return ret;

 	if (!pskb_may_pull(skb, sizeof(struct lacpdu)))
-		return;
+		return ret;

 	read_lock(&bond->lock);
-	bond_3ad_rx_indication((struct lacpdu *) skb->data, slave, skb->len);
+	ret = bond_3ad_rx_indication((struct lacpdu *) skb->data, slave, skb->len);
 	read_unlock(&bond->lock);
+	return ret;
 }

 /*
+1 -1
drivers/net/bonding/bond_3ad.h
 void bond_3ad_handle_link_change(struct slave *slave, char link);
 int bond_3ad_get_active_agg_info(struct bonding *bond, struct ad_info *ad_info);
 int bond_3ad_xmit_xor(struct sk_buff *skb, struct net_device *dev);
-void bond_3ad_lacpdu_recv(struct sk_buff *skb, struct bonding *bond,
+int bond_3ad_lacpdu_recv(struct sk_buff *skb, struct bonding *bond,
 			  struct slave *slave);
 int bond_3ad_set_carrier(struct bonding *bond);
 void bond_3ad_update_lacp_rate(struct bonding *bond);
+11 -5
drivers/net/bonding/bond_main.c
 	struct sk_buff *skb = *pskb;
 	struct slave *slave;
 	struct bonding *bond;
-	void (*recv_probe)(struct sk_buff *, struct bonding *,
+	int (*recv_probe)(struct sk_buff *, struct bonding *,
 			   struct slave *);
+	int ret = RX_HANDLER_ANOTHER;

 	skb = skb_share_check(skb, GFP_ATOMIC);
 	if (unlikely(!skb))
···
 		struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);

 		if (likely(nskb)) {
-			recv_probe(nskb, bond, slave);
+			ret = recv_probe(nskb, bond, slave);
 			dev_kfree_skb(nskb);
+			if (ret == RX_HANDLER_CONSUMED) {
+				consume_skb(skb);
+				return ret;
+			}
 		}
 	}

···
 		memcpy(eth_hdr(skb)->h_dest, bond->dev->dev_addr, ETH_ALEN);
 	}

-	return RX_HANDLER_ANOTHER;
+	return ret;
 }

 /* enslave device <slave> to bond device <master> */
···
 	}
 }

-static void bond_arp_rcv(struct sk_buff *skb, struct bonding *bond,
+static int bond_arp_rcv(struct sk_buff *skb, struct bonding *bond,
 			 struct slave *slave)
 {
 	struct arphdr *arp;
···
 	__be32 sip, tip;

 	if (skb->protocol != __cpu_to_be16(ETH_P_ARP))
-		return;
+		return RX_HANDLER_ANOTHER;

 	read_lock(&bond->lock);

···

 out_unlock:
 	read_unlock(&bond->lock);
+	return RX_HANDLER_ANOTHER;
 }

 /*
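The bonding changes above turn on the rx_handler contract: a handler that takes ownership of a frame must free it itself and report RX_HANDLER_CONSUMED, so the core neither counts it as dropped nor keeps processing it. An illustrative sketch of that contract, not the bonding code itself (is_control_frame() is a made-up predicate):

```c
#include <linux/netdevice.h>
#include <linux/skbuff.h>

static rx_handler_result_t sketch_rx_handler(struct sk_buff **pskb)
{
	struct sk_buff *skb = *pskb;

	if (is_control_frame(skb)) {		/* hypothetical check */
		consume_skb(skb);		/* freed as consumed, not dropped */
		return RX_HANDLER_CONSUMED;	/* core stops here */
	}

	return RX_HANDLER_PASS;			/* let the stack see it */
}
```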
+22 -1
drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
 	return bnx2x_prev_mcp_done(bp);
 }

+/* previous driver DMAE transaction may have occurred when pre-boot stage ended
+ * and boot began, or when kdump kernel was loaded. Either case would invalidate
+ * the addresses of the transaction, resulting in was-error bit set in the pci
+ * causing all hw-to-host pcie transactions to timeout. If this happened we want
+ * to clear the interrupt which detected this from the pglueb and the was done
+ * bit
+ */
+static void __devinit bnx2x_prev_interrupted_dmae(struct bnx2x *bp)
+{
+	u32 val = REG_RD(bp, PGLUE_B_REG_PGLUE_B_INT_STS);
+	if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN) {
+		BNX2X_ERR("was error bit was found to be set in pglueb upon startup. Clearing");
+		REG_WR(bp, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR, 1 << BP_FUNC(bp));
+	}
+}
+
 static int __devinit bnx2x_prev_unload(struct bnx2x *bp)
 {
 	int time_counter = 10;
 	u32 rc, fw, hw_lock_reg, hw_lock_val;
 	BNX2X_DEV_INFO("Entering Previous Unload Flow\n");

-	/* Release previously held locks */
+	/* clear hw from errors which may have resulted from an interrupted
+	 * dmae transaction.
+	 */
+	bnx2x_prev_interrupted_dmae(bp);
+
+	/* Release previously held locks */
 	hw_lock_reg = (BP_FUNC(bp) <= 5) ?
 		      (MISC_REG_DRIVER_CONTROL_1 + BP_FUNC(bp) * 8) :
 		      (MISC_REG_DRIVER_CONTROL_7 + (BP_FUNC(bp) - 6) * 8);
+2
drivers/net/ethernet/ibm/ehea/ehea_main.c
 		goto out_shutdown_ports;
 	}

+	/* Handle any events that might be pending. */
+	tasklet_hi_schedule(&adapter->neq_tasklet);

 	ret = 0;
 	goto out;
+2 -2
drivers/net/ethernet/intel/e1000/e1000_main.c
 	for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
 		struct e1000_tx_desc *tx_desc = E1000_TX_DESC(*tx_ring, i);
 		struct e1000_buffer *buffer_info = &tx_ring->buffer_info[i];
-		struct my_u { u64 a; u64 b; };
+		struct my_u { __le64 a; __le64 b; };
 		struct my_u *u = (struct my_u *)tx_desc;
 		const char *type;

···
 	for (i = 0; rx_ring->desc && (i < rx_ring->count); i++) {
 		struct e1000_rx_desc *rx_desc = E1000_RX_DESC(*rx_ring, i);
 		struct e1000_buffer *buffer_info = &rx_ring->buffer_info[i];
-		struct my_u { u64 a; u64 b; };
+		struct my_u { __le64 a; __le64 b; };
 		struct my_u *u = (struct my_u *)rx_desc;
 		const char *type;

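The u64 to __le64 switch only silences sparse: these descriptor words live in little-endian DMA memory, and the __le64 annotation forces every reader through an explicit byte-order conversion. A hedged sketch of how such a field is consumed (not the e1000 dump code verbatim):

```c
#include <linux/kernel.h>
#include <linux/types.h>

struct sketch_desc { __le64 a; __le64 b; };	/* matches the on-wire layout */

static void sketch_dump(const struct sketch_desc *u)
{
	/* le64_to_cpu() is the sparse-clean way to read a __le64 field */
	pr_info("desc: %016llx %016llx\n",
		(unsigned long long)le64_to_cpu(u->a),
		(unsigned long long)le64_to_cpu(u->b));
}
```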
+8 -16
drivers/net/ethernet/intel/igb/igb_main.c
 		adapter->flags |= IGB_FLAG_HAS_MSI;
 out:
 	/* Notify the stack of the (possibly) reduced queue counts. */
+	rtnl_lock();
 	netif_set_real_num_tx_queues(adapter->netdev, adapter->num_tx_queues);
-	return netif_set_real_num_rx_queues(adapter->netdev,
-					    adapter->num_rx_queues);
+	err = netif_set_real_num_rx_queues(adapter->netdev,
+					   adapter->num_rx_queues);
+	rtnl_unlock();
+	return err;
 }

 /**
···

 	txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
 	wr32(E1000_TXDCTL(reg_idx), txdctl);
-
-	netdev_tx_reset_queue(txring_txq(ring));
 }

 /**
···
 		buffer_info = &tx_ring->tx_buffer_info[i];
 		igb_unmap_and_free_tx_resource(tx_ring, buffer_info);
 	}
+
+	netdev_tx_reset_queue(txring_txq(tx_ring));

 	size = sizeof(struct igb_tx_buffer) * tx_ring->count;
 	memset(tx_ring->tx_buffer_info, 0, size);
···
 	pci_enable_wake(pdev, PCI_D3hot, 0);
 	pci_enable_wake(pdev, PCI_D3cold, 0);

-	if (!rtnl_is_locked()) {
-		/*
-		 * shut up ASSERT_RTNL() warning in
-		 * netif_set_real_num_tx/rx_queues.
-		 */
-		rtnl_lock();
-		err = igb_init_interrupt_scheme(adapter);
-		rtnl_unlock();
-	} else {
-		err = igb_init_interrupt_scheme(adapter);
-	}
-	if (err) {
+	if (igb_init_interrupt_scheme(adapter)) {
 		dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
 		return -ENOMEM;
 	}
-3
drivers/net/ethernet/intel/ixgbe/ixgbe.h
 extern struct ixgbe_info ixgbe_X540_info;
 #ifdef CONFIG_IXGBE_DCB
 extern const struct dcbnl_rtnl_ops dcbnl_ops;
-extern int ixgbe_copy_dcb_cfg(struct ixgbe_dcb_config *src_dcb_cfg,
-			      struct ixgbe_dcb_config *dst_dcb_cfg,
-			      int tc_max);
 #endif

 extern char ixgbe_driver_name[];
+20 -23
drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c
 #define DCB_NO_HW_CHG 1 /* DCB configuration did not change */
 #define DCB_HW_CHG    2 /* DCB configuration changed, no reset */

-int ixgbe_copy_dcb_cfg(struct ixgbe_dcb_config *scfg,
-		       struct ixgbe_dcb_config *dcfg, int tc_max)
+static int ixgbe_copy_dcb_cfg(struct ixgbe_adapter *adapter, int tc_max)
 {
+	struct ixgbe_dcb_config *scfg = &adapter->temp_dcb_cfg;
+	struct ixgbe_dcb_config *dcfg = &adapter->dcb_cfg;
 	struct tc_configuration *src = NULL;
 	struct tc_configuration *dst = NULL;
 	int i, j;
 	int tx = DCB_TX_CONFIG;
 	int rx = DCB_RX_CONFIG;
 	int changes = 0;
+#ifdef IXGBE_FCOE
+	struct dcb_app app = {
+		.selector = DCB_APP_IDTYPE_ETHTYPE,
+		.protocol = ETH_P_FCOE,
+	};
+	u8 up = dcb_getapp(adapter->netdev, &app);

-	if (!scfg || !dcfg)
-		return changes;
+	if (up && !(up & (1 << adapter->fcoe.up)))
+		changes |= BIT_APP_UPCHG;
+#endif

 	for (i = DCB_PG_ATTR_TC_0; i < tc_max + DCB_PG_ATTR_TC_0; i++) {
 		src = &scfg->tc_config[i - DCB_PG_ATTR_TC_0];
···
 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
 	int ret = DCB_NO_HW_CHG;
 	int i;
-#ifdef IXGBE_FCOE
-	struct dcb_app app = {
-		.selector = DCB_APP_IDTYPE_ETHTYPE,
-		.protocol = ETH_P_FCOE,
-	};
-	u8 up;
-
-	/* In IEEE mode, use the IEEE Ethertype selector value */
-	if (adapter->dcbx_cap & DCB_CAP_DCBX_VER_IEEE) {
-		app.selector = IEEE_8021QAZ_APP_SEL_ETHERTYPE;
-		up = dcb_ieee_getapp_mask(netdev, &app);
-	} else {
-		up = dcb_getapp(netdev, &app);
-	}
-#endif

 	/* Fail command if not in CEE mode */
 	if (!(adapter->dcbx_cap & DCB_CAP_DCBX_VER_CEE))
 		return ret;

-	adapter->dcb_set_bitmap |= ixgbe_copy_dcb_cfg(&adapter->temp_dcb_cfg,
-						      &adapter->dcb_cfg,
+	adapter->dcb_set_bitmap |= ixgbe_copy_dcb_cfg(adapter,
 						      MAX_TRAFFIC_CLASS);
 	if (!adapter->dcb_set_bitmap)
 		return ret;
···
 	 * FCoE is using changes. This happens if the APP info
 	 * changes or the up2tc mapping is updated.
 	 */
-	if ((up && !(up & (1 << adapter->fcoe.up))) ||
-	    (adapter->dcb_set_bitmap & BIT_APP_UPCHG)) {
+	if (adapter->dcb_set_bitmap & BIT_APP_UPCHG) {
+		struct dcb_app app = {
+			.selector = DCB_APP_IDTYPE_ETHTYPE,
+			.protocol = ETH_P_FCOE,
+		};
+		u8 up = dcb_getapp(netdev, &app);
+
 		adapter->fcoe.up = ffs(up) - 1;
 		ixgbe_dcbnl_devreset(netdev);
 		ret = DCB_HW_CHG_RST;
+2
drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
 		rx_desc = IXGBE_RX_DESC(rx_ring, rx_ntc);
 	}

+	netdev_tx_reset_queue(txring_txq(tx_ring));
+
 	/* re-map buffers to ring, store next to clean values */
 	ixgbe_alloc_rx_buffers(rx_ring, count);
 	rx_ring->next_to_clean = rx_ntc;
+6 -4
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
 	/* enable queue */
 	IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), txdctl);

-	netdev_tx_reset_queue(txring_txq(ring));
-
 	/* TXDCTL.EN will return 0 on 82598 if link is down, so skip it */
 	if (hw->mac.type == ixgbe_mac_82598EB &&
 	    !(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP))
···
 		ixgbe_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
 	}

+	netdev_tx_reset_queue(txring_txq(tx_ring));
+
 	size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count;
 	memset(tx_ring->tx_buffer_info, 0, size);

···
 	adapter->dcb_cfg.pfc_mode_enable = false;
 	adapter->dcb_set_bitmap = 0x00;
 	adapter->dcbx_cap = DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_CEE;
-	ixgbe_copy_dcb_cfg(&adapter->dcb_cfg, &adapter->temp_dcb_cfg,
-			   MAX_TRAFFIC_CLASS);
+	memcpy(&adapter->temp_dcb_cfg, &adapter->dcb_cfg,
+	       sizeof(adapter->temp_dcb_cfg));

 #endif

···
 	netif_device_detach(netdev);

 	if (netif_running(netdev)) {
+		rtnl_lock();
 		ixgbe_down(adapter);
 		ixgbe_free_irq(adapter);
 		ixgbe_free_all_tx_resources(adapter);
 		ixgbe_free_all_rx_resources(adapter);
+		rtnl_unlock();
 	}

 	ixgbe_clear_interrupt_scheme(adapter);
+4 -3
drivers/net/ethernet/micrel/ks8851.c
 	netif_dbg(ks, intr, ks->netdev,
 		  "%s: status 0x%04x\n", __func__, status);

-	if (status & IRQ_LCI) {
-		/* should do something about checking link status */
+	if (status & IRQ_LCI)
 		handled |= IRQ_LCI;
-	}

 	if (status & IRQ_LDI) {
 		u16 pmecr = ks8851_rdreg16(ks, KS_PMECR);
···
 	}

 	mutex_unlock(&ks->lock);
+
+	if (status & IRQ_LCI)
+		mii_check_link(&ks->mii);

 	if (status & IRQ_TXI)
 		netif_wake_queue(ks->netdev);
+10 -6
drivers/net/ethernet/realtek/r8169.c
 #define R8169_MSG_DEFAULT \
 	(NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN)

-#define TX_BUFFS_AVAIL(tp) \
-	(tp->dirty_tx + NUM_TX_DESC - tp->cur_tx - 1)
+#define TX_SLOTS_AVAIL(tp) \
+	(tp->dirty_tx + NUM_TX_DESC - tp->cur_tx)
+
+/* A skbuff with nr_frags needs nr_frags+1 entries in the tx queue */
+#define TX_FRAGS_READY_FOR(tp,nr_frags) \
+	(TX_SLOTS_AVAIL(tp) >= (nr_frags + 1))

 /* Maximum number of multicast addresses to filter (vs. Rx-all-multicast).
    The RTL chips use a 64 element hash table based on the Ethernet CRC. */
···
 	u32 opts[2];
 	int frags;

-	if (unlikely(TX_BUFFS_AVAIL(tp) < skb_shinfo(skb)->nr_frags)) {
+	if (unlikely(!TX_FRAGS_READY_FOR(tp, skb_shinfo(skb)->nr_frags))) {
 		netif_err(tp, drv, dev, "BUG! Tx Ring full when queue awake!\n");
 		goto err_stop_0;
 	}
···

 	mmiowb();

-	if (TX_BUFFS_AVAIL(tp) < MAX_SKB_FRAGS) {
+	if (!TX_FRAGS_READY_FOR(tp, MAX_SKB_FRAGS)) {
 		/* Avoid wrongly optimistic queue wake-up: rtl_tx thread must
 		 * not miss a ring update when it notices a stopped queue.
 		 */
···
 	 * can't.
 	 */
 	smp_mb();
-	if (TX_BUFFS_AVAIL(tp) >= MAX_SKB_FRAGS)
+	if (TX_FRAGS_READY_FOR(tp, MAX_SKB_FRAGS))
 		netif_wake_queue(dev);
 }

···
 	 */
 	smp_mb();
 	if (netif_queue_stopped(dev) &&
-	    (TX_BUFFS_AVAIL(tp) >= MAX_SKB_FRAGS)) {
+	    TX_FRAGS_READY_FOR(tp, MAX_SKB_FRAGS)) {
 		netif_wake_queue(dev);
 	}
 	/*
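A worked example of the wraparound this patch removes, with an assumed ring size of 64: when the ring is completely full, say dirty_tx = 10 and cur_tx = 74, the old macro computed 10 + 64 - 74 - 1 = -1, which in the driver's unsigned arithmetic wraps to 0xffffffff, so the "< nr_frags" guard could never fire. The new TX_FRAGS_READY_FOR() asks the right question instead: are at least nr_frags + 1 slots free?

```c
#include <linux/printk.h>
#include <linux/types.h>

/* Illustration only: assumed 64-entry ring, completely full. */
static void sketch_wraparound(void)
{
	u32 dirty_tx = 10, cur_tx = 74;			/* cur_tx - dirty_tx == 64 */
	u32 old_avail = dirty_tx + 64 - cur_tx - 1;	/* (u32)-1 == 0xffffffff */
	u32 new_slots = dirty_tx + 64 - cur_tx;		/* 0 free slots */

	/* old_avail < nr_frags is never true after the wrap, so the
	 * full-ring error path was skipped; new_slots reports 0. */
	pr_info("old=%u new=%u\n", old_avail, new_slots);
}
```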
+1 -1
drivers/net/ethernet/sfc/efx.c
 	}

 	/* RSS might be usable on VFs even if it is disabled on the PF */
-	efx->rss_spread = (efx->n_rx_channels > 1 ?
+	efx->rss_spread = ((efx->n_rx_channels > 1 || !efx_sriov_wanted(efx)) ?
 			   efx->n_rx_channels : efx_vf_size(efx));

 	return 0;
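The sfc one-liner stops picking efx_vf_size() as the RSS spread when SR-IOV is not wanted; that helper can legitimately return 0 in that case, and rss_spread later ends up as a modulus when the RSS indirection table is filled in. A hedged sketch of the failure mode (the helper below is illustrative, not the actual sfc call site):

```c
#include <linux/types.h>

/* Illustration only: a zero rss_spread turns the modulo below into a
 * division by zero, which is the crash the patch prevents. */
static void sketch_fill_indir(u32 *table, size_t n, u32 rss_spread)
{
	size_t i;

	for (i = 0; i < n; i++)
		table[i] = i % rss_spread;
}
```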
+1 -1
drivers/net/macvlan.c

 xmit_world:
 	skb->ip_summed = ip_summed;
-	skb_set_dev(skb, vlan->lowerdev);
+	skb->dev = vlan->lowerdev;
 	return dev_queue_xmit(skb);
 }

+37 -4
drivers/net/macvtap.c
 #include <linux/etherdevice.h>
 #include <linux/if_macvlan.h>
+#include <linux/if_vlan.h>
 #include <linux/interrupt.h>
 #include <linux/nsproxy.h>
 #include <linux/compat.h>
···
 	struct macvlan_dev *vlan;
 	int ret;
 	int vnet_hdr_len = 0;
+	int vlan_offset = 0;
+	int copied;

 	if (q->flags & IFF_VNET_HDR) {
 		struct virtio_net_hdr vnet_hdr;
···
 		if (memcpy_toiovecend(iv, (void *)&vnet_hdr, 0, sizeof(vnet_hdr)))
 			return -EFAULT;
 	}
+	copied = vnet_hdr_len;

-	len = min_t(int, skb->len, len);
+	if (!vlan_tx_tag_present(skb))
+		len = min_t(int, skb->len, len);
+	else {
+		int copy;
+		struct {
+			__be16 h_vlan_proto;
+			__be16 h_vlan_TCI;
+		} veth;
+		veth.h_vlan_proto = htons(ETH_P_8021Q);
+		veth.h_vlan_TCI = htons(vlan_tx_tag_get(skb));

-	ret = skb_copy_datagram_const_iovec(skb, 0, iv, vnet_hdr_len, len);
+		vlan_offset = offsetof(struct vlan_ethhdr, h_vlan_proto);
+		len = min_t(int, skb->len + VLAN_HLEN, len);

+		copy = min_t(int, vlan_offset, len);
+		ret = skb_copy_datagram_const_iovec(skb, 0, iv, copied, copy);
+		len -= copy;
+		copied += copy;
+		if (ret || !len)
+			goto done;
+
+		copy = min_t(int, sizeof(veth), len);
+		ret = memcpy_toiovecend(iv, (void *)&veth, copied, copy);
+		len -= copy;
+		copied += copy;
+		if (ret || !len)
+			goto done;
+	}
+
+	ret = skb_copy_datagram_const_iovec(skb, vlan_offset, iv, copied, len);
+	copied += len;
+
+done:
 	rcu_read_lock_bh();
 	vlan = rcu_dereference_bh(q->vlan);
 	if (vlan)
-		macvlan_count_rx(vlan, len, ret == 0, 0);
+		macvlan_count_rx(vlan, copied - vnet_hdr_len, ret == 0, 0);
 	rcu_read_unlock_bh();

-	return ret ? ret : (len + vnet_hdr_len);
+	return ret ? ret : copied;
 }

 static ssize_t macvtap_do_read(struct macvtap_queue *q, struct kiocb *iocb,
+12 -2
drivers/net/usb/cdc_ether.c
 	struct cdc_state *info = (void *) &dev->data;
 	int status;
 	int rndis;
+	bool android_rndis_quirk = false;
 	struct usb_driver *driver = driver_of(intf);
 	struct usb_cdc_mdlm_desc *desc = NULL;
 	struct usb_cdc_mdlm_detail_desc *detail = NULL;
···
 					info->control,
 					info->u->bSlaveInterface0,
 					info->data);
+				/* fall back to hard-wiring for RNDIS */
+				if (rndis) {
+					android_rndis_quirk = true;
+					goto next_desc;
+				}
 				goto bad_desc;
 			}
 			if (info->control != intf) {
···
 	/* Microsoft ActiveSync based and some regular RNDIS devices lack the
 	 * CDC descriptors, so we'll hard-wire the interfaces and not check
 	 * for descriptors.
+	 *
+	 * Some Android RNDIS devices have a CDC Union descriptor pointing
+	 * to non-existing interfaces. Ignore that and attempt the same
+	 * hard-wired 0 and 1 interfaces.
 	 */
-	if (rndis && !info->u) {
+	if (rndis && (!info->u || android_rndis_quirk)) {
 		info->control = usb_ifnum_to_if(dev->udev, 0);
 		info->data = usb_ifnum_to_if(dev->udev, 1);
-		if (!info->control || !info->data) {
+		if (!info->control || !info->data || info->control != intf) {
 			dev_dbg(&intf->dev,
 				"rndis: master #0/%p slave #1/%p\n",
 				info->control,
+2 -2
drivers/net/wireless/ath/ath9k/ar9003_phy.c
 		else
 			spur_subchannel_sd = 0;

-		spur_freq_sd = (freq_offset << 9) / 11;
+		spur_freq_sd = ((freq_offset + 10) << 9) / 11;

 	} else {
 		if (REG_READ_FIELD(ah, AR_PHY_GEN_CTRL,
···
 		else
 			spur_subchannel_sd = 1;

-		spur_freq_sd = (freq_offset << 9) / 11;
+		spur_freq_sd = ((freq_offset - 10) << 9) / 11;

 	}

+1
drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c
 			/* after stopping the bus, exit thread */
 			brcmf_sdbrcm_bus_stop(bus->sdiodev->dev);
 			bus->dpc_tsk = NULL;
+			spin_lock_irqsave(&bus->dpc_tl_lock, flags);
 			break;
 		}

+14 -7
drivers/net/wireless/iwlwifi/iwl-agn-rx.c
 	struct sk_buff *skb;
 	__le16 fc = hdr->frame_control;
 	struct iwl_rxon_context *ctx;
-	struct page *p;
-	int offset;
+	unsigned int hdrlen, fraglen;

 	/* We only process data packets if the interface is open */
 	if (unlikely(!priv->is_open)) {
···
 	    iwlagn_set_decrypted_flag(priv, hdr, ampdu_status, stats))
 		return;

-	skb = dev_alloc_skb(128);
+	/* Dont use dev_alloc_skb(), we'll have enough headroom once
+	 * ieee80211_hdr pulled.
+	 */
+	skb = alloc_skb(128, GFP_ATOMIC);
 	if (!skb) {
-		IWL_ERR(priv, "dev_alloc_skb failed\n");
+		IWL_ERR(priv, "alloc_skb failed\n");
 		return;
 	}
+	hdrlen = min_t(unsigned int, len, skb_tailroom(skb));
+	memcpy(skb_put(skb, hdrlen), hdr, hdrlen);
+	fraglen = len - hdrlen;

-	offset = (void *)hdr - rxb_addr(rxb);
-	p = rxb_steal_page(rxb);
-	skb_add_rx_frag(skb, 0, p, offset, len, len);
+	if (fraglen) {
+		int offset = (void *)hdr + hdrlen - rxb_addr(rxb);

+		skb_add_rx_frag(skb, 0, rxb_steal_page(rxb), offset,
+				fraglen, rxb->truesize);
+	}
 	iwl_update_stats(priv, false, fc, len);

 	/*
+2 -1
drivers/net/wireless/iwlwifi/iwl-trans-pcie-rx.c
 	if (WARN_ON(!rxb))
 		return;

+	rxcb.truesize = PAGE_SIZE << hw_params(trans).rx_page_order;
 	dma_unmap_page(trans->dev, rxb->page_dma,
-		       PAGE_SIZE << hw_params(trans).rx_page_order,
+		       rxcb.truesize,
 		       DMA_FROM_DEVICE);

 	rxcb._page = rxb->page;
+1
drivers/net/wireless/iwlwifi/iwl-trans.h

 struct iwl_rx_cmd_buffer {
 	struct page *_page;
+	unsigned int truesize;
 };

 static inline void *rxb_addr(struct iwl_rx_cmd_buffer *r)
+6 -1
drivers/vhost/net.c
 #include <linux/if_arp.h>
 #include <linux/if_tun.h>
 #include <linux/if_macvlan.h>
+#include <linux/if_vlan.h>

 #include <net/sock.h>

···

 	spin_lock_irqsave(&sk->sk_receive_queue.lock, flags);
 	head = skb_peek(&sk->sk_receive_queue);
-	if (likely(head))
+	if (likely(head)) {
 		len = head->len;
+		if (vlan_tx_tag_present(head))
+			len += VLAN_HLEN;
+	}
+
 	spin_unlock_irqrestore(&sk->sk_receive_queue.lock, flags);
 	return len;
 }
+1 -1
drivers/video/uvesafb.c
 	struct uvesafb_task *utask;
 	struct uvesafb_ktask *task;

-	if (!cap_raised(current_cap(), CAP_SYS_ADMIN))
+	if (!capable(CAP_SYS_ADMIN))
 		return;

 	if (msg->seq >= UVESAFB_TASKS_MAX)
+6 -5
include/linux/etherdevice.h
  * @addr1: Pointer to a six-byte array containing the Ethernet address
  * @addr2: Pointer other six-byte array containing the Ethernet address
  *
- * Compare two ethernet addresses, returns 0 if equal
+ * Compare two ethernet addresses, returns 0 if equal, non-zero otherwise.
+ * Unlike memcmp(), it doesn't return a value suitable for sorting.
  */
 static inline unsigned compare_ether_addr(const u8 *addr1, const u8 *addr2)
 {
···
  * @addr1: Pointer to an array of 8 bytes
  * @addr2: Pointer to an other array of 8 bytes
  *
- * Compare two ethernet addresses, returns 0 if equal.
- * Same result than "memcmp(addr1, addr2, ETH_ALEN)" but without conditional
- * branches, and possibly long word memory accesses on CPU allowing cheap
- * unaligned memory reads.
+ * Compare two ethernet addresses, returns 0 if equal, non-zero otherwise.
+ * Unlike memcmp(), it doesn't return a value suitable for sorting.
+ * The function doesn't need any conditional branches and possibly uses
+ * word memory accesses on CPU allowing cheap unaligned memory reads.
  * arrays = { byte1, byte2, byte3, byte4, byte6, byte7, pad1, pad2}
  *
  * Please note that alignment of addr1 & addr2 is only guaranted to be 16 bits.
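The reworded kernel-doc is the substance of this commit: compare_ether_addr() only answers "equal or not", so its result must be treated as a boolean, never as memcmp()-style ordering. A small usage sketch under that reading (the helper name is made up):

```c
#include <linux/etherdevice.h>
#include <linux/netdevice.h>

/* Correct: use the result strictly as a "differs" flag. */
static bool sketch_is_for_us(const struct net_device *dev,
			     const struct ethhdr *eth)
{
	return compare_ether_addr(eth->h_dest, dev->dev_addr) == 0;
}
/* Wrong: sorting by the return value relies on an ordering
 * the function does not provide. */
```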
-9
include/linux/netdevice.h
 	return 0;
 }

-#ifndef CONFIG_NET_NS
-static inline void skb_set_dev(struct sk_buff *skb, struct net_device *dev)
-{
-	skb->dev = dev;
-}
-#else /* CONFIG_NET_NS */
-void skb_set_dev(struct sk_buff *skb, struct net_device *dev);
-#endif
-
 static inline bool netdev_uses_trailer_tags(struct net_device *dev)
 {
 #ifdef CONFIG_NET_DSA_TAG_TRAILER
+13
include/net/sctp/sctp.h
 		addr->v6.sin6_addr.s6_addr32[2] = htonl(0x0000ffff);
 }

+/* The cookie is always 0 since this is how it's used in the
+ * pmtu code.
+ */
+static inline struct dst_entry *sctp_transport_dst_check(struct sctp_transport *t)
+{
+	if (t->dst && !dst_check(t->dst, 0)) {
+		dst_release(t->dst);
+		t->dst = NULL;
+	}
+
+	return t->dst;
+}
+
 #endif /* __net_sctp_h__ */
+1 -1
net/8021q/vlan_dev.c
 		skb = __vlan_hwaccel_put_tag(skb, vlan_tci);
 	}

-	skb_set_dev(skb, vlan_dev_priv(dev)->real_dev);
+	skb->dev = vlan_dev_priv(dev)->real_dev;
 	len = skb->len;
 	if (netpoll_tx_running(dev))
 		return skb->dev->netdev_ops->ndo_start_xmit(skb, skb->dev);
+5 -31
net/core/dev.c
 		return NET_RX_DROP;
 	}
 	skb->skb_iif = 0;
-	skb_set_dev(skb, dev);
+	skb->dev = dev;
+	skb_dst_drop(skb);
 	skb->tstamp.tv64 = 0;
 	skb->pkt_type = PACKET_HOST;
 	skb->protocol = eth_type_trans(skb, dev);
+	skb->mark = 0;
+	secpath_reset(skb);
+	nf_reset(skb);
 	return netif_rx(skb);
 }
 EXPORT_SYMBOL_GPL(dev_forward_skb);
···
 	}
 }
 EXPORT_SYMBOL(netif_device_attach);
-
-/**
- * skb_dev_set -- assign a new device to a buffer
- * @skb: buffer for the new device
- * @dev: network device
- *
- * If an skb is owned by a device already, we have to reset
- * all data private to the namespace a device belongs to
- * before assigning it a new device.
- */
-#ifdef CONFIG_NET_NS
-void skb_set_dev(struct sk_buff *skb, struct net_device *dev)
-{
-	skb_dst_drop(skb);
-	if (skb->dev && !net_eq(dev_net(skb->dev), dev_net(dev))) {
-		secpath_reset(skb);
-		nf_reset(skb);
-		skb_init_secmark(skb);
-		skb->mark = 0;
-		skb->priority = 0;
-		skb->nf_trace = 0;
-		skb->ipvs_property = 0;
-#ifdef CONFIG_NET_SCHED
-		skb->tc_index = 0;
-#endif
-	}
-	skb->dev = dev;
-}
-EXPORT_SYMBOL(skb_set_dev);
-#endif /* CONFIG_NET_NS */

 static void skb_warn_bad_offload(const struct sk_buff *skb)
 {
+8 -2
net/core/pktgen.c
 {
 	struct net_device *dev = ptr;

-	if (!net_eq(dev_net(dev), &init_net))
+	if (!net_eq(dev_net(dev), &init_net) || pktgen_exiting)
 		return NOTIFY_DONE;

 	/* It is OK that we do not hold the group lock right now,
···
 {
 	struct pktgen_thread *t;
 	struct list_head *q, *n;
+	struct list_head list;

 	/* Stop all interfaces & threads */
 	pktgen_exiting = true;

-	list_for_each_safe(q, n, &pktgen_threads) {
+	mutex_lock(&pktgen_thread_lock);
+	list_splice(&list, &pktgen_threads);
+	mutex_unlock(&pktgen_thread_lock);
+
+	list_for_each_safe(q, n, &list) {
 		t = list_entry(q, struct pktgen_thread, th_list);
+		list_del(&t->th_list);
 		kthread_stop(t->tsk);
 		kfree(t);
 	}
+2
net/ipv4/fib_trie.c

 		if (fa->fa_tos && fa->fa_tos != flp->flowi4_tos)
 			continue;
+		if (fi->fib_dead)
+			continue;
 		if (fa->fa_info->fib_scope < flp->flowi4_scope)
 			continue;
 		fib_alias_accessed(fa);
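These two lines apply the rule from item 1 of the pull message: an RCU reader can still see a FIB entry that a writer has already marked dead, so the lookup must re-check that flag before returning the entry. A generic sketch of the pattern, not the FIB code (struct sketch_entry is made up; the caller is assumed to hold rcu_read_lock()):

```c
#include <linux/rcupdate.h>
#include <linux/types.h>

struct sketch_entry {
	bool dead;		/* set by the writer before it frees the entry */
	/* ... payload ... */
};

static struct sketch_entry *sketch_lookup(struct sketch_entry __rcu **slot)
{
	struct sketch_entry *e;

	e = rcu_dereference(*slot);	/* may be a logically deleted entry */
	if (e && e->dead)		/* re-validate before handing it out */
		return NULL;

	return e;
}
```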
+17 -10
net/openvswitch/datapath.c
 		return validate_actions(actions, key, depth + 1);
 }

+static int validate_tp_port(const struct sw_flow_key *flow_key)
+{
+	if (flow_key->eth.type == htons(ETH_P_IP)) {
+		if (flow_key->ipv4.tp.src && flow_key->ipv4.tp.dst)
+			return 0;
+	} else if (flow_key->eth.type == htons(ETH_P_IPV6)) {
+		if (flow_key->ipv6.tp.src && flow_key->ipv6.tp.dst)
+			return 0;
+	}
+
+	return -EINVAL;
+}
+
 static int validate_set(const struct nlattr *a,
 			const struct sw_flow_key *flow_key)
 {
···
 		if (flow_key->ip.proto != IPPROTO_TCP)
 			return -EINVAL;

-		if (!flow_key->ipv4.tp.src || !flow_key->ipv4.tp.dst)
-			return -EINVAL;
-
-		break;
+		return validate_tp_port(flow_key);

 	case OVS_KEY_ATTR_UDP:
 		if (flow_key->ip.proto != IPPROTO_UDP)
 			return -EINVAL;

-		if (!flow_key->ipv4.tp.src || !flow_key->ipv4.tp.dst)
-			return -EINVAL;
-		break;
+		return validate_tp_port(flow_key);

 	default:
 		return -EINVAL;
···
 	reply = ovs_vport_cmd_build_info(vport, info->snd_pid, info->snd_seq,
 					 OVS_VPORT_CMD_NEW);
 	if (IS_ERR(reply)) {
-		err = PTR_ERR(reply);
 		netlink_set_err(init_net.genl_sock, 0,
-				ovs_dp_vport_multicast_group.id, err);
-		return 0;
+				ovs_dp_vport_multicast_group.id, PTR_ERR(reply));
+		goto exit_unlock;
 	}

 	genl_notify(reply, genl_info_net(info), info->snd_pid,
+2 -1
net/openvswitch/flow.c
 	u8 tcp_flags = 0;

 	if (flow->key.eth.type == htons(ETH_P_IP) &&
-	    flow->key.ip.proto == IPPROTO_TCP) {
+	    flow->key.ip.proto == IPPROTO_TCP &&
+	    likely(skb->len >= skb_transport_offset(skb) + sizeof(struct tcphdr))) {
 		u8 *tcp = (u8 *)tcp_hdr(skb);
 		tcp_flags = *(tcp + TCP_FLAGS_OFFSET) & TCP_FLAG_MASK;
 	}
+1 -3
net/sctp/output.c
 	 */
 	skb_set_owner_w(nskb, sk);

-	/* The 'obsolete' field of dst is set to 2 when a dst is freed. */
-	if (!dst || (dst->obsolete > 1)) {
-		dst_release(dst);
+	if (!sctp_transport_dst_check(tp)) {
 		sctp_transport_route(tp, NULL, sctp_sk(sk));
 		if (asoc && (asoc->param_flags & SPP_PMTUD_ENABLE)) {
 			sctp_assoc_sync_pmtu(asoc);
-17
net/sctp/transport.c
 		transport->pathmtu = SCTP_DEFAULT_MAXSEGMENT;
 }

-/* this is a complete rip-off from __sk_dst_check
- * the cookie is always 0 since this is how it's used in the
- * pmtu code
- */
-static struct dst_entry *sctp_transport_dst_check(struct sctp_transport *t)
-{
-	struct dst_entry *dst = t->dst;
-
-	if (dst && dst->obsolete && dst->ops->check(dst, 0) == NULL) {
-		dst_release(t->dst);
-		t->dst = NULL;
-		return NULL;
-	}
-
-	return dst;
-}
-
 void sctp_transport_update_pmtu(struct sctp_transport *t, u32 pmtu)
 {
 	struct dst_entry *dst;