Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Pull networking fixes from David Miller:

1) Fix hotplug deadlock in hv_netvsc, from Stephen Hemminger.

2) Fix double-free in rmnet driver, from Dan Carpenter.

3) INET connection socket layer can double put request sockets, fix
from Eric Dumazet.

4) Don't match collect_md (collect metadata) mode tunnels if the device
is down, from Haishuang Yan.

5) Do not perform TSO6/GSO on ipv6 packets with extension headers in the
be2net driver, from Suresh Reddy.

6) Fix scaling error in gen_estimator, from Eric Dumazet.

7) Fix 64-bit statistics deadlock in systemport driver, from Florian
Fainelli.

8) Fix use-after-free in sctp_sock_dump, from Xin Long.

9) Reject invalid BPF_END instructions in verifier, from Edward Cree.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (43 commits)
mlxsw: spectrum_router: Only handle IPv4 and IPv6 events
Documentation: link in networking docs
tcp: fix data delivery rate
bpf/verifier: reject BPF_ALU64|BPF_END
sctp: do not mark sk dumped when inet_sctp_diag_fill returns err
sctp: fix an use-after-free issue in sctp_sock_dump
netvsc: increase default receive buffer size
tcp: update skb->skb_mstamp more carefully
net: ipv4: fix l3slave check for index returned in IP_PKTINFO
net: smsc911x: Quieten netif during suspend
net: systemport: Fix 64-bit stats deadlock
net: vrf: avoid gcc-4.6 warning
qed: remove unnecessary call to memset
tg3: clean up redundant initialization of tnapi
tls: make tls_sw_free_resources static
sctp: potential read out of bounds in sctp_ulpevent_type_enabled()
MAINTAINERS: review Renesas DT bindings as well
net_sched: gen_estimator: fix scaling error in bytes/packets samples
nfp: wait for the NSP resource to appear on boot
nfp: wait for board state before talking to the NSP
...

+467 -234
+1 -1
Documentation/networking/filter.txt
···
 qdisc layer, SECCOMP-BPF (SECure COMPuting [1]), and lots of other places
 such as team driver, PTP code, etc where BPF is being used.

-[1] Documentation/prctl/seccomp_filter.txt
+[1] Documentation/userspace-api/seccomp_filter.rst

 Original BPF paper:
+2
MAINTAINERS
···
 R: Sergei Shtylyov <sergei.shtylyov@cogentembedded.com>
 L: netdev@vger.kernel.org
 L: linux-renesas-soc@vger.kernel.org
+F: Documentation/devicetree/bindings/net/renesas,*.txt
+F: Documentation/devicetree/bindings/net/sh_eth.txt
 F: drivers/net/ethernet/renesas/
 F: include/linux/sh_eth.h
+7 -10
drivers/net/bonding/bond_main.c
···
     int bond_mode = BOND_MODE_ROUNDROBIN;
     int xmit_hashtype = BOND_XMIT_POLICY_LAYER2;
     int lacp_fast = 0;
-    int tlb_dynamic_lb = 0;
+    int tlb_dynamic_lb;

     /* Convert string parameters. */
     if (mode) {
···
     }
     ad_user_port_key = valptr->value;

-    if ((bond_mode == BOND_MODE_TLB) || (bond_mode == BOND_MODE_ALB)) {
-        bond_opt_initstr(&newval, "default");
-        valptr = bond_opt_parse(bond_opt_get(BOND_OPT_TLB_DYNAMIC_LB),
-                                &newval);
-        if (!valptr) {
-            pr_err("Error: No tlb_dynamic_lb default value");
-            return -EINVAL;
-        }
-        tlb_dynamic_lb = valptr->value;
+    bond_opt_initstr(&newval, "default");
+    valptr = bond_opt_parse(bond_opt_get(BOND_OPT_TLB_DYNAMIC_LB), &newval);
+    if (!valptr) {
+        pr_err("Error: No tlb_dynamic_lb default value");
+        return -EINVAL;
     }
+    tlb_dynamic_lb = valptr->value;

     if (lp_interval == 0) {
         pr_warn("Warning: ip_interval must be between 1 and %d, so it was reset to %d\n",
+3
drivers/net/bonding/bond_options.c
···
             bond->params.miimon);
     }

+    if (newval->value == BOND_MODE_ALB)
+        bond->params.tlb_dynamic_lb = 1;
+
     /* don't cache arp_validate between modes */
     bond->params.arp_validate = BOND_ARP_VALIDATE_NONE;
     bond->params.mode = newval->value;
-3
drivers/net/ethernet/broadcom/bcmsysport.c
···
         stats->tx_packets += tx_packets;
     }

-    /* lockless update tx_bytes and tx_packets */
-    u64_stats_update_begin(&priv->syncp);
     stats64->tx_bytes = stats->tx_bytes;
     stats64->tx_packets = stats->tx_packets;
-    u64_stats_update_end(&priv->syncp);

     do {
         start = u64_stats_fetch_begin_irq(&priv->syncp);
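The pattern at play here: 64-bit counters on 32-bit hosts are protected by a u64_stats seqcount, and ndo_get_stats64 is strictly a reader. Taking the writer side (u64_stats_update_begin/end) in the same path that also runs the fetch/retry loop on the same syncp, as the removed lines did, can leave the reader spinning on a sequence count that keeps moving. A minimal sketch of the reader/writer split, with hypothetical names (my_stats, my_tx_done) not taken from the driver:

#include <linux/types.h>
#include <linux/u64_stats_sync.h>

struct my_stats {
    u64 tx_bytes;
    struct u64_stats_sync syncp;
};

/* Writer (datapath only): bump counters under the seqcount. */
static void my_tx_done(struct my_stats *s, unsigned int bytes)
{
    u64_stats_update_begin(&s->syncp);
    s->tx_bytes += bytes;
    u64_stats_update_end(&s->syncp);
}

/* Reader (.ndo_get_stats64): retry until the writer was quiescent.
 * Entering the writer-side section on this same syncp from the read
 * path, as the removed code did, defeats the retry protocol on 32-bit.
 */
static u64 my_get_tx_bytes(struct my_stats *s)
{
    unsigned int start;
    u64 bytes;

    do {
        start = u64_stats_fetch_begin_irq(&s->syncp);
        bytes = s->tx_bytes;
    } while (u64_stats_fetch_retry_irq(&s->syncp, start));

    return bytes;
}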
+2 -2
drivers/net/ethernet/broadcom/tg3.c
···
     tg3_napi_enable(tp);

     for (i = 0; i < tp->irq_cnt; i++) {
-        struct tg3_napi *tnapi = &tp->napi[i];
         err = tg3_request_irq(tp, i);
         if (err) {
             for (i--; i >= 0; i--) {
-                tnapi = &tp->napi[i];
+                struct tg3_napi *tnapi = &tp->napi[i];
+
                 free_irq(tnapi->irq_vec, tnapi);
             }
             goto out_napi_fini;
+8
drivers/net/ethernet/emulex/benet/be.h
···
     return skb->protocol == htons(ETH_P_IP) && ip_hdr(skb)->version == 4;
 }

+static inline bool is_ipv6_ext_hdr(struct sk_buff *skb)
+{
+    if (ip_hdr(skb)->version == 6)
+        return ipv6_ext_hdr(ipv6_hdr(skb)->nexthdr);
+    else
+        return false;
+}
+
 #define be_error_recovering(adapter) \
     (adapter->flags & BE_FLAGS_TRY_RECOVERY)
+14
drivers/net/ethernet/emulex/benet/be_main.c
···
     struct be_adapter *adapter = netdev_priv(dev);
     u8 l4_hdr = 0;

+    if (skb_is_gso(skb)) {
+        /* IPv6 TSO requests with extension hdrs are a problem
+         * to Lancer and BE3 HW. Disable TSO6 feature.
+         */
+        if (!skyhawk_chip(adapter) && is_ipv6_ext_hdr(skb))
+            features &= ~NETIF_F_TSO6;
+
+        /* Lancer cannot handle the packet with MSS less than 256.
+         * Disable the GSO support in such cases
+         */
+        if (lancer_chip(adapter) && skb_shinfo(skb)->gso_size < 256)
+            features &= ~NETIF_F_GSO_MASK;
+    }
+
     /* The code below restricts offload features for some tunneled and
      * Q-in-Q packets.
      * Offload features for normal (non tunnel) packets are unchanged.
+26 -12
drivers/net/ethernet/mellanox/mlxsw/spectrum.c
···
 }

 static struct mlxsw_sp_span_entry *
-mlxsw_sp_span_entry_find(struct mlxsw_sp_port *port)
+mlxsw_sp_span_entry_find(struct mlxsw_sp *mlxsw_sp, u8 local_port)
 {
-    struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
     int i;

     for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
         struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i];

-        if (curr->used && curr->local_port == port->local_port)
+        if (curr->used && curr->local_port == local_port)
             return curr;
     }
     return NULL;
···
 {
     struct mlxsw_sp_span_entry *span_entry;

-    span_entry = mlxsw_sp_span_entry_find(port);
+    span_entry = mlxsw_sp_span_entry_find(port->mlxsw_sp,
+                                          port->local_port);
     if (span_entry) {
         /* Already exists, just take a reference */
         span_entry->ref_count++;
···
 }

 static void mlxsw_sp_span_mirror_remove(struct mlxsw_sp_port *from,
-                                        struct mlxsw_sp_port *to,
+                                        u8 destination_port,
                                         enum mlxsw_sp_span_type type)
 {
     struct mlxsw_sp_span_entry *span_entry;

-    span_entry = mlxsw_sp_span_entry_find(to);
+    span_entry = mlxsw_sp_span_entry_find(from->mlxsw_sp,
+                                          destination_port);
     if (!span_entry) {
         netdev_err(from->dev, "no span entry found\n");
         return;
···
 mlxsw_sp_port_del_cls_matchall_mirror(struct mlxsw_sp_port *mlxsw_sp_port,
                                       struct mlxsw_sp_port_mall_mirror_tc_entry *mirror)
 {
-    struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
     enum mlxsw_sp_span_type span_type;
-    struct mlxsw_sp_port *to_port;

-    to_port = mlxsw_sp->ports[mirror->to_local_port];
     span_type = mirror->ingress ?
                 MLXSW_SP_SPAN_INGRESS : MLXSW_SP_SPAN_EGRESS;
-    mlxsw_sp_span_mirror_remove(mlxsw_sp_port, to_port, span_type);
+    mlxsw_sp_span_mirror_remove(mlxsw_sp_port, mirror->to_local_port,
+                                span_type);
 }

 static int
···
     return err;
 }

-#define MLXSW_SP_QSFP_I2C_ADDR 0x50
+#define MLXSW_SP_I2C_ADDR_LOW 0x50
+#define MLXSW_SP_I2C_ADDR_HIGH 0x51
+#define MLXSW_SP_EEPROM_PAGE_LENGTH 256

 static int mlxsw_sp_query_module_eeprom(struct mlxsw_sp_port *mlxsw_sp_port,
                                         u16 offset, u16 size, void *data,
···
     struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
     char eeprom_tmp[MLXSW_SP_REG_MCIA_EEPROM_SIZE];
     char mcia_pl[MLXSW_REG_MCIA_LEN];
+    u16 i2c_addr;
     int status;
     int err;

     size = min_t(u16, size, MLXSW_SP_REG_MCIA_EEPROM_SIZE);
+
+    if (offset < MLXSW_SP_EEPROM_PAGE_LENGTH &&
+        offset + size > MLXSW_SP_EEPROM_PAGE_LENGTH)
+        /* Cross pages read, read until offset 256 in low page */
+        size = MLXSW_SP_EEPROM_PAGE_LENGTH - offset;
+
+    i2c_addr = MLXSW_SP_I2C_ADDR_LOW;
+    if (offset >= MLXSW_SP_EEPROM_PAGE_LENGTH) {
+        i2c_addr = MLXSW_SP_I2C_ADDR_HIGH;
+        offset -= MLXSW_SP_EEPROM_PAGE_LENGTH;
+    }
+
     mlxsw_reg_mcia_pack(mcia_pl, mlxsw_sp_port->mapping.module,
-                        0, 0, offset, size, MLXSW_SP_QSFP_I2C_ADDR);
+                        0, 0, offset, size, i2c_addr);

     err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mcia), mcia_pl);
     if (err)
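The EEPROM change is worth spelling out: QSFP module memory is laid out as 256-byte pages behind two I2C addresses (0x50 for the lower page, 0x51 above it), so a single read must never straddle the 256-byte boundary, and offsets past it have to be rebased onto the high address. A hedged, standalone sketch of the same arithmetic the patch adds (function and constant names here are illustrative, not the driver's):

#include <stdint.h>

#define EEPROM_PAGE_LEN 256
#define I2C_ADDR_LOW    0x50
#define I2C_ADDR_HIGH   0x51

/* Illustrative helper mirroring the clamping in
 * mlxsw_sp_query_module_eeprom(): pick the I2C address, rebase the
 * offset, and shrink cross-page reads so they end at the page boundary.
 */
static void eeprom_map_read(uint16_t *offset, uint16_t *size, uint8_t *i2c_addr)
{
    if (*offset < EEPROM_PAGE_LEN && *offset + *size > EEPROM_PAGE_LEN)
        *size = EEPROM_PAGE_LEN - *offset;   /* stop at the page end */

    *i2c_addr = I2C_ADDR_LOW;
    if (*offset >= EEPROM_PAGE_LEN) {
        *i2c_addr = I2C_ADDR_HIGH;
        *offset -= EEPROM_PAGE_LEN;          /* rebase into the high page */
    }
}

/* Example: a 16-byte read at absolute offset 250 is clamped to 6 bytes at
 * 0x50; the follow-up read at offset 256 becomes offset 0 at 0x51.
 */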
+2 -1
drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
···
     struct fib_notifier_info *info = ptr;
     struct mlxsw_sp_router *router;

-    if (!net_eq(info->net, &init_net))
+    if (!net_eq(info->net, &init_net) ||
+        (info->family != AF_INET && info->family != AF_INET6))
         return NOTIFY_DONE;

     fib_work = kzalloc(sizeof(*fib_work), GFP_ATOMIC);
+13
drivers/net/ethernet/netronome/nfp/flower/offload.c
···
 #include "../nfp_net.h"
 #include "../nfp_port.h"

+#define NFP_FLOWER_WHITELIST_DISSECTOR \
+    (BIT(FLOW_DISSECTOR_KEY_CONTROL) | \
+     BIT(FLOW_DISSECTOR_KEY_BASIC) | \
+     BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) | \
+     BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) | \
+     BIT(FLOW_DISSECTOR_KEY_PORTS) | \
+     BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) | \
+     BIT(FLOW_DISSECTOR_KEY_VLAN) | \
+     BIT(FLOW_DISSECTOR_KEY_IP))
+
 static int
 nfp_flower_xmit_flow(struct net_device *netdev,
                      struct nfp_fl_payload *nfp_flow, u8 mtype)
···
     u32 key_layer_two;
     u8 key_layer;
     int key_size;
+
+    if (flow->dissector->used_keys & ~NFP_FLOWER_WHITELIST_DISSECTOR)
+        return -EOPNOTSUPP;

     if (dissector_uses_key(flow->dissector,
                            FLOW_DISSECTOR_KEY_ENC_CONTROL)) {
+47
drivers/net/ethernet/netronome/nfp/nfp_main.c
···
 };
 MODULE_DEVICE_TABLE(pci, nfp_pci_device_ids);

+static bool nfp_board_ready(struct nfp_pf *pf)
+{
+    const char *cp;
+    long state;
+    int err;
+
+    cp = nfp_hwinfo_lookup(pf->hwinfo, "board.state");
+    if (!cp)
+        return false;
+
+    err = kstrtol(cp, 0, &state);
+    if (err < 0)
+        return false;
+
+    return state == 15;
+}
+
+static int nfp_pf_board_state_wait(struct nfp_pf *pf)
+{
+    const unsigned long wait_until = jiffies + 10 * HZ;
+
+    while (!nfp_board_ready(pf)) {
+        if (time_is_before_eq_jiffies(wait_until)) {
+            nfp_err(pf->cpp, "NFP board initialization timeout\n");
+            return -EINVAL;
+        }
+
+        nfp_info(pf->cpp, "waiting for board initialization\n");
+        if (msleep_interruptible(500))
+            return -ERESTARTSYS;
+
+        /* Refresh cached information */
+        kfree(pf->hwinfo);
+        pf->hwinfo = nfp_hwinfo_read(pf->cpp);
+    }
+
+    return 0;
+}
+
 static int nfp_pcie_sriov_read_nfd_limit(struct nfp_pf *pf)
 {
     int err;
···
     struct nfp_nsp *nsp;
     int err;

+    err = nfp_resource_wait(pf->cpp, NFP_RESOURCE_NSP, 30);
+    if (err)
+        return err;
+
     nsp = nfp_nsp_open(pf->cpp);
     if (IS_ERR(nsp)) {
         err = PTR_ERR(nsp);
···
         nfp_hwinfo_lookup(pf->hwinfo, "assembly.serial"),
         nfp_hwinfo_lookup(pf->hwinfo, "assembly.revision"),
         nfp_hwinfo_lookup(pf->hwinfo, "cpld.version"));
+
+    err = nfp_pf_board_state_wait(pf);
+    if (err)
+        goto err_hwinfo_free;

     err = devlink_register(devlink, &pdev->dev);
     if (err)
-23
drivers/net/ethernet/netronome/nfp/nfp_net_main.c
···

 #define NFP_PF_CSR_SLICE_SIZE (32 * 1024)

-static int nfp_is_ready(struct nfp_pf *pf)
-{
-    const char *cp;
-    long state;
-    int err;
-
-    cp = nfp_hwinfo_lookup(pf->hwinfo, "board.state");
-    if (!cp)
-        return 0;
-
-    err = kstrtol(cp, 0, &state);
-    if (err < 0)
-        return 0;
-
-    return state == 15;
-}
-
 /**
  * nfp_net_get_mac_addr() - Get the MAC address.
  * @pf: NFP PF handle
···
     int err;

     INIT_WORK(&pf->port_refresh_work, nfp_net_refresh_vnics);
-
-    /* Verify that the board has completed initialization */
-    if (!nfp_is_ready(pf)) {
-        nfp_err(pf->cpp, "NFP is not ready for NIC operation.\n");
-        return -EINVAL;
-    }

     if (!pf->rtbl) {
         nfp_err(pf->cpp, "No %s, giving up.\n",
+2
drivers/net/ethernet/netronome/nfp/nfpcore/nfp.h
···

 void nfp_resource_release(struct nfp_resource *res);

+int nfp_resource_wait(struct nfp_cpp *cpp, const char *name, unsigned int secs);
+
 u32 nfp_resource_cpp_id(struct nfp_resource *res);

 const char *nfp_resource_name(struct nfp_resource *res);
+45
drivers/net/ethernet/netronome/nfp/nfpcore/nfp_resource.c
···
 }

 /**
+ * nfp_resource_wait() - Wait for resource to appear
+ * @cpp:  NFP CPP handle
+ * @name: Name of the resource
+ * @secs: Number of seconds to wait
+ *
+ * Wait for resource to appear in the resource table, grab and release
+ * its lock. The wait is jiffies-based, don't expect fine granularity.
+ *
+ * Return: 0 on success, errno otherwise.
+ */
+int nfp_resource_wait(struct nfp_cpp *cpp, const char *name, unsigned int secs)
+{
+    unsigned long warn_at = jiffies + NFP_MUTEX_WAIT_FIRST_WARN * HZ;
+    unsigned long err_at = jiffies + secs * HZ;
+    struct nfp_resource *res;
+
+    while (true) {
+        res = nfp_resource_acquire(cpp, name);
+        if (!IS_ERR(res)) {
+            nfp_resource_release(res);
+            return 0;
+        }
+
+        if (PTR_ERR(res) != -ENOENT) {
+            nfp_err(cpp, "error waiting for resource %s: %ld\n",
+                    name, PTR_ERR(res));
+            return PTR_ERR(res);
+        }
+        if (time_is_before_eq_jiffies(err_at)) {
+            nfp_err(cpp, "timeout waiting for resource %s\n", name);
+            return -ETIMEDOUT;
+        }
+        if (time_is_before_eq_jiffies(warn_at)) {
+            warn_at = jiffies + NFP_MUTEX_WAIT_NEXT_WARN * HZ;
+            nfp_info(cpp, "waiting for NFP resource %s\n", name);
+        }
+        if (msleep_interruptible(10)) {
+            nfp_err(cpp, "wait for resource %s interrupted\n",
+                    name);
+            return -ERESTARTSYS;
+        }
+    }
+}
+
+/**
  * nfp_resource_cpp_id() - Return the cpp_id of a resource handle
  * @res: NFP Resource handle
  *
+1
drivers/net/ethernet/nuvoton/w90p910_ether.c
···

 #include <linux/module.h>
 #include <linux/init.h>
+#include <linux/interrupt.h>
 #include <linux/mii.h>
 #include <linux/netdevice.h>
 #include <linux/etherdevice.h>
-1
drivers/net/ethernet/qlogic/qed/qed_dcbx.c
···
     if (!dcbx_info)
         return -ENOMEM;

-    memset(dcbx_info, 0, sizeof(*dcbx_info));
     rc = qed_dcbx_query_params(p_hwfn, dcbx_info, QED_DCBX_OPERATIONAL_MIB);
     if (rc) {
         kfree(dcbx_info);
+4 -6
drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c
···
     if (((int)skb->len - (int)packet_len) < 0)
         return NULL;

+    /* Some hardware can send us empty frames. Catch them */
+    if (ntohs(maph->pkt_len) == 0)
+        return NULL;
+
     skbn = alloc_skb(packet_len + RMNET_MAP_DEAGGR_SPACING, GFP_ATOMIC);
     if (!skbn)
         return NULL;
···
     skb_put(skbn, packet_len);
     memcpy(skbn->data, skb->data, packet_len);
     skb_pull(skb, packet_len);
-
-    /* Some hardware can send us empty frames. Catch them */
-    if (ntohs(maph->pkt_len) == 0) {
-        kfree_skb(skb);
-        return NULL;
-    }

     return skbn;
 }
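The double-free here was an ownership bug: on the empty-frame path the old code freed the caller's skb and returned NULL, but returning NULL already tells the caller to dispose of the input, so the buffer was freed twice. Moving the check before anything is allocated keeps ownership entirely with the caller. A hedged userspace illustration of the convention (split_first_frame is a hypothetical stand-in, not the driver's API):

#include <stdlib.h>
#include <string.h>

/* Contract: on a NULL return the caller still owns buf and frees it
 * exactly once. The fixed bug was the callee also freeing the input on
 * the empty-frame path, so it was released twice.
 */
static char *split_first_frame(const char *buf, size_t len, size_t frame_len)
{
    char *out;

    if (frame_len == 0 || frame_len > len)
        return NULL;            /* reject early, touch nothing */

    out = malloc(frame_len);
    if (!out)
        return NULL;
    memcpy(out, buf, frame_len);
    return out;                 /* caller owns both buf and out */
}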
+14 -1
drivers/net/ethernet/smsc/smsc911x.c
···
     struct net_device *ndev = dev_get_drvdata(dev);
     struct smsc911x_data *pdata = netdev_priv(ndev);

+    if (netif_running(ndev)) {
+        netif_stop_queue(ndev);
+        netif_device_detach(ndev);
+    }
+
     /* enable wake on LAN, energy detection and the external PME
      * signal. */
     smsc911x_reg_write(pdata, PMT_CTRL,
···
     while (!(smsc911x_reg_read(pdata, PMT_CTRL) & PMT_CTRL_READY_) && --to)
         udelay(1000);

-    return (to == 0) ? -EIO : 0;
+    if (to == 0)
+        return -EIO;
+
+    if (netif_running(ndev)) {
+        netif_device_attach(ndev);
+        netif_start_queue(ndev);
+    }
+
+    return 0;
 }

 static const struct dev_pm_ops smsc911x_pm_ops = {
+3
drivers/net/hyperv/hyperv_net.h
···
                           const struct ndis_pkt_8021q_info *vlan);
 void netvsc_channel_cb(void *context);
 int netvsc_poll(struct napi_struct *napi, int budget);
+
+void rndis_set_subchannel(struct work_struct *w);
 bool rndis_filter_opened(const struct netvsc_device *nvdev);
 int rndis_filter_open(struct netvsc_device *nvdev);
 int rndis_filter_close(struct netvsc_device *nvdev);
···
     u32 num_chn;

     atomic_t open_chn;
+    struct work_struct subchan_work;
     wait_queue_head_t subchan_open;

     struct rndis_device *extension;
+3
drivers/net/hyperv/netvsc.c
···

     init_completion(&net_device->channel_init_wait);
     init_waitqueue_head(&net_device->subchan_open);
+    INIT_WORK(&net_device->subchan_work, rndis_set_subchannel);

     return net_device;
 }
···
     struct netvsc_device *net_device
         = rtnl_dereference(net_device_ctx->nvdev);
     int i;
+
+    cancel_work_sync(&net_device->subchan_work);

     netvsc_disconnect_vsp(device);
+4 -9
drivers/net/hyperv/netvsc_drv.c
···
 #define NETVSC_MIN_TX_SECTIONS 10
 #define NETVSC_DEFAULT_TX      192   /* ~1M */
 #define NETVSC_MIN_RX_SECTIONS 10    /* ~64K */
-#define NETVSC_DEFAULT_RX      2048  /* ~4M */
+#define NETVSC_DEFAULT_RX      10485 /* Max ~16M */

 #define LINKCHANGE_INT (2 * HZ)
 #define VF_TAKEOVER_INT (HZ / 10)
···
     rndis_filter_device_remove(dev, nvdev);

     nvdev = rndis_filter_device_add(dev, &device_info);
-    if (!IS_ERR(nvdev)) {
-        netif_set_real_num_tx_queues(net, nvdev->num_chn);
-        netif_set_real_num_rx_queues(net, nvdev->num_chn);
-    } else {
+    if (IS_ERR(nvdev)) {
         ret = PTR_ERR(nvdev);
         device_info.num_chn = orig;
         nvdev = rndis_filter_device_add(dev, &device_info);
···
         NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
     net->vlan_features = net->features;

-    netif_set_real_num_tx_queues(net, nvdev->num_chn);
-    netif_set_real_num_rx_queues(net, nvdev->num_chn);
-
     netdev_lockdep_set_classes(net);

     /* MTU range: 68 - 1500 or 65521 */
···
     if (vf_netdev)
         netvsc_unregister_vf(vf_netdev);

+    unregister_netdevice(net);
+
     rndis_filter_device_remove(dev,
                                rtnl_dereference(ndev_ctx->nvdev));
-    unregister_netdevice(net);
     rtnl_unlock();

     hv_set_drvdata(dev, NULL);
+87 -39
drivers/net/hyperv/rndis_filter.c
···

     /* Set the channel before opening.*/
     nvchan->channel = new_sc;
-    netif_napi_add(ndev, &nvchan->napi,
-                   netvsc_poll, NAPI_POLL_WEIGHT);

     ret = vmbus_open(new_sc, nvscdev->ring_size * PAGE_SIZE,
                      nvscdev->ring_size * PAGE_SIZE, NULL, 0,
···
     if (ret == 0)
         napi_enable(&nvchan->napi);
     else
-        netif_napi_del(&nvchan->napi);
+        netdev_notice(ndev, "sub channel open failed: %d\n", ret);

-    atomic_inc(&nvscdev->open_chn);
-    wake_up(&nvscdev->subchan_open);
+    if (atomic_inc_return(&nvscdev->open_chn) == nvscdev->num_chn)
+        wake_up(&nvscdev->subchan_open);
+}
+
+/* Open sub-channels after completing the handling of the device probe.
+ * This breaks overlap of processing the host message for the
+ * new primary channel with the initialization of sub-channels.
+ */
+void rndis_set_subchannel(struct work_struct *w)
+{
+    struct netvsc_device *nvdev
+        = container_of(w, struct netvsc_device, subchan_work);
+    struct nvsp_message *init_packet = &nvdev->channel_init_pkt;
+    struct net_device_context *ndev_ctx;
+    struct rndis_device *rdev;
+    struct net_device *ndev;
+    struct hv_device *hv_dev;
+    int i, ret;
+
+    if (!rtnl_trylock()) {
+        schedule_work(w);
+        return;
+    }
+
+    rdev = nvdev->extension;
+    if (!rdev)
+        goto unlock;    /* device was removed */
+
+    ndev = rdev->ndev;
+    ndev_ctx = netdev_priv(ndev);
+    hv_dev = ndev_ctx->device_ctx;
+
+    memset(init_packet, 0, sizeof(struct nvsp_message));
+    init_packet->hdr.msg_type = NVSP_MSG5_TYPE_SUBCHANNEL;
+    init_packet->msg.v5_msg.subchn_req.op = NVSP_SUBCHANNEL_ALLOCATE;
+    init_packet->msg.v5_msg.subchn_req.num_subchannels =
+        nvdev->num_chn - 1;
+    ret = vmbus_sendpacket(hv_dev->channel, init_packet,
+                           sizeof(struct nvsp_message),
+                           (unsigned long)init_packet,
+                           VM_PKT_DATA_INBAND,
+                           VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
+    if (ret) {
+        netdev_err(ndev, "sub channel allocate send failed: %d\n", ret);
+        goto failed;
+    }
+
+    wait_for_completion(&nvdev->channel_init_wait);
+    if (init_packet->msg.v5_msg.subchn_comp.status != NVSP_STAT_SUCCESS) {
+        netdev_err(ndev, "sub channel request failed\n");
+        goto failed;
+    }
+
+    nvdev->num_chn = 1 +
+        init_packet->msg.v5_msg.subchn_comp.num_subchannels;
+
+    /* wait for all sub channels to open */
+    wait_event(nvdev->subchan_open,
+               atomic_read(&nvdev->open_chn) == nvdev->num_chn);
+
+    /* ignore failues from setting rss parameters, still have channels */
+    rndis_filter_set_rss_param(rdev, netvsc_hash_key);
+
+    netif_set_real_num_tx_queues(ndev, nvdev->num_chn);
+    netif_set_real_num_rx_queues(ndev, nvdev->num_chn);
+
+    rtnl_unlock();
+    return;
+
+failed:
+    /* fallback to only primary channel */
+    for (i = 1; i < nvdev->num_chn; i++)
+        netif_napi_del(&nvdev->chan_table[i].napi);
+
+    nvdev->max_chn = 1;
+    nvdev->num_chn = 1;
+unlock:
+    rtnl_unlock();
 }

 struct netvsc_device *rndis_filter_device_add(struct hv_device *dev,
···
     struct rndis_device *rndis_device;
     struct ndis_offload hwcaps;
     struct ndis_offload_params offloads;
-    struct nvsp_message *init_packet;
     struct ndis_recv_scale_cap rsscap;
     u32 rsscap_size = sizeof(struct ndis_recv_scale_cap);
     unsigned int gso_max_size = GSO_MAX_SIZE;
···
               net_device->num_chn);

     atomic_set(&net_device->open_chn, 1);
-
-    if (net_device->num_chn == 1)
-        return net_device;
+    vmbus_set_sc_create_callback(dev->channel, netvsc_sc_open);

     for (i = 1; i < net_device->num_chn; i++) {
         ret = netvsc_alloc_recv_comp_ring(net_device, i);
···
         }
     }

-    vmbus_set_sc_create_callback(dev->channel, netvsc_sc_open);
+    for (i = 1; i < net_device->num_chn; i++)
+        netif_napi_add(net, &net_device->chan_table[i].napi,
+                       netvsc_poll, NAPI_POLL_WEIGHT);

-    init_packet = &net_device->channel_init_pkt;
-    memset(init_packet, 0, sizeof(struct nvsp_message));
-    init_packet->hdr.msg_type = NVSP_MSG5_TYPE_SUBCHANNEL;
-    init_packet->msg.v5_msg.subchn_req.op = NVSP_SUBCHANNEL_ALLOCATE;
-    init_packet->msg.v5_msg.subchn_req.num_subchannels =
-        net_device->num_chn - 1;
-    ret = vmbus_sendpacket(dev->channel, init_packet,
-                           sizeof(struct nvsp_message),
-                           (unsigned long)init_packet,
-                           VM_PKT_DATA_INBAND,
-                           VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
-    if (ret)
-        goto out;
+    if (net_device->num_chn > 1)
+        schedule_work(&net_device->subchan_work);

-    wait_for_completion(&net_device->channel_init_wait);
-    if (init_packet->msg.v5_msg.subchn_comp.status != NVSP_STAT_SUCCESS) {
-        ret = -ENODEV;
-        goto out;
-    }
-
-    net_device->num_chn = 1 +
-        init_packet->msg.v5_msg.subchn_comp.num_subchannels;
-
-    /* wait for all sub channels to open */
-    wait_event(net_device->subchan_open,
-               atomic_read(&net_device->open_chn) == net_device->num_chn);
-
-    /* ignore failues from setting rss parameters, still have channels */
-    rndis_filter_set_rss_param(rndis_device, netvsc_hash_key);
 out:
+    /* if unavailable, just proceed with one queue */
     if (ret) {
         net_device->max_chn = 1;
         net_device->num_chn = 1;
···
     /* Halt and release the rndis device */
     rndis_filter_halt_device(rndis_dev);

-    kfree(rndis_dev);
     net_dev->extension = NULL;

     netvsc_device_remove(dev);
+    kfree(rndis_dev);
 }

 int rndis_filter_open(struct netvsc_device *nvdev)
+5 -6
drivers/net/usb/smsc95xx.c
···
 static int smsc95xx_phy_update_flowcontrol(struct usbnet *dev, u8 duplex,
                                            u16 lcladv, u16 rmtadv)
 {
-    u32 flow, afc_cfg = 0;
+    u32 flow = 0, afc_cfg;

     int ret = smsc95xx_read_reg(dev, AFC_CFG, &afc_cfg);
     if (ret < 0)
···
         if (cap & FLOW_CTRL_RX)
             flow = 0xFFFF0002;
-        else
-            flow = 0;

-        if (cap & FLOW_CTRL_TX)
+        if (cap & FLOW_CTRL_TX) {
             afc_cfg |= 0xF;
-        else
+            flow |= 0xFFFF0000;
+        } else {
             afc_cfg &= ~0xF;
+        }

         netif_dbg(dev, link, dev->net, "rx pause %s, tx pause %s\n",
                   cap & FLOW_CTRL_RX ? "enabled" : "disabled",
                   cap & FLOW_CTRL_TX ? "enabled" : "disabled");
     } else {
         netif_dbg(dev, link, dev->net, "half duplex\n");
-        flow = 0;
         afc_cfg |= 0xF;
+3 -3
drivers/net/vrf.c
···
 {
     const struct ipv6hdr *iph = ipv6_hdr(skb);
     struct flowi6 fl6 = {
+        .flowi6_iif = ifindex,
+        .flowi6_mark = skb->mark,
+        .flowi6_proto = iph->nexthdr,
         .daddr = iph->daddr,
         .saddr = iph->saddr,
         .flowlabel = ip6_flowinfo(iph),
-        .flowi6_mark = skb->mark,
-        .flowi6_proto = iph->nexthdr,
-        .flowi6_iif = ifindex,
     };
     struct net *net = dev_net(vrf_dev);
     struct rt6_info *rt6;
+2
include/linux/syscalls.h
···
 #define SYSCALL_DEFINE5(name, ...) SYSCALL_DEFINEx(5, _##name, __VA_ARGS__)
 #define SYSCALL_DEFINE6(name, ...) SYSCALL_DEFINEx(6, _##name, __VA_ARGS__)

+#define SYSCALL_DEFINE_MAXARGS 6
+
 #define SYSCALL_DEFINEx(x, sname, ...) \
     SYSCALL_METADATA(sname, x, __VA_ARGS__) \
     __SYSCALL_DEFINEx(x, sname, __VA_ARGS__)
-2
include/net/act_api.h
···
     struct gnet_stats_queue tcfa_qstats;
     struct net_rate_estimator __rcu *tcfa_rate_est;
     spinlock_t tcfa_lock;
-    struct rcu_head tcfa_rcu;
     struct gnet_stats_basic_cpu __percpu *cpu_bstats;
     struct gnet_stats_queue __percpu *cpu_qstats;
     struct tc_cookie *act_cookie;
···
 #define tcf_qstats common.tcfa_qstats
 #define tcf_rate_est common.tcfa_rate_est
 #define tcf_lock common.tcfa_lock
-#define tcf_rcu common.tcfa_rcu

 /* Update lastuse only if needed, to avoid dirtying a cache line.
  * We use a temp variable to avoid fetching jiffies twice.
+2 -1
include/net/sctp/sctp.h
···
                                     const union sctp_addr *laddr,
                                     const union sctp_addr *paddr, void *p);
 int sctp_for_each_transport(int (*cb)(struct sctp_transport *, void *),
-                            struct net *net, int pos, void *p);
+                            int (*cb_done)(struct sctp_transport *, void *),
+                            struct net *net, int *pos, void *p);
 int sctp_for_each_endpoint(int (*cb)(struct sctp_endpoint *, void *), void *p);
 int sctp_get_sctp_info(struct sock *sk, struct sctp_association *asoc,
                        struct sctp_info *info);
+5 -1
include/net/sctp/ulpevent.h
···
 static inline int sctp_ulpevent_type_enabled(__u16 sn_type,
                                              struct sctp_event_subscribe *mask)
 {
+    int offset = sn_type - SCTP_SN_TYPE_BASE;
     char *amask = (char *) mask;
-    return amask[sn_type - SCTP_SN_TYPE_BASE];
+
+    if (offset >= sizeof(struct sctp_event_subscribe))
+        return 0;
+    return amask[offset];
 }

 /* Given an event subscription, is this event enabled? */
+2 -2
include/trace/events/xdp.h
···

 #define _trace_xdp_redirect_map(dev, xdp, fwd, map, idx) \
     trace_xdp_redirect_map(dev, xdp, fwd ? fwd->ifindex : 0, \
-                           0, map, idx);
+                           0, map, idx)

 #define _trace_xdp_redirect_map_err(dev, xdp, fwd, map, idx, err) \
     trace_xdp_redirect_map_err(dev, xdp, fwd ? fwd->ifindex : 0, \
-                               err, map, idx);
+                               err, map, idx)

 #endif /* _TRACE_XDP_H */
+2 -1
kernel/bpf/verifier.c
···
         }
     } else {
         if (insn->src_reg != BPF_REG_0 || insn->off != 0 ||
-            (insn->imm != 16 && insn->imm != 32 && insn->imm != 64)) {
+            (insn->imm != 16 && insn->imm != 32 && insn->imm != 64) ||
+            BPF_CLASS(insn->code) == BPF_ALU64) {
             verbose("BPF_END uses reserved fields\n");
             return -EINVAL;
         }
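BPF_END (the byte-swap instruction) is only defined in the 32-bit BPF_ALU class; encoding it with BPF_ALU64 was previously accepted by this check even though the ISA gives it no meaning. For contrast with the selftest added at the bottom of this series, a hedged sketch of a valid encoding next to the now-rejected one:

#include <linux/bpf.h>

/* Valid: BPF_ALU (32-bit class) | BPF_END, swapping the low 16 bits of
 * r0 to big-endian; imm selects the width (16/32/64).
 */
struct bpf_insn valid_swap = {
    .code    = BPF_ALU | BPF_END | BPF_TO_BE,
    .dst_reg = BPF_REG_0,
    .src_reg = 0,
    .off     = 0,
    .imm     = 16,
};

/* Now rejected by the verifier: the same operation encoded with
 * BPF_ALU64, which the instruction set does not define for BPF_END.
 */
struct bpf_insn invalid_swap = {
    .code    = BPF_ALU64 | BPF_END | BPF_TO_BE,
    .dst_reg = BPF_REG_0,
    .src_reg = 0,
    .off     = 0,
    .imm     = 16,
};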
+1 -1
kernel/trace/trace_syscalls.c
···
 struct syscall_tp_t {
     unsigned long long regs;
     unsigned long syscall_nr;
-    unsigned long args[sys_data->nb_args];
+    unsigned long args[SYSCALL_DEFINE_MAXARGS];
 } param;
 int i;
+26 -12
net/core/filter.c
···
     struct redirect_info *ri = this_cpu_ptr(&redirect_info);
     const struct bpf_prog *map_owner = ri->map_owner;
     struct bpf_map *map = ri->map;
+    struct net_device *fwd = NULL;
     u32 index = ri->ifindex;
-    struct net_device *fwd;
     int err;

     ri->ifindex = 0;
     ri->map = NULL;
     ri->map_owner = NULL;

-    /* This is really only caused by a deliberately crappy
-     * BPF program, normally we would never hit that case,
-     * so no need to inform someone via tracepoints either,
-     * just bail out.
-     */
-    if (unlikely(map_owner != xdp_prog))
-        return -EINVAL;
+    if (unlikely(map_owner != xdp_prog)) {
+        err = -EFAULT;
+        map = NULL;
+        goto err;
+    }

     fwd = __dev_map_lookup_elem(map, index);
     if (!fwd) {
···
                            struct bpf_prog *xdp_prog)
 {
     struct redirect_info *ri = this_cpu_ptr(&redirect_info);
+    const struct bpf_prog *map_owner = ri->map_owner;
+    struct bpf_map *map = ri->map;
+    struct net_device *fwd = NULL;
     u32 index = ri->ifindex;
-    struct net_device *fwd;
     unsigned int len;
     int err = 0;

-    fwd = dev_get_by_index_rcu(dev_net(dev), index);
     ri->ifindex = 0;
+    ri->map = NULL;
+    ri->map_owner = NULL;
+
+    if (map) {
+        if (unlikely(map_owner != xdp_prog)) {
+            err = -EFAULT;
+            map = NULL;
+            goto err;
+        }
+        fwd = __dev_map_lookup_elem(map, index);
+    } else {
+        fwd = dev_get_by_index_rcu(dev_net(dev), index);
+    }
     if (unlikely(!fwd)) {
         err = -EINVAL;
         goto err;
···
     }

     skb->dev = fwd;
-    _trace_xdp_redirect(dev, xdp_prog, index);
+    map ? _trace_xdp_redirect_map(dev, xdp_prog, fwd, map, index)
+        : _trace_xdp_redirect(dev, xdp_prog, index);
     return 0;
 err:
-    _trace_xdp_redirect_err(dev, xdp_prog, index, err);
+    map ? _trace_xdp_redirect_map_err(dev, xdp_prog, fwd, map, index, err)
+        : _trace_xdp_redirect_err(dev, xdp_prog, index, err);
     return err;
 }
 EXPORT_SYMBOL_GPL(xdp_do_generic_redirect);
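This change teaches the generic (skb-based) XDP path to resolve devmap redirects and emit the map-flavored tracepoints, which the native driver path already did. Nothing changes on the program side; a minimal devmap-redirect program, written in the style of the era's samples/bpf (the map name tx_ports and its slot layout are illustrative), looks like the sketch below.

#include <linux/bpf.h>
#include "bpf_helpers.h"   /* from the kernel's tools/ tree of this era */

/* Illustrative devmap: slot 0 holds the ifindex to forward packets to. */
struct bpf_map_def SEC("maps") tx_ports = {
    .type        = BPF_MAP_TYPE_DEVMAP,
    .key_size    = sizeof(__u32),
    .value_size  = sizeof(__u32),
    .max_entries = 1,
};

SEC("xdp")
int xdp_redirect_map_prog(struct xdp_md *ctx)
{
    /* Returns XDP_REDIRECT on success; the kernel then resolves the
     * target device in xdp_do_redirect() or, after this patch, in
     * xdp_do_generic_redirect() as well.
     */
    return bpf_redirect_map(&tx_ports, 0, 0);
}

char _license[] SEC("license") = "GPL";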
+2 -2
net/core/gen_estimator.c
···
     u64 rate, brate;

     est_fetch_counters(est, &b);
-    brate = (b.bytes - est->last_bytes) << (8 - est->ewma_log);
+    brate = (b.bytes - est->last_bytes) << (10 - est->ewma_log - est->intvl_log);
     brate -= (est->avbps >> est->ewma_log);

-    rate = (u64)(b.packets - est->last_packets) << (8 - est->ewma_log);
+    rate = (u64)(b.packets - est->last_packets) << (10 - est->ewma_log - est->intvl_log);
     rate -= (est->avpps >> est->ewma_log);

     write_seqcount_begin(&est->seq);
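Where the shift of 10 comes from, as a hedged derivation: assuming the estimator's sampling period is (250 ms << intvl_log) and avbps/avpps hold per-second rates in 2^8 fixed point, a per-interval byte delta must be scaled by 4 >> intvl_log to become a per-second figure, then by 2^8 for the fixed point, and the EWMA folds its own >> ewma_log into the sample, giving delta << (10 - ewma_log - intvl_log). The old (8 - ewma_log) shift ignored intvl_log entirely, so rates came out wrong whenever the interval differed from one second. A small standalone model of the same arithmetic:

#include <stdint.h>
#include <stdio.h>

/* Hedged model of the estimator's fixed-point math under the
 * assumptions stated above; not kernel code.
 */
static uint64_t scaled_sample(uint64_t delta_bytes, int ewma_log, int intvl_log)
{
    return delta_bytes << (10 - ewma_log - intvl_log);
}

int main(void)
{
    uint64_t avbps = 0, delta = 125000; /* 125000 B per 1 s interval = 1 Mbit/s */
    int ewma_log = 5, intvl_log = 2;    /* intvl_log = 2 -> 1 s period */

    for (int i = 0; i < 200; i++) {
        uint64_t brate = scaled_sample(delta, ewma_log, intvl_log);
        avbps += brate - (avbps >> ewma_log);
    }
    /* converges toward delta << (10 - intvl_log); >> 8 recovers
     * ~125000 bytes/sec, i.e. 1 Mbit/s
     */
    printf("%llu bytes/sec\n", (unsigned long long)(avbps >> 8));
    return 0;
}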
+1 -1
net/ipv4/inet_connection_sock.c
···
         tcp_sk(child)->fastopen_rsk = NULL;
     }
     inet_csk_destroy_sock(child);
-    reqsk_put(req);
 }

 struct sock *inet_csk_reqsk_queue_add(struct sock *sk,
···
     sock_hold(child);

     inet_child_forget(sk, req, child);
+    reqsk_put(req);
     bh_unlock_sock(child);
     local_bh_enable();
     sock_put(child);
+6 -2
net/ipv4/ip_sockglue.c
···
 void ipv4_pktinfo_prepare(const struct sock *sk, struct sk_buff *skb)
 {
     struct in_pktinfo *pktinfo = PKTINFO_SKB_CB(skb);
-    bool l3slave = ipv4_l3mdev_skb(IPCB(skb)->flags);
     bool prepare = (inet_sk(sk)->cmsg_flags & IP_CMSG_PKTINFO) ||
                    ipv6_sk_rxinfo(sk);
···
          * (e.g., process binds socket to eth0 for Tx which is
          * redirected to loopback in the rtable/dst).
          */
-        if (pktinfo->ipi_ifindex == LOOPBACK_IFINDEX || l3slave)
+        struct rtable *rt = skb_rtable(skb);
+        bool l3slave = ipv4_l3mdev_skb(IPCB(skb)->flags);
+
+        if (pktinfo->ipi_ifindex == LOOPBACK_IFINDEX)
             pktinfo->ipi_ifindex = inet_iif(skb);
+        else if (l3slave && rt && rt->rt_iif)
+            pktinfo->ipi_ifindex = rt->rt_iif;

         pktinfo->ipi_spec_dst.s_addr = fib_compute_spec_dst(skb);
     } else {
+1 -1
net/ipv4/ip_tunnel.c
···
         return cand;

     t = rcu_dereference(itn->collect_md_tun);
-    if (t)
+    if (t && t->dev->flags & IFF_UP)
         return t;

     if (itn->fb_tunnel_dev && itn->fb_tunnel_dev->flags & IFF_UP)
+14 -10
net/ipv4/tcp_output.c
···
     struct tcp_skb_cb *tcb;
     struct tcp_out_options opts;
     unsigned int tcp_options_size, tcp_header_size;
+    struct sk_buff *oskb = NULL;
     struct tcp_md5sig_key *md5;
     struct tcphdr *th;
     int err;

     BUG_ON(!skb || !tcp_skb_pcount(skb));
     tp = tcp_sk(sk);

-    skb->skb_mstamp = tp->tcp_mstamp;
     if (clone_it) {
         TCP_SKB_CB(skb)->tx.in_flight = TCP_SKB_CB(skb)->end_seq
             - tp->snd_una;
-        tcp_rate_skb_sent(sk, skb);
-
+        oskb = skb;
         if (unlikely(skb_cloned(skb)))
             skb = pskb_copy(skb, gfp_mask);
         else
···
         if (unlikely(!skb))
             return -ENOBUFS;
     }
+    skb->skb_mstamp = tp->tcp_mstamp;

     inet = inet_sk(sk);
     tcb = TCP_SKB_CB(skb);
···
     err = icsk->icsk_af_ops->queue_xmit(sk, skb, &inet->cork.fl);

-    if (likely(err <= 0))
-        return err;
-
-    tcp_enter_cwr(sk);
-
-    return net_xmit_eval(err);
+    if (unlikely(err > 0)) {
+        tcp_enter_cwr(sk);
+        err = net_xmit_eval(err);
+    }
+    if (!err && oskb) {
+        oskb->skb_mstamp = tp->tcp_mstamp;
+        tcp_rate_skb_sent(sk, oskb);
+    }
+    return err;
 }

 /* This routine just queues the buffer for sending.
···
              skb_headroom(skb) >= 0xFFFF)) {
         struct sk_buff *nskb;

-        skb->skb_mstamp = tp->tcp_mstamp;
         nskb = __pskb_copy(skb, MAX_TCP_HEADER, GFP_ATOMIC);
         err = nskb ? tcp_transmit_skb(sk, nskb, 0, GFP_ATOMIC) :
                      -ENOBUFS;
+        if (!err)
+            skb->skb_mstamp = tp->tcp_mstamp;
     } else {
         err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
     }
+1 -1
net/ipv6/ip6_tunnel.c
···
     }

     t = rcu_dereference(ip6n->collect_md_tun);
-    if (t)
+    if (t && t->dev->flags & IFF_UP)
         return t;

     t = rcu_dereference(ip6n->tnls_wc[0]);
-4
net/ipv6/seg6_local.c
···

     srh = (struct ipv6_sr_hdr *)(skb->data + srhoff);

-    /* make sure it's a Segment Routing header (Routing Type 4) */
-    if (srh->type != IPV6_SRCRT_TYPE_4)
-        return NULL;
-
     len = (srh->hdrlen + 1) << 3;

     if (!pskb_may_pull(skb, srhoff + len))
+2 -1
net/openvswitch/datapath.c
···
     if (!a[OVS_FLOW_ATTR_KEY]) {
         OVS_NLERR(log,
                   "Flow key attribute not present in set flow.");
-        return -EINVAL;
+        error = -EINVAL;
+        goto error;
     }

     *acts = get_flow_actions(net, a[OVS_FLOW_ATTR_ACTIONS], key,
+12 -11
net/sched/act_api.c
···
     res->goto_tp = rcu_dereference_bh(chain->filter_chain);
 }

-static void free_tcf(struct rcu_head *head)
+/* XXX: For standalone actions, we don't need a RCU grace period either, because
+ * actions are always connected to filters and filters are already destroyed in
+ * RCU callbacks, so after a RCU grace period actions are already disconnected
+ * from filters. Readers later can not find us.
+ */
+static void free_tcf(struct tc_action *p)
 {
-    struct tc_action *p = container_of(head, struct tc_action, tcfa_rcu);
-
     free_percpu(p->cpu_bstats);
     free_percpu(p->cpu_qstats);
···
     idr_remove_ext(&idrinfo->action_idr, p->tcfa_index);
     spin_unlock_bh(&idrinfo->lock);
     gen_kill_estimator(&p->tcfa_rate_est);
-    /*
-     * gen_estimator est_timer() might access p->tcfa_lock
-     * or bstats, wait a RCU grace period before freeing p
-     */
-    call_rcu(&p->tcfa_rcu, free_tcf);
+    free_tcf(p);
 }

 int __tcf_idr_release(struct tc_action *p, bool bind, bool strict)
···
     idr_for_each_entry_ext(idr, p, id) {
         ret = __tcf_idr_release(p, false, true);
         if (ret == ACT_P_DELETED) {
-            module_put(p->ops->owner);
+            module_put(ops->owner);
             n_i++;
         } else if (ret < 0) {
             goto nla_put_failure;
···
 {
     if (est)
         gen_kill_estimator(&a->tcfa_rate_est);
-    call_rcu(&a->tcfa_rcu, free_tcf);
+    free_tcf(a);
 }
 EXPORT_SYMBOL(tcf_idr_cleanup);
···
 int tcf_action_destroy(struct list_head *actions, int bind)
 {
+    const struct tc_action_ops *ops;
     struct tc_action *a, *tmp;
     int ret = 0;

     list_for_each_entry_safe(a, tmp, actions, list) {
+        ops = a->ops;
         ret = __tcf_idr_release(a, bind, true);
         if (ret == ACT_P_DELETED)
-            module_put(a->ops->owner);
+            module_put(ops->owner);
         else if (ret < 0)
             return ret;
     }
+40 -23
net/sched/cls_api.c
···
     list_add_tail(&chain->list, &block->chain_list);
     chain->block = block;
     chain->index = chain_index;
-    chain->refcnt = 0;
+    chain->refcnt = 1;
     return chain;
 }
···
     RCU_INIT_POINTER(*chain->p_filter_chain, NULL);
     while ((tp = rtnl_dereference(chain->filter_chain)) != NULL) {
         RCU_INIT_POINTER(chain->filter_chain, tp->next);
+        tcf_chain_put(chain);
         tcf_proto_destroy(tp);
     }
 }

 static void tcf_chain_destroy(struct tcf_chain *chain)
 {
-    /* May be already removed from the list by the previous call. */
-    if (!list_empty(&chain->list))
-        list_del_init(&chain->list);
+    list_del(&chain->list);
+    kfree(chain);
+}

-    /* There might still be a reference held when we got here from
-     * tcf_block_put. Wait for the user to drop reference before free.
-     */
-    if (!chain->refcnt)
-        kfree(chain);
+static void tcf_chain_hold(struct tcf_chain *chain)
+{
+    ++chain->refcnt;
 }

 struct tcf_chain *tcf_chain_get(struct tcf_block *block, u32 chain_index,
···
     struct tcf_chain *chain;

     list_for_each_entry(chain, &block->chain_list, list) {
-        if (chain->index == chain_index)
-            goto incref;
+        if (chain->index == chain_index) {
+            tcf_chain_hold(chain);
+            return chain;
+        }
     }
-    chain = create ? tcf_chain_create(block, chain_index) : NULL;

-incref:
-    if (chain)
-        chain->refcnt++;
-    return chain;
+    return create ? tcf_chain_create(block, chain_index) : NULL;
 }
 EXPORT_SYMBOL(tcf_chain_get);

 void tcf_chain_put(struct tcf_chain *chain)
 {
-    /* Destroy unused chain, with exception of chain 0, which is the
-     * default one and has to be always present.
-     */
-    if (--chain->refcnt == 0 && !chain->filter_chain && chain->index != 0)
+    if (--chain->refcnt == 0)
         tcf_chain_destroy(chain);
 }
 EXPORT_SYMBOL(tcf_chain_put);
···
     if (!block)
         return;

-    list_for_each_entry_safe(chain, tmp, &block->chain_list, list) {
+    /* XXX: Standalone actions are not allowed to jump to any chain, and
+     * bound actions should be all removed after flushing. However,
+     * filters are destroyed in RCU callbacks, we have to hold the chains
+     * first, otherwise we would always race with RCU callbacks on this list
+     * without proper locking.
+     */
+
+    /* Wait for existing RCU callbacks to cool down. */
+    rcu_barrier();
+
+    /* Hold a refcnt for all chains, except 0, in case they are gone. */
+    list_for_each_entry(chain, &block->chain_list, list)
+        if (chain->index)
+            tcf_chain_hold(chain);
+
+    /* No race on the list, because no chain could be destroyed. */
+    list_for_each_entry(chain, &block->chain_list, list)
         tcf_chain_flush(chain);
-        tcf_chain_destroy(chain);
-    }
+
+    /* Wait for RCU callbacks to release the reference count. */
+    rcu_barrier();
+
+    /* At this point, all the chains should have refcnt == 1. */
+    list_for_each_entry_safe(chain, tmp, &block->chain_list, list)
+        tcf_chain_put(chain);
     kfree(block);
 }
 EXPORT_SYMBOL(tcf_block_put);
···
     rcu_assign_pointer(*chain->p_filter_chain, tp);
     RCU_INIT_POINTER(tp->next, tcf_chain_tp_prev(chain_info));
     rcu_assign_pointer(*chain_info->pprev, tp);
+    tcf_chain_hold(chain);
 }

 static void tcf_chain_tp_remove(struct tcf_chain *chain,
···
     if (chain->p_filter_chain && tp == chain->filter_chain)
         RCU_INIT_POINTER(*chain->p_filter_chain, next);
     RCU_INIT_POINTER(*chain_info->pprev, next);
+    tcf_chain_put(chain);
 }

 static struct tcf_proto *tcf_chain_tp_find(struct tcf_chain *chain,
+1 -1
net/sched/cls_rsvp.h
···
         if ((data->hgenerator += 0x10000) == 0)
             data->hgenerator = 0x10000;
         h = data->hgenerator|salt;
-        if (rsvp_get(tp, h) == 0)
+        if (!rsvp_get(tp, h))
             return h;
     }
     return 0;
+9 -24
net/sctp/sctp_diag.c
···
     return err;
 }

-static int sctp_sock_dump(struct sock *sk, void *p)
+static int sctp_sock_dump(struct sctp_transport *tsp, void *p)
 {
+    struct sctp_endpoint *ep = tsp->asoc->ep;
     struct sctp_comm_param *commp = p;
+    struct sock *sk = ep->base.sk;
     struct sk_buff *skb = commp->skb;
     struct netlink_callback *cb = commp->cb;
     const struct inet_diag_req_v2 *r = commp->r;
···
     int err = 0;

     lock_sock(sk);
-    if (!sctp_sk(sk)->ep)
-        goto release;
-    list_for_each_entry(assoc, &sctp_sk(sk)->ep->asocs, asocs) {
+    list_for_each_entry(assoc, &ep->asocs, asocs) {
         if (cb->args[4] < cb->args[1])
             goto next;
···
                            cb->nlh->nlmsg_seq,
                            NLM_F_MULTI, cb->nlh,
                            commp->net_admin) < 0) {
-            cb->args[3] = 1;
             err = 1;
             goto release;
         }
···
         cb->args[4]++;
     }
     cb->args[1] = 0;
-    cb->args[2]++;
     cb->args[3] = 0;
     cb->args[4] = 0;
 release:
     release_sock(sk);
-    sock_put(sk);
     return err;
 }

-static int sctp_get_sock(struct sctp_transport *tsp, void *p)
+static int sctp_sock_filter(struct sctp_transport *tsp, void *p)
 {
     struct sctp_endpoint *ep = tsp->asoc->ep;
     struct sctp_comm_param *commp = p;
     struct sock *sk = ep->base.sk;
-    struct netlink_callback *cb = commp->cb;
     const struct inet_diag_req_v2 *r = commp->r;
     struct sctp_association *assoc =
         list_entry(ep->asocs.next, struct sctp_association, asocs);

     /* find the ep only once through the transports by this condition */
     if (tsp->asoc != assoc)
-        goto out;
+        return 0;

     if (r->sdiag_family != AF_UNSPEC && sk->sk_family != r->sdiag_family)
-        goto out;
-
-    sock_hold(sk);
-    cb->args[5] = (long)sk;
+        return 0;

     return 1;
-
-out:
-    cb->args[2]++;
-    return 0;
 }

 static int sctp_ep_dump(struct sctp_endpoint *ep, void *p)
···
     if (!(idiag_states & ~(TCPF_LISTEN | TCPF_CLOSE)))
         goto done;

-next:
-    cb->args[5] = 0;
-    sctp_for_each_transport(sctp_get_sock, net, cb->args[2], &commp);
-
-    if (cb->args[5] && !sctp_sock_dump((struct sock *)cb->args[5], &commp))
-        goto next;
+    sctp_for_each_transport(sctp_sock_filter, sctp_sock_dump,
+                            net, (int *)&cb->args[2], &commp);

 done:
     cb->args[1] = cb->args[4];
+25 -15
net/sctp/socket.c
···
 EXPORT_SYMBOL_GPL(sctp_transport_lookup_process);

 int sctp_for_each_transport(int (*cb)(struct sctp_transport *, void *),
-                            struct net *net, int pos, void *p) {
+                            int (*cb_done)(struct sctp_transport *, void *),
+                            struct net *net, int *pos, void *p) {
     struct rhashtable_iter hti;
-    void *obj;
-    int err;
+    struct sctp_transport *tsp;
+    int ret;

-    err = sctp_transport_walk_start(&hti);
-    if (err)
-        return err;
+again:
+    ret = sctp_transport_walk_start(&hti);
+    if (ret)
+        return ret;

-    obj = sctp_transport_get_idx(net, &hti, pos + 1);
-    for (; !IS_ERR_OR_NULL(obj); obj = sctp_transport_get_next(net, &hti)) {
-        struct sctp_transport *transport = obj;
-
-        if (!sctp_transport_hold(transport))
+    tsp = sctp_transport_get_idx(net, &hti, *pos + 1);
+    for (; !IS_ERR_OR_NULL(tsp); tsp = sctp_transport_get_next(net, &hti)) {
+        if (!sctp_transport_hold(tsp))
             continue;
-        err = cb(transport, p);
-        sctp_transport_put(transport);
-        if (err)
+        ret = cb(tsp, p);
+        if (ret)
             break;
+        (*pos)++;
+        sctp_transport_put(tsp);
     }
     sctp_transport_walk_stop(&hti);

-    return err;
+    if (ret) {
+        if (cb_done && !cb_done(tsp, p)) {
+            (*pos)++;
+            sctp_transport_put(tsp);
+            goto again;
+        }
+        sctp_transport_put(tsp);
+    }
+
+    return ret;
 }
 EXPORT_SYMBOL_GPL(sctp_for_each_transport);
+1 -1
net/tls/tls_sw.c
···
     return ret;
 }

-void tls_sw_free_resources(struct sock *sk)
+static void tls_sw_free_resources(struct sock *sk)
 {
     struct tls_context *tls_ctx = tls_get_ctx(sk);
     struct tls_sw_context *ctx = tls_sw_ctx(tls_ctx);
+16
tools/testing/selftests/bpf/test_verifier.c
···
     .result = REJECT,
     .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
 },
+{
+    "invalid 64-bit BPF_END",
+    .insns = {
+        BPF_MOV32_IMM(BPF_REG_0, 0),
+        {
+            .code = BPF_ALU64 | BPF_END | BPF_TO_LE,
+            .dst_reg = BPF_REG_0,
+            .src_reg = 0,
+            .off = 0,
+            .imm = 32,
+        },
+        BPF_EXIT_INSN(),
+    },
+    .errstr = "BPF_END uses reserved fields",
+    .result = REJECT,
+},
 };

 static int probe_filter_length(const struct bpf_insn *fp)