Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Dave writes:
"Networking fixes:

1) Fix truncation of 32-bit right shift in bpf, from Jann Horn.

2) Fix memory leak in wireless wext compat, from Stefan Seyfried.

3) Use after free in cfg80211's reg_process_hint(), from Yu Zhao.

4) Need to cancel pending work when unbinding in smsc75xx otherwise
we oops, also from Yu Zhao.

5) Don't allow enslaving a team device to itself, from Ido Schimmel.

6) Fix backwards compat with older userspace for rtnetlink FDB dumps.
From Mauricio Faria.

7) Add validation of tc policy netlink attributes, from David Ahern.

8) Fix RCU locking in rawv6_send_hdrinc(), from Wei Wang."

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (26 commits)
net: mvpp2: Extract the correct ethtype from the skb for tx csum offload
ipv6: take rcu lock in rawv6_send_hdrinc()
net: sched: Add policy validation for tc attributes
rtnetlink: fix rtnl_fdb_dump() for ndmsg header
yam: fix a missing-check bug
net: bpfilter: Fix type cast and pointer warnings
net: cxgb3_main: fix a missing-check bug
bpf: 32-bit RSH verification must truncate input before the ALU op
net: phy: phylink: fix SFP interface autodetection
be2net: don't flip hw_features when VXLANs are added/deleted
net/packet: fix packet drop as of virtio gso
net: dsa: b53: Keep CPU port as tagged in all VLANs
openvswitch: load NAT helper
bnxt_en: get the reduced max_irqs by the ones used by RDMA
bnxt_en: free hwrm resources, if driver probe fails.
bnxt_en: Fix enables field in HWRM_QUEUE_COS2BW_CFG request
bnxt_en: Fix VNIC reservations on the PF.
team: Forbid enslaving team device to itself
net/usb: cancel pending work when unbinding smsc75xx
mlxsw: spectrum: Delete RIF when VLAN device is removed
...

+203 -81
+2 -2
drivers/net/dsa/b53/b53_common.c
··· 1107 1107 b53_get_vlan_entry(dev, vid, vl); 1108 1108 1109 1109 vl->members |= BIT(port); 1110 - if (untagged) 1110 + if (untagged && !dsa_is_cpu_port(ds, port)) 1111 1111 vl->untag |= BIT(port); 1112 1112 else 1113 1113 vl->untag &= ~BIT(port); ··· 1149 1149 pvid = 0; 1150 1150 } 1151 1151 1152 - if (untagged) 1152 + if (untagged && !dsa_is_cpu_port(ds, port)) 1153 1153 vl->untag &= ~(BIT(port)); 1154 1154 1155 1155 b53_set_vlan_entry(dev, vid, vl);
+8 -6
drivers/net/ethernet/broadcom/bnxt/bnxt.c
··· 3017 3017 { 3018 3018 struct pci_dev *pdev = bp->pdev; 3019 3019 3020 - dma_free_coherent(&pdev->dev, PAGE_SIZE, bp->hwrm_cmd_resp_addr, 3021 - bp->hwrm_cmd_resp_dma_addr); 3022 - 3023 - bp->hwrm_cmd_resp_addr = NULL; 3020 + if (bp->hwrm_cmd_resp_addr) { 3021 + dma_free_coherent(&pdev->dev, PAGE_SIZE, bp->hwrm_cmd_resp_addr, 3022 + bp->hwrm_cmd_resp_dma_addr); 3023 + bp->hwrm_cmd_resp_addr = NULL; 3024 + } 3024 3025 } 3025 3026 3026 3027 static int bnxt_alloc_hwrm_resources(struct bnxt *bp) ··· 4651 4650 FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0; 4652 4651 enables |= ring_grps ? 4653 4652 FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS : 0; 4654 - enables |= vnics ? FUNC_VF_CFG_REQ_ENABLES_NUM_VNICS : 0; 4653 + enables |= vnics ? FUNC_CFG_REQ_ENABLES_NUM_VNICS : 0; 4655 4654 4656 4655 req->num_rx_rings = cpu_to_le16(rx_rings); 4657 4656 req->num_hw_ring_grps = cpu_to_le16(ring_grps); ··· 8622 8621 *max_tx = hw_resc->max_tx_rings; 8623 8622 *max_rx = hw_resc->max_rx_rings; 8624 8623 *max_cp = min_t(int, bnxt_get_max_func_cp_rings_for_en(bp), 8625 - hw_resc->max_irqs); 8624 + hw_resc->max_irqs - bnxt_get_ulp_msix_num(bp)); 8626 8625 *max_cp = min_t(int, *max_cp, hw_resc->max_stat_ctxs); 8627 8626 max_ring_grps = hw_resc->max_hw_ring_grps; 8628 8627 if (BNXT_CHIP_TYPE_NITRO_A0(bp) && BNXT_PF(bp)) { ··· 9058 9057 bnxt_clear_int_mode(bp); 9059 9058 9060 9059 init_err_pci_clean: 9060 + bnxt_free_hwrm_resources(bp); 9061 9061 bnxt_cleanup_pci(bp); 9062 9062 9063 9063 init_err_free:
+3 -3
drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
··· 98 98 99 99 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_COS2BW_CFG, -1, -1); 100 100 for (i = 0; i < max_tc; i++) { 101 - u8 qidx; 101 + u8 qidx = bp->tc_to_qidx[i]; 102 102 103 103 req.enables |= cpu_to_le32( 104 - QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID0_VALID << i); 104 + QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID0_VALID << 105 + qidx); 105 106 106 107 memset(&cos2bw, 0, sizeof(cos2bw)); 107 - qidx = bp->tc_to_qidx[i]; 108 108 cos2bw.queue_id = bp->q_info[qidx].queue_id; 109 109 if (ets->tc_tsa[i] == IEEE_8021QAZ_TSA_STRICT) { 110 110 cos2bw.tsa =
+17
drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
··· 2159 2159 return -EPERM; 2160 2160 if (copy_from_user(&t, useraddr, sizeof(t))) 2161 2161 return -EFAULT; 2162 + if (t.cmd != CHELSIO_SET_QSET_PARAMS) 2163 + return -EINVAL; 2162 2164 if (t.qset_idx >= SGE_QSETS) 2163 2165 return -EINVAL; 2164 2166 if (!in_range(t.intr_lat, 0, M_NEWTIMER) || ··· 2260 2258 if (copy_from_user(&t, useraddr, sizeof(t))) 2261 2259 return -EFAULT; 2262 2260 2261 + if (t.cmd != CHELSIO_GET_QSET_PARAMS) 2262 + return -EINVAL; 2263 + 2263 2264 /* Display qsets for all ports when offload enabled */ 2264 2265 if (test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) { 2265 2266 q1 = 0; ··· 2308 2303 return -EBUSY; 2309 2304 if (copy_from_user(&edata, useraddr, sizeof(edata))) 2310 2305 return -EFAULT; 2306 + if (edata.cmd != CHELSIO_SET_QSET_NUM) 2307 + return -EINVAL; 2311 2308 if (edata.val < 1 || 2312 2309 (edata.val > 1 && !(adapter->flags & USING_MSIX))) 2313 2310 return -EINVAL; ··· 2350 2343 return -EPERM; 2351 2344 if (copy_from_user(&t, useraddr, sizeof(t))) 2352 2345 return -EFAULT; 2346 + if (t.cmd != CHELSIO_LOAD_FW) 2347 + return -EINVAL; 2353 2348 /* Check t.len sanity ? */ 2354 2349 fw_data = memdup_user(useraddr + sizeof(t), t.len); 2355 2350 if (IS_ERR(fw_data)) ··· 2375 2366 return -EBUSY; 2376 2367 if (copy_from_user(&m, useraddr, sizeof(m))) 2377 2368 return -EFAULT; 2369 + if (m.cmd != CHELSIO_SETMTUTAB) 2370 + return -EINVAL; 2378 2371 if (m.nmtus != NMTUS) 2379 2372 return -EINVAL; 2380 2373 if (m.mtus[0] < 81) /* accommodate SACK */ ··· 2418 2407 return -EBUSY; 2419 2408 if (copy_from_user(&m, useraddr, sizeof(m))) 2420 2409 return -EFAULT; 2410 + if (m.cmd != CHELSIO_SET_PM) 2411 + return -EINVAL; 2421 2412 if (!is_power_of_2(m.rx_pg_sz) || 2422 2413 !is_power_of_2(m.tx_pg_sz)) 2423 2414 return -EINVAL; /* not power of 2 */ ··· 2453 2440 return -EIO; /* need the memory controllers */ 2454 2441 if (copy_from_user(&t, useraddr, sizeof(t))) 2455 2442 return -EFAULT; 2443 + if (t.cmd != CHELSIO_GET_MEM) 2444 + return -EINVAL; 2456 2445 if ((t.addr & 7) || (t.len & 7)) 2457 2446 return -EINVAL; 2458 2447 if (t.mem_id == MEM_CM) ··· 2507 2492 return -EAGAIN; 2508 2493 if (copy_from_user(&t, useraddr, sizeof(t))) 2509 2494 return -EFAULT; 2495 + if (t.cmd != CHELSIO_SET_TRACE_FILTER) 2496 + return -EINVAL; 2510 2497 2511 2498 tp = (const struct trace_params *)&t.sip; 2512 2499 if (t.config_tx)
+1 -4
drivers/net/ethernet/emulex/benet/be_main.c
··· 4002 4002 netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | 4003 4003 NETIF_F_TSO | NETIF_F_TSO6 | 4004 4004 NETIF_F_GSO_UDP_TUNNEL; 4005 - netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL; 4006 - netdev->features |= NETIF_F_GSO_UDP_TUNNEL; 4007 4005 4008 4006 dev_info(dev, "Enabled VxLAN offloads for UDP port %d\n", 4009 4007 be16_to_cpu(port)); ··· 4023 4025 adapter->vxlan_port = 0; 4024 4026 4025 4027 netdev->hw_enc_features = 0; 4026 - netdev->hw_features &= ~(NETIF_F_GSO_UDP_TUNNEL); 4027 - netdev->features &= ~(NETIF_F_GSO_UDP_TUNNEL); 4028 4028 } 4029 4029 4030 4030 static void be_calculate_vf_res(struct be_adapter *adapter, u16 num_vfs, ··· 5316 5320 struct be_adapter *adapter = netdev_priv(netdev); 5317 5321 5318 5322 netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 | 5323 + NETIF_F_GSO_UDP_TUNNEL | 5319 5324 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM | 5320 5325 NETIF_F_HW_VLAN_CTAG_TX; 5321 5326 if ((be_if_cap_flags(adapter) & BE_IF_FLAGS_RSS))
+5 -4
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
··· 1725 1725 } 1726 1726 1727 1727 /* Set Tx descriptors fields relevant for CSUM calculation */ 1728 - static u32 mvpp2_txq_desc_csum(int l3_offs, int l3_proto, 1728 + static u32 mvpp2_txq_desc_csum(int l3_offs, __be16 l3_proto, 1729 1729 int ip_hdr_len, int l4_proto) 1730 1730 { 1731 1731 u32 command; ··· 2600 2600 if (skb->ip_summed == CHECKSUM_PARTIAL) { 2601 2601 int ip_hdr_len = 0; 2602 2602 u8 l4_proto; 2603 + __be16 l3_proto = vlan_get_protocol(skb); 2603 2604 2604 - if (skb->protocol == htons(ETH_P_IP)) { 2605 + if (l3_proto == htons(ETH_P_IP)) { 2605 2606 struct iphdr *ip4h = ip_hdr(skb); 2606 2607 2607 2608 /* Calculate IPv4 checksum and L4 checksum */ 2608 2609 ip_hdr_len = ip4h->ihl; 2609 2610 l4_proto = ip4h->protocol; 2610 - } else if (skb->protocol == htons(ETH_P_IPV6)) { 2611 + } else if (l3_proto == htons(ETH_P_IPV6)) { 2611 2612 struct ipv6hdr *ip6h = ipv6_hdr(skb); 2612 2613 2613 2614 /* Read l4_protocol from one of IPv6 extra headers */ ··· 2620 2619 } 2621 2620 2622 2621 return mvpp2_txq_desc_csum(skb_network_offset(skb), 2623 - skb->protocol, ip_hdr_len, l4_proto); 2622 + l3_proto, ip_hdr_len, l4_proto); 2624 2623 } 2625 2624 2626 2625 return MVPP2_TXD_L4_CSUM_NOT | MVPP2_TXD_IP_CSUM_DISABLE;
+7 -4
drivers/net/ethernet/mellanox/mlxsw/pci.c
··· 718 718 memset(&active_cqns, 0, sizeof(active_cqns)); 719 719 720 720 while ((eqe = mlxsw_pci_eq_sw_eqe_get(q))) { 721 - u8 event_type = mlxsw_pci_eqe_event_type_get(eqe); 722 721 723 - switch (event_type) { 724 - case MLXSW_PCI_EQE_EVENT_TYPE_CMD: 722 + /* Command interface completion events are always received on 723 + * queue MLXSW_PCI_EQ_ASYNC_NUM (EQ0) and completion events 724 + * are mapped to queue MLXSW_PCI_EQ_COMP_NUM (EQ1). 725 + */ 726 + switch (q->num) { 727 + case MLXSW_PCI_EQ_ASYNC_NUM: 725 728 mlxsw_pci_eq_cmd_event(mlxsw_pci, eqe); 726 729 q->u.eq.ev_cmd_count++; 727 730 break; 728 - case MLXSW_PCI_EQE_EVENT_TYPE_COMP: 731 + case MLXSW_PCI_EQ_COMP_NUM: 729 732 cqn = mlxsw_pci_eqe_cqn_get(eqe); 730 733 set_bit(cqn, active_cqns); 731 734 cq_handle = true;
+2
drivers/net/ethernet/mellanox/mlxsw/spectrum.c
··· 4845 4845 upper_dev = info->upper_dev; 4846 4846 if (info->linking) 4847 4847 break; 4848 + if (is_vlan_dev(upper_dev)) 4849 + mlxsw_sp_rif_destroy_by_dev(mlxsw_sp, upper_dev); 4848 4850 if (netif_is_macvlan(upper_dev)) 4849 4851 mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev); 4850 4852 break;
+4
drivers/net/hamradio/yam.c
··· 966 966 sizeof(struct yamdrv_ioctl_mcs)); 967 967 if (IS_ERR(ym)) 968 968 return PTR_ERR(ym); 969 + if (ym->cmd != SIOCYAMSMCS) 970 + return -EINVAL; 969 971 if (ym->bitrate > YAM_MAXBITRATE) { 970 972 kfree(ym); 971 973 return -EINVAL; ··· 983 981 if (copy_from_user(&yi, ifr->ifr_data, sizeof(struct yamdrv_ioctl_cfg))) 984 982 return -EFAULT; 985 983 984 + if (yi.cmd != SIOCYAMSCFG) 985 + return -EINVAL; 986 986 if ((yi.cfg.mask & YAM_IOBASE) && netif_running(dev)) 987 987 return -EINVAL; /* Cannot change this parameter when up */ 988 988 if ((yi.cfg.mask & YAM_IRQ) && netif_running(dev))
+28 -20
drivers/net/phy/phylink.c
··· 717 717 return 0; 718 718 } 719 719 720 + static int __phylink_connect_phy(struct phylink *pl, struct phy_device *phy, 721 + phy_interface_t interface) 722 + { 723 + int ret; 724 + 725 + if (WARN_ON(pl->link_an_mode == MLO_AN_FIXED || 726 + (pl->link_an_mode == MLO_AN_INBAND && 727 + phy_interface_mode_is_8023z(interface)))) 728 + return -EINVAL; 729 + 730 + if (pl->phydev) 731 + return -EBUSY; 732 + 733 + ret = phy_attach_direct(pl->netdev, phy, 0, interface); 734 + if (ret) 735 + return ret; 736 + 737 + ret = phylink_bringup_phy(pl, phy); 738 + if (ret) 739 + phy_detach(phy); 740 + 741 + return ret; 742 + } 743 + 720 744 /** 721 745 * phylink_connect_phy() - connect a PHY to the phylink instance 722 746 * @pl: a pointer to a &struct phylink returned from phylink_create() ··· 758 734 */ 759 735 int phylink_connect_phy(struct phylink *pl, struct phy_device *phy) 760 736 { 761 - int ret; 762 - 763 - if (WARN_ON(pl->link_an_mode == MLO_AN_FIXED || 764 - (pl->link_an_mode == MLO_AN_INBAND && 765 - phy_interface_mode_is_8023z(pl->link_interface)))) 766 - return -EINVAL; 767 - 768 - if (pl->phydev) 769 - return -EBUSY; 770 - 771 737 /* Use PHY device/driver interface */ 772 738 if (pl->link_interface == PHY_INTERFACE_MODE_NA) { 773 739 pl->link_interface = phy->interface; 774 740 pl->link_config.interface = pl->link_interface; 775 741 } 776 742 777 - ret = phy_attach_direct(pl->netdev, phy, 0, pl->link_interface); 778 - if (ret) 779 - return ret; 780 - 781 - ret = phylink_bringup_phy(pl, phy); 782 - if (ret) 783 - phy_detach(phy); 784 - 785 - return ret; 743 + return __phylink_connect_phy(pl, phy, pl->link_interface); 786 744 } 787 745 EXPORT_SYMBOL_GPL(phylink_connect_phy); 788 746 ··· 1678 1672 1679 1673 static int phylink_sfp_connect_phy(void *upstream, struct phy_device *phy) 1680 1674 { 1681 - return phylink_connect_phy(upstream, phy); 1675 + struct phylink *pl = upstream; 1676 + 1677 + return __phylink_connect_phy(upstream, phy, pl->link_config.interface); 1682 1678 } 1683 1679 1684 1680 static void phylink_sfp_disconnect_phy(void *upstream)
+6
drivers/net/team/team.c
··· 1167 1167 return -EBUSY; 1168 1168 } 1169 1169 1170 + if (dev == port_dev) { 1171 + NL_SET_ERR_MSG(extack, "Cannot enslave team device to itself"); 1172 + netdev_err(dev, "Cannot enslave team device to itself\n"); 1173 + return -EINVAL; 1174 + } 1175 + 1170 1176 if (port_dev->features & NETIF_F_VLAN_CHALLENGED && 1171 1177 vlan_uses_dev(dev)) { 1172 1178 NL_SET_ERR_MSG(extack, "Device is VLAN challenged and team device has VLAN set up");
+1
drivers/net/usb/smsc75xx.c
··· 1520 1520 { 1521 1521 struct smsc75xx_priv *pdata = (struct smsc75xx_priv *)(dev->data[0]); 1522 1522 if (pdata) { 1523 + cancel_work_sync(&pdata->set_multicast); 1523 1524 netif_dbg(dev, ifdown, dev->net, "free pdata\n"); 1524 1525 kfree(pdata); 1525 1526 pdata = NULL;
+18
include/linux/virtio_net.h
··· 5 5 #include <linux/if_vlan.h> 6 6 #include <uapi/linux/virtio_net.h> 7 7 8 + static inline int virtio_net_hdr_set_proto(struct sk_buff *skb, 9 + const struct virtio_net_hdr *hdr) 10 + { 11 + switch (hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) { 12 + case VIRTIO_NET_HDR_GSO_TCPV4: 13 + case VIRTIO_NET_HDR_GSO_UDP: 14 + skb->protocol = cpu_to_be16(ETH_P_IP); 15 + break; 16 + case VIRTIO_NET_HDR_GSO_TCPV6: 17 + skb->protocol = cpu_to_be16(ETH_P_IPV6); 18 + break; 19 + default: 20 + return -EINVAL; 21 + } 22 + 23 + return 0; 24 + } 25 + 8 26 static inline int virtio_net_hdr_to_skb(struct sk_buff *skb, 9 27 const struct virtio_net_hdr *hdr, 10 28 bool little_endian)
+4 -1
kernel/bpf/local_storage.c
··· 129 129 struct bpf_cgroup_storage *storage; 130 130 struct bpf_storage_buffer *new; 131 131 132 - if (flags & BPF_NOEXIST) 132 + if (flags != BPF_ANY && flags != BPF_EXIST) 133 133 return -EINVAL; 134 134 135 135 storage = cgroup_storage_lookup((struct bpf_cgroup_storage_map *)map, ··· 193 193 struct bpf_cgroup_storage_map *map; 194 194 195 195 if (attr->key_size != sizeof(struct bpf_cgroup_storage_key)) 196 + return ERR_PTR(-EINVAL); 197 + 198 + if (attr->value_size == 0) 196 199 return ERR_PTR(-EINVAL); 197 200 198 201 if (attr->value_size > PAGE_SIZE)
+9 -1
kernel/bpf/verifier.c
··· 2896 2896 u64 umin_val, umax_val; 2897 2897 u64 insn_bitness = (BPF_CLASS(insn->code) == BPF_ALU64) ? 64 : 32; 2898 2898 2899 + if (insn_bitness == 32) { 2900 + /* Relevant for 32-bit RSH: Information can propagate towards 2901 + * LSB, so it isn't sufficient to only truncate the output to 2902 + * 32 bits. 2903 + */ 2904 + coerce_reg_to_size(dst_reg, 4); 2905 + coerce_reg_to_size(&src_reg, 4); 2906 + } 2907 + 2899 2908 smin_val = src_reg.smin_value; 2900 2909 smax_val = src_reg.smax_value; 2901 2910 umin_val = src_reg.umin_value; ··· 3140 3131 if (BPF_CLASS(insn->code) != BPF_ALU64) { 3141 3132 /* 32-bit ALU ops are (32,32)->32 */ 3142 3133 coerce_reg_to_size(dst_reg, 4); 3143 - coerce_reg_to_size(&src_reg, 4); 3144 3134 } 3145 3135 3146 3136 __reg_deduce_bounds(dst_reg);
+2 -2
net/bpfilter/bpfilter_kern.c
··· 59 59 req.is_set = is_set; 60 60 req.pid = current->pid; 61 61 req.cmd = optname; 62 - req.addr = (long)optval; 62 + req.addr = (long __force __user)optval; 63 63 req.len = optlen; 64 64 mutex_lock(&bpfilter_lock); 65 65 if (!info.pid) ··· 98 98 pr_info("Loaded bpfilter_umh pid %d\n", info.pid); 99 99 100 100 /* health check that usermode process started correctly */ 101 - if (__bpfilter_process_sockopt(NULL, 0, 0, 0, 0) != 0) { 101 + if (__bpfilter_process_sockopt(NULL, 0, NULL, 0, 0) != 0) { 102 102 stop_umh(); 103 103 return -EFAULT; 104 104 }
+20 -9
net/core/rtnetlink.c
··· 3748 3748 int err = 0; 3749 3749 int fidx = 0; 3750 3750 3751 - err = nlmsg_parse(cb->nlh, sizeof(struct ifinfomsg), tb, 3752 - IFLA_MAX, ifla_policy, NULL); 3753 - if (err < 0) { 3754 - return -EINVAL; 3755 - } else if (err == 0) { 3756 - if (tb[IFLA_MASTER]) 3757 - br_idx = nla_get_u32(tb[IFLA_MASTER]); 3758 - } 3751 + /* A hack to preserve kernel<->userspace interface. 3752 + * Before Linux v4.12 this code accepted ndmsg since iproute2 v3.3.0. 3753 + * However, ndmsg is shorter than ifinfomsg thus nlmsg_parse() bails. 3754 + * So, check for ndmsg with an optional u32 attribute (not used here). 3755 + * Fortunately these sizes don't conflict with the size of ifinfomsg 3756 + * with an optional attribute. 3757 + */ 3758 + if (nlmsg_len(cb->nlh) != sizeof(struct ndmsg) && 3759 + (nlmsg_len(cb->nlh) != sizeof(struct ndmsg) + 3760 + nla_attr_size(sizeof(u32)))) { 3761 + err = nlmsg_parse(cb->nlh, sizeof(struct ifinfomsg), tb, 3762 + IFLA_MAX, ifla_policy, NULL); 3763 + if (err < 0) { 3764 + return -EINVAL; 3765 + } else if (err == 0) { 3766 + if (tb[IFLA_MASTER]) 3767 + br_idx = nla_get_u32(tb[IFLA_MASTER]); 3768 + } 3759 3769 3760 - brport_idx = ifm->ifi_index; 3770 + brport_idx = ifm->ifi_index; 3771 + } 3761 3772 3762 3773 if (br_idx) { 3763 3774 br_dev = __dev_get_by_index(net, br_idx);
+20 -9
net/ipv6/raw.c
··· 651 651 skb->priority = sk->sk_priority; 652 652 skb->mark = sk->sk_mark; 653 653 skb->tstamp = sockc->transmit_time; 654 - skb_dst_set(skb, &rt->dst); 655 - *dstp = NULL; 656 654 657 655 skb_put(skb, length); 658 656 skb_reset_network_header(skb); ··· 663 665 664 666 skb->transport_header = skb->network_header; 665 667 err = memcpy_from_msg(iph, msg, length); 666 - if (err) 667 - goto error_fault; 668 + if (err) { 669 + err = -EFAULT; 670 + kfree_skb(skb); 671 + goto error; 672 + } 673 + 674 + skb_dst_set(skb, &rt->dst); 675 + *dstp = NULL; 668 676 669 677 /* if egress device is enslaved to an L3 master device pass the 670 678 * skb to its handler for processing ··· 679 675 if (unlikely(!skb)) 680 676 return 0; 681 677 678 + /* Acquire rcu_read_lock() in case we need to use rt->rt6i_idev 679 + * in the error path. Since skb has been freed, the dst could 680 + * have been queued for deletion. 681 + */ 682 + rcu_read_lock(); 682 683 IP6_UPD_PO_STATS(net, rt->rt6i_idev, IPSTATS_MIB_OUT, skb->len); 683 684 err = NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT, net, sk, skb, 684 685 NULL, rt->dst.dev, dst_output); 685 686 if (err > 0) 686 687 err = net_xmit_errno(err); 687 - if (err) 688 - goto error; 688 + if (err) { 689 + IP6_INC_STATS(net, rt->rt6i_idev, IPSTATS_MIB_OUTDISCARDS); 690 + rcu_read_unlock(); 691 + goto error_check; 692 + } 693 + rcu_read_unlock(); 689 694 out: 690 695 return 0; 691 696 692 - error_fault: 693 - err = -EFAULT; 694 - kfree_skb(skb); 695 697 error: 696 698 IP6_INC_STATS(net, rt->rt6i_idev, IPSTATS_MIB_OUTDISCARDS); 699 + error_check: 697 700 if (err == -ENOBUFS && !np->recverr) 698 701 err = 0; 699 702 return err;
+1 -1
net/mac80211/cfg.c
··· 427 427 case NL80211_IFTYPE_AP: 428 428 case NL80211_IFTYPE_AP_VLAN: 429 429 /* Keys without a station are used for TX only */ 430 - if (key->sta && test_sta_flag(key->sta, WLAN_STA_MFP)) 430 + if (sta && test_sta_flag(sta, WLAN_STA_MFP)) 431 431 key->conf.flags |= IEEE80211_KEY_FLAG_RX_MGMT; 432 432 break; 433 433 case NL80211_IFTYPE_ADHOC:
+4
net/openvswitch/conntrack.c
··· 1312 1312 1313 1313 rcu_assign_pointer(help->helper, helper); 1314 1314 info->helper = helper; 1315 + 1316 + if (info->nat) 1317 + request_module("ip_nat_%s", name); 1318 + 1315 1319 return 0; 1316 1320 } 1317 1321
+7 -4
net/packet/af_packet.c
··· 2715 2715 } 2716 2716 } 2717 2717 2718 - if (po->has_vnet_hdr && virtio_net_hdr_to_skb(skb, vnet_hdr, 2719 - vio_le())) { 2720 - tp_len = -EINVAL; 2721 - goto tpacket_error; 2718 + if (po->has_vnet_hdr) { 2719 + if (virtio_net_hdr_to_skb(skb, vnet_hdr, vio_le())) { 2720 + tp_len = -EINVAL; 2721 + goto tpacket_error; 2722 + } 2723 + virtio_net_hdr_set_proto(skb, vnet_hdr); 2722 2724 } 2723 2725 2724 2726 skb->destructor = tpacket_destruct_skb; ··· 2917 2915 if (err) 2918 2916 goto out_free; 2919 2917 len += sizeof(vnet_hdr); 2918 + virtio_net_hdr_set_proto(skb, &vnet_hdr); 2920 2919 } 2921 2920 2922 2921 skb_probe_transport_header(skb, reserve);
+20 -4
net/sched/sch_api.c
··· 1311 1311 * Delete/get qdisc. 1312 1312 */ 1313 1313 1314 + const struct nla_policy rtm_tca_policy[TCA_MAX + 1] = { 1315 + [TCA_KIND] = { .type = NLA_STRING }, 1316 + [TCA_OPTIONS] = { .type = NLA_NESTED }, 1317 + [TCA_RATE] = { .type = NLA_BINARY, 1318 + .len = sizeof(struct tc_estimator) }, 1319 + [TCA_STAB] = { .type = NLA_NESTED }, 1320 + [TCA_DUMP_INVISIBLE] = { .type = NLA_FLAG }, 1321 + [TCA_CHAIN] = { .type = NLA_U32 }, 1322 + [TCA_INGRESS_BLOCK] = { .type = NLA_U32 }, 1323 + [TCA_EGRESS_BLOCK] = { .type = NLA_U32 }, 1324 + }; 1325 + 1314 1326 static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n, 1315 1327 struct netlink_ext_ack *extack) 1316 1328 { ··· 1339 1327 !netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN)) 1340 1328 return -EPERM; 1341 1329 1342 - err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL, extack); 1330 + err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, rtm_tca_policy, 1331 + extack); 1343 1332 if (err < 0) 1344 1333 return err; 1345 1334 ··· 1424 1411 1425 1412 replay: 1426 1413 /* Reinit, just in case something touches this. */ 1427 - err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL, extack); 1414 + err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, rtm_tca_policy, 1415 + extack); 1428 1416 if (err < 0) 1429 1417 return err; 1430 1418 ··· 1659 1645 idx = 0; 1660 1646 ASSERT_RTNL(); 1661 1647 1662 - err = nlmsg_parse(nlh, sizeof(struct tcmsg), tca, TCA_MAX, NULL, NULL); 1648 + err = nlmsg_parse(nlh, sizeof(struct tcmsg), tca, TCA_MAX, 1649 + rtm_tca_policy, NULL); 1663 1650 if (err < 0) 1664 1651 return err; 1665 1652 ··· 1879 1864 !netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN)) 1880 1865 return -EPERM; 1881 1866 1882 - err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL, extack); 1867 + err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, rtm_tca_policy, 1868 + extack); 1883 1869 if (err < 0) 1884 1870 return err; 1885 1871
+4 -3
net/wireless/reg.c
··· 2661 2661 { 2662 2662 struct wiphy *wiphy = NULL; 2663 2663 enum reg_request_treatment treatment; 2664 + enum nl80211_reg_initiator initiator = reg_request->initiator; 2664 2665 2665 2666 if (reg_request->wiphy_idx != WIPHY_IDX_INVALID) 2666 2667 wiphy = wiphy_idx_to_wiphy(reg_request->wiphy_idx); 2667 2668 2668 - switch (reg_request->initiator) { 2669 + switch (initiator) { 2669 2670 case NL80211_REGDOM_SET_BY_CORE: 2670 2671 treatment = reg_process_hint_core(reg_request); 2671 2672 break; ··· 2684 2683 treatment = reg_process_hint_country_ie(wiphy, reg_request); 2685 2684 break; 2686 2685 default: 2687 - WARN(1, "invalid initiator %d\n", reg_request->initiator); 2686 + WARN(1, "invalid initiator %d\n", initiator); 2688 2687 goto out_free; 2689 2688 } 2690 2689 ··· 2699 2698 */ 2700 2699 if (treatment == REG_REQ_ALREADY_SET && wiphy && 2701 2700 wiphy->regulatory_flags & REGULATORY_STRICT_REG) { 2702 - wiphy_update_regulatory(wiphy, reg_request->initiator); 2701 + wiphy_update_regulatory(wiphy, initiator); 2703 2702 wiphy_all_share_dfs_chan_state(wiphy); 2704 2703 reg_check_channels(); 2705 2704 }
+10 -4
net/wireless/wext-compat.c
··· 1278 1278 if (err) 1279 1279 return err; 1280 1280 1281 - if (!(sinfo.filled & BIT_ULL(NL80211_STA_INFO_TX_BITRATE))) 1282 - return -EOPNOTSUPP; 1281 + if (!(sinfo.filled & BIT_ULL(NL80211_STA_INFO_TX_BITRATE))) { 1282 + err = -EOPNOTSUPP; 1283 + goto free; 1284 + } 1283 1285 1284 1286 rate->value = 100000 * cfg80211_calculate_bitrate(&sinfo.txrate); 1285 1287 1286 - return 0; 1288 + free: 1289 + cfg80211_sinfo_release_content(&sinfo); 1290 + return err; 1287 1291 } 1288 1292 1289 1293 /* Get wireless statistics. Called by /proc/net/wireless and by SIOCGIWSTATS */ ··· 1297 1293 struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy); 1298 1294 /* we are under RTNL - globally locked - so can use static structs */ 1299 1295 static struct iw_statistics wstats; 1300 - static struct station_info sinfo; 1296 + static struct station_info sinfo = {}; 1301 1297 u8 bssid[ETH_ALEN]; 1302 1298 1303 1299 if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_STATION) ··· 1355 1351 wstats.discard.misc = sinfo.rx_dropped_misc; 1356 1352 if (sinfo.filled & BIT_ULL(NL80211_STA_INFO_TX_FAILED)) 1357 1353 wstats.discard.retries = sinfo.tx_failed; 1354 + 1355 + cfg80211_sinfo_release_content(&sinfo); 1358 1356 1359 1357 return &wstats; 1360 1358 }