Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Pull networking fixes from David Miller:

1) Fix leak in fsl/fman driver, from Dan Carpenter.

2) Call flow dissector initcall earlier than any networking driver can
register and start to use it, from Eric Dumazet.

3) Some dup header fixes from Geliang Tang.

4) TIPC link monitoring compat fix from Jon Paul Maloy.

5) Link changes require EEE re-negotiation in bcm_sf2 driver, from
Florian Fainelli.

6) Fix bogus handle ID passed into tfilter_notify_chain(), from Roman
Mashak.

7) Fix dump size calculation in rtnl_calcit(), from Zhang Shengju.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (26 commits)
tipc: resolve connection flow control compatibility problem
mvpp2: use correct size for memset
net/mlx5: drop duplicate header delay.h
net: ieee802154: drop duplicate header delay.h
ibmvnic: drop duplicate header seq_file.h
fsl/fman: fix a leak in tgec_free()
net: ethtool: don't require CAP_NET_ADMIN for ETHTOOL_GLINKSETTINGS
tipc: improve sanity check for received domain records
tipc: fix compatibility bug in link monitoring
net: ethernet: mvneta: Remove IFF_UNICAST_FLT which is not implemented
dwc_eth_qos: drop duplicate headers
net sched filters: fix filter handle ID in tfilter_notify_chain()
net: dsa: bcm_sf2: Ensure we re-negotiate EEE during after link change
bnxt: do not busy-poll when link is down
udplite: call proper backlog handlers
ipv6: bump genid when the IFA_F_TENTATIVE flag is clear
net/mlx4_en: Free netdev resources under state lock
net: revert "net: l2tp: Treat NET_XMIT_CN as success in l2tp_eth_dev_xmit"
rtnetlink: fix the wrong minimal dump size getting from rtnl_calcit()
bnxt_en: Fix a VXLAN vs GENEVE issue
...

+101 -60
+4
drivers/net/dsa/bcm_sf2.c
··· 588 588 struct phy_device *phydev) 589 589 { 590 590 struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds); 591 + struct ethtool_eee *p = &priv->port_sts[port].eee; 591 592 u32 id_mode_dis = 0, port_mode; 592 593 const char *str = NULL; 593 594 u32 reg; ··· 663 662 reg |= DUPLX_MODE; 664 663 665 664 core_writel(priv, reg, CORE_STS_OVERRIDE_GMIIP_PORT(port)); 665 + 666 + if (!phydev->is_pseudo_fixed_link) 667 + p->eee_enabled = bcm_sf2_eee_init(ds, port, phydev); 666 668 } 667 669 668 670 static void bcm_sf2_sw_fixed_link_update(struct dsa_switch *ds, int port,
+12 -3
drivers/net/ethernet/broadcom/bnxt/bnxt.c
··· 1811 1811 if (atomic_read(&bp->intr_sem) != 0) 1812 1812 return LL_FLUSH_FAILED; 1813 1813 1814 + if (!bp->link_info.link_up) 1815 + return LL_FLUSH_FAILED; 1816 + 1814 1817 if (!bnxt_lock_poll(bnapi)) 1815 1818 return LL_FLUSH_BUSY; 1816 1819 ··· 3213 3210 goto err_out; 3214 3211 } 3215 3212 3216 - if (tunnel_type & TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN) 3213 + switch (tunnel_type) { 3214 + case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN: 3217 3215 bp->vxlan_fw_dst_port_id = resp->tunnel_dst_port_id; 3218 - 3219 - else if (tunnel_type & TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE) 3216 + break; 3217 + case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE: 3220 3218 bp->nge_fw_dst_port_id = resp->tunnel_dst_port_id; 3219 + break; 3220 + default: 3221 + break; 3222 + } 3223 + 3221 3224 err_out: 3222 3225 mutex_unlock(&bp->hwrm_cmd_lock); 3223 3226 return rc;
-3
drivers/net/ethernet/freescale/fman/fman_tgec.c
··· 722 722 { 723 723 free_init_resources(tgec); 724 724 725 - if (tgec->cfg) 726 - tgec->cfg = NULL; 727 - 728 725 kfree(tgec->cfg); 729 726 kfree(tgec); 730 727
-1
drivers/net/ethernet/ibm/ibmvnic.c
··· 74 74 #include <asm/iommu.h> 75 75 #include <linux/uaccess.h> 76 76 #include <asm/firmware.h> 77 - #include <linux/seq_file.h> 78 77 #include <linux/workqueue.h> 79 78 80 79 #include "ibmvnic.h"
+1 -1
drivers/net/ethernet/marvell/mvneta.c
··· 4151 4151 dev->features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO; 4152 4152 dev->hw_features |= dev->features; 4153 4153 dev->vlan_features |= dev->features; 4154 - dev->priv_flags |= IFF_UNICAST_FLT | IFF_LIVE_ADDR_CHANGE; 4154 + dev->priv_flags |= IFF_LIVE_ADDR_CHANGE; 4155 4155 dev->gso_max_segs = MVNETA_MAX_TSO_SEGS; 4156 4156 4157 4157 err = register_netdev(dev);
+1 -1
drivers/net/ethernet/marvell/mvpp2.c
··· 3293 3293 mvpp2_write(priv, MVPP2_CLS_MODE_REG, MVPP2_CLS_MODE_ACTIVE_MASK); 3294 3294 3295 3295 /* Clear classifier flow table */ 3296 - memset(&fe.data, 0, MVPP2_CLS_FLOWS_TBL_DATA_WORDS); 3296 + memset(&fe.data, 0, sizeof(fe.data)); 3297 3297 for (index = 0; index < MVPP2_CLS_FLOWS_TBL_SIZE; index++) { 3298 3298 fe.index = index; 3299 3299 mvpp2_cls_flow_write(priv, &fe);
+4 -1
drivers/net/ethernet/mellanox/mlx4/en_netdev.c
··· 129 129 } 130 130 }; 131 131 132 + /* Must not acquire state_lock, as its corresponding work_sync 133 + * is done under it. 134 + */ 132 135 static void mlx4_en_filter_work(struct work_struct *work) 133 136 { 134 137 struct mlx4_en_filter *filter = container_of(work, ··· 2192 2189 mutex_lock(&mdev->state_lock); 2193 2190 mdev->pndev[priv->port] = NULL; 2194 2191 mdev->upper[priv->port] = NULL; 2195 - mutex_unlock(&mdev->state_lock); 2196 2192 2197 2193 #ifdef CONFIG_RFS_ACCEL 2198 2194 mlx4_en_cleanup_filters(priv); 2199 2195 #endif 2200 2196 2201 2197 mlx4_en_free_resources(priv); 2198 + mutex_unlock(&mdev->state_lock); 2202 2199 2203 2200 kfree(priv->tx_ring); 2204 2201 kfree(priv->tx_cq);
-1
drivers/net/ethernet/mellanox/mlx5/core/main.c
··· 46 46 #include <linux/mlx5/srq.h> 47 47 #include <linux/debugfs.h> 48 48 #include <linux/kmod.h> 49 - #include <linux/delay.h> 50 49 #include <linux/mlx5/mlx5_ifc.h> 51 50 #ifdef CONFIG_RFS_ACCEL 52 51 #include <linux/cpu_rmap.h>
-2
drivers/net/ethernet/synopsys/dwc_eth_qos.c
··· 33 33 #include <linux/stat.h> 34 34 #include <linux/types.h> 35 35 36 - #include <linux/types.h> 37 36 #include <linux/slab.h> 38 37 #include <linux/delay.h> 39 38 #include <linux/mm.h> ··· 42 43 43 44 #include <linux/phy.h> 44 45 #include <linux/mii.h> 45 - #include <linux/delay.h> 46 46 #include <linux/dma-mapping.h> 47 47 #include <linux/vmalloc.h> 48 48
-1
drivers/net/ieee802154/adf7242.c
··· 20 20 #include <linux/skbuff.h> 21 21 #include <linux/of.h> 22 22 #include <linux/irq.h> 23 - #include <linux/delay.h> 24 23 #include <linux/debugfs.h> 25 24 #include <linux/bitops.h> 26 25 #include <linux/ieee802154.h>
+2 -1
drivers/net/macvlan.c
··· 623 623 return 0; 624 624 625 625 clear_multi: 626 - dev_set_allmulti(lowerdev, -1); 626 + if (dev->flags & IFF_ALLMULTI) 627 + dev_set_allmulti(lowerdev, -1); 627 628 del_unicast: 628 629 dev_uc_del(lowerdev, dev->dev_addr); 629 630 out:
+4 -4
drivers/net/phy/micrel.c
··· 318 318 /* Limit supported and advertised modes in fiber mode */ 319 319 if (of_property_read_bool(of_node, "micrel,fiber-mode")) { 320 320 phydev->dev_flags |= MICREL_PHY_FXEN; 321 - phydev->supported &= SUPPORTED_FIBRE | 322 - SUPPORTED_100baseT_Full | 321 + phydev->supported &= SUPPORTED_100baseT_Full | 323 322 SUPPORTED_100baseT_Half; 324 - phydev->advertising &= ADVERTISED_FIBRE | 325 - ADVERTISED_100baseT_Full | 323 + phydev->supported |= SUPPORTED_FIBRE; 324 + phydev->advertising &= ADVERTISED_100baseT_Full | 326 325 ADVERTISED_100baseT_Half; 326 + phydev->advertising |= ADVERTISED_FIBRE; 327 327 phydev->autoneg = AUTONEG_DISABLE; 328 328 } 329 329
+1 -1
include/linux/netdevice.h
··· 1619 1619 * @dcbnl_ops: Data Center Bridging netlink ops 1620 1620 * @num_tc: Number of traffic classes in the net device 1621 1621 * @tc_to_txq: XXX: need comments on this one 1622 - * @prio_tc_map XXX: need comments on this one 1622 + * @prio_tc_map: XXX: need comments on this one 1623 1623 * 1624 1624 * @fcoe_ddp_xid: Max exchange id for FCoE LRO by ddp 1625 1625 *
+1 -1
include/net/bluetooth/hci_core.h
··· 1018 1018 } 1019 1019 1020 1020 struct hci_dev *hci_dev_get(int index); 1021 - struct hci_dev *hci_get_route(bdaddr_t *dst, bdaddr_t *src); 1021 + struct hci_dev *hci_get_route(bdaddr_t *dst, bdaddr_t *src, u8 src_type); 1022 1022 1023 1023 struct hci_dev *hci_alloc_dev(void); 1024 1024 void hci_free_dev(struct hci_dev *hdev);
+2 -2
net/bluetooth/6lowpan.c
··· 1090 1090 { 1091 1091 struct hci_conn *hcon; 1092 1092 struct hci_dev *hdev; 1093 - bdaddr_t *src = BDADDR_ANY; 1094 1093 int n; 1095 1094 1096 1095 n = sscanf(buf, "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx %hhu", ··· 1100 1101 if (n < 7) 1101 1102 return -EINVAL; 1102 1103 1103 - hdev = hci_get_route(addr, src); 1104 + /* The LE_PUBLIC address type is ignored because of BDADDR_ANY */ 1105 + hdev = hci_get_route(addr, BDADDR_ANY, BDADDR_LE_PUBLIC); 1104 1106 if (!hdev) 1105 1107 return -ENOENT; 1106 1108
+24 -2
net/bluetooth/hci_conn.c
··· 613 613 return 0; 614 614 } 615 615 616 - struct hci_dev *hci_get_route(bdaddr_t *dst, bdaddr_t *src) 616 + struct hci_dev *hci_get_route(bdaddr_t *dst, bdaddr_t *src, uint8_t src_type) 617 617 { 618 618 int use_src = bacmp(src, BDADDR_ANY); 619 619 struct hci_dev *hdev = NULL, *d; ··· 634 634 */ 635 635 636 636 if (use_src) { 637 - if (!bacmp(&d->bdaddr, src)) { 637 + bdaddr_t id_addr; 638 + u8 id_addr_type; 639 + 640 + if (src_type == BDADDR_BREDR) { 641 + if (!lmp_bredr_capable(d)) 642 + continue; 643 + bacpy(&id_addr, &d->bdaddr); 644 + id_addr_type = BDADDR_BREDR; 645 + } else { 646 + if (!lmp_le_capable(d)) 647 + continue; 648 + 649 + hci_copy_identity_address(d, &id_addr, 650 + &id_addr_type); 651 + 652 + /* Convert from HCI to three-value type */ 653 + if (id_addr_type == ADDR_LE_DEV_PUBLIC) 654 + id_addr_type = BDADDR_LE_PUBLIC; 655 + else 656 + id_addr_type = BDADDR_LE_RANDOM; 657 + } 658 + 659 + if (!bacmp(&id_addr, src) && id_addr_type == src_type) { 638 660 hdev = d; break; 639 661 } 640 662 } else {
+1 -1
net/bluetooth/l2cap_core.c
··· 7060 7060 BT_DBG("%pMR -> %pMR (type %u) psm 0x%2.2x", &chan->src, dst, 7061 7061 dst_type, __le16_to_cpu(psm)); 7062 7062 7063 - hdev = hci_get_route(dst, &chan->src); 7063 + hdev = hci_get_route(dst, &chan->src, chan->src_type); 7064 7064 if (!hdev) 7065 7065 return -EHOSTUNREACH; 7066 7066
+1 -1
net/bluetooth/rfcomm/tty.c
··· 178 178 struct hci_dev *hdev; 179 179 struct hci_conn *conn; 180 180 181 - hdev = hci_get_route(&dev->dst, &dev->src); 181 + hdev = hci_get_route(&dev->dst, &dev->src, BDADDR_BREDR); 182 182 if (!hdev) 183 183 return; 184 184
+1 -1
net/bluetooth/sco.c
··· 219 219 220 220 BT_DBG("%pMR -> %pMR", &sco_pi(sk)->src, &sco_pi(sk)->dst); 221 221 222 - hdev = hci_get_route(&sco_pi(sk)->dst, &sco_pi(sk)->src); 222 + hdev = hci_get_route(&sco_pi(sk)->dst, &sco_pi(sk)->src, BDADDR_BREDR); 223 223 if (!hdev) 224 224 return -EHOSTUNREACH; 225 225
+10 -8
net/can/bcm.c
··· 77 77 (CAN_EFF_MASK | CAN_EFF_FLAG | CAN_RTR_FLAG) : \ 78 78 (CAN_SFF_MASK | CAN_EFF_FLAG | CAN_RTR_FLAG)) 79 79 80 - #define CAN_BCM_VERSION "20160617" 80 + #define CAN_BCM_VERSION "20161123" 81 81 82 82 MODULE_DESCRIPTION("PF_CAN broadcast manager protocol"); 83 83 MODULE_LICENSE("Dual BSD/GPL"); ··· 109 109 u32 count; 110 110 u32 nframes; 111 111 u32 currframe; 112 - struct canfd_frame *frames; 113 - struct canfd_frame *last_frames; 112 + /* void pointers to arrays of struct can[fd]_frame */ 113 + void *frames; 114 + void *last_frames; 114 115 struct canfd_frame sframe; 115 116 struct canfd_frame last_sframe; 116 117 struct sock *sk; ··· 682 681 683 682 if (op->flags & RX_FILTER_ID) { 684 683 /* the easiest case */ 685 - bcm_rx_update_and_send(op, &op->last_frames[0], rxframe); 684 + bcm_rx_update_and_send(op, op->last_frames, rxframe); 686 685 goto rx_starttimer; 687 686 } 688 687 ··· 1069 1068 1070 1069 if (msg_head->nframes) { 1071 1070 /* update CAN frames content */ 1072 - err = memcpy_from_msg((u8 *)op->frames, msg, 1071 + err = memcpy_from_msg(op->frames, msg, 1073 1072 msg_head->nframes * op->cfsiz); 1074 1073 if (err < 0) 1075 1074 return err; ··· 1119 1118 } 1120 1119 1121 1120 if (msg_head->nframes) { 1122 - err = memcpy_from_msg((u8 *)op->frames, msg, 1121 + err = memcpy_from_msg(op->frames, msg, 1123 1122 msg_head->nframes * op->cfsiz); 1124 1123 if (err < 0) { 1125 1124 if (op->frames != &op->sframe) ··· 1164 1163 /* check flags */ 1165 1164 1166 1165 if (op->flags & RX_RTR_FRAME) { 1166 + struct canfd_frame *frame0 = op->frames; 1167 1167 1168 1168 /* no timers in RTR-mode */ 1169 1169 hrtimer_cancel(&op->thrtimer); ··· 1176 1174 * prevent a full-load-loopback-test ... ;-] 1177 1175 */ 1178 1176 if ((op->flags & TX_CP_CAN_ID) || 1179 - (op->frames[0].can_id == op->can_id)) 1180 - op->frames[0].can_id = op->can_id & ~CAN_RTR_FLAG; 1177 + (frame0->can_id == op->can_id)) 1178 + frame0->can_id = op->can_id & ~CAN_RTR_FLAG; 1181 1179 1182 1180 } else { 1183 1181 if (op->flags & SETTIMER) {
+1
net/core/ethtool.c
··· 2479 2479 case ETHTOOL_GET_TS_INFO: 2480 2480 case ETHTOOL_GEEE: 2481 2481 case ETHTOOL_GTUNABLE: 2482 + case ETHTOOL_GLINKSETTINGS: 2482 2483 break; 2483 2484 default: 2484 2485 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
+1 -1
net/core/flow_dissector.c
··· 1013 1013 return 0; 1014 1014 } 1015 1015 1016 - late_initcall_sync(init_default_flow_dissectors); 1016 + core_initcall(init_default_flow_dissectors);
+1 -1
net/core/rtnetlink.c
··· 2737 2737 ext_filter_mask)); 2738 2738 } 2739 2739 2740 - return min_ifinfo_dump_size; 2740 + return nlmsg_total_size(min_ifinfo_dump_size); 2741 2741 } 2742 2742 2743 2743 static int rtnl_dump_all(struct sk_buff *skb, struct netlink_callback *cb)
+1 -1
net/ipv4/udp.c
··· 1455 1455 udp_lib_rehash(sk, new_hash); 1456 1456 } 1457 1457 1458 - static int __udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) 1458 + int __udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) 1459 1459 { 1460 1460 int rc; 1461 1461
+1 -1
net/ipv4/udp_impl.h
··· 25 25 int flags, int *addr_len); 26 26 int udp_sendpage(struct sock *sk, struct page *page, int offset, size_t size, 27 27 int flags); 28 - int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb); 28 + int __udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb); 29 29 void udp_destroy_sock(struct sock *sk); 30 30 31 31 #ifdef CONFIG_PROC_FS
+1 -1
net/ipv4/udplite.c
··· 50 50 .sendmsg = udp_sendmsg, 51 51 .recvmsg = udp_recvmsg, 52 52 .sendpage = udp_sendpage, 53 - .backlog_rcv = udp_queue_rcv_skb, 53 + .backlog_rcv = __udp_queue_rcv_skb, 54 54 .hash = udp_lib_hash, 55 55 .unhash = udp_lib_unhash, 56 56 .get_port = udp_v4_get_port,
+12 -6
net/ipv6/addrconf.c
··· 183 183 184 184 static void addrconf_dad_start(struct inet6_ifaddr *ifp); 185 185 static void addrconf_dad_work(struct work_struct *w); 186 - static void addrconf_dad_completed(struct inet6_ifaddr *ifp); 186 + static void addrconf_dad_completed(struct inet6_ifaddr *ifp, bool bump_id); 187 187 static void addrconf_dad_run(struct inet6_dev *idev); 188 188 static void addrconf_rs_timer(unsigned long data); 189 189 static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifa); ··· 2898 2898 spin_lock_bh(&ifp->lock); 2899 2899 ifp->flags &= ~IFA_F_TENTATIVE; 2900 2900 spin_unlock_bh(&ifp->lock); 2901 + rt_genid_bump_ipv6(dev_net(idev->dev)); 2901 2902 ipv6_ifa_notify(RTM_NEWADDR, ifp); 2902 2903 in6_ifa_put(ifp); 2903 2904 } ··· 3741 3740 { 3742 3741 struct inet6_dev *idev = ifp->idev; 3743 3742 struct net_device *dev = idev->dev; 3744 - bool notify = false; 3743 + bool bump_id, notify = false; 3745 3744 3746 3745 addrconf_join_solict(dev, &ifp->addr); 3747 3746 ··· 3756 3755 idev->cnf.accept_dad < 1 || 3757 3756 !(ifp->flags&IFA_F_TENTATIVE) || 3758 3757 ifp->flags & IFA_F_NODAD) { 3758 + bump_id = ifp->flags & IFA_F_TENTATIVE; 3759 3759 ifp->flags &= ~(IFA_F_TENTATIVE|IFA_F_OPTIMISTIC|IFA_F_DADFAILED); 3760 3760 spin_unlock(&ifp->lock); 3761 3761 read_unlock_bh(&idev->lock); 3762 3762 3763 - addrconf_dad_completed(ifp); 3763 + addrconf_dad_completed(ifp, bump_id); 3764 3764 return; 3765 3765 } 3766 3766 ··· 3821 3819 struct inet6_ifaddr, 3822 3820 dad_work); 3823 3821 struct inet6_dev *idev = ifp->idev; 3822 + bool bump_id, disable_ipv6 = false; 3824 3823 struct in6_addr mcaddr; 3825 - bool disable_ipv6 = false; 3826 3824 3827 3825 enum { 3828 3826 DAD_PROCESS, ··· 3892 3890 * DAD was successful 3893 3891 */ 3894 3892 3893 + bump_id = ifp->flags & IFA_F_TENTATIVE; 3895 3894 ifp->flags &= ~(IFA_F_TENTATIVE|IFA_F_OPTIMISTIC|IFA_F_DADFAILED); 3896 3895 spin_unlock(&ifp->lock); 3897 3896 write_unlock_bh(&idev->lock); 3898 3897 3899 - addrconf_dad_completed(ifp); 3898 + addrconf_dad_completed(ifp, bump_id); 3900 3899 3901 3900 goto out; 3902 3901 } ··· 3934 3931 return true; 3935 3932 } 3936 3933 3937 - static void addrconf_dad_completed(struct inet6_ifaddr *ifp) 3934 + static void addrconf_dad_completed(struct inet6_ifaddr *ifp, bool bump_id) 3938 3935 { 3939 3936 struct net_device *dev = ifp->idev->dev; 3940 3937 struct in6_addr lladdr; ··· 3986 3983 spin_unlock(&ifp->lock); 3987 3984 write_unlock_bh(&ifp->idev->lock); 3988 3985 } 3986 + 3987 + if (bump_id) 3988 + rt_genid_bump_ipv6(dev_net(dev)); 3989 3989 } 3990 3990 3991 3991 static void addrconf_dad_run(struct inet6_dev *idev)
+1 -1
net/ipv6/udp.c
··· 514 514 return; 515 515 } 516 516 517 - static int __udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) 517 + int __udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) 518 518 { 519 519 int rc; 520 520
+1 -1
net/ipv6/udp_impl.h
··· 26 26 int udpv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len); 27 27 int udpv6_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int noblock, 28 28 int flags, int *addr_len); 29 - int udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb); 29 + int __udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb); 30 30 void udpv6_destroy_sock(struct sock *sk); 31 31 32 32 #ifdef CONFIG_PROC_FS
+1 -1
net/ipv6/udplite.c
··· 45 45 .getsockopt = udpv6_getsockopt, 46 46 .sendmsg = udpv6_sendmsg, 47 47 .recvmsg = udpv6_recvmsg, 48 - .backlog_rcv = udpv6_queue_rcv_skb, 48 + .backlog_rcv = __udpv6_queue_rcv_skb, 49 49 .hash = udp_lib_hash, 50 50 .unhash = udp_lib_unhash, 51 51 .get_port = udp_v6_get_port,
+1 -1
net/l2tp/l2tp_eth.c
··· 97 97 unsigned int len = skb->len; 98 98 int ret = l2tp_xmit_skb(session, skb, session->hdr_len); 99 99 100 - if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) { 100 + if (likely(ret == NET_XMIT_SUCCESS)) { 101 101 atomic_long_add(len, &priv->tx_bytes); 102 102 atomic_long_inc(&priv->tx_packets); 103 103 } else {
+1 -1
net/sched/cls_api.c
··· 112 112 113 113 for (it_chain = chain; (tp = rtnl_dereference(*it_chain)) != NULL; 114 114 it_chain = &tp->next) 115 - tfilter_notify(net, oskb, n, tp, n->nlmsg_flags, event, false); 115 + tfilter_notify(net, oskb, n, tp, 0, event, false); 116 116 } 117 117 118 118 /* Select new prio value from the range, managed by kernel. */
+3 -2
net/tipc/link.c
··· 1492 1492 if (in_range(peers_tol, TIPC_MIN_LINK_TOL, TIPC_MAX_LINK_TOL)) 1493 1493 l->tolerance = peers_tol; 1494 1494 1495 - if (peers_prio && in_range(peers_prio, TIPC_MIN_LINK_PRI, 1496 - TIPC_MAX_LINK_PRI)) { 1495 + /* Update own prio if peer indicates a different value */ 1496 + if ((peers_prio != l->priority) && 1497 + in_range(peers_prio, 1, TIPC_MAX_LINK_PRI)) { 1497 1498 l->priority = peers_prio; 1498 1499 rc = tipc_link_fsm_evt(l, LINK_FAILURE_EVT); 1499 1500 }
+5 -5
net/tipc/monitor.c
··· 455 455 int i, applied_bef; 456 456 457 457 state->probing = false; 458 - if (!dlen) 459 - return; 460 458 461 459 /* Sanity check received domain record */ 462 - if ((dlen < new_dlen) || ntohs(arrv_dom->len) != new_dlen) { 463 - pr_warn_ratelimited("Received illegal domain record\n"); 460 + if (dlen < dom_rec_len(arrv_dom, 0)) 464 461 return; 465 - } 462 + if (dlen != dom_rec_len(arrv_dom, new_member_cnt)) 463 + return; 464 + if ((dlen < new_dlen) || ntohs(arrv_dom->len) != new_dlen) 465 + return; 466 466 467 467 /* Synch generation numbers with peer if link just came up */ 468 468 if (!state->synched) {
+1 -1
net/tipc/socket.c
··· 186 186 187 187 static bool tsk_conn_cong(struct tipc_sock *tsk) 188 188 { 189 - return tsk->snt_unacked >= tsk->snd_win; 189 + return tsk->snt_unacked > tsk->snd_win; 190 190 } 191 191 192 192 /* tsk_blocks(): translate a buffer size in bytes to number of