Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

Merge tag 'net-7.0-rc4' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net

Pull networking fixes from Paolo Abeni:
"Including fixes from CAN and netfilter.

Current release - regressions:

- eth: mana: Null service_wq on setup error to prevent double destroy

Previous releases - regressions:

- nexthop: fix percpu use-after-free in remove_nh_grp_entry

- sched: teql: fix NULL pointer dereference in iptunnel_xmit on TEQL slave xmit

- bpf: fix nd_tbl NULL dereference when IPv6 is disabled

- neighbour: restore protocol != 0 check in pneigh update

- tipc: fix divide-by-zero in tipc_sk_filter_connect()

- eth:
   - mlx5:
      - fix crash when moving to switchdev mode
      - fix DMA FIFO desync on error CQE SQ recovery
   - iavf: fix PTP use-after-free during reset
   - bonding: fix type confusion in bond_setup_by_slave()
   - lan78xx: fix WARN in __netif_napi_del_locked on disconnect

Previous releases - always broken:

- core: add xmit recursion limit to tunnel xmit functions

- net-shapers: don't free reply skb after genlmsg_reply()

- netfilter:
   - fix stack out-of-bounds read in pipapo_drop()
   - fix OOB read in nfnl_cthelper_dump_table()

- mctp:
   - fix device leak on probe failure
   - i2c: fix skb memory leak in receive path

- can: keep the max bitrate error at 5%

- eth:
   - bonding: fix nd_tbl NULL dereference when IPv6 is disabled
   - bnxt_en: fix RSS table size check when changing ethtool channels
   - amd-xgbe: prevent CRC errors during RX adaptation with AN disabled
   - octeontx2-af: devlink: fix NIX RAS reporter recovery condition"

* tag 'net-7.0-rc4' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net: (71 commits)
net: prevent NULL deref in ip[6]tunnel_xmit()
octeontx2-af: devlink: fix NIX RAS reporter to use RAS interrupt status
octeontx2-af: devlink: fix NIX RAS reporter recovery condition
net: ethernet: ti: am65-cpsw-nuss: Fix rx_filter value for PTP support
net/mana: Null service_wq on setup error to prevent double destroy
selftests: rtnetlink: add neighbour update test
neighbour: restore protocol != 0 check in pneigh update
net: dsa: realtek: Fix LED group port bit for non-zero LED group
tipc: fix divide-by-zero in tipc_sk_filter_connect()
net: dsa: microchip: Fix error path in PTP IRQ setup
bpf: bpf_out_neigh_v6: Fix nd_tbl NULL dereference when IPv6 is disabled
bpf: bpf_out_neigh_v4: Fix nd_tbl NULL dereference when IPv6 is disabled
net: bonding: Fix nd_tbl NULL dereference when IPv6 is disabled
ipv6: move the disable_ipv6_mod knob to core code
net: bcmgenet: fix broken EEE by converting to phylib-managed state
net-shapers: don't free reply skb after genlmsg_reply()
net: dsa: mxl862xx: don't set user_mii_bus
net: ethernet: arc: emac: quiesce interrupts before requesting IRQ
page_pool: store detach_time as ktime_t to avoid false-negatives
net: macb: Shuffle the tx ring before enabling tx
...

+668 -327
+1 -1
MAINTAINERS
···
 
 MEDIATEK T7XX 5G WWAN MODEM DRIVER
 M: Chandrashekar Devegowda <chandrashekar.devegowda@intel.com>
-R: Chiranjeevi Rapolu <chiranjeevi.rapolu@linux.intel.com>
 R: Liu Haijun <haijun.liu@mediatek.com>
 R: Ricardo Martinez <ricardo.martinez@linux.intel.com>
 L: netdev@vger.kernel.org
···
 F: include/net/pkt_sched.h
 F: include/net/sch_priv.h
 F: include/net/tc_act/
+F: include/net/tc_wrapper.h
 F: include/uapi/linux/pkt_cls.h
 F: include/uapi/linux/pkt_sched.h
 F: include/uapi/linux/tc_act/
+63 -7
drivers/net/bonding/bond_main.c
···
     return features;
 }
 
+static int bond_header_create(struct sk_buff *skb, struct net_device *bond_dev,
+                              unsigned short type, const void *daddr,
+                              const void *saddr, unsigned int len)
+{
+    struct bonding *bond = netdev_priv(bond_dev);
+    const struct header_ops *slave_ops;
+    struct slave *slave;
+    int ret = 0;
+
+    rcu_read_lock();
+    slave = rcu_dereference(bond->curr_active_slave);
+    if (slave) {
+        slave_ops = READ_ONCE(slave->dev->header_ops);
+        if (slave_ops && slave_ops->create)
+            ret = slave_ops->create(skb, slave->dev,
+                                    type, daddr, saddr, len);
+    }
+    rcu_read_unlock();
+    return ret;
+}
+
+static int bond_header_parse(const struct sk_buff *skb, unsigned char *haddr)
+{
+    struct bonding *bond = netdev_priv(skb->dev);
+    const struct header_ops *slave_ops;
+    struct slave *slave;
+    int ret = 0;
+
+    rcu_read_lock();
+    slave = rcu_dereference(bond->curr_active_slave);
+    if (slave) {
+        slave_ops = READ_ONCE(slave->dev->header_ops);
+        if (slave_ops && slave_ops->parse)
+            ret = slave_ops->parse(skb, haddr);
+    }
+    rcu_read_unlock();
+    return ret;
+}
+
+static const struct header_ops bond_header_ops = {
+    .create = bond_header_create,
+    .parse  = bond_header_parse,
+};
+
 static void bond_setup_by_slave(struct net_device *bond_dev,
                                 struct net_device *slave_dev)
 {
···
     dev_close(bond_dev);
 
-    bond_dev->header_ops = slave_dev->header_ops;
+    bond_dev->header_ops = slave_dev->header_ops ?
+                           &bond_header_ops : NULL;
 
     bond_dev->type = slave_dev->type;
     bond_dev->hard_header_len = slave_dev->hard_header_len;
···
 
         continue;
 
+    case BOND_LINK_FAIL:
+    case BOND_LINK_BACK:
+        slave_dbg(bond->dev, slave->dev, "link_new_state %d on slave\n",
+                  slave->link_new_state);
+        continue;
+
     default:
-        slave_err(bond->dev, slave->dev, "invalid new link %d on slave\n",
+        slave_err(bond->dev, slave->dev, "invalid link_new_state %d on slave\n",
                   slave->link_new_state);
         bond_propose_link_state(slave, BOND_LINK_NOCHANGE);
 
···
     } else if (is_arp) {
         return bond_arp_rcv(skb, bond, slave);
 #if IS_ENABLED(CONFIG_IPV6)
-    } else if (is_ipv6) {
+    } else if (is_ipv6 && likely(ipv6_mod_enabled())) {
         return bond_na_rcv(skb, bond, slave);
 #endif
     } else {
···
 {
     struct bond_up_slave *usable, *all;
 
-    usable = rtnl_dereference(bond->usable_slaves);
-    rcu_assign_pointer(bond->usable_slaves, usable_slaves);
-    kfree_rcu(usable, rcu);
-
     all = rtnl_dereference(bond->all_slaves);
     rcu_assign_pointer(bond->all_slaves, all_slaves);
     kfree_rcu(all, rcu);
+
+    if (BOND_MODE(bond) == BOND_MODE_BROADCAST) {
+        kfree_rcu(usable_slaves, rcu);
+        return;
+    }
+
+    usable = rtnl_dereference(bond->usable_slaves);
+    rcu_assign_pointer(bond->usable_slaves, usable_slaves);
+    kfree_rcu(usable, rcu);
 }
 
 static void bond_reset_slave_arr(struct bonding *bond)
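
A note on the pattern above: the new bond_header_ops delegate header construction and parsing to whichever slave is currently active, reading the pointer once under RCU so the active slave may change concurrently. A minimal userspace sketch of the same delegation idea, using a C11 acquire-load in place of rcu_dereference() (all names here are illustrative, not from the driver):

#include <stdatomic.h>
#include <stdio.h>
#include <stddef.h>

struct header_ops {
    int (*create)(const char *dst);
};

static int eth_create(const char *dst) { return printf("eth hdr -> %s\n", dst); }

struct slave {
    const struct header_ops *ops;
};

/* The "current active slave" pointer, swapped atomically at runtime;
 * readers take one stable snapshot and call through whatever they saw. */
static _Atomic(struct slave *) curr_active;

static int bond_create(const char *dst)
{
    struct slave *s = atomic_load_explicit(&curr_active,
                                           memory_order_acquire);
    if (s && s->ops && s->ops->create)
        return s->ops->create(dst);
    return 0;   /* no active slave: nothing to build, but no crash */
}

int main(void)
{
    static const struct header_ops eth_ops = { .create = eth_create };
    static struct slave s = { .ops = &eth_ops };

    atomic_store_explicit(&curr_active, &s, memory_order_release);
    bond_create("02:00:00:00:00:01");
    atomic_store_explicit(&curr_active, NULL, memory_order_release);
    return bond_create("02:00:00:00:00:01") ? 1 : 0;
}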
+3
drivers/net/caif/caif_serial.c
···
         dev_close(ser->dev);
         unregister_netdevice(ser->dev);
         debugfs_deinit(ser);
+        tty_kref_put(tty->link);
         tty_kref_put(tty);
     }
     rtnl_unlock();
···
 
     ser = netdev_priv(dev);
     ser->tty = tty_kref_get(tty);
+    tty_kref_get(tty->link);
     ser->dev = dev;
     debugfs_init(ser, tty);
     tty->receive_room = 4096;
···
     rtnl_lock();
     result = register_netdevice(dev);
     if (result) {
+        tty_kref_put(tty->link);
         tty_kref_put(tty);
         rtnl_unlock();
         free_netdev(dev);
+1 -1
drivers/net/can/dev/calc_bittiming.c
···
 #include <linux/units.h>
 #include <linux/can/dev.h>
 
-#define CAN_CALC_MAX_ERROR 50 /* in one-tenth of a percent */
+#define CAN_CALC_MAX_ERROR 500 /* max error 5% */
 
 /* CiA recommended sample points for Non Return to Zero encoding. */
 static int can_calc_sample_point_nrz(const struct can_bittiming *bt)
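
For reference, the quantity this macro bounds is the relative deviation between the requested and the achievable bitrate; with the new constant of 500 and the comment's 5% ceiling, the scale works out to hundredths of a percent. A standalone sketch of that acceptance check (the unit scale is inferred from the comment, not taken from the driver):

#include <stdio.h>

#define CAN_CALC_MAX_ERROR 500 /* max error 5%, assumed to be 0.01% units */

/* Relative bitrate error in hundredths of a percent. */
static unsigned int bitrate_error(unsigned int wanted, unsigned int got)
{
    unsigned int diff = wanted > got ? wanted - got : got - wanted;

    return (unsigned int)((10000ULL * diff) / wanted);
}

int main(void)
{
    /* 500 kbit/s requested, 479 kbit/s achievable: 4.20% -> accepted */
    unsigned int err = bitrate_error(500000, 479000);

    printf("error = %u.%02u%% -> %s\n", err / 100, err % 100,
           err > CAN_CALC_MAX_ERROR ? "rejected" : "accepted");
    return 0;
}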
+4 -1
drivers/net/can/spi/hi311x.c
···
         return ret;
 
     mutex_lock(&priv->hi3110_lock);
-    hi3110_power_enable(priv->transceiver, 1);
+    ret = hi3110_power_enable(priv->transceiver, 1);
+    if (ret)
+        goto out_close_candev;
 
     priv->force_quit = 0;
     priv->tx_skb = NULL;
···
     hi3110_hw_sleep(spi);
 out_close:
     hi3110_power_enable(priv->transceiver, 0);
+out_close_candev:
     close_candev(net);
     mutex_unlock(&priv->hi3110_lock);
     return ret;
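
The fix follows the kernel's usual inverted goto-cleanup ladder: each later label undoes strictly less than the one before it, so a failure at step N skips the teardown of steps that never ran. A compilable toy version of the pattern (resource names invented for illustration):

#include <stdio.h>

static int step_a(void) { puts("a: acquired"); return 0; }
static int step_b(int fail) { if (fail) return -1; puts("b: acquired"); return 0; }
static void undo_a(void) { puts("a: released"); }

static int open_path(int fail_b)
{
    int ret;

    ret = step_a();
    if (ret)
        goto out;

    ret = step_b(fail_b);
    if (ret)
        goto out_undo_a;    /* b failed: only unwind a */

    return 0;

out_undo_a:
    undo_a();
out:
    return ret;
}

int main(void)
{
    return open_path(1) ? 0 : 1;    /* exercise the failure path */
}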
+8 -3
drivers/net/dsa/microchip/ksz_ptp.c
···
     const struct ksz_dev_ops *ops = port->ksz_dev->dev_ops;
     struct ksz_irq *ptpirq = &port->ptpirq;
     struct ksz_ptp_irq *ptpmsg_irq;
+    int ret;
 
     ptpmsg_irq = &port->ptpmsg_irq[n];
     ptpmsg_irq->num = irq_create_mapping(ptpirq->domain, n);
···
 
     strscpy(ptpmsg_irq->name, name[n]);
 
-    return request_threaded_irq(ptpmsg_irq->num, NULL,
-                                ksz_ptp_msg_thread_fn, IRQF_ONESHOT,
-                                ptpmsg_irq->name, ptpmsg_irq);
+    ret = request_threaded_irq(ptpmsg_irq->num, NULL,
+                               ksz_ptp_msg_thread_fn, IRQF_ONESHOT,
+                               ptpmsg_irq->name, ptpmsg_irq);
+    if (ret)
+        irq_dispose_mapping(ptpmsg_irq->num);
+
+    return ret;
 }
 
 int ksz_ptp_irq_setup(struct dsa_switch *ds, u8 p)
-1
drivers/net/dsa/mxl862xx/mxl862xx.c
···
         return -ENOMEM;
 
     bus->priv = priv;
-    ds->user_mii_bus = bus;
     bus->name = KBUILD_MODNAME "-mii";
     snprintf(bus->id, MII_BUS_ID_SIZE, "%s-mii", dev_name(dev));
     bus->read_c45 = mxl862xx_phy_read_c45_mii_bus;
+1 -2
drivers/net/dsa/realtek/rtl8365mb.c
···
 
     stats->rx_packets = cnt[RTL8365MB_MIB_ifInUcastPkts] +
                         cnt[RTL8365MB_MIB_ifInMulticastPkts] +
-                        cnt[RTL8365MB_MIB_ifInBroadcastPkts] -
-                        cnt[RTL8365MB_MIB_ifOutDiscards];
+                        cnt[RTL8365MB_MIB_ifInBroadcastPkts];
 
     stats->tx_packets = cnt[RTL8365MB_MIB_ifOutUcastPkts] +
                         cnt[RTL8365MB_MIB_ifOutMulticastPkts] +
+3 -3
drivers/net/dsa/realtek/rtl8366rb-leds.c
···
     case 0:
         return FIELD_PREP(RTL8366RB_LED_0_X_CTRL_MASK, BIT(port));
     case 1:
-        return FIELD_PREP(RTL8366RB_LED_0_X_CTRL_MASK, BIT(port));
+        return FIELD_PREP(RTL8366RB_LED_X_1_CTRL_MASK, BIT(port));
     case 2:
-        return FIELD_PREP(RTL8366RB_LED_0_X_CTRL_MASK, BIT(port));
+        return FIELD_PREP(RTL8366RB_LED_2_X_CTRL_MASK, BIT(port));
     case 3:
-        return FIELD_PREP(RTL8366RB_LED_0_X_CTRL_MASK, BIT(port));
+        return FIELD_PREP(RTL8366RB_LED_X_3_CTRL_MASK, BIT(port));
     default:
         return 0;
     }
+3 -4
drivers/net/dsa/sja1105/sja1105_main.c
···
         goto out;
     }
 
+    rc = sja1105_reload_cbs(priv);
+
+out:
     dsa_switch_for_each_available_port(dp, ds)
         if (dp->pl)
             phylink_replay_link_end(dp->pl);
 
-    rc = sja1105_reload_cbs(priv);
-    if (rc < 0)
-        goto out;
-out:
     mutex_unlock(&priv->mgmt_lock);
     mutex_unlock(&priv->fdb_lock);
+10 -9
drivers/net/ethernet/amd/xgbe/xgbe-drv.c
···
     if (ret)
         goto err_napi;
 
+    /* Reset the phy settings */
+    ret = xgbe_phy_reset(pdata);
+    if (ret)
+        goto err_irqs;
+
+    /* Start the phy */
     ret = phy_if->phy_start(pdata);
     if (ret)
         goto err_irqs;
 
     hw_if->enable_tx(pdata);
     hw_if->enable_rx(pdata);
+    /* Synchronize flag with hardware state after enabling TX/RX.
+     * This prevents stale state after device restart cycles.
+     */
+    pdata->data_path_stopped = false;
 
     udp_tunnel_nic_reset_ntf(netdev);
-
-    /* Reset the phy settings */
-    ret = xgbe_phy_reset(pdata);
-    if (ret)
-        goto err_txrx;
 
     netif_tx_start_all_queues(netdev);
 
···
     clear_bit(XGBE_STOPPED, &pdata->dev_state);
 
     return 0;
-
-err_txrx:
-    hw_if->disable_rx(pdata);
-    hw_if->disable_tx(pdata);
 
 err_irqs:
     xgbe_free_irqs(pdata);
+75 -7
drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
···
 static void xgbe_rx_adaptation(struct xgbe_prv_data *pdata)
 {
     struct xgbe_phy_data *phy_data = pdata->phy_data;
-    unsigned int reg;
+    int reg;
 
     /* step 2: force PCS to send RX_ADAPT Req to PHY */
     XMDIO_WRITE_BITS(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_RX_EQ_CTRL4,
···
 
     /* Step 4: Check for Block lock */
 
-    /* Link status is latched low, so read once to clear
-     * and then read again to get current state
+    reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_STAT1);
+    if (reg < 0)
+        goto set_mode;
+
+    /* Link status is latched low so that momentary link drops
+     * can be detected. If link was already down read again
+     * to get the latest state.
      */
-    reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_STAT1);
-    reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_STAT1);
+    if (!pdata->phy.link && !(reg & MDIO_STAT1_LSTATUS)) {
+        reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_STAT1);
+        if (reg < 0)
+            goto set_mode;
+    }
+
     if (reg & MDIO_STAT1_LSTATUS) {
         /* If the block lock is found, update the helpers
          * and declare the link up
···
 
         /* perform rx adaptation */
         xgbe_rx_adaptation(pdata);
+}
+
+/*
+ * xgbe_phy_stop_data_path - Stop TX/RX to prevent packet corruption
+ * @pdata: driver private data
+ *
+ * This function stops the data path (TX and RX) to prevent packet
+ * corruption during critical PHY operations like RX adaptation.
+ * Must be called before initiating RX adaptation when link goes down.
+ */
+static void xgbe_phy_stop_data_path(struct xgbe_prv_data *pdata)
+{
+    if (pdata->data_path_stopped)
+        return;
+
+    /* Stop TX/RX to prevent packet corruption during RX adaptation */
+    pdata->hw_if.disable_tx(pdata);
+    pdata->hw_if.disable_rx(pdata);
+    pdata->data_path_stopped = true;
+
+    netif_dbg(pdata, link, pdata->netdev,
+              "stopping data path for RX adaptation\n");
+}
+
+/*
+ * xgbe_phy_start_data_path - Re-enable TX/RX after RX adaptation
+ * @pdata: driver private data
+ *
+ * This function re-enables the data path (TX and RX) after RX adaptation
+ * has completed successfully. Only called when link is confirmed up.
+ */
+static void xgbe_phy_start_data_path(struct xgbe_prv_data *pdata)
+{
+    if (!pdata->data_path_stopped)
+        return;
+
+    pdata->hw_if.enable_rx(pdata);
+    pdata->hw_if.enable_tx(pdata);
+    pdata->data_path_stopped = false;
+
+    netif_dbg(pdata, link, pdata->netdev,
+              "restarting data path after RX adaptation\n");
 }
 
 static void xgbe_phy_rx_reset(struct xgbe_prv_data *pdata)
···
     if (pdata->en_rx_adap) {
         /* if the link is available and adaptation is done,
          * declare link up
+         *
+         * Note: When link is up and adaptation is done, we can
+         * safely re-enable the data path if it was stopped
+         * for adaptation.
          */
-        if ((reg & MDIO_STAT1_LSTATUS) && pdata->rx_adapt_done)
+        if ((reg & MDIO_STAT1_LSTATUS) && pdata->rx_adapt_done) {
+            xgbe_phy_start_data_path(pdata);
             return 1;
+        }
         /* If either link is not available or adaptation is not done,
          * retrigger the adaptation logic. (if the mode is not set,
          * then issue mailbox command first)
          */
+
+        /* CRITICAL: Stop data path BEFORE triggering RX adaptation
+         * to prevent CRC errors from packets corrupted during
+         * the adaptation process. This is especially important
+         * when AN is OFF in 10G KR mode.
+         */
+        xgbe_phy_stop_data_path(pdata);
+
         if (pdata->mode_set) {
             xgbe_phy_rx_adaptation(pdata);
         } else {
···
             xgbe_phy_set_mode(pdata, phy_data->cur_mode);
         }
 
-        if (pdata->rx_adapt_done)
+        if (pdata->rx_adapt_done) {
+            /* Adaptation complete, safe to re-enable data path */
+            xgbe_phy_start_data_path(pdata);
             return 1;
+        }
     } else if (reg & MDIO_STAT1_LSTATUS)
         return 1;
 
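
The stop/start helpers above are deliberately idempotent: the data_path_stopped flag makes repeated calls from the retry loop harmless, so the adaptation logic never has to track whether it already quiesced the MAC. A small compilable sketch of that guard (the xgbe names are reused only as labels):

#include <stdbool.h>
#include <stdio.h>

struct dev_state {
    bool data_path_stopped;
};

static void stop_data_path(struct dev_state *d)
{
    if (d->data_path_stopped)
        return;     /* already quiesced: nothing to do */
    puts("TX/RX disabled");
    d->data_path_stopped = true;
}

static void start_data_path(struct dev_state *d)
{
    if (!d->data_path_stopped)
        return;     /* never stopped: keep running */
    puts("TX/RX enabled");
    d->data_path_stopped = false;
}

int main(void)
{
    struct dev_state d = { .data_path_stopped = false };

    /* a retry loop may call stop several times; only the first acts */
    stop_data_path(&d);
    stop_data_path(&d);
    start_data_path(&d);
    return 0;
}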
+4
drivers/net/ethernet/amd/xgbe/xgbe.h
···
     bool en_rx_adap;
     int rx_adapt_retries;
     bool rx_adapt_done;
+    /* Flag to track if data path (TX/RX) was stopped for RX adaptation.
+     * This prevents packet corruption during the adaptation window.
+     */
+    bool data_path_stopped;
     bool mode_set;
     bool sph;
 };
+11
drivers/net/ethernet/arc/emac_main.c
···
     /* Set poll rate so that it polls every 1 ms */
     arc_reg_set(priv, R_POLLRATE, clock_frequency / 1000000);
 
+    /*
+     * Put the device into a known quiescent state before requesting
+     * the IRQ. Clear only EMAC interrupt status bits here; leave the
+     * MDIO completion bit alone and avoid writing TXPL_MASK, which is
+     * used to force TX polling rather than acknowledge interrupts.
+     */
+    arc_reg_set(priv, R_ENABLE, 0);
+    arc_reg_set(priv, R_STATUS, RXINT_MASK | TXINT_MASK | ERR_MASK |
+                TXCH_MASK | MSER_MASK | RXCR_MASK |
+                RXFR_MASK | RXFL_MASK);
+
     ndev->irq = irq;
     dev_info(dev, "IRQ is %d\n", ndev->irq);
 
+2 -2
drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
···
 
     if (bnxt_get_nr_rss_ctxs(bp, req_rx_rings) !=
         bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings) &&
-        netif_is_rxfh_configured(dev)) {
-        netdev_warn(dev, "RSS table size change required, RSS table entries must be default to proceed\n");
+        (netif_is_rxfh_configured(dev) || bp->num_rss_ctx)) {
+        netdev_warn(dev, "RSS table size change required, RSS table entries must be default (with no additional RSS contexts present) to proceed\n");
         return -EINVAL;
     }
 
+12 -19
drivers/net/ethernet/broadcom/genet/bcmgenet.c
···
     }
 }
 
-void bcmgenet_eee_enable_set(struct net_device *dev, bool enable,
-                             bool tx_lpi_enabled)
+void bcmgenet_eee_enable_set(struct net_device *dev, bool enable)
 {
     struct bcmgenet_priv *priv = netdev_priv(dev);
     u32 off = priv->hw_params->tbuf_offset + TBUF_ENERGY_CTRL;
···
 
     /* Enable EEE and switch to a 27Mhz clock automatically */
     reg = bcmgenet_readl(priv->base + off);
-    if (tx_lpi_enabled)
+    if (enable)
         reg |= TBUF_EEE_EN | TBUF_PM_EN;
     else
         reg &= ~(TBUF_EEE_EN | TBUF_PM_EN);
···
         priv->clk_eee_enabled = false;
     }
 
-    priv->eee.eee_enabled = enable;
-    priv->eee.tx_lpi_enabled = tx_lpi_enabled;
 }
 
 static int bcmgenet_get_eee(struct net_device *dev, struct ethtool_keee *e)
 {
     struct bcmgenet_priv *priv = netdev_priv(dev);
-    struct ethtool_keee *p = &priv->eee;
+    int ret;
 
     if (GENET_IS_V1(priv))
         return -EOPNOTSUPP;
···
     if (!dev->phydev)
         return -ENODEV;
 
-    e->tx_lpi_enabled = p->tx_lpi_enabled;
+    ret = phy_ethtool_get_eee(dev->phydev, e);
+    if (ret)
+        return ret;
+
+    /* tx_lpi_timer is maintained by the MAC hardware register; the
+     * PHY-level eee_cfg timer is not set for GENET.
+     */
     e->tx_lpi_timer = bcmgenet_umac_readl(priv, UMAC_EEE_LPI_TIMER);
 
-    return phy_ethtool_get_eee(dev->phydev, e);
+    return 0;
 }
 
 static int bcmgenet_set_eee(struct net_device *dev, struct ethtool_keee *e)
 {
     struct bcmgenet_priv *priv = netdev_priv(dev);
-    struct ethtool_keee *p = &priv->eee;
-    bool active;
 
     if (GENET_IS_V1(priv))
         return -EOPNOTSUPP;
···
     if (!dev->phydev)
         return -ENODEV;
 
-    p->eee_enabled = e->eee_enabled;
-
-    if (!p->eee_enabled) {
-        bcmgenet_eee_enable_set(dev, false, false);
-    } else {
-        active = phy_init_eee(dev->phydev, false) >= 0;
-        bcmgenet_umac_writel(priv, e->tx_lpi_timer, UMAC_EEE_LPI_TIMER);
-        bcmgenet_eee_enable_set(dev, active, e->tx_lpi_enabled);
-    }
+    bcmgenet_umac_writel(priv, e->tx_lpi_timer, UMAC_EEE_LPI_TIMER);
 
     return phy_ethtool_set_eee(dev->phydev, e);
 }
+1 -4
drivers/net/ethernet/broadcom/genet/bcmgenet.h
···
     u8 sopass[SOPASS_MAX];
 
     struct bcmgenet_mib_counters mib;
-
-    struct ethtool_keee eee;
 };
 
 static inline bool bcmgenet_has_40bits(struct bcmgenet_priv *priv)
···
 int bcmgenet_wol_power_up_cfg(struct bcmgenet_priv *priv,
                               enum bcmgenet_power_mode mode);
 
-void bcmgenet_eee_enable_set(struct net_device *dev, bool enable,
-                             bool tx_lpi_enabled);
+void bcmgenet_eee_enable_set(struct net_device *dev, bool enable);
 
 #endif /* __BCMGENET_H__ */
+5 -5
drivers/net/ethernet/broadcom/genet/bcmmii.c
···
     struct bcmgenet_priv *priv = netdev_priv(dev);
     struct phy_device *phydev = dev->phydev;
     u32 reg, cmd_bits = 0;
-    bool active;
 
     /* speed */
     if (phydev->speed == SPEED_1000)
···
     bcmgenet_umac_writel(priv, reg, UMAC_CMD);
     spin_unlock_bh(&priv->reg_lock);
 
-    active = phy_init_eee(phydev, 0) >= 0;
-    bcmgenet_eee_enable_set(dev,
-                            priv->eee.eee_enabled && active,
-                            priv->eee.tx_lpi_enabled);
 }
 
 /* setup netdev link state when PHY link status change and
···
         reg &= ~RGMII_LINK;
         bcmgenet_ext_writel(priv, reg, EXT_RGMII_OOB_CTRL);
     }
+
+    bcmgenet_eee_enable_set(dev, phydev->enable_tx_lpi);
 
     phy_print_status(phydev);
 }
···
 
     /* Indicate that the MAC is responsible for PHY PM */
     dev->phydev->mac_managed_pm = true;
+
+    if (!GENET_IS_V1(priv))
+        phy_support_eee(dev->phydev);
 
     return 0;
 }
+95 -3
drivers/net/ethernet/cadence/macb_main.c
···
 #include <linux/tcp.h>
 #include <linux/types.h>
 #include <linux/udp.h>
+#include <linux/gcd.h>
 #include <net/pkt_sched.h>
 #include "macb.h"
 
···
     netif_tx_stop_all_queues(ndev);
 }
 
+/* Use juggling algorithm to left rotate tx ring and tx skb array */
+static void gem_shuffle_tx_one_ring(struct macb_queue *queue)
+{
+    unsigned int head, tail, count, ring_size, desc_size;
+    struct macb_tx_skb tx_skb, *skb_curr, *skb_next;
+    struct macb_dma_desc *desc_curr, *desc_next;
+    unsigned int i, cycles, shift, curr, next;
+    struct macb *bp = queue->bp;
+    unsigned char desc[24];
+    unsigned long flags;
+
+    desc_size = macb_dma_desc_get_size(bp);
+
+    if (WARN_ON_ONCE(desc_size > ARRAY_SIZE(desc)))
+        return;
+
+    spin_lock_irqsave(&queue->tx_ptr_lock, flags);
+    head = queue->tx_head;
+    tail = queue->tx_tail;
+    ring_size = bp->tx_ring_size;
+    count = CIRC_CNT(head, tail, ring_size);
+
+    if (!(tail % ring_size))
+        goto unlock;
+
+    if (!count) {
+        queue->tx_head = 0;
+        queue->tx_tail = 0;
+        goto unlock;
+    }
+
+    shift = tail % ring_size;
+    cycles = gcd(ring_size, shift);
+
+    for (i = 0; i < cycles; i++) {
+        memcpy(&desc, macb_tx_desc(queue, i), desc_size);
+        memcpy(&tx_skb, macb_tx_skb(queue, i),
+               sizeof(struct macb_tx_skb));
+
+        curr = i;
+        next = (curr + shift) % ring_size;
+
+        while (next != i) {
+            desc_curr = macb_tx_desc(queue, curr);
+            desc_next = macb_tx_desc(queue, next);
+
+            memcpy(desc_curr, desc_next, desc_size);
+
+            if (next == ring_size - 1)
+                desc_curr->ctrl &= ~MACB_BIT(TX_WRAP);
+            if (curr == ring_size - 1)
+                desc_curr->ctrl |= MACB_BIT(TX_WRAP);
+
+            skb_curr = macb_tx_skb(queue, curr);
+            skb_next = macb_tx_skb(queue, next);
+            memcpy(skb_curr, skb_next, sizeof(struct macb_tx_skb));
+
+            curr = next;
+            next = (curr + shift) % ring_size;
+        }
+
+        desc_curr = macb_tx_desc(queue, curr);
+        memcpy(desc_curr, &desc, desc_size);
+        if (i == ring_size - 1)
+            desc_curr->ctrl &= ~MACB_BIT(TX_WRAP);
+        if (curr == ring_size - 1)
+            desc_curr->ctrl |= MACB_BIT(TX_WRAP);
+        memcpy(macb_tx_skb(queue, curr), &tx_skb,
+               sizeof(struct macb_tx_skb));
+    }
+
+    queue->tx_head = count;
+    queue->tx_tail = 0;
+
+    /* Make descriptor updates visible to hardware */
+    wmb();
+
+unlock:
+    spin_unlock_irqrestore(&queue->tx_ptr_lock, flags);
+}
+
+/* Rotate the queue so that the tail is at index 0 */
+static void gem_shuffle_tx_rings(struct macb *bp)
+{
+    struct macb_queue *queue;
+    int q;
+
+    for (q = 0, queue = bp->queues; q < bp->num_queues; q++, queue++)
+        gem_shuffle_tx_one_ring(queue);
+}
+
 static void macb_mac_link_up(struct phylink_config *config,
                              struct phy_device *phy,
                              unsigned int mode, phy_interface_t interface,
···
         ctrl |= MACB_BIT(PAE);
 
     for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
-        queue->tx_head = 0;
-        queue->tx_tail = 0;
         queue_writel(queue, IER,
                      bp->rx_intr_mask | MACB_TX_INT_FLAGS | MACB_BIT(HRESP));
     }
···
 
     spin_unlock_irqrestore(&bp->lock, flags);
 
-    if (!(bp->caps & MACB_CAPS_MACB_IS_EMAC))
+    if (!(bp->caps & MACB_CAPS_MACB_IS_EMAC)) {
         macb_set_tx_clk(bp, speed);
+        gem_shuffle_tx_rings(bp);
+    }
 
     /* Enable Rx and Tx; Enable PTP unicast */
     ctrl = macb_readl(bp, NCR);
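
gem_shuffle_tx_one_ring() above is the classic juggling (gcd-cycle) in-place rotation: rotating an N-element ring left by shift positions decomposes into gcd(N, shift) disjoint cycles, each walked once while one saved element keeps the cycle from overwriting itself. A self-contained demonstration on a plain int array, independent of the driver's descriptor layout:

#include <stdio.h>

static unsigned int gcd(unsigned int a, unsigned int b)
{
    while (b) {
        unsigned int t = a % b;
        a = b;
        b = t;
    }
    return a;
}

/* Left-rotate ring[0..n-1] by shift positions, in place, O(n) moves. */
static void rotate_left(int *ring, unsigned int n, unsigned int shift)
{
    unsigned int cycles = gcd(n, shift);

    for (unsigned int i = 0; i < cycles; i++) {
        int saved = ring[i];
        unsigned int curr = i, next = (curr + shift) % n;

        while (next != i) {
            ring[curr] = ring[next];    /* pull element back */
            curr = next;
            next = (curr + shift) % n;
        }
        ring[curr] = saved;             /* close the cycle */
    }
}

int main(void)
{
    int ring[8] = { 0, 1, 2, 3, 4, 5, 6, 7 };

    rotate_left(ring, 8, 3);    /* old index 3 (the "tail") lands at 0 */
    for (int i = 0; i < 8; i++)
        printf("%d ", ring[i]); /* prints: 3 4 5 6 7 0 1 2 */
    putchar('\n');
    return 0;
}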
+10 -14
drivers/net/ethernet/freescale/enetc/netc_blk_ctrl.c
···
 
     mdio_node = of_get_child_by_name(np, "mdio");
     if (!mdio_node)
-        return 0;
+        return -ENODEV;
 
     phy_node = of_get_next_child(mdio_node, NULL);
-    if (!phy_node)
+    if (!phy_node) {
+        err = -ENODEV;
         goto of_put_mdio_node;
+    }
 
     err = of_property_read_u32(phy_node, "reg", &addr);
     if (err)
···
 
         addr = netc_get_phy_addr(gchild);
         if (addr < 0) {
+            if (addr == -ENODEV)
+                continue;
+
             dev_err(dev, "Failed to get PHY address\n");
             return addr;
         }
···
                 "Find same PHY address in EMDIO and ENETC node\n");
             return -EINVAL;
         }
-
-        /* The default value of LaBCR[MDIO_PHYAD_PRTAD ] is
-         * 0, so no need to set the register.
-         */
-        if (!addr)
-            continue;
 
         switch (bus_devfn) {
         case IMX95_ENETC0_BUS_DEVFN:
···
 
     addr = netc_get_phy_addr(np);
     if (addr < 0) {
+        if (addr == -ENODEV)
+            return 0;
+
         dev_err(dev, "Failed to get PHY address\n");
         return addr;
     }
-
-    /* The default value of LaBCR[MDIO_PHYAD_PRTAD] is 0,
-     * so no need to set the register.
-     */
-    if (!addr)
-        return 0;
 
     if (phy_mask & BIT(addr)) {
         dev_err(dev,
-2
drivers/net/ethernet/intel/e1000/e1000_main.c
···
 dma_error:
     dev_err(&pdev->dev, "TX DMA map failed\n");
     buffer_info->dma = 0;
-    if (count)
-        count--;
 
     while (count--) {
         if (i == 0)
-2
drivers/net/ethernet/intel/e1000e/netdev.c
···
 dma_error:
     dev_err(&pdev->dev, "Tx DMA map failed\n");
     buffer_info->dma = 0;
-    if (count)
-        count--;
 
     while (count--) {
         if (i == 0)
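
In both e1000 and e1000e, count holds the number of fragments successfully mapped before the failure, so while (count--) already unwinds exactly that many; the extra pre-decrement left the most recently mapped fragment unmapped (a DMA mapping leak). A toy model of the corrected unwind, with counters standing in for the map/unmap calls (all names invented):

#include <assert.h>
#include <stdio.h>

static int mapped;  /* stand-in for outstanding DMA mappings */

static int map_frag(int idx, int fail_at)
{
    if (idx == fail_at)
        return -1;
    mapped++;
    return 0;
}

static void unmap_frag(void) { mapped--; }

static int xmit(int nfrags, int fail_at)
{
    int count = 0;

    for (int i = 0; i < nfrags; i++) {
        if (map_frag(i, fail_at))
            goto dma_error;
        count++;    /* counts only successful mappings */
    }
    return 0;

dma_error:
    /* no extra "count--" here: unwind every mapped fragment */
    while (count--)
        unmap_frag();
    return -1;
}

int main(void)
{
    xmit(8, 5);             /* fail on the 6th fragment */
    assert(mapped == 0);    /* balanced: nothing leaked */
    puts("unwind balanced");
    return 0;
}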
+7 -7
drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
···
         cfilter.n_proto = ETH_P_IP;
         if (mask.dst_ip[0] & tcf.dst_ip[0])
             memcpy(&cfilter.ip.v4.dst_ip, tcf.dst_ip,
-                   ARRAY_SIZE(tcf.dst_ip));
-        else if (mask.src_ip[0] & tcf.dst_ip[0])
+                   sizeof(cfilter.ip.v4.dst_ip));
+        else if (mask.src_ip[0] & tcf.src_ip[0])
             memcpy(&cfilter.ip.v4.src_ip, tcf.src_ip,
-                   ARRAY_SIZE(tcf.dst_ip));
+                   sizeof(cfilter.ip.v4.src_ip));
         break;
     case VIRTCHNL_TCP_V6_FLOW:
         cfilter.n_proto = ETH_P_IPV6;
···
     /* for ipv6, mask is set for all sixteen bytes (4 words) */
     if (cfilter.n_proto == ETH_P_IPV6 && mask.dst_ip[3])
         if (memcmp(&cfilter.ip.v6.dst_ip6, &cf->ip.v6.dst_ip6,
-                   sizeof(cfilter.ip.v6.src_ip6)))
+                   sizeof(cfilter.ip.v6.dst_ip6)))
             continue;
     if (mask.vlan_id)
         if (cfilter.vlan_id != cf->vlan_id)
···
         cfilter->n_proto = ETH_P_IP;
         if (mask.dst_ip[0] & tcf.dst_ip[0])
             memcpy(&cfilter->ip.v4.dst_ip, tcf.dst_ip,
-                   ARRAY_SIZE(tcf.dst_ip));
-        else if (mask.src_ip[0] & tcf.dst_ip[0])
+                   sizeof(cfilter->ip.v4.dst_ip));
+        else if (mask.src_ip[0] & tcf.src_ip[0])
             memcpy(&cfilter->ip.v4.src_ip, tcf.src_ip,
-                   ARRAY_SIZE(tcf.dst_ip));
+                   sizeof(cfilter->ip.v4.src_ip));
         break;
     case VIRTCHNL_TCP_V6_FLOW:
         cfilter->n_proto = ETH_P_IPV6;
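
The i40e bug is the standard ARRAY_SIZE/sizeof confusion: ARRAY_SIZE yields the element count (4 for a u32[4] address), while memcpy wants a byte count (16), so only a quarter of the address was copied. A minimal reproduction (struct and field names are illustrative, not from the driver):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

struct filter {
    uint32_t dst_ip[4];     /* room for an IPv6 address */
};

int main(void)
{
    uint32_t src[4] = { 0x11111111, 0x22222222, 0x33333333, 0x44444444 };
    struct filter f = { 0 };

    /* buggy: ARRAY_SIZE(src) == 4, so only 4 bytes are copied */
    memcpy(f.dst_ip, src, ARRAY_SIZE(src));
    printf("ARRAY_SIZE copy: %08x %08x\n", f.dst_ip[0], f.dst_ip[1]);

    /* fixed: sizeof(f.dst_ip) == 16, all four words are copied */
    memcpy(f.dst_ip, src, sizeof(f.dst_ip));
    printf("sizeof copy:     %08x %08x\n", f.dst_ip[0], f.dst_ip[1]);
    return 0;
}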
+1 -2
drivers/net/ethernet/intel/iavf/iavf.h
···
     struct work_struct adminq_task;
     struct work_struct finish_config;
     wait_queue_head_t down_waitqueue;
-    wait_queue_head_t reset_waitqueue;
     wait_queue_head_t vc_waitqueue;
     struct iavf_q_vector *q_vectors;
     struct list_head vlan_filter_list;
···
 void iavf_del_adv_rss_cfg(struct iavf_adapter *adapter);
 struct iavf_mac_filter *iavf_add_filter(struct iavf_adapter *adapter,
                                         const u8 *macaddr);
-int iavf_wait_for_reset(struct iavf_adapter *adapter);
+void iavf_reset_step(struct iavf_adapter *adapter);
 #endif /* _IAVF_H_ */
+6 -13
drivers/net/ethernet/intel/iavf/iavf_ethtool.c
···
 {
     struct iavf_adapter *adapter = netdev_priv(netdev);
     u32 new_rx_count, new_tx_count;
-    int ret = 0;
 
     if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
         return -EINVAL;
···
     }
 
     if (netif_running(netdev)) {
-        iavf_schedule_reset(adapter, IAVF_FLAG_RESET_NEEDED);
-        ret = iavf_wait_for_reset(adapter);
-        if (ret)
-            netdev_warn(netdev, "Changing ring parameters timeout or interrupted waiting for reset");
+        adapter->flags |= IAVF_FLAG_RESET_NEEDED;
+        iavf_reset_step(adapter);
     }
 
-    return ret;
+    return 0;
 }
 
 /**
···
 {
     struct iavf_adapter *adapter = netdev_priv(netdev);
     u32 num_req = ch->combined_count;
-    int ret = 0;
 
     if ((adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ) &&
         adapter->num_tc) {
···
 
     adapter->num_req_queues = num_req;
     adapter->flags |= IAVF_FLAG_REINIT_ITR_NEEDED;
-    iavf_schedule_reset(adapter, IAVF_FLAG_RESET_NEEDED);
+    adapter->flags |= IAVF_FLAG_RESET_NEEDED;
+    iavf_reset_step(adapter);
 
-    ret = iavf_wait_for_reset(adapter);
-    if (ret)
-        netdev_warn(netdev, "Changing channel count timeout or interrupted waiting for reset");
-
-    return ret;
+    return 0;
 }
 
 /**
+28 -53
drivers/net/ethernet/intel/iavf/iavf_main.c
···
 }
 
 /**
- * iavf_wait_for_reset - Wait for reset to finish.
- * @adapter: board private structure
- *
- * Returns 0 if reset finished successfully, negative on timeout or interrupt.
- */
-int iavf_wait_for_reset(struct iavf_adapter *adapter)
-{
-    int ret = wait_event_interruptible_timeout(adapter->reset_waitqueue,
-                                               !iavf_is_reset_in_progress(adapter),
-                                               msecs_to_jiffies(5000));
-
-    /* If ret < 0 then it means wait was interrupted.
-     * If ret == 0 then it means we got a timeout while waiting
-     * for reset to finish.
-     * If ret > 0 it means reset has finished.
-     */
-    if (ret > 0)
-        return 0;
-    else if (ret < 0)
-        return -EINTR;
-    else
-        return -EBUSY;
-}
-
-/**
  * iavf_allocate_dma_mem_d - OS specific memory alloc for shared code
  * @hw: pointer to the HW structure
  * @mem: ptr to mem struct to fill out
···
 
     adapter->flags |= IAVF_FLAG_PF_COMMS_FAILED;
 
+    iavf_ptp_release(adapter);
+
     /* We don't use netif_running() because it may be true prior to
      * ndo_open() returning, so we can't assume it means all our open
      * tasks have finished, since we're not holding the rtnl_lock here.
···
 }
 
 /**
- * iavf_reset_task - Call-back task to handle hardware reset
- * @work: pointer to work_struct
+ * iavf_reset_step - Perform the VF reset sequence
+ * @adapter: board private structure
  *
- * During reset we need to shut down and reinitialize the admin queue
- * before we can use it to communicate with the PF again. We also clear
- * and reinit the rings because that context is lost as well.
- **/
-static void iavf_reset_task(struct work_struct *work)
+ * Requests a reset from PF, polls for completion, and reconfigures
+ * the driver. Caller must hold the netdev instance lock.
+ *
+ * This can sleep for several seconds while polling HW registers.
+ */
+void iavf_reset_step(struct iavf_adapter *adapter)
 {
-    struct iavf_adapter *adapter = container_of(work,
-                                                struct iavf_adapter,
-                                                reset_task);
     struct virtchnl_vf_resource *vfres = adapter->vf_res;
     struct net_device *netdev = adapter->netdev;
     struct iavf_hw *hw = &adapter->hw;
···
     int i = 0, err;
     bool running;
 
-    netdev_lock(netdev);
+    netdev_assert_locked(netdev);
 
     iavf_misc_irq_disable(adapter);
     if (adapter->flags & IAVF_FLAG_RESET_NEEDED) {
···
         dev_err(&adapter->pdev->dev, "Reset never finished (%x)\n",
                 reg_val);
         iavf_disable_vf(adapter);
-        netdev_unlock(netdev);
         return; /* Do not attempt to reinit. It's dead, Jim. */
     }
···
         iavf_startup(adapter);
         queue_delayed_work(adapter->wq, &adapter->watchdog_task,
                            msecs_to_jiffies(30));
-        netdev_unlock(netdev);
         return;
     }
···
 
     iavf_change_state(adapter, __IAVF_RESETTING);
     adapter->flags &= ~IAVF_FLAG_RESET_PENDING;
+
+    iavf_ptp_release(adapter);
 
     /* free the Tx/Rx rings and descriptors, might be better to just
      * re-use them sometime in the future
···
 
     adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED;
 
-    wake_up(&adapter->reset_waitqueue);
-    netdev_unlock(netdev);
-
     return;
 reset_err:
     if (running) {
···
     }
     iavf_disable_vf(adapter);
 
-    netdev_unlock(netdev);
     dev_err(&adapter->pdev->dev, "failed to allocate resources during reinit\n");
+}
+
+static void iavf_reset_task(struct work_struct *work)
+{
+    struct iavf_adapter *adapter = container_of(work,
+                                                struct iavf_adapter,
+                                                reset_task);
+    struct net_device *netdev = adapter->netdev;
+
+    netdev_lock(netdev);
+    iavf_reset_step(adapter);
+    netdev_unlock(netdev);
 }
 
 /**
···
 static int iavf_change_mtu(struct net_device *netdev, int new_mtu)
 {
     struct iavf_adapter *adapter = netdev_priv(netdev);
-    int ret = 0;
 
     netdev_dbg(netdev, "changing MTU from %d to %d\n",
                netdev->mtu, new_mtu);
     WRITE_ONCE(netdev->mtu, new_mtu);
 
     if (netif_running(netdev)) {
-        iavf_schedule_reset(adapter, IAVF_FLAG_RESET_NEEDED);
-        ret = iavf_wait_for_reset(adapter);
-        if (ret < 0)
-            netdev_warn(netdev, "MTU change interrupted waiting for reset");
-        else if (ret)
-            netdev_warn(netdev, "MTU change timed out waiting for reset");
+        adapter->flags |= IAVF_FLAG_RESET_NEEDED;
+        iavf_reset_step(adapter);
     }
 
-    return ret;
+    return 0;
 }
 
 /**
···
 
     /* Setup the wait queue for indicating transition to down status */
     init_waitqueue_head(&adapter->down_waitqueue);
-
-    /* Setup the wait queue for indicating transition to running state */
-    init_waitqueue_head(&adapter->reset_waitqueue);
 
     /* Setup the wait queue for indicating virtchannel events */
     init_waitqueue_head(&adapter->vc_waitqueue);
-1
drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
···
     case VIRTCHNL_OP_ENABLE_QUEUES:
         /* enable transmits */
         iavf_irq_enable(adapter, true);
-        wake_up(&adapter->reset_waitqueue);
         adapter->flags &= ~IAVF_FLAG_QUEUES_DISABLED;
         break;
     case VIRTCHNL_OP_DISABLE_QUEUES:
+2 -2
drivers/net/ethernet/intel/ice/devlink/devlink.c
···
 
     cdev = pf->cdev_info;
     if (!cdev)
-        return -ENODEV;
+        return -EOPNOTSUPP;
 
     ctx->val.vbool = !!(cdev->rdma_protocol & IIDC_RDMA_PROTOCOL_ROCEV2);
 
···
 
     cdev = pf->cdev_info;
     if (!cdev)
-        return -ENODEV;
+        return -EOPNOTSUPP;
 
     ctx->val.vbool = !!(cdev->rdma_protocol & IIDC_RDMA_PROTOCOL_IWARP);
 
+3 -3
drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c
···
         rvu_report_pair_end(fmsg);
         break;
     case NIX_AF_RVU_RAS:
-        intr_val = nix_event_context->nix_af_rvu_err;
+        intr_val = nix_event_context->nix_af_rvu_ras;
         rvu_report_pair_start(fmsg, "NIX_AF_RAS");
         devlink_fmsg_u64_pair_put(fmsg, "\tNIX RAS Interrupt Reg ",
-                                  nix_event_context->nix_af_rvu_err);
+                                  nix_event_context->nix_af_rvu_ras);
         devlink_fmsg_string_put(fmsg, "\n\tPoison Data on:");
         if (intr_val & BIT_ULL(34))
             devlink_fmsg_string_put(fmsg, "\n\tNIX_AQ_INST_S");
···
     if (blkaddr < 0)
         return blkaddr;
 
-    if (nix_event_ctx->nix_af_rvu_int)
+    if (nix_event_ctx->nix_af_rvu_ras)
         rvu_write64(rvu, blkaddr, NIX_AF_RAS_ENA_W1S, ~0ULL);
 
     return 0;
-1
drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
···
               "SQ 0x%x: cc (0x%x) != pc (0x%x)\n",
               sq->sqn, sq->cc, sq->pc);
     sq->cc = 0;
-    sq->dma_fifo_cc = 0;
     sq->pc = 0;
 }
 
+1 -1
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
···
         goto out;
 
     peer_priv = mlx5_devcom_get_next_peer_data(priv->devcom, &tmp);
-    if (peer_priv)
+    if (peer_priv && peer_priv->ipsec)
         complete_all(&peer_priv->ipsec->comp);
 
     mlx5_devcom_for_each_peer_end(priv->devcom);
+9 -14
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
···
     struct skb_shared_info *sinfo;
     u32 frag_consumed_bytes;
     struct bpf_prog *prog;
+    u8 nr_frags_free = 0;
     struct sk_buff *skb;
     dma_addr_t addr;
     u32 truesize;
···
 
     prog = rcu_dereference(rq->xdp_prog);
     if (prog) {
-        u8 nr_frags_free, old_nr_frags = sinfo->nr_frags;
+        u8 old_nr_frags = sinfo->nr_frags;
 
         if (mlx5e_xdp_handle(rq, prog, mxbuf)) {
             if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT,
                                      rq->flags)) {
                 struct mlx5e_wqe_frag_info *pwi;
 
-                wi -= old_nr_frags - sinfo->nr_frags;
 
                 for (pwi = head_wi; pwi < wi; pwi++)
                     pwi->frag_page->frags++;
···
         }
 
         nr_frags_free = old_nr_frags - sinfo->nr_frags;
-        if (unlikely(nr_frags_free)) {
-            wi -= nr_frags_free;
+        if (unlikely(nr_frags_free))
             truesize -= nr_frags_free * frag_info->frag_stride;
-        }
 
     skb = mlx5e_build_linear_skb(
···
 
     if (xdp_buff_has_frags(&mxbuf->xdp)) {
         /* sinfo->nr_frags is reset by build_skb, calculate again. */
-        xdp_update_skb_frags_info(skb, wi - head_wi - 1,
+        xdp_update_skb_frags_info(skb, wi - head_wi - nr_frags_free - 1,
                                   sinfo->xdp_frags_size, truesize,
                                   xdp_buff_get_skb_flags(&mxbuf->xdp));
···
 
     if (prog) {
         u8 nr_frags_free, old_nr_frags = sinfo->nr_frags;
+        u8 new_nr_frags;
         u32 len;
 
         if (mlx5e_xdp_handle(rq, prog, mxbuf)) {
             if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags)) {
                 struct mlx5e_frag_page *pfp;
 
-                frag_page -= old_nr_frags - sinfo->nr_frags;
 
                 for (pfp = head_page; pfp < frag_page; pfp++)
                     pfp->frags++;
···
             return NULL; /* page/packet was consumed by XDP */
         }
 
-        nr_frags_free = old_nr_frags - sinfo->nr_frags;
-        if (unlikely(nr_frags_free)) {
-            frag_page -= nr_frags_free;
+        new_nr_frags = sinfo->nr_frags;
+        nr_frags_free = old_nr_frags - new_nr_frags;
+        if (unlikely(nr_frags_free))
             truesize -= (nr_frags_free - 1) * PAGE_SIZE +
                         ALIGN(pg_consumed_bytes,
                               BIT(rq->mpwqe.log_stride_sz));
-        }
 
         len = mxbuf->xdp.data_end - mxbuf->xdp.data;
···
         struct mlx5e_frag_page *pagep;
 
         /* sinfo->nr_frags is reset by build_skb, calculate again. */
-        xdp_update_skb_frags_info(skb, frag_page - head_page,
+        xdp_update_skb_frags_info(skb, new_nr_frags,
                                   sinfo->xdp_frags_size,
                                   truesize,
                                   xdp_buff_get_skb_flags(&mxbuf->xdp));
+4 -3
drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
···
 
 static void mlx5_eswitch_event_handler_unregister(struct mlx5_eswitch *esw)
 {
-    if (esw->mode == MLX5_ESWITCH_OFFLOADS && mlx5_eswitch_is_funcs_handler(esw->dev))
+    if (esw->mode == MLX5_ESWITCH_OFFLOADS &&
+        mlx5_eswitch_is_funcs_handler(esw->dev)) {
         mlx5_eq_notifier_unregister(esw->dev, &esw->esw_funcs.nb);
-
-    flush_workqueue(esw->work_queue);
+        atomic_inc(&esw->esw_funcs.generation);
+    }
 }
 
 static void mlx5_eswitch_clear_vf_vports_info(struct mlx5_eswitch *esw)
+2
drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
···
 struct mlx5_host_work {
     struct work_struct work;
     struct mlx5_eswitch *esw;
+    int work_gen;
 };
 
 struct mlx5_esw_functions {
     struct mlx5_nb nb;
+    atomic_t generation;
     bool host_funcs_disabled;
     u16 num_vfs;
     u16 num_ec_vfs;
+25 -20
drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
···
         flows[peer_vport->index] = flow;
     }
 
-    if (mlx5_esw_host_functions_enabled(esw->dev)) {
-        mlx5_esw_for_each_vf_vport(peer_esw, i, peer_vport,
-                                   mlx5_core_max_vfs(peer_dev)) {
-            esw_set_peer_miss_rule_source_port(esw, peer_esw,
-                                               spec,
-                                               peer_vport->vport);
-
-            flow = mlx5_add_flow_rules(mlx5_eswitch_get_slow_fdb(esw),
-                                       spec, &flow_act, &dest, 1);
-            if (IS_ERR(flow)) {
-                err = PTR_ERR(flow);
-                goto add_vf_flow_err;
-            }
-            flows[peer_vport->index] = flow;
+    mlx5_esw_for_each_vf_vport(peer_esw, i, peer_vport,
+                               mlx5_core_max_vfs(peer_dev)) {
+        esw_set_peer_miss_rule_source_port(esw, peer_esw, spec,
+                                           peer_vport->vport);
+        flow = mlx5_add_flow_rules(mlx5_eswitch_get_slow_fdb(esw),
+                                   spec, &flow_act, &dest, 1);
+        if (IS_ERR(flow)) {
+            err = PTR_ERR(flow);
+            goto add_vf_flow_err;
         }
+        flows[peer_vport->index] = flow;
     }
 
     if (mlx5_core_ec_sriov_enabled(peer_dev)) {
···
         mlx5_del_flow_rules(flows[peer_vport->index]);
     }
 
-    if (mlx5_core_is_ecpf_esw_manager(peer_dev)) {
+    if (mlx5_core_is_ecpf_esw_manager(peer_dev) &&
+        mlx5_esw_host_functions_enabled(peer_dev)) {
         peer_vport = mlx5_eswitch_get_vport(peer_esw, MLX5_VPORT_PF);
         mlx5_del_flow_rules(flows[peer_vport->index]);
     }
···
 }
 
 static void
-esw_vfs_changed_event_handler(struct mlx5_eswitch *esw, const u32 *out)
+esw_vfs_changed_event_handler(struct mlx5_eswitch *esw, int work_gen,
+                              const u32 *out)
 {
     struct devlink *devlink;
     bool host_pf_disabled;
     u16 new_num_vfs;
+
+    devlink = priv_to_devlink(esw->dev);
+    devl_lock(devlink);
+
+    /* Stale work from one or more mode changes ago. Bail out. */
+    if (work_gen != atomic_read(&esw->esw_funcs.generation))
+        goto unlock;
 
     new_num_vfs = MLX5_GET(query_esw_functions_out, out,
                            host_params_context.host_num_of_vfs);
···
                            host_params_context.host_pf_disabled);
 
     if (new_num_vfs == esw->esw_funcs.num_vfs || host_pf_disabled)
-        return;
+        goto unlock;
 
-    devlink = priv_to_devlink(esw->dev);
-    devl_lock(devlink);
     /* Number of VFs can only change from "0 to x" or "x to 0". */
     if (esw->esw_funcs.num_vfs > 0) {
         mlx5_eswitch_unload_vf_vports(esw, esw->esw_funcs.num_vfs);
···
         }
     }
     esw->esw_funcs.num_vfs = new_num_vfs;
+unlock:
     devl_unlock(devlink);
 }
 
···
     if (IS_ERR(out))
         goto out;
 
-    esw_vfs_changed_event_handler(esw, out);
+    esw_vfs_changed_event_handler(esw, host_work->work_gen, out);
     kvfree(out);
 out:
     kfree(host_work);
···
     esw = container_of(esw_funcs, struct mlx5_eswitch, esw_funcs);
 
     host_work->esw = esw;
+    host_work->work_gen = atomic_read(&esw_funcs->generation);
 
     INIT_WORK(&host_work->work, esw_functions_changed_event_handler);
     queue_work(esw->work_queue, &host_work->work);
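
The generation counter used here is a common fix for stale deferred work: each queued item snapshots the counter, the teardown path bumps it, and the handler compares the two under the lock before acting. A compact single-threaded sketch of the idea (names are not from mlx5):

#include <stdatomic.h>
#include <stdio.h>

static atomic_int generation;

struct work {
    int work_gen;   /* snapshot taken at queue time */
};

static void handler(struct work *w)
{
    /* in the driver this check runs under the devlink lock */
    if (w->work_gen != atomic_load(&generation)) {
        puts("stale work: discarded");
        return;
    }
    puts("current work: handled");
}

int main(void)
{
    struct work w = { .work_gen = atomic_load(&generation) };

    handler(&w);                        /* still current: runs */
    atomic_fetch_add(&generation, 1);   /* mode change invalidates it */
    handler(&w);                        /* now stale: bails out */
    return 0;
}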
+1
drivers/net/ethernet/microsoft/mana/gdma_main.c
···
     mana_gd_remove_irqs(pdev);
 free_workqueue:
     destroy_workqueue(gc->service_wq);
+    gc->service_wq = NULL;
     dev_err(&pdev->dev, "%s failed (error %d)\n", __func__, err);
     return err;
 }
+13 -6
drivers/net/ethernet/spacemit/k1_emac.c
···
                                           DMA_FROM_DEVICE);
         if (dma_mapping_error(&priv->pdev->dev, rx_buf->dma_addr)) {
             dev_err_ratelimited(&ndev->dev, "Mapping skb failed\n");
-            goto err_free_skb;
+            dev_kfree_skb_any(skb);
+            rx_buf->skb = NULL;
+            break;
         }
 
         rx_desc_addr = &((struct emac_desc *)rx_ring->desc_addr)[i];
···
 
     rx_ring->head = i;
     return;
-
-err_free_skb:
-    dev_kfree_skb_any(skb);
-    rx_buf->skb = NULL;
 }
 
 /* Returns number of packets received */
···
     struct emac_desc tx_desc, *tx_desc_addr;
     struct device *dev = &priv->pdev->dev;
     struct emac_tx_desc_buffer *tx_buf;
-    u32 head, old_head, frag_num, f;
+    u32 head, old_head, frag_num, f, i;
     bool buf_idx;
 
     frag_num = skb_shinfo(skb)->nr_frags;
···
 
 err_free_skb:
     dev_dstats_tx_dropped(priv->ndev);
+
+    i = old_head;
+    while (i != head) {
+        emac_free_tx_buf(priv, i);
+
+        if (++i == tx_ring->total_cnt)
+            i = 0;
+    }
+
     dev_kfree_skb_any(skb);
 }
+9 -7
drivers/net/ethernet/ti/am65-cpsw-nuss.c
···
     ndev_priv = netdev_priv(ndev);
     am65_cpsw_nuss_set_offload_fwd_mark(skb, ndev_priv->offload_fwd_mark);
     skb_put(skb, pkt_len);
-    if (port->rx_ts_enabled)
+    if (port->rx_ts_filter)
         am65_cpts_rx_timestamp(common->cpts, skb);
     skb_mark_for_recycle(skb);
     skb->protocol = eth_type_trans(skb, ndev);
···
 
     switch (cfg->rx_filter) {
     case HWTSTAMP_FILTER_NONE:
-        port->rx_ts_enabled = false;
+        port->rx_ts_filter = HWTSTAMP_FILTER_NONE;
         break;
     case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
     case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
     case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
+        port->rx_ts_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
+        cfg->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
+        break;
     case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
     case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
     case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
···
     case HWTSTAMP_FILTER_PTP_V2_EVENT:
     case HWTSTAMP_FILTER_PTP_V2_SYNC:
     case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
-        port->rx_ts_enabled = true;
-        cfg->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT | HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
+        port->rx_ts_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
+        cfg->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
         break;
     case HWTSTAMP_FILTER_ALL:
     case HWTSTAMP_FILTER_SOME:
···
         ts_ctrl |= AM65_CPSW_TS_TX_ANX_ALL_EN |
                    AM65_CPSW_PN_TS_CTL_TX_VLAN_LT1_EN;
 
-    if (port->rx_ts_enabled)
+    if (port->rx_ts_filter)
         ts_ctrl |= AM65_CPSW_TS_RX_ANX_ALL_EN |
                    AM65_CPSW_PN_TS_CTL_RX_VLAN_LT1_EN;
 
···
     cfg->flags = 0;
     cfg->tx_type = port->tx_ts_enabled ?
                    HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
-    cfg->rx_filter = port->rx_ts_enabled ? HWTSTAMP_FILTER_PTP_V2_EVENT |
-                     HWTSTAMP_FILTER_PTP_V1_L4_EVENT : HWTSTAMP_FILTER_NONE;
+    cfg->rx_filter = port->rx_ts_filter;
 
     return 0;
 }
+1 -1
drivers/net/ethernet/ti/am65-cpsw-nuss.h
···
     bool disabled;
     struct am65_cpsw_slave_data slave;
     bool tx_ts_enabled;
-    bool rx_ts_enabled;
+    enum hwtstamp_rx_filters rx_ts_filter;
     struct am65_cpsw_qos qos;
     struct devlink_port devlink_port;
     struct bpf_prog *xdp_prog;
+1
drivers/net/mctp/mctp-i2c.c
···
     } else {
         status = NET_RX_DROP;
         spin_unlock_irqrestore(&midev->lock, flags);
+        kfree_skb(skb);
     }
 
     if (status == NET_RX_SUCCESS) {
+1 -2
drivers/net/mctp/mctp-usb.c
···
     SET_NETDEV_DEV(netdev, &intf->dev);
     dev = netdev_priv(netdev);
     dev->netdev = netdev;
-    dev->usbdev = usb_get_dev(interface_to_usbdev(intf));
+    dev->usbdev = interface_to_usbdev(intf);
     dev->intf = intf;
     usb_set_intfdata(intf, dev);
 
···
     mctp_unregister_netdev(dev->netdev);
     usb_free_urb(dev->tx_urb);
     usb_free_urb(dev->rx_urb);
-    usb_put_dev(dev->usbdev);
     free_netdev(dev->netdev);
 }
 
+7 -1
drivers/net/phy/sfp.c
···
     sfp->state_ignore_mask |= SFP_F_TX_FAULT;
 }
 
+static void sfp_fixup_ignore_tx_fault_and_los(struct sfp *sfp)
+{
+    sfp_fixup_ignore_tx_fault(sfp);
+    sfp_fixup_ignore_los(sfp);
+}
+
 static void sfp_fixup_ignore_hw(struct sfp *sfp, unsigned int mask)
 {
     sfp->state_hw_mask &= ~mask;
···
     // Huawei MA5671A can operate at 2500base-X, but report 1.2GBd NRZ in
     // their EEPROM
     SFP_QUIRK("HUAWEI", "MA5671A", sfp_quirk_2500basex,
-              sfp_fixup_ignore_tx_fault),
+              sfp_fixup_ignore_tx_fault_and_los),
 
     // Lantech 8330-262D-E and 8330-265D can operate at 2500base-X, but
     // incorrectly report 2500MBd NRZ in their EEPROM.
+8 -4
drivers/net/usb/lan78xx.c
···
     int ret;
     u32 buf;
 
+    /* LAN7850 is USB 2.0 and does not support LTM */
+    if (dev->chipid == ID_REV_CHIP_ID_7850_)
+        return 0;
+
     ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
     if (ret < 0)
         goto init_ltm_failed;
···
      */
     if (!(dev->net->features & NETIF_F_RXCSUM) ||
         unlikely(rx_cmd_a & RX_CMD_A_ICSM_) ||
+        unlikely(rx_cmd_a & RX_CMD_A_CSE_MASK_) ||
         ((rx_cmd_a & RX_CMD_A_FVTG_) &&
          !(dev->net->features & NETIF_F_HW_VLAN_CTAG_RX))) {
         skb->ip_summed = CHECKSUM_NONE;
···
         return 0;
     }
 
-    if (unlikely(rx_cmd_a & RX_CMD_A_RED_)) {
+    if (unlikely(rx_cmd_a & RX_CMD_A_RED_) &&
+        (rx_cmd_a & RX_CMD_A_RX_HARD_ERRS_MASK_)) {
         netif_dbg(dev, rx_err, dev->net,
                   "Error rx_cmd_a=0x%08x", rx_cmd_a);
     } else {
···
         }
 
         tx_data += len;
-        entry->length += len;
+        entry->length += max_t(unsigned int, len, ETH_ZLEN);
         entry->num_of_packet += skb_shinfo(skb)->gso_segs ?: 1;
 
         dev_kfree_skb_any(skb);
···
     phylink_stop(dev->phylink);
     phylink_disconnect_phy(dev->phylink);
     rtnl_unlock();
-
-    netif_napi_del(&dev->napi);
 
     unregister_netdev(net);
 
+3
drivers/net/usb/lan78xx.h
···
 #define RX_CMD_A_ICSM_            (0x00004000)
 #define RX_CMD_A_LEN_MASK_        (0x00003FFF)
 
+#define RX_CMD_A_RX_HARD_ERRS_MASK_ \
+    (RX_CMD_A_RX_ERRS_MASK_ & ~RX_CMD_A_CSE_MASK_)
+
 /* Rx Command B */
 #define RX_CMD_B_CSUM_SHIFT_      (16)
 #define RX_CMD_B_CSUM_MASK_       (0xFFFF0000)
+2 -2
drivers/net/usb/qmi_wwan.c
···
 
 static const struct driver_info qmi_wwan_info = {
     .description    = "WWAN/QMI device",
-    .flags          = FLAG_WWAN | FLAG_SEND_ZLP,
+    .flags          = FLAG_WWAN | FLAG_NOMAXMTU | FLAG_SEND_ZLP,
     .bind           = qmi_wwan_bind,
     .unbind         = qmi_wwan_unbind,
     .manage_power   = qmi_wwan_manage_power,
···
 
 static const struct driver_info qmi_wwan_info_quirk_dtr = {
     .description    = "WWAN/QMI device",
-    .flags          = FLAG_WWAN | FLAG_SEND_ZLP,
+    .flags          = FLAG_WWAN | FLAG_NOMAXMTU | FLAG_SEND_ZLP,
     .bind           = qmi_wwan_bind,
     .unbind         = qmi_wwan_unbind,
     .manage_power   = qmi_wwan_manage_power,
+4 -3
drivers/net/usb/usbnet.c
···
         if ((dev->driver_info->flags & FLAG_NOARP) != 0)
             net->flags |= IFF_NOARP;
 
-        if (net->max_mtu > (dev->hard_mtu - net->hard_header_len))
+        if ((dev->driver_info->flags & FLAG_NOMAXMTU) == 0 &&
+            net->max_mtu > (dev->hard_mtu - net->hard_header_len))
             net->max_mtu = dev->hard_mtu - net->hard_header_len;
 
-        if (net->mtu > net->max_mtu)
-            net->mtu = net->max_mtu;
+        if (net->mtu > (dev->hard_mtu - net->hard_header_len))
+            net->mtu = dev->hard_mtu - net->hard_header_len;
 
     } else if (!info->in || !info->out)
         status = usbnet_get_endpoints(dev, udev);
+4 -4
fs/afs/addr_list.c
···
     srx.transport.sin.sin_addr.s_addr = xdr;
 
     peer = rxrpc_kernel_lookup_peer(net->socket, &srx, GFP_KERNEL);
-    if (!peer)
-        return -ENOMEM;
+    if (IS_ERR(peer))
+        return PTR_ERR(peer);
 
     for (i = 0; i < alist->nr_ipv4; i++) {
         if (peer == alist->addrs[i].peer) {
···
     memcpy(&srx.transport.sin6.sin6_addr, xdr, 16);
 
     peer = rxrpc_kernel_lookup_peer(net->socket, &srx, GFP_KERNEL);
-    if (!peer)
-        return -ENOMEM;
+    if (IS_ERR(peer))
+        return PTR_ERR(peer);
 
     for (i = alist->nr_ipv4; i < alist->nr_addrs; i++) {
         if (peer == alist->addrs[i].peer) {
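
The afs bug is a mismatch of error conventions: rxrpc_kernel_lookup_peer() returns an ERR_PTR-encoded error rather than NULL, so the NULL test could never fire and the errno was lost. The kernel encodes small negative errnos in the topmost page of the pointer range; a freestanding sketch of that convention (simplified from the idea behind include/linux/err.h, constants chosen for illustration):

#include <stdio.h>

#define MAX_ERRNO  4095
#define MY_ENOMEM  12

static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
    /* errors live in the last 4095 values of the address space */
    return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

static void *lookup_peer(int fail)
{
    static int peer = 42;

    if (fail)
        return ERR_PTR(-MY_ENOMEM); /* error encoded in the pointer */
    return &peer;
}

int main(void)
{
    void *peer = lookup_peer(1);

    if (!peer)      /* never true for ERR_PTR values! */
        puts("NULL check: caught the error");
    if (IS_ERR(peer))
        printf("IS_ERR check: error %ld\n", PTR_ERR(peer));
    return 0;
}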
+6 -1
include/linux/ipv6.h
···
 };
 
 #if IS_ENABLED(CONFIG_IPV6)
-bool ipv6_mod_enabled(void);
+extern int disable_ipv6_mod;
+
+static inline bool ipv6_mod_enabled(void)
+{
+    return disable_ipv6_mod == 0;
+}
 
 static inline struct ipv6_pinfo *inet6_sk(const struct sock *__sk)
 {
+32
include/linux/netdevice.h
···
 };
 DECLARE_PER_CPU(struct page_pool_bh, system_page_pool);
 
+#define XMIT_RECURSION_LIMIT 8
+
 #ifndef CONFIG_PREEMPT_RT
 static inline int dev_recursion_level(void)
 {
     return this_cpu_read(softnet_data.xmit.recursion);
+}
+
+static inline bool dev_xmit_recursion(void)
+{
+    return unlikely(__this_cpu_read(softnet_data.xmit.recursion) >
+                    XMIT_RECURSION_LIMIT);
+}
+
+static inline void dev_xmit_recursion_inc(void)
+{
+    __this_cpu_inc(softnet_data.xmit.recursion);
+}
+
+static inline void dev_xmit_recursion_dec(void)
+{
+    __this_cpu_dec(softnet_data.xmit.recursion);
 }
 #else
 static inline int dev_recursion_level(void)
···
     return current->net_xmit.recursion;
 }
 
+static inline bool dev_xmit_recursion(void)
+{
+    return unlikely(current->net_xmit.recursion > XMIT_RECURSION_LIMIT);
+}
+
+static inline void dev_xmit_recursion_inc(void)
+{
+    current->net_xmit.recursion++;
+}
+
+static inline void dev_xmit_recursion_dec(void)
+{
+    current->net_xmit.recursion--;
+}
 #endif
 
 void __netif_schedule(struct Qdisc *q);
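
These helpers bound how many nested transmits a single call chain may stack up; a tunnel device that loops back into its own xmit path hits the limit and drops the packet instead of overflowing the kernel stack. A userspace model of the counter, with a thread-local variable standing in for the per-CPU/per-task field:

#include <stdio.h>

#define XMIT_RECURSION_LIMIT 8

static _Thread_local int xmit_recursion;

static int xmit(int depth)
{
    int ret;

    if (xmit_recursion > XMIT_RECURSION_LIMIT) {
        puts("recursion limit hit: packet dropped");
        return -1;
    }

    xmit_recursion++;
    /* a looped tunnel re-enters the transmit path here */
    printf("xmit depth %d\n", depth);
    ret = xmit(depth + 1);
    xmit_recursion--;
    return ret;
}

int main(void)
{
    return xmit(0) ? 0 : 1;     /* bottoms out after 9 nested calls */
}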
+1
include/linux/usb/usbnet.h
···
132 132 #define FLAG_MULTI_PACKET	0x2000
133 133 #define FLAG_RX_ASSEMBLE	0x4000	/* rx packets may span >1 frames */
134 134 #define FLAG_NOARP		0x8000	/* device can't do ARP */
135 +  #define FLAG_NOMAXMTU		0x10000	/* allow max_mtu above hard_mtu */
135 136 
136 137 	/* init device ... can sleep, or cause probe() failure */
137 138 	int (*bind)(struct usbnet *, struct usb_interface *);
+14
include/net/ip6_tunnel.h
···
156 156 {
157 157     int pkt_len, err;
158 158 
159 +      if (unlikely(dev_recursion_level() > IP_TUNNEL_RECURSION_LIMIT)) {
160 +          if (dev) {
161 +              net_crit_ratelimited("Dead loop on virtual device %s, fix it urgently!\n",
162 +                                   dev->name);
163 +              DEV_STATS_INC(dev, tx_errors);
164 +          }
165 +          kfree_skb(skb);
166 +          return;
167 +      }
168 +  
169 +      dev_xmit_recursion_inc();
170 +  
159 171     memset(skb->cb, 0, sizeof(struct inet6_skb_parm));
160 172     IP6CB(skb)->flags = ip6cb_flags;
161 173     pkt_len = skb->len - skb_inner_network_offset(skb);
···
178 166             pkt_len = -1;
179 167         iptunnel_xmit_stats(dev, pkt_len);
180 168     }
169 +  
170 +      dev_xmit_recursion_dec();
181 171 }
182 172 #endif
183 173 #endif
+7
include/net/ip_tunnels.h
···
27 27 #include <net/ip6_route.h>
28 28 #endif
29 29 
30 +  /* Recursion limit for tunnel xmit to detect routing loops.
31 +   * Unlike XMIT_RECURSION_LIMIT (8) used in the no-qdisc path, tunnel
32 +   * recursion involves route lookups and full IP output, consuming much
33 +   * more stack per level, so a lower limit is needed.
34 +   */
35 +  #define IP_TUNNEL_RECURSION_LIMIT 4
36 +  
30 37 /* Keep error state on tunnel for 30 sec */
31 38 #define IPTUNNEL_ERR_TIMEO	(30*HZ)
32 39 
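Both iptunnel_xmit() (below) and ip6tunnel_xmit() (above) now open-code the same check against this limit. For illustration only, since no such helper exists in the tree, the common shape they share is:

/* Illustrative helper, not in the tree: returns true and consumes the
 * skb when the tunnel stack is deeper than IP_TUNNEL_RECURSION_LIMIT,
 * mirroring the two open-coded guards in this series.
 */
static inline bool example_tunnel_too_deep(struct net_device *dev,
                                           struct sk_buff *skb)
{
    if (likely(dev_recursion_level() <= IP_TUNNEL_RECURSION_LIMIT))
        return false;

    if (dev) {
        net_crit_ratelimited("Dead loop on virtual device %s, fix it urgently!\n",
                             dev->name);
        DEV_STATS_INC(dev, tx_errors);
    }
    kfree_skb(skb);
    return true;
}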
+1 -1
include/net/page_pool/types.h
···
247 247     /* User-facing fields, protected by page_pools_lock */
248 248     struct {
249 249         struct hlist_node list;
250 -          u64 detach_time;
250 +          ktime_t detach_time;
251 251         u32 id;
252 252     } user;
253 253 };
-35
net/core/dev.h
···
366 366 
367 367 void kick_defer_list_purge(unsigned int cpu);
368 368 
369 -  #define XMIT_RECURSION_LIMIT 8
370 -  
371 -  #ifndef CONFIG_PREEMPT_RT
372 -  static inline bool dev_xmit_recursion(void)
373 -  {
374 -      return unlikely(__this_cpu_read(softnet_data.xmit.recursion) >
375 -              XMIT_RECURSION_LIMIT);
376 -  }
377 -  
378 -  static inline void dev_xmit_recursion_inc(void)
379 -  {
380 -      __this_cpu_inc(softnet_data.xmit.recursion);
381 -  }
382 -  
383 -  static inline void dev_xmit_recursion_dec(void)
384 -  {
385 -      __this_cpu_dec(softnet_data.xmit.recursion);
386 -  }
387 -  #else
388 -  static inline bool dev_xmit_recursion(void)
389 -  {
390 -      return unlikely(current->net_xmit.recursion > XMIT_RECURSION_LIMIT);
391 -  }
392 -  
393 -  static inline void dev_xmit_recursion_inc(void)
394 -  {
395 -      current->net_xmit.recursion++;
396 -  }
397 -  
398 -  static inline void dev_xmit_recursion_dec(void)
399 -  {
400 -      current->net_xmit.recursion--;
401 -  }
402 -  #endif
403 -  
404 369 int dev_set_hwtstamp_phylib(struct net_device *dev,
405 370                             struct kernel_hwtstamp_config *cfg,
406 371                             struct netlink_ext_ack *extack);
+7
net/core/filter.c
···
2228 2228         return -ENOMEM;
2229 2229     }
2230 2230 
2231 +      if (unlikely(!ipv6_mod_enabled()))
2232 +          goto out_drop;
2233 +  
2231 2234     rcu_read_lock();
2232 2235     if (!nh) {
2233 2236         dst = skb_dst(skb);
···
2338 2335 
2339 2336         neigh = ip_neigh_for_gw(rt, skb, &is_v6gw);
2340 2337     } else if (nh->nh_family == AF_INET6) {
2338 +          if (unlikely(!ipv6_mod_enabled())) {
2339 +              rcu_read_unlock();
2340 +              goto out_drop;
2341 +          }
2341 2342         neigh = ip_neigh_gw6(dev, &nh->ipv6_nh);
2342 2343         is_v6gw = true;
2343 2344     } else if (nh->nh_family == AF_INET) {
+2 -1
net/core/neighbour.c
···
820 820 update:
821 821     WRITE_ONCE(n->flags, flags);
822 822     n->permanent = permanent;
823 -      WRITE_ONCE(n->protocol, protocol);
823 +      if (protocol)
824 +          WRITE_ONCE(n->protocol, protocol);
824 825 out:
825 826     mutex_unlock(&tbl->phash_lock);
826 827     return err;
+2 -2
net/core/page_pool_user.c
···
245 245         goto err_cancel;
246 246     if (pool->user.detach_time &&
247 247         nla_put_uint(rsp, NETDEV_A_PAGE_POOL_DETACH_TIME,
248 -                       pool->user.detach_time))
248 +                       ktime_divns(pool->user.detach_time, NSEC_PER_SEC)))
249 249         goto err_cancel;
250 250 
251 251     if (pool->mp_ops && pool->mp_ops->nl_fill(pool->mp_priv, rsp, NULL))
···
337 337 void page_pool_detached(struct page_pool *pool)
338 338 {
339 339     mutex_lock(&page_pools_lock);
340 -      pool->user.detach_time = ktime_get_boottime_seconds();
340 +      pool->user.detach_time = ktime_get_boottime();
341 341     netdev_nl_page_pool_event(pool, NETDEV_CMD_PAGE_POOL_CHANGE_NTF);
342 342     mutex_unlock(&page_pools_lock);
343 343 }
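Why the type change matters: detach_time doubles as an "is this pool detached" flag (the `if (pool->user.detach_time ...)` test above), and ktime_get_boottime_seconds() returns 0 for the whole first second after boot, so, most plausibly, a pool detached that early recorded 0 and read back as still attached, the false negative in the commit title. Storing the raw nanosecond ktime_t makes a zero timestamp practically impossible; seconds are computed only at the netlink boundary, roughly:

#include <linux/ktime.h>

static u64 example_detach_secs(ktime_t detach_time)
{
    /* full-resolution timestamp taken at detach:
     *     pool->user.detach_time = ktime_get_boottime();
     * reported to userspace in whole seconds:
     */
    return ktime_divns(detach_time, NSEC_PER_SEC);
}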
+6
net/ipv4/af_inet.c
···
124 124 
125 125 #include <trace/events/sock.h>
126 126 
127 +  /* Keep the definition of IPv6 disable here for now, to avoid annoying linker
128 +   * issues in case IPv6=m
129 +   */
130 +  int disable_ipv6_mod;
131 +  EXPORT_SYMBOL(disable_ipv6_mod);
132 +  
127 133 /* The inetsw table contains everything that inet_create needs to
128 134  * build a new socket.
129 135  */
+15
net/ipv4/ip_tunnel_core.c
···
58 58     struct iphdr *iph;
59 59     int err;
60 60 
61 +      if (unlikely(dev_recursion_level() > IP_TUNNEL_RECURSION_LIMIT)) {
62 +          if (dev) {
63 +              net_crit_ratelimited("Dead loop on virtual device %s, fix it urgently!\n",
64 +                                   dev->name);
65 +              DEV_STATS_INC(dev, tx_errors);
66 +          }
67 +          ip_rt_put(rt);
68 +          kfree_skb(skb);
69 +          return;
70 +      }
71 +  
72 +      dev_xmit_recursion_inc();
73 +  
61 74     skb_scrub_packet(skb, xnet);
62 75 
63 76     skb_clear_hash_if_not_l4(skb);
···
101 88         pkt_len = 0;
102 89     iptunnel_xmit_stats(dev, pkt_len);
103 90 }
91 +  
92 +      dev_xmit_recursion_dec();
104 93 }
105 94 EXPORT_SYMBOL_GPL(iptunnel_xmit);
106 95 
+11 -3
net/ipv4/nexthop.c
···
2002 2002 }
2003 2003 
2004 2004 static void remove_nh_grp_entry(struct net *net, struct nh_grp_entry *nhge,
2005 -                                struct nl_info *nlinfo)
2005 +                                struct nl_info *nlinfo,
2006 +                                struct list_head *deferred_free)
2006 2007 {
2007 2008     struct nh_grp_entry *nhges, *new_nhges;
2008 2009     struct nexthop *nhp = nhge->nh_parent;
···
2063 2062     rcu_assign_pointer(nhp->nh_grp, newg);
2064 2063 
2065 2064     list_del(&nhge->nh_list);
2066 -      free_percpu(nhge->stats);
2067 2065     nexthop_put(nhge->nh);
2066 +      list_add(&nhge->nh_list, deferred_free);
2068 2067 
2069 2068     /* Removal of a NH from a resilient group is notified through
2070 2069      * bucket notifications.
···
2084 2083                              struct nl_info *nlinfo)
2085 2084 {
2086 2085     struct nh_grp_entry *nhge, *tmp;
2086 +      LIST_HEAD(deferred_free);
2087 2087 
2088 2088     /* If there is nothing to do, let's avoid the costly call to
2089 2089      * synchronize_net()
···
2093 2091         return;
2094 2092 
2095 2093     list_for_each_entry_safe(nhge, tmp, &nh->grp_list, nh_list)
2096 -          remove_nh_grp_entry(net, nhge, nlinfo);
2094 +          remove_nh_grp_entry(net, nhge, nlinfo, &deferred_free);
2097 2095 
2098 2096     /* make sure all see the newly published array before releasing rtnl */
2099 2097     synchronize_net();
2098 +  
2099 +      /* Now safe to free percpu stats — all RCU readers have finished */
2100 +      list_for_each_entry_safe(nhge, tmp, &deferred_free, nh_list) {
2101 +          list_del(&nhge->nh_list);
2102 +          free_percpu(nhge->stats);
2103 +      }
2100 2104 }
2101 2105 
2102 2106 static void remove_nexthop_group(struct nexthop *nh, struct nl_info *nlinfo)
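The use-after-free being fixed: readers still walking the old group under RCU may update nhge->stats, so freeing the per-CPU counters before synchronize_net() returns lets them write to freed memory. The general shape of the fix as a self-contained sketch; the example_* names are hypothetical, the list/percpu/synchronize_net calls are the real kernel APIs:

#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/percpu.h>

struct example_entry {
    struct list_head list;
    u64 __percpu *stats;    /* still written by RCU readers */
};

static void example_remove_entries(struct list_head *group)
{
    struct example_entry *e, *tmp;
    LIST_HEAD(deferred);

    list_for_each_entry_safe(e, tmp, group, list) {
        /* ... unpublish e from the RCU-visible structure first ... */
        list_move(&e->list, &deferred);
    }

    synchronize_net();    /* no RCU reader can still see the entries */

    list_for_each_entry_safe(e, tmp, &deferred, list) {
        list_del(&e->list);
        free_percpu(e->stats);    /* safe only after the grace period */
    }
}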
-8
net/ipv6/af_inet6.c
···
86 86     .autoconf = 1,
87 87 };
88 88 
89 -  static int disable_ipv6_mod;
90 -  
91 89 module_param_named(disable, disable_ipv6_mod, int, 0444);
92 90 MODULE_PARM_DESC(disable, "Disable IPv6 module such that it is non-functional");
93 91 
···
94 96 
95 97 module_param_named(autoconf, ipv6_defaults.autoconf, int, 0444);
96 98 MODULE_PARM_DESC(autoconf, "Enable IPv6 address autoconfiguration on all interfaces");
97 -  
98 -  bool ipv6_mod_enabled(void)
99 -  {
100 -      return disable_ipv6_mod == 0;
101 -  }
102 -  EXPORT_SYMBOL_GPL(ipv6_mod_enabled);
103 99 
104 100 static struct ipv6_pinfo *inet6_sk_generic(struct sock *sk)
105 101 {
+8 -5
net/mctp/route.c
···
359 359 {
360 360     struct mctp_sk_key *key;
361 361     struct mctp_flow *flow;
362 +      unsigned long flags;
362 363 
363 364     flow = skb_ext_find(skb, SKB_EXT_MCTP);
364 365     if (!flow)
···
367 366 
368 367     key = flow->key;
369 368 
370 -      if (key->dev) {
371 -          WARN_ON(key->dev != dev);
372 -          return;
373 -      }
369 +      spin_lock_irqsave(&key->lock, flags);
374 370 
375 -      mctp_dev_set_key(dev, key);
371 +      if (!key->dev)
372 +          mctp_dev_set_key(dev, key);
373 +      else
374 +          WARN_ON(key->dev != dev);
375 +  
376 +      spin_unlock_irqrestore(&key->lock, flags);
376 377 }
377 378 
378 379 #else
static void mctp_skb_set_flow(struct sk_buff *skb, struct mctp_sk_key *key) {}
+2 -1
net/ncsi/ncsi-aen.c
···
224 224     if (!nah) {
225 225         netdev_warn(ndp->ndev.dev, "Invalid AEN (0x%x) received\n",
226 226                     h->type);
227 -          return -ENOENT;
227 +          ret = -ENOENT;
228 +          goto out;
228 229     }
229 230 
230 231     ret = ncsi_validate_aen_pkt(h, nah->payload);
+12 -4
net/ncsi/ncsi-rsp.c
···
1176 1176     /* Find the NCSI device */
1177 1177     nd = ncsi_find_dev(orig_dev);
1178 1178     ndp = nd ? TO_NCSI_DEV_PRIV(nd) : NULL;
1179 -      if (!ndp)
1180 -          return -ENODEV;
1179 +      if (!ndp) {
1180 +          ret = -ENODEV;
1181 +          goto err_free_skb;
1182 +      }
1181 1183 
1182 1184     /* Check if it is AEN packet */
1183 1185     hdr = (struct ncsi_pkt_hdr *)skb_network_header(skb);
···
1201 1199     if (!nrh) {
1202 1200         netdev_err(nd->dev, "Received unrecognized packet (0x%x)\n",
1203 1201                    hdr->type);
1204 -          return -ENOENT;
1202 +          ret = -ENOENT;
1203 +          goto err_free_skb;
1205 1204     }
1206 1205 
1207 1206     /* Associate with the request */
···
1210 1207     nr = &ndp->requests[hdr->id];
1211 1208     if (!nr->used) {
1212 1209         spin_unlock_irqrestore(&ndp->lock, flags);
1213 -          return -ENODEV;
1210 +          ret = -ENODEV;
1211 +          goto err_free_skb;
1214 1212     }
1215 1213 
1216 1214     nr->rsp = skb;
···
1264 1260 
1265 1261 out:
1266 1262     ncsi_free_request(nr);
1263 +      return ret;
1264 +  
1265 +  err_free_skb:
1266 +      kfree_skb(skb);
1267 1267     return ret;
1268 1268 }
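The receive handler owns the skb once it is called, so every early return taken before nr->rsp = skb leaked the buffer; the fix routes all such exits through a single label. The idiom, reduced to a sketch with hypothetical helpers:

static bool example_validate(struct sk_buff *skb);    /* hypothetical */
static void example_queue(struct sk_buff *skb);       /* hypothetical; takes ownership */

static int example_rcv(struct sk_buff *skb)
{
    int ret;

    if (!example_validate(skb)) {
        ret = -ENOENT;
        goto err_free_skb;
    }

    example_queue(skb);    /* ownership transferred; don't touch skb again */
    return 0;

err_free_skb:
    kfree_skb(skb);        /* every failure path frees exactly once */
    return ret;
}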
+1 -3
net/netfilter/nf_tables_api.c
···
829 829 
830 830         nft_set_elem_change_active(ctx->net, set, ext);
831 831         nft_setelem_data_deactivate(ctx->net, set, catchall->elem);
832 -          break;
833 832     }
834 833 }
···
5872 5873 
5873 5874         nft_clear(ctx->net, ext);
5874 5875         nft_setelem_data_activate(ctx->net, set, catchall->elem);
5875 -          break;
5876 5876     }
5877 5877 }
···
9686 9688         break;
9687 9689     case NETDEV_REGISTER:
9688 9690         /* NOP if not matching or already registered */
9689 -          if (!match || (changename && ops))
9691 +          if (!match || ops)
9690 9692             continue;
9691 9693 
9692 9694         ops = kzalloc_obj(struct nf_hook_ops,
+1 -1
net/netfilter/nft_chain_filter.c
···
344 344         break;
345 345     case NETDEV_REGISTER:
346 346         /* NOP if not matching or already registered */
347 -          if (!match || (changename && ops))
347 +          if (!match || ops)
348 348             continue;
349 349 
350 350         ops = kmemdup(&basechain->ops,
+2 -1
net/netfilter/nft_set_pipapo.c
···
1640 1640     int i;
1641 1641 
1642 1642     nft_pipapo_for_each_field(f, i, m) {
1643 +          bool last = i == m->field_count - 1;
1643 1644         int g;
1644 1645 
1645 1646         for (g = 0; g < f->groups; g++) {
···
1660 1659         }
1661 1660 
1662 1661         pipapo_unmap(f->mt, f->rules, rulemap[i].to, rulemap[i].n,
1663 -                       rulemap[i + 1].n, i == m->field_count - 1);
1662 +                       last ? 0 : rulemap[i + 1].n, last);
1664 1663         if (pipapo_resize(f, f->rules, f->rules - rulemap[i].n)) {
1665 1664             /* We can ignore this, a failure to shrink tables down
1666 1665              * doesn't make tables invalid.
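The stack out-of-bounds read being fixed: on the last field, rulemap[i + 1] points one past the end of the on-stack array, so pipapo_unmap() was fed stack garbage. Hoisting a `last` flag makes the boundary explicit; the bug class in miniature, with hypothetical example_* names standing in for the pipapo structures:

struct example_map { unsigned int n; };    /* stand-in for the rulemap */

static void example_consume(unsigned int cur, unsigned int next, bool last);    /* hypothetical */

static void example_walk(const struct example_map *map, unsigned int n)
{
    unsigned int i;

    for (i = 0; i < n; i++) {
        bool last = (i == n - 1);

        /* map[i + 1] only exists when this is not the last entry */
        example_consume(map[i].n, last ? 0 : map[i + 1].n, last);
    }
}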
+6
net/netfilter/xt_IDLETIMER.c
···
318 318 
319 319     info->timer = __idletimer_tg_find_by_label(info->label);
320 320     if (info->timer) {
321 +          if (info->timer->timer_type & XT_IDLETIMER_ALARM) {
322 +              pr_debug("Adding/Replacing rule with same label and different timer type is not allowed\n");
323 +              mutex_unlock(&list_mutex);
324 +              return -EINVAL;
325 +          }
326 +  
321 327         info->timer->refcnt++;
322 328         mod_timer(&info->timer->timer,
323 329                   secs_to_jiffies(info->timeout) + jiffies);
+2 -2
net/netfilter/xt_dccp.c
···
62 62             return true;
63 63         }
64 64 
65 -          if (op[i] < 2)
65 +          if (op[i] < 2 || i == optlen - 1)
66 66             i++;
67 67         else
68 -              i += op[i+1]?:1;
68 +              i += op[i + 1] ? : 1;
69 69     }
70 70 
71 71     spin_unlock_bh(&dccp_buflock);
+4 -2
net/netfilter/xt_tcpudp.c
···
59 59 
60 60     for (i = 0; i < optlen; ) {
61 61         if (op[i] == option) return !invert;
62 -          if (op[i] < 2) i++;
63 -          else i += op[i+1]?:1;
62 +          if (op[i] < 2 || i == optlen - 1)
63 +              i++;
64 +          else
65 +              i += op[i + 1] ? : 1;
64 66     }
65 67 
66 68     return invert;
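The DCCP and TCP/UDP option walkers share one bug: a multi-byte option carries its length at op[i + 1], but when i == optlen - 1 that byte sits past the end of the copied option buffer, an out-of-bounds read. The hardened loop, extracted as a self-contained sketch (example_* name is hypothetical; the logic is the patched one):

#include <linux/types.h>

static bool example_option_present(const u8 *op, unsigned int optlen, u8 option)
{
    unsigned int i;

    for (i = 0; i < optlen; ) {
        if (op[i] == option)
            return true;
        if (op[i] < 2 || i == optlen - 1)
            i++;    /* 1-byte option, or no length byte left in the buffer */
        else
            i += op[i + 1] ? : 1;    /* a zero length still advances */
    }
    return false;
}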
+5 -3
net/rxrpc/af_rxrpc.c
···
267 267  * Lookup or create a remote transport endpoint record for the specified
268 268  * address.
269 269  *
270 -   * Return: The peer record found with a reference, %NULL if no record is found
271 -   * or a negative error code if the address is invalid or unsupported.
270 +   * Return: The peer record found with a reference or a negative error code if
271 +   * the address is invalid or unsupported.
272 272  */
273 273 struct rxrpc_peer *rxrpc_kernel_lookup_peer(struct socket *sock,
274 274                                             struct sockaddr_rxrpc *srx, gfp_t gfp)
275 275 {
276 +      struct rxrpc_peer *peer;
276 277     struct rxrpc_sock *rx = rxrpc_sk(sock->sk);
277 278     int ret;
278 279 
···
281 280     if (ret < 0)
282 281         return ERR_PTR(ret);
283 282 
284 -      return rxrpc_lookup_peer(rx->local, srx, gfp);
283 +      peer = rxrpc_lookup_peer(rx->local, srx, gfp);
284 +      return peer ?: ERR_PTR(-ENOMEM);
285 285 }
286 286 EXPORT_SYMBOL(rxrpc_kernel_lookup_peer);
287 287 
+1
net/sched/sch_teql.c
···
315 315         if (__netif_tx_trylock(slave_txq)) {
316 316             unsigned int length = qdisc_pkt_len(skb);
317 317 
318 +              skb->dev = slave;
318 319             if (!netif_xmit_frozen_or_stopped(slave_txq) &&
319 320                 netdev_start_xmit(skb, slave, slave_txq, false) ==
320 321                 NETDEV_TX_OK) {
+2 -9
net/shaper/shaper.c
···
759 759     if (ret)
760 760         goto free_msg;
761 761 
762 -      ret = genlmsg_reply(msg, info);
763 -      if (ret)
764 -          goto free_msg;
765 -  
766 -      return 0;
762 +      return genlmsg_reply(msg, info);
767 763 
768 764 free_msg:
769 765     nlmsg_free(msg);
···
1309 1313     if (ret)
1310 1314         goto free_msg;
1311 1315 
1312 -      ret = genlmsg_reply(msg, info);
1313 -      if (ret)
1314 -          goto free_msg;
1315 -      return 0;
1316 +      return genlmsg_reply(msg, info);
1316 1317 
1317 1318 free_msg:
1318 1319     nlmsg_free(msg);
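genlmsg_reply() consumes the message on success and on failure, so the removed "goto free_msg on error" path freed an skb that netlink had already released: a double free. The ownership rule as a sketch (example_fill is a hypothetical attribute-fill helper; the genetlink calls are real):

#include <net/genetlink.h>

static int example_fill(struct sk_buff *msg, struct genl_info *info);    /* hypothetical */

static int example_doit(struct sk_buff *msg, struct genl_info *info)
{
    int ret;

    ret = example_fill(msg, info);
    if (ret)
        goto free_msg;    /* we still own msg here */

    /* genlmsg_reply() consumes msg on success *and* on failure */
    return genlmsg_reply(msg, info);

free_msg:
    nlmsg_free(msg);
    return ret;
}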
+2
net/tipc/socket.c
···
2233 2233         if (skb_queue_empty(&sk->sk_write_queue))
2234 2234             break;
2235 2235         get_random_bytes(&delay, 2);
2236 +          if (tsk->conn_timeout < 4)
2237 +              tsk->conn_timeout = 4;
2236 2238         delay %= (tsk->conn_timeout / 4);
2237 2239         delay = msecs_to_jiffies(delay + 100);
2238 2240         sk_reset_timer(sk, &sk->sk_timer, jiffies + delay);
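The divide-by-zero: conn_timeout is user-controlled (settable via socket options), and any value below 4 makes conn_timeout / 4 evaluate to 0, so the modulo faults. Clamping before dividing guarantees a nonzero divisor; in miniature, as a sketch:

#include <linux/minmax.h>
#include <linux/random.h>

static u32 example_probe_delay(u32 conn_timeout)
{
    u16 delay;

    get_random_bytes(&delay, 2);
    conn_timeout = max_t(u32, conn_timeout, 4);    /* divisor >= 1 */
    delay %= conn_timeout / 4;
    return delay + 100;    /* milliseconds */
}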
+55
tools/testing/selftests/net/rtnetlink.sh
···
28 28     kci_test_fdb_get
29 29     kci_test_fdb_del
30 30     kci_test_neigh_get
31 +      kci_test_neigh_update
31 32     kci_test_bridge_parent_id
32 33     kci_test_address_proto
33 34     kci_test_enslave_bonding
···
1159 1158     fi
1160 1159 
1161 1160     end_test "PASS: neigh get"
1161 +  }
1162 +  
1163 +  kci_test_neigh_update()
1164 +  {
1165 +      dstip=10.0.2.4
1166 +      dstmac=de:ad:be:ef:13:37
1167 +      local ret=0
1168 +  
1169 +      for proxy in "" "proxy" ; do
1170 +          # add a neighbour entry without any flags
1171 +          run_cmd ip neigh add $proxy $dstip dev "$devdummy" lladdr $dstmac nud permanent
1172 +          run_cmd_grep $dstip ip neigh show $proxy
1173 +          run_cmd_grep_fail "$dstip dev $devdummy .*\(managed\|use\|router\|extern\)" ip neigh show $proxy
1174 +  
1175 +          # set the extern_learn flag, but no other
1176 +          run_cmd ip neigh change $proxy $dstip dev "$devdummy" extern_learn
1177 +          run_cmd_grep "$dstip dev $devdummy .* extern_learn" ip neigh show $proxy
1178 +          run_cmd_grep_fail "$dstip dev $devdummy .* \(managed\|use\|router\)" ip neigh show $proxy
1179 +  
1180 +          # flags are reset when not provided
1181 +          run_cmd ip neigh change $proxy $dstip dev "$devdummy"
1182 +          run_cmd_grep $dstip ip neigh show $proxy
1183 +          run_cmd_grep_fail "$dstip dev $devdummy .* extern_learn" ip neigh show $proxy
1184 +  
1185 +          # add a protocol
1186 +          run_cmd ip neigh change $proxy $dstip dev "$devdummy" protocol boot
1187 +          run_cmd_grep "$dstip dev $devdummy .* proto boot" ip neigh show $proxy
1188 +  
1189 +          # protocol is retained when not provided
1190 +          run_cmd ip neigh change $proxy $dstip dev "$devdummy"
1191 +          run_cmd_grep "$dstip dev $devdummy .* proto boot" ip neigh show $proxy
1192 +  
1193 +          # change protocol
1194 +          run_cmd ip neigh change $proxy $dstip dev "$devdummy" protocol static
1195 +          run_cmd_grep "$dstip dev $devdummy .* proto static" ip neigh show $proxy
1196 +  
1197 +          # also check an extended flag for non-proxy neighs
1198 +          if [ "$proxy" = "" ]; then
1199 +              run_cmd ip neigh change $proxy $dstip dev "$devdummy" managed
1200 +              run_cmd_grep "$dstip dev $devdummy managed" ip neigh show $proxy
1201 +  
1202 +              run_cmd ip neigh change $proxy $dstip dev "$devdummy" lladdr $dstmac
1203 +              run_cmd_grep_fail "$dstip dev $devdummy managed" ip neigh show $proxy
1204 +          fi
1205 +  
1206 +          run_cmd ip neigh del $proxy $dstip dev "$devdummy"
1207 +      done
1208 +  
1209 +      if [ $ret -ne 0 ];then
1210 +          end_test "FAIL: neigh update"
1211 +          return 1
1212 +      fi
1213 +  
1214 +      end_test "PASS: neigh update"
1162 1215 }
1163 1216 
1164 1217 kci_test_bridge_parent_id()