Linux kernel mirror (for testing) - git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

Merge tag 'net-6.16-rc8' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net

Pull networking fixes from Paolo Abeni:
"Including fixes from can and xfrm.

The TI regression reported last week is actually in our net-next tree;
it does not affect 6.16.

We are investigating a virtio regression that is quite hard to
reproduce - currently only our CI hits it sporadically. Hopefully it
is not critical, and I'm not sure an additional week would be enough
to solve it.

Current release - fix to a fix:

- sched: sch_qfq: avoid sleeping in atomic context in qfq_delete_class

Previous releases - regressions:

- xfrm:
- set transport header to fix UDP GRO handling
- delete x->tunnel as we delete x

- eth:
- mlx5: fix memory leak in cmd_exec()
- i40e: when removing VF MAC filters, avoid losing PF-set MAC
- gve: fix stuck TX queue for DQ queue format

Previous releases - always broken:

- can: fix NULL pointer deref of struct can_priv::do_set_mode

- eth:
- ice: fix a null pointer dereference in ice_copy_and_init_pkg()
- ism: fix concurrency management in ism_cmd()
- dpaa2: fix device reference count leak in MAC endpoint handling
- icssg-prueth: fix buffer allocation for ICSSG

Misc:

- selftests: mptcp: increase code coverage"

* tag 'net-6.16-rc8' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net: (34 commits)
net: hns3: default enable tx bounce buffer when smmu enabled
net: hns3: fixed vf get max channels bug
net: hns3: disable interrupt when ptp init failed
net: hns3: fix concurrent setting vlan filter issue
s390/ism: fix concurrency management in ism_cmd()
selftests: drv-net: wait for iperf client to stop sending
MAINTAINERS: Add in6.h to MAINTAINERS
selftests: netfilter: tone-down conntrack clash test
can: netlink: can_changelink(): fix NULL pointer deref of struct can_priv::do_set_mode
net/sched: sch_qfq: Avoid triggering might_sleep in atomic context in qfq_delete_class
gve: Fix stuck TX queue for DQ queue format
net: appletalk: Fix use-after-free in AARP proxy probe
net: bcmasp: Restore programming of TX map vector register
selftests: mptcp: connect: also cover checksum
selftests: mptcp: connect: also cover alt modes
e1000e: ignore uninitialized checksum word on tgp
e1000e: disregard NVM checksum on tgp when valid checksum bit is not set
ice: Fix a null pointer dereference in ice_copy_and_init_pkg()
i40e: When removing VF MAC filters, only check PF-set MAC
i40e: report VF tx_dropped with tx_errors instead of tx_discards
...

+550 -307
+1
MAINTAINERS
···
 F:	include/linux/framer/framer-provider.h
 F:	include/linux/framer/framer.h
 F:	include/linux/in.h
+F:	include/linux/in6.h
 F:	include/linux/indirect_call_wrapper.h
 F:	include/linux/inet.h
 F:	include/linux/inet_diag.h
+9 -10
drivers/bus/fsl-mc/fsl-mc-bus.c
···
 	struct fsl_mc_obj_desc endpoint_desc = {{ 0 }};
 	struct dprc_endpoint endpoint1 = {{ 0 }};
 	struct dprc_endpoint endpoint2 = {{ 0 }};
+	struct fsl_mc_bus *mc_bus;
 	int state, err;

 	mc_bus_dev = to_fsl_mc_device(mc_dev->dev.parent);
···
 	strcpy(endpoint_desc.type, endpoint2.type);
 	endpoint_desc.id = endpoint2.id;
 	endpoint = fsl_mc_device_lookup(&endpoint_desc, mc_bus_dev);
+	if (endpoint)
+		return endpoint;

 	/*
 	 * We know that the device has an endpoint because we verified by
···
 	 * yet discovered by the fsl-mc bus, thus the lookup returned NULL.
 	 * Force a rescan of the devices in this container and retry the lookup.
 	 */
-	if (!endpoint) {
-		struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(mc_bus_dev);
-
-		if (mutex_trylock(&mc_bus->scan_mutex)) {
-			err = dprc_scan_objects(mc_bus_dev, true);
-			mutex_unlock(&mc_bus->scan_mutex);
-		}
-
-		if (err < 0)
-			return ERR_PTR(err);
+	mc_bus = to_fsl_mc_bus(mc_bus_dev);
+	if (mutex_trylock(&mc_bus->scan_mutex)) {
+		err = dprc_scan_objects(mc_bus_dev, true);
+		mutex_unlock(&mc_bus->scan_mutex);
 	}
+	if (err < 0)
+		return ERR_PTR(err);

 	endpoint = fsl_mc_device_lookup(&endpoint_desc, mc_bus_dev);
 	/*
+9 -3
drivers/net/can/dev/dev.c
···
 EXPORT_SYMBOL_GPL(can_change_state);

 /* CAN device restart for bus-off recovery */
-static void can_restart(struct net_device *dev)
+static int can_restart(struct net_device *dev)
 {
 	struct can_priv *priv = netdev_priv(dev);
 	struct sk_buff *skb;
 	struct can_frame *cf;
 	int err;
+
+	if (!priv->do_set_mode)
+		return -EOPNOTSUPP;

 	if (netif_carrier_ok(dev))
 		netdev_err(dev, "Attempt to restart for bus-off recovery, but carrier is OK?\n");
···
 	if (err) {
 		netdev_err(dev, "Restart failed, error %pe\n", ERR_PTR(err));
 		netif_carrier_off(dev);
+
+		return err;
 	} else {
 		netdev_dbg(dev, "Restarted\n");
 		priv->can_stats.restarts++;
 	}
+
+	return 0;
 }

 static void can_restart_work(struct work_struct *work)
···
 		return -EBUSY;

 	cancel_delayed_work_sync(&priv->restart_work);
-	can_restart(dev);

-	return 0;
+	return can_restart(dev);
 }

 /* CAN bus-off
+12
drivers/net/can/dev/netlink.c
···
 	}

 	if (data[IFLA_CAN_RESTART_MS]) {
+		if (!priv->do_set_mode) {
+			NL_SET_ERR_MSG(extack,
+				       "Device doesn't support restart from Bus Off");
+			return -EOPNOTSUPP;
+		}
+
 		/* Do not allow changing restart delay while running */
 		if (dev->flags & IFF_UP)
 			return -EBUSY;
···
 	}

 	if (data[IFLA_CAN_RESTART]) {
+		if (!priv->do_set_mode) {
+			NL_SET_ERR_MSG(extack,
+				       "Device doesn't support restart from Bus Off");
+			return -EOPNOTSUPP;
+		}
+
 		/* Do not allow a restart while not running */
 		if (!(dev->flags & IFF_UP))
 			return -EINVAL;
+3
drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c
···
 	/* Tx SPB */
 	tx_spb_ctrl_wl(intf, ((intf->channel + 8) << TX_SPB_CTRL_XF_BID_SHIFT),
 		       TX_SPB_CTRL_XF_CTRL2);
+
+	if (intf->parent->tx_chan_offset)
+		tx_pause_ctrl_wl(intf, (1 << (intf->channel + 8)), TX_PAUSE_MAP_VECTOR);
 	tx_spb_top_wl(intf, 0x1e, TX_SPB_TOP_BLKOUT);

 	tx_spb_dma_wq(intf, intf->tx_spb_dma_addr, TX_SPB_DMA_READ);
+12 -3
drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
···
 		return PTR_ERR(dpmac_dev);
 	}

-	if (IS_ERR(dpmac_dev) || dpmac_dev->dev.type != &fsl_mc_bus_dpmac_type)
+	if (IS_ERR(dpmac_dev))
 		return 0;

+	if (dpmac_dev->dev.type != &fsl_mc_bus_dpmac_type) {
+		err = 0;
+		goto out_put_device;
+	}
+
 	mac = kzalloc(sizeof(struct dpaa2_mac), GFP_KERNEL);
-	if (!mac)
-		return -ENOMEM;
+	if (!mac) {
+		err = -ENOMEM;
+		goto out_put_device;
+	}

 	mac->mc_dev = dpmac_dev;
 	mac->mc_io = priv->mc_io;
···
 	dpaa2_mac_close(mac);
err_free_mac:
 	kfree(mac);
+out_put_device:
+	put_device(&dpmac_dev->dev);
 	return err;
 }
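Aside: both dpaa2 fixes are the classic goto-unwind shape - once the lookup has taken a device reference, every exit path (including the "not a dpmac, nothing to do" path) must funnel through a label that drops it. A minimal userspace sketch of that shape, with a hypothetical refcounted object standing in for the dpmac device (obj_get/obj_put are illustrative, not the fsl-mc API):

	#include <stdio.h>
	#include <stdlib.h>

	struct obj { int refs; };

	static struct obj *obj_get(void)
	{
		struct obj *o = malloc(sizeof(*o));
		if (o)
			o->refs = 1;
		return o;
	}

	static void obj_put(struct obj *o)
	{
		if (o && --o->refs == 0)
			free(o);
	}

	/* Every exit after the lookup goes through out_put, so the
	 * reference taken by obj_get() is dropped on the "wrong type"
	 * and allocation-failure paths too - the shape of the fix. */
	static int connect(int wrong_type, int alloc_fails)
	{
		struct obj *dev = obj_get();
		void *ctx = NULL;
		int err = 0;

		if (!dev)
			return -1;

		if (wrong_type)
			goto out_put;	/* err stays 0: skip, not an error */

		ctx = alloc_fails ? NULL : malloc(16);
		if (!ctx) {
			err = -1;
			goto out_put;
		}

		free(ctx);
	out_put:
		obj_put(dev);		/* released on all paths */
		return err;
	}

	int main(void)
	{
		printf("%d %d %d\n", connect(0, 0), connect(1, 0), connect(0, 1));
		return 0;
	}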
+12 -3
drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
···
 	if (PTR_ERR(dpmac_dev) == -EPROBE_DEFER)
 		return PTR_ERR(dpmac_dev);

-	if (IS_ERR(dpmac_dev) || dpmac_dev->dev.type != &fsl_mc_bus_dpmac_type)
+	if (IS_ERR(dpmac_dev))
 		return 0;

+	if (dpmac_dev->dev.type != &fsl_mc_bus_dpmac_type) {
+		err = 0;
+		goto out_put_device;
+	}
+
 	mac = kzalloc(sizeof(*mac), GFP_KERNEL);
-	if (!mac)
-		return -ENOMEM;
+	if (!mac) {
+		err = -ENOMEM;
+		goto out_put_device;
+	}

 	mac->mc_dev = dpmac_dev;
 	mac->mc_io = port_priv->ethsw_data->mc_io;
···
 	dpaa2_mac_close(mac);
err_free_mac:
 	kfree(mac);
+out_put_device:
+	put_device(&dpmac_dev->dev);
 	return err;
 }
+37 -30
drivers/net/ethernet/google/gve/gve_main.c
···
 	gve_handle_link_status(priv, GVE_DEVICE_STATUS_LINK_STATUS_MASK & status);
 }

-static void gve_tx_timeout(struct net_device *dev, unsigned int txqueue)
+static struct gve_notify_block *gve_get_tx_notify_block(struct gve_priv *priv,
+							unsigned int txqueue)
 {
-	struct gve_notify_block *block;
-	struct gve_tx_ring *tx = NULL;
-	struct gve_priv *priv;
-	u32 last_nic_done;
-	u32 current_time;
 	u32 ntfy_idx;

-	netdev_info(dev, "Timeout on tx queue, %d", txqueue);
-	priv = netdev_priv(dev);
 	if (txqueue > priv->tx_cfg.num_queues)
-		goto reset;
+		return NULL;

 	ntfy_idx = gve_tx_idx_to_ntfy(priv, txqueue);
 	if (ntfy_idx >= priv->num_ntfy_blks)
-		goto reset;
+		return NULL;

-	block = &priv->ntfy_blocks[ntfy_idx];
-	tx = block->tx;
+	return &priv->ntfy_blocks[ntfy_idx];
+}
+
+static bool gve_tx_timeout_try_q_kick(struct gve_priv *priv,
+				      unsigned int txqueue)
+{
+	struct gve_notify_block *block;
+	u32 current_time;
+
+	block = gve_get_tx_notify_block(priv, txqueue);
+
+	if (!block)
+		return false;

 	current_time = jiffies_to_msecs(jiffies);
-	if (tx->last_kick_msec + MIN_TX_TIMEOUT_GAP > current_time)
-		goto reset;
+	if (block->tx->last_kick_msec + MIN_TX_TIMEOUT_GAP > current_time)
+		return false;

-	/* Check to see if there are missed completions, which will allow us to
-	 * kick the queue.
-	 */
-	last_nic_done = gve_tx_load_event_counter(priv, tx);
-	if (last_nic_done - tx->done) {
-		netdev_info(dev, "Kicking queue %d", txqueue);
-		iowrite32be(GVE_IRQ_MASK, gve_irq_doorbell(priv, block));
-		napi_schedule(&block->napi);
-		tx->last_kick_msec = current_time;
-		goto out;
-	} // Else reset.
+	netdev_info(priv->dev, "Kicking queue %d", txqueue);
+	napi_schedule(&block->napi);
+	block->tx->last_kick_msec = current_time;
+	return true;
+}

-reset:
-	gve_schedule_reset(priv);
+static void gve_tx_timeout(struct net_device *dev, unsigned int txqueue)
+{
+	struct gve_notify_block *block;
+	struct gve_priv *priv;

-out:
-	if (tx)
-		tx->queue_timeout++;
+	netdev_info(dev, "Timeout on tx queue, %d", txqueue);
+	priv = netdev_priv(dev);
+
+	if (!gve_tx_timeout_try_q_kick(priv, txqueue))
+		gve_schedule_reset(priv);
+
+	block = gve_get_tx_notify_block(priv, txqueue);
+	if (block)
+		block->tx->queue_timeout++;
 	priv->tx_timeo_cnt++;
 }
+31
drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
···
 #include <linux/irq.h>
 #include <linux/ip.h>
 #include <linux/ipv6.h>
+#include <linux/iommu.h>
 #include <linux/module.h>
 #include <linux/pci.h>
 #include <linux/skbuff.h>
···
 static void hns3_init_tx_spare_buffer(struct hns3_enet_ring *ring)
 {
 	u32 alloc_size = ring->tqp->handle->kinfo.tx_spare_buf_size;
+	struct net_device *netdev = ring_to_netdev(ring);
+	struct hns3_nic_priv *priv = netdev_priv(netdev);
 	struct hns3_tx_spare *tx_spare;
 	struct page *page;
 	dma_addr_t dma;
···
 	tx_spare->buf = page_address(page);
 	tx_spare->len = PAGE_SIZE << order;
 	ring->tx_spare = tx_spare;
+	ring->tx_copybreak = priv->tx_copybreak;
 	return;

dma_mapping_error:
···
 	devm_kfree(&pdev->dev, priv->tqp_vector);
 }

+static void hns3_update_tx_spare_buf_config(struct hns3_nic_priv *priv)
+{
+#define HNS3_MIN_SPARE_BUF_SIZE (2 * 1024 * 1024)
+#define HNS3_MAX_PACKET_SIZE (64 * 1024)
+
+	struct iommu_domain *domain = iommu_get_domain_for_dev(priv->dev);
+	struct hnae3_ae_dev *ae_dev = hns3_get_ae_dev(priv->ae_handle);
+	struct hnae3_handle *handle = priv->ae_handle;
+
+	if (ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3)
+		return;
+
+	if (!(domain && iommu_is_dma_domain(domain)))
+		return;
+
+	priv->min_tx_copybreak = HNS3_MAX_PACKET_SIZE;
+	priv->min_tx_spare_buf_size = HNS3_MIN_SPARE_BUF_SIZE;
+
+	if (priv->tx_copybreak < priv->min_tx_copybreak)
+		priv->tx_copybreak = priv->min_tx_copybreak;
+	if (handle->kinfo.tx_spare_buf_size < priv->min_tx_spare_buf_size)
+		handle->kinfo.tx_spare_buf_size = priv->min_tx_spare_buf_size;
+}
+
 static void hns3_ring_get_cfg(struct hnae3_queue *q, struct hns3_nic_priv *priv,
 			      unsigned int ring_type)
 {
···
 	int i, j;
 	int ret;

+	hns3_update_tx_spare_buf_config(priv);
 	for (i = 0; i < ring_num; i++) {
 		ret = hns3_alloc_ring_memory(&priv->ring[i]);
 		if (ret) {
···
 	priv->ae_handle = handle;
 	priv->tx_timeout_count = 0;
 	priv->max_non_tso_bd_num = ae_dev->dev_specs.max_non_tso_bd_num;
+	priv->min_tx_copybreak = 0;
+	priv->min_tx_spare_buf_size = 0;
 	set_bit(HNS3_NIC_STATE_DOWN, &priv->state);

 	handle->msg_enable = netif_msg_init(debug, DEFAULT_MSG_LEVEL);
+2
drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
···
 	struct hns3_enet_coalesce rx_coal;
 	u32 tx_copybreak;
 	u32 rx_copybreak;
+	u32 min_tx_copybreak;
+	u32 min_tx_spare_buf_size;
 };

 union l3_hdr_info {
+21 -15
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
···
 	return false;
 }

-int hclge_enable_vport_vlan_filter(struct hclge_vport *vport, bool request_en)
+static int __hclge_enable_vport_vlan_filter(struct hclge_vport *vport,
+					    bool request_en)
 {
-	struct hclge_dev *hdev = vport->back;
 	bool need_en;
 	int ret;

-	mutex_lock(&hdev->vport_lock);
-
-	vport->req_vlan_fltr_en = request_en;
-
 	need_en = hclge_need_enable_vport_vlan_filter(vport);
-	if (need_en == vport->cur_vlan_fltr_en) {
-		mutex_unlock(&hdev->vport_lock);
+	if (need_en == vport->cur_vlan_fltr_en)
 		return 0;
-	}

 	ret = hclge_set_vport_vlan_filter(vport, need_en);
-	if (ret) {
-		mutex_unlock(&hdev->vport_lock);
+	if (ret)
 		return ret;
-	}

 	vport->cur_vlan_fltr_en = need_en;

+	return 0;
+}
+
+int hclge_enable_vport_vlan_filter(struct hclge_vport *vport, bool request_en)
+{
+	struct hclge_dev *hdev = vport->back;
+	int ret;
+
+	mutex_lock(&hdev->vport_lock);
+	vport->req_vlan_fltr_en = request_en;
+	ret = __hclge_enable_vport_vlan_filter(vport, request_en);
 	mutex_unlock(&hdev->vport_lock);

-	return 0;
+	return ret;
 }

 static int hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
···
 				       &vport->state))
 			continue;

-		ret = hclge_enable_vport_vlan_filter(vport,
-						     vport->req_vlan_fltr_en);
+		mutex_lock(&hdev->vport_lock);
+		ret = __hclge_enable_vport_vlan_filter(vport,
+						       vport->req_vlan_fltr_en);
 		if (ret) {
 			dev_err(&hdev->pdev->dev,
 				"failed to sync vlan filter state for vport%u, ret = %d\n",
 				vport->vport_id, ret);
 			set_bit(HCLGE_VPORT_STATE_VLAN_FLTR_CHANGE,
 				&vport->state);
+			mutex_unlock(&hdev->vport_lock);
 			return;
 		}
+		mutex_unlock(&hdev->vport_lock);
 	}
 }
+6 -3
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c
···
 	if (ret) {
 		dev_err(&hdev->pdev->dev,
 			"failed to init freq, ret = %d\n", ret);
-		goto out;
+		goto out_clear_int;
 	}

 	ret = hclge_ptp_set_ts_mode(hdev, &hdev->ptp->ts_cfg);
 	if (ret) {
 		dev_err(&hdev->pdev->dev,
 			"failed to init ts mode, ret = %d\n", ret);
-		goto out;
+		goto out_clear_int;
 	}

 	ktime_get_real_ts64(&ts);
···
 	if (ret) {
 		dev_err(&hdev->pdev->dev,
 			"failed to init ts time, ret = %d\n", ret);
-		goto out;
+		goto out_clear_int;
 	}

 	set_bit(HCLGE_STATE_PTP_EN, &hdev->state);
···
 	return 0;

+out_clear_int:
+	clear_bit(HCLGE_PTP_FLAG_EN, &hdev->ptp->flags);
+	hclge_ptp_int_en(hdev, false);
out:
 	hclge_ptp_destroy_clock(hdev);
+1 -5
drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
···
 static u32 hclgevf_get_max_channels(struct hclgevf_dev *hdev)
 {
-	struct hnae3_handle *nic = &hdev->nic;
-	struct hnae3_knic_private_info *kinfo = &nic->kinfo;
-
-	return min_t(u32, hdev->rss_size_max,
-		     hdev->num_tqps / kinfo->tc_info.num_tc);
+	return min(hdev->rss_size_max, hdev->num_tqps);
 }

 /**
+3
drivers/net/ethernet/intel/e1000e/defines.h
···
 /* For checksumming, the sum of all words in the NVM should equal 0xBABA. */
 #define NVM_SUM				0xBABA

+/* Uninitialized ("empty") checksum word value */
+#define NVM_CHECKSUM_UNINITIALIZED	0xFFFF
+
 /* PBA (printed board assembly) number words */
 #define NVM_PBA_OFFSET_0	8
 #define NVM_PBA_OFFSET_1	9
+2
drivers/net/ethernet/intel/e1000e/ich8lan.c
···
 		ret_val = e1000e_update_nvm_checksum(hw);
 		if (ret_val)
 			return ret_val;
+	} else if (hw->mac.type == e1000_pch_tgp) {
+		return 0;
 	}
 }

+6
drivers/net/ethernet/intel/e1000e/nvm.c
···
 		checksum += nvm_data;
 	}

+	if (hw->mac.type == e1000_pch_tgp &&
+	    nvm_data == NVM_CHECKSUM_UNINITIALIZED) {
+		e_dbg("Uninitialized NVM Checksum on TGP platform - ignoring\n");
+		return 0;
+	}
+
 	if (checksum != (u16)NVM_SUM) {
 		e_dbg("NVM Checksum Invalid\n");
 		return -E1000_ERR_NVM;
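Aside: the validation sums all NVM words and expects NVM_SUM (0xBABA); the TGP exception treats an erased checksum word (0xFFFF) as "uninitialized" rather than corrupt, since the NVM on those parts may ship unprogrammed. A standalone sketch of that check, with a plain word array standing in for the EEPROM reads (the 0x3F checksum-word offset is an assumption for illustration):

	#include <stdint.h>
	#include <stdio.h>

	#define NVM_SUM                    0xBABAu
	#define NVM_CHECKSUM_UNINITIALIZED 0xFFFFu
	#define NVM_CHECKSUM_REG           0x3F	/* last word holds the checksum */

	/* Returns 0 when the image is valid, or tolerated on TGP-like parts. */
	static int validate_nvm(const uint16_t *words, int is_tgp)
	{
		uint16_t sum = 0;
		int i;

		for (i = 0; i <= NVM_CHECKSUM_REG; i++)
			sum += words[i];

		/* Erased checksum word: tolerated on TGP, where the driver
		 * cannot rewrite the NVM and it may ship uninitialized. */
		if (is_tgp && words[NVM_CHECKSUM_REG] == NVM_CHECKSUM_UNINITIALIZED)
			return 0;

		return sum == NVM_SUM ? 0 : -1;
	}

	int main(void)
	{
		uint16_t nvm[NVM_CHECKSUM_REG + 1] = {0};
		uint16_t partial = 0;
		int i;

		/* Fix up the checksum word so the words sum to 0xBABA. */
		nvm[0] = 0x1234;
		for (i = 0; i < NVM_CHECKSUM_REG; i++)
			partial += nvm[i];
		nvm[NVM_CHECKSUM_REG] = (uint16_t)(NVM_SUM - partial);

		printf("valid image: %d\n", validate_nvm(nvm, 0));	/* 0 */

		nvm[NVM_CHECKSUM_REG] = 0xFFFF;				/* erased */
		printf("erased, non-TGP: %d\n", validate_nvm(nvm, 0));	/* -1 */
		printf("erased, TGP: %d\n", validate_nvm(nvm, 1));	/* 0 */
		return 0;
	}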
+3 -3
drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
···
 		const u8 *addr = al->list[i].addr;

 		/* Allow to delete VF primary MAC only if it was not set
-		 * administratively by PF or if VF is trusted.
+		 * administratively by PF.
 		 */
 		if (ether_addr_equal(addr, vf->default_lan_addr.addr)) {
-			if (i40e_can_vf_change_mac(vf))
+			if (!vf->pf_set_mac)
 				was_unimac_deleted = true;
 			else
 				continue;
···
 	vf_stats->broadcast = stats->rx_broadcast;
 	vf_stats->multicast = stats->rx_multicast;
 	vf_stats->rx_dropped = stats->rx_discards + stats->rx_discards_other;
-	vf_stats->tx_dropped = stats->tx_discards;
+	vf_stats->tx_dropped = stats->tx_errors;

 	return 0;
 }
+2
drivers/net/ethernet/intel/ice/ice_ddp.c
···
 		return ICE_DDP_PKG_ERR;

 	buf_copy = devm_kmemdup(ice_hw_to_dev(hw), buf, len, GFP_KERNEL);
+	if (!buf_copy)
+		return ICE_DDP_PKG_ERR;

 	state = ice_init_pkg(hw, buf_copy, len);
 	if (!ice_is_init_pkg_successful(state)) {
+2 -2
drivers/net/ethernet/mellanox/mlx5/core/cmd.c
···
 	err = mlx5_cmd_invoke(dev, inb, outb, out, out_size, callback, context,
 			      pages_queue, token, force_polling);
-	if (callback)
-		return err;
+	if (callback && !err)
+		return 0;

 	if (err > 0) /* Failed in FW, command didn't execute */
 		err = deliv_status_to_err(err);
+54 -54
drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
···
 static int esw_add_fdb_peer_miss_rules(struct mlx5_eswitch *esw,
 				       struct mlx5_core_dev *peer_dev)
 {
+	struct mlx5_eswitch *peer_esw = peer_dev->priv.eswitch;
 	struct mlx5_flow_destination dest = {};
 	struct mlx5_flow_act flow_act = {0};
 	struct mlx5_flow_handle **flows;
-	/* total vports is the same for both e-switches */
-	int nvports = esw->total_vports;
 	struct mlx5_flow_handle *flow;
+	struct mlx5_vport *peer_vport;
 	struct mlx5_flow_spec *spec;
-	struct mlx5_vport *vport;
 	int err, pfindex;
 	unsigned long i;
 	void *misc;

-	if (!MLX5_VPORT_MANAGER(esw->dev) && !mlx5_core_is_ecpf_esw_manager(esw->dev))
+	if (!MLX5_VPORT_MANAGER(peer_dev) &&
+	    !mlx5_core_is_ecpf_esw_manager(peer_dev))
 		return 0;

 	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
···
 	peer_miss_rules_setup(esw, peer_dev, spec, &dest);

-	flows = kvcalloc(nvports, sizeof(*flows), GFP_KERNEL);
+	flows = kvcalloc(peer_esw->total_vports, sizeof(*flows), GFP_KERNEL);
 	if (!flows) {
 		err = -ENOMEM;
 		goto alloc_flows_err;
···
 	misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
 			    misc_parameters);

-	if (mlx5_core_is_ecpf_esw_manager(esw->dev)) {
-		vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_PF);
-		esw_set_peer_miss_rule_source_port(esw, peer_dev->priv.eswitch,
-						   spec, MLX5_VPORT_PF);
+	if (mlx5_core_is_ecpf_esw_manager(peer_dev)) {
+		peer_vport = mlx5_eswitch_get_vport(peer_esw, MLX5_VPORT_PF);
+		esw_set_peer_miss_rule_source_port(esw, peer_esw, spec,
+						   MLX5_VPORT_PF);

 		flow = mlx5_add_flow_rules(mlx5_eswitch_get_slow_fdb(esw),
 					   spec, &flow_act, &dest, 1);
 		if (IS_ERR(flow)) {
 			err = PTR_ERR(flow);
 			goto add_pf_flow_err;
 		}
-		flows[vport->index] = flow;
+		flows[peer_vport->index] = flow;
 	}

-	if (mlx5_ecpf_vport_exists(esw->dev)) {
-		vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_ECPF);
+	if (mlx5_ecpf_vport_exists(peer_dev)) {
+		peer_vport = mlx5_eswitch_get_vport(peer_esw, MLX5_VPORT_ECPF);
 		MLX5_SET(fte_match_set_misc, misc, source_port, MLX5_VPORT_ECPF);
 		flow = mlx5_add_flow_rules(mlx5_eswitch_get_slow_fdb(esw),
 					   spec, &flow_act, &dest, 1);
 		if (IS_ERR(flow)) {
 			err = PTR_ERR(flow);
 			goto add_ecpf_flow_err;
 		}
-		flows[vport->index] = flow;
+		flows[peer_vport->index] = flow;
 	}

-	mlx5_esw_for_each_vf_vport(esw, i, vport, mlx5_core_max_vfs(esw->dev)) {
+	mlx5_esw_for_each_vf_vport(peer_esw, i, peer_vport,
+				   mlx5_core_max_vfs(peer_dev)) {
 		esw_set_peer_miss_rule_source_port(esw,
-						   peer_dev->priv.eswitch,
-						   spec, vport->vport);
+						   peer_esw,
+						   spec, peer_vport->vport);

 		flow = mlx5_add_flow_rules(mlx5_eswitch_get_slow_fdb(esw),
 					   spec, &flow_act, &dest, 1);
 		if (IS_ERR(flow)) {
 			err = PTR_ERR(flow);
 			goto add_vf_flow_err;
 		}
-		flows[vport->index] = flow;
+		flows[peer_vport->index] = flow;
 	}

-	if (mlx5_core_ec_sriov_enabled(esw->dev)) {
-		mlx5_esw_for_each_ec_vf_vport(esw, i, vport, mlx5_core_max_ec_vfs(esw->dev)) {
-			if (i >= mlx5_core_max_ec_vfs(peer_dev))
-				break;
-			esw_set_peer_miss_rule_source_port(esw, peer_dev->priv.eswitch,
-							   spec, vport->vport);
+	if (mlx5_core_ec_sriov_enabled(peer_dev)) {
+		mlx5_esw_for_each_ec_vf_vport(peer_esw, i, peer_vport,
+					      mlx5_core_max_ec_vfs(peer_dev)) {
+			esw_set_peer_miss_rule_source_port(esw, peer_esw,
+							   spec,
+							   peer_vport->vport);
 			flow = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
 						   spec, &flow_act, &dest, 1);
 			if (IS_ERR(flow)) {
 				err = PTR_ERR(flow);
 				goto add_ec_vf_flow_err;
 			}
-			flows[vport->index] = flow;
+			flows[peer_vport->index] = flow;
 		}
 	}
···
 	return 0;

add_ec_vf_flow_err:
-	mlx5_esw_for_each_ec_vf_vport(esw, i, vport, mlx5_core_max_ec_vfs(esw->dev)) {
-		if (!flows[vport->index])
+	mlx5_esw_for_each_ec_vf_vport(peer_esw, i, peer_vport,
+				      mlx5_core_max_ec_vfs(peer_dev)) {
+		if (!flows[peer_vport->index])
 			continue;
-		mlx5_del_flow_rules(flows[vport->index]);
+		mlx5_del_flow_rules(flows[peer_vport->index]);
 	}
add_vf_flow_err:
-	mlx5_esw_for_each_vf_vport(esw, i, vport, mlx5_core_max_vfs(esw->dev)) {
-		if (!flows[vport->index])
+	mlx5_esw_for_each_vf_vport(peer_esw, i, peer_vport,
+				   mlx5_core_max_vfs(peer_dev)) {
+		if (!flows[peer_vport->index])
 			continue;
-		mlx5_del_flow_rules(flows[vport->index]);
+		mlx5_del_flow_rules(flows[peer_vport->index]);
 	}
-	if (mlx5_ecpf_vport_exists(esw->dev)) {
-		vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_ECPF);
-		mlx5_del_flow_rules(flows[vport->index]);
+	if (mlx5_ecpf_vport_exists(peer_dev)) {
+		peer_vport = mlx5_eswitch_get_vport(peer_esw, MLX5_VPORT_ECPF);
+		mlx5_del_flow_rules(flows[peer_vport->index]);
 	}
add_ecpf_flow_err:
-	if (mlx5_core_is_ecpf_esw_manager(esw->dev)) {
-		vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_PF);
-		mlx5_del_flow_rules(flows[vport->index]);
+	if (mlx5_core_is_ecpf_esw_manager(peer_dev)) {
+		peer_vport = mlx5_eswitch_get_vport(peer_esw, MLX5_VPORT_PF);
+		mlx5_del_flow_rules(flows[peer_vport->index]);
 	}
add_pf_flow_err:
 	esw_warn(esw->dev, "FDB: Failed to add peer miss flow rule err %d\n", err);
···
static void esw_del_fdb_peer_miss_rules(struct mlx5_eswitch *esw,
					struct mlx5_core_dev *peer_dev)
 {
+	struct mlx5_eswitch *peer_esw = peer_dev->priv.eswitch;
 	u16 peer_index = mlx5_get_dev_index(peer_dev);
 	struct mlx5_flow_handle **flows;
-	struct mlx5_vport *vport;
+	struct mlx5_vport *peer_vport;
 	unsigned long i;

 	flows = esw->fdb_table.offloads.peer_miss_rules[peer_index];
 	if (!flows)
 		return;

-	if (mlx5_core_ec_sriov_enabled(esw->dev)) {
-		mlx5_esw_for_each_ec_vf_vport(esw, i, vport, mlx5_core_max_ec_vfs(esw->dev)) {
-			/* The flow for a particular vport could be NULL if the other ECPF
-			 * has fewer or no VFs enabled
-			 */
-			if (!flows[vport->index])
-				continue;
-			mlx5_del_flow_rules(flows[vport->index]);
-		}
+	if (mlx5_core_ec_sriov_enabled(peer_dev)) {
+		mlx5_esw_for_each_ec_vf_vport(peer_esw, i, peer_vport,
+					      mlx5_core_max_ec_vfs(peer_dev))
+			mlx5_del_flow_rules(flows[peer_vport->index]);
 	}

-	mlx5_esw_for_each_vf_vport(esw, i, vport, mlx5_core_max_vfs(esw->dev))
-		mlx5_del_flow_rules(flows[vport->index]);
+	mlx5_esw_for_each_vf_vport(peer_esw, i, peer_vport,
+				   mlx5_core_max_vfs(peer_dev))
+		mlx5_del_flow_rules(flows[peer_vport->index]);

-	if (mlx5_ecpf_vport_exists(esw->dev)) {
-		vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_ECPF);
-		mlx5_del_flow_rules(flows[vport->index]);
+	if (mlx5_ecpf_vport_exists(peer_dev)) {
+		peer_vport = mlx5_eswitch_get_vport(peer_esw, MLX5_VPORT_ECPF);
+		mlx5_del_flow_rules(flows[peer_vport->index]);
 	}

-	if (mlx5_core_is_ecpf_esw_manager(esw->dev)) {
-		vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_PF);
-		mlx5_del_flow_rules(flows[vport->index]);
+	if (mlx5_core_is_ecpf_esw_manager(peer_dev)) {
+		peer_vport = mlx5_eswitch_get_vport(peer_esw, MLX5_VPORT_PF);
+		mlx5_del_flow_rules(flows[peer_vport->index]);
 	}

 	kvfree(flows);
+108 -54
drivers/net/ethernet/ti/icssg/icssg_config.c
···
 	int i;

 	addr = lower_32_bits(prueth->msmcram.pa);
-	if (slice)
-		addr += PRUETH_NUM_BUF_POOLS * PRUETH_EMAC_BUF_POOL_SIZE;
+	if (slice) {
+		if (prueth->pdata.banked_ms_ram)
+			addr += MSMC_RAM_BANK_SIZE;
+		else
+			addr += PRUETH_SW_TOTAL_BUF_SIZE_PER_SLICE;
+	}

 	if (addr % SZ_64K) {
 		dev_warn(prueth->dev, "buffer pool needs to be 64KB aligned\n");
···
 	}

 	bpool_cfg = emac->dram.va + BUFFER_POOL_0_ADDR_OFFSET;
-	/* workaround for f/w bug. bpool 0 needs to be initialized */
-	for (i = 0; i < PRUETH_NUM_BUF_POOLS; i++) {
+
+	/* Configure buffer pools for forwarding buffers
+	 * - used by firmware to store packets to be forwarded to other port
+	 * - 8 total pools per slice
+	 */
+	for (i = 0; i < PRUETH_NUM_FWD_BUF_POOLS_PER_SLICE; i++) {
 		writel(addr, &bpool_cfg[i].addr);
-		writel(PRUETH_EMAC_BUF_POOL_SIZE, &bpool_cfg[i].len);
-		addr += PRUETH_EMAC_BUF_POOL_SIZE;
+		writel(PRUETH_SW_FWD_BUF_POOL_SIZE, &bpool_cfg[i].len);
+		addr += PRUETH_SW_FWD_BUF_POOL_SIZE;
 	}

-	if (!slice)
-		addr += PRUETH_NUM_BUF_POOLS * PRUETH_EMAC_BUF_POOL_SIZE;
-	else
-		addr += PRUETH_SW_NUM_BUF_POOLS_HOST * PRUETH_SW_BUF_POOL_SIZE_HOST;
+	/* Configure buffer pools for Local Injection buffers
+	 * - used by firmware to store packets received from host core
+	 * - 16 total pools per slice
+	 */
+	for (i = 0; i < PRUETH_NUM_LI_BUF_POOLS_PER_SLICE; i++) {
+		int cfg_idx = i + PRUETH_NUM_FWD_BUF_POOLS_PER_SLICE;

-	for (i = PRUETH_NUM_BUF_POOLS;
-	     i < 2 * PRUETH_SW_NUM_BUF_POOLS_HOST + PRUETH_NUM_BUF_POOLS;
-	     i++) {
-		/* The driver only uses first 4 queues per PRU so only initialize them */
-		if (i % PRUETH_SW_NUM_BUF_POOLS_HOST < PRUETH_SW_NUM_BUF_POOLS_PER_PRU) {
-			writel(addr, &bpool_cfg[i].addr);
-			writel(PRUETH_SW_BUF_POOL_SIZE_HOST, &bpool_cfg[i].len);
-			addr += PRUETH_SW_BUF_POOL_SIZE_HOST;
+		/* The driver only uses first 4 queues per PRU,
+		 * so only initialize buffer for them
+		 */
+		if ((i % PRUETH_NUM_LI_BUF_POOLS_PER_PORT_PER_SLICE)
+		     < PRUETH_SW_USED_LI_BUF_POOLS_PER_PORT_PER_SLICE) {
+			writel(addr, &bpool_cfg[cfg_idx].addr);
+			writel(PRUETH_SW_LI_BUF_POOL_SIZE,
+			       &bpool_cfg[cfg_idx].len);
+			addr += PRUETH_SW_LI_BUF_POOL_SIZE;
 		} else {
-			writel(0, &bpool_cfg[i].addr);
-			writel(0, &bpool_cfg[i].len);
+			writel(0, &bpool_cfg[cfg_idx].addr);
+			writel(0, &bpool_cfg[cfg_idx].len);
 		}
 	}

-	if (!slice)
-		addr += PRUETH_SW_NUM_BUF_POOLS_HOST * PRUETH_SW_BUF_POOL_SIZE_HOST;
-	else
-		addr += PRUETH_EMAC_RX_CTX_BUF_SIZE;
+	/* Express RX buffer queue
+	 * - used by firmware to store express packets to be transmitted
+	 *   to the host core
+	 */
+	rxq_ctx = emac->dram.va + HOST_RX_Q_EXP_CONTEXT_OFFSET;
+	for (i = 0; i < 3; i++)
+		writel(addr, &rxq_ctx->start[i]);

+	addr += PRUETH_SW_HOST_EXP_BUF_POOL_SIZE;
+	writel(addr, &rxq_ctx->end);
+
+	/* Pre-emptible RX buffer queue
+	 * - used by firmware to store preemptible packets to be transmitted
+	 *   to the host core
+	 */
 	rxq_ctx = emac->dram.va + HOST_RX_Q_PRE_CONTEXT_OFFSET;
 	for (i = 0; i < 3; i++)
 		writel(addr, &rxq_ctx->start[i]);

-	addr += PRUETH_EMAC_RX_CTX_BUF_SIZE;
-	writel(addr - SZ_2K, &rxq_ctx->end);
+	addr += PRUETH_SW_HOST_PRE_BUF_POOL_SIZE;
+	writel(addr, &rxq_ctx->end);
+
+	/* Set pointer for default dropped packet write
+	 * - used by firmware to temporarily store packet to be dropped
+	 */
+	rxq_ctx = emac->dram.va + DEFAULT_MSMC_Q_OFFSET;
+	writel(addr, &rxq_ctx->start[0]);

 	return 0;
 }
···
 	u32 addr;
 	int i;

-	/* Layout to have 64KB aligned buffer pool
-	 * |BPOOL0|BPOOL1|RX_CTX0|RX_CTX1|
-	 */
-
 	addr = lower_32_bits(prueth->msmcram.pa);
-	if (slice)
-		addr += PRUETH_NUM_BUF_POOLS * PRUETH_EMAC_BUF_POOL_SIZE;
+	if (slice) {
+		if (prueth->pdata.banked_ms_ram)
+			addr += MSMC_RAM_BANK_SIZE;
+		else
+			addr += PRUETH_EMAC_TOTAL_BUF_SIZE_PER_SLICE;
+	}

 	if (addr % SZ_64K) {
 		dev_warn(prueth->dev, "buffer pool needs to be 64KB aligned\n");
···
 	}

 	bpool_cfg = emac->dram.va + BUFFER_POOL_0_ADDR_OFFSET;
-	/* workaround for f/w bug. bpool 0 needs to be initilalized */
-	writel(addr, &bpool_cfg[0].addr);
-	writel(0, &bpool_cfg[0].len);

-	for (i = PRUETH_EMAC_BUF_POOL_START;
-	     i < PRUETH_EMAC_BUF_POOL_START + PRUETH_NUM_BUF_POOLS;
-	     i++) {
-		writel(addr, &bpool_cfg[i].addr);
-		writel(PRUETH_EMAC_BUF_POOL_SIZE, &bpool_cfg[i].len);
-		addr += PRUETH_EMAC_BUF_POOL_SIZE;
+	/* Configure buffer pools for forwarding buffers
+	 * - in mac mode - no forwarding so initialize all pools to 0
+	 * - 8 total pools per slice
+	 */
+	for (i = 0; i < PRUETH_NUM_FWD_BUF_POOLS_PER_SLICE; i++) {
+		writel(0, &bpool_cfg[i].addr);
+		writel(0, &bpool_cfg[i].len);
 	}

-	if (!slice)
-		addr += PRUETH_NUM_BUF_POOLS * PRUETH_EMAC_BUF_POOL_SIZE;
-	else
-		addr += PRUETH_EMAC_RX_CTX_BUF_SIZE * 2;
+	/* Configure buffer pools for Local Injection buffers
+	 * - used by firmware to store packets received from host core
+	 * - 16 total pools per slice
+	 */
+	bpool_cfg = emac->dram.va + BUFFER_POOL_0_ADDR_OFFSET;
+	for (i = 0; i < PRUETH_NUM_LI_BUF_POOLS_PER_SLICE; i++) {
+		int cfg_idx = i + PRUETH_NUM_FWD_BUF_POOLS_PER_SLICE;

-	/* Pre-emptible RX buffer queue */
-	rxq_ctx = emac->dram.va + HOST_RX_Q_PRE_CONTEXT_OFFSET;
-	for (i = 0; i < 3; i++)
-		writel(addr, &rxq_ctx->start[i]);
+		/* In EMAC mode, only first 4 buffers are used,
+		 * as 1 slice needs to handle only 1 port
+		 */
+		if (i < PRUETH_EMAC_USED_LI_BUF_POOLS_PER_PORT_PER_SLICE) {
+			writel(addr, &bpool_cfg[cfg_idx].addr);
+			writel(PRUETH_EMAC_LI_BUF_POOL_SIZE,
+			       &bpool_cfg[cfg_idx].len);
+			addr += PRUETH_EMAC_LI_BUF_POOL_SIZE;
+		} else {
+			writel(0, &bpool_cfg[cfg_idx].addr);
+			writel(0, &bpool_cfg[cfg_idx].len);
+		}
+	}

-	addr += PRUETH_EMAC_RX_CTX_BUF_SIZE;
-	writel(addr, &rxq_ctx->end);
-
-	/* Express RX buffer queue */
+	/* Express RX buffer queue
+	 * - used by firmware to store express packets to be transmitted
+	 *   to host core
+	 */
 	rxq_ctx = emac->dram.va + HOST_RX_Q_EXP_CONTEXT_OFFSET;
 	for (i = 0; i < 3; i++)
 		writel(addr, &rxq_ctx->start[i]);

-	addr += PRUETH_EMAC_RX_CTX_BUF_SIZE;
+	addr += PRUETH_EMAC_HOST_EXP_BUF_POOL_SIZE;
 	writel(addr, &rxq_ctx->end);
+
+	/* Pre-emptible RX buffer queue
+	 * - used by firmware to store preemptible packets to be transmitted
+	 *   to host core
+	 */
+	rxq_ctx = emac->dram.va + HOST_RX_Q_PRE_CONTEXT_OFFSET;
+	for (i = 0; i < 3; i++)
+		writel(addr, &rxq_ctx->start[i]);
+
+	addr += PRUETH_EMAC_HOST_PRE_BUF_POOL_SIZE;
+	writel(addr, &rxq_ctx->end);
+
+	/* Set pointer for default dropped packet write
+	 * - used by firmware to temporarily store packet to be dropped
+	 */
+	rxq_ctx = emac->dram.va + DEFAULT_MSMC_Q_OFFSET;
+	writel(addr, &rxq_ctx->start[0]);

 	return 0;
 }
+64 -14
drivers/net/ethernet/ti/icssg/icssg_config.h
···
 #define PRUETH_MAX_RX_FLOWS	1	/* excluding default flow */
 #define PRUETH_RX_FLOW_DATA	0

-#define PRUETH_EMAC_BUF_POOL_SIZE	SZ_8K
-#define PRUETH_EMAC_POOLS_PER_SLICE	24
-#define PRUETH_EMAC_BUF_POOL_START	8
-#define PRUETH_NUM_BUF_POOLS	8
-#define PRUETH_EMAC_RX_CTX_BUF_SIZE	SZ_16K	/* per slice */
-#define MSMC_RAM_SIZE \
-	(2 * (PRUETH_EMAC_BUF_POOL_SIZE * PRUETH_NUM_BUF_POOLS + \
-	      PRUETH_EMAC_RX_CTX_BUF_SIZE * 2))
+/* Defines for forwarding path buffer pools:
+ *   - used by firmware to store packets to be forwarded to other port
+ *   - 8 total pools per slice
+ *   - only used in switch mode (as no forwarding in mac mode)
+ */
+#define PRUETH_NUM_FWD_BUF_POOLS_PER_SLICE	8
+#define PRUETH_SW_FWD_BUF_POOL_SIZE		(SZ_8K)

-#define PRUETH_SW_BUF_POOL_SIZE_HOST	SZ_4K
-#define PRUETH_SW_NUM_BUF_POOLS_HOST	8
-#define PRUETH_SW_NUM_BUF_POOLS_PER_PRU	4
-#define MSMC_RAM_SIZE_SWITCH_MODE \
-	(MSMC_RAM_SIZE + \
-	 (2 * PRUETH_SW_BUF_POOL_SIZE_HOST * PRUETH_SW_NUM_BUF_POOLS_HOST))
+/* Defines for local injection path buffer pools:
+ *   - used by firmware to store packets received from host core
+ *   - 16 total pools per slice
+ *   - 8 pools per port per slice and each slice handles both ports
+ *   - only 4 out of 8 pools used per port (as only 4 real QoS levels in ICSSG)
+ *   - switch mode: 8 total pools used
+ *   - mac mode: 4 total pools used
+ */
+#define PRUETH_NUM_LI_BUF_POOLS_PER_SLICE		16
+#define PRUETH_NUM_LI_BUF_POOLS_PER_PORT_PER_SLICE	8
+#define PRUETH_SW_LI_BUF_POOL_SIZE			SZ_4K
+#define PRUETH_SW_USED_LI_BUF_POOLS_PER_SLICE		8
+#define PRUETH_SW_USED_LI_BUF_POOLS_PER_PORT_PER_SLICE	4
+#define PRUETH_EMAC_LI_BUF_POOL_SIZE			SZ_8K
+#define PRUETH_EMAC_USED_LI_BUF_POOLS_PER_SLICE		4
+#define PRUETH_EMAC_USED_LI_BUF_POOLS_PER_PORT_PER_SLICE 4
+
+/* Defines for host egress path - express and preemptible buffers
+ *   - used by firmware to store express and preemptible packets
+ *     to be transmitted to host core
+ *   - used by both mac/switch modes
+ */
+#define PRUETH_SW_HOST_EXP_BUF_POOL_SIZE	SZ_16K
+#define PRUETH_SW_HOST_PRE_BUF_POOL_SIZE	(SZ_16K - SZ_2K)
+#define PRUETH_EMAC_HOST_EXP_BUF_POOL_SIZE	PRUETH_SW_HOST_EXP_BUF_POOL_SIZE
+#define PRUETH_EMAC_HOST_PRE_BUF_POOL_SIZE	PRUETH_SW_HOST_PRE_BUF_POOL_SIZE
+
+/* Buffer used by firmware to temporarily store packet to be dropped */
+#define PRUETH_SW_DROP_PKT_BUF_SIZE	SZ_2K
+#define PRUETH_EMAC_DROP_PKT_BUF_SIZE	PRUETH_SW_DROP_PKT_BUF_SIZE
+
+/* Total switch mode memory usage for buffers per slice */
+#define PRUETH_SW_TOTAL_BUF_SIZE_PER_SLICE \
+	(PRUETH_SW_FWD_BUF_POOL_SIZE * PRUETH_NUM_FWD_BUF_POOLS_PER_SLICE + \
+	 PRUETH_SW_LI_BUF_POOL_SIZE * PRUETH_SW_USED_LI_BUF_POOLS_PER_SLICE + \
+	 PRUETH_SW_HOST_EXP_BUF_POOL_SIZE + \
+	 PRUETH_SW_HOST_PRE_BUF_POOL_SIZE + \
+	 PRUETH_SW_DROP_PKT_BUF_SIZE)
+
+/* Total switch mode memory usage for all buffers */
+#define PRUETH_SW_TOTAL_BUF_SIZE \
+	(2 * PRUETH_SW_TOTAL_BUF_SIZE_PER_SLICE)
+
+/* Total mac mode memory usage for buffers per slice */
+#define PRUETH_EMAC_TOTAL_BUF_SIZE_PER_SLICE \
+	(PRUETH_EMAC_LI_BUF_POOL_SIZE * \
+	 PRUETH_EMAC_USED_LI_BUF_POOLS_PER_SLICE + \
+	 PRUETH_EMAC_HOST_EXP_BUF_POOL_SIZE + \
+	 PRUETH_EMAC_HOST_PRE_BUF_POOL_SIZE + \
+	 PRUETH_EMAC_DROP_PKT_BUF_SIZE)
+
+/* Total mac mode memory usage for all buffers */
+#define PRUETH_EMAC_TOTAL_BUF_SIZE \
+	(2 * PRUETH_EMAC_TOTAL_BUF_SIZE_PER_SLICE)
+
+/* Size of 1 bank of MSMC/OC_SRAM memory */
+#define MSMC_RAM_BANK_SIZE	SZ_256K

 #define PRUETH_SWITCH_FDB_MASK ((SIZE_OF_FDB / NUMBER_OF_FDB_BUCKET_ENTRIES) - 1)
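Aside: the new totals can be checked by hand. Switch mode per slice: 8 forwarding pools of 8 KB (64 KB) + 8 used LI pools of 4 KB (32 KB) + 16 KB express queue + 14 KB (16K - 2K) preemptible queue + 2 KB drop buffer = 128 KB; EMAC mode per slice: 4 LI pools of 8 KB (32 KB) + the same 16 + 14 + 2 KB = 64 KB. A tiny C check of that arithmetic:

	#include <stdio.h>

	#define SZ_2K  (2 * 1024)
	#define SZ_4K  (4 * 1024)
	#define SZ_8K  (8 * 1024)
	#define SZ_16K (16 * 1024)

	int main(void)
	{
		/* Per-slice totals recomputed from the icssg_config.h parts. */
		int sw = 8 * SZ_8K		/* forwarding pools */
		       + 8 * SZ_4K		/* used local-injection pools */
		       + SZ_16K			/* express host egress queue */
		       + (SZ_16K - SZ_2K)	/* preemptible host egress queue */
		       + SZ_2K;			/* drop-packet buffer */

		int emac = 4 * SZ_8K		/* used local-injection pools */
			 + SZ_16K
			 + (SZ_16K - SZ_2K)
			 + SZ_2K;

		printf("switch: %d KB/slice, emac: %d KB/slice\n",
		       sw / 1024, emac / 1024);
		/* -> switch: 128 KB/slice, emac: 64 KB/slice; two slices of
		 * either fit in one 256 KB MSMC bank. */
		return 0;
	}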
+14 -6
drivers/net/ethernet/ti/icssg/icssg_prueth.c
···
 		goto put_mem;
 	}

-	msmc_ram_size = MSMC_RAM_SIZE;
 	prueth->is_switchmode_supported = prueth->pdata.switch_mode;
-	if (prueth->is_switchmode_supported)
-		msmc_ram_size = MSMC_RAM_SIZE_SWITCH_MODE;
+	if (prueth->pdata.banked_ms_ram) {
+		/* Reserve 2 MSMC RAM banks for buffers to avoid arbitration */
+		msmc_ram_size = (2 * MSMC_RAM_BANK_SIZE);
+	} else {
+		msmc_ram_size = PRUETH_EMAC_TOTAL_BUF_SIZE;
+		if (prueth->is_switchmode_supported)
+			msmc_ram_size = PRUETH_SW_TOTAL_BUF_SIZE;
+	}

 	/* NOTE: FW bug needs buffer base to be 64KB aligned */
 	prueth->msmcram.va =
···
free_pool:
 	gen_pool_free(prueth->sram_pool,
-		      (unsigned long)prueth->msmcram.va, msmc_ram_size);
+		      (unsigned long)prueth->msmcram.va,
+		      prueth->msmcram.size);

put_mem:
 	pruss_release_mem_region(prueth->pruss, &prueth->shram);
···
 	icss_iep_put(prueth->iep0);

 	gen_pool_free(prueth->sram_pool,
-		      (unsigned long)prueth->msmcram.va,
-		      MSMC_RAM_SIZE);
+		      (unsigned long)prueth->msmcram.va,
+		      prueth->msmcram.size);

 	pruss_release_mem_region(prueth->pruss, &prueth->shram);
···
 	.fdqring_mode = K3_RINGACC_RING_MODE_MESSAGE,
 	.quirk_10m_link_issue = 1,
 	.switch_mode = 1,
+	.banked_ms_ram = 0,
 };

 static const struct prueth_pdata am64x_icssg_pdata = {
 	.fdqring_mode = K3_RINGACC_RING_MODE_RING,
 	.quirk_10m_link_issue = 1,
 	.switch_mode = 1,
+	.banked_ms_ram = 1,
 };

 static const struct of_device_id prueth_dt_match[] = {
+2
drivers/net/ethernet/ti/icssg/icssg_prueth.h
···
 * @fdqring_mode: Free desc queue mode
 * @quirk_10m_link_issue: 10M link detect errata
 * @switch_mode: switch firmware support
+ * @banked_ms_ram: banked memory support
 */
struct prueth_pdata {
	enum k3_ring_mode fdqring_mode;
	u32 quirk_10m_link_issue:1;
	u32 switch_mode:1;
+	u32 banked_ms_ram:1;
};

struct icssg_firmwares {
+3
drivers/net/ethernet/ti/icssg/icssg_switch_map.h
···
 /* Used to notify the FW of the current link speed */
 #define PORT_LINK_SPEED_OFFSET			0x00A8

+/* 2k memory pointer reserved for default writes by PRU0 */
+#define DEFAULT_MSMC_Q_OFFSET			0x00AC
+
 /* TAS gate mask for windows list0 */
 #define TAS_GATE_MASK_LIST0			0x0100
+3
drivers/s390/net/ism_drv.c
···
 	struct ism_req_hdr *req = cmd;
 	struct ism_resp_hdr *resp = cmd;

+	spin_lock(&ism->cmd_lock);
 	__ism_write_cmd(ism, req + 1, sizeof(*req), req->len - sizeof(*req));
 	__ism_write_cmd(ism, req, 0, sizeof(*req));
···
 	}
 	__ism_read_cmd(ism, resp + 1, sizeof(*resp), resp->len - sizeof(*resp));
out:
+	spin_unlock(&ism->cmd_lock);
 	return resp->ret;
 }
···
 		return -ENOMEM;

 	spin_lock_init(&ism->lock);
+	spin_lock_init(&ism->cmd_lock);
 	dev_set_drvdata(&pdev->dev, ism);
 	ism->pdev = pdev;
 	ism->dev.parent = &pdev->dev;
+1
include/linux/ism.h
···
 struct ism_dev {
 	spinlock_t lock; /* protects the ism device */
+	spinlock_t cmd_lock; /* serializes cmds */
 	struct list_head list;
 	struct pci_dev *pdev;
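Aside: the ism fix serializes a multi-step write/poll/read command window with a dedicated spinlock, so two CPUs cannot interleave their request/response exchanges. A userspace sketch of the same shape with pthreads (the struct and helpers are hypothetical stand-ins, not the s390 ISM API):

	#include <pthread.h>
	#include <stdio.h>

	/* Hypothetical device: one shared command window, like the ISM mailbox. */
	struct dev {
		pthread_spinlock_t cmd_lock;	/* serializes cmds */
		int window;			/* request in, response out */
	};

	static int dev_cmd(struct dev *d, int req)
	{
		int resp;

		/* Without the lock, a second caller could overwrite the
		 * window between our write and our read - the race the
		 * ism_cmd() fix closes. */
		pthread_spin_lock(&d->cmd_lock);
		d->window = req;	/* plays the role of __ism_write_cmd() */
		resp = d->window + 1;	/* plays the role of poll + __ism_read_cmd() */
		pthread_spin_unlock(&d->cmd_lock);

		return resp;
	}

	int main(void)
	{
		struct dev d = { .window = 0 };

		pthread_spin_init(&d.cmd_lock, PTHREAD_PROCESS_PRIVATE);
		printf("resp=%d\n", dev_cmd(&d, 41));
		pthread_spin_destroy(&d.cmd_lock);
		return 0;
	}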
+4 -11
include/net/xfrm.h
···
 int xfrm_input_unregister_afinfo(const struct xfrm_input_afinfo *afinfo);

 void xfrm_flush_gc(void);
-void xfrm_state_delete_tunnel(struct xfrm_state *x);

 struct xfrm_type {
 	struct module *owner;
···
 int xfrm_register_type_offload(const struct xfrm_type_offload *type, unsigned short family);
 void xfrm_unregister_type_offload(const struct xfrm_type_offload *type, unsigned short family);
-void xfrm_set_type_offload(struct xfrm_state *x);
+void xfrm_set_type_offload(struct xfrm_state *x, bool try_load);
 static inline void xfrm_unset_type_offload(struct xfrm_state *x)
 {
 	if (!x->type_offload)
···
 		xfrm_pol_put(pols[i]);
 }

-void __xfrm_state_destroy(struct xfrm_state *, bool);
+void __xfrm_state_destroy(struct xfrm_state *);

 static inline void __xfrm_state_put(struct xfrm_state *x)
 {
···
 static inline void xfrm_state_put(struct xfrm_state *x)
 {
 	if (refcount_dec_and_test(&x->refcnt))
-		__xfrm_state_destroy(x, false);
-}
-
-static inline void xfrm_state_put_sync(struct xfrm_state *x)
-{
-	if (refcount_dec_and_test(&x->refcnt))
-		__xfrm_state_destroy(x, true);
+		__xfrm_state_destroy(x);
 }

 static inline void xfrm_state_hold(struct xfrm_state *x)
···
 struct xfrm_state *xfrm_find_acq_byseq(struct net *net, u32 mark, u32 seq, u32 pcpu_num);
 int xfrm_state_delete(struct xfrm_state *x);
-int xfrm_state_flush(struct net *net, u8 proto, bool task_valid, bool sync);
+int xfrm_state_flush(struct net *net, u8 proto, bool task_valid);
 int xfrm_dev_state_flush(struct net *net, struct net_device *dev, bool task_valid);
 int xfrm_dev_policy_flush(struct net *net, struct net_device *dev,
 			  bool task_valid);
+21 -3
net/appletalk/aarp.c
···
 #include <linux/seq_file.h>
 #include <linux/export.h>
 #include <linux/etherdevice.h>
+#include <linux/refcount.h>

 int sysctl_aarp_expiry_time = AARP_EXPIRY_TIME;
 int sysctl_aarp_tick_time = AARP_TICK_TIME;
···
 /* Lists of aarp entries */
 /**
 * struct aarp_entry - AARP entry
+ * @refcnt: Reference count
 * @last_sent: Last time we xmitted the aarp request
 * @packet_queue: Queue of frames wait for resolution
 * @status: Used for proxy AARP
···
 * @next: Next entry in chain
 */
struct aarp_entry {
+	refcount_t refcnt;
 	/* These first two are only used for unresolved entries */
 	unsigned long last_sent;
 	struct sk_buff_head packet_queue;
···
 /* Used to walk the list and purge/kick entries. */
 static struct timer_list aarp_timer;

+static inline void aarp_entry_get(struct aarp_entry *a)
+{
+	refcount_inc(&a->refcnt);
+}
+
+static inline void aarp_entry_put(struct aarp_entry *a)
+{
+	if (refcount_dec_and_test(&a->refcnt))
+		kfree(a);
+}
+
 /*
 *	Delete an aarp queue
 *
···
 static void __aarp_expire(struct aarp_entry *a)
 {
 	skb_queue_purge(&a->packet_queue);
-	kfree(a);
+	aarp_entry_put(a);
 }

 /*
···
 static struct aarp_entry *aarp_alloc(void)
 {
 	struct aarp_entry *a = kmalloc(sizeof(*a), GFP_ATOMIC);
+	if (!a)
+		return NULL;

-	if (a)
-		skb_queue_head_init(&a->packet_queue);
+	refcount_set(&a->refcnt, 1);
+	skb_queue_head_init(&a->packet_queue);
 	return a;
 }
···
 	entry->dev = atif->dev;

 	write_lock_bh(&aarp_lock);
+	aarp_entry_get(entry);

 	hash = sa->s_node % (AARP_HASH_SIZE - 1);
 	entry->next = proxies[hash];
···
 		retval = 1;
 	}

+	aarp_entry_put(entry);
 	write_unlock_bh(&aarp_lock);
out:
 	return retval;
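Aside: the use-after-free is fixed by making the entry refcounted, so an expiry path can no longer free it out from under a sleeping user. The kernel uses refcount_t; the same pattern in C11 atomics, with a hypothetical entry type (not the AppleTalk structures):

	#include <stdatomic.h>
	#include <stdio.h>
	#include <stdlib.h>

	struct entry {
		atomic_int refcnt;
		int data;
	};

	static struct entry *entry_alloc(void)
	{
		struct entry *e = malloc(sizeof(*e));
		if (!e)
			return NULL;
		atomic_init(&e->refcnt, 1);	/* caller's reference */
		return e;
	}

	static void entry_get(struct entry *e)
	{
		atomic_fetch_add(&e->refcnt, 1);
	}

	static void entry_put(struct entry *e)
	{
		/* Free only when the last reference drops; a user that
		 * took its own reference keeps the entry alive past the
		 * table's expiry - the shape of the AARP fix. */
		if (atomic_fetch_sub(&e->refcnt, 1) == 1)
			free(e);
	}

	int main(void)
	{
		struct entry *e = entry_alloc();

		entry_get(e);	/* long-running user (the probe) pins it */
		entry_put(e);	/* expiry: no longer frees it */
		entry_put(e);	/* probe done: now it is actually freed */
		return 0;
	}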
+2
net/ipv4/ipcomp.c
···
 }

 /* We always hold one tunnel user reference to indicate a tunnel */
+static struct lock_class_key xfrm_state_lock_key;
 static struct xfrm_state *ipcomp_tunnel_create(struct xfrm_state *x)
 {
 	struct net *net = xs_net(x);
···
 	t = xfrm_state_alloc(net);
 	if (!t)
 		goto out;
+	lockdep_set_class(&t->lock, &xfrm_state_lock_key);

 	t->id.proto = IPPROTO_IPIP;
 	t->id.spi = x->props.saddr.a4;
+3
net/ipv4/xfrm4_input.c
···
 	if (len <= sizeof(struct ip_esp_hdr) || udpdata32[0] == 0)
 		goto out;

+	/* set the transport header to ESP */
+	skb_set_transport_header(skb, offset);
+
 	NAPI_GRO_CB(skb)->proto = IPPROTO_UDP;

 	pp = call_gro_receive(ops->callbacks.gro_receive, head, skb);
+2
net/ipv6/ipcomp6.c
···
 	return 0;
 }

+static struct lock_class_key xfrm_state_lock_key;
 static struct xfrm_state *ipcomp6_tunnel_create(struct xfrm_state *x)
 {
 	struct net *net = xs_net(x);
···
 	t = xfrm_state_alloc(net);
 	if (!t)
 		goto out;
+	lockdep_set_class(&t->lock, &xfrm_state_lock_key);

 	t->id.proto = IPPROTO_IPV6;
 	t->id.spi = xfrm6_tunnel_alloc_spi(net, (xfrm_address_t *)&x->props.saddr);
+3
net/ipv6/xfrm6_input.c
···
 	if (len <= sizeof(struct ip_esp_hdr) || udpdata32[0] == 0)
 		goto out;

+	/* set the transport header to ESP */
+	skb_set_transport_header(skb, offset);
+
 	NAPI_GRO_CB(skb)->proto = IPPROTO_UDP;

 	pp = call_gro_receive(ops->callbacks.gro_receive, head, skb);
+1 -1
net/ipv6/xfrm6_tunnel.c
···
 	struct xfrm6_tunnel_net *xfrm6_tn = xfrm6_tunnel_pernet(net);
 	unsigned int i;

+	xfrm_state_flush(net, IPSEC_PROTO_ANY, false);
 	xfrm_flush_gc();
-	xfrm_state_flush(net, 0, false, true);

 	for (i = 0; i < XFRM6_TUNNEL_SPI_BYADDR_HSIZE; i++)
 		WARN_ON_ONCE(!hlist_empty(&xfrm6_tn->spi_byaddr[i]));
+1 -1
net/key/af_key.c
···
 	if (proto == 0)
 		return -EINVAL;

-	err = xfrm_state_flush(net, proto, true, false);
+	err = xfrm_state_flush(net, proto, true);
 	err2 = unicast_flush_resp(sk, hdr);
 	if (err || err2) {
 		if (err == -ESRCH) /* empty table - go quietly */
+3 -4
net/sched/sch_qfq.c
···
 static void qfq_destroy_class(struct Qdisc *sch, struct qfq_class *cl)
 {
-	struct qfq_sched *q = qdisc_priv(sch);
-
-	qfq_rm_from_agg(q, cl);
 	gen_kill_estimator(&cl->rate_est);
 	qdisc_put(cl->qdisc);
 	kfree(cl);
···
 	qdisc_purge_queue(cl->qdisc);
 	qdisc_class_hash_remove(&q->clhash, &cl->common);
-	qfq_destroy_class(sch, cl);
+	qfq_rm_from_agg(q, cl);

 	sch_tree_unlock(sch);

+	qfq_destroy_class(sch, cl);
 	return 0;
 }
···
 	for (i = 0; i < q->clhash.hashsize; i++) {
 		hlist_for_each_entry_safe(cl, next, &q->clhash.hash[i],
 					  common.hnode) {
+			qfq_rm_from_agg(q, cl);
 			qfq_destroy_class(sch, cl);
 		}
 	}
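Aside: this is the standard "unlink under the lock, free after the lock" split - qdisc_put() may sleep, so it must not run inside the sch_tree_lock() (BH-disabled, atomic) section; the class is detached from all structures first and destroyed only after sch_tree_unlock(). A minimal userspace sketch of that ordering, with a mutex standing in for the qdisc tree lock:

	#include <pthread.h>
	#include <stdio.h>
	#include <stdlib.h>

	struct klass {
		struct klass *next;
		int id;
	};

	static pthread_mutex_t tree_lock = PTHREAD_MUTEX_INITIALIZER;
	static struct klass *classes;

	/* May block (like qdisc_put/gen_kill_estimator in the kernel);
	 * never call it while tree_lock is held. */
	static void destroy_class(struct klass *cl)
	{
		free(cl);
	}

	static int delete_class(int id)
	{
		struct klass **p, *cl = NULL;

		pthread_mutex_lock(&tree_lock);
		for (p = &classes; *p; p = &(*p)->next) {
			if ((*p)->id == id) {
				cl = *p;
				*p = cl->next;	/* unlink only, under the lock */
				break;
			}
		}
		pthread_mutex_unlock(&tree_lock);

		if (!cl)
			return -1;
		destroy_class(cl);	/* potentially-sleeping work, lock dropped */
		return 0;
	}

	int main(void)
	{
		struct klass *cl = malloc(sizeof(*cl));

		cl->id = 1;
		cl->next = NULL;
		classes = cl;
		printf("delete: %d\n", delete_class(1));
		return 0;
	}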
-1
net/xfrm/xfrm_device.c
···
 		return -EINVAL;
 	}

-	xfrm_set_type_offload(x);
 	if (!x->type_offload) {
 		NL_SET_ERR_MSG(extack, "Type doesn't support offload");
 		dev_put(dev);
+1 -6
net/xfrm/xfrm_interface_core.c
···
 		return -EINVAL;
 	}

-	if (p.collect_md) {
+	if (p.collect_md || xi->p.collect_md) {
 		NL_SET_ERR_MSG(extack, "collect_md can't be changed");
 		return -EINVAL;
 	}
···
 	} else {
 		if (xi->dev != dev)
 			return -EEXIST;
-		if (xi->p.collect_md) {
-			NL_SET_ERR_MSG(extack,
-				       "device can't be changed to collect_md");
-			return -EINVAL;
-		}
 	}

 	return xfrmi_update(xi, &p);
+1 -2
net/xfrm/xfrm_ipcomp.c
···
 	struct ip_comp_hdr *ipch = ip_comp_hdr(skb);
 	const int plen = skb->len;

-	skb_reset_transport_header(skb);
+	skb->transport_header = skb->network_header + sizeof(*ipch);

 	return ipcomp_post_acomp(skb, err, 0) ?:
 	       skb->len < (plen + sizeof(ip_comp_hdr)) ? -EINVAL :
···
 	struct ipcomp_data *ipcd = x->data;
 	if (!ipcd)
 		return;
-	xfrm_state_delete_tunnel(x);
 	ipcomp_free_data(ipcd);
 	kfree(ipcd);
 }
+26 -43
net/xfrm/xfrm_state.c
···
 }
 EXPORT_SYMBOL(xfrm_unregister_type_offload);

-void xfrm_set_type_offload(struct xfrm_state *x)
+void xfrm_set_type_offload(struct xfrm_state *x, bool try_load)
 {
 	const struct xfrm_type_offload *type = NULL;
 	struct xfrm_state_afinfo *afinfo;
-	bool try_load = true;

retry:
 	afinfo = xfrm_state_get_afinfo(x->props.family);
···
 }
 EXPORT_SYMBOL(xfrm_state_free);

-static void ___xfrm_state_destroy(struct xfrm_state *x)
+static void xfrm_state_gc_destroy(struct xfrm_state *x)
 {
 	if (x->mode_cbs && x->mode_cbs->destroy_state)
 		x->mode_cbs->destroy_state(x);
···
 	kfree(x->coaddr);
 	kfree(x->replay_esn);
 	kfree(x->preplay_esn);
+	xfrm_unset_type_offload(x);
 	if (x->type) {
 		x->type->destructor(x);
 		xfrm_put_type(x->type);
···
 	synchronize_rcu();

 	hlist_for_each_entry_safe(x, tmp, &gc_list, gclist)
-		___xfrm_state_destroy(x);
+		xfrm_state_gc_destroy(x);
 }

 static enum hrtimer_restart xfrm_timer_handler(struct hrtimer *me)
···
 	struct xfrm_dev_offload *xso = &x->xso;
 	struct net_device *dev = READ_ONCE(xso->dev);

-	xfrm_unset_type_offload(x);
-
 	if (dev && dev->xfrmdev_ops) {
 		spin_lock_bh(&xfrm_state_dev_gc_lock);
 		if (!hlist_unhashed(&x->dev_gclist))
···
 }
 #endif

-void __xfrm_state_destroy(struct xfrm_state *x, bool sync)
+void __xfrm_state_destroy(struct xfrm_state *x)
 {
 	WARN_ON(x->km.state != XFRM_STATE_DEAD);

-	if (sync) {
-		synchronize_rcu();
-		___xfrm_state_destroy(x);
-	} else {
-		spin_lock_bh(&xfrm_state_gc_lock);
-		hlist_add_head(&x->gclist, &xfrm_state_gc_list);
-		spin_unlock_bh(&xfrm_state_gc_lock);
-		schedule_work(&xfrm_state_gc_work);
-	}
+	spin_lock_bh(&xfrm_state_gc_lock);
+	hlist_add_head(&x->gclist, &xfrm_state_gc_list);
+	spin_unlock_bh(&xfrm_state_gc_lock);
+	schedule_work(&xfrm_state_gc_work);
 }
 EXPORT_SYMBOL(__xfrm_state_destroy);

+static void xfrm_state_delete_tunnel(struct xfrm_state *x);
 int __xfrm_state_delete(struct xfrm_state *x)
 {
 	struct net *net = xs_net(x);
···
 	spin_unlock(&net->xfrm.xfrm_state_lock);

 	xfrm_dev_state_delete(x);
+
+	xfrm_state_delete_tunnel(x);

 	/* All xfrm_state objects are created by xfrm_state_alloc.
 	 * The xfrm_state_alloc call gives a reference, and that
···
 }
 #endif

-int xfrm_state_flush(struct net *net, u8 proto, bool task_valid, bool sync)
+int xfrm_state_flush(struct net *net, u8 proto, bool task_valid)
 {
 	int i, err = 0, cnt = 0;
···
 			err = xfrm_state_delete(x);
 			xfrm_audit_state_delete(x, err ? 0 : 1,
 						task_valid);
-			if (sync)
-				xfrm_state_put_sync(x);
-			else
-				xfrm_state_put(x);
+			xfrm_state_put(x);
 			if (!err)
 				cnt++;
···
 static void xfrm_state_look_at(struct xfrm_policy *pol, struct xfrm_state *x,
 			       const struct flowi *fl, unsigned short family,
 			       struct xfrm_state **best, int *acq_in_progress,
-			       int *error)
+			       int *error, unsigned int pcpu_id)
 {
-	/* We need the cpu id just as a lookup key,
-	 * we don't require it to be stable.
-	 */
-	unsigned int pcpu_id = get_cpu();
-	put_cpu();
-
 	/* Resolution logic:
 	 * 1. There is a valid state with matching selector. Done.
 	 * 2. Valid state with inappropriate selector. Skip.
···
 	/* We need the cpu id just as a lookup key,
 	 * we don't require it to be stable.
 	 */
-	pcpu_id = get_cpu();
-	put_cpu();
+	pcpu_id = raw_smp_processor_id();

 	to_put = NULL;

 	sequence = read_seqcount_begin(&net->xfrm.xfrm_state_hash_generation);

 	rcu_read_lock();
+	xfrm_hash_ptrs_get(net, &state_ptrs);
+
 	hlist_for_each_entry_rcu(x, &pol->state_cache_list, state_cache) {
 		if (x->props.family == encap_family &&
 		    x->props.reqid == tmpl->reqid &&
···
 		    tmpl->id.proto == x->id.proto &&
 		    (tmpl->id.spi == x->id.spi || !tmpl->id.spi))
 			xfrm_state_look_at(pol, x, fl, encap_family,
-					   &best, &acquire_in_progress, &error);
+					   &best, &acquire_in_progress, &error, pcpu_id);
 	}

 	if (best)
···
 		    tmpl->id.proto == x->id.proto &&
 		    (tmpl->id.spi == x->id.spi || !tmpl->id.spi))
 			xfrm_state_look_at(pol, x, fl, family,
-					   &best, &acquire_in_progress, &error);
+					   &best, &acquire_in_progress, &error, pcpu_id);
 	}

cached:
···
 		best = NULL;
 	else if (acquire_in_progress) /* XXX: acquire_in_progress should not happen */
 		WARN_ON(1);
-
-	xfrm_hash_ptrs_get(net, &state_ptrs);

 	h = __xfrm_dst_hash(daddr, saddr, tmpl->reqid, encap_family, state_ptrs.hmask);
 	hlist_for_each_entry_rcu(x, state_ptrs.bydst + h, bydst) {
···
 		    tmpl->id.proto == x->id.proto &&
 		    (tmpl->id.spi == x->id.spi || !tmpl->id.spi))
 			xfrm_state_look_at(pol, x, fl, family,
-					   &best, &acquire_in_progress, &error);
+					   &best, &acquire_in_progress, &error, pcpu_id);
 	}
 	if (best || acquire_in_progress)
 		goto found;
···
 		    tmpl->id.proto == x->id.proto &&
 		    (tmpl->id.spi == x->id.spi || !tmpl->id.spi))
 			xfrm_state_look_at(pol, x, fl, family,
-					   &best, &acquire_in_progress, &error);
+					   &best, &acquire_in_progress, &error, pcpu_id);
 	}

found:
···
 }
 EXPORT_SYMBOL(xfrm_flush_gc);

-/* Temporarily located here until net/xfrm/xfrm_tunnel.c is created */
-void xfrm_state_delete_tunnel(struct xfrm_state *x)
+static void xfrm_state_delete_tunnel(struct xfrm_state *x)
 {
 	if (x->tunnel) {
 		struct xfrm_state *t = x->tunnel;

-		if (atomic_read(&t->tunnel_users) == 2)
+		if (atomic_dec_return(&t->tunnel_users) == 1)
 			xfrm_state_delete(t);
-		atomic_dec(&t->tunnel_users);
-		xfrm_state_put_sync(t);
+		xfrm_state_put(t);
 		x->tunnel = NULL;
 	}
 }
-EXPORT_SYMBOL(xfrm_state_delete_tunnel);

 u32 xfrm_state_mtu(struct xfrm_state *x, int mtu)
 {
···
 	unsigned int sz;

 	flush_work(&net->xfrm.state_hash_work);
+	xfrm_state_flush(net, IPSEC_PROTO_ANY, false);
 	flush_work(&xfrm_state_gc_work);
-	xfrm_state_flush(net, 0, false, true);

 	WARN_ON(!list_empty(&net->xfrm.state_all));
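Aside: the tunnel-deletion change replaces a "read the count, then decrement" pair with a single atomic_dec_return(), closing the window where two CPUs both observe tunnel_users == 2 and neither deletes the tunnel state. In C11 atomics the difference looks like this (hypothetical state struct, not the xfrm one):

	#include <stdatomic.h>
	#include <stdio.h>

	struct state { atomic_int tunnel_users; };

	/* Racy shape (the old code): two threads can both read the same
	 * value, both skip the delete, and the tunnel state is leaked. */
	static int drop_user_racy(struct state *t)
	{
		int last = (atomic_load(&t->tunnel_users) == 2);
		atomic_fetch_sub(&t->tunnel_users, 1);
		return last;
	}

	/* Fixed shape: the read-modify-write returns the updated count
	 * atomically, so exactly one caller sees it reach the
	 * "last user" value and performs the delete. */
	static int drop_user(struct state *t)
	{
		return atomic_fetch_sub(&t->tunnel_users, 1) - 1 == 1;
	}

	int main(void)
	{
		struct state t;

		atomic_init(&t.tunnel_users, 2);
		printf("delete now? %d\n", drop_user(&t));	/* 1 */
		(void)drop_user_racy;
		return 0;
	}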
+2 -1
net/xfrm/xfrm_user.c
··· 977 977 /* override default values from above */ 978 978 xfrm_update_ae_params(x, attrs, 0); 979 979 980 + xfrm_set_type_offload(x, attrs[XFRMA_OFFLOAD_DEV]); 980 981 /* configure the hardware if offload is requested */ 981 982 if (attrs[XFRMA_OFFLOAD_DEV]) { 982 983 err = xfrm_dev_state_add(net, x, ··· 2635 2634 struct xfrm_usersa_flush *p = nlmsg_data(nlh); 2636 2635 int err; 2637 2636 2638 - err = xfrm_state_flush(net, p->proto, true, false); 2637 + err = xfrm_state_flush(net, p->proto, true); 2639 2638 if (err) { 2640 2639 if (err == -ESRCH) /* empty table */ 2641 2640 return 0;
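In the xfrm_user.c hunk, the caller now decides whether loading an offload type module should even be attempted: the parsed netlink attribute table holds NULL for absent attributes, so passing attrs[XFRMA_OFFLOAD_DEV] converts the attribute's presence directly into the new bool try_load parameter. A small standalone sketch of that pointer-as-flag idiom follows; the struct and function names are stand-ins, not kernel API.

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    struct nlattr;                  /* opaque here, as in much kernel code */

    /* Stand-in for xfrm_set_type_offload(x, try_load). */
    static void set_type_offload(bool try_load)
    {
            printf("offload type module load %s\n",
                   try_load ? "attempted" : "skipped");
    }

    int main(void)
    {
            struct nlattr *attrs[2] = { NULL, NULL };  /* attribute absent */

            /* A pointer converts to bool as "!= NULL", so attribute
             * presence becomes the flag with no separate test needed. */
            set_type_offload(attrs[1]);
            return 0;
    }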
+18 -5
tools/testing/selftests/drivers/net/lib/py/load.py
··· 1 1 # SPDX-License-Identifier: GPL-2.0 2 2 3 + import re 3 4 import time 4 5 5 6 from lib.py import ksft_pr, cmd, ip, rand_port, wait_port_listen ··· 11 10 12 11 self.env = env 13 12 14 - if port is None: 15 - port = rand_port() 16 - self._iperf_server = cmd(f"iperf3 -s -1 -p {port}", background=True) 17 - wait_port_listen(port) 13 + self.port = rand_port() if port is None else port 14 + self._iperf_server = cmd(f"iperf3 -s -1 -p {self.port}", background=True) 15 + wait_port_listen(self.port) 18 16 time.sleep(0.1) 19 - self._iperf_client = cmd(f"iperf3 -c {env.addr} -P 16 -p {port} -t 86400", 17 + self._iperf_client = cmd(f"iperf3 -c {env.addr} -P 16 -p {self.port} -t 86400", 20 18 background=True, host=env.remote) 21 19 22 20 # Wait for traffic to ramp up ··· 56 56 ksft_pr(">> Server:") 57 57 ksft_pr(self._iperf_server.stdout) 58 58 ksft_pr(self._iperf_server.stderr) 59 + self._wait_client_stopped() 60 + 61 + def _wait_client_stopped(self, sleep=0.005, timeout=5): 62 + end = time.monotonic() + timeout 63 + 64 + live_port_pattern = re.compile(fr":{self.port:04X} 0[^6] ") 65 + 66 + while time.monotonic() < end: 67 + data = cmd("cat /proc/net/tcp*", host=self.env.remote).stdout 68 + if not live_port_pattern.search(data): 69 + return 70 + time.sleep(sleep) 71 + raise Exception(f"Waiting for client to stop timed out after {timeout}s")
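The new _wait_client_stopped() helper polls /proc/net/tcp* on the remote host until no connection on the iperf port remains live. Rows there print address:port pairs in fixed-width uppercase hex followed by a two-digit socket state code, and 06 is TIME_WAIT, so the pattern ":{port:04X} 0[^6] " matches sockets on that port still in any other state. A small C sketch of how a port number turns into that pattern text (the port value is a made-up example):

    #include <stdio.h>

    int main(void)
    {
            unsigned int port = 5201;       /* illustrative iperf port */
            char pattern[32];

            /* %04X mirrors Python's f":{port:04X}": /proc/net/tcp prints
             * ports as fixed-width uppercase hex, so 5201 becomes 1451.
             * "0[^6]" then matches any two-digit state except 06
             * (TIME_WAIT). */
            snprintf(pattern, sizeof(pattern), ":%04X 0[^6] ", port);
            printf("pattern: %s\n", pattern);
            return 0;
    }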
+2 -1
tools/testing/selftests/net/mptcp/Makefile
··· 4 4 5 5 CFLAGS += -Wall -Wl,--no-as-needed -O2 -g -I$(top_srcdir)/usr/include $(KHDR_INCLUDES) 6 6 7 - TEST_PROGS := mptcp_connect.sh pm_netlink.sh mptcp_join.sh diag.sh \ 7 + TEST_PROGS := mptcp_connect.sh mptcp_connect_mmap.sh mptcp_connect_sendfile.sh \ 8 + mptcp_connect_checksum.sh pm_netlink.sh mptcp_join.sh diag.sh \ 8 9 simult_flows.sh mptcp_sockopt.sh userspace_pm.sh 9 10 10 11 TEST_GEN_FILES = mptcp_connect pm_nl_ctl mptcp_sockopt mptcp_inq mptcp_diag
+5
tools/testing/selftests/net/mptcp/mptcp_connect_checksum.sh
··· 1 + #!/bin/bash 2 + # SPDX-License-Identifier: GPL-2.0 3 + 4 + MPTCP_LIB_KSFT_TEST="$(basename "${0}" .sh)" \ 5 + "$(dirname "${0}")/mptcp_connect.sh" -C "${@}"
+5
tools/testing/selftests/net/mptcp/mptcp_connect_mmap.sh
··· 1 + #!/bin/bash 2 + # SPDX-License-Identifier: GPL-2.0 3 + 4 + MPTCP_LIB_KSFT_TEST="$(basename "${0}" .sh)" \ 5 + "$(dirname "${0}")/mptcp_connect.sh" -m mmap "${@}"
+5
tools/testing/selftests/net/mptcp/mptcp_connect_sendfile.sh
··· 1 + #!/bin/bash 2 + # SPDX-License-Identifier: GPL-2.0 3 + 4 + MPTCP_LIB_KSFT_TEST="$(basename "${0}" .sh)" \ 5 + "$(dirname "${0}")/mptcp_connect.sh" -m sendfile "${@}"
+22 -23
tools/testing/selftests/net/netfilter/conntrack_clash.sh
··· 93 93 run_one_clash_test() 94 94 { 95 95 local ns="$1" 96 - local daddr="$2" 97 - local dport="$3" 96 + local ctns="$2" 97 + local daddr="$3" 98 + local dport="$4" 98 99 local entries 99 100 local cre 100 101 101 102 if ! ip netns exec "$ns" ./udpclash $daddr $dport;then 102 - echo "FAIL: did not receive expected number of replies for $daddr:$dport" 103 - ret=1 104 - return 1 103 + echo "INFO: did not receive expected number of replies for $daddr:$dport" 104 + ip netns exec "$ctns" conntrack -S 105 + # don't fail: check if clash resolution triggered after all. 105 106 fi 106 107 107 - entries=$(conntrack -S | wc -l) 108 - cre=$(conntrack -S | grep -v "clash_resolve=0" | wc -l) 108 + entries=$(ip netns exec "$ctns" conntrack -S | wc -l) 109 + cre=$(ip netns exec "$ctns" conntrack -S | grep "clash_resolve=0" | wc -l) 109 110 110 - if [ "$cre" -ne "$entries" ] ;then 111 + if [ "$cre" -ne "$entries" ];then 111 112 clash_resolution_active=1 112 113 return 0 113 114 fi 114 115 115 - # 1 cpu -> parallel insertion impossible 116 - if [ "$entries" -eq 1 ]; then 117 - return 0 118 - fi 119 - 120 - # not a failure: clash resolution logic did not trigger, but all replies 121 - # were received. With right timing, xmit completed sequentially and 116 + # not a failure: clash resolution logic did not trigger. 117 + # With right timing, xmit completed sequentially and 122 118 # no parallel insertion occurs. 123 119 return $ksft_skip 124 120 } ··· 122 126 run_clash_test() 123 127 { 124 128 local ns="$1" 125 - local daddr="$2" 126 - local dport="$3" 129 + local ctns="$2" 130 + local daddr="$3" 131 + local dport="$4" 132 + local softerr=0 127 133 128 134 for i in $(seq 1 10);do 129 - run_one_clash_test "$ns" "$daddr" "$dport" 135 + run_one_clash_test "$ns" "$ctns" "$daddr" "$dport" 130 136 local rv=$? 131 137 if [ $rv -eq 0 ];then 132 138 echo "PASS: clash resolution test for $daddr:$dport on attempt $i" 133 139 return 0 134 - elif [ $rv -eq 1 ];then 135 - echo "FAIL: clash resolution test for $daddr:$dport on attempt $i" 136 - return 1 140 + elif [ $rv -eq $ksft_skip ]; then 141 + softerr=1 137 142 fi 138 143 done 144 + 145 + [ $softerr -eq 1 ] && echo "SKIP: clash resolution for $daddr:$dport did not trigger" 139 146 } 140 147 141 148 ip link add veth0 netns "$nsclient1" type veth peer name veth0 netns "$nsrouter" ··· 160 161 161 162 # exercise clash resolution with nat: 162 163 # nsrouter is supposed to dnat to 10.0.2.1:900{0,1,2,3}. 163 - run_clash_test "$nsclient1" 10.0.1.99 "$dport" 164 + run_clash_test "$nsclient1" "$nsrouter" 10.0.1.99 "$dport" 164 165 165 166 # exercise clash resolution without nat. 166 167 load_simple_ruleset "$nsclient2" 167 - run_clash_test "$nsclient2" 127.0.0.1 9001 168 + run_clash_test "$nsclient2" "$nsclient2" 127.0.0.1 9001 168 169 169 170 if [ $clash_resolution_active -eq 0 ];then 170 171 [ "$ret" -eq 0 ] && ret=$ksft_skip
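The reworked counter check reads conntrack -S inside the namespace that actually owns the conntrack table ($ctns) and flips a previously inverted grep: cre now counts the per-CPU lines still showing clash_resolve=0, so clash resolution is deemed active exactly when that count differs from the total line count. The same comparison expressed in C, over a made-up per-CPU counter array:

    #include <stdbool.h>
    #include <stdio.h>

    /* conntrack -S prints one stats line per CPU; clash resolution ran
     * iff at least one CPU shows clash_resolve > 0, i.e. iff the number
     * of "clash_resolve=0" lines ($cre) differs from the line count. */
    static bool clash_resolution_ran(const unsigned int *clash_resolve,
                                     int ncpus)
    {
            int zero_lines = 0;

            for (int cpu = 0; cpu < ncpus; cpu++)
                    if (clash_resolve[cpu] == 0)
                            zero_lines++;

            return zero_lines != ncpus;  /* [ "$cre" -ne "$entries" ] */
    }

    int main(void)
    {
            unsigned int per_cpu[] = { 0, 3, 0, 1 };  /* illustrative */

            printf("clash resolution %s\n",
                   clash_resolution_ran(per_cpu, 4) ? "triggered"
                                                    : "not seen");
            return 0;
    }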