Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

Merge branch 'sfc-E100-VF-respresenters'

Edward Cree says:

====================
sfc: VF representors for EF100

This series adds representor netdevices for EF100 VFs, as a step towards
supporting TC offload and vDPA use cases in future patches.
This first series contains basic netdevice creation and packet TX; a
following series will add the RX path.

v3: dropped massive mcdi_pcol.h patch which was applied separately.
v2: converted comments on struct efx_nic members added in patch #4 to
kernel-doc (Jakub). While at it, also gave struct efx_rep its own kdoc
since several members had comments on them.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>

+687 -53
+1 -1
drivers/net/ethernet/sfc/Makefile
··· 8 8 ef100.o ef100_nic.o ef100_netdev.o \ 9 9 ef100_ethtool.o ef100_rx.o ef100_tx.o 10 10 sfc-$(CONFIG_SFC_MTD) += mtd.o 11 - sfc-$(CONFIG_SFC_SRIOV) += sriov.o ef10_sriov.o ef100_sriov.o 11 + sfc-$(CONFIG_SFC_SRIOV) += sriov.o ef10_sriov.o ef100_sriov.o ef100_rep.o mae.o 12 12 13 13 obj-$(CONFIG_SFC) += sfc.o 14 14
+14 -2
drivers/net/ethernet/sfc/ef100_netdev.c
··· 85 85 netif_dbg(efx, ifdown, efx->net_dev, "closing on CPU %d\n", 86 86 raw_smp_processor_id()); 87 87 88 + efx_detach_reps(efx); 88 89 netif_stop_queue(net_dev); 89 90 efx_stop_all(efx); 90 91 efx_mcdi_mac_fini_stats(efx); ··· 177 176 mutex_unlock(&efx->mac_lock); 178 177 179 178 efx->state = STATE_NET_UP; 179 + if (netif_running(efx->net_dev)) 180 + efx_attach_reps(efx); 180 181 181 182 return 0; 182 183 ··· 198 195 struct net_device *net_dev) 199 196 { 200 197 struct efx_nic *efx = efx_netdev_priv(net_dev); 198 + 199 + return __ef100_hard_start_xmit(skb, efx, net_dev, NULL); 200 + } 201 + 202 + netdev_tx_t __ef100_hard_start_xmit(struct sk_buff *skb, 203 + struct efx_nic *efx, 204 + struct net_device *net_dev, 205 + struct efx_rep *efv) 206 + { 201 207 struct efx_tx_queue *tx_queue; 202 208 struct efx_channel *channel; 203 209 int rc; ··· 221 209 } 222 210 223 211 tx_queue = &channel->tx_queue[0]; 224 - rc = ef100_enqueue_skb(tx_queue, skb); 212 + rc = __ef100_enqueue_skb(tx_queue, skb, efv); 225 213 if (rc == 0) 226 214 return NETDEV_TX_OK; 227 215 ··· 324 312 unregister_netdevice_notifier(&efx->netdev_notifier); 325 313 #if defined(CONFIG_SFC_SRIOV) 326 314 if (!efx->type->is_vf) 327 - efx_ef100_pci_sriov_disable(efx); 315 + efx_ef100_pci_sriov_disable(efx, true); 328 316 #endif 329 317 330 318 ef100_unregister_netdev(efx);
+5
drivers/net/ethernet/sfc/ef100_netdev.h
··· 10 10 */ 11 11 12 12 #include <linux/netdevice.h> 13 + #include "ef100_rep.h" 13 14 15 + netdev_tx_t __ef100_hard_start_xmit(struct sk_buff *skb, 16 + struct efx_nic *efx, 17 + struct net_device *net_dev, 18 + struct efx_rep *efv); 14 19 int ef100_netdev_event(struct notifier_block *this, 15 20 unsigned long event, void *ptr); 16 21 int ef100_probe_netdev(struct efx_probe_data *probe_data);
+7
drivers/net/ethernet/sfc/ef100_nic.c
··· 946 946 unsigned int bar_size = resource_size(&efx->pci_dev->resource[efx->mem_bar]); 947 947 struct ef100_nic_data *nic_data; 948 948 char fw_version[32]; 949 + u32 priv_mask = 0; 949 950 int i, rc; 950 951 951 952 if (WARN_ON(bar_size == 0)) ··· 1027 1026 1028 1027 efx_mcdi_print_fwver(efx, fw_version, sizeof(fw_version)); 1029 1028 pci_dbg(efx->pci_dev, "Firmware version %s\n", fw_version); 1029 + 1030 + rc = efx_mcdi_get_privilege_mask(efx, &priv_mask); 1031 + if (rc) /* non-fatal, and priv_mask will still be 0 */ 1032 + pci_info(efx->pci_dev, 1033 + "Failed to get privilege mask from FW, rc %d\n", rc); 1034 + nic_data->grp_mae = !!(priv_mask & MC_CMD_PRIVILEGE_MASK_IN_GRP_MAE); 1030 1035 1031 1036 if (compare_versions(fw_version, "1.1.0.1000") < 0) { 1032 1037 pci_info(efx->pci_dev, "Firmware uses old event descriptors\n");
+1
drivers/net/ethernet/sfc/ef100_nic.h
··· 72 72 u8 port_id[ETH_ALEN]; 73 73 DECLARE_BITMAP(evq_phases, EFX_MAX_CHANNELS); 74 74 u64 stats[EF100_STAT_COUNT]; 75 + bool grp_mae; /* MAE Privilege */ 75 76 u16 tso_max_hdr_len; 76 77 u16 tso_max_payload_num_segs; 77 78 u16 tso_max_frames;
+60 -23
drivers/net/ethernet/sfc/ef100_regs.h
··· 2 2 /**************************************************************************** 3 3 * Driver for Solarflare network controllers and boards 4 4 * Copyright 2018 Solarflare Communications Inc. 5 - * Copyright 2019-2020 Xilinx Inc. 5 + * Copyright 2019-2022 Xilinx Inc. 6 6 * 7 7 * This program is free software; you can redistribute it and/or modify it 8 8 * under the terms of the GNU General Public License version 2 as published ··· 181 181 /* RHEAD_BASE_EVENT */ 182 182 #define ESF_GZ_E_TYPE_LBN 60 183 183 #define ESF_GZ_E_TYPE_WIDTH 4 184 - #define ESE_GZ_EF100_EV_DRIVER 5 185 - #define ESE_GZ_EF100_EV_MCDI 4 186 - #define ESE_GZ_EF100_EV_CONTROL 3 187 - #define ESE_GZ_EF100_EV_TX_TIMESTAMP 2 188 - #define ESE_GZ_EF100_EV_TX_COMPLETION 1 189 - #define ESE_GZ_EF100_EV_RX_PKTS 0 190 184 #define ESF_GZ_EV_EVQ_PHASE_LBN 59 191 185 #define ESF_GZ_EV_EVQ_PHASE_WIDTH 1 192 186 #define ESE_GZ_RHEAD_BASE_EVENT_STRUCT_SIZE 64 ··· 363 369 #define ESF_GZ_RX_PREFIX_VLAN_STRIP_TCI_WIDTH 16 364 370 #define ESF_GZ_RX_PREFIX_CSUM_FRAME_LBN 144 365 371 #define ESF_GZ_RX_PREFIX_CSUM_FRAME_WIDTH 16 366 - #define ESF_GZ_RX_PREFIX_INGRESS_VPORT_LBN 128 367 - #define ESF_GZ_RX_PREFIX_INGRESS_VPORT_WIDTH 16 372 + #define ESF_GZ_RX_PREFIX_INGRESS_MPORT_LBN 128 373 + #define ESF_GZ_RX_PREFIX_INGRESS_MPORT_WIDTH 16 368 374 #define ESF_GZ_RX_PREFIX_USER_MARK_LBN 96 369 375 #define ESF_GZ_RX_PREFIX_USER_MARK_WIDTH 32 370 376 #define ESF_GZ_RX_PREFIX_RSS_HASH_LBN 64 371 377 #define ESF_GZ_RX_PREFIX_RSS_HASH_WIDTH 32 372 - #define ESF_GZ_RX_PREFIX_PARTIAL_TSTAMP_LBN 32 373 - #define ESF_GZ_RX_PREFIX_PARTIAL_TSTAMP_WIDTH 32 378 + #define ESF_GZ_RX_PREFIX_PARTIAL_TSTAMP_LBN 34 379 + #define ESF_GZ_RX_PREFIX_PARTIAL_TSTAMP_WIDTH 30 380 + #define ESF_GZ_RX_PREFIX_VSWITCH_STATUS_LBN 33 381 + #define ESF_GZ_RX_PREFIX_VSWITCH_STATUS_WIDTH 1 382 + #define ESF_GZ_RX_PREFIX_VLAN_STRIPPED_LBN 32 383 + #define ESF_GZ_RX_PREFIX_VLAN_STRIPPED_WIDTH 1 374 384 #define ESF_GZ_RX_PREFIX_CLASS_LBN 16 375 385 
#define ESF_GZ_RX_PREFIX_CLASS_WIDTH 16 376 386 #define ESF_GZ_RX_PREFIX_USER_FLAG_LBN 15 ··· 452 454 #define ESF_GZ_M2M_TRANSLATE_ADDR_WIDTH 1 453 455 #define ESF_GZ_M2M_RSVD_LBN 120 454 456 #define ESF_GZ_M2M_RSVD_WIDTH 2 455 - #define ESF_GZ_M2M_ADDR_SPC_LBN 108 456 - #define ESF_GZ_M2M_ADDR_SPC_WIDTH 12 457 - #define ESF_GZ_M2M_ADDR_SPC_PASID_LBN 86 458 - #define ESF_GZ_M2M_ADDR_SPC_PASID_WIDTH 22 459 - #define ESF_GZ_M2M_ADDR_SPC_MODE_LBN 84 460 - #define ESF_GZ_M2M_ADDR_SPC_MODE_WIDTH 2 457 + #define ESF_GZ_M2M_ADDR_SPC_ID_LBN 84 458 + #define ESF_GZ_M2M_ADDR_SPC_ID_WIDTH 36 461 459 #define ESF_GZ_M2M_LEN_MINUS_1_LBN 64 462 460 #define ESF_GZ_M2M_LEN_MINUS_1_WIDTH 20 463 461 #define ESF_GZ_M2M_ADDR_LBN 0 ··· 486 492 #define ESF_GZ_TX_SEG_TRANSLATE_ADDR_WIDTH 1 487 493 #define ESF_GZ_TX_SEG_RSVD2_LBN 120 488 494 #define ESF_GZ_TX_SEG_RSVD2_WIDTH 2 489 - #define ESF_GZ_TX_SEG_ADDR_SPC_LBN 108 490 - #define ESF_GZ_TX_SEG_ADDR_SPC_WIDTH 12 491 - #define ESF_GZ_TX_SEG_ADDR_SPC_PASID_LBN 86 492 - #define ESF_GZ_TX_SEG_ADDR_SPC_PASID_WIDTH 22 493 - #define ESF_GZ_TX_SEG_ADDR_SPC_MODE_LBN 84 494 - #define ESF_GZ_TX_SEG_ADDR_SPC_MODE_WIDTH 2 495 + #define ESF_GZ_TX_SEG_ADDR_SPC_ID_LBN 84 496 + #define ESF_GZ_TX_SEG_ADDR_SPC_ID_WIDTH 36 495 497 #define ESF_GZ_TX_SEG_RSVD_LBN 80 496 498 #define ESF_GZ_TX_SEG_RSVD_WIDTH 4 497 499 #define ESF_GZ_TX_SEG_LEN_LBN 64 ··· 573 583 #define ESE_GZ_SF_TX_TSO_DSC_FMT_STRUCT_SIZE 124 574 584 575 585 586 + /* Enum D2VIO_MSG_OP */ 587 + #define ESE_GZ_QUE_JBDNE 3 588 + #define ESE_GZ_QUE_EVICT 2 589 + #define ESE_GZ_QUE_EMPTY 1 590 + #define ESE_GZ_NOP 0 591 + 576 592 /* Enum DESIGN_PARAMS */ 577 593 #define ESE_EF100_DP_GZ_RX_MAX_RUNT 17 578 594 #define ESE_EF100_DP_GZ_VI_STRIDES 16 ··· 626 630 #define ESE_GZ_PCI_BASE_CONFIG_SPACE_SIZE 256 627 631 #define ESE_GZ_PCI_EXPRESS_XCAP_HDR_SIZE 4 628 632 633 + /* Enum RH_DSC_TYPE */ 634 + #define ESE_GZ_TX_TOMB 0xF 635 + #define ESE_GZ_TX_VIO 0xE 636 + #define ESE_GZ_TX_TSO_OVRRD 0x8 637 + 
#define ESE_GZ_TX_D2CMP 0x7 638 + #define ESE_GZ_TX_DATA 0x6 639 + #define ESE_GZ_TX_D2M 0x5 640 + #define ESE_GZ_TX_M2M 0x4 641 + #define ESE_GZ_TX_SEG 0x3 642 + #define ESE_GZ_TX_TSO 0x2 643 + #define ESE_GZ_TX_OVRRD 0x1 644 + #define ESE_GZ_TX_SEND 0x0 645 + 629 646 /* Enum RH_HCLASS_L2_CLASS */ 630 647 #define ESE_GZ_RH_HCLASS_L2_CLASS_E2_0123VLAN 1 631 648 #define ESE_GZ_RH_HCLASS_L2_CLASS_OTHER 0 ··· 675 666 #define ESE_GZ_RH_HCLASS_TUNNEL_CLASS_VXLAN 1 676 667 #define ESE_GZ_RH_HCLASS_TUNNEL_CLASS_NONE 0 677 668 669 + /* Enum SF_CTL_EVENT_SUBTYPE */ 670 + #define ESE_GZ_EF100_CTL_EV_EVQ_TIMEOUT 0x3 671 + #define ESE_GZ_EF100_CTL_EV_FLUSH 0x2 672 + #define ESE_GZ_EF100_CTL_EV_TIME_SYNC 0x1 673 + #define ESE_GZ_EF100_CTL_EV_UNSOL_OVERFLOW 0x0 674 + 675 + /* Enum SF_EVENT_TYPE */ 676 + #define ESE_GZ_EF100_EV_DRIVER 0x5 677 + #define ESE_GZ_EF100_EV_MCDI 0x4 678 + #define ESE_GZ_EF100_EV_CONTROL 0x3 679 + #define ESE_GZ_EF100_EV_TX_TIMESTAMP 0x2 680 + #define ESE_GZ_EF100_EV_TX_COMPLETION 0x1 681 + #define ESE_GZ_EF100_EV_RX_PKTS 0x0 682 + 683 + /* Enum SF_EW_EVENT_TYPE */ 684 + #define ESE_GZ_EF100_EWEV_VIRTQ_DESC 0x2 685 + #define ESE_GZ_EF100_EWEV_TXQ_DESC 0x1 686 + #define ESE_GZ_EF100_EWEV_64BIT 0x0 687 + 678 688 /* Enum TX_DESC_CSO_PARTIAL_EN */ 679 689 #define ESE_GZ_TX_DESC_CSO_PARTIAL_EN_TCP 2 680 690 #define ESE_GZ_TX_DESC_CSO_PARTIAL_EN_UDP 1 ··· 709 681 #define ESE_GZ_TX_DESC_IP4_ID_INC_MOD16 2 710 682 #define ESE_GZ_TX_DESC_IP4_ID_INC_MOD15 1 711 683 #define ESE_GZ_TX_DESC_IP4_ID_NO_OP 0 684 + 685 + /* Enum VIRTIO_NET_HDR_F */ 686 + #define ESE_GZ_NEEDS_CSUM 0x1 687 + 688 + /* Enum VIRTIO_NET_HDR_GSO */ 689 + #define ESE_GZ_TCPV6 0x4 690 + #define ESE_GZ_UDP 0x3 691 + #define ESE_GZ_TCPV4 0x1 692 + #define ESE_GZ_NONE 0x0 712 693 /**************************************************************************/ 713 694 714 695 #define ESF_GZ_EV_DEBUG_EVENT_GEN_FLAGS_LBN 44
+244
drivers/net/ethernet/sfc/ef100_rep.c
··· 1 + // SPDX-License-Identifier: GPL-2.0-only 2 + /**************************************************************************** 3 + * Driver for Solarflare network controllers and boards 4 + * Copyright 2019 Solarflare Communications Inc. 5 + * Copyright 2020-2022 Xilinx Inc. 6 + * 7 + * This program is free software; you can redistribute it and/or modify it 8 + * under the terms of the GNU General Public License version 2 as published 9 + * by the Free Software Foundation, incorporated herein by reference. 10 + */ 11 + 12 + #include "ef100_rep.h" 13 + #include "ef100_netdev.h" 14 + #include "ef100_nic.h" 15 + #include "mae.h" 16 + 17 + #define EFX_EF100_REP_DRIVER "efx_ef100_rep" 18 + 19 + static int efx_ef100_rep_init_struct(struct efx_nic *efx, struct efx_rep *efv, 20 + unsigned int i) 21 + { 22 + efv->parent = efx; 23 + efv->idx = i; 24 + INIT_LIST_HEAD(&efv->list); 25 + efv->msg_enable = NETIF_MSG_DRV | NETIF_MSG_PROBE | 26 + NETIF_MSG_LINK | NETIF_MSG_IFDOWN | 27 + NETIF_MSG_IFUP | NETIF_MSG_RX_ERR | 28 + NETIF_MSG_TX_ERR | NETIF_MSG_HW; 29 + return 0; 30 + } 31 + 32 + static netdev_tx_t efx_ef100_rep_xmit(struct sk_buff *skb, 33 + struct net_device *dev) 34 + { 35 + struct efx_rep *efv = netdev_priv(dev); 36 + struct efx_nic *efx = efv->parent; 37 + netdev_tx_t rc; 38 + 39 + /* __ef100_hard_start_xmit() will always return success even in the 40 + * case of TX drops, where it will increment efx's tx_dropped. The 41 + * efv stats really only count attempted TX, not success/failure. 
42 + */ 43 + atomic64_inc(&efv->stats.tx_packets); 44 + atomic64_add(skb->len, &efv->stats.tx_bytes); 45 + netif_tx_lock(efx->net_dev); 46 + rc = __ef100_hard_start_xmit(skb, efx, dev, efv); 47 + netif_tx_unlock(efx->net_dev); 48 + return rc; 49 + } 50 + 51 + static int efx_ef100_rep_get_port_parent_id(struct net_device *dev, 52 + struct netdev_phys_item_id *ppid) 53 + { 54 + struct efx_rep *efv = netdev_priv(dev); 55 + struct efx_nic *efx = efv->parent; 56 + struct ef100_nic_data *nic_data; 57 + 58 + nic_data = efx->nic_data; 59 + /* nic_data->port_id is a u8[] */ 60 + ppid->id_len = sizeof(nic_data->port_id); 61 + memcpy(ppid->id, nic_data->port_id, sizeof(nic_data->port_id)); 62 + return 0; 63 + } 64 + 65 + static int efx_ef100_rep_get_phys_port_name(struct net_device *dev, 66 + char *buf, size_t len) 67 + { 68 + struct efx_rep *efv = netdev_priv(dev); 69 + struct efx_nic *efx = efv->parent; 70 + struct ef100_nic_data *nic_data; 71 + int ret; 72 + 73 + nic_data = efx->nic_data; 74 + ret = snprintf(buf, len, "p%upf%uvf%u", efx->port_num, 75 + nic_data->pf_index, efv->idx); 76 + if (ret >= len) 77 + return -EOPNOTSUPP; 78 + 79 + return 0; 80 + } 81 + 82 + static const struct net_device_ops efx_ef100_rep_netdev_ops = { 83 + .ndo_start_xmit = efx_ef100_rep_xmit, 84 + .ndo_get_port_parent_id = efx_ef100_rep_get_port_parent_id, 85 + .ndo_get_phys_port_name = efx_ef100_rep_get_phys_port_name, 86 + }; 87 + 88 + static void efx_ef100_rep_get_drvinfo(struct net_device *dev, 89 + struct ethtool_drvinfo *drvinfo) 90 + { 91 + strscpy(drvinfo->driver, EFX_EF100_REP_DRIVER, sizeof(drvinfo->driver)); 92 + } 93 + 94 + static u32 efx_ef100_rep_ethtool_get_msglevel(struct net_device *net_dev) 95 + { 96 + struct efx_rep *efv = netdev_priv(net_dev); 97 + 98 + return efv->msg_enable; 99 + } 100 + 101 + static void efx_ef100_rep_ethtool_set_msglevel(struct net_device *net_dev, 102 + u32 msg_enable) 103 + { 104 + struct efx_rep *efv = netdev_priv(net_dev); 105 + 106 + efv->msg_enable = 
msg_enable; 107 + } 108 + 109 + static const struct ethtool_ops efx_ef100_rep_ethtool_ops = { 110 + .get_drvinfo = efx_ef100_rep_get_drvinfo, 111 + .get_msglevel = efx_ef100_rep_ethtool_get_msglevel, 112 + .set_msglevel = efx_ef100_rep_ethtool_set_msglevel, 113 + }; 114 + 115 + static struct efx_rep *efx_ef100_rep_create_netdev(struct efx_nic *efx, 116 + unsigned int i) 117 + { 118 + struct net_device *net_dev; 119 + struct efx_rep *efv; 120 + int rc; 121 + 122 + net_dev = alloc_etherdev_mq(sizeof(*efv), 1); 123 + if (!net_dev) 124 + return ERR_PTR(-ENOMEM); 125 + 126 + efv = netdev_priv(net_dev); 127 + rc = efx_ef100_rep_init_struct(efx, efv, i); 128 + if (rc) 129 + goto fail1; 130 + efv->net_dev = net_dev; 131 + rtnl_lock(); 132 + spin_lock_bh(&efx->vf_reps_lock); 133 + list_add_tail(&efv->list, &efx->vf_reps); 134 + spin_unlock_bh(&efx->vf_reps_lock); 135 + if (netif_running(efx->net_dev) && efx->state == STATE_NET_UP) { 136 + netif_device_attach(net_dev); 137 + netif_carrier_on(net_dev); 138 + } else { 139 + netif_carrier_off(net_dev); 140 + netif_tx_stop_all_queues(net_dev); 141 + } 142 + rtnl_unlock(); 143 + 144 + net_dev->netdev_ops = &efx_ef100_rep_netdev_ops; 145 + net_dev->ethtool_ops = &efx_ef100_rep_ethtool_ops; 146 + net_dev->min_mtu = EFX_MIN_MTU; 147 + net_dev->max_mtu = EFX_MAX_MTU; 148 + net_dev->features |= NETIF_F_LLTX; 149 + net_dev->hw_features |= NETIF_F_LLTX; 150 + return efv; 151 + fail1: 152 + free_netdev(net_dev); 153 + return ERR_PTR(rc); 154 + } 155 + 156 + static int efx_ef100_configure_rep(struct efx_rep *efv) 157 + { 158 + struct efx_nic *efx = efv->parent; 159 + u32 selector; 160 + int rc; 161 + 162 + /* Construct mport selector for corresponding VF */ 163 + efx_mae_mport_vf(efx, efv->idx, &selector); 164 + /* Look up actual mport ID */ 165 + rc = efx_mae_lookup_mport(efx, selector, &efv->mport); 166 + if (rc) 167 + return rc; 168 + pci_dbg(efx->pci_dev, "VF %u has mport ID %#x\n", efv->idx, efv->mport); 169 + /* mport label should 
fit in 16 bits */ 170 + WARN_ON(efv->mport >> 16); 171 + 172 + return 0; 173 + } 174 + 175 + static void efx_ef100_rep_destroy_netdev(struct efx_rep *efv) 176 + { 177 + struct efx_nic *efx = efv->parent; 178 + 179 + rtnl_lock(); 180 + spin_lock_bh(&efx->vf_reps_lock); 181 + list_del(&efv->list); 182 + spin_unlock_bh(&efx->vf_reps_lock); 183 + rtnl_unlock(); 184 + free_netdev(efv->net_dev); 185 + } 186 + 187 + int efx_ef100_vfrep_create(struct efx_nic *efx, unsigned int i) 188 + { 189 + struct efx_rep *efv; 190 + int rc; 191 + 192 + efv = efx_ef100_rep_create_netdev(efx, i); 193 + if (IS_ERR(efv)) { 194 + rc = PTR_ERR(efv); 195 + pci_err(efx->pci_dev, 196 + "Failed to create representor for VF %d, rc %d\n", i, 197 + rc); 198 + return rc; 199 + } 200 + rc = efx_ef100_configure_rep(efv); 201 + if (rc) { 202 + pci_err(efx->pci_dev, 203 + "Failed to configure representor for VF %d, rc %d\n", 204 + i, rc); 205 + goto fail; 206 + } 207 + rc = register_netdev(efv->net_dev); 208 + if (rc) { 209 + pci_err(efx->pci_dev, 210 + "Failed to register representor for VF %d, rc %d\n", 211 + i, rc); 212 + goto fail; 213 + } 214 + pci_dbg(efx->pci_dev, "Representor for VF %d is %s\n", i, 215 + efv->net_dev->name); 216 + return 0; 217 + fail: 218 + efx_ef100_rep_destroy_netdev(efv); 219 + return rc; 220 + } 221 + 222 + void efx_ef100_vfrep_destroy(struct efx_nic *efx, struct efx_rep *efv) 223 + { 224 + struct net_device *rep_dev; 225 + 226 + rep_dev = efv->net_dev; 227 + if (!rep_dev) 228 + return; 229 + netif_dbg(efx, drv, rep_dev, "Removing VF representor\n"); 230 + unregister_netdev(rep_dev); 231 + efx_ef100_rep_destroy_netdev(efv); 232 + } 233 + 234 + void efx_ef100_fini_vfreps(struct efx_nic *efx) 235 + { 236 + struct ef100_nic_data *nic_data = efx->nic_data; 237 + struct efx_rep *efv, *next; 238 + 239 + if (!nic_data->grp_mae) 240 + return; 241 + 242 + list_for_each_entry_safe(efv, next, &efx->vf_reps, list) 243 + efx_ef100_vfrep_destroy(efx, efv); 244 + }
+49
drivers/net/ethernet/sfc/ef100_rep.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0-only */ 2 + /**************************************************************************** 3 + * Driver for Solarflare network controllers and boards 4 + * Copyright 2019 Solarflare Communications Inc. 5 + * Copyright 2020-2022 Xilinx Inc. 6 + * 7 + * This program is free software; you can redistribute it and/or modify it 8 + * under the terms of the GNU General Public License version 2 as published 9 + * by the Free Software Foundation, incorporated herein by reference. 10 + */ 11 + 12 + /* Handling for ef100 representor netdevs */ 13 + #ifndef EF100_REP_H 14 + #define EF100_REP_H 15 + 16 + #include "net_driver.h" 17 + 18 + struct efx_rep_sw_stats { 19 + atomic64_t rx_packets, tx_packets; 20 + atomic64_t rx_bytes, tx_bytes; 21 + atomic64_t rx_dropped, tx_errors; 22 + }; 23 + 24 + /** 25 + * struct efx_rep - Private data for an Efx representor 26 + * 27 + * @parent: the efx PF which manages this representor 28 + * @net_dev: representor netdevice 29 + * @msg_enable: log message enable flags 30 + * @mport: m-port ID of corresponding VF 31 + * @idx: VF index 32 + * @list: entry on efx->vf_reps 33 + * @stats: software traffic counters for netdev stats 34 + */ 35 + struct efx_rep { 36 + struct efx_nic *parent; 37 + struct net_device *net_dev; 38 + u32 msg_enable; 39 + u32 mport; 40 + unsigned int idx; 41 + struct list_head list; 42 + struct efx_rep_sw_stats stats; 43 + }; 44 + 45 + int efx_ef100_vfrep_create(struct efx_nic *efx, unsigned int i); 46 + void efx_ef100_vfrep_destroy(struct efx_nic *efx, struct efx_rep *efv); 47 + void efx_ef100_fini_vfreps(struct efx_nic *efx); 48 + 49 + #endif /* EF100_REP_H */
+24 -8
drivers/net/ethernet/sfc/ef100_sriov.c
··· 11 11 12 12 #include "ef100_sriov.h" 13 13 #include "ef100_nic.h" 14 + #include "ef100_rep.h" 14 15 15 16 static int efx_ef100_pci_sriov_enable(struct efx_nic *efx, int num_vfs) 16 17 { 18 + struct ef100_nic_data *nic_data = efx->nic_data; 17 19 struct pci_dev *dev = efx->pci_dev; 18 - int rc; 20 + struct efx_rep *efv, *next; 21 + int rc, i; 19 22 20 23 efx->vf_count = num_vfs; 21 24 rc = pci_enable_sriov(dev, num_vfs); 22 25 if (rc) 23 - goto fail; 26 + goto fail1; 24 27 28 + if (!nic_data->grp_mae) 29 + return 0; 30 + 31 + for (i = 0; i < num_vfs; i++) { 32 + rc = efx_ef100_vfrep_create(efx, i); 33 + if (rc) 34 + goto fail2; 35 + } 25 36 return 0; 26 37 27 - fail: 38 + fail2: 39 + list_for_each_entry_safe(efv, next, &efx->vf_reps, list) 40 + efx_ef100_vfrep_destroy(efx, efv); 41 + pci_disable_sriov(dev); 42 + fail1: 28 43 netif_err(efx, probe, efx->net_dev, "Failed to enable SRIOV VFs\n"); 29 44 efx->vf_count = 0; 30 45 return rc; 31 46 } 32 47 33 - int efx_ef100_pci_sriov_disable(struct efx_nic *efx) 48 + int efx_ef100_pci_sriov_disable(struct efx_nic *efx, bool force) 34 49 { 35 50 struct pci_dev *dev = efx->pci_dev; 36 51 unsigned int vfs_assigned; 37 52 38 53 vfs_assigned = pci_vfs_assigned(dev); 39 - if (vfs_assigned) { 54 + if (vfs_assigned && !force) { 40 55 netif_info(efx, drv, efx->net_dev, "VFs are assigned to guests; " 41 56 "please detach them before disabling SR-IOV\n"); 42 57 return -EBUSY; 43 58 } 44 59 45 - pci_disable_sriov(dev); 46 - 60 + efx_ef100_fini_vfreps(efx); 61 + if (!vfs_assigned) 62 + pci_disable_sriov(dev); 47 63 return 0; 48 64 } 49 65 50 66 int efx_ef100_sriov_configure(struct efx_nic *efx, int num_vfs) 51 67 { 52 68 if (num_vfs == 0) 53 - return efx_ef100_pci_sriov_disable(efx); 69 + return efx_ef100_pci_sriov_disable(efx, false); 54 70 else 55 71 return efx_ef100_pci_sriov_enable(efx, num_vfs); 56 72 }
+1 -1
drivers/net/ethernet/sfc/ef100_sriov.h
··· 11 11 #include "net_driver.h" 12 12 13 13 int efx_ef100_sriov_configure(struct efx_nic *efx, int num_vfs); 14 - int efx_ef100_pci_sriov_disable(struct efx_nic *efx); 14 + int efx_ef100_pci_sriov_disable(struct efx_nic *efx, bool force);
+80 -4
drivers/net/ethernet/sfc/ef100_tx.c
··· 254 254 255 255 static void ef100_tx_make_descriptors(struct efx_tx_queue *tx_queue, 256 256 const struct sk_buff *skb, 257 - unsigned int segment_count) 257 + unsigned int segment_count, 258 + struct efx_rep *efv) 258 259 { 259 260 unsigned int old_write_count = tx_queue->write_count; 260 261 unsigned int new_write_count = old_write_count; ··· 272 271 next_desc_type = ESE_GZ_TX_DESC_TYPE_TSO; 273 272 else 274 273 next_desc_type = ESE_GZ_TX_DESC_TYPE_SEND; 274 + 275 + if (unlikely(efv)) { 276 + /* Create TX override descriptor */ 277 + write_ptr = new_write_count & tx_queue->ptr_mask; 278 + txd = ef100_tx_desc(tx_queue, write_ptr); 279 + ++new_write_count; 280 + 281 + tx_queue->packet_write_count = new_write_count; 282 + EFX_POPULATE_OWORD_3(*txd, 283 + ESF_GZ_TX_DESC_TYPE, ESE_GZ_TX_DESC_TYPE_PREFIX, 284 + ESF_GZ_TX_PREFIX_EGRESS_MPORT, efv->mport, 285 + ESF_GZ_TX_PREFIX_EGRESS_MPORT_EN, 1); 286 + nr_descs--; 287 + } 275 288 276 289 /* if it's a raw write (such as XDP) then always SEND single frames */ 277 290 if (!skb) ··· 321 306 /* if it's a raw write (such as XDP) then always SEND */ 322 307 next_desc_type = skb ? 
ESE_GZ_TX_DESC_TYPE_SEG : 323 308 ESE_GZ_TX_DESC_TYPE_SEND; 309 + /* mark as an EFV buffer if applicable */ 310 + if (unlikely(efv)) 311 + buffer->flags |= EFX_TX_BUF_EFV; 324 312 325 313 } while (new_write_count != tx_queue->insert_count); 326 314 ··· 342 324 343 325 void ef100_tx_write(struct efx_tx_queue *tx_queue) 344 326 { 345 - ef100_tx_make_descriptors(tx_queue, NULL, 0); 327 + ef100_tx_make_descriptors(tx_queue, NULL, 0, NULL); 346 328 ef100_tx_push_buffers(tx_queue); 347 329 } 348 330 ··· 369 351 */ 370 352 int ef100_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb) 371 353 { 354 + return __ef100_enqueue_skb(tx_queue, skb, NULL); 355 + } 356 + 357 + int __ef100_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb, 358 + struct efx_rep *efv) 359 + { 372 360 unsigned int old_insert_count = tx_queue->insert_count; 373 361 struct efx_nic *efx = tx_queue->efx; 374 362 bool xmit_more = netdev_xmit_more(); ··· 400 376 return 0; 401 377 } 402 378 379 + if (unlikely(efv)) { 380 + struct efx_tx_buffer *buffer = __efx_tx_queue_get_insert_buffer(tx_queue); 381 + 382 + /* Drop representor packets if the queue is stopped. 383 + * We currently don't assert backoff to representors so this is 384 + * to make sure representor traffic can't starve the main 385 + * net device. 386 + * And, of course, if there are no TX descriptors left. 387 + */ 388 + if (netif_tx_queue_stopped(tx_queue->core_txq) || 389 + unlikely(efx_tx_buffer_in_use(buffer))) { 390 + atomic64_inc(&efv->stats.tx_errors); 391 + rc = -ENOSPC; 392 + goto err; 393 + } 394 + 395 + /* Also drop representor traffic if it could cause us to 396 + * stop the queue. If we assert backoff and we haven't 397 + * received traffic on the main net device recently then the 398 + * TX watchdog can go off erroneously. 
399 + */ 400 + fill_level = efx_channel_tx_old_fill_level(tx_queue->channel); 401 + fill_level += efx_tx_max_skb_descs(efx); 402 + if (fill_level > efx->txq_stop_thresh) { 403 + struct efx_tx_queue *txq2; 404 + 405 + /* Refresh cached fill level and re-check */ 406 + efx_for_each_channel_tx_queue(txq2, tx_queue->channel) 407 + txq2->old_read_count = READ_ONCE(txq2->read_count); 408 + 409 + fill_level = efx_channel_tx_old_fill_level(tx_queue->channel); 410 + fill_level += efx_tx_max_skb_descs(efx); 411 + if (fill_level > efx->txq_stop_thresh) { 412 + atomic64_inc(&efv->stats.tx_errors); 413 + rc = -ENOSPC; 414 + goto err; 415 + } 416 + } 417 + 418 + buffer->flags = EFX_TX_BUF_OPTION | EFX_TX_BUF_EFV; 419 + tx_queue->insert_count++; 420 + } 421 + 403 422 /* Map for DMA and create descriptors */ 404 423 rc = efx_tx_map_data(tx_queue, skb, segments); 405 424 if (rc) 406 425 goto err; 407 - ef100_tx_make_descriptors(tx_queue, skb, segments); 426 + ef100_tx_make_descriptors(tx_queue, skb, segments, efv); 408 427 409 428 fill_level = efx_channel_tx_old_fill_level(tx_queue->channel); 410 429 if (fill_level > efx->txq_stop_thresh) { 411 430 struct efx_tx_queue *txq2; 431 + 432 + /* Because of checks above, representor traffic should 433 + * not be able to stop the queue. 434 + */ 435 + WARN_ON(efv); 412 436 413 437 netif_tx_stop_queue(tx_queue->core_txq); 414 438 /* Re-read after a memory barrier in case we've raced with ··· 476 404 /* If xmit_more then we don't need to push the doorbell, unless there 477 405 * are 256 descriptors already queued in which case we have to push to 478 406 * ensure we never push more than 256 at once. 407 + * 408 + * Always push for representor traffic, and don't account it to parent 409 + * PF netdevice's BQL. 
479 410 */ 480 - if (__netdev_tx_sent_queue(tx_queue->core_txq, skb->len, xmit_more) || 411 + if (unlikely(efv) || 412 + __netdev_tx_sent_queue(tx_queue->core_txq, skb->len, xmit_more) || 481 413 tx_queue->write_count - tx_queue->notify_count > 255) 482 414 ef100_tx_push_buffers(tx_queue); 483 415
+3
drivers/net/ethernet/sfc/ef100_tx.h
··· 13 13 #define EFX_EF100_TX_H 14 14 15 15 #include "net_driver.h" 16 + #include "ef100_rep.h" 16 17 17 18 int ef100_tx_probe(struct efx_tx_queue *tx_queue); 18 19 void ef100_tx_init(struct efx_tx_queue *tx_queue); ··· 23 22 void ef100_ev_tx(struct efx_channel *channel, const efx_qword_t *p_event); 24 23 25 24 netdev_tx_t ef100_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb); 25 + int __ef100_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb, 26 + struct efx_rep *efv); 26 27 #endif
+8 -1
drivers/net/ethernet/sfc/efx.h
··· 12 12 #include "net_driver.h" 13 13 #include "ef100_rx.h" 14 14 #include "ef100_tx.h" 15 + #include "efx_common.h" 15 16 #include "filter.h" 16 17 17 18 int efx_net_open(struct net_device *net_dev); ··· 207 206 { 208 207 struct net_device *dev = efx->net_dev; 209 208 209 + /* We must stop reps (which use our TX) before we stop ourselves. */ 210 + efx_detach_reps(efx); 211 + 210 212 /* Lock/freeze all TX queues so that we can be sure the 211 213 * TX scheduler is stopped when we're done and before 212 214 * netif_device_present() becomes false. ··· 221 217 222 218 static inline void efx_device_attach_if_not_resetting(struct efx_nic *efx) 223 219 { 224 - if ((efx->state != STATE_DISABLED) && !efx->reset_pending) 220 + if ((efx->state != STATE_DISABLED) && !efx->reset_pending) { 225 221 netif_device_attach(efx->net_dev); 222 + if (efx->state == STATE_NET_UP) 223 + efx_attach_reps(efx); 224 + } 226 225 } 227 226 228 227 static inline bool efx_rwsem_assert_write_locked(struct rw_semaphore *sem)
+38
drivers/net/ethernet/sfc/efx_common.c
··· 24 24 #include "mcdi_port_common.h" 25 25 #include "io.h" 26 26 #include "mcdi_pcol.h" 27 + #include "ef100_rep.h" 27 28 28 29 static unsigned int debug = (NETIF_MSG_DRV | NETIF_MSG_PROBE | 29 30 NETIF_MSG_LINK | NETIF_MSG_IFDOWN | ··· 1022 1021 efx->rps_hash_table = kcalloc(EFX_ARFS_HASH_TABLE_SIZE, 1023 1022 sizeof(*efx->rps_hash_table), GFP_KERNEL); 1024 1023 #endif 1024 + spin_lock_init(&efx->vf_reps_lock); 1025 + INIT_LIST_HEAD(&efx->vf_reps); 1025 1026 INIT_WORK(&efx->mac_work, efx_mac_work); 1026 1027 init_waitqueue_head(&efx->flush_wq); 1027 1028 ··· 1391 1388 if (snprintf(name, len, "p%u", efx->port_num) >= len) 1392 1389 return -EINVAL; 1393 1390 return 0; 1391 + } 1392 + 1393 + void efx_detach_reps(struct efx_nic *efx) 1394 + { 1395 + struct net_device *rep_dev; 1396 + struct efx_rep *efv; 1397 + 1398 + ASSERT_RTNL(); 1399 + netif_dbg(efx, drv, efx->net_dev, "Detaching VF representors\n"); 1400 + list_for_each_entry(efv, &efx->vf_reps, list) { 1401 + rep_dev = efv->net_dev; 1402 + if (!rep_dev) 1403 + continue; 1404 + netif_carrier_off(rep_dev); 1405 + /* See efx_device_detach_sync() */ 1406 + netif_tx_lock_bh(rep_dev); 1407 + netif_tx_stop_all_queues(rep_dev); 1408 + netif_tx_unlock_bh(rep_dev); 1409 + } 1410 + } 1411 + 1412 + void efx_attach_reps(struct efx_nic *efx) 1413 + { 1414 + struct net_device *rep_dev; 1415 + struct efx_rep *efv; 1416 + 1417 + ASSERT_RTNL(); 1418 + netif_dbg(efx, drv, efx->net_dev, "Attaching VF representors\n"); 1419 + list_for_each_entry(efv, &efx->vf_reps, list) { 1420 + rep_dev = efv->net_dev; 1421 + if (!rep_dev) 1422 + continue; 1423 + netif_tx_wake_all_queues(rep_dev); 1424 + netif_carrier_on(rep_dev); 1425 + } 1394 1426 }
+3
drivers/net/ethernet/sfc/efx_common.h
··· 111 111 112 112 int efx_get_phys_port_name(struct net_device *net_dev, 113 113 char *name, size_t len); 114 + 115 + void efx_detach_reps(struct efx_nic *efx); 116 + void efx_attach_reps(struct efx_nic *efx); 114 117 #endif
+44
drivers/net/ethernet/sfc/mae.c
··· 1 + // SPDX-License-Identifier: GPL-2.0-only 2 + /**************************************************************************** 3 + * Driver for Solarflare network controllers and boards 4 + * Copyright 2019 Solarflare Communications Inc. 5 + * Copyright 2020-2022 Xilinx Inc. 6 + * 7 + * This program is free software; you can redistribute it and/or modify it 8 + * under the terms of the GNU General Public License version 2 as published 9 + * by the Free Software Foundation, incorporated herein by reference. 10 + */ 11 + 12 + #include "mae.h" 13 + #include "mcdi.h" 14 + #include "mcdi_pcol.h" 15 + 16 + void efx_mae_mport_vf(struct efx_nic *efx __always_unused, u32 vf_id, u32 *out) 17 + { 18 + efx_dword_t mport; 19 + 20 + EFX_POPULATE_DWORD_3(mport, 21 + MAE_MPORT_SELECTOR_TYPE, MAE_MPORT_SELECTOR_TYPE_FUNC, 22 + MAE_MPORT_SELECTOR_FUNC_PF_ID, MAE_MPORT_SELECTOR_FUNC_PF_ID_CALLER, 23 + MAE_MPORT_SELECTOR_FUNC_VF_ID, vf_id); 24 + *out = EFX_DWORD_VAL(mport); 25 + } 26 + 27 + /* id is really only 24 bits wide */ 28 + int efx_mae_lookup_mport(struct efx_nic *efx, u32 selector, u32 *id) 29 + { 30 + MCDI_DECLARE_BUF(outbuf, MC_CMD_MAE_MPORT_LOOKUP_OUT_LEN); 31 + MCDI_DECLARE_BUF(inbuf, MC_CMD_MAE_MPORT_LOOKUP_IN_LEN); 32 + size_t outlen; 33 + int rc; 34 + 35 + MCDI_SET_DWORD(inbuf, MAE_MPORT_LOOKUP_IN_MPORT_SELECTOR, selector); 36 + rc = efx_mcdi_rpc(efx, MC_CMD_MAE_MPORT_LOOKUP, inbuf, sizeof(inbuf), 37 + outbuf, sizeof(outbuf), &outlen); 38 + if (rc) 39 + return rc; 40 + if (outlen < sizeof(outbuf)) 41 + return -EIO; 42 + *id = MCDI_DWORD(outbuf, MAE_MPORT_LOOKUP_OUT_MPORT_ID); 43 + return 0; 44 + }
+22
drivers/net/ethernet/sfc/mae.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0-only */ 2 + /**************************************************************************** 3 + * Driver for Solarflare network controllers and boards 4 + * Copyright 2019 Solarflare Communications Inc. 5 + * Copyright 2020-2022 Xilinx Inc. 6 + * 7 + * This program is free software; you can redistribute it and/or modify it 8 + * under the terms of the GNU General Public License version 2 as published 9 + * by the Free Software Foundation, incorporated herein by reference. 10 + */ 11 + 12 + #ifndef EF100_MAE_H 13 + #define EF100_MAE_H 14 + /* MCDI interface for the ef100 Match-Action Engine */ 15 + 16 + #include "net_driver.h" 17 + 18 + void efx_mae_mport_vf(struct efx_nic *efx, u32 vf_id, u32 *out); 19 + 20 + int efx_mae_lookup_mport(struct efx_nic *efx, u32 selector, u32 *id); 21 + 22 + #endif /* EF100_MAE_H */
+46
drivers/net/ethernet/sfc/mcdi.c
··· 2129 2129 return rc; 2130 2130 } 2131 2131 2132 + /* Failure to read a privilege mask is never fatal, because we can always 2133 + * carry on as though we didn't have the privilege we were interested in. 2134 + * So use efx_mcdi_rpc_quiet(). 2135 + */ 2136 + int efx_mcdi_get_privilege_mask(struct efx_nic *efx, u32 *mask) 2137 + { 2138 + MCDI_DECLARE_BUF(fi_outbuf, MC_CMD_GET_FUNCTION_INFO_OUT_LEN); 2139 + MCDI_DECLARE_BUF(pm_inbuf, MC_CMD_PRIVILEGE_MASK_IN_LEN); 2140 + MCDI_DECLARE_BUF(pm_outbuf, MC_CMD_PRIVILEGE_MASK_OUT_LEN); 2141 + size_t outlen; 2142 + u16 pf, vf; 2143 + int rc; 2144 + 2145 + if (!efx || !mask) 2146 + return -EINVAL; 2147 + 2148 + /* Get our function number */ 2149 + rc = efx_mcdi_rpc_quiet(efx, MC_CMD_GET_FUNCTION_INFO, NULL, 0, 2150 + fi_outbuf, MC_CMD_GET_FUNCTION_INFO_OUT_LEN, 2151 + &outlen); 2152 + if (rc != 0) 2153 + return rc; 2154 + if (outlen < MC_CMD_GET_FUNCTION_INFO_OUT_LEN) 2155 + return -EIO; 2156 + 2157 + pf = MCDI_DWORD(fi_outbuf, GET_FUNCTION_INFO_OUT_PF); 2158 + vf = MCDI_DWORD(fi_outbuf, GET_FUNCTION_INFO_OUT_VF); 2159 + 2160 + MCDI_POPULATE_DWORD_2(pm_inbuf, PRIVILEGE_MASK_IN_FUNCTION, 2161 + PRIVILEGE_MASK_IN_FUNCTION_PF, pf, 2162 + PRIVILEGE_MASK_IN_FUNCTION_VF, vf); 2163 + 2164 + rc = efx_mcdi_rpc_quiet(efx, MC_CMD_PRIVILEGE_MASK, 2165 + pm_inbuf, sizeof(pm_inbuf), 2166 + pm_outbuf, sizeof(pm_outbuf), &outlen); 2167 + 2168 + if (rc != 0) 2169 + return rc; 2170 + if (outlen < MC_CMD_PRIVILEGE_MASK_OUT_LEN) 2171 + return -EIO; 2172 + 2173 + *mask = MCDI_DWORD(pm_outbuf, PRIVILEGE_MASK_OUT_OLD_MASK); 2174 + 2175 + return 0; 2176 + } 2177 + 2132 2178 #ifdef CONFIG_SFC_MTD 2133 2179 2134 2180 #define EFX_MCDI_NVRAM_LEN_MAX 128
+1
drivers/net/ethernet/sfc/mcdi.h
··· 366 366 unsigned int *flags); 367 367 int efx_mcdi_get_workarounds(struct efx_nic *efx, unsigned int *impl_out, 368 368 unsigned int *enabled_out); 369 + int efx_mcdi_get_privilege_mask(struct efx_nic *efx, u32 *mask); 369 370 370 371 #ifdef CONFIG_SFC_MCDI_MON 371 372 int efx_mcdi_mon_probe(struct efx_nic *efx);
+5
drivers/net/ethernet/sfc/net_driver.h
··· 178 178 #define EFX_TX_BUF_OPTION 0x10 /* empty buffer for option descriptor */ 179 179 #define EFX_TX_BUF_XDP 0x20 /* buffer was sent with XDP */ 180 180 #define EFX_TX_BUF_TSO_V3 0x40 /* empty buffer for a TSO_V3 descriptor */ 181 + #define EFX_TX_BUF_EFV 0x100 /* buffer was sent from representor */ 181 182 182 183 /** 183 184 * struct efx_tx_queue - An Efx TX queue ··· 967 966 * @vf_count: Number of VFs intended to be enabled. 968 967 * @vf_init_count: Number of VFs that have been fully initialised. 969 968 * @vi_scale: log2 number of vnics per VF. 969 + * @vf_reps_lock: Protects vf_reps list 970 + * @vf_reps: local VF reps 970 971 * @ptp_data: PTP state data 971 972 * @ptp_warned: has this NIC seen and warned about unexpected PTP events? 972 973 * @vpd_sn: Serial number read from VPD ··· 1148 1145 unsigned vf_init_count; 1149 1146 unsigned vi_scale; 1150 1147 #endif 1148 + spinlock_t vf_reps_lock; 1149 + struct list_head vf_reps; 1151 1150 1152 1151 struct efx_ptp_data *ptp_data; 1153 1152 bool ptp_warned;
+4 -2
drivers/net/ethernet/sfc/tx.c
··· 559 559 void efx_xmit_done_single(struct efx_tx_queue *tx_queue) 560 560 { 561 561 unsigned int pkts_compl = 0, bytes_compl = 0; 562 + unsigned int efv_pkts_compl = 0; 562 563 unsigned int read_ptr; 563 564 bool finished = false; 564 565 ··· 581 580 /* Need to check the flag before dequeueing. */ 582 581 if (buffer->flags & EFX_TX_BUF_SKB) 583 582 finished = true; 584 - efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl); 583 + efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl, 584 + &efv_pkts_compl); 585 585 586 586 ++tx_queue->read_count; 587 587 read_ptr = tx_queue->read_count & tx_queue->ptr_mask; ··· 591 589 tx_queue->pkts_compl += pkts_compl; 592 590 tx_queue->bytes_compl += bytes_compl; 593 591 594 - EFX_WARN_ON_PARANOID(pkts_compl != 1); 592 + EFX_WARN_ON_PARANOID(pkts_compl + efv_pkts_compl != 1); 595 593 596 594 efx_xmit_done_check_empty(tx_queue); 597 595 }
+25 -10
drivers/net/ethernet/sfc/tx_common.c
··· 109 109 /* Free any buffers left in the ring */ 110 110 while (tx_queue->read_count != tx_queue->write_count) { 111 111 unsigned int pkts_compl = 0, bytes_compl = 0; 112 + unsigned int efv_pkts_compl = 0; 112 113 113 114 buffer = &tx_queue->buffer[tx_queue->read_count & tx_queue->ptr_mask]; 114 - efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl); 115 + efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl, 116 + &efv_pkts_compl); 115 117 116 118 ++tx_queue->read_count; 117 119 } ··· 148 146 void efx_dequeue_buffer(struct efx_tx_queue *tx_queue, 149 147 struct efx_tx_buffer *buffer, 150 148 unsigned int *pkts_compl, 151 - unsigned int *bytes_compl) 149 + unsigned int *bytes_compl, 150 + unsigned int *efv_pkts_compl) 152 151 { 153 152 if (buffer->unmap_len) { 154 153 struct device *dma_dev = &tx_queue->efx->pci_dev->dev; ··· 167 164 if (buffer->flags & EFX_TX_BUF_SKB) { 168 165 struct sk_buff *skb = (struct sk_buff *)buffer->skb; 169 166 170 - EFX_WARN_ON_PARANOID(!pkts_compl || !bytes_compl); 171 - (*pkts_compl)++; 172 - (*bytes_compl) += skb->len; 167 + if (unlikely(buffer->flags & EFX_TX_BUF_EFV)) { 168 + EFX_WARN_ON_PARANOID(!efv_pkts_compl); 169 + (*efv_pkts_compl)++; 170 + } else { 171 + EFX_WARN_ON_PARANOID(!pkts_compl || !bytes_compl); 172 + (*pkts_compl)++; 173 + (*bytes_compl) += skb->len; 174 + } 175 + 173 176 if (tx_queue->timestamping && 174 177 (tx_queue->completed_timestamp_major || 175 178 tx_queue->completed_timestamp_minor)) { ··· 208 199 static void efx_dequeue_buffers(struct efx_tx_queue *tx_queue, 209 200 unsigned int index, 210 201 unsigned int *pkts_compl, 211 - unsigned int *bytes_compl) 202 + unsigned int *bytes_compl, 203 + unsigned int *efv_pkts_compl) 212 204 { 213 205 struct efx_nic *efx = tx_queue->efx; 214 206 unsigned int stop_index, read_ptr; ··· 228 218 return; 229 219 } 230 220 231 - efx_dequeue_buffer(tx_queue, buffer, pkts_compl, bytes_compl); 221 + efx_dequeue_buffer(tx_queue, buffer, pkts_compl, bytes_compl, 222 + efv_pkts_compl); 232 223 233 224 ++tx_queue->read_count; 234 225 read_ptr = tx_queue->read_count & tx_queue->ptr_mask; ··· 252 241 void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index) 253 242 { 254 243 unsigned int fill_level, pkts_compl = 0, bytes_compl = 0; 255 + unsigned int efv_pkts_compl = 0; 256 245 struct efx_nic *efx = tx_queue->efx; 257 246 258 247 EFX_WARN_ON_ONCE_PARANOID(index > tx_queue->ptr_mask); 259 248 260 - efx_dequeue_buffers(tx_queue, index, &pkts_compl, &bytes_compl); 249 + efx_dequeue_buffers(tx_queue, index, &pkts_compl, &bytes_compl, 250 + &efv_pkts_compl); 261 251 tx_queue->pkts_compl += pkts_compl; 262 252 tx_queue->bytes_compl += bytes_compl; 263 253 264 - if (pkts_compl > 1) 254 + if (pkts_compl + efv_pkts_compl > 1) 265 255 ++tx_queue->merge_events; 266 256 267 257 /* See if we need to restart the netif queue. This memory ··· 287 274 void efx_enqueue_unwind(struct efx_tx_queue *tx_queue, 288 275 unsigned int insert_count) 289 276 { 290 277 unsigned int efv_pkts_compl = 0; 291 278 struct efx_tx_buffer *buffer; 292 279 unsigned int bytes_compl = 0; 293 280 unsigned int pkts_compl = 0; ··· 296 282 while (tx_queue->insert_count != insert_count) { 297 283 --tx_queue->insert_count; 298 284 buffer = __efx_tx_queue_get_insert_buffer(tx_queue); 299 285 efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl, 286 + &efv_pkts_compl); 300 287 } 301 288 } 302 289
+2 -1
drivers/net/ethernet/sfc/tx_common.h
··· 19 19 void efx_dequeue_buffer(struct efx_tx_queue *tx_queue, 20 20 struct efx_tx_buffer *buffer, 21 21 unsigned int *pkts_compl, 22 - unsigned int *bytes_compl); 22 + unsigned int *bytes_compl, 23 + unsigned int *efv_pkts_compl); 23 24 24 25 static inline bool efx_tx_buffer_in_use(struct efx_tx_buffer *buffer) 25 26 {