Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

net: add bare bone queue configs

We'll need to pass extra parameters when allocating a queue for memory
providers. Define a new structure for queue configurations, and pass it
to qapi callbacks. It's empty for now; actual parameters will be added
in following patches.

Configurations should persist across resets, and for that they're
default-initialised on device registration and stored in struct
netdev_rx_queue. We also add a new qapi callback for defaulting a given
config. It must be implemented if a driver wants to use queue configs
and is optional otherwise.

Suggested-by: Jakub Kicinski <kuba@kernel.org>
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>

+66 -16
+6 -2
drivers/net/ethernet/broadcom/bnxt/bnxt.c
··· 15911 15911 .get_base_stats = bnxt_get_base_stats, 15912 15912 }; 15913 15913 15914 - static int bnxt_queue_mem_alloc(struct net_device *dev, void *qmem, int idx) 15914 + static int bnxt_queue_mem_alloc(struct net_device *dev, 15915 + struct netdev_queue_config *qcfg, 15916 + void *qmem, int idx) 15915 15917 { 15916 15918 struct bnxt_rx_ring_info *rxr, *clone; 15917 15919 struct bnxt *bp = netdev_priv(dev); ··· 16079 16077 dst->rx_agg_bmap = src->rx_agg_bmap; 16080 16078 } 16081 16079 16082 - static int bnxt_queue_start(struct net_device *dev, void *qmem, int idx) 16080 + static int bnxt_queue_start(struct net_device *dev, 16081 + struct netdev_queue_config *qcfg, 16082 + void *qmem, int idx) 16083 16083 { 16084 16084 struct bnxt *bp = netdev_priv(dev); 16085 16085 struct bnxt_rx_ring_info *rxr, *clone;
+6 -3
drivers/net/ethernet/google/gve/gve_main.c
··· 2616 2616 gve_rx_free_ring_dqo(priv, gve_per_q_mem, &cfg); 2617 2617 } 2618 2618 2619 - static int gve_rx_queue_mem_alloc(struct net_device *dev, void *per_q_mem, 2620 - int idx) 2619 + static int gve_rx_queue_mem_alloc(struct net_device *dev, 2620 + struct netdev_queue_config *qcfg, 2621 + void *per_q_mem, int idx) 2621 2622 { 2622 2623 struct gve_priv *priv = netdev_priv(dev); 2623 2624 struct gve_rx_alloc_rings_cfg cfg = {0}; ··· 2639 2638 return err; 2640 2639 } 2641 2640 2642 - static int gve_rx_queue_start(struct net_device *dev, void *per_q_mem, int idx) 2641 + static int gve_rx_queue_start(struct net_device *dev, 2642 + struct netdev_queue_config *qcfg, 2643 + void *per_q_mem, int idx) 2643 2644 { 2644 2645 struct gve_priv *priv = netdev_priv(dev); 2645 2646 struct gve_rx_ring *gve_per_q_mem;
+6 -4
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
··· 5596 5596 struct mlx5e_channel_param cparam; 5597 5597 }; 5598 5598 5599 - static int mlx5e_queue_mem_alloc(struct net_device *dev, void *newq, 5600 - int queue_index) 5599 + static int mlx5e_queue_mem_alloc(struct net_device *dev, 5600 + struct netdev_queue_config *qcfg, 5601 + void *newq, int queue_index) 5601 5602 { 5602 5603 struct mlx5_qmgmt_data *new = (struct mlx5_qmgmt_data *)newq; 5603 5604 struct mlx5e_priv *priv = netdev_priv(dev); ··· 5659 5658 return 0; 5660 5659 } 5661 5660 5662 - static int mlx5e_queue_start(struct net_device *dev, void *newq, 5663 - int queue_index) 5661 + static int mlx5e_queue_start(struct net_device *dev, 5662 + struct netdev_queue_config *qcfg, 5663 + void *newq, int queue_index) 5664 5664 { 5665 5665 struct mlx5_qmgmt_data *new = (struct mlx5_qmgmt_data *)newq; 5666 5666 struct mlx5e_priv *priv = netdev_priv(dev);
+6 -2
drivers/net/ethernet/meta/fbnic/fbnic_txrx.c
··· 2809 2809 fbnic_wrfl(fbd); 2810 2810 } 2811 2811 2812 - static int fbnic_queue_mem_alloc(struct net_device *dev, void *qmem, int idx) 2812 + static int fbnic_queue_mem_alloc(struct net_device *dev, 2813 + struct netdev_queue_config *qcfg, 2814 + void *qmem, int idx) 2813 2815 { 2814 2816 struct fbnic_net *fbn = netdev_priv(dev); 2815 2817 const struct fbnic_q_triad *real; ··· 2863 2861 netif_wake_subqueue(fbn->netdev, nv->qt[i].sub0.q_idx); 2864 2862 } 2865 2863 2866 - static int fbnic_queue_start(struct net_device *dev, void *qmem, int idx) 2864 + static int fbnic_queue_start(struct net_device *dev, 2865 + struct netdev_queue_config *qcfg, 2866 + void *qmem, int idx) 2867 2867 { 2868 2868 struct fbnic_net *fbn = netdev_priv(dev); 2869 2869 struct fbnic_napi_vector *nv;
+5 -2
drivers/net/netdevsim/netdev.c
··· 758 758 }; 759 759 760 760 static int 761 - nsim_queue_mem_alloc(struct net_device *dev, void *per_queue_mem, int idx) 761 + nsim_queue_mem_alloc(struct net_device *dev, 762 + struct netdev_queue_config *qcfg, 763 + void *per_queue_mem, int idx) 762 764 { 763 765 struct nsim_queue_mem *qmem = per_queue_mem; 764 766 struct netdevsim *ns = netdev_priv(dev); ··· 809 807 } 810 808 811 809 static int 812 - nsim_queue_start(struct net_device *dev, void *per_queue_mem, int idx) 810 + nsim_queue_start(struct net_device *dev, struct netdev_queue_config *qcfg, 811 + void *per_queue_mem, int idx) 813 812 { 814 813 struct nsim_queue_mem *qmem = per_queue_mem; 815 814 struct netdevsim *ns = netdev_priv(dev);
+9
include/net/netdev_queues.h
··· 14 14 u8 hds_config; 15 15 }; 16 16 17 + struct netdev_queue_config { 18 + }; 19 + 17 20 /* See the netdev.yaml spec for definition of each statistic */ 18 21 struct netdev_queue_stats_rx { 19 22 u64 bytes; ··· 133 130 * @ndo_queue_get_dma_dev: Get dma device for zero-copy operations to be used 134 131 * for this queue. Return NULL on error. 135 132 * 133 + * @ndo_default_qcfg: Populate queue config struct with defaults. Optional. 134 + * 136 135 * Note that @ndo_queue_mem_alloc and @ndo_queue_mem_free may be called while 137 136 * the interface is closed. @ndo_queue_start and @ndo_queue_stop will only 138 137 * be called for an interface which is open. ··· 142 137 struct netdev_queue_mgmt_ops { 143 138 size_t ndo_queue_mem_size; 144 139 int (*ndo_queue_mem_alloc)(struct net_device *dev, 140 + struct netdev_queue_config *qcfg, 145 141 void *per_queue_mem, 146 142 int idx); 147 143 void (*ndo_queue_mem_free)(struct net_device *dev, 148 144 void *per_queue_mem); 149 145 int (*ndo_queue_start)(struct net_device *dev, 146 + struct netdev_queue_config *qcfg, 150 147 void *per_queue_mem, 151 148 int idx); 152 149 int (*ndo_queue_stop)(struct net_device *dev, 153 150 void *per_queue_mem, 154 151 int idx); 152 + void (*ndo_default_qcfg)(struct net_device *dev, 153 + struct netdev_queue_config *qcfg); 155 154 struct device * (*ndo_queue_get_dma_dev)(struct net_device *dev, 156 155 int idx); 157 156 };
+2
include/net/netdev_rx_queue.h
··· 7 7 #include <linux/sysfs.h> 8 8 #include <net/xdp.h> 9 9 #include <net/page_pool/types.h> 10 + #include <net/netdev_queues.h> 10 11 11 12 /* This structure contains an instance of an RX queue. */ 12 13 struct netdev_rx_queue { ··· 28 27 struct xsk_buff_pool *pool; 29 28 #endif 30 29 struct napi_struct *napi; 30 + struct netdev_queue_config qcfg; 31 31 struct pp_memory_provider_params mp_params; 32 32 } ____cacheline_aligned_in_smp; 33 33
+17
net/core/dev.c
··· 11270 11270 } 11271 11271 } 11272 11272 11273 + static void init_rx_queue_cfgs(struct net_device *dev) 11274 + { 11275 + const struct netdev_queue_mgmt_ops *qops = dev->queue_mgmt_ops; 11276 + struct netdev_rx_queue *rxq; 11277 + int i; 11278 + 11279 + if (!qops || !qops->ndo_default_qcfg) 11280 + return; 11281 + 11282 + for (i = 0; i < dev->num_rx_queues; i++) { 11283 + rxq = __netif_get_rx_queue(dev, i); 11284 + qops->ndo_default_qcfg(dev, &rxq->qcfg); 11285 + } 11286 + } 11287 + 11273 11288 /** 11274 11289 * register_netdevice() - register a network device 11275 11290 * @dev: device to register ··· 11329 11314 dev->name_node = netdev_name_node_head_alloc(dev); 11330 11315 if (!dev->name_node) 11331 11316 goto out; 11317 + 11318 + init_rx_queue_cfgs(dev); 11332 11319 11333 11320 /* Init, if this function is available */ 11334 11321 if (dev->netdev_ops->ndo_init) {
+9 -3
net/core/netdev_rx_queue.c
··· 22 22 { 23 23 struct netdev_rx_queue *rxq = __netif_get_rx_queue(dev, rxq_idx); 24 24 const struct netdev_queue_mgmt_ops *qops = dev->queue_mgmt_ops; 25 + struct netdev_queue_config qcfg; 25 26 void *new_mem, *old_mem; 26 27 int err; 27 28 ··· 31 30 return -EOPNOTSUPP; 32 31 33 32 netdev_assert_locked(dev); 33 + 34 + memset(&qcfg, 0, sizeof(qcfg)); 35 + if (qops->ndo_default_qcfg) 36 + qops->ndo_default_qcfg(dev, &qcfg); 34 37 35 38 new_mem = kvzalloc(qops->ndo_queue_mem_size, GFP_KERNEL); 36 39 if (!new_mem) ··· 46 41 goto err_free_new_mem; 47 42 } 48 43 49 - err = qops->ndo_queue_mem_alloc(dev, new_mem, rxq_idx); 44 + err = qops->ndo_queue_mem_alloc(dev, &qcfg, new_mem, rxq_idx); 50 45 if (err) 51 46 goto err_free_old_mem; 52 47 ··· 59 54 if (err) 60 55 goto err_free_new_queue_mem; 61 56 62 - err = qops->ndo_queue_start(dev, new_mem, rxq_idx); 57 + err = qops->ndo_queue_start(dev, &qcfg, new_mem, rxq_idx); 63 58 if (err) 64 59 goto err_start_queue; 65 60 } else { ··· 71 66 kvfree(old_mem); 72 67 kvfree(new_mem); 73 68 69 + rxq->qcfg = qcfg; 74 70 return 0; 75 71 76 72 err_start_queue: ··· 82 76 * WARN if we fail to recover the old rx queue, and at least free 83 77 * old_mem so we don't also leak that. 84 78 */ 85 - if (qops->ndo_queue_start(dev, old_mem, rxq_idx)) { 79 + if (qops->ndo_queue_start(dev, &rxq->qcfg, old_mem, rxq_idx)) { 86 80 WARN(1, 87 81 "Failed to restart old queue in error path. RX queue %d may be unhealthy.", 88 82 rxq_idx);