Linux kernel mirror (for testing)
Source: git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
Topics: kernel, os, linux
1/* SPDX-License-Identifier: GPL-2.0-or-later */
2/*
3 * include/linux/if_team.h - Network team device driver header
4 * Copyright (c) 2011 Jiri Pirko <jpirko@redhat.com>
5 */
6#ifndef _LINUX_IF_TEAM_H_
7#define _LINUX_IF_TEAM_H_
8
9#include <linux/netpoll.h>
10#include <net/sch_generic.h>
11#include <linux/types.h>
12#include <uapi/linux/if_team.h>
13
/*
 * Per-CPU rx/tx counters for a team device.  The 64-bit u64_stats_t
 * counters are updated inside a @syncp (u64_stats_sync) section so they
 * read consistently on 32-bit SMP; the plain u32 drop/nohandler counters
 * are maintained outside that section.
 */
struct team_pcpu_stats {
	u64_stats_t		rx_packets;
	u64_stats_t		rx_bytes;
	u64_stats_t		rx_multicast;
	u64_stats_t		tx_packets;
	u64_stats_t		tx_bytes;
	struct u64_stats_sync	syncp;
	u32			rx_dropped;
	u32			tx_dropped;
	u32			rx_nohandler;
};
25
struct team;

/*
 * Per-port state for one slave device enslaved to a team.  A port is
 * reachable from its slave netdev via rx_handler_data (see
 * team_port_get_rcu()), so lookups are RCU-protected and the structure
 * is reclaimed through @rcu.
 */
struct team_port {
	struct net_device *dev;
	struct hlist_node tx_hlist; /* node in tx-enabled ports hash list */
	struct list_head list; /* node in ordinary list */
	struct team *team;
	int tx_index; /* index of tx enabled port. If disabled, -1 */
	bool rx_enabled; /* read locklessly, see team_port_rx_enabled() */

	bool linkup; /* either state.linkup or user.linkup */

	/* Link state as reported by the slave device itself */
	struct {
		bool linkup;
		u32 speed;
		u8 duplex;
	} state;

	/* Values set by userspace */
	struct {
		bool linkup;
		bool linkup_enabled;
	} user;

	/* Custom gennetlink interface related flags */
	bool changed;
	bool removed;

	/*
	 * A place for storing original values of the device before it
	 * become a port.
	 */
	struct {
		unsigned char dev_addr[MAX_ADDR_LEN];
		unsigned int mtu;
	} orig;

#ifdef CONFIG_NET_POLL_CONTROLLER
	struct netpoll *np;
#endif

	s32 priority; /* lower number ~ higher priority */
	u16 queue_id;
	struct list_head qom_list; /* node in queue override mapping list */
	struct rcu_head rcu;
	long mode_priv[]; /* trailing per-port private area for the mode */
};
73
/*
 * Return the team_port registered as @dev's rx_handler_data, or NULL if
 * @dev is not a team port.  Caller must be in an RCU read-side section.
 */
static inline struct team_port *team_port_get_rcu(const struct net_device *dev)
{
	return rcu_dereference(dev->rx_handler_data);
}
78
/* Lockless check whether the port is enabled for rx (flag may change
 * concurrently, hence READ_ONCE). */
static inline bool team_port_rx_enabled(struct team_port *port)
{
	return READ_ONCE(port->rx_enabled);
}
83
/* Lockless check whether the port is enabled for tx: a disabled port
 * has tx_index == -1 (see struct team_port). */
static inline bool team_port_tx_enabled(struct team_port *port)
{
	return READ_ONCE(port->tx_index) != -1;
}
88
/* Port is fully enabled when both rx and tx are enabled. */
static inline bool team_port_enabled(struct team_port *port)
{
	return team_port_rx_enabled(port) && team_port_tx_enabled(port);
}
93
/* Port can actually transmit: link is up and tx is enabled. */
static inline bool team_port_txable(struct team_port *port)
{
	return port->linkup && team_port_tx_enabled(port);
}
98
/*
 * Check whether net_device @port_dev is a team port that can currently
 * transmit.  Returns false if @port_dev is not a team port at all.
 * Takes the RCU read lock itself, so callers need no RCU protection of
 * their own.
 */
static inline bool team_port_dev_txable(const struct net_device *port_dev)
{
	struct team_port *port;
	bool txable;

	rcu_read_lock();
	port = team_port_get_rcu(port_dev);
	txable = port ? team_port_txable(port) : false;
	rcu_read_unlock();

	return txable;
}
111
#ifdef CONFIG_NET_POLL_CONTROLLER
/* Transmit @skb via the port's netpoll instance (netconsole & friends). */
static inline void team_netpoll_send_skb(struct team_port *port,
					 struct sk_buff *skb)
{
	netpoll_send_skb(port->np, skb);
}
#else
/* No-op stub when netpoll support is compiled out. */
static inline void team_netpoll_send_skb(struct team_port *port,
					 struct sk_buff *skb)
{
}
#endif
124
/*
 * Callbacks implemented by a team mode (e.g. round-robin,
 * active-backup).  The core invokes them on mode init/exit, per-packet
 * rx/tx, and on port lifecycle events.
 * NOTE(review): which callbacks may be left NULL is enforced by the
 * team core, not visible here - confirm against drivers/net/team.
 */
struct team_mode_ops {
	int (*init)(struct team *team);
	void (*exit)(struct team *team);
	rx_handler_result_t (*receive)(struct team *team,
				       struct team_port *port,
				       struct sk_buff *skb);
	bool (*transmit)(struct team *team, struct sk_buff *skb);
	int (*port_enter)(struct team *team, struct team_port *port);
	void (*port_leave)(struct team *team, struct team_port *port);
	void (*port_change_dev_addr)(struct team *team, struct team_port *port);
	void (*port_tx_disabled)(struct team *team, struct team_port *port);
};
137
/* Default implementations modes can plug into team_mode_ops. */
extern int team_modeop_port_enter(struct team *team, struct team_port *port);
extern void team_modeop_port_change_dev_addr(struct team *team,
					     struct team_port *port);
141
/* Value types a team option can carry; selects which member of
 * team_gsetter_ctx.data is used. */
enum team_option_type {
	TEAM_OPTION_TYPE_U32,
	TEAM_OPTION_TYPE_STRING,
	TEAM_OPTION_TYPE_BINARY,
	TEAM_OPTION_TYPE_BOOL,
	TEAM_OPTION_TYPE_S32,
};
149
/* Identifies one concrete instance of an option (array slot and,
 * for per-port options, the port it belongs to). */
struct team_option_inst_info {
	u32 array_index;
	struct team_port *port; /* != NULL if per-port */
};
154
/*
 * Context handed to option getters/setters.  @data is a union keyed by
 * the option's team_option_type; @info names the option instance.
 */
struct team_gsetter_ctx {
	union {
		u32 u32_val;
		const char *str_val;
		struct {
			const void *ptr;
			u32 len;
		} bin_val;
		bool bool_val;
		s32 s32_val;
	} data;
	struct team_option_inst_info *info;
};
168
/*
 * Declaration of one team option, exposed over the team genetlink
 * interface.  Registered in batches via team_options_register().
 */
struct team_option {
	struct list_head list;
	const char *name;
	bool per_port; /* one instance per port instead of per team */
	unsigned int array_size; /* != 0 means the option is array */
	enum team_option_type type;
	void (*init)(struct team *team, struct team_option_inst_info *info);
	void (*getter)(struct team *team, struct team_gsetter_ctx *ctx);
	int (*setter)(struct team *team, struct team_gsetter_ctx *ctx);
};
179
/* Mark an option instance changed / push pending changes to userspace. */
extern void team_option_inst_set_change(struct team_option_inst_info *opt_inst_info);
extern void team_options_change_check(struct team *team);
182
/*
 * Descriptor of a team mode, registered by mode modules via
 * team_mode_register().  @priv_size / @port_priv_size size the private
 * areas in struct team / struct team_port for this mode.
 */
struct team_mode {
	const char *kind;
	struct module *owner;
	size_t priv_size;
	size_t port_priv_size;
	const struct team_mode_ops *ops;
	enum netdev_lag_tx_type lag_tx_type;
};
191
/* tx-enabled port hash table: 1 << 4 = 16 buckets, indexed by masking
 * in team_tx_port_index_hash(). */
#define TEAM_PORT_HASHBITS 4
#define TEAM_PORT_HASHENTRIES (1 << TEAM_PORT_HASHBITS)

/* Size of the inline mode-private scratch area in struct team. */
#define TEAM_MODE_PRIV_LONGS 4
#define TEAM_MODE_PRIV_SIZE (sizeof(long) * TEAM_MODE_PRIV_LONGS)
197
/*
 * State of one team master device.  Lives in the master netdev's
 * private area (see netdev_from_priv() use in team_dev_queue_xmit()).
 */
struct team {
	struct team_pcpu_stats __percpu *pcpu_stats;

	const struct header_ops *header_ops_cache;

	/*
	 * List of tx-enabled ports and counts of rx and tx-enabled ports.
	 */
	int tx_en_port_count;
	int rx_en_port_count;
	struct hlist_head tx_en_port_hlist[TEAM_PORT_HASHENTRIES];

	struct list_head port_list; /* list of all ports */

	struct list_head option_list;
	struct list_head option_inst_list; /* list of option instances */

	const struct team_mode *mode;
	struct team_mode_ops ops; /* copy of mode->ops for the active mode */
	bool user_carrier_enabled;
	bool queue_override_enabled;
	struct list_head *qom_lists; /* array of queue override mapping lists */
	bool port_mtu_change_allowed;
	bool notifier_ctx;
	/* Deferred "notify peers" (e.g. gratuitous ARP) work */
	struct {
		unsigned int count;
		unsigned int interval; /* in ms */
		atomic_t count_pending;
		struct delayed_work dw;
	} notify_peers;
	/* Deferred multicast group rejoin work */
	struct {
		unsigned int count;
		unsigned int interval; /* in ms */
		atomic_t count_pending;
		struct delayed_work dw;
	} mcast_rejoin;
	long mode_priv[TEAM_MODE_PRIV_LONGS]; /* mode-private scratch area */
};
236
/*
 * Transmit @skb out of @port on behalf of @team.  Restores the queue
 * mapping the qdisc layer stashed for the slave device, retargets the
 * skb to the port's netdev and hands it to the regular xmit path - or
 * to netpoll when a netpoll transmit is running on the team device.
 * Returns dev_queue_xmit()'s result (0 on the netpoll path).
 */
static inline int team_dev_queue_xmit(struct team *team, struct team_port *port,
				      struct sk_buff *skb)
{
	/* queue_mapping and slave_dev_queue_mapping must be same-sized
	 * for the direct copy below to be valid. */
	BUILD_BUG_ON(sizeof(skb->queue_mapping) !=
		     sizeof(qdisc_skb_cb(skb)->slave_dev_queue_mapping));
	skb_set_queue_mapping(skb, qdisc_skb_cb(skb)->slave_dev_queue_mapping);

	skb->dev = port->dev;
	if (unlikely(netpoll_tx_running(netdev_from_priv(team)))) {
		team_netpoll_send_skb(port, skb);
		return 0;
	}
	return dev_queue_xmit(skb);
}
251
252static inline struct hlist_head *team_tx_port_index_hash(struct team *team,
253 int tx_port_index)
254{
255 unsigned int list_entry = tx_port_index & (TEAM_PORT_HASHENTRIES - 1);
256
257 return &team->tx_en_port_hlist[list_entry];
258}
259
/*
 * Find the enabled port whose tx index is @tx_port_index by walking its
 * hash bucket; returns NULL if none matches.
 * NOTE(review): plain (non-RCU) hlist walk - presumably the caller
 * holds the team lock; confirm at call sites.
 */
static inline struct team_port *team_get_port_by_tx_index(struct team *team,
							  int tx_port_index)
{
	struct hlist_head *head = team_tx_port_index_hash(team, tx_port_index);
	struct team_port *port;

	hlist_for_each_entry(port, head, tx_hlist)
		if (port->tx_index == tx_port_index)
			return port;
	return NULL;
}
271
272static inline int team_num_to_port_index(struct team *team, unsigned int num)
273{
274 int tx_en_port_count = READ_ONCE(team->tx_en_port_count);
275
276 if (unlikely(!tx_en_port_count))
277 return 0;
278 return num % tx_en_port_count;
279}
280
/*
 * RCU variant of team_get_port_by_tx_index(): walk the hash bucket
 * under RCU and return the port with @tx_port_index, or NULL.
 * tx_index may concurrently be reset to -1 on disable, hence the
 * READ_ONCE.  Caller must be in an RCU read-side section.
 */
static inline struct team_port *team_get_port_by_tx_index_rcu(struct team *team,
							      int tx_port_index)
{
	struct hlist_head *head = team_tx_port_index_hash(team, tx_port_index);
	struct team_port *port;

	hlist_for_each_entry_rcu(port, head, tx_hlist)
		if (READ_ONCE(port->tx_index) == tx_port_index)
			return port;
	return NULL;
}
292
/*
 * Starting at @port, find the first port able to transmit (link up and
 * tx-enabled).  Prefers @port itself, then continues down the team's
 * port list and wraps around to the start, stopping when the search
 * comes back to @port.  Returns NULL if no port is txable.  Caller must
 * be in an RCU read-side section.
 */
static inline struct team_port *
team_get_first_port_txable_rcu(struct team *team, struct team_port *port)
{
	struct team_port *cur;

	if (likely(team_port_txable(port)))
		return port;
	cur = port;
	list_for_each_entry_continue_rcu(cur, &team->port_list, list)
		if (team_port_txable(cur))
			return cur;
	/* Wrap around: scan from the list head up to (not including) @port. */
	list_for_each_entry_rcu(cur, &team->port_list, list) {
		if (cur == port)
			break;
		if (team_port_txable(cur))
			return cur;
	}
	return NULL;
}
312
/* Batch (un)registration of options and registration of team modes. */
extern int team_options_register(struct team *team,
				 const struct team_option *option,
				 size_t option_count);
extern void team_options_unregister(struct team *team,
				    const struct team_option *option,
				    size_t option_count);
extern int team_mode_register(const struct team_mode *mode);
extern void team_mode_unregister(const struct team_mode *mode);
321
/* Default tx/rx queue counts for a newly created team device. */
#define TEAM_DEFAULT_NUM_TX_QUEUES 16
#define TEAM_DEFAULT_NUM_RX_QUEUES 16

/* Mode modules declare their modalias ("team-mode-<kind>") with this. */
#define MODULE_ALIAS_TEAM_MODE(kind) MODULE_ALIAS("team-mode-" kind)
326
327#endif /* _LINUX_IF_TEAM_H_ */