Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel
os
linux
1// SPDX-License-Identifier: GPL-2.0
2#include <linux/kernel.h>
3#include <linux/list.h>
4#include <linux/netdevice.h>
5#include <linux/rtnetlink.h>
6#include <linux/skbuff.h>
7#include <net/ip.h>
8#include <net/switchdev.h>
9
10#include "br_private.h"
11
/* Static branch enabling the TX forwarding offload fast path; incremented
 * for each port that requests tx_fwd_offload (see nbp_switchdev_add()) and
 * decremented on teardown (see nbp_switchdev_del()).
 */
static struct static_key_false br_switchdev_tx_fwd_offload;
13
14static bool nbp_switchdev_can_offload_tx_fwd(const struct net_bridge_port *p,
15 const struct sk_buff *skb)
16{
17 if (!static_branch_unlikely(&br_switchdev_tx_fwd_offload))
18 return false;
19
20 if (br_multicast_igmp_type(skb))
21 return false;
22
23 return (p->flags & BR_TX_FWD_OFFLOAD) &&
24 (p->hwdom != BR_INPUT_SKB_CB(skb)->src_hwdom);
25}
26
27bool br_switchdev_frame_uses_tx_fwd_offload(struct sk_buff *skb)
28{
29 if (!static_branch_unlikely(&br_switchdev_tx_fwd_offload))
30 return false;
31
32 return BR_INPUT_SKB_CB(skb)->tx_fwd_offload;
33}
34
35void br_switchdev_frame_set_offload_fwd_mark(struct sk_buff *skb)
36{
37 skb->offload_fwd_mark = br_switchdev_frame_uses_tx_fwd_offload(skb);
38}
39
40/* Mark the frame for TX forwarding offload if this egress port supports it */
41void nbp_switchdev_frame_mark_tx_fwd_offload(const struct net_bridge_port *p,
42 struct sk_buff *skb)
43{
44 if (nbp_switchdev_can_offload_tx_fwd(p, skb))
45 BR_INPUT_SKB_CB(skb)->tx_fwd_offload = true;
46}
47
48/* Lazily adds the hwdom of the egress bridge port to the bit mask of hwdoms
49 * that the skb has been already forwarded to, to avoid further cloning to
50 * other ports in the same hwdom by making nbp_switchdev_allowed_egress()
51 * return false.
52 */
53void nbp_switchdev_frame_mark_tx_fwd_to_hwdom(const struct net_bridge_port *p,
54 struct sk_buff *skb)
55{
56 if (nbp_switchdev_can_offload_tx_fwd(p, skb))
57 set_bit(p->hwdom, &BR_INPUT_SKB_CB(skb)->fwd_hwdoms);
58}
59
60void nbp_switchdev_frame_mark(const struct net_bridge_port *p,
61 struct sk_buff *skb)
62{
63 if (p->hwdom)
64 BR_INPUT_SKB_CB(skb)->src_hwdom = p->hwdom;
65}
66
67bool nbp_switchdev_allowed_egress(const struct net_bridge_port *p,
68 const struct sk_buff *skb)
69{
70 struct br_input_skb_cb *cb = BR_INPUT_SKB_CB(skb);
71
72 return !test_bit(p->hwdom, &cb->fwd_hwdoms) &&
73 (!skb->offload_fwd_mark || cb->src_hwdom != p->hwdom);
74}
75
/* Flags that can be offloaded to hardware; br_switchdev_set_port_flag()
 * silently ignores any other bridge port flag bits.
 */
#define BR_PORT_FLAGS_HW_OFFLOAD (BR_LEARNING | BR_FLOOD | BR_PORT_MAB | \
				  BR_MCAST_FLOOD | BR_BCAST_FLOOD | BR_PORT_LOCKED | \
				  BR_HAIRPIN_MODE | BR_ISOLATED | BR_MULTICAST_TO_UNICAST)
80
/**
 * br_switchdev_set_port_flag - offload a bridge port flag change to hardware
 * @p: bridge port whose flags are changing
 * @flags: new flag values
 * @mask: which bits of @flags are being changed
 * @extack: netlink extended ack for error reporting
 *
 * Two-phase commit: first ask drivers atomically (PRE_BRIDGE_FLAGS) whether
 * they can offload the change, then apply it with a deferred BRIDGE_FLAGS
 * attribute set. Returns 0 when no offloadable bits changed or when no
 * driver claims the port (-EOPNOTSUPP from the pre-check is tolerated);
 * returns -EOPNOTSUPP when a driver vetoes the change.
 */
int br_switchdev_set_port_flag(struct net_bridge_port *p,
			       unsigned long flags,
			       unsigned long mask,
			       struct netlink_ext_ack *extack)
{
	struct switchdev_attr attr = {
		.orig_dev = p->dev,
	};
	struct switchdev_notifier_port_attr_info info = {
		.attr = &attr,
	};
	int err;

	/* Only the hardware-offloadable bits are relevant here */
	mask &= BR_PORT_FLAGS_HW_OFFLOAD;
	if (!mask)
		return 0;

	attr.id = SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS;
	attr.u.brport_flags.val = flags;
	attr.u.brport_flags.mask = mask;

	/* We run from atomic context here */
	err = call_switchdev_notifiers(SWITCHDEV_PORT_ATTR_SET, p->dev,
				       &info.info, extack);
	err = notifier_to_errno(err);
	if (err == -EOPNOTSUPP)
		return 0;

	if (err) {
		NL_SET_ERR_MSG_WEAK_MOD(extack,
					"bridge flag offload is not supported");
		return -EOPNOTSUPP;
	}

	/* Pre-check passed; commit the change out of atomic context */
	attr.id = SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS;
	attr.flags = SWITCHDEV_F_DEFER;

	err = switchdev_port_attr_set(p->dev, &attr, extack);
	if (err) {
		NL_SET_ERR_MSG_WEAK_MOD(extack,
					"error setting offload flag on port");
		return err;
	}

	return 0;
}
127
128static void br_switchdev_fdb_populate(struct net_bridge *br,
129 struct switchdev_notifier_fdb_info *item,
130 const struct net_bridge_fdb_entry *fdb,
131 const void *ctx)
132{
133 const struct net_bridge_port *p = READ_ONCE(fdb->dst);
134
135 item->addr = fdb->key.addr.addr;
136 item->vid = fdb->key.vlan_id;
137 item->added_by_user = test_bit(BR_FDB_ADDED_BY_USER, &fdb->flags);
138 item->offloaded = test_bit(BR_FDB_OFFLOADED, &fdb->flags);
139 item->is_local = test_bit(BR_FDB_LOCAL, &fdb->flags);
140 item->locked = false;
141 item->info.dev = (!p || item->is_local) ? br->dev : p->dev;
142 item->info.ctx = ctx;
143}
144
145void
146br_switchdev_fdb_notify(struct net_bridge *br,
147 const struct net_bridge_fdb_entry *fdb, int type)
148{
149 struct switchdev_notifier_fdb_info item;
150
151 if (test_bit(BR_FDB_LOCKED, &fdb->flags))
152 return;
153
154 /* Entries with these flags were created using ndm_state == NUD_REACHABLE,
155 * ndm_flags == NTF_MASTER( | NTF_STICKY), ext_flags == 0 by something
156 * equivalent to 'bridge fdb add ... master dynamic (sticky)'.
157 * Drivers don't know how to deal with these, so don't notify them to
158 * avoid confusing them.
159 */
160 if (test_bit(BR_FDB_ADDED_BY_USER, &fdb->flags) &&
161 !test_bit(BR_FDB_STATIC, &fdb->flags) &&
162 !test_bit(BR_FDB_ADDED_BY_EXT_LEARN, &fdb->flags))
163 return;
164
165 br_switchdev_fdb_populate(br, &item, fdb, NULL);
166
167 switch (type) {
168 case RTM_DELNEIGH:
169 call_switchdev_notifiers(SWITCHDEV_FDB_DEL_TO_DEVICE,
170 item.info.dev, &item.info, NULL);
171 break;
172 case RTM_NEWNEIGH:
173 call_switchdev_notifiers(SWITCHDEV_FDB_ADD_TO_DEVICE,
174 item.info.dev, &item.info, NULL);
175 break;
176 }
177}
178
179int br_switchdev_port_vlan_add(struct net_device *dev, u16 vid, u16 flags,
180 bool changed, struct netlink_ext_ack *extack)
181{
182 struct switchdev_obj_port_vlan v = {
183 .obj.orig_dev = dev,
184 .obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
185 .flags = flags,
186 .vid = vid,
187 .changed = changed,
188 };
189
190 return switchdev_port_obj_add(dev, &v.obj, extack);
191}
192
193int br_switchdev_port_vlan_no_foreign_add(struct net_device *dev, u16 vid, u16 flags,
194 bool changed, struct netlink_ext_ack *extack)
195{
196 struct switchdev_obj_port_vlan v = {
197 .obj.orig_dev = dev,
198 .obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
199 .obj.flags = SWITCHDEV_F_NO_FOREIGN,
200 .flags = flags,
201 .vid = vid,
202 .changed = changed,
203 };
204
205 return switchdev_port_obj_add(dev, &v.obj, extack);
206}
207
208int br_switchdev_port_vlan_del(struct net_device *dev, u16 vid)
209{
210 struct switchdev_obj_port_vlan v = {
211 .obj.orig_dev = dev,
212 .obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
213 .vid = vid,
214 };
215
216 return switchdev_port_obj_del(dev, &v.obj);
217}
218
219static int nbp_switchdev_hwdom_set(struct net_bridge_port *joining)
220{
221 struct net_bridge *br = joining->br;
222 struct net_bridge_port *p;
223 int hwdom;
224
225 /* joining is yet to be added to the port list. */
226 list_for_each_entry(p, &br->port_list, list) {
227 if (netdev_phys_item_id_same(&joining->ppid, &p->ppid)) {
228 joining->hwdom = p->hwdom;
229 return 0;
230 }
231 }
232
233 hwdom = find_next_zero_bit(&br->busy_hwdoms, BR_HWDOM_MAX, 1);
234 if (hwdom >= BR_HWDOM_MAX)
235 return -EBUSY;
236
237 set_bit(hwdom, &br->busy_hwdoms);
238 joining->hwdom = hwdom;
239 return 0;
240}
241
242static void nbp_switchdev_hwdom_put(struct net_bridge_port *leaving)
243{
244 struct net_bridge *br = leaving->br;
245 struct net_bridge_port *p;
246
247 /* leaving is no longer in the port list. */
248 list_for_each_entry(p, &br->port_list, list) {
249 if (p->hwdom == leaving->hwdom)
250 return;
251 }
252
253 clear_bit(leaving->hwdom, &br->busy_hwdoms);
254}
255
/* Record that a bridge port is hardware-offloaded: remember its parent ID,
 * refcount repeated offload calls (e.g. bond/team members), assign a hwdom
 * and optionally enable TX forwarding offload.
 *
 * Returns 0 on success, -EBUSY when the same bridge port would be claimed
 * by two different physical switches, or the nbp_switchdev_hwdom_set()
 * error.
 */
static int nbp_switchdev_add(struct net_bridge_port *p,
			     struct netdev_phys_item_id ppid,
			     bool tx_fwd_offload,
			     struct netlink_ext_ack *extack)
{
	int err;

	if (p->offload_count) {
		/* Prevent unsupported configurations such as a bridge port
		 * which is a bonding interface, and the member ports are from
		 * different hardware switches.
		 */
		if (!netdev_phys_item_id_same(&p->ppid, &ppid)) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Same bridge port cannot be offloaded by two physical switches");
			return -EBUSY;
		}

		/* Tolerate drivers that call switchdev_bridge_port_offload()
		 * more than once for the same bridge port, such as when the
		 * bridge port is an offloaded bonding/team interface.
		 */
		p->offload_count++;

		return 0;
	}

	/* First offloader of this port: record its switch identity */
	p->ppid = ppid;
	p->offload_count = 1;

	err = nbp_switchdev_hwdom_set(p);
	if (err)
		return err;

	if (tx_fwd_offload) {
		p->flags |= BR_TX_FWD_OFFLOAD;
		/* Enables the fast path in br_switchdev_frame_uses_tx_fwd_offload() */
		static_branch_inc(&br_switchdev_tx_fwd_offload);
	}

	return 0;
}
297
298static void nbp_switchdev_del(struct net_bridge_port *p)
299{
300 if (WARN_ON(!p->offload_count))
301 return;
302
303 p->offload_count--;
304
305 if (p->offload_count)
306 return;
307
308 if (p->hwdom)
309 nbp_switchdev_hwdom_put(p);
310
311 if (p->flags & BR_TX_FWD_OFFLOAD) {
312 p->flags &= ~BR_TX_FWD_OFFLOAD;
313 static_branch_dec(&br_switchdev_tx_fwd_offload);
314 }
315}
316
317static int
318br_switchdev_fdb_replay_one(struct net_bridge *br, struct notifier_block *nb,
319 const struct net_bridge_fdb_entry *fdb,
320 unsigned long action, const void *ctx)
321{
322 struct switchdev_notifier_fdb_info item;
323 int err;
324
325 br_switchdev_fdb_populate(br, &item, fdb, ctx);
326
327 err = nb->notifier_call(nb, action, &item);
328 return notifier_to_errno(err);
329}
330
331static int
332br_switchdev_fdb_replay(const struct net_device *br_dev, const void *ctx,
333 bool adding, struct notifier_block *nb)
334{
335 struct net_bridge_fdb_entry *fdb;
336 struct net_bridge *br;
337 unsigned long action;
338 int err = 0;
339
340 if (!nb)
341 return 0;
342
343 if (!netif_is_bridge_master(br_dev))
344 return -EINVAL;
345
346 br = netdev_priv(br_dev);
347
348 if (adding)
349 action = SWITCHDEV_FDB_ADD_TO_DEVICE;
350 else
351 action = SWITCHDEV_FDB_DEL_TO_DEVICE;
352
353 rcu_read_lock();
354
355 hlist_for_each_entry_rcu(fdb, &br->fdb_list, fdb_node) {
356 err = br_switchdev_fdb_replay_one(br, nb, fdb, action, ctx);
357 if (err)
358 break;
359 }
360
361 rcu_read_unlock();
362
363 return err;
364}
365
366static int br_switchdev_vlan_attr_replay(struct net_device *br_dev,
367 const void *ctx,
368 struct notifier_block *nb,
369 struct netlink_ext_ack *extack)
370{
371 struct switchdev_notifier_port_attr_info attr_info = {
372 .info = {
373 .dev = br_dev,
374 .extack = extack,
375 .ctx = ctx,
376 },
377 };
378 struct net_bridge *br = netdev_priv(br_dev);
379 struct net_bridge_vlan_group *vg;
380 struct switchdev_attr attr;
381 struct net_bridge_vlan *v;
382 int err;
383
384 attr_info.attr = &attr;
385 attr.orig_dev = br_dev;
386
387 vg = br_vlan_group(br);
388 if (!vg)
389 return 0;
390
391 list_for_each_entry(v, &vg->vlan_list, vlist) {
392 if (v->msti) {
393 attr.id = SWITCHDEV_ATTR_ID_VLAN_MSTI;
394 attr.u.vlan_msti.vid = v->vid;
395 attr.u.vlan_msti.msti = v->msti;
396
397 err = nb->notifier_call(nb, SWITCHDEV_PORT_ATTR_SET,
398 &attr_info);
399 err = notifier_to_errno(err);
400 if (err)
401 return err;
402 }
403 }
404
405 return 0;
406}
407
408static int
409br_switchdev_vlan_replay_one(struct notifier_block *nb,
410 struct net_device *dev,
411 struct switchdev_obj_port_vlan *vlan,
412 const void *ctx, unsigned long action,
413 struct netlink_ext_ack *extack)
414{
415 struct switchdev_notifier_port_obj_info obj_info = {
416 .info = {
417 .dev = dev,
418 .extack = extack,
419 .ctx = ctx,
420 },
421 .obj = &vlan->obj,
422 };
423 int err;
424
425 err = nb->notifier_call(nb, action, &obj_info);
426 return notifier_to_errno(err);
427}
428
429static int br_switchdev_vlan_replay_group(struct notifier_block *nb,
430 struct net_device *dev,
431 struct net_bridge_vlan_group *vg,
432 const void *ctx, unsigned long action,
433 struct netlink_ext_ack *extack)
434{
435 struct net_bridge_vlan *v;
436 int err = 0;
437 u16 pvid;
438
439 if (!vg)
440 return 0;
441
442 pvid = br_get_pvid(vg);
443
444 list_for_each_entry(v, &vg->vlan_list, vlist) {
445 struct switchdev_obj_port_vlan vlan = {
446 .obj.orig_dev = dev,
447 .obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
448 .flags = br_vlan_flags(v, pvid),
449 .vid = v->vid,
450 };
451
452 if (!br_vlan_should_use(v))
453 continue;
454
455 err = br_switchdev_vlan_replay_one(nb, dev, &vlan, ctx,
456 action, extack);
457 if (err)
458 return err;
459 }
460
461 return 0;
462}
463
/* Replay all VLAN state of the bridge towards a notifier block: first the
 * bridge's own VLAN group, then each member port's group, and finally (for
 * additions only) per-VLAN attributes such as MSTI mappings. Runs under
 * rtnl, which protects the VLAN lists.
 *
 * Returns 0 on success, -EINVAL when @br_dev is not a bridge, or the first
 * replay error.
 */
static int br_switchdev_vlan_replay(struct net_device *br_dev,
				    const void *ctx, bool adding,
				    struct notifier_block *nb,
				    struct netlink_ext_ack *extack)
{
	struct net_bridge *br = netdev_priv(br_dev);
	struct net_bridge_port *p;
	unsigned long action;
	int err;

	ASSERT_RTNL();

	if (!nb)
		return 0;

	if (!netif_is_bridge_master(br_dev))
		return -EINVAL;

	if (adding)
		action = SWITCHDEV_PORT_OBJ_ADD;
	else
		action = SWITCHDEV_PORT_OBJ_DEL;

	err = br_switchdev_vlan_replay_group(nb, br_dev, br_vlan_group(br),
					     ctx, action, extack);
	if (err)
		return err;

	list_for_each_entry(p, &br->port_list, list) {
		struct net_device *dev = p->dev;

		err = br_switchdev_vlan_replay_group(nb, dev,
						     nbp_vlan_group(p),
						     ctx, action, extack);
		if (err)
			return err;
	}

	/* Attributes are only meaningful once the VLANs exist */
	if (adding) {
		err = br_switchdev_vlan_attr_replay(br_dev, ctx, nb, extack);
		if (err)
			return err;
	}

	return 0;
}
510
511#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
/* Completion context for a deferred SWITCHDEV_OBJ_ID_PORT_MDB addition;
 * allocated in br_switchdev_mdb_notify() and freed in
 * br_switchdev_mdb_complete().
 */
struct br_switchdev_mdb_complete_info {
	struct net_bridge_port *port;	/* egress port the MDB entry targets */
	struct br_ip ip;		/* multicast group address (by value) */
};
516
/* Completion callback for a deferred PORT_MDB addition: update the port
 * group's offload flags according to the driver's result and notify
 * userspace when the visible flags changed. Always frees @priv (the
 * br_switchdev_mdb_complete_info allocated by br_switchdev_mdb_notify()).
 */
static void br_switchdev_mdb_complete(struct net_device *dev, int err, void *priv)
{
	struct br_switchdev_mdb_complete_info *data = priv;
	struct net_bridge_port_group __rcu **pp;
	struct net_bridge_port_group *p;
	struct net_bridge_mdb_entry *mp;
	struct net_bridge_port *port = data->port;
	struct net_bridge *br = port->br;
	u8 old_flags;

	/* No driver handled the object: nothing to record */
	if (err == -EOPNOTSUPP)
		goto out_free;

	spin_lock_bh(&br->multicast_lock);
	mp = br_mdb_ip_get(br, &data->ip);
	if (!mp)
		goto out;
	/* Walk the entry's port group list looking for our port */
	for (pp = &mp->ports; (p = mlock_dereference(*pp, br)) != NULL;
	     pp = &p->next) {
		if (p->key.port != port)
			continue;

		old_flags = p->flags;
		/* err == 0 means the addition was offloaded successfully */
		br_multicast_set_pg_offload_flags(p, !err);
		if (br_mdb_should_notify(br, old_flags ^ p->flags))
			br_mdb_flag_change_notify(br->dev, mp, p);
	}
out:
	spin_unlock_bh(&br->multicast_lock);
out_free:
	kfree(priv);
}
549
/* Fill a switchdev MDB object from a bridge MDB entry: map the group's
 * IPv4/IPv6 address to its multicast MAC (or copy the MAC for L2 groups)
 * and carry over the VLAN ID.
 */
static void br_switchdev_mdb_populate(struct switchdev_obj_port_mdb *mdb,
				      const struct net_bridge_mdb_entry *mp)
{
	if (mp->addr.proto == htons(ETH_P_IP))
		ip_eth_mc_map(mp->addr.dst.ip4, mdb->addr);
#if IS_ENABLED(CONFIG_IPV6)
	else if (mp->addr.proto == htons(ETH_P_IPV6))
		ipv6_eth_mc_map(&mp->addr.dst.ip6, mdb->addr);
#endif
	else
		/* L2 multicast group: address is already a MAC */
		ether_addr_copy(mdb->addr, mp->addr.dst.mac_addr);

	mdb->vid = mp->addr.vid;
}
564
565static void br_switchdev_host_mdb_one(struct net_device *dev,
566 struct net_device *lower_dev,
567 struct net_bridge_mdb_entry *mp,
568 int type)
569{
570 struct switchdev_obj_port_mdb mdb = {
571 .obj = {
572 .id = SWITCHDEV_OBJ_ID_HOST_MDB,
573 .flags = SWITCHDEV_F_DEFER,
574 .orig_dev = dev,
575 },
576 };
577
578 br_switchdev_mdb_populate(&mdb, mp);
579
580 switch (type) {
581 case RTM_NEWMDB:
582 switchdev_port_obj_add(lower_dev, &mdb.obj, NULL);
583 break;
584 case RTM_DELMDB:
585 switchdev_port_obj_del(lower_dev, &mdb.obj);
586 break;
587 }
588}
589
590static void br_switchdev_host_mdb(struct net_device *dev,
591 struct net_bridge_mdb_entry *mp, int type)
592{
593 struct net_device *lower_dev;
594 struct list_head *iter;
595
596 netdev_for_each_lower_dev(dev, lower_dev, iter)
597 br_switchdev_host_mdb_one(dev, lower_dev, mp, type);
598}
599
600static int
601br_switchdev_mdb_replay_one(struct notifier_block *nb, struct net_device *dev,
602 const struct switchdev_obj_port_mdb *mdb,
603 unsigned long action, const void *ctx,
604 struct netlink_ext_ack *extack)
605{
606 struct switchdev_notifier_port_obj_info obj_info = {
607 .info = {
608 .dev = dev,
609 .extack = extack,
610 .ctx = ctx,
611 },
612 .obj = &mdb->obj,
613 };
614 int err;
615
616 err = nb->notifier_call(nb, action, &obj_info);
617 return notifier_to_errno(err);
618}
619
/* Queue one MDB object for later replay on @mdb_list, unless an identical
 * addition is already sitting in the switchdev deferred queue (in which
 * case the driver would otherwise see the event twice).
 *
 * Called under br->multicast_lock, hence the GFP_ATOMIC allocation.
 * Returns 0 on success (including the elided case), -ENOMEM on allocation
 * failure.
 */
static int br_switchdev_mdb_queue_one(struct list_head *mdb_list,
				      struct net_device *dev,
				      unsigned long action,
				      enum switchdev_obj_id id,
				      const struct net_bridge_mdb_entry *mp,
				      struct net_device *orig_dev)
{
	struct switchdev_obj_port_mdb mdb = {
		.obj = {
			.id = id,
			.orig_dev = orig_dev,
		},
	};
	struct switchdev_obj_port_mdb *pmdb;

	br_switchdev_mdb_populate(&mdb, mp);

	if (action == SWITCHDEV_PORT_OBJ_ADD &&
	    switchdev_port_obj_act_is_deferred(dev, action, &mdb.obj)) {
		/* This event is already in the deferred queue of
		 * events, so this replay must be elided, lest the
		 * driver receives duplicate events for it. This can
		 * only happen when replaying additions, since
		 * modifications are always immediately visible in
		 * br->mdb_list, whereas actual event delivery may be
		 * delayed.
		 */
		return 0;
	}

	/* Heap copy, since the stack object dies when we return */
	pmdb = kmemdup(&mdb, sizeof(mdb), GFP_ATOMIC);
	if (!pmdb)
		return -ENOMEM;

	list_add_tail(&pmdb->obj.list, mdb_list);
	return 0;
}
657
/* Notify switchdev drivers of an MDB entry change. With no port group
 * (@pg == NULL) the entry is host-joined and is propagated to all lower
 * devices; otherwise a deferred PORT_MDB object is sent to the group's
 * port. For additions, a completion context is attached so the offload
 * result can be folded back into the port group's flags; ownership of
 * that allocation passes to br_switchdev_mdb_complete() unless enqueueing
 * fails, in which case it is freed here. Allocation failure silently skips
 * the notification (best effort).
 */
void br_switchdev_mdb_notify(struct net_device *dev,
			     struct net_bridge_mdb_entry *mp,
			     struct net_bridge_port_group *pg,
			     int type)
{
	struct br_switchdev_mdb_complete_info *complete_info;
	struct switchdev_obj_port_mdb mdb = {
		.obj = {
			.id = SWITCHDEV_OBJ_ID_PORT_MDB,
			.flags = SWITCHDEV_F_DEFER,
		},
	};

	if (!pg)
		return br_switchdev_host_mdb(dev, mp, type);

	br_switchdev_mdb_populate(&mdb, mp);

	mdb.obj.orig_dev = pg->key.port->dev;
	switch (type) {
	case RTM_NEWMDB:
		complete_info = kmalloc_obj(*complete_info, GFP_ATOMIC);
		if (!complete_info)
			break;
		complete_info->port = pg->key.port;
		complete_info->ip = mp->addr;
		mdb.obj.complete_priv = complete_info;
		mdb.obj.complete = br_switchdev_mdb_complete;
		if (switchdev_port_obj_add(pg->key.port->dev, &mdb.obj, NULL))
			kfree(complete_info);
		break;
	case RTM_DELMDB:
		switchdev_port_obj_del(pg->key.port->dev, &mdb.obj);
		break;
	}
}
694#endif
695
/* Replay the bridge's MDB (host-joined entries plus per-port groups that
 * egress through @dev) towards a notifier block.
 *
 * Done in two passes: first, under br->multicast_lock, heap copies of all
 * relevant objects are queued (the lock both protects mdb_list traversal
 * and keeps the deferred-duplicate check in br_switchdev_mdb_queue_one()
 * race-free); then, with the lock dropped, each copy is replayed — the
 * notifier call may sleep, so it cannot run under the spinlock. Compiles
 * to a stub returning 0 without CONFIG_BRIDGE_IGMP_SNOOPING.
 *
 * Returns 0 on success (-EOPNOTSUPP from a driver is treated as success),
 * -EINVAL on bad device roles, -ENOMEM or the first replay error otherwise.
 */
static int
br_switchdev_mdb_replay(struct net_device *br_dev, struct net_device *dev,
			const void *ctx, bool adding, struct notifier_block *nb,
			struct netlink_ext_ack *extack)
{
#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
	const struct net_bridge_mdb_entry *mp;
	struct switchdev_obj *obj, *tmp;
	struct net_bridge *br;
	unsigned long action;
	LIST_HEAD(mdb_list);
	int err = 0;

	ASSERT_RTNL();

	if (!nb)
		return 0;

	if (!netif_is_bridge_master(br_dev) || !netif_is_bridge_port(dev))
		return -EINVAL;

	br = netdev_priv(br_dev);

	if (!br_opt_get(br, BROPT_MULTICAST_ENABLED))
		return 0;

	if (adding)
		action = SWITCHDEV_PORT_OBJ_ADD;
	else
		action = SWITCHDEV_PORT_OBJ_DEL;

	/* br_switchdev_mdb_queue_one() will take care to not queue a
	 * replay of an event that is already pending in the switchdev
	 * deferred queue. In order to safely determine that, there
	 * must be no new deferred MDB notifications enqueued for the
	 * duration of the MDB scan. Therefore, grab the write-side
	 * lock to avoid racing with any concurrent IGMP/MLD snooping.
	 */
	spin_lock_bh(&br->multicast_lock);

	hlist_for_each_entry(mp, &br->mdb_list, mdb_node) {
		struct net_bridge_port_group __rcu * const *pp;
		const struct net_bridge_port_group *p;

		if (mp->host_joined) {
			/* Host-joined entries are reported on the bridge device */
			err = br_switchdev_mdb_queue_one(&mdb_list, dev, action,
							 SWITCHDEV_OBJ_ID_HOST_MDB,
							 mp, br_dev);
			if (err) {
				spin_unlock_bh(&br->multicast_lock);
				goto out_free_mdb;
			}
		}

		for (pp = &mp->ports; (p = mlock_dereference(*pp, br)) != NULL;
		     pp = &p->next) {
			/* Only groups egressing through the replayed port */
			if (p->key.port->dev != dev)
				continue;

			err = br_switchdev_mdb_queue_one(&mdb_list, dev, action,
							 SWITCHDEV_OBJ_ID_PORT_MDB,
							 mp, dev);
			if (err) {
				spin_unlock_bh(&br->multicast_lock);
				goto out_free_mdb;
			}
		}
	}

	spin_unlock_bh(&br->multicast_lock);

	/* Second pass: deliver the queued copies, lock dropped (may sleep) */
	list_for_each_entry(obj, &mdb_list, list) {
		err = br_switchdev_mdb_replay_one(nb, dev,
						  SWITCHDEV_OBJ_PORT_MDB(obj),
						  action, ctx, extack);
		if (err == -EOPNOTSUPP)
			err = 0;
		if (err)
			goto out_free_mdb;
	}

out_free_mdb:
	list_for_each_entry_safe(obj, tmp, &mdb_list, list) {
		list_del(&obj->list);
		kfree(SWITCHDEV_OBJ_PORT_MDB(obj));
	}

	if (err)
		return err;
#endif

	return 0;
}
789
790static int nbp_switchdev_sync_objs(struct net_bridge_port *p, const void *ctx,
791 struct notifier_block *atomic_nb,
792 struct notifier_block *blocking_nb,
793 struct netlink_ext_ack *extack)
794{
795 struct net_device *br_dev = p->br->dev;
796 struct net_device *dev = p->dev;
797 int err;
798
799 err = br_switchdev_vlan_replay(br_dev, ctx, true, blocking_nb, extack);
800 if (err && err != -EOPNOTSUPP)
801 return err;
802
803 err = br_switchdev_mdb_replay(br_dev, dev, ctx, true, blocking_nb,
804 extack);
805 if (err) {
806 /* -EOPNOTSUPP not propagated from MDB replay. */
807 return err;
808 }
809
810 err = br_switchdev_fdb_replay(br_dev, ctx, true, atomic_nb);
811 if (err && err != -EOPNOTSUPP)
812 return err;
813
814 return 0;
815}
816
/* Replay deletions of all bridge objects towards a port that is being
 * un-offloaded, in the reverse order of nbp_switchdev_sync_objs()
 * (FDB, MDB, VLANs), then drain the deferred queue. Best effort: replay
 * errors are ignored on teardown.
 */
static void nbp_switchdev_unsync_objs(struct net_bridge_port *p,
				      const void *ctx,
				      struct notifier_block *atomic_nb,
				      struct notifier_block *blocking_nb)
{
	struct net_device *br_dev = p->br->dev;
	struct net_device *dev = p->dev;

	br_switchdev_fdb_replay(br_dev, ctx, false, atomic_nb);

	br_switchdev_mdb_replay(br_dev, dev, ctx, false, blocking_nb, NULL);

	br_switchdev_vlan_replay(br_dev, ctx, false, blocking_nb, NULL);

	/* Make sure that the device leaving this bridge has seen all
	 * relevant events before it is disassociated. In the normal
	 * case, when the device is directly attached to the bridge,
	 * this is covered by del_nbp(). If the association was indirect
	 * however, e.g. via a team or bond, and the device is leaving
	 * that intermediate device, then the bridge port remains in
	 * place.
	 */
	switchdev_deferred_process();
}
841
842/* Let the bridge know that this port is offloaded, so that it can assign a
843 * switchdev hardware domain to it.
844 */
845int br_switchdev_port_offload(struct net_bridge_port *p,
846 struct net_device *dev, const void *ctx,
847 struct notifier_block *atomic_nb,
848 struct notifier_block *blocking_nb,
849 bool tx_fwd_offload,
850 struct netlink_ext_ack *extack)
851{
852 struct netdev_phys_item_id ppid;
853 int err;
854
855 err = netif_get_port_parent_id(dev, &ppid, false);
856 if (err)
857 return err;
858
859 err = nbp_switchdev_add(p, ppid, tx_fwd_offload, extack);
860 if (err)
861 return err;
862
863 err = nbp_switchdev_sync_objs(p, ctx, atomic_nb, blocking_nb, extack);
864 if (err)
865 goto out_switchdev_del;
866
867 return 0;
868
869out_switchdev_del:
870 nbp_switchdev_del(p);
871
872 return err;
873}
874
/* Undo br_switchdev_port_offload(): replay object deletions to the driver's
 * notifiers first, then drop the port's offload reference/hardware domain.
 */
void br_switchdev_port_unoffload(struct net_bridge_port *p, const void *ctx,
				 struct notifier_block *atomic_nb,
				 struct notifier_block *blocking_nb)
{
	nbp_switchdev_unsync_objs(p, ctx, atomic_nb, blocking_nb);

	nbp_switchdev_del(p);
}
883
/* Re-replay the bridge's objects to an already-offloaded port's notifiers,
 * without touching the offload refcount or hardware domain. @dev is unused
 * here; the port's devices are taken from @p.
 */
int br_switchdev_port_replay(struct net_bridge_port *p,
			     struct net_device *dev, const void *ctx,
			     struct notifier_block *atomic_nb,
			     struct notifier_block *blocking_nb,
			     struct netlink_ext_ack *extack)
{
	return nbp_switchdev_sync_objs(p, ctx, atomic_nb, blocking_nb, extack);
}