1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (C) 2023, Intel Corporation. */
5 #include "ice_eswitch_br.h"
7 #include "ice_switch.h"
9 #include "ice_vf_vsi_vlan_ops.h"
10 #include "ice_trace.h"
12 #define ICE_ESW_BRIDGE_UPDATE_INTERVAL msecs_to_jiffies(1000)
/* rhashtable layout for bridge FDB entries: entries are keyed by the
 * embedded struct ice_esw_br_fdb_data and linked through ht_node; the
 * table shrinks automatically as entries are removed.
 */
14 static const struct rhashtable_params ice_fdb_ht_params = {
15 .key_offset = offsetof(struct ice_esw_br_fdb_entry, data),
16 .key_len = sizeof(struct ice_esw_br_fdb_data),
17 .head_offset = offsetof(struct ice_esw_br_fdb_entry, ht_node),
18 .automatic_shrinking = true,
/* Return true for netdevs the bridge offload tracks: ice PF uplinks,
 * port representors (PRs) and LAG masters.
 */
21 static bool ice_eswitch_br_is_dev_valid(const struct net_device *dev)
23 /* Accept only PF netdev, PRs and LAG */
24 return ice_is_port_repr_netdev(dev) || netif_is_ice(dev) ||
25 netif_is_lag_master(dev);

/* Walk @lag_dev's lower devices and pick out the ice uplink netdev.
 * NOTE(review): return path not visible here — presumably returns the
 * first ice lower dev, or NULL when none is found; confirm upstream.
 */
28 static struct net_device *
29 ice_eswitch_br_get_uplink_from_lag(struct net_device *lag_dev)
31 struct net_device *lower;
32 struct list_head *iter;
34 netdev_for_each_lower_dev(lag_dev, lower, iter) {
35 if (netif_is_ice(lower))
/* Map a netdev to its bridge port metadata. For a representor the port
 * hangs off the repr; for a PF/LAG dev the LAG master is first resolved
 * to its ice uplink before looking up the PF.
 */
42 static struct ice_esw_br_port *
43 ice_eswitch_br_netdev_to_port(struct net_device *dev)
45 if (ice_is_port_repr_netdev(dev)) {
46 struct ice_repr *repr = ice_netdev_to_repr(dev);
49 } else if (netif_is_ice(dev) || netif_is_lag_master(dev)) {
50 struct net_device *ice_dev;
53 if (netif_is_lag_master(dev))
54 ice_dev = ice_eswitch_br_get_uplink_from_lag(dev);
61 pf = ice_netdev_to_pf(ice_dev);
/* Fill @rule_info for an ingress (Rx) forward rule: traffic sourced at
 * @pf_id is steered to the VF VSI @vf_vsi_idx.
 */
70 ice_eswitch_br_ingress_rule_setup(struct ice_adv_rule_info *rule_info,
71 u8 pf_id, u16 vf_vsi_idx)
73 rule_info->sw_act.vsi_handle = vf_vsi_idx;
74 rule_info->sw_act.flag |= ICE_FLTR_RX;
75 rule_info->sw_act.src = pf_id;
76 rule_info->priority = 2;
/* Fill @rule_info for an egress (Tx) forward rule towards the PF/uplink
 * VSI, with LAN-enable set so the frame actually leaves the port.
 */
80 ice_eswitch_br_egress_rule_setup(struct ice_adv_rule_info *rule_info,
83 rule_info->sw_act.vsi_handle = pf_vsi_idx;
84 rule_info->sw_act.flag |= ICE_FLTR_TX;
85 rule_info->flags_info.act = ICE_SINGLE_ACT_LAN_ENABLE;
86 rule_info->flags_info.act_valid = true;
87 rule_info->priority = 2;
/* Remove a previously added advanced switch rule by its query data. */
91 ice_eswitch_br_rule_delete(struct ice_hw *hw, struct ice_rule_query_data *rule)
98 err = ice_rem_adv_rule_by_id(hw, rule);
/* Number of lookup elements a rule needs: MAC only, or MAC + VLAN when
 * @vid is a valid (filterable) VLAN id.
 */
105 ice_eswitch_br_get_lkups_cnt(u16 vid)
107 return ice_eswitch_br_is_vid_valid(vid) ? 2 : 1;
/* Append an exact-match VLAN lookup (slot 1) when @vid is valid; the
 * 0xFFFF mask matches the full 16-bit VLAN TCI field.
 */
111 ice_eswitch_br_add_vlan_lkup(struct ice_adv_lkup_elem *list, u16 vid)
113 if (ice_eswitch_br_is_vid_valid(vid)) {
114 list[1].type = ICE_VLAN_OFOS;
115 list[1].h_u.vlan_hdr.vlan = cpu_to_be16(vid & VLAN_VID_MASK);
116 list[1].m_u.vlan_hdr.vlan = cpu_to_be16(0xFFFF);
/* Create the HW forward rule for an FDB entry: match destination MAC
 * (plus VLAN when @vid is valid) and forward to VSI @vsi_idx.  Rule
 * direction/action is chosen by @port_type (uplink => egress setup,
 * VF repr => ingress setup).  Returns the rule query data on success;
 * error/cleanup paths are not visible in this view.
 */
120 static struct ice_rule_query_data *
121 ice_eswitch_br_fwd_rule_create(struct ice_hw *hw, int vsi_idx, int port_type,
122 const unsigned char *mac, u16 vid)
124 struct ice_adv_rule_info rule_info = { 0 };
125 struct ice_rule_query_data *rule;
126 struct ice_adv_lkup_elem *list;
130 lkups_cnt = ice_eswitch_br_get_lkups_cnt(vid);
132 rule = kzalloc(sizeof(*rule), GFP_KERNEL);
134 return ERR_PTR(-ENOMEM);
/* NOTE(review): GFP_ATOMIC here vs GFP_KERNEL above — presumably
 * intentional; confirm against the calling context.
 */
136 list = kcalloc(lkups_cnt, sizeof(*list), GFP_ATOMIC);
143 case ICE_ESWITCH_BR_UPLINK_PORT:
144 ice_eswitch_br_egress_rule_setup(&rule_info, vsi_idx);
146 case ICE_ESWITCH_BR_VF_REPR_PORT:
147 ice_eswitch_br_ingress_rule_setup(&rule_info, hw->pf_id,
/* Slot 0 always matches the destination MAC exactly (broadcast mask). */
155 list[0].type = ICE_MAC_OFOS;
156 ether_addr_copy(list[0].h_u.eth_hdr.dst_addr, mac);
157 eth_broadcast_addr(list[0].m_u.eth_hdr.dst_addr);
159 ice_eswitch_br_add_vlan_lkup(list, vid);
161 rule_info.need_pass_l2 = true;
163 rule_info.sw_act.fltr_act = ICE_FWD_TO_VSI;
165 err = ice_add_adv_rule(hw, list, lkups_cnt, &rule_info, rule);
/* Create the "guard" rule paired with a forward rule: match the same
 * source MAC (plus VLAN) on VSI @vsi_idx with an ICE_NOP action that
 * only allows L2 pass-through, so known-source traffic is not flooded.
 * Error/cleanup paths are not visible in this view.
 */
181 static struct ice_rule_query_data *
182 ice_eswitch_br_guard_rule_create(struct ice_hw *hw, u16 vsi_idx,
183 const unsigned char *mac, u16 vid)
185 struct ice_adv_rule_info rule_info = { 0 };
186 struct ice_rule_query_data *rule;
187 struct ice_adv_lkup_elem *list;
191 lkups_cnt = ice_eswitch_br_get_lkups_cnt(vid);
193 rule = kzalloc(sizeof(*rule), GFP_KERNEL);
197 list = kcalloc(lkups_cnt, sizeof(*list), GFP_ATOMIC);
/* Exact match on the *source* MAC, unlike the forward rule's dst match. */
201 list[0].type = ICE_MAC_OFOS;
202 ether_addr_copy(list[0].h_u.eth_hdr.src_addr, mac);
203 eth_broadcast_addr(list[0].m_u.eth_hdr.src_addr);
205 ice_eswitch_br_add_vlan_lkup(list, vid);
207 rule_info.allow_pass_l2 = true;
208 rule_info.sw_act.vsi_handle = vsi_idx;
209 rule_info.sw_act.fltr_act = ICE_NOP;
210 rule_info.priority = 2;
212 err = ice_add_adv_rule(hw, list, lkups_cnt, &rule_info, rule);
/* Allocate an ice_esw_br_flow and install its forward + guard rule
 * pair for (@mac, @vid) on VSI @vsi_idx.  If the guard rule fails the
 * already-installed forward rule is torn down (visible at the tail).
 */
228 static struct ice_esw_br_flow *
229 ice_eswitch_br_flow_create(struct device *dev, struct ice_hw *hw, int vsi_idx,
230 int port_type, const unsigned char *mac, u16 vid)
232 struct ice_rule_query_data *fwd_rule, *guard_rule;
233 struct ice_esw_br_flow *flow;
236 flow = kzalloc(sizeof(*flow), GFP_KERNEL);
238 return ERR_PTR(-ENOMEM);
240 fwd_rule = ice_eswitch_br_fwd_rule_create(hw, vsi_idx, port_type, mac,
242 err = PTR_ERR_OR_ZERO(fwd_rule);
244 dev_err(dev, "Failed to create eswitch bridge %sgress forward rule, err: %d\n",
245 port_type == ICE_ESWITCH_BR_UPLINK_PORT ? "e" : "in",
250 guard_rule = ice_eswitch_br_guard_rule_create(hw, vsi_idx, mac, vid);
251 err = PTR_ERR_OR_ZERO(guard_rule);
253 dev_err(dev, "Failed to create eswitch bridge %sgress guard rule, err: %d\n",
254 port_type == ICE_ESWITCH_BR_UPLINK_PORT ? "e" : "in",
259 flow->fwd_rule = fwd_rule;
260 flow->guard_rule = guard_rule;
/* Unwind: drop the forward rule when guard-rule creation failed. */
265 ice_eswitch_br_rule_delete(hw, fwd_rule);
/* Look up an FDB entry by MAC (+vid, set outside the visible lines) in
 * the bridge's rhashtable; returns NULL when absent.
 */
272 static struct ice_esw_br_fdb_entry *
273 ice_eswitch_br_fdb_find(struct ice_esw_br *bridge, const unsigned char *mac,
276 struct ice_esw_br_fdb_data data = {
280 ether_addr_copy(data.addr, mac);
281 return rhashtable_lookup_fast(&bridge->fdb_ht, &data,
/* Tear down both HW rules of @flow; deletion failures are logged but
 * not propagated further in the visible lines.
 */
286 ice_eswitch_br_flow_delete(struct ice_pf *pf, struct ice_esw_br_flow *flow)
288 struct device *dev = ice_pf_to_dev(pf);
291 err = ice_eswitch_br_rule_delete(&pf->hw, flow->fwd_rule);
293 dev_err(dev, "Failed to delete FDB forward rule, err: %d\n",
296 err = ice_eswitch_br_rule_delete(&pf->hw, flow->guard_rule);
298 dev_err(dev, "Failed to delete FDB guard rule, err: %d\n",
/* Resolve (vsi_idx, vid) to the port's VLAN metadata.  Both the port
 * xarray and the per-port vlans xarray must contain the keys; a miss
 * is reported at info level and returned as -EINVAL.
 */
304 static struct ice_esw_br_vlan *
305 ice_esw_br_port_vlan_lookup(struct ice_esw_br *bridge, u16 vsi_idx, u16 vid)
307 struct ice_pf *pf = bridge->br_offloads->pf;
308 struct device *dev = ice_pf_to_dev(pf);
309 struct ice_esw_br_port *port;
310 struct ice_esw_br_vlan *vlan;
312 port = xa_load(&bridge->ports, vsi_idx);
314 dev_info(dev, "Bridge port lookup failed (vsi=%u)\n", vsi_idx);
315 return ERR_PTR(-EINVAL);
318 vlan = xa_load(&port->vlans, vid);
320 dev_info(dev, "Bridge port vlan metadata lookup failed (vsi=%u)\n",
322 return ERR_PTR(-EINVAL);
/* Remove @fdb_entry from the hashtable and the bridge list, then drop
 * its HW flow.  Freeing of the entry itself is outside the visible
 * lines.
 */
329 ice_eswitch_br_fdb_entry_delete(struct ice_esw_br *bridge,
330 struct ice_esw_br_fdb_entry *fdb_entry)
332 struct ice_pf *pf = bridge->br_offloads->pf;
334 rhashtable_remove_fast(&bridge->fdb_ht, &fdb_entry->ht_node,
336 list_del(&fdb_entry->list);
338 ice_eswitch_br_flow_delete(pf, fdb_entry->flow);
/* Send a switchdev FDB notification (@val is the notifier event) for
 * (@mac, @vid) on @dev.
 */
344 ice_eswitch_br_fdb_offload_notify(struct net_device *dev,
345 const unsigned char *mac, u16 vid,
348 struct switchdev_notifier_fdb_info fdb_info = {
354 call_switchdev_notifiers(val, dev, &fdb_info.info, NULL);
/* Delete @entry; driver-learned entries (not ADDED_BY_USER) first tell
 * the bridge to drop its software copy via FDB_DEL_TO_BRIDGE.
 */
358 ice_eswitch_br_fdb_entry_notify_and_cleanup(struct ice_esw_br *bridge,
359 struct ice_esw_br_fdb_entry *entry)
361 if (!(entry->flags & ICE_ESWITCH_BR_FDB_ADDED_BY_USER))
362 ice_eswitch_br_fdb_offload_notify(entry->dev, entry->data.addr,
364 SWITCHDEV_FDB_DEL_TO_BRIDGE);
365 ice_eswitch_br_fdb_entry_delete(bridge, entry);
/* Look up the FDB entry for (@mac, @vid) and remove it with the usual
 * notify-and-cleanup path; a missing entry is only logged.
 */
369 ice_eswitch_br_fdb_entry_find_and_delete(struct ice_esw_br *bridge,
370 const unsigned char *mac, u16 vid)
372 struct ice_pf *pf = bridge->br_offloads->pf;
373 struct ice_esw_br_fdb_entry *fdb_entry;
374 struct device *dev = ice_pf_to_dev(pf);
376 fdb_entry = ice_eswitch_br_fdb_find(bridge, mac, vid);
378 dev_err(dev, "FDB entry with mac: %pM and vid: %u not found\n",
383 trace_ice_eswitch_br_fdb_entry_find_and_delete(fdb_entry);
384 ice_eswitch_br_fdb_entry_notify_and_cleanup(bridge, fdb_entry);
/* Create an offloaded FDB entry for (@mac, @vid) on @br_port: validate
 * VLAN state, replace any stale entry, install the HW flow, insert into
 * the hashtable/list and notify switchdev.  User-added entries get
 * SWITCHDEV_FDB_OFFLOADED; learned ones get FDB_ADD_TO_BRIDGE.
 */
388 ice_eswitch_br_fdb_entry_create(struct net_device *netdev,
389 struct ice_esw_br_port *br_port,
391 const unsigned char *mac, u16 vid)
393 struct ice_esw_br *bridge = br_port->bridge;
394 struct ice_pf *pf = bridge->br_offloads->pf;
395 struct device *dev = ice_pf_to_dev(pf);
396 struct ice_esw_br_fdb_entry *fdb_entry;
397 struct ice_esw_br_flow *flow;
398 struct ice_esw_br_vlan *vlan;
399 struct ice_hw *hw = &pf->hw;
403 /* untagged filtering is not yet supported */
404 if (!(bridge->flags & ICE_ESWITCH_BR_VLAN_FILTERING) && vid)
/* With filtering on, the VLAN must already be configured on the port. */
407 if ((bridge->flags & ICE_ESWITCH_BR_VLAN_FILTERING)) {
408 vlan = ice_esw_br_port_vlan_lookup(bridge, br_port->vsi_idx,
411 dev_err(dev, "Failed to find vlan lookup, err: %ld\n",
/* An existing entry for the same key is removed before re-adding. */
417 fdb_entry = ice_eswitch_br_fdb_find(bridge, mac, vid);
419 ice_eswitch_br_fdb_entry_notify_and_cleanup(bridge, fdb_entry);
421 fdb_entry = kzalloc(sizeof(*fdb_entry), GFP_KERNEL);
427 flow = ice_eswitch_br_flow_create(dev, hw, br_port->vsi_idx,
428 br_port->type, mac, vid);
434 ether_addr_copy(fdb_entry->data.addr, mac);
435 fdb_entry->data.vid = vid;
436 fdb_entry->br_port = br_port;
437 fdb_entry->flow = flow;
438 fdb_entry->dev = netdev;
439 fdb_entry->last_use = jiffies;
440 event = SWITCHDEV_FDB_ADD_TO_BRIDGE;
443 fdb_entry->flags |= ICE_ESWITCH_BR_FDB_ADDED_BY_USER;
444 event = SWITCHDEV_FDB_OFFLOADED;
447 err = rhashtable_insert_fast(&bridge->fdb_ht, &fdb_entry->ht_node,
452 list_add(&fdb_entry->list, &bridge->fdb_list);
453 trace_ice_eswitch_br_fdb_entry_create(fdb_entry);
455 ice_eswitch_br_fdb_offload_notify(netdev, mac, vid, event);
/* Error unwind: drop the HW flow, then report the failure. */
460 ice_eswitch_br_flow_delete(pf, flow);
464 dev_err(dev, "Failed to create fdb entry, err: %d\n", err);
/* Free the MAC copy owned by a deferred FDB work item (the work struct
 * itself is presumably freed outside the visible lines).
 */
468 ice_eswitch_br_fdb_work_dealloc(struct ice_esw_br_fdb_work *fdb_work)
470 kfree(fdb_work->fdb_info.addr);
/* Deferred handler for switchdev FDB add/del events, running from the
 * bridge workqueue: resolve the netdev to a bridge port and create or
 * delete the matching FDB entry, then release the dev ref and the work
 * allocation.
 */
475 ice_eswitch_br_fdb_event_work(struct work_struct *work)
477 struct ice_esw_br_fdb_work *fdb_work = ice_work_to_fdb_work(work);
478 bool added_by_user = fdb_work->fdb_info.added_by_user;
479 const unsigned char *mac = fdb_work->fdb_info.addr;
480 u16 vid = fdb_work->fdb_info.vid;
481 struct ice_esw_br_port *br_port;
485 br_port = ice_eswitch_br_netdev_to_port(fdb_work->dev);
489 switch (fdb_work->event) {
490 case SWITCHDEV_FDB_ADD_TO_DEVICE:
491 ice_eswitch_br_fdb_entry_create(fdb_work->dev, br_port,
492 added_by_user, mac, vid);
494 case SWITCHDEV_FDB_DEL_TO_DEVICE:
495 ice_eswitch_br_fdb_entry_find_and_delete(br_port->bridge,
/* Drop the reference taken when the work was queued. */
504 dev_put(fdb_work->dev);
505 ice_eswitch_br_fdb_work_dealloc(fdb_work);
/* Allocate and populate a deferred FDB work item from notifier (atomic)
 * context: copies @fdb_info and duplicates its MAC so the work item
 * owns its own storage.  GFP_ATOMIC because this runs under the
 * switchdev notifier.
 */
508 static struct ice_esw_br_fdb_work *
509 ice_eswitch_br_fdb_work_alloc(struct switchdev_notifier_fdb_info *fdb_info,
510 struct net_device *dev,
513 struct ice_esw_br_fdb_work *work;
516 work = kzalloc(sizeof(*work), GFP_ATOMIC);
518 return ERR_PTR(-ENOMEM);
520 INIT_WORK(&work->work, ice_eswitch_br_fdb_event_work);
521 memcpy(&work->fdb_info, fdb_info, sizeof(work->fdb_info));
523 mac = kzalloc(ETH_ALEN, GFP_ATOMIC);
/* NOTE(review): the leak-free unwind (kfree(work)) before this return
 * is presumably on an elided line — confirm.
 */
526 return ERR_PTR(-ENOMEM);
529 ether_addr_copy(mac, fdb_info->addr);
530 work->fdb_info.addr = mac;
/* Atomic switchdev notifier: filter FDB add/del events down to devices
 * that are valid bridge members and attached to a bridge master, then
 * defer the real work to the bridge workqueue via a work item.
 */
538 ice_eswitch_br_switchdev_event(struct notifier_block *nb,
539 unsigned long event, void *ptr)
541 struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
542 struct switchdev_notifier_fdb_info *fdb_info;
543 struct switchdev_notifier_info *info = ptr;
544 struct ice_esw_br_offloads *br_offloads;
545 struct ice_esw_br_fdb_work *work;
546 struct netlink_ext_ack *extack;
547 struct net_device *upper;
549 br_offloads = ice_nb_to_br_offloads(nb, switchdev_nb);
550 extack = switchdev_notifier_info_to_extack(ptr);
/* Only care about devices enslaved to a bridge master. */
552 upper = netdev_master_upper_dev_get_rcu(dev);
556 if (!netif_is_bridge_master(upper))
559 if (!ice_eswitch_br_is_dev_valid(dev))
562 if (!ice_eswitch_br_netdev_to_port(dev))
566 case SWITCHDEV_FDB_ADD_TO_DEVICE:
567 case SWITCHDEV_FDB_DEL_TO_DEVICE:
568 fdb_info = container_of(info, typeof(*fdb_info), info);
570 work = ice_eswitch_br_fdb_work_alloc(fdb_info, dev, event);
572 NL_SET_ERR_MSG_MOD(extack, "Failed to init switchdev fdb work");
573 return notifier_from_errno(PTR_ERR(work));
577 queue_work(br_offloads->wq, &work->work);
/* Drop every FDB entry on the bridge (safe iteration — cleanup unlinks
 * each entry from fdb_list).
 */
585 static void ice_eswitch_br_fdb_flush(struct ice_esw_br *bridge)
587 struct ice_esw_br_fdb_entry *entry, *tmp;
589 list_for_each_entry_safe(entry, tmp, &bridge->fdb_list, list)
590 ice_eswitch_br_fdb_entry_notify_and_cleanup(bridge, entry);
/* Toggle VLAN filtering on the bridge.  A real state change flushes the
 * FDB first since existing entries were installed under the old mode.
 */
594 ice_eswitch_br_vlan_filtering_set(struct ice_esw_br *bridge, bool enable)
596 if (enable == !!(bridge->flags & ICE_ESWITCH_BR_VLAN_FILTERING))
599 ice_eswitch_br_fdb_flush(bridge);
601 bridge->flags |= ICE_ESWITCH_BR_VLAN_FILTERING;
603 bridge->flags &= ~ICE_ESWITCH_BR_VLAN_FILTERING;
/* Undo a port VLAN (PVID) configuration on the port's VSI: remove the
 * 802.1Q VLAN, clear the HW port-VLAN setting and disable port-VLAN
 * mode on the VF VSI.
 */
607 ice_eswitch_br_clear_pvid(struct ice_esw_br_port *port)
609 struct ice_vlan port_vlan = ICE_VLAN(ETH_P_8021Q, port->pvid, 0);
610 struct ice_vsi_vlan_ops *vlan_ops;
612 vlan_ops = ice_get_compat_vsi_vlan_ops(port->vsi);
614 vlan_ops->del_vlan(port->vsi, &port_vlan);
615 vlan_ops->clear_port_vlan(port->vsi);
617 ice_vf_vsi_disable_port_vlan(port->vsi);
/* Tear down one VLAN on a bridge port: purge FDB entries learned for
 * that vid, drop it from the port's vlans xarray, and clear the PVID
 * if this vid was the port VLAN.
 */
623 ice_eswitch_br_vlan_cleanup(struct ice_esw_br_port *port,
624 struct ice_esw_br_vlan *vlan)
626 struct ice_esw_br_fdb_entry *fdb_entry, *tmp;
627 struct ice_esw_br *bridge = port->bridge;
629 trace_ice_eswitch_br_vlan_cleanup(vlan);
631 list_for_each_entry_safe(fdb_entry, tmp, &bridge->fdb_list, list) {
632 if (vlan->vid == fdb_entry->data.vid)
633 ice_eswitch_br_fdb_entry_delete(bridge, fdb_entry);
636 xa_erase(&port->vlans, vlan->vid);
637 if (port->pvid == vlan->vid)
638 ice_eswitch_br_clear_pvid(port);
/* Remove every VLAN configured on @port. */
642 static void ice_eswitch_br_port_vlans_flush(struct ice_esw_br_port *port)
644 struct ice_esw_br_vlan *vlan;
647 xa_for_each(&port->vlans, index, vlan)
648 ice_eswitch_br_vlan_cleanup(port, vlan)
/* Configure @vlan as the port VLAN (PVID) on @port's VSI.  No-op when
 * it already is the PVID or for vid 1; rejected on the uplink (HW
 * limitation) and when a different PVID is already present.  Existing
 * trunk VLANs are flushed before the PVID is recorded.
 */
652 ice_eswitch_br_set_pvid(struct ice_esw_br_port *port,
653 struct ice_esw_br_vlan *vlan)
655 struct ice_vlan port_vlan = ICE_VLAN(ETH_P_8021Q, vlan->vid, 0);
656 struct device *dev = ice_pf_to_dev(port->vsi->back);
657 struct ice_vsi_vlan_ops *vlan_ops;
660 if (port->pvid == vlan->vid || vlan->vid == 1)
663 /* Setting port vlan on uplink isn't supported by hw */
664 if (port->type == ICE_ESWITCH_BR_UPLINK_PORT)
669 "Port VLAN (vsi=%u, vid=%u) already exists on the port, remove it before adding new one\n",
670 port->vsi_idx, port->pvid);
674 ice_vf_vsi_enable_port_vlan(port->vsi);
676 vlan_ops = ice_get_compat_vsi_vlan_ops(port->vsi);
677 err = vlan_ops->set_port_vlan(port->vsi, &port_vlan);
681 err = vlan_ops->add_vlan(port->vsi, &port_vlan);
685 ice_eswitch_br_port_vlans_flush(port);
686 port->pvid = vlan->vid;
/* Allocate per-port VLAN metadata for @vid.  PVID+UNTAGGED together
 * configure a port VLAN; either flag alone is rejected (push and pop
 * are only supported simultaneously).  The entry is keyed by vid in the
 * port's vlans xarray; error unwind clears the PVID if it was set.
 */
691 static struct ice_esw_br_vlan *
692 ice_eswitch_br_vlan_create(u16 vid, u16 flags, struct ice_esw_br_port *port)
694 struct device *dev = ice_pf_to_dev(port->vsi->back);
695 struct ice_esw_br_vlan *vlan;
698 vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
700 return ERR_PTR(-ENOMEM);
704 if ((flags & BRIDGE_VLAN_INFO_PVID) &&
705 (flags & BRIDGE_VLAN_INFO_UNTAGGED)) {
706 err = ice_eswitch_br_set_pvid(port, vlan);
709 } else if ((flags & BRIDGE_VLAN_INFO_PVID) ||
710 (flags & BRIDGE_VLAN_INFO_UNTAGGED)) {
711 dev_info(dev, "VLAN push and pop are supported only simultaneously\n");
716 err = xa_insert(&port->vlans, vlan->vid, vlan, GFP_KERNEL);
720 trace_ice_eswitch_br_vlan_create(vlan);
726 ice_eswitch_br_clear_pvid(port);
/* Add (or refresh) VLAN @vid with @flags on the port identified by
 * @vsi_idx.  A port with a PVID set cannot take trunk VLANs; an
 * existing entry with identical flags is left alone, otherwise it is
 * recreated.
 */
733 ice_eswitch_br_port_vlan_add(struct ice_esw_br *bridge, u16 vsi_idx, u16 vid,
734 u16 flags, struct netlink_ext_ack *extack)
736 struct ice_esw_br_port *port;
737 struct ice_esw_br_vlan *vlan;
739 port = xa_load(&bridge->ports, vsi_idx);
744 dev_info(ice_pf_to_dev(port->vsi->back),
745 "Port VLAN (vsi=%u, vid=%d) exists on the port, remove it to add trunk VLANs\n",
746 port->vsi_idx, port->pvid);
750 vlan = xa_load(&port->vlans, vid);
752 if (vlan->flags == flags)
755 ice_eswitch_br_vlan_cleanup(port, vlan);
758 vlan = ice_eswitch_br_vlan_create(vid, flags, port);
760 NL_SET_ERR_MSG_FMT_MOD(extack, "Failed to create VLAN entry, vid: %u, vsi: %u",
762 return PTR_ERR(vlan);
/* Remove VLAN @vid from the port if both port and VLAN exist. */
769 ice_eswitch_br_port_vlan_del(struct ice_esw_br *bridge, u16 vsi_idx, u16 vid)
771 struct ice_esw_br_port *port;
772 struct ice_esw_br_vlan *vlan;
774 port = xa_load(&bridge->ports, vsi_idx);
778 vlan = xa_load(&port->vlans, vid);
782 ice_eswitch_br_vlan_cleanup(port, vlan);
/* switchdev blocking-notifier object-add callback: currently only
 * SWITCHDEV_OBJ_ID_PORT_VLAN is handled, mapped to port_vlan_add.
 */
786 ice_eswitch_br_port_obj_add(struct net_device *netdev, const void *ctx,
787 const struct switchdev_obj *obj,
788 struct netlink_ext_ack *extack)
790 struct ice_esw_br_port *br_port = ice_eswitch_br_netdev_to_port(netdev);
791 struct switchdev_obj_port_vlan *vlan;
798 case SWITCHDEV_OBJ_ID_PORT_VLAN:
799 vlan = SWITCHDEV_OBJ_PORT_VLAN(obj);
800 err = ice_eswitch_br_port_vlan_add(br_port->bridge,
801 br_port->vsi_idx, vlan->vid,
802 vlan->flags, extack);
/* Object-del counterpart: remove the port VLAN. */
810 ice_eswitch_br_port_obj_del(struct net_device *netdev, const void *ctx,
811 const struct switchdev_obj *obj)
813 struct ice_esw_br_port *br_port = ice_eswitch_br_netdev_to_port(netdev);
814 struct switchdev_obj_port_vlan *vlan;
820 case SWITCHDEV_OBJ_ID_PORT_VLAN:
821 vlan = SWITCHDEV_OBJ_PORT_VLAN(obj);
822 ice_eswitch_br_port_vlan_del(br_port->bridge, br_port->vsi_idx,
/* Attribute setter: supports bridge VLAN-filtering toggle and ageing
 * time (converted from clock_t to jiffies).
 */
831 ice_eswitch_br_port_obj_attr_set(struct net_device *netdev, const void *ctx,
832 const struct switchdev_attr *attr,
833 struct netlink_ext_ack *extack)
835 struct ice_esw_br_port *br_port = ice_eswitch_br_netdev_to_port(netdev);
841 case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING:
842 ice_eswitch_br_vlan_filtering_set(br_port->bridge,
843 attr->u.vlan_filtering);
845 case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME:
846 br_port->bridge->ageing_time =
847 clock_t_to_jiffies(attr->u.ageing_time);
/* Blocking switchdev notifier: dispatch port object add/del and
 * attribute-set events through the generic switchdev helpers, which
 * filter on ice_eswitch_br_is_dev_valid before calling our handlers.
 */
855 ice_eswitch_br_event_blocking(struct notifier_block *nb, unsigned long event,
858 struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
862 case SWITCHDEV_PORT_OBJ_ADD:
863 err = switchdev_handle_port_obj_add(dev, ptr,
864 ice_eswitch_br_is_dev_valid,
865 ice_eswitch_br_port_obj_add);
867 case SWITCHDEV_PORT_OBJ_DEL:
868 err = switchdev_handle_port_obj_del(dev, ptr,
869 ice_eswitch_br_is_dev_valid,
870 ice_eswitch_br_port_obj_del);
872 case SWITCHDEV_PORT_ATTR_SET:
873 err = switchdev_handle_port_attr_set(dev, ptr,
874 ice_eswitch_br_is_dev_valid,
875 ice_eswitch_br_port_obj_attr_set);
881 return notifier_from_errno(err);
/* Detach @br_port from @bridge: delete its FDB entries, clear the
 * back-pointer on the PF (uplink port) or the representor (VF port),
 * remove it from the ports xarray and flush its VLANs.  Freeing of
 * br_port itself is outside the visible lines.
 */
885 ice_eswitch_br_port_deinit(struct ice_esw_br *bridge,
886 struct ice_esw_br_port *br_port)
888 struct ice_esw_br_fdb_entry *fdb_entry, *tmp;
889 struct ice_vsi *vsi = br_port->vsi;
891 list_for_each_entry_safe(fdb_entry, tmp, &bridge->fdb_list, list) {
892 if (br_port == fdb_entry->br_port)
893 ice_eswitch_br_fdb_entry_delete(bridge, fdb_entry);
896 if (br_port->type == ICE_ESWITCH_BR_UPLINK_PORT && vsi->back) {
897 vsi->back->br_port = NULL;
899 struct ice_repr *repr =
900 ice_repr_get(vsi->back, br_port->repr_id);
903 repr->br_port = NULL;
906 xa_erase(&bridge->ports, br_port->vsi_idx);
907 ice_eswitch_br_port_vlans_flush(br_port);
/* Allocate a bridge-port object and attach it to @bridge with an empty
 * VLAN xarray.
 */
911 static struct ice_esw_br_port *
912 ice_eswitch_br_port_init(struct ice_esw_br *bridge)
914 struct ice_esw_br_port *br_port;
916 br_port = kzalloc(sizeof(*br_port), GFP_KERNEL);
918 return ERR_PTR(-ENOMEM);
920 xa_init(&br_port->vlans);
922 br_port->bridge = bridge;
/* Create a VF-representor bridge port for @repr: binds it to the
 * repr's source VSI, records the repr id, and registers the port under
 * its VSI index; xa_insert failure unwinds through port_deinit.
 */
928 ice_eswitch_br_vf_repr_port_init(struct ice_esw_br *bridge,
929 struct ice_repr *repr)
931 struct ice_esw_br_port *br_port;
934 br_port = ice_eswitch_br_port_init(bridge);
936 return PTR_ERR(br_port);
938 br_port->vsi = repr->src_vsi;
939 br_port->vsi_idx = br_port->vsi->idx;
940 br_port->type = ICE_ESWITCH_BR_VF_REPR_PORT;
941 br_port->repr_id = repr->id;
942 repr->br_port = br_port;
944 err = xa_insert(&bridge->ports, br_port->vsi_idx, br_port, GFP_KERNEL);
946 ice_eswitch_br_port_deinit(bridge, br_port);
/* Create the uplink bridge port bound to the PF's eswitch uplink VSI
 * and register it under the VSI index; xa_insert failure unwinds
 * through port_deinit.
 */
954 ice_eswitch_br_uplink_port_init(struct ice_esw_br *bridge, struct ice_pf *pf)
956 struct ice_vsi *vsi = pf->eswitch.uplink_vsi;
957 struct ice_esw_br_port *br_port;
960 br_port = ice_eswitch_br_port_init(bridge);
962 return PTR_ERR(br_port);
965 br_port->vsi_idx = br_port->vsi->idx;
966 br_port->type = ICE_ESWITCH_BR_UPLINK_PORT;
967 pf->br_port = br_port;
969 err = xa_insert(&bridge->ports, br_port->vsi_idx, br_port, GFP_KERNEL);
971 ice_eswitch_br_port_deinit(bridge, br_port);
/* Deinit every port still registered on the bridge. */
979 ice_eswitch_br_ports_flush(struct ice_esw_br *bridge)
981 struct ice_esw_br_port *port;
984 xa_for_each(&bridge->ports, i, port)
985 ice_eswitch_br_port_deinit(bridge, port);
/* Destroy @bridge: flush all ports (which may have been added
 * asynchronously), verify the port xarray is empty, tear down the FDB
 * hashtable and detach from br_offloads.
 */
989 ice_eswitch_br_deinit(struct ice_esw_br_offloads *br_offloads,
990 struct ice_esw_br *bridge)
995 /* Cleanup all the ports that were added asynchronously
996 * through NETDEV_CHANGEUPPER event.
998 ice_eswitch_br_ports_flush(bridge);
999 WARN_ON(!xa_empty(&bridge->ports));
1000 xa_destroy(&bridge->ports);
1001 rhashtable_destroy(&bridge->fdb_ht);
1003 br_offloads->bridge = NULL;
/* Allocate the single bridge object for this eswitch, bound to the
 * linux bridge identified by @ifindex, with an empty FDB table/list,
 * default ageing time, and an empty ports xarray.
 */
1007 static struct ice_esw_br *
1008 ice_eswitch_br_init(struct ice_esw_br_offloads *br_offloads, int ifindex)
1010 struct ice_esw_br *bridge;
1013 bridge = kzalloc(sizeof(*bridge), GFP_KERNEL);
1015 return ERR_PTR(-ENOMEM);
1017 err = rhashtable_init(&bridge->fdb_ht, &ice_fdb_ht_params);
1020 return ERR_PTR(err);
1023 INIT_LIST_HEAD(&bridge->fdb_list);
1024 bridge->br_offloads = br_offloads;
1025 bridge->ifindex = ifindex;
1026 bridge->ageing_time = clock_t_to_jiffies(BR_DEFAULT_AGEING_TIME);
1027 xa_init(&bridge->ports);
1028 br_offloads->bridge = bridge;
/* Return the eswitch's bridge for @ifindex, creating it on first use.
 * Only one bridge per eswitch is supported: a second ifindex is
 * rejected with -EOPNOTSUPP.
 */
1033 static struct ice_esw_br *
1034 ice_eswitch_br_get(struct ice_esw_br_offloads *br_offloads, int ifindex,
1035 struct netlink_ext_ack *extack)
1037 struct ice_esw_br *bridge = br_offloads->bridge;
1040 if (bridge->ifindex != ifindex) {
1041 NL_SET_ERR_MSG_MOD(extack,
1042 "Only one bridge is supported per eswitch");
1043 return ERR_PTR(-EOPNOTSUPP);
1048 /* Create the bridge if it doesn't exist yet */
1049 bridge = ice_eswitch_br_init(br_offloads, ifindex);
1051 NL_SET_ERR_MSG_MOD(extack, "Failed to init the bridge");
/* Destroy the bridge once its last port is gone. */
1057 ice_eswitch_br_verify_deinit(struct ice_esw_br_offloads *br_offloads,
1058 struct ice_esw_br *bridge)
1060 /* Remove the bridge if it exists and there are no ports left */
1061 if (!bridge || !xa_empty(&bridge->ports))
1064 ice_eswitch_br_deinit(br_offloads, bridge);
/* Handle a netdev leaving the bridge (@ifindex): validate the port is
 * attached to that bridge, deinit the port, and drop the bridge if it
 * is now empty.
 */
1068 ice_eswitch_br_port_unlink(struct ice_esw_br_offloads *br_offloads,
1069 struct net_device *dev, int ifindex,
1070 struct netlink_ext_ack *extack)
1072 struct ice_esw_br_port *br_port = ice_eswitch_br_netdev_to_port(dev);
1073 struct ice_esw_br *bridge;
1076 NL_SET_ERR_MSG_MOD(extack,
1077 "Port representor is not attached to any bridge");
1081 if (br_port->bridge->ifindex != ifindex) {
1082 NL_SET_ERR_MSG_MOD(extack,
1083 "Port representor is attached to another bridge");
1087 bridge = br_port->bridge;
1089 trace_ice_eswitch_br_port_unlink(br_port);
1090 ice_eswitch_br_port_deinit(br_port->bridge, br_port);
1091 ice_eswitch_br_verify_deinit(br_offloads, bridge);
/* Handle a netdev joining the bridge (@ifindex): reject double-attach,
 * get-or-create the bridge, then init either a VF-representor port or
 * (for PF/LAG netdevs, after resolving the LAG to its ice uplink) the
 * uplink port.  On port-init failure the possibly-fresh empty bridge
 * is torn down again via verify_deinit.
 */
1097 ice_eswitch_br_port_link(struct ice_esw_br_offloads *br_offloads,
1098 struct net_device *dev, int ifindex,
1099 struct netlink_ext_ack *extack)
1101 struct ice_esw_br *bridge;
1104 if (ice_eswitch_br_netdev_to_port(dev)) {
1105 NL_SET_ERR_MSG_MOD(extack,
1106 "Port is already attached to the bridge");
1110 bridge = ice_eswitch_br_get(br_offloads, ifindex, extack);
1112 return PTR_ERR(bridge);
1114 if (ice_is_port_repr_netdev(dev)) {
1115 struct ice_repr *repr = ice_netdev_to_repr(dev);
1117 err = ice_eswitch_br_vf_repr_port_init(bridge, repr);
1118 trace_ice_eswitch_br_port_link(repr->br_port);
1120 struct net_device *ice_dev;
1123 if (netif_is_lag_master(dev))
1124 ice_dev = ice_eswitch_br_get_uplink_from_lag(dev);
1131 pf = ice_netdev_to_pf(ice_dev);
1133 err = ice_eswitch_br_uplink_port_init(bridge, pf);
1134 trace_ice_eswitch_br_port_link(pf->br_port);
1137 NL_SET_ERR_MSG_MOD(extack, "Failed to init bridge port");
/* Error unwind: remove the bridge again if it has no ports. */
1144 ice_eswitch_br_verify_deinit(br_offloads, bridge);
/* NETDEV_CHANGEUPPER handler: when a valid device gains/loses a bridge
 * master as its upper, link or unlink the corresponding bridge port.
 */
1149 ice_eswitch_br_port_changeupper(struct notifier_block *nb, void *ptr)
1151 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
1152 struct netdev_notifier_changeupper_info *info = ptr;
1153 struct ice_esw_br_offloads *br_offloads;
1154 struct netlink_ext_ack *extack;
1155 struct net_device *upper;
1157 br_offloads = ice_nb_to_br_offloads(nb, netdev_nb);
1159 if (!ice_eswitch_br_is_dev_valid(dev))
1162 upper = info->upper_dev;
1163 if (!netif_is_bridge_master(upper))
1166 extack = netdev_notifier_info_to_extack(&info->info);
1169 return ice_eswitch_br_port_link(br_offloads, dev,
1170 upper->ifindex, extack);
1172 return ice_eswitch_br_port_unlink(br_offloads, dev,
1173 upper->ifindex, extack);
/* Netdevice notifier entry: only NETDEV_CHANGEUPPER is acted on. */
1177 ice_eswitch_br_port_event(struct notifier_block *nb,
1178 unsigned long event, void *ptr)
1183 case NETDEV_CHANGEUPPER:
1184 err = ice_eswitch_br_port_changeupper(nb, ptr);
1188 return notifier_from_errno(err);
/* Free the PF's bridge-offload state, destroying any live bridge
 * first and clearing the PF back-pointer.
 */
1192 ice_eswitch_br_offloads_dealloc(struct ice_pf *pf)
1194 struct ice_esw_br_offloads *br_offloads = pf->eswitch.br_offloads;
1201 ice_eswitch_br_deinit(br_offloads, br_offloads->bridge);
1203 pf->eswitch.br_offloads = NULL;
/* Allocate bridge-offload state for @pf; -EEXIST if already present. */
1207 static struct ice_esw_br_offloads *
1208 ice_eswitch_br_offloads_alloc(struct ice_pf *pf)
1210 struct ice_esw_br_offloads *br_offloads;
1214 if (pf->eswitch.br_offloads)
1215 return ERR_PTR(-EEXIST);
1217 br_offloads = kzalloc(sizeof(*br_offloads), GFP_KERNEL);
1219 return ERR_PTR(-ENOMEM);
1221 pf->eswitch.br_offloads = br_offloads;
1222 br_offloads->pf = pf;
/* Tear down bridge offload support: cancel the ageing work, unregister
 * all three notifiers (reverse of registration order), destroy the
 * workqueue and, under rtnl (per the comment), free the state.
 */
1228 ice_eswitch_br_offloads_deinit(struct ice_pf *pf)
1230 struct ice_esw_br_offloads *br_offloads;
1232 br_offloads = pf->eswitch.br_offloads;
1236 cancel_delayed_work_sync(&br_offloads->update_work);
1237 unregister_netdevice_notifier(&br_offloads->netdev_nb);
1238 unregister_switchdev_blocking_notifier(&br_offloads->switchdev_blk);
1239 unregister_switchdev_notifier(&br_offloads->switchdev_nb);
1240 destroy_workqueue(br_offloads->wq);
1241 /* Although notifier block is unregistered just before,
1242 * so we don't get any new events, some events might be
1243 * already in progress. Hold the rtnl lock and wait for
1247 ice_eswitch_br_offloads_dealloc(pf);
/* Ageing pass: drop learned FDB entries whose last_use is older than
 * the bridge ageing time; user-added entries never age out.
 */
1251 static void ice_eswitch_br_update(struct ice_esw_br_offloads *br_offloads)
1253 struct ice_esw_br *bridge = br_offloads->bridge;
1254 struct ice_esw_br_fdb_entry *entry, *tmp;
1260 list_for_each_entry_safe(entry, tmp, &bridge->fdb_list, list) {
1261 if (entry->flags & ICE_ESWITCH_BR_FDB_ADDED_BY_USER)
/* Still within the ageing window — keep the entry. */
1264 if (time_is_after_eq_jiffies(entry->last_use +
1265 bridge->ageing_time))
1268 ice_eswitch_br_fdb_entry_notify_and_cleanup(bridge, entry);
/* Self-rearming work item: run one ageing pass, then requeue at the
 * fixed 1s ICE_ESW_BRIDGE_UPDATE_INTERVAL.
 */
1273 static void ice_eswitch_br_update_work(struct work_struct *work)
1275 struct ice_esw_br_offloads *br_offloads;
1277 br_offloads = ice_work_to_br_offloads(work);
1279 ice_eswitch_br_update(br_offloads);
1281 queue_delayed_work(br_offloads->wq, &br_offloads->update_work,
1282 ICE_ESW_BRIDGE_UPDATE_INTERVAL);
/* Bring up bridge offload support for @pf: allocate the state, create
 * an ordered workqueue, register the switchdev (atomic + blocking) and
 * netdevice notifiers, and kick off the periodic ageing work.  Error
 * labels at the tail unwind registrations in reverse order.
 */
1286 ice_eswitch_br_offloads_init(struct ice_pf *pf)
1288 struct ice_esw_br_offloads *br_offloads;
1289 struct device *dev = ice_pf_to_dev(pf);
1293 br_offloads = ice_eswitch_br_offloads_alloc(pf);
1295 if (IS_ERR(br_offloads)) {
1296 dev_err(dev, "Failed to init eswitch bridge\n");
1297 return PTR_ERR(br_offloads);
/* Ordered queue serializes FDB work items and the ageing work. */
1300 br_offloads->wq = alloc_ordered_workqueue("ice_bridge_wq", 0);
1301 if (!br_offloads->wq) {
1303 dev_err(dev, "Failed to allocate bridge workqueue\n");
1307 br_offloads->switchdev_nb.notifier_call =
1308 ice_eswitch_br_switchdev_event;
1309 err = register_switchdev_notifier(&br_offloads->switchdev_nb);
1312 "Failed to register switchdev notifier\n");
1313 goto err_reg_switchdev_nb;
1316 br_offloads->switchdev_blk.notifier_call =
1317 ice_eswitch_br_event_blocking;
1318 err = register_switchdev_blocking_notifier(&br_offloads->switchdev_blk);
1321 "Failed to register bridge blocking switchdev notifier\n");
1322 goto err_reg_switchdev_blk;
1325 br_offloads->netdev_nb.notifier_call = ice_eswitch_br_port_event;
1326 err = register_netdevice_notifier(&br_offloads->netdev_nb);
1329 "Failed to register bridge port event notifier\n");
1330 goto err_reg_netdev_nb;
1333 INIT_DELAYED_WORK(&br_offloads->update_work,
1334 ice_eswitch_br_update_work);
1335 queue_delayed_work(br_offloads->wq, &br_offloads->update_work,
1336 ICE_ESW_BRIDGE_UPDATE_INTERVAL);
/* Error unwind, reverse registration order. */
1341 unregister_switchdev_blocking_notifier(&br_offloads->switchdev_blk);
1342 err_reg_switchdev_blk:
1343 unregister_switchdev_notifier(&br_offloads->switchdev_nb);
1344 err_reg_switchdev_nb:
1345 destroy_workqueue(br_offloads->wq);
1348 ice_eswitch_br_offloads_dealloc(pf);