1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (C) 2019-2021, Intel Corporation. */
5 #include "ice_eswitch.h"
6 #include "devlink/devlink.h"
7 #include "devlink/devlink_port.h"
9 #include "ice_tc_lib.h"
10 #include "ice_dcb_lib.h"
13 * ice_repr_inc_tx_stats - increment Tx statistic by one packet
14 * @repr: repr to increment stats on
15 * @len: length of the packet
16 * @xmit_status: value returned by xmit function
18 void ice_repr_inc_tx_stats(struct ice_repr *repr, unsigned int len,
21 struct ice_repr_pcpu_stats *stats;
23 if (unlikely(xmit_status != NET_XMIT_SUCCESS &&
24 xmit_status != NET_XMIT_CN)) {
25 this_cpu_inc(repr->stats->tx_drops);
29 stats = this_cpu_ptr(repr->stats);
30 u64_stats_update_begin(&stats->syncp);
32 stats->tx_bytes += len;
33 u64_stats_update_end(&stats->syncp);
37 * ice_repr_inc_rx_stats - increment Rx statistic by one packet
38 * @netdev: repr netdev to increment stats on
39 * @len: length of the packet
41 void ice_repr_inc_rx_stats(struct net_device *netdev, unsigned int len)
43 struct ice_repr *repr = ice_netdev_to_repr(netdev);
44 struct ice_repr_pcpu_stats *stats;
46 stats = this_cpu_ptr(repr->stats);
47 u64_stats_update_begin(&stats->syncp);
49 stats->rx_bytes += len;
50 u64_stats_update_end(&stats->syncp);
54 * ice_repr_get_stats64 - get VF stats for VFPR use
55 * @netdev: pointer to port representor netdev
56 * @stats: pointer to struct where stats can be stored
59 ice_repr_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
61 struct ice_netdev_priv *np = netdev_priv(netdev);
62 struct ice_repr *repr = np->repr;
63 struct ice_eth_stats *eth_stats;
66 if (repr->ops.ready(repr))
70 ice_update_vsi_stats(vsi);
71 eth_stats = &vsi->eth_stats;
73 stats->tx_packets = eth_stats->tx_unicast + eth_stats->tx_broadcast +
74 eth_stats->tx_multicast;
75 stats->rx_packets = eth_stats->rx_unicast + eth_stats->rx_broadcast +
76 eth_stats->rx_multicast;
77 stats->tx_bytes = eth_stats->tx_bytes;
78 stats->rx_bytes = eth_stats->rx_bytes;
79 stats->multicast = eth_stats->rx_multicast;
80 stats->tx_errors = eth_stats->tx_errors;
81 stats->tx_dropped = eth_stats->tx_discards;
82 stats->rx_dropped = eth_stats->rx_discards;
86 * ice_netdev_to_repr - Get port representor for given netdevice
87 * @netdev: pointer to port representor netdev
89 struct ice_repr *ice_netdev_to_repr(const struct net_device *netdev)
91 struct ice_netdev_priv *np = netdev_priv(netdev);
97 * ice_repr_vf_open - Enable port representor's network interface
98 * @netdev: network interface device structure
100 * The open entry point is called when a port representor's network
101 * interface is made active by the system (IFF_UP). Corresponding
102 * VF is notified about link status change.
104 * Returns 0 on success
106 static int ice_repr_vf_open(struct net_device *netdev)
108 struct ice_repr *repr = ice_netdev_to_repr(netdev);
112 vf->link_forced = true;
114 ice_vc_notify_vf_link_state(vf);
116 netif_carrier_on(netdev);
117 netif_tx_start_all_queues(netdev);
/**
 * ice_repr_sf_open - Enable subfunction port representor's network interface
 * @netdev: network interface device structure
 *
 * Return: 0 on success.
 */
static int ice_repr_sf_open(struct net_device *netdev)
{
	netif_carrier_on(netdev);
	netif_tx_start_all_queues(netdev);

	return 0;
}
131 * ice_repr_vf_stop - Disable port representor's network interface
132 * @netdev: network interface device structure
134 * The stop entry point is called when a port representor's network
135 * interface is de-activated by the system. Corresponding
136 * VF is notified about link status change.
138 * Returns 0 on success
140 static int ice_repr_vf_stop(struct net_device *netdev)
142 struct ice_repr *repr = ice_netdev_to_repr(netdev);
146 vf->link_forced = true;
148 ice_vc_notify_vf_link_state(vf);
150 netif_carrier_off(netdev);
151 netif_tx_stop_all_queues(netdev);
/**
 * ice_repr_sf_stop - Disable subfunction port representor's network interface
 * @netdev: network interface device structure
 *
 * Return: 0 on success.
 */
static int ice_repr_sf_stop(struct net_device *netdev)
{
	netif_carrier_off(netdev);
	netif_tx_stop_all_queues(netdev);

	return 0;
}
165 * ice_repr_sp_stats64 - get slow path stats for port representor
166 * @dev: network interface device structure
167 * @stats: netlink stats structure
170 ice_repr_sp_stats64(const struct net_device *dev,
171 struct rtnl_link_stats64 *stats)
173 struct ice_repr *repr = ice_netdev_to_repr(dev);
176 for_each_possible_cpu(i) {
177 u64 tbytes, tpkts, tdrops, rbytes, rpkts;
178 struct ice_repr_pcpu_stats *repr_stats;
181 repr_stats = per_cpu_ptr(repr->stats, i);
183 start = u64_stats_fetch_begin(&repr_stats->syncp);
184 tbytes = repr_stats->tx_bytes;
185 tpkts = repr_stats->tx_packets;
186 tdrops = repr_stats->tx_drops;
187 rbytes = repr_stats->rx_bytes;
188 rpkts = repr_stats->rx_packets;
189 } while (u64_stats_fetch_retry(&repr_stats->syncp, start));
191 stats->tx_bytes += tbytes;
192 stats->tx_packets += tpkts;
193 stats->tx_dropped += tdrops;
194 stats->rx_bytes += rbytes;
195 stats->rx_packets += rpkts;
201 ice_repr_ndo_has_offload_stats(const struct net_device *dev, int attr_id)
203 return attr_id == IFLA_OFFLOAD_XSTATS_CPU_HIT;
207 ice_repr_ndo_get_offload_stats(int attr_id, const struct net_device *dev,
210 if (attr_id == IFLA_OFFLOAD_XSTATS_CPU_HIT)
211 return ice_repr_sp_stats64(dev, (struct rtnl_link_stats64 *)sp);
217 ice_repr_setup_tc_cls_flower(struct ice_repr *repr,
218 struct flow_cls_offload *flower)
220 switch (flower->command) {
221 case FLOW_CLS_REPLACE:
222 return ice_add_cls_flower(repr->netdev, repr->src_vsi, flower);
223 case FLOW_CLS_DESTROY:
224 return ice_del_cls_flower(repr->src_vsi, flower);
231 ice_repr_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
234 struct flow_cls_offload *flower = (struct flow_cls_offload *)type_data;
235 struct ice_netdev_priv *np = (struct ice_netdev_priv *)cb_priv;
238 case TC_SETUP_CLSFLOWER:
239 return ice_repr_setup_tc_cls_flower(np->repr, flower);
245 static LIST_HEAD(ice_repr_block_cb_list);
248 ice_repr_setup_tc(struct net_device *netdev, enum tc_setup_type type,
251 struct ice_netdev_priv *np = netdev_priv(netdev);
255 return flow_block_cb_setup_simple((struct flow_block_offload *)
257 &ice_repr_block_cb_list,
258 ice_repr_setup_tc_block_cb,
265 static const struct net_device_ops ice_repr_vf_netdev_ops = {
266 .ndo_get_stats64 = ice_repr_get_stats64,
267 .ndo_open = ice_repr_vf_open,
268 .ndo_stop = ice_repr_vf_stop,
269 .ndo_start_xmit = ice_eswitch_port_start_xmit,
270 .ndo_setup_tc = ice_repr_setup_tc,
271 .ndo_has_offload_stats = ice_repr_ndo_has_offload_stats,
272 .ndo_get_offload_stats = ice_repr_ndo_get_offload_stats,
275 static const struct net_device_ops ice_repr_sf_netdev_ops = {
276 .ndo_get_stats64 = ice_repr_get_stats64,
277 .ndo_open = ice_repr_sf_open,
278 .ndo_stop = ice_repr_sf_stop,
279 .ndo_start_xmit = ice_eswitch_port_start_xmit,
280 .ndo_setup_tc = ice_repr_setup_tc,
281 .ndo_has_offload_stats = ice_repr_ndo_has_offload_stats,
282 .ndo_get_offload_stats = ice_repr_ndo_get_offload_stats,
286 * ice_is_port_repr_netdev - Check if a given netdevice is a port representor netdev
287 * @netdev: pointer to netdev
289 bool ice_is_port_repr_netdev(const struct net_device *netdev)
291 return netdev && (netdev->netdev_ops == &ice_repr_vf_netdev_ops ||
292 netdev->netdev_ops == &ice_repr_sf_netdev_ops);
296 * ice_repr_reg_netdev - register port representor netdev
297 * @netdev: pointer to port representor netdev
298 * @ops: new ops for netdev
301 ice_repr_reg_netdev(struct net_device *netdev, const struct net_device_ops *ops)
303 eth_hw_addr_random(netdev);
304 netdev->netdev_ops = ops;
305 ice_set_ethtool_repr_ops(netdev);
307 netdev->hw_features |= NETIF_F_HW_TC;
309 netif_carrier_off(netdev);
310 netif_tx_stop_all_queues(netdev);
312 return register_netdev(netdev);
315 static int ice_repr_ready_vf(struct ice_repr *repr)
317 return !ice_check_vf_ready_for_cfg(repr->vf);
320 static int ice_repr_ready_sf(struct ice_repr *repr)
322 return !repr->sf->active;
326 * ice_repr_destroy - remove representor from VF
327 * @repr: pointer to representor structure
329 void ice_repr_destroy(struct ice_repr *repr)
331 free_percpu(repr->stats);
332 free_netdev(repr->netdev);
336 static void ice_repr_rem_vf(struct ice_repr *repr)
338 ice_eswitch_decfg_vsi(repr->src_vsi, repr->parent_mac);
339 unregister_netdev(repr->netdev);
340 ice_devlink_destroy_vf_port(repr->vf);
341 ice_virtchnl_set_dflt_ops(repr->vf);
344 static void ice_repr_rem_sf(struct ice_repr *repr)
346 unregister_netdev(repr->netdev);
347 ice_devlink_destroy_sf_port(repr->sf);
static void ice_repr_set_tx_topology(struct ice_pf *pf, struct devlink *devlink)
{
	/* only export if ADQ and DCB disabled and eswitch enabled */
	if (ice_is_adq_active(pf) || ice_is_dcb_active(pf) ||
	    !ice_is_switchdev_running(pf))
		return;

	ice_devlink_rate_init_tx_topology(devlink, ice_get_main_vsi(pf));
}
361 * ice_repr_create - add representor for generic VSI
362 * @src_vsi: pointer to VSI structure of device to represent
364 static struct ice_repr *ice_repr_create(struct ice_vsi *src_vsi)
366 struct ice_netdev_priv *np;
367 struct ice_repr *repr;
370 repr = kzalloc(sizeof(*repr), GFP_KERNEL);
372 return ERR_PTR(-ENOMEM);
374 repr->netdev = alloc_etherdev(sizeof(struct ice_netdev_priv));
380 repr->stats = netdev_alloc_pcpu_stats(struct ice_repr_pcpu_stats);
386 repr->src_vsi = src_vsi;
387 repr->id = src_vsi->vsi_num;
388 np = netdev_priv(repr->netdev);
391 repr->netdev->min_mtu = ETH_MIN_MTU;
392 repr->netdev->max_mtu = ICE_MAX_MTU;
394 SET_NETDEV_DEV(repr->netdev, ice_pf_to_dev(src_vsi->back));
399 free_netdev(repr->netdev);
405 static int ice_repr_add_vf(struct ice_repr *repr)
407 struct ice_vf *vf = repr->vf;
408 struct devlink *devlink;
411 err = ice_devlink_create_vf_port(vf);
415 SET_NETDEV_DEVLINK_PORT(repr->netdev, &vf->devlink_port);
416 err = ice_repr_reg_netdev(repr->netdev, &ice_repr_vf_netdev_ops);
420 err = ice_eswitch_cfg_vsi(repr->src_vsi, repr->parent_mac);
424 ice_virtchnl_set_repr_ops(vf);
426 devlink = priv_to_devlink(vf->pf);
427 ice_repr_set_tx_topology(vf->pf, devlink);
432 unregister_netdev(repr->netdev);
434 ice_devlink_destroy_vf_port(vf);
439 * ice_repr_create_vf - add representor for VF VSI
440 * @vf: VF to create port representor on
442 * Set correct representor type for VF and functions pointer.
444 * Return: created port representor on success, error otherwise
446 struct ice_repr *ice_repr_create_vf(struct ice_vf *vf)
448 struct ice_vsi *vsi = ice_get_vf_vsi(vf);
449 struct ice_repr *repr;
452 return ERR_PTR(-EINVAL);
454 repr = ice_repr_create(vsi);
458 repr->type = ICE_REPR_TYPE_VF;
460 repr->ops.add = ice_repr_add_vf;
461 repr->ops.rem = ice_repr_rem_vf;
462 repr->ops.ready = ice_repr_ready_vf;
464 ether_addr_copy(repr->parent_mac, vf->hw_lan_addr);
469 static int ice_repr_add_sf(struct ice_repr *repr)
471 struct ice_dynamic_port *sf = repr->sf;
474 err = ice_devlink_create_sf_port(sf);
478 SET_NETDEV_DEVLINK_PORT(repr->netdev, &sf->devlink_port);
479 err = ice_repr_reg_netdev(repr->netdev, &ice_repr_sf_netdev_ops);
483 ice_repr_set_tx_topology(sf->vsi->back, priv_to_devlink(sf->vsi->back));
488 ice_devlink_destroy_sf_port(sf);
493 * ice_repr_create_sf - add representor for SF VSI
494 * @sf: SF to create port representor on
496 * Set correct representor type for SF and functions pointer.
498 * Return: created port representor on success, error otherwise
500 struct ice_repr *ice_repr_create_sf(struct ice_dynamic_port *sf)
502 struct ice_repr *repr = ice_repr_create(sf->vsi);
507 repr->type = ICE_REPR_TYPE_SF;
509 repr->ops.add = ice_repr_add_sf;
510 repr->ops.rem = ice_repr_rem_sf;
511 repr->ops.ready = ice_repr_ready_sf;
513 ether_addr_copy(repr->parent_mac, sf->hw_addr);
518 struct ice_repr *ice_repr_get(struct ice_pf *pf, u32 id)
520 return xa_load(&pf->eswitch.reprs, id);
524 * ice_repr_start_tx_queues - start Tx queues of port representor
525 * @repr: pointer to repr structure
527 void ice_repr_start_tx_queues(struct ice_repr *repr)
529 netif_carrier_on(repr->netdev);
530 netif_tx_start_all_queues(repr->netdev);
534 * ice_repr_stop_tx_queues - stop Tx queues of port representor
535 * @repr: pointer to repr structure
537 void ice_repr_stop_tx_queues(struct ice_repr *repr)
539 netif_carrier_off(repr->netdev);
540 netif_tx_stop_all_queues(repr->netdev);