// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2019-2021, Intel Corporation. */

#include "ice.h"
#include "ice_eswitch.h"
#include "ice_devlink.h"
#include "ice_sriov.h"
#include "ice_tc_lib.h"
#include "ice_dcb_lib.h"

/**
 * ice_repr_get_sw_port_id - get port ID associated with representor
 * @repr: pointer to port representor
 */
static int ice_repr_get_sw_port_id(struct ice_repr *repr)
{
	return repr->vf->pf->hw.port_info->lport;
}

/**
 * ice_repr_get_phys_port_name - get phys port name
 * @netdev: pointer to port representor netdev
 * @buf: write here port name
 * @len: max length of buf
 */
static int
ice_repr_get_phys_port_name(struct net_device *netdev, char *buf, size_t len)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_repr *repr = np->repr;
	int res;

	/* Devlink port is registered and devlink core is taking care of name formatting. */
	if (repr->vf->devlink_port.devlink)
		return -EOPNOTSUPP;

	res = snprintf(buf, len, "pf%dvfr%d", ice_repr_get_sw_port_id(repr),
		       repr->vf->vf_id);
	if (res <= 0)
		return -EOPNOTSUPP;
	return 0;
}

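/* Example (illustrative, not in the original file): with logical port 0 and
 * VF ID 3 the fallback format above yields "pf0vfr3"; when the devlink port
 * is registered, the devlink core formats the name instead and this callback
 * opts out with -EOPNOTSUPP.
 */
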
/**
 * ice_repr_get_stats64 - get VF stats for VFPR use
 * @netdev: pointer to port representor netdev
 * @stats: pointer to struct where stats can be stored
 */
static void
ice_repr_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_eth_stats *eth_stats;
	struct ice_vsi *vsi;

	if (ice_is_vf_disabled(np->repr->vf))
		return;
	vsi = np->repr->src_vsi;

	ice_update_vsi_stats(vsi);
	eth_stats = &vsi->eth_stats;

	stats->tx_packets = eth_stats->tx_unicast + eth_stats->tx_broadcast +
			    eth_stats->tx_multicast;
	stats->rx_packets = eth_stats->rx_unicast + eth_stats->rx_broadcast +
			    eth_stats->rx_multicast;
	stats->tx_bytes = eth_stats->tx_bytes;
	stats->rx_bytes = eth_stats->rx_bytes;
	stats->multicast = eth_stats->rx_multicast;
	stats->tx_errors = eth_stats->tx_errors;
	stats->tx_dropped = eth_stats->tx_discards;
	stats->rx_dropped = eth_stats->rx_discards;
}

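/* Note (illustrative, not in the original file): the counters above come from
 * the VF's own VSI hardware stats (src_vsi), so the representor reports the
 * VF's traffic; slow-path traffic seen on the representor itself is reported
 * separately by ice_repr_sp_stats64().
 */
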
/**
 * ice_netdev_to_repr - Get port representor for given netdevice
 * @netdev: pointer to port representor netdev
 */
struct ice_repr *ice_netdev_to_repr(struct net_device *netdev)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);

	return np->repr;
}

/**
 * ice_repr_open - Enable port representor's network interface
 * @netdev: network interface device structure
 *
 * The open entry point is called when a port representor's network
 * interface is made active by the system (IFF_UP). Corresponding
 * VF is notified about link status change.
 *
 * Returns 0 on success
 */
static int ice_repr_open(struct net_device *netdev)
{
	struct ice_repr *repr = ice_netdev_to_repr(netdev);
	struct ice_vf *vf;

	vf = repr->vf;
	vf->link_forced = true;
	vf->link_up = true;
	ice_vc_notify_vf_link_state(vf);

	netif_carrier_on(netdev);
	netif_tx_start_all_queues(netdev);

	return 0;
}

/**
 * ice_repr_stop - Disable port representor's network interface
 * @netdev: network interface device structure
 *
 * The stop entry point is called when a port representor's network
 * interface is de-activated by the system. Corresponding
 * VF is notified about link status change.
 *
 * Returns 0 on success
 */
static int ice_repr_stop(struct net_device *netdev)
{
	struct ice_repr *repr = ice_netdev_to_repr(netdev);
	struct ice_vf *vf;

	vf = repr->vf;
	vf->link_forced = true;
	vf->link_up = false;
	ice_vc_notify_vf_link_state(vf);

	netif_carrier_off(netdev);
	netif_tx_stop_all_queues(netdev);

	return 0;
}

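/* Note (illustrative, not in the original file): open/stop force the VF link
 * state (link_forced) and report it over virtchnl, so administratively
 * downing the representor also takes the corresponding VF's link down.
 */
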
/**
 * ice_repr_sp_stats64 - get slow path stats for port representor
 * @dev: network interface device structure
 * @stats: netlink stats structure
 *
 * RX/TX stats are being swapped here to be consistent with VF stats. In slow
 * path, port representor receives data when the corresponding VF is sending it
 * (and vice versa), TX and RX bytes/packets are effectively swapped on port
 * representor.
 */
static int
ice_repr_sp_stats64(const struct net_device *dev,
		    struct rtnl_link_stats64 *stats)
{
	struct ice_netdev_priv *np = netdev_priv(dev);
	int vf_id = np->repr->vf->vf_id;
	struct ice_tx_ring *tx_ring;
	struct ice_rx_ring *rx_ring;
	u64 pkts, bytes;

	tx_ring = np->vsi->tx_rings[vf_id];
	ice_fetch_u64_stats_per_ring(&tx_ring->ring_stats->syncp,
				     tx_ring->ring_stats->stats,
				     &pkts, &bytes);
	stats->rx_packets = pkts;
	stats->rx_bytes = bytes;

	rx_ring = np->vsi->rx_rings[vf_id];
	ice_fetch_u64_stats_per_ring(&rx_ring->ring_stats->syncp,
				     rx_ring->ring_stats->stats,
				     &pkts, &bytes);
	stats->tx_packets = pkts;
	stats->tx_bytes = bytes;
	stats->tx_dropped = rx_ring->ring_stats->rx_stats.alloc_page_failed +
			    rx_ring->ring_stats->rx_stats.alloc_buf_failed;

	return 0;
}

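/* Assumption (illustrative, not in the original file): np->vsi here is the
 * eswitch control VSI and its Tx/Rx rings are indexed by VF ID, which is why
 * vf_id can be used directly as the ring index above.
 */
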
static bool
ice_repr_ndo_has_offload_stats(const struct net_device *dev, int attr_id)
{
	return attr_id == IFLA_OFFLOAD_XSTATS_CPU_HIT;
}

static int
ice_repr_ndo_get_offload_stats(int attr_id, const struct net_device *dev,
			       void *sp)
{
	if (attr_id == IFLA_OFFLOAD_XSTATS_CPU_HIT)
		return ice_repr_sp_stats64(dev, (struct rtnl_link_stats64 *)sp);

	return -EINVAL;
}

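/* Note (illustrative, not in the original file): IFLA_OFFLOAD_XSTATS_CPU_HIT
 * is the only offload-stats attribute exposed; it reports traffic that took
 * the slow (CPU) path using the swapped ring counters gathered in
 * ice_repr_sp_stats64().
 */
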
static int
ice_repr_setup_tc_cls_flower(struct ice_repr *repr,
			     struct flow_cls_offload *flower)
{
	switch (flower->command) {
	case FLOW_CLS_REPLACE:
		return ice_add_cls_flower(repr->netdev, repr->src_vsi, flower);
	case FLOW_CLS_DESTROY:
		return ice_del_cls_flower(repr->src_vsi, flower);
	default:
		return -EINVAL;
	}
}

static int
ice_repr_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
			   void *cb_priv)
{
	struct flow_cls_offload *flower = (struct flow_cls_offload *)type_data;
	struct ice_netdev_priv *np = (struct ice_netdev_priv *)cb_priv;

	switch (type) {
	case TC_SETUP_CLSFLOWER:
		return ice_repr_setup_tc_cls_flower(np->repr, flower);
	default:
		return -EOPNOTSUPP;
	}
}

static LIST_HEAD(ice_repr_block_cb_list);

static int
ice_repr_setup_tc(struct net_device *netdev, enum tc_setup_type type,
		  void *type_data)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);

	switch (type) {
	case TC_SETUP_BLOCK:
		return flow_block_cb_setup_simple((struct flow_block_offload *)
						  type_data,
						  &ice_repr_block_cb_list,
						  ice_repr_setup_tc_block_cb,
						  np, np, true);
	default:
		return -EOPNOTSUPP;
	}
}

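/* Usage sketch (illustrative, not in the original file): once the block
 * callback is registered above, a flower filter added on the representor,
 * e.g.
 *   tc filter add dev <repr> ingress protocol ip flower <match> action <act>
 * is dispatched to ice_repr_setup_tc_block_cb() and from there to
 * ice_add_cls_flower() for offload.
 */
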
static const struct net_device_ops ice_repr_netdev_ops = {
	.ndo_get_phys_port_name = ice_repr_get_phys_port_name,
	.ndo_get_stats64 = ice_repr_get_stats64,
	.ndo_open = ice_repr_open,
	.ndo_stop = ice_repr_stop,
	.ndo_start_xmit = ice_eswitch_port_start_xmit,
	.ndo_setup_tc = ice_repr_setup_tc,
	.ndo_has_offload_stats = ice_repr_ndo_has_offload_stats,
	.ndo_get_offload_stats = ice_repr_ndo_get_offload_stats,
};

/**
 * ice_is_port_repr_netdev - Check if a given netdevice is a port representor netdev
 * @netdev: pointer to netdev
 */
bool ice_is_port_repr_netdev(const struct net_device *netdev)
{
	return netdev && (netdev->netdev_ops == &ice_repr_netdev_ops);
}

/**
 * ice_repr_reg_netdev - register port representor netdev
 * @netdev: pointer to port representor netdev
 */
static int
ice_repr_reg_netdev(struct net_device *netdev)
{
	eth_hw_addr_random(netdev);
	netdev->netdev_ops = &ice_repr_netdev_ops;
	ice_set_ethtool_repr_ops(netdev);

	netdev->hw_features |= NETIF_F_HW_TC;

	netif_carrier_off(netdev);
	netif_tx_stop_all_queues(netdev);

	return register_netdev(netdev);
}

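/* Note (illustrative, not in the original file): the representor is given a
 * random MAC address rather than a port/VF MAC, and its carrier and Tx queues
 * stay down until the eswitch starts them via ice_repr_start_tx_queues().
 */
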
/**
 * ice_repr_add - add representor for VF
 * @vf: pointer to VF structure
 */
static int ice_repr_add(struct ice_vf *vf)
{
	struct ice_q_vector *q_vector;
	struct ice_netdev_priv *np;
	struct ice_repr *repr;
	struct ice_vsi *vsi;
	int err;

	vsi = ice_get_vf_vsi(vf);
	if (!vsi)
		return -EINVAL;

	repr = kzalloc(sizeof(*repr), GFP_KERNEL);
	if (!repr)
		return -ENOMEM;

	repr->netdev = alloc_etherdev(sizeof(struct ice_netdev_priv));
	if (!repr->netdev) {
		err = -ENOMEM;
		goto err_alloc;
	}

	repr->src_vsi = vsi;
	repr->vf = vf;
	vf->repr = repr;
	np = netdev_priv(repr->netdev);
	np->repr = repr;

	q_vector = kzalloc(sizeof(*q_vector), GFP_KERNEL);
	if (!q_vector) {
		err = -ENOMEM;
		goto err_alloc_q_vector;
	}
	repr->q_vector = q_vector;

	err = ice_devlink_create_vf_port(vf);
	if (err)
		goto err_devlink;

	repr->netdev->min_mtu = ETH_MIN_MTU;
	repr->netdev->max_mtu = ICE_MAX_MTU;

	SET_NETDEV_DEV(repr->netdev, ice_pf_to_dev(vf->pf));
	SET_NETDEV_DEVLINK_PORT(repr->netdev, &vf->devlink_port);
	err = ice_repr_reg_netdev(repr->netdev);
	if (err)
		goto err_netdev;

	ice_virtchnl_set_repr_ops(vf);

	return 0;

err_netdev:
	ice_devlink_destroy_vf_port(vf);
err_devlink:
	kfree(repr->q_vector);
	vf->repr->q_vector = NULL;
err_alloc_q_vector:
	free_netdev(repr->netdev);
	repr->netdev = NULL;
err_alloc:
	kfree(repr);
	vf->repr = NULL;
	return err;
}

/**
 * ice_repr_rem - remove representor from VF
 * @vf: pointer to VF structure
 */
static void ice_repr_rem(struct ice_vf *vf)
{
	if (!vf->repr)
		return;

	kfree(vf->repr->q_vector);
	vf->repr->q_vector = NULL;
	unregister_netdev(vf->repr->netdev);
	ice_devlink_destroy_vf_port(vf);
	free_netdev(vf->repr->netdev);
	vf->repr->netdev = NULL;
	kfree(vf->repr);
	vf->repr = NULL;

	ice_virtchnl_set_dflt_ops(vf);
}

/**
 * ice_repr_rem_from_all_vfs - remove port representor for all VFs
 * @pf: pointer to PF structure
 */
void ice_repr_rem_from_all_vfs(struct ice_pf *pf)
{
	struct devlink *devlink;
	struct ice_vf *vf;
	unsigned int bkt;

	lockdep_assert_held(&pf->vfs.table_lock);

	ice_for_each_vf(pf, bkt, vf)
		ice_repr_rem(vf);

	/* since all port representors are destroyed, there is
	 * no point in keeping the nodes
	 */
	devlink = priv_to_devlink(pf);
	devl_lock(devlink);
	devl_rate_nodes_destroy(devlink);
	devl_unlock(devlink);
}

/**
 * ice_repr_add_for_all_vfs - add port representor for all VFs
 * @pf: pointer to PF structure
 */
int ice_repr_add_for_all_vfs(struct ice_pf *pf)
{
	struct devlink *devlink;
	struct ice_vf *vf;
	unsigned int bkt;
	int err;

	lockdep_assert_held(&pf->vfs.table_lock);

	ice_for_each_vf(pf, bkt, vf) {
		err = ice_repr_add(vf);
		if (err)
			goto err;
	}

	/* only export if ADQ and DCB disabled */
	if (ice_is_adq_active(pf) || ice_is_dcb_active(pf))
		return 0;

	devlink = priv_to_devlink(pf);
	ice_devlink_rate_init_tx_topology(devlink, ice_get_main_vsi(pf));

	return 0;

err:
	ice_repr_rem_from_all_vfs(pf);

	return err;
}

/**
 * ice_repr_start_tx_queues - start Tx queues of port representor
 * @repr: pointer to repr structure
 */
void ice_repr_start_tx_queues(struct ice_repr *repr)
{
	netif_carrier_on(repr->netdev);
	netif_tx_start_all_queues(repr->netdev);
}

/**
 * ice_repr_stop_tx_queues - stop Tx queues of port representor
 * @repr: pointer to repr structure
 */
void ice_repr_stop_tx_queues(struct ice_repr *repr)
{
	netif_carrier_off(repr->netdev);
	netif_tx_stop_all_queues(repr->netdev);
}

/**
 * ice_repr_set_traffic_vsi - set traffic VSI for port representor
 * @repr: repr on which VSI will be set
 * @vsi: pointer to VSI that will be used by port representor to pass traffic
 */
void ice_repr_set_traffic_vsi(struct ice_repr *repr, struct ice_vsi *vsi)
{
	struct ice_netdev_priv *np = netdev_priv(repr->netdev);

	repr->src_vsi = vsi;
	np->vsi = vsi;
}