1 // SPDX-License-Identifier: GPL-2.0-or-later
3 * net/core/ethtool.c - Ethtool ioctl handler
6 * This file is where we call all the ethtool_ops commands to get
7 * the information ethtool needs.
10 #include <linux/module.h>
11 #include <linux/types.h>
12 #include <linux/capability.h>
13 #include <linux/errno.h>
14 #include <linux/ethtool.h>
15 #include <linux/netdevice.h>
16 #include <linux/net_tstamp.h>
17 #include <linux/phy.h>
18 #include <linux/bitops.h>
19 #include <linux/uaccess.h>
20 #include <linux/vmalloc.h>
21 #include <linux/sfp.h>
22 #include <linux/slab.h>
23 #include <linux/rtnetlink.h>
24 #include <linux/sched/signal.h>
25 #include <linux/net.h>
26 #include <net/devlink.h>
27 #include <net/xdp_sock.h>
28 #include <net/flow_offload.h>
31 * Some useful ethtool_ops methods that're device independent.
32 * If we find that all drivers want to do the same thing here,
33 * we can turn these into dev_() function calls.
36 u32 ethtool_op_get_link(struct net_device *dev)
38 return netif_carrier_ok(dev) ? 1 : 0;
40 EXPORT_SYMBOL(ethtool_op_get_link);
42 int ethtool_op_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info)
44 info->so_timestamping =
45 SOF_TIMESTAMPING_TX_SOFTWARE |
46 SOF_TIMESTAMPING_RX_SOFTWARE |
47 SOF_TIMESTAMPING_SOFTWARE;
51 EXPORT_SYMBOL(ethtool_op_get_ts_info);
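/*
 * Minimal usage sketch (illustrative, not taken from this file): a driver
 * with no special link-detection or timestamping needs can plug the two
 * generic helpers above straight into its ops table; "foo_ethtool_ops" is
 * a made-up name.
 *
 *	static const struct ethtool_ops foo_ethtool_ops = {
 *		.get_link	= ethtool_op_get_link,
 *		.get_ts_info	= ethtool_op_get_ts_info,
 *	};
 *	...
 *	netdev->ethtool_ops = &foo_ethtool_ops;
 */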
53 /* Handlers for each ethtool command */
55 #define ETHTOOL_DEV_FEATURE_WORDS ((NETDEV_FEATURE_COUNT + 31) / 32)
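/* i.e. the number of u32 words needed to carry every netdev feature bit,
 * rounded up - equivalent to DIV_ROUND_UP(NETDEV_FEATURE_COUNT, 32): 64
 * feature bits fit in two words, 65 need three.
 */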
57 static const char netdev_features_strings[NETDEV_FEATURE_COUNT][ETH_GSTRING_LEN] = {
58 [NETIF_F_SG_BIT] = "tx-scatter-gather",
59 [NETIF_F_IP_CSUM_BIT] = "tx-checksum-ipv4",
60 [NETIF_F_HW_CSUM_BIT] = "tx-checksum-ip-generic",
61 [NETIF_F_IPV6_CSUM_BIT] = "tx-checksum-ipv6",
62 [NETIF_F_HIGHDMA_BIT] = "highdma",
63 [NETIF_F_FRAGLIST_BIT] = "tx-scatter-gather-fraglist",
64 [NETIF_F_HW_VLAN_CTAG_TX_BIT] = "tx-vlan-hw-insert",
66 [NETIF_F_HW_VLAN_CTAG_RX_BIT] = "rx-vlan-hw-parse",
67 [NETIF_F_HW_VLAN_CTAG_FILTER_BIT] = "rx-vlan-filter",
68 [NETIF_F_HW_VLAN_STAG_TX_BIT] = "tx-vlan-stag-hw-insert",
69 [NETIF_F_HW_VLAN_STAG_RX_BIT] = "rx-vlan-stag-hw-parse",
70 [NETIF_F_HW_VLAN_STAG_FILTER_BIT] = "rx-vlan-stag-filter",
71 [NETIF_F_VLAN_CHALLENGED_BIT] = "vlan-challenged",
72 [NETIF_F_GSO_BIT] = "tx-generic-segmentation",
73 [NETIF_F_LLTX_BIT] = "tx-lockless",
74 [NETIF_F_NETNS_LOCAL_BIT] = "netns-local",
75 [NETIF_F_GRO_BIT] = "rx-gro",
76 [NETIF_F_GRO_HW_BIT] = "rx-gro-hw",
77 [NETIF_F_LRO_BIT] = "rx-lro",
79 [NETIF_F_TSO_BIT] = "tx-tcp-segmentation",
80 [NETIF_F_GSO_ROBUST_BIT] = "tx-gso-robust",
81 [NETIF_F_TSO_ECN_BIT] = "tx-tcp-ecn-segmentation",
82 [NETIF_F_TSO_MANGLEID_BIT] = "tx-tcp-mangleid-segmentation",
83 [NETIF_F_TSO6_BIT] = "tx-tcp6-segmentation",
84 [NETIF_F_FSO_BIT] = "tx-fcoe-segmentation",
85 [NETIF_F_GSO_GRE_BIT] = "tx-gre-segmentation",
86 [NETIF_F_GSO_GRE_CSUM_BIT] = "tx-gre-csum-segmentation",
87 [NETIF_F_GSO_IPXIP4_BIT] = "tx-ipxip4-segmentation",
88 [NETIF_F_GSO_IPXIP6_BIT] = "tx-ipxip6-segmentation",
89 [NETIF_F_GSO_UDP_TUNNEL_BIT] = "tx-udp_tnl-segmentation",
90 [NETIF_F_GSO_UDP_TUNNEL_CSUM_BIT] = "tx-udp_tnl-csum-segmentation",
91 [NETIF_F_GSO_PARTIAL_BIT] = "tx-gso-partial",
92 [NETIF_F_GSO_SCTP_BIT] = "tx-sctp-segmentation",
93 [NETIF_F_GSO_ESP_BIT] = "tx-esp-segmentation",
94 [NETIF_F_GSO_UDP_L4_BIT] = "tx-udp-segmentation",
96 [NETIF_F_FCOE_CRC_BIT] = "tx-checksum-fcoe-crc",
97 [NETIF_F_SCTP_CRC_BIT] = "tx-checksum-sctp",
98 [NETIF_F_FCOE_MTU_BIT] = "fcoe-mtu",
99 [NETIF_F_NTUPLE_BIT] = "rx-ntuple-filter",
100 [NETIF_F_RXHASH_BIT] = "rx-hashing",
101 [NETIF_F_RXCSUM_BIT] = "rx-checksum",
102 [NETIF_F_NOCACHE_COPY_BIT] = "tx-nocache-copy",
103 [NETIF_F_LOOPBACK_BIT] = "loopback",
104 [NETIF_F_RXFCS_BIT] = "rx-fcs",
105 [NETIF_F_RXALL_BIT] = "rx-all",
106 [NETIF_F_HW_L2FW_DOFFLOAD_BIT] = "l2-fwd-offload",
107 [NETIF_F_HW_TC_BIT] = "hw-tc-offload",
108 [NETIF_F_HW_ESP_BIT] = "esp-hw-offload",
109 [NETIF_F_HW_ESP_TX_CSUM_BIT] = "esp-tx-csum-hw-offload",
110 [NETIF_F_RX_UDP_TUNNEL_PORT_BIT] = "rx-udp_tunnel-port-offload",
111 [NETIF_F_HW_TLS_RECORD_BIT] = "tls-hw-record",
112 [NETIF_F_HW_TLS_TX_BIT] = "tls-hw-tx-offload",
113 [NETIF_F_HW_TLS_RX_BIT] = "tls-hw-rx-offload",
117 rss_hash_func_strings[ETH_RSS_HASH_FUNCS_COUNT][ETH_GSTRING_LEN] = {
118 [ETH_RSS_HASH_TOP_BIT] = "toeplitz",
119 [ETH_RSS_HASH_XOR_BIT] = "xor",
120 [ETH_RSS_HASH_CRC32_BIT] = "crc32",
124 tunable_strings[__ETHTOOL_TUNABLE_COUNT][ETH_GSTRING_LEN] = {
125 [ETHTOOL_ID_UNSPEC] = "Unspec",
126 [ETHTOOL_RX_COPYBREAK] = "rx-copybreak",
127 [ETHTOOL_TX_COPYBREAK] = "tx-copybreak",
128 [ETHTOOL_PFC_PREVENTION_TOUT] = "pfc-prevention-tout",
132 phy_tunable_strings[__ETHTOOL_PHY_TUNABLE_COUNT][ETH_GSTRING_LEN] = {
133 [ETHTOOL_ID_UNSPEC] = "Unspec",
134 [ETHTOOL_PHY_DOWNSHIFT] = "phy-downshift",
135 [ETHTOOL_PHY_FAST_LINK_DOWN] = "phy-fast-link-down",
138 static int ethtool_get_features(struct net_device *dev, void __user *useraddr)
140 struct ethtool_gfeatures cmd = {
141 .cmd = ETHTOOL_GFEATURES,
142 .size = ETHTOOL_DEV_FEATURE_WORDS,
144 struct ethtool_get_features_block features[ETHTOOL_DEV_FEATURE_WORDS];
145 u32 __user *sizeaddr;
149 /* in case feature bits run out again */
150 BUILD_BUG_ON(ETHTOOL_DEV_FEATURE_WORDS * sizeof(u32) > sizeof(netdev_features_t));
152 for (i = 0; i < ETHTOOL_DEV_FEATURE_WORDS; ++i) {
153 features[i].available = (u32)(dev->hw_features >> (32 * i));
154 features[i].requested = (u32)(dev->wanted_features >> (32 * i));
155 features[i].active = (u32)(dev->features >> (32 * i));
156 features[i].never_changed =
157 (u32)(NETIF_F_NEVER_CHANGE >> (32 * i));
160 sizeaddr = useraddr + offsetof(struct ethtool_gfeatures, size);
161 if (get_user(copy_size, sizeaddr))
164 if (copy_size > ETHTOOL_DEV_FEATURE_WORDS)
165 copy_size = ETHTOOL_DEV_FEATURE_WORDS;
167 if (copy_to_user(useraddr, &cmd, sizeof(cmd)))
169 useraddr += sizeof(cmd);
170 if (copy_to_user(useraddr, features, copy_size * sizeof(*features)))
176 static int ethtool_set_features(struct net_device *dev, void __user *useraddr)
178 struct ethtool_sfeatures cmd;
179 struct ethtool_set_features_block features[ETHTOOL_DEV_FEATURE_WORDS];
180 netdev_features_t wanted = 0, valid = 0;
183 if (copy_from_user(&cmd, useraddr, sizeof(cmd)))
185 useraddr += sizeof(cmd);
187 if (cmd.size != ETHTOOL_DEV_FEATURE_WORDS)
190 if (copy_from_user(features, useraddr, sizeof(features)))
193 for (i = 0; i < ETHTOOL_DEV_FEATURE_WORDS; ++i) {
194 valid |= (netdev_features_t)features[i].valid << (32 * i);
195 wanted |= (netdev_features_t)features[i].requested << (32 * i);
198 if (valid & ~NETIF_F_ETHTOOL_BITS)
201 if (valid & ~dev->hw_features) {
202 valid &= dev->hw_features;
203 ret |= ETHTOOL_F_UNSUPPORTED;
206 dev->wanted_features &= ~valid;
207 dev->wanted_features |= wanted & valid;
208 __netdev_update_features(dev);
210 if ((dev->wanted_features ^ dev->features) & valid)
211 ret |= ETHTOOL_F_WISH;
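/* At this point ret may carry ETHTOOL_F_UNSUPPORTED (some requested bits are
 * not in hw_features and were dropped) and/or ETHTOOL_F_WISH (a valid request
 * was recorded in wanted_features but the device did not actually enable it);
 * the mask is handed back to userspace as a positive return value.
 */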
216 static int __ethtool_get_sset_count(struct net_device *dev, int sset)
218 const struct ethtool_ops *ops = dev->ethtool_ops;
220 if (sset == ETH_SS_FEATURES)
221 return ARRAY_SIZE(netdev_features_strings);
223 if (sset == ETH_SS_RSS_HASH_FUNCS)
224 return ARRAY_SIZE(rss_hash_func_strings);
226 if (sset == ETH_SS_TUNABLES)
227 return ARRAY_SIZE(tunable_strings);
229 if (sset == ETH_SS_PHY_TUNABLES)
230 return ARRAY_SIZE(phy_tunable_strings);
232 if (sset == ETH_SS_PHY_STATS && dev->phydev &&
233 !ops->get_ethtool_phy_stats)
234 return phy_ethtool_get_sset_count(dev->phydev);
236 if (ops->get_sset_count && ops->get_strings)
237 return ops->get_sset_count(dev, sset);
242 static void __ethtool_get_strings(struct net_device *dev,
243 u32 stringset, u8 *data)
245 const struct ethtool_ops *ops = dev->ethtool_ops;
247 if (stringset == ETH_SS_FEATURES)
248 memcpy(data, netdev_features_strings,
249 sizeof(netdev_features_strings));
250 else if (stringset == ETH_SS_RSS_HASH_FUNCS)
251 memcpy(data, rss_hash_func_strings,
252 sizeof(rss_hash_func_strings));
253 else if (stringset == ETH_SS_TUNABLES)
254 memcpy(data, tunable_strings, sizeof(tunable_strings));
255 else if (stringset == ETH_SS_PHY_TUNABLES)
256 memcpy(data, phy_tunable_strings, sizeof(phy_tunable_strings));
257 else if (stringset == ETH_SS_PHY_STATS && dev->phydev &&
258 !ops->get_ethtool_phy_stats)
259 phy_ethtool_get_strings(dev->phydev, data);
261 /* ops->get_strings is valid because checked earlier */
262 ops->get_strings(dev, stringset, data);
265 static netdev_features_t ethtool_get_feature_mask(u32 eth_cmd)
267 /* feature masks of legacy discrete ethtool ops */
270 case ETHTOOL_GTXCSUM:
271 case ETHTOOL_STXCSUM:
272 return NETIF_F_CSUM_MASK | NETIF_F_SCTP_CRC;
273 case ETHTOOL_GRXCSUM:
274 case ETHTOOL_SRXCSUM:
275 return NETIF_F_RXCSUM;
281 return NETIF_F_ALL_TSO;
293 static int ethtool_get_one_feature(struct net_device *dev,
294 char __user *useraddr, u32 ethcmd)
296 netdev_features_t mask = ethtool_get_feature_mask(ethcmd);
297 struct ethtool_value edata = {
299 .data = !!(dev->features & mask),
302 if (copy_to_user(useraddr, &edata, sizeof(edata)))
307 static int ethtool_set_one_feature(struct net_device *dev,
308 void __user *useraddr, u32 ethcmd)
310 struct ethtool_value edata;
311 netdev_features_t mask;
313 if (copy_from_user(&edata, useraddr, sizeof(edata)))
316 mask = ethtool_get_feature_mask(ethcmd);
317 mask &= dev->hw_features;
322 dev->wanted_features |= mask;
324 dev->wanted_features &= ~mask;
326 __netdev_update_features(dev);
331 #define ETH_ALL_FLAGS (ETH_FLAG_LRO | ETH_FLAG_RXVLAN | ETH_FLAG_TXVLAN | \
332 ETH_FLAG_NTUPLE | ETH_FLAG_RXHASH)
333 #define ETH_ALL_FEATURES (NETIF_F_LRO | NETIF_F_HW_VLAN_CTAG_RX | \
334 NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_NTUPLE | \
337 static u32 __ethtool_get_flags(struct net_device *dev)
341 if (dev->features & NETIF_F_LRO)
342 flags |= ETH_FLAG_LRO;
343 if (dev->features & NETIF_F_HW_VLAN_CTAG_RX)
344 flags |= ETH_FLAG_RXVLAN;
345 if (dev->features & NETIF_F_HW_VLAN_CTAG_TX)
346 flags |= ETH_FLAG_TXVLAN;
347 if (dev->features & NETIF_F_NTUPLE)
348 flags |= ETH_FLAG_NTUPLE;
349 if (dev->features & NETIF_F_RXHASH)
350 flags |= ETH_FLAG_RXHASH;
355 static int __ethtool_set_flags(struct net_device *dev, u32 data)
357 netdev_features_t features = 0, changed;
359 if (data & ~ETH_ALL_FLAGS)
362 if (data & ETH_FLAG_LRO)
363 features |= NETIF_F_LRO;
364 if (data & ETH_FLAG_RXVLAN)
365 features |= NETIF_F_HW_VLAN_CTAG_RX;
366 if (data & ETH_FLAG_TXVLAN)
367 features |= NETIF_F_HW_VLAN_CTAG_TX;
368 if (data & ETH_FLAG_NTUPLE)
369 features |= NETIF_F_NTUPLE;
370 if (data & ETH_FLAG_RXHASH)
371 features |= NETIF_F_RXHASH;
373 /* allow changing only bits set in hw_features */
374 changed = (features ^ dev->features) & ETH_ALL_FEATURES;
375 if (changed & ~dev->hw_features)
376 return (changed & dev->hw_features) ? -EINVAL : -EOPNOTSUPP;
378 dev->wanted_features =
379 (dev->wanted_features & ~changed) | (features & changed);
381 __netdev_update_features(dev);
386 /* Given two link masks, AND them together and save the result in dst. */
387 void ethtool_intersect_link_masks(struct ethtool_link_ksettings *dst,
388 struct ethtool_link_ksettings *src)
390 unsigned int size = BITS_TO_LONGS(__ETHTOOL_LINK_MODE_MASK_NBITS);
391 unsigned int idx = 0;
393 for (; idx < size; idx++) {
394 dst->link_modes.supported[idx] &=
395 src->link_modes.supported[idx];
396 dst->link_modes.advertising[idx] &=
397 src->link_modes.advertising[idx];
400 EXPORT_SYMBOL(ethtool_intersect_link_masks);
402 void ethtool_convert_legacy_u32_to_link_mode(unsigned long *dst,
405 bitmap_zero(dst, __ETHTOOL_LINK_MODE_MASK_NBITS);
408 EXPORT_SYMBOL(ethtool_convert_legacy_u32_to_link_mode);
410 /* return false if src had higher bits set. lower bits always updated. */
411 bool ethtool_convert_link_mode_to_legacy_u32(u32 *legacy_u32,
412 const unsigned long *src)
416 /* TODO: following test will soon always be true */
417 if (__ETHTOOL_LINK_MODE_MASK_NBITS > 32) {
418 __ETHTOOL_DECLARE_LINK_MODE_MASK(ext);
420 bitmap_zero(ext, __ETHTOOL_LINK_MODE_MASK_NBITS);
421 bitmap_fill(ext, 32);
422 bitmap_complement(ext, ext, __ETHTOOL_LINK_MODE_MASK_NBITS);
423 if (bitmap_intersects(ext, src,
424 __ETHTOOL_LINK_MODE_MASK_NBITS)) {
425 /* src mask goes beyond bit 31 */
429 *legacy_u32 = src[0];
432 EXPORT_SYMBOL(ethtool_convert_link_mode_to_legacy_u32);
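/*
 * The two converters above are inverses over the first 32 link modes: the
 * legacy SUPPORTED_xxx/ADVERTISED_xxx bit positions were kept identical to
 * the matching ETHTOOL_LINK_MODE_xxx_BIT numbers (1000baseT/Full is bit 5 in
 * both encodings, for example), so a legacy u32 maps 1:1 onto bits 0-31 of
 * the link mode bitmap.
 */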
434 /* return false if legacy contained non-0 deprecated fields
435 * maxtxpkt/maxrxpkt. rest of ksettings always updated
438 convert_legacy_settings_to_link_ksettings(
439 struct ethtool_link_ksettings *link_ksettings,
440 const struct ethtool_cmd *legacy_settings)
444 memset(link_ksettings, 0, sizeof(*link_ksettings));
446 /* This is used to tell users that the driver is still using these
447 * deprecated legacy fields, and that they should not use
448 * %ETHTOOL_GLINKSETTINGS/%ETHTOOL_SLINKSETTINGS
450 if (legacy_settings->maxtxpkt ||
451 legacy_settings->maxrxpkt)
454 ethtool_convert_legacy_u32_to_link_mode(
455 link_ksettings->link_modes.supported,
456 legacy_settings->supported);
457 ethtool_convert_legacy_u32_to_link_mode(
458 link_ksettings->link_modes.advertising,
459 legacy_settings->advertising);
460 ethtool_convert_legacy_u32_to_link_mode(
461 link_ksettings->link_modes.lp_advertising,
462 legacy_settings->lp_advertising);
463 link_ksettings->base.speed
464 = ethtool_cmd_speed(legacy_settings);
465 link_ksettings->base.duplex
466 = legacy_settings->duplex;
467 link_ksettings->base.port
468 = legacy_settings->port;
469 link_ksettings->base.phy_address
470 = legacy_settings->phy_address;
471 link_ksettings->base.autoneg
472 = legacy_settings->autoneg;
473 link_ksettings->base.mdio_support
474 = legacy_settings->mdio_support;
475 link_ksettings->base.eth_tp_mdix
476 = legacy_settings->eth_tp_mdix;
477 link_ksettings->base.eth_tp_mdix_ctrl
478 = legacy_settings->eth_tp_mdix_ctrl;
482 /* return false if ksettings link modes had higher bits
483 * set. legacy_settings always updated (best effort)
486 convert_link_ksettings_to_legacy_settings(
487 struct ethtool_cmd *legacy_settings,
488 const struct ethtool_link_ksettings *link_ksettings)
492 memset(legacy_settings, 0, sizeof(*legacy_settings));
493 /* this also clears the deprecated fields in legacy structure:
499 retval &= ethtool_convert_link_mode_to_legacy_u32(
500 &legacy_settings->supported,
501 link_ksettings->link_modes.supported);
502 retval &= ethtool_convert_link_mode_to_legacy_u32(
503 &legacy_settings->advertising,
504 link_ksettings->link_modes.advertising);
505 retval &= ethtool_convert_link_mode_to_legacy_u32(
506 &legacy_settings->lp_advertising,
507 link_ksettings->link_modes.lp_advertising);
508 ethtool_cmd_speed_set(legacy_settings, link_ksettings->base.speed);
509 legacy_settings->duplex
510 = link_ksettings->base.duplex;
511 legacy_settings->port
512 = link_ksettings->base.port;
513 legacy_settings->phy_address
514 = link_ksettings->base.phy_address;
515 legacy_settings->autoneg
516 = link_ksettings->base.autoneg;
517 legacy_settings->mdio_support
518 = link_ksettings->base.mdio_support;
519 legacy_settings->eth_tp_mdix
520 = link_ksettings->base.eth_tp_mdix;
521 legacy_settings->eth_tp_mdix_ctrl
522 = link_ksettings->base.eth_tp_mdix_ctrl;
523 legacy_settings->transceiver
524 = link_ksettings->base.transceiver;
528 /* number of 32-bit words to store the user's link mode bitmaps */
529 #define __ETHTOOL_LINK_MODE_MASK_NU32 \
530 DIV_ROUND_UP(__ETHTOOL_LINK_MODE_MASK_NBITS, 32)
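/* Userspace sees each link mode mask as an array of u32 words (struct
 * ethtool_link_usettings below), while the kernel keeps unsigned long bitmaps
 * in struct ethtool_link_ksettings; hence the bitmap_from_arr32() and
 * bitmap_to_arr32() conversions in the helpers that follow.
 */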
532 /* layout of the struct passed from/to userland */
533 struct ethtool_link_usettings {
534 struct ethtool_link_settings base;
536 __u32 supported[__ETHTOOL_LINK_MODE_MASK_NU32];
537 __u32 advertising[__ETHTOOL_LINK_MODE_MASK_NU32];
538 __u32 lp_advertising[__ETHTOOL_LINK_MODE_MASK_NU32];
542 /* Internal kernel helper to query a device ethtool_link_settings. */
543 int __ethtool_get_link_ksettings(struct net_device *dev,
544 struct ethtool_link_ksettings *link_ksettings)
548 if (!dev->ethtool_ops->get_link_ksettings)
551 memset(link_ksettings, 0, sizeof(*link_ksettings));
552 return dev->ethtool_ops->get_link_ksettings(dev, link_ksettings);
554 EXPORT_SYMBOL(__ethtool_get_link_ksettings);
556 /* convert ethtool_link_usettings in user space to a kernel internal
557 * ethtool_link_ksettings. return 0 on success, errno on error.
559 static int load_link_ksettings_from_user(struct ethtool_link_ksettings *to,
560 const void __user *from)
562 struct ethtool_link_usettings link_usettings;
564 if (copy_from_user(&link_usettings, from, sizeof(link_usettings)))
567 memcpy(&to->base, &link_usettings.base, sizeof(to->base));
568 bitmap_from_arr32(to->link_modes.supported,
569 link_usettings.link_modes.supported,
570 __ETHTOOL_LINK_MODE_MASK_NBITS);
571 bitmap_from_arr32(to->link_modes.advertising,
572 link_usettings.link_modes.advertising,
573 __ETHTOOL_LINK_MODE_MASK_NBITS);
574 bitmap_from_arr32(to->link_modes.lp_advertising,
575 link_usettings.link_modes.lp_advertising,
576 __ETHTOOL_LINK_MODE_MASK_NBITS);
581 /* convert a kernel internal ethtool_link_ksettings to
582 * ethtool_link_usettings in user space. return 0 on success, errno on
586 store_link_ksettings_for_user(void __user *to,
587 const struct ethtool_link_ksettings *from)
589 struct ethtool_link_usettings link_usettings;
591 memcpy(&link_usettings.base, &from->base, sizeof(link_usettings));
592 bitmap_to_arr32(link_usettings.link_modes.supported,
593 from->link_modes.supported,
594 __ETHTOOL_LINK_MODE_MASK_NBITS);
595 bitmap_to_arr32(link_usettings.link_modes.advertising,
596 from->link_modes.advertising,
597 __ETHTOOL_LINK_MODE_MASK_NBITS);
598 bitmap_to_arr32(link_usettings.link_modes.lp_advertising,
599 from->link_modes.lp_advertising,
600 __ETHTOOL_LINK_MODE_MASK_NBITS);
602 if (copy_to_user(to, &link_usettings, sizeof(link_usettings)))
608 /* Query device for its ethtool_link_settings. */
609 static int ethtool_get_link_ksettings(struct net_device *dev,
610 void __user *useraddr)
613 struct ethtool_link_ksettings link_ksettings;
616 if (!dev->ethtool_ops->get_link_ksettings)
619 /* handle bitmap nbits handshake */
620 if (copy_from_user(&link_ksettings.base, useraddr,
621 sizeof(link_ksettings.base)))
624 if (__ETHTOOL_LINK_MODE_MASK_NU32
625 != link_ksettings.base.link_mode_masks_nwords) {
626 /* wrong link mode nbits requested */
627 memset(&link_ksettings, 0, sizeof(link_ksettings));
628 link_ksettings.base.cmd = ETHTOOL_GLINKSETTINGS;
629 /* send back number of words required as negative val */
630 compiletime_assert(__ETHTOOL_LINK_MODE_MASK_NU32 <= S8_MAX,
631 "need too many bits for link modes!");
632 link_ksettings.base.link_mode_masks_nwords
633 = -((s8)__ETHTOOL_LINK_MODE_MASK_NU32);
635 /* copy the base fields back to user, not the link
638 if (copy_to_user(useraddr, &link_ksettings.base,
639 sizeof(link_ksettings.base)))
645 /* handshake successful: user/kernel agree on
646 * link_mode_masks_nwords
649 memset(&link_ksettings, 0, sizeof(link_ksettings));
650 err = dev->ethtool_ops->get_link_ksettings(dev, &link_ksettings);
654 /* make sure we tell the right values to user */
655 link_ksettings.base.cmd = ETHTOOL_GLINKSETTINGS;
656 link_ksettings.base.link_mode_masks_nwords
657 = __ETHTOOL_LINK_MODE_MASK_NU32;
659 return store_link_ksettings_for_user(useraddr, &link_ksettings);
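/*
 * Rough userspace view of the nwords handshake above (an illustrative sketch,
 * not code from this file; "fd" is assumed to be any open socket and "ifr" a
 * struct ifreq already holding the interface name):
 *
 *	struct {
 *		struct ethtool_link_settings base;
 *		__u32 link_mode_data[3 * 127]; // 127 = the S8_MAX cap asserted above
 *	} ecmd = { .base.cmd = ETHTOOL_GLINKSETTINGS };
 *
 *	ifr.ifr_data = (void *)&ecmd;
 *	ioctl(fd, SIOCETHTOOL, &ifr);
 *	// first call: the kernel answers with the word count as a negative value
 *	ecmd.base.link_mode_masks_nwords = -ecmd.base.link_mode_masks_nwords;
 *	ioctl(fd, SIOCETHTOOL, &ifr);
 *	// second call: the base fields and the three bitmaps are filled in
 */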
662 /* Update device ethtool_link_settings. */
663 static int ethtool_set_link_ksettings(struct net_device *dev,
664 void __user *useraddr)
667 struct ethtool_link_ksettings link_ksettings;
671 if (!dev->ethtool_ops->set_link_ksettings)
674 /* make sure nbits field has expected value */
675 if (copy_from_user(&link_ksettings.base, useraddr,
676 sizeof(link_ksettings.base)))
679 if (__ETHTOOL_LINK_MODE_MASK_NU32
680 != link_ksettings.base.link_mode_masks_nwords)
683 /* copy the whole structure, now that we know it has expected
686 err = load_link_ksettings_from_user(&link_ksettings, useraddr);
690 /* re-check nwords field, just in case */
691 if (__ETHTOOL_LINK_MODE_MASK_NU32
692 != link_ksettings.base.link_mode_masks_nwords)
695 return dev->ethtool_ops->set_link_ksettings(dev, &link_ksettings);
698 /* Query device for its ethtool_cmd settings.
700 * Backward compatibility note: for compatibility with legacy ethtool, this is
701 * now implemented via get_link_ksettings. When the driver reports higher link mode
702 * bits, a kernel warning is logged once (with the name of the first driver/device)
703 * to recommend that the user upgrade ethtool, but the command still succeeds (only
704 * the lower link mode bits are reported back to the user). Deprecated fields from
705 * ethtool_cmd (transceiver/maxrxpkt/maxtxpkt) are always set to zero.
707 static int ethtool_get_settings(struct net_device *dev, void __user *useraddr)
709 struct ethtool_link_ksettings link_ksettings;
710 struct ethtool_cmd cmd;
714 if (!dev->ethtool_ops->get_link_ksettings)
717 memset(&link_ksettings, 0, sizeof(link_ksettings));
718 err = dev->ethtool_ops->get_link_ksettings(dev, &link_ksettings);
721 convert_link_ksettings_to_legacy_settings(&cmd, &link_ksettings);
723 /* send a sensible cmd tag back to user */
724 cmd.cmd = ETHTOOL_GSET;
726 if (copy_to_user(useraddr, &cmd, sizeof(cmd)))
732 /* Update device link settings with given ethtool_cmd.
734 * Backward compatibility note: for compatibility with legacy ethtool, this is
735 * now always implemented via set_link_ksettings. When the user's request updates
736 * deprecated ethtool_cmd fields (transceiver/maxrxpkt/maxtxpkt), a kernel
737 * warning is logged once (with the name of the first driver/device) to recommend
738 * that the user upgrade ethtool, and the request is rejected.
740 static int ethtool_set_settings(struct net_device *dev, void __user *useraddr)
742 struct ethtool_link_ksettings link_ksettings;
743 struct ethtool_cmd cmd;
747 if (copy_from_user(&cmd, useraddr, sizeof(cmd)))
749 if (!dev->ethtool_ops->set_link_ksettings)
752 if (!convert_legacy_settings_to_link_ksettings(&link_ksettings, &cmd))
754 link_ksettings.base.link_mode_masks_nwords =
755 __ETHTOOL_LINK_MODE_MASK_NU32;
756 return dev->ethtool_ops->set_link_ksettings(dev, &link_ksettings);
759 static noinline_for_stack int ethtool_get_drvinfo(struct net_device *dev,
760 void __user *useraddr)
762 struct ethtool_drvinfo info;
763 const struct ethtool_ops *ops = dev->ethtool_ops;
765 memset(&info, 0, sizeof(info));
766 info.cmd = ETHTOOL_GDRVINFO;
767 if (ops->get_drvinfo) {
768 ops->get_drvinfo(dev, &info);
769 } else if (dev->dev.parent && dev->dev.parent->driver) {
770 strlcpy(info.bus_info, dev_name(dev->dev.parent),
771 sizeof(info.bus_info));
772 strlcpy(info.driver, dev->dev.parent->driver->name,
773 sizeof(info.driver));
779 * this method of obtaining string set info is deprecated;
780 * Use ETHTOOL_GSSET_INFO instead.
782 if (ops->get_sset_count) {
785 rc = ops->get_sset_count(dev, ETH_SS_TEST);
787 info.testinfo_len = rc;
788 rc = ops->get_sset_count(dev, ETH_SS_STATS);
791 rc = ops->get_sset_count(dev, ETH_SS_PRIV_FLAGS);
793 info.n_priv_flags = rc;
795 if (ops->get_regs_len) {
796 int ret = ops->get_regs_len(dev);
799 info.regdump_len = ret;
802 if (ops->get_eeprom_len)
803 info.eedump_len = ops->get_eeprom_len(dev);
805 if (!info.fw_version[0])
806 devlink_compat_running_version(dev, info.fw_version,
807 sizeof(info.fw_version));
809 if (copy_to_user(useraddr, &info, sizeof(info)))
814 static noinline_for_stack int ethtool_get_sset_info(struct net_device *dev,
815 void __user *useraddr)
817 struct ethtool_sset_info info;
819 int i, idx = 0, n_bits = 0, ret, rc;
820 u32 *info_buf = NULL;
822 if (copy_from_user(&info, useraddr, sizeof(info)))
825 /* store copy of mask, because we zero struct later on */
826 sset_mask = info.sset_mask;
830 /* calculate size of return buffer */
831 n_bits = hweight64(sset_mask);
833 memset(&info, 0, sizeof(info));
834 info.cmd = ETHTOOL_GSSET_INFO;
836 info_buf = kcalloc(n_bits, sizeof(u32), GFP_USER);
841 * fill return buffer based on input bitmask and successful
842 * get_sset_count return
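* (the returned sset_mask keeps only the bits whose count lookup succeeded,
* and the u32 counts that follow the header are packed in ascending bit
* order)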
844 for (i = 0; i < 64; i++) {
845 if (!(sset_mask & (1ULL << i)))
848 rc = __ethtool_get_sset_count(dev, i);
850 info.sset_mask |= (1ULL << i);
851 info_buf[idx++] = rc;
856 if (copy_to_user(useraddr, &info, sizeof(info)))
859 useraddr += offsetof(struct ethtool_sset_info, data);
860 if (copy_to_user(useraddr, info_buf, idx * sizeof(u32)))
870 static noinline_for_stack int ethtool_set_rxnfc(struct net_device *dev,
871 u32 cmd, void __user *useraddr)
873 struct ethtool_rxnfc info;
874 size_t info_size = sizeof(info);
877 if (!dev->ethtool_ops->set_rxnfc)
880 /* struct ethtool_rxnfc was originally defined for
881 * ETHTOOL_{G,S}RXFH with only the cmd, flow_type and data
882 * members. User-space might still be using that
884 if (cmd == ETHTOOL_SRXFH)
885 info_size = (offsetof(struct ethtool_rxnfc, data) +
888 if (copy_from_user(&info, useraddr, info_size))
891 rc = dev->ethtool_ops->set_rxnfc(dev, &info);
895 if (cmd == ETHTOOL_SRXCLSRLINS &&
896 copy_to_user(useraddr, &info, info_size))
902 static noinline_for_stack int ethtool_get_rxnfc(struct net_device *dev,
903 u32 cmd, void __user *useraddr)
905 struct ethtool_rxnfc info;
906 size_t info_size = sizeof(info);
907 const struct ethtool_ops *ops = dev->ethtool_ops;
909 void *rule_buf = NULL;
914 /* struct ethtool_rxnfc was originally defined for
915 * ETHTOOL_{G,S}RXFH with only the cmd, flow_type and data
916 * members. User-space might still be using that
918 if (cmd == ETHTOOL_GRXFH)
919 info_size = (offsetof(struct ethtool_rxnfc, data) +
922 if (copy_from_user(&info, useraddr, info_size))
925 /* If FLOW_RSS was requested then user-space must be using the
926 * new definition, as FLOW_RSS is newer.
928 if (cmd == ETHTOOL_GRXFH && info.flow_type & FLOW_RSS) {
929 info_size = sizeof(info);
930 if (copy_from_user(&info, useraddr, info_size))
932 /* Since malicious users may modify the original data,
933 * we need to check whether FLOW_RSS is still requested.
935 if (!(info.flow_type & FLOW_RSS))
942 if (info.cmd == ETHTOOL_GRXCLSRLALL) {
943 if (info.rule_cnt > 0) {
944 if (info.rule_cnt <= KMALLOC_MAX_SIZE / sizeof(u32))
945 rule_buf = kcalloc(info.rule_cnt, sizeof(u32),
952 ret = ops->get_rxnfc(dev, &info, rule_buf);
957 if (copy_to_user(useraddr, &info, info_size))
961 useraddr += offsetof(struct ethtool_rxnfc, rule_locs);
962 if (copy_to_user(useraddr, rule_buf,
963 info.rule_cnt * sizeof(u32)))
974 static int ethtool_copy_validate_indir(u32 *indir, void __user *useraddr,
975 struct ethtool_rxnfc *rx_rings,
980 if (copy_from_user(indir, useraddr, size * sizeof(indir[0])))
983 /* Validate ring indices */
984 for (i = 0; i < size; i++)
985 if (indir[i] >= rx_rings->data)
991 u8 netdev_rss_key[NETDEV_RSS_KEY_LEN] __read_mostly;
993 void netdev_rss_key_fill(void *buffer, size_t len)
995 BUG_ON(len > sizeof(netdev_rss_key));
996 net_get_random_once(netdev_rss_key, sizeof(netdev_rss_key));
997 memcpy(buffer, netdev_rss_key, len);
999 EXPORT_SYMBOL(netdev_rss_key_fill);
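/*
 * Typical driver-side use (a sketch; "priv->rss_key" is a made-up field):
 * call netdev_rss_key_fill(priv->rss_key, sizeof(priv->rss_key)) while
 * setting up RSS, so every NIC in the system derives its hash key from the
 * same boot-time random value produced by net_get_random_once() above.
 */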
1001 static int ethtool_get_max_rxfh_channel(struct net_device *dev, u32 *max)
1003 u32 dev_size, current_max = 0;
1007 if (!dev->ethtool_ops->get_rxfh_indir_size ||
1008 !dev->ethtool_ops->get_rxfh)
1010 dev_size = dev->ethtool_ops->get_rxfh_indir_size(dev);
1014 indir = kcalloc(dev_size, sizeof(indir[0]), GFP_USER);
1018 ret = dev->ethtool_ops->get_rxfh(dev, indir, NULL, NULL);
1022 while (dev_size--)
1023 current_max = max(current_max, indir[dev_size]);
1032 static noinline_for_stack int ethtool_get_rxfh_indir(struct net_device *dev,
1033 void __user *useraddr)
1035 u32 user_size, dev_size;
1039 if (!dev->ethtool_ops->get_rxfh_indir_size ||
1040 !dev->ethtool_ops->get_rxfh)
1042 dev_size = dev->ethtool_ops->get_rxfh_indir_size(dev);
1046 if (copy_from_user(&user_size,
1047 useraddr + offsetof(struct ethtool_rxfh_indir, size),
1051 if (copy_to_user(useraddr + offsetof(struct ethtool_rxfh_indir, size),
1052 &dev_size, sizeof(dev_size)))
1055 /* If the user buffer size is 0, this is just a query for the
1056 * device table size. Otherwise, if it's smaller than the
1057 * device table size it's an error.
1059 if (user_size < dev_size)
1060 return user_size == 0 ? 0 : -EINVAL;
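/* ETHTOOL_GRXFHINDIR is therefore normally driven in two passes: once with
 * size == 0 just to learn dev_size (written back above), then again with room
 * for exactly dev_size ring indices to fetch the table itself.
 */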
1062 indir = kcalloc(dev_size, sizeof(indir[0]), GFP_USER);
1066 ret = dev->ethtool_ops->get_rxfh(dev, indir, NULL, NULL);
1070 if (copy_to_user(useraddr +
1071 offsetof(struct ethtool_rxfh_indir, ring_index[0]),
1072 indir, dev_size * sizeof(indir[0])))
1080 static noinline_for_stack int ethtool_set_rxfh_indir(struct net_device *dev,
1081 void __user *useraddr)
1083 struct ethtool_rxnfc rx_rings;
1084 u32 user_size, dev_size, i;
1086 const struct ethtool_ops *ops = dev->ethtool_ops;
1088 u32 ringidx_offset = offsetof(struct ethtool_rxfh_indir, ring_index[0]);
1090 if (!ops->get_rxfh_indir_size || !ops->set_rxfh ||
1094 dev_size = ops->get_rxfh_indir_size(dev);
1098 if (copy_from_user(&user_size,
1099 useraddr + offsetof(struct ethtool_rxfh_indir, size),
1103 if (user_size != 0 && user_size != dev_size)
1106 indir = kcalloc(dev_size, sizeof(indir[0]), GFP_USER);
1110 rx_rings.cmd = ETHTOOL_GRXRINGS;
1111 ret = ops->get_rxnfc(dev, &rx_rings, NULL);
1115 if (user_size == 0) {
1116 for (i = 0; i < dev_size; i++)
1117 indir[i] = ethtool_rxfh_indir_default(i, rx_rings.data);
1119 ret = ethtool_copy_validate_indir(indir,
1120 useraddr + ringidx_offset,
1127 ret = ops->set_rxfh(dev, indir, NULL, ETH_RSS_HASH_NO_CHANGE);
1131 /* indicate whether rxfh was set to default */
1133 dev->priv_flags &= ~IFF_RXFH_CONFIGURED;
1135 dev->priv_flags |= IFF_RXFH_CONFIGURED;
1142 static noinline_for_stack int ethtool_get_rxfh(struct net_device *dev,
1143 void __user *useraddr)
1146 const struct ethtool_ops *ops = dev->ethtool_ops;
1147 u32 user_indir_size, user_key_size;
1148 u32 dev_indir_size = 0, dev_key_size = 0;
1149 struct ethtool_rxfh rxfh;
1160 if (ops->get_rxfh_indir_size)
1161 dev_indir_size = ops->get_rxfh_indir_size(dev);
1162 if (ops->get_rxfh_key_size)
1163 dev_key_size = ops->get_rxfh_key_size(dev);
1165 if (copy_from_user(&rxfh, useraddr, sizeof(rxfh)))
1167 user_indir_size = rxfh.indir_size;
1168 user_key_size = rxfh.key_size;
1170 /* Check that reserved fields are 0 for now */
1171 if (rxfh.rsvd8[0] || rxfh.rsvd8[1] || rxfh.rsvd8[2] || rxfh.rsvd32)
1173 /* Most drivers don't handle rss_context, check it's 0 as well */
1174 if (rxfh.rss_context && !ops->get_rxfh_context)
1177 rxfh.indir_size = dev_indir_size;
1178 rxfh.key_size = dev_key_size;
1179 if (copy_to_user(useraddr, &rxfh, sizeof(rxfh)))
1182 if ((user_indir_size && (user_indir_size != dev_indir_size)) ||
1183 (user_key_size && (user_key_size != dev_key_size)))
1186 indir_bytes = user_indir_size * sizeof(indir[0]);
1187 total_size = indir_bytes + user_key_size;
1188 rss_config = kzalloc(total_size, GFP_USER);
1192 if (user_indir_size)
1193 indir = (u32 *)rss_config;
1196 hkey = rss_config + indir_bytes;
1198 if (rxfh.rss_context)
1199 ret = dev->ethtool_ops->get_rxfh_context(dev, indir, hkey,
1203 ret = dev->ethtool_ops->get_rxfh(dev, indir, hkey, &dev_hfunc);
1207 if (copy_to_user(useraddr + offsetof(struct ethtool_rxfh, hfunc),
1208 &dev_hfunc, sizeof(rxfh.hfunc))) {
1210 } else if (copy_to_user(useraddr +
1211 offsetof(struct ethtool_rxfh, rss_config[0]),
1212 rss_config, total_size)) {
1221 static noinline_for_stack int ethtool_set_rxfh(struct net_device *dev,
1222 void __user *useraddr)
1225 const struct ethtool_ops *ops = dev->ethtool_ops;
1226 struct ethtool_rxnfc rx_rings;
1227 struct ethtool_rxfh rxfh;
1228 u32 dev_indir_size = 0, dev_key_size = 0, i;
1229 u32 *indir = NULL, indir_bytes = 0;
1232 u32 rss_cfg_offset = offsetof(struct ethtool_rxfh, rss_config[0]);
1233 bool delete = false;
1235 if (!ops->get_rxnfc || !ops->set_rxfh)
1238 if (ops->get_rxfh_indir_size)
1239 dev_indir_size = ops->get_rxfh_indir_size(dev);
1240 if (ops->get_rxfh_key_size)
1241 dev_key_size = ops->get_rxfh_key_size(dev);
1243 if (copy_from_user(&rxfh, useraddr, sizeof(rxfh)))
1246 /* Check that reserved fields are 0 for now */
1247 if (rxfh.rsvd8[0] || rxfh.rsvd8[1] || rxfh.rsvd8[2] || rxfh.rsvd32)
1249 /* Most drivers don't handle rss_context, check it's 0 as well */
1250 if (rxfh.rss_context && !ops->set_rxfh_context)
1253 /* Bail out unless the supplied sizes match the device and the request asks
1254 * for at least one change: indirection table, hash key or hash function.
1256 if ((rxfh.indir_size &&
1257 rxfh.indir_size != ETH_RXFH_INDIR_NO_CHANGE &&
1258 rxfh.indir_size != dev_indir_size) ||
1259 (rxfh.key_size && (rxfh.key_size != dev_key_size)) ||
1260 (rxfh.indir_size == ETH_RXFH_INDIR_NO_CHANGE &&
1261 rxfh.key_size == 0 && rxfh.hfunc == ETH_RSS_HASH_NO_CHANGE))
1264 if (rxfh.indir_size != ETH_RXFH_INDIR_NO_CHANGE)
1265 indir_bytes = dev_indir_size * sizeof(indir[0]);
1267 rss_config = kzalloc(indir_bytes + rxfh.key_size, GFP_USER);
1271 rx_rings.cmd = ETHTOOL_GRXRINGS;
1272 ret = ops->get_rxnfc(dev, &rx_rings, NULL);
1276 /* rxfh.indir_size == 0 means reset the indir table to default (master
1277 * context) or delete the context (other RSS contexts).
1278 * rxfh.indir_size == ETH_RXFH_INDIR_NO_CHANGE means leave it unchanged.
1280 if (rxfh.indir_size &&
1281 rxfh.indir_size != ETH_RXFH_INDIR_NO_CHANGE) {
1282 indir = (u32 *)rss_config;
1283 ret = ethtool_copy_validate_indir(indir,
1284 useraddr + rss_cfg_offset,
1289 } else if (rxfh.indir_size == 0) {
1290 if (rxfh.rss_context == 0) {
1291 indir = (u32 *)rss_config;
1292 for (i = 0; i < dev_indir_size; i++)
1293 indir[i] = ethtool_rxfh_indir_default(i, rx_rings.data);
1299 if (rxfh.key_size) {
1300 hkey = rss_config + indir_bytes;
1301 if (copy_from_user(hkey,
1302 useraddr + rss_cfg_offset + indir_bytes,
1309 if (rxfh.rss_context)
1310 ret = ops->set_rxfh_context(dev, indir, hkey, rxfh.hfunc,
1311 &rxfh.rss_context, delete);
1313 ret = ops->set_rxfh(dev, indir, hkey, rxfh.hfunc);
1317 if (copy_to_user(useraddr + offsetof(struct ethtool_rxfh, rss_context),
1318 &rxfh.rss_context, sizeof(rxfh.rss_context)))
1321 if (!rxfh.rss_context) {
1322 /* indicate whether rxfh was set to default */
1323 if (rxfh.indir_size == 0)
1324 dev->priv_flags &= ~IFF_RXFH_CONFIGURED;
1325 else if (rxfh.indir_size != ETH_RXFH_INDIR_NO_CHANGE)
1326 dev->priv_flags |= IFF_RXFH_CONFIGURED;
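/* IFF_RXFH_CONFIGURED is what ethtool_set_channels() later checks (through
 * netif_is_rxfh_configured()) before allowing the ring count to shrink below
 * what a user-supplied indirection table still references.
 */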
1334 static int ethtool_get_regs(struct net_device *dev, char __user *useraddr)
1336 struct ethtool_regs regs;
1337 const struct ethtool_ops *ops = dev->ethtool_ops;
1341 if (!ops->get_regs || !ops->get_regs_len)
1344 if (copy_from_user(&regs, useraddr, sizeof(regs)))
1347 reglen = ops->get_regs_len(dev);
1351 if (regs.len > reglen)
1354 regbuf = vzalloc(reglen);
1358 if (regs.len < reglen)
1361 ops->get_regs(dev, &regs, regbuf);
1364 if (copy_to_user(useraddr, &regs, sizeof(regs)))
1366 useraddr += offsetof(struct ethtool_regs, data);
1367 if (copy_to_user(useraddr, regbuf, reglen))
1376 static int ethtool_reset(struct net_device *dev, char __user *useraddr)
1378 struct ethtool_value reset;
1381 if (!dev->ethtool_ops->reset)
1384 if (copy_from_user(&reset, useraddr, sizeof(reset)))
1387 ret = dev->ethtool_ops->reset(dev, &reset.data);
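/* reset.data goes in as a mask of ETH_RESET_xxx component flags; the driver
 * is expected to clear the bits it actually reset, so the value copied back
 * below tells userspace which components were left untouched.
 */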
1391 if (copy_to_user(useraddr, &reset, sizeof(reset)))
1396 static int ethtool_get_wol(struct net_device *dev, char __user *useraddr)
1398 struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
1400 if (!dev->ethtool_ops->get_wol)
1403 dev->ethtool_ops->get_wol(dev, &wol);
1405 if (copy_to_user(useraddr, &wol, sizeof(wol)))
1410 static int ethtool_set_wol(struct net_device *dev, char __user *useraddr)
1412 struct ethtool_wolinfo wol;
1415 if (!dev->ethtool_ops->set_wol)
1418 if (copy_from_user(&wol, useraddr, sizeof(wol)))
1421 ret = dev->ethtool_ops->set_wol(dev, &wol);
1425 dev->wol_enabled = !!wol.wolopts;
1430 static int ethtool_get_eee(struct net_device *dev, char __user *useraddr)
1432 struct ethtool_eee edata;
1435 if (!dev->ethtool_ops->get_eee)
1438 memset(&edata, 0, sizeof(struct ethtool_eee));
1439 edata.cmd = ETHTOOL_GEEE;
1440 rc = dev->ethtool_ops->get_eee(dev, &edata);
1445 if (copy_to_user(useraddr, &edata, sizeof(edata)))
1451 static int ethtool_set_eee(struct net_device *dev, char __user *useraddr)
1453 struct ethtool_eee edata;
1455 if (!dev->ethtool_ops->set_eee)
1458 if (copy_from_user(&edata, useraddr, sizeof(edata)))
1461 return dev->ethtool_ops->set_eee(dev, &edata);
1464 static int ethtool_nway_reset(struct net_device *dev)
1466 if (!dev->ethtool_ops->nway_reset)
1469 return dev->ethtool_ops->nway_reset(dev);
1472 static int ethtool_get_link(struct net_device *dev, char __user *useraddr)
1474 struct ethtool_value edata = { .cmd = ETHTOOL_GLINK };
1476 if (!dev->ethtool_ops->get_link)
1479 edata.data = netif_running(dev) && dev->ethtool_ops->get_link(dev);
1481 if (copy_to_user(useraddr, &edata, sizeof(edata)))
1486 static int ethtool_get_any_eeprom(struct net_device *dev, void __user *useraddr,
1487 int (*getter)(struct net_device *,
1488 struct ethtool_eeprom *, u8 *),
1491 struct ethtool_eeprom eeprom;
1492 void __user *userbuf = useraddr + sizeof(eeprom);
1493 u32 bytes_remaining;
1497 if (copy_from_user(&eeprom, useraddr, sizeof(eeprom)))
1500 /* Check for wrap and zero */
1501 if (eeprom.offset + eeprom.len <= eeprom.offset)
1504 /* Check for exceeding total eeprom len */
1505 if (eeprom.offset + eeprom.len > total_len)
1508 data = kmalloc(PAGE_SIZE, GFP_USER);
1512 bytes_remaining = eeprom.len;
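/* Transfer the EEPROM in chunks of at most one page so the bounce buffer
 * stays small; eeprom.len and eeprom.offset track the current chunk for the
 * driver callback and are recomputed before the header is copied back below.
 */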
1513 while (bytes_remaining > 0) {
1514 eeprom.len = min(bytes_remaining, (u32)PAGE_SIZE);
1516 ret = getter(dev, &eeprom, data);
1519 if (copy_to_user(userbuf, data, eeprom.len)) {
1523 userbuf += eeprom.len;
1524 eeprom.offset += eeprom.len;
1525 bytes_remaining -= eeprom.len;
1528 eeprom.len = userbuf - (useraddr + sizeof(eeprom));
1529 eeprom.offset -= eeprom.len;
1530 if (copy_to_user(useraddr, &eeprom, sizeof(eeprom)))
1537 static int ethtool_get_eeprom(struct net_device *dev, void __user *useraddr)
1539 const struct ethtool_ops *ops = dev->ethtool_ops;
1541 if (!ops->get_eeprom || !ops->get_eeprom_len ||
1542 !ops->get_eeprom_len(dev))
1545 return ethtool_get_any_eeprom(dev, useraddr, ops->get_eeprom,
1546 ops->get_eeprom_len(dev));
1549 static int ethtool_set_eeprom(struct net_device *dev, void __user *useraddr)
1551 struct ethtool_eeprom eeprom;
1552 const struct ethtool_ops *ops = dev->ethtool_ops;
1553 void __user *userbuf = useraddr + sizeof(eeprom);
1554 u32 bytes_remaining;
1558 if (!ops->set_eeprom || !ops->get_eeprom_len ||
1559 !ops->get_eeprom_len(dev))
1562 if (copy_from_user(&eeprom, useraddr, sizeof(eeprom)))
1565 /* Check for wrap and zero */
1566 if (eeprom.offset + eeprom.len <= eeprom.offset)
1569 /* Check for exceeding total eeprom len */
1570 if (eeprom.offset + eeprom.len > ops->get_eeprom_len(dev))
1573 data = kmalloc(PAGE_SIZE, GFP_USER);
1577 bytes_remaining = eeprom.len;
1578 while (bytes_remaining > 0) {
1579 eeprom.len = min(bytes_remaining, (u32)PAGE_SIZE);
1581 if (copy_from_user(data, userbuf, eeprom.len)) {
1585 ret = ops->set_eeprom(dev, &eeprom, data);
1588 userbuf += eeprom.len;
1589 eeprom.offset += eeprom.len;
1590 bytes_remaining -= eeprom.len;
1597 static noinline_for_stack int ethtool_get_coalesce(struct net_device *dev,
1598 void __user *useraddr)
1600 struct ethtool_coalesce coalesce = { .cmd = ETHTOOL_GCOALESCE };
1602 if (!dev->ethtool_ops->get_coalesce)
1605 dev->ethtool_ops->get_coalesce(dev, &coalesce);
1607 if (copy_to_user(useraddr, &coalesce, sizeof(coalesce)))
1612 static noinline_for_stack int ethtool_set_coalesce(struct net_device *dev,
1613 void __user *useraddr)
1615 struct ethtool_coalesce coalesce;
1617 if (!dev->ethtool_ops->set_coalesce)
1620 if (copy_from_user(&coalesce, useraddr, sizeof(coalesce)))
1623 return dev->ethtool_ops->set_coalesce(dev, &coalesce);
1626 static int ethtool_get_ringparam(struct net_device *dev, void __user *useraddr)
1628 struct ethtool_ringparam ringparam = { .cmd = ETHTOOL_GRINGPARAM };
1630 if (!dev->ethtool_ops->get_ringparam)
1633 dev->ethtool_ops->get_ringparam(dev, &ringparam);
1635 if (copy_to_user(useraddr, &ringparam, sizeof(ringparam)))
1640 static int ethtool_set_ringparam(struct net_device *dev, void __user *useraddr)
1642 struct ethtool_ringparam ringparam, max = { .cmd = ETHTOOL_GRINGPARAM };
1644 if (!dev->ethtool_ops->set_ringparam || !dev->ethtool_ops->get_ringparam)
1647 if (copy_from_user(&ringparam, useraddr, sizeof(ringparam)))
1650 dev->ethtool_ops->get_ringparam(dev, &max);
1652 /* ensure new ring parameters are within the maximums */
1653 if (ringparam.rx_pending > max.rx_max_pending ||
1654 ringparam.rx_mini_pending > max.rx_mini_max_pending ||
1655 ringparam.rx_jumbo_pending > max.rx_jumbo_max_pending ||
1656 ringparam.tx_pending > max.tx_max_pending)
1659 return dev->ethtool_ops->set_ringparam(dev, &ringparam);
1662 static noinline_for_stack int ethtool_get_channels(struct net_device *dev,
1663 void __user *useraddr)
1665 struct ethtool_channels channels = { .cmd = ETHTOOL_GCHANNELS };
1667 if (!dev->ethtool_ops->get_channels)
1670 dev->ethtool_ops->get_channels(dev, &channels);
1672 if (copy_to_user(useraddr, &channels, sizeof(channels)))
1677 static noinline_for_stack int ethtool_set_channels(struct net_device *dev,
1678 void __user *useraddr)
1680 struct ethtool_channels channels, curr = { .cmd = ETHTOOL_GCHANNELS };
1681 u16 from_channel, to_channel;
1682 u32 max_rx_in_use = 0;
1685 if (!dev->ethtool_ops->set_channels || !dev->ethtool_ops->get_channels)
1688 if (copy_from_user(&channels, useraddr, sizeof(channels)))
1691 dev->ethtool_ops->get_channels(dev, &curr);
1693 /* ensure new counts are within the maximums */
1694 if (channels.rx_count > curr.max_rx ||
1695 channels.tx_count > curr.max_tx ||
1696 channels.combined_count > curr.max_combined ||
1697 channels.other_count > curr.max_other)
1700 /* ensure the new Rx count fits within the configured Rx flow
1701 * indirection table settings */
1702 if (netif_is_rxfh_configured(dev) &&
1703 !ethtool_get_max_rxfh_channel(dev, &max_rx_in_use) &&
1704 (channels.combined_count + channels.rx_count) <= max_rx_in_use)
1707 /* Disabling channels, query zero-copy AF_XDP sockets */
1708 from_channel = channels.combined_count +
1709 min(channels.rx_count, channels.tx_count);
1710 to_channel = curr.combined_count + max(curr.rx_count, curr.tx_count);
1711 for (i = from_channel; i < to_channel; i++)
1712 if (xdp_get_umem_from_qid(dev, i))
1715 return dev->ethtool_ops->set_channels(dev, &channels);
1718 static int ethtool_get_pauseparam(struct net_device *dev, void __user *useraddr)
1720 struct ethtool_pauseparam pauseparam = { .cmd = ETHTOOL_GPAUSEPARAM };
1722 if (!dev->ethtool_ops->get_pauseparam)
1725 dev->ethtool_ops->get_pauseparam(dev, &pauseparam);
1727 if (copy_to_user(useraddr, &pauseparam, sizeof(pauseparam)))
1732 static int ethtool_set_pauseparam(struct net_device *dev, void __user *useraddr)
1734 struct ethtool_pauseparam pauseparam;
1736 if (!dev->ethtool_ops->set_pauseparam)
1739 if (copy_from_user(&pauseparam, useraddr, sizeof(pauseparam)))
1742 return dev->ethtool_ops->set_pauseparam(dev, &pauseparam);
1745 static int ethtool_self_test(struct net_device *dev, char __user *useraddr)
1747 struct ethtool_test test;
1748 const struct ethtool_ops *ops = dev->ethtool_ops;
1752 if (!ops->self_test || !ops->get_sset_count)
1755 test_len = ops->get_sset_count(dev, ETH_SS_TEST);
1758 WARN_ON(test_len == 0);
1760 if (copy_from_user(&test, useraddr, sizeof(test)))
1763 test.len = test_len;
1764 data = kmalloc_array(test_len, sizeof(u64), GFP_USER);
1768 ops->self_test(dev, &test, data);
1771 if (copy_to_user(useraddr, &test, sizeof(test)))
1773 useraddr += sizeof(test);
1774 if (copy_to_user(useraddr, data, test.len * sizeof(u64)))
1783 static int ethtool_get_strings(struct net_device *dev, void __user *useraddr)
1785 struct ethtool_gstrings gstrings;
1789 if (copy_from_user(&gstrings, useraddr, sizeof(gstrings)))
1792 ret = __ethtool_get_sset_count(dev, gstrings.string_set);
1795 if (ret > S32_MAX / ETH_GSTRING_LEN)
1802 data = vzalloc(array_size(gstrings.len, ETH_GSTRING_LEN));
1806 __ethtool_get_strings(dev, gstrings.string_set, data);
1812 if (copy_to_user(useraddr, &gstrings, sizeof(gstrings)))
1814 useraddr += sizeof(gstrings);
1816 copy_to_user(useraddr, data, gstrings.len * ETH_GSTRING_LEN))
1825 static int ethtool_phys_id(struct net_device *dev, void __user *useraddr)
1827 struct ethtool_value id;
1829 const struct ethtool_ops *ops = dev->ethtool_ops;
1832 if (!ops->set_phys_id)
1838 if (copy_from_user(&id, useraddr, sizeof(id)))
1841 rc = ops->set_phys_id(dev, ETHTOOL_ID_ACTIVE);
1845 /* Drop the RTNL lock while waiting, but prevent reentry or
1846 * removal of the device.
1853 /* Driver will handle this itself */
1854 schedule_timeout_interruptible(
1855 id.data ? (id.data * HZ) : MAX_SCHEDULE_TIMEOUT);
1857 /* Driver expects to be called at twice the frequency in rc */
1858 int n = rc * 2, i, interval = HZ / n;
1860 /* Count down seconds */
1862 /* Count down iterations per second */
1866 rc = ops->set_phys_id(dev,
1867 (i & 1) ? ETHTOOL_ID_OFF : ETHTOOL_ID_ON);
1871 schedule_timeout_interruptible(interval);
1872 } while (!signal_pending(current) && --i != 0);
1873 } while (!signal_pending(current) &&
1874 (id.data == 0 || --id.data != 0));
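/* However the wait above ended (timeout, signal or driver error), retake the
 * RTNL lock and force the indicator back to ETHTOOL_ID_INACTIVE below.
 */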
1881 (void) ops->set_phys_id(dev, ETHTOOL_ID_INACTIVE);
1885 static int ethtool_get_stats(struct net_device *dev, void __user *useraddr)
1887 struct ethtool_stats stats;
1888 const struct ethtool_ops *ops = dev->ethtool_ops;
1892 if (!ops->get_ethtool_stats || !ops->get_sset_count)
1895 n_stats = ops->get_sset_count(dev, ETH_SS_STATS);
1898 if (n_stats > S32_MAX / sizeof(u64))
1900 WARN_ON_ONCE(!n_stats);
1901 if (copy_from_user(&stats, useraddr, sizeof(stats)))
1904 stats.n_stats = n_stats;
1907 data = vzalloc(array_size(n_stats, sizeof(u64)));
1910 ops->get_ethtool_stats(dev, &stats, data);
1916 if (copy_to_user(useraddr, &stats, sizeof(stats)))
1918 useraddr += sizeof(stats);
1919 if (n_stats && copy_to_user(useraddr, data, n_stats * sizeof(u64)))
1928 static int ethtool_get_phy_stats(struct net_device *dev, void __user *useraddr)
1930 const struct ethtool_ops *ops = dev->ethtool_ops;
1931 struct phy_device *phydev = dev->phydev;
1932 struct ethtool_stats stats;
1936 if (!phydev && (!ops->get_ethtool_phy_stats || !ops->get_sset_count))
1939 if (dev->phydev && !ops->get_ethtool_phy_stats)
1940 n_stats = phy_ethtool_get_sset_count(dev->phydev);
1942 n_stats = ops->get_sset_count(dev, ETH_SS_PHY_STATS);
1945 if (n_stats > S32_MAX / sizeof(u64))
1947 WARN_ON_ONCE(!n_stats);
1949 if (copy_from_user(&stats, useraddr, sizeof(stats)))
1952 stats.n_stats = n_stats;
1955 data = vzalloc(array_size(n_stats, sizeof(u64)));
1959 if (dev->phydev && !ops->get_ethtool_phy_stats) {
1960 ret = phy_ethtool_get_stats(dev->phydev, &stats, data);
1964 ops->get_ethtool_phy_stats(dev, &stats, data);
1971 if (copy_to_user(useraddr, &stats, sizeof(stats)))
1973 useraddr += sizeof(stats);
1974 if (n_stats && copy_to_user(useraddr, data, n_stats * sizeof(u64)))
1983 static int ethtool_get_perm_addr(struct net_device *dev, void __user *useraddr)
1985 struct ethtool_perm_addr epaddr;
1987 if (copy_from_user(&epaddr, useraddr, sizeof(epaddr)))
1990 if (epaddr.size < dev->addr_len)
1992 epaddr.size = dev->addr_len;
1994 if (copy_to_user(useraddr, &epaddr, sizeof(epaddr)))
1996 useraddr += sizeof(epaddr);
1997 if (copy_to_user(useraddr, dev->perm_addr, epaddr.size))
2002 static int ethtool_get_value(struct net_device *dev, char __user *useraddr,
2003 u32 cmd, u32 (*actor)(struct net_device *))
2005 struct ethtool_value edata = { .cmd = cmd };
2010 edata.data = actor(dev);
2012 if (copy_to_user(useraddr, &edata, sizeof(edata)))
2017 static int ethtool_set_value_void(struct net_device *dev, char __user *useraddr,
2018 void (*actor)(struct net_device *, u32))
2020 struct ethtool_value edata;
2025 if (copy_from_user(&edata, useraddr, sizeof(edata)))
2028 actor(dev, edata.data);
2032 static int ethtool_set_value(struct net_device *dev, char __user *useraddr,
2033 int (*actor)(struct net_device *, u32))
2035 struct ethtool_value edata;
2040 if (copy_from_user(&edata, useraddr, sizeof(edata)))
2043 return actor(dev, edata.data);
2046 static noinline_for_stack int ethtool_flash_device(struct net_device *dev,
2047 char __user *useraddr)
2049 struct ethtool_flash efl;
2051 if (copy_from_user(&efl, useraddr, sizeof(efl)))
2053 efl.data[ETHTOOL_FLASH_MAX_FILENAME - 1] = 0;
2055 if (!dev->ethtool_ops->flash_device)
2056 return devlink_compat_flash_update(dev, efl.data);
2058 return dev->ethtool_ops->flash_device(dev, &efl);
2061 static int ethtool_set_dump(struct net_device *dev,
2062 void __user *useraddr)
2064 struct ethtool_dump dump;
2066 if (!dev->ethtool_ops->set_dump)
2069 if (copy_from_user(&dump, useraddr, sizeof(dump)))
2072 return dev->ethtool_ops->set_dump(dev, &dump);
2075 static int ethtool_get_dump_flag(struct net_device *dev,
2076 void __user *useraddr)
2079 struct ethtool_dump dump;
2080 const struct ethtool_ops *ops = dev->ethtool_ops;
2082 if (!ops->get_dump_flag)
2085 if (copy_from_user(&dump, useraddr, sizeof(dump)))
2088 ret = ops->get_dump_flag(dev, &dump);
2092 if (copy_to_user(useraddr, &dump, sizeof(dump)))
2097 static int ethtool_get_dump_data(struct net_device *dev,
2098 void __user *useraddr)
2102 struct ethtool_dump dump, tmp;
2103 const struct ethtool_ops *ops = dev->ethtool_ops;
2106 if (!ops->get_dump_data || !ops->get_dump_flag)
2109 if (copy_from_user(&dump, useraddr, sizeof(dump)))
2112 memset(&tmp, 0, sizeof(tmp));
2113 tmp.cmd = ETHTOOL_GET_DUMP_FLAG;
2114 ret = ops->get_dump_flag(dev, &tmp);
2118 len = min(tmp.len, dump.len);
2122 /* Don't ever let the driver think there's more space available
2123 * than it requested with .get_dump_flag().
2127 /* Always allocate enough space to hold the whole thing so that the
2128 * driver does not need to check the length and bother with partial
2131 data = vzalloc(tmp.len);
2134 ret = ops->get_dump_data(dev, &dump, data);
2138 /* There are two sane possibilities:
2139 * 1. The driver's .get_dump_data() does not touch dump.len.
2140 * 2. Or it may set dump.len to how much it really writes, which
2141 * should be tmp.len (or len if it can do a partial dump).
2142 * In any case respond to userspace with the actual length of data
2145 WARN_ON(dump.len != len && dump.len != tmp.len);
2148 if (copy_to_user(useraddr, &dump, sizeof(dump))) {
2152 useraddr += offsetof(struct ethtool_dump, data);
2153 if (copy_to_user(useraddr, data, len))
2160 static int ethtool_get_ts_info(struct net_device *dev, void __user *useraddr)
2163 struct ethtool_ts_info info;
2164 const struct ethtool_ops *ops = dev->ethtool_ops;
2165 struct phy_device *phydev = dev->phydev;
2167 memset(&info, 0, sizeof(info));
2168 info.cmd = ETHTOOL_GET_TS_INFO;
2170 if (phydev && phydev->drv && phydev->drv->ts_info) {
2171 err = phydev->drv->ts_info(phydev, &info);
2172 } else if (ops->get_ts_info) {
2173 err = ops->get_ts_info(dev, &info);
2175 info.so_timestamping =
2176 SOF_TIMESTAMPING_RX_SOFTWARE |
2177 SOF_TIMESTAMPING_SOFTWARE;
2178 info.phc_index = -1;
2184 if (copy_to_user(useraddr, &info, sizeof(info)))
2190 static int __ethtool_get_module_info(struct net_device *dev,
2191 struct ethtool_modinfo *modinfo)
2193 const struct ethtool_ops *ops = dev->ethtool_ops;
2194 struct phy_device *phydev = dev->phydev;
2197 return sfp_get_module_info(dev->sfp_bus, modinfo);
2199 if (phydev && phydev->drv && phydev->drv->module_info)
2200 return phydev->drv->module_info(phydev, modinfo);
2202 if (ops->get_module_info)
2203 return ops->get_module_info(dev, modinfo);
2208 static int ethtool_get_module_info(struct net_device *dev,
2209 void __user *useraddr)
2212 struct ethtool_modinfo modinfo;
2214 if (copy_from_user(&modinfo, useraddr, sizeof(modinfo)))
2217 ret = __ethtool_get_module_info(dev, &modinfo);
2221 if (copy_to_user(useraddr, &modinfo, sizeof(modinfo)))
2227 static int __ethtool_get_module_eeprom(struct net_device *dev,
2228 struct ethtool_eeprom *ee, u8 *data)
2230 const struct ethtool_ops *ops = dev->ethtool_ops;
2231 struct phy_device *phydev = dev->phydev;
2234 return sfp_get_module_eeprom(dev->sfp_bus, ee, data);
2236 if (phydev && phydev->drv && phydev->drv->module_eeprom)
2237 return phydev->drv->module_eeprom(phydev, ee, data);
2239 if (ops->get_module_eeprom)
2240 return ops->get_module_eeprom(dev, ee, data);
2245 static int ethtool_get_module_eeprom(struct net_device *dev,
2246 void __user *useraddr)
2249 struct ethtool_modinfo modinfo;
2251 ret = __ethtool_get_module_info(dev, &modinfo);
2255 return ethtool_get_any_eeprom(dev, useraddr,
2256 __ethtool_get_module_eeprom,
2257 modinfo.eeprom_len);
2260 static int ethtool_tunable_valid(const struct ethtool_tunable *tuna)
2263 case ETHTOOL_RX_COPYBREAK:
2264 case ETHTOOL_TX_COPYBREAK:
2265 if (tuna->len != sizeof(u32) ||
2266 tuna->type_id != ETHTOOL_TUNABLE_U32)
2269 case ETHTOOL_PFC_PREVENTION_TOUT:
2270 if (tuna->len != sizeof(u16) ||
2271 tuna->type_id != ETHTOOL_TUNABLE_U16)
2281 static int ethtool_get_tunable(struct net_device *dev, void __user *useraddr)
2284 struct ethtool_tunable tuna;
2285 const struct ethtool_ops *ops = dev->ethtool_ops;
2288 if (!ops->get_tunable)
2290 if (copy_from_user(&tuna, useraddr, sizeof(tuna)))
2292 ret = ethtool_tunable_valid(&tuna);
2295 data = kmalloc(tuna.len, GFP_USER);
2298 ret = ops->get_tunable(dev, &tuna, data);
2301 useraddr += sizeof(tuna);
2303 if (copy_to_user(useraddr, data, tuna.len))
2312 static int ethtool_set_tunable(struct net_device *dev, void __user *useraddr)
2315 struct ethtool_tunable tuna;
2316 const struct ethtool_ops *ops = dev->ethtool_ops;
2319 if (!ops->set_tunable)
2321 if (copy_from_user(&tuna, useraddr, sizeof(tuna)))
2323 ret = ethtool_tunable_valid(&tuna);
2326 useraddr += sizeof(tuna);
2327 data = memdup_user(useraddr, tuna.len);
2329 return PTR_ERR(data);
2330 ret = ops->set_tunable(dev, &tuna, data);
2336 static noinline_for_stack int
2337 ethtool_get_per_queue_coalesce(struct net_device *dev,
2338 void __user *useraddr,
2339 struct ethtool_per_queue_op *per_queue_opt)
2343 DECLARE_BITMAP(queue_mask, MAX_NUM_QUEUE);
2345 if (!dev->ethtool_ops->get_per_queue_coalesce)
2348 useraddr += sizeof(*per_queue_opt);
2350 bitmap_from_arr32(queue_mask, per_queue_opt->queue_mask,
2353 for_each_set_bit(bit, queue_mask, MAX_NUM_QUEUE) {
2354 struct ethtool_coalesce coalesce = { .cmd = ETHTOOL_GCOALESCE };
2356 ret = dev->ethtool_ops->get_per_queue_coalesce(dev, bit, &coalesce);
2359 if (copy_to_user(useraddr, &coalesce, sizeof(coalesce)))
2361 useraddr += sizeof(coalesce);
2367 static noinline_for_stack int
2368 ethtool_set_per_queue_coalesce(struct net_device *dev,
2369 void __user *useraddr,
2370 struct ethtool_per_queue_op *per_queue_opt)
2375 struct ethtool_coalesce *backup = NULL, *tmp = NULL;
2376 DECLARE_BITMAP(queue_mask, MAX_NUM_QUEUE);
2378 if ((!dev->ethtool_ops->set_per_queue_coalesce) ||
2379 (!dev->ethtool_ops->get_per_queue_coalesce))
2382 useraddr += sizeof(*per_queue_opt);
2384 bitmap_from_arr32(queue_mask, per_queue_opt->queue_mask, MAX_NUM_QUEUE);
2385 n_queue = bitmap_weight(queue_mask, MAX_NUM_QUEUE);
2386 tmp = backup = kmalloc_array(n_queue, sizeof(*backup), GFP_KERNEL);
2390 for_each_set_bit(bit, queue_mask, MAX_NUM_QUEUE) {
2391 struct ethtool_coalesce coalesce;
2393 ret = dev->ethtool_ops->get_per_queue_coalesce(dev, bit, tmp);
2399 if (copy_from_user(&coalesce, useraddr, sizeof(coalesce))) {
2404 ret = dev->ethtool_ops->set_per_queue_coalesce(dev, bit, &coalesce);
2408 useraddr += sizeof(coalesce);
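/* If any per-queue update above fails, the queues already modified are rolled
 * back by replaying the settings that were saved into backup[] beforehand.
 */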
2414 for_each_set_bit(i, queue_mask, bit) {
2415 dev->ethtool_ops->set_per_queue_coalesce(dev, i, tmp);
2424 static int noinline_for_stack ethtool_set_per_queue(struct net_device *dev,
2425 void __user *useraddr, u32 sub_cmd)
2427 struct ethtool_per_queue_op per_queue_opt;
2429 if (copy_from_user(&per_queue_opt, useraddr, sizeof(per_queue_opt)))
2432 if (per_queue_opt.sub_command != sub_cmd)
2435 switch (per_queue_opt.sub_command) {
2436 case ETHTOOL_GCOALESCE:
2437 return ethtool_get_per_queue_coalesce(dev, useraddr, &per_queue_opt);
2438 case ETHTOOL_SCOALESCE:
2439 return ethtool_set_per_queue_coalesce(dev, useraddr, &per_queue_opt);
2445 static int ethtool_phy_tunable_valid(const struct ethtool_tunable *tuna)
2448 case ETHTOOL_PHY_DOWNSHIFT:
2449 case ETHTOOL_PHY_FAST_LINK_DOWN:
2450 if (tuna->len != sizeof(u8) ||
2451 tuna->type_id != ETHTOOL_TUNABLE_U8)
2461 static int get_phy_tunable(struct net_device *dev, void __user *useraddr)
2464 struct ethtool_tunable tuna;
2465 struct phy_device *phydev = dev->phydev;
2468 if (!(phydev && phydev->drv && phydev->drv->get_tunable))
2471 if (copy_from_user(&tuna, useraddr, sizeof(tuna)))
2473 ret = ethtool_phy_tunable_valid(&tuna);
2476 data = kmalloc(tuna.len, GFP_USER);
2479 mutex_lock(&phydev->lock);
2480 ret = phydev->drv->get_tunable(phydev, &tuna, data);
2481 mutex_unlock(&phydev->lock);
2484 useraddr += sizeof(tuna);
2486 if (copy_to_user(useraddr, data, tuna.len))
static int set_phy_tunable(struct net_device *dev, void __user *useraddr)
{
	int ret;
	struct ethtool_tunable tuna;
	struct phy_device *phydev = dev->phydev;
	void *data;

	if (!(phydev && phydev->drv && phydev->drv->set_tunable))
		return -EOPNOTSUPP;
	if (copy_from_user(&tuna, useraddr, sizeof(tuna)))
		return -EFAULT;
	ret = ethtool_phy_tunable_valid(&tuna);
	if (ret)
		return ret;
	useraddr += sizeof(tuna);
	data = memdup_user(useraddr, tuna.len);
	if (IS_ERR(data))
		return PTR_ERR(data);
	mutex_lock(&phydev->lock);
	ret = phydev->drv->set_tunable(phydev, &tuna, data);
	mutex_unlock(&phydev->lock);
	kfree(data);

	return ret;
}
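/* Forward Error Correction (ETHTOOL_GFECPARAM/ETHTOOL_SFECPARAM) is passed
 * straight through to the driver's get_fecparam/set_fecparam ops.
 */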
static int ethtool_get_fecparam(struct net_device *dev, void __user *useraddr)
{
	struct ethtool_fecparam fecparam = { .cmd = ETHTOOL_GFECPARAM };
	int rc;

	if (!dev->ethtool_ops->get_fecparam)
		return -EOPNOTSUPP;

	rc = dev->ethtool_ops->get_fecparam(dev, &fecparam);
	if (rc)
		return rc;

	if (copy_to_user(useraddr, &fecparam, sizeof(fecparam)))
		return -EFAULT;
	return 0;
}

static int ethtool_set_fecparam(struct net_device *dev, void __user *useraddr)
{
	struct ethtool_fecparam fecparam;

	if (!dev->ethtool_ops->set_fecparam)
		return -EOPNOTSUPP;

	if (copy_from_user(&fecparam, useraddr, sizeof(fecparam)))
		return -EFAULT;

	return dev->ethtool_ops->set_fecparam(dev, &fecparam);
}
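/* Illustrative only, not part of this file: userspace typically reaches
 * dev_ethtool() through the SIOCETHTOOL ioctl, roughly as follows; "eth0"
 * and fd are placeholders (fd can be e.g. an AF_INET datagram socket):
 *
 *	struct ethtool_value ev = { .cmd = ETHTOOL_GLINK };
 *	struct ifreq ifr = { .ifr_data = (void *)&ev };
 *
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ);
 *	ioctl(fd, SIOCETHTOOL, &ifr);
 */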
/* The main entry point in this file. Called from net/core/dev_ioctl.c */

int dev_ethtool(struct net *net, struct ifreq *ifr)
{
	struct net_device *dev = __dev_get_by_name(net, ifr->ifr_name);
	void __user *useraddr = ifr->ifr_data;
	u32 ethcmd, sub_cmd;
	int rc;
	netdev_features_t old_features;

	if (!dev || !netif_device_present(dev))
		return -ENODEV;

	if (copy_from_user(&ethcmd, useraddr, sizeof(ethcmd)))
		return -EFAULT;

	if (ethcmd == ETHTOOL_PERQUEUE) {
		if (copy_from_user(&sub_cmd, useraddr + sizeof(ethcmd), sizeof(sub_cmd)))
			return -EFAULT;
	} else {
		sub_cmd = ethcmd;
	}
	/* Allow some commands to be done by anyone */
	switch (sub_cmd) {
	case ETHTOOL_GSET:
	case ETHTOOL_GDRVINFO:
	case ETHTOOL_GMSGLVL:
	case ETHTOOL_GLINK:
	case ETHTOOL_GCOALESCE:
	case ETHTOOL_GRINGPARAM:
	case ETHTOOL_GPAUSEPARAM:
	case ETHTOOL_GRXCSUM:
	case ETHTOOL_GTXCSUM:
	case ETHTOOL_GSG:
	case ETHTOOL_GSSET_INFO:
	case ETHTOOL_GSTRINGS:
	case ETHTOOL_GSTATS:
	case ETHTOOL_GPHYSTATS:
	case ETHTOOL_GTSO:
	case ETHTOOL_GPERMADDR:
	case ETHTOOL_GUFO:
	case ETHTOOL_GGSO:
	case ETHTOOL_GGRO:
	case ETHTOOL_GFLAGS:
	case ETHTOOL_GPFLAGS:
	case ETHTOOL_GRXFH:
	case ETHTOOL_GRXRINGS:
	case ETHTOOL_GRXCLSRLCNT:
	case ETHTOOL_GRXCLSRULE:
	case ETHTOOL_GRXCLSRLALL:
	case ETHTOOL_GRXFHINDIR:
	case ETHTOOL_GRSSH:
	case ETHTOOL_GFEATURES:
	case ETHTOOL_GCHANNELS:
	case ETHTOOL_GET_TS_INFO:
	case ETHTOOL_GEEE:
	case ETHTOOL_GTUNABLE:
	case ETHTOOL_PHY_GTUNABLE:
	case ETHTOOL_GLINKSETTINGS:
	case ETHTOOL_GFECPARAM:
		break;
	default:
		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
			return -EPERM;
	}

	if (dev->ethtool_ops->begin) {
		rc = dev->ethtool_ops->begin(dev);
		if (rc < 0)
			return rc;
	}
	old_features = dev->features;
	switch (ethcmd) {
	case ETHTOOL_GSET:
		rc = ethtool_get_settings(dev, useraddr);
		break;
	case ETHTOOL_SSET:
		rc = ethtool_set_settings(dev, useraddr);
		break;
	case ETHTOOL_GDRVINFO:
		rc = ethtool_get_drvinfo(dev, useraddr);
		break;
	case ETHTOOL_GREGS:
		rc = ethtool_get_regs(dev, useraddr);
		break;
	case ETHTOOL_GWOL:
		rc = ethtool_get_wol(dev, useraddr);
		break;
	case ETHTOOL_SWOL:
		rc = ethtool_set_wol(dev, useraddr);
		break;
	case ETHTOOL_GMSGLVL:
		rc = ethtool_get_value(dev, useraddr, ethcmd,
				       dev->ethtool_ops->get_msglevel);
		break;
	case ETHTOOL_SMSGLVL:
		rc = ethtool_set_value_void(dev, useraddr,
					    dev->ethtool_ops->set_msglevel);
		break;
	case ETHTOOL_GEEE:
		rc = ethtool_get_eee(dev, useraddr);
		break;
	case ETHTOOL_SEEE:
		rc = ethtool_set_eee(dev, useraddr);
		break;
	case ETHTOOL_NWAY_RST:
		rc = ethtool_nway_reset(dev);
		break;
	case ETHTOOL_GLINK:
		rc = ethtool_get_link(dev, useraddr);
		break;
	case ETHTOOL_GEEPROM:
		rc = ethtool_get_eeprom(dev, useraddr);
		break;
	case ETHTOOL_SEEPROM:
		rc = ethtool_set_eeprom(dev, useraddr);
		break;
	case ETHTOOL_GCOALESCE:
		rc = ethtool_get_coalesce(dev, useraddr);
		break;
	case ETHTOOL_SCOALESCE:
		rc = ethtool_set_coalesce(dev, useraddr);
		break;
	case ETHTOOL_GRINGPARAM:
		rc = ethtool_get_ringparam(dev, useraddr);
		break;
	case ETHTOOL_SRINGPARAM:
		rc = ethtool_set_ringparam(dev, useraddr);
		break;
	case ETHTOOL_GPAUSEPARAM:
		rc = ethtool_get_pauseparam(dev, useraddr);
		break;
	case ETHTOOL_SPAUSEPARAM:
		rc = ethtool_set_pauseparam(dev, useraddr);
		break;
	case ETHTOOL_TEST:
		rc = ethtool_self_test(dev, useraddr);
		break;
	case ETHTOOL_GSTRINGS:
		rc = ethtool_get_strings(dev, useraddr);
		break;
	case ETHTOOL_PHYS_ID:
		rc = ethtool_phys_id(dev, useraddr);
		break;
	case ETHTOOL_GSTATS:
		rc = ethtool_get_stats(dev, useraddr);
		break;
	case ETHTOOL_GPERMADDR:
		rc = ethtool_get_perm_addr(dev, useraddr);
		break;
	case ETHTOOL_GFLAGS:
		rc = ethtool_get_value(dev, useraddr, ethcmd,
				       __ethtool_get_flags);
		break;
	case ETHTOOL_SFLAGS:
		rc = ethtool_set_value(dev, useraddr, __ethtool_set_flags);
		break;
	case ETHTOOL_GPFLAGS:
		rc = ethtool_get_value(dev, useraddr, ethcmd,
				       dev->ethtool_ops->get_priv_flags);
		break;
	case ETHTOOL_SPFLAGS:
		rc = ethtool_set_value(dev, useraddr,
				       dev->ethtool_ops->set_priv_flags);
		break;
	case ETHTOOL_GRXFH:
	case ETHTOOL_GRXRINGS:
	case ETHTOOL_GRXCLSRLCNT:
	case ETHTOOL_GRXCLSRULE:
	case ETHTOOL_GRXCLSRLALL:
		rc = ethtool_get_rxnfc(dev, ethcmd, useraddr);
		break;
	case ETHTOOL_SRXFH:
	case ETHTOOL_SRXCLSRLDEL:
	case ETHTOOL_SRXCLSRLINS:
		rc = ethtool_set_rxnfc(dev, ethcmd, useraddr);
		break;
	case ETHTOOL_FLASHDEV:
		rc = ethtool_flash_device(dev, useraddr);
		break;
	case ETHTOOL_RESET:
		rc = ethtool_reset(dev, useraddr);
		break;
	case ETHTOOL_GSSET_INFO:
		rc = ethtool_get_sset_info(dev, useraddr);
		break;
	case ETHTOOL_GRXFHINDIR:
		rc = ethtool_get_rxfh_indir(dev, useraddr);
		break;
	case ETHTOOL_SRXFHINDIR:
		rc = ethtool_set_rxfh_indir(dev, useraddr);
		break;
	case ETHTOOL_GRSSH:
		rc = ethtool_get_rxfh(dev, useraddr);
		break;
	case ETHTOOL_SRSSH:
		rc = ethtool_set_rxfh(dev, useraddr);
		break;
	case ETHTOOL_GFEATURES:
		rc = ethtool_get_features(dev, useraddr);
		break;
	case ETHTOOL_SFEATURES:
		rc = ethtool_set_features(dev, useraddr);
		break;
	case ETHTOOL_GTXCSUM:
	case ETHTOOL_GRXCSUM:
	case ETHTOOL_GSG:
	case ETHTOOL_GTSO:
	case ETHTOOL_GGSO:
	case ETHTOOL_GGRO:
		rc = ethtool_get_one_feature(dev, useraddr, ethcmd);
		break;
	case ETHTOOL_STXCSUM:
	case ETHTOOL_SRXCSUM:
	case ETHTOOL_SSG:
	case ETHTOOL_STSO:
	case ETHTOOL_SGSO:
	case ETHTOOL_SGRO:
		rc = ethtool_set_one_feature(dev, useraddr, ethcmd);
		break;
	case ETHTOOL_GCHANNELS:
		rc = ethtool_get_channels(dev, useraddr);
		break;
	case ETHTOOL_SCHANNELS:
		rc = ethtool_set_channels(dev, useraddr);
		break;
	case ETHTOOL_SET_DUMP:
		rc = ethtool_set_dump(dev, useraddr);
		break;
	case ETHTOOL_GET_DUMP_FLAG:
		rc = ethtool_get_dump_flag(dev, useraddr);
		break;
	case ETHTOOL_GET_DUMP_DATA:
		rc = ethtool_get_dump_data(dev, useraddr);
		break;
	case ETHTOOL_GET_TS_INFO:
		rc = ethtool_get_ts_info(dev, useraddr);
		break;
	case ETHTOOL_GMODULEINFO:
		rc = ethtool_get_module_info(dev, useraddr);
		break;
	case ETHTOOL_GMODULEEEPROM:
		rc = ethtool_get_module_eeprom(dev, useraddr);
		break;
	case ETHTOOL_GTUNABLE:
		rc = ethtool_get_tunable(dev, useraddr);
		break;
	case ETHTOOL_STUNABLE:
		rc = ethtool_set_tunable(dev, useraddr);
		break;
	case ETHTOOL_GPHYSTATS:
		rc = ethtool_get_phy_stats(dev, useraddr);
		break;
	case ETHTOOL_PERQUEUE:
		rc = ethtool_set_per_queue(dev, useraddr, sub_cmd);
		break;
	case ETHTOOL_GLINKSETTINGS:
		rc = ethtool_get_link_ksettings(dev, useraddr);
		break;
	case ETHTOOL_SLINKSETTINGS:
		rc = ethtool_set_link_ksettings(dev, useraddr);
		break;
	case ETHTOOL_PHY_GTUNABLE:
		rc = get_phy_tunable(dev, useraddr);
		break;
	case ETHTOOL_PHY_STUNABLE:
		rc = set_phy_tunable(dev, useraddr);
		break;
	case ETHTOOL_GFECPARAM:
		rc = ethtool_get_fecparam(dev, useraddr);
		break;
	case ETHTOOL_SFECPARAM:
		rc = ethtool_set_fecparam(dev, useraddr);
		break;
	default:
		rc = -EOPNOTSUPP;
	}

	if (dev->ethtool_ops->complete)
		dev->ethtool_ops->complete(dev);

	if (old_features != dev->features)
		netdev_features_change(dev);

	return rc;
}
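/* Translation from legacy ethtool_rx_flow_spec classification rules into the
 * generic flow_rule representation, so that drivers can reuse their
 * cls_flower/flow offload parsers for ethtool NFC rules.
 */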
struct ethtool_rx_flow_key {
	struct flow_dissector_key_basic			basic;
	union {
		struct flow_dissector_key_ipv4_addrs	ipv4;
		struct flow_dissector_key_ipv6_addrs	ipv6;
	};
	struct flow_dissector_key_ports			tp;
	struct flow_dissector_key_ip			ip;
	struct flow_dissector_key_vlan			vlan;
	struct flow_dissector_key_eth_addrs		eth_addrs;
} __aligned(BITS_PER_LONG / 8); /* Ensure that we can do comparisons as longs. */
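/* Dissector plus key/mask pair live in one allocation appended to the rule
 * (the priv[] area of struct ethtool_rx_flow_rule); the flow_rule match
 * pointers are wired up to point into it in ethtool_rx_flow_rule_create().
 */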
struct ethtool_rx_flow_match {
	struct flow_dissector		dissector;
	struct ethtool_rx_flow_key	key;
	struct ethtool_rx_flow_key	mask;
};
struct ethtool_rx_flow_rule *
ethtool_rx_flow_rule_create(const struct ethtool_rx_flow_spec_input *input)
{
	const struct ethtool_rx_flow_spec *fs = input->fs;
	static struct in6_addr zero_addr = {};
	struct ethtool_rx_flow_match *match;
	struct ethtool_rx_flow_rule *flow;
	struct flow_action_entry *act;

	flow = kzalloc(sizeof(struct ethtool_rx_flow_rule) +
		       sizeof(struct ethtool_rx_flow_match), GFP_KERNEL);
	if (!flow)
		return ERR_PTR(-ENOMEM);

	/* ethtool_rx supports only a single action per rule. */
	flow->rule = flow_rule_alloc(1);
	if (!flow->rule) {
		kfree(flow);
		return ERR_PTR(-ENOMEM);
	}

	match = (struct ethtool_rx_flow_match *)flow->priv;
	flow->rule->match.dissector	= &match->dissector;
	flow->rule->match.mask		= &match->mask;
	flow->rule->match.key		= &match->key;

	match->mask.basic.n_proto = htons(0xffff);
	switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT | FLOW_RSS)) {
	case ETHER_FLOW: {
		const struct ethhdr *ether_spec, *ether_m_spec;

		ether_spec = &fs->h_u.ether_spec;
		ether_m_spec = &fs->m_u.ether_spec;

		if (!is_zero_ether_addr(ether_m_spec->h_source)) {
			ether_addr_copy(match->key.eth_addrs.src,
					ether_spec->h_source);
			ether_addr_copy(match->mask.eth_addrs.src,
					ether_m_spec->h_source);
		}
		if (!is_zero_ether_addr(ether_m_spec->h_dest)) {
			ether_addr_copy(match->key.eth_addrs.dst,
					ether_spec->h_dest);
			ether_addr_copy(match->mask.eth_addrs.dst,
					ether_m_spec->h_dest);
		}
		if (ether_m_spec->h_proto) {
			match->key.basic.n_proto = ether_spec->h_proto;
			match->mask.basic.n_proto = ether_m_spec->h_proto;
		}
		}
		break;
	case TCP_V4_FLOW:
	case UDP_V4_FLOW: {
		const struct ethtool_tcpip4_spec *v4_spec, *v4_m_spec;

		match->key.basic.n_proto = htons(ETH_P_IP);

		v4_spec = &fs->h_u.tcp_ip4_spec;
		v4_m_spec = &fs->m_u.tcp_ip4_spec;

		if (v4_m_spec->ip4src) {
			match->key.ipv4.src = v4_spec->ip4src;
			match->mask.ipv4.src = v4_m_spec->ip4src;
		}
		if (v4_m_spec->ip4dst) {
			match->key.ipv4.dst = v4_spec->ip4dst;
			match->mask.ipv4.dst = v4_m_spec->ip4dst;
		}
		if (v4_m_spec->ip4src ||
		    v4_m_spec->ip4dst) {
			match->dissector.used_keys |=
				BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS);
			match->dissector.offset[FLOW_DISSECTOR_KEY_IPV4_ADDRS] =
				offsetof(struct ethtool_rx_flow_key, ipv4);
		}
		if (v4_m_spec->psrc) {
			match->key.tp.src = v4_spec->psrc;
			match->mask.tp.src = v4_m_spec->psrc;
		}
		if (v4_m_spec->pdst) {
			match->key.tp.dst = v4_spec->pdst;
			match->mask.tp.dst = v4_m_spec->pdst;
		}
		if (v4_m_spec->psrc ||
		    v4_m_spec->pdst) {
			match->dissector.used_keys |=
				BIT(FLOW_DISSECTOR_KEY_PORTS);
			match->dissector.offset[FLOW_DISSECTOR_KEY_PORTS] =
				offsetof(struct ethtool_rx_flow_key, tp);
		}
		if (v4_m_spec->tos) {
			match->key.ip.tos = v4_spec->tos;
			match->mask.ip.tos = v4_m_spec->tos;
			match->dissector.used_keys |=
				BIT(FLOW_DISSECTOR_KEY_IP);
			match->dissector.offset[FLOW_DISSECTOR_KEY_IP] =
				offsetof(struct ethtool_rx_flow_key, ip);
		}
		}
		break;
	case TCP_V6_FLOW:
	case UDP_V6_FLOW: {
		const struct ethtool_tcpip6_spec *v6_spec, *v6_m_spec;

		match->key.basic.n_proto = htons(ETH_P_IPV6);

		v6_spec = &fs->h_u.tcp_ip6_spec;
		v6_m_spec = &fs->m_u.tcp_ip6_spec;
		if (memcmp(v6_m_spec->ip6src, &zero_addr, sizeof(zero_addr))) {
			memcpy(&match->key.ipv6.src, v6_spec->ip6src,
			       sizeof(match->key.ipv6.src));
			memcpy(&match->mask.ipv6.src, v6_m_spec->ip6src,
			       sizeof(match->mask.ipv6.src));
		}
		if (memcmp(v6_m_spec->ip6dst, &zero_addr, sizeof(zero_addr))) {
			memcpy(&match->key.ipv6.dst, v6_spec->ip6dst,
			       sizeof(match->key.ipv6.dst));
			memcpy(&match->mask.ipv6.dst, v6_m_spec->ip6dst,
			       sizeof(match->mask.ipv6.dst));
		}
		if (memcmp(v6_m_spec->ip6src, &zero_addr, sizeof(zero_addr)) ||
		    memcmp(v6_m_spec->ip6dst, &zero_addr, sizeof(zero_addr))) {
			match->dissector.used_keys |=
				BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS);
			match->dissector.offset[FLOW_DISSECTOR_KEY_IPV6_ADDRS] =
				offsetof(struct ethtool_rx_flow_key, ipv6);
		}
		if (v6_m_spec->psrc) {
			match->key.tp.src = v6_spec->psrc;
			match->mask.tp.src = v6_m_spec->psrc;
		}
		if (v6_m_spec->pdst) {
			match->key.tp.dst = v6_spec->pdst;
			match->mask.tp.dst = v6_m_spec->pdst;
		}
		if (v6_m_spec->psrc ||
		    v6_m_spec->pdst) {
			match->dissector.used_keys |=
				BIT(FLOW_DISSECTOR_KEY_PORTS);
			match->dissector.offset[FLOW_DISSECTOR_KEY_PORTS] =
				offsetof(struct ethtool_rx_flow_key, tp);
		}
		if (v6_m_spec->tclass) {
			match->key.ip.tos = v6_spec->tclass;
			match->mask.ip.tos = v6_m_spec->tclass;
			match->dissector.used_keys |=
				BIT(FLOW_DISSECTOR_KEY_IP);
			match->dissector.offset[FLOW_DISSECTOR_KEY_IP] =
				offsetof(struct ethtool_rx_flow_key, ip);
		}
		}
		break;
	default:
		ethtool_rx_flow_rule_destroy(flow);
		return ERR_PTR(-EINVAL);
	}
	switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT | FLOW_RSS)) {
	case TCP_V4_FLOW:
	case TCP_V6_FLOW:
		match->key.basic.ip_proto = IPPROTO_TCP;
		break;
	case UDP_V4_FLOW:
	case UDP_V6_FLOW:
		match->key.basic.ip_proto = IPPROTO_UDP;
		break;
	}
	match->mask.basic.ip_proto = 0xff;

	match->dissector.used_keys |= BIT(FLOW_DISSECTOR_KEY_BASIC);
	match->dissector.offset[FLOW_DISSECTOR_KEY_BASIC] =
		offsetof(struct ethtool_rx_flow_key, basic);
	if (fs->flow_type & FLOW_EXT) {
		const struct ethtool_flow_ext *ext_h_spec = &fs->h_ext;
		const struct ethtool_flow_ext *ext_m_spec = &fs->m_ext;

		if (ext_m_spec->vlan_etype) {
			match->key.vlan.vlan_tpid = ext_h_spec->vlan_etype;
			match->mask.vlan.vlan_tpid = ext_m_spec->vlan_etype;
		}

		if (ext_m_spec->vlan_tci) {
			match->key.vlan.vlan_id =
				ntohs(ext_h_spec->vlan_tci) & 0x0fff;
			match->mask.vlan.vlan_id =
				ntohs(ext_m_spec->vlan_tci) & 0x0fff;

			match->key.vlan.vlan_dei =
				!!(ext_h_spec->vlan_tci & htons(0x1000));
			match->mask.vlan.vlan_dei =
				!!(ext_m_spec->vlan_tci & htons(0x1000));

			match->key.vlan.vlan_priority =
				(ntohs(ext_h_spec->vlan_tci) & 0xe000) >> 13;
			match->mask.vlan.vlan_priority =
				(ntohs(ext_m_spec->vlan_tci) & 0xe000) >> 13;
		}

		if (ext_m_spec->vlan_etype ||
		    ext_m_spec->vlan_tci) {
			match->dissector.used_keys |=
				BIT(FLOW_DISSECTOR_KEY_VLAN);
			match->dissector.offset[FLOW_DISSECTOR_KEY_VLAN] =
				offsetof(struct ethtool_rx_flow_key, vlan);
		}
	}
	if (fs->flow_type & FLOW_MAC_EXT) {
		const struct ethtool_flow_ext *ext_h_spec = &fs->h_ext;
		const struct ethtool_flow_ext *ext_m_spec = &fs->m_ext;

		memcpy(match->key.eth_addrs.dst, ext_h_spec->h_dest,
		       ETH_ALEN);
		memcpy(match->mask.eth_addrs.dst, ext_m_spec->h_dest,
		       ETH_ALEN);

		match->dissector.used_keys |=
			BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS);
		match->dissector.offset[FLOW_DISSECTOR_KEY_ETH_ADDRS] =
			offsetof(struct ethtool_rx_flow_key, eth_addrs);
	}
	act = &flow->rule->action.entries[0];
	switch (fs->ring_cookie) {
	case RX_CLS_FLOW_DISC:
		act->id = FLOW_ACTION_DROP;
		break;
	case RX_CLS_FLOW_WAKE:
		act->id = FLOW_ACTION_WAKE;
		break;
	default:
		act->id = FLOW_ACTION_QUEUE;
		if (fs->flow_type & FLOW_RSS)
			act->queue.ctx = input->rss_ctx;

		act->queue.vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie);
		act->queue.index = ethtool_get_flow_spec_ring(fs->ring_cookie);
		break;
	}

	return flow;
}
EXPORT_SYMBOL(ethtool_rx_flow_rule_create);
void ethtool_rx_flow_rule_destroy(struct ethtool_rx_flow_rule *flow)
{
	kfree(flow->rule);
	kfree(flow);
}
EXPORT_SYMBOL(ethtool_rx_flow_rule_destroy);
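/* Illustrative only, not compiled here: a driver that already parses
 * cls_flower-style flow_rule matches can reuse that code for an ethtool
 * ETHTOOL_SRXCLSRLINS request roughly as follows, where info is the
 * struct ethtool_rxnfc command and foo_install_flow_rule() is a
 * hypothetical driver helper:
 *
 *	struct ethtool_rx_flow_spec_input input = {
 *		.fs		= &info->fs,
 *		.rss_ctx	= info->rss_context,
 *	};
 *	struct ethtool_rx_flow_rule *flow;
 *	int err;
 *
 *	flow = ethtool_rx_flow_rule_create(&input);
 *	if (IS_ERR(flow))
 *		return PTR_ERR(flow);
 *	err = foo_install_flow_rule(priv, flow->rule);
 *	ethtool_rx_flow_rule_destroy(flow);
 *	return err;
 */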