// SPDX-License-Identifier: GPL-2.0-only

#include <linux/netdevice.h>
#include <linux/notifier.h>
#include <linux/rtnetlink.h>
#include <net/busy_poll.h>
#include <net/net_namespace.h>
#include <net/netdev_queues.h>
#include <net/netdev_rx_queue.h>
#include <net/sock.h>
#include <net/xdp.h>
#include <net/xdp_sock.h>

#include "dev.h"
#include "devmem.h"
#include "netdev-genl-gen.h"

struct netdev_nl_dump_ctx {
	unsigned long	ifindex;
	unsigned int	rxq_idx;
	unsigned int	txq_idx;
	unsigned int	napi_id;
};

static struct netdev_nl_dump_ctx *netdev_dump_ctx(struct netlink_callback *cb)
{
	NL_ASSERT_CTX_FITS(struct netdev_nl_dump_ctx);

	return (struct netdev_nl_dump_ctx *)cb->ctx;
}

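/* Fill one DEV message: the device's ifindex plus its XDP feature mask,
 * the XDP RX metadata kfuncs it implements (collected by expanding the
 * XDP_METADATA_KFUNC_xxx list below), and its AF_XDP TX metadata
 * capabilities.
 */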
static int
netdev_nl_dev_fill(struct net_device *netdev, struct sk_buff *rsp,
		   const struct genl_info *info)
{
	u64 xsk_features = 0;
	u64 xdp_rx_meta = 0;
	void *hdr;

	hdr = genlmsg_iput(rsp, info);
	if (!hdr)
		return -EMSGSIZE;

#define XDP_METADATA_KFUNC(_, flag, __, xmo) \
	if (netdev->xdp_metadata_ops && netdev->xdp_metadata_ops->xmo) \
		xdp_rx_meta |= flag;
XDP_METADATA_KFUNC_xxx
#undef XDP_METADATA_KFUNC

	if (netdev->xsk_tx_metadata_ops) {
		if (netdev->xsk_tx_metadata_ops->tmo_fill_timestamp)
			xsk_features |= NETDEV_XSK_FLAGS_TX_TIMESTAMP;
		if (netdev->xsk_tx_metadata_ops->tmo_request_checksum)
			xsk_features |= NETDEV_XSK_FLAGS_TX_CHECKSUM;
	}

	if (nla_put_u32(rsp, NETDEV_A_DEV_IFINDEX, netdev->ifindex) ||
	    nla_put_u64_64bit(rsp, NETDEV_A_DEV_XDP_FEATURES,
			      netdev->xdp_features, NETDEV_A_DEV_PAD) ||
	    nla_put_u64_64bit(rsp, NETDEV_A_DEV_XDP_RX_METADATA_FEATURES,
			      xdp_rx_meta, NETDEV_A_DEV_PAD) ||
	    nla_put_u64_64bit(rsp, NETDEV_A_DEV_XSK_FEATURES,
			      xsk_features, NETDEV_A_DEV_PAD))
		goto err_cancel_msg;

	if (netdev->xdp_features & NETDEV_XDP_ACT_XSK_ZEROCOPY) {
		if (nla_put_u32(rsp, NETDEV_A_DEV_XDP_ZC_MAX_SEGS,
				netdev->xdp_zc_max_segs))
			goto err_cancel_msg;
	}

	genlmsg_end(rsp, hdr);

	return 0;

err_cancel_msg:
	genlmsg_cancel(rsp, hdr);
	return -EMSGSIZE;
}

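/* Multicast a device add/del/change notification to NETDEV_NLGRP_MGMT.
 * Bail out early, before allocating the message, when nobody is
 * subscribed to the group.
 */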
static void
netdev_genl_dev_notify(struct net_device *netdev, int cmd)
{
	struct genl_info info;
	struct sk_buff *ntf;

	if (!genl_has_listeners(&netdev_nl_family, dev_net(netdev),
				NETDEV_NLGRP_MGMT))
		return;

	genl_info_init_ntf(&info, &netdev_nl_family, cmd);

	ntf = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!ntf)
		return;

	if (netdev_nl_dev_fill(netdev, ntf, &info)) {
		nlmsg_free(ntf);
		return;
	}

	genlmsg_multicast_netns(&netdev_nl_family, dev_net(netdev), ntf,
				0, NETDEV_NLGRP_MGMT, GFP_KERNEL);
}

int netdev_nl_dev_get_doit(struct sk_buff *skb, struct genl_info *info)
{
	struct net_device *netdev;
	struct sk_buff *rsp;
	u32 ifindex;
	int err;

	if (GENL_REQ_ATTR_CHECK(info, NETDEV_A_DEV_IFINDEX))
		return -EINVAL;

	ifindex = nla_get_u32(info->attrs[NETDEV_A_DEV_IFINDEX]);

	rsp = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!rsp)
		return -ENOMEM;

	rtnl_lock();

	netdev = __dev_get_by_index(genl_info_net(info), ifindex);
	if (netdev)
		err = netdev_nl_dev_fill(netdev, rsp, info);
	else
		err = -ENODEV;

	rtnl_unlock();

	if (err)
		goto err_free_msg;

	return genlmsg_reply(rsp, info);

err_free_msg:
	nlmsg_free(rsp);
	return err;
}

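/* Dump handler: resumes from ctx->ifindex, which for_each_netdev_dump()
 * advances as it walks the per-netns device list across partial dumps.
 */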
int netdev_nl_dev_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct netdev_nl_dump_ctx *ctx = netdev_dump_ctx(cb);
	struct net *net = sock_net(skb->sk);
	struct net_device *netdev;
	int err = 0;

	rtnl_lock();
	for_each_netdev_dump(net, netdev, ctx->ifindex) {
		err = netdev_nl_dev_fill(netdev, skb, genl_info_dump(cb));
		if (err < 0)
			break;
	}
	rtnl_unlock();

	return err;
}

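/* Fill one NAPI message. NAPIs of devices that are administratively
 * down are skipped silently (return 0), so a doit caller must detect an
 * empty reply rather than rely on an error code.
 */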
static int
netdev_nl_napi_fill_one(struct sk_buff *rsp, struct napi_struct *napi,
			const struct genl_info *info)
{
	unsigned long irq_suspend_timeout;
	unsigned long gro_flush_timeout;
	u32 napi_defer_hard_irqs;
	void *hdr;
	pid_t pid;

	if (WARN_ON_ONCE(!napi->dev))
		return -EINVAL;
	if (!(napi->dev->flags & IFF_UP))
		return 0;

	hdr = genlmsg_iput(rsp, info);
	if (!hdr)
		return -EMSGSIZE;

	if (napi->napi_id >= MIN_NAPI_ID &&
	    nla_put_u32(rsp, NETDEV_A_NAPI_ID, napi->napi_id))
		goto nla_put_failure;

	if (nla_put_u32(rsp, NETDEV_A_NAPI_IFINDEX, napi->dev->ifindex))
		goto nla_put_failure;

	if (napi->irq >= 0 && nla_put_u32(rsp, NETDEV_A_NAPI_IRQ, napi->irq))
		goto nla_put_failure;

	if (napi->thread) {
		pid = task_pid_nr(napi->thread);
		if (nla_put_u32(rsp, NETDEV_A_NAPI_PID, pid))
			goto nla_put_failure;
	}

	napi_defer_hard_irqs = napi_get_defer_hard_irqs(napi);
	if (nla_put_s32(rsp, NETDEV_A_NAPI_DEFER_HARD_IRQS,
			napi_defer_hard_irqs))
		goto nla_put_failure;

	irq_suspend_timeout = napi_get_irq_suspend_timeout(napi);
	if (nla_put_uint(rsp, NETDEV_A_NAPI_IRQ_SUSPEND_TIMEOUT,
			 irq_suspend_timeout))
		goto nla_put_failure;

	gro_flush_timeout = napi_get_gro_flush_timeout(napi);
	if (nla_put_uint(rsp, NETDEV_A_NAPI_GRO_FLUSH_TIMEOUT,
			 gro_flush_timeout))
		goto nla_put_failure;

	genlmsg_end(rsp, hdr);

	return 0;

nla_put_failure:
	genlmsg_cancel(rsp, hdr);
	return -EMSGSIZE;
}

int netdev_nl_napi_get_doit(struct sk_buff *skb, struct genl_info *info)
{
	struct napi_struct *napi;
	struct sk_buff *rsp;
	u32 napi_id;
	int err;

	if (GENL_REQ_ATTR_CHECK(info, NETDEV_A_NAPI_ID))
		return -EINVAL;

	napi_id = nla_get_u32(info->attrs[NETDEV_A_NAPI_ID]);

	rsp = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!rsp)
		return -ENOMEM;

	rtnl_lock();
	rcu_read_lock();

	napi = napi_by_id(napi_id);
	if (napi) {
		err = netdev_nl_napi_fill_one(rsp, napi, info);
	} else {
		NL_SET_BAD_ATTR(info->extack, info->attrs[NETDEV_A_NAPI_ID]);
		err = -ENOENT;
	}

	rcu_read_unlock();
	rtnl_unlock();

	if (err) {
		goto err_free_msg;
	} else if (!rsp->len) {
		err = -ENOENT;
		goto err_free_msg;
	}

	return genlmsg_reply(rsp, info);

err_free_msg:
	nlmsg_free(rsp);
	return err;
}

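/* netdev->napi_list is kept ordered by descending napi_id (new NAPIs
 * are added at the head), so entries with an id >= ctx->napi_id have
 * already been dumped; record the last id written so a later dump pass
 * resumes where this one stopped.
 */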
static int
netdev_nl_napi_dump_one(struct net_device *netdev, struct sk_buff *rsp,
			const struct genl_info *info,
			struct netdev_nl_dump_ctx *ctx)
{
	struct napi_struct *napi;
	int err = 0;

	if (!(netdev->flags & IFF_UP))
		return err;

	list_for_each_entry(napi, &netdev->napi_list, dev_list) {
		if (ctx->napi_id && napi->napi_id >= ctx->napi_id)
			continue;

		err = netdev_nl_napi_fill_one(rsp, napi, info);
		if (err)
			return err;
		ctx->napi_id = napi->napi_id;
	}
	return err;
}

int netdev_nl_napi_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct netdev_nl_dump_ctx *ctx = netdev_dump_ctx(cb);
	const struct genl_info *info = genl_info_dump(cb);
	struct net *net = sock_net(skb->sk);
	struct net_device *netdev;
	u32 ifindex = 0;
	int err = 0;

	if (info->attrs[NETDEV_A_NAPI_IFINDEX])
		ifindex = nla_get_u32(info->attrs[NETDEV_A_NAPI_IFINDEX]);

	rtnl_lock();
	if (ifindex) {
		netdev = __dev_get_by_index(net, ifindex);
		if (netdev)
			err = netdev_nl_napi_dump_one(netdev, skb, info, ctx);
		else
			err = -ENODEV;
	} else {
		for_each_netdev_dump(net, netdev, ctx->ifindex) {
			err = netdev_nl_napi_dump_one(netdev, skb, info, ctx);
			if (err < 0)
				break;
			ctx->napi_id = 0;
		}
	}
	rtnl_unlock();

	return err;
}

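/* Apply whichever optional per-NAPI settings the request carries;
 * attributes that were not supplied leave the current value untouched.
 */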
static int
netdev_nl_napi_set_config(struct napi_struct *napi, struct genl_info *info)
{
	u64 irq_suspend_timeout = 0;
	u64 gro_flush_timeout = 0;
	u32 defer = 0;

	if (info->attrs[NETDEV_A_NAPI_DEFER_HARD_IRQS]) {
		defer = nla_get_u32(info->attrs[NETDEV_A_NAPI_DEFER_HARD_IRQS]);
		napi_set_defer_hard_irqs(napi, defer);
	}

	if (info->attrs[NETDEV_A_NAPI_IRQ_SUSPEND_TIMEOUT]) {
		irq_suspend_timeout = nla_get_uint(info->attrs[NETDEV_A_NAPI_IRQ_SUSPEND_TIMEOUT]);
		napi_set_irq_suspend_timeout(napi, irq_suspend_timeout);
	}

	if (info->attrs[NETDEV_A_NAPI_GRO_FLUSH_TIMEOUT]) {
		gro_flush_timeout = nla_get_uint(info->attrs[NETDEV_A_NAPI_GRO_FLUSH_TIMEOUT]);
		napi_set_gro_flush_timeout(napi, gro_flush_timeout);
	}

	return 0;
}

int netdev_nl_napi_set_doit(struct sk_buff *skb, struct genl_info *info)
{
	struct napi_struct *napi;
	unsigned int napi_id;
	int err;

	if (GENL_REQ_ATTR_CHECK(info, NETDEV_A_NAPI_ID))
		return -EINVAL;

	napi_id = nla_get_u32(info->attrs[NETDEV_A_NAPI_ID]);

	rtnl_lock();
	rcu_read_lock();

	napi = napi_by_id(napi_id);
	if (napi) {
		err = netdev_nl_napi_set_config(napi, info);
	} else {
		NL_SET_BAD_ATTR(info->extack, info->attrs[NETDEV_A_NAPI_ID]);
		err = -ENOENT;
	}

	rcu_read_unlock();
	rtnl_unlock();

	return err;
}

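/* Fill one queue message: id, type and ifindex, plus the associated
 * NAPI id and, for RX queues with a dmabuf memory provider attached,
 * the dmabuf binding id.
 */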
static int
netdev_nl_queue_fill_one(struct sk_buff *rsp, struct net_device *netdev,
			 u32 q_idx, u32 q_type, const struct genl_info *info)
{
	struct net_devmem_dmabuf_binding *binding;
	struct netdev_rx_queue *rxq;
	struct netdev_queue *txq;
	void *hdr;

	hdr = genlmsg_iput(rsp, info);
	if (!hdr)
		return -EMSGSIZE;

	if (nla_put_u32(rsp, NETDEV_A_QUEUE_ID, q_idx) ||
	    nla_put_u32(rsp, NETDEV_A_QUEUE_TYPE, q_type) ||
	    nla_put_u32(rsp, NETDEV_A_QUEUE_IFINDEX, netdev->ifindex))
		goto nla_put_failure;

	switch (q_type) {
	case NETDEV_QUEUE_TYPE_RX:
		rxq = __netif_get_rx_queue(netdev, q_idx);
		if (rxq->napi && nla_put_u32(rsp, NETDEV_A_QUEUE_NAPI_ID,
					     rxq->napi->napi_id))
			goto nla_put_failure;

		binding = rxq->mp_params.mp_priv;
		if (binding &&
		    nla_put_u32(rsp, NETDEV_A_QUEUE_DMABUF, binding->id))
			goto nla_put_failure;

		break;
	case NETDEV_QUEUE_TYPE_TX:
		txq = netdev_get_tx_queue(netdev, q_idx);
		if (txq->napi && nla_put_u32(rsp, NETDEV_A_QUEUE_NAPI_ID,
					     txq->napi->napi_id))
			goto nla_put_failure;
	}

	genlmsg_end(rsp, hdr);

	return 0;

nla_put_failure:
	genlmsg_cancel(rsp, hdr);
	return -EMSGSIZE;
}

static int netdev_nl_queue_validate(struct net_device *netdev, u32 q_id,
				    u32 q_type)
{
	switch (q_type) {
	case NETDEV_QUEUE_TYPE_RX:
		if (q_id >= netdev->real_num_rx_queues)
			return -EINVAL;
		return 0;
	case NETDEV_QUEUE_TYPE_TX:
		if (q_id >= netdev->real_num_tx_queues)
			return -EINVAL;
	}

	return 0;
}

static int
netdev_nl_queue_fill(struct sk_buff *rsp, struct net_device *netdev, u32 q_idx,
		     u32 q_type, const struct genl_info *info)
{
	int err;

	if (!(netdev->flags & IFF_UP))
		return -ENOENT;

	err = netdev_nl_queue_validate(netdev, q_idx, q_type);
	if (err)
		return err;

	return netdev_nl_queue_fill_one(rsp, netdev, q_idx, q_type, info);
}

int netdev_nl_queue_get_doit(struct sk_buff *skb, struct genl_info *info)
{
	u32 q_id, q_type, ifindex;
	struct net_device *netdev;
	struct sk_buff *rsp;
	int err;

	if (GENL_REQ_ATTR_CHECK(info, NETDEV_A_QUEUE_ID) ||
	    GENL_REQ_ATTR_CHECK(info, NETDEV_A_QUEUE_TYPE) ||
	    GENL_REQ_ATTR_CHECK(info, NETDEV_A_QUEUE_IFINDEX))
		return -EINVAL;

	q_id = nla_get_u32(info->attrs[NETDEV_A_QUEUE_ID]);
	q_type = nla_get_u32(info->attrs[NETDEV_A_QUEUE_TYPE]);
	ifindex = nla_get_u32(info->attrs[NETDEV_A_QUEUE_IFINDEX]);

	rsp = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!rsp)
		return -ENOMEM;

	rtnl_lock();

	netdev = __dev_get_by_index(genl_info_net(info), ifindex);
	if (netdev)
		err = netdev_nl_queue_fill(rsp, netdev, q_id, q_type, info);
	else
		err = -ENODEV;

	rtnl_unlock();

	if (err)
		goto err_free_msg;

	return genlmsg_reply(rsp, info);

err_free_msg:
	nlmsg_free(rsp);
	return err;
}

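/* Dump all queues of one device, RX first then TX; ctx->rxq_idx and
 * ctx->txq_idx checkpoint the position so a partial dump can resume
 * mid-device.
 */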
static int
netdev_nl_queue_dump_one(struct net_device *netdev, struct sk_buff *rsp,
			 const struct genl_info *info,
			 struct netdev_nl_dump_ctx *ctx)
{
	int err = 0;

	if (!(netdev->flags & IFF_UP))
		return err;

	for (; ctx->rxq_idx < netdev->real_num_rx_queues; ctx->rxq_idx++) {
		err = netdev_nl_queue_fill_one(rsp, netdev, ctx->rxq_idx,
					       NETDEV_QUEUE_TYPE_RX, info);
		if (err)
			return err;
	}
	for (; ctx->txq_idx < netdev->real_num_tx_queues; ctx->txq_idx++) {
		err = netdev_nl_queue_fill_one(rsp, netdev, ctx->txq_idx,
					       NETDEV_QUEUE_TYPE_TX, info);
		if (err)
			return err;
	}

	return err;
}

int netdev_nl_queue_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct netdev_nl_dump_ctx *ctx = netdev_dump_ctx(cb);
	const struct genl_info *info = genl_info_dump(cb);
	struct net *net = sock_net(skb->sk);
	struct net_device *netdev;
	u32 ifindex = 0;
	int err = 0;

	if (info->attrs[NETDEV_A_QUEUE_IFINDEX])
		ifindex = nla_get_u32(info->attrs[NETDEV_A_QUEUE_IFINDEX]);

	rtnl_lock();
	if (ifindex) {
		netdev = __dev_get_by_index(net, ifindex);
		if (netdev)
			err = netdev_nl_queue_dump_one(netdev, skb, info, ctx);
		else
			err = -ENODEV;
	} else {
		for_each_netdev_dump(net, netdev, ctx->ifindex) {
			err = netdev_nl_queue_dump_one(netdev, skb, info, ctx);
			if (err < 0)
				break;
			ctx->rxq_idx = 0;
			ctx->txq_idx = 0;
		}
	}
	rtnl_unlock();

	return err;
}

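/* Queue stats use ~0 as the "not reported" sentinel: both operands of
 * an addition must be set for the sum to stay meaningful, and unset
 * values are simply skipped when the message is written out.
 */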
#define NETDEV_STAT_NOT_SET		(~0ULL)

static void netdev_nl_stats_add(void *_sum, const void *_add, size_t size)
{
	const u64 *add = _add;
	u64 *sum = _sum;

	while (size) {
		if (*add != NETDEV_STAT_NOT_SET && *sum != NETDEV_STAT_NOT_SET)
			*sum += *add;
		sum++;
		add++;
		size -= 8;
	}
}

static int netdev_stat_put(struct sk_buff *rsp, unsigned int attr_id, u64 value)
{
	if (value == NETDEV_STAT_NOT_SET)
		return 0;
	return nla_put_uint(rsp, attr_id, value);
}

static int
netdev_nl_stats_write_rx(struct sk_buff *rsp, struct netdev_queue_stats_rx *rx)
{
	if (netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_PACKETS, rx->packets) ||
	    netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_BYTES, rx->bytes) ||
	    netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_ALLOC_FAIL, rx->alloc_fail) ||
	    netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_HW_DROPS, rx->hw_drops) ||
	    netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_HW_DROP_OVERRUNS, rx->hw_drop_overruns) ||
	    netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_CSUM_UNNECESSARY, rx->csum_unnecessary) ||
	    netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_CSUM_NONE, rx->csum_none) ||
	    netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_CSUM_BAD, rx->csum_bad) ||
	    netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_HW_GRO_PACKETS, rx->hw_gro_packets) ||
	    netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_HW_GRO_BYTES, rx->hw_gro_bytes) ||
	    netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_HW_GRO_WIRE_PACKETS, rx->hw_gro_wire_packets) ||
	    netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_HW_GRO_WIRE_BYTES, rx->hw_gro_wire_bytes) ||
	    netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_HW_DROP_RATELIMITS, rx->hw_drop_ratelimits))
		return -EMSGSIZE;
	return 0;
}

static int
netdev_nl_stats_write_tx(struct sk_buff *rsp, struct netdev_queue_stats_tx *tx)
{
	if (netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_PACKETS, tx->packets) ||
	    netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_BYTES, tx->bytes) ||
	    netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_HW_DROPS, tx->hw_drops) ||
	    netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_HW_DROP_ERRORS, tx->hw_drop_errors) ||
	    netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_CSUM_NONE, tx->csum_none) ||
	    netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_NEEDS_CSUM, tx->needs_csum) ||
	    netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_HW_GSO_PACKETS, tx->hw_gso_packets) ||
	    netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_HW_GSO_BYTES, tx->hw_gso_bytes) ||
	    netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_HW_GSO_WIRE_PACKETS, tx->hw_gso_wire_packets) ||
	    netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_HW_GSO_WIRE_BYTES, tx->hw_gso_wire_bytes) ||
	    netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_HW_DROP_RATELIMITS, tx->hw_drop_ratelimits) ||
	    netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_STOP, tx->stop) ||
	    netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_WAKE, tx->wake))
		return -EMSGSIZE;
	return 0;
}

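/* Emit the stats of a single queue. The structs are pre-filled with
 * 0xff bytes so memchr_inv() can detect a driver that reported nothing
 * at all, in which case the half-built message is cancelled without
 * raising an error.
 */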
static int
netdev_nl_stats_queue(struct net_device *netdev, struct sk_buff *rsp,
		      u32 q_type, int i, const struct genl_info *info)
{
	const struct netdev_stat_ops *ops = netdev->stat_ops;
	struct netdev_queue_stats_rx rx;
	struct netdev_queue_stats_tx tx;
	void *hdr;

	hdr = genlmsg_iput(rsp, info);
	if (!hdr)
		return -EMSGSIZE;
	if (nla_put_u32(rsp, NETDEV_A_QSTATS_IFINDEX, netdev->ifindex) ||
	    nla_put_u32(rsp, NETDEV_A_QSTATS_QUEUE_TYPE, q_type) ||
	    nla_put_u32(rsp, NETDEV_A_QSTATS_QUEUE_ID, i))
		goto nla_put_failure;

	switch (q_type) {
	case NETDEV_QUEUE_TYPE_RX:
		memset(&rx, 0xff, sizeof(rx));
		ops->get_queue_stats_rx(netdev, i, &rx);
		if (!memchr_inv(&rx, 0xff, sizeof(rx)))
			goto nla_cancel;
		if (netdev_nl_stats_write_rx(rsp, &rx))
			goto nla_put_failure;
		break;
	case NETDEV_QUEUE_TYPE_TX:
		memset(&tx, 0xff, sizeof(tx));
		ops->get_queue_stats_tx(netdev, i, &tx);
		if (!memchr_inv(&tx, 0xff, sizeof(tx)))
			goto nla_cancel;
		if (netdev_nl_stats_write_tx(rsp, &tx))
			goto nla_put_failure;
		break;
	}

	genlmsg_end(rsp, hdr);
	return 0;

nla_cancel:
	genlmsg_cancel(rsp, hdr);
	return 0;
nla_put_failure:
	genlmsg_cancel(rsp, hdr);
	return -EMSGSIZE;
}

static int
netdev_nl_stats_by_queue(struct net_device *netdev, struct sk_buff *rsp,
			 const struct genl_info *info,
			 struct netdev_nl_dump_ctx *ctx)
{
	const struct netdev_stat_ops *ops = netdev->stat_ops;
	int i, err;

	if (!(netdev->flags & IFF_UP))
		return 0;

	i = ctx->rxq_idx;
	while (ops->get_queue_stats_rx && i < netdev->real_num_rx_queues) {
		err = netdev_nl_stats_queue(netdev, rsp, NETDEV_QUEUE_TYPE_RX,
					    i, info);
		if (err)
			return err;
		ctx->rxq_idx = ++i;
	}
	i = ctx->txq_idx;
	while (ops->get_queue_stats_tx && i < netdev->real_num_tx_queues) {
		err = netdev_nl_stats_queue(netdev, rsp, NETDEV_QUEUE_TYPE_TX,
					    i, info);
		if (err)
			return err;
		ctx->txq_idx = ++i;
	}

	ctx->rxq_idx = 0;
	ctx->txq_idx = 0;
	return 0;
}

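/* Device-scope stats: start from the driver's base counters and fold
 * in every real queue with netdev_nl_stats_add(), so per-queue values
 * of ~0 ("not set") never corrupt the totals.
 */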
static int
netdev_nl_stats_by_netdev(struct net_device *netdev, struct sk_buff *rsp,
			  const struct genl_info *info)
{
	struct netdev_queue_stats_rx rx_sum, rx;
	struct netdev_queue_stats_tx tx_sum, tx;
	const struct netdev_stat_ops *ops;
	void *hdr;
	int i;

	ops = netdev->stat_ops;
	/* Netdev can't guarantee any complete counters */
	if (!ops->get_base_stats)
		return 0;

	memset(&rx_sum, 0xff, sizeof(rx_sum));
	memset(&tx_sum, 0xff, sizeof(tx_sum));

	ops->get_base_stats(netdev, &rx_sum, &tx_sum);

	/* The op was there, but nothing reported, don't bother */
	if (!memchr_inv(&rx_sum, 0xff, sizeof(rx_sum)) &&
	    !memchr_inv(&tx_sum, 0xff, sizeof(tx_sum)))
		return 0;

	hdr = genlmsg_iput(rsp, info);
	if (!hdr)
		return -EMSGSIZE;
	if (nla_put_u32(rsp, NETDEV_A_QSTATS_IFINDEX, netdev->ifindex))
		goto nla_put_failure;

	for (i = 0; i < netdev->real_num_rx_queues; i++) {
		memset(&rx, 0xff, sizeof(rx));
		if (ops->get_queue_stats_rx)
			ops->get_queue_stats_rx(netdev, i, &rx);
		netdev_nl_stats_add(&rx_sum, &rx, sizeof(rx));
	}
	for (i = 0; i < netdev->real_num_tx_queues; i++) {
		memset(&tx, 0xff, sizeof(tx));
		if (ops->get_queue_stats_tx)
			ops->get_queue_stats_tx(netdev, i, &tx);
		netdev_nl_stats_add(&tx_sum, &tx, sizeof(tx));
	}

	if (netdev_nl_stats_write_rx(rsp, &rx_sum) ||
	    netdev_nl_stats_write_tx(rsp, &tx_sum))
		goto nla_put_failure;

	genlmsg_end(rsp, hdr);
	return 0;

nla_put_failure:
	genlmsg_cancel(rsp, hdr);
	return -EMSGSIZE;
}

static int
netdev_nl_qstats_get_dump_one(struct net_device *netdev, unsigned int scope,
			      struct sk_buff *skb, const struct genl_info *info,
			      struct netdev_nl_dump_ctx *ctx)
{
	if (!netdev->stat_ops)
		return 0;

	switch (scope) {
	case 0:
		return netdev_nl_stats_by_netdev(netdev, skb, info);
	case NETDEV_QSTATS_SCOPE_QUEUE:
		return netdev_nl_stats_by_queue(netdev, skb, info, ctx);
	}

	return -EINVAL;	/* Should not happen, per netlink policy */
}

int netdev_nl_qstats_get_dumpit(struct sk_buff *skb,
				struct netlink_callback *cb)
{
	struct netdev_nl_dump_ctx *ctx = netdev_dump_ctx(cb);
	const struct genl_info *info = genl_info_dump(cb);
	struct net *net = sock_net(skb->sk);
	struct net_device *netdev;
	unsigned int ifindex;
	unsigned int scope;
	int err = 0;

	scope = 0;
	if (info->attrs[NETDEV_A_QSTATS_SCOPE])
		scope = nla_get_uint(info->attrs[NETDEV_A_QSTATS_SCOPE]);

	ifindex = 0;
	if (info->attrs[NETDEV_A_QSTATS_IFINDEX])
		ifindex = nla_get_u32(info->attrs[NETDEV_A_QSTATS_IFINDEX]);

	rtnl_lock();
	if (ifindex) {
		netdev = __dev_get_by_index(net, ifindex);
		if (netdev && netdev->stat_ops) {
			err = netdev_nl_qstats_get_dump_one(netdev, scope, skb,
							    info, ctx);
		} else {
			NL_SET_BAD_ATTR(info->extack,
					info->attrs[NETDEV_A_QSTATS_IFINDEX]);
			err = netdev ? -EOPNOTSUPP : -ENODEV;
		}
	} else {
		for_each_netdev_dump(net, netdev, ctx->ifindex) {
			err = netdev_nl_qstats_get_dump_one(netdev, scope, skb,
							    info, ctx);
			if (err < 0)
				break;
		}
	}
	rtnl_unlock();

	return err;
}

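/* Bind a dmabuf to a set of RX queues. The queue list is validated
 * attribute by attribute; any failure after net_devmem_bind_dmabuf()
 * unwinds through err_unbind so the binding never leaks.
 */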
int netdev_nl_bind_rx_doit(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr *tb[ARRAY_SIZE(netdev_queue_id_nl_policy)];
	struct net_devmem_dmabuf_binding *binding;
	struct list_head *sock_binding_list;
	u32 ifindex, dmabuf_fd, rxq_idx;
	struct net_device *netdev;
	struct sk_buff *rsp;
	struct nlattr *attr;
	int rem, err = 0;
	void *hdr;

	if (GENL_REQ_ATTR_CHECK(info, NETDEV_A_DEV_IFINDEX) ||
	    GENL_REQ_ATTR_CHECK(info, NETDEV_A_DMABUF_FD) ||
	    GENL_REQ_ATTR_CHECK(info, NETDEV_A_DMABUF_QUEUES))
		return -EINVAL;

	ifindex = nla_get_u32(info->attrs[NETDEV_A_DEV_IFINDEX]);
	dmabuf_fd = nla_get_u32(info->attrs[NETDEV_A_DMABUF_FD]);

	sock_binding_list = genl_sk_priv_get(&netdev_nl_family,
					     NETLINK_CB(skb).sk);
	if (IS_ERR(sock_binding_list))
		return PTR_ERR(sock_binding_list);

	rsp = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!rsp)
		return -ENOMEM;

	hdr = genlmsg_iput(rsp, info);
	if (!hdr) {
		err = -EMSGSIZE;
		goto err_genlmsg_free;
	}

	rtnl_lock();

	netdev = __dev_get_by_index(genl_info_net(info), ifindex);
	if (!netdev || !netif_device_present(netdev)) {
		err = -ENODEV;
		goto err_unlock;
	}

	if (dev_xdp_prog_count(netdev)) {
		NL_SET_ERR_MSG(info->extack, "unable to bind dmabuf to device with XDP program attached");
		err = -EEXIST;
		goto err_unlock;
	}

	binding = net_devmem_bind_dmabuf(netdev, dmabuf_fd, info->extack);
	if (IS_ERR(binding)) {
		err = PTR_ERR(binding);
		goto err_unlock;
	}

	nla_for_each_attr_type(attr, NETDEV_A_DMABUF_QUEUES,
			       genlmsg_data(info->genlhdr),
			       genlmsg_len(info->genlhdr), rem) {
		err = nla_parse_nested(
			tb, ARRAY_SIZE(netdev_queue_id_nl_policy) - 1, attr,
			netdev_queue_id_nl_policy, info->extack);
		if (err < 0)
			goto err_unbind;

		if (NL_REQ_ATTR_CHECK(info->extack, attr, tb, NETDEV_A_QUEUE_ID) ||
		    NL_REQ_ATTR_CHECK(info->extack, attr, tb, NETDEV_A_QUEUE_TYPE)) {
			err = -EINVAL;
			goto err_unbind;
		}

		if (nla_get_u32(tb[NETDEV_A_QUEUE_TYPE]) != NETDEV_QUEUE_TYPE_RX) {
			NL_SET_BAD_ATTR(info->extack, tb[NETDEV_A_QUEUE_TYPE]);
			err = -EINVAL;
			goto err_unbind;
		}

		rxq_idx = nla_get_u32(tb[NETDEV_A_QUEUE_ID]);

		err = net_devmem_bind_dmabuf_to_queue(netdev, rxq_idx, binding,
						      info->extack);
		if (err)
			goto err_unbind;
	}

	list_add(&binding->list, sock_binding_list);

	nla_put_u32(rsp, NETDEV_A_DMABUF_ID, binding->id);
	genlmsg_end(rsp, hdr);

	err = genlmsg_reply(rsp, info);
	if (err)
		goto err_unbind;

	rtnl_unlock();

	return 0;

err_unbind:
	net_devmem_unbind_dmabuf(binding);
err_unlock:
	rtnl_unlock();
err_genlmsg_free:
	nlmsg_free(rsp);
	return err;
}

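/* Per-socket genetlink private data tracks the dmabuf bindings created
 * by that socket, so closing the socket unbinds anything left behind.
 */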
void netdev_nl_sock_priv_init(struct list_head *priv)
{
	INIT_LIST_HEAD(priv);
}

void netdev_nl_sock_priv_destroy(struct list_head *priv)
{
	struct net_devmem_dmabuf_binding *binding;
	struct net_devmem_dmabuf_binding *temp;

	list_for_each_entry_safe(binding, temp, priv, list) {
		rtnl_lock();
		net_devmem_unbind_dmabuf(binding);
		rtnl_unlock();
	}
}

static int netdev_genl_netdevice_event(struct notifier_block *nb,
				       unsigned long event, void *ptr)
{
	struct net_device *netdev = netdev_notifier_info_to_dev(ptr);

	switch (event) {
	case NETDEV_REGISTER:
		netdev_genl_dev_notify(netdev, NETDEV_CMD_DEV_ADD_NTF);
		break;
	case NETDEV_UNREGISTER:
		netdev_genl_dev_notify(netdev, NETDEV_CMD_DEV_DEL_NTF);
		break;
	case NETDEV_XDP_FEAT_CHANGE:
		netdev_genl_dev_notify(netdev, NETDEV_CMD_DEV_CHANGE_NTF);
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block netdev_genl_nb = {
	.notifier_call	= netdev_genl_netdevice_event,
};

static int __init netdev_genl_init(void)
{
	int err;

	err = register_netdevice_notifier(&netdev_genl_nb);
	if (err)
		return err;

	err = genl_register_family(&netdev_nl_family);
	if (err)
		goto err_unreg_ntf;

	return 0;

err_unreg_ntf:
	unregister_netdevice_notifier(&netdev_genl_nb);
	return err;
}

subsys_initcall(netdev_genl_init);