net/core/netdev-genl.c
// SPDX-License-Identifier: GPL-2.0-only

#include <linux/netdevice.h>
#include <linux/notifier.h>
#include <linux/rtnetlink.h>
#include <net/busy_poll.h>
#include <net/net_namespace.h>
#include <net/netdev_queues.h>
#include <net/netdev_rx_queue.h>
#include <net/sock.h>
#include <net/xdp.h>
#include <net/xdp_sock.h>

#include "dev.h"
#include "devmem.h"
#include "netdev-genl-gen.h"

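/*
 * Dump state kept in cb->ctx across resumed netlink dumps: the last
 * ifindex, queue indices and NAPI id visited, so each invocation of a
 * ->dumpit() handler can continue where the previous message left off.
 */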
struct netdev_nl_dump_ctx {
        unsigned long   ifindex;
        unsigned int    rxq_idx;
        unsigned int    txq_idx;
        unsigned int    napi_id;
};

static struct netdev_nl_dump_ctx *netdev_dump_ctx(struct netlink_callback *cb)
{
        NL_ASSERT_CTX_FITS(struct netdev_nl_dump_ctx);

        return (struct netdev_nl_dump_ctx *)cb->ctx;
}

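/*
 * Fill one device message: ifindex, the XDP feature flags, the XDP RX
 * metadata kfuncs the driver implements, and the AF_XDP TX metadata
 * capabilities advertised via xsk_tx_metadata_ops.
 */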
static int
netdev_nl_dev_fill(struct net_device *netdev, struct sk_buff *rsp,
                   const struct genl_info *info)
{
        u64 xsk_features = 0;
        u64 xdp_rx_meta = 0;
        void *hdr;

        hdr = genlmsg_iput(rsp, info);
        if (!hdr)
                return -EMSGSIZE;

#define XDP_METADATA_KFUNC(_, flag, __, xmo) \
        if (netdev->xdp_metadata_ops && netdev->xdp_metadata_ops->xmo) \
                xdp_rx_meta |= flag;
XDP_METADATA_KFUNC_xxx
#undef XDP_METADATA_KFUNC

        if (netdev->xsk_tx_metadata_ops) {
                if (netdev->xsk_tx_metadata_ops->tmo_fill_timestamp)
                        xsk_features |= NETDEV_XSK_FLAGS_TX_TIMESTAMP;
                if (netdev->xsk_tx_metadata_ops->tmo_request_checksum)
                        xsk_features |= NETDEV_XSK_FLAGS_TX_CHECKSUM;
        }

        if (nla_put_u32(rsp, NETDEV_A_DEV_IFINDEX, netdev->ifindex) ||
            nla_put_u64_64bit(rsp, NETDEV_A_DEV_XDP_FEATURES,
                              netdev->xdp_features, NETDEV_A_DEV_PAD) ||
            nla_put_u64_64bit(rsp, NETDEV_A_DEV_XDP_RX_METADATA_FEATURES,
                              xdp_rx_meta, NETDEV_A_DEV_PAD) ||
            nla_put_u64_64bit(rsp, NETDEV_A_DEV_XSK_FEATURES,
                              xsk_features, NETDEV_A_DEV_PAD))
                goto err_cancel_msg;

        if (netdev->xdp_features & NETDEV_XDP_ACT_XSK_ZEROCOPY) {
                if (nla_put_u32(rsp, NETDEV_A_DEV_XDP_ZC_MAX_SEGS,
                                netdev->xdp_zc_max_segs))
                        goto err_cancel_msg;
        }

        genlmsg_end(rsp, hdr);

        return 0;

err_cancel_msg:
        genlmsg_cancel(rsp, hdr);
        return -EMSGSIZE;
}

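/*
 * Multicast a device add/del/change notification to NETDEV_NLGRP_MGMT
 * listeners; the event is silently dropped on allocation or fill failure.
 */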
static void
netdev_genl_dev_notify(struct net_device *netdev, int cmd)
{
        struct genl_info info;
        struct sk_buff *ntf;

        if (!genl_has_listeners(&netdev_nl_family, dev_net(netdev),
                                NETDEV_NLGRP_MGMT))
                return;

        genl_info_init_ntf(&info, &netdev_nl_family, cmd);

        ntf = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL);
        if (!ntf)
                return;

        if (netdev_nl_dev_fill(netdev, ntf, &info)) {
                nlmsg_free(ntf);
                return;
        }

        genlmsg_multicast_netns(&netdev_nl_family, dev_net(netdev), ntf,
                                0, NETDEV_NLGRP_MGMT, GFP_KERNEL);
}

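/* Handle a GET request for a single device identified by ifindex. */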
int netdev_nl_dev_get_doit(struct sk_buff *skb, struct genl_info *info)
{
        struct net_device *netdev;
        struct sk_buff *rsp;
        u32 ifindex;
        int err;

        if (GENL_REQ_ATTR_CHECK(info, NETDEV_A_DEV_IFINDEX))
                return -EINVAL;

        ifindex = nla_get_u32(info->attrs[NETDEV_A_DEV_IFINDEX]);

        rsp = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL);
        if (!rsp)
                return -ENOMEM;

        rtnl_lock();

        netdev = __dev_get_by_index(genl_info_net(info), ifindex);
        if (netdev)
                err = netdev_nl_dev_fill(netdev, rsp, info);
        else
                err = -ENODEV;

        rtnl_unlock();

        if (err)
                goto err_free_msg;

        return genlmsg_reply(rsp, info);

err_free_msg:
        nlmsg_free(rsp);
        return err;
}

int netdev_nl_dev_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
{
        struct netdev_nl_dump_ctx *ctx = netdev_dump_ctx(cb);
        struct net *net = sock_net(skb->sk);
        struct net_device *netdev;
        int err = 0;

        rtnl_lock();
        for_each_netdev_dump(net, netdev, ctx->ifindex) {
                err = netdev_nl_dev_fill(netdev, skb, genl_info_dump(cb));
                if (err < 0)
                        break;
        }
        rtnl_unlock();

        return err;
}

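/*
 * Fill one NAPI instance: id, ifindex, IRQ, service thread PID and the
 * per-NAPI IRQ deferral / suspend / GRO flush settings. NAPIs of devices
 * that are down are skipped rather than reported as an error.
 */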
static int
netdev_nl_napi_fill_one(struct sk_buff *rsp, struct napi_struct *napi,
                        const struct genl_info *info)
{
        unsigned long irq_suspend_timeout;
        unsigned long gro_flush_timeout;
        u32 napi_defer_hard_irqs;
        void *hdr;
        pid_t pid;

        if (WARN_ON_ONCE(!napi->dev))
                return -EINVAL;
        if (!(napi->dev->flags & IFF_UP))
                return 0;

        hdr = genlmsg_iput(rsp, info);
        if (!hdr)
                return -EMSGSIZE;

        if (napi->napi_id >= MIN_NAPI_ID &&
            nla_put_u32(rsp, NETDEV_A_NAPI_ID, napi->napi_id))
                goto nla_put_failure;

        if (nla_put_u32(rsp, NETDEV_A_NAPI_IFINDEX, napi->dev->ifindex))
                goto nla_put_failure;

        if (napi->irq >= 0 && nla_put_u32(rsp, NETDEV_A_NAPI_IRQ, napi->irq))
                goto nla_put_failure;

        if (napi->thread) {
                pid = task_pid_nr(napi->thread);
                if (nla_put_u32(rsp, NETDEV_A_NAPI_PID, pid))
                        goto nla_put_failure;
        }

        napi_defer_hard_irqs = napi_get_defer_hard_irqs(napi);
        if (nla_put_s32(rsp, NETDEV_A_NAPI_DEFER_HARD_IRQS,
                        napi_defer_hard_irqs))
                goto nla_put_failure;

        irq_suspend_timeout = napi_get_irq_suspend_timeout(napi);
        if (nla_put_uint(rsp, NETDEV_A_NAPI_IRQ_SUSPEND_TIMEOUT,
                         irq_suspend_timeout))
                goto nla_put_failure;

        gro_flush_timeout = napi_get_gro_flush_timeout(napi);
        if (nla_put_uint(rsp, NETDEV_A_NAPI_GRO_FLUSH_TIMEOUT,
                         gro_flush_timeout))
                goto nla_put_failure;

        genlmsg_end(rsp, hdr);

        return 0;

nla_put_failure:
        genlmsg_cancel(rsp, hdr);
        return -EMSGSIZE;
}

int netdev_nl_napi_get_doit(struct sk_buff *skb, struct genl_info *info)
{
        struct napi_struct *napi;
        struct sk_buff *rsp;
        u32 napi_id;
        int err;

        if (GENL_REQ_ATTR_CHECK(info, NETDEV_A_NAPI_ID))
                return -EINVAL;

        napi_id = nla_get_u32(info->attrs[NETDEV_A_NAPI_ID]);

        rsp = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL);
        if (!rsp)
                return -ENOMEM;

        rtnl_lock();
        rcu_read_lock();

        napi = napi_by_id(napi_id);
        if (napi) {
                err = netdev_nl_napi_fill_one(rsp, napi, info);
        } else {
                NL_SET_BAD_ATTR(info->extack, info->attrs[NETDEV_A_NAPI_ID]);
                err = -ENOENT;
        }

        rcu_read_unlock();
        rtnl_unlock();

        if (err) {
                goto err_free_msg;
        } else if (!rsp->len) {
                err = -ENOENT;
                goto err_free_msg;
        }

        return genlmsg_reply(rsp, info);

err_free_msg:
        nlmsg_free(rsp);
        return err;
}

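/*
 * Dump the NAPIs of one device. ctx->napi_id records the id of the last
 * NAPI already dumped; entries whose id is at or above it are skipped,
 * so a resumed dump does not repeat them.
 */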
static int
netdev_nl_napi_dump_one(struct net_device *netdev, struct sk_buff *rsp,
                        const struct genl_info *info,
                        struct netdev_nl_dump_ctx *ctx)
{
        struct napi_struct *napi;
        int err = 0;

        if (!(netdev->flags & IFF_UP))
                return err;

        list_for_each_entry(napi, &netdev->napi_list, dev_list) {
                if (ctx->napi_id && napi->napi_id >= ctx->napi_id)
                        continue;

                err = netdev_nl_napi_fill_one(rsp, napi, info);
                if (err)
                        return err;
                ctx->napi_id = napi->napi_id;
        }
        return err;
}

int netdev_nl_napi_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
{
        struct netdev_nl_dump_ctx *ctx = netdev_dump_ctx(cb);
        const struct genl_info *info = genl_info_dump(cb);
        struct net *net = sock_net(skb->sk);
        struct net_device *netdev;
        u32 ifindex = 0;
        int err = 0;

        if (info->attrs[NETDEV_A_NAPI_IFINDEX])
                ifindex = nla_get_u32(info->attrs[NETDEV_A_NAPI_IFINDEX]);

        rtnl_lock();
        if (ifindex) {
                netdev = __dev_get_by_index(net, ifindex);
                if (netdev)
                        err = netdev_nl_napi_dump_one(netdev, skb, info, ctx);
                else
                        err = -ENODEV;
        } else {
                for_each_netdev_dump(net, netdev, ctx->ifindex) {
                        err = netdev_nl_napi_dump_one(netdev, skb, info, ctx);
                        if (err < 0)
                                break;
                        ctx->napi_id = 0;
                }
        }
        rtnl_unlock();

        return err;
}

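/*
 * Apply the optional NAPI SET attributes: hard IRQ deferral count, IRQ
 * suspend timeout and GRO flush timeout. Attributes that are absent
 * leave the current configuration untouched.
 */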
static int
netdev_nl_napi_set_config(struct napi_struct *napi, struct genl_info *info)
{
        u64 irq_suspend_timeout = 0;
        u64 gro_flush_timeout = 0;
        u32 defer = 0;

        if (info->attrs[NETDEV_A_NAPI_DEFER_HARD_IRQS]) {
                defer = nla_get_u32(info->attrs[NETDEV_A_NAPI_DEFER_HARD_IRQS]);
                napi_set_defer_hard_irqs(napi, defer);
        }

        if (info->attrs[NETDEV_A_NAPI_IRQ_SUSPEND_TIMEOUT]) {
                irq_suspend_timeout = nla_get_uint(info->attrs[NETDEV_A_NAPI_IRQ_SUSPEND_TIMEOUT]);
                napi_set_irq_suspend_timeout(napi, irq_suspend_timeout);
        }

        if (info->attrs[NETDEV_A_NAPI_GRO_FLUSH_TIMEOUT]) {
                gro_flush_timeout = nla_get_uint(info->attrs[NETDEV_A_NAPI_GRO_FLUSH_TIMEOUT]);
                napi_set_gro_flush_timeout(napi, gro_flush_timeout);
        }

        return 0;
}

int netdev_nl_napi_set_doit(struct sk_buff *skb, struct genl_info *info)
{
        struct napi_struct *napi;
        unsigned int napi_id;
        int err;

        if (GENL_REQ_ATTR_CHECK(info, NETDEV_A_NAPI_ID))
                return -EINVAL;

        napi_id = nla_get_u32(info->attrs[NETDEV_A_NAPI_ID]);

        rtnl_lock();
        rcu_read_lock();

        napi = napi_by_id(napi_id);
        if (napi) {
                err = netdev_nl_napi_set_config(napi, info);
        } else {
                NL_SET_BAD_ATTR(info->extack, info->attrs[NETDEV_A_NAPI_ID]);
                err = -ENOENT;
        }

        rcu_read_unlock();
        rtnl_unlock();

        return err;
}

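/*
 * Fill one RX or TX queue: id, type, ifindex, the NAPI instance bound
 * to the queue and, for RX queues, the id of any dmabuf binding backing
 * the queue's memory provider.
 */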
static int
netdev_nl_queue_fill_one(struct sk_buff *rsp, struct net_device *netdev,
                         u32 q_idx, u32 q_type, const struct genl_info *info)
{
        struct net_devmem_dmabuf_binding *binding;
        struct netdev_rx_queue *rxq;
        struct netdev_queue *txq;
        void *hdr;

        hdr = genlmsg_iput(rsp, info);
        if (!hdr)
                return -EMSGSIZE;

        if (nla_put_u32(rsp, NETDEV_A_QUEUE_ID, q_idx) ||
            nla_put_u32(rsp, NETDEV_A_QUEUE_TYPE, q_type) ||
            nla_put_u32(rsp, NETDEV_A_QUEUE_IFINDEX, netdev->ifindex))
                goto nla_put_failure;

        switch (q_type) {
        case NETDEV_QUEUE_TYPE_RX:
                rxq = __netif_get_rx_queue(netdev, q_idx);
                if (rxq->napi && nla_put_u32(rsp, NETDEV_A_QUEUE_NAPI_ID,
                                             rxq->napi->napi_id))
                        goto nla_put_failure;

                binding = rxq->mp_params.mp_priv;
                if (binding &&
                    nla_put_u32(rsp, NETDEV_A_QUEUE_DMABUF, binding->id))
                        goto nla_put_failure;

                break;
        case NETDEV_QUEUE_TYPE_TX:
                txq = netdev_get_tx_queue(netdev, q_idx);
                if (txq->napi && nla_put_u32(rsp, NETDEV_A_QUEUE_NAPI_ID,
                                             txq->napi->napi_id))
                        goto nla_put_failure;
        }

        genlmsg_end(rsp, hdr);

        return 0;

nla_put_failure:
        genlmsg_cancel(rsp, hdr);
        return -EMSGSIZE;
}

static int netdev_nl_queue_validate(struct net_device *netdev, u32 q_id,
                                    u32 q_type)
{
        switch (q_type) {
        case NETDEV_QUEUE_TYPE_RX:
                if (q_id >= netdev->real_num_rx_queues)
                        return -EINVAL;
                return 0;
        case NETDEV_QUEUE_TYPE_TX:
                if (q_id >= netdev->real_num_tx_queues)
                        return -EINVAL;
        }
        return 0;
}

static int
netdev_nl_queue_fill(struct sk_buff *rsp, struct net_device *netdev, u32 q_idx,
                     u32 q_type, const struct genl_info *info)
{
        int err;

        if (!(netdev->flags & IFF_UP))
                return -ENOENT;

        err = netdev_nl_queue_validate(netdev, q_idx, q_type);
        if (err)
                return err;

        return netdev_nl_queue_fill_one(rsp, netdev, q_idx, q_type, info);
}

int netdev_nl_queue_get_doit(struct sk_buff *skb, struct genl_info *info)
{
        u32 q_id, q_type, ifindex;
        struct net_device *netdev;
        struct sk_buff *rsp;
        int err;

        if (GENL_REQ_ATTR_CHECK(info, NETDEV_A_QUEUE_ID) ||
            GENL_REQ_ATTR_CHECK(info, NETDEV_A_QUEUE_TYPE) ||
            GENL_REQ_ATTR_CHECK(info, NETDEV_A_QUEUE_IFINDEX))
                return -EINVAL;

        q_id = nla_get_u32(info->attrs[NETDEV_A_QUEUE_ID]);
        q_type = nla_get_u32(info->attrs[NETDEV_A_QUEUE_TYPE]);
        ifindex = nla_get_u32(info->attrs[NETDEV_A_QUEUE_IFINDEX]);

        rsp = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL);
        if (!rsp)
                return -ENOMEM;

        rtnl_lock();

        netdev = __dev_get_by_index(genl_info_net(info), ifindex);
        if (netdev)
                err = netdev_nl_queue_fill(rsp, netdev, q_id, q_type, info);
        else
                err = -ENODEV;

        rtnl_unlock();

        if (err)
                goto err_free_msg;

        return genlmsg_reply(rsp, info);

err_free_msg:
        nlmsg_free(rsp);
        return err;
}

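/*
 * Dump all real RX then TX queues of one device, resuming from the
 * queue indices saved in @ctx when the dump spans multiple messages.
 */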
static int
netdev_nl_queue_dump_one(struct net_device *netdev, struct sk_buff *rsp,
                         const struct genl_info *info,
                         struct netdev_nl_dump_ctx *ctx)
{
        int err = 0;

        if (!(netdev->flags & IFF_UP))
                return err;

        for (; ctx->rxq_idx < netdev->real_num_rx_queues; ctx->rxq_idx++) {
                err = netdev_nl_queue_fill_one(rsp, netdev, ctx->rxq_idx,
                                               NETDEV_QUEUE_TYPE_RX, info);
                if (err)
                        return err;
        }
        for (; ctx->txq_idx < netdev->real_num_tx_queues; ctx->txq_idx++) {
                err = netdev_nl_queue_fill_one(rsp, netdev, ctx->txq_idx,
                                               NETDEV_QUEUE_TYPE_TX, info);
                if (err)
                        return err;
        }

        return err;
}

int netdev_nl_queue_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
{
        struct netdev_nl_dump_ctx *ctx = netdev_dump_ctx(cb);
        const struct genl_info *info = genl_info_dump(cb);
        struct net *net = sock_net(skb->sk);
        struct net_device *netdev;
        u32 ifindex = 0;
        int err = 0;

        if (info->attrs[NETDEV_A_QUEUE_IFINDEX])
                ifindex = nla_get_u32(info->attrs[NETDEV_A_QUEUE_IFINDEX]);

        rtnl_lock();
        if (ifindex) {
                netdev = __dev_get_by_index(net, ifindex);
                if (netdev)
                        err = netdev_nl_queue_dump_one(netdev, skb, info, ctx);
                else
                        err = -ENODEV;
        } else {
                for_each_netdev_dump(net, netdev, ctx->ifindex) {
                        err = netdev_nl_queue_dump_one(netdev, skb, info, ctx);
                        if (err < 0)
                                break;
                        ctx->rxq_idx = 0;
                        ctx->txq_idx = 0;
                }
        }
        rtnl_unlock();

        return err;
}

#define NETDEV_STAT_NOT_SET             (~0ULL)

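/*
 * Queue stats structs are pre-filled with NETDEV_STAT_NOT_SET (all ones)
 * before calling into the driver; counters the driver does not touch
 * stay at the sentinel and are neither summed nor put on the wire.
 * Summing walks both structs as arrays of u64 (hence the stride of 8).
 */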
static void netdev_nl_stats_add(void *_sum, const void *_add, size_t size)
{
        const u64 *add = _add;
        u64 *sum = _sum;

        while (size) {
                if (*add != NETDEV_STAT_NOT_SET && *sum != NETDEV_STAT_NOT_SET)
                        *sum += *add;
                sum++;
                add++;
                size -= 8;
        }
}

static int netdev_stat_put(struct sk_buff *rsp, unsigned int attr_id, u64 value)
{
        if (value == NETDEV_STAT_NOT_SET)
                return 0;
        return nla_put_uint(rsp, attr_id, value);
}

static int
netdev_nl_stats_write_rx(struct sk_buff *rsp, struct netdev_queue_stats_rx *rx)
{
        if (netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_PACKETS, rx->packets) ||
            netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_BYTES, rx->bytes) ||
            netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_ALLOC_FAIL, rx->alloc_fail) ||
            netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_HW_DROPS, rx->hw_drops) ||
            netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_HW_DROP_OVERRUNS, rx->hw_drop_overruns) ||
            netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_CSUM_UNNECESSARY, rx->csum_unnecessary) ||
            netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_CSUM_NONE, rx->csum_none) ||
            netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_CSUM_BAD, rx->csum_bad) ||
            netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_HW_GRO_PACKETS, rx->hw_gro_packets) ||
            netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_HW_GRO_BYTES, rx->hw_gro_bytes) ||
            netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_HW_GRO_WIRE_PACKETS, rx->hw_gro_wire_packets) ||
            netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_HW_GRO_WIRE_BYTES, rx->hw_gro_wire_bytes) ||
            netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_HW_DROP_RATELIMITS, rx->hw_drop_ratelimits))
                return -EMSGSIZE;
        return 0;
}

static int
netdev_nl_stats_write_tx(struct sk_buff *rsp, struct netdev_queue_stats_tx *tx)
{
        if (netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_PACKETS, tx->packets) ||
            netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_BYTES, tx->bytes) ||
            netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_HW_DROPS, tx->hw_drops) ||
            netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_HW_DROP_ERRORS, tx->hw_drop_errors) ||
            netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_CSUM_NONE, tx->csum_none) ||
            netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_NEEDS_CSUM, tx->needs_csum) ||
            netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_HW_GSO_PACKETS, tx->hw_gso_packets) ||
            netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_HW_GSO_BYTES, tx->hw_gso_bytes) ||
            netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_HW_GSO_WIRE_PACKETS, tx->hw_gso_wire_packets) ||
            netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_HW_GSO_WIRE_BYTES, tx->hw_gso_wire_bytes) ||
            netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_HW_DROP_RATELIMITS, tx->hw_drop_ratelimits) ||
            netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_STOP, tx->stop) ||
            netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_WAKE, tx->wake))
                return -EMSGSIZE;
        return 0;
}

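/*
 * Emit the per-queue stats message for queue @i. A queue for which the
 * driver reports nothing (all counters left at the sentinel) produces
 * no message but is not an error.
 */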
static int
netdev_nl_stats_queue(struct net_device *netdev, struct sk_buff *rsp,
                      u32 q_type, int i, const struct genl_info *info)
{
        const struct netdev_stat_ops *ops = netdev->stat_ops;
        struct netdev_queue_stats_rx rx;
        struct netdev_queue_stats_tx tx;
        void *hdr;

        hdr = genlmsg_iput(rsp, info);
        if (!hdr)
                return -EMSGSIZE;
        if (nla_put_u32(rsp, NETDEV_A_QSTATS_IFINDEX, netdev->ifindex) ||
            nla_put_u32(rsp, NETDEV_A_QSTATS_QUEUE_TYPE, q_type) ||
            nla_put_u32(rsp, NETDEV_A_QSTATS_QUEUE_ID, i))
                goto nla_put_failure;

        switch (q_type) {
        case NETDEV_QUEUE_TYPE_RX:
                memset(&rx, 0xff, sizeof(rx));
                ops->get_queue_stats_rx(netdev, i, &rx);
                if (!memchr_inv(&rx, 0xff, sizeof(rx)))
                        goto nla_cancel;
                if (netdev_nl_stats_write_rx(rsp, &rx))
                        goto nla_put_failure;
                break;
        case NETDEV_QUEUE_TYPE_TX:
                memset(&tx, 0xff, sizeof(tx));
                ops->get_queue_stats_tx(netdev, i, &tx);
                if (!memchr_inv(&tx, 0xff, sizeof(tx)))
                        goto nla_cancel;
                if (netdev_nl_stats_write_tx(rsp, &tx))
                        goto nla_put_failure;
                break;
        }

        genlmsg_end(rsp, hdr);
        return 0;

nla_cancel:
        genlmsg_cancel(rsp, hdr);
        return 0;
nla_put_failure:
        genlmsg_cancel(rsp, hdr);
        return -EMSGSIZE;
}

static int
netdev_nl_stats_by_queue(struct net_device *netdev, struct sk_buff *rsp,
                         const struct genl_info *info,
                         struct netdev_nl_dump_ctx *ctx)
{
        const struct netdev_stat_ops *ops = netdev->stat_ops;
        int i, err;

        if (!(netdev->flags & IFF_UP))
                return 0;

        i = ctx->rxq_idx;
        while (ops->get_queue_stats_rx && i < netdev->real_num_rx_queues) {
                err = netdev_nl_stats_queue(netdev, rsp, NETDEV_QUEUE_TYPE_RX,
                                            i, info);
                if (err)
                        return err;
                ctx->rxq_idx = ++i;
        }
        i = ctx->txq_idx;
        while (ops->get_queue_stats_tx && i < netdev->real_num_tx_queues) {
                err = netdev_nl_stats_queue(netdev, rsp, NETDEV_QUEUE_TYPE_TX,
                                            i, info);
                if (err)
                        return err;
                ctx->txq_idx = ++i;
        }

        ctx->rxq_idx = 0;
        ctx->txq_idx = 0;
        return 0;
}

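/*
 * Emit one device-scope stats message: the driver's base stats plus the
 * sum over all real RX/TX queues. Without ->get_base_stats() the device
 * cannot guarantee complete counters, so it is skipped entirely.
 */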
static int
netdev_nl_stats_by_netdev(struct net_device *netdev, struct sk_buff *rsp,
                          const struct genl_info *info)
{
        struct netdev_queue_stats_rx rx_sum, rx;
        struct netdev_queue_stats_tx tx_sum, tx;
        const struct netdev_stat_ops *ops;
        void *hdr;
        int i;

        ops = netdev->stat_ops;
        /* Netdev can't guarantee any complete counters */
        if (!ops->get_base_stats)
                return 0;

        memset(&rx_sum, 0xff, sizeof(rx_sum));
        memset(&tx_sum, 0xff, sizeof(tx_sum));

        ops->get_base_stats(netdev, &rx_sum, &tx_sum);

        /* The op was there, but nothing reported, don't bother */
        if (!memchr_inv(&rx_sum, 0xff, sizeof(rx_sum)) &&
            !memchr_inv(&tx_sum, 0xff, sizeof(tx_sum)))
                return 0;

        hdr = genlmsg_iput(rsp, info);
        if (!hdr)
                return -EMSGSIZE;
        if (nla_put_u32(rsp, NETDEV_A_QSTATS_IFINDEX, netdev->ifindex))
                goto nla_put_failure;

        for (i = 0; i < netdev->real_num_rx_queues; i++) {
                memset(&rx, 0xff, sizeof(rx));
                if (ops->get_queue_stats_rx)
                        ops->get_queue_stats_rx(netdev, i, &rx);
                netdev_nl_stats_add(&rx_sum, &rx, sizeof(rx));
        }
        for (i = 0; i < netdev->real_num_tx_queues; i++) {
                memset(&tx, 0xff, sizeof(tx));
                if (ops->get_queue_stats_tx)
                        ops->get_queue_stats_tx(netdev, i, &tx);
                netdev_nl_stats_add(&tx_sum, &tx, sizeof(tx));
        }

        if (netdev_nl_stats_write_rx(rsp, &rx_sum) ||
            netdev_nl_stats_write_tx(rsp, &tx_sum))
                goto nla_put_failure;

        genlmsg_end(rsp, hdr);
        return 0;

nla_put_failure:
        genlmsg_cancel(rsp, hdr);
        return -EMSGSIZE;
}

static int
netdev_nl_qstats_get_dump_one(struct net_device *netdev, unsigned int scope,
                              struct sk_buff *skb, const struct genl_info *info,
                              struct netdev_nl_dump_ctx *ctx)
{
        if (!netdev->stat_ops)
                return 0;

        switch (scope) {
        case 0:
                return netdev_nl_stats_by_netdev(netdev, skb, info);
        case NETDEV_QSTATS_SCOPE_QUEUE:
                return netdev_nl_stats_by_queue(netdev, skb, info, ctx);
        }

        return -EINVAL; /* Should not happen, per netlink policy */
}

int netdev_nl_qstats_get_dumpit(struct sk_buff *skb,
                                struct netlink_callback *cb)
{
        struct netdev_nl_dump_ctx *ctx = netdev_dump_ctx(cb);
        const struct genl_info *info = genl_info_dump(cb);
        struct net *net = sock_net(skb->sk);
        struct net_device *netdev;
        unsigned int ifindex;
        unsigned int scope;
        int err = 0;

        scope = 0;
        if (info->attrs[NETDEV_A_QSTATS_SCOPE])
                scope = nla_get_uint(info->attrs[NETDEV_A_QSTATS_SCOPE]);

        ifindex = 0;
        if (info->attrs[NETDEV_A_QSTATS_IFINDEX])
                ifindex = nla_get_u32(info->attrs[NETDEV_A_QSTATS_IFINDEX]);

        rtnl_lock();
        if (ifindex) {
                netdev = __dev_get_by_index(net, ifindex);
                if (netdev && netdev->stat_ops) {
                        err = netdev_nl_qstats_get_dump_one(netdev, scope, skb,
                                                            info, ctx);
                } else {
                        NL_SET_BAD_ATTR(info->extack,
                                        info->attrs[NETDEV_A_QSTATS_IFINDEX]);
                        err = netdev ? -EOPNOTSUPP : -ENODEV;
                }
        } else {
                for_each_netdev_dump(net, netdev, ctx->ifindex) {
                        err = netdev_nl_qstats_get_dump_one(netdev, scope, skb,
                                                            info, ctx);
                        if (err < 0)
                                break;
                }
        }
        rtnl_unlock();

        return err;
}

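/*
 * Bind a dmabuf (by fd) as the memory provider of one or more RX queues.
 * The binding is tied to the requesting netlink socket's private list and
 * torn down, along with all queue bindings, when that socket goes away.
 */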
int netdev_nl_bind_rx_doit(struct sk_buff *skb, struct genl_info *info)
{
        struct nlattr *tb[ARRAY_SIZE(netdev_queue_id_nl_policy)];
        struct net_devmem_dmabuf_binding *binding;
        struct list_head *sock_binding_list;
        u32 ifindex, dmabuf_fd, rxq_idx;
        struct net_device *netdev;
        struct sk_buff *rsp;
        struct nlattr *attr;
        int rem, err = 0;
        void *hdr;

        if (GENL_REQ_ATTR_CHECK(info, NETDEV_A_DEV_IFINDEX) ||
            GENL_REQ_ATTR_CHECK(info, NETDEV_A_DMABUF_FD) ||
            GENL_REQ_ATTR_CHECK(info, NETDEV_A_DMABUF_QUEUES))
                return -EINVAL;

        ifindex = nla_get_u32(info->attrs[NETDEV_A_DEV_IFINDEX]);
        dmabuf_fd = nla_get_u32(info->attrs[NETDEV_A_DMABUF_FD]);

        sock_binding_list = genl_sk_priv_get(&netdev_nl_family,
                                             NETLINK_CB(skb).sk);
        if (IS_ERR(sock_binding_list))
                return PTR_ERR(sock_binding_list);

        rsp = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL);
        if (!rsp)
                return -ENOMEM;

        hdr = genlmsg_iput(rsp, info);
        if (!hdr) {
                err = -EMSGSIZE;
                goto err_genlmsg_free;
        }

        rtnl_lock();

        netdev = __dev_get_by_index(genl_info_net(info), ifindex);
        if (!netdev || !netif_device_present(netdev)) {
                err = -ENODEV;
                goto err_unlock;
        }

        if (dev_xdp_prog_count(netdev)) {
                NL_SET_ERR_MSG(info->extack, "unable to bind dmabuf to device with XDP program attached");
                err = -EEXIST;
                goto err_unlock;
        }

        binding = net_devmem_bind_dmabuf(netdev, dmabuf_fd, info->extack);
        if (IS_ERR(binding)) {
                err = PTR_ERR(binding);
                goto err_unlock;
        }

        nla_for_each_attr_type(attr, NETDEV_A_DMABUF_QUEUES,
                               genlmsg_data(info->genlhdr),
                               genlmsg_len(info->genlhdr), rem) {
                err = nla_parse_nested(
                        tb, ARRAY_SIZE(netdev_queue_id_nl_policy) - 1, attr,
                        netdev_queue_id_nl_policy, info->extack);
                if (err < 0)
                        goto err_unbind;

                if (NL_REQ_ATTR_CHECK(info->extack, attr, tb, NETDEV_A_QUEUE_ID) ||
                    NL_REQ_ATTR_CHECK(info->extack, attr, tb, NETDEV_A_QUEUE_TYPE)) {
                        err = -EINVAL;
                        goto err_unbind;
                }

                if (nla_get_u32(tb[NETDEV_A_QUEUE_TYPE]) != NETDEV_QUEUE_TYPE_RX) {
                        NL_SET_BAD_ATTR(info->extack, tb[NETDEV_A_QUEUE_TYPE]);
                        err = -EINVAL;
                        goto err_unbind;
                }

                rxq_idx = nla_get_u32(tb[NETDEV_A_QUEUE_ID]);

                err = net_devmem_bind_dmabuf_to_queue(netdev, rxq_idx, binding,
                                                      info->extack);
                if (err)
                        goto err_unbind;
        }

        list_add(&binding->list, sock_binding_list);

        nla_put_u32(rsp, NETDEV_A_DMABUF_ID, binding->id);
        genlmsg_end(rsp, hdr);

        err = genlmsg_reply(rsp, info);
        if (err)
                goto err_unbind;

        rtnl_unlock();

        return 0;

err_unbind:
        net_devmem_unbind_dmabuf(binding);
err_unlock:
        rtnl_unlock();
err_genlmsg_free:
        nlmsg_free(rsp);
        return err;
}

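/* Per-socket state: the list of dmabuf bindings owned by this socket. */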
void netdev_nl_sock_priv_init(struct list_head *priv)
{
        INIT_LIST_HEAD(priv);
}

void netdev_nl_sock_priv_destroy(struct list_head *priv)
{
        struct net_devmem_dmabuf_binding *binding;
        struct net_devmem_dmabuf_binding *temp;

        list_for_each_entry_safe(binding, temp, priv, list) {
                rtnl_lock();
                net_devmem_unbind_dmabuf(binding);
                rtnl_unlock();
        }
}

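/*
 * Translate netdevice notifier events into netlink notifications for
 * userspace listeners of the management multicast group.
 */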
static int netdev_genl_netdevice_event(struct notifier_block *nb,
                                       unsigned long event, void *ptr)
{
        struct net_device *netdev = netdev_notifier_info_to_dev(ptr);

        switch (event) {
        case NETDEV_REGISTER:
                netdev_genl_dev_notify(netdev, NETDEV_CMD_DEV_ADD_NTF);
                break;
        case NETDEV_UNREGISTER:
                netdev_genl_dev_notify(netdev, NETDEV_CMD_DEV_DEL_NTF);
                break;
        case NETDEV_XDP_FEAT_CHANGE:
                netdev_genl_dev_notify(netdev, NETDEV_CMD_DEV_CHANGE_NTF);
                break;
        }

        return NOTIFY_OK;
}

static struct notifier_block netdev_genl_nb = {
        .notifier_call  = netdev_genl_netdevice_event,
};

static int __init netdev_genl_init(void)
{
        int err;

        err = register_netdevice_notifier(&netdev_genl_nb);
        if (err)
                return err;

        err = genl_register_family(&netdev_nl_family);
        if (err)
                goto err_unreg_ntf;

        return 0;

err_unreg_ntf:
        unregister_netdevice_notifier(&netdev_genl_nb);
        return err;
}

subsys_initcall(netdev_genl_init);