// SPDX-License-Identifier: GPL-2.0
/* XDP sockets monitoring support
 *
 * Copyright(c) 2019 Intel Corporation.
 *
 * Author: Björn Töpel <[email protected]>
 */

#include <linux/module.h>
#include <net/xdp_sock.h>
#include <linux/xdp_diag.h>
#include <linux/sock_diag.h>

#include "xsk_queue.h"
#include "xsk.h"

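/* Emit the XDP_DIAG_INFO attribute: the ifindex of the device the socket is
 * bound to (0 if unbound) and the bound queue id.
 */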
static int xsk_diag_put_info(const struct xdp_sock *xs, struct sk_buff *nlskb)
{
        struct xdp_diag_info di = {};

        di.ifindex = xs->dev ? xs->dev->ifindex : 0;
        di.queue_id = xs->queue_id;
        return nla_put(nlskb, XDP_DIAG_INFO, sizeof(di), &di);
}

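/* Emit a ring attribute of type @nl_type carrying the number of entries in
 * @queue.
 */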
static int xsk_diag_put_ring(const struct xsk_queue *queue, int nl_type,
                             struct sk_buff *nlskb)
{
        struct xdp_diag_ring dr = {};

        dr.entries = queue->nentries;
        return nla_put(nlskb, nl_type, sizeof(dr), &dr);
}

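/* Emit the RX and TX ring sizes for whichever rings the socket has set up. */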
static int xsk_diag_put_rings_cfg(const struct xdp_sock *xs,
                                  struct sk_buff *nlskb)
{
        int err = 0;

        if (xs->rx)
                err = xsk_diag_put_ring(xs->rx, XDP_DIAG_RX_RING, nlskb);
        if (!err && xs->tx)
                err = xsk_diag_put_ring(xs->tx, XDP_DIAG_TX_RING, nlskb);
        return err;
}

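/* Describe the UMEM bound to the socket: id, size, chunk layout, flags and
 * reference count, plus the fill and completion ring sizes when a buffer
 * pool is attached. Does nothing if no UMEM is bound.
 */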
static int xsk_diag_put_umem(const struct xdp_sock *xs, struct sk_buff *nlskb)
{
        struct xsk_buff_pool *pool = xs->pool;
        struct xdp_umem *umem = xs->umem;
        struct xdp_diag_umem du = {};
        int err;

        if (!umem)
                return 0;

        du.id = umem->id;
        du.size = umem->size;
        du.num_pages = umem->npgs;
        du.chunk_size = umem->chunk_size;
        du.headroom = umem->headroom;
        du.ifindex = (pool && pool->netdev) ? pool->netdev->ifindex : 0;
        du.queue_id = pool ? pool->queue_id : 0;
        du.flags = 0;
        if (umem->zc)
                du.flags |= XDP_DU_F_ZEROCOPY;
        du.refs = refcount_read(&umem->users);

        err = nla_put(nlskb, XDP_DIAG_UMEM, sizeof(du), &du);
        if (!err && pool && pool->fq)
                err = xsk_diag_put_ring(pool->fq,
                                        XDP_DIAG_UMEM_FILL_RING, nlskb);
        if (!err && pool && pool->cq)
                err = xsk_diag_put_ring(pool->cq,
                                        XDP_DIAG_UMEM_COMPLETION_RING, nlskb);
        return err;
}

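/* Emit XDP_DIAG_STATS: RX drop, invalid-descriptor and ring-full counters,
 * TX invalid-descriptor and ring-empty counters, and how often the fill
 * ring ran empty.
 */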
static int xsk_diag_put_stats(const struct xdp_sock *xs, struct sk_buff *nlskb)
{
        struct xdp_diag_stats du = {};

        du.n_rx_dropped = xs->rx_dropped;
        du.n_rx_invalid = xskq_nb_invalid_descs(xs->rx);
        du.n_rx_full = xs->rx_queue_full;
        du.n_fill_ring_empty = xs->pool ? xskq_nb_queue_empty_descs(xs->pool->fq) : 0;
        du.n_tx_invalid = xskq_nb_invalid_descs(xs->tx);
        du.n_tx_ring_empty = xskq_nb_queue_empty_descs(xs->tx);
        return nla_put(nlskb, XDP_DIAG_STATS, sizeof(du), &du);
}

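/* Build one complete diag message for @sk, emitting only the attribute
 * groups requested in req->xdiag_show. xs->mutex is held while the
 * attributes are gathered. Returns -EMSGSIZE if the message does not fit
 * into the skb.
 */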
static int xsk_diag_fill(struct sock *sk, struct sk_buff *nlskb,
                         struct xdp_diag_req *req,
                         struct user_namespace *user_ns,
                         u32 portid, u32 seq, u32 flags, int sk_ino)
{
        struct xdp_sock *xs = xdp_sk(sk);
        struct xdp_diag_msg *msg;
        struct nlmsghdr *nlh;

        nlh = nlmsg_put(nlskb, portid, seq, SOCK_DIAG_BY_FAMILY, sizeof(*msg),
                        flags);
        if (!nlh)
                return -EMSGSIZE;

        msg = nlmsg_data(nlh);
        memset(msg, 0, sizeof(*msg));
        msg->xdiag_family = AF_XDP;
        msg->xdiag_type = sk->sk_type;
        msg->xdiag_ino = sk_ino;
        sock_diag_save_cookie(sk, msg->xdiag_cookie);

        mutex_lock(&xs->mutex);
        if ((req->xdiag_show & XDP_SHOW_INFO) && xsk_diag_put_info(xs, nlskb))
                goto out_nlmsg_trim;

        if ((req->xdiag_show & XDP_SHOW_INFO) &&
            nla_put_u32(nlskb, XDP_DIAG_UID,
                        from_kuid_munged(user_ns, sock_i_uid(sk))))
                goto out_nlmsg_trim;

        if ((req->xdiag_show & XDP_SHOW_RING_CFG) &&
            xsk_diag_put_rings_cfg(xs, nlskb))
                goto out_nlmsg_trim;

        if ((req->xdiag_show & XDP_SHOW_UMEM) &&
            xsk_diag_put_umem(xs, nlskb))
                goto out_nlmsg_trim;

        if ((req->xdiag_show & XDP_SHOW_MEMINFO) &&
            sock_diag_put_meminfo(sk, nlskb, XDP_DIAG_MEMINFO))
                goto out_nlmsg_trim;

        if ((req->xdiag_show & XDP_SHOW_STATS) &&
            xsk_diag_put_stats(xs, nlskb))
                goto out_nlmsg_trim;

        mutex_unlock(&xs->mutex);
        nlmsg_end(nlskb, nlh);
        return 0;

out_nlmsg_trim:
        mutex_unlock(&xs->mutex);
        nlmsg_cancel(nlskb, nlh);
        return -EMSGSIZE;
}

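/* Netlink dump callback: walk the per-netns list of XDP sockets, resume at
 * the index saved in cb->args[0], and emit one message per socket until the
 * skb is full.
 */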
static int xsk_diag_dump(struct sk_buff *nlskb, struct netlink_callback *cb)
{
        struct xdp_diag_req *req = nlmsg_data(cb->nlh);
        struct net *net = sock_net(nlskb->sk);
        int num = 0, s_num = cb->args[0];
        struct sock *sk;

        mutex_lock(&net->xdp.lock);

        sk_for_each(sk, &net->xdp.list) {
                if (!net_eq(sock_net(sk), net))
                        continue;
                if (num++ < s_num)
                        continue;

                if (xsk_diag_fill(sk, nlskb, req,
                                  sk_user_ns(NETLINK_CB(cb->skb).sk),
                                  NETLINK_CB(cb->skb).portid,
                                  cb->nlh->nlmsg_seq, NLM_F_MULTI,
                                  sock_i_ino(sk)) < 0) {
                        num--;
                        break;
                }
        }

        mutex_unlock(&net->xdp.lock);
        cb->args[0] = num;
        return nlskb->len;
}

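/* Entry point for SOCK_DIAG_BY_FAMILY requests with xdiag_family == AF_XDP.
 * Only NLM_F_DUMP requests are supported; valid requests start a dump.
 */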
static int xsk_diag_handler_dump(struct sk_buff *nlskb, struct nlmsghdr *hdr)
{
        struct netlink_dump_control c = { .dump = xsk_diag_dump };
        int hdrlen = sizeof(struct xdp_diag_req);
        struct net *net = sock_net(nlskb->sk);

        if (nlmsg_len(hdr) < hdrlen)
                return -EINVAL;

        if (!(hdr->nlmsg_flags & NLM_F_DUMP))
                return -EOPNOTSUPP;

        return netlink_dump_start(net->diag_nlsk, nlskb, hdr, &c);
}

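/* Register the AF_XDP family handler with the sock_diag core on module load
 * and remove it again on unload.
 */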
static const struct sock_diag_handler xsk_diag_handler = {
        .family = AF_XDP,
        .dump = xsk_diag_handler_dump,
};

static int __init xsk_diag_init(void)
{
        return sock_diag_register(&xsk_diag_handler);
}

static void __exit xsk_diag_exit(void)
{
        sock_diag_unregister(&xsk_diag_handler);
}

module_init(xsk_diag_init);
module_exit(xsk_diag_exit);
MODULE_LICENSE("GPL");
MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_NETLINK, NETLINK_SOCK_DIAG, AF_XDP);