// SPDX-License-Identifier: GPL-2.0
#include <linux/netdevice.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <net/hotdata.h>
#include <net/wext.h>
10 static void *dev_seq_from_index(struct seq_file *seq, loff_t *pos)
12 unsigned long ifindex = *pos;
13 struct net_device *dev;
15 for_each_netdev_dump(seq_file_net(seq), dev, ifindex) {
22 static void *dev_seq_start(struct seq_file *seq, loff_t *pos)
27 return SEQ_START_TOKEN;
29 return dev_seq_from_index(seq, pos);
32 static void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos)
35 return dev_seq_from_index(seq, pos);
38 static void dev_seq_stop(struct seq_file *seq, void *v)
44 static void dev_seq_printf_stats(struct seq_file *seq, struct net_device *dev)
46 struct rtnl_link_stats64 temp;
47 const struct rtnl_link_stats64 *stats = dev_get_stats(dev, &temp);
49 seq_printf(seq, "%6s: %7llu %7llu %4llu %4llu %4llu %5llu %10llu %9llu "
50 "%8llu %7llu %4llu %4llu %4llu %5llu %7llu %10llu\n",
51 dev->name, stats->rx_bytes, stats->rx_packets,
53 stats->rx_dropped + stats->rx_missed_errors,
54 stats->rx_fifo_errors,
55 stats->rx_length_errors + stats->rx_over_errors +
56 stats->rx_crc_errors + stats->rx_frame_errors,
57 stats->rx_compressed, stats->multicast,
58 stats->tx_bytes, stats->tx_packets,
59 stats->tx_errors, stats->tx_dropped,
60 stats->tx_fifo_errors, stats->collisions,
61 stats->tx_carrier_errors +
62 stats->tx_aborted_errors +
63 stats->tx_window_errors +
64 stats->tx_heartbeat_errors,
65 stats->tx_compressed);
69 * Called from the PROCfs module. This now uses the new arbitrary sized
70 * /proc/net interface to create /proc/net/dev
72 static int dev_seq_show(struct seq_file *seq, void *v)
74 if (v == SEQ_START_TOKEN)
75 seq_puts(seq, "Inter-| Receive "
77 " face |bytes packets errs drop fifo frame "
78 "compressed multicast|bytes packets errs "
79 "drop fifo colls carrier compressed\n");
81 dev_seq_printf_stats(seq, v);
85 static u32 softnet_input_pkt_queue_len(struct softnet_data *sd)
87 return skb_queue_len_lockless(&sd->input_pkt_queue);
90 static u32 softnet_process_queue_len(struct softnet_data *sd)
92 return skb_queue_len_lockless(&sd->process_queue);
95 static struct softnet_data *softnet_get_online(loff_t *pos)
97 struct softnet_data *sd = NULL;
99 while (*pos < nr_cpu_ids)
100 if (cpu_online(*pos)) {
101 sd = &per_cpu(softnet_data, *pos);
108 static void *softnet_seq_start(struct seq_file *seq, loff_t *pos)
110 return softnet_get_online(pos);
113 static void *softnet_seq_next(struct seq_file *seq, void *v, loff_t *pos)
116 return softnet_get_online(pos);
119 static void softnet_seq_stop(struct seq_file *seq, void *v)
123 static int softnet_seq_show(struct seq_file *seq, void *v)
125 struct softnet_data *sd = v;
126 u32 input_qlen = softnet_input_pkt_queue_len(sd);
127 u32 process_qlen = softnet_process_queue_len(sd);
128 unsigned int flow_limit_count = 0;
130 #ifdef CONFIG_NET_FLOW_LIMIT
131 struct sd_flow_limit *fl;
134 fl = rcu_dereference(sd->flow_limit);
136 flow_limit_count = fl->count;
140 /* the index is the CPU id owing this sd. Since offline CPUs are not
141 * displayed, it would be othrwise not trivial for the user-space
142 * mapping the data a specific CPU
145 "%08x %08x %08x %08x %08x %08x %08x %08x %08x %08x %08x %08x %08x "
147 sd->processed, sd->dropped, sd->time_squeeze, 0,
148 0, 0, 0, 0, /* was fastroute */
149 0, /* was cpu_collision */
150 sd->received_rps, flow_limit_count,
151 input_qlen + process_qlen, (int)seq->index,
152 input_qlen, process_qlen);
156 static const struct seq_operations dev_seq_ops = {
157 .start = dev_seq_start,
158 .next = dev_seq_next,
159 .stop = dev_seq_stop,
160 .show = dev_seq_show,
163 static const struct seq_operations softnet_seq_ops = {
164 .start = softnet_seq_start,
165 .next = softnet_seq_next,
166 .stop = softnet_seq_stop,
167 .show = softnet_seq_show,
170 static void *ptype_get_idx(struct seq_file *seq, loff_t pos)
172 struct list_head *ptype_list = NULL;
173 struct packet_type *pt = NULL;
174 struct net_device *dev;
178 for_each_netdev_rcu(seq_file_net(seq), dev) {
179 ptype_list = &dev->ptype_all;
180 list_for_each_entry_rcu(pt, ptype_list, list) {
187 list_for_each_entry_rcu(pt, &net_hotdata.ptype_all, list) {
193 for (t = 0; t < PTYPE_HASH_SIZE; t++) {
194 list_for_each_entry_rcu(pt, &ptype_base[t], list) {
203 static void *ptype_seq_start(struct seq_file *seq, loff_t *pos)
207 return *pos ? ptype_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
210 static void *ptype_seq_next(struct seq_file *seq, void *v, loff_t *pos)
212 struct net_device *dev;
213 struct packet_type *pt;
214 struct list_head *nxt;
218 if (v == SEQ_START_TOKEN)
219 return ptype_get_idx(seq, 0);
224 if (nxt != &pt->dev->ptype_all)
228 for_each_netdev_continue_rcu(seq_file_net(seq), dev) {
229 if (!list_empty(&dev->ptype_all)) {
230 nxt = dev->ptype_all.next;
235 nxt = net_hotdata.ptype_all.next;
239 if (pt->type == htons(ETH_P_ALL)) {
241 if (nxt != &net_hotdata.ptype_all)
244 nxt = ptype_base[0].next;
246 hash = ntohs(pt->type) & PTYPE_HASH_MASK;
248 while (nxt == &ptype_base[hash]) {
249 if (++hash >= PTYPE_HASH_SIZE)
251 nxt = ptype_base[hash].next;
254 return list_entry(nxt, struct packet_type, list);
257 static void ptype_seq_stop(struct seq_file *seq, void *v)
263 static int ptype_seq_show(struct seq_file *seq, void *v)
265 struct packet_type *pt = v;
267 if (v == SEQ_START_TOKEN)
268 seq_puts(seq, "Type Device Function\n");
269 else if ((!pt->af_packet_net || net_eq(pt->af_packet_net, seq_file_net(seq))) &&
270 (!pt->dev || net_eq(dev_net(pt->dev), seq_file_net(seq)))) {
271 if (pt->type == htons(ETH_P_ALL))
272 seq_puts(seq, "ALL ");
274 seq_printf(seq, "%04x", ntohs(pt->type));
276 seq_printf(seq, " %-8s %ps\n",
277 pt->dev ? pt->dev->name : "", pt->func);
283 static const struct seq_operations ptype_seq_ops = {
284 .start = ptype_seq_start,
285 .next = ptype_seq_next,
286 .stop = ptype_seq_stop,
287 .show = ptype_seq_show,
290 static int __net_init dev_proc_net_init(struct net *net)
294 if (!proc_create_net("dev", 0444, net->proc_net, &dev_seq_ops,
295 sizeof(struct seq_net_private)))
297 if (!proc_create_seq("softnet_stat", 0444, net->proc_net,
300 if (!proc_create_net("ptype", 0444, net->proc_net, &ptype_seq_ops,
301 sizeof(struct seq_net_private)))
304 if (wext_proc_init(net))
310 remove_proc_entry("ptype", net->proc_net);
312 remove_proc_entry("softnet_stat", net->proc_net);
314 remove_proc_entry("dev", net->proc_net);
318 static void __net_exit dev_proc_net_exit(struct net *net)
322 remove_proc_entry("ptype", net->proc_net);
323 remove_proc_entry("softnet_stat", net->proc_net);
324 remove_proc_entry("dev", net->proc_net);
327 static struct pernet_operations __net_initdata dev_proc_ops = {
328 .init = dev_proc_net_init,
329 .exit = dev_proc_net_exit,
332 static int dev_mc_seq_show(struct seq_file *seq, void *v)
334 struct netdev_hw_addr *ha;
335 struct net_device *dev = v;
337 if (v == SEQ_START_TOKEN)
340 netif_addr_lock_bh(dev);
341 netdev_for_each_mc_addr(ha, dev) {
342 seq_printf(seq, "%-4d %-15s %-5d %-5d %*phN\n",
343 dev->ifindex, dev->name,
344 ha->refcount, ha->global_use,
345 (int)dev->addr_len, ha->addr);
347 netif_addr_unlock_bh(dev);
351 static const struct seq_operations dev_mc_seq_ops = {
352 .start = dev_seq_start,
353 .next = dev_seq_next,
354 .stop = dev_seq_stop,
355 .show = dev_mc_seq_show,
358 static int __net_init dev_mc_net_init(struct net *net)
360 if (!proc_create_net("dev_mcast", 0, net->proc_net, &dev_mc_seq_ops,
361 sizeof(struct seq_net_private)))
366 static void __net_exit dev_mc_net_exit(struct net *net)
368 remove_proc_entry("dev_mcast", net->proc_net);
371 static struct pernet_operations __net_initdata dev_mc_net_ops = {
372 .init = dev_mc_net_init,
373 .exit = dev_mc_net_exit,
376 int __init dev_proc_init(void)
378 int ret = register_pernet_subsys(&dev_proc_ops);
380 return register_pernet_subsys(&dev_mc_net_ops);