// SPDX-License-Identifier: GPL-2.0
#include <linux/netdevice.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <net/wext.h>
#include <net/hotdata.h>

#include "dev.h"
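
/* Return the first device in the current netns whose ifindex is >= *pos,
 * updating *pos to that ifindex. Shared iterator step for /proc/net/dev
 * and /proc/net/dev_mcast.
 */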
static void *dev_seq_from_index(struct seq_file *seq, loff_t *pos)
{
	unsigned long ifindex = *pos;
	struct net_device *dev;

	for_each_netdev_dump(seq_file_net(seq), dev, ifindex) {
		*pos = ifindex;
		return dev;
	}
	return NULL;
}

static void *dev_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(RCU)
{
	rcu_read_lock();
	if (!*pos)
		return SEQ_START_TOKEN;

	return dev_seq_from_index(seq, pos);
}

static void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	++*pos;
	return dev_seq_from_index(seq, pos);
}

static void dev_seq_stop(struct seq_file *seq, void *v)
	__releases(RCU)
{
	rcu_read_unlock();
}
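
/* Print one /proc/net/dev line for a device, folding related error
 * counters into the legacy 8 rx + 8 tx column layout.
 */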
static void dev_seq_printf_stats(struct seq_file *seq, struct net_device *dev)
{
	struct rtnl_link_stats64 temp;
	const struct rtnl_link_stats64 *stats = dev_get_stats(dev, &temp);

	seq_printf(seq, "%6s: %7llu %7llu %4llu %4llu %4llu %5llu %10llu %9llu "
		   "%8llu %7llu %4llu %4llu %4llu %5llu %7llu %10llu\n",
		   dev->name, stats->rx_bytes, stats->rx_packets,
		   stats->rx_errors,
		   stats->rx_dropped + stats->rx_missed_errors,
		   stats->rx_fifo_errors,
		   stats->rx_length_errors + stats->rx_over_errors +
		    stats->rx_crc_errors + stats->rx_frame_errors,
		   stats->rx_compressed, stats->multicast,
		   stats->tx_bytes, stats->tx_packets,
		   stats->tx_errors, stats->tx_dropped,
		   stats->tx_fifo_errors, stats->collisions,
		   stats->tx_carrier_errors +
		    stats->tx_aborted_errors +
		    stats->tx_window_errors +
		    stats->tx_heartbeat_errors,
		   stats->tx_compressed);
}

/*
 *	Called from the PROCfs module. This now uses the new arbitrary sized
 *	/proc/net interface to create /proc/net/dev
 */
static int dev_seq_show(struct seq_file *seq, void *v)
{
	if (v == SEQ_START_TOKEN)
		seq_puts(seq, "Inter-|   Receive                            "
			      "                    |  Transmit\n"
			      " face |bytes    packets errs drop fifo frame "
			      "compressed multicast|bytes    packets errs "
			      "drop fifo colls carrier compressed\n");
	else
		dev_seq_printf_stats(seq, v);
	return 0;
}
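
/* Lockless snapshots of the per-CPU backlog queue lengths; the values may
 * be slightly stale, but are safe to read without taking the queue locks.
 */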
static u32 softnet_input_pkt_queue_len(struct softnet_data *sd)
{
	return skb_queue_len_lockless(&sd->input_pkt_queue);
}

static u32 softnet_process_queue_len(struct softnet_data *sd)
{
	return skb_queue_len_lockless(&sd->process_queue);
}
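
/* Return the softnet_data of the first online CPU with id >= *pos,
 * advancing *pos past offline CPUs; NULL once all CPUs are exhausted.
 */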
static struct softnet_data *softnet_get_online(loff_t *pos)
{
	struct softnet_data *sd = NULL;

	while (*pos < nr_cpu_ids)
		if (cpu_online(*pos)) {
			sd = &per_cpu(softnet_data, *pos);
			break;
		} else
			++*pos;
	return sd;
}

static void *softnet_seq_start(struct seq_file *seq, loff_t *pos)
{
	return softnet_get_online(pos);
}

static void *softnet_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	++*pos;
	return softnet_get_online(pos);
}

static void softnet_seq_stop(struct seq_file *seq, void *v)
{
}
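
/* Emit one /proc/net/softnet_stat row per online CPU: processed and dropped
 * packet counts, time_squeeze, RPS and flow-limit counters, the owning CPU
 * id and the current backlog queue lengths. Obsolete fields are kept as
 * zeroes to preserve the column layout.
 */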
static int softnet_seq_show(struct seq_file *seq, void *v)
{
	struct softnet_data *sd = v;
	u32 input_qlen = softnet_input_pkt_queue_len(sd);
	u32 process_qlen = softnet_process_queue_len(sd);
	unsigned int flow_limit_count = 0;

#ifdef CONFIG_NET_FLOW_LIMIT
	struct sd_flow_limit *fl;

	rcu_read_lock();
	fl = rcu_dereference(sd->flow_limit);
	if (fl)
		flow_limit_count = fl->count;
	rcu_read_unlock();
#endif

	/* the index is the CPU id owning this sd. Since offline CPUs are not
	 * displayed, it would otherwise not be trivial for user-space to map
	 * the data to a specific CPU.
	 */
	seq_printf(seq,
		   "%08x %08x %08x %08x %08x %08x %08x %08x %08x %08x %08x %08x %08x "
		   "%08x %08x\n",
		   sd->processed, atomic_read(&sd->dropped),
		   sd->time_squeeze, 0,
		   0, 0, 0, 0, /* was fastroute */
		   0,	/* was cpu_collision */
		   sd->received_rps, flow_limit_count,
		   input_qlen + process_qlen, (int)seq->index,
		   input_qlen, process_qlen);
	return 0;
}

static const struct seq_operations dev_seq_ops = {
	.start = dev_seq_start,
	.next  = dev_seq_next,
	.stop  = dev_seq_stop,
	.show  = dev_seq_show,
};

static const struct seq_operations softnet_seq_ops = {
	.start = softnet_seq_start,
	.next  = softnet_seq_next,
	.stop  = softnet_seq_stop,
	.show  = softnet_seq_show,
};
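
/* Walk every registered packet_type in a stable order: first the per-device
 * ptype_all lists, then the global net_hotdata.ptype_all list, finally the
 * ptype_base hash buckets; return the entry at index @pos.
 */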
static void *ptype_get_idx(struct seq_file *seq, loff_t pos)
{
	struct list_head *ptype_list = NULL;
	struct packet_type *pt = NULL;
	struct net_device *dev;
	loff_t i = 0;
	int t;

	for_each_netdev_rcu(seq_file_net(seq), dev) {
		ptype_list = &dev->ptype_all;
		list_for_each_entry_rcu(pt, ptype_list, list) {
			if (i == pos)
				return pt;
			++i;
		}
	}

	list_for_each_entry_rcu(pt, &net_hotdata.ptype_all, list) {
		if (i == pos)
			return pt;
		++i;
	}

	for (t = 0; t < PTYPE_HASH_SIZE; t++) {
		list_for_each_entry_rcu(pt, &ptype_base[t], list) {
			if (i == pos)
				return pt;
			++i;
		}
	}
	return NULL;
}

static void *ptype_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(RCU)
{
	rcu_read_lock();
	return *pos ? ptype_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
}
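
/* Advance from @v to the next packet_type, following the same order as
 * ptype_get_idx(): the current device's ptype_all list, the remaining
 * devices, the global ptype_all list, then the ptype_base hash buckets.
 */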
static void *ptype_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct net_device *dev;
	struct packet_type *pt;
	struct list_head *nxt;
	int hash;

	++*pos;
	if (v == SEQ_START_TOKEN)
		return ptype_get_idx(seq, 0);

	pt = v;
	nxt = pt->list.next;
	if (pt->dev) {
		if (nxt != &pt->dev->ptype_all)
			goto found;

		dev = pt->dev;
		for_each_netdev_continue_rcu(seq_file_net(seq), dev) {
			if (!list_empty(&dev->ptype_all)) {
				nxt = dev->ptype_all.next;
				goto found;
			}
		}

		nxt = net_hotdata.ptype_all.next;
		goto ptype_all;
	}

	if (pt->type == htons(ETH_P_ALL)) {
ptype_all:
		if (nxt != &net_hotdata.ptype_all)
			goto found;
		hash = 0;
		nxt = ptype_base[0].next;
	} else
		hash = ntohs(pt->type) & PTYPE_HASH_MASK;

	while (nxt == &ptype_base[hash]) {
		if (++hash >= PTYPE_HASH_SIZE)
			return NULL;
		nxt = ptype_base[hash].next;
	}
found:
	return list_entry(nxt, struct packet_type, list);
}

static void ptype_seq_stop(struct seq_file *seq, void *v)
	__releases(RCU)
{
	rcu_read_unlock();
}

static int ptype_seq_show(struct seq_file *seq, void *v)
{
	struct packet_type *pt = v;

	if (v == SEQ_START_TOKEN)
		seq_puts(seq, "Type Device      Function\n");
	else if ((!pt->af_packet_net || net_eq(pt->af_packet_net, seq_file_net(seq))) &&
		 (!pt->dev || net_eq(dev_net(pt->dev), seq_file_net(seq)))) {
		if (pt->type == htons(ETH_P_ALL))
			seq_puts(seq, "ALL ");
		else
			seq_printf(seq, "%04x", ntohs(pt->type));

		seq_printf(seq, " %-8s %ps\n",
			   pt->dev ? pt->dev->name : "", pt->func);
	}

	return 0;
}

static const struct seq_operations ptype_seq_ops = {
	.start = ptype_seq_start,
	.next  = ptype_seq_next,
	.stop  = ptype_seq_stop,
	.show  = ptype_seq_show,
};
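
/* Create the per-netns /proc/net/dev, /proc/net/softnet_stat and
 * /proc/net/ptype entries (plus the wireless extensions entry), unwinding
 * the already-created ones on failure.
 */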
static int __net_init dev_proc_net_init(struct net *net)
{
	int rc = -ENOMEM;

	if (!proc_create_net("dev", 0444, net->proc_net, &dev_seq_ops,
			     sizeof(struct seq_net_private)))
		goto out;
	if (!proc_create_seq("softnet_stat", 0444, net->proc_net,
			     &softnet_seq_ops))
		goto out_dev;
	if (!proc_create_net("ptype", 0444, net->proc_net, &ptype_seq_ops,
			     sizeof(struct seq_net_private)))
		goto out_softnet;

	if (wext_proc_init(net))
		goto out_ptype;
	rc = 0;
out:
	return rc;
out_ptype:
	remove_proc_entry("ptype", net->proc_net);
out_softnet:
	remove_proc_entry("softnet_stat", net->proc_net);
out_dev:
	remove_proc_entry("dev", net->proc_net);
	goto out;
}

static void __net_exit dev_proc_net_exit(struct net *net)
{
	wext_proc_exit(net);

	remove_proc_entry("ptype", net->proc_net);
	remove_proc_entry("softnet_stat", net->proc_net);
	remove_proc_entry("dev", net->proc_net);
}

static struct pernet_operations __net_initdata dev_proc_ops = {
	.init = dev_proc_net_init,
	.exit = dev_proc_net_exit,
};
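
/* One /proc/net/dev_mcast line per multicast address subscribed on a device:
 * ifindex, name, reference count, global use flag and the address in hex.
 */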
static int dev_mc_seq_show(struct seq_file *seq, void *v)
{
	struct netdev_hw_addr *ha;
	struct net_device *dev = v;

	if (v == SEQ_START_TOKEN)
		return 0;

	netif_addr_lock_bh(dev);
	netdev_for_each_mc_addr(ha, dev) {
		seq_printf(seq, "%-4d %-15s %-5d %-5d %*phN\n",
			   dev->ifindex, dev->name,
			   ha->refcount, ha->global_use,
			   (int)dev->addr_len, ha->addr);
	}
	netif_addr_unlock_bh(dev);
	return 0;
}

static const struct seq_operations dev_mc_seq_ops = {
	.start = dev_seq_start,
	.next  = dev_seq_next,
	.stop  = dev_seq_stop,
	.show  = dev_mc_seq_show,
};

static int __net_init dev_mc_net_init(struct net *net)
{
	if (!proc_create_net("dev_mcast", 0, net->proc_net, &dev_mc_seq_ops,
			     sizeof(struct seq_net_private)))
		return -ENOMEM;
	return 0;
}

static void __net_exit dev_mc_net_exit(struct net *net)
{
	remove_proc_entry("dev_mcast", net->proc_net);
}

static struct pernet_operations __net_initdata dev_mc_net_ops = {
	.init = dev_mc_net_init,
	.exit = dev_mc_net_exit,
};
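
/* Register both per-netns pernet_operations at boot; the second is only
 * attempted if the first succeeded.
 */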
int __init dev_proc_init(void)
{
	int ret = register_pernet_subsys(&dev_proc_ops);
	if (!ret)
		return register_pernet_subsys(&dev_mc_net_ops);

	return ret;
}