/* net/sched/sch_teql.c	"True" (or "trivial") link equalizer.
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/if_arp.h>
#include <linux/netdevice.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/moduleparam.h>
#include <net/dst.h>
#include <net/neighbour.h>
#include <net/pkt_sched.h>

/*
   How to setup it.
   ----------------

   After loading this module you will find a new device teqlN
   and a new qdisc with the same name. To join a slave to the equalizer
   you should just set this qdisc on a device, e.g.:

   # tc qdisc add dev eth0 root teql0
   # tc qdisc add dev eth1 root teql0

   That's all. Full PnP 8)
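
   To actually carry traffic you would typically also bring the master
   device up and give it an address (the address below is only a
   placeholder), e.g.:

   # ip link set dev teql0 up
   # ip addr add 10.0.0.1/24 dev teql0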

   Applicability.
   --------------

   1. Slave devices MUST be active devices, i.e., they must raise the tbusy
      signal and generate EOI events. If you want to equalize virtual devices
      like tunnels, use a normal eql device.
   2. This device puts no limitations on physical slave characteristics,
      e.g. it will equalize a 9600 baud line and 100Mb ethernet perfectly :-)
      Certainly, a large difference in link speeds will make the resulting
      equalized link unusable, because of huge packet reordering.
      I estimate an upper useful difference as ~10 times.
   3. If the slave requires address resolution, only protocols using
      the neighbour cache (IPv4/IPv6) will work over the equalized link.
      Other protocols are still allowed to use the slave device directly,
      which will not break load balancing, though native slave
      traffic will have the highest priority.  */
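
/* One "teqlN" master device.  Its private area also holds the Qdisc_ops
 * registered under the same name, so each master has a matching qdisc type.
 * "slaves" points into a circular list of slave qdiscs, linked through
 * teql_sched_data.next. */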
struct teql_master
{
	struct Qdisc_ops qops;
	struct net_device *dev;
	struct Qdisc *slaves;
	struct list_head master_list;
	unsigned long	tx_bytes;
	unsigned long	tx_packets;
	unsigned long	tx_errors;
	unsigned long	tx_dropped;
};

/* Per-slave qdisc state; "next" links the master's circular slave list. */
struct teql_sched_data
{
	struct Qdisc *next;
	struct teql_master *m;
	struct neighbour *ncache;
	struct sk_buff_head q;
};

/* NEXT_SLAVE() walks the master's circular list of slave qdiscs. */
#define NEXT_SLAVE(q) (((struct teql_sched_data *)qdisc_priv(q))->next)

/* Link-type flags mirrored from the slaves onto the master device. */
#define FMASK (IFF_BROADCAST|IFF_POINTOPOINT)

/* "teql*" qdisc routines */
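
/* Queue a packet on this slave's private queue; once the slave device's
 * tx_queue_len is reached the packet is dropped. */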
static int
teql_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct net_device *dev = qdisc_dev(sch);
	struct teql_sched_data *q = qdisc_priv(sch);

	if (q->q.qlen < dev->tx_queue_len) {
		__skb_queue_tail(&q->q, skb);
		qdisc_bstats_update(sch, skb);
		return NET_XMIT_SUCCESS;
	}

	kfree_skb(skb);
	sch->qstats.drops++;
	return NET_XMIT_DROP;
}
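
/* Pull the next packet off this slave's private queue.  When the queue is
 * empty, record this slave as the next one the master should serve and wake
 * the master device. */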
static struct sk_buff *
teql_dequeue(struct Qdisc *sch)
{
	struct teql_sched_data *dat = qdisc_priv(sch);
	struct netdev_queue *dat_queue;
	struct sk_buff *skb;

	skb = __skb_dequeue(&dat->q);
	dat_queue = netdev_get_tx_queue(dat->m->dev, 0);
	if (skb == NULL) {
		struct net_device *m = qdisc_dev(dat_queue->qdisc);
		if (m) {
			dat->m->slaves = sch;
			netif_wake_queue(m);
		}
	}
	sch->q.qlen = dat->q.qlen + dat_queue->qdisc->q.qlen;
	return skb;
}

static struct sk_buff *
teql_peek(struct Qdisc *sch)
{
	/* teql is meant to be used as root qdisc */
	return NULL;
}

static __inline__ void
teql_neigh_release(struct neighbour *n)
{
	if (n)
		neigh_release(n);
}

static void
teql_reset(struct Qdisc *sch)
{
	struct teql_sched_data *dat = qdisc_priv(sch);

	skb_queue_purge(&dat->q);
	sch->q.qlen = 0;
	teql_neigh_release(xchg(&dat->ncache, NULL));
}
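
/* Detach this slave qdisc from the master's circular list.  If it was the
 * last slave, clear the master's slave pointer and reset the master's own
 * root qdisc under its qdisc lock. */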
static void
teql_destroy(struct Qdisc *sch)
{
	struct Qdisc *q, *prev;
	struct teql_sched_data *dat = qdisc_priv(sch);
	struct teql_master *master = dat->m;

	if ((prev = master->slaves) != NULL) {
		do {
			q = NEXT_SLAVE(prev);
			if (q == sch) {
				NEXT_SLAVE(prev) = NEXT_SLAVE(q);
				if (q == master->slaves) {
					master->slaves = NEXT_SLAVE(q);
					if (q == master->slaves) {
						struct netdev_queue *txq;
						spinlock_t *root_lock;

						txq = netdev_get_tx_queue(master->dev, 0);
						master->slaves = NULL;

						root_lock = qdisc_root_sleeping_lock(txq->qdisc);
						spin_lock_bh(root_lock);
						qdisc_reset(txq->qdisc);
						spin_unlock_bh(root_lock);
					}
				}
				skb_queue_purge(&dat->q);
				teql_neigh_release(xchg(&dat->ncache, NULL));
				break;
			}
		} while ((prev = q) != master->slaves);
	}
}
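
/* Called when "tc qdisc add dev <slave> root teqlN" attaches this qdisc to a
 * device: the device becomes a slave of master N.  The checks below keep the
 * master's flags and MTU consistent with the least capable slave. */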
static int teql_qdisc_init(struct Qdisc *sch, struct nlattr *opt)
{
	struct net_device *dev = qdisc_dev(sch);
	struct teql_master *m = (struct teql_master *)sch->ops;
	struct teql_sched_data *q = qdisc_priv(sch);

	if (dev->hard_header_len > m->dev->hard_header_len)
		return -EINVAL;

	if (m->dev == dev)
		return -ELOOP;

	q->m = m;

	skb_queue_head_init(&q->q);

	if (m->slaves) {
		if (m->dev->flags & IFF_UP) {
			if ((m->dev->flags & IFF_POINTOPOINT &&
			     !(dev->flags & IFF_POINTOPOINT)) ||
			    (m->dev->flags & IFF_BROADCAST &&
			     !(dev->flags & IFF_BROADCAST)) ||
			    (m->dev->flags & IFF_MULTICAST &&
			     !(dev->flags & IFF_MULTICAST)) ||
			    dev->mtu < m->dev->mtu)
				return -EINVAL;
		} else {
			if (!(dev->flags & IFF_POINTOPOINT))
				m->dev->flags &= ~IFF_POINTOPOINT;
			if (!(dev->flags & IFF_BROADCAST))
				m->dev->flags &= ~IFF_BROADCAST;
			if (!(dev->flags & IFF_MULTICAST))
				m->dev->flags &= ~IFF_MULTICAST;
			if (dev->mtu < m->dev->mtu)
				m->dev->mtu = dev->mtu;
		}
		q->next = NEXT_SLAVE(m->slaves);
		NEXT_SLAVE(m->slaves) = sch;
	} else {
		q->next = sch;
		m->slaves = sch;
		m->dev->mtu = dev->mtu;
		m->dev->flags = (m->dev->flags & ~FMASK) | (dev->flags & FMASK);
	}
	return 0;
}
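
/* Build the link-layer header for the slave that was picked for this packet,
 * using the neighbour entry of the packet's destination.  The resolved
 * neighbour is cached in q->ncache so subsequent packets can reuse it. */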
static int
__teql_resolve(struct sk_buff *skb, struct sk_buff *skb_res, struct net_device *dev)
{
	struct netdev_queue *dev_queue = netdev_get_tx_queue(dev, 0);
	struct teql_sched_data *q = qdisc_priv(dev_queue->qdisc);
	struct neighbour *mn = skb_dst(skb)->neighbour;
	struct neighbour *n = q->ncache;

	if (n && n->tbl == mn->tbl &&
	    memcmp(n->primary_key, mn->primary_key, mn->tbl->key_len) == 0) {
		atomic_inc(&n->refcnt);
	} else {
		n = __neigh_lookup_errno(mn->tbl, mn->primary_key, dev);
		if (IS_ERR(n))
			return PTR_ERR(n);
	}
	if (neigh_event_send(n, skb_res) == 0) {
		int err;
		char haddr[MAX_ADDR_LEN];

		neigh_ha_snapshot(haddr, n, dev);
		err = dev_hard_header(skb, dev, ntohs(skb->protocol), haddr,
				      NULL, skb->len);
		if (err < 0) {
			neigh_release(n);
			return -EINVAL;
		}
		teql_neigh_release(xchg(&q->ncache, n));
		return 0;
	}
	neigh_release(n);
	return (skb_res == NULL) ? -EAGAIN : 1;
}
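
/* Resolution is skipped entirely when the slave has no header_ops or the
 * packet carries no destination/neighbour; in that case the packet can be
 * sent as-is.  A slave whose root qdisc is still noop_qdisc is not usable. */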
static inline int teql_resolve(struct sk_buff *skb,
			       struct sk_buff *skb_res, struct net_device *dev)
{
	struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);

	if (txq->qdisc == &noop_qdisc)
		return -ENODEV;

	if (dev->header_ops == NULL ||
	    skb_dst(skb) == NULL ||
	    skb_dst(skb)->neighbour == NULL)
		return 0;
	return __teql_resolve(skb, skb_res, dev);
}
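
/* Transmit on the master device: walk the circular slave list round-robin,
 * starting after the slave used last, and hand the packet to the first slave
 * that is running and can take it.  If resolution is pending on every slave,
 * retry once with skb_res set so the packet can be queued for neighbour
 * resolution; if all slaves are busy, stop the master queue. */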
static netdev_tx_t teql_master_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct teql_master *master = netdev_priv(dev);
	struct Qdisc *start, *q;
	int busy;
	int nores;
	int subq = skb_get_queue_mapping(skb);
	struct sk_buff *skb_res = NULL;

	start = master->slaves;

restart:
	nores = 0;
	busy = 0;

	if ((q = start) == NULL)
		goto drop;

	do {
		struct net_device *slave = qdisc_dev(q);
		struct netdev_queue *slave_txq = netdev_get_tx_queue(slave, 0);
		const struct net_device_ops *slave_ops = slave->netdev_ops;

		if (slave_txq->qdisc_sleeping != q)
			continue;
		if (__netif_subqueue_stopped(slave, subq) ||
		    !netif_running(slave)) {
			busy = 1;
			continue;
		}

		switch (teql_resolve(skb, skb_res, slave)) {
		case 0:
			if (__netif_tx_trylock(slave_txq)) {
				unsigned int length = qdisc_pkt_len(skb);

				if (!netif_tx_queue_frozen_or_stopped(slave_txq) &&
				    slave_ops->ndo_start_xmit(skb, slave) == NETDEV_TX_OK) {
					txq_trans_update(slave_txq);
					__netif_tx_unlock(slave_txq);
					master->slaves = NEXT_SLAVE(q);
					netif_wake_queue(dev);
					master->tx_packets++;
					master->tx_bytes += length;
					return NETDEV_TX_OK;
				}
				__netif_tx_unlock(slave_txq);
			}
			if (netif_queue_stopped(dev))
				busy = 1;
			break;
		case 1:
			master->slaves = NEXT_SLAVE(q);
			return NETDEV_TX_OK;
		default:
			nores = 1;
			break;
		}
		__skb_pull(skb, skb_network_offset(skb));
	} while ((q = NEXT_SLAVE(q)) != start);

	if (nores && skb_res == NULL) {
		skb_res = skb;
		goto restart;
	}

	if (busy) {
		netif_stop_queue(dev);
		return NETDEV_TX_BUSY;
	}
	master->tx_errors++;

drop:
	master->tx_dropped++;
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}
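
/* Bringing the master up: derive its MTU and link-type flags from the
 * current set of slaves (smallest slave MTU wins, and a flag survives only
 * if every slave has it). */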
static int teql_master_open(struct net_device *dev)
{
	struct Qdisc *q;
	struct teql_master *m = netdev_priv(dev);
	int mtu = 0xFFFE;
	unsigned flags = IFF_NOARP|IFF_MULTICAST;

	if (m->slaves == NULL)
		return -EUNATCH;

	flags = FMASK;

	q = m->slaves;
	do {
		struct net_device *slave = qdisc_dev(q);

		if (slave == NULL)
			return -EUNATCH;

		if (slave->mtu < mtu)
			mtu = slave->mtu;
		if (slave->hard_header_len > LL_MAX_HEADER)
			return -EINVAL;

		/* If all the slaves are BROADCAST, master is BROADCAST
		   If all the slaves are PtP, master is PtP
		   Otherwise, master is NBMA.
		 */
		if (!(slave->flags & IFF_POINTOPOINT))
			flags &= ~IFF_POINTOPOINT;
		if (!(slave->flags & IFF_BROADCAST))
			flags &= ~IFF_BROADCAST;
		if (!(slave->flags & IFF_MULTICAST))
			flags &= ~IFF_MULTICAST;
	} while ((q = NEXT_SLAVE(q)) != m->slaves);

	m->dev->mtu = mtu;
	m->dev->flags = (m->dev->flags & ~FMASK) | flags;
	netif_start_queue(m->dev);
	return 0;
}

static int teql_master_close(struct net_device *dev)
{
	netif_stop_queue(dev);
	return 0;
}

static struct rtnl_link_stats64 *teql_master_stats64(struct net_device *dev,
						     struct rtnl_link_stats64 *stats)
{
	struct teql_master *m = netdev_priv(dev);

	stats->tx_packets	= m->tx_packets;
	stats->tx_bytes		= m->tx_bytes;
	stats->tx_errors	= m->tx_errors;
	stats->tx_dropped	= m->tx_dropped;
	return stats;
}
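
/* The master's MTU can never exceed the MTU of any slave. */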
static int teql_master_mtu(struct net_device *dev, int new_mtu)
{
	struct teql_master *m = netdev_priv(dev);
	struct Qdisc *q = m->slaves;

	if (q) {
		do {
			if (new_mtu > qdisc_dev(q)->mtu)
				return -EINVAL;
		} while ((q = NEXT_SLAVE(q)) != m->slaves);
	}
	dev->mtu = new_mtu;
	return 0;
}

static const struct net_device_ops teql_netdev_ops = {
	.ndo_open	= teql_master_open,
	.ndo_stop	= teql_master_close,
	.ndo_start_xmit	= teql_master_xmit,
	.ndo_get_stats64 = teql_master_stats64,
	.ndo_change_mtu	= teql_master_mtu,
};
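
/* Initialise one master device and the Qdisc_ops that will later be
 * registered under the device's own name ("teql0", "teql1", ...), so every
 * master has a qdisc type of the same name. */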
static __init void teql_master_setup(struct net_device *dev)
{
	struct teql_master *master = netdev_priv(dev);
	struct Qdisc_ops *ops = &master->qops;

	master->dev		= dev;
	ops->priv_size		= sizeof(struct teql_sched_data);

	ops->enqueue		= teql_enqueue;
	ops->dequeue		= teql_dequeue;
	ops->peek		= teql_peek;
	ops->init		= teql_qdisc_init;
	ops->reset		= teql_reset;
	ops->destroy		= teql_destroy;
	ops->owner		= THIS_MODULE;

	dev->netdev_ops		= &teql_netdev_ops;
	dev->type		= ARPHRD_VOID;
	dev->tx_queue_len	= 100;
	dev->flags		= IFF_NOARP;
	dev->hard_header_len	= LL_MAX_HEADER;
	dev->priv_flags		&= ~IFF_XMIT_DST_RELEASE;
}
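
/* The number of master devices created at module load time is controlled by
 * the "max_equalizers" parameter, e.g.:
 *
 *	# modprobe sch_teql max_equalizers=2
 */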
static LIST_HEAD(master_dev_list);
static int max_equalizers = 1;
module_param(max_equalizers, int, 0);
MODULE_PARM_DESC(max_equalizers, "Max number of link equalizers");

static int __init teql_init(void)
{
	int i;
	int err = -ENODEV;

	for (i = 0; i < max_equalizers; i++) {
		struct net_device *dev;
		struct teql_master *master;

		dev = alloc_netdev(sizeof(struct teql_master),
				   "teql%d", teql_master_setup);
		if (!dev) {
			err = -ENOMEM;
			break;
		}

		if ((err = register_netdev(dev))) {
			free_netdev(dev);
			break;
		}

		master = netdev_priv(dev);

		strlcpy(master->qops.id, dev->name, IFNAMSIZ);
		err = register_qdisc(&master->qops);
		if (err) {
			unregister_netdev(dev);
			free_netdev(dev);
			break;
		}

		list_add_tail(&master->master_list, &master_dev_list);
	}
	return i ? 0 : err;
}

static void __exit teql_exit(void)
{
	struct teql_master *master, *nxt;

	list_for_each_entry_safe(master, nxt, &master_dev_list, master_list) {
		list_del(&master->master_list);

		unregister_qdisc(&master->qops);
		unregister_netdev(master->dev);
		free_netdev(master->dev);
	}
}

module_init(teql_init);
module_exit(teql_exit);

MODULE_LICENSE("GPL");