/*
 * Equalizer Load-balancer for serial network interfaces.
 *
 * (c) Copyright 1995 Simon "Guru Aleph-Null" Janes
 * NCM: Network and Communications Management, Inc.
 *
 * (c) Copyright 2002 David S. Miller ([email protected])
 *
 * This software may be used and distributed according to the terms
 * of the GNU General Public License, incorporated herein by reference.
 *
 * The author may be reached as [email protected], or C/O
 *    NCM
 *    Attn: Simon Janes
 *    6803 Whittier Ave
 *    McLean VA 22101
 *    Phone: 1-703-847-0040 ext 103
 */

/*
 * Sources:
 *   skeleton.c by Donald Becker.
 * Inspirations:
 *   The Harried and Overworked Alan Cox
 * Conspiracies:
 *   The Alan Cox and Mike McLagan plot to get someone else to do the code,
 *   which turned out to be me.
 */

/*
 * $Log: eql.c,v $
 * Revision 1.2  1996/04/11 17:51:52  guru
 * Added one-line eql_remove_slave patch.
 *
 * Revision 1.1  1996/04/11 17:44:17  guru
 * Initial revision
 *
 * Revision 3.13  1996/01/21 15:17:18  alan
 * tx_queue_len changes.
 * reformatted.
 *
 * Revision 3.12  1995/03/22 21:07:51  anarchy
 * Added capable() checks on configuration.
 * Moved header file.
 *
 * Revision 3.11  1995/01/19 23:14:31  guru
 *	slave_load = (ULONG_MAX - (ULONG_MAX / 2)) -
 *	(priority_Bps) + bytes_queued * 8;
 *
 * Revision 3.10  1995/01/19 23:07:53  guru
 * back to
 *	slave_load = (ULONG_MAX - (ULONG_MAX / 2)) -
 *	(priority_Bps) + bytes_queued;
 *
 * Revision 3.9  1995/01/19 22:38:20  guru
 *	slave_load = (ULONG_MAX - (ULONG_MAX / 2)) -
 *	(priority_Bps) + bytes_queued * 4;
 *
 * Revision 3.8  1995/01/19 22:30:55  guru
 *	slave_load = (ULONG_MAX - (ULONG_MAX / 2)) -
 *	(priority_Bps) + bytes_queued * 2;
 *
 * Revision 3.7  1995/01/19 21:52:35  guru
 * printk's trimmed out.
 *
 * Revision 3.6  1995/01/19 21:49:56  guru
 * This is working pretty well. I gained 1 K/s in speed.. now it's just
 * robustness and printk's to be diked out.
 *
 * Revision 3.5  1995/01/18 22:29:59  guru
 * still crashes the kernel when the lock_wait thing is woken up.
 *
 * Revision 3.4  1995/01/18 21:59:47  guru
 * Broken set-bit locking snapshot
 *
 * Revision 3.3  1995/01/17 22:09:18  guru
 * infinite sleep in a lock somewhere..
 *
 * Revision 3.2  1995/01/15 16:46:06  guru
 * Log trimmed of non-pertinent 1.x branch messages
 *
 * Revision 3.1  1995/01/15 14:41:45  guru
 * New Scheduler and timer stuff...
 *
 * Revision 1.15  1995/01/15 14:29:02  guru
 * Will make 1.14 (now 1.15) the 3.0 branch, and the 1.12 the 2.0 branch, the one
 * with the dumber scheduler
 *
 * Revision 1.14  1995/01/15 02:37:08  guru
 * shock.. the kept-new-versions could have zonked working
 * stuff.. shudder
 *
 * Revision 1.13  1995/01/15 02:36:31  guru
 * big changes
 *
 *	scheduler was torn out and replaced with something smarter
 *
 *	global names not prefixed with eql_ were renamed to protect
 *	against namespace collisions
 *
 *	a few more abstract interfaces were added to facilitate any
 *	potential change of datastructure.  the driver is still using
 *	a linked list of slaves.  going to a heap would be a bit of
 *	an overkill.
 *
 *	this compiles fine with no warnings.
 *
 *	the locking mechanism and timer stuff must be written however,
 *	this version will not work otherwise
 *
 * Sorry, I had to rewrite most of this for 2.5.x -DaveM
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/compat.h>
#include <linux/capability.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/netdevice.h>
#include <net/net_namespace.h>

#include <linux/if.h>
#include <linux/if_arp.h>
#include <linux/if_eql.h>
#include <linux/pkt_sched.h>

#include <linux/uaccess.h>

static int eql_open(struct net_device *dev);
static int eql_close(struct net_device *dev);
static int eql_siocdevprivate(struct net_device *dev, struct ifreq *ifr,
			      void __user *data, int cmd);
static netdev_tx_t eql_slave_xmit(struct sk_buff *skb, struct net_device *dev);

#define eql_is_slave(dev)	((dev->flags & IFF_SLAVE) == IFF_SLAVE)
#define eql_is_master(dev)	((dev->flags & IFF_MASTER) == IFF_MASTER)

static void eql_kill_one_slave(slave_queue_t *queue, slave_t *slave);

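/* Periodic housekeeping, re-armed every EQL_DEFAULT_RESCHED_IVAL jiffies:
 * drain priority_Bps bytes of "debt" from each live slave's bytes_queued
 * counter (never below zero) and reap slaves whose device is no longer up.
 */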
static void eql_timer(struct timer_list *t)
{
	equalizer_t *eql = from_timer(eql, t, timer);
	struct list_head *this, *tmp, *head;

	spin_lock(&eql->queue.lock);
	head = &eql->queue.all_slaves;
	list_for_each_safe(this, tmp, head) {
		slave_t *slave = list_entry(this, slave_t, list);

		if ((slave->dev->flags & IFF_UP) == IFF_UP) {
			slave->bytes_queued -= slave->priority_Bps;
			if (slave->bytes_queued < 0)
				slave->bytes_queued = 0;
		} else {
			eql_kill_one_slave(&eql->queue, slave);
		}

	}
	spin_unlock(&eql->queue.lock);

	eql->timer.expires = jiffies + EQL_DEFAULT_RESCHED_IVAL;
	add_timer(&eql->timer);
}

static const char version[] __initconst =
	"Equalizer2002: Simon Janes ([email protected]) and David S. Miller ([email protected])";

static const struct net_device_ops eql_netdev_ops = {
	.ndo_open		= eql_open,
	.ndo_stop		= eql_close,
	.ndo_siocdevprivate	= eql_siocdevprivate,
	.ndo_start_xmit		= eql_slave_xmit,
};

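/* Set up the eql master device: a SLIP-type IFF_MASTER interface with the
 * default 576-byte MTU and a deliberately short tx queue, since packets are
 * handed off to the slave devices almost immediately.
 */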
static void __init eql_setup(struct net_device *dev)
{
	equalizer_t *eql = netdev_priv(dev);

	timer_setup(&eql->timer, eql_timer, 0);
	eql->timer.expires	= jiffies + EQL_DEFAULT_RESCHED_IVAL;

	spin_lock_init(&eql->queue.lock);
	INIT_LIST_HEAD(&eql->queue.all_slaves);
	eql->queue.master_dev	= dev;

	dev->netdev_ops		= &eql_netdev_ops;

	/*
	 * Now we undo some of the things that eth_setup does
	 * that we don't like
	 */

	dev->mtu		= EQL_DEFAULT_MTU;	/* set to 576 in if_eql.h */
	dev->flags		= IFF_MASTER;

	dev->type		= ARPHRD_SLIP;
	dev->tx_queue_len	= 5;			/* Hands them off fast */
	netif_keep_dst(dev);
}

static int eql_open(struct net_device *dev)
{
	equalizer_t *eql = netdev_priv(dev);

	/* XXX We should force this off automatically for the user. */
	netdev_info(dev,
		    "remember to turn off Van-Jacobson compression on your slave devices\n");

	BUG_ON(!list_empty(&eql->queue.all_slaves));

	eql->min_slaves = 1;
	eql->max_slaves = EQL_DEFAULT_MAX_SLAVES; /* 4 usually... */

	add_timer(&eql->timer);

	return 0;
}

static void eql_kill_one_slave(slave_queue_t *queue, slave_t *slave)
{
	list_del(&slave->list);
	queue->num_slaves--;
	slave->dev->flags &= ~IFF_SLAVE;
	netdev_put(slave->dev, &slave->dev_tracker);
	kfree(slave);
}

static void eql_kill_slave_queue(slave_queue_t *queue)
{
	struct list_head *head, *tmp, *this;

	spin_lock_bh(&queue->lock);

	head = &queue->all_slaves;
	list_for_each_safe(this, tmp, head) {
		slave_t *s = list_entry(this, slave_t, list);

		eql_kill_one_slave(queue, s);
	}

	spin_unlock_bh(&queue->lock);
}

static int eql_close(struct net_device *dev)
{
	equalizer_t *eql = netdev_priv(dev);

	/*
	 *	The timer has to be stopped first before we start hacking away
	 *	at the data structure it scans every so often...
	 */

	del_timer_sync(&eql->timer);

	eql_kill_slave_queue(&eql->queue);

	return 0;
}

static int eql_enslave(struct net_device *dev, slaving_request_t __user *srq);
static int eql_emancipate(struct net_device *dev, slaving_request_t __user *srq);

static int eql_g_slave_cfg(struct net_device *dev, slave_config_t __user *sc);
static int eql_s_slave_cfg(struct net_device *dev, slave_config_t __user *sc);

static int eql_g_master_cfg(struct net_device *dev, master_config_t __user *mc);
static int eql_s_master_cfg(struct net_device *dev, master_config_t __user *mc);

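/* ndo_siocdevprivate entry point: dispatches the EQL_* private ioctls to the
 * handlers below.  The two "get" requests are allowed for any user; the rest
 * require CAP_NET_ADMIN.  A rough userspace sketch (an illustration only, not
 * part of this driver), assuming the EQL_* request codes and slaving_request_t
 * from <linux/if_eql.h>, an AF_INET socket fd, and a slave named "ppp0":
 *
 *	struct ifreq ifr;
 *	slaving_request_t srq;
 *
 *	strncpy(ifr.ifr_name, "eql", IFNAMSIZ);
 *	strncpy(srq.slave_name, "ppp0", IFNAMSIZ);
 *	srq.priority = 28800;
 *	ifr.ifr_data = (void *)&srq;
 *	ioctl(fd, EQL_ENSLAVE, &ifr);
 */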
static int eql_siocdevprivate(struct net_device *dev, struct ifreq *ifr,
			      void __user *data, int cmd)
{
	if (cmd != EQL_GETMASTRCFG && cmd != EQL_GETSLAVECFG &&
	    !capable(CAP_NET_ADMIN))
		return -EPERM;

	if (in_compat_syscall()) /* to be implemented */
		return -EOPNOTSUPP;

	switch (cmd) {
	case EQL_ENSLAVE:
		return eql_enslave(dev, data);
	case EQL_EMANCIPATE:
		return eql_emancipate(dev, data);
	case EQL_GETSLAVECFG:
		return eql_g_slave_cfg(dev, data);
	case EQL_SETSLAVECFG:
		return eql_s_slave_cfg(dev, data);
	case EQL_GETMASTRCFG:
		return eql_g_master_cfg(dev, data);
	case EQL_SETMASTRCFG:
		return eql_s_master_cfg(dev, data);
	default:
		return -EOPNOTSUPP;
	}
}

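/* Pick the slave with the lowest load.  For a live slave the load is
 *
 *	slave_load = (~0UL - ~0UL / 2) - priority_Bps + bytes_queued * 8
 *
 * so a higher configured rate (priority_Bps) lowers the load while queued
 * bytes raise it; the constant term is just a large bias that keeps the
 * subtraction from going negative for any realistic rate.  Slaves whose
 * device is no longer up are reaped as the list is walked.
 */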
/* queue->lock must be held */
static slave_t *__eql_schedule_slaves(slave_queue_t *queue)
{
	unsigned long best_load = ~0UL;
	struct list_head *this, *tmp, *head;
	slave_t *best_slave;

	best_slave = NULL;

	/* Make a pass to set the best slave. */
	head = &queue->all_slaves;
	list_for_each_safe(this, tmp, head) {
		slave_t *slave = list_entry(this, slave_t, list);
		unsigned long slave_load, bytes_queued, priority_Bps;

		/* Go through the slave list once, updating best_slave
		 * whenever a new best_load is found.
		 */
		bytes_queued = slave->bytes_queued;
		priority_Bps = slave->priority_Bps;
		if ((slave->dev->flags & IFF_UP) == IFF_UP) {
			slave_load = (~0UL - (~0UL / 2)) -
				(priority_Bps) + bytes_queued * 8;

			if (slave_load < best_load) {
				best_load = slave_load;
				best_slave = slave;
			}
		} else {
			/* We found a dead slave, kill it. */
			eql_kill_one_slave(queue, slave);
		}
	}
	return best_slave;
}

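/* Transmit path for the master device: pick the least-loaded live slave,
 * retarget the skb at that slave's device, charge the skb length to the
 * slave's bytes_queued, and hand the skb to dev_queue_xmit().  If no usable
 * slave exists the packet is dropped and counted as such.
 */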
static netdev_tx_t eql_slave_xmit(struct sk_buff *skb, struct net_device *dev)
{
	equalizer_t *eql = netdev_priv(dev);
	slave_t *slave;

	spin_lock(&eql->queue.lock);

	slave = __eql_schedule_slaves(&eql->queue);
	if (slave) {
		struct net_device *slave_dev = slave->dev;

		skb->dev = slave_dev;
		skb->priority = TC_PRIO_FILLER;
		slave->bytes_queued += skb->len;
		dev_queue_xmit(skb);
		dev->stats.tx_packets++;
	} else {
		dev->stats.tx_dropped++;
		dev_kfree_skb(skb);
	}

	spin_unlock(&eql->queue.lock);

	return NETDEV_TX_OK;
}

/*
 *	Private ioctl functions
 */

/* queue->lock must be held */
static slave_t *__eql_find_slave_dev(slave_queue_t *queue, struct net_device *dev)
{
	struct list_head *this, *head;

	head = &queue->all_slaves;
	list_for_each(this, head) {
		slave_t *slave = list_entry(this, slave_t, list);

		if (slave->dev == dev)
			return slave;
	}

	return NULL;
}

static inline int eql_is_full(slave_queue_t *queue)
{
	equalizer_t *eql = netdev_priv(queue->master_dev);

	if (queue->num_slaves >= eql->max_slaves)
		return 1;
	return 0;
}

/* queue->lock must be held */
static int __eql_insert_slave(slave_queue_t *queue, slave_t *slave)
{
	if (!eql_is_full(queue)) {
		slave_t *duplicate_slave = NULL;

		duplicate_slave = __eql_find_slave_dev(queue, slave->dev);
		if (duplicate_slave)
			eql_kill_one_slave(queue, duplicate_slave);

		netdev_hold(slave->dev, &slave->dev_tracker, GFP_ATOMIC);
		list_add(&slave->list, &queue->all_slaves);
		queue->num_slaves++;
		slave->dev->flags |= IFF_SLAVE;

		return 0;
	}

	return -ENOSPC;
}

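/* EQL_ENSLAVE: attach a slave device to an up master.  The candidate must be
 * neither a master nor already a slave; srq.priority is stored both as given
 * (priority_bps) and divided by eight (priority_Bps) for the byte-based
 * scheduler and timer.
 */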
static int eql_enslave(struct net_device *master_dev, slaving_request_t __user *srqp)
{
	struct net_device *slave_dev;
	slaving_request_t srq;

	if (copy_from_user(&srq, srqp, sizeof (slaving_request_t)))
		return -EFAULT;

	slave_dev = __dev_get_by_name(&init_net, srq.slave_name);
	if (!slave_dev)
		return -ENODEV;

	if ((master_dev->flags & IFF_UP) == IFF_UP) {
		/* slave is not a master & not already a slave: */
		if (!eql_is_master(slave_dev) && !eql_is_slave(slave_dev)) {
			slave_t *s = kzalloc(sizeof(*s), GFP_KERNEL);
			equalizer_t *eql = netdev_priv(master_dev);
			int ret;

			if (!s)
				return -ENOMEM;

			s->dev = slave_dev;
			s->priority = srq.priority;
			s->priority_bps = srq.priority;
			s->priority_Bps = srq.priority / 8;

			spin_lock_bh(&eql->queue.lock);
			ret = __eql_insert_slave(&eql->queue, s);
			if (ret)
				kfree(s);

			spin_unlock_bh(&eql->queue.lock);

			return ret;
		}
	}

	return -EINVAL;
}

static int eql_emancipate(struct net_device *master_dev, slaving_request_t __user *srqp)
{
	equalizer_t *eql = netdev_priv(master_dev);
	struct net_device *slave_dev;
	slaving_request_t srq;
	int ret;

	if (copy_from_user(&srq, srqp, sizeof (slaving_request_t)))
		return -EFAULT;

	slave_dev = __dev_get_by_name(&init_net, srq.slave_name);
	if (!slave_dev)
		return -ENODEV;

	ret = -EINVAL;
	spin_lock_bh(&eql->queue.lock);
	if (eql_is_slave(slave_dev)) {
		slave_t *slave = __eql_find_slave_dev(&eql->queue, slave_dev);
		if (slave) {
			eql_kill_one_slave(&eql->queue, slave);
			ret = 0;
		}
	}
	spin_unlock_bh(&eql->queue.lock);

	return ret;
}

static int eql_g_slave_cfg(struct net_device *dev, slave_config_t __user *scp)
{
	equalizer_t *eql = netdev_priv(dev);
	slave_t *slave;
	struct net_device *slave_dev;
	slave_config_t sc;
	int ret;

	if (copy_from_user(&sc, scp, sizeof (slave_config_t)))
		return -EFAULT;

	slave_dev = __dev_get_by_name(&init_net, sc.slave_name);
	if (!slave_dev)
		return -ENODEV;

	ret = -EINVAL;

	spin_lock_bh(&eql->queue.lock);
	if (eql_is_slave(slave_dev)) {
		slave = __eql_find_slave_dev(&eql->queue, slave_dev);
		if (slave) {
			sc.priority = slave->priority;
			ret = 0;
		}
	}
	spin_unlock_bh(&eql->queue.lock);

	if (!ret && copy_to_user(scp, &sc, sizeof (slave_config_t)))
		ret = -EFAULT;

	return ret;
}

static int eql_s_slave_cfg(struct net_device *dev, slave_config_t __user *scp)
{
	slave_t *slave;
	equalizer_t *eql;
	struct net_device *slave_dev;
	slave_config_t sc;
	int ret;

	if (copy_from_user(&sc, scp, sizeof (slave_config_t)))
		return -EFAULT;

	slave_dev = __dev_get_by_name(&init_net, sc.slave_name);
	if (!slave_dev)
		return -ENODEV;

	ret = -EINVAL;

	eql = netdev_priv(dev);
	spin_lock_bh(&eql->queue.lock);
	if (eql_is_slave(slave_dev)) {
		slave = __eql_find_slave_dev(&eql->queue, slave_dev);
		if (slave) {
			slave->priority = sc.priority;
			slave->priority_bps = sc.priority;
			slave->priority_Bps = sc.priority / 8;
			ret = 0;
		}
	}
	spin_unlock_bh(&eql->queue.lock);

	return ret;
}

static int eql_g_master_cfg(struct net_device *dev, master_config_t __user *mcp)
{
	equalizer_t *eql;
	master_config_t mc;

	memset(&mc, 0, sizeof(master_config_t));

	if (eql_is_master(dev)) {
		eql = netdev_priv(dev);
		mc.max_slaves = eql->max_slaves;
		mc.min_slaves = eql->min_slaves;
		if (copy_to_user(mcp, &mc, sizeof (master_config_t)))
			return -EFAULT;
		return 0;
	}
	return -EINVAL;
}

static int eql_s_master_cfg(struct net_device *dev, master_config_t __user *mcp)
{
	equalizer_t *eql;
	master_config_t mc;

	if (copy_from_user(&mc, mcp, sizeof (master_config_t)))
		return -EFAULT;

	if (eql_is_master(dev)) {
		eql = netdev_priv(dev);
		eql->max_slaves = mc.max_slaves;
		eql->min_slaves = mc.min_slaves;
		return 0;
	}
	return -EINVAL;
}

static struct net_device *dev_eql;

static int __init eql_init_module(void)
{
	int err;

	pr_info("%s\n", version);

	dev_eql = alloc_netdev(sizeof(equalizer_t), "eql", NET_NAME_UNKNOWN,
			       eql_setup);
	if (!dev_eql)
		return -ENOMEM;

	err = register_netdev(dev_eql);
	if (err)
		free_netdev(dev_eql);
	return err;
}

static void __exit eql_cleanup_module(void)
{
	unregister_netdev(dev_eql);
	free_netdev(dev_eql);
}

module_init(eql_init_module);
module_exit(eql_cleanup_module);
MODULE_DESCRIPTION("Equalizer Load-balancer for serial network interfaces");
MODULE_LICENSE("GPL");