1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /* $Id: plip.c,v 1.3.6.2 1997/04/16 15:07:56 phil Exp $ */
3 /* PLIP: A parallel port "network" driver for Linux. */
4 /* This driver is for parallel port with 5-bit cable (LapLink (R) cable). */
15 * Modularization and ifreq/ifmap support by Alan Cox.
16 * Rewritten by Niibe Yutaka.
17 * parport-sharing awareness code by Philip Blundell.
18 * SMP locking by Niibe Yutaka.
19 * Support for parallel ports with no IRQ (poll mode),
20 * Modifications to use the parallel port API
25 * - Module initialization.
27 * - Make sure other end is OK, before sending a packet.
28 * - Fix immediate timer problem.
31 * - Changed {enable,disable}_irq handling to make it work
32 * with new ("stack") semantics.
37 * inspired by Russ Nelson's parallel port packet driver.
40 * Tanabe Hiroyasu had changed the protocol, and it was in Linux v1.0.
41 * Because of the necessity to communicate to DOS machines with the
42 * Crynwr packet driver, Peter Bauer changed the protocol again
43 * back to original protocol.
45 * This version follows original PLIP protocol.
46 * So, this PLIP can't communicate the PLIP of Linux v1.0.
50 * To use with DOS box, please do (Turn on ARP switch):
51 * # ifconfig plip[0-2] arp
58 "parallel.asm" parallel port packet driver.
60 The "Crynwr" parallel port standard specifies the following protocol:
61 Trigger by sending nibble '0x8' (this causes interrupt on other end)
66 Each octet is sent as <wait for rx. '0x1?'> <send 0x10+(octet&0x0F)>
67 <wait for rx. '0x0?'> <send 0x00+((octet>>4)&0x0F)>
69 The packet is encapsulated as if it were ethernet.
71 The cable used is a de facto standard parallel null cable -- sold as
72 a "LapLink" cable by various places. You'll need a 12-conductor cable to
73 make one yourself. The wiring is:
76 D0->ERROR 2 - 15 15 - 2
77 D1->SLCT 3 - 13 13 - 3
78 D2->PAPOUT 4 - 12 12 - 4
80 D4->BUSY 6 - 11 11 - 6
81 Do not connect the other pins. They are
83 STROBE is 1, FEED is 14, INIT is 16
84 extra grounds are 18,19,20,21,22,23,24
87 #include <linux/module.h>
88 #include <linux/kernel.h>
89 #include <linux/types.h>
90 #include <linux/fcntl.h>
91 #include <linux/interrupt.h>
92 #include <linux/string.h>
93 #include <linux/slab.h>
94 #include <linux/if_ether.h>
96 #include <linux/errno.h>
97 #include <linux/delay.h>
98 #include <linux/init.h>
99 #include <linux/netdevice.h>
100 #include <linux/etherdevice.h>
101 #include <linux/inetdevice.h>
102 #include <linux/skbuff.h>
103 #include <linux/if_plip.h>
104 #include <linux/workqueue.h>
105 #include <linux/spinlock.h>
106 #include <linux/completion.h>
107 #include <linux/parport.h>
108 #include <linux/bitops.h>
110 #include <net/neighbour.h>
113 #include <asm/byteorder.h>
115 /* Maximum number of devices to support. */
118 /* Use 0 for production, 1 for verification, >2 for debug */
/* Compile-time debug verbosity; checked before debug printk()s below. */
122 static const unsigned int net_debug = NET_DEBUG;
/* IRQ helpers guarded for IRQ-less (polled) ports: irq == -1 means the
 * parport has no interrupt line, so enable/disable must be no-ops.
 * NOTE(review): unbraced if inside a macro is a dangling-else hazard if
 * ever used under an if/else — verify call sites. */
124 #define ENABLE(irq) if (irq != -1) enable_irq(irq)
125 #define DISABLE(irq) if (irq != -1) disable_irq(irq)
127 /* In micro second */
128 #define PLIP_DELAY_UNIT 1
130 /* Connection time out = PLIP_TRIGGER_WAIT * PLIP_DELAY_UNIT usec */
131 #define PLIP_TRIGGER_WAIT 500
133 /* Nibble time out = PLIP_NIBBLE_WAIT * PLIP_DELAY_UNIT usec */
134 #define PLIP_NIBBLE_WAIT 3000
/* Bottom-half (workqueue) handlers: deferred kick, main state machine,
 * and the polling timer used when the port has no IRQ. */
137 static void plip_kick_bh(struct work_struct *work);
138 static void plip_bh(struct work_struct *work);
139 static void plip_timer_bh(struct work_struct *work);
141 /* Interrupt handler */
142 static void plip_interrupt(void *dev_id);
144 /* Functions for DEV methods */
145 static netdev_tx_t plip_tx_packet(struct sk_buff *skb, struct net_device *dev);
146 static int plip_hard_header(struct sk_buff *skb, struct net_device *dev,
147 unsigned short type, const void *daddr,
148 const void *saddr, unsigned len);
149 static int plip_hard_header_cache(const struct neighbour *neigh,
150 struct hh_cache *hh, __be16 type);
151 static int plip_open(struct net_device *dev);
152 static int plip_close(struct net_device *dev);
153 static int plip_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
/* parport sharing callbacks: preempt asks us to give the port up,
 * wakeup tells us it is available again. */
154 static int plip_preempt(void *handle);
155 static void plip_wakeup(void *handle);
/* Link-level connection state (indexes connection_state_table below). */
157 enum plip_connection_state {
/* Progress of a single packet transfer (trigger, length, data, csum...). */
165 enum plip_packet_state {
/* Nibble-handshake phase within one byte transfer. */
174 enum plip_nibble_state {
/* NOTE(review): enumerator lists and the struct plip_local header appear
 * elided in this excerpt — verify against upstream plip.c. */
181 enum plip_packet_state state;
182 enum plip_nibble_state nibble;
/* 16-bit length accessible as whole (h) or as lsb/msb bytes; byte order
 * of the pair is selected by the endianness test below. */
185 #if defined(__LITTLE_ENDIAN)
188 #elif defined(__BIG_ENDIAN)
192 #error "Please fix the endianness defines in <asm/byteorder.h>"
/* Running additive checksum over payload bytes. */
198 unsigned char checksum;
/* Per-device private data (struct net_local). */
204 struct net_device *dev;
205 struct work_struct immediate;
206 struct delayed_work deferred;
207 struct delayed_work timer;
208 struct plip_local snd_data;
209 struct plip_local rcv_data;
210 struct pardevice *pardev;
211 unsigned long trigger;
212 unsigned long nibble;
213 enum plip_connection_state connection;
214 unsigned short timeout_count;
/* Set when a parport preempt arrived mid-transfer; honored at close. */
217 int should_relinquish;
/* Signaled by plip_timer_bh once kill_timer is observed. */
220 struct completion killed_timer_cmp;
/* Enable rx interrupts on the underlying parallel port via parport ops.
 * NOTE(review): an irq != -1 guard appears elided from this excerpt. */
223 static inline void enable_parport_interrupts (struct net_device *dev)
227 struct parport *port =
228 ((struct net_local *)netdev_priv(dev))->pardev->port;
229 port->ops->enable_irq (port);
/* Disable rx interrupts on the underlying parallel port via parport ops.
 * NOTE(review): an irq != -1 guard appears elided from this excerpt. */
233 static inline void disable_parport_interrupts (struct net_device *dev)
237 struct parport *port =
238 ((struct net_local *)netdev_priv(dev))->pardev->port;
239 port->ops->disable_irq (port);
/* Drive the port's data lines with @data (the five wire-protocol bits). */
243 static inline void write_data (struct net_device *dev, unsigned char data)
245 struct parport *port =
246 ((struct net_local *)netdev_priv(dev))->pardev->port;
248 port->ops->write_data (port, data);
/* Read the port's status lines (remote end's data appears here per the
 * LapLink wiring documented at the top of the file). */
251 static inline unsigned char read_status (struct net_device *dev)
253 struct parport *port =
254 ((struct net_local *)netdev_priv(dev))->pardev->port;
256 return port->ops->read_status (port);
/* Ethernet-style header ops, with PLIP's point-to-point address rewrite
 * applied in plip_hard_header()/plip_hard_header_cache(). */
259 static const struct header_ops plip_header_ops = {
260 .create = plip_hard_header,
261 .cache = plip_hard_header_cache,
/* Net-device operations; MAC address handling is borrowed from the
 * generic ethernet helpers. */
264 static const struct net_device_ops plip_netdev_ops = {
265 .ndo_open = plip_open,
266 .ndo_stop = plip_close,
267 .ndo_start_xmit = plip_tx_packet,
268 .ndo_do_ioctl = plip_ioctl,
269 .ndo_set_mac_address = eth_mac_addr,
270 .ndo_validate_addr = eth_validate_addr,
273 /* Entry point of PLIP driver.
274 Probe the hardware, and register/initialize the driver.
276 PLIP is rather weird, because of the way it interacts with the parport
277 system. It is _not_ initialised from Space.c. Instead, plip_init()
278 is called, and that function makes up a "struct net_device" for each port, and
/* Finish setting up a freshly allocated etherdev for PLIP use:
 * flags, fake MAC, ops tables, timeouts, and work items. */
283 plip_init_netdev(struct net_device *dev)
285 struct net_local *nl = netdev_priv(dev);
287 /* Then, override parts of it */
288 dev->tx_queue_len = 10;
289 dev->flags = IFF_POINTOPOINT|IFF_NOARP;
/* Placeholder MAC of all 0xfc; low 4 bytes get the local IP later
 * (see plip_open). */
290 memset(dev->dev_addr, 0xfc, ETH_ALEN);
292 dev->netdev_ops = &plip_netdev_ops;
293 dev->header_ops = &plip_header_ops;
298 /* Initialize constants */
299 nl->trigger = PLIP_TRIGGER_WAIT;
300 nl->nibble = PLIP_NIBBLE_WAIT;
302 /* Initialize task queue structures */
303 INIT_WORK(&nl->immediate, plip_bh);
304 INIT_DELAYED_WORK(&nl->deferred, plip_kick_bh);
/* timer work only matters in IRQ-less (polled) mode. */
307 INIT_DELAYED_WORK(&nl->timer, plip_timer_bh);
309 spin_lock_init(&nl->lock);
312 /* Bottom half handler for the delayed request.
313 This routine is kicked by do_timer().
314 Request `plip_bh' to be invoked. */
316 plip_kick_bh(struct work_struct *work)
318 struct net_local *nl =
319 container_of(work, struct net_local, deferred.work);
/* Re-queue the main state machine. NOTE(review): a deferred-flag check
 * appears elided from this excerpt. */
322 schedule_work(&nl->immediate);
325 /* Forward declarations of internal routines */
326 static int plip_none(struct net_device *, struct net_local *,
327 struct plip_local *, struct plip_local *);
328 static int plip_receive_packet(struct net_device *, struct net_local *,
329 struct plip_local *, struct plip_local *);
330 static int plip_send_packet(struct net_device *, struct net_local *,
331 struct plip_local *, struct plip_local *);
332 static int plip_connection_close(struct net_device *, struct net_local *,
333 struct plip_local *, struct plip_local *);
334 static int plip_error(struct net_device *, struct net_local *,
335 struct plip_local *, struct plip_local *);
336 static int plip_bh_timeout_error(struct net_device *dev, struct net_local *nl,
337 struct plip_local *snd,
338 struct plip_local *rcv,
/* All state handlers share one signature so plip_bh can dispatch on
 * nl->connection through this table. */
346 typedef int (*plip_func)(struct net_device *dev, struct net_local *nl,
347 struct plip_local *snd, struct plip_local *rcv);
/* Indexed by enum plip_connection_state.
 * NOTE(review): other table entries appear elided in this excerpt. */
349 static const plip_func connection_state_table[] =
354 plip_connection_close,
358 /* Bottom half handler of PLIP. */
/* Dispatch the current connection state handler; on a non-OK result that
 * the timeout-error path also cannot resolve, retry via deferred work. */
360 plip_bh(struct work_struct *work)
362 struct net_local *nl = container_of(work, struct net_local, immediate);
363 struct plip_local *snd = &nl->snd_data;
364 struct plip_local *rcv = &nl->rcv_data;
369 f = connection_state_table[nl->connection];
370 if ((r = (*f)(nl->dev, nl, snd, rcv)) != OK &&
371 (r = plip_bh_timeout_error(nl->dev, nl, snd, rcv, r)) != OK) {
/* Try again one tick later. */
373 schedule_delayed_work(&nl->deferred, 1);
/* Polling substitute for the parport IRQ on IRQ-less ports: simulate an
 * interrupt each tick until plip_close sets kill_timer, then signal
 * the waiter via the completion. */
378 plip_timer_bh(struct work_struct *work)
380 struct net_local *nl =
381 container_of(work, struct net_local, timer.work);
383 if (!(atomic_read (&nl->kill_timer))) {
384 plip_interrupt (nl->dev);
386 schedule_delayed_work(&nl->timer, 1);
/* Tell plip_close the poll loop has stopped. */
389 complete(&nl->killed_timer_cmp);
/* Handle a non-OK result from a state handler: count retries, log on
 * exhaustion, drop the in-flight skb, and move the link to ERROR state. */
394 plip_bh_timeout_error(struct net_device *dev, struct net_local *nl,
395 struct plip_local *snd, struct plip_local *rcv,
400 * This is tricky. If we got here from the beginning of send (either
401 * with ERROR or HS_TIMEOUT) we have IRQ enabled. Otherwise it's
402 * already disabled. With the old variant of {enable,disable}_irq()
403 * extra disable_irq() was a no-op. Now it became mortal - it's
404 * unbalanced and thus we'll never re-enable IRQ (until rmmod plip,
405 * that is). So we have to treat HS_TIMEOUT and ERROR from send
409 spin_lock_irq(&nl->lock);
410 if (nl->connection == PLIP_CN_SEND) {
/* error != ERROR means a timeout (HS_TIMEOUT or TIMEOUT). */
412 if (error != ERROR) { /* Timeout */
/* Handshake timeouts get more retries than data timeouts. */
414 if ((error == HS_TIMEOUT && nl->timeout_count <= 10) ||
415 nl->timeout_count <= 3) {
416 spin_unlock_irq(&nl->lock);
417 /* Try again later */
420 c0 = read_status(dev);
421 printk(KERN_WARNING "%s: transmit timeout(%d,%02x)\n",
422 dev->name, snd->state, c0);
425 dev->stats.tx_errors++;
426 dev->stats.tx_aborted_errors++;
427 } else if (nl->connection == PLIP_CN_RECEIVE) {
428 if (rcv->state == PLIP_PK_TRIGGER) {
429 /* Transmission was interrupted. */
430 spin_unlock_irq(&nl->lock);
433 if (error != ERROR) { /* Timeout */
434 if (++nl->timeout_count <= 3) {
435 spin_unlock_irq(&nl->lock);
436 /* Try again later */
439 c0 = read_status(dev);
440 printk(KERN_WARNING "%s: receive timeout(%d,%02x)\n",
441 dev->name, rcv->state, c0);
443 dev->stats.rx_dropped++;
445 rcv->state = PLIP_PK_DONE;
/* Abandon the pending transmit skb, if any. */
450 snd->state = PLIP_PK_DONE;
452 dev_kfree_skb(snd->skb);
455 spin_unlock_irq(&nl->lock);
456 if (error == HS_TIMEOUT) {
458 synchronize_irq(dev->irq);
460 disable_parport_interrupts (dev);
461 netif_stop_queue (dev);
462 nl->connection = PLIP_CN_ERROR;
/* Idle the data lines while in error state. */
463 write_data (dev, 0x00);
/* State handler for PLIP_CN_NONE: nothing to do. */
469 plip_none(struct net_device *dev, struct net_local *nl,
470 struct plip_local *snd, struct plip_local *rcv)
475 /* PLIP_RECEIVE --- receive a byte(two nibbles)
476 Returns OK on success, TIMEOUT on timeout */
478 plip_receive(unsigned short nibble_timeout, struct net_device *dev,
479 enum plip_nibble_state *ns_p, unsigned char *data_p)
481 unsigned char c0, c1;
/* Wait for the remote's low nibble (status bit 7 dropped), with a
 * debounce re-read before accepting it. */
488 c0 = read_status(dev);
489 udelay(PLIP_DELAY_UNIT);
490 if ((c0 & 0x80) == 0) {
491 c1 = read_status(dev);
/* Low nibble of the octet sits in status bits 3..6. */
498 *data_p = (c0 >> 3) & 0x0f;
499 write_data (dev, 0x10); /* send ACK */
/* Now wait for the high nibble (status bit 7 raised). */
506 c0 = read_status(dev);
507 udelay(PLIP_DELAY_UNIT);
509 c1 = read_status(dev);
/* High nibble arrives shifted; merge into the upper 4 bits. */
516 *data_p |= (c0 << 1) & 0xf0;
517 write_data (dev, 0x00); /* send ACK */
518 *ns_p = PLIP_NB_BEGIN;
527 * Determine the packet's protocol ID. The rule here is that we
528 * assume 802.3 if the type field is short enough to be a length.
529 * This is normal practice and works for any 'now in use' protocol.
531 * PLIP is ethernet ish but the daddr might not be valid if unicast.
532 * PLIP fortunately has no bus architecture (its Point-to-point).
534 * We can't fix the daddr thing as that quirk (more bug) is embedded
535 * in far too many old systems not all even running Linux.
/* Like eth_type_trans() but without the unicast-daddr sanity check,
 * which PLIP's rewritten addresses would fail. */
538 static __be16 plip_type_trans(struct sk_buff *skb, struct net_device *dev)
543 skb_reset_mac_header(skb);
544 skb_pull(skb,dev->hard_header_len);
547 if(is_multicast_ether_addr(eth->h_dest))
549 if(ether_addr_equal_64bits(eth->h_dest, dev->broadcast))
550 skb->pkt_type=PACKET_BROADCAST;
552 skb->pkt_type=PACKET_MULTICAST;
556 * This ALLMULTI check should be redundant by 1.4
557 * so don't forget to remove it.
/* Values >= ETH_P_802_3_MIN are ethertypes, not 802.3 lengths. */
560 if (ntohs(eth->h_proto) >= ETH_P_802_3_MIN)
566 * This is a magic hack to spot IPX packets. Older Novell breaks
567 * the protocol design and runs IPX over 802.3 without an 802.2 LLC
568 * layer. We look for FFFF which isn't a used 802.2 SSAP/DSAP. This
569 * won't work for fault tolerant netware but does for the rest.
571 if (*(unsigned short *)rawp == 0xFFFF)
572 return htons(ETH_P_802_3);
577 return htons(ETH_P_802_2);
580 /* PLIP_RECEIVE_PACKET --- receive a packet */
/* State handler for PLIP_CN_RECEIVE: walk the per-packet state machine
 * (trigger -> length lsb/msb -> data -> checksum -> done), allocating an
 * skb once the length is known and handing it up via netif_rx_ni().
 * NOTE(review): several lines (fallthrough cases, error paths) appear
 * elided in this excerpt. */
582 plip_receive_packet(struct net_device *dev, struct net_local *nl,
583 struct plip_local *snd, struct plip_local *rcv)
585 unsigned short nibble_timeout = nl->nibble;
588 switch (rcv->state) {
589 case PLIP_PK_TRIGGER:
591 /* Don't need to synchronize irq, as we can safely ignore it */
592 disable_parport_interrupts (dev);
593 write_data (dev, 0x01); /* send ACK */
595 printk(KERN_DEBUG "%s: receive start\n", dev->name);
596 rcv->state = PLIP_PK_LENGTH_LSB;
597 rcv->nibble = PLIP_NB_BEGIN;
600 case PLIP_PK_LENGTH_LSB:
/* A pending send racing with this receive is a collision: back off
 * to SEND using the (shorter) trigger timeout. */
601 if (snd->state != PLIP_PK_DONE) {
602 if (plip_receive(nl->trigger, dev,
603 &rcv->nibble, &rcv->length.b.lsb)) {
604 /* collision, here dev->tbusy == 1 */
605 rcv->state = PLIP_PK_DONE;
607 nl->connection = PLIP_CN_SEND;
608 schedule_delayed_work(&nl->deferred, 1);
609 enable_parport_interrupts (dev);
614 if (plip_receive(nibble_timeout, dev,
615 &rcv->nibble, &rcv->length.b.lsb))
618 rcv->state = PLIP_PK_LENGTH_MSB;
621 case PLIP_PK_LENGTH_MSB:
622 if (plip_receive(nibble_timeout, dev,
623 &rcv->nibble, &rcv->length.b.msb))
/* Sanity-check the advertised length against MTU + header. */
625 if (rcv->length.h > dev->mtu + dev->hard_header_len ||
627 printk(KERN_WARNING "%s: bogus packet size %d.\n", dev->name, rcv->length.h);
630 /* Malloc up new buffer. */
631 rcv->skb = dev_alloc_skb(rcv->length.h + 2);
632 if (rcv->skb == NULL) {
633 printk(KERN_ERR "%s: Memory squeeze.\n", dev->name);
636 skb_reserve(rcv->skb, 2); /* Align IP on 16 byte boundaries */
637 skb_put(rcv->skb,rcv->length.h);
639 rcv->state = PLIP_PK_DATA;
/* Receive the payload byte by byte, accumulating the checksum. */
645 lbuf = rcv->skb->data;
647 if (plip_receive(nibble_timeout, dev,
648 &rcv->nibble, &lbuf[rcv->byte]))
650 } while (++rcv->byte < rcv->length.h);
652 rcv->checksum += lbuf[--rcv->byte];
654 rcv->state = PLIP_PK_CHECKSUM;
657 case PLIP_PK_CHECKSUM:
658 if (plip_receive(nibble_timeout, dev,
659 &rcv->nibble, &rcv->data))
661 if (rcv->data != rcv->checksum) {
662 dev->stats.rx_crc_errors++;
664 printk(KERN_DEBUG "%s: checksum error\n", dev->name);
667 rcv->state = PLIP_PK_DONE;
671 /* Inform the upper layer for the arrival of a packet. */
672 rcv->skb->protocol=plip_type_trans(rcv->skb, dev);
673 netif_rx_ni(rcv->skb);
674 dev->stats.rx_bytes += rcv->length.h;
675 dev->stats.rx_packets++;
678 printk(KERN_DEBUG "%s: receive end\n", dev->name);
680 /* Close the connection. */
681 write_data (dev, 0x00);
682 spin_lock_irq(&nl->lock);
/* A transmit queued during receive takes over immediately. */
683 if (snd->state != PLIP_PK_DONE) {
684 nl->connection = PLIP_CN_SEND;
685 spin_unlock_irq(&nl->lock);
686 schedule_work(&nl->immediate);
687 enable_parport_interrupts (dev);
691 nl->connection = PLIP_CN_NONE;
692 spin_unlock_irq(&nl->lock);
693 enable_parport_interrupts (dev);
701 /* PLIP_SEND --- send a byte (two nibbles)
702 Returns OK on success, TIMEOUT when timeout */
704 plip_send(unsigned short nibble_timeout, struct net_device *dev,
705 enum plip_nibble_state *ns_p, unsigned char data)
/* Low nibble first: present it, then strobe with bit 4 set. */
712 write_data (dev, data & 0x0f);
717 write_data (dev, 0x10 | (data & 0x0f));
/* Wait for the remote ACK (status bit 7 clearing). */
720 c0 = read_status(dev);
721 if ((c0 & 0x80) == 0)
725 udelay(PLIP_DELAY_UNIT);
/* High nibble: strobe then drop the strobe bit. */
727 write_data (dev, 0x10 | (data >> 4));
732 write_data (dev, (data >> 4));
735 c0 = read_status(dev);
740 udelay(PLIP_DELAY_UNIT);
742 *ns_p = PLIP_NB_BEGIN;
748 /* PLIP_SEND_PACKET --- send a packet */
/* State handler for PLIP_CN_SEND: trigger the remote, then send length,
 * payload and checksum, freeing the skb when done.
 * NOTE(review): several lines (returns, fallthroughs) appear elided. */
750 plip_send_packet(struct net_device *dev, struct net_local *nl,
751 struct plip_local *snd, struct plip_local *rcv)
753 unsigned short nibble_timeout = nl->nibble;
758 if (snd->skb == NULL || (lbuf = snd->skb->data) == NULL) {
759 printk(KERN_DEBUG "%s: send skb lost\n", dev->name);
760 snd->state = PLIP_PK_DONE;
765 switch (snd->state) {
766 case PLIP_PK_TRIGGER:
/* Remote must be idle (status lines 10000xxx) before triggering. */
767 if ((read_status(dev) & 0xf8) != 0x80)
770 /* Trigger remote rx interrupt. */
771 write_data (dev, 0x08);
774 udelay(PLIP_DELAY_UNIT);
775 spin_lock_irq(&nl->lock);
/* If a receive started meanwhile, record a collision and yield. */
776 if (nl->connection == PLIP_CN_RECEIVE) {
777 spin_unlock_irq(&nl->lock);
779 dev->stats.collisions++;
782 c0 = read_status(dev);
784 spin_unlock_irq(&nl->lock);
786 synchronize_irq(dev->irq);
787 if (nl->connection == PLIP_CN_RECEIVE) {
789 We don't need to enable irq,
790 as it is soon disabled. */
791 /* Yes, we do. New variant of
792 {enable,disable}_irq *counts*
795 dev->stats.collisions++;
798 disable_parport_interrupts (dev);
800 printk(KERN_DEBUG "%s: send start\n", dev->name);
801 snd->state = PLIP_PK_LENGTH_LSB;
802 snd->nibble = PLIP_NB_BEGIN;
803 nl->timeout_count = 0;
806 spin_unlock_irq(&nl->lock);
/* Remote did not respond: clear the trigger. */
808 write_data (dev, 0x00);
814 case PLIP_PK_LENGTH_LSB:
815 if (plip_send(nibble_timeout, dev,
816 &snd->nibble, snd->length.b.lsb))
818 snd->state = PLIP_PK_LENGTH_MSB;
821 case PLIP_PK_LENGTH_MSB:
822 if (plip_send(nibble_timeout, dev,
823 &snd->nibble, snd->length.b.msb))
825 snd->state = PLIP_PK_DATA;
/* Send payload bytes, accumulating the additive checksum. */
832 if (plip_send(nibble_timeout, dev,
833 &snd->nibble, lbuf[snd->byte]))
835 } while (++snd->byte < snd->length.h);
837 snd->checksum += lbuf[--snd->byte];
839 snd->state = PLIP_PK_CHECKSUM;
842 case PLIP_PK_CHECKSUM:
843 if (plip_send(nibble_timeout, dev,
844 &snd->nibble, snd->checksum))
847 dev->stats.tx_bytes += snd->skb->len;
848 dev_kfree_skb(snd->skb);
849 dev->stats.tx_packets++;
850 snd->state = PLIP_PK_DONE;
854 /* Close the connection */
855 write_data (dev, 0x00);
858 printk(KERN_DEBUG "%s: send end\n", dev->name);
859 nl->connection = PLIP_CN_CLOSING;
861 schedule_delayed_work(&nl->deferred, 1);
862 enable_parport_interrupts (dev);
/* State handler for PLIP_CN_CLOSING: return to idle, restart the tx
 * queue, and release the parport if a preempt was pending. */
870 plip_connection_close(struct net_device *dev, struct net_local *nl,
871 struct plip_local *snd, struct plip_local *rcv)
873 spin_lock_irq(&nl->lock);
874 if (nl->connection == PLIP_CN_CLOSING) {
875 nl->connection = PLIP_CN_NONE;
876 netif_wake_queue (dev);
878 spin_unlock_irq(&nl->lock);
/* Honor a parport preempt that arrived mid-transfer. */
879 if (nl->should_relinquish) {
880 nl->should_relinquish = nl->port_owner = 0;
881 parport_release(nl->pardev);
886 /* PLIP_ERROR --- wait till other end settled */
/* State handler for PLIP_CN_ERROR: poll until the remote's lines return
 * to idle (10000xxx), then reset the interface; otherwise retry later. */
888 plip_error(struct net_device *dev, struct net_local *nl,
889 struct plip_local *snd, struct plip_local *rcv)
891 unsigned char status;
893 status = read_status(dev);
894 if ((status & 0xf8) == 0x80) {
896 printk(KERN_DEBUG "%s: reset interface.\n", dev->name);
897 nl->connection = PLIP_CN_NONE;
898 nl->should_relinquish = 0;
899 netif_start_queue (dev);
900 enable_parport_interrupts (dev);
902 netif_wake_queue (dev);
/* Not settled yet — poll again on the next tick. */
905 schedule_delayed_work(&nl->deferred, 1);
911 /* Handle the parallel port interrupts. */
/* Runs for real parport IRQs and, in polled mode, from plip_timer_bh.
 * A remote trigger (status 11000xxx) starts a receive via the BH. */
913 plip_interrupt(void *dev_id)
915 struct net_device *dev = dev_id;
916 struct net_local *nl;
917 struct plip_local *rcv;
921 nl = netdev_priv(dev);
924 spin_lock_irqsave (&nl->lock, flags);
/* 0xc0 on the high status bits is the remote's trigger pattern;
 * anything else is noise (only worth logging with a real IRQ). */
926 c0 = read_status(dev);
927 if ((c0 & 0xf8) != 0xc0) {
928 if ((dev->irq != -1) && (net_debug > 1))
929 printk(KERN_DEBUG "%s: spurious interrupt\n", dev->name);
930 spin_unlock_irqrestore (&nl->lock, flags);
935 printk(KERN_DEBUG "%s: interrupt.\n", dev->name);
937 switch (nl->connection) {
938 case PLIP_CN_CLOSING:
939 netif_wake_queue (dev);
/* Start a fresh receive and hand off to the bottom half. */
943 rcv->state = PLIP_PK_TRIGGER;
944 nl->connection = PLIP_CN_RECEIVE;
945 nl->timeout_count = 0;
946 schedule_work(&nl->immediate);
949 case PLIP_CN_RECEIVE:
950 /* May occur because there is race condition
951 around test and set of dev->interrupt.
952 Ignore this interrupt. */
956 printk(KERN_ERR "%s: receive interrupt in error state\n", dev->name);
960 spin_unlock_irqrestore(&nl->lock, flags);
/* ndo_start_xmit: queue one skb for transmission. Claims the parport if
 * we do not currently own it, validates size, and kicks the BH. */
964 plip_tx_packet(struct sk_buff *skb, struct net_device *dev)
966 struct net_local *nl = netdev_priv(dev);
967 struct plip_local *snd = &nl->snd_data;
969 if (netif_queue_stopped(dev))
970 return NETDEV_TX_BUSY;
972 /* We may need to grab the bus */
973 if (!nl->port_owner) {
974 if (parport_claim(nl->pardev))
975 return NETDEV_TX_BUSY;
/* Single outstanding packet: stop the queue until send completes. */
979 netif_stop_queue (dev);
981 if (skb->len > dev->mtu + dev->hard_header_len) {
982 printk(KERN_WARNING "%s: packet too big, %d.\n", dev->name, (int)skb->len);
983 netif_start_queue (dev);
984 return NETDEV_TX_BUSY;
988 printk(KERN_DEBUG "%s: send request\n", dev->name);
990 spin_lock_irq(&nl->lock);
992 snd->length.h = skb->len;
993 snd->state = PLIP_PK_TRIGGER;
/* Only take over the link if it is idle; otherwise the BH picks the
 * pending send up after the current receive finishes. */
994 if (nl->connection == PLIP_CN_NONE) {
995 nl->connection = PLIP_CN_SEND;
996 nl->timeout_count = 0;
998 schedule_work(&nl->immediate);
999 spin_unlock_irq(&nl->lock);
1001 return NETDEV_TX_OK;
/* Rewrite an ethernet header for the point-to-point link: source is our
 * fake MAC, destination is fc:fc:<peer IPv4 address>. */
1005 plip_rewrite_address(const struct net_device *dev, struct ethhdr *eth)
1007 const struct in_device *in_dev;
1010 in_dev = __in_dev_get_rcu(dev);
1012 /* Any address will do - we take the first */
1013 const struct in_ifaddr *ifa = rcu_dereference(in_dev->ifa_list);
1015 memcpy(eth->h_source, dev->dev_addr, ETH_ALEN);
1016 memset(eth->h_dest, 0xfc, 2);
1017 memcpy(eth->h_dest+2, &ifa->ifa_address, 4);
/* header_ops->create: build a normal ethernet header, then apply PLIP's
 * point-to-point address rewrite. */
1024 plip_hard_header(struct sk_buff *skb, struct net_device *dev,
1025 unsigned short type, const void *daddr,
1026 const void *saddr, unsigned len)
1030 ret = eth_header(skb, dev, type, daddr, saddr, len);
1032 plip_rewrite_address (dev, (struct ethhdr *)skb->data);
/* header_ops->cache: cache the ethernet header, then rewrite the cached
 * copy's addresses the same way plip_hard_header does. */
1037 static int plip_hard_header_cache(const struct neighbour *neigh,
1038 struct hh_cache *hh, __be16 type)
1042 ret = eth_header_cache(neigh, hh, type);
1046 eth = (struct ethhdr*)(((u8*)hh->hh_data) +
1047 HH_DATA_OFF(sizeof(*eth)));
1048 plip_rewrite_address (neigh->dev, eth);
1054 /* Open/initialize the board. This is called (in the current kernel)
1055 sometime after booting when the 'ifconfig' program is run.
1057 This routine gets exclusive access to the parallel port by allocating
/* ndo_open: claim the port, reset hardware and state machine, start the
 * poll timer for IRQ-less ports, and derive a MAC from the local IP. */
1061 plip_open(struct net_device *dev)
1063 struct net_local *nl = netdev_priv(dev);
1064 struct in_device *in_dev;
1067 if (!nl->port_owner) {
1068 if (parport_claim(nl->pardev)) return -EAGAIN;
1072 nl->should_relinquish = 0;
1074 /* Clear the data port. */
1075 write_data (dev, 0x00);
1077 /* Enable rx interrupt. */
1078 enable_parport_interrupts (dev);
/* Polled mode: arm the simulated-interrupt timer. */
1081 atomic_set (&nl->kill_timer, 0);
1082 schedule_delayed_work(&nl->timer, 1);
1085 /* Initialize the state machine. */
1086 nl->rcv_data.state = nl->snd_data.state = PLIP_PK_DONE;
1087 nl->rcv_data.skb = nl->snd_data.skb = NULL;
1088 nl->connection = PLIP_CN_NONE;
1089 nl->is_deferred = 0;
1091 /* Fill in the MAC-level header.
1092 We used to abuse dev->broadcast to store the point-to-point
1093 MAC address, but we no longer do it. Instead, we fetch the
1094 interface address whenever it is needed, which is cheap enough
1095 because we use the hh_cache. Actually, abusing dev->broadcast
1096 didn't work, because when using plip_open the point-to-point
1097 address isn't yet known.
1098 PLIP doesn't have a real MAC address, but we need it to be
1099 DOS compatible, and to properly support taps (otherwise,
1100 when the device address isn't identical to the address of a
1101 received frame, the kernel incorrectly drops it). */
1103 in_dev=__in_dev_get_rtnl(dev);
1105 /* Any address will do - we take the first. We already
1106 have the first two bytes filled with 0xfc, from
1108 const struct in_ifaddr *ifa = rcu_dereference(in_dev->ifa_list);
1110 memcpy(dev->dev_addr+2, &ifa->ifa_local, 4);
1114 netif_start_queue (dev);
1119 /* The inverse routine to plip_open (). */
/* ndo_close: stop the queue, kill the poll timer (waiting for it via
 * the completion), reset hardware, release the port, and free any
 * in-flight skbs. */
1121 plip_close(struct net_device *dev)
1123 struct net_local *nl = netdev_priv(dev);
1124 struct plip_local *snd = &nl->snd_data;
1125 struct plip_local *rcv = &nl->rcv_data;
1127 netif_stop_queue (dev);
1129 synchronize_irq(dev->irq);
/* Polled mode: signal plip_timer_bh to stop and wait until it has. */
1133 init_completion(&nl->killed_timer_cmp);
1134 atomic_set (&nl->kill_timer, 1);
1135 wait_for_completion(&nl->killed_timer_cmp);
/* NOTE(review): raw outb() here bypasses the parport ops wrappers used
 * elsewhere — verify against upstream whether this path is intended. */
1139 outb(0x00, PAR_DATA(dev));
1141 nl->is_deferred = 0;
1142 nl->connection = PLIP_CN_NONE;
1143 if (nl->port_owner) {
1144 parport_release(nl->pardev);
1148 snd->state = PLIP_PK_DONE;
1150 dev_kfree_skb(snd->skb);
1153 rcv->state = PLIP_PK_DONE;
1155 kfree_skb(rcv->skb);
1161 outb(0x00, PAR_CONTROL(dev));
/* parport preempt callback: refuse to give up the port while a datagram
 * is in flight (defer via should_relinquish); otherwise release it. */
1167 plip_preempt(void *handle)
1169 struct net_device *dev = (struct net_device *)handle;
1170 struct net_local *nl = netdev_priv(dev);
1172 /* Stand our ground if a datagram is on the wire */
1173 if (nl->connection != PLIP_CN_NONE) {
1174 nl->should_relinquish = 1;
1178 nl->port_owner = 0; /* Remember that we released the bus */
/* parport wakeup callback: the port became available again — reclaim it
 * if the interface is up and we do not already own it. */
1183 plip_wakeup(void *handle)
1185 struct net_device *dev = (struct net_device *)handle;
1186 struct net_local *nl = netdev_priv(dev);
1188 if (nl->port_owner) {
1189 /* Why are we being woken up? */
1190 printk(KERN_DEBUG "%s: why am I being woken up?\n", dev->name);
1191 if (!parport_claim(nl->pardev))
1192 /* bus_owner is already set (but why?) */
1193 printk(KERN_DEBUG "%s: I'm broken.\n", dev->name);
1198 if (!(dev->flags & IFF_UP))
1199 /* Don't need the port when the interface is down */
1202 if (!parport_claim(nl->pardev)) {
1204 /* Clear the data port. */
1205 write_data (dev, 0x00);
/* ndo_do_ioctl: SIOCDEVPLIP only — get/set the trigger and nibble
 * handshake timeouts; setting requires CAP_NET_ADMIN. */
1210 plip_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1212 struct net_local *nl = netdev_priv(dev);
1213 struct plipconf *pc = (struct plipconf *) &rq->ifr_ifru;
1215 if (cmd != SIOCDEVPLIP)
1219 case PLIP_GET_TIMEOUT:
1220 pc->trigger = nl->trigger;
1221 pc->nibble = nl->nibble;
1223 case PLIP_SET_TIMEOUT:
1224 if(!capable(CAP_NET_ADMIN))
1226 nl->trigger = pc->trigger;
1227 nl->nibble = pc->nibble;
/* Module parameters: explicit parport numbers (-1 = unset slot) and the
 * 'timid' flag (don't grab ports that already have other devices). */
1235 static int parport[PLIP_MAX] = { [0 ... PLIP_MAX-1] = -1 };
1238 module_param_array(parport, int, NULL, 0);
1239 module_param(timid, int, 0);
1240 MODULE_PARM_DESC(parport, "List of parport device numbers to use by plip");
1242 static struct net_device *dev_plip[PLIP_MAX] = { NULL, };
/* Return 1 if port number @a appears in @list (terminated by -1). */
1245 plip_searchfor(int list[], int a)
1248 for (i = 0; i < PLIP_MAX && list[i] != -1; i++) {
1249 if (list[i] == a) return 1;
1254 /* plip_attach() is called (by the parport code) when a port is
1255 * available to use. */
/* For each usable port: allocate an etherdev, register a pardevice with
 * our callbacks, initialise it via plip_init_netdev(), and register the
 * netdev as plip<unit>. */
1256 static void plip_attach (struct parport *port)
1259 struct net_device *dev;
1260 struct net_local *nl;
1261 char name[IFNAMSIZ];
1262 struct pardev_cb plip_cb;
/* Use the port either when no explicit list was given (subject to
 * 'timid' and existing devices) or when it is in the given list. */
1264 if ((parport[0] == -1 && (!timid || !port->devices)) ||
1265 plip_searchfor(parport, port->number)) {
1266 if (unit == PLIP_MAX) {
1267 printk(KERN_ERR "plip: too many devices\n");
1271 sprintf(name, "plip%d", unit);
1272 dev = alloc_etherdev(sizeof(struct net_local));
1276 strcpy(dev->name, name);
1278 dev->irq = port->irq;
1279 dev->base_addr = port->base;
1280 if (port->irq == -1) {
1281 printk(KERN_INFO "plip: %s has no IRQ. Using IRQ-less mode,"
1282 "which is fairly inefficient!\n", port->name);
1285 nl = netdev_priv(dev);
1288 memset(&plip_cb, 0, sizeof(plip_cb));
1289 plip_cb.private = dev;
1290 plip_cb.preempt = plip_preempt;
1291 plip_cb.wakeup = plip_wakeup;
1292 plip_cb.irq_func = plip_interrupt;
1294 nl->pardev = parport_register_dev_model(port, dev->name,
1298 printk(KERN_ERR "%s: parport_register failed\n", name);
1302 plip_init_netdev(dev);
1304 if (register_netdev(dev)) {
1305 printk(KERN_ERR "%s: network register failed\n", name);
1306 goto err_parport_unregister;
1309 printk(KERN_INFO "%s", version);
1311 printk(KERN_INFO "%s: Parallel port at %#3lx, "
1313 dev->name, dev->base_addr, dev->irq);
1315 printk(KERN_INFO "%s: Parallel port at %#3lx, "
1317 dev->name, dev->base_addr);
1318 dev_plip[unit++] = dev;
/* Error unwinding (goto-cleanup pattern). */
1322 err_parport_unregister:
1323 parport_unregister_device(nl->pardev);
1328 /* plip_detach() is called (by the parport code) when a port is
1329 * no longer available to use. */
/* Intentionally empty: teardown happens in plip_cleanup_module(). */
1330 static void plip_detach (struct parport *port)
/* Device-model probe: accept pardevices whose name begins with our
 * driver name prefix. */
1335 static int plip_probe(struct pardevice *par_dev)
1337 struct device_driver *drv = par_dev->dev.driver;
1338 int len = strlen(drv->name);
1340 if (strncmp(par_dev->name, drv->name, len))
/* parport driver descriptor wiring probe/attach/detach callbacks. */
1346 static struct parport_driver plip_driver = {
1348 .probe = plip_probe,
1349 .match_port = plip_attach,
1350 .detach = plip_detach,
/* Module exit: unregister every created netdev, release and unregister
 * its pardevice, then drop the parport driver registration. */
1354 static void __exit plip_cleanup_module (void)
1356 struct net_device *dev;
1359 for (i=0; i < PLIP_MAX; i++) {
1360 if ((dev = dev_plip[i])) {
1361 struct net_local *nl = netdev_priv(dev);
1362 unregister_netdev(dev);
1364 parport_release(nl->pardev);
1365 parport_unregister_device(nl->pardev);
1371 parport_unregister_driver(&plip_driver);
/* Next free slot in the parport[] list filled by plip_setup(). */
1376 static int parport_ptr;
/* Kernel command-line parser for "plip=": accepts parport<N> tokens,
 * "timid", and "plip=0"/"plip=" to disable the driver. */
1378 static int __init plip_setup(char *str)
1382 str = get_options(str, ARRAY_SIZE(ints), ints);
1385 if (!strncmp(str, "parport", 7)) {
1386 int n = simple_strtoul(str+7, NULL, 10);
1387 if (parport_ptr < PLIP_MAX)
1388 parport[parport_ptr++] = n;
1390 printk(KERN_INFO "plip: too many ports, %s ignored.\n",
1392 } else if (!strcmp(str, "timid")) {
1395 if (ints[0] == 0 || ints[1] == 0) {
1396 /* disable driver on "plip=" or "plip=0" */
1399 printk(KERN_WARNING "warning: 'plip=0x%x' ignored\n",
1406 __setup("plip=", plip_setup);
1408 #endif /* !MODULE */
/* Module init: honor the disable sentinel (parport[0] == -2), warn if
 * 'timid' was combined with an explicit port list, then register with
 * the parport subsystem (which calls plip_attach per port). */
1410 static int __init plip_init (void)
1412 if (parport[0] == -2)
1415 if (parport[0] != -1 && timid) {
1416 printk(KERN_WARNING "plip: warning, ignoring `timid' since specific ports given.\n");
1420 if (parport_register_driver (&plip_driver)) {
1421 printk (KERN_WARNING "plip: couldn't register driver\n");
1428 module_init(plip_init);
1429 module_exit(plip_cleanup_module);
1430 MODULE_LICENSE("GPL");