/*********************************************************************
 *
 *	sir_dev.c:	irda sir network device
 *
 *	Copyright (c) 2002 Martin Diehl
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License as
 *	published by the Free Software Foundation; either version 2 of
 *	the License, or (at your option) any later version.
 *
 ********************************************************************/
#include <linux/hardirq.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/delay.h>

#include <net/irda/irda.h>
#include <net/irda/wrapper.h>
#include <net/irda/irda_device.h>

#include "sir-dev.h"
static struct workqueue_struct *irda_sir_wq;
/* substate handler of the config-fsm to handle the cases where we want
 * to wait for transmit completion before changing the port configuration
 */
static int sirdev_tx_complete_fsm(struct sir_dev *dev)
{
	struct sir_fsm *fsm = &dev->fsm;
	unsigned next_state, delay;
	unsigned bytes_left;

	do {
		next_state = fsm->substate;	/* default: stay in current substate */
		delay = 0;

		switch(fsm->substate) {

		case SIRDEV_STATE_WAIT_XMIT:
			if (dev->drv->chars_in_buffer)
				bytes_left = dev->drv->chars_in_buffer(dev);
			else
				bytes_left = 0;
			if (!bytes_left) {
				next_state = SIRDEV_STATE_WAIT_UNTIL_SENT;
				break;
			}

			if (dev->speed > 115200)
				delay = (bytes_left*8*10000) / (dev->speed/100);
			else if (dev->speed > 0)
				delay = (bytes_left*10*10000) / (dev->speed/100);
			else
				delay = 0;
			/* expected delay (usec) until remaining bytes are sent */
			if (delay < 100) {
				udelay(delay);
				delay = 0;
				break;
			}
			/* sleep some longer delay (msec) */
			delay = (delay+999) / 1000;
			break;
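
		/* Worked example (not from the source, just the arithmetic):
		 * at 9600 baud with 48 bytes still buffered, 10 bits per
		 * byte on the wire gives
		 *	delay = (48*10*10000) / (9600/100) = 50000 usec,
		 * so we reschedule ourselves after (50000+999)/1000 = 50 msec
		 * instead of busy-waiting; anything below 100 usec is simply
		 * udelay()ed above.
		 */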
		case SIRDEV_STATE_WAIT_UNTIL_SENT:
			/* block until the underlying hardware buffers are empty */
			if (dev->drv->wait_until_sent)
				dev->drv->wait_until_sent(dev);
			next_state = SIRDEV_STATE_TX_DONE;
			break;

		case SIRDEV_STATE_TX_DONE:
			return 0;

		default:
			net_err_ratelimited("%s - undefined state\n", __func__);
			return -EINVAL;
		}
		fsm->substate = next_state;
	} while (delay == 0);
	return delay;
}
/*
 * Function sirdev_config_fsm
 *
 * State machine to handle the configuration of the device (and attached
 * dongle, if any). This handler is scheduled for execution in kIrDAd
 * context, so we can sleep. However, kIrDAd is shared by all sir_dev
 * devices, so we'd better not sleep there too long. Instead, for longer
 * delays we start a timer to get rescheduled later.
 * On entry, fsm->sem is always locked and the netdev xmit queue stopped.
 * Both must be unlocked/restarted on completion - but only on final exit.
 */
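
/* Typical walk through the fsm for a speed change request, as derived from
 * the switch below: SET_SPEED -> DONGLE_CHECK -> [DONGLE_RESET ->
 * DONGLE_SPEED ->] PORT_SPEED -> DONE -> COMPLETE, where the bracketed
 * dongle states are entered only when a dongle driver is attached.
 */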
static void sirdev_config_fsm(struct work_struct *work)
{
	struct sir_dev *dev = container_of(work, struct sir_dev, fsm.work.work);
	struct sir_fsm *fsm = &dev->fsm;
	int next_state;
	int ret = -1;
	unsigned delay;

	pr_debug("%s(), <%ld>\n", __func__, jiffies);

	do {
		pr_debug("%s - state=0x%04x / substate=0x%04x\n",
			 __func__, fsm->state, fsm->substate);

		next_state = fsm->state;
		delay = 0;

		switch(fsm->state) {

		case SIRDEV_STATE_DONGLE_OPEN:
			if (dev->dongle_drv != NULL) {
				ret = sirdev_put_dongle(dev);
				if (ret) {
					fsm->result = -EINVAL;
					next_state = SIRDEV_STATE_ERROR;
					break;
				}
			}

			/* Initialize dongle */
			ret = sirdev_get_dongle(dev, fsm->param);
			if (ret) {
				fsm->result = ret;
				next_state = SIRDEV_STATE_ERROR;
				break;
			}

			/* Dongles are powered through the modem control lines which
			 * were just set during open. Before resetting, let's wait for
			 * the power to stabilize. This is what some dongle drivers did
			 * in open before, while others didn't - should be safe anyway.
			 */
			delay = 50;
			fsm->substate = SIRDEV_STATE_DONGLE_RESET;
			next_state = SIRDEV_STATE_DONGLE_RESET;

			fsm->param = 9600;
			break;

		case SIRDEV_STATE_DONGLE_CLOSE:
			/* shouldn't we just treat this as success? */
			if (dev->dongle_drv == NULL) {
				fsm->result = -EINVAL;
				next_state = SIRDEV_STATE_ERROR;
				break;
			}

			ret = sirdev_put_dongle(dev);
			if (ret) {
				fsm->result = ret;
				next_state = SIRDEV_STATE_ERROR;
				break;
			}
			next_state = SIRDEV_STATE_DONE;
			break;

		case SIRDEV_STATE_SET_DTR_RTS:
			ret = sirdev_set_dtr_rts(dev,
				(fsm->param&0x02) ? TRUE : FALSE,
				(fsm->param&0x01) ? TRUE : FALSE);
			next_state = SIRDEV_STATE_DONE;
			break;

		case SIRDEV_STATE_SET_SPEED:
			fsm->substate = SIRDEV_STATE_WAIT_XMIT;
			next_state = SIRDEV_STATE_DONGLE_CHECK;
			break;

		case SIRDEV_STATE_DONGLE_CHECK:
			ret = sirdev_tx_complete_fsm(dev);
			if (ret < 0) {
				fsm->result = ret;
				next_state = SIRDEV_STATE_ERROR;
				break;
			}
			if ((delay=ret) != 0)
				break;

			if (dev->dongle_drv) {
				fsm->substate = SIRDEV_STATE_DONGLE_RESET;
				next_state = SIRDEV_STATE_DONGLE_RESET;
			}
			else {
				dev->speed = fsm->param;
				next_state = SIRDEV_STATE_PORT_SPEED;
			}
			break;

		case SIRDEV_STATE_DONGLE_RESET:
			if (dev->dongle_drv->reset) {
				ret = dev->dongle_drv->reset(dev);
				if (ret < 0) {
					fsm->result = ret;
					next_state = SIRDEV_STATE_ERROR;
					break;
				}
			}
			else
				ret = 0;
			if ((delay=ret) == 0) {
				/* set serial port according to dongle default speed */
				if (dev->drv->set_speed)
					dev->drv->set_speed(dev, dev->speed);
				fsm->substate = SIRDEV_STATE_DONGLE_SPEED;
				next_state = SIRDEV_STATE_DONGLE_SPEED;
			}
			break;

		case SIRDEV_STATE_DONGLE_SPEED:
			if (dev->dongle_drv->set_speed) {
				ret = dev->dongle_drv->set_speed(dev, fsm->param);
				if (ret < 0) {
					fsm->result = ret;
					next_state = SIRDEV_STATE_ERROR;
					break;
				}
			}
			else
				ret = 0;
			if ((delay=ret) == 0)
				next_state = SIRDEV_STATE_PORT_SPEED;
			break;

		case SIRDEV_STATE_PORT_SPEED:
			/* Finally we are ready to change the serial port speed */
			if (dev->drv->set_speed)
				dev->drv->set_speed(dev, dev->speed);
			dev->new_speed = 0;
			next_state = SIRDEV_STATE_DONE;
			break;

		case SIRDEV_STATE_DONE:
			/* Signal network layer so it can send more frames */
			netif_wake_queue(dev->netdev);
			next_state = SIRDEV_STATE_COMPLETE;
			break;

		default:
			net_err_ratelimited("%s - undefined state\n", __func__);
			fsm->result = -EINVAL;
			/* fall thru */

		case SIRDEV_STATE_ERROR:
			net_err_ratelimited("%s - error: %d\n",
					    __func__, fsm->result);

#if 0	/* don't enable this before we have netdev->tx_timeout to recover */
			netif_stop_queue(dev->netdev);
#else
			netif_wake_queue(dev->netdev);
#endif
			/* fall thru */

		case SIRDEV_STATE_COMPLETE:
			/* config change finished, so we are not busy any longer */
			sirdev_enable_rx(dev);
			up(&fsm->sem);
			return;
		}
		fsm->state = next_state;
	} while(!delay);

	queue_delayed_work(irda_sir_wq, &fsm->work, msecs_to_jiffies(delay));
}
/* schedule some device configuration task for execution by kIrDAd
 * on behalf of the above state machine.
 * can be called from process or interrupt/tasklet context.
 */
int sirdev_schedule_request(struct sir_dev *dev, int initial_state, unsigned param)
{
	struct sir_fsm *fsm = &dev->fsm;

	pr_debug("%s - state=0x%04x / param=%u\n", __func__,
		 initial_state, param);

	if (down_trylock(&fsm->sem)) {
		if (in_interrupt() || in_atomic() || irqs_disabled()) {
			pr_debug("%s(), state machine busy!\n", __func__);
			return -EWOULDBLOCK;
		} else
			down(&fsm->sem);
	}

	if (fsm->state == SIRDEV_STATE_DEAD) {
		/* race with sirdev_close should never happen */
		net_err_ratelimited("%s(), instance staled!\n", __func__);
		up(&fsm->sem);
		return -ESTALE;		/* or better EPIPE? */
	}

	netif_stop_queue(dev->netdev);
	atomic_set(&dev->enable_rx, 0);

	fsm->state = initial_state;
	fsm->param = param;
	fsm->result = 0;

	INIT_DELAYED_WORK(&fsm->work, sirdev_config_fsm);
	queue_delayed_work(irda_sir_wq, &fsm->work, 0);
	return 0;
}
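
/* Callers usually don't invoke sirdev_schedule_request() directly but use
 * the sirdev_schedule_speed/_dongle_open/_dongle_close/_dtr_rts wrappers
 * (macros in sir-dev.h). For example,
 *	sirdev_schedule_speed(dev, 115200);
 * expands to
 *	sirdev_schedule_request(dev, SIRDEV_STATE_SET_SPEED, 115200);
 */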
/***************************************************************************/
void sirdev_enable_rx(struct sir_dev *dev)
{
	if (unlikely(atomic_read(&dev->enable_rx)))
		return;

	/* flush rx-buffer - should also help in case of problems with echo cancellation */
	dev->rx_buff.data = dev->rx_buff.head;
	dev->rx_buff.len = 0;
	dev->rx_buff.in_frame = FALSE;
	dev->rx_buff.state = OUTSIDE_FRAME;
	atomic_set(&dev->enable_rx, 1);
}
static int sirdev_is_receiving(struct sir_dev *dev)
{
	if (!atomic_read(&dev->enable_rx))
		return 0;
	return dev->rx_buff.state != OUTSIDE_FRAME;
}
int sirdev_set_dongle(struct sir_dev *dev, IRDA_DONGLE type)
{
	int err;

	pr_debug("%s : requesting dongle %d.\n", __func__, type);

	err = sirdev_schedule_dongle_open(dev, type);
	if (unlikely(err))
		return err;
	down(&dev->fsm.sem);	/* block until config change completed */
	err = dev->fsm.result;
	up(&dev->fsm.sem);
	return err;
}
EXPORT_SYMBOL(sirdev_set_dongle);
/* used by dongle drivers for dongle programming */

int sirdev_raw_write(struct sir_dev *dev, const char *buf, int len)
{
	unsigned long flags;
	int ret;

	if (unlikely(len > dev->tx_buff.truesize))
		return -ENOSPC;

	spin_lock_irqsave(&dev->tx_lock, flags);	/* serialize with other tx operations */
	while (dev->tx_buff.len > 0) {			/* wait until tx idle */
		spin_unlock_irqrestore(&dev->tx_lock, flags);
		msleep(10);
		spin_lock_irqsave(&dev->tx_lock, flags);
	}

	dev->tx_buff.data = dev->tx_buff.head;
	memcpy(dev->tx_buff.data, buf, len);
	dev->tx_buff.len = len;

	ret = dev->drv->do_write(dev, dev->tx_buff.data, dev->tx_buff.len);
	if (ret > 0) {
		pr_debug("%s(), raw-tx started\n", __func__);

		dev->tx_buff.data += ret;
		dev->tx_buff.len -= ret;
		dev->raw_tx = 1;
		ret = len;	/* all data is going to be sent */
	}
	spin_unlock_irqrestore(&dev->tx_lock, flags);
	return ret;
}
EXPORT_SYMBOL(sirdev_raw_write);
/* seems some dongle drivers may need this */

int sirdev_raw_read(struct sir_dev *dev, char *buf, int len)
{
	int count;

	if (atomic_read(&dev->enable_rx))
		return -EIO;	/* fail if we expect irda-frames */

	count = (len < dev->rx_buff.len) ? len : dev->rx_buff.len;
	if (count > 0) {
		memcpy(buf, dev->rx_buff.data, count);
		dev->rx_buff.data += count;
		dev->rx_buff.len -= count;
	}

	/* remaining stuff gets flushed when re-enabling normal rx */

	return count;
}
EXPORT_SYMBOL(sirdev_raw_read);
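
/* A minimal dongle-programming sketch (hypothetical driver code, for
 * illustration only): with rx disabled by the config fsm, a dongle driver
 * could send a command byte and poll for the reply via the raw helpers:
 *
 *	static int my_dongle_cmd(struct sir_dev *dev)	// hypothetical
 *	{
 *		char reply;
 *
 *		if (sirdev_raw_write(dev, "V", 1) != 1)	// blocks until tx idle
 *			return -EIO;
 *		msleep(20);	// give the dongle time to answer
 *		return (sirdev_raw_read(dev, &reply, 1) == 1) ? reply : -EIO;
 *	}
 */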
int sirdev_set_dtr_rts(struct sir_dev *dev, int dtr, int rts)
{
	int ret = -ENXIO;

	if (dev->drv->set_dtr_rts)
		ret = dev->drv->set_dtr_rts(dev, dtr, rts);
	return ret;
}
EXPORT_SYMBOL(sirdev_set_dtr_rts);
/**********************************************************************/

/* called from client driver - likely with bh-context - to indicate
 * it made some progress with transmission. Hence we send the next
 * chunk, if any, or complete the skb otherwise
 */
void sirdev_write_complete(struct sir_dev *dev)
{
	unsigned long flags;
	struct sk_buff *skb;
	int actual = 0;
	int err;

	spin_lock_irqsave(&dev->tx_lock, flags);

	pr_debug("%s() - dev->tx_buff.len = %d\n",
		 __func__, dev->tx_buff.len);

	if (likely(dev->tx_buff.len > 0)) {
		/* Write data left in transmit buffer */
		actual = dev->drv->do_write(dev, dev->tx_buff.data, dev->tx_buff.len);

		if (likely(actual>0)) {
			dev->tx_buff.data += actual;
			dev->tx_buff.len -= actual;
		}
		else if (unlikely(actual<0)) {
			/* could be dropped later when we have tx_timeout to recover */
			net_err_ratelimited("%s: drv->do_write failed (%d)\n",
					    __func__, actual);
			if ((skb=dev->tx_skb) != NULL) {
				dev->tx_skb = NULL;
				dev_kfree_skb_any(skb);
				dev->netdev->stats.tx_errors++;
				dev->netdev->stats.tx_dropped++;
			}
			dev->tx_buff.len = 0;
		}
		if (dev->tx_buff.len > 0)
			goto done;	/* more data to send later */
	}

	if (unlikely(dev->raw_tx != 0)) {
		/* in raw mode we are just done now after the buffer was sent
		 * completely. Since this was requested by some dongle driver
		 * running under the control of the irda-thread we must take
		 * care here not to re-enable the queue. The queue will be
		 * restarted when the irda-thread has completed the request.
		 */
		pr_debug("%s(), raw-tx done\n", __func__);
		dev->raw_tx = 0;
		goto done;	/* no post-frame handling in raw mode */
	}

	/* we have finished now sending this skb.
	 * update statistics and free the skb.
	 * finally we check and trigger a pending speed change, if any.
	 * if not we switch to rx mode and wake the queue for further
	 * packets.
	 * note the scheduled speed request blocks until the lower
	 * client driver and the corresponding hardware have really
	 * finished sending all data (xmit fifo drained f.e.)
	 * before the speed change gets finally done and the queue
	 * re-activated.
	 */

	pr_debug("%s(), finished with frame!\n", __func__);

	if ((skb=dev->tx_skb) != NULL) {
		dev->tx_skb = NULL;
		dev->netdev->stats.tx_packets++;
		dev->netdev->stats.tx_bytes += skb->len;
		dev_kfree_skb_any(skb);
	}

	if (unlikely(dev->new_speed > 0)) {
		pr_debug("%s(), Changing speed!\n", __func__);
		err = sirdev_schedule_speed(dev, dev->new_speed);
		if (unlikely(err)) {
			/* should never happen
			 * forget the speed change and hope the stack recovers
			 */
			net_err_ratelimited("%s - schedule speed change failed: %d\n",
					    __func__, err);
			netif_wake_queue(dev->netdev);
		}
		/* else: success
		 *	speed change in progress now
		 *	on completion dev->new_speed gets cleared,
		 *	rx-reenabled and the queue restarted
		 */
	}
	else {
		sirdev_enable_rx(dev);
		netif_wake_queue(dev->netdev);
	}

done:
	spin_unlock_irqrestore(&dev->tx_lock, flags);
}
EXPORT_SYMBOL(sirdev_write_complete);
/* called from client driver - likely with bh-context - to give us
 * some more received bytes. We put them into the rx-buffer,
 * normally unwrapping and building LAP-skb's (unless rx disabled)
 */
int sirdev_receive(struct sir_dev *dev, const unsigned char *cp, size_t count)
{
	if (!dev || !dev->netdev) {
		net_warn_ratelimited("%s(), not ready yet!\n", __func__);
		return -1;
	}

	if (!dev->irlap) {
		net_warn_ratelimited("%s - too early: %p / %zd!\n",
				     __func__, cp, count);
		return -1;
	}

	if (cp == NULL) {
		/* error already at lower level receive
		 * just update stats and set media busy
		 */
		irda_device_set_media_busy(dev->netdev, TRUE);
		dev->netdev->stats.rx_dropped++;
		pr_debug("%s; rx-drop: %zd\n", __func__, count);
		return 0;
	}

	/* Read the characters into the buffer */
	if (likely(atomic_read(&dev->enable_rx))) {
		while (count--)
			/* Unwrap and destuff one byte */
			async_unwrap_char(dev->netdev, &dev->netdev->stats,
					  &dev->rx_buff, *cp++);
	} else {
		while (count--) {
			/* rx not enabled: save the raw bytes and never
			 * trigger any netif_rx. The received bytes are flushed
			 * later when we re-enable rx but might be read meanwhile
			 * by the dongle driver.
			 */
			dev->rx_buff.data[dev->rx_buff.len++] = *cp++;

			/* What should we do when the buffer is full? */
			if (unlikely(dev->rx_buff.len == dev->rx_buff.truesize))
				dev->rx_buff.len = 0;
		}
	}

	return 0;
}
EXPORT_SYMBOL(sirdev_receive);
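
/* A client driver typically feeds us from its rx interrupt or flip-buffer
 * handler. A minimal sketch (hypothetical UART glue, for illustration):
 *
 *	static void my_uart_rx(struct my_uart *uart)	// hypothetical
 *	{
 *		unsigned char buf[16];
 *		int n = my_uart_read_fifo(uart, buf, sizeof(buf));	// hypothetical
 *
 *		if (n > 0)
 *			sirdev_receive(uart->sirdev, buf, n);
 *	}
 */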
/**********************************************************************/

/* callbacks from network layer */
static netdev_tx_t sirdev_hard_xmit(struct sk_buff *skb,
				    struct net_device *ndev)
{
	struct sir_dev *dev = netdev_priv(ndev);
	unsigned long flags;
	int actual = 0;
	int err;
	s32 speed;

	IRDA_ASSERT(dev != NULL, return NETDEV_TX_OK;);

	netif_stop_queue(ndev);

	pr_debug("%s(), skb->len = %d\n", __func__, skb->len);

	speed = irda_get_next_speed(skb);
	if ((speed != dev->speed) && (speed != -1)) {
		if (!skb->len) {
			err = sirdev_schedule_speed(dev, speed);
			if (unlikely(err == -EWOULDBLOCK)) {
				/* Failed to initiate the speed change, likely the fsm
				 * is still busy (pretty unlikely, but...)
				 * We refuse to accept the skb and return with the queue
				 * stopped so the network layer will retry after the
				 * fsm completes and wakes the queue.
				 */
				return NETDEV_TX_BUSY;
			}
			else if (unlikely(err)) {
				/* other fatal error - forget the speed change and
				 * hope the stack will recover somehow
				 */
				netif_start_queue(ndev);
			}
			/* else: success
			 *	speed change in progress now
			 *	on completion the queue gets restarted
			 */

			dev_kfree_skb_any(skb);
			return NETDEV_TX_OK;
		} else
			dev->new_speed = speed;
	}

	/* Init tx buffer */
	dev->tx_buff.data = dev->tx_buff.head;

	/* Check problems */
	if (spin_is_locked(&dev->tx_lock)) {
		pr_debug("%s(), write not completed\n", __func__);
	}

	/* serialize with write completion */
	spin_lock_irqsave(&dev->tx_lock, flags);

	/* Copy skb to tx_buff while wrapping, stuffing and making CRC */
	dev->tx_buff.len = async_wrap_skb(skb, dev->tx_buff.data, dev->tx_buff.truesize);

	/* transmission will start now - disable receive.
	 * if we are just in the middle of an incoming frame,
	 * treat it as collision. probably it's a good idea to
	 * reset the rx_buf OUTSIDE_FRAME in this case too?
	 */
	atomic_set(&dev->enable_rx, 0);
	if (unlikely(sirdev_is_receiving(dev)))
		dev->netdev->stats.collisions++;

	actual = dev->drv->do_write(dev, dev->tx_buff.data, dev->tx_buff.len);

	if (likely(actual > 0)) {
		dev->tx_skb = skb;
		dev->tx_buff.data += actual;
		dev->tx_buff.len -= actual;
	}
	else if (unlikely(actual < 0)) {
		/* could be dropped later when we have tx_timeout to recover */
		net_err_ratelimited("%s: drv->do_write failed (%d)\n",
				    __func__, actual);
		dev_kfree_skb_any(skb);
		dev->netdev->stats.tx_errors++;
		dev->netdev->stats.tx_dropped++;
		netif_wake_queue(ndev);
	}
	spin_unlock_irqrestore(&dev->tx_lock, flags);

	return NETDEV_TX_OK;
}
/* called from network layer with rtnl hold */
static int sirdev_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
{
	struct if_irda_req *irq = (struct if_irda_req *) rq;
	struct sir_dev *dev = netdev_priv(ndev);
	int ret = 0;

	IRDA_ASSERT(dev != NULL, return -1;);

	pr_debug("%s(), %s, (cmd=0x%X)\n", __func__, ndev->name, cmd);

	switch (cmd) {
	case SIOCSBANDWIDTH: /* Set bandwidth */
		if (!capable(CAP_NET_ADMIN))
			ret = -EPERM;
		else
			ret = sirdev_schedule_speed(dev, irq->ifr_baudrate);
		/* cannot sleep here for completion
		 * we are called from network layer with rtnl hold
		 */
		break;

	case SIOCSDONGLE: /* Set dongle */
		if (!capable(CAP_NET_ADMIN))
			ret = -EPERM;
		else
			ret = sirdev_schedule_dongle_open(dev, irq->ifr_dongle);
		/* cannot sleep here for completion
		 * we are called from network layer with rtnl hold
		 */
		break;

	case SIOCSMEDIABUSY: /* Set media busy */
		if (!capable(CAP_NET_ADMIN))
			ret = -EPERM;
		else
			irda_device_set_media_busy(dev->netdev, TRUE);
		break;

	case SIOCGRECEIVING: /* Check if we are receiving right now */
		irq->ifr_receiving = sirdev_is_receiving(dev);
		break;

	case SIOCSDTRRTS: /* Set DTR/RTS */
		if (!capable(CAP_NET_ADMIN))
			ret = -EPERM;
		else
			ret = sirdev_schedule_dtr_rts(dev, irq->ifr_dtr, irq->ifr_rts);
		/* cannot sleep here for completion
		 * we are called from network layer with rtnl hold
		 */
		break;

	case SIOCSMODE: /* Set mode */
		if (!capable(CAP_NET_ADMIN))
			ret = -EPERM;
		else
			ret = sirdev_schedule_mode(dev, irq->ifr_mode);
		/* cannot sleep here for completion
		 * we are called from network layer with rtnl hold
		 */
		break;

	default:
		ret = -EOPNOTSUPP;
	}

	return ret;
}
/* ----------------------------------------------------------------------------- */

#define SIRBUF_ALLOCSIZE 4269	/* worst case size of a wrapped IrLAP frame */
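
/* Rough sanity check of the number above (our reasoning, not from the
 * source): a maximum IrLAP payload of 2048 bytes plus address/control and
 * FCS can, in the worst case, be byte-stuffed by async_wrap_skb() to twice
 * its size, plus BOF/EOF and a number of leading XBOFs - which lands in
 * the vicinity of 4269.
 */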
static int sirdev_alloc_buffers(struct sir_dev *dev)
{
	dev->tx_buff.truesize = SIRBUF_ALLOCSIZE;
	dev->rx_buff.truesize = IRDA_SKB_MAX_MTU;

	/* Bootstrap ZeroCopy Rx */
	dev->rx_buff.skb = __netdev_alloc_skb(dev->netdev, dev->rx_buff.truesize,
					      GFP_KERNEL);
	if (dev->rx_buff.skb == NULL)
		return -ENOMEM;
	skb_reserve(dev->rx_buff.skb, 1);
	dev->rx_buff.head = dev->rx_buff.skb->data;

	dev->tx_buff.head = kmalloc(dev->tx_buff.truesize, GFP_KERNEL);
	if (dev->tx_buff.head == NULL) {
		kfree_skb(dev->rx_buff.skb);
		dev->rx_buff.skb = NULL;
		dev->rx_buff.head = NULL;
		return -ENOMEM;
	}

	dev->tx_buff.data = dev->tx_buff.head;
	dev->rx_buff.data = dev->rx_buff.head;
	dev->tx_buff.len = 0;
	dev->rx_buff.len = 0;

	dev->rx_buff.in_frame = FALSE;
	dev->rx_buff.state = OUTSIDE_FRAME;
	return 0;
}
static void sirdev_free_buffers(struct sir_dev *dev)
{
	kfree_skb(dev->rx_buff.skb);
	kfree(dev->tx_buff.head);
	dev->rx_buff.head = dev->tx_buff.head = NULL;
	dev->rx_buff.skb = NULL;
}
static int sirdev_open(struct net_device *ndev)
{
	struct sir_dev *dev = netdev_priv(ndev);
	const struct sir_driver *drv = dev->drv;

	if (!drv)
		return -ENODEV;

	/* increase the reference count of the driver module before doing serious stuff */
	if (!try_module_get(drv->owner))
		return -ESTALE;

	if (sirdev_alloc_buffers(dev))
		goto errout_dec;

	if (!dev->drv->start_dev || dev->drv->start_dev(dev))
		goto errout_free;

	sirdev_enable_rx(dev);
	dev->raw_tx = 0;

	netif_start_queue(ndev);
	dev->irlap = irlap_open(ndev, &dev->qos, dev->hwname);
	if (!dev->irlap)
		goto errout_stop;

	netif_wake_queue(ndev);

	pr_debug("%s - done, speed = %d\n", __func__, dev->speed);
	return 0;

errout_stop:
	atomic_set(&dev->enable_rx, 0);
	if (dev->drv->stop_dev)
		dev->drv->stop_dev(dev);
errout_free:
	sirdev_free_buffers(dev);
errout_dec:
	module_put(drv->owner);
	return -EAGAIN;
}
static int sirdev_close(struct net_device *ndev)
{
	struct sir_dev *dev = netdev_priv(ndev);
	const struct sir_driver *drv;

	/* pr_debug("%s\n", __func__); */

	netif_stop_queue(ndev);

	down(&dev->fsm.sem);	/* block on pending config completion */

	atomic_set(&dev->enable_rx, 0);

	if (unlikely(!dev->irlap))
		goto out;
	irlap_close(dev->irlap);
	dev->irlap = NULL;

	drv = dev->drv;
	if (unlikely(!drv || !dev->priv))
		goto out;

	if (drv->stop_dev)
		drv->stop_dev(dev);

	sirdev_free_buffers(dev);
	module_put(drv->owner);

out:
	dev->speed = 0;
	up(&dev->fsm.sem);
	return 0;
}
static const struct net_device_ops sirdev_ops = {
	.ndo_start_xmit	= sirdev_hard_xmit,
	.ndo_open	= sirdev_open,
	.ndo_stop	= sirdev_close,
	.ndo_do_ioctl	= sirdev_ioctl,
};

/* ----------------------------------------------------------------------------- */
struct sir_dev * sirdev_get_instance(const struct sir_driver *drv, const char *name)
{
	struct net_device *ndev;
	struct sir_dev *dev;

	pr_debug("%s - %s\n", __func__, name);

	/* instead of adding tests to protect against drv->do_write==NULL
	 * at several places we refuse to create a sir_dev instance for
	 * drivers which don't implement do_write.
	 */
	if (!drv || !drv->do_write)
		return NULL;

	/*
	 * Allocate new instance of the device
	 */
	ndev = alloc_irdadev(sizeof(*dev));
	if (ndev == NULL) {
		net_err_ratelimited("%s - Can't allocate memory for IrDA control block!\n",
				    __func__);
		goto out;
	}
	dev = netdev_priv(ndev);

	irda_init_max_qos_capabilies(&dev->qos);
	dev->qos.baud_rate.bits = IR_9600|IR_19200|IR_38400|IR_57600|IR_115200;
	dev->qos.min_turn_time.bits = drv->qos_mtt_bits;
	irda_qos_bits_to_value(&dev->qos);

	strncpy(dev->hwname, name, sizeof(dev->hwname)-1);

	atomic_set(&dev->enable_rx, 0);
	dev->tx_skb = NULL;

	spin_lock_init(&dev->tx_lock);
	sema_init(&dev->fsm.sem, 1);

	dev->drv = drv;
	dev->netdev = ndev;

	/* Override the network functions we need to use */
	ndev->netdev_ops = &sirdev_ops;

	if (register_netdev(ndev)) {
		net_err_ratelimited("%s(), register_netdev() failed!\n",
				    __func__);
		goto out_freenetdev;
	}

	return dev;

out_freenetdev:
	free_netdev(ndev);
out:
	return NULL;
}
EXPORT_SYMBOL(sirdev_get_instance);
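
/* Registration sketch for a client driver (hypothetical, for illustration;
 * the callbacks follow struct sir_driver as declared in sir-dev.h):
 *
 *	static struct sir_driver my_sir_driver = {	// hypothetical
 *		.owner		= THIS_MODULE,
 *		.driver_name	= "my_sir",
 *		.do_write	= my_do_write,	// mandatory, see check above
 *		.set_speed	= my_set_speed,
 *		.start_dev	= my_start_dev,
 *		.stop_dev	= my_stop_dev,
 *	};
 *	...
 *	dev = sirdev_get_instance(&my_sir_driver, "my_sir");
 */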
int sirdev_put_instance(struct sir_dev *dev)
{
	int err = 0;

	pr_debug("%s\n", __func__);

	atomic_set(&dev->enable_rx, 0);

	netif_carrier_off(dev->netdev);
	netif_device_detach(dev->netdev);

	if (dev->dongle_drv)
		err = sirdev_schedule_dongle_close(dev);
	if (err)
		net_err_ratelimited("%s - error %d\n", __func__, err);

	sirdev_close(dev->netdev);

	down(&dev->fsm.sem);
	dev->fsm.state = SIRDEV_STATE_DEAD;	/* mark stale */
	dev->dongle_drv = NULL;
	dev->priv = NULL;
	up(&dev->fsm.sem);

	/* Remove netdevice */
	unregister_netdev(dev->netdev);
	free_netdev(dev->netdev);

	return 0;
}
EXPORT_SYMBOL(sirdev_put_instance);
static int __init sir_wq_init(void)
{
	irda_sir_wq = create_singlethread_workqueue("irda_sir_wq");
	if (!irda_sir_wq)
		return -ENOMEM;
	return 0;
}

static void __exit sir_wq_exit(void)
{
	destroy_workqueue(irda_sir_wq);
}

module_init(sir_wq_init);
module_exit(sir_wq_exit);
MODULE_DESCRIPTION("IrDA SIR core");
MODULE_LICENSE("GPL");