// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * The Serio abstraction module
 *
 * Copyright (c) 1999-2004 Vojtech Pavlik
 * Copyright (c) 2004 Dmitry Torokhov
 * Copyright (c) 2003 Daniele Bellucci
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/stddef.h>
#include <linux/module.h>
#include <linux/serio.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/mutex.h>

MODULE_DESCRIPTION("Serio abstraction core");
MODULE_LICENSE("GPL");

/*
 * serio_mutex protects the entire serio subsystem and is taken every time
 * a serio port or driver is registered or unregistered.
 */
static DEFINE_MUTEX(serio_mutex);

static LIST_HEAD(serio_list);

static void serio_add_port(struct serio *serio);
static int serio_reconnect_port(struct serio *serio);
static void serio_disconnect_port(struct serio *serio);
static void serio_reconnect_subtree(struct serio *serio);
static void serio_attach_driver(struct serio_driver *drv);

static int serio_connect_driver(struct serio *serio, struct serio_driver *drv)
{
	int retval;

	mutex_lock(&serio->drv_mutex);
	retval = drv->connect(serio, drv);
	mutex_unlock(&serio->drv_mutex);

	return retval;
}

static int serio_reconnect_driver(struct serio *serio)
{
	int retval = -1;

	mutex_lock(&serio->drv_mutex);
	if (serio->drv && serio->drv->reconnect)
		retval = serio->drv->reconnect(serio);
	mutex_unlock(&serio->drv_mutex);

	return retval;
}

static void serio_disconnect_driver(struct serio *serio)
{
	mutex_lock(&serio->drv_mutex);
	if (serio->drv)
		serio->drv->disconnect(serio);
	mutex_unlock(&serio->drv_mutex);
}

static int serio_match_port(const struct serio_device_id *ids, struct serio *serio)
{
	while (ids->type || ids->proto) {
		if ((ids->type == SERIO_ANY || ids->type == serio->id.type) &&
		    (ids->proto == SERIO_ANY || ids->proto == serio->id.proto) &&
		    (ids->extra == SERIO_ANY || ids->extra == serio->id.extra) &&
		    (ids->id == SERIO_ANY || ids->id == serio->id.id))
			return 1;
		ids++;
	}
	return 0;
}

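/*
 * Illustrative sketch (names hypothetical, not part of this file): a driver
 * willing to handle any protocol on an i8042-style port could use an id
 * table such as
 *
 *	static const struct serio_device_id example_serio_ids[] = {
 *		{
 *			.type	= SERIO_8042,
 *			.proto	= SERIO_ANY,
 *			.id	= SERIO_ANY,
 *			.extra	= SERIO_ANY,
 *		},
 *		{ 0 }
 *	};
 *
 * Matching stops at the all-zero terminator checked by the while() above.
 */
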
/*
 * Basic serio -> driver core mappings
 */

static int serio_bind_driver(struct serio *serio, struct serio_driver *drv)
{
	int error;

	if (serio_match_port(drv->id_table, serio)) {

		serio->dev.driver = &drv->driver;
		if (serio_connect_driver(serio, drv)) {
			serio->dev.driver = NULL;
			return -ENODEV;
		}

		error = device_bind_driver(&serio->dev);
		if (error) {
			dev_warn(&serio->dev,
				 "device_bind_driver() failed for %s (%s) and %s, error: %d\n",
				 serio->phys, serio->name,
				 drv->description, error);
			serio_disconnect_driver(serio);
			serio->dev.driver = NULL;
			return error;
		}
	}
	return 0;
}

static void serio_find_driver(struct serio *serio)
{
	int error;

	error = device_attach(&serio->dev);
	if (error < 0 && error != -EPROBE_DEFER)
		dev_warn(&serio->dev,
			 "device_attach() failed for %s (%s), error: %d\n",
			 serio->phys, serio->name, error);
}

/*
 * Serio event processing.
 */

enum serio_event_type {
	SERIO_RESCAN_PORT,
	SERIO_RECONNECT_PORT,
	SERIO_RECONNECT_SUBTREE,
	SERIO_REGISTER_PORT,
	SERIO_ATTACH_DRIVER,
};

struct serio_event {
	enum serio_event_type type;
	void *object;
	struct module *owner;
	struct list_head node;
};

static DEFINE_SPINLOCK(serio_event_lock);	/* protects serio_event_list */
static LIST_HEAD(serio_event_list);

static struct serio_event *serio_get_event(void)
{
	struct serio_event *event = NULL;
	unsigned long flags;

	spin_lock_irqsave(&serio_event_lock, flags);

	if (!list_empty(&serio_event_list)) {
		event = list_first_entry(&serio_event_list,
					 struct serio_event, node);
		list_del_init(&event->node);
	}

	spin_unlock_irqrestore(&serio_event_lock, flags);
	return event;
}

static void serio_free_event(struct serio_event *event)
{
	module_put(event->owner);
	kfree(event);
}

static void serio_remove_duplicate_events(void *object,
					  enum serio_event_type type)
{
	struct serio_event *e, *next;
	unsigned long flags;

	spin_lock_irqsave(&serio_event_lock, flags);

	list_for_each_entry_safe(e, next, &serio_event_list, node) {
		if (object == e->object) {
			/*
			 * If this event is of different type we should not
			 * look further - we only suppress duplicate events
			 * that were sent back-to-back.
			 */
			if (type != e->type)
				break;

			list_del_init(&e->node);
			serio_free_event(e);
		}
	}

	spin_unlock_irqrestore(&serio_event_lock, flags);
}

static void serio_handle_event(struct work_struct *work)
{
	struct serio_event *event;

	mutex_lock(&serio_mutex);

	while ((event = serio_get_event())) {

		switch (event->type) {

		case SERIO_REGISTER_PORT:
			serio_add_port(event->object);
			break;

		case SERIO_RECONNECT_PORT:
			serio_reconnect_port(event->object);
			break;

		case SERIO_RESCAN_PORT:
			serio_disconnect_port(event->object);
			serio_find_driver(event->object);
			break;

		case SERIO_RECONNECT_SUBTREE:
			serio_reconnect_subtree(event->object);
			break;

		case SERIO_ATTACH_DRIVER:
			serio_attach_driver(event->object);
			break;
		}

		serio_remove_duplicate_events(event->object, event->type);
		serio_free_event(event);
	}

	mutex_unlock(&serio_mutex);
}

static DECLARE_WORK(serio_event_work, serio_handle_event);

static int serio_queue_event(void *object, struct module *owner,
			     enum serio_event_type event_type)
{
	unsigned long flags;
	struct serio_event *event;
	int retval = 0;

	spin_lock_irqsave(&serio_event_lock, flags);

	/*
	 * Scan event list for the other events for the same serio port,
	 * starting with the most recent one. If event is the same we
	 * do not need to add a new one. If event is of different type we
	 * need to add this event and should not look further because
	 * we need to preserve sequence of distinct events.
	 */
	list_for_each_entry_reverse(event, &serio_event_list, node) {
		if (event->object == object) {
			if (event->type == event_type)
				goto out;
			break;
		}
	}

	event = kmalloc(sizeof(struct serio_event), GFP_ATOMIC);
	if (!event) {
		pr_err("Not enough memory to queue event %d\n", event_type);
		retval = -ENOMEM;
		goto out;
	}

	if (!try_module_get(owner)) {
		pr_warn("Can't get module reference, dropping event %d\n",
			event_type);
		kfree(event);
		retval = -EINVAL;
		goto out;
	}

	event->type = event_type;
	event->object = object;
	event->owner = owner;

	list_add_tail(&event->node, &serio_event_list);
	queue_work(system_long_wq, &serio_event_work);

out:
	spin_unlock_irqrestore(&serio_event_lock, flags);
	return retval;
}

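/*
 * For example, queueing SERIO_RESCAN_PORT twice in a row for the same port
 * collapses into a single event, while RESCAN, RECONNECT, RESCAN for the
 * same port keeps all three entries so the ordering of distinct requests
 * is preserved.
 */
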
/*
 * Remove all events that have been submitted for a given
 * object, be it serio port or driver.
 */
static void serio_remove_pending_events(void *object)
{
	struct serio_event *event, *next;
	unsigned long flags;

	spin_lock_irqsave(&serio_event_lock, flags);

	list_for_each_entry_safe(event, next, &serio_event_list, node) {
		if (event->object == object) {
			list_del_init(&event->node);
			serio_free_event(event);
		}
	}

	spin_unlock_irqrestore(&serio_event_lock, flags);
}

/*
 * Locate child serio port (if any) that has not been fully registered yet.
 *
 * Children are registered by driver's connect() handler so there can't be a
 * grandchild pending registration together with a child.
 */
static struct serio *serio_get_pending_child(struct serio *parent)
{
	struct serio_event *event;
	struct serio *serio, *child = NULL;
	unsigned long flags;

	spin_lock_irqsave(&serio_event_lock, flags);

	list_for_each_entry(event, &serio_event_list, node) {
		if (event->type == SERIO_REGISTER_PORT) {
			serio = event->object;
			if (serio->parent == parent) {
				child = serio;
				break;
			}
		}
	}

	spin_unlock_irqrestore(&serio_event_lock, flags);
	return child;
}

/*
 * Serio port operations
 */

static ssize_t serio_show_description(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct serio *serio = to_serio_port(dev);
	return sprintf(buf, "%s\n", serio->name);
}

static ssize_t modalias_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct serio *serio = to_serio_port(dev);

	return sprintf(buf, "serio:ty%02Xpr%02Xid%02Xex%02X\n",
		       serio->id.type, serio->id.proto, serio->id.id, serio->id.extra);
}

static ssize_t type_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct serio *serio = to_serio_port(dev);
	return sprintf(buf, "%02x\n", serio->id.type);
}

static ssize_t proto_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct serio *serio = to_serio_port(dev);
	return sprintf(buf, "%02x\n", serio->id.proto);
}

static ssize_t id_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct serio *serio = to_serio_port(dev);
	return sprintf(buf, "%02x\n", serio->id.id);
}

static ssize_t extra_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct serio *serio = to_serio_port(dev);
	return sprintf(buf, "%02x\n", serio->id.extra);
}

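/*
 * For instance, a port with id.type == 0x01 (SERIO_8042) and zero
 * proto/id/extra reports the modalias "serio:ty01pr00id00ex00", which is
 * the string module autoloading matches against entries declared with
 * MODULE_DEVICE_TABLE(serio, ...).
 */
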
static ssize_t drvctl_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
{
	struct serio *serio = to_serio_port(dev);
	struct device_driver *drv;
	int error;

	error = mutex_lock_interruptible(&serio_mutex);
	if (error)
		return error;

	if (!strncmp(buf, "none", count)) {
		serio_disconnect_port(serio);
	} else if (!strncmp(buf, "reconnect", count)) {
		serio_reconnect_subtree(serio);
	} else if (!strncmp(buf, "rescan", count)) {
		serio_disconnect_port(serio);
		serio_find_driver(serio);
		serio_remove_duplicate_events(serio, SERIO_RESCAN_PORT);
	} else if ((drv = driver_find(buf, &serio_bus)) != NULL) {
		serio_disconnect_port(serio);
		error = serio_bind_driver(serio, to_serio_driver(drv));
		serio_remove_duplicate_events(serio, SERIO_RESCAN_PORT);
	} else {
		error = -EINVAL;
	}

	mutex_unlock(&serio_mutex);

	return error ? error : count;
}

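/*
 * Sketch of intended usage from user space (assuming a port named serio0):
 *
 *	echo -n "rescan"  > /sys/bus/serio/devices/serio0/drvctl
 *	echo -n "psmouse" > /sys/bus/serio/devices/serio0/drvctl
 *
 * The first forces a driver re-probe of the port; the second binds the named
 * driver, provided it is registered on the serio bus.
 */
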
static ssize_t serio_show_bind_mode(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct serio *serio = to_serio_port(dev);
	return sprintf(buf, "%s\n", serio->manual_bind ? "manual" : "auto");
}

static ssize_t serio_set_bind_mode(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
{
	struct serio *serio = to_serio_port(dev);
	int retval;

	retval = count;
	if (!strncmp(buf, "manual", count)) {
		serio->manual_bind = true;
	} else if (!strncmp(buf, "auto", count)) {
		serio->manual_bind = false;
	} else {
		retval = -EINVAL;
	}

	return retval;
}

static ssize_t firmware_id_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct serio *serio = to_serio_port(dev);

	return sprintf(buf, "%s\n", serio->firmware_id);
}

static DEVICE_ATTR_RO(type);
static DEVICE_ATTR_RO(proto);
static DEVICE_ATTR_RO(id);
static DEVICE_ATTR_RO(extra);

static struct attribute *serio_device_id_attrs[] = {
	&dev_attr_type.attr,
	&dev_attr_proto.attr,
	&dev_attr_id.attr,
	&dev_attr_extra.attr,
	NULL
};

static const struct attribute_group serio_id_attr_group = {
	.name	= "id",
	.attrs	= serio_device_id_attrs,
};

static DEVICE_ATTR_RO(modalias);
static DEVICE_ATTR_WO(drvctl);
static DEVICE_ATTR(description, S_IRUGO, serio_show_description, NULL);
static DEVICE_ATTR(bind_mode, S_IWUSR | S_IRUGO, serio_show_bind_mode, serio_set_bind_mode);
static DEVICE_ATTR_RO(firmware_id);

static struct attribute *serio_device_attrs[] = {
	&dev_attr_modalias.attr,
	&dev_attr_description.attr,
	&dev_attr_drvctl.attr,
	&dev_attr_bind_mode.attr,
	&dev_attr_firmware_id.attr,
	NULL
};

static const struct attribute_group serio_device_attr_group = {
	.attrs	= serio_device_attrs,
};

static const struct attribute_group *serio_device_attr_groups[] = {
	&serio_id_attr_group,
	&serio_device_attr_group,
	NULL
};

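/*
 * With the id attributes grouped under "id", each registered port is
 * expected to expose sysfs entries such as
 * /sys/bus/serio/devices/serioN/id/type and .../description, .../drvctl,
 * .../bind_mode at the top level of the device directory.
 */
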
static void serio_release_port(struct device *dev)
{
	struct serio *serio = to_serio_port(dev);

	kfree(serio);
	module_put(THIS_MODULE);
}

/*
 * Prepare serio port for registration.
 */
static void serio_init_port(struct serio *serio)
{
	static atomic_t serio_no = ATOMIC_INIT(-1);

	__module_get(THIS_MODULE);

	INIT_LIST_HEAD(&serio->node);
	INIT_LIST_HEAD(&serio->child_node);
	INIT_LIST_HEAD(&serio->children);
	spin_lock_init(&serio->lock);
	mutex_init(&serio->drv_mutex);
	device_initialize(&serio->dev);
	dev_set_name(&serio->dev, "serio%lu",
		     (unsigned long)atomic_inc_return(&serio_no));
	serio->dev.bus = &serio_bus;
	serio->dev.release = serio_release_port;
	serio->dev.groups = serio_device_attr_groups;
	if (serio->parent) {
		serio->dev.parent = &serio->parent->dev;
		serio->depth = serio->parent->depth + 1;
	} else
		serio->depth = 0;
	lockdep_set_subclass(&serio->lock, serio->depth);
}

/*
 * Complete serio port registration.
 * Driver core will attempt to find appropriate driver for the port.
 */
static void serio_add_port(struct serio *serio)
{
	struct serio *parent = serio->parent;
	int error;

	if (parent) {
		serio_pause_rx(parent);
		list_add_tail(&serio->child_node, &parent->children);
		serio_continue_rx(parent);
	}

	list_add_tail(&serio->node, &serio_list);

	if (serio->start)
		serio->start(serio);

	error = device_add(&serio->dev);
	if (error)
		dev_err(&serio->dev,
			"device_add() failed for %s (%s), error: %d\n",
			serio->phys, serio->name, error);
}

/*
 * serio_destroy_port() completes unregistration process and removes
 * port from the system
 */
static void serio_destroy_port(struct serio *serio)
{
	struct serio *child;

	while ((child = serio_get_pending_child(serio)) != NULL) {
		serio_remove_pending_events(child);
		put_device(&child->dev);
	}

	if (serio->stop)
		serio->stop(serio);

	if (serio->parent) {
		serio_pause_rx(serio->parent);
		list_del_init(&serio->child_node);
		serio_continue_rx(serio->parent);
		serio->parent = NULL;
	}

	if (device_is_registered(&serio->dev))
		device_del(&serio->dev);

	list_del_init(&serio->node);
	serio_remove_pending_events(serio);
	put_device(&serio->dev);
}

/*
 * Reconnect serio port (re-initialize attached device).
 * If reconnect fails (old device is no longer attached or
 * there was no device to begin with) we do full rescan in
 * hope of finding a driver for the port.
 */
static int serio_reconnect_port(struct serio *serio)
{
	int error = serio_reconnect_driver(serio);

	if (error) {
		serio_disconnect_port(serio);
		serio_find_driver(serio);
	}

	return error;
}

/*
 * Reconnect serio port and all its children (re-initialize attached
 * devices).
 */
static void serio_reconnect_subtree(struct serio *root)
{
	struct serio *s = root;
	int error;

	do {
		error = serio_reconnect_port(s);
		if (!error) {
			/*
			 * Reconnect was successful, move on to do the
			 * first child.
			 */
			if (!list_empty(&s->children)) {
				s = list_first_entry(&s->children,
						     struct serio, child_node);
				continue;
			}
		}

		/*
		 * Either it was a leaf node or reconnect failed and it
		 * became a leaf node. Continue reconnecting starting with
		 * the next sibling of the parent node.
		 */
		while (s != root) {
			struct serio *parent = s->parent;

			if (!list_is_last(&s->child_node, &parent->children)) {
				s = list_entry(s->child_node.next,
					       struct serio, child_node);
				break;
			}

			s = parent;
		}
	} while (s != root);
}

/*
 * serio_disconnect_port() unbinds a port from its driver. As a side effect
 * all child ports are unbound and destroyed.
 */
static void serio_disconnect_port(struct serio *serio)
{
	struct serio *s = serio;

	/*
	 * Child ports should be disconnected and destroyed
	 * first; we travel the tree in depth-first order.
	 */
	while (!list_empty(&serio->children)) {

		/* Locate a leaf */
		while (!list_empty(&s->children))
			s = list_first_entry(&s->children,
					     struct serio, child_node);

		/*
		 * Prune this leaf node unless it is the one we
		 * started with.
		 */
		if (s != serio) {
			struct serio *parent = s->parent;

			device_release_driver(&s->dev);
			serio_destroy_port(s);

			s = parent;
		}
	}

	/*
	 * OK, no children left, now disconnect this port.
	 */
	device_release_driver(&serio->dev);
}

void serio_rescan(struct serio *serio)
{
	serio_queue_event(serio, NULL, SERIO_RESCAN_PORT);
}
EXPORT_SYMBOL(serio_rescan);

void serio_reconnect(struct serio *serio)
{
	serio_queue_event(serio, NULL, SERIO_RECONNECT_SUBTREE);
}
EXPORT_SYMBOL(serio_reconnect);

/*
 * Submits register request to kseriod for subsequent execution.
 * Note that port registration is always asynchronous.
 */
void __serio_register_port(struct serio *serio, struct module *owner)
{
	serio_init_port(serio);
	serio_queue_event(serio, owner, SERIO_REGISTER_PORT);
}
EXPORT_SYMBOL(__serio_register_port);

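/*
 * Minimal sketch of how a port driver registers a port (names here are
 * illustrative, not from this file): allocate a struct serio, fill in the
 * id and callbacks, then hand it off; the actual device_add() happens later
 * in the kseriod work.
 *
 *	struct serio *serio = kzalloc(sizeof(*serio), GFP_KERNEL);
 *
 *	serio->id.type = SERIO_8042;
 *	serio->write = example_write;
 *	strscpy(serio->name, "Example port", sizeof(serio->name));
 *	strscpy(serio->phys, "example/serio0", sizeof(serio->phys));
 *	serio_register_port(serio);	wraps __serio_register_port()
 */
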
/*
 * Synchronously unregisters serio port.
 */
void serio_unregister_port(struct serio *serio)
{
	mutex_lock(&serio_mutex);
	serio_disconnect_port(serio);
	serio_destroy_port(serio);
	mutex_unlock(&serio_mutex);
}
EXPORT_SYMBOL(serio_unregister_port);

/*
 * Safely unregisters child ports if they are present.
 */
void serio_unregister_child_port(struct serio *serio)
{
	struct serio *s, *next;

	mutex_lock(&serio_mutex);
	list_for_each_entry_safe(s, next, &serio->children, child_node) {
		serio_disconnect_port(s);
		serio_destroy_port(s);
	}
	mutex_unlock(&serio_mutex);
}
EXPORT_SYMBOL(serio_unregister_child_port);

/*
 * Serio driver operations
 */

static ssize_t description_show(struct device_driver *drv, char *buf)
{
	struct serio_driver *driver = to_serio_driver(drv);
	return sprintf(buf, "%s\n", driver->description ? driver->description : "(none)");
}
static DRIVER_ATTR_RO(description);

static ssize_t bind_mode_show(struct device_driver *drv, char *buf)
{
	struct serio_driver *serio_drv = to_serio_driver(drv);
	return sprintf(buf, "%s\n", serio_drv->manual_bind ? "manual" : "auto");
}

static ssize_t bind_mode_store(struct device_driver *drv, const char *buf, size_t count)
{
	struct serio_driver *serio_drv = to_serio_driver(drv);
	int retval;

	retval = count;
	if (!strncmp(buf, "manual", count)) {
		serio_drv->manual_bind = true;
	} else if (!strncmp(buf, "auto", count)) {
		serio_drv->manual_bind = false;
	} else {
		retval = -EINVAL;
	}

	return retval;
}
static DRIVER_ATTR_RW(bind_mode);

static struct attribute *serio_driver_attrs[] = {
	&driver_attr_description.attr,
	&driver_attr_bind_mode.attr,
	NULL
};
ATTRIBUTE_GROUPS(serio_driver);

static int serio_driver_probe(struct device *dev)
{
	struct serio *serio = to_serio_port(dev);
	struct serio_driver *drv = to_serio_driver(dev->driver);

	return serio_connect_driver(serio, drv);
}

static void serio_driver_remove(struct device *dev)
{
	struct serio *serio = to_serio_port(dev);

	serio_disconnect_driver(serio);
}

static void serio_cleanup(struct serio *serio)
{
	mutex_lock(&serio->drv_mutex);
	if (serio->drv && serio->drv->cleanup)
		serio->drv->cleanup(serio);
	mutex_unlock(&serio->drv_mutex);
}

static void serio_shutdown(struct device *dev)
{
	struct serio *serio = to_serio_port(dev);

	serio_cleanup(serio);
}

static void serio_attach_driver(struct serio_driver *drv)
{
	int error;

	error = driver_attach(&drv->driver);
	if (error)
		pr_warn("driver_attach() failed for %s with error %d\n",
			drv->driver.name, error);
}

int __serio_register_driver(struct serio_driver *drv, struct module *owner, const char *mod_name)
{
	bool manual_bind = drv->manual_bind;
	int error;

	drv->driver.bus = &serio_bus;
	drv->driver.owner = owner;
	drv->driver.mod_name = mod_name;

	/*
	 * Temporarily disable automatic binding because probing
	 * takes a long time and we are better off doing it in kseriod.
	 */
	drv->manual_bind = true;

	error = driver_register(&drv->driver);
	if (error) {
		pr_err("driver_register() failed for %s, error: %d\n",
			drv->driver.name, error);
		return error;
	}

	/*
	 * Restore original bind mode and let kseriod bind the
	 * driver to free ports.
	 */
	if (!manual_bind) {
		drv->manual_bind = false;
		error = serio_queue_event(drv, NULL, SERIO_ATTACH_DRIVER);
		if (error) {
			driver_unregister(&drv->driver);
			return error;
		}
	}

	return 0;
}
EXPORT_SYMBOL(__serio_register_driver);

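/*
 * Sketch of the driver side (illustrative names, not part of this file):
 *
 *	static struct serio_driver example_drv = {
 *		.driver		= { .name = "example" },
 *		.description	= "Example serio device driver",
 *		.id_table	= example_serio_ids,
 *		.interrupt	= example_interrupt,
 *		.connect	= example_connect,
 *		.disconnect	= example_disconnect,
 *	};
 *	module_serio_driver(example_drv);
 *
 * module_serio_driver() expands to serio_register_driver() and
 * serio_unregister_driver(), which call __serio_register_driver() with
 * THIS_MODULE and KBUILD_MODNAME.
 */
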
void serio_unregister_driver(struct serio_driver *drv)
{
	struct serio *serio;

	mutex_lock(&serio_mutex);

	drv->manual_bind = true;	/* so serio_find_driver ignores it */
	serio_remove_pending_events(drv);

start_over:
	list_for_each_entry(serio, &serio_list, node) {
		if (serio->drv == drv) {
			serio_disconnect_port(serio);
			serio_find_driver(serio);
			/* we could've deleted some ports, restart */
			goto start_over;
		}
	}

	driver_unregister(&drv->driver);
	mutex_unlock(&serio_mutex);
}
EXPORT_SYMBOL(serio_unregister_driver);

static void serio_set_drv(struct serio *serio, struct serio_driver *drv)
{
	serio_pause_rx(serio);
	serio->drv = drv;
	serio_continue_rx(serio);
}

static int serio_bus_match(struct device *dev, struct device_driver *drv)
{
	struct serio *serio = to_serio_port(dev);
	struct serio_driver *serio_drv = to_serio_driver(drv);

	if (serio->manual_bind || serio_drv->manual_bind)
		return 0;

	return serio_match_port(serio_drv->id_table, serio);
}

#define SERIO_ADD_UEVENT_VAR(fmt, val...)				\
	do {								\
		int err = add_uevent_var(env, fmt, val);		\
		if (err)						\
			return err;					\
	} while (0)

static int serio_uevent(const struct device *dev, struct kobj_uevent_env *env)
{
	const struct serio *serio;

	if (!dev)
		return -ENODEV;

	serio = to_serio_port(dev);

	SERIO_ADD_UEVENT_VAR("SERIO_TYPE=%02x", serio->id.type);
	SERIO_ADD_UEVENT_VAR("SERIO_PROTO=%02x", serio->id.proto);
	SERIO_ADD_UEVENT_VAR("SERIO_ID=%02x", serio->id.id);
	SERIO_ADD_UEVENT_VAR("SERIO_EXTRA=%02x", serio->id.extra);

	SERIO_ADD_UEVENT_VAR("MODALIAS=serio:ty%02Xpr%02Xid%02Xex%02X",
			     serio->id.type, serio->id.proto, serio->id.id, serio->id.extra);

	if (serio->firmware_id[0])
		SERIO_ADD_UEVENT_VAR("SERIO_FIRMWARE_ID=%s",
				     serio->firmware_id);

	return 0;
}
#undef SERIO_ADD_UEVENT_VAR

#ifdef CONFIG_PM
static int serio_suspend(struct device *dev)
{
	struct serio *serio = to_serio_port(dev);

	serio_cleanup(serio);

	return 0;
}

static int serio_resume(struct device *dev)
{
	struct serio *serio = to_serio_port(dev);
	int error = -ENOENT;

	mutex_lock(&serio->drv_mutex);
	if (serio->drv && serio->drv->fast_reconnect) {
		error = serio->drv->fast_reconnect(serio);
		if (error && error != -ENOENT)
			dev_warn(dev, "fast reconnect failed with error %d\n",
				 error);
	}
	mutex_unlock(&serio->drv_mutex);

	if (error) {
		/*
		 * Driver reconnect can take a while, so better let
		 * kseriod deal with it.
		 */
		serio_queue_event(serio, NULL, SERIO_RECONNECT_PORT);
	}

	return 0;
}

static const struct dev_pm_ops serio_pm_ops = {
	.suspend	= serio_suspend,
	.resume		= serio_resume,
	.poweroff	= serio_suspend,
	.restore	= serio_resume,
};
#endif /* CONFIG_PM */

/* called from serio_driver->connect/disconnect methods under serio_mutex */
int serio_open(struct serio *serio, struct serio_driver *drv)
{
	serio_set_drv(serio, drv);

	if (serio->open && serio->open(serio)) {
		serio_set_drv(serio, NULL);
		return -1;
	}
	return 0;
}
EXPORT_SYMBOL(serio_open);

/* called from serio_driver->connect/disconnect methods under serio_mutex */
void serio_close(struct serio *serio)
{
	if (serio->close)
		serio->close(serio);

	serio_set_drv(serio, NULL);
}
EXPORT_SYMBOL(serio_close);

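/*
 * A driver's connect() handler (hypothetical names) typically pairs these:
 * it claims the port with serio_open() and releases it in disconnect() with
 * serio_close().
 *
 *	static int example_connect(struct serio *serio, struct serio_driver *drv)
 *	{
 *		int error = serio_open(serio, drv);
 *
 *		if (error)
 *			return error;
 *
 *		return example_setup_device(serio);
 *	}
 */
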
irqreturn_t serio_interrupt(struct serio *serio,
		unsigned char data, unsigned int dfl)
{
	unsigned long flags;
	irqreturn_t ret = IRQ_NONE;

	spin_lock_irqsave(&serio->lock, flags);

	if (likely(serio->drv)) {
		ret = serio->drv->interrupt(serio, data, dfl);
	} else if (!dfl && device_is_registered(&serio->dev)) {
		serio_rescan(serio);
		ret = IRQ_HANDLED;
	}

	spin_unlock_irqrestore(&serio->lock, flags);

	return ret;
}
EXPORT_SYMBOL(serio_interrupt);

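/*
 * Port drivers call serio_interrupt() from their ISR to push received bytes
 * up to the bound serio driver, e.g. (hypothetical handler and register
 * accessor):
 *
 *	static irqreturn_t example_isr(int irq, void *dev_id)
 *	{
 *		struct serio *serio = dev_id;
 *		unsigned char c = example_read_data_reg();
 *
 *		return serio_interrupt(serio, c, 0);
 *	}
 */
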
const struct bus_type serio_bus = {
	.name		= "serio",
	.drv_groups	= serio_driver_groups,
	.match		= serio_bus_match,
	.uevent		= serio_uevent,
	.probe		= serio_driver_probe,
	.remove		= serio_driver_remove,
	.shutdown	= serio_shutdown,
#ifdef CONFIG_PM
	.pm		= &serio_pm_ops,
#endif
};
EXPORT_SYMBOL(serio_bus);

static int __init serio_init(void)
{
	int error;

	error = bus_register(&serio_bus);
	if (error) {
		pr_err("Failed to register serio bus, error: %d\n", error);
		return error;
	}

	return 0;
}

static void __exit serio_exit(void)
{
	bus_unregister(&serio_bus);

	/*
	 * There should not be any outstanding events but work may
	 * still be scheduled so simply cancel it.
	 */
	cancel_work_sync(&serio_event_work);
}

subsys_initcall(serio_init);
module_exit(serio_exit);