// SPDX-License-Identifier: GPL-2.0-only

#include <linux/err.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/idr.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/poll.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/wwan.h>
#define WWAN_MAX_MINORS 256 /* 256 minors allowed with register_chrdev() */

static DEFINE_MUTEX(wwan_register_lock); /* WWAN device create|remove lock */
static DEFINE_IDA(minors); /* minors for WWAN port chardevs */
static DEFINE_IDA(wwan_dev_ids); /* for unique WWAN device IDs */
static struct class *wwan_class;
static int wwan_major;

#define to_wwan_dev(d) container_of(d, struct wwan_device, dev)
#define to_wwan_port(d) container_of(d, struct wwan_port, dev)

/* WWAN port flags: bit numbers in wwan_port.flags */
#define WWAN_PORT_TX_OFF	0
/**
 * struct wwan_device - The structure that defines a WWAN device
 *
 * @id: WWAN device unique ID.
 * @dev: Underlying device.
 * @port_id: Current available port ID to pick.
 */
struct wwan_device {
	unsigned int id;
	struct device dev;
	atomic_t port_id;
};

/**
 * struct wwan_port - The structure that defines a WWAN port
 * @type: Port type
 * @start_count: Port start counter
 * @flags: Store port state and capabilities
 * @ops: Pointer to WWAN port operations
 * @ops_lock: Protect port ops
 * @dev: Underlying device
 * @rxq: Buffer inbound queue
 * @waitqueue: The waitqueue for port fops (read/write/poll)
 */
struct wwan_port {
	enum wwan_port_type type;
	unsigned int start_count;
	unsigned long flags;
	const struct wwan_port_ops *ops;
	struct mutex ops_lock; /* Serialize ops + protect against removal */
	struct device dev;
	struct sk_buff_head rxq;
	wait_queue_head_t waitqueue;
};
static void wwan_dev_destroy(struct device *dev)
{
	struct wwan_device *wwandev = to_wwan_dev(dev);

	ida_free(&wwan_dev_ids, wwandev->id);
	kfree(wwandev);
}

static const struct device_type wwan_dev_type = {
	.name    = "wwan_dev",
	.release = wwan_dev_destroy,
};

static int wwan_dev_parent_match(struct device *dev, const void *parent)
{
	return (dev->type == &wwan_dev_type && dev->parent == parent);
}

static struct wwan_device *wwan_dev_get_by_parent(struct device *parent)
{
	struct device *dev;

	dev = class_find_device(wwan_class, NULL, parent, wwan_dev_parent_match);
	if (!dev)
		return ERR_PTR(-ENODEV);

	return to_wwan_dev(dev);
}
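
/* Note: class_find_device() takes a reference (get_device()) on the matched
 * device, so a wwan_device returned by wwan_dev_get_by_parent() must
 * eventually be released with put_device(). wwan_create_dev() and
 * wwan_remove_dev() below balance that reference per registered port.
 */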
/* This function allocates and registers a new WWAN device OR, if a WWAN device
 * already exists for the given parent, it gets a reference and returns it.
 * This function is not exported (for now), it is called indirectly via
 * wwan_create_port().
 */
static struct wwan_device *wwan_create_dev(struct device *parent)
{
	struct wwan_device *wwandev;
	int err, id;

	/* The 'find-alloc-register' operation must be protected against
	 * concurrent execution, a WWAN device is possibly shared between
	 * multiple callers or concurrently unregistered from wwan_remove_dev().
	 */
	mutex_lock(&wwan_register_lock);

	/* If wwandev already exists, return it */
	wwandev = wwan_dev_get_by_parent(parent);
	if (!IS_ERR(wwandev))
		goto done_unlock;

	id = ida_alloc(&wwan_dev_ids, GFP_KERNEL);
	if (id < 0) {
		wwandev = ERR_PTR(id);
		goto done_unlock;
	}

	wwandev = kzalloc(sizeof(*wwandev), GFP_KERNEL);
	if (!wwandev) {
		wwandev = ERR_PTR(-ENOMEM);
		ida_free(&wwan_dev_ids, id);
		goto done_unlock;
	}

	wwandev->dev.parent = parent;
	wwandev->dev.class = wwan_class;
	wwandev->dev.type = &wwan_dev_type;
	wwandev->id = id;
	dev_set_name(&wwandev->dev, "wwan%d", wwandev->id);

	err = device_register(&wwandev->dev);
	if (err) {
		put_device(&wwandev->dev);
		wwandev = ERR_PTR(err);
	}

done_unlock:
	mutex_unlock(&wwan_register_lock);

	return wwandev;
}
static int is_wwan_child(struct device *dev, void *data)
{
	return dev->class == wwan_class;
}

static void wwan_remove_dev(struct wwan_device *wwandev)
{
	int ret;

	/* Prevent concurrent picking from wwan_create_dev */
	mutex_lock(&wwan_register_lock);

	/* WWAN device is created and registered (get+add) along with its first
	 * child port, and subsequent port registrations only grab a reference
	 * (get). The WWAN device must then be unregistered (del+put) along with
	 * its latest port, and the reference simply dropped (put) otherwise.
	 */
	ret = device_for_each_child(&wwandev->dev, NULL, is_wwan_child);
	if (!ret)
		device_unregister(&wwandev->dev);
	else
		put_device(&wwandev->dev);

	mutex_unlock(&wwan_register_lock);
}
/* ------- WWAN port management ------- */
static void wwan_port_destroy(struct device *dev)
{
	struct wwan_port *port = to_wwan_port(dev);

	ida_free(&minors, MINOR(port->dev.devt));
	skb_queue_purge(&port->rxq);
	mutex_destroy(&port->ops_lock);
	kfree(port);
}

static const struct device_type wwan_port_dev_type = {
	.name = "wwan_port",
	.release = wwan_port_destroy,
};

static int wwan_port_minor_match(struct device *dev, const void *minor)
{
	return (dev->type == &wwan_port_dev_type &&
		MINOR(dev->devt) == *(unsigned int *)minor);
}

static struct wwan_port *wwan_port_get_by_minor(unsigned int minor)
{
	struct device *dev;

	dev = class_find_device(wwan_class, NULL, &minor, wwan_port_minor_match);
	if (!dev)
		return ERR_PTR(-ENODEV);

	return to_wwan_port(dev);
}
/* Keep aligned with wwan_port_type enum */
static const char * const wwan_port_type_str[] = {
	"AT",
	"MBIM",
	"QMI",
	"QCDM",
	"FIREHOSE"
};

struct wwan_port *wwan_create_port(struct device *parent,
				   enum wwan_port_type type,
				   const struct wwan_port_ops *ops,
				   void *drvdata)
{
	struct wwan_device *wwandev;
	struct wwan_port *port;
	int minor, err = -ENOMEM;

	if (type >= WWAN_PORT_MAX || !ops)
		return ERR_PTR(-EINVAL);

	/* A port is always a child of a WWAN device, retrieve (allocate or
	 * pick) the WWAN device based on the provided parent device.
	 */
	wwandev = wwan_create_dev(parent);
	if (IS_ERR(wwandev))
		return ERR_CAST(wwandev);

	/* A port is exposed as a character device, get a minor */
	minor = ida_alloc_range(&minors, 0, WWAN_MAX_MINORS - 1, GFP_KERNEL);
	if (minor < 0)
		goto error_wwandev_remove;

	port = kzalloc(sizeof(*port), GFP_KERNEL);
	if (!port) {
		ida_free(&minors, minor);
		goto error_wwandev_remove;
	}

	port->type = type;
	port->ops = ops;
	mutex_init(&port->ops_lock);
	skb_queue_head_init(&port->rxq);
	init_waitqueue_head(&port->waitqueue);

	port->dev.parent = &wwandev->dev;
	port->dev.class = wwan_class;
	port->dev.type = &wwan_port_dev_type;
	port->dev.devt = MKDEV(wwan_major, minor);
	dev_set_drvdata(&port->dev, drvdata);

	/* create unique name based on wwan device id, port index and type */
	dev_set_name(&port->dev, "wwan%up%u%s", wwandev->id,
		     atomic_inc_return(&wwandev->port_id),
		     wwan_port_type_str[port->type]);

	err = device_register(&port->dev);
	if (err)
		goto error_put_device;

	return port;

error_put_device:
	put_device(&port->dev);
error_wwandev_remove:
	wwan_remove_dev(wwandev);

	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(wwan_create_port);
void wwan_remove_port(struct wwan_port *port)
{
	struct wwan_device *wwandev = to_wwan_dev(port->dev.parent);

	mutex_lock(&port->ops_lock);
	if (port->start_count)
		port->ops->stop(port);
	port->ops = NULL; /* Prevent any new port operations (e.g. from fops) */
	mutex_unlock(&port->ops_lock);

	wake_up_interruptible(&port->waitqueue);

	skb_queue_purge(&port->rxq);
	dev_set_drvdata(&port->dev, NULL);
	device_unregister(&port->dev);

	/* Release related wwan device */
	wwan_remove_dev(wwandev);
}
EXPORT_SYMBOL_GPL(wwan_remove_port);
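
/*
 * Illustrative driver-side usage (a minimal sketch, not part of this file):
 * a WWAN driver provides start/stop/tx callbacks through wwan_port_ops and
 * registers a port against its own device. The my_modem_* names and the
 * parent_dev/modem variables below are hypothetical placeholders.
 *
 *	static int my_modem_start(struct wwan_port *port)
 *	{
 *		struct my_modem *modem = wwan_port_get_drvdata(port);
 *
 *		return my_modem_open_channel(modem);
 *	}
 *
 *	static void my_modem_stop(struct wwan_port *port)
 *	{
 *		my_modem_close_channel(wwan_port_get_drvdata(port));
 *	}
 *
 *	static int my_modem_tx(struct wwan_port *port, struct sk_buff *skb)
 *	{
 *		return my_modem_send(wwan_port_get_drvdata(port), skb);
 *	}
 *
 *	static const struct wwan_port_ops my_modem_port_ops = {
 *		.start = my_modem_start,
 *		.stop = my_modem_stop,
 *		.tx = my_modem_tx,
 *	};
 *
 *	port = wwan_create_port(parent_dev, WWAN_PORT_AT, &my_modem_port_ops,
 *				modem);
 *	if (IS_ERR(port))
 *		return PTR_ERR(port);
 *	...
 *	wwan_remove_port(port);	// on driver removal or device unplug
 */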
void wwan_port_rx(struct wwan_port *port, struct sk_buff *skb)
{
	skb_queue_tail(&port->rxq, skb);
	wake_up_interruptible(&port->waitqueue);
}
EXPORT_SYMBOL_GPL(wwan_port_rx);

void wwan_port_txon(struct wwan_port *port)
{
	clear_bit(WWAN_PORT_TX_OFF, &port->flags);
	wake_up_interruptible(&port->waitqueue);
}
EXPORT_SYMBOL_GPL(wwan_port_txon);

void wwan_port_txoff(struct wwan_port *port)
{
	set_bit(WWAN_PORT_TX_OFF, &port->flags);
}
EXPORT_SYMBOL_GPL(wwan_port_txoff);

void *wwan_port_get_drvdata(struct wwan_port *port)
{
	return dev_get_drvdata(&port->dev);
}
EXPORT_SYMBOL_GPL(wwan_port_get_drvdata);
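
/*
 * Illustrative RX/flow-control usage from a driver (a minimal sketch, names
 * hypothetical): forward received modem data to the core and toggle TX
 * according to the hardware queue state.
 *
 *	// in the driver's receive completion path
 *	wwan_port_rx(port, skb);	// queue skb, wake up readers/pollers
 *
 *	// when the TX path gets congested, and when it has room again
 *	wwan_port_txoff(port);		// writers block or get -EAGAIN
 *	wwan_port_txon(port);		// wake up blocked writers
 */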
static int wwan_port_op_start(struct wwan_port *port)
{
	int ret = 0;

	mutex_lock(&port->ops_lock);
	if (!port->ops) { /* Port got unplugged */
		ret = -ENODEV;
		goto out_unlock;
	}

	/* If port is already started, don't start again */
	if (!port->start_count)
		ret = port->ops->start(port);

	if (!ret)
		port->start_count++;

out_unlock:
	mutex_unlock(&port->ops_lock);

	return ret;
}

static void wwan_port_op_stop(struct wwan_port *port)
{
	mutex_lock(&port->ops_lock);
	port->start_count--;
	if (port->ops && !port->start_count)
		port->ops->stop(port);
	mutex_unlock(&port->ops_lock);
}

static int wwan_port_op_tx(struct wwan_port *port, struct sk_buff *skb)
{
	int ret;

	mutex_lock(&port->ops_lock);
	if (!port->ops) { /* Port got unplugged */
		ret = -ENODEV;
		goto out_unlock;
	}

	ret = port->ops->tx(port, skb);

out_unlock:
	mutex_unlock(&port->ops_lock);

	return ret;
}
static bool is_read_blocked(struct wwan_port *port)
{
	return skb_queue_empty(&port->rxq) && port->ops;
}

static bool is_write_blocked(struct wwan_port *port)
{
	return test_bit(WWAN_PORT_TX_OFF, &port->flags) && port->ops;
}

static int wwan_wait_rx(struct wwan_port *port, bool nonblock)
{
	if (!is_read_blocked(port))
		return 0;

	if (nonblock)
		return -EAGAIN;

	if (wait_event_interruptible(port->waitqueue, !is_read_blocked(port)))
		return -ERESTARTSYS;

	return 0;
}

static int wwan_wait_tx(struct wwan_port *port, bool nonblock)
{
	if (!is_write_blocked(port))
		return 0;

	if (nonblock)
		return -EAGAIN;

	if (wait_event_interruptible(port->waitqueue, !is_write_blocked(port)))
		return -ERESTARTSYS;

	return 0;
}
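
/* Character device operations: each read() returns at most one received
 * message (skb), and a partially read skb is pushed back to the head of the
 * queue; each write() is packed into a single skb handed to the driver's tx
 * op. The wait helpers above also stop blocking once the port is unplugged
 * (port->ops cleared by wwan_remove_port(), which wakes the waitqueue), so
 * sleeping readers and writers do not hang on removal.
 */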
static int wwan_port_fops_open(struct inode *inode, struct file *file)
{
	struct wwan_port *port;
	int err;

	port = wwan_port_get_by_minor(iminor(inode));
	if (IS_ERR(port))
		return PTR_ERR(port);

	file->private_data = port;
	stream_open(inode, file);

	err = wwan_port_op_start(port);
	if (err)
		put_device(&port->dev);

	return err;
}

static int wwan_port_fops_release(struct inode *inode, struct file *filp)
{
	struct wwan_port *port = filp->private_data;

	wwan_port_op_stop(port);
	put_device(&port->dev);

	return 0;
}
static ssize_t wwan_port_fops_read(struct file *filp, char __user *buf,
				   size_t count, loff_t *ppos)
{
	struct wwan_port *port = filp->private_data;
	struct sk_buff *skb;
	size_t copied;
	int ret;

	ret = wwan_wait_rx(port, !!(filp->f_flags & O_NONBLOCK));
	if (ret)
		return ret;

	skb = skb_dequeue(&port->rxq);
	if (!skb)
		return -EIO;

	copied = min_t(size_t, count, skb->len);
	if (copy_to_user(buf, skb->data, copied)) {
		kfree_skb(skb);
		return -EFAULT;
	}
	skb_pull(skb, copied);

	/* skb is not fully consumed, keep it in the queue */
	if (skb->len)
		skb_queue_head(&port->rxq, skb);
	else
		consume_skb(skb);

	return copied;
}
static ssize_t wwan_port_fops_write(struct file *filp, const char __user *buf,
				    size_t count, loff_t *offp)
{
	struct wwan_port *port = filp->private_data;
	struct sk_buff *skb;
	int ret;

	ret = wwan_wait_tx(port, !!(filp->f_flags & O_NONBLOCK));
	if (ret)
		return ret;

	skb = alloc_skb(count, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	if (copy_from_user(skb_put(skb, count), buf, count)) {
		kfree_skb(skb);
		return -EFAULT;
	}

	ret = wwan_port_op_tx(port, skb);
	if (ret) {
		kfree_skb(skb);
		return ret;
	}

	return count;
}
static __poll_t wwan_port_fops_poll(struct file *filp, poll_table *wait)
{
	struct wwan_port *port = filp->private_data;
	__poll_t mask = 0;

	poll_wait(filp, &port->waitqueue, wait);

	if (!is_write_blocked(port))
		mask |= EPOLLOUT | EPOLLWRNORM;
	if (!is_read_blocked(port))
		mask |= EPOLLIN | EPOLLRDNORM;
	if (!port->ops) /* Port got unplugged */
		mask |= EPOLLHUP | EPOLLERR;

	return mask;
}
static const struct file_operations wwan_port_fops = {
	.owner = THIS_MODULE,
	.open = wwan_port_fops_open,
	.release = wwan_port_fops_release,
	.read = wwan_port_fops_read,
	.write = wwan_port_fops_write,
	.poll = wwan_port_fops_poll,
	.llseek = noop_llseek,
};
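
/*
 * Illustrative userspace view (a sketch, not part of this file): each port is
 * exposed as a character device named after the WWAN device id, port index
 * and type (e.g. "wwan0p1AT"), typically surfaced by udev under /dev (the
 * exact node path depends on the distribution). It is used with plain file
 * I/O:
 *
 *	char buf[256];
 *	int fd = open("/dev/wwan0p1AT", O_RDWR);
 *	write(fd, "AT\r", 3);
 *	ssize_t n = read(fd, buf, sizeof(buf));
 */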
static int __init wwan_init(void)
{
	wwan_class = class_create(THIS_MODULE, "wwan");
	if (IS_ERR(wwan_class))
		return PTR_ERR(wwan_class);

	/* chrdev used for wwan ports */
	wwan_major = register_chrdev(0, "wwan_port", &wwan_port_fops);
	if (wwan_major < 0) {
		class_destroy(wwan_class);
		return wwan_major;
	}

	return 0;
}
static void __exit wwan_exit(void)
{
	unregister_chrdev(wwan_major, "wwan_port");
	class_destroy(wwan_class);
}

module_init(wwan_init);
module_exit(wwan_exit);

MODULE_DESCRIPTION("WWAN core");
MODULE_LICENSE("GPL v2");