1 // SPDX-License-Identifier: GPL-2.0
3 * cdev.c - Character device component for Mostcore
5 * Copyright (C) 2013-2015 Microchip Technology Germany II GmbH & Co. KG
8 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
9 #include <linux/module.h>
10 #include <linux/sched.h>
12 #include <linux/slab.h>
13 #include <linux/device.h>
14 #include <linux/cdev.h>
15 #include <linux/poll.h>
16 #include <linux/kfifo.h>
17 #include <linux/uaccess.h>
18 #include <linux/idr.h>
19 #include "most/core.h"
21 #define CHRDEV_REGION_SIZE 50
/*
 * Component-global state plus per-channel bookkeeping.
 *
 * NOTE(review): this excerpt is elided — it interleaves fields of two
 * structures (the singleton `struct cdev_component` and the per-channel
 * `struct comp_channel`); the closing braces and several members (class,
 * devno, major, minor_id, dev, wq, access_ref, mbo_offs, cdev) are not
 * visible here. Confirm against the full file before editing.
 */
23 static struct cdev_component {
28 struct core_component cc;
33 spinlock_t unlink; /* synchronization lock to unlink channels */
36 struct mutex io_mutex;
37 struct most_interface *iface;
38 struct most_channel_config *cfg;
39 unsigned int channel_id;
42 DECLARE_KFIFO_PTR(fifo, typeof(struct mbo *));
44 struct list_head list;
/* Map an embedded cdev back to its owning comp_channel. */
47 #define to_channel(d) container_of(d, struct comp_channel, cdev)
/* Global list of live channels, guarded by ch_list_lock. */
48 static struct list_head channel_list;
49 static spinlock_t ch_list_lock;
/* Return true if the core can hand this channel a free MBO right now. */
51 static inline bool ch_has_mbo(struct comp_channel *c)
53 return channel_has_mbo(c->iface, c->channel_id, &comp.cc) > 0;
/*
 * Get the MBO the channel should currently work on: reuse the one at the
 * head of the fifo if present, otherwise request a fresh MBO from the core
 * and queue it.  NOTE(review): the success check between most_get_mbo()
 * and kfifo_in(), and the return statement, are elided in this excerpt.
 */
56 static inline struct mbo *ch_get_mbo(struct comp_channel *c, struct mbo **mbo)
58 if (!kfifo_peek(&c->fifo, mbo)) {
59 *mbo = most_get_mbo(c->iface, c->channel_id, &comp.cc);
61 kfifo_in(&c->fifo, mbo, 1);
/*
 * Look up the comp_channel registered for (iface, id) by scanning the
 * global channel_list under ch_list_lock.  NOTE(review): the match body,
 * the use of found_channel, and the return statement are elided here —
 * presumably returns the matching channel or NULL; confirm in full file.
 */
66 static struct comp_channel *get_channel(struct most_interface *iface, int id)
68 struct comp_channel *c, *tmp;
70 int found_channel = 0;
72 spin_lock_irqsave(&ch_list_lock, flags);
73 list_for_each_entry_safe(c, tmp, &channel_list, list) {
74 if ((c->iface == iface) && (c->channel_id == id)) {
79 spin_unlock_irqrestore(&ch_list_lock, flags);
/*
 * Drain all queued MBOs from the channel's fifo, then ask the core to stop
 * the channel.  NOTE(review): the (struct kfifo *) cast on a
 * DECLARE_KFIFO_PTR fifo bypasses kfifo's type checking and looks suspect
 * (kfifo_out(&c->fifo, ...) should work directly); also the loop body that
 * disposes of each popped mbo is elided in this excerpt — verify against
 * the full file before changing.
 */
85 static void stop_channel(struct comp_channel *c)
89 while (kfifo_out((struct kfifo *)&c->fifo, &mbo, 1))
91 most_stop_channel(c->iface, c->channel_id, &comp.cc);
/*
 * Tear down the user-space device node and unlink the channel from the
 * global list.  NOTE(review): the cdev_del/list_del statements between the
 * visible lines are elided in this excerpt.
 */
94 static void destroy_cdev(struct comp_channel *c)
98 device_destroy(comp.class, c->devno);
100 spin_lock_irqsave(&ch_list_lock, flags);
102 spin_unlock_irqrestore(&ch_list_lock, flags);
/*
 * Release the per-channel resources: give the minor number back to the
 * IDA, free the mbo fifo, and (elided in this excerpt) free the channel
 * object itself.
 */
105 static void destroy_channel(struct comp_channel *c)
107 ida_simple_remove(&comp.minor_id, MINOR(c->devno));
108 kfifo_free(&c->fifo);
113 * comp_open - implements the syscall to open the device
114 * @inode: inode pointer
115 * @filp: file pointer
117 * This stores the channel pointer in the private data field of
118 * the file structure and activates the channel within the core.
120 static int comp_open(struct inode *inode, struct file *filp)
122 struct comp_channel *c;
125 c = to_channel(inode->i_cdev);
126 filp->private_data = c;
/* RX channels may only be opened read-only, TX only write-only. */
128 if (((c->cfg->direction == MOST_CH_RX) &&
129 ((filp->f_flags & O_ACCMODE) != O_RDONLY)) ||
130 ((c->cfg->direction == MOST_CH_TX) &&
131 ((filp->f_flags & O_ACCMODE) != O_WRONLY))) {
132 pr_info("WARN: Access flags mismatch\n");
/* Serialize open against close/disconnect on this channel. */
136 mutex_lock(&c->io_mutex);
/* NOTE(review): the !c->dev and busy checks are elided in this excerpt. */
138 pr_info("WARN: Device is destroyed\n");
139 mutex_unlock(&c->io_mutex);
144 pr_info("WARN: Device is busy\n");
145 mutex_unlock(&c->io_mutex);
150 ret = most_start_channel(c->iface, c->channel_id, &comp.cc);
153 mutex_unlock(&c->io_mutex);
158 * comp_close - implements the syscall to close the device
159 * @inode: inode pointer
160 * @filp: file pointer
162 * This stops the channel within the core.
164 static int comp_close(struct inode *inode, struct file *filp)
166 struct comp_channel *c = to_channel(inode->i_cdev);
168 mutex_lock(&c->io_mutex);
/*
 * Take the unlink spinlock so the release cannot race with the rx
 * completion path.  NOTE(review): the statements under the lock and the
 * branch separating the two unlock paths are elided in this excerpt.
 */
169 spin_lock(&c->unlink);
171 spin_unlock(&c->unlink);
174 mutex_unlock(&c->io_mutex);
176 mutex_unlock(&c->io_mutex);
183 * comp_write - implements the syscall to write to the device
184 * @filp: file pointer
185 * @buf: pointer to user buffer
186 * @count: number of bytes to write
187 * @offset: offset from where to start writing
189 static ssize_t comp_write(struct file *filp, const char __user *buf,
190 size_t count, loff_t *offset)
193 size_t to_copy, left;
194 struct mbo *mbo = NULL;
195 struct comp_channel *c = filp->private_data;
197 mutex_lock(&c->io_mutex);
/*
 * Block (unless O_NONBLOCK) until an MBO is available or the device
 * goes away.  The mutex is dropped while sleeping so completions and
 * close can make progress.
 */
198 while (c->dev && !ch_get_mbo(c, &mbo)) {
199 mutex_unlock(&c->io_mutex);
201 if ((filp->f_flags & O_NONBLOCK))
203 if (wait_event_interruptible(c->wq, ch_has_mbo(c) || !c->dev))
205 mutex_lock(&c->io_mutex);
/* Device may have been disconnected while we slept. */
208 if (unlikely(!c->dev)) {
/* Copy at most what fits into the remainder of the current buffer. */
213 to_copy = min(count, c->cfg->buffer_size - c->mbo_offs);
214 left = copy_from_user(mbo->virt_address + c->mbo_offs, buf, to_copy);
/* left == to_copy means nothing was copied: fault the whole request. */
215 if (left == to_copy) {
220 c->mbo_offs += to_copy - left;
/*
 * Submit once the buffer is full; control/async channels submit each
 * write immediately.
 */
221 if (c->mbo_offs >= c->cfg->buffer_size ||
222 c->cfg->data_type == MOST_CH_CONTROL ||
223 c->cfg->data_type == MOST_CH_ASYNC) {
224 kfifo_skip(&c->fifo);
225 mbo->buffer_length = c->mbo_offs;
227 most_submit_mbo(mbo);
230 ret = to_copy - left;
232 mutex_unlock(&c->io_mutex);
237 * comp_read - implements the syscall to read from the device
238 * @filp: file pointer
239 * @buf: pointer to user buffer
240 * @count: number of bytes to read
241 * @offset: offset from where to start reading
244 comp_read(struct file *filp, char __user *buf, size_t count, loff_t *offset)
246 size_t to_copy, not_copied, copied;
247 struct mbo *mbo = NULL;
248 struct comp_channel *c = filp->private_data;
250 mutex_lock(&c->io_mutex);
/*
 * Block (unless O_NONBLOCK) until a completed MBO sits in the fifo or
 * the device goes away; the mutex is released while sleeping.
 */
251 while (c->dev && !kfifo_peek(&c->fifo, &mbo)) {
252 mutex_unlock(&c->io_mutex);
253 if (filp->f_flags & O_NONBLOCK)
255 if (wait_event_interruptible(c->wq,
256 (!kfifo_is_empty(&c->fifo) ||
259 mutex_lock(&c->io_mutex);
262 /* make sure we don't submit to gone devices */
263 if (unlikely(!c->dev)) {
264 mutex_unlock(&c->io_mutex);
/* Copy no more than the unread remainder of this MBO. */
268 to_copy = min_t(size_t,
270 mbo->processed_length - c->mbo_offs);
272 not_copied = copy_to_user(buf,
273 mbo->virt_address + c->mbo_offs,
276 copied = to_copy - not_copied;
278 c->mbo_offs += copied;
/* MBO fully consumed: drop it from the fifo (recycle path elided). */
279 if (c->mbo_offs >= mbo->processed_length) {
280 kfifo_skip(&c->fifo);
284 mutex_unlock(&c->io_mutex);
/*
 * poll/select support: RX is readable when data is queued (or the device
 * is gone, so the reader can observe the error); TX is writable when an
 * MBO is available.  A gone device always reports ready.
 */
288 static __poll_t comp_poll(struct file *filp, poll_table *wait)
290 struct comp_channel *c = filp->private_data;
293 poll_wait(filp, &c->wq, wait);
295 mutex_lock(&c->io_mutex);
296 if (c->cfg->direction == MOST_CH_RX) {
297 if (!c->dev || !kfifo_is_empty(&c->fifo))
298 mask |= EPOLLIN | EPOLLRDNORM;
300 if (!c->dev || !kfifo_is_empty(&c->fifo) || ch_has_mbo(c))
301 mask |= EPOLLOUT | EPOLLWRNORM;
303 mutex_unlock(&c->io_mutex);
308 * Initialization of struct file_operations
/*
 * NOTE(review): the .open/.read/.write/.poll entries are elided in this
 * excerpt; only .owner and .release are visible.
 */
310 static const struct file_operations channel_fops = {
311 .owner = THIS_MODULE,
315 .release = comp_close,
320 * comp_disconnect_channel - disconnect a channel
321 * @iface: pointer to interface instance
322 * @channel_id: channel index
324 * This frees allocated memory and removes the cdev that represents this
325 * channel in user space.
327 static int comp_disconnect_channel(struct most_interface *iface, int channel_id)
329 struct comp_channel *c;
332 pr_info("Bad interface pointer\n");
336 c = get_channel(iface, channel_id);
340 mutex_lock(&c->io_mutex);
/*
 * Clear c->dev under the unlink lock so rx completion stops queueing
 * into this channel (the assignment itself is elided in this excerpt).
 */
341 spin_lock(&c->unlink);
343 spin_unlock(&c->unlink);
/* Wake any sleeper in read/write so it can observe !c->dev and bail. */
347 wake_up_interruptible(&c->wq);
348 mutex_unlock(&c->io_mutex);
350 mutex_unlock(&c->io_mutex);
357 * comp_rx_completion - completion handler for rx channels
358 * @mbo: pointer to buffer object that has completed
360 * This searches for the channel linked to this MBO and stores it in the local
363 static int comp_rx_completion(struct mbo *mbo)
365 struct comp_channel *c;
370 c = get_channel(mbo->ifp, mbo->hdm_channel_id);
/*
 * Only queue the MBO while the device is open and alive; the unlink
 * lock keeps this consistent with close/disconnect.
 */
374 spin_lock(&c->unlink);
375 if (!c->access_ref || !c->dev) {
376 spin_unlock(&c->unlink);
379 kfifo_in(&c->fifo, &mbo, 1);
380 spin_unlock(&c->unlink);
382 if (kfifo_is_full(&c->fifo))
383 pr_info("WARN: Fifo is full\n");
/* Wake readers/pollers waiting for data. */
385 wake_up_interruptible(&c->wq);
390 * comp_tx_completion - completion handler for tx channels
391 * @iface: pointer to interface instance
392 * @channel_id: channel index/ID
394 * This wakes sleeping processes in the wait-queue.
396 static int comp_tx_completion(struct most_interface *iface, int channel_id)
398 struct comp_channel *c;
/* Validate the caller-supplied interface and channel range. */
401 pr_info("Bad interface pointer\n");
404 if ((channel_id < 0) || (channel_id >= iface->num_channels)) {
405 pr_info("Channel ID out of range\n");
409 c = get_channel(iface, channel_id);
/* A TX buffer was returned: writers blocked on an MBO can retry. */
412 wake_up_interruptible(&c->wq);
417 * comp_probe - probe function of the driver module
418 * @iface: pointer to interface instance
419 * @channel_id: channel index/ID
420 * @cfg: pointer to actual channel configuration
421 * @name: name of the device to be created
423 * This allocates a channel object and creates the device node in /dev
425 * Returns 0 on success or error code otherwise.
427 static int comp_probe(struct most_interface *iface, int channel_id,
428 struct most_channel_config *cfg, char *name)
430 struct comp_channel *c;
431 unsigned long cl_flags;
435 if ((!iface) || (!cfg) || (!name)) {
436 pr_info("Probing component with bad arguments");
/* Reject a duplicate probe of an already-registered channel. */
439 c = get_channel(iface, channel_id);
/* Reserve a minor number for the new device node. */
443 current_minor = ida_simple_get(&comp.minor_id, 0, 0, GFP_KERNEL);
444 if (current_minor < 0)
445 return current_minor;
447 c = kzalloc(sizeof(*c), GFP_KERNEL);
450 goto error_alloc_channel;
453 c->devno = MKDEV(comp.major, current_minor);
454 cdev_init(&c->cdev, &channel_fops);
455 c->cdev.owner = THIS_MODULE;
/* NOTE(review): cdev_add() return value is not checked here. */
456 cdev_add(&c->cdev, c->devno, 1);
459 c->channel_id = channel_id;
461 spin_lock_init(&c->unlink);
/* One fifo slot per configured buffer, holding struct mbo pointers. */
463 retval = kfifo_alloc(&c->fifo, cfg->num_buffers, GFP_KERNEL);
465 pr_info("failed to alloc channel kfifo");
466 goto error_alloc_kfifo;
468 init_waitqueue_head(&c->wq);
469 mutex_init(&c->io_mutex);
470 spin_lock_irqsave(&ch_list_lock, cl_flags);
471 list_add_tail(&c->list, &channel_list);
472 spin_unlock_irqrestore(&ch_list_lock, cl_flags);
473 c->dev = device_create(comp.class, NULL, c->devno, NULL, "%s", name);
475 if (IS_ERR(c->dev)) {
476 retval = PTR_ERR(c->dev);
477 pr_info("failed to create new device node %s\n", name);
478 goto error_create_device;
480 kobject_uevent(&c->dev->kobj, KOBJ_ADD);
/*
 * Error unwind (labels elided in this excerpt): free the fifo, then
 * release the reserved minor number.
 */
484 kfifo_free(&c->fifo);
490 ida_simple_remove(&comp.minor_id, current_minor);
/*
 * Singleton component instance registered with the mostcore; the core
 * calls back through these function pointers (the .name member and
 * closing braces are elided in this excerpt).
 */
494 static struct cdev_component comp = {
497 .probe_channel = comp_probe,
498 .disconnect_channel = comp_disconnect_channel,
499 .rx_completion = comp_rx_completion,
500 .tx_completion = comp_tx_completion,
/*
 * Module init: create the device class, init global channel bookkeeping,
 * reserve a char-device region, and register the component with the core.
 * On failure the visible unwind releases the region, destroys the IDA and
 * the class (the goto labels themselves are elided in this excerpt).
 */
504 static int __init mod_init(void)
510 comp.class = class_create(THIS_MODULE, "most_cdev");
511 if (IS_ERR(comp.class)) {
512 pr_info("No udev support.\n");
513 return PTR_ERR(comp.class);
516 INIT_LIST_HEAD(&channel_list);
517 spin_lock_init(&ch_list_lock);
518 ida_init(&comp.minor_id);
520 err = alloc_chrdev_region(&comp.devno, 0, CHRDEV_REGION_SIZE, "cdev");
523 comp.major = MAJOR(comp.devno);
524 err = most_register_component(&comp.cc);
530 unregister_chrdev_region(comp.devno, CHRDEV_REGION_SIZE);
532 ida_destroy(&comp.minor_id);
533 class_destroy(comp.class);
/*
 * Module exit: deregister from the core, tear down every remaining
 * channel (per-channel destroy calls elided in this excerpt), then release
 * the char-device region, IDA and class.  NOTE(review): unregistering
 * with size 1 here while mod_init allocated CHRDEV_REGION_SIZE looks
 * inconsistent — verify against the full file.
 */
537 static void __exit mod_exit(void)
539 struct comp_channel *c, *tmp;
541 pr_info("exit module\n");
543 most_deregister_component(&comp.cc);
545 list_for_each_entry_safe(c, tmp, &channel_list, list) {
549 unregister_chrdev_region(comp.devno, 1);
550 ida_destroy(&comp.minor_id);
551 class_destroy(comp.class);
/* Module entry/exit registration and metadata. */
554 module_init(mod_init);
555 module_exit(mod_exit);
557 MODULE_LICENSE("GPL");
558 MODULE_DESCRIPTION("character device component for mostcore");