1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) 2012 Intel, Inc.
4  * Copyright (C) 2013 Intel, Inc.
5  * Copyright (C) 2014 Linaro Limited
6  * Copyright (C) 2011-2016 Google, Inc.
7  *
8  * This software is licensed under the terms of the GNU General Public
9  * License version 2, as published by the Free Software Foundation, and
10  * may be copied, distributed, and modified under those terms.
11  *
12  * This program is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
15  * GNU General Public License for more details.
16  *
17  */
18
19 /* This source file contains the implementation of a special device driver
20  * that intends to provide a *very* fast communication channel between the
21  * guest system and the QEMU emulator.
22  *
23  * Usage from the guest is simply the following (error handling simplified):
24  *
25  *    int  fd = open("/dev/qemu_pipe",O_RDWR);
26  *    .... write() or read() through the pipe.
27  *
28  * This driver doesn't deal with the exact protocol used during the session.
29  * The protocol itself is intended to be as simple as something like:
30  *
31  *    // do this _just_ after opening the fd to connect to a specific
32  *    // emulator service.
33  *    const char*  msg = "<pipename>";
34  *    if (write(fd, msg, strlen(msg)+1) < 0) {
35  *       ... could not connect to <pipename> service
36  *       close(fd);
37  *    }
38  *
39  *    // after this, simply read() and write() to communicate with the
40  *    // service. Exact protocol details left as an exercise to the reader.
41  *
42  * This driver is very fast because it doesn't copy any data through
43  * intermediate buffers, since the emulator is capable of translating
44  * guest user addresses into host ones.
45  *
46  * Note, however, that we must ensure that each user page involved in the
47  * exchange stays pinned in memory for the duration of a transfer.
48  */
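
/*
 * For illustration only: a slightly more complete guest-side sketch of the
 * connect handshake described above. The service name "pingpong" and the
 * helper are purely hypothetical, and error handling is still simplified.
 *
 *    int open_qemu_pipe_service(const char *name)
 *    {
 *        int fd = open("/dev/qemu_pipe", O_RDWR);
 *        if (fd < 0)
 *            return -1;
 *        // the connect message is the service name plus its trailing '\0'
 *        if (write(fd, name, strlen(name) + 1) < 0) {
 *            close(fd);
 *            return -1;
 *        }
 *        return fd;   // read()/write() now follow the service's own protocol
 *    }
 *
 *    // e.g.:  int fd = open_qemu_pipe_service("pingpong");
 */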
49
50 #include <linux/module.h>
51 #include <linux/mod_devicetable.h>
52 #include <linux/interrupt.h>
53 #include <linux/kernel.h>
54 #include <linux/spinlock.h>
55 #include <linux/miscdevice.h>
56 #include <linux/platform_device.h>
57 #include <linux/poll.h>
58 #include <linux/sched.h>
59 #include <linux/bitops.h>
60 #include <linux/slab.h>
61 #include <linux/io.h>
62 #include <linux/dma-mapping.h>
63 #include <linux/mm.h>
64 #include <linux/bug.h>
65 #include "goldfish_pipe_qemu.h"
66
67 /*
68  * Update this when something changes in the driver's behavior so the host
69  * can benefit from knowing it
70  */
71 enum {
72         PIPE_DRIVER_VERSION = 2,
73         PIPE_CURRENT_DEVICE_VERSION = 2
74 };
75
76 enum {
77         MAX_BUFFERS_PER_COMMAND = 336,
78         MAX_SIGNALLED_PIPES = 64,
79         INITIAL_PIPES_CAPACITY = 64
80 };
81
82 struct goldfish_pipe_dev;
83
84 /* A per-pipe command structure, shared with the host */
85 struct goldfish_pipe_command {
86         s32 cmd;        /* PipeCmdCode, guest -> host */
87         s32 id;         /* pipe id, guest -> host */
88         s32 status;     /* command execution status, host -> guest */
89         s32 reserved;   /* to pad to 64-bit boundary */
90         union {
91                 /* Parameters for PIPE_CMD_{READ,WRITE} */
92                 struct {
93                         /* number of buffers, guest -> host */
94                         u32 buffers_count;
95                         /* number of consumed bytes, host -> guest */
96                         s32 consumed_size;
97                         /* buffer pointers, guest -> host */
98                         u64 ptrs[MAX_BUFFERS_PER_COMMAND];
99                         /* buffer sizes, guest -> host */
100                         u32 sizes[MAX_BUFFERS_PER_COMMAND];
101                 } rw_params;
102         };
103 };
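
/*
 * Rough size check (assuming 4 KiB pages and natural alignment): the fixed
 * header above takes 4 * 4 = 16 bytes, and rw_params adds 4 + 4 bytes plus
 * 336 * 8 = 2688 bytes of pointers and 336 * 4 = 1344 bytes of sizes, about
 * 4056 bytes in total. That is why MAX_BUFFERS_PER_COMMAND = 336 still
 * satisfies the one-page BUILD_BUG_ON() in goldfish_pipe_open().
 */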
104
105 /* Information about a single signalled pipe */
106 struct signalled_pipe_buffer {
107         u32 id;
108         u32 flags;
109 };
110
111 /* Parameters for the PIPE_CMD_OPEN command */
112 struct open_command_param {
113         u64 command_buffer_ptr;
114         u32 rw_params_max_count;
115 };
116
117 /* Device-level set of buffers shared with the host */
118 struct goldfish_pipe_dev_buffers {
119         struct open_command_param open_command_params;
120         struct signalled_pipe_buffer
121                 signalled_pipe_buffers[MAX_SIGNALLED_PIPES];
122 };
123
124 /* This data type models a given pipe instance */
125 struct goldfish_pipe {
126         /* pipe ID - index into goldfish_pipe_dev::pipes array */
127         u32 id;
128
129         /* The wake flags the pipe is waiting for
130          * Note: not protected by any lock; uses atomic operations
131          *  and barriers to make it thread-safe.
132          */
133         unsigned long flags;
134
135         /* wake flags the host has signalled,
136          *  - protected by goldfish_pipe_dev::lock
137          */
138         unsigned long signalled_flags;
139
140         /* A pointer to command buffer */
141         struct goldfish_pipe_command *command_buffer;
142
143         /* doubly linked list of signalled pipes, protected by
144          * goldfish_pipe_dev::lock
145          */
146         struct goldfish_pipe *prev_signalled;
147         struct goldfish_pipe *next_signalled;
148
149         /*
150          * A pipe's own lock. Protects the following:
151          *  - *command_buffer - makes sure a command can safely write its
152          *    parameters to the host and read the results back.
153          */
154         struct mutex lock;
155
156         /* A wake queue for sleeping until host signals an event */
157         wait_queue_head_t wake_queue;
158
159         /* Pointer to the parent goldfish_pipe_dev instance */
160         struct goldfish_pipe_dev *dev;
161
162         /* A buffer of pages, too large to fit into a stack frame */
163         struct page *pages[MAX_BUFFERS_PER_COMMAND];
164 };
165
166 /* The global driver data. Holds a reference to the i/o page used to
167  * communicate with the emulator, and the device-wide state shared by
168  * all pipe instances.
169  */
170 struct goldfish_pipe_dev {
171         /* A magic number to check if this is an instance of this struct */
172         void *magic;
173
174         /*
175          * Global device spinlock. Protects the following members:
176          *  - pipes, pipes_capacity
177          *  - [*pipes, *pipes + pipes_capacity) - array data
178          *  - first_signalled_pipe,
179          *      goldfish_pipe::prev_signalled,
180          *      goldfish_pipe::next_signalled,
181  *      goldfish_pipe::signalled_flags - all signalled-related fields,
182          *                                       in all allocated pipes
183          *  - open_command_params - PIPE_CMD_OPEN-related buffers
184          *
185          * It looks like a lot of different fields, but the trick is that
186          * the only operation that happens often is the signalled pipes array
187          * manipulation. That's why it's OK for now to keep the rest of the
188          * fields under the same lock. If we notice too much contention because
189          * of PIPE_CMD_OPEN, then we should add a separate lock there.
190          */
191         spinlock_t lock;
192
193         /*
194          * Array of the pipes of |pipes_capacity| elements,
195          * indexed by goldfish_pipe::id
196          */
197         struct goldfish_pipe **pipes;
198         u32 pipes_capacity;
199
200         /* Pointers to the buffers the host uses to interact with this driver */
201         struct goldfish_pipe_dev_buffers *buffers;
202
203         /* Head of a doubly linked list of signalled pipes */
204         struct goldfish_pipe *first_signalled_pipe;
205
206         /* ptr to platform device's device struct */
207         struct device *pdev_dev;
208
209         /* Some device-specific data */
210         int irq;
211         int version;
212         unsigned char __iomem *base;
213
214         struct miscdevice miscdev;
215 };
216
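/*
 * Note on the mechanism below: writing the pipe id to PIPE_REG_CMD acts as
 * a doorbell. The emulator is expected to process the command while it
 * handles that register write, which is why the status field of the shared
 * command buffer can be read back immediately afterwards.
 */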
217 static int goldfish_pipe_cmd_locked(struct goldfish_pipe *pipe,
218                                     enum PipeCmdCode cmd)
219 {
220         pipe->command_buffer->cmd = cmd;
221         /* failure by default */
222         pipe->command_buffer->status = PIPE_ERROR_INVAL;
223         writel(pipe->id, pipe->dev->base + PIPE_REG_CMD);
224         return pipe->command_buffer->status;
225 }
226
227 static int goldfish_pipe_cmd(struct goldfish_pipe *pipe, enum PipeCmdCode cmd)
228 {
229         int status;
230
231         if (mutex_lock_interruptible(&pipe->lock))
232                 return PIPE_ERROR_IO;
233         status = goldfish_pipe_cmd_locked(pipe, cmd);
234         mutex_unlock(&pipe->lock);
235         return status;
236 }
237
238 /*
239  * This function converts an error code returned by the emulator through
240  * the PIPE_REG_STATUS i/o register into a valid negative errno value.
241  */
242 static int goldfish_pipe_error_convert(int status)
243 {
244         switch (status) {
245         case PIPE_ERROR_AGAIN:
246                 return -EAGAIN;
247         case PIPE_ERROR_NOMEM:
248                 return -ENOMEM;
249         case PIPE_ERROR_IO:
250                 return -EIO;
251         default:
252                 return -EINVAL;
253         }
254 }
255
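/*
 * Pin the user pages backing [first_page, last_page] for a transfer. If the
 * range spans more pages than one command can describe, only the first
 * MAX_BUFFERS_PER_COMMAND pages are pinned and the last pinned page is
 * treated as a full page; the outer loop in goldfish_pipe_read_write() then
 * issues further commands for the rest of the buffer.
 */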
256 static int goldfish_pin_pages(unsigned long first_page,
257                               unsigned long last_page,
258                               unsigned int last_page_size,
259                               int is_write,
260                               struct page *pages[MAX_BUFFERS_PER_COMMAND],
261                               unsigned int *iter_last_page_size)
262 {
263         int ret;
264         int requested_pages = ((last_page - first_page) >> PAGE_SHIFT) + 1;
265
266         if (requested_pages > MAX_BUFFERS_PER_COMMAND) {
267                 requested_pages = MAX_BUFFERS_PER_COMMAND;
268                 *iter_last_page_size = PAGE_SIZE;
269         } else {
270                 *iter_last_page_size = last_page_size;
271         }
272
273         ret = pin_user_pages_fast(first_page, requested_pages,
274                                   !is_write ? FOLL_WRITE : 0,
275                                   pages);
276         if (ret <= 0)
277                 return -EFAULT;
278         if (ret < requested_pages)
279                 *iter_last_page_size = PAGE_SIZE;
280
281         return ret;
282 }
283
284 /* Populate the call parameters, merging adjacent pages together */
285 static void populate_rw_params(struct page **pages,
286                                int pages_count,
287                                unsigned long address,
288                                unsigned long address_end,
289                                unsigned long first_page,
290                                unsigned long last_page,
291                                unsigned int iter_last_page_size,
292                                int is_write,
293                                struct goldfish_pipe_command *command)
294 {
295         /*
296          * Process the first page separately - it's the only page that
297          * needs special handling for its start address.
298          */
299         unsigned long xaddr = page_to_phys(pages[0]);
300         unsigned long xaddr_prev = xaddr;
301         int buffer_idx = 0;
302         int i = 1;
303         int size_on_page = first_page == last_page
304                         ? (int)(address_end - address)
305                         : (PAGE_SIZE - (address & ~PAGE_MASK));
306         command->rw_params.ptrs[0] = (u64)(xaddr | (address & ~PAGE_MASK));
307         command->rw_params.sizes[0] = size_on_page;
308         for (; i < pages_count; ++i) {
309                 xaddr = page_to_phys(pages[i]);
310                 size_on_page = (i == pages_count - 1) ?
311                         iter_last_page_size : PAGE_SIZE;
312                 if (xaddr == xaddr_prev + PAGE_SIZE) {
313                         command->rw_params.sizes[buffer_idx] += size_on_page;
314                 } else {
315                         ++buffer_idx;
316                         command->rw_params.ptrs[buffer_idx] = (u64)xaddr;
317                         command->rw_params.sizes[buffer_idx] = size_on_page;
318                 }
319                 xaddr_prev = xaddr;
320         }
321         command->rw_params.buffers_count = buffer_idx + 1;
322 }
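
/*
 * Worked example (hypothetical physical addresses, 4 KiB pages): a buffer
 * starting at page offset 0xf00 whose three pages map to 0x1000, 0x2000 and
 * 0x5000 is merged into two entries:
 *    ptrs[0] = 0x1f00, sizes[0] = 0x100 + PAGE_SIZE   (pages 0x1000, 0x2000)
 *    ptrs[1] = 0x5000, sizes[1] = iter_last_page_size
 * with buffers_count = 2.
 */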
323
324 static int transfer_max_buffers(struct goldfish_pipe *pipe,
325                                 unsigned long address,
326                                 unsigned long address_end,
327                                 int is_write,
328                                 unsigned long last_page,
329                                 unsigned int last_page_size,
330                                 s32 *consumed_size,
331                                 int *status)
332 {
333         unsigned long first_page = address & PAGE_MASK;
334         unsigned int iter_last_page_size;
335         int pages_count;
336
337         /* Serialize access to the pipe command buffers */
338         if (mutex_lock_interruptible(&pipe->lock))
339                 return -ERESTARTSYS;
340
341         pages_count = goldfish_pin_pages(first_page, last_page,
342                                          last_page_size, is_write,
343                                          pipe->pages, &iter_last_page_size);
344         if (pages_count < 0) {
345                 mutex_unlock(&pipe->lock);
346                 return pages_count;
347         }
348
349         populate_rw_params(pipe->pages, pages_count, address, address_end,
350                            first_page, last_page, iter_last_page_size, is_write,
351                            pipe->command_buffer);
352
353         /* Transfer the data */
354         *status = goldfish_pipe_cmd_locked(pipe,
355                                 is_write ? PIPE_CMD_WRITE : PIPE_CMD_READ);
356
357         *consumed_size = pipe->command_buffer->rw_params.consumed_size;
358
359         unpin_user_pages_dirty_lock(pipe->pages, pages_count,
360                                     !is_write && *consumed_size > 0);
361
362         mutex_unlock(&pipe->lock);
363         return 0;
364 }
365
366 static int wait_for_host_signal(struct goldfish_pipe *pipe, int is_write)
367 {
368         u32 wake_bit = is_write ? BIT_WAKE_ON_WRITE : BIT_WAKE_ON_READ;
369
370         set_bit(wake_bit, &pipe->flags);
371
372         /* Tell the emulator we're going to wait for a wake event */
373         goldfish_pipe_cmd(pipe,
374                 is_write ? PIPE_CMD_WAKE_ON_WRITE : PIPE_CMD_WAKE_ON_READ);
375
376         while (test_bit(wake_bit, &pipe->flags)) {
377                 if (wait_event_interruptible(pipe->wake_queue,
378                                              !test_bit(wake_bit, &pipe->flags)))
379                         return -ERESTARTSYS;
380
381                 if (test_bit(BIT_CLOSED_ON_HOST, &pipe->flags))
382                         return -EIO;
383         }
384
385         return 0;
386 }
387
388 static ssize_t goldfish_pipe_read_write(struct file *filp,
389                                         char __user *buffer,
390                                         size_t bufflen,
391                                         int is_write)
392 {
393         struct goldfish_pipe *pipe = filp->private_data;
394         int count = 0, ret = -EINVAL;
395         unsigned long address, address_end, last_page;
396         unsigned int last_page_size;
397
398         /* If the emulator already closed the pipe, no need to go further */
399         if (unlikely(test_bit(BIT_CLOSED_ON_HOST, &pipe->flags)))
400                 return -EIO;
401         /* Null reads or writes succeed */
402         if (unlikely(bufflen == 0))
403                 return 0;
404         /* Check the buffer range for access */
405         if (unlikely(!access_ok(buffer, bufflen)))
406                 return -EFAULT;
407
408         address = (unsigned long)buffer;
409         address_end = address + bufflen;
410         last_page = (address_end - 1) & PAGE_MASK;
411         last_page_size = ((address_end - 1) & ~PAGE_MASK) + 1;
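
        /*
         * Example of the arithmetic above (hypothetical values, 4 KiB pages):
         * buffer = 0x20ff0 and bufflen = 0x30 give address_end = 0x21020,
         * last_page = 0x21000 and last_page_size = 0x20.
         */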
412
413         while (address < address_end) {
414                 s32 consumed_size;
415                 int status;
416
417                 ret = transfer_max_buffers(pipe, address, address_end, is_write,
418                                            last_page, last_page_size,
419                                            &consumed_size, &status);
420                 if (ret < 0)
421                         break;
422
423                 if (consumed_size > 0) {
424                         /* No matter what the status is, we've transferred
425                          * something.
426                          */
427                         count += consumed_size;
428                         address += consumed_size;
429                 }
430                 if (status > 0)
431                         continue;
432                 if (status == 0) {
433                         /* EOF */
434                         ret = 0;
435                         break;
436                 }
437                 if (count > 0) {
438                         /*
439                          * An error occurred, but we already transferred
440                          * something on one of the previous iterations.
441                          * Just return what we already copied and log the
442                          * error.
443                          */
444                         if (status != PIPE_ERROR_AGAIN)
445                                 dev_err_ratelimited(pipe->dev->pdev_dev,
446                                         "backend error %d on %s\n",
447                                         status, is_write ? "write" : "read");
448                         break;
449                 }
450
451                 /*
452                  * If the error is not PIPE_ERROR_AGAIN, or if we are in
453                  * non-blocking mode, just return the error code.
454                  */
455                 if (status != PIPE_ERROR_AGAIN ||
456                         (filp->f_flags & O_NONBLOCK) != 0) {
457                         ret = goldfish_pipe_error_convert(status);
458                         break;
459                 }
460
461                 status = wait_for_host_signal(pipe, is_write);
462                 if (status < 0)
463                         return status;
464         }
465
466         if (count > 0)
467                 return count;
468         return ret;
469 }
470
471 static ssize_t goldfish_pipe_read(struct file *filp, char __user *buffer,
472                                   size_t bufflen, loff_t *ppos)
473 {
474         return goldfish_pipe_read_write(filp, buffer, bufflen,
475                                         /* is_write */ 0);
476 }
477
478 static ssize_t goldfish_pipe_write(struct file *filp,
479                                    const char __user *buffer, size_t bufflen,
480                                    loff_t *ppos)
481 {
482         /* cast away the const */
483         char __user *no_const_buffer = (char __user *)buffer;
484
485         return goldfish_pipe_read_write(filp, no_const_buffer, bufflen,
486                                         /* is_write */ 1);
487 }
488
489 static __poll_t goldfish_pipe_poll(struct file *filp, poll_table *wait)
490 {
491         struct goldfish_pipe *pipe = filp->private_data;
492         __poll_t mask = 0;
493         int status;
494
495         poll_wait(filp, &pipe->wake_queue, wait);
496
497         status = goldfish_pipe_cmd(pipe, PIPE_CMD_POLL);
498         if (status < 0)
499                 return -ERESTARTSYS;
500
501         if (status & PIPE_POLL_IN)
502                 mask |= EPOLLIN | EPOLLRDNORM;
503         if (status & PIPE_POLL_OUT)
504                 mask |= EPOLLOUT | EPOLLWRNORM;
505         if (status & PIPE_POLL_HUP)
506                 mask |= EPOLLHUP;
507         if (test_bit(BIT_CLOSED_ON_HOST, &pipe->flags))
508                 mask |= EPOLLERR;
509
510         return mask;
511 }
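
/*
 * Guest-side usage sketch (illustrative only, not part of the driver):
 * waiting for the pipe to become readable instead of blocking in read().
 *
 *    struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *
 *    if (poll(&pfd, 1, timeout_ms) > 0) {
 *        if (pfd.revents & (POLLHUP | POLLERR))
 *            ... the host closed its end of the pipe
 *        else if (pfd.revents & POLLIN)
 *            n = read(fd, buf, sizeof(buf));
 *    }
 */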
512
513 static void signalled_pipes_add_locked(struct goldfish_pipe_dev *dev,
514                                        u32 id, u32 flags)
515 {
516         struct goldfish_pipe *pipe;
517
518         if (WARN_ON(id >= dev->pipes_capacity))
519                 return;
520
521         pipe = dev->pipes[id];
522         if (!pipe)
523                 return;
524         pipe->signalled_flags |= flags;
525
526         if (pipe->prev_signalled || pipe->next_signalled ||
527                 dev->first_signalled_pipe == pipe)
528                 return; /* already in the list */
529         pipe->next_signalled = dev->first_signalled_pipe;
530         if (dev->first_signalled_pipe)
531                 dev->first_signalled_pipe->prev_signalled = pipe;
532         dev->first_signalled_pipe = pipe;
533 }
534
535 static void signalled_pipes_remove_locked(struct goldfish_pipe_dev *dev,
536                                           struct goldfish_pipe *pipe)
537 {
538         if (pipe->prev_signalled)
539                 pipe->prev_signalled->next_signalled = pipe->next_signalled;
540         if (pipe->next_signalled)
541                 pipe->next_signalled->prev_signalled = pipe->prev_signalled;
542         if (pipe == dev->first_signalled_pipe)
543                 dev->first_signalled_pipe = pipe->next_signalled;
544         pipe->prev_signalled = NULL;
545         pipe->next_signalled = NULL;
546 }
547
548 static struct goldfish_pipe *signalled_pipes_pop_front(
549                 struct goldfish_pipe_dev *dev, int *wakes)
550 {
551         struct goldfish_pipe *pipe;
552         unsigned long flags;
553
554         spin_lock_irqsave(&dev->lock, flags);
555
556         pipe = dev->first_signalled_pipe;
557         if (pipe) {
558                 *wakes = pipe->signalled_flags;
559                 pipe->signalled_flags = 0;
560                 /*
561                  * This is an optimized version of
562                  * signalled_pipes_remove_locked();
563                  * we want the sleeping pipe operations to be
564                  * woken as quickly as possible.
565                  */
566                 dev->first_signalled_pipe = pipe->next_signalled;
567                 if (dev->first_signalled_pipe)
568                         dev->first_signalled_pipe->prev_signalled = NULL;
569                 pipe->next_signalled = NULL;
570         }
571
572         spin_unlock_irqrestore(&dev->lock, flags);
573         return pipe;
574 }
575
576 static irqreturn_t goldfish_interrupt_task(int irq, void *dev_addr)
577 {
578         /* Iterate over the signalled pipes and wake them one by one */
579         struct goldfish_pipe_dev *dev = dev_addr;
580         struct goldfish_pipe *pipe;
581         int wakes;
582
583         while ((pipe = signalled_pipes_pop_front(dev, &wakes)) != NULL) {
584                 if (wakes & PIPE_WAKE_CLOSED) {
585                         pipe->flags = 1 << BIT_CLOSED_ON_HOST;
586                 } else {
587                         if (wakes & PIPE_WAKE_READ)
588                                 clear_bit(BIT_WAKE_ON_READ, &pipe->flags);
589                         if (wakes & PIPE_WAKE_WRITE)
590                                 clear_bit(BIT_WAKE_ON_WRITE, &pipe->flags);
591                 }
592                 /*
593                  * wake_up_interruptible() implies a write barrier, so don't
594                  * explicitly add another one here.
595                  */
596                 wake_up_interruptible(&pipe->wake_queue);
597         }
598         return IRQ_HANDLED;
599 }
600
601 static void goldfish_pipe_device_deinit(struct platform_device *pdev,
602                                         struct goldfish_pipe_dev *dev);
603
604 /*
605  * The general idea of the (threaded) interrupt handling:
606  *
607  *  1. device raises an interrupt if there's at least one signalled pipe
608  *  2. IRQ handler reads the signalled pipes and their count from the device
609  *  3. device writes them into a shared buffer and returns the count
610  *      it only resets the IRQ if it has returned all signalled pipes,
611  *      otherwise it leaves it raised, so the IRQ handler will be called
612  *      again for the next chunk
613  *  4. IRQ handler adds all returned pipes to the device's signalled pipes list
614  *  5. IRQ handler defers processing of the signalled pipes on the list to a
615  *      separate (threaded) context
616  */
617 static irqreturn_t goldfish_pipe_interrupt(int irq, void *dev_id)
618 {
619         u32 count;
620         u32 i;
621         unsigned long flags;
622         struct goldfish_pipe_dev *dev = dev_id;
623
624         if (dev->magic != &goldfish_pipe_device_deinit)
625                 return IRQ_NONE;
626
627         /* Request the signalled pipes from the device */
628         spin_lock_irqsave(&dev->lock, flags);
629
630         count = readl(dev->base + PIPE_REG_GET_SIGNALLED);
631         if (count == 0) {
632                 spin_unlock_irqrestore(&dev->lock, flags);
633                 return IRQ_NONE;
634         }
635         if (count > MAX_SIGNALLED_PIPES)
636                 count = MAX_SIGNALLED_PIPES;
637
638         for (i = 0; i < count; ++i)
639                 signalled_pipes_add_locked(dev,
640                         dev->buffers->signalled_pipe_buffers[i].id,
641                         dev->buffers->signalled_pipe_buffers[i].flags);
642
643         spin_unlock_irqrestore(&dev->lock, flags);
644
645         return IRQ_WAKE_THREAD;
646 }
647
648 static int get_free_pipe_id_locked(struct goldfish_pipe_dev *dev)
649 {
650         int id;
651
652         for (id = 0; id < dev->pipes_capacity; ++id)
653                 if (!dev->pipes[id])
654                         return id;
655
656         {
657                 /* Reallocate the array.
658                  * Since get_free_pipe_id_locked runs with interrupts disabled,
659                  * we don't want to make calls that could lead to sleep.
660                  */
661                 u32 new_capacity = 2 * dev->pipes_capacity;
662                 struct goldfish_pipe **pipes =
663                         kcalloc(new_capacity, sizeof(*pipes), GFP_ATOMIC);
664                 if (!pipes)
665                         return -ENOMEM;
666                 memcpy(pipes, dev->pipes, sizeof(*pipes) * dev->pipes_capacity);
667                 kfree(dev->pipes);
668                 dev->pipes = pipes;
669                 id = dev->pipes_capacity;
670                 dev->pipes_capacity = new_capacity;
671         }
672         return id;
673 }
674
675 /* A helper function to get the instance of goldfish_pipe_dev from file */
676 static struct goldfish_pipe_dev *to_goldfish_pipe_dev(struct file *file)
677 {
678         struct miscdevice *miscdev = file->private_data;
679
680         return container_of(miscdev, struct goldfish_pipe_dev, miscdev);
681 }
682
683 /**
684  *      goldfish_pipe_open - open a channel to the AVD
685  *      @inode: inode of device
686  *      @file: file struct of opener
687  *
688  *      Create a new pipe link between the emulator and the user application.
689  *      Each new request produces a new pipe.
690  *
691  *      Note: we use the pipe ID as a mux. All goldfish emulations are 32-bit
692  *      right now so this is fine; 64-bit will need the addressing revisited.
693  */
694 static int goldfish_pipe_open(struct inode *inode, struct file *file)
695 {
696         struct goldfish_pipe_dev *dev = to_goldfish_pipe_dev(file);
697         unsigned long flags;
698         int id;
699         int status;
700
701         /* Allocate new pipe kernel object */
702         struct goldfish_pipe *pipe = kzalloc(sizeof(*pipe), GFP_KERNEL);
703
704         if (!pipe)
705                 return -ENOMEM;
706
707         pipe->dev = dev;
708         mutex_init(&pipe->lock);
709         init_waitqueue_head(&pipe->wake_queue);
710
711         /*
712          * The command buffer needs to be allocated on its own page to make
713          * sure it is physically contiguous in the host's address space.
714          */
715         BUILD_BUG_ON(sizeof(struct goldfish_pipe_command) > PAGE_SIZE);
716         pipe->command_buffer =
717                 (struct goldfish_pipe_command *)__get_free_page(GFP_KERNEL);
718         if (!pipe->command_buffer) {
719                 status = -ENOMEM;
720                 goto err_pipe;
721         }
722
723         spin_lock_irqsave(&dev->lock, flags);
724
725         id = get_free_pipe_id_locked(dev);
726         if (id < 0) {
727                 status = id;
728                 goto err_id_locked;
729         }
730
731         dev->pipes[id] = pipe;
732         pipe->id = id;
733         pipe->command_buffer->id = id;
734
735         /* Now tell the emulator we're opening a new pipe. */
736         dev->buffers->open_command_params.rw_params_max_count =
737                         MAX_BUFFERS_PER_COMMAND;
738         dev->buffers->open_command_params.command_buffer_ptr =
739                         (u64)(unsigned long)__pa(pipe->command_buffer);
740         status = goldfish_pipe_cmd_locked(pipe, PIPE_CMD_OPEN);
741         spin_unlock_irqrestore(&dev->lock, flags);
742         if (status < 0)
743                 goto err_cmd;
744         /* All is done, save the pipe into the file's private data field */
745         file->private_data = pipe;
746         return 0;
747
748 err_cmd:
749         spin_lock_irqsave(&dev->lock, flags);
750         dev->pipes[id] = NULL;
751 err_id_locked:
752         spin_unlock_irqrestore(&dev->lock, flags);
753         free_page((unsigned long)pipe->command_buffer);
754 err_pipe:
755         kfree(pipe);
756         return status;
757 }
758
759 static int goldfish_pipe_release(struct inode *inode, struct file *filp)
760 {
761         unsigned long flags;
762         struct goldfish_pipe *pipe = filp->private_data;
763         struct goldfish_pipe_dev *dev = pipe->dev;
764
765         /* The guest is closing the channel, so tell the emulator right now */
766         goldfish_pipe_cmd(pipe, PIPE_CMD_CLOSE);
767
768         spin_lock_irqsave(&dev->lock, flags);
769         dev->pipes[pipe->id] = NULL;
770         signalled_pipes_remove_locked(dev, pipe);
771         spin_unlock_irqrestore(&dev->lock, flags);
772
773         filp->private_data = NULL;
774         free_page((unsigned long)pipe->command_buffer);
775         kfree(pipe);
776         return 0;
777 }
778
779 static const struct file_operations goldfish_pipe_fops = {
780         .owner = THIS_MODULE,
781         .read = goldfish_pipe_read,
782         .write = goldfish_pipe_write,
783         .poll = goldfish_pipe_poll,
784         .open = goldfish_pipe_open,
785         .release = goldfish_pipe_release,
786 };
787
788 static void init_miscdevice(struct miscdevice *miscdev)
789 {
790         memset(miscdev, 0, sizeof(*miscdev));
791
792         miscdev->minor = MISC_DYNAMIC_MINOR;
793         miscdev->name = "goldfish_pipe";
794         miscdev->fops = &goldfish_pipe_fops;
795 }
796
797 static void write_pa_addr(void *addr, void __iomem *portl, void __iomem *porth)
798 {
799         const unsigned long paddr = __pa(addr);
800
801         writel(upper_32_bits(paddr), porth);
802         writel(lower_32_bits(paddr), portl);
803 }
804
805 static int goldfish_pipe_device_init(struct platform_device *pdev,
806                                      struct goldfish_pipe_dev *dev)
807 {
808         int err;
809
810         err = devm_request_threaded_irq(&pdev->dev, dev->irq,
811                                         goldfish_pipe_interrupt,
812                                         goldfish_interrupt_task,
813                                         IRQF_SHARED, "goldfish_pipe", dev);
814         if (err) {
815                 dev_err(&pdev->dev, "unable to allocate IRQ for v2\n");
816                 return err;
817         }
818
819         init_miscdevice(&dev->miscdev);
820         err = misc_register(&dev->miscdev);
821         if (err) {
822                 dev_err(&pdev->dev, "unable to register v2 device\n");
823                 return err;
824         }
825
826         dev->pdev_dev = &pdev->dev;
827         dev->first_signalled_pipe = NULL;
828         dev->pipes_capacity = INITIAL_PIPES_CAPACITY;
829         dev->pipes = kcalloc(dev->pipes_capacity, sizeof(*dev->pipes),
830                              GFP_KERNEL);
831         if (!dev->pipes) {
832                 misc_deregister(&dev->miscdev);
833                 return -ENOMEM;
834         }
835
836         /*
837          * We're going to pass two buffers, open_command_params and
838          * signalled_pipe_buffers, to the host. This means each of those buffers
839          * needs to be contained in a single physical page. The easiest choice
840          * is to just allocate a page and place the buffers in it.
841          */
842         BUILD_BUG_ON(sizeof(struct goldfish_pipe_dev_buffers) > PAGE_SIZE);
843         dev->buffers = (struct goldfish_pipe_dev_buffers *)
844                 __get_free_page(GFP_KERNEL);
845         if (!dev->buffers) {
846                 kfree(dev->pipes);
847                 misc_deregister(&dev->miscdev);
848                 return -ENOMEM;
849         }
850
851         /* Send the buffer addresses to the host */
852         write_pa_addr(&dev->buffers->signalled_pipe_buffers,
853                       dev->base + PIPE_REG_SIGNAL_BUFFER,
854                       dev->base + PIPE_REG_SIGNAL_BUFFER_HIGH);
855
856         writel(MAX_SIGNALLED_PIPES,
857                dev->base + PIPE_REG_SIGNAL_BUFFER_COUNT);
858
859         write_pa_addr(&dev->buffers->open_command_params,
860                       dev->base + PIPE_REG_OPEN_BUFFER,
861                       dev->base + PIPE_REG_OPEN_BUFFER_HIGH);
862
863         platform_set_drvdata(pdev, dev);
864         return 0;
865 }
866
867 static void goldfish_pipe_device_deinit(struct platform_device *pdev,
868                                         struct goldfish_pipe_dev *dev)
869 {
870         misc_deregister(&dev->miscdev);
871         kfree(dev->pipes);
872         free_page((unsigned long)dev->buffers);
873 }
874
875 static int goldfish_pipe_probe(struct platform_device *pdev)
876 {
877         struct resource *r;
878         struct goldfish_pipe_dev *dev;
879
880         dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
881         if (!dev)
882                 return -ENOMEM;
883
884         dev->magic = &goldfish_pipe_device_deinit;
885         spin_lock_init(&dev->lock);
886
887         r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
888         if (!r || resource_size(r) < PAGE_SIZE) {
889                 dev_err(&pdev->dev, "can't allocate i/o page\n");
890                 return -EINVAL;
891         }
892         dev->base = devm_ioremap(&pdev->dev, r->start, PAGE_SIZE);
893         if (!dev->base) {
894                 dev_err(&pdev->dev, "ioremap failed\n");
895                 return -EINVAL;
896         }
897
898         dev->irq = platform_get_irq(pdev, 0);
899         if (dev->irq < 0)
900                 return dev->irq;
901
902         /*
903          * Exchange the versions with the host device
904          *
905          * Note: the v1 driver did not report its version, so we write ours
906          *  before reading the device version back; this allows the host to
907          *  detect an old driver (one that never wrote a version before reading).
908          */
909         writel(PIPE_DRIVER_VERSION, dev->base + PIPE_REG_VERSION);
910         dev->version = readl(dev->base + PIPE_REG_VERSION);
911         if (WARN_ON(dev->version < PIPE_CURRENT_DEVICE_VERSION))
912                 return -EINVAL;
913
914         return goldfish_pipe_device_init(pdev, dev);
915 }
916
917 static void goldfish_pipe_remove(struct platform_device *pdev)
918 {
919         struct goldfish_pipe_dev *dev = platform_get_drvdata(pdev);
920
921         goldfish_pipe_device_deinit(pdev, dev);
922 }
923
924 static const struct acpi_device_id goldfish_pipe_acpi_match[] = {
925         { "GFSH0003", 0 },
926         { },
927 };
928 MODULE_DEVICE_TABLE(acpi, goldfish_pipe_acpi_match);
929
930 static const struct of_device_id goldfish_pipe_of_match[] = {
931         { .compatible = "google,android-pipe", },
932         {},
933 };
934 MODULE_DEVICE_TABLE(of, goldfish_pipe_of_match);
935
936 static struct platform_driver goldfish_pipe_driver = {
937         .probe = goldfish_pipe_probe,
938         .remove = goldfish_pipe_remove,
939         .driver = {
940                 .name = "goldfish_pipe",
941                 .of_match_table = goldfish_pipe_of_match,
942                 .acpi_match_table = goldfish_pipe_acpi_match,
943         }
944 };
945
946 module_platform_driver(goldfish_pipe_driver);
947 MODULE_AUTHOR("David Turner <[email protected]>");
948 MODULE_DESCRIPTION("Goldfish virtual device for QEMU pipes");
949 MODULE_LICENSE("GPL v2");