/*
 * Virtio-based remote processor messaging bus
 *
 * Copyright (C) 2011 Texas Instruments, Inc.
 * Copyright (C) 2011 Google, Inc.
 *
 * Ohad Ben-Cohen <[email protected]>
 * Brian Swetland <[email protected]>
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#define pr_fmt(fmt) "%s: " fmt, __func__

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/virtio.h>
#include <linux/virtio_ids.h>
#include <linux/virtio_config.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/jiffies.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/rpmsg.h>
#include <linux/mutex.h>

/**
 * struct virtproc_info - virtual remote processor state
 * @vdev: the virtio device
 * @rvq: rx virtqueue
 * @svq: tx virtqueue
 * @rbufs: kernel address of rx buffers
 * @sbufs: kernel address of tx buffers
 * @num_bufs: total number of buffers for rx and tx
 * @last_sbuf: index of last tx buffer used
 * @bufs_dma: dma base addr of the buffers
 * @tx_lock: protects svq, sbufs and sleepers, to allow concurrent senders.
 *           sending a message might require waking up a dozing remote
 *           processor, which involves sleeping, hence the mutex.
 * @endpoints: idr of local endpoints, allows fast retrieval
 * @endpoints_lock: lock of the endpoints set
 * @sendq: wait queue of sending contexts waiting for a tx buffer
 * @sleepers: number of senders that are waiting for a tx buffer
 * @ns_ept: the bus's name service endpoint
 *
 * This structure stores the rpmsg state of a given virtio remote processor
 * device (there might be several virtio proc devices for each physical
 * remote processor).
 */
struct virtproc_info {
        struct virtio_device *vdev;
        struct virtqueue *rvq, *svq;
        void *rbufs, *sbufs;
        unsigned int num_bufs;
        int last_sbuf;
        dma_addr_t bufs_dma;
        struct mutex tx_lock;
        struct idr endpoints;
        struct mutex endpoints_lock;
        wait_queue_head_t sendq;
        atomic_t sleepers;
        struct rpmsg_endpoint *ns_ept;
};

/**
 * struct rpmsg_channel_info - internal channel info representation
 * @name: name of service
 * @src: local address
 * @dst: destination address
 */
struct rpmsg_channel_info {
        char name[RPMSG_NAME_SIZE];
        u32 src;
        u32 dst;
};

#define to_rpmsg_channel(d) container_of(d, struct rpmsg_channel, dev)
#define to_rpmsg_driver(d)  container_of(d, struct rpmsg_driver, drv)

/*
 * We're allocating buffers of 512 bytes each for communications. The
 * number of buffers will be computed from the number of buffers supported
 * by the vring, up to a maximum of 512 buffers (256 in each direction).
 *
 * Each buffer will have 16 bytes for the msg header and 496 bytes for
 * the payload.
 *
 * This will utilize a maximum total space of 256KB for the buffers.
 *
 * We might also want to add support for user-provided buffers in time.
 * This will allow bigger buffer size flexibility, and can also be used
 * to achieve zero-copy messaging.
 *
 * Note that these numbers are purely a decision of this driver - we
 * can change this without changing anything in the firmware of the remote
 * processor.
 */
#define MAX_RPMSG_NUM_BUFS      (512)
#define RPMSG_BUF_SIZE          (512)

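/*
 * For illustration only (a worked sketch of the sizing above, not part of
 * the original source): struct rpmsg_hdr occupies 16 bytes, which leaves
 * 512 - 16 = 496 bytes of payload per buffer, and the full complement of
 * buffers occupies 512 buffers * 512 bytes = 262144 bytes = 256KB of
 * coherent memory.
 */
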
/*
 * Local addresses are dynamically allocated on-demand.
 * We do not dynamically assign addresses from the low 1024 range,
 * in order to reserve that address range for predefined services.
 */
#define RPMSG_RESERVED_ADDRESSES        (1024)

/* Address 53 is reserved for advertising remote services */
#define RPMSG_NS_ADDR                   (53)

/* sysfs show configuration fields */
#define rpmsg_show_attr(field, path, format_string)                     \
static ssize_t                                                          \
field##_show(struct device *dev,                                        \
             struct device_attribute *attr, char *buf)                  \
{                                                                       \
        struct rpmsg_channel *rpdev = to_rpmsg_channel(dev);            \
                                                                        \
        return sprintf(buf, format_string, rpdev->path);                \
}

/* for more info, see Documentation/ABI/testing/sysfs-bus-rpmsg */
rpmsg_show_attr(name, id.name, "%s\n");
rpmsg_show_attr(src, src, "0x%x\n");
rpmsg_show_attr(dst, dst, "0x%x\n");
rpmsg_show_attr(announce, announce ? "true" : "false", "%s\n");

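/*
 * For illustration (a sketch, not part of the original file): the
 * rpmsg_show_attr(src, ...) invocation above expands to roughly the
 * following sysfs show routine:
 *
 *	static ssize_t
 *	src_show(struct device *dev, struct device_attribute *attr, char *buf)
 *	{
 *		struct rpmsg_channel *rpdev = to_rpmsg_channel(dev);
 *
 *		return sprintf(buf, "0x%x\n", rpdev->src);
 *	}
 */
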
/*
 * Unique (and free running) index for rpmsg devices.
 *
 * Yeah, we're not recycling those numbers (yet?). It will be easy
 * to change if/when we want to.
 */
static unsigned int rpmsg_dev_index;

static ssize_t modalias_show(struct device *dev,
                             struct device_attribute *attr, char *buf)
{
        struct rpmsg_channel *rpdev = to_rpmsg_channel(dev);

        return sprintf(buf, RPMSG_DEVICE_MODALIAS_FMT "\n", rpdev->id.name);
}

static struct device_attribute rpmsg_dev_attrs[] = {
        __ATTR_RO(name),
        __ATTR_RO(modalias),
        __ATTR_RO(dst),
        __ATTR_RO(src),
        __ATTR_RO(announce),
        __ATTR_NULL
};

/* rpmsg devices and drivers are matched using the service name */
static inline int rpmsg_id_match(const struct rpmsg_channel *rpdev,
                                 const struct rpmsg_device_id *id)
{
        return strncmp(id->name, rpdev->id.name, RPMSG_NAME_SIZE) == 0;
}

/* match rpmsg channel and rpmsg driver */
static int rpmsg_dev_match(struct device *dev, struct device_driver *drv)
{
        struct rpmsg_channel *rpdev = to_rpmsg_channel(dev);
        struct rpmsg_driver *rpdrv = to_rpmsg_driver(drv);
        const struct rpmsg_device_id *ids = rpdrv->id_table;
        unsigned int i;

        for (i = 0; ids[i].name[0]; i++)
                if (rpmsg_id_match(rpdev, &ids[i]))
                        return 1;

        return 0;
}

static int rpmsg_uevent(struct device *dev, struct kobj_uevent_env *env)
{
        struct rpmsg_channel *rpdev = to_rpmsg_channel(dev);

        return add_uevent_var(env, "MODALIAS=" RPMSG_DEVICE_MODALIAS_FMT,
                              rpdev->id.name);
}

/**
 * __ept_release() - deallocate an rpmsg endpoint
 * @kref: the ept's reference count
 *
 * This function deallocates an ept, and is invoked when its @kref refcount
 * drops to zero.
 *
 * Never invoke this function directly!
 */
static void __ept_release(struct kref *kref)
{
        struct rpmsg_endpoint *ept = container_of(kref, struct rpmsg_endpoint,
                                                  refcount);
        /*
         * At this point no one holds a reference to ept anymore,
         * so we can directly free it
         */
        kfree(ept);
}

/* for more info, see the documentation of rpmsg_create_ept() below */
static struct rpmsg_endpoint *__rpmsg_create_ept(struct virtproc_info *vrp,
                struct rpmsg_channel *rpdev, rpmsg_rx_cb_t cb,
                void *priv, u32 addr)
{
        int id_min, id_max, id;
        struct rpmsg_endpoint *ept;
        struct device *dev = rpdev ? &rpdev->dev : &vrp->vdev->dev;

        ept = kzalloc(sizeof(*ept), GFP_KERNEL);
        if (!ept) {
                dev_err(dev, "failed to kzalloc a new ept\n");
                return NULL;
        }

        kref_init(&ept->refcount);
        mutex_init(&ept->cb_lock);

        ept->rpdev = rpdev;
        ept->cb = cb;
        ept->priv = priv;

        /* do we need to allocate a local address ? */
        if (addr == RPMSG_ADDR_ANY) {
                id_min = RPMSG_RESERVED_ADDRESSES;
                id_max = 0;
        } else {
                id_min = addr;
                id_max = addr + 1;
        }

        mutex_lock(&vrp->endpoints_lock);

        /* bind the endpoint to an rpmsg address (and allocate one if needed) */
        id = idr_alloc(&vrp->endpoints, ept, id_min, id_max, GFP_KERNEL);
        if (id < 0) {
                dev_err(dev, "idr_alloc failed: %d\n", id);
                goto free_ept;
        }
        ept->addr = id;

        mutex_unlock(&vrp->endpoints_lock);

        return ept;

free_ept:
        mutex_unlock(&vrp->endpoints_lock);
        kref_put(&ept->refcount, __ept_release);
        return NULL;
}

/**
 * rpmsg_create_ept() - create a new rpmsg_endpoint
 * @rpdev: rpmsg channel device
 * @cb: rx callback handler
 * @priv: private data for the driver's use
 * @addr: local rpmsg address to bind with @cb
 *
 * Every rpmsg address in the system is bound to an rx callback (so when
 * inbound messages arrive, they are dispatched by the rpmsg bus using the
 * appropriate callback handler) by means of an rpmsg_endpoint struct.
 *
 * This function allows drivers to create such an endpoint, and by that,
 * bind a callback, and possibly some private data too, to an rpmsg address
 * (either one that is known in advance, or one that will be dynamically
 * assigned for them).
 *
 * Simple rpmsg drivers need not call rpmsg_create_ept, because an endpoint
 * is already created for them when they are probed by the rpmsg bus
 * (using the rx callback provided when they registered to the rpmsg bus).
 *
 * So things should just work for simple drivers: they already have an
 * endpoint, their rx callback is bound to their rpmsg address, and when
 * relevant inbound messages arrive (i.e. messages whose dst address equals
 * the src address of their rpmsg channel), the driver's handler is invoked
 * to process them.
 *
 * That said, more complicated drivers might need to allocate
 * additional rpmsg addresses, and bind them to different rx callbacks.
 * To accomplish that, those drivers need to call this function.
 *
 * Drivers should provide their @rpdev channel (so the new endpoint would belong
 * to the same remote processor their channel belongs to), an rx callback
 * function, optional private data (which is provided back when the
 * rx callback is invoked), and an address they want to bind with the
 * callback. If @addr is RPMSG_ADDR_ANY, then rpmsg_create_ept will
 * dynamically assign them an available rpmsg address (drivers should have
 * a very good reason for not always using RPMSG_ADDR_ANY here).
 *
 * Returns a pointer to the endpoint on success, or NULL on error.
 */
struct rpmsg_endpoint *rpmsg_create_ept(struct rpmsg_channel *rpdev,
                                rpmsg_rx_cb_t cb, void *priv, u32 addr)
{
        return __rpmsg_create_ept(rpdev->vrp, rpdev, cb, priv, addr);
}
EXPORT_SYMBOL(rpmsg_create_ept);
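
/*
 * Illustrative sketch, not part of the original driver: how a driver might
 * bind an extra rx callback to a dynamically assigned address from its
 * probe routine. The names my_cb and my_probe are hypothetical.
 *
 *	static void my_cb(struct rpmsg_channel *rpdev, void *data, int len,
 *			  void *priv, u32 src)
 *	{
 *		dev_info(&rpdev->dev, "got %d bytes from 0x%x\n", len, src);
 *	}
 *
 *	static int my_probe(struct rpmsg_channel *rpdev)
 *	{
 *		struct rpmsg_endpoint *ept;
 *
 *		ept = rpmsg_create_ept(rpdev, my_cb, NULL, RPMSG_ADDR_ANY);
 *		if (!ept)
 *			return -ENOMEM;
 *
 *		dev_info(&rpdev->dev, "extra local addr: 0x%x\n", ept->addr);
 *		return 0;
 *	}
 */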

/**
 * __rpmsg_destroy_ept() - destroy an existing rpmsg endpoint
 * @vrp: virtproc which owns this ept
 * @ept: endpoint to destroy
 *
 * An internal function which destroys an ept without assuming it is
 * bound to an rpmsg channel. This is needed for handling the internal
 * name service endpoint, which isn't bound to an rpmsg channel.
 * See also __rpmsg_create_ept().
 */
static void
__rpmsg_destroy_ept(struct virtproc_info *vrp, struct rpmsg_endpoint *ept)
{
        /* make sure new inbound messages can't find this ept anymore */
        mutex_lock(&vrp->endpoints_lock);
        idr_remove(&vrp->endpoints, ept->addr);
        mutex_unlock(&vrp->endpoints_lock);

        /* make sure in-flight inbound messages won't invoke cb anymore */
        mutex_lock(&ept->cb_lock);
        ept->cb = NULL;
        mutex_unlock(&ept->cb_lock);

        kref_put(&ept->refcount, __ept_release);
}

/**
 * rpmsg_destroy_ept() - destroy an existing rpmsg endpoint
 * @ept: endpoint to destroy
 *
 * Should be used by drivers to destroy an rpmsg endpoint previously
 * created with rpmsg_create_ept().
 */
void rpmsg_destroy_ept(struct rpmsg_endpoint *ept)
{
        __rpmsg_destroy_ept(ept->rpdev->vrp, ept);
}
EXPORT_SYMBOL(rpmsg_destroy_ept);

/*
 * when an rpmsg driver is probed with a channel, we seamlessly create
 * an endpoint for it, binding its rx callback to a unique local rpmsg
 * address.
 *
 * if we need to, we also announce this channel to the remote
 * processor (needed in case the driver is exposing an rpmsg service).
 */
static int rpmsg_dev_probe(struct device *dev)
{
        struct rpmsg_channel *rpdev = to_rpmsg_channel(dev);
        struct rpmsg_driver *rpdrv = to_rpmsg_driver(rpdev->dev.driver);
        struct virtproc_info *vrp = rpdev->vrp;
        struct rpmsg_endpoint *ept;
        int err;

        ept = rpmsg_create_ept(rpdev, rpdrv->callback, NULL, rpdev->src);
        if (!ept) {
                dev_err(dev, "failed to create endpoint\n");
                err = -ENOMEM;
                goto out;
        }

        rpdev->ept = ept;
        rpdev->src = ept->addr;

        err = rpdrv->probe(rpdev);
        if (err) {
                dev_err(dev, "%s: failed: %d\n", __func__, err);
                rpmsg_destroy_ept(ept);
                goto out;
        }

        /* need to tell remote processor's name service about this channel ? */
        if (rpdev->announce &&
            virtio_has_feature(vrp->vdev, VIRTIO_RPMSG_F_NS)) {
                struct rpmsg_ns_msg nsm;

                strncpy(nsm.name, rpdev->id.name, RPMSG_NAME_SIZE);
                nsm.addr = rpdev->src;
                nsm.flags = RPMSG_NS_CREATE;

                err = rpmsg_sendto(rpdev, &nsm, sizeof(nsm), RPMSG_NS_ADDR);
                if (err)
                        dev_err(dev, "failed to announce service %d\n", err);
        }

out:
        return err;
}

static int rpmsg_dev_remove(struct device *dev)
{
        struct rpmsg_channel *rpdev = to_rpmsg_channel(dev);
        struct rpmsg_driver *rpdrv = to_rpmsg_driver(rpdev->dev.driver);
        struct virtproc_info *vrp = rpdev->vrp;
        int err = 0;

        /* tell remote processor's name service we're removing this channel */
        if (rpdev->announce &&
            virtio_has_feature(vrp->vdev, VIRTIO_RPMSG_F_NS)) {
                struct rpmsg_ns_msg nsm;

                strncpy(nsm.name, rpdev->id.name, RPMSG_NAME_SIZE);
                nsm.addr = rpdev->src;
                nsm.flags = RPMSG_NS_DESTROY;

                err = rpmsg_sendto(rpdev, &nsm, sizeof(nsm), RPMSG_NS_ADDR);
                if (err)
                        dev_err(dev, "failed to announce service %d\n", err);
        }

        rpdrv->remove(rpdev);

        rpmsg_destroy_ept(rpdev->ept);

        return err;
}

static struct bus_type rpmsg_bus = {
        .name      = "rpmsg",
        .match     = rpmsg_dev_match,
        .dev_attrs = rpmsg_dev_attrs,
        .uevent    = rpmsg_uevent,
        .probe     = rpmsg_dev_probe,
        .remove    = rpmsg_dev_remove,
};

/**
 * register_rpmsg_driver() - register an rpmsg driver with the rpmsg bus
 * @rpdrv: pointer to a struct rpmsg_driver
 *
 * Returns 0 on success, and an appropriate error value on failure.
 */
int register_rpmsg_driver(struct rpmsg_driver *rpdrv)
{
        rpdrv->drv.bus = &rpmsg_bus;
        return driver_register(&rpdrv->drv);
}
EXPORT_SYMBOL(register_rpmsg_driver);

/**
 * unregister_rpmsg_driver() - unregister an rpmsg driver from the rpmsg bus
 * @rpdrv: pointer to a struct rpmsg_driver
 */
void unregister_rpmsg_driver(struct rpmsg_driver *rpdrv)
{
        driver_unregister(&rpdrv->drv);
}
EXPORT_SYMBOL(unregister_rpmsg_driver);
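
/*
 * Illustrative sketch, not part of the original driver: the typical shape
 * of a minimal rpmsg client driver registering with this bus. All names
 * (my_rpmsg_cb, my_rpmsg_probe, etc.) are hypothetical.
 *
 *	static void my_rpmsg_cb(struct rpmsg_channel *rpdev, void *data,
 *				int len, void *priv, u32 src)
 *	{
 *		print_hex_dump(KERN_DEBUG, "incoming: ", DUMP_PREFIX_NONE,
 *			       16, 1, data, len, true);
 *	}
 *
 *	static int my_rpmsg_probe(struct rpmsg_channel *rpdev)
 *	{
 *		return rpmsg_send(rpdev, "hello!", 6);
 *	}
 *
 *	static void my_rpmsg_remove(struct rpmsg_channel *rpdev)
 *	{
 *	}
 *
 *	static struct rpmsg_device_id my_rpmsg_id_table[] = {
 *		{ .name = "my-rpmsg-service" },
 *		{ },
 *	};
 *
 *	static struct rpmsg_driver my_rpmsg_driver = {
 *		.drv.name  = KBUILD_MODNAME,
 *		.drv.owner = THIS_MODULE,
 *		.id_table  = my_rpmsg_id_table,
 *		.probe     = my_rpmsg_probe,
 *		.callback  = my_rpmsg_cb,
 *		.remove    = my_rpmsg_remove,
 *	};
 *
 * module_init()/module_exit() would then call register_rpmsg_driver() and
 * unregister_rpmsg_driver() respectively.
 */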

static void rpmsg_release_device(struct device *dev)
{
        struct rpmsg_channel *rpdev = to_rpmsg_channel(dev);

        kfree(rpdev);
}

/*
 * match an rpmsg channel with a channel info struct.
 * this is used to make sure we're not creating rpmsg devices for channels
 * that already exist.
 */
static int rpmsg_channel_match(struct device *dev, void *data)
{
        struct rpmsg_channel_info *chinfo = data;
        struct rpmsg_channel *rpdev = to_rpmsg_channel(dev);

        if (chinfo->src != RPMSG_ADDR_ANY && chinfo->src != rpdev->src)
                return 0;

        if (chinfo->dst != RPMSG_ADDR_ANY && chinfo->dst != rpdev->dst)
                return 0;

        if (strncmp(chinfo->name, rpdev->id.name, RPMSG_NAME_SIZE))
                return 0;

        /* found a match ! */
        return 1;
}

/*
 * create an rpmsg channel using its name and address info.
 * this function will be used to create both static and dynamic
 * channels.
 */
static struct rpmsg_channel *rpmsg_create_channel(struct virtproc_info *vrp,
                                struct rpmsg_channel_info *chinfo)
{
        struct rpmsg_channel *rpdev;
        struct device *tmp, *dev = &vrp->vdev->dev;
        int ret;

        /* make sure a similar channel doesn't already exist */
        tmp = device_find_child(dev, chinfo, rpmsg_channel_match);
        if (tmp) {
                /* decrement the matched device's refcount back */
                put_device(tmp);
                dev_err(dev, "channel %s:%x:%x already exist\n",
                        chinfo->name, chinfo->src, chinfo->dst);
                return NULL;
        }

        rpdev = kzalloc(sizeof(struct rpmsg_channel), GFP_KERNEL);
        if (!rpdev) {
                pr_err("kzalloc failed\n");
                return NULL;
        }

        rpdev->vrp = vrp;
        rpdev->src = chinfo->src;
        rpdev->dst = chinfo->dst;

        /*
         * rpmsg server channels have a predefined local address (for now),
         * and their existence needs to be announced remotely
         */
        rpdev->announce = rpdev->src != RPMSG_ADDR_ANY ? true : false;

        strncpy(rpdev->id.name, chinfo->name, RPMSG_NAME_SIZE);

        /* very simple device indexing plumbing which is enough for now */
        dev_set_name(&rpdev->dev, "rpmsg%d", rpmsg_dev_index++);

        rpdev->dev.parent = &vrp->vdev->dev;
        rpdev->dev.bus = &rpmsg_bus;
        rpdev->dev.release = rpmsg_release_device;

        ret = device_register(&rpdev->dev);
        if (ret) {
                dev_err(dev, "device_register failed: %d\n", ret);
                put_device(&rpdev->dev);
                return NULL;
        }

        return rpdev;
}

/*
 * find an existing channel using its name + address properties,
 * and destroy it
 */
static int rpmsg_destroy_channel(struct virtproc_info *vrp,
                                struct rpmsg_channel_info *chinfo)
{
        struct virtio_device *vdev = vrp->vdev;
        struct device *dev;

        dev = device_find_child(&vdev->dev, chinfo, rpmsg_channel_match);
        if (!dev)
                return -EINVAL;

        device_unregister(dev);

        put_device(dev);

        return 0;
}

/* super simple buffer "allocator" that is just enough for now */
static void *get_a_tx_buf(struct virtproc_info *vrp)
{
        unsigned int len;
        void *ret;

        /* support multiple concurrent senders */
        mutex_lock(&vrp->tx_lock);

        /*
         * either pick the next unused tx buffer
         * (half of our buffers are used for sending messages)
         */
        if (vrp->last_sbuf < vrp->num_bufs / 2)
                ret = vrp->sbufs + RPMSG_BUF_SIZE * vrp->last_sbuf++;
        /* or recycle a used one */
        else
                ret = virtqueue_get_buf(vrp->svq, &len);

        mutex_unlock(&vrp->tx_lock);

        return ret;
}

/**
 * rpmsg_upref_sleepers() - enable "tx-complete" interrupts, if needed
 * @vrp: virtual remote processor state
 *
 * This function is called before a sender is blocked, waiting for
 * a tx buffer to become available.
 *
 * If we already have blocking senders, this function merely increases
 * the "sleepers" reference count, and exits.
 *
 * Otherwise, if this is the first sender to block, we also enable
 * virtio's tx callbacks, so we'd be immediately notified when a tx
 * buffer is consumed (we rely on virtio's tx callback in order
 * to wake up sleeping senders as soon as a tx buffer is used by the
 * remote processor).
 */
static void rpmsg_upref_sleepers(struct virtproc_info *vrp)
{
        /* support multiple concurrent senders */
        mutex_lock(&vrp->tx_lock);

        /* are we the first sleeping context waiting for tx buffers ? */
        if (atomic_inc_return(&vrp->sleepers) == 1)
                /* enable "tx-complete" interrupts before dozing off */
                virtqueue_enable_cb(vrp->svq);

        mutex_unlock(&vrp->tx_lock);
}

/**
 * rpmsg_downref_sleepers() - disable "tx-complete" interrupts, if needed
 * @vrp: virtual remote processor state
 *
 * This function is called after a sender, that waited for a tx buffer
 * to become available, is unblocked.
 *
 * If we still have blocking senders, this function merely decreases
 * the "sleepers" reference count, and exits.
 *
 * Otherwise, if there are no more blocking senders, we also disable
 * virtio's tx callbacks, to avoid the overhead incurred with handling
 * those (now redundant) interrupts.
 */
static void rpmsg_downref_sleepers(struct virtproc_info *vrp)
{
        /* support multiple concurrent senders */
        mutex_lock(&vrp->tx_lock);

        /* are we the last sleeping context waiting for tx buffers ? */
        if (atomic_dec_and_test(&vrp->sleepers))
                /* disable "tx-complete" interrupts */
                virtqueue_disable_cb(vrp->svq);

        mutex_unlock(&vrp->tx_lock);
}

/**
 * rpmsg_send_offchannel_raw() - send a message across to the remote processor
 * @rpdev: the rpmsg channel
 * @src: source address
 * @dst: destination address
 * @data: payload of message
 * @len: length of payload
 * @wait: indicates whether caller should block in case no TX buffers are
 *        available
 *
 * This function is the base implementation for all of the rpmsg sending API.
 *
 * It will send @data of length @len to @dst, and say it's from @src. The
 * message will be sent to the remote processor which the @rpdev channel
 * belongs to.
 *
 * The message is sent using one of the TX buffers that are available for
 * communication with this remote processor.
 *
 * If @wait is true, the caller will be blocked until either a TX buffer is
 * available, or 15 seconds elapse (we don't want callers to
 * sleep indefinitely due to misbehaving remote processors), and in that
 * case -ERESTARTSYS is returned. The number '15' itself was picked
 * arbitrarily; there's little point in asking drivers to provide a timeout
 * value themselves.
 *
 * Otherwise, if @wait is false, and there are no TX buffers available,
 * the function will immediately fail, and -ENOMEM will be returned.
 *
 * Normally drivers shouldn't use this function directly; instead, drivers
 * should use the appropriate rpmsg_{try}send{to, _offchannel} API
 * (see include/linux/rpmsg.h).
 *
 * Returns 0 on success and an appropriate error value on failure.
 */
int rpmsg_send_offchannel_raw(struct rpmsg_channel *rpdev, u32 src, u32 dst,
                              void *data, int len, bool wait)
{
        struct virtproc_info *vrp = rpdev->vrp;
        struct device *dev = &rpdev->dev;
        struct scatterlist sg;
        struct rpmsg_hdr *msg;
        int err;

        /* bcasting isn't allowed */
        if (src == RPMSG_ADDR_ANY || dst == RPMSG_ADDR_ANY) {
                dev_err(dev, "invalid addr (src 0x%x, dst 0x%x)\n", src, dst);
                return -EINVAL;
        }

        /*
         * We currently use fixed-sized buffers, and therefore the payload
         * length is limited.
         *
         * One of the possible improvements here is either to support
         * user-provided buffers (and then we can also support zero-copy
         * messaging), or to improve the buffer allocator, to support
         * variable-length buffer sizes.
         */
        if (len > RPMSG_BUF_SIZE - sizeof(struct rpmsg_hdr)) {
                dev_err(dev, "message is too big (%d)\n", len);
                return -EMSGSIZE;
        }

        /* grab a buffer */
        msg = get_a_tx_buf(vrp);
        if (!msg && !wait)
                return -ENOMEM;

        /* no free buffer ? wait for one (but bail after 15 seconds) */
        while (!msg) {
                /* enable "tx-complete" interrupts, if not already enabled */
                rpmsg_upref_sleepers(vrp);

                /*
                 * sleep until a free buffer is available or 15 secs elapse.
                 * the timeout period is not configurable because there's
                 * little point in asking drivers to specify that.
                 * if later this happens to be required, it'd be easy to add.
                 */
                err = wait_event_interruptible_timeout(vrp->sendq,
                                        (msg = get_a_tx_buf(vrp)),
                                        msecs_to_jiffies(15000));

                /* disable "tx-complete" interrupts if we're the last sleeper */
                rpmsg_downref_sleepers(vrp);

                /* timeout ? */
                if (!err) {
                        dev_err(dev, "timeout waiting for a tx buffer\n");
                        return -ERESTARTSYS;
                }
        }

        msg->len = len;
        msg->flags = 0;
        msg->src = src;
        msg->dst = dst;
        msg->reserved = 0;
        memcpy(msg->data, data, len);

        dev_dbg(dev, "TX From 0x%x, To 0x%x, Len %d, Flags %d, Reserved %d\n",
                msg->src, msg->dst, msg->len,
                msg->flags, msg->reserved);
        print_hex_dump(KERN_DEBUG, "rpmsg_virtio TX: ", DUMP_PREFIX_NONE, 16, 1,
                       msg, sizeof(*msg) + msg->len, true);

        sg_init_one(&sg, msg, sizeof(*msg) + len);

        mutex_lock(&vrp->tx_lock);

        /* add message to the remote processor's virtqueue */
        err = virtqueue_add_outbuf(vrp->svq, &sg, 1, msg, GFP_KERNEL);
        if (err) {
                /*
                 * need to reclaim the buffer here, otherwise it's lost
                 * (memory won't leak, but rpmsg won't use it again for TX).
                 * this will wait for a buffer management overhaul.
                 */
                dev_err(dev, "virtqueue_add_outbuf failed: %d\n", err);
                goto out;
        }

        /* tell the remote processor it has a pending message to read */
        virtqueue_kick(vrp->svq);
out:
        mutex_unlock(&vrp->tx_lock);
        return err;
}
EXPORT_SYMBOL(rpmsg_send_offchannel_raw);
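
/*
 * For orientation (a sketch, not a definition): the sending helpers in
 * include/linux/rpmsg.h boil down to calls into the function above,
 * roughly:
 *
 *	rpmsg_send(rpdev, data, len)
 *		-> rpmsg_send_offchannel_raw(rpdev, rpdev->src, rpdev->dst,
 *					     data, len, true)
 *	rpmsg_sendto(rpdev, data, len, dst)
 *		-> rpmsg_send_offchannel_raw(rpdev, rpdev->src, dst,
 *					     data, len, true)
 *	rpmsg_trysend(rpdev, data, len)
 *		-> rpmsg_send_offchannel_raw(rpdev, rpdev->src, rpdev->dst,
 *					     data, len, false)
 *
 * i.e. the "try" variants pass wait == false and fail with -ENOMEM instead
 * of blocking when no tx buffer is available.
 */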

static int rpmsg_recv_single(struct virtproc_info *vrp, struct device *dev,
                             struct rpmsg_hdr *msg, unsigned int len)
{
        struct rpmsg_endpoint *ept;
        struct scatterlist sg;
        int err;

        dev_dbg(dev, "From: 0x%x, To: 0x%x, Len: %d, Flags: %d, Reserved: %d\n",
                msg->src, msg->dst, msg->len,
                msg->flags, msg->reserved);
        print_hex_dump(KERN_DEBUG, "rpmsg_virtio RX: ", DUMP_PREFIX_NONE, 16, 1,
                       msg, sizeof(*msg) + msg->len, true);

        /*
         * We currently use fixed-sized buffers, so trivially sanitize
         * the reported payload length.
         */
        if (len > RPMSG_BUF_SIZE ||
            msg->len > (len - sizeof(struct rpmsg_hdr))) {
                dev_warn(dev, "inbound msg too big: (%d, %d)\n", len, msg->len);
                return -EINVAL;
        }

        /* use the dst addr to fetch the callback of the appropriate user */
        mutex_lock(&vrp->endpoints_lock);

        ept = idr_find(&vrp->endpoints, msg->dst);

        /* let's make sure no one deallocates ept while we use it */
        if (ept)
                kref_get(&ept->refcount);

        mutex_unlock(&vrp->endpoints_lock);

        if (ept) {
                /* make sure ept->cb doesn't go away while we use it */
                mutex_lock(&ept->cb_lock);

                if (ept->cb)
                        ept->cb(ept->rpdev, msg->data, msg->len, ept->priv,
                                msg->src);

                mutex_unlock(&ept->cb_lock);

                /* farewell, ept, we don't need you anymore */
                kref_put(&ept->refcount, __ept_release);
        } else
                dev_warn(dev, "msg received with no recipient\n");

        /* publish the real size of the buffer */
        sg_init_one(&sg, msg, RPMSG_BUF_SIZE);

        /* add the buffer back to the remote processor's virtqueue */
        err = virtqueue_add_inbuf(vrp->rvq, &sg, 1, msg, GFP_KERNEL);
        if (err < 0) {
                dev_err(dev, "failed to add a virtqueue buffer: %d\n", err);
                return err;
        }

        return 0;
}

/* called when an rx buffer is used, and it's time to digest a message */
static void rpmsg_recv_done(struct virtqueue *rvq)
{
        struct virtproc_info *vrp = rvq->vdev->priv;
        struct device *dev = &rvq->vdev->dev;
        struct rpmsg_hdr *msg;
        unsigned int len, msgs_received = 0;
        int err;

        msg = virtqueue_get_buf(rvq, &len);
        if (!msg) {
                dev_err(dev, "uhm, incoming signal, but no used buffer ?\n");
                return;
        }

        while (msg) {
                err = rpmsg_recv_single(vrp, dev, msg, len);
                if (err)
                        break;

                msgs_received++;

                msg = virtqueue_get_buf(rvq, &len);
        }

        dev_dbg(dev, "Received %u messages\n", msgs_received);

        /* tell the remote processor we added another available rx buffer */
        if (msgs_received)
                virtqueue_kick(vrp->rvq);
}

/*
 * This is invoked whenever the remote processor completed processing
 * a TX msg we just sent it, and the buffer is put back to the used ring.
 *
 * Normally, though, we suppress this "tx complete" interrupt in order to
 * avoid the incurred overhead.
 */
static void rpmsg_xmit_done(struct virtqueue *svq)
{
        struct virtproc_info *vrp = svq->vdev->priv;

        dev_dbg(&svq->vdev->dev, "%s\n", __func__);

        /* wake up potential senders that are waiting for a tx buffer */
        wake_up_interruptible(&vrp->sendq);
}

/* invoked when a name service announcement arrives */
static void rpmsg_ns_cb(struct rpmsg_channel *rpdev, void *data, int len,
                        void *priv, u32 src)
{
        struct rpmsg_ns_msg *msg = data;
        struct rpmsg_channel *newch;
        struct rpmsg_channel_info chinfo;
        struct virtproc_info *vrp = priv;
        struct device *dev = &vrp->vdev->dev;
        int ret;

        print_hex_dump(KERN_DEBUG, "NS announcement: ",
                       DUMP_PREFIX_NONE, 16, 1,
                       data, len, true);

        if (len != sizeof(*msg)) {
                dev_err(dev, "malformed ns msg (%d)\n", len);
                return;
        }

        /*
         * the name service ept does _not_ belong to a real rpmsg channel,
         * and is handled by the rpmsg bus itself.
         * for sanity reasons, make sure a valid rpdev has _not_ sneaked
         * in somehow.
         */
        if (rpdev) {
                dev_err(dev, "anomaly: ns ept has an rpdev handle\n");
                return;
        }

        /* don't trust the remote processor for null terminating the name */
        msg->name[RPMSG_NAME_SIZE - 1] = '\0';

        dev_info(dev, "%sing channel %s addr 0x%x\n",
                 msg->flags & RPMSG_NS_DESTROY ? "destroy" : "creat",
                 msg->name, msg->addr);

        strncpy(chinfo.name, msg->name, sizeof(chinfo.name));
        chinfo.src = RPMSG_ADDR_ANY;
        chinfo.dst = msg->addr;

        if (msg->flags & RPMSG_NS_DESTROY) {
                ret = rpmsg_destroy_channel(vrp, &chinfo);
                if (ret)
                        dev_err(dev, "rpmsg_destroy_channel failed: %d\n", ret);
        } else {
                newch = rpmsg_create_channel(vrp, &chinfo);
                if (!newch)
                        dev_err(dev, "rpmsg_create_channel failed\n");
        }
}

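/*
 * For reference (a sketch based on include/linux/rpmsg.h, not redefined
 * here): the announcement payload handled above is struct rpmsg_ns_msg,
 * which looks roughly like:
 *
 *	struct rpmsg_ns_msg {
 *		char name[RPMSG_NAME_SIZE];
 *		u32 addr;
 *		u32 flags;
 *	} __packed;
 *
 * with flags being RPMSG_NS_CREATE or RPMSG_NS_DESTROY.
 */
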
static int rpmsg_probe(struct virtio_device *vdev)
{
        vq_callback_t *vq_cbs[] = { rpmsg_recv_done, rpmsg_xmit_done };
        const char *names[] = { "input", "output" };
        struct virtqueue *vqs[2];
        struct virtproc_info *vrp;
        void *bufs_va;
        int err = 0, i;
        size_t total_buf_space;
        bool notify;

        vrp = kzalloc(sizeof(*vrp), GFP_KERNEL);
        if (!vrp)
                return -ENOMEM;

        vrp->vdev = vdev;

        idr_init(&vrp->endpoints);
        mutex_init(&vrp->endpoints_lock);
        mutex_init(&vrp->tx_lock);
        init_waitqueue_head(&vrp->sendq);

        /* We expect two virtqueues, rx and tx (and in this order) */
        err = vdev->config->find_vqs(vdev, 2, vqs, vq_cbs, names);
        if (err)
                goto free_vrp;

        vrp->rvq = vqs[0];
        vrp->svq = vqs[1];

        /* we expect symmetric tx/rx vrings */
        WARN_ON(virtqueue_get_vring_size(vrp->rvq) !=
                virtqueue_get_vring_size(vrp->svq));

        /* we need fewer buffers if vrings are small */
        if (virtqueue_get_vring_size(vrp->rvq) < MAX_RPMSG_NUM_BUFS / 2)
                vrp->num_bufs = virtqueue_get_vring_size(vrp->rvq) * 2;
        else
                vrp->num_bufs = MAX_RPMSG_NUM_BUFS;

        total_buf_space = vrp->num_bufs * RPMSG_BUF_SIZE;
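
        /*
         * A worked example (illustrative, not from the original source):
         * a 256-entry vring caps this at num_bufs = MAX_RPMSG_NUM_BUFS = 512,
         * i.e. 512 * 512 bytes = 256KB of buffer space; a smaller, say
         * 64-entry, vring yields num_bufs = 128 (64 per direction) and only
         * 64KB of buffer space.
         */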

        /* allocate coherent memory for the buffers */
        bufs_va = dma_alloc_coherent(vdev->dev.parent->parent,
                                     total_buf_space, &vrp->bufs_dma,
                                     GFP_KERNEL);
        if (!bufs_va) {
                err = -ENOMEM;
                goto vqs_del;
        }

        dev_dbg(&vdev->dev, "buffers: va %p, dma 0x%llx\n", bufs_va,
                (unsigned long long)vrp->bufs_dma);

        /* half of the buffers are dedicated for RX */
        vrp->rbufs = bufs_va;

        /* and half are dedicated for TX */
        vrp->sbufs = bufs_va + total_buf_space / 2;

        /* set up the receive buffers */
        for (i = 0; i < vrp->num_bufs / 2; i++) {
                struct scatterlist sg;
                void *cpu_addr = vrp->rbufs + i * RPMSG_BUF_SIZE;

                sg_init_one(&sg, cpu_addr, RPMSG_BUF_SIZE);

                err = virtqueue_add_inbuf(vrp->rvq, &sg, 1, cpu_addr,
                                          GFP_KERNEL);
                WARN_ON(err); /* sanity check; this can't really happen */
        }

        /* suppress "tx-complete" interrupts */
        virtqueue_disable_cb(vrp->svq);

        vdev->priv = vrp;

        /* if supported by the remote processor, enable the name service */
        if (virtio_has_feature(vdev, VIRTIO_RPMSG_F_NS)) {
                /* a dedicated endpoint handles the name service msgs */
                vrp->ns_ept = __rpmsg_create_ept(vrp, NULL, rpmsg_ns_cb,
                                                 vrp, RPMSG_NS_ADDR);
                if (!vrp->ns_ept) {
                        dev_err(&vdev->dev, "failed to create the ns ept\n");
                        err = -ENOMEM;
                        goto free_coherent;
                }
        }

        /*
         * Prepare to kick but don't notify yet - we can't do this before
         * device is ready.
         */
        notify = virtqueue_kick_prepare(vrp->rvq);

        /* From this point on, we can notify and get callbacks. */
        virtio_device_ready(vdev);

        /* tell the remote processor it can start sending messages */
        /*
         * this might be concurrent with callbacks, but we are only
         * doing notify, not a full kick here, so that's ok.
         */
        if (notify)
                virtqueue_notify(vrp->rvq);

        dev_info(&vdev->dev, "rpmsg host is online\n");

        return 0;

free_coherent:
        dma_free_coherent(vdev->dev.parent->parent, total_buf_space,
                          bufs_va, vrp->bufs_dma);
vqs_del:
        vdev->config->del_vqs(vrp->vdev);
free_vrp:
        kfree(vrp);
        return err;
}

static int rpmsg_remove_device(struct device *dev, void *data)
{
        device_unregister(dev);

        return 0;
}

static void rpmsg_remove(struct virtio_device *vdev)
{
        struct virtproc_info *vrp = vdev->priv;
        size_t total_buf_space = vrp->num_bufs * RPMSG_BUF_SIZE;
        int ret;

        vdev->config->reset(vdev);

        ret = device_for_each_child(&vdev->dev, NULL, rpmsg_remove_device);
        if (ret)
                dev_warn(&vdev->dev, "can't remove rpmsg device: %d\n", ret);

        if (vrp->ns_ept)
                __rpmsg_destroy_ept(vrp, vrp->ns_ept);

        idr_destroy(&vrp->endpoints);

        vdev->config->del_vqs(vrp->vdev);

        dma_free_coherent(vdev->dev.parent->parent, total_buf_space,
                          vrp->rbufs, vrp->bufs_dma);

        kfree(vrp);
}

static struct virtio_device_id id_table[] = {
        { VIRTIO_ID_RPMSG, VIRTIO_DEV_ANY_ID },
        { 0 },
};

static unsigned int features[] = {
        VIRTIO_RPMSG_F_NS,
};

static struct virtio_driver virtio_ipc_driver = {
        .feature_table      = features,
        .feature_table_size = ARRAY_SIZE(features),
        .driver.name        = KBUILD_MODNAME,
        .driver.owner       = THIS_MODULE,
        .id_table           = id_table,
        .probe              = rpmsg_probe,
        .remove             = rpmsg_remove,
};

static int __init rpmsg_init(void)
{
        int ret;

        ret = bus_register(&rpmsg_bus);
        if (ret) {
                pr_err("failed to register rpmsg bus: %d\n", ret);
                return ret;
        }

        ret = register_virtio_driver(&virtio_ipc_driver);
        if (ret) {
                pr_err("failed to register virtio driver: %d\n", ret);
                bus_unregister(&rpmsg_bus);
        }

        return ret;
}
subsys_initcall(rpmsg_init);

static void __exit rpmsg_fini(void)
{
        unregister_virtio_driver(&virtio_ipc_driver);
        bus_unregister(&rpmsg_bus);
}
module_exit(rpmsg_fini);

MODULE_DEVICE_TABLE(virtio, id_table);
MODULE_DESCRIPTION("Virtio-based remote processor messaging bus");
MODULE_LICENSE("GPL v2");