2 * Copyright (c) 2016-2017, Linaro Ltd
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 and
6 * only version 2 as published by the Free Software Foundation.
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
14 #include <linux/idr.h>
15 #include <linux/interrupt.h>
17 #include <linux/list.h>
18 #include <linux/mfd/syscon.h>
19 #include <linux/module.h>
21 #include <linux/of_address.h>
22 #include <linux/platform_device.h>
23 #include <linux/regmap.h>
24 #include <linux/rpmsg.h>
25 #include <linux/slab.h>
26 #include <linux/workqueue.h>
27 #include <linux/mailbox_client.h>
29 #include "rpmsg_internal.h"
31 #define RPM_TOC_SIZE 256
32 #define RPM_TOC_MAGIC 0x67727430 /* grt0 */
33 #define RPM_TOC_MAX_ENTRIES ((RPM_TOC_SIZE - sizeof(struct rpm_toc)) / \
34 sizeof(struct rpm_toc_entry))
36 #define RPM_TX_FIFO_ID 0x61703272 /* ap2r */
37 #define RPM_RX_FIFO_ID 0x72326170 /* r2ap */
39 #define GLINK_NAME_SIZE 32
41 #define RPM_GLINK_CID_MIN 1
42 #define RPM_GLINK_CID_MAX 65536
44 struct rpm_toc_entry {
54 struct rpm_toc_entry entries[];
64 struct glink_rpm_pipe {
74 * struct glink_defer_cmd - deferred incoming control message
76 * @msg: message header
77 * @data: payload of the message
79 * Copy of a received control message, to be added to @rx_queue and processed
80 * by @rx_work of @glink_rpm.
82 struct glink_defer_cmd {
83 struct list_head node;
90 * struct glink_rpm - driver context, relates to one remote subsystem
91 * @dev: reference to the associated struct device
92 * @doorbell: "rpm_hlos" ipc doorbell
93 * @rx_pipe: pipe object for receive FIFO
94 * @tx_pipe: pipe object for transmit FIFO
95 * @irq: IRQ for signaling incoming events
96 * @rx_work: worker for handling received control messages
97 * @rx_lock: protects the @rx_queue
98 * @rx_queue: queue of received control messages to be processed in @rx_work
99 * @tx_lock: synchronizes operations on the tx fifo
100 * @idr_lock: synchronizes @lcids and @rcids modifications
101 * @lcids: idr of all channels with a known local channel id
102 * @rcids: idr of all channels with a known remote channel id
107 struct mbox_client mbox_client;
108 struct mbox_chan *mbox_chan;
110 struct glink_rpm_pipe rx_pipe;
111 struct glink_rpm_pipe tx_pipe;
115 struct work_struct rx_work;
117 struct list_head rx_queue;
119 struct mutex tx_lock;
121 struct mutex idr_lock;
134 * struct glink_channel - internal representation of a channel
135 * @rpdev: rpdev reference, only used for primary endpoints
136 * @ept: rpmsg endpoint this channel is associated with
137 * @glink: glink_rpm context handle
138 * @refcount: refcount for the channel object
139 * @recv_lock: guard for @ept.cb
140 * @name: unique channel name/identifier
141 * @lcid: channel id, in local space
142 * @rcid: channel id, in remote space
143 * @buf: receive buffer, for gathering fragments
144 * @buf_offset: write offset in @buf
145 * @buf_size: size of current @buf
146 * @open_ack: completed once remote has acked the open-request
147 * @open_req: completed once open-request has been received
149 struct glink_channel {
150 struct rpmsg_endpoint ept;
152 struct rpmsg_device *rpdev;
153 struct glink_rpm *glink;
155 struct kref refcount;
157 spinlock_t recv_lock;
167 struct completion open_ack;
168 struct completion open_req;
171 #define to_glink_channel(_ept) container_of(_ept, struct glink_channel, ept)
173 static const struct rpmsg_endpoint_ops glink_endpoint_ops;
175 #define RPM_CMD_VERSION 0
176 #define RPM_CMD_VERSION_ACK 1
177 #define RPM_CMD_OPEN 2
178 #define RPM_CMD_CLOSE 3
179 #define RPM_CMD_OPEN_ACK 4
180 #define RPM_CMD_TX_DATA 9
181 #define RPM_CMD_CLOSE_ACK 11
182 #define RPM_CMD_TX_DATA_CONT 12
183 #define RPM_CMD_READ_NOTIF 13
185 #define GLINK_FEATURE_INTENTLESS BIT(1)
/*
 * Allocate and initialize a glink_channel object bound to @glink.
 * Initializes the receive lock, name copy, open_req/open_ack completions
 * and the refcount.  Returns ERR_PTR(-ENOMEM) on allocation failure;
 * callers must check the result with IS_ERR().
 * NOTE(review): the allocation-failure guard and final return are elided
 * in this extract — only the happy-path initialization is visible.
 */
187 static struct glink_channel *glink_rpm_alloc_channel(struct glink_rpm *glink,
190 struct glink_channel *channel;
192 channel = kzalloc(sizeof(*channel), GFP_KERNEL);
/* Failure branch of the (elided) NULL check on the kzalloc above */
194 return ERR_PTR(-ENOMEM);
196 /* Setup glink internal glink_channel data */
197 spin_lock_init(&channel->recv_lock);
198 channel->glink = glink;
/* Channel keeps its own copy of the name; freed in channel_release() */
199 channel->name = kstrdup(name, GFP_KERNEL);
201 init_completion(&channel->open_req);
202 init_completion(&channel->open_ack);
/* Initial reference; dropped via kref_put(..., glink_rpm_channel_release) */
204 kref_init(&channel->refcount);
/*
 * kref release callback for glink_channel: frees the kstrdup()'d name
 * (and, presumably, the channel itself in an elided kfree — confirm
 * against upstream).  Invoked when the last reference is dropped.
 */
209 static void glink_rpm_channel_release(struct kref *ref)
211 struct glink_channel *channel = container_of(ref, struct glink_channel,
214 kfree(channel->name);
/*
 * Number of bytes pending in the rx FIFO, computed from the head/tail
 * indices read out of MSG RAM.  The visible return handles the wrapped
 * case (head < tail); the non-wrapping branch (head - tail) is elided
 * in this extract.
 */
218 static size_t glink_rpm_rx_avail(struct glink_rpm *glink)
220 struct glink_rpm_pipe *pipe = &glink->rx_pipe;
224 head = readl(pipe->head);
225 tail = readl(pipe->tail);
228 return pipe->length - tail + head;
/*
 * Copy @count bytes out of the rx FIFO into @data without advancing the
 * tail ("peek").  A wrap-around is handled by splitting the copy in two;
 * transfers use __ioread32_copy() because the FIFO lives in device
 * (MSG RAM) memory and must be read in 32-bit words.
 */
233 static void glink_rpm_rx_peak(struct glink_rpm *glink,
234 void *data, size_t count)
236 struct glink_rpm_pipe *pipe = &glink->rx_pipe;
240 tail = readl(pipe->tail);
/* First leg: from tail to end of the ring */
242 len = min_t(size_t, count, pipe->length - tail);
244 __ioread32_copy(data, pipe->fifo + tail,
/* Second leg: whatever wrapped around to the start of the ring */
249 __ioread32_copy(data + len, pipe->fifo,
250 (count - len) / sizeof(u32));
/*
 * Consume data from the rx FIFO by advancing the tail index (by the
 * elided @count argument), wrapping at pipe->length, then publishing the
 * new tail back to MSG RAM so the remote can reuse the space.
 */
254 static void glink_rpm_rx_advance(struct glink_rpm *glink,
257 struct glink_rpm_pipe *pipe = &glink->rx_pipe;
260 tail = readl(pipe->tail);
/* Manual modulo: tail advanced past the end wraps to the ring start */
263 if (tail >= pipe->length)
264 tail -= pipe->length;
266 writel(tail, pipe->tail);
/*
 * Free space remaining in the tx FIFO.  Mirror of glink_rpm_rx_avail():
 * the visible return covers the wrapped case; the straight-line branch
 * is elided in this extract.
 */
269 static size_t glink_rpm_tx_avail(struct glink_rpm *glink)
271 struct glink_rpm_pipe *pipe = &glink->tx_pipe;
275 head = readl(pipe->head);
276 tail = readl(pipe->tail);
279 return pipe->length - head + tail;
/*
 * Write @count bytes of @data into the tx FIFO starting at the (elided)
 * @head argument, splitting the copy on wrap-around; returns the updated
 * head index.  The caller commits the transfer by writel()ing the
 * returned head (see glink_rpm_tx()), so partial writes are never
 * observed by the remote.
 */
284 static unsigned int glink_rpm_tx_write(struct glink_rpm *glink,
286 const void *data, size_t count)
288 struct glink_rpm_pipe *pipe = &glink->tx_pipe;
/* First leg: from head to the end of the ring */
291 len = min_t(size_t, count, pipe->length - head);
293 __iowrite32_copy(pipe->fifo + head, data,
/* Second leg: remainder wrapped to the ring start */
298 __iowrite32_copy(pipe->fifo, data + len,
299 (count - len) / sizeof(u32));
/* Manual modulo on the advanced head */
303 if (head >= pipe->length)
304 head -= pipe->length;
/*
 * Transmit one message (header @hdr of @hlen bytes plus optional payload
 * @data of @dlen bytes) through the tx FIFO, then ring the remote's
 * doorbell via the mailbox channel.  @wait selects whether to block
 * until FIFO space is available (the wait/busy path inside the loop is
 * elided in this extract).  Serialized by glink->tx_lock.
 * Returns 0 on success or a negative errno (e.g. from the interruptible
 * lock, or the elided -EINVAL/-EAGAIN paths).
 */
309 static int glink_rpm_tx(struct glink_rpm *glink,
310 const void *hdr, size_t hlen,
311 const void *data, size_t dlen, bool wait)
313 struct glink_rpm_pipe *pipe = &glink->tx_pipe;
315 unsigned int tlen = hlen + dlen;
318 /* Reject packets that are too big */
319 if (tlen >= glink->tx_pipe.length)
/* RPM GLINK requires 8-byte aligned writes into the FIFO */
322 if (WARN(tlen % 8, "Unaligned TX request"))
325 ret = mutex_lock_interruptible(&glink->tx_lock);
/* Spin/wait until the whole message fits (wait handling elided) */
329 while (glink_rpm_tx_avail(glink) < tlen) {
338 head = readl(pipe->head);
339 head = glink_rpm_tx_write(glink, head, hdr, hlen);
340 head = glink_rpm_tx_write(glink, head, data, dlen);
/* Publishing the head makes the message visible to the remote */
341 writel(head, pipe->head);
/* Kick the remote; txdone immediately since there is no tx-ack */
343 mbox_send_message(glink->mbox_chan, NULL);
344 mbox_client_txdone(glink->mbox_chan, 0);
347 mutex_unlock(&glink->tx_lock);
352 static int glink_rpm_send_version(struct glink_rpm *glink)
354 struct glink_msg msg;
356 msg.cmd = cpu_to_le16(RPM_CMD_VERSION);
357 msg.param1 = cpu_to_le16(1);
358 msg.param2 = cpu_to_le32(GLINK_FEATURE_INTENTLESS);
360 return glink_rpm_tx(glink, &msg, sizeof(msg), NULL, 0, true);
363 static void glink_rpm_send_version_ack(struct glink_rpm *glink)
365 struct glink_msg msg;
367 msg.cmd = cpu_to_le16(RPM_CMD_VERSION_ACK);
368 msg.param1 = cpu_to_le16(1);
369 msg.param2 = cpu_to_le32(0);
371 glink_rpm_tx(glink, &msg, sizeof(msg), NULL, 0, true);
374 static void glink_rpm_send_open_ack(struct glink_rpm *glink,
375 struct glink_channel *channel)
377 struct glink_msg msg;
379 msg.cmd = cpu_to_le16(RPM_CMD_OPEN_ACK);
380 msg.param1 = cpu_to_le16(channel->rcid);
381 msg.param2 = cpu_to_le32(0);
383 glink_rpm_tx(glink, &msg, sizeof(msg), NULL, 0, true);
387 * glink_rpm_send_open_req() - send a RPM_CMD_OPEN request to the remote
391 * Allocates a local channel id and sends a RPM_CMD_OPEN message to the remote.
392 * Will return with refcount held, regardless of outcome.
394 * Returns 0 on success, negative errno otherwise.
396 static int glink_rpm_send_open_req(struct glink_rpm *glink,
397 struct glink_channel *channel)
/* On-stack request: glink_msg header followed by the channel name */
400 struct glink_msg msg;
401 u8 name[GLINK_NAME_SIZE];
/* name_len includes the NUL terminator; payload is padded to 8 bytes */
403 int name_len = strlen(channel->name) + 1;
404 int req_len = ALIGN(sizeof(req.msg) + name_len, 8);
/* Reference held for the lcids entry; see "refcount held" note above */
407 kref_get(&channel->refcount);
409 mutex_lock(&glink->idr_lock);
/* Cyclic allocation avoids quick reuse of recently-closed lcids */
410 ret = idr_alloc_cyclic(&glink->lcids, channel,
411 RPM_GLINK_CID_MIN, RPM_GLINK_CID_MAX, GFP_KERNEL);
412 mutex_unlock(&glink->idr_lock);
418 req.msg.cmd = cpu_to_le16(RPM_CMD_OPEN);
419 req.msg.param1 = cpu_to_le16(channel->lcid);
420 req.msg.param2 = cpu_to_le32(name_len);
421 strcpy(req.name, channel->name);
423 ret = glink_rpm_tx(glink, &req, req_len, NULL, 0, true);
/* Error path: undo the lcid registration if the tx failed */
430 mutex_lock(&glink->idr_lock);
431 idr_remove(&glink->lcids, channel->lcid);
433 mutex_unlock(&glink->idr_lock);
438 static void glink_rpm_send_close_req(struct glink_rpm *glink,
439 struct glink_channel *channel)
441 struct glink_msg req;
443 req.cmd = cpu_to_le16(RPM_CMD_CLOSE);
444 req.param1 = cpu_to_le16(channel->lcid);
447 glink_rpm_tx(glink, &req, sizeof(req), NULL, 0, true);
450 static void glink_rpm_send_close_ack(struct glink_rpm *glink, unsigned int rcid)
452 struct glink_msg req;
454 req.cmd = cpu_to_le16(RPM_CMD_CLOSE_ACK);
455 req.param1 = cpu_to_le16(rcid);
458 glink_rpm_tx(glink, &req, sizeof(req), NULL, 0, true);
/*
 * Defer an incoming control message from IRQ context to process context:
 * copy the glink_msg header plus @extra payload bytes out of the rx FIFO
 * into a glink_defer_cmd, queue it on glink->rx_queue and schedule
 * glink_rpm_work() to handle it.  Runs in IRQ context, hence GFP_ATOMIC.
 */
461 static int glink_rpm_rx_defer(struct glink_rpm *glink, size_t extra)
463 struct glink_defer_cmd *dcmd;
/* Messages in the FIFO are 8-byte aligned */
465 extra = ALIGN(extra, 8);
467 if (glink_rpm_rx_avail(glink) < sizeof(struct glink_msg) + extra) {
468 dev_dbg(glink->dev, "Insufficient data in rx fifo");
/* dcmd->msg is a flexible tail: allocate room for header + payload */
472 dcmd = kzalloc(sizeof(*dcmd) + extra, GFP_ATOMIC);
476 INIT_LIST_HEAD(&dcmd->node);
478 glink_rpm_rx_peak(glink, &dcmd->msg, sizeof(dcmd->msg) + extra);
480 spin_lock(&glink->rx_lock);
481 list_add_tail(&dcmd->node, &glink->rx_queue);
482 spin_unlock(&glink->rx_lock);
484 schedule_work(&glink->rx_work);
/* Consume the message only after a private copy has been queued */
485 glink_rpm_rx_advance(glink, sizeof(dcmd->msg) + extra);
/*
 * Handle an incoming RPM_CMD_TX_DATA / RPM_CMD_TX_DATA_CONT message.
 * Peeks the data header from the rx FIFO, gathers (possibly fragmented)
 * payload into channel->buf, and when left_size reaches zero delivers
 * the completed message to the endpoint callback under recv_lock.
 * Runs in IRQ context (GFP_ATOMIC allocation, plain spin_lock).
 */
490 static int glink_rpm_rx_data(struct glink_rpm *glink, size_t avail)
492 struct glink_channel *channel;
494 struct glink_msg msg;
498 unsigned int chunk_size;
499 unsigned int left_size;
502 if (avail < sizeof(hdr)) {
503 dev_dbg(glink->dev, "Not enough data in fifo\n");
507 glink_rpm_rx_peak(glink, &hdr, sizeof(hdr));
508 chunk_size = le32_to_cpu(hdr.chunk_size);
509 left_size = le32_to_cpu(hdr.left_size);
/* Wait for the full chunk to arrive before consuming anything */
511 if (avail < sizeof(hdr) + chunk_size) {
512 dev_dbg(glink->dev, "Payload not yet in fifo\n");
516 if (WARN(chunk_size % 4, "Incoming data must be word aligned\n"))
519 rcid = le16_to_cpu(hdr.msg.param1);
520 channel = idr_find(&glink->rcids, rcid);
522 dev_dbg(glink->dev, "Data on non-existing channel\n");
524 /* Drop the message */
525 glink_rpm_rx_advance(glink, ALIGN(sizeof(hdr) + chunk_size, 8));
529 /* Might have an ongoing, fragmented, message to append */
/* First fragment: size the buffer for this chunk plus what remains */
531 channel->buf = kmalloc(chunk_size + left_size, GFP_ATOMIC);
535 channel->buf_size = chunk_size + left_size;
536 channel->buf_offset = 0;
539 glink_rpm_rx_advance(glink, sizeof(hdr));
/* Guard against a header that over-promises relative to our buffer */
541 if (channel->buf_size - channel->buf_offset < chunk_size) {
542 dev_err(glink->dev, "Insufficient space in input buffer\n");
544 /* The packet header lied, drop payload */
545 glink_rpm_rx_advance(glink, chunk_size);
549 glink_rpm_rx_peak(glink, channel->buf + channel->buf_offset, chunk_size);
550 channel->buf_offset += chunk_size;
552 /* Handle message when no fragments remain to be received */
554 spin_lock(&channel->recv_lock);
555 if (channel->ept.cb) {
556 channel->ept.cb(channel->ept.rpdev,
562 spin_unlock(&channel->recv_lock);
/* Reassembly buffer consumed; reset for the next message */
566 channel->buf_size = 0;
569 /* Each message starts at 8 byte aligned address */
570 glink_rpm_rx_advance(glink, ALIGN(chunk_size, 8));
/*
 * Handle RPM_CMD_OPEN_ACK: look up the locally-initiated channel by
 * @lcid and complete its open_ack so glink_rpm_create_local() /
 * glink_rpm_create_remote() can proceed.  Logs and (in the elided
 * branch) errors out if the lcid is unknown.
 */
575 static int glink_rpm_rx_open_ack(struct glink_rpm *glink, unsigned int lcid)
577 struct glink_channel *channel;
579 channel = idr_find(&glink->lcids, lcid);
581 dev_err(glink->dev, "Invalid open ack packet\n");
585 complete(&channel->open_ack);
/*
 * IRQ handler: drain the rx FIFO one message at a time (the enclosing
 * loop is elided in this extract).  Lightweight commands are handled
 * inline; anything that may sleep is deferred to glink_rpm_work() via
 * glink_rpm_rx_defer().
 */
590 static irqreturn_t glink_rpm_intr(int irq, void *data)
592 struct glink_rpm *glink = data;
593 struct glink_msg msg;
601 avail = glink_rpm_rx_avail(glink);
/* Stop once less than a full header remains */
602 if (avail < sizeof(msg))
605 glink_rpm_rx_peak(glink, &msg, sizeof(msg));
607 cmd = le16_to_cpu(msg.cmd);
608 param1 = le16_to_cpu(msg.param1);
609 param2 = le32_to_cpu(msg.param2);
612 case RPM_CMD_VERSION:
613 case RPM_CMD_VERSION_ACK:
615 case RPM_CMD_CLOSE_ACK:
/* No payload beyond the header for these commands */
616 ret = glink_rpm_rx_defer(glink, 0);
618 case RPM_CMD_OPEN_ACK:
/* Handled inline: only completes a completion, cannot sleep */
619 ret = glink_rpm_rx_open_ack(glink, param1);
620 glink_rpm_rx_advance(glink, ALIGN(sizeof(msg), 8));
/* (elided RPM_CMD_OPEN case) param2 carries the name length */
623 ret = glink_rpm_rx_defer(glink, param2);
625 case RPM_CMD_TX_DATA:
626 case RPM_CMD_TX_DATA_CONT:
627 ret = glink_rpm_rx_data(glink, avail);
629 case RPM_CMD_READ_NOTIF:
630 glink_rpm_rx_advance(glink, ALIGN(sizeof(msg), 8));
/* Remote asked to be notified when we have read; ring the doorbell */
632 mbox_send_message(glink->mbox_chan, NULL);
633 mbox_client_txdone(glink->mbox_chan, 0);
638 dev_err(glink->dev, "unhandled rx cmd: %d\n", cmd);
650 /* Locally initiated rpmsg_create_ept */
/*
 * Open a channel from our side: allocate it, send an open request, then
 * wait (5s each) for the remote's open-ack and its own open request,
 * finally acking that request.  On timeout the lcid registration and
 * both channel references are unwound and ERR_PTR(-ETIMEDOUT) returned.
 */
651 static struct glink_channel *glink_rpm_create_local(struct glink_rpm *glink,
654 struct glink_channel *channel;
657 channel = glink_rpm_alloc_channel(glink, name);
659 return ERR_CAST(channel);
661 ret = glink_rpm_send_open_req(glink, channel);
663 goto release_channel;
/* Handshake step 1: remote acks our open request */
665 ret = wait_for_completion_timeout(&channel->open_ack, 5 * HZ);
/* Handshake step 2: remote sends its own open request */
669 ret = wait_for_completion_timeout(&channel->open_req, 5 * HZ);
673 glink_rpm_send_open_ack(glink, channel);
/* --- timeout/error unwind below --- */
678 /* glink_rpm_send_open_req() did register the channel in lcids*/
679 mutex_lock(&glink->idr_lock);
680 idr_remove(&glink->lcids, channel->lcid);
681 mutex_unlock(&glink->idr_lock);
684 /* Release glink_rpm_send_open_req() reference */
685 kref_put(&channel->refcount, glink_rpm_channel_release);
686 /* Release glink_rpm_alloc_channel() reference */
687 kref_put(&channel->refcount, glink_rpm_channel_release);
689 return ERR_PTR(-ETIMEDOUT);
692 /* Remote initiated rpmsg_create_ept */
/*
 * Complete a remote-initiated open: ack the remote's request, send our
 * own open request and wait (5s) for its ack.  On timeout, a close
 * request undoes our open-ack and the open_req reference is dropped.
 */
693 static int glink_rpm_create_remote(struct glink_rpm *glink,
694 struct glink_channel *channel)
698 glink_rpm_send_open_ack(glink, channel);
700 ret = glink_rpm_send_open_req(glink, channel);
704 ret = wait_for_completion_timeout(&channel->open_ack, 5 * HZ);
/* --- error unwind below --- */
714 * Send a close request to "undo" our open-ack. The close-ack will
715 * release the last reference.
717 glink_rpm_send_close_req(glink, channel);
719 /* Release glink_rpm_send_open_req() reference */
720 kref_put(&channel->refcount, glink_rpm_channel_release);
/*
 * rpmsg_device_ops.create_ept implementation.  Reuses an existing
 * remote-announced channel matching chinfo.name (finishing the handshake
 * via glink_rpm_create_remote()), otherwise opens a fresh one through
 * glink_rpm_create_local().  Binds the endpoint ops and (in elided
 * lines) the cb/priv before returning the endpoint.
 */
725 static struct rpmsg_endpoint *glink_rpm_create_ept(struct rpmsg_device *rpdev,
726 rpmsg_rx_cb_t cb, void *priv,
727 struct rpmsg_channel_info chinfo)
729 struct glink_channel *parent = to_glink_channel(rpdev->ept);
730 struct glink_channel *channel;
731 struct glink_rpm *glink = parent->glink;
732 struct rpmsg_endpoint *ept;
733 const char *name = chinfo.name;
/* Look for a half-open channel already announced by the remote */
737 idr_for_each_entry(&glink->rcids, channel, cid) {
738 if (!strcmp(channel->name, name))
/* Not found: initiate the open from our side */
743 channel = glink_rpm_create_local(glink, name);
/* Found: complete the remote-initiated handshake */
747 ret = glink_rpm_create_remote(glink, channel);
756 ept->ops = &glink_endpoint_ops;
/*
 * rpmsg_endpoint_ops.destroy_ept implementation.  Clears the rx callback
 * under recv_lock (so no delivery races with glink_rpm_rx_data()),
 * detaches any rpdev and asks the remote to close; final teardown
 * happens when the close-ack arrives (glink_rpm_rx_close_ack()).
 */
761 static void glink_rpm_destroy_ept(struct rpmsg_endpoint *ept)
763 struct glink_channel *channel = to_glink_channel(ept);
764 struct glink_rpm *glink = channel->glink;
/* _irqsave: recv_lock is also taken from the IRQ rx path */
767 spin_lock_irqsave(&channel->recv_lock, flags);
768 channel->ept.cb = NULL;
769 spin_unlock_irqrestore(&channel->recv_lock, flags);
771 /* Decouple the potential rpdev from the channel */
772 channel->rpdev = NULL;
774 glink_rpm_send_close_req(glink, channel);
/*
 * Common send path for glink_rpm_send()/glink_rpm_trysend(): wraps the
 * payload in a TX_DATA request (single chunk, left_size 0) and hands it
 * to glink_rpm_tx().  @wait selects blocking vs. try semantics.
 */
777 static int __glink_rpm_send(struct glink_channel *channel,
778 void *data, int len, bool wait)
780 struct glink_rpm *glink = channel->glink;
782 struct glink_msg msg;
/* Hardware/protocol constraint, same as the tx FIFO alignment rule */
787 if (WARN(len % 8, "RPM GLINK expects 8 byte aligned messages\n"))
790 req.msg.cmd = cpu_to_le16(RPM_CMD_TX_DATA);
791 req.msg.param1 = cpu_to_le16(channel->lcid);
792 req.msg.param2 = cpu_to_le32(channel->rcid);
/* Whole payload in one chunk; nothing left to follow */
793 req.chunk_size = cpu_to_le32(len);
794 req.left_size = cpu_to_le32(0);
796 return glink_rpm_tx(glink, &req, sizeof(req), data, len, wait);
799 static int glink_rpm_send(struct rpmsg_endpoint *ept, void *data, int len)
801 struct glink_channel *channel = to_glink_channel(ept);
803 return __glink_rpm_send(channel, data, len, true);
806 static int glink_rpm_trysend(struct rpmsg_endpoint *ept, void *data, int len)
808 struct glink_channel *channel = to_glink_channel(ept);
810 return __glink_rpm_send(channel, data, len, false);
814 * Finds the device_node for the glink child interested in this channel.
/*
 * Walk @node's available children and return the one whose
 * "qcom,glink-channels" property matches @channel (elided parameter).
 * The elided paths presumably continue on property-read failure and
 * return NULL when nothing matches — confirm against upstream.
 */
816 static struct device_node *glink_rpm_match_channel(struct device_node *node,
819 struct device_node *child;
824 for_each_available_child_of_node(node, child) {
825 key = "qcom,glink-channels";
826 ret = of_property_read_string(child, key, &name);
830 if (strcmp(name, channel) == 0)
837 static const struct rpmsg_device_ops glink_device_ops = {
838 .create_ept = glink_rpm_create_ept,
841 static const struct rpmsg_endpoint_ops glink_endpoint_ops = {
842 .destroy_ept = glink_rpm_destroy_ept,
843 .send = glink_rpm_send,
844 .trysend = glink_rpm_trysend,
/*
 * struct device release callback for the rpmsg devices created in
 * glink_rpm_rx_open(): decouples the rpdev from its channel (the kfree
 * of the rpdev itself is elided in this extract).
 */
847 static void glink_rpm_rpdev_release(struct device *dev)
849 struct rpmsg_device *rpdev = to_rpmsg_device(dev);
850 struct glink_channel *channel = to_glink_channel(rpdev->ept);
852 channel->rpdev = NULL;
/*
 * Handle a remote-initiated RPM_CMD_OPEN for channel @name with remote
 * id @rcid.  If we already sent an open request for this name the
 * existing channel is reused; otherwise a new channel is allocated and
 * an rpmsg device is registered for it (matched to a DT child via
 * glink_rpm_match_channel()).  Completes channel->open_req either way.
 */
856 static int glink_rpm_rx_open(struct glink_rpm *glink, unsigned int rcid,
859 struct glink_channel *channel;
860 struct rpmsg_device *rpdev;
861 bool create_device = false;
/* Did we already initiate an open for this name? Then reuse it. */
865 idr_for_each_entry(&glink->lcids, channel, lcid) {
866 if (!strcmp(channel->name, name))
871 channel = glink_rpm_alloc_channel(glink, name);
873 return PTR_ERR(channel);
875 /* The opening dance was initiated by the remote */
876 create_device = true;
879 mutex_lock(&glink->idr_lock);
/* Pin the exact rcid the remote chose: range [rcid, rcid+1) */
880 ret = idr_alloc(&glink->rcids, channel, rcid, rcid + 1, GFP_KERNEL);
882 dev_err(glink->dev, "Unable to insert channel into rcid list\n");
883 mutex_unlock(&glink->idr_lock);
887 mutex_unlock(&glink->idr_lock);
/* Unblocks a glink_rpm_create_local() waiting for the remote's open */
889 complete(&channel->open_req);
892 rpdev = kzalloc(sizeof(*rpdev), GFP_KERNEL);
898 rpdev->ept = &channel->ept;
899 strncpy(rpdev->id.name, name, RPMSG_NAME_SIZE);
900 rpdev->src = RPMSG_ADDR_ANY;
901 rpdev->dst = RPMSG_ADDR_ANY;
902 rpdev->ops = &glink_device_ops;
904 rpdev->dev.of_node = glink_rpm_match_channel(glink->dev->of_node, name);
905 rpdev->dev.parent = glink->dev;
906 rpdev->dev.release = glink_rpm_rpdev_release;
908 ret = rpmsg_register_device(rpdev);
912 channel->rpdev = rpdev;
/* --- error unwind below: drop the rcid entry and our reference --- */
920 mutex_lock(&glink->idr_lock);
921 idr_remove(&glink->rcids, channel->rcid);
923 mutex_unlock(&glink->idr_lock);
925 /* Release the reference, iff we took it */
927 kref_put(&channel->refcount, glink_rpm_channel_release);
/*
 * Handle a remote-initiated RPM_CMD_CLOSE for @rcid: unregister the
 * associated rpmsg device (if one was created), ack the close, drop the
 * rcids entry and release the channel reference it held.
 */
932 static void glink_rpm_rx_close(struct glink_rpm *glink, unsigned int rcid)
934 struct rpmsg_channel_info chinfo;
935 struct glink_channel *channel;
937 channel = idr_find(&glink->rcids, rcid);
938 if (WARN(!channel, "close request on unknown channel\n"))
941 if (channel->rpdev) {
/* Rebuild the chinfo rpmsg core uses to locate the device */
942 strncpy(chinfo.name, channel->name, sizeof(chinfo.name));
943 chinfo.src = RPMSG_ADDR_ANY;
944 chinfo.dst = RPMSG_ADDR_ANY;
946 rpmsg_unregister_device(glink->dev, &chinfo);
949 glink_rpm_send_close_ack(glink, channel->rcid);
951 mutex_lock(&glink->idr_lock);
952 idr_remove(&glink->rcids, channel->rcid);
954 mutex_unlock(&glink->idr_lock);
/* Drop the reference held on behalf of the rcids entry */
956 kref_put(&channel->refcount, glink_rpm_channel_release);
/*
 * Handle RPM_CMD_CLOSE_ACK for a close we initiated: remove the channel
 * from lcids and drop the reference taken in glink_rpm_send_open_req().
 */
959 static void glink_rpm_rx_close_ack(struct glink_rpm *glink, unsigned int lcid)
961 struct glink_channel *channel;
963 channel = idr_find(&glink->lcids, lcid)
964 if (WARN(!channel, "close ack on unknown channel\n"))
967 mutex_lock(&glink->idr_lock);
968 idr_remove(&glink->lcids, channel->lcid);
970 mutex_unlock(&glink->idr_lock);
972 kref_put(&channel->refcount, glink_rpm_channel_release);
/*
 * Process-context worker draining glink->rx_queue (filled by
 * glink_rpm_rx_defer() from IRQ context).  Dequeues one deferred
 * command at a time under rx_lock and dispatches on msg->cmd; the
 * surrounding loop and the kfree of each dcmd are elided here.
 */
975 static void glink_rpm_work(struct work_struct *work)
977 struct glink_rpm *glink = container_of(work, struct glink_rpm, rx_work);
978 struct glink_defer_cmd *dcmd;
979 struct glink_msg *msg;
986 spin_lock_irqsave(&glink->rx_lock, flags);
987 if (list_empty(&glink->rx_queue)) {
988 spin_unlock_irqrestore(&glink->rx_lock, flags);
991 dcmd = list_first_entry(&glink->rx_queue, struct glink_defer_cmd, node);
992 list_del(&dcmd->node);
993 spin_unlock_irqrestore(&glink->rx_lock, flags);
996 cmd = le16_to_cpu(msg->cmd);
997 param1 = le16_to_cpu(msg->param1);
998 param2 = le32_to_cpu(msg->param2);
1001 case RPM_CMD_VERSION:
/* Version negotiation: simply ack what the remote advertised */
1002 glink_rpm_send_version_ack(glink);
1004 case RPM_CMD_VERSION_ACK:
/* (elided RPM_CMD_OPEN case) msg->data carries the channel name */
1007 glink_rpm_rx_open(glink, param1, msg->data);
1010 glink_rpm_rx_close(glink, param1);
1012 case RPM_CMD_CLOSE_ACK:
1013 glink_rpm_rx_close_ack(glink, param1);
1016 WARN(1, "Unknown defer object %d\n", cmd);
/*
 * Parse the table-of-contents that the RPM firmware places in the last
 * RPM_TOC_SIZE bytes of MSG RAM, locating the ap2r (tx) and r2ap (rx)
 * FIFO descriptors.  Each FIFO region starts with a tail and head index
 * word followed by the data ring.  Returns 0 on success; the elided
 * error paths return a negative errno after freeing @buf.
 */
1024 static int glink_rpm_parse_toc(struct device *dev,
1025 void __iomem *msg_ram,
1026 size_t msg_ram_size,
1027 struct glink_rpm_pipe *rx,
1028 struct glink_rpm_pipe *tx)
1030 struct rpm_toc *toc;
/* Snapshot the TOC into normal memory; MSG RAM needs word accesses */
1038 buf = kzalloc(RPM_TOC_SIZE, GFP_KERNEL);
1042 __ioread32_copy(buf, msg_ram + msg_ram_size - RPM_TOC_SIZE,
1043 RPM_TOC_SIZE / sizeof(u32));
1047 if (le32_to_cpu(toc->magic) != RPM_TOC_MAGIC) {
1048 dev_err(dev, "RPM TOC has invalid magic\n");
/* Clamp to what physically fits in RPM_TOC_SIZE */
1052 num_entries = le32_to_cpu(toc->count);
1053 if (num_entries > RPM_TOC_MAX_ENTRIES) {
1054 dev_err(dev, "Invalid number of toc entries\n");
1058 for (i = 0; i < num_entries; i++) {
1059 id = le32_to_cpu(toc->entries[i].id);
1060 offset = le32_to_cpu(toc->entries[i].offset);
1061 size = le32_to_cpu(toc->entries[i].size);
/* Reject entries pointing outside MSG RAM */
1063 if (offset > msg_ram_size || offset + size > msg_ram_size) {
1064 dev_err(dev, "TOC entry with invalid size\n");
1069 case RPM_RX_FIFO_ID:
/* Layout: [tail u32][head u32][data ring...] */
1072 rx->tail = msg_ram + offset;
1073 rx->head = msg_ram + offset + sizeof(u32);
1074 rx->fifo = msg_ram + offset + 2 * sizeof(u32);
1076 case RPM_TX_FIFO_ID:
1079 tx->tail = msg_ram + offset;
1080 tx->head = msg_ram + offset + sizeof(u32);
1081 tx->fifo = msg_ram + offset + 2 * sizeof(u32);
/* Both descriptors are mandatory */
1086 if (!rx->fifo || !tx->fifo) {
1087 dev_err(dev, "Unable to find rx and tx descriptors\n");
/*
 * Platform probe: set up driver state, acquire the IPC mailbox channel,
 * map the "qcom,rpm-msg-ram" region, parse its TOC for the FIFO
 * descriptors, reset our FIFO indices, install the IRQ handler and kick
 * off version negotiation with the remote.  Most error-path returns are
 * elided in this extract.
 */
1099 static int glink_rpm_probe(struct platform_device *pdev)
1101 struct glink_rpm *glink;
1102 struct device_node *np;
1103 void __iomem *msg_ram;
1104 size_t msg_ram_size;
1105 struct device *dev = &pdev->dev;
1110 glink = devm_kzalloc(dev, sizeof(*glink), GFP_KERNEL);
1116 mutex_init(&glink->tx_lock);
1117 spin_lock_init(&glink->rx_lock);
1118 INIT_LIST_HEAD(&glink->rx_queue);
1119 INIT_WORK(&glink->rx_work, glink_rpm_work);
1121 mutex_init(&glink->idr_lock);
1122 idr_init(&glink->lcids);
1123 idr_init(&glink->rcids);
/* Doorbell towards the RPM, via the mailbox framework */
1125 glink->mbox_client.dev = &pdev->dev;
1126 glink->mbox_chan = mbox_request_channel(&glink->mbox_client, 0);
1127 if (IS_ERR(glink->mbox_chan)) {
/* -EPROBE_DEFER is routine during boot; don't spam the log for it */
1128 if (PTR_ERR(glink->mbox_chan) != -EPROBE_DEFER)
1129 dev_err(&pdev->dev, "failed to acquire IPC channel\n");
1130 return PTR_ERR(glink->mbox_chan);
1133 np = of_parse_phandle(dev->of_node, "qcom,rpm-msg-ram", 0);
1134 ret = of_address_to_resource(np, 0, &r);
1139 msg_ram = devm_ioremap(dev, r.start, resource_size(&r));
1140 msg_ram_size = resource_size(&r);
1144 ret = glink_rpm_parse_toc(dev, msg_ram, msg_ram_size,
1145 &glink->rx_pipe, &glink->tx_pipe);
1149 writel(0, glink->tx_pipe.head);
1150 writel(0, glink->rx_pipe.tail);
1152 irq = platform_get_irq(pdev, 0);
1153 ret = devm_request_irq(dev, irq,
/* IRQF_NO_SUSPEND: RPM notifications must be serviced during suspend */
1155 IRQF_NO_SUSPEND | IRQF_SHARED,
1156 "glink-rpm", glink);
1158 dev_err(dev, "Failed to request IRQ\n");
/* Start the GLINK handshake with the remote */
1164 ret = glink_rpm_send_version(glink);
1168 platform_set_drvdata(pdev, glink);
/*
 * device_for_each_child() callback used during driver removal: tear
 * down one child rpmsg device.  Always returns 0 so iteration continues.
 */
static int glink_rpm_remove_device(struct device *dev, void *data)
{
	device_unregister(dev);

	return 0;
}
/*
 * Platform remove: quiesce the IRQ and deferred work, unregister all
 * child rpmsg devices, release channels still waiting for a close-ack
 * and destroy both channel-id idrs.
 */
1180 static int glink_rpm_remove(struct platform_device *pdev)
1182 struct glink_rpm *glink = platform_get_drvdata(pdev);
1183 struct glink_channel *channel;
/* Stop new rx events, then flush anything already queued */
1187 disable_irq(glink->irq);
1188 cancel_work_sync(&glink->rx_work);
1190 ret = device_for_each_child(glink->dev, NULL, glink_rpm_remove_device);
1192 dev_warn(glink->dev, "Can't remove GLINK devices: %d\n", ret);
1194 /* Release any defunct local channels, waiting for close-ack */
1195 idr_for_each_entry(&glink->lcids, channel, cid)
1196 kref_put(&channel->refcount, glink_rpm_channel_release);
1198 idr_destroy(&glink->lcids);
1199 idr_destroy(&glink->rcids);
1204 static const struct of_device_id glink_rpm_of_match[] = {
1205 { .compatible = "qcom,glink-rpm" },
1208 MODULE_DEVICE_TABLE(of, glink_rpm_of_match);
1210 static struct platform_driver glink_rpm_driver = {
1211 .probe = glink_rpm_probe,
1212 .remove = glink_rpm_remove,
1214 .name = "qcom_glink_rpm",
1215 .of_match_table = glink_rpm_of_match,
1219 static int __init glink_rpm_init(void)
1221 return platform_driver_register(&glink_rpm_driver);
1223 subsys_initcall(glink_rpm_init);
1225 static void __exit glink_rpm_exit(void)
1227 platform_driver_unregister(&glink_rpm_driver);
1229 module_exit(glink_rpm_exit);
1232 MODULE_DESCRIPTION("Qualcomm GLINK RPM driver");
1233 MODULE_LICENSE("GPL v2");