1 /* arch/arm/mach-msm/smd_rpcrouter.c
2
3 * Copyright (C) 2007 Google, Inc.
4 * Copyright (c) 2007-2009 QUALCOMM Incorporated.
5 * Author: San Mehat <san@android.com>
6
7 * This software is licensed under the terms of the GNU General Public
8 * License version 2, as published by the Free Software Foundation, and
9 * may be copied, distributed, and modified under those terms.
10
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 */
18 /* TODO: handle cases where smd_write() will tempfail due to full fifo */
19 /* TODO: thread priority? schedule a work to bump it? */
20 /* TODO: maybe make server_list_lock a mutex */
21 /* TODO: pool fragments to avoid kmalloc/kfree churn */
23 #include <linux/module.h>
24 #include <linux/kernel.h>
25 #include <linux/string.h>
26 #include <linux/errno.h>
27 #include <linux/cdev.h>
28 #include <linux/init.h>
29 #include <linux/device.h>
30 #include <linux/types.h>
31 #include <linux/delay.h>
33 #include <linux/err.h>
34 #include <linux/sched.h>
35 #include <linux/poll.h>
36 #include <asm/uaccess.h>
37 #include <asm/byteorder.h>
38 #include <linux/platform_device.h>
39 #include <linux/uaccess.h>
41 #include <mach/msm_smd.h>
42 #include "smd_rpcrouter.h"
44 #define TRACE_R2R_MSG 0
45 #define TRACE_R2R_RAW 0
46 #define TRACE_RPC_MSG 0
47 #define TRACE_NOTIFY_MSG 0
49 #define MSM_RPCROUTER_DEBUG 0
50 #define MSM_RPCROUTER_DEBUG_PKT 0
51 #define MSM_RPCROUTER_R2R_DEBUG 0
52 #define DUMP_ALL_RECEIVED_HEADERS 0
54 #define DIAG(x...) printk("[RR] ERROR " x)
56 #if MSM_RPCROUTER_DEBUG
57 #define D(x...) printk(x)
59 #define D(x...) do {} while (0)
63 #define RR(x...) printk("[RR] "x)
65 #define RR(x...) do {} while (0)
69 #define IO(x...) printk("[RPC] "x)
71 #define IO(x...) do {} while (0)
75 #define NTFY(x...) printk(KERN_ERR "[NOTIFY] "x)
77 #define NTFY(x...) do {} while (0)
80 static LIST_HEAD(local_endpoints);
81 static LIST_HEAD(remote_endpoints);
83 static LIST_HEAD(server_list);
85 static smd_channel_t *smd_channel;
86 static int initialized;
87 static wait_queue_head_t newserver_wait;
88 static wait_queue_head_t smd_wait;
90 static DEFINE_SPINLOCK(local_endpoints_lock);
91 static DEFINE_SPINLOCK(remote_endpoints_lock);
92 static DEFINE_SPINLOCK(server_list_lock);
93 static DEFINE_SPINLOCK(smd_lock);
95 static struct workqueue_struct *rpcrouter_workqueue;
96 static int rpcrouter_need_len;
98 static atomic_t next_xid = ATOMIC_INIT(1);
99 static uint8_t next_pacmarkid;
101 static void do_read_data(struct work_struct *work);
102 static void do_create_pdevs(struct work_struct *work);
103 static void do_create_rpcrouter_pdev(struct work_struct *work);
105 static DECLARE_WORK(work_read_data, do_read_data);
106 static DECLARE_WORK(work_create_pdevs, do_create_pdevs);
107 static DECLARE_WORK(work_create_rpcrouter_pdev, do_create_rpcrouter_pdev);
109 #define RR_STATE_IDLE 0
110 #define RR_STATE_HEADER 1
111 #define RR_STATE_BODY 2
112 #define RR_STATE_ERROR 3
115 struct rr_packet *pkt;
117 uint32_t state; /* current assembly state */
118 uint32_t count; /* bytes needed in this state */
121 static struct rr_context the_rr_context;
123 static struct platform_device rpcrouter_pdev = {
124 .name = "oncrpc_router",
/*
 * Send a router-to-router control message to the remote router over the
 * shared SMD channel.  Only a HELLO may be sent before the router is
 * initialized.  The write is made atomic w.r.t. other writers by holding
 * smd_lock; if the channel lacks room for header + payload, the lock is
 * dropped and re-taken in a poll loop until space appears.
 *
 * NOTE(review): the extraction dropped lines from this function (opening
 * brace, local decls for `need`/`flags`, hdr.type/dst_pid assignments,
 * return statements) -- compare against the complete source before editing.
 */
129 static int rpcrouter_send_control_msg(union rr_control_msg *msg)
131 struct rr_header hdr;
135 if (!(msg->cmd == RPCROUTER_CTRL_CMD_HELLO) && !initialized) {
136 printk(KERN_ERR "rpcrouter_send_control_msg(): Warning, "
137 "router not initialized\n");
/* Build the routing header; control traffic targets the router address. */
141 hdr.version = RPCROUTER_VERSION;
143 hdr.src_pid = RPCROUTER_PID_LOCAL;
144 hdr.src_cid = RPCROUTER_ROUTER_ADDRESS;
146 hdr.size = sizeof(*msg);
148 hdr.dst_cid = RPCROUTER_ROUTER_ADDRESS;
150 /* TODO: what if channel is full? */
152 need = sizeof(hdr) + hdr.size;
153 spin_lock_irqsave(&smd_lock, flags);
/* Busy-wait for FIFO space, releasing the lock between polls. */
154 while (smd_write_avail(smd_channel) < need) {
155 spin_unlock_irqrestore(&smd_lock, flags);
157 spin_lock_irqsave(&smd_lock, flags);
159 smd_write(smd_channel, &hdr, sizeof(hdr));
160 smd_write(smd_channel, msg, hdr.size);
161 spin_unlock_irqrestore(&smd_lock, flags);
165 static struct rr_server *rpcrouter_create_server(uint32_t pid,
170 struct rr_server *server;
174 server = kmalloc(sizeof(struct rr_server), GFP_KERNEL);
176 return ERR_PTR(-ENOMEM);
178 memset(server, 0, sizeof(struct rr_server));
184 spin_lock_irqsave(&server_list_lock, flags);
185 list_add_tail(&server->list, &server_list);
186 spin_unlock_irqrestore(&server_list_lock, flags);
188 if (pid == RPCROUTER_PID_REMOTE) {
189 rc = msm_rpcrouter_create_server_cdev(server);
195 spin_lock_irqsave(&server_list_lock, flags);
196 list_del(&server->list);
197 spin_unlock_irqrestore(&server_list_lock, flags);
/*
 * Unlink a server from the global server_list (under server_list_lock)
 * and tear down its device node.
 * NOTE(review): no kfree(server) is visible in this extracted view --
 * confirm the server memory is released in the full source (the matching
 * free appears to have been dropped by extraction).
 */
202 static void rpcrouter_destroy_server(struct rr_server *server)
206 spin_lock_irqsave(&server_list_lock, flags);
207 list_del(&server->list);
208 spin_unlock_irqrestore(&server_list_lock, flags);
209 device_destroy(msm_rpcrouter_class, server->device_number);
/*
 * Find a registered server by exact program number and version.
 * Walks server_list under server_list_lock; the early unlock inside the
 * loop precedes a (dropped-by-extraction) `return server;`.  Returns the
 * match, or NULL when no server is registered for prog:ver.
 */
213 static struct rr_server *rpcrouter_lookup_server(uint32_t prog, uint32_t ver)
215 struct rr_server *server;
218 spin_lock_irqsave(&server_list_lock, flags);
219 list_for_each_entry(server, &server_list, list) {
220 if (server->prog == prog
221 && server->vers == ver) {
222 spin_unlock_irqrestore(&server_list_lock, flags);
226 spin_unlock_irqrestore(&server_list_lock, flags);
/*
 * Find a registered server by the dev_t of its character device node.
 * Same locking pattern as rpcrouter_lookup_server(); returns NULL when
 * the device number matches no server.
 */
230 static struct rr_server *rpcrouter_lookup_server_by_dev(dev_t dev)
232 struct rr_server *server;
235 spin_lock_irqsave(&server_list_lock, flags);
236 list_for_each_entry(server, &server_list, list) {
237 if (server->device_number == dev) {
238 spin_unlock_irqrestore(&server_list_lock, flags);
242 spin_unlock_irqrestore(&server_list_lock, flags);
/*
 * Allocate and register a local RPC endpoint.  If `dev` names a
 * program/version device node (not the master router device and not
 * MKDEV(0,0)), the endpoint is pre-bound to that server's destination;
 * otherwise it is created unconnected (dst_pid = 0xffffffff).
 * The endpoint is appended to local_endpoints under its lock.
 */
246 struct msm_rpc_endpoint *msm_rpcrouter_create_local_endpoint(dev_t dev)
248 struct msm_rpc_endpoint *ept;
251 ept = kmalloc(sizeof(struct msm_rpc_endpoint), GFP_KERNEL);
254 memset(ept, 0, sizeof(struct msm_rpc_endpoint))
256 /* mark no reply outstanding */
257 ept->reply_pid = 0xffffffff;
/*
 * NOTE(review): the channel id is derived from the endpoint's kernel
 * address; casting a pointer to uint32_t truncates on 64-bit builds and
 * leaks a kernel address to the wire -- flag for any port of this code.
 */
259 ept->cid = (uint32_t) ept;
260 ept->pid = RPCROUTER_PID_LOCAL;
263 if ((dev != msm_rpcrouter_devno) && (dev != MKDEV(0, 0))) {
264 struct rr_server *srv;
266 * This is a userspace client which opened
267 * a program/ver devicenode. Bind the client
268 * to that destination
270 srv = rpcrouter_lookup_server_by_dev(dev);
271 /* TODO: bug? really? */
274 ept->dst_pid = srv->pid;
275 ept->dst_cid = srv->cid;
276 ept->dst_prog = cpu_to_be32(srv->prog);
277 ept->dst_vers = cpu_to_be32(srv->vers);
279 D("Creating local ept %p @ %08x:%08x\n", ept, srv->prog, srv->vers);
281 /* mark not connected */
282 ept->dst_pid = 0xffffffff;
283 D("Creating a master local ept %p\n", ept);
286 init_waitqueue_head(&ept->wait_q);
287 INIT_LIST_HEAD(&ept->read_q);
288 spin_lock_init(&ept->read_q_lock);
289 INIT_LIST_HEAD(&ept->incomplete);
291 spin_lock_irqsave(&local_endpoints_lock, flags);
292 list_add_tail(&ept->list, &local_endpoints);
293 spin_unlock_irqrestore(&local_endpoints_lock, flags);
/*
 * Tear down a local endpoint: announce REMOVE_CLIENT to the remote
 * router, then unlink the endpoint from local_endpoints.
 * NOTE(review): no locking around list_del and no kfree(ept) is visible
 * here -- the extraction appears to have dropped lines; verify against
 * the full source before drawing conclusions about leaks or races.
 */
297 int msm_rpcrouter_destroy_local_endpoint(struct msm_rpc_endpoint *ept)
300 union rr_control_msg msg;
302 msg.cmd = RPCROUTER_CTRL_CMD_REMOVE_CLIENT;
303 msg.cli.pid = ept->pid;
304 msg.cli.cid = ept->cid;
306 RR("x REMOVE_CLIENT id=%d:%08x\n", ept->pid, ept->cid);
307 rc = rpcrouter_send_control_msg(&msg);
311 list_del(&ept->list);
/*
 * Track a remote client: allocate an rr_remote_endpoint for channel id
 * `cid`, initialize its tx-quota wait queue and lock, and append it to
 * remote_endpoints.  The quota machinery is used by msm_rpc_write() to
 * throttle sends until the peer confirms receipt (RESUME_TX).
 */
316 static int rpcrouter_create_remote_endpoint(uint32_t cid)
318 struct rr_remote_endpoint *new_c;
321 new_c = kmalloc(sizeof(struct rr_remote_endpoint), GFP_KERNEL);
324 memset(new_c, 0, sizeof(struct rr_remote_endpoint));
/* cid assignment dropped by extraction; pid is always the remote CPU. */
327 new_c->pid = RPCROUTER_PID_REMOTE;
328 init_waitqueue_head(&new_c->quota_wait);
329 spin_lock_init(&new_c->quota_lock);
331 spin_lock_irqsave(&remote_endpoints_lock, flags);
332 list_add_tail(&new_c->list, &remote_endpoints);
333 spin_unlock_irqrestore(&remote_endpoints_lock, flags);
/*
 * Find a local endpoint by channel id.  Walks local_endpoints under its
 * lock; returns the match (via a dropped-by-extraction `return ept;`)
 * or NULL when the cid is unknown.
 */
337 static struct msm_rpc_endpoint *rpcrouter_lookup_local_endpoint(uint32_t cid)
339 struct msm_rpc_endpoint *ept;
342 spin_lock_irqsave(&local_endpoints_lock, flags);
343 list_for_each_entry(ept, &local_endpoints, list) {
344 if (ept->cid == cid) {
345 spin_unlock_irqrestore(&local_endpoints_lock, flags);
349 spin_unlock_irqrestore(&local_endpoints_lock, flags);
/*
 * Find a tracked remote endpoint by channel id.  Same pattern as the
 * local lookup; returns NULL when no remote endpoint has this cid.
 */
353 static struct rr_remote_endpoint *rpcrouter_lookup_remote_endpoint(uint32_t cid)
355 struct rr_remote_endpoint *ept;
358 spin_lock_irqsave(&remote_endpoints_lock, flags);
359 list_for_each_entry(ept, &remote_endpoints, list) {
360 if (ept->cid == cid) {
361 spin_unlock_irqrestore(&remote_endpoints_lock, flags);
365 spin_unlock_irqrestore(&remote_endpoints_lock, flags);
/*
 * Dispatch one router-to-router control message received on the SMD
 * channel.  Rejects messages whose length differs from the control-msg
 * union.  Handled commands:
 *   HELLO          -- reply HELLO, then advertise every locally
 *                     registered server as NEW_SERVER and schedule the
 *                     oncrpc_router platform device.
 *   RESUME_TX      -- reset the remote endpoint's tx quota and wake
 *                     writers blocked in msm_rpc_write().
 *   NEW_SERVER     -- create/refresh our record of a remote server,
 *                     track its remote endpoint, create pdevs, and wake
 *                     anyone waiting for a server to appear.
 *   REMOVE_SERVER  -- destroy our record of the server.
 *   REMOVE_CLIENT  -- drop the remote endpoint (remote pids only).
 * NOTE(review): several lines (switch statement, returns, kfree of the
 * removed remote endpoint) were dropped by extraction.
 */
369 static int process_control_msg(union rr_control_msg *msg, int len)
371 union rr_control_msg ctl;
372 struct rr_server *server;
373 struct rr_remote_endpoint *r_ept;
377 if (len != sizeof(*msg)) {
378 printk(KERN_ERR "rpcrouter: r2r msg size %d != %d\n",
384 case RPCROUTER_CTRL_CMD_HELLO:
/* Handshake: echo HELLO back to the remote router. */
388 memset(&ctl, 0, sizeof(ctl));
389 ctl.cmd = RPCROUTER_CTRL_CMD_HELLO;
390 rpcrouter_send_control_msg(&ctl);
394 /* Send list of servers one at a time */
395 ctl.cmd = RPCROUTER_CTRL_CMD_NEW_SERVER;
397 /* TODO: long time to hold a spinlock... */
398 spin_lock_irqsave(&server_list_lock, flags);
399 list_for_each_entry(server, &server_list, list) {
400 ctl.srv.pid = server->pid;
401 ctl.srv.cid = server->cid;
402 ctl.srv.prog = server->prog;
403 ctl.srv.vers = server->vers;
405 RR("x NEW_SERVER id=%d:%08x prog=%08x:%08x\n",
406 server->pid, server->cid,
407 server->prog, server->vers);
409 rpcrouter_send_control_msg(&ctl);
411 spin_unlock_irqrestore(&server_list_lock, flags);
413 queue_work(rpcrouter_workqueue, &work_create_rpcrouter_pdev);
416 case RPCROUTER_CTRL_CMD_RESUME_TX:
417 RR("o RESUME_TX id=%d:%08x\n", msg->cli.pid, msg->cli.cid);
419 r_ept = rpcrouter_lookup_remote_endpoint(msg->cli.cid);
422 "rpcrouter: Unable to resume client\n");
/* Peer drained its rx queue: reopen the tx window and wake writers. */
425 spin_lock_irqsave(&r_ept->quota_lock, flags);
426 r_ept->tx_quota_cntr = 0;
427 spin_unlock_irqrestore(&r_ept->quota_lock, flags);
428 wake_up(&r_ept->quota_wait);
431 case RPCROUTER_CTRL_CMD_NEW_SERVER:
432 RR("o NEW_SERVER id=%d:%08x prog=%08x:%08x\n",
433 msg->srv.pid, msg->srv.cid, msg->srv.prog, msg->srv.vers);
435 server = rpcrouter_lookup_server(msg->srv.prog, msg->srv.vers);
438 server = rpcrouter_create_server(
439 msg->srv.pid, msg->srv.cid,
440 msg->srv.prog, msg->srv.vers);
444 * XXX: Verify that its okay to add the
445 * client to our remote client list
446 * if we get a NEW_SERVER notification
448 if (!rpcrouter_lookup_remote_endpoint(msg->srv.cid)) {
449 rc = rpcrouter_create_remote_endpoint(
453 "rpcrouter:Client create"
456 schedule_work(&work_create_pdevs);
457 wake_up(&newserver_wait);
/* Known prog:vers -- either a duplicate announce or a re-homed server. */
459 if ((server->pid == msg->srv.pid) &&
460 (server->cid == msg->srv.cid)) {
461 printk(KERN_ERR "rpcrouter: Duplicate svr\n");
463 server->pid = msg->srv.pid;
464 server->cid = msg->srv.cid;
469 case RPCROUTER_CTRL_CMD_REMOVE_SERVER:
470 RR("o REMOVE_SERVER prog=%08x:%d\n",
471 msg->srv.prog, msg->srv.vers);
472 server = rpcrouter_lookup_server(msg->srv.prog, msg->srv.vers);
474 rpcrouter_destroy_server(server);
477 case RPCROUTER_CTRL_CMD_REMOVE_CLIENT:
478 RR("o REMOVE_CLIENT id=%d:%08x\n", msg->cli.pid, msg->cli.cid);
479 if (msg->cli.pid != RPCROUTER_PID_REMOTE) {
481 "rpcrouter: Denying remote removal of "
485 r_ept = rpcrouter_lookup_remote_endpoint(msg->cli.cid);
487 spin_lock_irqsave(&remote_endpoints_lock, flags);
488 list_del(&r_ept->list);
489 spin_unlock_irqrestore(&remote_endpoints_lock, flags);
493 /* Notify local clients of this event */
494 printk(KERN_ERR "rpcrouter: LOCAL NOTIFICATION NOT IMP\n");
499 RR("o UNKNOWN(%08x)\n", msg->cmd);
/*
 * Workqueue callback: register the "oncrpc_router" platform device once
 * the HELLO handshake completes (queued from process_control_msg()).
 */
506 static void do_create_rpcrouter_pdev(struct work_struct *work)
508 platform_device_register(&rpcrouter_pdev);
/*
 * Workqueue callback: create a platform device for each remote server
 * that does not yet have one (pdev_name[0] == 0).  Registration can
 * sleep, so the list lock is dropped first and the work is rescheduled
 * to rescan the list from the top -- processing at most one server per
 * invocation.
 */
511 static void do_create_pdevs(struct work_struct *work)
514 struct rr_server *server;
516 /* TODO: race if destroyed while being registered */
517 spin_lock_irqsave(&server_list_lock, flags);
518 list_for_each_entry(server, &server_list, list) {
519 if (server->pid == RPCROUTER_PID_REMOTE) {
520 if (server->pdev_name[0] == 0) {
521 spin_unlock_irqrestore(&server_list_lock,
523 msm_rpcrouter_create_server_pdev(server);
524 schedule_work(&work_create_pdevs);
529 spin_unlock_irqrestore(&server_list_lock, flags);
/*
 * SMD channel event callback.  Ignores everything except SMD_EVENT_DATA;
 * the data-handling lines (presumably waking smd_wait -- TODO confirm
 * against full source) were dropped by extraction.
 */
532 static void rpcrouter_smdnotify(void *_dev, unsigned event)
534 if (event != SMD_EVENT_DATA)
/*
 * kmalloc wrapper that never returns NULL: on failure it logs and keeps
 * retrying (retry-loop lines dropped by extraction).  Used on the
 * receive path where dropping a packet is not an option.
 */
540 static void *rr_malloc(unsigned sz)
542 void *ptr = kmalloc(sz, GFP_KERNEL);
546 printk(KERN_ERR "rpcrouter: kmalloc of %d failed, retrying...\n", sz);
548 ptr = kmalloc(sz, GFP_KERNEL);
/*
 * Read exactly `len` bytes from the SMD channel.  If enough data is
 * already buffered, reads it under smd_lock and returns; otherwise
 * records the needed length, drops the lock, and sleeps on smd_wait
 * until the channel has `len` bytes (then loops -- the enclosing
 * `for(;;)` was dropped by extraction).  Returns 0 on success.
 */
554 /* TODO: deal with channel teardown / restore */
555 static int rr_read(void *data, int len)
559 // printk("rr_read() %d\n", len);
561 spin_lock_irqsave(&smd_lock, flags);
562 if (smd_read_avail(smd_channel) >= len) {
563 rc = smd_read(smd_channel, data, len);
564 spin_unlock_irqrestore(&smd_lock, flags);
570 rpcrouter_need_len = len;
571 spin_unlock_irqrestore(&smd_lock, flags);
573 // printk("rr_read: waiting (%d)\n", len);
574 wait_event(smd_wait, smd_read_avail(smd_channel) >= len);
/* Scratch buffer for router-addressed (control) payloads. */
579 static uint32_t r2r_buf[RPCROUTER_MSGSIZE_MAX];
/*
 * Workqueue callback: pull one message off the SMD channel and route it.
 *  1. Read and validate the rr_header (version, size bound).
 *  2. Messages addressed to the router itself go to process_control_msg().
 *  3. Data messages carry a pacmark word: strip it, read the payload into
 *     a freshly allocated fragment, and look up the destination endpoint.
 *  4. Fragments are stitched into packets by pacmark MID; complete
 *     packets move to the endpoint's read_q and wake readers.
 *  5. If the sender requested confirm_rx, send RESUME_TX back.
 * Finally requeues itself to process the next message.  The trailing
 * printk is the fatal-error exit (its label/goto lines were dropped by
 * extraction, along with various braces, locals and `goto fatal` sites).
 */
581 static void do_read_data(struct work_struct *work)
583 struct rr_header hdr;
584 struct rr_packet *pkt;
585 struct rr_fragment *frag;
586 struct msm_rpc_endpoint *ept;
590 if (rr_read(&hdr, sizeof(hdr)))
594 RR("- ver=%d type=%d src=%d:%08x crx=%d siz=%d dst=%d:%08x\n",
595 hdr.version, hdr.type, hdr.src_pid, hdr.src_cid,
596 hdr.confirm_rx, hdr.size, hdr.dst_pid, hdr.dst_cid);
599 if (hdr.version != RPCROUTER_VERSION) {
600 DIAG("version %d != %d\n", hdr.version, RPCROUTER_VERSION);
603 if (hdr.size > RPCROUTER_MSGSIZE_MAX) {
604 DIAG("msg size %d > max %d\n", hdr.size, RPCROUTER_MSGSIZE_MAX);
608 if (hdr.dst_cid == RPCROUTER_ROUTER_ADDRESS) {
609 if (rr_read(r2r_buf, hdr.size))
611 process_control_msg((void*) r2r_buf, hdr.size);
615 if (hdr.size < sizeof(pm)) {
616 DIAG("runt packet (no pacmark)\n");
619 if (rr_read(&pm, sizeof(pm)))
/* Remaining bytes after the pacmark word are fragment payload. */
622 hdr.size -= sizeof(pm);
624 frag = rr_malloc(hdr.size + sizeof(*frag));
626 frag->length = hdr.size;
627 if (rr_read(frag->data, hdr.size))
630 ept = rpcrouter_lookup_local_endpoint(hdr.dst_cid);
632 DIAG("no local ept for cid %08x\n", hdr.dst_cid);
637 /* See if there is already a partial packet that matches our mid
638 * and if so, append this fragment to that packet.
640 mid = PACMARK_MID(pm);
641 list_for_each_entry(pkt, &ept->incomplete, list) {
642 if (pkt->mid == mid) {
643 pkt->last->next = frag;
645 pkt->length += frag->length;
646 if (PACMARK_LAST(pm)) {
647 list_del(&pkt->list);
648 goto packet_complete;
653 /* This mid is new -- create a packet for it, and put it on
654 * the incomplete list if this fragment is not a last fragment,
655 * otherwise put it on the read queue.
657 pkt = rr_malloc(sizeof(struct rr_packet));
660 memcpy(&pkt->hdr, &hdr, sizeof(hdr));
662 pkt->length = frag->length;
663 if (!PACMARK_LAST(pm)) {
664 list_add_tail(&pkt->list, &ept->incomplete);
669 spin_lock_irqsave(&ept->read_q_lock, flags);
670 list_add_tail(&pkt->list, &ept->read_q);
671 wake_up(&ept->wait_q);
672 spin_unlock_irqrestore(&ept->read_q_lock, flags);
675 if (hdr.confirm_rx) {
676 union rr_control_msg msg;
678 msg.cmd = RPCROUTER_CTRL_CMD_RESUME_TX;
679 msg.cli.pid = hdr.dst_pid;
680 msg.cli.cid = hdr.dst_cid;
682 RR("x RESUME_TX id=%d:%08x\n", msg.cli.pid, msg.cli.cid);
683 rpcrouter_send_control_msg(&msg);
/* Keep the pump running: schedule ourselves for the next message. */
686 queue_work(rpcrouter_workqueue, &work_read_data);
691 printk(KERN_ERR "rpc_router has died\n");
/*
 * Fill in an ONCRPC call header: fresh xid from the global counter,
 * RPC version 2, and the given prog/vers/proc -- all stored big-endian
 * as required on the wire.  Zeroes the rest of the header first.
 */
694 void msm_rpc_setup_req(struct rpc_request_hdr *hdr, uint32_t prog,
695 uint32_t vers, uint32_t proc)
697 memset(hdr, 0, sizeof(struct rpc_request_hdr));
698 hdr->xid = cpu_to_be32(atomic_add_return(1, &next_xid));
699 hdr->rpc_vers = cpu_to_be32(2);
700 hdr->prog = cpu_to_be32(prog);
701 hdr->vers = cpu_to_be32(vers);
702 hdr->procedure = cpu_to_be32(proc);
/*
 * Open an unbound local endpoint (MKDEV(0,0) means "no device node").
 * Returns ERR_PTR(-ENOMEM) when endpoint creation fails.
 */
705 struct msm_rpc_endpoint *msm_rpc_open(void)
707 struct msm_rpc_endpoint *ept;
709 ept = msm_rpcrouter_create_local_endpoint(MKDEV(0, 0));
711 return ERR_PTR(-ENOMEM);
/* Close an endpoint opened with msm_rpc_open()/msm_rpc_connect(). */
716 int msm_rpc_close(struct msm_rpc_endpoint *ept)
718 return msm_rpcrouter_destroy_local_endpoint(ept);
720 EXPORT_SYMBOL(msm_rpc_close);
/*
 * Send one RPC message (`count` bytes at `buffer`) from a local endpoint.
 * Validation: size bounds, minimum header length, and -- by snooping the
 * rpc_request_hdr -- that CALLs go to the bound prog/vers (with AMSS >=
 * 6350, version compatibility rather than exact match) and that REPLYs
 * match the outstanding reply_xid.  The destination is taken from the
 * endpoint binding (CALL) or the recorded reply address (REPLY).
 * Flow control: blocks on the remote endpoint's quota_wait until its tx
 * quota allows another send (interruptible unless the endpoint is marked
 * MSM_RPC_UNINTERRUPTIBLE).  The payload is prefixed with a pacmark word
 * and written header+pacmark+body under smd_lock, polling for FIFO space.
 * NOTE(review): many lines are missing from this extracted view (braces,
 * locals, returns, #else/#endif of the AMSS conditional, the confirm_rx
 * handling) -- treat the visible flow as an outline, not the whole.
 */
722 int msm_rpc_write(struct msm_rpc_endpoint *ept, void *buffer, int count)
724 struct rr_header hdr;
726 struct rpc_request_hdr *rq = buffer;
727 struct rr_remote_endpoint *r_ept;
732 /* TODO: fragmentation for large outbound packets */
733 if (count > (RPCROUTER_MSGSIZE_MAX - sizeof(uint32_t)) || !count)
736 /* snoop the RPC packet and enforce permissions */
738 /* has to have at least the xid and type fields */
739 if (count < (sizeof(uint32_t) * 2)) {
740 printk(KERN_ERR "rr_write: rejecting runt packet\n");
746 if (count < (sizeof(uint32_t) * 6)) {
748 "rr_write: rejecting runt call packet\n");
751 if (ept->dst_pid == 0xffffffff) {
752 printk(KERN_ERR "rr_write: not connected\n");
756 #if CONFIG_MSM_AMSS_VERSION >= 6350
757 if ((ept->dst_prog != rq->prog) ||
758 !msm_rpc_is_compatible_version(
759 be32_to_cpu(ept->dst_vers),
760 be32_to_cpu(rq->vers))) {
762 if (ept->dst_prog != rq->prog || ept->dst_vers != rq->vers) {
765 "rr_write: cannot write to %08x:%d "
766 "(bound to %08x:%d)\n",
767 be32_to_cpu(rq->prog), be32_to_cpu(rq->vers),
768 be32_to_cpu(ept->dst_prog),
769 be32_to_cpu(ept->dst_vers));
772 hdr.dst_pid = ept->dst_pid;
773 hdr.dst_cid = ept->dst_cid;
774 IO("CALL on ept %p to %08x:%08x @ %d:%08x (%d bytes) (xid %x proc %x)\n",
776 be32_to_cpu(rq->prog), be32_to_cpu(rq->vers),
777 ept->dst_pid, ept->dst_cid, count,
778 be32_to_cpu(rq->xid), be32_to_cpu(rq->procedure));
/* REPLY path: must match the single outstanding reply address/xid. */
782 if (ept->reply_pid == 0xffffffff) {
784 "rr_write: rejecting unexpected reply\n");
787 if (ept->reply_xid != rq->xid) {
789 "rr_write: rejecting packet w/ bad xid\n");
793 hdr.dst_pid = ept->reply_pid;
794 hdr.dst_cid = ept->reply_cid;
796 /* consume this reply */
797 ept->reply_pid = 0xffffffff;
799 IO("REPLY on ept %p to xid=%d @ %d:%08x (%d bytes)\n",
801 be32_to_cpu(rq->xid), hdr.dst_pid, hdr.dst_cid, count);
804 r_ept = rpcrouter_lookup_remote_endpoint(hdr.dst_cid);
808 "msm_rpc_write(): No route to ept "
809 "[PID %x CID %x]\n", hdr.dst_pid, hdr.dst_cid);
810 return -EHOSTUNREACH;
813 /* Create routing header */
814 hdr.type = RPCROUTER_CTRL_CMD_DATA;
815 hdr.version = RPCROUTER_VERSION;
816 hdr.src_pid = ept->pid;
817 hdr.src_cid = ept->cid;
/* Payload on the wire is pacmark word + caller's buffer. */
819 hdr.size = count + sizeof(uint32_t);
/* Block until the remote endpoint's tx quota permits another send. */
822 prepare_to_wait(&r_ept->quota_wait, &__wait,
824 spin_lock_irqsave(&r_ept->quota_lock, flags);
825 if (r_ept->tx_quota_cntr < RPCROUTER_DEFAULT_RX_QUOTA)
827 if (signal_pending(current) &&
828 (!(ept->flags & MSM_RPC_UNINTERRUPTIBLE)))
830 spin_unlock_irqrestore(&r_ept->quota_lock, flags);
833 finish_wait(&r_ept->quota_wait, &__wait);
835 if (signal_pending(current) &&
836 (!(ept->flags & MSM_RPC_UNINTERRUPTIBLE))) {
837 spin_unlock_irqrestore(&r_ept->quota_lock, flags);
840 r_ept->tx_quota_cntr++;
841 if (r_ept->tx_quota_cntr == RPCROUTER_DEFAULT_RX_QUOTA)
844 /* bump pacmark while interrupts disabled to avoid race
845 * probably should be atomic op instead
847 pacmark = PACMARK(count, ++next_pacmarkid, 0, 1);
849 spin_unlock_irqrestore(&r_ept->quota_lock, flags);
851 spin_lock_irqsave(&smd_lock, flags);
853 needed = sizeof(hdr) + hdr.size;
854 while (smd_write_avail(smd_channel) < needed) {
855 spin_unlock_irqrestore(&smd_lock, flags);
857 spin_lock_irqsave(&smd_lock, flags);
860 /* TODO: deal with full fifo */
861 smd_write(smd_channel, &hdr, sizeof(hdr));
862 smd_write(smd_channel, &pacmark, sizeof(pacmark));
863 smd_write(smd_channel, buffer, count);
865 spin_unlock_irqrestore(&smd_lock, flags);
869 EXPORT_SYMBOL(msm_rpc_write);
/*
 * Read one complete RPC message.  Single-fragment messages are returned
 * as-is (the payload sits at the front of the fragment, so the fragment
 * pointer doubles as the buffer); multi-fragment messages are flattened
 * by copying each fragment into one allocation (allocation/free lines
 * dropped by extraction).  Caller owns and must kfree *buffer.
 */
872 * NOTE: It is the responsibility of the caller to kfree buffer
874 int msm_rpc_read(struct msm_rpc_endpoint *ept, void **buffer,
875 unsigned user_len, long timeout)
877 struct rr_fragment *frag, *next;
881 rc = __msm_rpc_read(ept, &frag, user_len, timeout);
885 /* single-fragment messages conveniently can be
886 * returned as-is (the buffer is at the front)
888 if (frag->next == 0) {
889 *buffer = (void*) frag;
893 /* multi-fragment messages, we have to do it the
894 * hard way, which is rather disgusting right now
899 while (frag != NULL) {
900 memcpy(buf, frag->data, frag->length);
/*
 * Fire an RPC call without collecting the reply payload: thin wrapper
 * around msm_rpc_call_reply() with a NULL/zero reply buffer (those
 * argument lines were dropped by extraction).
 */
910 int msm_rpc_call(struct msm_rpc_endpoint *ept, uint32_t proc,
911 void *_request, int request_size,
914 return msm_rpc_call_reply(ept, proc,
915 _request, request_size,
918 EXPORT_SYMBOL(msm_rpc_call);
/*
 * Synchronous RPC round-trip: stamp the caller-supplied request header
 * in place (dst_prog/dst_vers are already big-endian, so the header is
 * built by hand rather than via msm_rpc_setup_req()), write the call,
 * then read replies until one matches our xid.  Stale replies from
 * timed-out earlier calls, stray CALL packets, and denied/failed status
 * words are skipped or rejected.  On success the reply (including its
 * header) is copied into _reply when provided.  The read loop, free of
 * the reply buffer, and return statements were dropped by extraction.
 */
920 int msm_rpc_call_reply(struct msm_rpc_endpoint *ept, uint32_t proc,
921 void *_request, int request_size,
922 void *_reply, int reply_size,
925 struct rpc_request_hdr *req = _request;
926 struct rpc_reply_hdr *reply;
929 if (request_size < sizeof(*req))
932 if (ept->dst_pid == 0xffffffff)
935 /* We can't use msm_rpc_setup_req() here, because dst_prog and
936 * dst_vers here are already in BE.
938 memset(req, 0, sizeof(*req));
939 req->xid = cpu_to_be32(atomic_add_return(1, &next_xid));
940 req->rpc_vers = cpu_to_be32(2);
941 req->prog = ept->dst_prog;
942 req->vers = ept->dst_vers;
943 req->procedure = cpu_to_be32(proc);
945 rc = msm_rpc_write(ept, req, request_size);
950 rc = msm_rpc_read(ept, (void*) &reply, -1, timeout);
953 if (rc < (3 * sizeof(uint32_t))) {
957 /* we should not get CALL packets -- ignore them */
958 if (reply->type == 0) {
962 /* If an earlier call timed out, we could get the (no
963 * longer wanted) reply for it. Ignore replies that
966 if (reply->xid != req->xid) {
/* reply_stat != 0 means the call was denied outright. */
970 if (reply->reply_stat != 0) {
/* accept_stat != 0 means accepted but failed (prog/proc mismatch etc). */
974 if (reply->data.acc_hdr.accept_stat != 0) {
978 if (_reply == NULL) {
982 if (rc > reply_size) {
985 memcpy(_reply, reply, rc);
992 EXPORT_SYMBOL(msm_rpc_call_reply);
/*
 * Wait-queue predicate: true when the endpoint's read queue is
 * non-empty.  Takes read_q_lock so the check is consistent with
 * concurrent enqueues from do_read_data().
 */
995 static inline int ept_packet_available(struct msm_rpc_endpoint *ept)
999 spin_lock_irqsave(&ept->read_q_lock, flags);
1000 ret = !list_empty(&ept->read_q);
1001 spin_unlock_irqrestore(&ept->read_q_lock, flags);
/*
 * Core read: wait (in one of four modes, chosen by the endpoint's
 * MSM_RPC_UNINTERRUPTIBLE flag and whether `timeout` is finite) for a
 * packet, then dequeue the head of read_q.  Returns -ETOOSMALL-style
 * failure (exact code dropped by extraction) when the packet exceeds
 * `len`, hands the fragment chain to the caller via *frag_ret, and --
 * when the packet is a CALL -- records src_pid/src_cid/xid on the
 * endpoint so a later msm_rpc_write() can address the REPLY.
 */
1005 int __msm_rpc_read(struct msm_rpc_endpoint *ept,
1006 struct rr_fragment **frag_ret,
1007 unsigned len, long timeout)
1009 struct rr_packet *pkt;
1010 struct rpc_request_hdr *rq;
1011 DEFINE_WAIT(__wait);
1012 unsigned long flags;
1015 IO("READ on ept %p\n", ept);
1017 if (ept->flags & MSM_RPC_UNINTERRUPTIBLE) {
1019 wait_event(ept->wait_q, ept_packet_available(ept));
1021 rc = wait_event_timeout(
1022 ept->wait_q, ept_packet_available(ept),
1029 rc = wait_event_interruptible(
1030 ept->wait_q, ept_packet_available(ept));
1034 rc = wait_event_interruptible_timeout(
1035 ept->wait_q, ept_packet_available(ept),
1042 spin_lock_irqsave(&ept->read_q_lock, flags);
1043 if (list_empty(&ept->read_q)) {
1044 spin_unlock_irqrestore(&ept->read_q_lock, flags);
1047 pkt = list_first_entry(&ept->read_q, struct rr_packet, list);
1048 if (pkt->length > len) {
1049 spin_unlock_irqrestore(&ept->read_q_lock, flags);
1052 list_del(&pkt->list);
1053 spin_unlock_irqrestore(&ept->read_q_lock, flags);
1057 *frag_ret = pkt->first;
1058 rq = (void*) pkt->first->data;
1059 if ((rc >= (sizeof(uint32_t) * 3)) && (rq->type == 0)) {
1060 IO("READ on ept %p is a CALL on %08x:%08x proc %d xid %d\n",
1061 ept, be32_to_cpu(rq->prog), be32_to_cpu(rq->vers),
1062 be32_to_cpu(rq->procedure),
1063 be32_to_cpu(rq->xid));
1065 if (ept->reply_pid != 0xffffffff) {
1067 "rr_read: lost previous reply xid...\n");
1069 /* TODO: locking? */
/* Remember who called us so the REPLY can be routed back. */
1070 ept->reply_pid = pkt->hdr.src_pid;
1071 ept->reply_cid = pkt->hdr.src_cid;
1072 ept->reply_xid = rq->xid;
1075 else if ((rc >= (sizeof(uint32_t) * 3)) && (rq->type == 1))
1076 IO("READ on ept %p is a REPLY\n", ept);
1077 else IO("READ on ept %p (%d bytes)\n", ept, rc);
/*
 * Versioning rules for AMSS >= 6350: the mode bits of both versions must
 * agree; in "exact" mode (mode bits set) the versions must be identical,
 * otherwise major must match exactly and the server's minor must be at
 * least the client's.  (The closing #endif of this conditional block was
 * dropped by extraction.)
 */
1084 #if CONFIG_MSM_AMSS_VERSION >= 6350
1085 int msm_rpc_is_compatible_version(uint32_t server_version,
1086 uint32_t client_version)
1088 if ((server_version & RPC_VERSION_MODE_MASK) !=
1089 (client_version & RPC_VERSION_MODE_MASK))
1092 if (server_version & RPC_VERSION_MODE_MASK)
1093 return server_version == client_version;
1095 return ((server_version & RPC_VERSION_MAJOR_MASK) ==
1096 (client_version & RPC_VERSION_MAJOR_MASK)) &&
1097 ((server_version & RPC_VERSION_MINOR_MASK) >=
1098 (client_version & RPC_VERSION_MINOR_MASK));
1100 EXPORT_SYMBOL(msm_rpc_is_compatible_version);
/*
 * Find any registered server for `prog` whose version is compatible with
 * the requested one (per msm_rpc_is_compatible_version) and report its
 * actual version through *found_vers.  Returns 0 on a hit; the not-found
 * return line was dropped by extraction.
 */
1102 static int msm_rpc_get_compatible_server(uint32_t prog,
1104 uint32_t *found_vers)
1106 struct rr_server *server;
1107 unsigned long flags;
1108 if (found_vers == NULL)
1111 spin_lock_irqsave(&server_list_lock, flags);
1112 list_for_each_entry(server, &server_list, list) {
1113 if ((server->prog == prog) &&
1114 msm_rpc_is_compatible_version(server->vers, ver)) {
1115 *found_vers = server->vers;
1116 spin_unlock_irqrestore(&server_list_lock, flags);
1120 spin_unlock_irqrestore(&server_list_lock, flags);
/*
 * Open an endpoint bound to server prog:vers.  On AMSS >= 6350, a
 * request without mode bits is upgraded to a compatible registered
 * version first.  Returns ERR_PTR(-EHOSTUNREACH) when no such server is
 * registered.  The binding fields are stored big-endian, matching the
 * wire format checked in msm_rpc_write().  (Several lines, including
 * the #endif, `vers = found_vers`, and the final return, were dropped
 * by extraction.)
 */
1125 struct msm_rpc_endpoint *msm_rpc_connect(uint32_t prog, uint32_t vers, unsigned flags)
1127 struct msm_rpc_endpoint *ept;
1128 struct rr_server *server;
1130 #if CONFIG_MSM_AMSS_VERSION >= 6350
1131 if (!(vers & RPC_VERSION_MODE_MASK)) {
1132 uint32_t found_vers;
1133 if (msm_rpc_get_compatible_server(prog, vers, &found_vers) < 0)
1134 return ERR_PTR(-EHOSTUNREACH);
1135 if (found_vers != vers) {
1136 D("RPC using new version %08x:{%08x --> %08x}\n",
1137 prog, vers, found_vers);
1143 server = rpcrouter_lookup_server(prog, vers);
1145 return ERR_PTR(-EHOSTUNREACH);
1147 ept = msm_rpc_open();
1152 ept->dst_pid = server->pid;
1153 ept->dst_cid = server->cid;
1154 ept->dst_prog = cpu_to_be32(prog);
1155 ept->dst_vers = cpu_to_be32(vers);
1159 EXPORT_SYMBOL(msm_rpc_connect);
/* Return the endpoint's bound server version in host byte order. */
1161 uint32_t msm_rpc_get_vers(struct msm_rpc_endpoint *ept)
1163 return be32_to_cpu(ept->dst_vers);
1165 EXPORT_SYMBOL(msm_rpc_get_vers);
/*
 * Register a local server for prog:vers on the given endpoint: create
 * the local rr_server record, then announce NEW_SERVER to the remote
 * router.  (Error checks, remaining create_server arguments, and the
 * return statement were dropped by extraction.)
 */
1167 /* TODO: permission check? */
1168 int msm_rpc_register_server(struct msm_rpc_endpoint *ept,
1169 uint32_t prog, uint32_t vers)
1172 union rr_control_msg msg;
1173 struct rr_server *server;
1175 server = rpcrouter_create_server(ept->pid, ept->cid,
1180 msg.srv.cmd = RPCROUTER_CTRL_CMD_NEW_SERVER;
1181 msg.srv.pid = ept->pid;
1182 msg.srv.cid = ept->cid;
1183 msg.srv.prog = prog;
1184 msg.srv.vers = vers;
1186 RR("x NEW_SERVER id=%d:%08x prog=%08x:%08x\n",
1187 ept->pid, ept->cid, prog, vers);
1189 rc = rpcrouter_send_control_msg(&msg);
/*
 * Unregister the server for prog:vers: look it up and destroy it.
 * NOTE(review): as the TODO says, nothing checks that `ept` owns the
 * server being removed; the not-found return was dropped by extraction.
 */
1196 /* TODO: permission check -- disallow unreg of somebody else's server */
1197 int msm_rpc_unregister_server(struct msm_rpc_endpoint *ept,
1198 uint32_t prog, uint32_t vers)
1200 struct rr_server *server;
1201 server = rpcrouter_lookup_server(prog, vers);
1205 rpcrouter_destroy_server(server);
/*
 * Platform-driver probe: initialize the endpoint lists and wait queues,
 * create the router workqueue and device nodes, open the SMD_RPCCALL
 * channel, and kick off the first do_read_data pass.  Uses goto-based
 * unwinding: each failure label undoes the steps that preceded it.
 * (Success return and some error-check lines dropped by extraction.)
 */
1209 static int msm_rpcrouter_probe(struct platform_device *pdev)
1213 /* Initialize what we need to start processing */
1214 INIT_LIST_HEAD(&local_endpoints);
1215 INIT_LIST_HEAD(&remote_endpoints);
1217 init_waitqueue_head(&newserver_wait);
1218 init_waitqueue_head(&smd_wait);
1220 rpcrouter_workqueue = create_singlethread_workqueue("rpcrouter");
1221 if (!rpcrouter_workqueue)
1224 rc = msm_rpcrouter_init_devices();
1226 goto fail_destroy_workqueue;
1228 /* Open up SMD channel 2 */
1230 rc = smd_open("SMD_RPCCALL", &smd_channel, NULL, rpcrouter_smdnotify);
1232 goto fail_remove_devices;
1234 queue_work(rpcrouter_workqueue, &work_read_data);
1237 fail_remove_devices:
1238 msm_rpcrouter_exit_devices();
1239 fail_destroy_workqueue:
1240 destroy_workqueue(rpcrouter_workqueue);
/*
 * Platform driver matched by name against the "SMD_RPCCALL" device;
 * probe runs when the SMD channel becomes available.
 */
1244 static struct platform_driver msm_smd_channel2_driver = {
1245 .probe = msm_rpcrouter_probe,
1247 .name = "SMD_RPCCALL",
1248 .owner = THIS_MODULE,
/* Module entry point: just register the platform driver. */
1252 static int __init rpcrouter_init(void)
1254 return platform_driver_register(&msm_smd_channel2_driver);
1257 module_init(rpcrouter_init);
1258 MODULE_DESCRIPTION("MSM RPC Router");
1259 MODULE_AUTHOR("San Mehat <san@android.com>");
1260 MODULE_LICENSE("GPL");