[PATCH] v9fs: zero copy implementation
fs/9p/mux.c
/*
 * linux/fs/9p/mux.c
 *
 * Protocol Multiplexer
 *
 *  Copyright (C) 2004 by Eric Van Hensbergen <ericvh@gmail.com>
 *  Copyright (C) 2004-2005 by Latchesar Ionkov <lucho@ionkov.net>
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to:
 *  Free Software Foundation
 *  51 Franklin Street, Fifth Floor
 *  Boston, MA  02111-1301  USA
 *
 */

#include <linux/config.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/poll.h>
#include <linux/kthread.h>
#include <linux/idr.h>

#include "debug.h"
#include "v9fs.h"
#include "9p.h"
#include "conv.h"
#include "transport.h"
#include "mux.h"

#define ERREQFLUSH      1
#define SCHED_TIMEOUT   10
#define MAXPOLLWADDR    2

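/*
 * These flags live in v9fs_mux_data->wsched and are manipulated with
 * set_bit()/test_and_set_bit(): the *pending bits cache the most recent
 * poll result, the *worksched bits keep a read or write work item from
 * being queued twice.
 */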
enum {
        Rworksched = 1,         /* read work scheduled or running */
        Rpending = 2,           /* can read */
        Wworksched = 4,         /* write work scheduled or running */
        Wpending = 8,           /* can write */
};

struct v9fs_mux_poll_task;

struct v9fs_req {
        int tag;
        struct v9fs_fcall *tcall;
        struct v9fs_fcall *rcall;
        int err;
        v9fs_mux_req_callback cb;
        void *cba;
        struct list_head req_list;
};

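/*
 * Per-session multiplexer state.  The lock guards req_list,
 * unsent_req_list and the wbuf/wpos/wsize handoff between
 * v9fs_send_request() and v9fs_write_work().
 */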
struct v9fs_mux_data {
        spinlock_t lock;
        struct list_head mux_list;
        struct v9fs_mux_poll_task *poll_task;
        int msize;
        unsigned char *extended;
        struct v9fs_transport *trans;
        struct v9fs_idpool tidpool;
        int err;
        wait_queue_head_t equeue;
        struct list_head req_list;
        struct list_head unsent_req_list;
        struct v9fs_fcall *rcall;
        int rpos;
        char *rbuf;
        int wpos;
        int wsize;
        char *wbuf;
        wait_queue_t poll_wait[MAXPOLLWADDR];
        wait_queue_head_t *poll_waddr[MAXPOLLWADDR];
        poll_table pt;
        struct work_struct rq;
        struct work_struct wq;
        unsigned long wsched;
};

struct v9fs_mux_poll_task {
        struct task_struct *task;
        struct list_head mux_list;
        int muxnum;
};

struct v9fs_mux_rpc {
        struct v9fs_mux_data *m;
        struct v9fs_req *req;
        int err;
        struct v9fs_fcall *rcall;
        wait_queue_head_t wqueue;
};

extern int v9fs_errstr2errno(char *str, int len);

static int v9fs_poll_proc(void *);
static void v9fs_read_work(void *);
static void v9fs_write_work(void *);
static void v9fs_pollwait(struct file *filp, wait_queue_head_t *wait_address,
                          poll_table *p);
static u16 v9fs_mux_get_tag(struct v9fs_mux_data *);
static void v9fs_mux_put_tag(struct v9fs_mux_data *, u16);

static DECLARE_MUTEX(v9fs_mux_task_lock);
static struct workqueue_struct *v9fs_mux_wq;

static int v9fs_mux_num;
static int v9fs_mux_poll_task_num;
static struct v9fs_mux_poll_task v9fs_mux_poll_tasks[100];

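/*
 * v9fs_mux_global_init/exit run once, at v9fs module load and unload
 * (assumed from their non-static linkage; the callers live outside this
 * file), to set up and tear down the shared workqueue.
 */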
void v9fs_mux_global_init(void)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(v9fs_mux_poll_tasks); i++)
                v9fs_mux_poll_tasks[i].task = NULL;

        v9fs_mux_wq = create_workqueue("v9fs");
}

void v9fs_mux_global_exit(void)
{
        destroy_workqueue(v9fs_mux_wq);
}

/**
 * v9fs_mux_calc_poll_procs - calculate the number of polling procs
 *      based on the number of mounted v9fs filesystems
 *
 * The current implementation returns the ceiling of muxnum divided by
 * the number of existing poll tasks (one task if none exist yet),
 * clamped to the size of the poll-task table.
 */
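/* Example: muxnum = 5 with 2 poll tasks gives 5/2 + (5%2 ? 1 : 0) = 3. */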
inline int v9fs_mux_calc_poll_procs(int muxnum)
{
        int n;

        if (v9fs_mux_poll_task_num)
                n = muxnum / v9fs_mux_poll_task_num +
                    (muxnum % v9fs_mux_poll_task_num ? 1 : 0);
        else
                n = 1;

        if (n > ARRAY_SIZE(v9fs_mux_poll_tasks))
                n = ARRAY_SIZE(v9fs_mux_poll_tasks);

        return n;
}

static void v9fs_mux_poll_start(struct v9fs_mux_data *m)
{
        int i, n;
        struct v9fs_mux_poll_task *vpt, *vptlast;

        dprintk(DEBUG_MUX, "mux %p muxnum %d procnum %d\n", m, v9fs_mux_num,
                v9fs_mux_poll_task_num);
        down(&v9fs_mux_task_lock);

        n = v9fs_mux_calc_poll_procs(v9fs_mux_num + 1);
        if (n > v9fs_mux_poll_task_num) {
                for (i = 0; i < ARRAY_SIZE(v9fs_mux_poll_tasks); i++) {
                        if (v9fs_mux_poll_tasks[i].task == NULL) {
                                vpt = &v9fs_mux_poll_tasks[i];
                                dprintk(DEBUG_MUX, "create proc %p\n", vpt);
                                vpt->task = kthread_create(v9fs_poll_proc,
                                                           vpt, "v9fs-poll");
                                INIT_LIST_HEAD(&vpt->mux_list);
                                vpt->muxnum = 0;
                                v9fs_mux_poll_task_num++;
                                wake_up_process(vpt->task);
                                break;
                        }
                }

                if (i >= ARRAY_SIZE(v9fs_mux_poll_tasks))
                        dprintk(DEBUG_ERROR, "warning: no free poll slots\n");
        }

        n = (v9fs_mux_num + 1) / v9fs_mux_poll_task_num +
            ((v9fs_mux_num + 1) % v9fs_mux_poll_task_num ? 1 : 0);

        vptlast = NULL;
        for (i = 0; i < ARRAY_SIZE(v9fs_mux_poll_tasks); i++) {
                vpt = &v9fs_mux_poll_tasks[i];
                if (vpt->task != NULL) {
                        vptlast = vpt;
                        if (vpt->muxnum < n) {
                                dprintk(DEBUG_MUX, "put in proc %d\n", i);
                                list_add(&m->mux_list, &vpt->mux_list);
                                vpt->muxnum++;
                                m->poll_task = vpt;
                                memset(&m->poll_waddr, 0, sizeof(m->poll_waddr));
                                init_poll_funcptr(&m->pt, v9fs_pollwait);
                                break;
                        }
                }
        }

        if (i >= ARRAY_SIZE(v9fs_mux_poll_tasks)) {
                dprintk(DEBUG_MUX, "put in proc %d\n", i);
                list_add(&m->mux_list, &vptlast->mux_list);
                vptlast->muxnum++;
                m->poll_task = vptlast;
                memset(&m->poll_waddr, 0, sizeof(m->poll_waddr));
                init_poll_funcptr(&m->pt, v9fs_pollwait);
        }

        v9fs_mux_num++;
        up(&v9fs_mux_task_lock);
}

static void v9fs_mux_poll_stop(struct v9fs_mux_data *m)
{
        int i;
        struct v9fs_mux_poll_task *vpt;

        down(&v9fs_mux_task_lock);
        vpt = m->poll_task;
        list_del(&m->mux_list);
        for (i = 0; i < ARRAY_SIZE(m->poll_waddr); i++) {
                if (m->poll_waddr[i] != NULL) {
                        remove_wait_queue(m->poll_waddr[i], &m->poll_wait[i]);
                        m->poll_waddr[i] = NULL;
                }
        }
        vpt->muxnum--;
        if (!vpt->muxnum) {
                dprintk(DEBUG_MUX, "destroy proc %p\n", vpt);
                send_sig(SIGKILL, vpt->task, 1);
                vpt->task = NULL;
                v9fs_mux_poll_task_num--;
        }
        v9fs_mux_num--;
        up(&v9fs_mux_task_lock);
}

/**
 * v9fs_mux_init - allocate and initialize the per-session mux data,
 *      creating the polling task if this is the first session
 *
 * @trans: transport structure
 * @msize: maximum message size
 * @extended: pointer to the extended flag
 */
struct v9fs_mux_data *v9fs_mux_init(struct v9fs_transport *trans, int msize,
                                    unsigned char *extended)
{
        int i, n;
        struct v9fs_mux_data *m, *mtmp;

        dprintk(DEBUG_MUX, "transport %p msize %d\n", trans, msize);
        m = kmalloc(sizeof(struct v9fs_mux_data), GFP_KERNEL);
        if (!m)
                return ERR_PTR(-ENOMEM);

        spin_lock_init(&m->lock);
        INIT_LIST_HEAD(&m->mux_list);
        m->msize = msize;
        m->extended = extended;
        m->trans = trans;
        idr_init(&m->tidpool.pool);
        init_MUTEX(&m->tidpool.lock);
        m->err = 0;
        init_waitqueue_head(&m->equeue);
        INIT_LIST_HEAD(&m->req_list);
        INIT_LIST_HEAD(&m->unsent_req_list);
        m->rcall = NULL;
        m->rpos = 0;
        m->rbuf = NULL;
        m->wpos = m->wsize = 0;
        m->wbuf = NULL;
        INIT_WORK(&m->rq, v9fs_read_work, m);
        INIT_WORK(&m->wq, v9fs_write_work, m);
        m->wsched = 0;
        memset(&m->poll_waddr, 0, sizeof(m->poll_waddr));
        v9fs_mux_poll_start(m);

        n = trans->poll(trans, &m->pt);
        if (n & POLLIN) {
                dprintk(DEBUG_MUX, "mux %p can read\n", m);
                set_bit(Rpending, &m->wsched);
        }

        if (n & POLLOUT) {
                dprintk(DEBUG_MUX, "mux %p can write\n", m);
                set_bit(Wpending, &m->wsched);
        }

        for (i = 0; i < ARRAY_SIZE(m->poll_waddr); i++) {
                if (IS_ERR(m->poll_waddr[i])) {
                        v9fs_mux_poll_stop(m);
                        mtmp = (void *)m->poll_waddr[i];  /* the error code */
                        kfree(m);
                        m = mtmp;
                        break;
                }
        }

        return m;
}

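/*
 * Typical call sequence, as an illustrative sketch only; the trans,
 * msize, extended and tc variables are assumed to be set up by the
 * session code outside this file:
 *
 *      struct v9fs_mux_data *m;
 *      struct v9fs_fcall *rc;
 *      int err;
 *
 *      m = v9fs_mux_init(trans, msize, &extended);
 *      if (IS_ERR(m))
 *              return PTR_ERR(m);
 *
 *      err = v9fs_mux_rpc(m, tc, &rc); (sends tc, waits for the reply)
 *      kfree(rc);
 *      v9fs_mux_destroy(m);
 */
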
/**
 * v9fs_mux_destroy - cancel all pending requests and free mux resources
 */
void v9fs_mux_destroy(struct v9fs_mux_data *m)
{
        dprintk(DEBUG_MUX, "mux %p prev %p next %p\n", m,
                m->mux_list.prev, m->mux_list.next);
        v9fs_mux_cancel(m, -ECONNRESET);

        if (!list_empty(&m->req_list)) {
                /* wait until all processes waiting on this session exit */
                dprintk(DEBUG_MUX, "mux %p waiting for empty request queue\n",
                        m);
                wait_event_timeout(m->equeue, (list_empty(&m->req_list)), 5000);
                dprintk(DEBUG_MUX, "mux %p request queue empty: %d\n", m,
                        list_empty(&m->req_list));
        }

        v9fs_mux_poll_stop(m);
        m->trans = NULL;

        kfree(m);
}

/**
 * v9fs_pollwait - called by a file's poll operation to add the
 *      v9fs-poll task to the file's wait queue
 */
static void
v9fs_pollwait(struct file *filp, wait_queue_head_t *wait_address,
              poll_table *p)
{
        int i;
        struct v9fs_mux_data *m;

        m = container_of(p, struct v9fs_mux_data, pt);
        for (i = 0; i < ARRAY_SIZE(m->poll_waddr); i++)
                if (m->poll_waddr[i] == NULL)
                        break;

        if (i >= ARRAY_SIZE(m->poll_waddr)) {
                dprintk(DEBUG_ERROR, "not enough wait_address slots\n");
                return;
        }

        m->poll_waddr[i] = wait_address;

        if (!wait_address) {
                dprintk(DEBUG_ERROR, "no wait_address\n");
                m->poll_waddr[i] = ERR_PTR(-EIO);
                return;
        }

        init_waitqueue_entry(&m->poll_wait[i], m->poll_task->task);
        add_wait_queue(wait_address, &m->poll_wait[i]);
}

/**
 * v9fs_poll_mux - poll a mux and schedule read or write work if necessary
 */
static inline void v9fs_poll_mux(struct v9fs_mux_data *m)
{
        int n;

        if (m->err < 0)
                return;

        n = m->trans->poll(m->trans, NULL);
        if (n < 0 || n & (POLLERR | POLLHUP | POLLNVAL)) {
                dprintk(DEBUG_MUX, "error mux %p err %d\n", m, n);
                if (n >= 0)
                        n = -ECONNRESET;
                v9fs_mux_cancel(m, n);
        }

        if (n & POLLIN) {
                set_bit(Rpending, &m->wsched);
                dprintk(DEBUG_MUX, "mux %p can read\n", m);
                if (!test_and_set_bit(Rworksched, &m->wsched)) {
                        dprintk(DEBUG_MUX, "schedule read work mux %p\n", m);
                        queue_work(v9fs_mux_wq, &m->rq);
                }
        }

        if (n & POLLOUT) {
                set_bit(Wpending, &m->wsched);
                dprintk(DEBUG_MUX, "mux %p can write\n", m);
                if ((m->wsize || !list_empty(&m->unsent_req_list))
                    && !test_and_set_bit(Wworksched, &m->wsched)) {
                        dprintk(DEBUG_MUX, "schedule write work mux %p\n", m);
                        queue_work(v9fs_mux_wq, &m->wq);
                }
        }
}

/**
 * v9fs_poll_proc - poll all v9fs transports for new events and queue
 *      the appropriate work to the work queue
 */
static int v9fs_poll_proc(void *a)
{
        struct v9fs_mux_data *m, *mtmp;
        struct v9fs_mux_poll_task *vpt;

        vpt = a;
        dprintk(DEBUG_MUX, "start %p %p\n", current, vpt);
        allow_signal(SIGKILL);
        while (!kthread_should_stop()) {
                set_current_state(TASK_INTERRUPTIBLE);
                if (signal_pending(current))
                        break;

                list_for_each_entry_safe(m, mtmp, &vpt->mux_list, mux_list) {
                        v9fs_poll_mux(m);
                }

                dprintk(DEBUG_MUX, "sleeping...\n");
                schedule_timeout(SCHED_TIMEOUT * HZ);
        }

        __set_current_state(TASK_RUNNING);
        dprintk(DEBUG_MUX, "finish\n");
        return 0;
}

/**
 * v9fs_write_work - called when a transport can send some data
 */
static void v9fs_write_work(void *a)
{
        int n, err;
        struct v9fs_mux_data *m;
        struct v9fs_req *req;

        m = a;

        if (m->err < 0) {
                clear_bit(Wworksched, &m->wsched);
                return;
        }

        if (!m->wsize) {
                if (list_empty(&m->unsent_req_list)) {
                        clear_bit(Wworksched, &m->wsched);
                        return;
                }

                spin_lock(&m->lock);
                req = list_entry(m->unsent_req_list.next, struct v9fs_req,
                                 req_list);
                list_move_tail(&req->req_list, &m->req_list);
                m->wbuf = req->tcall->sdata;
                m->wsize = req->tcall->size;
                m->wpos = 0;
                dump_data(m->wbuf, m->wsize);
                spin_unlock(&m->lock);
        }

        dprintk(DEBUG_MUX, "mux %p pos %d size %d\n", m, m->wpos, m->wsize);
        clear_bit(Wpending, &m->wsched);
        err = m->trans->write(m->trans, m->wbuf + m->wpos, m->wsize - m->wpos);
        dprintk(DEBUG_MUX, "mux %p sent %d bytes\n", m, err);
        if (err == -EAGAIN) {
                clear_bit(Wworksched, &m->wsched);
                return;
        }

        if (err <= 0)
                goto error;

        m->wpos += err;
        if (m->wpos == m->wsize)
                m->wpos = m->wsize = 0;

        if (m->wsize == 0 && !list_empty(&m->unsent_req_list)) {
                if (test_and_clear_bit(Wpending, &m->wsched))
                        n = POLLOUT;
                else
                        n = m->trans->poll(m->trans, NULL);

                if (n & POLLOUT) {
                        dprintk(DEBUG_MUX, "schedule write work mux %p\n", m);
                        queue_work(v9fs_mux_wq, &m->wq);
                } else
                        clear_bit(Wworksched, &m->wsched);
        } else
                clear_bit(Wworksched, &m->wsched);

        return;

error:
        v9fs_mux_cancel(m, err);
        clear_bit(Wworksched, &m->wsched);
}

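/**
 * process_request - complete a matched request: translate an Rerror
 *      response into an errno, invoke the request's callback if one is
 *      set (otherwise free the response), and release the tag
 */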
static void process_request(struct v9fs_mux_data *m, struct v9fs_req *req)
{
        int ecode, tag;
        struct v9fs_str *ename;

        tag = req->tag;
        if (req->rcall->id == RERROR && !req->err) {
                ecode = req->rcall->params.rerror.errno;
                ename = &req->rcall->params.rerror.error;

                dprintk(DEBUG_MUX, "Rerror %.*s\n", ename->len, ename->str);

                if (*m->extended)
                        req->err = -ecode;

                if (!req->err) {
                        req->err = v9fs_errstr2errno(ename->str, ename->len);

                        if (!req->err) {        /* string match failed */
                                PRINT_FCALL_ERROR("unknown error", req->rcall);
                                req->err = -ESERVERFAULT;
                        }
                }
        } else if (req->tcall && req->rcall->id != req->tcall->id + 1) {
                dprintk(DEBUG_ERROR, "fcall mismatch: expected %d, got %d\n",
                        req->tcall->id + 1, req->rcall->id);
                if (!req->err)
                        req->err = -EIO;
        }

        if (req->cb && req->err != ERREQFLUSH) {
                dprintk(DEBUG_MUX, "calling callback tcall %p rcall %p\n",
                        req->tcall, req->rcall);

                (*req->cb) (req->cba, req->tcall, req->rcall, req->err);
                req->cb = NULL;
        } else
                kfree(req->rcall);

        v9fs_mux_put_tag(m, tag);

        wake_up(&m->equeue);
        kfree(req);
}

/**
 * v9fs_read_work - called when there is some data to be read from a transport
 */
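/*
 * Every 9P message begins with a 4-byte little-endian size field that
 * counts the whole message, including the size field itself.  The loop
 * below uses it to frame complete messages out of the byte stream,
 * dispatching each one to the request that carries the matching tag.
 */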
static void v9fs_read_work(void *a)
{
        int n, err;
        struct v9fs_mux_data *m;
        struct v9fs_req *req, *rptr, *rreq;
        struct v9fs_fcall *rcall;
        char *rbuf;

        m = a;

        if (m->err < 0)
                return;

        rcall = NULL;
        dprintk(DEBUG_MUX, "start mux %p pos %d\n", m, m->rpos);

        if (!m->rcall) {
                m->rcall = kmalloc(sizeof(struct v9fs_fcall) + m->msize,
                                   GFP_KERNEL);
                if (!m->rcall) {
                        err = -ENOMEM;
                        goto error;
                }

                m->rbuf = (char *)m->rcall + sizeof(struct v9fs_fcall);
                m->rpos = 0;
        }

        clear_bit(Rpending, &m->wsched);
        err = m->trans->read(m->trans, m->rbuf + m->rpos, m->msize - m->rpos);
        dprintk(DEBUG_MUX, "mux %p got %d bytes\n", m, err);
        if (err == -EAGAIN) {
                clear_bit(Rworksched, &m->wsched);
                return;
        }

        if (err <= 0)
                goto error;

        m->rpos += err;
        while (m->rpos > 4) {
                n = le32_to_cpu(*(__le32 *) m->rbuf);
                if (n >= m->msize) {
                        dprintk(DEBUG_ERROR,
                                "requested packet size too big: %d\n", n);
                        err = -EIO;
                        goto error;
                }

                if (m->rpos < n)
                        break;

                dump_data(m->rbuf, n);
                err = v9fs_deserialize_fcall(m->rbuf, n, m->rcall,
                                             *m->extended);
                if (err < 0)
                        goto error;

                rcall = m->rcall;
                rbuf = m->rbuf;
                if (m->rpos > n) {
                        m->rcall = kmalloc(sizeof(struct v9fs_fcall) + m->msize,
                                           GFP_KERNEL);
                        if (!m->rcall) {
                                err = -ENOMEM;
                                goto error;
                        }

                        m->rbuf = (char *)m->rcall + sizeof(struct v9fs_fcall);
                        memmove(m->rbuf, rbuf + n, m->rpos - n);
                        m->rpos -= n;
                } else {
                        m->rcall = NULL;
                        m->rbuf = NULL;
                        m->rpos = 0;
                }

                dprintk(DEBUG_MUX, "mux %p fcall id %d tag %d\n", m, rcall->id,
                        rcall->tag);

                req = NULL;
                spin_lock(&m->lock);
                list_for_each_entry_safe(rreq, rptr, &m->req_list, req_list) {
                        if (rreq->tag == rcall->tag) {
                                req = rreq;
                                req->rcall = rcall;
                                list_del(&req->req_list);
                                spin_unlock(&m->lock);
                                process_request(m, req);
                                break;
                        }
                }

                if (!req) {
                        spin_unlock(&m->lock);
                        if (err >= 0 && rcall->id != RFLUSH)
                                dprintk(DEBUG_ERROR,
                                        "unexpected response mux %p id %d tag %d\n",
                                        m, rcall->id, rcall->tag);
                        kfree(rcall);
                }
        }

        if (!list_empty(&m->req_list)) {
                if (test_and_clear_bit(Rpending, &m->wsched))
                        n = POLLIN;
                else
                        n = m->trans->poll(m->trans, NULL);

                if (n & POLLIN) {
                        dprintk(DEBUG_MUX, "schedule read work mux %p\n", m);
                        queue_work(v9fs_mux_wq, &m->rq);
                } else
                        clear_bit(Rworksched, &m->wsched);
        } else
                clear_bit(Rworksched, &m->wsched);

        return;

error:
        v9fs_mux_cancel(m, err);
        clear_bit(Rworksched, &m->wsched);
}

/**
 * v9fs_send_request - send 9P request
 * The function can sleep until the request is scheduled for sending.
 * The function can be interrupted. Returning from the function is not
 * a guarantee that the request was sent successfully. On failure it
 * returns an ERR_PTR value; retrieve the error code with PTR_ERR.
 *
 * @m: mux data
 * @tc: request to be sent
 * @cb: callback function to call when response is received
 * @cba: parameter to pass to the callback function
 */
static struct v9fs_req *v9fs_send_request(struct v9fs_mux_data *m,
                                          struct v9fs_fcall *tc,
                                          v9fs_mux_req_callback cb, void *cba)
{
        int n;
        struct v9fs_req *req;

        dprintk(DEBUG_MUX, "mux %p task %p tcall %p id %d\n", m, current,
                tc, tc->id);
        if (m->err < 0)
                return ERR_PTR(m->err);

        req = kmalloc(sizeof(struct v9fs_req), GFP_KERNEL);
        if (!req)
                return ERR_PTR(-ENOMEM);

        if (tc->id == TVERSION) {
                n = V9FS_NOTAG;
        } else {
                n = v9fs_mux_get_tag(m);
                if (n == V9FS_NOTAG) {
                        kfree(req);
                        return ERR_PTR(-ENOMEM);
                }
        }

        v9fs_set_tag(tc, n);

        req->tag = n;
        req->tcall = tc;
        req->rcall = NULL;
        req->err = 0;
        req->cb = cb;
        req->cba = cba;

        spin_lock(&m->lock);
        list_add_tail(&req->req_list, &m->unsent_req_list);
        spin_unlock(&m->lock);

        if (test_and_clear_bit(Wpending, &m->wsched))
                n = POLLOUT;
        else
                n = m->trans->poll(m->trans, NULL);

        if (n & POLLOUT && !test_and_set_bit(Wworksched, &m->wsched))
                queue_work(v9fs_mux_wq, &m->wq);

        return req;
}

static inline void
v9fs_mux_flush_cb(void *a, struct v9fs_fcall *tc, struct v9fs_fcall *rc,
                  int err)
{
        v9fs_mux_req_callback cb;
        int tag;
        struct v9fs_mux_data *m;
        struct v9fs_req *req, *rptr;

        m = a;
        dprintk(DEBUG_MUX, "mux %p tc %p rc %p err %d oldtag %d\n", m, tc,
                rc, err, tc->params.tflush.oldtag);

        spin_lock(&m->lock);
        cb = NULL;
        tag = tc->params.tflush.oldtag;
        list_for_each_entry_safe(req, rptr, &m->req_list, req_list) {
                if (req->tag == tag) {
                        list_del(&req->req_list);
                        if (req->cb) {
                                cb = req->cb;
                                req->cb = NULL;
                                spin_unlock(&m->lock);
                                (*cb) (req->cba, req->tcall, req->rcall,
                                       req->err);
                        }
                        kfree(req);
                        wake_up(&m->equeue);
                        break;
                }
        }

        if (!cb)
                spin_unlock(&m->lock);

        v9fs_mux_put_tag(m, tag);
        kfree(tc);
        kfree(rc);
}

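/*
 * Ask the server to abort the pending request identified by req->tag.
 * Per the 9P protocol, the server answers the Tflush with an Rflush
 * after dropping the flushed request; the Rflush ends up in
 * v9fs_mux_flush_cb above.
 */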
static void
v9fs_mux_flush_request(struct v9fs_mux_data *m, struct v9fs_req *req)
{
        struct v9fs_fcall *fc;

        dprintk(DEBUG_MUX, "mux %p req %p tag %d\n", m, req, req->tag);

        fc = v9fs_create_tflush(req->tag);
        v9fs_send_request(m, fc, v9fs_mux_flush_cb, m);
}

static void
v9fs_mux_rpc_cb(void *a, struct v9fs_fcall *tc, struct v9fs_fcall *rc, int err)
{
        struct v9fs_mux_rpc *r;

        if (err == ERREQFLUSH) {
                dprintk(DEBUG_MUX, "err req flush\n");
                return;
        }

        r = a;
        dprintk(DEBUG_MUX, "mux %p req %p tc %p rc %p err %d\n", r->m, r->req,
                tc, rc, err);
        r->rcall = rc;
        r->err = err;
        wake_up(&r->wqueue);
}

/**
 * v9fs_mux_rpc - send a 9P request and wait until a response is
 *      available; the function can be interrupted
 * @m: mux data
 * @tc: request to be sent
 * @rc: pointer where a pointer to the response is stored
 */
int
v9fs_mux_rpc(struct v9fs_mux_data *m, struct v9fs_fcall *tc,
             struct v9fs_fcall **rc)
{
        int err;
        unsigned long flags;
        struct v9fs_req *req;
        struct v9fs_mux_rpc r;

        r.err = 0;
        r.rcall = NULL;
        r.m = m;
        init_waitqueue_head(&r.wqueue);

        if (rc)
                *rc = NULL;

        req = v9fs_send_request(m, tc, v9fs_mux_rpc_cb, &r);
        if (IS_ERR(req)) {
                err = PTR_ERR(req);
                dprintk(DEBUG_MUX, "error %d\n", err);
                return err;
        }

        r.req = req;
        dprintk(DEBUG_MUX, "mux %p tc %p tag %d rpc %p req %p\n", m, tc,
                req->tag, &r, req);
        err = wait_event_interruptible(r.wqueue, r.rcall != NULL || r.err < 0);
        if (r.err < 0)
                err = r.err;

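        /*
         * Interrupted by a signal: the tag is still owned by the
         * server, so issue a Tflush before giving up.  TIF_SIGPENDING
         * is cleared temporarily so the flush itself can go out, and
         * recalculated right after.
         */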
        if (err == -ERESTARTSYS && m->trans->status == Connected
            && m->err == 0) {
                spin_lock(&m->lock);
                req->tcall = NULL;
                req->err = ERREQFLUSH;
                spin_unlock(&m->lock);

                clear_thread_flag(TIF_SIGPENDING);
                v9fs_mux_flush_request(m, req);
                spin_lock_irqsave(&current->sighand->siglock, flags);
                recalc_sigpending();
                spin_unlock_irqrestore(&current->sighand->siglock, flags);
        }

        if (!err) {
                if (r.rcall)
                        dprintk(DEBUG_MUX, "got response id %d tag %d\n",
                                r.rcall->id, r.rcall->tag);

                if (rc)
                        *rc = r.rcall;
                else
                        kfree(r.rcall);
        } else {
                kfree(r.rcall);
                dprintk(DEBUG_MUX, "got error %d\n", err);
                if (err > 0)
                        err = -EIO;
        }

        return err;
}

/**
 * v9fs_mux_rpcnb - send a 9P request without waiting for a response
 * @m: mux data
 * @tc: request to be sent
 * @cb: callback function to be called when response arrives
 * @a: value to pass to the callback function
 */
int v9fs_mux_rpcnb(struct v9fs_mux_data *m, struct v9fs_fcall *tc,
                   v9fs_mux_req_callback cb, void *a)
{
        int err;
        struct v9fs_req *req;

        req = v9fs_send_request(m, tc, cb, a);
        if (IS_ERR(req)) {
                err = PTR_ERR(req);
                dprintk(DEBUG_MUX, "error %d\n", err);
                return err;
        }

        dprintk(DEBUG_MUX, "mux %p tc %p tag %d\n", m, tc, req->tag);
        return 0;
}

/**
 * v9fs_mux_cancel - cancel all pending requests with error
 * @m: mux data
 * @err: error code
 */
void v9fs_mux_cancel(struct v9fs_mux_data *m, int err)
{
        struct v9fs_req *req, *rtmp;
        LIST_HEAD(cancel_list);

        dprintk(DEBUG_MUX, "mux %p err %d\n", m, err);
        m->err = err;
        spin_lock(&m->lock);
        list_for_each_entry_safe(req, rtmp, &m->req_list, req_list) {
                list_move(&req->req_list, &cancel_list);
        }
        spin_unlock(&m->lock);

        list_for_each_entry_safe(req, rtmp, &cancel_list, req_list) {
                list_del(&req->req_list);
                if (!req->err)
                        req->err = err;

                if (req->cb)
                        (*req->cb) (req->cba, req->tcall, req->rcall, req->err);
                else
                        kfree(req->rcall);

                kfree(req);
        }

        wake_up(&m->equeue);
}

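/*
 * Tags are drawn from the per-session tidpool.  V9FS_NOTAG is reserved
 * for Tversion and doubles as the allocation-failure sentinel, which
 * v9fs_send_request() checks for.
 */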
static u16 v9fs_mux_get_tag(struct v9fs_mux_data *m)
{
        int tag;

        tag = v9fs_get_idpool(&m->tidpool);
        if (tag < 0)
                return V9FS_NOTAG;
        else
                return (u16) tag;
}

static void v9fs_mux_put_tag(struct v9fs_mux_data *m, u16 tag)
{
        if (tag != V9FS_NOTAG && v9fs_check_idpool(tag, &m->tidpool))
                v9fs_put_idpool(tag, &m->tidpool);
}