/*
 * linux/fs/9p/mux.c
 *
 * Protocol Multiplexer
 *
 *  Copyright (C) 2004 by Eric Van Hensbergen <ericvh@gmail.com>
 *  Copyright (C) 2004-2005 by Latchesar Ionkov <lucho@ionkov.net>
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License version 2
 *  as published by the Free Software Foundation.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to:
 *  Free Software Foundation
 *  51 Franklin Street, Fifth Floor
 *  Boston, MA  02111-1301  USA
 *
 */

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/poll.h>
#include <linux/kthread.h>
#include <linux/idr.h>
#include <linux/mutex.h>

#include "debug.h"
#include "v9fs.h"
#include "9p.h"
#include "conv.h"
#include "transport.h"
#include "mux.h"

#define ERREQFLUSH      1
#define SCHED_TIMEOUT   10
#define MAXPOLLWADDR    2

enum {
        Rworksched = 1,         /* read work scheduled or running */
        Rpending = 2,           /* can read */
        Wworksched = 4,         /* write work scheduled or running */
        Wpending = 8,           /* can write */
};

enum {
        None,
        Flushing,
        Flushed,
};

struct v9fs_mux_poll_task;

struct v9fs_req {
        spinlock_t lock;
        int tag;
        struct v9fs_fcall *tcall;
        struct v9fs_fcall *rcall;
        int err;
        v9fs_mux_req_callback cb;
        void *cba;
        int flush;
        struct list_head req_list;
};

struct v9fs_mux_data {
        spinlock_t lock;
        struct list_head mux_list;
        struct v9fs_mux_poll_task *poll_task;
        int msize;
        unsigned char *extended;
        struct v9fs_transport *trans;
        struct v9fs_idpool tagpool;
        int err;
        wait_queue_head_t equeue;
        struct list_head req_list;
        struct list_head unsent_req_list;
        struct v9fs_fcall *rcall;
        int rpos;
        char *rbuf;
        int wpos;
        int wsize;
        char *wbuf;
        wait_queue_t poll_wait[MAXPOLLWADDR];
        wait_queue_head_t *poll_waddr[MAXPOLLWADDR];
        poll_table pt;
        struct work_struct rq;
        struct work_struct wq;
        unsigned long wsched;
};

struct v9fs_mux_poll_task {
        struct task_struct *task;
        struct list_head mux_list;
        int muxnum;
};

struct v9fs_mux_rpc {
        struct v9fs_mux_data *m;
        int err;
        struct v9fs_fcall *tcall;
        struct v9fs_fcall *rcall;
        wait_queue_head_t wqueue;
};

static int v9fs_poll_proc(void *);
static void v9fs_read_work(struct work_struct *work);
static void v9fs_write_work(struct work_struct *work);
static void v9fs_pollwait(struct file *filp, wait_queue_head_t *wait_address,
                          poll_table *p);
static u16 v9fs_mux_get_tag(struct v9fs_mux_data *);
static void v9fs_mux_put_tag(struct v9fs_mux_data *, u16);

static DEFINE_MUTEX(v9fs_mux_task_lock);
static struct workqueue_struct *v9fs_mux_wq;

static int v9fs_mux_num;
static int v9fs_mux_poll_task_num;
static struct v9fs_mux_poll_task v9fs_mux_poll_tasks[100];

int v9fs_mux_global_init(void)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(v9fs_mux_poll_tasks); i++)
                v9fs_mux_poll_tasks[i].task = NULL;

        v9fs_mux_wq = create_workqueue("v9fs");
        if (!v9fs_mux_wq) {
                printk(KERN_WARNING "v9fs: mux: creating workqueue failed\n");
                return -ENOMEM;
        }

        return 0;
}

void v9fs_mux_global_exit(void)
{
        destroy_workqueue(v9fs_mux_wq);
}
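
/*
 * Illustrative sketch (not part of this file): the init/exit pair above
 * is meant to be called once per module load/unload.  In v9fs this is
 * driven from the filesystem module's own init/exit path; the function
 * names below are hypothetical.
 *
 *      static int __init example_v9fs_init(void)
 *      {
 *              int err = v9fs_mux_global_init();
 *              if (err)
 *                      return err;
 *              ... register the filesystem type, etc. ...
 *              return 0;
 *      }
 *
 *      static void __exit example_v9fs_exit(void)
 *      {
 *              ... unregister the filesystem type, etc. ...
 *              v9fs_mux_global_exit();
 *      }
 */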

/**
 * v9fs_mux_calc_poll_procs - calculate the number of polling procs
 * @muxnum: number of mounted v9fs filesystems
 *
 * The current implementation returns approximately the square root of
 * the number of mounts: another proc is required exactly when every
 * existing proc already serves more muxes than there are procs.
 */
static int v9fs_mux_calc_poll_procs(int muxnum)
{
        int n;

        if (v9fs_mux_poll_task_num)
                n = muxnum / v9fs_mux_poll_task_num +
                    (muxnum % v9fs_mux_poll_task_num ? 1 : 0);
        else
                n = 1;

        if (n > ARRAY_SIZE(v9fs_mux_poll_tasks))
                n = ARRAY_SIZE(v9fs_mux_poll_tasks);

        return n;
}
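
/*
 * Worked example of the growth rule above: a new poll proc is needed
 * whenever ceil(muxnum / ntasks) > ntasks, i.e. when muxnum exceeds
 * ntasks^2.  Starting from one task, procs are therefore added at
 * muxnum = 2, 5, 10, 17, ... (k^2 + 1), so ntasks tracks sqrt(muxnum),
 * capped at ARRAY_SIZE(v9fs_mux_poll_tasks):
 *
 *      muxnum:  1  2  4  5  9  10  16  17
 *      ntasks:  1  2  2  3  3   4   4   5
 */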

static int v9fs_mux_poll_start(struct v9fs_mux_data *m)
{
        int i, n;
        struct v9fs_mux_poll_task *vpt, *vptlast;
        struct task_struct *pproc;

        dprintk(DEBUG_MUX, "mux %p muxnum %d procnum %d\n", m, v9fs_mux_num,
                v9fs_mux_poll_task_num);
        mutex_lock(&v9fs_mux_task_lock);

        n = v9fs_mux_calc_poll_procs(v9fs_mux_num + 1);
        if (n > v9fs_mux_poll_task_num) {
                for (i = 0; i < ARRAY_SIZE(v9fs_mux_poll_tasks); i++) {
                        if (v9fs_mux_poll_tasks[i].task == NULL) {
                                vpt = &v9fs_mux_poll_tasks[i];
                                dprintk(DEBUG_MUX, "create proc %p\n", vpt);
                                pproc = kthread_create(v9fs_poll_proc, vpt,
                                                       "v9fs-poll");

                                if (!IS_ERR(pproc)) {
                                        vpt->task = pproc;
                                        INIT_LIST_HEAD(&vpt->mux_list);
                                        vpt->muxnum = 0;
                                        v9fs_mux_poll_task_num++;
                                        wake_up_process(vpt->task);
                                }
                                break;
                        }
                }

                if (i >= ARRAY_SIZE(v9fs_mux_poll_tasks))
                        dprintk(DEBUG_ERROR, "warning: no free poll slots\n");
        }

        /* if no poll task could ever be created, bail out rather than
         * dividing by zero below */
        if (!v9fs_mux_poll_task_num) {
                mutex_unlock(&v9fs_mux_task_lock);
                return -ENOMEM;
        }

        n = (v9fs_mux_num + 1) / v9fs_mux_poll_task_num +
            ((v9fs_mux_num + 1) % v9fs_mux_poll_task_num ? 1 : 0);

        vptlast = NULL;
        for (i = 0; i < ARRAY_SIZE(v9fs_mux_poll_tasks); i++) {
                vpt = &v9fs_mux_poll_tasks[i];
                if (vpt->task != NULL) {
                        vptlast = vpt;
                        if (vpt->muxnum < n) {
                                dprintk(DEBUG_MUX, "put in proc %d\n", i);
                                list_add(&m->mux_list, &vpt->mux_list);
                                vpt->muxnum++;
                                m->poll_task = vpt;
                                memset(&m->poll_waddr, 0, sizeof(m->poll_waddr));
                                init_poll_funcptr(&m->pt, v9fs_pollwait);
                                break;
                        }
                }
        }

        if (i >= ARRAY_SIZE(v9fs_mux_poll_tasks)) {
                if (vptlast == NULL) {
                        /* don't leak the mutex on the error path */
                        mutex_unlock(&v9fs_mux_task_lock);
                        return -ENOMEM;
                }

                dprintk(DEBUG_MUX, "put in proc %d\n", i);
                list_add(&m->mux_list, &vptlast->mux_list);
                vptlast->muxnum++;
                m->poll_task = vptlast;
                memset(&m->poll_waddr, 0, sizeof(m->poll_waddr));
                init_poll_funcptr(&m->pt, v9fs_pollwait);
        }

        v9fs_mux_num++;
        mutex_unlock(&v9fs_mux_task_lock);

        return 0;
}

static void v9fs_mux_poll_stop(struct v9fs_mux_data *m)
{
        int i;
        struct v9fs_mux_poll_task *vpt;

        mutex_lock(&v9fs_mux_task_lock);
        vpt = m->poll_task;
        list_del(&m->mux_list);
        for (i = 0; i < ARRAY_SIZE(m->poll_waddr); i++) {
                if (m->poll_waddr[i] != NULL) {
                        remove_wait_queue(m->poll_waddr[i], &m->poll_wait[i]);
                        m->poll_waddr[i] = NULL;
                }
        }
        vpt->muxnum--;
        if (!vpt->muxnum) {
                dprintk(DEBUG_MUX, "destroy proc %p\n", vpt);
                send_sig(SIGKILL, vpt->task, 1);
                vpt->task = NULL;
                v9fs_mux_poll_task_num--;
        }
        v9fs_mux_num--;
        mutex_unlock(&v9fs_mux_task_lock);
}

/**
 * v9fs_mux_init - allocate and initialize the per-session mux data
 * @trans: transport structure
 * @msize: maximum message size
 * @extended: pointer to the extended flag
 *
 * Creates the polling task if this is the first session.
 */
struct v9fs_mux_data *v9fs_mux_init(struct v9fs_transport *trans, int msize,
                                    unsigned char *extended)
{
        int i, n;
        long err;
        struct v9fs_mux_data *m;

        dprintk(DEBUG_MUX, "transport %p msize %d\n", trans, msize);
        m = kmalloc(sizeof(struct v9fs_mux_data), GFP_KERNEL);
        if (!m)
                return ERR_PTR(-ENOMEM);

        spin_lock_init(&m->lock);
        INIT_LIST_HEAD(&m->mux_list);
        m->msize = msize;
        m->extended = extended;
        m->trans = trans;
        idr_init(&m->tagpool.pool);
        init_MUTEX(&m->tagpool.lock);
        m->err = 0;
        init_waitqueue_head(&m->equeue);
        INIT_LIST_HEAD(&m->req_list);
        INIT_LIST_HEAD(&m->unsent_req_list);
        m->rcall = NULL;
        m->rpos = 0;
        m->rbuf = NULL;
        m->wpos = m->wsize = 0;
        m->wbuf = NULL;
        INIT_WORK(&m->rq, v9fs_read_work);
        INIT_WORK(&m->wq, v9fs_write_work);
        m->wsched = 0;
        memset(&m->poll_waddr, 0, sizeof(m->poll_waddr));
        m->poll_task = NULL;
        n = v9fs_mux_poll_start(m);
        if (n) {
                /* don't leak m if no poll task could be set up */
                kfree(m);
                return ERR_PTR(n);
        }

        n = trans->poll(trans, &m->pt);
        if (n & POLLIN) {
                dprintk(DEBUG_MUX, "mux %p can read\n", m);
                set_bit(Rpending, &m->wsched);
        }

        if (n & POLLOUT) {
                dprintk(DEBUG_MUX, "mux %p can write\n", m);
                set_bit(Wpending, &m->wsched);
        }

        for (i = 0; i < ARRAY_SIZE(m->poll_waddr); i++) {
                if (IS_ERR(m->poll_waddr[i])) {
                        /* pick up the error code stored by v9fs_pollwait
                         * before freeing m (the old code returned the
                         * address of the freed array instead) */
                        err = PTR_ERR(m->poll_waddr[i]);
                        v9fs_mux_poll_stop(m);
                        kfree(m);
                        return ERR_PTR(err);
                }
        }

        return m;
}
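
/*
 * Illustrative caller sketch (assumed, based on how the v9fs session
 * code uses this API; V9FS_IOHDRSZ and the v9ses fields are taken from
 * the surrounding v9fs headers):
 *
 *      v9ses->mux = v9fs_mux_init(v9ses->transport,
 *                                 v9ses->maxdata + V9FS_IOHDRSZ,
 *                                 &v9ses->extended);
 *      if (IS_ERR(v9ses->mux)) {
 *              err = PTR_ERR(v9ses->mux);
 *              v9ses->mux = NULL;
 *              ... tear down the transport ...
 *      }
 */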

/**
 * v9fs_mux_destroy - cancel all pending requests and free mux resources
 * @m: mux data to tear down
 */
void v9fs_mux_destroy(struct v9fs_mux_data *m)
{
        dprintk(DEBUG_MUX, "mux %p prev %p next %p\n", m,
                m->mux_list.prev, m->mux_list.next);
        v9fs_mux_cancel(m, -ECONNRESET);

        if (!list_empty(&m->req_list)) {
                /* wait until all processes waiting on this session exit */
                dprintk(DEBUG_MUX, "mux %p waiting for empty request queue\n",
                        m);
                wait_event_timeout(m->equeue, (list_empty(&m->req_list)),
                                   msecs_to_jiffies(5000));
                dprintk(DEBUG_MUX, "mux %p request queue empty: %d\n", m,
                        list_empty(&m->req_list));
        }

        v9fs_mux_poll_stop(m);
        m->trans = NULL;

        kfree(m);
}

/**
 * v9fs_pollwait - called by the file's poll operation to add the
 *      v9fs-poll task to the file's wait queue
 */
static void
v9fs_pollwait(struct file *filp, wait_queue_head_t *wait_address,
              poll_table *p)
{
        int i;
        struct v9fs_mux_data *m;

        m = container_of(p, struct v9fs_mux_data, pt);
        for (i = 0; i < ARRAY_SIZE(m->poll_waddr); i++)
                if (m->poll_waddr[i] == NULL)
                        break;

        if (i >= ARRAY_SIZE(m->poll_waddr)) {
                dprintk(DEBUG_ERROR, "not enough wait_address slots\n");
                return;
        }

        m->poll_waddr[i] = wait_address;

        if (!wait_address) {
                dprintk(DEBUG_ERROR, "no wait_address\n");
                m->poll_waddr[i] = ERR_PTR(-EIO);
                return;
        }

        init_waitqueue_entry(&m->poll_wait[i], m->poll_task->task);
        add_wait_queue(wait_address, &m->poll_wait[i]);
}
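
/*
 * How the registration above gets triggered (a sketch of the mechanism,
 * not extra functionality): v9fs_mux_init() passes &m->pt to the
 * transport's poll method, which calls poll_wait() on its socket and
 * thereby lands in v9fs_pollwait() once per wait queue, up to
 * MAXPOLLWADDR queues.  From then on the poll task is woken whenever
 * the transport becomes readable or writable.
 */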

/**
 * v9fs_poll_mux - polls a mux and schedules read or write work if necessary
 */
static void v9fs_poll_mux(struct v9fs_mux_data *m)
{
        int n;

        if (m->err < 0)
                return;

        n = m->trans->poll(m->trans, NULL);
        if (n < 0 || n & (POLLERR | POLLHUP | POLLNVAL)) {
                dprintk(DEBUG_MUX, "error mux %p err %d\n", m, n);
                if (n >= 0)
                        n = -ECONNRESET;
                v9fs_mux_cancel(m, n);
        }

        if (n & POLLIN) {
                set_bit(Rpending, &m->wsched);
                dprintk(DEBUG_MUX, "mux %p can read\n", m);
                if (!test_and_set_bit(Rworksched, &m->wsched)) {
                        dprintk(DEBUG_MUX, "schedule read work mux %p\n", m);
                        queue_work(v9fs_mux_wq, &m->rq);
                }
        }

        if (n & POLLOUT) {
                set_bit(Wpending, &m->wsched);
                dprintk(DEBUG_MUX, "mux %p can write\n", m);
                if ((m->wsize || !list_empty(&m->unsent_req_list))
                    && !test_and_set_bit(Wworksched, &m->wsched)) {
                        dprintk(DEBUG_MUX, "schedule write work mux %p\n", m);
                        queue_work(v9fs_mux_wq, &m->wq);
                }
        }
}

/**
 * v9fs_poll_proc - polls all v9fs transports for new events and queues
 *      the appropriate work to the work queue
 */
static int v9fs_poll_proc(void *a)
{
        struct v9fs_mux_data *m, *mtmp;
        struct v9fs_mux_poll_task *vpt;

        vpt = a;
        dprintk(DEBUG_MUX, "start %p %p\n", current, vpt);
        allow_signal(SIGKILL);
        while (!kthread_should_stop()) {
                set_current_state(TASK_INTERRUPTIBLE);
                if (signal_pending(current))
                        break;

                list_for_each_entry_safe(m, mtmp, &vpt->mux_list, mux_list) {
                        v9fs_poll_mux(m);
                }

                dprintk(DEBUG_MUX, "sleeping...\n");
                schedule_timeout(SCHED_TIMEOUT * HZ);
        }

        __set_current_state(TASK_RUNNING);
        dprintk(DEBUG_MUX, "finish\n");
        return 0;
}

/**
 * v9fs_write_work - called when a transport can send some data
 */
static void v9fs_write_work(struct work_struct *work)
{
        int n, err;
        struct v9fs_mux_data *m;
        struct v9fs_req *req;

        m = container_of(work, struct v9fs_mux_data, wq);

        if (m->err < 0) {
                clear_bit(Wworksched, &m->wsched);
                return;
        }

        if (!m->wsize) {
                if (list_empty(&m->unsent_req_list)) {
                        clear_bit(Wworksched, &m->wsched);
                        return;
                }

                spin_lock(&m->lock);
again:
                req = list_entry(m->unsent_req_list.next, struct v9fs_req,
                                 req_list);
                list_move_tail(&req->req_list, &m->req_list);
                if (req->err == ERREQFLUSH) {
                        /* skip flushed requests, but don't run off the
                         * end of the unsent list */
                        if (list_empty(&m->unsent_req_list)) {
                                spin_unlock(&m->lock);
                                clear_bit(Wworksched, &m->wsched);
                                return;
                        }
                        goto again;
                }

                m->wbuf = req->tcall->sdata;
                m->wsize = req->tcall->size;
                m->wpos = 0;
                dump_data(m->wbuf, m->wsize);
                spin_unlock(&m->lock);
        }

        dprintk(DEBUG_MUX, "mux %p pos %d size %d\n", m, m->wpos, m->wsize);
        clear_bit(Wpending, &m->wsched);
        err = m->trans->write(m->trans, m->wbuf + m->wpos, m->wsize - m->wpos);
        dprintk(DEBUG_MUX, "mux %p sent %d bytes\n", m, err);
        if (err == -EAGAIN) {
                clear_bit(Wworksched, &m->wsched);
                return;
        }

        if (err <= 0)
                goto error;

        m->wpos += err;
        if (m->wpos == m->wsize)
                m->wpos = m->wsize = 0;

        if (m->wsize == 0 && !list_empty(&m->unsent_req_list)) {
                if (test_and_clear_bit(Wpending, &m->wsched))
                        n = POLLOUT;
                else
                        n = m->trans->poll(m->trans, NULL);

                if (n & POLLOUT) {
                        dprintk(DEBUG_MUX, "schedule write work mux %p\n", m);
                        queue_work(v9fs_mux_wq, &m->wq);
                } else
                        clear_bit(Wworksched, &m->wsched);
        } else
                clear_bit(Wworksched, &m->wsched);

        return;

error:
        v9fs_mux_cancel(m, err);
        clear_bit(Wworksched, &m->wsched);
}

static void process_request(struct v9fs_mux_data *m, struct v9fs_req *req)
{
        int ecode;
        struct v9fs_str *ename;

        if (!req->err && req->rcall->id == RERROR) {
                ecode = req->rcall->params.rerror.errno;
                ename = &req->rcall->params.rerror.error;

                dprintk(DEBUG_MUX, "Rerror %.*s\n", ename->len, ename->str);

                if (*m->extended)
                        req->err = -ecode;

                if (!req->err) {
                        req->err = v9fs_errstr2errno(ename->str, ename->len);

                        if (!req->err) {        /* string match failed */
                                PRINT_FCALL_ERROR("unknown error", req->rcall);
                        }

                        if (!req->err)
                                req->err = -ESERVERFAULT;
                }
        } else if (req->tcall && req->rcall->id != req->tcall->id + 1) {
                dprintk(DEBUG_ERROR, "fcall mismatch: expected %d, got %d\n",
                        req->tcall->id + 1, req->rcall->id);
                if (!req->err)
                        req->err = -EIO;
        }
}
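
/*
 * Example of the conversion above (hedged): in extended (9P2000.u) mode
 * Rerror carries a numeric errno which is used directly, e.g. a reply
 * of Rerror "permission denied" errno=1 becomes req->err = -1 (-EPERM).
 * In plain 9P2000 only the string is present, so v9fs_errstr2errno()
 * maps a string such as "permission denied" to its errno, and an
 * unrecognized string falls back to -ESERVERFAULT.
 */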

/**
 * v9fs_read_work - called when there is some data to be read from a transport
 */
static void v9fs_read_work(struct work_struct *work)
{
        int n, err;
        struct v9fs_mux_data *m;
        struct v9fs_req *req, *rptr, *rreq;
        struct v9fs_fcall *rcall;
        char *rbuf;

        m = container_of(work, struct v9fs_mux_data, rq);

        if (m->err < 0)
                return;

        rcall = NULL;
        dprintk(DEBUG_MUX, "start mux %p pos %d\n", m, m->rpos);

        if (!m->rcall) {
                m->rcall =
                    kmalloc(sizeof(struct v9fs_fcall) + m->msize, GFP_KERNEL);
                if (!m->rcall) {
                        err = -ENOMEM;
                        goto error;
                }

                m->rbuf = (char *)m->rcall + sizeof(struct v9fs_fcall);
                m->rpos = 0;
        }

        clear_bit(Rpending, &m->wsched);
        err = m->trans->read(m->trans, m->rbuf + m->rpos, m->msize - m->rpos);
        dprintk(DEBUG_MUX, "mux %p got %d bytes\n", m, err);
        if (err == -EAGAIN) {
                clear_bit(Rworksched, &m->wsched);
                return;
        }

        if (err <= 0)
                goto error;

        m->rpos += err;
        while (m->rpos > 4) {
                n = le32_to_cpu(*(__le32 *) m->rbuf);
                if (n >= m->msize) {
                        dprintk(DEBUG_ERROR,
                                "requested packet size too big: %d\n", n);
                        err = -EIO;
                        goto error;
                }

                if (m->rpos < n)
                        break;

                dump_data(m->rbuf, n);
                err = v9fs_deserialize_fcall(m->rbuf, n, m->rcall,
                                             *m->extended);
                if (err < 0)
                        goto error;

                if ((v9fs_debug_level & DEBUG_FCALL) == DEBUG_FCALL) {
                        char buf[150];

                        v9fs_printfcall(buf, sizeof(buf), m->rcall,
                                        *m->extended);
                        printk(KERN_NOTICE ">>> %p %s\n", m, buf);
                }

                rcall = m->rcall;
                rbuf = m->rbuf;
                if (m->rpos > n) {
                        m->rcall = kmalloc(sizeof(struct v9fs_fcall) + m->msize,
                                           GFP_KERNEL);
                        if (!m->rcall) {
                                err = -ENOMEM;
                                goto error;
                        }

                        m->rbuf = (char *)m->rcall + sizeof(struct v9fs_fcall);
                        memmove(m->rbuf, rbuf + n, m->rpos - n);
                        m->rpos -= n;
                } else {
                        m->rcall = NULL;
                        m->rbuf = NULL;
                        m->rpos = 0;
                }

                dprintk(DEBUG_MUX, "mux %p fcall id %d tag %d\n", m, rcall->id,
                        rcall->tag);

                req = NULL;
                spin_lock(&m->lock);
                list_for_each_entry_safe(rreq, rptr, &m->req_list, req_list) {
                        if (rreq->tag == rcall->tag) {
                                req = rreq;
                                if (req->flush != Flushing)
                                        list_del(&req->req_list);
                                break;
                        }
                }
                spin_unlock(&m->lock);

                if (req) {
                        req->rcall = rcall;
                        process_request(m, req);

                        if (req->flush != Flushing) {
                                if (req->cb)
                                        (*req->cb) (req, req->cba);
                                else
                                        kfree(req->rcall);

                                wake_up(&m->equeue);
                        }
                } else {
                        if (err >= 0 && rcall->id != RFLUSH)
                                dprintk(DEBUG_ERROR,
                                        "unexpected response mux %p id %d tag %d\n",
                                        m, rcall->id, rcall->tag);
                        kfree(rcall);
                }
        }

        if (!list_empty(&m->req_list)) {
                if (test_and_clear_bit(Rpending, &m->wsched))
                        n = POLLIN;
                else
                        n = m->trans->poll(m->trans, NULL);

                if (n & POLLIN) {
                        dprintk(DEBUG_MUX, "schedule read work mux %p\n", m);
                        queue_work(v9fs_mux_wq, &m->rq);
                } else
                        clear_bit(Rworksched, &m->wsched);
        } else
                clear_bit(Rworksched, &m->wsched);

        return;

error:
        v9fs_mux_cancel(m, err);
        clear_bit(Rworksched, &m->wsched);
}

/**
 * v9fs_send_request - send a 9P request
 * @m: mux data
 * @tc: request to be sent
 * @cb: callback function to call when the response is received
 * @cba: parameter to pass to the callback function
 *
 * The function can sleep until the request is scheduled for sending and
 * can be interrupted.  Returning does not guarantee the request was sent
 * successfully.  On failure an ERR_PTR()-encoded error is returned.
 */
static struct v9fs_req *v9fs_send_request(struct v9fs_mux_data *m,
                                          struct v9fs_fcall *tc,
                                          v9fs_mux_req_callback cb, void *cba)
{
        int n;
        struct v9fs_req *req;

        dprintk(DEBUG_MUX, "mux %p task %p tcall %p id %d\n", m, current,
                tc, tc->id);
        if (m->err < 0)
                return ERR_PTR(m->err);

        req = kmalloc(sizeof(struct v9fs_req), GFP_KERNEL);
        if (!req)
                return ERR_PTR(-ENOMEM);

        if (tc->id == TVERSION)
                n = V9FS_NOTAG;
        else {
                n = v9fs_mux_get_tag(m);
                if (n == V9FS_NOTAG) {
                        /* tag pool exhausted; don't leak req (the old
                         * "n < 0" check could never trigger because the
                         * tag is an unsigned 16-bit value) */
                        kfree(req);
                        return ERR_PTR(-ENOMEM);
                }
        }

        v9fs_set_tag(tc, n);
        if ((v9fs_debug_level & DEBUG_FCALL) == DEBUG_FCALL) {
                char buf[150];

                v9fs_printfcall(buf, sizeof(buf), tc, *m->extended);
                printk(KERN_NOTICE "<<< %p %s\n", m, buf);
        }

        spin_lock_init(&req->lock);
        req->tag = n;
        req->tcall = tc;
        req->rcall = NULL;
        req->err = 0;
        req->cb = cb;
        req->cba = cba;
        req->flush = None;

        spin_lock(&m->lock);
        list_add_tail(&req->req_list, &m->unsent_req_list);
        spin_unlock(&m->lock);

        if (test_and_clear_bit(Wpending, &m->wsched))
                n = POLLOUT;
        else
                n = m->trans->poll(m->trans, NULL);

        if ((n & POLLOUT) && !test_and_set_bit(Wworksched, &m->wsched))
                queue_work(v9fs_mux_wq, &m->wq);

        return req;
}

static void v9fs_mux_free_request(struct v9fs_mux_data *m, struct v9fs_req *req)
{
        v9fs_mux_put_tag(m, req->tag);
        kfree(req);
}

static void v9fs_mux_flush_cb(struct v9fs_req *freq, void *a)
{
        int tag;
        struct v9fs_mux_data *m;
        struct v9fs_req *req, *rreq, *rptr;

        m = a;
        dprintk(DEBUG_MUX, "mux %p tc %p rc %p err %d oldtag %d\n", m,
                freq->tcall, freq->rcall, freq->err,
                freq->tcall->params.tflush.oldtag);

        spin_lock(&m->lock);
        tag = freq->tcall->params.tflush.oldtag;
        req = NULL;
        list_for_each_entry_safe(rreq, rptr, &m->req_list, req_list) {
                if (rreq->tag == tag) {
                        req = rreq;
                        list_del(&req->req_list);
                        break;
                }
        }
        spin_unlock(&m->lock);

        if (req) {
                spin_lock(&req->lock);
                req->flush = Flushed;
                spin_unlock(&req->lock);

                if (req->cb)
                        (*req->cb) (req, req->cba);
                else
                        kfree(req->rcall);

                wake_up(&m->equeue);
        }

        kfree(freq->tcall);
        kfree(freq->rcall);
        v9fs_mux_free_request(m, freq);
}

static int
v9fs_mux_flush_request(struct v9fs_mux_data *m, struct v9fs_req *req)
{
        struct v9fs_fcall *fc;
        struct v9fs_req *rreq, *rptr;

        dprintk(DEBUG_MUX, "mux %p req %p tag %d\n", m, req, req->tag);

        /* if a response was received for a request, do nothing */
        spin_lock(&req->lock);
        if (req->rcall || req->err) {
                spin_unlock(&req->lock);
                dprintk(DEBUG_MUX, "mux %p req %p response already received\n",
                        m, req);
                return 0;
        }

        req->flush = Flushing;
        spin_unlock(&req->lock);

        spin_lock(&m->lock);
        /* if the request is not sent yet, just remove it from the list */
        list_for_each_entry_safe(rreq, rptr, &m->unsent_req_list, req_list) {
                if (rreq->tag == req->tag) {
                        dprintk(DEBUG_MUX,
                                "mux %p req %p request is not sent yet\n",
                                m, req);
                        list_del(&rreq->req_list);
                        req->flush = Flushed;
                        spin_unlock(&m->lock);
                        if (req->cb)
                                (*req->cb) (req, req->cba);
                        return 0;
                }
        }
        spin_unlock(&m->lock);

        clear_thread_flag(TIF_SIGPENDING);
        fc = v9fs_create_tflush(req->tag);
        v9fs_send_request(m, fc, v9fs_mux_flush_cb, m);
        return 1;
}
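
/*
 * The exchange above follows standard 9P flush semantics: the client
 * sends Tflush with oldtag naming the outstanding request; the server
 * must eventually answer with Rflush, and only then may the old tag be
 * reused.  Sketch of the message flow for an interrupted write:
 *
 *      client                          server
 *      Twrite tag=5          ------>
 *      (caller interrupted)
 *      Tflush tag=6 oldtag=5 ------>
 *                            <------  Rwrite tag=5  (may still arrive)
 *                            <------  Rflush tag=6
 */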

static void
v9fs_mux_rpc_cb(struct v9fs_req *req, void *a)
{
        struct v9fs_mux_rpc *r;

        dprintk(DEBUG_MUX, "req %p r %p\n", req, a);
        r = a;
        r->rcall = req->rcall;
        r->err = req->err;

        if (req->flush != None && !req->err)
                r->err = -ERESTARTSYS;

        wake_up(&r->wqueue);
}

/**
 * v9fs_mux_rpc - send a 9P request and wait until a response is available
 * @m: mux data
 * @tc: request to be sent
 * @rc: pointer where a pointer to the response is stored
 *
 * The function can be interrupted.
 */
int
v9fs_mux_rpc(struct v9fs_mux_data *m, struct v9fs_fcall *tc,
             struct v9fs_fcall **rc)
{
        int err, sigpending;
        unsigned long flags;
        struct v9fs_req *req;
        struct v9fs_mux_rpc r;

        r.err = 0;
        r.tcall = tc;
        r.rcall = NULL;
        r.m = m;
        init_waitqueue_head(&r.wqueue);

        if (rc)
                *rc = NULL;

        sigpending = 0;
        if (signal_pending(current)) {
                sigpending = 1;
                clear_thread_flag(TIF_SIGPENDING);
        }

        req = v9fs_send_request(m, tc, v9fs_mux_rpc_cb, &r);
        if (IS_ERR(req)) {
                err = PTR_ERR(req);
                dprintk(DEBUG_MUX, "error %d\n", err);
                return err;
        }

        err = wait_event_interruptible(r.wqueue, r.rcall != NULL || r.err < 0);
        if (r.err < 0)
                err = r.err;

        if (err == -ERESTARTSYS && m->trans->status == Connected
            && m->err == 0) {
                if (v9fs_mux_flush_request(m, req)) {
                        /* wait until we get the response to the flush message */
                        do {
                                clear_thread_flag(TIF_SIGPENDING);
                                err = wait_event_interruptible(r.wqueue,
                                        r.rcall || r.err);
                        } while (!r.rcall && !r.err && err == -ERESTARTSYS &&
                                m->trans->status == Connected && !m->err);

                        err = -ERESTARTSYS;
                }
                sigpending = 1;
        }

        if (sigpending) {
                spin_lock_irqsave(&current->sighand->siglock, flags);
                recalc_sigpending();
                spin_unlock_irqrestore(&current->sighand->siglock, flags);
        }

        if (rc)
                *rc = r.rcall;
        else
                kfree(r.rcall);

        v9fs_mux_free_request(m, req);
        if (err > 0)
                err = -EIO;

        return err;
}
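
/*
 * Illustrative caller sketch: the fcall constructors from conv.h build
 * the request, v9fs_mux_rpc() blocks for the reply, and on success the
 * caller owns *rc.  v9fs_create_tclunk() is assumed from conv.h; error
 * handling is abbreviated.
 *
 *      struct v9fs_fcall *tc, *rc = NULL;
 *      int err;
 *
 *      tc = v9fs_create_tclunk(fid);
 *      if (IS_ERR(tc))
 *              return PTR_ERR(tc);
 *      err = v9fs_mux_rpc(m, tc, &rc);
 *      kfree(tc);
 *      kfree(rc);
 *      return err;
 */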

#if 0
/**
 * v9fs_mux_rpcnb - send a 9P request without waiting for a response
 * @m: mux data
 * @tc: request to be sent
 * @cb: callback function to be called when the response arrives
 * @a: value to pass to the callback function
 */
int v9fs_mux_rpcnb(struct v9fs_mux_data *m, struct v9fs_fcall *tc,
                   v9fs_mux_req_callback cb, void *a)
{
        int err;
        struct v9fs_req *req;

        req = v9fs_send_request(m, tc, cb, a);
        if (IS_ERR(req)) {
                err = PTR_ERR(req);
                dprintk(DEBUG_MUX, "error %d\n", err);
                return err;
        }

        dprintk(DEBUG_MUX, "mux %p tc %p tag %d\n", m, tc, req->tag);
        return 0;
}
#endif  /*  0  */

/**
 * v9fs_mux_cancel - cancel all pending requests with error
 * @m: mux data
 * @err: error code
 */
void v9fs_mux_cancel(struct v9fs_mux_data *m, int err)
{
        struct v9fs_req *req, *rtmp;
        LIST_HEAD(cancel_list);

        dprintk(DEBUG_ERROR, "mux %p err %d\n", m, err);
        m->err = err;
        spin_lock(&m->lock);
        list_for_each_entry_safe(req, rtmp, &m->req_list, req_list) {
                list_move(&req->req_list, &cancel_list);
        }
        list_for_each_entry_safe(req, rtmp, &m->unsent_req_list, req_list) {
                list_move(&req->req_list, &cancel_list);
        }
        spin_unlock(&m->lock);

        list_for_each_entry_safe(req, rtmp, &cancel_list, req_list) {
                list_del(&req->req_list);
                if (!req->err)
                        req->err = err;

                if (req->cb)
                        (*req->cb) (req, req->cba);
                else
                        kfree(req->rcall);
        }

        wake_up(&m->equeue);
}

static u16 v9fs_mux_get_tag(struct v9fs_mux_data *m)
{
        int tag;

        tag = v9fs_get_idpool(&m->tagpool);
        if (tag < 0)
                return V9FS_NOTAG;
        else
                return (u16) tag;
}

static void v9fs_mux_put_tag(struct v9fs_mux_data *m, u16 tag)
{
        if (tag != V9FS_NOTAG && v9fs_check_idpool(tag, &m->tagpool))
                v9fs_put_idpool(tag, &m->tagpool);
}
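
/*
 * Tag lifecycle sketch: v9fs_send_request() draws a tag from the
 * session's idpool (or uses V9FS_NOTAG for Tversion), stamps it into
 * the outgoing fcall, and v9fs_mux_free_request() returns it to the
 * pool once the request is done:
 *
 *      n = v9fs_mux_get_tag(m);        // idpool id: 0, 1, 2, ...
 *      v9fs_set_tag(tc, n);
 *      ... send, wait for the matching rcall->tag ...
 *      v9fs_mux_put_tag(m, n);         // id goes back to the pool
 */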