/*
 * linux/net/sunrpc/xprtsock.c
 *
 * Client-side transport implementation for sockets.
 *
 * TCP callback races fixes (C) 1998 Red Hat Software <alan@redhat.com>
 * TCP send fixes (C) 1998 Red Hat Software <alan@redhat.com>
 * TCP NFS related read + write fixes
 *  (C) 1999 Dave Airlie, University of Limerick, Ireland <airlied@linux.ie>
 *
 * Rewrite of large parts of the code in order to stabilize TCP stuff.
 * Fix behaviour when socket buffer is full.
 *  (C) 1999 Trond Myklebust <trond.myklebust@fys.uio.no>
 *
 * IP socket transport implementation, (C) 2005 Chuck Lever <cel@netapp.com>
 */

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/capability.h>
#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/errno.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/net.h>
#include <linux/mm.h>
#include <linux/udp.h>
#include <linux/tcp.h>
#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/sched.h>
#include <linux/file.h>

#include <net/sock.h>
#include <net/checksum.h>
#include <net/udp.h>
#include <net/tcp.h>

/*
 * xprtsock tunables
 */
unsigned int xprt_udp_slot_table_entries = RPC_DEF_SLOT_TABLE;
unsigned int xprt_tcp_slot_table_entries = RPC_DEF_SLOT_TABLE;

unsigned int xprt_min_resvport = RPC_DEF_MIN_RESVPORT;
unsigned int xprt_max_resvport = RPC_DEF_MAX_RESVPORT;

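/*
 * These knobs are meant to be tuned at run time; in most trees they are
 * wired up to sysctls under /proc/sys/sunrpc (the sysctl table itself
 * lives outside this file).
 */
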
/*
 * How many times to try sending a request on a socket before waiting
 * for the socket buffer to clear.
 */
#define XS_SENDMSG_RETRY        (10U)

/*
 * Time out for an RPC UDP socket connect.  UDP socket connects are
 * synchronous, but we set a timeout anyway in case of resource
 * exhaustion on the local host.
 */
#define XS_UDP_CONN_TO          (5U * HZ)

/*
 * Wait duration for an RPC TCP connection to be established.  Solaris
 * NFS over TCP uses 60 seconds, for example, which is in line with how
 * long a server takes to reboot.
 */
#define XS_TCP_CONN_TO          (60U * HZ)

/*
 * Wait duration for a reply from the RPC portmapper.
 */
#define XS_BIND_TO              (60U * HZ)

/*
 * Delay if a UDP socket connect error occurs.  This is most likely some
 * kind of resource problem on the local host.
 */
#define XS_UDP_REEST_TO         (2U * HZ)

/*
 * The reestablish timeout allows clients to delay for a bit before attempting
 * to reconnect to a server that just dropped our connection.
 *
 * We implement an exponential backoff when trying to reestablish a TCP
 * transport connection with the server.  Some servers like to drop a TCP
 * connection when they are overworked, so we start with a short timeout and
 * increase over time if the server is down or not responding.
 */
#define XS_TCP_INIT_REEST_TO    (3U * HZ)
#define XS_TCP_MAX_REEST_TO     (5U * 60 * HZ)

/*
 * TCP idle timeout; client drops the transport socket if it is idle
 * for this long.  Note that we also timeout UDP sockets to prevent
 * holding port numbers when there is no RPC traffic.
 */
#define XS_IDLE_DISC_TO         (5U * 60 * HZ)

#ifdef RPC_DEBUG
# undef  RPC_DEBUG_DATA
# define RPCDBG_FACILITY        RPCDBG_TRANS
#endif

#ifdef RPC_DEBUG_DATA
static void xs_pktdump(char *msg, u32 *packet, unsigned int count)
{
        u8 *buf = (u8 *) packet;
        int j;

        dprintk("RPC:      %s\n", msg);
        for (j = 0; j < count && j < 128; j += 4) {
                if (!(j & 31)) {
                        if (j)
                                dprintk("\n");
                        dprintk("0x%04x ", j);
                }
                dprintk("%02x%02x%02x%02x ",
                        buf[j], buf[j+1], buf[j+2], buf[j+3]);
        }
        dprintk("\n");
}
#else
static inline void xs_pktdump(char *msg, u32 *packet, unsigned int count)
{
        /* NOP */
}
#endif

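/*
 * Pre-format the peer's IPv4 address, port and protocol as printable
 * strings, so that later dprintk and /proc output does not need to
 * allocate memory.
 */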
static void xs_format_peer_addresses(struct rpc_xprt *xprt)
{
        struct sockaddr_in *addr = (struct sockaddr_in *) &xprt->addr;
        char *buf;

        buf = kzalloc(20, GFP_KERNEL);
        if (buf) {
                snprintf(buf, 20, "%u.%u.%u.%u",
                                NIPQUAD(addr->sin_addr.s_addr));
        }
        xprt->address_strings[RPC_DISPLAY_ADDR] = buf;

        buf = kzalloc(8, GFP_KERNEL);
        if (buf) {
                snprintf(buf, 8, "%u",
                                ntohs(addr->sin_port));
        }
        xprt->address_strings[RPC_DISPLAY_PORT] = buf;

        if (xprt->prot == IPPROTO_UDP)
                xprt->address_strings[RPC_DISPLAY_PROTO] = "udp";
        else
                xprt->address_strings[RPC_DISPLAY_PROTO] = "tcp";

        buf = kzalloc(48, GFP_KERNEL);
        if (buf) {
                snprintf(buf, 48, "addr=%u.%u.%u.%u port=%u proto=%s",
                        NIPQUAD(addr->sin_addr.s_addr),
                        ntohs(addr->sin_port),
                        xprt->prot == IPPROTO_UDP ? "udp" : "tcp");
        }
        xprt->address_strings[RPC_DISPLAY_ALL] = buf;
}

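/*
 * Free the strings built by xs_format_peer_addresses().  Note that the
 * RPC_DISPLAY_PROTO entry points at a static string and is not freed.
 */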
static void xs_free_peer_addresses(struct rpc_xprt *xprt)
{
        kfree(xprt->address_strings[RPC_DISPLAY_ADDR]);
        kfree(xprt->address_strings[RPC_DISPLAY_PORT]);
        kfree(xprt->address_strings[RPC_DISPLAY_ALL]);
}

#define XS_SENDMSG_FLAGS        (MSG_DONTWAIT | MSG_NOSIGNAL)

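/*
 * Send the head of an xdr_buf, starting @base bytes in.  For UDP a
 * destination address is supplied; MSG_MORE is set whenever page data
 * or a tail still follows the head.
 */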
static inline int xs_send_head(struct socket *sock, struct sockaddr *addr, int addrlen, struct xdr_buf *xdr, unsigned int base, unsigned int len)
{
        struct kvec iov = {
                .iov_base       = xdr->head[0].iov_base + base,
                .iov_len        = len - base,
        };
        struct msghdr msg = {
                .msg_name       = addr,
                .msg_namelen    = addrlen,
                .msg_flags      = XS_SENDMSG_FLAGS,
        };

        if (xdr->len > len)
                msg.msg_flags |= MSG_MORE;

        if (likely(iov.iov_len))
                return kernel_sendmsg(sock, &msg, &iov, 1, iov.iov_len);
        return kernel_sendmsg(sock, &msg, NULL, 0, 0);
}

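/*
 * Send the tail of an xdr_buf, starting @base bytes into the tail
 * iovec.  The tail is always the last piece, so MSG_MORE is never set.
 */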
static int xs_send_tail(struct socket *sock, struct xdr_buf *xdr, unsigned int base, unsigned int len)
{
        struct kvec iov = {
                .iov_base       = xdr->tail[0].iov_base + base,
                .iov_len        = len - base,
        };
        struct msghdr msg = {
                .msg_flags      = XS_SENDMSG_FLAGS,
        };

        return kernel_sendmsg(sock, &msg, &iov, 1, iov.iov_len);
}

/**
 * xs_sendpages - write pages directly to a socket
 * @sock: socket to send on
 * @addr: UDP only -- address of destination
 * @addrlen: UDP only -- length of destination address
 * @xdr: buffer containing this request
 * @base: starting position in the buffer
 *
 */
static inline int xs_sendpages(struct socket *sock, struct sockaddr *addr, int addrlen, struct xdr_buf *xdr, unsigned int base)
{
        struct page **ppage = xdr->pages;
        unsigned int len, pglen = xdr->page_len;
        int err, ret = 0;
        ssize_t (*sendpage)(struct socket *, struct page *, int, size_t, int);

        if (unlikely(!sock))
                return -ENOTCONN;

        clear_bit(SOCK_ASYNC_NOSPACE, &sock->flags);

        len = xdr->head[0].iov_len;
        if (base < len || (addr != NULL && base == 0)) {
                err = xs_send_head(sock, addr, addrlen, xdr, base, len);
                if (ret == 0)
                        ret = err;
                else if (err > 0)
                        ret += err;
                if (err != (len - base))
                        goto out;
                base = 0;
        } else
                base -= len;

        if (unlikely(pglen == 0))
                goto copy_tail;
        if (unlikely(base >= pglen)) {
                base -= pglen;
                goto copy_tail;
        }
        if (base || xdr->page_base) {
                pglen -= base;
                base += xdr->page_base;
                ppage += base >> PAGE_CACHE_SHIFT;
                base &= ~PAGE_CACHE_MASK;
        }

        sendpage = sock->ops->sendpage ? : sock_no_sendpage;
        do {
                int flags = XS_SENDMSG_FLAGS;

                len = PAGE_CACHE_SIZE;
                if (base)
                        len -= base;
                if (pglen < len)
                        len = pglen;

                if (pglen != len || xdr->tail[0].iov_len != 0)
                        flags |= MSG_MORE;

                /* Hmm... We might be dealing with highmem pages */
                if (PageHighMem(*ppage))
                        sendpage = sock_no_sendpage;
                err = sendpage(sock, *ppage, base, len, flags);
                if (ret == 0)
                        ret = err;
                else if (err > 0)
                        ret += err;
                if (err != len)
                        goto out;
                base = 0;
                ppage++;
        } while ((pglen -= len) != 0);
copy_tail:
        len = xdr->tail[0].iov_len;
        if (base < len) {
                err = xs_send_tail(sock, xdr, base, len);
                if (ret == 0)
                        ret = err;
                else if (err > 0)
                        ret += err;
        }
out:
        return ret;
}

/**
 * xs_nospace - place task on wait queue if transmit was incomplete
 * @task: task to put to sleep
 *
 */
static void xs_nospace(struct rpc_task *task)
{
        struct rpc_rqst *req = task->tk_rqstp;
        struct rpc_xprt *xprt = req->rq_xprt;

        dprintk("RPC: %4d xmit incomplete (%u left of %u)\n",
                        task->tk_pid, req->rq_slen - req->rq_bytes_sent,
                        req->rq_slen);

        if (test_bit(SOCK_ASYNC_NOSPACE, &xprt->sock->flags)) {
                /* Protect against races with write_space */
                spin_lock_bh(&xprt->transport_lock);

                /* Don't race with disconnect */
                if (!xprt_connected(xprt))
                        task->tk_status = -ENOTCONN;
                else if (test_bit(SOCK_NOSPACE, &xprt->sock->flags))
                        xprt_wait_for_buffer_space(task);

                spin_unlock_bh(&xprt->transport_lock);
        } else
                /* Keep holding the socket if it is blocked */
                rpc_delay(task, HZ>>4);
}

/**
 * xs_udp_send_request - write an RPC request to a UDP socket
 * @task: address of RPC task that manages the state of an RPC request
 *
 * Return values:
 *        0:    The request has been sent
 *   EAGAIN:    The socket was blocked, please call again later to
 *              complete the request
 * ENOTCONN:    Caller needs to invoke connect logic then call again
 *    other:    Some other error occurred, the request was not sent
 */
static int xs_udp_send_request(struct rpc_task *task)
{
        struct rpc_rqst *req = task->tk_rqstp;
        struct rpc_xprt *xprt = req->rq_xprt;
        struct xdr_buf *xdr = &req->rq_snd_buf;
        int status;

        xs_pktdump("packet data:",
                                req->rq_svec->iov_base,
                                req->rq_svec->iov_len);

        req->rq_xtime = jiffies;
        status = xs_sendpages(xprt->sock, (struct sockaddr *) &xprt->addr,
                                xprt->addrlen, xdr, req->rq_bytes_sent);

        dprintk("RPC:      xs_udp_send_request(%u) = %d\n",
                        xdr->len - req->rq_bytes_sent, status);

        if (likely(status >= (int) req->rq_slen))
                return 0;

        /* Still some bytes left; set up for a retry later. */
        if (status > 0)
                status = -EAGAIN;

        switch (status) {
        case -ENETUNREACH:
        case -EPIPE:
        case -ECONNREFUSED:
                /* When the server has died, an ICMP port unreachable message
                 * prompts ECONNREFUSED. */
                break;
        case -EAGAIN:
                xs_nospace(task);
                break;
        default:
                dprintk("RPC:      sendmsg returned unrecognized error %d\n",
                        -status);
                break;
        }

        return status;
}

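/*
 * Fill in the RPC over TCP record marker at the front of the send
 * buffer: the fragment length, with the "last fragment" bit set.
 */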
static inline void xs_encode_tcp_record_marker(struct xdr_buf *buf)
{
        u32 reclen = buf->len - sizeof(rpc_fraghdr);
        rpc_fraghdr *base = buf->head[0].iov_base;
        *base = htonl(RPC_LAST_STREAM_FRAGMENT | reclen);
}

/**
 * xs_tcp_send_request - write an RPC request to a TCP socket
 * @task: address of RPC task that manages the state of an RPC request
 *
 * Return values:
 *        0:    The request has been sent
 *   EAGAIN:    The socket was blocked, please call again later to
 *              complete the request
 * ENOTCONN:    Caller needs to invoke connect logic then call again
 *    other:    Some other error occurred, the request was not sent
 *
 * XXX: In the case of soft timeouts, should we eventually give up
 *      if sendmsg is not able to make progress?
 */
static int xs_tcp_send_request(struct rpc_task *task)
{
        struct rpc_rqst *req = task->tk_rqstp;
        struct rpc_xprt *xprt = req->rq_xprt;
        struct xdr_buf *xdr = &req->rq_snd_buf;
        int status, retry = 0;

        xs_encode_tcp_record_marker(&req->rq_snd_buf);

        xs_pktdump("packet data:",
                                req->rq_svec->iov_base,
                                req->rq_svec->iov_len);

        /* Continue transmitting the packet/record. We must be careful
         * to cope with writespace callbacks arriving _after_ we have
         * called sendmsg(). */
        while (1) {
                req->rq_xtime = jiffies;
                status = xs_sendpages(xprt->sock, NULL, 0, xdr,
                                                req->rq_bytes_sent);

                dprintk("RPC:      xs_tcp_send_request(%u) = %d\n",
                                xdr->len - req->rq_bytes_sent, status);

                if (unlikely(status < 0))
                        break;

                /* If we've sent the entire packet, immediately
                 * reset the count of bytes sent. */
                req->rq_bytes_sent += status;
                task->tk_bytes_sent += status;
                if (likely(req->rq_bytes_sent >= req->rq_slen)) {
                        req->rq_bytes_sent = 0;
                        return 0;
                }

                status = -EAGAIN;
                if (retry++ > XS_SENDMSG_RETRY)
                        break;
        }

        switch (status) {
        case -EAGAIN:
                xs_nospace(task);
                break;
        case -ECONNREFUSED:
        case -ECONNRESET:
        case -ENOTCONN:
        case -EPIPE:
                status = -ENOTCONN;
                break;
        default:
                dprintk("RPC:      sendmsg returned unrecognized error %d\n",
                        -status);
                xprt_disconnect(xprt);
                break;
        }

        return status;
}

/**
 * xs_tcp_release_xprt - clean up after a tcp transmission
 * @xprt: transport
 * @task: rpc task
 *
 * This cleans up if an error causes us to abort the transmission of a request.
 * In this case, the socket may need to be reset in order to avoid confusing
 * the server.
 */
static void xs_tcp_release_xprt(struct rpc_xprt *xprt, struct rpc_task *task)
{
        struct rpc_rqst *req;

        if (task != xprt->snd_task)
                return;
        if (task == NULL)
                goto out_release;
        req = task->tk_rqstp;
        if (req->rq_bytes_sent == 0)
                goto out_release;
        if (req->rq_bytes_sent == req->rq_snd_buf.len)
                goto out_release;
        set_bit(XPRT_CLOSE_WAIT, &task->tk_xprt->state);
out_release:
        xprt_release_xprt(xprt, task);
}

/**
 * xs_close - close a socket
 * @xprt: transport
 *
 * This is used when all requests are complete; ie, no DRC state remains
 * on the server we want to save.
 */
static void xs_close(struct rpc_xprt *xprt)
{
        struct socket *sock = xprt->sock;
        struct sock *sk = xprt->inet;

        if (!sk)
                goto clear_close_wait;

        dprintk("RPC:      xs_close xprt %p\n", xprt);

        write_lock_bh(&sk->sk_callback_lock);
        xprt->inet = NULL;
        xprt->sock = NULL;

        sk->sk_user_data = NULL;
        sk->sk_data_ready = xprt->old_data_ready;
        sk->sk_state_change = xprt->old_state_change;
        sk->sk_write_space = xprt->old_write_space;
        write_unlock_bh(&sk->sk_callback_lock);

        sk->sk_no_check = 0;

        sock_release(sock);
clear_close_wait:
        smp_mb__before_clear_bit();
        clear_bit(XPRT_CLOSE_WAIT, &xprt->state);
        smp_mb__after_clear_bit();
}

/**
 * xs_destroy - prepare to shutdown a transport
 * @xprt: doomed transport
 *
 */
static void xs_destroy(struct rpc_xprt *xprt)
{
        dprintk("RPC:      xs_destroy xprt %p\n", xprt);

        cancel_delayed_work(&xprt->connect_worker);
        flush_scheduled_work();

        xprt_disconnect(xprt);
        xs_close(xprt);
        xs_free_peer_addresses(xprt);
        kfree(xprt->slot);
}

static inline struct rpc_xprt *xprt_from_sock(struct sock *sk)
{
        return (struct rpc_xprt *) sk->sk_user_data;
}

/**
 * xs_udp_data_ready - "data ready" callback for UDP sockets
 * @sk: socket with data to read
 * @len: how much data to read
 *
 */
static void xs_udp_data_ready(struct sock *sk, int len)
{
        struct rpc_task *task;
        struct rpc_xprt *xprt;
        struct rpc_rqst *rovr;
        struct sk_buff *skb;
        int err, repsize, copied;
        u32 _xid, *xp;

        read_lock(&sk->sk_callback_lock);
        dprintk("RPC:      xs_udp_data_ready...\n");
        if (!(xprt = xprt_from_sock(sk)))
                goto out;

        if ((skb = skb_recv_datagram(sk, 0, 1, &err)) == NULL)
                goto out;

        if (xprt->shutdown)
                goto dropit;

        repsize = skb->len - sizeof(struct udphdr);
        if (repsize < 4) {
                dprintk("RPC:      impossible RPC reply size %d!\n", repsize);
                goto dropit;
        }

        /* Copy the XID from the skb... */
        xp = skb_header_pointer(skb, sizeof(struct udphdr),
                                sizeof(_xid), &_xid);
        if (xp == NULL)
                goto dropit;

        /* Look up and lock the request corresponding to the given XID */
        spin_lock(&xprt->transport_lock);
        rovr = xprt_lookup_rqst(xprt, *xp);
        if (!rovr)
                goto out_unlock;
        task = rovr->rq_task;

        if ((copied = rovr->rq_private_buf.buflen) > repsize)
                copied = repsize;

        /* Suck it into the iovec, verify checksum if not done by hw. */
        if (csum_partial_copy_to_xdr(&rovr->rq_private_buf, skb))
                goto out_unlock;

        /* Something worked... */
        dst_confirm(skb->dst);

        xprt_adjust_cwnd(task, copied);
        xprt_update_rtt(task);
        xprt_complete_rqst(task, copied);

 out_unlock:
        spin_unlock(&xprt->transport_lock);
 dropit:
        skb_free_datagram(sk, skb);
 out:
        read_unlock(&sk->sk_callback_lock);
}

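/*
 * Copy up to @len bytes from the current skb into @p, advancing the
 * reader state.  Returns the number of bytes copied, or 0 on error.
 */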
static inline size_t xs_tcp_copy_data(skb_reader_t *desc, void *p, size_t len)
{
        if (len > desc->count)
                len = desc->count;
        if (skb_copy_bits(desc->skb, desc->offset, p, len)) {
                dprintk("RPC:      failed to copy %zu bytes from skb. %zu bytes remain\n",
                                len, desc->count);
                return 0;
        }
        desc->offset += len;
        desc->count -= len;
        dprintk("RPC:      copied %zu bytes from skb. %zu bytes remain\n",
                        len, desc->count);
        return len;
}

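/*
 * Read (possibly just part of) the 4-byte record marker that precedes
 * each RPC over TCP fragment, then extract the fragment length and the
 * "last fragment" bit from it.
 */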
static inline void xs_tcp_read_fraghdr(struct rpc_xprt *xprt, skb_reader_t *desc)
{
        size_t len, used;
        char *p;

        p = ((char *) &xprt->tcp_recm) + xprt->tcp_offset;
        len = sizeof(xprt->tcp_recm) - xprt->tcp_offset;
        used = xs_tcp_copy_data(desc, p, len);
        xprt->tcp_offset += used;
        if (used != len)
                return;

        xprt->tcp_reclen = ntohl(xprt->tcp_recm);
        if (xprt->tcp_reclen & RPC_LAST_STREAM_FRAGMENT)
                xprt->tcp_flags |= XPRT_LAST_FRAG;
        else
                xprt->tcp_flags &= ~XPRT_LAST_FRAG;
        xprt->tcp_reclen &= RPC_FRAGMENT_SIZE_MASK;

        xprt->tcp_flags &= ~XPRT_COPY_RECM;
        xprt->tcp_offset = 0;

        /* Sanity check of the record length */
        if (unlikely(xprt->tcp_reclen < 4)) {
                dprintk("RPC:      invalid TCP record fragment length\n");
                xprt_disconnect(xprt);
                return;
        }
        dprintk("RPC:      reading TCP record fragment of length %d\n",
                        xprt->tcp_reclen);
}

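/*
 * If the current fragment has been consumed, arrange to read the next
 * record marker; at the end of the last fragment of a reply, go back
 * to looking for a new XID.
 */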
static void xs_tcp_check_recm(struct rpc_xprt *xprt)
{
        dprintk("RPC:      xprt = %p, tcp_copied = %lu, tcp_offset = %u, tcp_reclen = %u, tcp_flags = %lx\n",
                        xprt, xprt->tcp_copied, xprt->tcp_offset, xprt->tcp_reclen, xprt->tcp_flags);
        if (xprt->tcp_offset == xprt->tcp_reclen) {
                xprt->tcp_flags |= XPRT_COPY_RECM;
                xprt->tcp_offset = 0;
                if (xprt->tcp_flags & XPRT_LAST_FRAG) {
                        xprt->tcp_flags &= ~XPRT_COPY_DATA;
                        xprt->tcp_flags |= XPRT_COPY_XID;
                        xprt->tcp_copied = 0;
                }
        }
}

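/*
 * Read (possibly just part of) the XID of the reply, which follows the
 * record marker at the start of each reply.
 */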
static inline void xs_tcp_read_xid(struct rpc_xprt *xprt, skb_reader_t *desc)
{
        size_t len, used;
        char *p;

        len = sizeof(xprt->tcp_xid) - xprt->tcp_offset;
        dprintk("RPC:      reading XID (%Zu bytes)\n", len);
        p = ((char *) &xprt->tcp_xid) + xprt->tcp_offset;
        used = xs_tcp_copy_data(desc, p, len);
        xprt->tcp_offset += used;
        if (used != len)
                return;
        xprt->tcp_flags &= ~XPRT_COPY_XID;
        xprt->tcp_flags |= XPRT_COPY_DATA;
        xprt->tcp_copied = 4;
        dprintk("RPC:      reading reply for XID %08x\n",
                                                ntohl(xprt->tcp_xid));
        xs_tcp_check_recm(xprt);
}

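/*
 * Copy reply data into the receive buffer of the request matching the
 * XID we just read.  On a copy error the request is left to time out;
 * once the reply is complete, the waiting task is woken up.
 */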
static inline void xs_tcp_read_request(struct rpc_xprt *xprt, skb_reader_t *desc)
{
        struct rpc_rqst *req;
        struct xdr_buf *rcvbuf;
        size_t len;
        ssize_t r;

        /* Find and lock the request corresponding to this xid */
        spin_lock(&xprt->transport_lock);
        req = xprt_lookup_rqst(xprt, xprt->tcp_xid);
        if (!req) {
                xprt->tcp_flags &= ~XPRT_COPY_DATA;
                dprintk("RPC:      XID %08x request not found!\n",
                                ntohl(xprt->tcp_xid));
                spin_unlock(&xprt->transport_lock);
                return;
        }

        rcvbuf = &req->rq_private_buf;
        len = desc->count;
        if (len > xprt->tcp_reclen - xprt->tcp_offset) {
                skb_reader_t my_desc;

                len = xprt->tcp_reclen - xprt->tcp_offset;
                memcpy(&my_desc, desc, sizeof(my_desc));
                my_desc.count = len;
                r = xdr_partial_copy_from_skb(rcvbuf, xprt->tcp_copied,
                                          &my_desc, xs_tcp_copy_data);
                desc->count -= r;
                desc->offset += r;
        } else
                r = xdr_partial_copy_from_skb(rcvbuf, xprt->tcp_copied,
                                          desc, xs_tcp_copy_data);

        if (r > 0) {
                xprt->tcp_copied += r;
                xprt->tcp_offset += r;
        }
        if (r != len) {
                /* Error when copying to the receive buffer,
                 * usually because we weren't able to allocate
                 * additional buffer pages. All we can do now
                 * is turn off XPRT_COPY_DATA, so the request
                 * will not receive any additional updates,
                 * and time out.
                 * Any remaining data from this record will
                 * be discarded.
                 */
                xprt->tcp_flags &= ~XPRT_COPY_DATA;
                dprintk("RPC:      XID %08x truncated request\n",
                                ntohl(xprt->tcp_xid));
                dprintk("RPC:      xprt = %p, tcp_copied = %lu, tcp_offset = %u, tcp_reclen = %u\n",
                                xprt, xprt->tcp_copied, xprt->tcp_offset, xprt->tcp_reclen);
                goto out;
        }

        dprintk("RPC:      XID %08x read %Zd bytes\n",
                        ntohl(xprt->tcp_xid), r);
        dprintk("RPC:      xprt = %p, tcp_copied = %lu, tcp_offset = %u, tcp_reclen = %u\n",
                        xprt, xprt->tcp_copied, xprt->tcp_offset, xprt->tcp_reclen);

        if (xprt->tcp_copied == req->rq_private_buf.buflen)
                xprt->tcp_flags &= ~XPRT_COPY_DATA;
        else if (xprt->tcp_offset == xprt->tcp_reclen) {
                if (xprt->tcp_flags & XPRT_LAST_FRAG)
                        xprt->tcp_flags &= ~XPRT_COPY_DATA;
        }

out:
        if (!(xprt->tcp_flags & XPRT_COPY_DATA))
                xprt_complete_rqst(req->rq_task, xprt->tcp_copied);
        spin_unlock(&xprt->transport_lock);
        xs_tcp_check_recm(xprt);
}

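/*
 * Throw away fragment data we have no use for, e.g. the remainder of a
 * record whose XID matched no outstanding request.
 */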
static inline void xs_tcp_read_discard(struct rpc_xprt *xprt, skb_reader_t *desc)
{
        size_t len;

        len = xprt->tcp_reclen - xprt->tcp_offset;
        if (len > desc->count)
                len = desc->count;
        desc->count -= len;
        desc->offset += len;
        xprt->tcp_offset += len;
        dprintk("RPC:      discarded %Zu bytes\n", len);
        xs_tcp_check_recm(xprt);
}

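/*
 * Pull data off the socket (called via tcp_read_sock()) and feed it
 * through the record marker/XID/data state machine above.  Returns
 * the number of bytes consumed.
 */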
static int xs_tcp_data_recv(read_descriptor_t *rd_desc, struct sk_buff *skb, unsigned int offset, size_t len)
{
        struct rpc_xprt *xprt = rd_desc->arg.data;
        skb_reader_t desc = {
                .skb    = skb,
                .offset = offset,
                .count  = len,
                .csum   = 0
        };

        dprintk("RPC:      xs_tcp_data_recv started\n");
        do {
                /* Read in a new fragment marker if necessary */
                /* Can we ever really expect to get completely empty fragments? */
                if (xprt->tcp_flags & XPRT_COPY_RECM) {
                        xs_tcp_read_fraghdr(xprt, &desc);
                        continue;
                }
                /* Read in the xid if necessary */
                if (xprt->tcp_flags & XPRT_COPY_XID) {
                        xs_tcp_read_xid(xprt, &desc);
                        continue;
                }
                /* Read in the request data */
                if (xprt->tcp_flags & XPRT_COPY_DATA) {
                        xs_tcp_read_request(xprt, &desc);
                        continue;
                }
                /* Skip over any trailing bytes on short reads */
                xs_tcp_read_discard(xprt, &desc);
        } while (desc.count);
        dprintk("RPC:      xs_tcp_data_recv done\n");
        return len - desc.count;
}

/**
 * xs_tcp_data_ready - "data ready" callback for TCP sockets
 * @sk: socket with data to read
 * @bytes: how much data to read
 *
 */
static void xs_tcp_data_ready(struct sock *sk, int bytes)
{
        struct rpc_xprt *xprt;
        read_descriptor_t rd_desc;

        read_lock(&sk->sk_callback_lock);
        dprintk("RPC:      xs_tcp_data_ready...\n");
        if (!(xprt = xprt_from_sock(sk)))
                goto out;
        if (xprt->shutdown)
                goto out;

        /* We use rd_desc to pass struct xprt to xs_tcp_data_recv */
        rd_desc.arg.data = xprt;
        rd_desc.count = 65536;
        tcp_read_sock(sk, &rd_desc, xs_tcp_data_recv);
out:
        read_unlock(&sk->sk_callback_lock);
}

/**
 * xs_tcp_state_change - callback to handle TCP socket state changes
 * @sk: socket whose state has changed
 *
 */
static void xs_tcp_state_change(struct sock *sk)
{
        struct rpc_xprt *xprt;

        read_lock(&sk->sk_callback_lock);
        if (!(xprt = xprt_from_sock(sk)))
                goto out;
        dprintk("RPC:      xs_tcp_state_change client %p...\n", xprt);
        dprintk("RPC:      state %x conn %d dead %d zapped %d\n",
                                sk->sk_state, xprt_connected(xprt),
                                sock_flag(sk, SOCK_DEAD),
                                sock_flag(sk, SOCK_ZAPPED));

        switch (sk->sk_state) {
        case TCP_ESTABLISHED:
                spin_lock_bh(&xprt->transport_lock);
                if (!xprt_test_and_set_connected(xprt)) {
                        /* Reset TCP record info */
                        xprt->tcp_offset = 0;
                        xprt->tcp_reclen = 0;
                        xprt->tcp_copied = 0;
                        xprt->tcp_flags = XPRT_COPY_RECM | XPRT_COPY_XID;
                        xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO;
                        xprt_wake_pending_tasks(xprt, 0);
                }
                spin_unlock_bh(&xprt->transport_lock);
                break;
        case TCP_SYN_SENT:
        case TCP_SYN_RECV:
                break;
        case TCP_CLOSE_WAIT:
                /* Try to schedule an autoclose RPC call */
                set_bit(XPRT_CLOSE_WAIT, &xprt->state);
                if (test_and_set_bit(XPRT_LOCKED, &xprt->state) == 0)
                        schedule_work(&xprt->task_cleanup);
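                /* fall through: let the default case mark us disconnected */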
        default:
                xprt_disconnect(xprt);
        }
 out:
        read_unlock(&sk->sk_callback_lock);
}

/**
 * xs_udp_write_space - callback invoked when socket buffer space
 *                             becomes available
 * @sk: socket whose state has changed
 *
 * Called when more output buffer space is available for this socket.
 * We try not to wake our writers until they can make "significant"
 * progress, otherwise we'll waste resources thrashing kernel_sendmsg
 * with a bunch of small requests.
 */
static void xs_udp_write_space(struct sock *sk)
{
        read_lock(&sk->sk_callback_lock);

        /* from net/core/sock.c:sock_def_write_space */
        if (sock_writeable(sk)) {
                struct socket *sock;
                struct rpc_xprt *xprt;

                if (unlikely(!(sock = sk->sk_socket)))
                        goto out;
                if (unlikely(!(xprt = xprt_from_sock(sk))))
                        goto out;
                if (unlikely(!test_and_clear_bit(SOCK_NOSPACE, &sock->flags)))
                        goto out;

                xprt_write_space(xprt);
        }

 out:
        read_unlock(&sk->sk_callback_lock);
}

/**
 * xs_tcp_write_space - callback invoked when socket buffer space
 *                             becomes available
 * @sk: socket whose state has changed
 *
 * Called when more output buffer space is available for this socket.
 * We try not to wake our writers until they can make "significant"
 * progress, otherwise we'll waste resources thrashing kernel_sendmsg
 * with a bunch of small requests.
 */
static void xs_tcp_write_space(struct sock *sk)
{
        read_lock(&sk->sk_callback_lock);

        /* from net/core/stream.c:sk_stream_write_space */
        if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk)) {
                struct socket *sock;
                struct rpc_xprt *xprt;

                if (unlikely(!(sock = sk->sk_socket)))
                        goto out;
                if (unlikely(!(xprt = xprt_from_sock(sk))))
                        goto out;
                if (unlikely(!test_and_clear_bit(SOCK_NOSPACE, &sock->flags)))
                        goto out;

                xprt_write_space(xprt);
        }

 out:
        read_unlock(&sk->sk_callback_lock);
}

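/*
 * Apply the requested send and receive buffer limits to the socket,
 * scaled by the number of request slots so that every request in
 * flight has room.
 */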
static void xs_udp_do_set_buffer_size(struct rpc_xprt *xprt)
{
        struct sock *sk = xprt->inet;

        if (xprt->rcvsize) {
                sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
                sk->sk_rcvbuf = xprt->rcvsize * xprt->max_reqs * 2;
        }
        if (xprt->sndsize) {
                sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
                sk->sk_sndbuf = xprt->sndsize * xprt->max_reqs * 2;
                sk->sk_write_space(sk);
        }
}

/**
 * xs_udp_set_buffer_size - set send and receive limits
 * @xprt: generic transport
 * @sndsize: requested size of send buffer, in bytes
 * @rcvsize: requested size of receive buffer, in bytes
 *
 * Set socket send and receive buffer size limits.
 */
static void xs_udp_set_buffer_size(struct rpc_xprt *xprt, size_t sndsize, size_t rcvsize)
{
        xprt->sndsize = 0;
        if (sndsize)
                xprt->sndsize = sndsize + 1024;
        xprt->rcvsize = 0;
        if (rcvsize)
                xprt->rcvsize = rcvsize + 1024;

        xs_udp_do_set_buffer_size(xprt);
}

/**
 * xs_udp_timer - called when a retransmit timeout occurs on a UDP transport
 * @task: task that timed out
 *
 * Adjust the congestion window after a retransmit timeout has occurred.
 */
static void xs_udp_timer(struct rpc_task *task)
{
        xprt_adjust_cwnd(task, -ETIMEDOUT);
}

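/*
 * Pick a random port in [xprt_min_resvport, xprt_max_resvport) as the
 * starting point for the reserved-port bind search below.
 */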
static unsigned short xs_get_random_port(void)
{
        unsigned short range = xprt_max_resvport - xprt_min_resvport;
        unsigned short rand = (unsigned short) net_random() % range;
        return rand + xprt_min_resvport;
}

/**
 * xs_print_peer_address - format an IPv4 address for printing
 * @xprt: generic transport
 * @format: flags field indicating which parts of the address to render
 */
static char *xs_print_peer_address(struct rpc_xprt *xprt, enum rpc_display_format_t format)
{
        if (xprt->address_strings[format] != NULL)
                return xprt->address_strings[format];
        else
                return "unprintable";
}

/**
 * xs_set_port - reset the port number in the remote endpoint address
 * @xprt: generic transport
 * @port: new port number
 *
 */
static void xs_set_port(struct rpc_xprt *xprt, unsigned short port)
{
        struct sockaddr_in *sap = (struct sockaddr_in *) &xprt->addr;

        dprintk("RPC:      setting port for xprt %p to %u\n", xprt, port);

        sap->sin_port = htons(port);
}

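/*
 * Bind to a reserved port, starting at xprt->port and working downward
 * (wrapping from xprt_min_resvport back to xprt_max_resvport) until
 * bind() succeeds or we arrive back where we started.
 */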
static int xs_bindresvport(struct rpc_xprt *xprt, struct socket *sock)
{
        struct sockaddr_in myaddr = {
                .sin_family = AF_INET,
        };
        int err;
        unsigned short port = xprt->port;

        do {
                myaddr.sin_port = htons(port);
                err = sock->ops->bind(sock, (struct sockaddr *) &myaddr,
                                                sizeof(myaddr));
                if (err == 0) {
                        xprt->port = port;
                        dprintk("RPC:      xs_bindresvport bound to port %u\n",
                                        port);
                        return 0;
                }
                if (port <= xprt_min_resvport)
                        port = xprt_max_resvport;
                else
                        port--;
        } while (err == -EADDRINUSE && port != xprt->port);

        dprintk("RPC:      can't bind to reserved port (%d).\n", -err);
        return err;
}

/**
 * xs_udp_connect_worker - set up a UDP socket
 * @args: RPC transport to connect
 *
 * Invoked by a work queue tasklet.
 */
static void xs_udp_connect_worker(void *args)
{
        struct rpc_xprt *xprt = (struct rpc_xprt *) args;
        struct socket *sock = xprt->sock;
        int err, status = -EIO;

        if (xprt->shutdown || !xprt_bound(xprt))
                goto out;

        /* Start by resetting any existing state */
        xs_close(xprt);

        if ((err = sock_create_kern(PF_INET, SOCK_DGRAM, IPPROTO_UDP, &sock)) < 0) {
                dprintk("RPC:      can't create UDP transport socket (%d).\n", -err);
                goto out;
        }

        if (xprt->resvport && xs_bindresvport(xprt, sock) < 0) {
                sock_release(sock);
                goto out;
        }

        dprintk("RPC:      worker connecting xprt %p to address: %s\n",
                        xprt, xs_print_peer_address(xprt, RPC_DISPLAY_ALL));

        if (!xprt->inet) {
                struct sock *sk = sock->sk;

                write_lock_bh(&sk->sk_callback_lock);

                sk->sk_user_data = xprt;
                xprt->old_data_ready = sk->sk_data_ready;
                xprt->old_state_change = sk->sk_state_change;
                xprt->old_write_space = sk->sk_write_space;
                sk->sk_data_ready = xs_udp_data_ready;
                sk->sk_write_space = xs_udp_write_space;
                sk->sk_no_check = UDP_CSUM_NORCV;
                sk->sk_allocation = GFP_ATOMIC;

                xprt_set_connected(xprt);

                /* Reset to new socket */
                xprt->sock = sock;
                xprt->inet = sk;

                write_unlock_bh(&sk->sk_callback_lock);
        }
        xs_udp_do_set_buffer_size(xprt);
        status = 0;
out:
        xprt_wake_pending_tasks(xprt, status);
        xprt_clear_connecting(xprt);
}

/*
 * We need to preserve the port number so the reply cache on the server can
 * find our cached RPC replies when we get around to reconnecting.
 */
static void xs_tcp_reuse_connection(struct rpc_xprt *xprt)
{
        int result;
        struct socket *sock = xprt->sock;
        struct sockaddr any;

        dprintk("RPC:      disconnecting xprt %p to reuse port\n", xprt);

        /*
         * Disconnect the transport socket by doing a connect operation
         * with AF_UNSPEC.  This should return immediately...
         */
        memset(&any, 0, sizeof(any));
        any.sa_family = AF_UNSPEC;
        result = sock->ops->connect(sock, &any, sizeof(any), 0);
        if (result)
                dprintk("RPC:      AF_UNSPEC connect return code %d\n",
                                result);
}

/**
 * xs_tcp_connect_worker - connect a TCP socket to a remote endpoint
 * @args: RPC transport to connect
 *
 * Invoked by a work queue tasklet.
 */
static void xs_tcp_connect_worker(void *args)
{
        struct rpc_xprt *xprt = (struct rpc_xprt *)args;
        struct socket *sock = xprt->sock;
        int err, status = -EIO;

        if (xprt->shutdown || !xprt_bound(xprt))
                goto out;

        if (!xprt->sock) {
                /* start from scratch */
                if ((err = sock_create_kern(PF_INET, SOCK_STREAM, IPPROTO_TCP, &sock)) < 0) {
                        dprintk("RPC:      can't create TCP transport socket (%d).\n", -err);
                        goto out;
                }

                if (xprt->resvport && xs_bindresvport(xprt, sock) < 0) {
                        sock_release(sock);
                        goto out;
                }
        } else
                /* "close" the socket, preserving the local port */
                xs_tcp_reuse_connection(xprt);

        dprintk("RPC:      worker connecting xprt %p to address: %s\n",
                        xprt, xs_print_peer_address(xprt, RPC_DISPLAY_ALL));

        if (!xprt->inet) {
                struct sock *sk = sock->sk;

                write_lock_bh(&sk->sk_callback_lock);

                sk->sk_user_data = xprt;
                xprt->old_data_ready = sk->sk_data_ready;
                xprt->old_state_change = sk->sk_state_change;
                xprt->old_write_space = sk->sk_write_space;
                sk->sk_data_ready = xs_tcp_data_ready;
                sk->sk_state_change = xs_tcp_state_change;
                sk->sk_write_space = xs_tcp_write_space;
                sk->sk_allocation = GFP_ATOMIC;

                /* socket options */
                sk->sk_userlocks |= SOCK_BINDPORT_LOCK;
                sock_reset_flag(sk, SOCK_LINGER);
                tcp_sk(sk)->linger2 = 0;
                tcp_sk(sk)->nonagle |= TCP_NAGLE_OFF;

                xprt_clear_connected(xprt);

                /* Reset to new socket */
                xprt->sock = sock;
                xprt->inet = sk;

                write_unlock_bh(&sk->sk_callback_lock);
        }

        /* Tell the socket layer to start connecting... */
        xprt->stat.connect_count++;
        xprt->stat.connect_start = jiffies;
        status = sock->ops->connect(sock, (struct sockaddr *) &xprt->addr,
                        xprt->addrlen, O_NONBLOCK);
        dprintk("RPC: %p  connect status %d connected %d sock state %d\n",
                        xprt, -status, xprt_connected(xprt), sock->sk->sk_state);
        if (status < 0) {
                switch (status) {
                        case -EINPROGRESS:
                        case -EALREADY:
                                goto out_clear;
                        case -ECONNREFUSED:
                        case -ECONNRESET:
                                /* retry with existing socket, after a delay */
                                break;
                        default:
                                /* get rid of existing socket, and retry */
                                xs_close(xprt);
                                break;
                }
        }
out:
        xprt_wake_pending_tasks(xprt, status);
out_clear:
        xprt_clear_connecting(xprt);
}

/**
 * xs_connect - connect a socket to a remote endpoint
 * @task: address of RPC task that manages state of connect request
 *
 * TCP: If the remote end dropped the connection, delay reconnecting.
 *
 * UDP socket connects are synchronous, but we use a work queue anyway
 * to guarantee that even unprivileged user processes can set up a
 * socket on a privileged port.
 *
 * If a UDP socket connect fails, the delay behavior here prevents
 * retry floods (hard mounts).
 */
static void xs_connect(struct rpc_task *task)
{
        struct rpc_xprt *xprt = task->tk_xprt;

        if (xprt_test_and_set_connecting(xprt))
                return;

        if (xprt->sock != NULL) {
                dprintk("RPC:      xs_connect delayed xprt %p for %lu seconds\n",
                                xprt, xprt->reestablish_timeout / HZ);
                schedule_delayed_work(&xprt->connect_worker,
                                        xprt->reestablish_timeout);
                xprt->reestablish_timeout <<= 1;
                if (xprt->reestablish_timeout > XS_TCP_MAX_REEST_TO)
                        xprt->reestablish_timeout = XS_TCP_MAX_REEST_TO;
        } else {
                dprintk("RPC:      xs_connect scheduled xprt %p\n", xprt);
                schedule_work(&xprt->connect_worker);

                /* flush_scheduled_work can sleep... */
                if (!RPC_IS_ASYNC(task))
                        flush_scheduled_work();
        }
}

/**
 * xs_udp_print_stats - display UDP socket-specific stats
 * @xprt: rpc_xprt struct containing statistics
 * @seq: output file
 *
 */
static void xs_udp_print_stats(struct rpc_xprt *xprt, struct seq_file *seq)
{
        seq_printf(seq, "\txprt:\tudp %u %lu %lu %lu %lu %Lu %Lu\n",
                        xprt->port,
                        xprt->stat.bind_count,
                        xprt->stat.sends,
                        xprt->stat.recvs,
                        xprt->stat.bad_xids,
                        xprt->stat.req_u,
                        xprt->stat.bklog_u);
}

/**
 * xs_tcp_print_stats - display TCP socket-specific stats
 * @xprt: rpc_xprt struct containing statistics
 * @seq: output file
 *
 */
static void xs_tcp_print_stats(struct rpc_xprt *xprt, struct seq_file *seq)
{
        long idle_time = 0;

        if (xprt_connected(xprt))
                idle_time = (long)(jiffies - xprt->last_used) / HZ;

        seq_printf(seq, "\txprt:\ttcp %u %lu %lu %lu %ld %lu %lu %lu %Lu %Lu\n",
                        xprt->port,
                        xprt->stat.bind_count,
                        xprt->stat.connect_count,
                        xprt->stat.connect_time,
                        idle_time,
                        xprt->stat.sends,
                        xprt->stat.recvs,
                        xprt->stat.bad_xids,
                        xprt->stat.req_u,
                        xprt->stat.bklog_u);
}

static struct rpc_xprt_ops xs_udp_ops = {
        .set_buffer_size        = xs_udp_set_buffer_size,
        .print_addr             = xs_print_peer_address,
        .reserve_xprt           = xprt_reserve_xprt_cong,
        .release_xprt           = xprt_release_xprt_cong,
        .rpcbind                = rpc_getport,
        .set_port               = xs_set_port,
        .connect                = xs_connect,
        .buf_alloc              = rpc_malloc,
        .buf_free               = rpc_free,
        .send_request           = xs_udp_send_request,
        .set_retrans_timeout    = xprt_set_retrans_timeout_rtt,
        .timer                  = xs_udp_timer,
        .release_request        = xprt_release_rqst_cong,
        .close                  = xs_close,
        .destroy                = xs_destroy,
        .print_stats            = xs_udp_print_stats,
};

static struct rpc_xprt_ops xs_tcp_ops = {
        .print_addr             = xs_print_peer_address,
        .reserve_xprt           = xprt_reserve_xprt,
        .release_xprt           = xs_tcp_release_xprt,
        .rpcbind                = rpc_getport,
        .set_port               = xs_set_port,
        .connect                = xs_connect,
        .buf_alloc              = rpc_malloc,
        .buf_free               = rpc_free,
        .send_request           = xs_tcp_send_request,
        .set_retrans_timeout    = xprt_set_retrans_timeout_def,
        .close                  = xs_close,
        .destroy                = xs_destroy,
        .print_stats            = xs_tcp_print_stats,
};

/**
 * xs_setup_udp - Set up transport to use a UDP socket
 * @xprt: transport to set up
 * @to:   timeout parameters
 *
 */
int xs_setup_udp(struct rpc_xprt *xprt, struct rpc_timeout *to)
{
        size_t slot_table_size;
        struct sockaddr_in *addr = (struct sockaddr_in *) &xprt->addr;

        xprt->max_reqs = xprt_udp_slot_table_entries;
        slot_table_size = xprt->max_reqs * sizeof(xprt->slot[0]);
        xprt->slot = kzalloc(slot_table_size, GFP_KERNEL);
        if (xprt->slot == NULL)
                return -ENOMEM;

        if (ntohs(addr->sin_port) != 0)
                xprt_set_bound(xprt);
        xprt->port = xs_get_random_port();

        xprt->prot = IPPROTO_UDP;
        xprt->tsh_size = 0;
        /* XXX: header size can vary due to auth type, IPv6, etc. */
        xprt->max_payload = (1U << 16) - (MAX_HEADER << 3);

        INIT_WORK(&xprt->connect_worker, xs_udp_connect_worker, xprt);
        xprt->bind_timeout = XS_BIND_TO;
        xprt->connect_timeout = XS_UDP_CONN_TO;
        xprt->reestablish_timeout = XS_UDP_REEST_TO;
        xprt->idle_timeout = XS_IDLE_DISC_TO;

        xprt->ops = &xs_udp_ops;

        if (to)
                xprt->timeout = *to;
        else
                xprt_set_timeout(&xprt->timeout, 5, 5 * HZ);

        xs_format_peer_addresses(xprt);
        dprintk("RPC:      set up transport to address %s\n",
                        xs_print_peer_address(xprt, RPC_DISPLAY_ALL));

        return 0;
}

/**
 * xs_setup_tcp - Set up transport to use a TCP socket
 * @xprt: transport to set up
 * @to: timeout parameters
 *
 */
int xs_setup_tcp(struct rpc_xprt *xprt, struct rpc_timeout *to)
{
        size_t slot_table_size;
        struct sockaddr_in *addr = (struct sockaddr_in *) &xprt->addr;

        xprt->max_reqs = xprt_tcp_slot_table_entries;
        slot_table_size = xprt->max_reqs * sizeof(xprt->slot[0]);
        xprt->slot = kzalloc(slot_table_size, GFP_KERNEL);
        if (xprt->slot == NULL)
                return -ENOMEM;

        if (ntohs(addr->sin_port) != 0)
                xprt_set_bound(xprt);
        xprt->port = xs_get_random_port();

        xprt->prot = IPPROTO_TCP;
        xprt->tsh_size = sizeof(rpc_fraghdr) / sizeof(u32);
        xprt->max_payload = RPC_MAX_FRAGMENT_SIZE;

        INIT_WORK(&xprt->connect_worker, xs_tcp_connect_worker, xprt);
        xprt->bind_timeout = XS_BIND_TO;
        xprt->connect_timeout = XS_TCP_CONN_TO;
        xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO;
        xprt->idle_timeout = XS_IDLE_DISC_TO;

        xprt->ops = &xs_tcp_ops;

        if (to)
                xprt->timeout = *to;
        else
                xprt_set_timeout(&xprt->timeout, 2, 60 * HZ);

        xs_format_peer_addresses(xprt);
        dprintk("RPC:      set up transport to address %s\n",
                        xs_print_peer_address(xprt, RPC_DISPLAY_ALL));

        return 0;
}