Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/linville/wirel...
[pandora-kernel.git] / net / caif / caif_socket.c
1 /*
2  * Copyright (C) ST-Ericsson AB 2010
3  * Author:      Sjur Brendeland sjur.brandeland@stericsson.com
4  * License terms: GNU General Public License (GPL) version 2
5  */
6
7 #define pr_fmt(fmt) KBUILD_MODNAME ":%s(): " fmt, __func__
8
9 #include <linux/fs.h>
10 #include <linux/init.h>
11 #include <linux/module.h>
12 #include <linux/sched.h>
13 #include <linux/spinlock.h>
14 #include <linux/mutex.h>
15 #include <linux/list.h>
16 #include <linux/wait.h>
17 #include <linux/poll.h>
18 #include <linux/tcp.h>
19 #include <linux/uaccess.h>
20 #include <linux/debugfs.h>
21 #include <linux/caif/caif_socket.h>
22 #include <linux/pkt_sched.h>
23 #include <net/sock.h>
24 #include <net/tcp_states.h>
25 #include <net/caif/caif_layer.h>
26 #include <net/caif/caif_dev.h>
27 #include <net/caif/cfpkt.h>
28
29 MODULE_LICENSE("GPL");
30 MODULE_ALIAS_NETPROTO(AF_CAIF);
31
/*
 * CAIF state is re-using the TCP socket states.
 * caif_states stored in sk_state reflect the state as reported by
 * the CAIF stack, while sk_socket->state is the state of the socket.
 */
enum caif_states {
	CAIF_CONNECTED		= TCP_ESTABLISHED,
	CAIF_CONNECTING	= TCP_SYN_SENT,
	CAIF_DISCONNECTED	= TCP_CLOSE
};

/* Bit numbers in caifsock->flow_state, manipulated via {set,clear,test}_bit */
#define TX_FLOW_ON_BIT	1
#define RX_FLOW_ON_BIT	2

/* A CAIF socket: struct sock extended with the CAIF stack plumbing. */
struct caifsock {
	struct sock sk; /* must be first member */
	struct cflayer layer;		/* this socket's layer in the CAIF stack */
	u32 flow_state;			/* TX/RX flow-control bits, see above */
	struct caif_connect_request conn_req;	/* parameters used by connect() */
	struct mutex readlock;		/* serializes stream receivers */
	struct dentry *debugfs_socket_dir;	/* per-socket debugfs directory */
	int headroom, tailroom, maxframe;	/* link-layer frame geometry */
};
55
56 static int rx_flow_is_on(struct caifsock *cf_sk)
57 {
58         return test_bit(RX_FLOW_ON_BIT,
59                         (void *) &cf_sk->flow_state);
60 }
61
62 static int tx_flow_is_on(struct caifsock *cf_sk)
63 {
64         return test_bit(TX_FLOW_ON_BIT,
65                         (void *) &cf_sk->flow_state);
66 }
67
68 static void set_rx_flow_off(struct caifsock *cf_sk)
69 {
70          clear_bit(RX_FLOW_ON_BIT,
71                  (void *) &cf_sk->flow_state);
72 }
73
74 static void set_rx_flow_on(struct caifsock *cf_sk)
75 {
76          set_bit(RX_FLOW_ON_BIT,
77                         (void *) &cf_sk->flow_state);
78 }
79
80 static void set_tx_flow_off(struct caifsock *cf_sk)
81 {
82          clear_bit(TX_FLOW_ON_BIT,
83                 (void *) &cf_sk->flow_state);
84 }
85
86 static void set_tx_flow_on(struct caifsock *cf_sk)
87 {
88          set_bit(TX_FLOW_ON_BIT,
89                 (void *) &cf_sk->flow_state);
90 }
91
92 static void caif_read_lock(struct sock *sk)
93 {
94         struct caifsock *cf_sk;
95         cf_sk = container_of(sk, struct caifsock, sk);
96         mutex_lock(&cf_sk->readlock);
97 }
98
99 static void caif_read_unlock(struct sock *sk)
100 {
101         struct caifsock *cf_sk;
102         cf_sk = container_of(sk, struct caifsock, sk);
103         mutex_unlock(&cf_sk->readlock);
104 }
105
106 static int sk_rcvbuf_lowwater(struct caifsock *cf_sk)
107 {
108         /* A quarter of full buffer is used a low water mark */
109         return cf_sk->sk.sk_rcvbuf / 4;
110 }
111
112 static void caif_flow_ctrl(struct sock *sk, int mode)
113 {
114         struct caifsock *cf_sk;
115         cf_sk = container_of(sk, struct caifsock, sk);
116         if (cf_sk->layer.dn && cf_sk->layer.dn->modemcmd)
117                 cf_sk->layer.dn->modemcmd(cf_sk->layer.dn, mode);
118 }
119
/*
 * Copied from sock.c:sock_queue_rcv_skb(), but changed so packets are
 * not dropped, but CAIF is sending flow off instead.
 *
 * Queue @skb on @sk's receive queue. When memory limits are hit the
 * packet is still queued, but CAIF flow-off is signalled towards the
 * modem so that no further packets arrive.
 * Returns 0, or the negative sk_filter() error.
 */
static int caif_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
	int err;
	int skb_len;
	unsigned long flags;
	struct sk_buff_head *list = &sk->sk_receive_queue;
	struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);

	/* Receive buffer is (about to be) full: ask the modem to pause. */
	if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
		(unsigned int)sk->sk_rcvbuf && rx_flow_is_on(cf_sk)) {
		if (net_ratelimit())
			pr_debug("sending flow OFF (queue len = %d %d)\n",
					atomic_read(&cf_sk->sk.sk_rmem_alloc),
					sk_rcvbuf_lowwater(cf_sk));
		set_rx_flow_off(cf_sk);
		caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_OFF_REQ);
	}

	err = sk_filter(sk, skb);
	if (err)
		/* NOTE(review): skb is not freed on this path and the sole
		 * caller ignores the return value — looks like a leak for
		 * filtered packets; confirm sk_filter() ownership rules. */
		return err;
	/* Cannot charge the skb to the socket: also assert flow-off. */
	if (!sk_rmem_schedule(sk, skb->truesize) && rx_flow_is_on(cf_sk)) {
		set_rx_flow_off(cf_sk);
		if (net_ratelimit())
			pr_debug("sending flow OFF due to rmem_schedule\n");
		caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_OFF_REQ);
	}
	skb->dev = NULL;
	skb_set_owner_r(skb, sk);
	/* Cache the SKB length before we tack it onto the receive
	 * queue. Once it is added it no longer belongs to us and
	 * may be freed by other threads of control pulling packets
	 * from the queue.
	 */
	skb_len = skb->len;
	/* SOCK_DEAD is checked under the queue lock: caif_release() sets
	 * it under the same lock, so no packet can be queued afterwards. */
	spin_lock_irqsave(&list->lock, flags);
	if (!sock_flag(sk, SOCK_DEAD))
		__skb_queue_tail(list, skb);
	spin_unlock_irqrestore(&list->lock, flags);

	if (!sock_flag(sk, SOCK_DEAD))
		sk->sk_data_ready(sk, skb_len);
	else
		kfree_skb(skb);
	return 0;
}
170
171 /* Packet Receive Callback function called from CAIF Stack */
172 static int caif_sktrecv_cb(struct cflayer *layr, struct cfpkt *pkt)
173 {
174         struct caifsock *cf_sk;
175         struct sk_buff *skb;
176
177         cf_sk = container_of(layr, struct caifsock, layer);
178         skb = cfpkt_tonative(pkt);
179
180         if (unlikely(cf_sk->sk.sk_state != CAIF_CONNECTED)) {
181                 kfree_skb(skb);
182                 return 0;
183         }
184         caif_queue_rcv_skb(&cf_sk->sk, skb);
185         return 0;
186 }
187
188 static void cfsk_hold(struct cflayer *layr)
189 {
190         struct caifsock *cf_sk = container_of(layr, struct caifsock, layer);
191         sock_hold(&cf_sk->sk);
192 }
193
194 static void cfsk_put(struct cflayer *layr)
195 {
196         struct caifsock *cf_sk = container_of(layr, struct caifsock, layer);
197         sock_put(&cf_sk->sk);
198 }
199
/* Packet Control Callback function called from CAIF */
/*
 * Handles control indications from the CAIF stack: flow on/off,
 * connect/disconnect responses and remote shutdown. Updates
 * sk_state / flow bits and wakes any waiters via sk_state_change
 * or sk_error_report.
 */
static void caif_ctrl_cb(struct cflayer *layr,
				enum caif_ctrlcmd flow,
				int phyid)
{
	struct caifsock *cf_sk = container_of(layr, struct caifsock, layer);
	switch (flow) {
	case CAIF_CTRLCMD_FLOW_ON_IND:
		/* OK from modem to start sending again */
		set_tx_flow_on(cf_sk);
		cf_sk->sk.sk_state_change(&cf_sk->sk);
		break;

	case CAIF_CTRLCMD_FLOW_OFF_IND:
		/* Modem asks us to shut up */
		set_tx_flow_off(cf_sk);
		cf_sk->sk.sk_state_change(&cf_sk->sk);
		break;

	case CAIF_CTRLCMD_INIT_RSP:
		/* We're now connected */
		/* Register refcount callbacks so the stack pins the socket. */
		caif_client_register_refcnt(&cf_sk->layer,
						cfsk_hold, cfsk_put);
		cf_sk->sk.sk_state = CAIF_CONNECTED;
		set_tx_flow_on(cf_sk);
		cf_sk->sk.sk_state_change(&cf_sk->sk);
		break;

	case CAIF_CTRLCMD_DEINIT_RSP:
		/* We're now disconnected */
		cf_sk->sk.sk_state = CAIF_DISCONNECTED;
		cf_sk->sk.sk_state_change(&cf_sk->sk);
		break;

	case CAIF_CTRLCMD_INIT_FAIL_RSP:
		/* Connect request failed */
		cf_sk->sk.sk_err = ECONNREFUSED;
		cf_sk->sk.sk_state = CAIF_DISCONNECTED;
		cf_sk->sk.sk_shutdown = SHUTDOWN_MASK;
		/*
		 * Socket "standards" seems to require POLLOUT to
		 * be set at connect failure.
		 */
		set_tx_flow_on(cf_sk);
		cf_sk->sk.sk_state_change(&cf_sk->sk);
		break;

	case CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND:
		/* Modem has closed this connection, or device is down. */
		cf_sk->sk.sk_shutdown = SHUTDOWN_MASK;
		cf_sk->sk.sk_err = ECONNRESET;
		set_rx_flow_on(cf_sk);
		cf_sk->sk.sk_error_report(&cf_sk->sk);
		break;

	default:
		pr_debug("Unexpected flow command %d\n", flow);
	}
}
259
260 static void caif_check_flow_release(struct sock *sk)
261 {
262         struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);
263
264         if (rx_flow_is_on(cf_sk))
265                 return;
266
267         if (atomic_read(&sk->sk_rmem_alloc) <= sk_rcvbuf_lowwater(cf_sk)) {
268                         set_rx_flow_on(cf_sk);
269                         caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_ON_REQ);
270         }
271 }
272
273 /*
274  * Copied from unix_dgram_recvmsg, but removed credit checks,
275  * changed locking, address handling and added MSG_TRUNC.
276  */
277 static int caif_seqpkt_recvmsg(struct kiocb *iocb, struct socket *sock,
278                                 struct msghdr *m, size_t len, int flags)
279
280 {
281         struct sock *sk = sock->sk;
282         struct sk_buff *skb;
283         int ret;
284         int copylen;
285
286         ret = -EOPNOTSUPP;
287         if (m->msg_flags&MSG_OOB)
288                 goto read_error;
289
290         skb = skb_recv_datagram(sk, flags, 0 , &ret);
291         if (!skb)
292                 goto read_error;
293         copylen = skb->len;
294         if (len < copylen) {
295                 m->msg_flags |= MSG_TRUNC;
296                 copylen = len;
297         }
298
299         ret = skb_copy_datagram_iovec(skb, 0, m->msg_iov, copylen);
300         if (ret)
301                 goto out_free;
302
303         ret = (flags & MSG_TRUNC) ? skb->len : copylen;
304 out_free:
305         skb_free_datagram(sk, skb);
306         caif_check_flow_release(sk);
307         return ret;
308
309 read_error:
310         return ret;
311 }
312
313
/* Copied from unix_stream_wait_data, identical except for lock call. */
/*
 * Sleep until data arrives, the connection terminates, a signal is
 * pending or @timeo expires. Returns the remaining timeout.
 * Called without the socket lock; takes and finally releases it.
 */
static long caif_stream_data_wait(struct sock *sk, long timeo)
{
	DEFINE_WAIT(wait);
	lock_sock(sk);

	for (;;) {
		/* Queue on the wait queue BEFORE testing the condition so
		 * a concurrent wakeup between test and sleep is not lost. */
		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);

		if (!skb_queue_empty(&sk->sk_receive_queue) ||
			sk->sk_err ||
			sk->sk_state != CAIF_CONNECTED ||
			sock_flag(sk, SOCK_DEAD) ||
			(sk->sk_shutdown & RCV_SHUTDOWN) ||
			signal_pending(current) ||
			!timeo)
			break;

		set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
		/* Drop the socket lock while actually sleeping. */
		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock(sk);
		clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
	}

	finish_wait(sk_sleep(sk), &wait);
	release_sock(sk);
	return timeo;
}
343
344
/*
 * Copied from unix_stream_recvmsg, but removed credit checks,
 * changed locking calls, changed address handling.
 *
 * Stream receive: dequeues skbs and copies up to @size bytes into
 * @msg, blocking (subject to SO_RCVTIMEO) until at least the
 * SO_RCVLOWAT target has been copied. Returns bytes copied, or a
 * negative error only when nothing was copied.
 */
static int caif_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
				struct msghdr *msg, size_t size,
				int flags)
{
	struct sock *sk = sock->sk;
	int copied = 0;
	int target;	/* minimum bytes before returning (SO_RCVLOWAT) */
	int err = 0;
	long timeo;

	err = -EOPNOTSUPP;
	if (flags&MSG_OOB)
		goto out;

	msg->msg_namelen = 0;

	/*
	 * Lock the socket to prevent queue disordering
	 * while sleeps in memcpy_tomsg
	 */
	err = -EAGAIN;
	if (sk->sk_state == CAIF_CONNECTING)
		goto out;

	/* readlock serializes concurrent stream readers so their
	 * chunks do not interleave. */
	caif_read_lock(sk);
	target = sock_rcvlowat(sk, flags&MSG_WAITALL, size);
	timeo = sock_rcvtimeo(sk, flags&MSG_DONTWAIT);

	do {
		int chunk;
		struct sk_buff *skb;

		lock_sock(sk);
		skb = skb_dequeue(&sk->sk_receive_queue);
		caif_check_flow_release(sk);

		if (skb == NULL) {
			if (copied >= target)
				goto unlock;
			/*
			 *	POSIX 1003.1g mandates this order.
			 */
			err = sock_error(sk);
			if (err)
				goto unlock;
			err = -ECONNRESET;
			if (sk->sk_shutdown & RCV_SHUTDOWN)
				goto unlock;

			err = -EPIPE;
			if (sk->sk_state != CAIF_CONNECTED)
				goto unlock;
			if (sock_flag(sk, SOCK_DEAD))
				goto unlock;

			release_sock(sk);

			err = -EAGAIN;
			if (!timeo)
				break;

			/* Drop readlock while sleeping so other readers
			 * and the release path are not blocked. */
			caif_read_unlock(sk);

			timeo = caif_stream_data_wait(sk, timeo);

			if (signal_pending(current)) {
				/* readlock already released above, so the
				 * plain "goto out" here is balanced. */
				err = sock_intr_errno(timeo);
				goto out;
			}
			caif_read_lock(sk);
			continue;
unlock:
			release_sock(sk);
			break;
		}
		release_sock(sk);
		chunk = min_t(unsigned int, skb->len, size);
		if (memcpy_toiovec(msg->msg_iov, skb->data, chunk)) {
			/* Fault copying to user: requeue the skb intact. */
			skb_queue_head(&sk->sk_receive_queue, skb);
			if (copied == 0)
				copied = -EFAULT;
			break;
		}
		copied += chunk;
		size -= chunk;

		/* Mark read part of skb as used */
		if (!(flags & MSG_PEEK)) {
			skb_pull(skb, chunk);

			/* put the skb back if we didn't use it up. */
			if (skb->len) {
				skb_queue_head(&sk->sk_receive_queue, skb);
				break;
			}
			kfree_skb(skb);

		} else {
			/*
			 * It is questionable, see note in unix_dgram_recvmsg.
			 */
			/* put message back and return */
			skb_queue_head(&sk->sk_receive_queue, skb);
			break;
		}
	} while (size);
	caif_read_unlock(sk);

out:
	return copied ? : err;
}
460
/*
 * Copied from sock.c:sock_wait_for_wmem, but change to wait for
 * CAIF flow-on and sock_writable.
 *
 * Sleep until TX flow is on (and, when @wait_writeable, the socket
 * has send-buffer space), or until timeout/signal/shutdown/error.
 * *err is set to 0 on success or a negative reason; the remaining
 * timeout is returned.
 */
static long caif_wait_for_flow_on(struct caifsock *cf_sk,
				int wait_writeable, long timeo, int *err)
{
	struct sock *sk = &cf_sk->sk;
	DEFINE_WAIT(wait);
	for (;;) {
		*err = 0;
		if (tx_flow_is_on(cf_sk) &&
			(!wait_writeable || sock_writeable(&cf_sk->sk)))
			break;
		*err = -ETIMEDOUT;
		if (!timeo)
			break;
		*err = -ERESTARTSYS;
		if (signal_pending(current))
			break;
		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
		/* Terminal conditions are re-checked after queuing on the
		 * wait queue so a concurrent wakeup cannot be missed. */
		*err = -ECONNRESET;
		if (sk->sk_shutdown & SHUTDOWN_MASK)
			break;
		*err = -sk->sk_err;
		if (sk->sk_err)
			break;
		*err = -EPIPE;
		if (cf_sk->sk.sk_state != CAIF_CONNECTED)
			break;
		timeo = schedule_timeout(timeo);
	}
	finish_wait(sk_sleep(sk), &wait);
	return timeo;
}
496
/*
 * Transmit a SKB. The device may temporarily request re-transmission
 * by returning EAGAIN.
 *
 * Ownership of @skb passes to this function: it is freed here when no
 * downstream layer exists, otherwise consumed by the stack's transmit.
 * @noblock and @timeo are currently unused.
 */
static int transmit_skb(struct sk_buff *skb, struct caifsock *cf_sk,
			int noblock, long timeo)
{
	struct cfpkt *pkt;

	/* NOTE(review): pkt presumably aliases skb in place (freeing skb
	 * on the error path below would otherwise leak pkt) — confirm
	 * against cfpkt_fromnative(). */
	pkt = cfpkt_fromnative(CAIF_DIR_OUT, skb);
	memset(skb->cb, 0, sizeof(struct caif_payload_info));
	cfpkt_set_prio(pkt, cf_sk->sk.sk_priority);

	if (cf_sk->layer.dn == NULL) {
		kfree_skb(skb);
		return -EINVAL;
	}

	return cf_sk->layer.dn->transmit(cf_sk->layer.dn, pkt);
}
517
/* Copied from af_unix:unix_dgram_sendmsg, and adapted to CAIF */
/*
 * Send one datagram of @len bytes. Waits for TX flow-on (subject to
 * SO_SNDTIMEO), rejects frames larger than the link's maxframe
 * (except for RFM which supports segmentation), and hands the skb
 * to transmit_skb(). Returns @len or a negative error.
 */
static int caif_seqpkt_sendmsg(struct kiocb *kiocb, struct socket *sock,
			struct msghdr *msg, size_t len)
{
	struct sock *sk = sock->sk;
	struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);
	int buffer_size;
	int ret = 0;
	struct sk_buff *skb = NULL;
	int noblock;
	long timeo;
	caif_assert(cf_sk);
	ret = sock_error(sk);
	if (ret)
		goto err;

	ret = -EOPNOTSUPP;
	if (msg->msg_flags&MSG_OOB)
		goto err;

	/* Connected socket: destination addresses are not accepted. */
	ret = -EOPNOTSUPP;
	if (msg->msg_namelen)
		goto err;

	ret = -EINVAL;
	if (unlikely(msg->msg_iov->iov_base == NULL))
		goto err;
	noblock = msg->msg_flags & MSG_DONTWAIT;

	timeo = sock_sndtimeo(sk, noblock);
	timeo = caif_wait_for_flow_on(container_of(sk, struct caifsock, sk),
				1, timeo, &ret);

	if (ret)
		goto err;
	ret = -EPIPE;
	/* NOTE(review): tests RCV_SHUTDOWN rather than SEND_SHUTDOWN on
	 * a send path — matches the original code, but looks suspicious;
	 * confirm intent before changing. */
	if (cf_sk->sk.sk_state != CAIF_CONNECTED ||
		sock_flag(sk, SOCK_DEAD) ||
		(sk->sk_shutdown & RCV_SHUTDOWN))
		goto err;

	/* Error if trying to write more than maximum frame size. */
	ret = -EMSGSIZE;
	if (len > cf_sk->maxframe && cf_sk->sk.sk_protocol != CAIFPROTO_RFM)
		goto err;

	buffer_size = len + cf_sk->headroom + cf_sk->tailroom;

	ret = -ENOMEM;
	skb = sock_alloc_send_skb(sk, buffer_size, noblock, &ret);

	if (!skb || skb_tailroom(skb) < buffer_size)
		goto err;

	skb_reserve(skb, cf_sk->headroom);

	ret = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len);

	if (ret)
		goto err;
	ret = transmit_skb(skb, cf_sk, noblock, timeo);
	if (ret < 0)
		/* skb is already freed */
		return ret;

	return len;
err:
	/* kfree_skb(NULL) is a no-op, so all error paths may land here. */
	kfree_skb(skb);
	return ret;
}
588
/*
 * Copied from unix_stream_sendmsg and adapted to CAIF:
 * Changed removed permission handling and added waiting for flow on
 * and other minor adaptations.
 *
 * Stream send: chops @msg into frames no larger than maxframe (and no
 * more than half the send buffer / SKB_MAX_ALLOC) and transmits them.
 * Returns bytes sent, or a negative error only when nothing was sent.
 */
static int caif_stream_sendmsg(struct kiocb *kiocb, struct socket *sock,
				struct msghdr *msg, size_t len)
{
	struct sock *sk = sock->sk;
	struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);
	int err, size;
	struct sk_buff *skb;
	int sent = 0;
	long timeo;

	err = -EOPNOTSUPP;
	if (unlikely(msg->msg_flags&MSG_OOB))
		goto out_err;

	if (unlikely(msg->msg_namelen))
		goto out_err;

	timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
	timeo = caif_wait_for_flow_on(cf_sk, 1, timeo, &err);
	/* NOTE(review): err from caif_wait_for_flow_on() is not checked
	 * here — the loop proceeds even when the wait failed; confirm
	 * this is the intended best-effort behavior. */

	if (unlikely(sk->sk_shutdown & SEND_SHUTDOWN))
		goto pipe_err;

	while (sent < len) {

		size = len-sent;

		/* Never exceed the link-layer frame size. */
		if (size > cf_sk->maxframe)
			size = cf_sk->maxframe;

		/* If size is more than half of sndbuf, chop up message */
		if (size > ((sk->sk_sndbuf >> 1) - 64))
			size = (sk->sk_sndbuf >> 1) - 64;

		if (size > SKB_MAX_ALLOC)
			size = SKB_MAX_ALLOC;

		skb = sock_alloc_send_skb(sk,
					size + cf_sk->headroom +
					cf_sk->tailroom,
					msg->msg_flags&MSG_DONTWAIT,
					&err);
		if (skb == NULL)
			goto out_err;

		skb_reserve(skb, cf_sk->headroom);
		/*
		 *	If you pass two values to the sock_alloc_send_skb
		 *	it tries to grab the large buffer with GFP_NOFS
		 *	(which can fail easily), and if it fails grab the
		 *	fallback size buffer which is under a page and will
		 *	succeed. [Alan]
		 */
		size = min_t(int, size, skb_tailroom(skb));

		err = memcpy_fromiovec(skb_put(skb, size), msg->msg_iov, size);
		if (err) {
			kfree_skb(skb);
			goto out_err;
		}
		err = transmit_skb(skb, cf_sk,
				msg->msg_flags&MSG_DONTWAIT, timeo);
		if (err < 0)
			/* skb is already freed */
			goto pipe_err;

		sent += size;
	}

	return sent;

pipe_err:
	/* Standard stream semantics: SIGPIPE when nothing could be sent. */
	if (sent == 0 && !(msg->msg_flags&MSG_NOSIGNAL))
		send_sig(SIGPIPE, current, 0);
	err = -EPIPE;
out_err:
	return sent ? : err;
}
672
673 static int setsockopt(struct socket *sock,
674                         int lvl, int opt, char __user *ov, unsigned int ol)
675 {
676         struct sock *sk = sock->sk;
677         struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);
678         int linksel;
679
680         if (cf_sk->sk.sk_socket->state != SS_UNCONNECTED)
681                 return -ENOPROTOOPT;
682
683         switch (opt) {
684         case CAIFSO_LINK_SELECT:
685                 if (ol < sizeof(int))
686                         return -EINVAL;
687                 if (lvl != SOL_CAIF)
688                         goto bad_sol;
689                 if (copy_from_user(&linksel, ov, sizeof(int)))
690                         return -EINVAL;
691                 lock_sock(&(cf_sk->sk));
692                 cf_sk->conn_req.link_selector = linksel;
693                 release_sock(&cf_sk->sk);
694                 return 0;
695
696         case CAIFSO_REQ_PARAM:
697                 if (lvl != SOL_CAIF)
698                         goto bad_sol;
699                 if (cf_sk->sk.sk_protocol != CAIFPROTO_UTIL)
700                         return -ENOPROTOOPT;
701                 lock_sock(&(cf_sk->sk));
702                 if (ol > sizeof(cf_sk->conn_req.param.data) ||
703                         copy_from_user(&cf_sk->conn_req.param.data, ov, ol)) {
704                         release_sock(&cf_sk->sk);
705                         return -EINVAL;
706                 }
707                 cf_sk->conn_req.param.size = ol;
708                 release_sock(&cf_sk->sk);
709                 return 0;
710
711         default:
712                 return -ENOPROTOOPT;
713         }
714
715         return 0;
716 bad_sol:
717         return -ENOPROTOOPT;
718
719 }
720
721 /*
722  * caif_connect() - Connect a CAIF Socket
723  * Copied and modified af_irda.c:irda_connect().
724  *
725  * Note : by consulting "errno", the user space caller may learn the cause
726  * of the failure. Most of them are visible in the function, others may come
727  * from subroutines called and are listed here :
728  *  o -EAFNOSUPPORT: bad socket family or type.
729  *  o -ESOCKTNOSUPPORT: bad socket type or protocol
730  *  o -EINVAL: bad socket address, or CAIF link type
731  *  o -ECONNREFUSED: remote end refused the connection.
732  *  o -EINPROGRESS: connect request sent but timed out (or non-blocking)
733  *  o -EISCONN: already connected.
734  *  o -ETIMEDOUT: Connection timed out (send timeout)
735  *  o -ENODEV: No link layer to send request
736  *  o -ECONNRESET: Received Shutdown indication or lost link layer
737  *  o -ENOMEM: Out of memory
738  *
739  *  State Strategy:
740  *  o sk_state: holds the CAIF_* protocol state, it's updated by
741  *      caif_ctrl_cb.
742  *  o sock->state: holds the SS_* socket state and is updated by connect and
743  *      disconnect.
744  */
745 static int caif_connect(struct socket *sock, struct sockaddr *uaddr,
746                         int addr_len, int flags)
747 {
748         struct sock *sk = sock->sk;
749         struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);
750         long timeo;
751         int err;
752         int ifindex, headroom, tailroom;
753         unsigned int mtu;
754         struct net_device *dev;
755
756         lock_sock(sk);
757
758         err = -EAFNOSUPPORT;
759         if (uaddr->sa_family != AF_CAIF)
760                 goto out;
761
762         switch (sock->state) {
763         case SS_UNCONNECTED:
764                 /* Normal case, a fresh connect */
765                 caif_assert(sk->sk_state == CAIF_DISCONNECTED);
766                 break;
767         case SS_CONNECTING:
768                 switch (sk->sk_state) {
769                 case CAIF_CONNECTED:
770                         sock->state = SS_CONNECTED;
771                         err = -EISCONN;
772                         goto out;
773                 case CAIF_DISCONNECTED:
774                         /* Reconnect allowed */
775                         break;
776                 case CAIF_CONNECTING:
777                         err = -EALREADY;
778                         if (flags & O_NONBLOCK)
779                                 goto out;
780                         goto wait_connect;
781                 }
782                 break;
783         case SS_CONNECTED:
784                 caif_assert(sk->sk_state == CAIF_CONNECTED ||
785                                 sk->sk_state == CAIF_DISCONNECTED);
786                 if (sk->sk_shutdown & SHUTDOWN_MASK) {
787                         /* Allow re-connect after SHUTDOWN_IND */
788                         caif_disconnect_client(sock_net(sk), &cf_sk->layer);
789                         caif_free_client(&cf_sk->layer);
790                         break;
791                 }
792                 /* No reconnect on a seqpacket socket */
793                 err = -EISCONN;
794                 goto out;
795         case SS_DISCONNECTING:
796         case SS_FREE:
797                 caif_assert(1); /*Should never happen */
798                 break;
799         }
800         sk->sk_state = CAIF_DISCONNECTED;
801         sock->state = SS_UNCONNECTED;
802         sk_stream_kill_queues(&cf_sk->sk);
803
804         err = -EINVAL;
805         if (addr_len != sizeof(struct sockaddr_caif))
806                 goto out;
807
808         memcpy(&cf_sk->conn_req.sockaddr, uaddr,
809                 sizeof(struct sockaddr_caif));
810
811         /* Move to connecting socket, start sending Connect Requests */
812         sock->state = SS_CONNECTING;
813         sk->sk_state = CAIF_CONNECTING;
814
815         /* Check priority value comming from socket */
816         /* if priority value is out of range it will be ajusted */
817         if (cf_sk->sk.sk_priority > CAIF_PRIO_MAX)
818                 cf_sk->conn_req.priority = CAIF_PRIO_MAX;
819         else if (cf_sk->sk.sk_priority < CAIF_PRIO_MIN)
820                 cf_sk->conn_req.priority = CAIF_PRIO_MIN;
821         else
822                 cf_sk->conn_req.priority = cf_sk->sk.sk_priority;
823
824         /*ifindex = id of the interface.*/
825         cf_sk->conn_req.ifindex = cf_sk->sk.sk_bound_dev_if;
826
827         cf_sk->layer.receive = caif_sktrecv_cb;
828
829         err = caif_connect_client(sock_net(sk), &cf_sk->conn_req,
830                                 &cf_sk->layer, &ifindex, &headroom, &tailroom);
831
832         if (err < 0) {
833                 cf_sk->sk.sk_socket->state = SS_UNCONNECTED;
834                 cf_sk->sk.sk_state = CAIF_DISCONNECTED;
835                 goto out;
836         }
837
838         err = -ENODEV;
839         rcu_read_lock();
840         dev = dev_get_by_index_rcu(sock_net(sk), ifindex);
841         if (!dev) {
842                 rcu_read_unlock();
843                 goto out;
844         }
845         cf_sk->headroom = LL_RESERVED_SPACE_EXTRA(dev, headroom);
846         mtu = dev->mtu;
847         rcu_read_unlock();
848
849         cf_sk->tailroom = tailroom;
850         cf_sk->maxframe = mtu - (headroom + tailroom);
851         if (cf_sk->maxframe < 1) {
852                 pr_warn("CAIF Interface MTU too small (%d)\n", dev->mtu);
853                 err = -ENODEV;
854                 goto out;
855         }
856
857         err = -EINPROGRESS;
858 wait_connect:
859
860         if (sk->sk_state != CAIF_CONNECTED && (flags & O_NONBLOCK))
861                 goto out;
862
863         timeo = sock_sndtimeo(sk, flags & O_NONBLOCK);
864
865         release_sock(sk);
866         err = -ERESTARTSYS;
867         timeo = wait_event_interruptible_timeout(*sk_sleep(sk),
868                         sk->sk_state != CAIF_CONNECTING,
869                         timeo);
870         lock_sock(sk);
871         if (timeo < 0)
872                 goto out; /* -ERESTARTSYS */
873
874         err = -ETIMEDOUT;
875         if (timeo == 0 && sk->sk_state != CAIF_CONNECTED)
876                 goto out;
877         if (sk->sk_state != CAIF_CONNECTED) {
878                 sock->state = SS_UNCONNECTED;
879                 err = sock_error(sk);
880                 if (!err)
881                         err = -ECONNREFUSED;
882                 goto out;
883         }
884         sock->state = SS_CONNECTED;
885         err = 0;
886 out:
887         release_sock(sk);
888         return err;
889 }
890
891 /*
892  * caif_release() - Disconnect a CAIF Socket
893  * Copied and modified af_irda.c:irda_release().
894  */
895 static int caif_release(struct socket *sock)
896 {
897         struct sock *sk = sock->sk;
898         struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);
899
900         if (!sk)
901                 return 0;
902
903         set_tx_flow_off(cf_sk);
904
905         /*
906          * Ensure that packets are not queued after this point in time.
907          * caif_queue_rcv_skb checks SOCK_DEAD holding the queue lock,
908          * this ensures no packets when sock is dead.
909          */
910         spin_lock_bh(&sk->sk_receive_queue.lock);
911         sock_set_flag(sk, SOCK_DEAD);
912         spin_unlock_bh(&sk->sk_receive_queue.lock);
913         sock->sk = NULL;
914
915         WARN_ON(IS_ERR(cf_sk->debugfs_socket_dir));
916         if (cf_sk->debugfs_socket_dir != NULL)
917                 debugfs_remove_recursive(cf_sk->debugfs_socket_dir);
918
919         lock_sock(&(cf_sk->sk));
920         sk->sk_state = CAIF_DISCONNECTED;
921         sk->sk_shutdown = SHUTDOWN_MASK;
922
923         caif_disconnect_client(sock_net(sk), &cf_sk->layer);
924         cf_sk->sk.sk_socket->state = SS_DISCONNECTING;
925         wake_up_interruptible_poll(sk_sleep(sk), POLLERR|POLLHUP);
926
927         sock_orphan(sk);
928         sk_stream_kill_queues(&cf_sk->sk);
929         release_sock(sk);
930         sock_put(sk);
931         return 0;
932 }
933
934 /* Copied from af_unix.c:unix_poll(), added CAIF tx_flow handling */
935 static unsigned int caif_poll(struct file *file,
936                                 struct socket *sock, poll_table *wait)
937 {
938         struct sock *sk = sock->sk;
939         unsigned int mask;
940         struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);
941
942         sock_poll_wait(file, sk_sleep(sk), wait);
943         mask = 0;
944
945         /* exceptional events? */
946         if (sk->sk_err)
947                 mask |= POLLERR;
948         if (sk->sk_shutdown == SHUTDOWN_MASK)
949                 mask |= POLLHUP;
950         if (sk->sk_shutdown & RCV_SHUTDOWN)
951                 mask |= POLLRDHUP;
952
953         /* readable? */
954         if (!skb_queue_empty(&sk->sk_receive_queue) ||
955                 (sk->sk_shutdown & RCV_SHUTDOWN))
956                 mask |= POLLIN | POLLRDNORM;
957
958         /*
959          * we set writable also when the other side has shut down the
960          * connection. This prevents stuck sockets.
961          */
962         if (sock_writeable(sk) && tx_flow_is_on(cf_sk))
963                 mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
964
965         return mask;
966 }
967
/*
 * proto_ops for SOCK_SEQPACKET CAIF sockets (message-oriented I/O).
 * CAIF channels are connect-only point-to-point links, so bind, accept,
 * listen and the other unsupported operations use the sock_no_* stubs.
 */
static const struct proto_ops caif_seqpacket_ops = {
	.family = PF_CAIF,
	.owner = THIS_MODULE,
	.release = caif_release,
	.bind = sock_no_bind,
	.connect = caif_connect,
	.socketpair = sock_no_socketpair,
	.accept = sock_no_accept,
	.getname = sock_no_getname,
	.poll = caif_poll,
	.ioctl = sock_no_ioctl,
	.listen = sock_no_listen,
	.shutdown = sock_no_shutdown,
	.setsockopt = setsockopt,	/* file-local CAIF setsockopt handler */
	.getsockopt = sock_no_getsockopt,
	.sendmsg = caif_seqpkt_sendmsg,
	.recvmsg = caif_seqpkt_recvmsg,
	.mmap = sock_no_mmap,
	.sendpage = sock_no_sendpage,
};
988
/*
 * proto_ops for SOCK_STREAM CAIF sockets; identical to the seqpacket
 * table except for the byte-stream sendmsg/recvmsg implementations.
 */
static const struct proto_ops caif_stream_ops = {
	.family = PF_CAIF,
	.owner = THIS_MODULE,
	.release = caif_release,
	.bind = sock_no_bind,
	.connect = caif_connect,
	.socketpair = sock_no_socketpair,
	.accept = sock_no_accept,
	.getname = sock_no_getname,
	.poll = caif_poll,
	.ioctl = sock_no_ioctl,
	.listen = sock_no_listen,
	.shutdown = sock_no_shutdown,
	.setsockopt = setsockopt,	/* file-local CAIF setsockopt handler */
	.getsockopt = sock_no_getsockopt,
	.sendmsg = caif_stream_sendmsg,
	.recvmsg = caif_stream_recvmsg,
	.mmap = sock_no_mmap,
	.sendpage = sock_no_sendpage,
};
1009
1010 /* This function is called when a socket is finally destroyed. */
1011 static void caif_sock_destructor(struct sock *sk)
1012 {
1013         struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);
1014         caif_assert(!atomic_read(&sk->sk_wmem_alloc));
1015         caif_assert(sk_unhashed(sk));
1016         caif_assert(!sk->sk_socket);
1017         if (!sock_flag(sk, SOCK_DEAD)) {
1018                 pr_debug("Attempt to release alive CAIF socket: %p\n", sk);
1019                 return;
1020         }
1021         sk_stream_kill_queues(&cf_sk->sk);
1022         caif_free_client(&cf_sk->layer);
1023 }
1024
1025 static int caif_create(struct net *net, struct socket *sock, int protocol,
1026                         int kern)
1027 {
1028         struct sock *sk = NULL;
1029         struct caifsock *cf_sk = NULL;
1030         static struct proto prot = {.name = "PF_CAIF",
1031                 .owner = THIS_MODULE,
1032                 .obj_size = sizeof(struct caifsock),
1033         };
1034
1035         if (!capable(CAP_SYS_ADMIN) && !capable(CAP_NET_ADMIN))
1036                 return -EPERM;
1037         /*
1038          * The sock->type specifies the socket type to use.
1039          * The CAIF socket is a packet stream in the sense
1040          * that it is packet based. CAIF trusts the reliability
1041          * of the link, no resending is implemented.
1042          */
1043         if (sock->type == SOCK_SEQPACKET)
1044                 sock->ops = &caif_seqpacket_ops;
1045         else if (sock->type == SOCK_STREAM)
1046                 sock->ops = &caif_stream_ops;
1047         else
1048                 return -ESOCKTNOSUPPORT;
1049
1050         if (protocol < 0 || protocol >= CAIFPROTO_MAX)
1051                 return -EPROTONOSUPPORT;
1052         /*
1053          * Set the socket state to unconnected.  The socket state
1054          * is really not used at all in the net/core or socket.c but the
1055          * initialization makes sure that sock->state is not uninitialized.
1056          */
1057         sk = sk_alloc(net, PF_CAIF, GFP_KERNEL, &prot);
1058         if (!sk)
1059                 return -ENOMEM;
1060
1061         cf_sk = container_of(sk, struct caifsock, sk);
1062
1063         /* Store the protocol */
1064         sk->sk_protocol = (unsigned char) protocol;
1065
1066         /* Initialize default priority for well-known cases */
1067         switch (protocol) {
1068         case CAIFPROTO_AT:
1069                 sk->sk_priority = TC_PRIO_CONTROL;
1070                 break;
1071         case CAIFPROTO_RFM:
1072                 sk->sk_priority = TC_PRIO_INTERACTIVE_BULK;
1073                 break;
1074         default:
1075                 sk->sk_priority = TC_PRIO_BESTEFFORT;
1076         }
1077
1078         /*
1079          * Lock in order to try to stop someone from opening the socket
1080          * too early.
1081          */
1082         lock_sock(&(cf_sk->sk));
1083
1084         /* Initialize the nozero default sock structure data. */
1085         sock_init_data(sock, sk);
1086         sk->sk_destruct = caif_sock_destructor;
1087
1088         mutex_init(&cf_sk->readlock); /* single task reading lock */
1089         cf_sk->layer.ctrlcmd = caif_ctrl_cb;
1090         cf_sk->sk.sk_socket->state = SS_UNCONNECTED;
1091         cf_sk->sk.sk_state = CAIF_DISCONNECTED;
1092
1093         set_tx_flow_off(cf_sk);
1094         set_rx_flow_on(cf_sk);
1095
1096         /* Set default options on configuration */
1097         cf_sk->conn_req.link_selector = CAIF_LINK_LOW_LATENCY;
1098         cf_sk->conn_req.protocol = protocol;
1099         release_sock(&cf_sk->sk);
1100         return 0;
1101 }
1102
1103
1104 static struct net_proto_family caif_family_ops = {
1105         .family = PF_CAIF,
1106         .create = caif_create,
1107         .owner = THIS_MODULE,
1108 };
1109
1110 static int __init caif_sktinit_module(void)
1111 {
1112         int err = sock_register(&caif_family_ops);
1113         if (!err)
1114                 return err;
1115         return 0;
1116 }
1117
/* Module exit: unregister the PF_CAIF socket family. */
static void __exit caif_sktexit_module(void)
{
	sock_unregister(PF_CAIF);
}
module_init(caif_sktinit_module);
module_exit(caif_sktexit_module);