2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
25 /* Bluetooth L2CAP core and sockets. */
27 #include <linux/module.h>
29 #include <linux/types.h>
30 #include <linux/capability.h>
31 #include <linux/errno.h>
32 #include <linux/kernel.h>
33 #include <linux/sched.h>
34 #include <linux/slab.h>
35 #include <linux/poll.h>
36 #include <linux/fcntl.h>
37 #include <linux/init.h>
38 #include <linux/interrupt.h>
39 #include <linux/socket.h>
40 #include <linux/skbuff.h>
41 #include <linux/list.h>
42 #include <linux/device.h>
45 #include <asm/system.h>
46 #include <asm/uaccess.h>
47 #include <asm/unaligned.h>
49 #include <net/bluetooth/bluetooth.h>
50 #include <net/bluetooth/hci_core.h>
51 #include <net/bluetooth/l2cap.h>
53 #ifndef CONFIG_BT_L2CAP_DEBUG
/* File-scope state and forward declarations.
 * NOTE(review): the extraction appears to have dropped lines throughout this
 * file (closing braces, blank lines); code below is annotated as-is. */
/* Socket operations table, defined later in the file. */
60 static const struct proto_ops l2cap_sock_ops;
/* Global list of all L2CAP sockets, protected by its rwlock. */
62 static struct bt_sock_list l2cap_sk_list = {
63 .lock = RW_LOCK_UNLOCKED
/* Forward declarations for close/kill helpers used before definition. */
66 static void __l2cap_sock_close(struct sock *sk, int reason);
67 static void l2cap_sock_close(struct sock *sk);
68 static void l2cap_sock_kill(struct sock *sk);
/* Builds a signalling-command skb; defined in the signalling section. */
70 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
71 u8 code, u8 ident, u16 dlen, void *data);
73 /* ---- L2CAP timers ---- */
/* sk_timer expiry handler: closes the socket with ETIMEDOUT.
 * Runs in timer (softirq) context; 'arg' is the struct sock pointer that
 * was stashed in sk_timer.data by l2cap_sock_init_timer(). */
74 static void l2cap_sock_timeout(unsigned long arg)
76 struct sock *sk = (struct sock *) arg;
78 BT_DBG("sock %p state %d", sk, sk->sk_state);
81 __l2cap_sock_close(sk, ETIMEDOUT);
/* (Re)arm the socket timer 'timeout' jiffies from now. */
88 static void l2cap_sock_set_timer(struct sock *sk, long timeout)
90 BT_DBG("sk %p state %d timeout %ld", sk, sk->sk_state, timeout);
91 sk_reset_timer(sk, &sk->sk_timer, jiffies + timeout);
/* Cancel a pending socket timer, if any. */
94 static void l2cap_sock_clear_timer(struct sock *sk)
96 BT_DBG("sock %p state %d", sk, sk->sk_state);
97 sk_stop_timer(sk, &sk->sk_timer);
/* One-time setup of sk_timer: handler + back-pointer to the socket. */
100 static void l2cap_sock_init_timer(struct sock *sk)
102 init_timer(&sk->sk_timer);
103 sk->sk_timer.function = l2cap_sock_timeout;
104 sk->sk_timer.data = (unsigned long)sk;
107 /* ---- L2CAP channels ---- */
/* Walk the per-connection channel list looking for a channel whose
 * destination CID matches. Caller must hold l->lock. */
108 static struct sock *__l2cap_get_chan_by_dcid(struct l2cap_chan_list *l, u16 cid)
111 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
112 if (l2cap_pi(s)->dcid == cid)
/* As above, but match on the source (local) CID. Caller holds l->lock. */
118 static struct sock *__l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
121 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
122 if (l2cap_pi(s)->scid == cid)
128 /* Find channel with given SCID.
129  * Returns locked socket */
130 static inline struct sock *l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
134 s = __l2cap_get_chan_by_scid(l, cid);
/* bh_lock_sock() is taken under the list read lock so the channel cannot
 * be unlinked between lookup and lock. */
135 if (s) bh_lock_sock(s);
136 read_unlock(&l->lock);
/* Match a channel by the signalling-command identifier of its pending
 * request. Caller holds l->lock. */
140 static struct sock *__l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
143 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
144 if (l2cap_pi(s)->ident == ident)
/* Locked-socket variant of the ident lookup, same pattern as by_scid. */
150 static inline struct sock *l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
154 s = __l2cap_get_chan_by_ident(l, ident);
155 if (s) bh_lock_sock(s);
156 read_unlock(&l->lock);
/* Allocate the first free source CID by linear scan of the channel list.
 * Caller must hold the list lock. */
160 static u16 l2cap_alloc_cid(struct l2cap_chan_list *l)
164 for (; cid < 0xffff; cid++) {
165 if(!__l2cap_get_chan_by_scid(l, cid))
/* Insert a channel at the head of the connection's channel list.
 * Caller must hold the list write lock. */
172 static inline void __l2cap_chan_link(struct l2cap_chan_list *l, struct sock *sk)
177 l2cap_pi(l->head)->prev_c = sk;
179 l2cap_pi(sk)->next_c = l->head;
180 l2cap_pi(sk)->prev_c = NULL;
/* Remove a channel from the doubly-linked channel list, taking the list
 * write lock itself (bottom-half safe). */
184 static inline void l2cap_chan_unlink(struct l2cap_chan_list *l, struct sock *sk)
186 struct sock *next = l2cap_pi(sk)->next_c, *prev = l2cap_pi(sk)->prev_c;
188 write_lock_bh(&l->lock);
193 l2cap_pi(next)->prev_c = prev;
195 l2cap_pi(prev)->next_c = next;
196 write_unlock_bh(&l->lock);
/* Attach socket 'sk' to connection 'conn' as a new channel and assign its
 * CIDs according to socket type:
 *  - SOCK_SEQPACKET: connection-oriented, dynamically allocated SCID;
 *  - SOCK_DGRAM:     connectionless channel, fixed CID 0x0002;
 *  - otherwise (raw): signalling channel, fixed CID 0x0001.
 * If 'parent' is a listening socket, the child is queued on its accept
 * queue. Caller must hold the channel-list write lock. */
201 static void __l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
203 struct l2cap_chan_list *l = &conn->chan_list;
205 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn, l2cap_pi(sk)->psm, l2cap_pi(sk)->dcid);
207 l2cap_pi(sk)->conn = conn;
209 if (sk->sk_type == SOCK_SEQPACKET) {
210 /* Alloc CID for connection-oriented socket */
211 l2cap_pi(sk)->scid = l2cap_alloc_cid(l);
212 } else if (sk->sk_type == SOCK_DGRAM) {
213 /* Connectionless socket */
214 l2cap_pi(sk)->scid = 0x0002;
215 l2cap_pi(sk)->dcid = 0x0002;
216 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
218 /* Raw socket can send/recv signalling messages only */
219 l2cap_pi(sk)->scid = 0x0001;
220 l2cap_pi(sk)->dcid = 0x0001;
221 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
224 __l2cap_chan_link(l, sk);
227 bt_accept_enqueue(parent, sk);
231 * Must be called on the locked socket. */
/* Tear down a channel: stop its timer, unlink it from the connection's
 * channel list, drop the hci_conn reference, mark the socket closed and
 * zapped with error 'err', and notify the accept parent (if any).
 * Must be called on the locked socket (see comment at original line 231). */
232 static void l2cap_chan_del(struct sock *sk, int err)
234 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
235 struct sock *parent = bt_sk(sk)->parent;
237 l2cap_sock_clear_timer(sk);
239 BT_DBG("sk %p, conn %p, err %d", sk, conn, err);
242 /* Unlink from channel list */
243 l2cap_chan_unlink(&conn->chan_list, sk);
244 l2cap_pi(sk)->conn = NULL;
/* Release the ACL link reference taken when the channel was added. */
245 hci_conn_put(conn->hcon);
248 sk->sk_state = BT_CLOSED;
249 sock_set_flag(sk, SOCK_ZAPPED);
/* Pending (not yet accepted) child: detach and wake the listener. */
255 bt_accept_unlink(sk);
256 parent->sk_data_ready(parent, 0);
258 sk->sk_state_change(sk);
261 /* ---- L2CAP connections ---- */
/* Create (or return the existing) L2CAP connection object for an ACL link.
 * Allocates with GFP_ATOMIC since this can run from HCI event context;
 * initialises MTU from the device's ACL MTU and records endpoint addrs. */
262 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
264 struct l2cap_conn *conn = hcon->l2cap_data;
269 conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
273 hcon->l2cap_data = conn;
276 BT_DBG("hcon %p conn %p", hcon, conn);
278 conn->mtu = hcon->hdev->acl_mtu;
279 conn->src = &hcon->hdev->bdaddr;
280 conn->dst = &hcon->dst;
282 spin_lock_init(&conn->lock);
283 rwlock_init(&conn->chan_list.lock);
/* Destroy the L2CAP connection attached to an ACL link: free any partial
 * reassembly skb, delete every remaining channel with error 'err', and
 * detach from the hci_conn. */
290 static void l2cap_conn_del(struct hci_conn *hcon, int err)
292 struct l2cap_conn *conn = hcon->l2cap_data;
296 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
299 kfree_skb(conn->rx_skb);
/* l2cap_chan_del() unlinks the head, so this loop drains the list. */
302 while ((sk = conn->chan_list.head)) {
304 l2cap_chan_del(sk, err);
309 hcon->l2cap_data = NULL;
/* Locked wrapper around __l2cap_chan_add(): takes the channel-list write
 * lock (BH-safe) for the duration of the insert. */
313 static inline void l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
315 struct l2cap_chan_list *l = &conn->chan_list;
316 write_lock_bh(&l->lock);
317 __l2cap_chan_add(conn, sk, parent);
318 write_unlock_bh(&l->lock);
/* Allocate the next signalling-command identifier for this connection,
 * wrapping within the kernel-reserved range (1-128). Serialised with the
 * connection spinlock. */
321 static inline u8 l2cap_get_ident(struct l2cap_conn *conn)
325 /* Get next available identificator.
326  * 1 - 128 are used by kernel.
327  * 129 - 199 are reserved.
328  * 200 - 254 are used by utilities like l2ping, etc.
331 spin_lock_bh(&conn->lock);
333 if (++conn->tx_ident > 128)
338 spin_unlock_bh(&conn->lock);
/* Build a signalling command skb and transmit it over the ACL link.
 * Returns the result of hci_send_acl(). */
343 static inline int l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
345 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
347 BT_DBG("code 0x%2.2x", code);
352 return hci_send_acl(conn->hcon, skb, 0);
355 /* ---- Socket interface ---- */
/* Find a socket bound to exactly this (source PSM, source bdaddr) pair.
 * Used to detect bind conflicts; caller must hold l2cap_sk_list.lock. */
356 static struct sock *__l2cap_get_sock_by_addr(__le16 psm, bdaddr_t *src)
359 struct hlist_node *node;
360 sk_for_each(sk, node, &l2cap_sk_list.head)
361 if (l2cap_pi(sk)->sport == psm && !bacmp(&bt_sk(sk)->src, src))
368 /* Find socket with psm and source bdaddr.
369  * Returns closest match.
/* Exact source-address match wins; a BDADDR_ANY binding is remembered in
 * sk1 as the fallback. 'state' of 0 matches any state. */
371 static struct sock *__l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
373 struct sock *sk = NULL, *sk1 = NULL;
374 struct hlist_node *node;
376 sk_for_each(sk, node, &l2cap_sk_list.head) {
377 if (state && sk->sk_state != state)
380 if (l2cap_pi(sk)->psm == psm) {
382 if (!bacmp(&bt_sk(sk)->src, src))
386 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
/* node non-NULL means the loop broke on an exact match. */
390 return node ? sk : sk1;
393 /* Find socket with given address (psm, src).
394  * Returns locked socket */
395 static inline struct sock *l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
398 read_lock(&l2cap_sk_list.lock);
399 s = __l2cap_get_sock_by_psm(state, psm, src);
400 if (s) bh_lock_sock(s);
401 read_unlock(&l2cap_sk_list.lock);
/* sk_destruct callback: drop any queued rx/tx skbs before the socket
 * memory is released. */
405 static void l2cap_sock_destruct(struct sock *sk)
409 skb_queue_purge(&sk->sk_receive_queue);
410 skb_queue_purge(&sk->sk_write_queue);
/* Close every not-yet-accepted child of a listening socket, then mark
 * the parent itself closed and zapped. */
413 static void l2cap_sock_cleanup_listen(struct sock *parent)
417 BT_DBG("parent %p", parent);
419 /* Close not yet accepted channels */
420 while ((sk = bt_accept_dequeue(parent, NULL)))
421 l2cap_sock_close(sk);
423 parent->sk_state = BT_CLOSED;
424 sock_set_flag(parent, SOCK_ZAPPED);
427 /* Kill socket (only if zapped and orphan)
428  * Must be called on unlocked socket.
430 static void l2cap_sock_kill(struct sock *sk)
/* Bail out unless the socket is both zapped and orphaned (no struct
 * socket attached) — otherwise release/close will finish the job later. */
432 if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
435 BT_DBG("sk %p state %d", sk, sk->sk_state);
437 /* Kill poor orphan */
438 bt_sock_unlink(&l2cap_sk_list, sk);
439 sock_set_flag(sk, SOCK_DEAD);
/* State-machine close, called with the socket locked.
 * Listening sockets drain their accept queue; connected SEQPACKET
 * channels send an L2CAP Disconnect Request and wait in BT_DISCONN;
 * other states drop the channel immediately or just mark zapped. */
443 static void __l2cap_sock_close(struct sock *sk, int reason)
445 BT_DBG("sk %p state %d socket %p", sk, sk->sk_state, sk->sk_socket);
447 switch (sk->sk_state) {
449 l2cap_sock_cleanup_listen(sk);
455 if (sk->sk_type == SOCK_SEQPACKET) {
456 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
457 struct l2cap_disconn_req req;
459 sk->sk_state = BT_DISCONN;
/* Bound the disconnect handshake with the send timeout. */
460 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
462 req.dcid = cpu_to_le16(l2cap_pi(sk)->dcid);
463 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
464 l2cap_send_cmd(conn, l2cap_get_ident(conn),
465 L2CAP_DISCONN_REQ, sizeof(req), &req);
467 l2cap_chan_del(sk, reason);
473 l2cap_chan_del(sk, reason);
477 sock_set_flag(sk, SOCK_ZAPPED);
482 /* Must be called on unlocked socket. */
483 static void l2cap_sock_close(struct sock *sk)
/* Cancel any pending timer, then run the locked close path with
 * ECONNRESET as the error reported to waiters. */
485 l2cap_sock_clear_timer(sk);
487 __l2cap_sock_close(sk, ECONNRESET);
/* Initialise per-socket L2CAP state. A child accepted from 'parent'
 * inherits its type, MTUs and link mode; a fresh socket gets defaults. */
492 static void l2cap_sock_init(struct sock *sk, struct sock *parent)
494 struct l2cap_pinfo *pi = l2cap_pi(sk);
499 sk->sk_type = parent->sk_type;
500 pi->imtu = l2cap_pi(parent)->imtu;
501 pi->omtu = l2cap_pi(parent)->omtu;
502 pi->link_mode = l2cap_pi(parent)->link_mode;
504 pi->imtu = L2CAP_DEFAULT_MTU;
509 /* Default config options */
511 pi->flush_to = L2CAP_DEFAULT_FLUSH_TO;
/* Protocol descriptor: sizes sk_alloc()'s allocation for l2cap_pinfo. */
514 static struct proto l2cap_proto = {
516 .owner = THIS_MODULE,
517 .obj_size = sizeof(struct l2cap_pinfo)
/* Allocate and minimally initialise an L2CAP socket: destructor, default
 * connect timeout, timer, and insertion into the global socket list.
 * Returns the new sock, or (per the visible early-exit) presumably NULL
 * on allocation failure — allocation path lines not all visible here. */
520 static struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock, int proto, gfp_t prio)
524 sk = sk_alloc(net, PF_BLUETOOTH, prio, &l2cap_proto, 1);
528 sock_init_data(sock, sk);
529 INIT_LIST_HEAD(&bt_sk(sk)->accept_q);
531 sk->sk_destruct = l2cap_sock_destruct;
532 sk->sk_sndtimeo = L2CAP_CONN_TIMEOUT;
534 sock_reset_flag(sk, SOCK_ZAPPED);
536 sk->sk_protocol = proto;
537 sk->sk_state = BT_OPEN;
539 l2cap_sock_init_timer(sk);
541 bt_sock_link(&l2cap_sk_list, sk);
/* socket(2) handler for BTPROTO_L2CAP: validates the socket type
 * (SEQPACKET/DGRAM/RAW only), requires CAP_NET_RAW for raw sockets,
 * installs the ops table and allocates the sock. */
545 static int l2cap_sock_create(struct net *net, struct socket *sock, int protocol)
549 BT_DBG("sock %p", sock);
551 sock->state = SS_UNCONNECTED;
553 if (sock->type != SOCK_SEQPACKET &&
554 sock->type != SOCK_DGRAM && sock->type != SOCK_RAW)
555 return -ESOCKTNOSUPPORT;
557 if (sock->type == SOCK_RAW && !capable(CAP_NET_RAW))
560 sock->ops = &l2cap_sock_ops;
562 sk = l2cap_sock_alloc(net, sock, protocol, GFP_ATOMIC);
566 l2cap_sock_init(sk, NULL);
/* bind(2): record source bdaddr and PSM. PSMs below 0x1001 are reserved
 * for well-known services and require CAP_NET_BIND_SERVICE; duplicate
 * (PSM, bdaddr) bindings are rejected under the socket-list lock. */
570 static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
572 struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
573 struct sock *sk = sock->sk;
576 BT_DBG("sk %p, %s %d", sk, batostr(&la->l2_bdaddr), la->l2_psm);
578 if (!addr || addr->sa_family != AF_BLUETOOTH)
/* Only a freshly created socket may be bound. */
583 if (sk->sk_state != BT_OPEN) {
588 if (la->l2_psm && btohs(la->l2_psm) < 0x1001 &&
589 !capable(CAP_NET_BIND_SERVICE)) {
594 write_lock_bh(&l2cap_sk_list.lock);
596 if (la->l2_psm && __l2cap_get_sock_by_addr(la->l2_psm, &la->l2_bdaddr)) {
599 /* Save source address */
600 bacpy(&bt_sk(sk)->src, &la->l2_bdaddr);
601 l2cap_pi(sk)->psm = la->l2_psm;
602 l2cap_pi(sk)->sport = la->l2_psm;
603 sk->sk_state = BT_BOUND;
606 write_unlock_bh(&l2cap_sk_list.lock);
/* Kick off an outgoing connection: route to an HCI device, create/obtain
 * the ACL link and the l2cap_conn, attach the channel, and either send an
 * L2CAP Connect Request now (link already up, SEQPACKET) or leave it for
 * l2cap_conn_ready() when the ACL link completes. */
613 static int l2cap_do_connect(struct sock *sk)
615 bdaddr_t *src = &bt_sk(sk)->src;
616 bdaddr_t *dst = &bt_sk(sk)->dst;
617 struct l2cap_conn *conn;
618 struct hci_conn *hcon;
619 struct hci_dev *hdev;
622 BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst), l2cap_pi(sk)->psm);
624 if (!(hdev = hci_get_route(dst, src)))
625 return -EHOSTUNREACH;
627 hci_dev_lock_bh(hdev);
631 hcon = hci_connect(hdev, ACL_LINK, dst);
635 conn = l2cap_conn_add(hcon, 0);
643 /* Update source addr of the socket */
644 bacpy(src, conn->src);
646 l2cap_chan_add(conn, sk, NULL);
648 sk->sk_state = BT_CONNECT;
649 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
651 if (hcon->state == BT_CONNECTED) {
652 if (sk->sk_type == SOCK_SEQPACKET) {
653 struct l2cap_conn_req req;
654 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
655 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
656 req.psm = l2cap_pi(sk)->psm;
657 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
658 L2CAP_CONN_REQ, sizeof(req), &req);
/* Non-SEQPACKET on a live link is connected immediately. */
660 l2cap_sock_clear_timer(sk);
661 sk->sk_state = BT_CONNECTED;
666 hci_dev_unlock_bh(hdev);
/* connect(2): validate the address (SEQPACKET requires a PSM), check the
 * current state, store destination addr/PSM, start the connection and
 * then wait for BT_CONNECTED honouring O_NONBLOCK / the send timeout. */
671 static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, int alen, int flags)
673 struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
674 struct sock *sk = sock->sk;
681 if (addr->sa_family != AF_BLUETOOTH || alen < sizeof(struct sockaddr_l2)) {
686 if (sk->sk_type == SOCK_SEQPACKET && !la->l2_psm) {
691 switch(sk->sk_state) {
695 /* Already connecting */
699 /* Already connected */
712 /* Set destination address and psm */
713 bacpy(&bt_sk(sk)->dst, &la->l2_bdaddr);
714 l2cap_pi(sk)->psm = la->l2_psm;
716 if ((err = l2cap_do_connect(sk)))
720 err = bt_sock_wait_state(sk, BT_CONNECTED,
721 sock_sndtimeo(sk, flags & O_NONBLOCK));
/* listen(2): only bound SEQPACKET sockets may listen. If no PSM was bound
 * yet, auto-assign the first free odd dynamic PSM in 0x1001-0x10ff
 * (valid PSMs have an odd least-significant octet). */
727 static int l2cap_sock_listen(struct socket *sock, int backlog)
729 struct sock *sk = sock->sk;
732 BT_DBG("sk %p backlog %d", sk, backlog);
736 if (sk->sk_state != BT_BOUND || sock->type != SOCK_SEQPACKET) {
741 if (!l2cap_pi(sk)->psm) {
742 bdaddr_t *src = &bt_sk(sk)->src;
747 write_lock_bh(&l2cap_sk_list.lock);
749 for (psm = 0x1001; psm < 0x1100; psm += 2)
750 if (!__l2cap_get_sock_by_addr(htobs(psm), src)) {
751 l2cap_pi(sk)->psm = htobs(psm);
752 l2cap_pi(sk)->sport = htobs(psm);
757 write_unlock_bh(&l2cap_sk_list.lock);
763 sk->sk_max_ack_backlog = backlog;
764 sk->sk_ack_backlog = 0;
765 sk->sk_state = BT_LISTEN;
/* accept(2): sleep (wake-one, interruptible) on the listening socket's
 * wait queue until a child appears on the accept queue, the listener
 * leaves BT_LISTEN, the timeout expires, or a signal arrives. */
772 static int l2cap_sock_accept(struct socket *sock, struct socket *newsock, int flags)
774 DECLARE_WAITQUEUE(wait, current);
775 struct sock *sk = sock->sk, *nsk;
779 lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
781 if (sk->sk_state != BT_LISTEN) {
786 timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
788 BT_DBG("sk %p timeo %ld", sk, timeo);
790 /* Wait for an incoming connection. (wake-one). */
791 add_wait_queue_exclusive(sk->sk_sleep, &wait);
792 while (!(nsk = bt_accept_dequeue(sk, newsock))) {
793 set_current_state(TASK_INTERRUPTIBLE);
/* Socket lock is dropped around schedule_timeout() — reacquired below. */
800 timeo = schedule_timeout(timeo);
801 lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
803 if (sk->sk_state != BT_LISTEN) {
808 if (signal_pending(current)) {
809 err = sock_intr_errno(timeo);
813 set_current_state(TASK_RUNNING);
814 remove_wait_queue(sk->sk_sleep, &wait);
819 newsock->state = SS_CONNECTED;
821 BT_DBG("new socket %p", nsk);
/* getsockname/getpeername: fill a sockaddr_l2 with the peer address when
 * 'peer' is set, otherwise the local address, plus the PSM. */
828 static int l2cap_sock_getname(struct socket *sock, struct sockaddr *addr, int *len, int peer)
830 struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
831 struct sock *sk = sock->sk;
833 BT_DBG("sock %p, sk %p", sock, sk);
835 addr->sa_family = AF_BLUETOOTH;
836 *len = sizeof(struct sockaddr_l2);
839 bacpy(&la->l2_bdaddr, &bt_sk(sk)->dst);
841 bacpy(&la->l2_bdaddr, &bt_sk(sk)->src);
843 la->l2_psm = l2cap_pi(sk)->psm;
/* Build and transmit one L2CAP frame from a user msghdr.
 * First skb carries the L2CAP basic header (plus a 2-byte PSM for
 * connectionless DGRAM sockets); data beyond the ACL MTU is chained as
 * fragment skbs on frag_list. Returns bytes sent or negative errno. */
847 static inline int l2cap_do_send(struct sock *sk, struct msghdr *msg, int len)
849 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
850 struct sk_buff *skb, **frag;
851 int err, hlen, count, sent=0;
852 struct l2cap_hdr *lh;
854 BT_DBG("sk %p len %d", sk, len);
856 /* First fragment (with L2CAP header) */
857 if (sk->sk_type == SOCK_DGRAM)
858 hlen = L2CAP_HDR_SIZE + 2;
860 hlen = L2CAP_HDR_SIZE;
862 count = min_t(unsigned int, (conn->mtu - hlen), len);
864 skb = bt_skb_send_alloc(sk, hlen + count,
865 msg->msg_flags & MSG_DONTWAIT, &err);
869 /* Create L2CAP header */
870 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
871 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
/* Header length field covers the payload plus the DGRAM PSM prefix. */
872 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
874 if (sk->sk_type == SOCK_DGRAM)
875 put_unaligned(l2cap_pi(sk)->psm, (__le16 *) skb_put(skb, 2));
877 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count)) {
885 /* Continuation fragments (no L2CAP header) */
886 frag = &skb_shinfo(skb)->frag_list;
888 count = min_t(unsigned int, conn->mtu, len);
890 *frag = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err);
894 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count)) {
902 frag = &(*frag)->next;
905 if ((err = hci_send_acl(conn->hcon, skb, 0)) < 0)
/* sendmsg(2): reject MSG_OOB, enforce the outgoing MTU for non-raw
 * sockets, and hand off to l2cap_do_send() once connected. */
915 static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len)
917 struct sock *sk = sock->sk;
920 BT_DBG("sock %p, sk %p", sock, sk);
922 err = sock_error(sk);
926 if (msg->msg_flags & MSG_OOB)
929 /* Check outgoing MTU */
930 if (sk->sk_type != SOCK_RAW && len > l2cap_pi(sk)->omtu)
935 if (sk->sk_state == BT_CONNECTED)
936 err = l2cap_do_send(sk, msg, len);
/* setsockopt(SOL_L2CAP): L2CAP_OPTIONS updates imtu/omtu (current values
 * are loaded first so a short copy keeps the rest); L2CAP_LM sets the
 * link-mode bitmask. */
944 static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, int optlen)
946 struct sock *sk = sock->sk;
947 struct l2cap_options opts;
957 opts.imtu = l2cap_pi(sk)->imtu;
958 opts.omtu = l2cap_pi(sk)->omtu;
959 opts.flush_to = l2cap_pi(sk)->flush_to;
962 len = min_t(unsigned int, sizeof(opts), optlen);
963 if (copy_from_user((char *) &opts, optval, len)) {
968 l2cap_pi(sk)->imtu = opts.imtu;
969 l2cap_pi(sk)->omtu = opts.omtu;
973 if (get_user(opt, (u32 __user *) optval)) {
978 l2cap_pi(sk)->link_mode = opt;
/* getsockopt(SOL_L2CAP): returns L2CAP_OPTIONS (mtu/flush timeout),
 * L2CAP_LM (link mode), or L2CAP_CONNINFO (HCI handle + device class,
 * valid only while connected). */
990 static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen)
992 struct sock *sk = sock->sk;
993 struct l2cap_options opts;
994 struct l2cap_conninfo cinfo;
999 if (get_user(len, optlen))
1006 opts.imtu = l2cap_pi(sk)->imtu;
1007 opts.omtu = l2cap_pi(sk)->omtu;
1008 opts.flush_to = l2cap_pi(sk)->flush_to;
1011 len = min_t(unsigned int, len, sizeof(opts));
1012 if (copy_to_user(optval, (char *) &opts, len))
1018 if (put_user(l2cap_pi(sk)->link_mode, (u32 __user *) optval))
1022 case L2CAP_CONNINFO:
1023 if (sk->sk_state != BT_CONNECTED) {
1028 cinfo.hci_handle = l2cap_pi(sk)->conn->hcon->handle;
1029 memcpy(cinfo.dev_class, l2cap_pi(sk)->conn->hcon->dev_class, 3);
1031 len = min_t(unsigned int, len, sizeof(cinfo));
1032 if (copy_to_user(optval, (char *) &cinfo, len))
/* shutdown(2): idempotently close the channel; if SO_LINGER is set with
 * a nonzero linger time, wait for BT_CLOSED up to that long. */
1046 static int l2cap_sock_shutdown(struct socket *sock, int how)
1048 struct sock *sk = sock->sk;
1051 BT_DBG("sock %p, sk %p", sock, sk);
1057 if (!sk->sk_shutdown) {
1058 sk->sk_shutdown = SHUTDOWN_MASK;
1059 l2cap_sock_clear_timer(sk);
1060 __l2cap_sock_close(sk, 0);
1062 if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
1063 err = bt_sock_wait_state(sk, BT_CLOSED, sk->sk_lingertime);
/* release(2) (close of last fd): full shutdown followed by kill of the
 * now-orphaned socket. */
1069 static int l2cap_sock_release(struct socket *sock)
1071 struct sock *sk = sock->sk;
1074 BT_DBG("sock %p, sk %p", sock, sk);
1079 err = l2cap_sock_shutdown(sock, 2);
1082 l2cap_sock_kill(sk);
/* Called when the underlying ACL link comes up: non-SEQPACKET channels
 * become connected immediately; SEQPACKET channels still in BT_CONNECT
 * now send their deferred L2CAP Connect Request. */
1086 static void l2cap_conn_ready(struct l2cap_conn *conn)
1088 struct l2cap_chan_list *l = &conn->chan_list;
1091 BT_DBG("conn %p", conn);
1093 read_lock(&l->lock);
1095 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
1098 if (sk->sk_type != SOCK_SEQPACKET) {
1099 l2cap_sock_clear_timer(sk);
1100 sk->sk_state = BT_CONNECTED;
1101 sk->sk_state_change(sk);
1102 } else if (sk->sk_state == BT_CONNECT) {
1103 struct l2cap_conn_req req;
1104 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
1105 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
1106 req.psm = l2cap_pi(sk)->psm;
1107 l2cap_send_cmd(conn, l2cap_pi(sk)->ident, L2CAP_CONN_REQ, sizeof(req), &req);
1113 read_unlock(&l->lock);
1116 /* Notify sockets that we cannot guaranty reliability anymore */
1117 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
1119 struct l2cap_chan_list *l = &conn->chan_list;
1122 BT_DBG("conn %p", conn);
/* Only channels that asked for L2CAP_LM_RELIABLE are notified. */
1124 read_lock(&l->lock);
1125 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
1126 if (l2cap_pi(sk)->link_mode & L2CAP_LM_RELIABLE)
1129 read_unlock(&l->lock);
/* Channel configuration finished: clear config state and timer, then wake
 * either the connecting caller (outgoing) or the listener (incoming). */
1132 static void l2cap_chan_ready(struct sock *sk)
1134 struct sock *parent = bt_sk(sk)->parent;
1136 BT_DBG("sk %p, parent %p", sk, parent);
1138 l2cap_pi(sk)->conf_state = 0;
1139 l2cap_sock_clear_timer(sk);
1142 /* Outgoing channel.
1143  * Wake up socket sleeping on connect.
1145 sk->sk_state = BT_CONNECTED;
1146 sk->sk_state_change(sk);
1148 /* Incoming channel.
1149  * Wake up socket sleeping on accept.
1151 parent->sk_data_ready(parent, 0);
1155 /* Copy frame to all raw sockets on that connection */
1156 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
1158 struct l2cap_chan_list *l = &conn->chan_list;
1159 struct sk_buff *nskb;
1162 BT_DBG("conn %p", conn);
1164 read_lock(&l->lock);
1165 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
1166 if (sk->sk_type != SOCK_RAW)
1169 /* Don't send frame to the socket it came from */
/* Each raw socket gets its own clone; a failed clone or full receive
 * queue just skips/drops for that socket. */
1173 if (!(nskb = skb_clone(skb, GFP_ATOMIC)))
1176 if (sock_queue_rcv_skb(sk, nskb))
1179 read_unlock(&l->lock);
1182 /* ---- L2CAP signalling commands ---- */
/* Build a signalling PDU on CID 0x0001: L2CAP header + command header +
 * 'dlen' bytes of payload, fragmented across frag_list skbs when larger
 * than the connection MTU. Allocations are GFP_ATOMIC (event context). */
1183 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
1184 u8 code, u8 ident, u16 dlen, void *data)
1186 struct sk_buff *skb, **frag;
1187 struct l2cap_cmd_hdr *cmd;
1188 struct l2cap_hdr *lh;
1191 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d", conn, code, ident, dlen);
1193 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
1194 count = min_t(unsigned int, conn->mtu, len);
1196 skb = bt_skb_alloc(count, GFP_ATOMIC);
1200 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1201 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
1202 lh->cid = cpu_to_le16(0x0001);
1204 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
1207 cmd->len = cpu_to_le16(dlen);
1210 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
1211 memcpy(skb_put(skb, count), data, count);
1217 /* Continuation fragments (no L2CAP header) */
1218 frag = &skb_shinfo(skb)->frag_list;
1220 count = min_t(unsigned int, conn->mtu, len);
1222 *frag = bt_skb_alloc(count, GFP_ATOMIC);
1226 memcpy(skb_put(*frag, count), data, count);
1231 frag = &(*frag)->next;
/* Decode one configuration option at *ptr: returns its total encoded
 * length and writes type/length/value out-parameters. 1/2/4-byte values
 * are converted from little-endian; other sizes are returned as a pointer
 * cast into *val. */
1241 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
1243 struct l2cap_conf_opt *opt = *ptr;
1246 len = L2CAP_CONF_OPT_SIZE + opt->len;
1254 *val = *((u8 *) opt->val);
1258 *val = __le16_to_cpu(*((__le16 *) opt->val));
1262 *val = __le32_to_cpu(*((__le32 *) opt->val));
1266 *val = (unsigned long) opt->val;
1270 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
/* Append one configuration option to the buffer at *ptr (little-endian
 * for 1/2/4-byte values, raw memcpy otherwise) and advance *ptr past it. */
1274 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
1276 struct l2cap_conf_opt *opt = *ptr;
1278 BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
1285 *((u8 *) opt->val) = val;
1289 *((__le16 *) opt->val) = cpu_to_le16(val);
1293 *((__le32 *) opt->val) = cpu_to_le32(val);
1297 memcpy(opt->val, (void *) val, len);
1301 *ptr += L2CAP_CONF_OPT_SIZE + len;
/* Fill 'data' with a Configure Request for this channel: MTU option only
 * when the incoming MTU differs from the default. Returns (per callers at
 * the send sites) the request length. */
1304 static int l2cap_build_conf_req(struct sock *sk, void *data)
1306 struct l2cap_pinfo *pi = l2cap_pi(sk);
1307 struct l2cap_conf_req *req = data;
1308 void *ptr = req->data;
1310 BT_DBG("sk %p", sk);
1312 if (pi->imtu != L2CAP_DEFAULT_MTU)
1313 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);
1315 /* FIXME: Need actual value of the flush timeout */
1316 //if (flush_to != L2CAP_DEFAULT_FLUSH_TO)
1317 //   l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2, pi->flush_to);
1319 req->dcid = cpu_to_le16(pi->dcid);
1320 req->flags = cpu_to_le16(0);
/* Parse the accumulated peer Configure Request (pi->conf_req/conf_len)
 * and build the response in 'data'. Known options (MTU, flush timeout,
 * QoS) are handled; unknown non-hint options produce CONF_UNKNOWN with
 * the offending types echoed back. */
1325 static int l2cap_parse_conf_req(struct sock *sk, void *data)
1327 struct l2cap_pinfo *pi = l2cap_pi(sk);
1328 struct l2cap_conf_rsp *rsp = data;
1329 void *ptr = rsp->data;
1330 void *req = pi->conf_req;
1331 int len = pi->conf_len;
1332 int type, hint, olen;
1334 u16 mtu = L2CAP_DEFAULT_MTU;
1335 u16 result = L2CAP_CONF_SUCCESS;
1337 BT_DBG("sk %p", sk);
1339 while (len >= L2CAP_CONF_OPT_SIZE) {
1340 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
1346 case L2CAP_CONF_MTU:
1350 case L2CAP_CONF_FLUSH_TO:
1354 case L2CAP_CONF_QOS:
1361 result = L2CAP_CONF_UNKNOWN;
1362 *((u8 *) ptr++) = type;
1367 if (result == L2CAP_CONF_SUCCESS) {
1368 /* Configure output options and let the other side know
1369  * which ones we don't like. */
1372 result = L2CAP_CONF_UNACCEPT;
1375 pi->conf_state |= L2CAP_CONF_OUTPUT_DONE;
1378 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);
1381 rsp->scid = cpu_to_le16(pi->dcid);
1382 rsp->result = cpu_to_le16(result);
1383 rsp->flags = cpu_to_le16(0x0000);
/* Build a minimal (no-options) Configure Response with the given result
 * and continuation flags. */
1388 static int l2cap_build_conf_rsp(struct sock *sk, void *data, u16 result, u16 flags)
1390 struct l2cap_conf_rsp *rsp = data;
1391 void *ptr = rsp->data;
1393 BT_DBG("sk %p", sk);
1395 rsp->scid = cpu_to_le16(l2cap_pi(sk)->dcid);
1396 rsp->result = cpu_to_le16(result);
1397 rsp->flags = cpu_to_le16(flags);
/* Handle an incoming L2CAP Connect Request.
 * Flow: find a BT_LISTEN socket on the PSM (else CR_BAD_PSM), check the
 * backlog, allocate a child socket, reject duplicate remote CIDs, add the
 * channel and allocate our CID, then either defer for security
 * (CR_PEND/AUTHEN_PEND when link mode requires encrypt/auth) or accept
 * immediately into BT_CONFIG. A Connect Response is always sent at the end. */
1402 static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1404 struct l2cap_chan_list *list = &conn->chan_list;
1405 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
1406 struct l2cap_conn_rsp rsp;
1407 struct sock *sk, *parent;
1408 int result = 0, status = 0;
1410 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
1411 __le16 psm = req->psm;
1413 BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);
1415 /* Check if we have socket listening on psm */
1416 parent = l2cap_get_sock_by_psm(BT_LISTEN, psm, conn->src);
1418 result = L2CAP_CR_BAD_PSM;
1422 result = L2CAP_CR_NO_MEM;
1424 /* Check for backlog size */
1425 if (sk_acceptq_is_full(parent)) {
1426 BT_DBG("backlog full %d", parent->sk_ack_backlog);
1430 sk = l2cap_sock_alloc(parent->sk_net, NULL, BTPROTO_L2CAP, GFP_ATOMIC);
1434 write_lock_bh(&list->lock);
1436 /* Check if we already have channel with that dcid */
1437 if (__l2cap_get_chan_by_dcid(list, scid)) {
1438 write_unlock_bh(&list->lock);
1439 sock_set_flag(sk, SOCK_ZAPPED);
1440 l2cap_sock_kill(sk);
/* Hold the ACL link for the lifetime of this channel. */
1444 hci_conn_hold(conn->hcon);
1446 l2cap_sock_init(sk, parent);
1447 bacpy(&bt_sk(sk)->src, conn->src);
1448 bacpy(&bt_sk(sk)->dst, conn->dst);
1449 l2cap_pi(sk)->psm = psm;
/* The peer's SCID is our DCID, and vice versa in the response. */
1450 l2cap_pi(sk)->dcid = scid;
1452 __l2cap_chan_add(conn, sk, parent);
1453 dcid = l2cap_pi(sk)->scid;
1455 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
1457 /* Service level security */
1458 result = L2CAP_CR_PEND;
1459 status = L2CAP_CS_AUTHEN_PEND;
1460 sk->sk_state = BT_CONNECT2;
1461 l2cap_pi(sk)->ident = cmd->ident;
1463 if ((l2cap_pi(sk)->link_mode & L2CAP_LM_ENCRYPT) ||
1464 (l2cap_pi(sk)->link_mode & L2CAP_LM_SECURE)) {
1465 if (!hci_conn_encrypt(conn->hcon))
1467 } else if (l2cap_pi(sk)->link_mode & L2CAP_LM_AUTH) {
1468 if (!hci_conn_auth(conn->hcon))
/* No security required: accept straight into the config phase. */
1472 sk->sk_state = BT_CONFIG;
1473 result = status = 0;
1476 write_unlock_bh(&list->lock);
1479 bh_unlock_sock(parent);
1482 rsp.scid = cpu_to_le16(scid);
1483 rsp.dcid = cpu_to_le16(dcid);
1484 rsp.result = cpu_to_le16(result);
1485 rsp.status = cpu_to_le16(status);
1486 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
/* Handle a Connect Response: locate our channel by SCID (or by the
 * command ident if the peer echoed 0), then on CR_SUCCESS record the
 * peer's DCID, enter BT_CONFIG and send our Configure Request; on
 * failure tear the channel down with ECONNREFUSED. */
1490 static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1492 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
1493 u16 scid, dcid, result, status;
1497 scid = __le16_to_cpu(rsp->scid);
1498 dcid = __le16_to_cpu(rsp->dcid);
1499 result = __le16_to_cpu(rsp->result);
1500 status = __le16_to_cpu(rsp->status);
1502 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);
1505 if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, scid)))
1508 if (!(sk = l2cap_get_chan_by_ident(&conn->chan_list, cmd->ident)))
1513 case L2CAP_CR_SUCCESS:
1514 sk->sk_state = BT_CONFIG;
1515 l2cap_pi(sk)->ident = 0;
1516 l2cap_pi(sk)->dcid = dcid;
1517 l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
1519 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
1520 l2cap_build_conf_req(sk, req), req);
1527 l2cap_chan_del(sk, ECONNREFUSED);
/* Handle a Configure Request. Options may span several PDUs (continuation
 * flag 0x0001): partial data is accumulated in pi->conf_req, oversized
 * accumulation is rejected with CONF_REJECT. Once complete, parse, send
 * the response, and if both directions are configured bring the channel
 * up; otherwise send our own Configure Request if not yet sent. */
1535 static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
1537 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
1543 dcid = __le16_to_cpu(req->dcid);
1544 flags = __le16_to_cpu(req->flags);
1546 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
1548 if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid)))
1551 if (sk->sk_state == BT_DISCONN)
1554 /* Reject if config buffer is too small. */
1555 len = cmd_len - sizeof(*req);
1556 if (l2cap_pi(sk)->conf_len + len > sizeof(l2cap_pi(sk)->conf_req)) {
1557 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
1558 l2cap_build_conf_rsp(sk, rsp,
1559 L2CAP_CONF_REJECT, flags), rsp);
1564 memcpy(l2cap_pi(sk)->conf_req + l2cap_pi(sk)->conf_len, req->data, len);
1565 l2cap_pi(sk)->conf_len += len;
1567 if (flags & 0x0001) {
1568 /* Incomplete config. Send empty response. */
1569 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
1570 l2cap_build_conf_rsp(sk, rsp,
1571 L2CAP_CONF_SUCCESS, 0x0001), rsp);
1575 /* Complete config. */
1576 len = l2cap_parse_conf_req(sk, rsp);
1580 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
1582 /* Reset config buffer. */
1583 l2cap_pi(sk)->conf_len = 0;
1585 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE))
1588 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_INPUT_DONE) {
1589 sk->sk_state = BT_CONNECTED;
1590 l2cap_chan_ready(sk);
1594 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)) {
1596 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
1597 l2cap_build_conf_req(sk, req), req);
/* Handle a Configure Response. On CONF_UNACCEPT we retry the same request
 * up to L2CAP_CONF_MAX_RETRIES (workaround for qualification testing);
 * otherwise a failure disconnects the channel. On success mark input
 * configuration done and, if output is done too, bring the channel up. */
1605 static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1607 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
1608 u16 scid, flags, result;
1611 scid = __le16_to_cpu(rsp->scid);
1612 flags = __le16_to_cpu(rsp->flags);
1613 result = __le16_to_cpu(rsp->result);
1615 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x", scid, flags, result);
1617 if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, scid)))
1621 case L2CAP_CONF_SUCCESS:
1624 case L2CAP_CONF_UNACCEPT:
1625 if (++l2cap_pi(sk)->conf_retry < L2CAP_CONF_MAX_RETRIES) {
1627 /* It does not make sense to adjust L2CAP parameters
1628  * that are currently defined in the spec. We simply
1629  * resend config request that we sent earlier. It is
1630  * stupid, but it helps qualification testing which
1631  * expects at least some response from us. */
1632 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
1633 l2cap_build_conf_req(sk, req), req);
/* Retries exhausted (or other failure): disconnect the channel. */
1638 sk->sk_state = BT_DISCONN;
1639 sk->sk_err = ECONNRESET;
1640 l2cap_sock_set_timer(sk, HZ * 5);
1642 struct l2cap_disconn_req req;
1643 req.dcid = cpu_to_le16(l2cap_pi(sk)->dcid);
1644 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
1645 l2cap_send_cmd(conn, l2cap_get_ident(conn),
1646 L2CAP_DISCONN_REQ, sizeof(req), &req);
1654 l2cap_pi(sk)->conf_state |= L2CAP_CONF_INPUT_DONE;
1656 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE) {
1657 sk->sk_state = BT_CONNECTED;
1658 l2cap_chan_ready(sk);
/* Handle a Disconnect Request: find the channel by its local CID (the
 * peer's DCID), acknowledge with a Disconnect Response, tear the channel
 * down with ECONNRESET, and kill the orphaned socket. */
1666 static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1668 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
1669 struct l2cap_disconn_rsp rsp;
1673 scid = __le16_to_cpu(req->scid);
1674 dcid = __le16_to_cpu(req->dcid);
1676 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
1678 if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid)))
1681 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
1682 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
1683 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
1685 sk->sk_shutdown = SHUTDOWN_MASK;
1687 l2cap_chan_del(sk, ECONNRESET);
1690 l2cap_sock_kill(sk);
/* Handle L2CAP_DISCONN_RSP: the peer confirmed our disconnect request,
 * so the channel is removed without raising an error on the socket. */
1694 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1696 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
1700 scid = __le16_to_cpu(rsp->scid);
1701 dcid = __le16_to_cpu(rsp->dcid);
1703 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
1705 if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, scid)))
/* err == 0: clean, expected teardown. */
1708 l2cap_chan_del(sk, 0);
1711 l2cap_sock_kill(sk);
/* Handle L2CAP_INFO_REQ.  No optional features are implemented here,
 * so every query is answered with result "not supported". */
1715 static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1717 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
1718 struct l2cap_info_rsp rsp;
1721 type = __le16_to_cpu(req->type);
1723 BT_DBG("type 0x%4.4x", type);
/* Echo the requested type back, flagging it unsupported. */
1725 rsp.type = cpu_to_le16(type);
1726 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
1727 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(rsp), &rsp);
/* Handle L2CAP_INFO_RSP.  The answer is only logged for debugging;
 * nothing in the visible code acts on it. */
1732 static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1734 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
1737 type = __le16_to_cpu(rsp->type);
1738 result = __le16_to_cpu(rsp->result);
1740 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
/* Demultiplex the L2CAP signalling channel (CID 0x0001): iterate over
 * every command packed into the frame, dispatch it to the matching
 * handler, and send a COMMAND_REJ back when a handler reports an
 * error.  The raw frame is first offered to SOCK_RAW listeners. */
1745 static inline void l2cap_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb)
1747 u8 *data = skb->data;
1749 struct l2cap_cmd_hdr cmd;
/* Let raw (monitoring) sockets see the unparsed signalling frame. */
1752 l2cap_raw_recv(conn, skb);
/* A single C-frame may carry several commands back to back. */
1754 while (len >= L2CAP_CMD_HDR_SIZE) {
1756 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
1757 data += L2CAP_CMD_HDR_SIZE;
1758 len -= L2CAP_CMD_HDR_SIZE;
1760 cmd_len = le16_to_cpu(cmd.len);
1762 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);
/* A command longer than the remaining payload, or ident 0 (reserved
 * by the spec), means the frame is corrupt: stop parsing it. */
1764 if (cmd_len > len || !cmd.ident) {
1765 BT_DBG("corrupted command");
1770 case L2CAP_COMMAND_REJ:
1771 /* FIXME: We should process this */
1774 case L2CAP_CONN_REQ:
1775 err = l2cap_connect_req(conn, &cmd, data);
1778 case L2CAP_CONN_RSP:
1779 err = l2cap_connect_rsp(conn, &cmd, data);
1782 case L2CAP_CONF_REQ:
1783 err = l2cap_config_req(conn, &cmd, cmd_len, data);
1786 case L2CAP_CONF_RSP:
1787 err = l2cap_config_rsp(conn, &cmd, data);
1790 case L2CAP_DISCONN_REQ:
1791 err = l2cap_disconnect_req(conn, &cmd, data);
1794 case L2CAP_DISCONN_RSP:
1795 err = l2cap_disconnect_rsp(conn, &cmd, data);
1798 case L2CAP_ECHO_REQ:
/* Echo request: bounce the payload straight back with the same ident. */
1799 l2cap_send_cmd(conn, cmd.ident, L2CAP_ECHO_RSP, cmd_len, data);
1802 case L2CAP_ECHO_RSP:
1805 case L2CAP_INFO_REQ:
1806 err = l2cap_information_req(conn, &cmd, data);
1809 case L2CAP_INFO_RSP:
1810 err = l2cap_information_rsp(conn, &cmd, data);
1814 BT_ERR("Unknown signaling command 0x%2.2x", cmd.code);
/* Handler failed: tell the peer we rejected its command.
 * FIXME below is upstream's: reason 0 is "command not understood". */
1820 struct l2cap_cmd_rej rej;
1821 BT_DBG("error %d", err);
1823 /* FIXME: Map err to a valid reason */
1824 rej.reason = cpu_to_le16(0);
1825 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
/* Deliver a connection-oriented data frame to the socket owning the
 * given channel id.  Frames for unknown CIDs, for sockets that are
 * not BT_CONNECTED, or larger than the negotiated incoming MTU are
 * dropped. */
1835 static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
1839 sk = l2cap_get_chan_by_scid(&conn->chan_list, cid);
1841 BT_DBG("unknown cid 0x%4.4x", cid);
1845 BT_DBG("sk %p, len %d", sk, skb->len);
1847 if (sk->sk_state != BT_CONNECTED)
/* Enforce the incoming MTU we advertised during configuration. */
1850 if (l2cap_pi(sk)->imtu < skb->len)
1853 /* If socket recv buffers overflows we drop data here
1854 * which is *bad* because L2CAP has to be reliable.
1855 * But we don't have any other choice. L2CAP doesn't
1856 * provide flow control mechanism. */
1858 if (!sock_queue_rcv_skb(sk, skb))
/* Deliver a connectionless (CID 0x0002) frame to the first socket
 * bound to the given PSM on this source address.  BT_BOUND sockets
 * may receive too, since no connection is required on this channel. */
1871 static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
1875 sk = l2cap_get_sock_by_psm(0, psm, conn->src);
1879 BT_DBG("sk %p, len %d", sk, skb->len);
1881 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_CONNECTED)
/* Same MTU policy as the connection-oriented path. */
1884 if (l2cap_pi(sk)->imtu < skb->len)
1887 if (!sock_queue_rcv_skb(sk, skb))
/* The PSM lookup bh-locked the socket; release it on the way out. */
1894 if (sk) bh_unlock_sock(sk);
/* Route one complete, reassembled L2CAP frame by destination CID:
 * 0x0001 -> signalling channel, 0x0002 -> connectionless channel
 * (payload starts with the PSM), anything else -> connection-oriented
 * data channel. */
1898 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
1900 struct l2cap_hdr *lh = (void *) skb->data;
/* Strip the basic header; cid/len are still readable through lh. */
1904 skb_pull(skb, L2CAP_HDR_SIZE);
1905 cid = __le16_to_cpu(lh->cid);
1906 len = __le16_to_cpu(lh->len);
1908 BT_DBG("len %d, cid 0x%4.4x", len, cid);
1912 l2cap_sig_channel(conn, skb);
/* The PSM field in the payload may not be 16-bit aligned. */
1916 psm = get_unaligned((__le16 *) skb->data);
1918 l2cap_conless_channel(conn, psm, skb);
1922 l2cap_data_channel(conn, cid, skb);
1927 /* ---- L2CAP interface with lower layer (HCI) ---- */
/* HCI callback: an incoming ACL connection is being set up.  Scan the
 * listening L2CAP sockets and OR together their link modes so the HCI
 * layer knows whether to accept and with which security bits.  Sockets
 * bound to this adapter's own address take precedence over wildcard
 * (BDADDR_ANY) listeners.
 * NOTE(review): the `register` storage class below is archaic and
 * ignored by modern compilers; left untouched here. */
1929 static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
1931 int exact = 0, lm1 = 0, lm2 = 0;
1932 register struct sock *sk;
1933 struct hlist_node *node;
/* L2CAP only runs over ACL links. */
1935 if (type != ACL_LINK)
1938 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
1940 /* Find listening sockets and check their link_mode */
1941 read_lock(&l2cap_sk_list.lock);
1942 sk_for_each(sk, node, &l2cap_sk_list.head) {
1943 if (sk->sk_state != BT_LISTEN)
/* lm1 accumulates exact-address matches, lm2 wildcard matches. */
1946 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
1947 lm1 |= (HCI_LM_ACCEPT | l2cap_pi(sk)->link_mode);
1949 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
1950 lm2 |= (HCI_LM_ACCEPT | l2cap_pi(sk)->link_mode);
1952 read_unlock(&l2cap_sk_list.lock);
1954 return exact ? lm1 : lm2;
/* HCI callback: an ACL connection attempt finished.  On success attach
 * an l2cap_conn to the HCI connection and kick any channels waiting on
 * it; on failure tear everything down with the HCI status mapped to a
 * POSIX errno. */
1957 static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
1959 struct l2cap_conn *conn;
1961 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
1963 if (hcon->type != ACL_LINK)
1967 conn = l2cap_conn_add(hcon, status);
1969 l2cap_conn_ready(conn);
/* Non-zero status: convert the HCI error code via bt_err(). */
1971 l2cap_conn_del(hcon, bt_err(status));
/* HCI callback: the ACL link dropped.  Destroy the matching l2cap_conn
 * and error out all of its channels with the mapped errno. */
1976 static int l2cap_disconn_ind(struct hci_conn *hcon, u8 reason)
1978 BT_DBG("hcon %p reason %d", hcon, reason);
1980 if (hcon->type != ACL_LINK)
1983 l2cap_conn_del(hcon, bt_err(reason));
/* HCI callback: authentication for the link completed.  Walk every
 * channel on this connection that was parked in BT_CONNECT2 waiting
 * for authentication — and that does not additionally require
 * encryption or secure mode (those wait for l2cap_encrypt_cfm) — and
 * send the deferred CONN_RSP: success moves the channel to BT_CONFIG,
 * failure answers "security block" and schedules a quick teardown. */
1988 static int l2cap_auth_cfm(struct hci_conn *hcon, u8 status)
1990 struct l2cap_chan_list *l;
/* NOTE(review): "conn = conn = hcon->l2cap_data" is a harmless but
 * sloppy duplicated assignment; should read "conn = hcon->l2cap_data"
 * (cf. the clean form in l2cap_encrypt_cfm below). */
1991 struct l2cap_conn *conn = conn = hcon->l2cap_data;
1992 struct l2cap_conn_rsp rsp;
1999 l = &conn->chan_list;
2001 BT_DBG("conn %p", conn);
2003 read_lock(&l->lock);
2005 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
/* Skip channels not waiting here, or still needing the encryption /
 * secure-mode confirmation. */
2008 if (sk->sk_state != BT_CONNECT2 ||
2009 (l2cap_pi(sk)->link_mode & L2CAP_LM_ENCRYPT) ||
2010 (l2cap_pi(sk)->link_mode & L2CAP_LM_SECURE)) {
2016 sk->sk_state = BT_CONFIG;
/* Authentication failed: reject, then kill the socket after HZ/10. */
2019 sk->sk_state = BT_DISCONN;
2020 l2cap_sock_set_timer(sk, HZ/10);
2021 result = L2CAP_CR_SEC_BLOCK;
/* From the peer's point of view our dcid/scid are swapped. */
2024 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
2025 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
2026 rsp.result = cpu_to_le16(result);
2027 rsp.status = cpu_to_le16(0);
2028 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
2029 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
2034 read_unlock(&l->lock);
/* HCI callback: encryption change for the link completed.  Like
 * l2cap_auth_cfm, but covers every channel still parked in
 * BT_CONNECT2: send the deferred CONN_RSP (BT_CONFIG on success,
 * "security block" on failure) and, for L2CAP_LM_SECURE channels,
 * additionally request a link key change. */
2038 static int l2cap_encrypt_cfm(struct hci_conn *hcon, u8 status)
2040 struct l2cap_chan_list *l;
2041 struct l2cap_conn *conn = hcon->l2cap_data;
2042 struct l2cap_conn_rsp rsp;
2049 l = &conn->chan_list;
2051 BT_DBG("conn %p", conn);
2053 read_lock(&l->lock);
2055 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
/* Only channels waiting for the security result are affected. */
2058 if (sk->sk_state != BT_CONNECT2) {
2064 sk->sk_state = BT_CONFIG;
/* Encryption failed: reject, then kill the socket after HZ/10. */
2067 sk->sk_state = BT_DISCONN;
2068 l2cap_sock_set_timer(sk, HZ/10);
2069 result = L2CAP_CR_SEC_BLOCK;
/* From the peer's point of view our dcid/scid are swapped. */
2072 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
2073 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
2074 rsp.result = cpu_to_le16(result);
2075 rsp.status = cpu_to_le16(0);
2076 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
2077 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
/* Secure channels also refresh the link key on the baseband link. */
2079 if (l2cap_pi(sk)->link_mode & L2CAP_LM_SECURE)
2080 hci_conn_change_link_key(hcon);
2085 read_unlock(&l->lock);
/* HCI callback: one ACL data packet arrived.  An L2CAP frame may be
 * fragmented across several ACL packets: an ACL_START packet carries
 * the L2CAP header (giving the total expected length), continuation
 * packets are appended into conn->rx_skb until rx_len hits zero, and
 * then the complete frame is handed to l2cap_recv_frame().  Any
 * inconsistency (unexpected start/continuation, over-long data) marks
 * the connection unreliable with ECOMM. */
2089 static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
2091 struct l2cap_conn *conn = hcon->l2cap_data;
/* First data on this link may have to create the l2cap_conn lazily. */
2093 if (!conn && !(conn = l2cap_conn_add(hcon, 0)))
2096 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
2098 if (flags & ACL_START) {
2099 struct l2cap_hdr *hdr;
/* A new start while reassembly was in progress: drop the half-built
 * frame and flag the connection. */
2103 BT_ERR("Unexpected start frame (len %d)", skb->len);
2104 kfree_skb(conn->rx_skb);
2105 conn->rx_skb = NULL;
2107 l2cap_conn_unreliable(conn, ECOMM);
/* A start packet must at least hold the L2CAP basic header. */
2111 BT_ERR("Frame is too short (len %d)", skb->len);
2112 l2cap_conn_unreliable(conn, ECOMM);
/* Total frame size = payload length from the header + the header. */
2116 hdr = (struct l2cap_hdr *) skb->data;
2117 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
2119 if (len == skb->len) {
2120 /* Complete frame received */
2121 l2cap_recv_frame(conn, skb);
2125 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
2127 if (skb->len > len) {
2128 BT_ERR("Frame is too long (len %d, expected len %d)",
2130 l2cap_conn_unreliable(conn, ECOMM);
2134 /* Allocate skb for the complete frame (with header) */
2135 if (!(conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC)))
2138 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
/* rx_len tracks how many payload bytes are still outstanding. */
2140 conn->rx_len = len - skb->len;
2142 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
/* Continuation with no reassembly pending is a protocol violation. */
2144 if (!conn->rx_len) {
2145 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
2146 l2cap_conn_unreliable(conn, ECOMM);
2150 if (skb->len > conn->rx_len) {
2151 BT_ERR("Fragment is too long (len %d, expected %d)",
2152 skb->len, conn->rx_len);
2153 kfree_skb(conn->rx_skb);
2154 conn->rx_skb = NULL;
2156 l2cap_conn_unreliable(conn, ECOMM);
2160 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
2162 conn->rx_len -= skb->len;
2164 if (!conn->rx_len) {
2165 /* Complete frame received */
2166 l2cap_recv_frame(conn, conn->rx_skb);
2167 conn->rx_skb = NULL;
/* sysfs attribute: dump one line per L2CAP socket (src/dst address,
 * state, PSM, CIDs, MTUs, link mode) into the class attribute buffer.
 * NOTE(review): unbounded sprintf into the fixed-size sysfs buffer can
 * overflow if the socket list grows large — confirm against the buffer
 * size guaranteed by the sysfs core. */
2176 static ssize_t l2cap_sysfs_show(struct class *dev, char *buf)
2179 struct hlist_node *node;
/* _bh variant: the socket list lock is also taken in softirq context. */
2182 read_lock_bh(&l2cap_sk_list.lock);
2184 sk_for_each(sk, node, &l2cap_sk_list.head) {
2185 struct l2cap_pinfo *pi = l2cap_pi(sk);
2187 str += sprintf(str, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d 0x%x\n",
2188 batostr(&bt_sk(sk)->src), batostr(&bt_sk(sk)->dst),
2189 sk->sk_state, btohs(pi->psm), pi->scid, pi->dcid,
2190 pi->imtu, pi->omtu, pi->link_mode);
2193 read_unlock_bh(&l2cap_sk_list.lock);
/* Read-only "l2cap" attribute registered on the bluetooth class. */
2198 static CLASS_ATTR(l2cap, S_IRUGO, l2cap_sysfs_show, NULL);
/* Socket operations for L2CAP sockets.  recvmsg/poll are shared with
 * the generic Bluetooth socket layer; mmap/socketpair/ioctl are
 * explicitly unsupported via the sock_no_* stubs. */
2200 static const struct proto_ops l2cap_sock_ops = {
2201 .family = PF_BLUETOOTH,
2202 .owner = THIS_MODULE,
2203 .release = l2cap_sock_release,
2204 .bind = l2cap_sock_bind,
2205 .connect = l2cap_sock_connect,
2206 .listen = l2cap_sock_listen,
2207 .accept = l2cap_sock_accept,
2208 .getname = l2cap_sock_getname,
2209 .sendmsg = l2cap_sock_sendmsg,
2210 .recvmsg = bt_sock_recvmsg,
2211 .poll = bt_sock_poll,
2212 .mmap = sock_no_mmap,
2213 .socketpair = sock_no_socketpair,
2214 .ioctl = sock_no_ioctl,
2215 .shutdown = l2cap_sock_shutdown,
2216 .setsockopt = l2cap_sock_setsockopt,
2217 .getsockopt = l2cap_sock_getsockopt
/* Registered with the Bluetooth socket layer for BTPROTO_L2CAP;
 * l2cap_sock_create builds new sockets of this protocol. */
2220 static struct net_proto_family l2cap_sock_family_ops = {
2221 .family = PF_BLUETOOTH,
2222 .owner = THIS_MODULE,
2223 .create = l2cap_sock_create,
/* Hooks registered with the HCI core so L2CAP is notified of link
 * events (connect/disconnect/auth/encrypt) and receives ACL data. */
2226 static struct hci_proto l2cap_hci_proto = {
2228 .id = HCI_PROTO_L2CAP,
2229 .connect_ind = l2cap_connect_ind,
2230 .connect_cfm = l2cap_connect_cfm,
2231 .disconn_ind = l2cap_disconn_ind,
2232 .auth_cfm = l2cap_auth_cfm,
2233 .encrypt_cfm = l2cap_encrypt_cfm,
2234 .recv_acldata = l2cap_recv_acldata
/* Module init: register the proto, the PF_BLUETOOTH/BTPROTO_L2CAP
 * socket family and the HCI protocol hooks, rolling back earlier
 * steps on failure.  Creating the sysfs info file is best-effort. */
2237 static int __init l2cap_init(void)
2241 err = proto_register(&l2cap_proto, 0);
2245 err = bt_sock_register(BTPROTO_L2CAP, &l2cap_sock_family_ops);
2247 BT_ERR("L2CAP socket registration failed");
2251 err = hci_register_proto(&l2cap_hci_proto);
2253 BT_ERR("L2CAP protocol registration failed");
/* Roll back the socket registration before bailing out. */
2254 bt_sock_unregister(BTPROTO_L2CAP);
/* sysfs failure is logged but does not fail module load. */
2258 if (class_create_file(bt_class, &class_attr_l2cap) < 0)
2259 BT_ERR("Failed to create L2CAP info file");
2261 BT_INFO("L2CAP ver %s", VERSION);
2262 BT_INFO("L2CAP socket layer initialized");
/* Common error path: undo proto_register(). */
2267 proto_unregister(&l2cap_proto);
/* Module exit: tear everything down, logging (but not failing on)
 * unregistration errors. */
2271 static void __exit l2cap_exit(void)
2273 class_remove_file(bt_class, &class_attr_l2cap);
2275 if (bt_sock_unregister(BTPROTO_L2CAP) < 0)
2276 BT_ERR("L2CAP socket unregistration failed");
2278 if (hci_unregister_proto(&l2cap_hci_proto) < 0)
2279 BT_ERR("L2CAP protocol unregistration failed");
2281 proto_unregister(&l2cap_proto);
/* Dummy exported entry point: merely referencing this symbol from
 * another module makes the module loader pull l2cap in automatically. */
2284 void l2cap_load(void)
2286 /* Dummy function to trigger automatic L2CAP module loading by
2287 * other modules that use L2CAP sockets but don't use any other
2288 * symbols from it. */
2291 EXPORT_SYMBOL(l2cap_load);
/* Standard module metadata.  The "bt-proto-0" alias lets
 * socket(AF_BLUETOOTH, ...) autoload this module for BTPROTO_L2CAP. */
2293 module_init(l2cap_init);
2294 module_exit(l2cap_exit);
2296 MODULE_AUTHOR("Maxim Krasnyansky <maxk@qualcomm.com>, Marcel Holtmann <marcel@holtmann.org>");
2297 MODULE_DESCRIPTION("Bluetooth L2CAP ver " VERSION);
2298 MODULE_VERSION(VERSION);
2299 MODULE_LICENSE("GPL");
2300 MODULE_ALIAS("bt-proto-0");