2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
25 /* Bluetooth L2CAP core and sockets. */
27 #include <linux/module.h>
29 #include <linux/types.h>
30 #include <linux/capability.h>
31 #include <linux/errno.h>
32 #include <linux/kernel.h>
33 #include <linux/sched.h>
34 #include <linux/slab.h>
35 #include <linux/poll.h>
36 #include <linux/fcntl.h>
37 #include <linux/init.h>
38 #include <linux/interrupt.h>
39 #include <linux/socket.h>
40 #include <linux/skbuff.h>
41 #include <linux/list.h>
42 #include <linux/device.h>
45 #include <asm/system.h>
46 #include <asm/uaccess.h>
47 #include <asm/unaligned.h>
49 #include <net/bluetooth/bluetooth.h>
50 #include <net/bluetooth/hci_core.h>
51 #include <net/bluetooth/l2cap.h>
53 #define VERSION "2.11"
/* Extended feature mask advertised in L2CAP info responses;
 * 0x0000 = no optional features (basic mode only). */
55 static u32 l2cap_feat_mask = 0x0000;
57 static const struct proto_ops l2cap_sock_ops;
/* Global list of every L2CAP socket, guarded by its own rwlock. */
59 static struct bt_sock_list l2cap_sk_list = {
60 .lock = __RW_LOCK_UNLOCKED(l2cap_sk_list.lock)
/* Forward declarations for the teardown helpers and the signalling
 * PDU builder used before their definitions below. */
63 static void __l2cap_sock_close(struct sock *sk, int reason);
64 static void l2cap_sock_close(struct sock *sk);
65 static void l2cap_sock_kill(struct sock *sk);
67 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
68 u8 code, u8 ident, u16 dlen, void *data);
70 /* ---- L2CAP timers ---- */
/* sk_timer callback: the pending operation on this socket timed out.
 * NOTE(review): locking, the default 'reason' value and the final
 * kill/sock_put are elided from this excerpt — confirm in full file. */
71 static void l2cap_sock_timeout(unsigned long arg)
73 struct sock *sk = (struct sock *) arg;
76 BT_DBG("sock %p state %d", sk, sk->sk_state);
/* A connect still pending on a channel that demanded security is
 * reported as "connection refused" rather than a plain timeout. */
80 if (sk->sk_state == BT_CONNECT &&
81 (l2cap_pi(sk)->link_mode & (L2CAP_LM_AUTH |
82 L2CAP_LM_ENCRYPT | L2CAP_LM_SECURE)))
83 reason = ECONNREFUSED;
87 __l2cap_sock_close(sk, reason);
/* (Re)arm the socket timer to fire 'timeout' jiffies from now. */
95 static void l2cap_sock_set_timer(struct sock *sk, long timeout)
97 BT_DBG("sk %p state %d timeout %ld", sk, sk->sk_state, timeout);
98 sk_reset_timer(sk, &sk->sk_timer, jiffies + timeout);
/* Cancel a pending socket timer, if armed. */
101 static void l2cap_sock_clear_timer(struct sock *sk)
103 BT_DBG("sock %p state %d", sk, sk->sk_state);
104 sk_stop_timer(sk, &sk->sk_timer);
107 /* ---- L2CAP channels ---- */
/* Find the channel on this connection with the given destination CID.
 * Caller must hold the channel-list lock. */
108 static struct sock *__l2cap_get_chan_by_dcid(struct l2cap_chan_list *l, u16 cid)
111 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
112 if (l2cap_pi(s)->dcid == cid)
/* Find the channel on this connection with the given source CID.
 * Caller must hold the channel-list lock. */
118 static struct sock *__l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
121 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
122 if (l2cap_pi(s)->scid == cid)
128 /* Find channel with given SCID.
129 * Returns locked socket */
130 static inline struct sock *l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
134 s = __l2cap_get_chan_by_scid(l, cid);
/* Lock the socket before releasing the list lock so it cannot
 * disappear under the caller. */
135 if (s) bh_lock_sock(s);
136 read_unlock(&l->lock);
/* Find the channel whose outstanding signalling request used 'ident'.
 * Caller must hold the channel-list lock. */
140 static struct sock *__l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
143 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
144 if (l2cap_pi(s)->ident == ident)
/* Locked variant of __l2cap_get_chan_by_ident(): on success the
 * returned socket is bh-locked before the list lock is dropped. */
150 static inline struct sock *l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
154 s = __l2cap_get_chan_by_ident(l, ident);
155 if (s) bh_lock_sock(s);
156 read_unlock(&l->lock);
/* Allocate the first unused source CID on this connection by linear
 * scan. NOTE(review): the start value of 'cid' is declared on a line
 * elided here — dynamic CIDs conventionally start at 0x0040; confirm. */
160 static u16 l2cap_alloc_cid(struct l2cap_chan_list *l)
164 for (; cid < 0xffff; cid++) {
165 if(!__l2cap_get_chan_by_scid(l, cid))
/* Insert sk at the head of the connection's doubly linked channel
 * list. Caller must hold the list lock. */
172 static inline void __l2cap_chan_link(struct l2cap_chan_list *l, struct sock *sk)
177 l2cap_pi(l->head)->prev_c = sk;
179 l2cap_pi(sk)->next_c = l->head;
180 l2cap_pi(sk)->prev_c = NULL;
/* Remove sk from the connection's channel list, taking the list
 * write lock itself (unlike the __-prefixed helpers). */
184 static inline void l2cap_chan_unlink(struct l2cap_chan_list *l, struct sock *sk)
186 struct sock *next = l2cap_pi(sk)->next_c, *prev = l2cap_pi(sk)->prev_c;
188 write_lock_bh(&l->lock);
193 l2cap_pi(next)->prev_c = prev;
195 l2cap_pi(prev)->next_c = next;
196 write_unlock_bh(&l->lock);
/* Attach sk to the connection: pick its CIDs by socket type, link it
 * into the channel list and, for incoming channels, queue it on the
 * listening parent. Caller holds the channel-list write lock. */
201 static void __l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
203 struct l2cap_chan_list *l = &conn->chan_list;
205 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn, l2cap_pi(sk)->psm, l2cap_pi(sk)->dcid);
207 l2cap_pi(sk)->conn = conn;
209 if (sk->sk_type == SOCK_SEQPACKET) {
210 /* Alloc CID for connection-oriented socket */
211 l2cap_pi(sk)->scid = l2cap_alloc_cid(l);
212 } else if (sk->sk_type == SOCK_DGRAM) {
/* Connectionless sockets use the fixed CID 0x0002. */
213 /* Connectionless socket */
214 l2cap_pi(sk)->scid = 0x0002;
215 l2cap_pi(sk)->dcid = 0x0002;
216 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
/* Remaining case: raw sockets ride the signalling CID 0x0001. */
218 /* Raw socket can send/recv signalling messages only */
219 l2cap_pi(sk)->scid = 0x0001;
220 l2cap_pi(sk)->dcid = 0x0001;
221 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
224 __l2cap_chan_link(l, sk);
227 bt_accept_enqueue(parent, sk);
/* Delete a channel: unlink it from the connection, mark the socket
 * closed/zapped and wake whoever is waiting on it.
231 * Must be called on the locked socket. */
232 static void l2cap_chan_del(struct sock *sk, int err)
234 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
235 struct sock *parent = bt_sk(sk)->parent;
237 l2cap_sock_clear_timer(sk);
239 BT_DBG("sk %p, conn %p, err %d", sk, conn, err);
242 /* Unlink from channel list */
243 l2cap_chan_unlink(&conn->chan_list, sk);
244 l2cap_pi(sk)->conn = NULL;
/* Drop the reference on the ACL link taken when the channel was added. */
245 hci_conn_put(conn->hcon);
248 sk->sk_state = BT_CLOSED;
249 sock_set_flag(sk, SOCK_ZAPPED);
/* Not-yet-accepted child: detach from the parent's accept queue and
 * let the listener re-evaluate it. */
255 bt_accept_unlink(sk);
256 parent->sk_data_ready(parent, 0);
258 sk->sk_state_change(sk);
261 /* Service level security */
/* Ensure the ACL link satisfies the channel's requested link mode:
 * kick off encryption (which implies authentication) or plain
 * authentication as needed. Returns the hci_conn_* result. */
262 static inline int l2cap_check_link_mode(struct sock *sk)
264 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
266 if ((l2cap_pi(sk)->link_mode & L2CAP_LM_ENCRYPT) ||
267 (l2cap_pi(sk)->link_mode & L2CAP_LM_SECURE))
268 return hci_conn_encrypt(conn->hcon);
270 if (l2cap_pi(sk)->link_mode & L2CAP_LM_AUTH)
271 return hci_conn_auth(conn->hcon);
/* Allocate the next signalling-command identifier for this connection,
 * cycling within the kernel-reserved range under conn->lock. */
276 static inline u8 l2cap_get_ident(struct l2cap_conn *conn)
280 /* Get next available identifier.
281 * 1 - 128 are used by kernel.
282 * 129 - 199 are reserved.
283 * 200 - 254 are used by utilities like l2ping, etc.
286 spin_lock_bh(&conn->lock);
/* Wrap back to the start of the kernel range after 128. */
288 if (++conn->tx_ident > 128)
293 spin_unlock_bh(&conn->lock);
/* Build a signalling PDU (code/ident/payload) and transmit it on the
 * connection's ACL link. */
298 static inline int l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
300 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
302 BT_DBG("code 0x%2.2x", code);
307 return hci_send_acl(conn->hcon, skb, 0);
/* Start channel establishment: if the remote feature mask has already
 * been requested, proceed with the Connect Request (once security is
 * satisfied); otherwise fire an Information Request first and arm the
 * info timer. */
310 static void l2cap_do_start(struct sock *sk)
312 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
314 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
315 if (l2cap_check_link_mode(sk)) {
316 struct l2cap_conn_req req;
317 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
318 req.psm = l2cap_pi(sk)->psm;
/* Remember the ident so the response can be matched to this channel. */
320 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
322 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
323 L2CAP_CONN_REQ, sizeof(req), &req);
/* No feature mask yet: query the peer's extended features before
 * attempting any connection. */
326 struct l2cap_info_req req;
327 req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
329 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
330 conn->info_ident = l2cap_get_ident(conn);
332 mod_timer(&conn->info_timer, jiffies +
333 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
335 l2cap_send_cmd(conn, conn->info_ident,
336 L2CAP_INFO_REQ, sizeof(req), &req);
340 /* ---- L2CAP connections ---- */
/* Walk every channel on the connection and push its state machine
 * forward: send Connect Requests for BT_CONNECT channels and Connect
 * Responses for BT_CONNECT2 channels, honouring security and
 * defer_setup. NOTE(review): per-socket bh_lock/unlock lines are
 * elided in this excerpt — confirm locking in the full file. */
341 static void l2cap_conn_start(struct l2cap_conn *conn)
343 struct l2cap_chan_list *l = &conn->chan_list;
346 BT_DBG("conn %p", conn);
350 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
/* Only connection-oriented channels take part in setup. */
353 if (sk->sk_type != SOCK_SEQPACKET) {
358 if (sk->sk_state == BT_CONNECT) {
359 if (l2cap_check_link_mode(sk)) {
360 struct l2cap_conn_req req;
361 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
362 req.psm = l2cap_pi(sk)->psm;
364 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
366 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
367 L2CAP_CONN_REQ, sizeof(req), &req);
369 } else if (sk->sk_state == BT_CONNECT2) {
370 struct l2cap_conn_rsp rsp;
371 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
372 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
374 if (l2cap_check_link_mode(sk)) {
/* Security done, but userspace asked to authorise each
 * incoming connection: report "pending, authorisation". */
375 if (bt_sk(sk)->defer_setup) {
376 struct sock *parent = bt_sk(sk)->parent;
377 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
378 rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
379 parent->sk_data_ready(parent, 0);
382 sk->sk_state = BT_CONFIG;
383 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
384 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
/* Security still in progress: pending, authentication. */
387 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
388 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
391 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
392 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
398 read_unlock(&l->lock);
/* The underlying ACL link is up: mark non-SEQPACKET sockets connected
 * immediately and start channel setup for connecting SEQPACKET ones. */
401 static void l2cap_conn_ready(struct l2cap_conn *conn)
403 struct l2cap_chan_list *l = &conn->chan_list;
406 BT_DBG("conn %p", conn);
410 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
413 if (sk->sk_type != SOCK_SEQPACKET) {
414 l2cap_sock_clear_timer(sk);
415 sk->sk_state = BT_CONNECTED;
416 sk->sk_state_change(sk);
417 } else if (sk->sk_state == BT_CONNECT)
423 read_unlock(&l->lock);
426 /* Notify sockets that we cannot guarantee reliability anymore */
/* Walk the channel list and flag the error on every channel that
 * requested L2CAP_LM_RELIABLE. */
427 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
429 struct l2cap_chan_list *l = &conn->chan_list;
432 BT_DBG("conn %p", conn);
436 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
437 if (l2cap_pi(sk)->link_mode & L2CAP_LM_RELIABLE)
441 read_unlock(&l->lock);
/* Information Request timed out: give up on the feature mask and
 * continue channel establishment without it. */
444 static void l2cap_info_timeout(unsigned long arg)
446 struct l2cap_conn *conn = (void *) arg;
448 conn->info_ident = 0;
450 l2cap_conn_start(conn);
/* Create (or return the existing) L2CAP connection object for an ACL
 * link and initialise its locks, addresses, MTU and info timer.
 * Runs in atomic context (GFP_ATOMIC). */
453 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
455 struct l2cap_conn *conn = hcon->l2cap_data;
460 conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
464 hcon->l2cap_data = conn;
467 BT_DBG("hcon %p conn %p", hcon, conn);
/* L2CAP PDUs are bounded by the controller's ACL MTU. */
469 conn->mtu = hcon->hdev->acl_mtu;
470 conn->src = &hcon->hdev->bdaddr;
471 conn->dst = &hcon->dst;
475 setup_timer(&conn->info_timer, l2cap_info_timeout,
476 (unsigned long) conn);
478 spin_lock_init(&conn->lock);
479 rwlock_init(&conn->chan_list.lock);
/* Tear down the L2CAP connection: free any partial reassembly buffer,
 * delete every remaining channel with 'err', stop the info timer and
 * detach from the hci_conn. */
484 static void l2cap_conn_del(struct hci_conn *hcon, int err)
486 struct l2cap_conn *conn = hcon->l2cap_data;
492 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
495 kfree_skb(conn->rx_skb);
/* l2cap_chan_del() unlinks the head each iteration, so this drains
 * the whole channel list. */
498 while ((sk = conn->chan_list.head)) {
500 l2cap_chan_del(sk, err);
505 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
506 del_timer_sync(&conn->info_timer);
508 hcon->l2cap_data = NULL;
/* Locked wrapper around __l2cap_chan_add(). */
512 static inline void l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
514 struct l2cap_chan_list *l = &conn->chan_list;
515 write_lock_bh(&l->lock);
516 __l2cap_chan_add(conn, sk, parent);
517 write_unlock_bh(&l->lock);
520 /* ---- Socket interface ---- */
/* Exact-match lookup: socket bound to this PSM and source address.
 * Caller must hold l2cap_sk_list.lock. */
521 static struct sock *__l2cap_get_sock_by_addr(__le16 psm, bdaddr_t *src)
524 struct hlist_node *node;
525 sk_for_each(sk, node, &l2cap_sk_list.head)
526 if (l2cap_pi(sk)->sport == psm && !bacmp(&bt_sk(sk)->src, src))
533 /* Find socket with psm and source bdaddr.
534 * Returns closest match.
/* An exact source-address match wins; a BDADDR_ANY binding is kept in
 * sk1 as the fallback. 'state' of 0 matches any socket state. */
536 static struct sock *__l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
538 struct sock *sk = NULL, *sk1 = NULL;
539 struct hlist_node *node;
541 sk_for_each(sk, node, &l2cap_sk_list.head) {
542 if (state && sk->sk_state != state)
545 if (l2cap_pi(sk)->psm == psm) {
/* Exact match on the source address. */
547 if (!bacmp(&bt_sk(sk)->src, src))
/* Wildcard binding: remember as closest match. */
551 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
555 return node ? sk : sk1;
558 /* Find socket with given address (psm, src).
559 * Returns locked socket */
560 static inline struct sock *l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
563 read_lock(&l2cap_sk_list.lock);
564 s = __l2cap_get_sock_by_psm(state, psm, src);
/* Lock before dropping the list lock so the socket cannot vanish. */
565 if (s) bh_lock_sock(s);
566 read_unlock(&l2cap_sk_list.lock);
/* sk_destruct callback: release any skbs still queued on the socket. */
570 static void l2cap_sock_destruct(struct sock *sk)
574 skb_queue_purge(&sk->sk_receive_queue);
575 skb_queue_purge(&sk->sk_write_queue);
/* Close every not-yet-accepted child of a listening socket, then mark
 * the listener itself closed and zapped. */
578 static void l2cap_sock_cleanup_listen(struct sock *parent)
582 BT_DBG("parent %p", parent);
584 /* Close not yet accepted channels */
585 while ((sk = bt_accept_dequeue(parent, NULL)))
586 l2cap_sock_close(sk);
588 parent->sk_state = BT_CLOSED;
589 sock_set_flag(parent, SOCK_ZAPPED);
592 /* Kill socket (only if zapped and orphan)
593 * Must be called on unlocked socket.
595 static void l2cap_sock_kill(struct sock *sk)
/* Bail out unless the socket is both zapped and already orphaned. */
597 if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
600 BT_DBG("sk %p state %d", sk, sk->sk_state);
602 /* Kill poor orphan */
603 bt_sock_unlink(&l2cap_sk_list, sk);
604 sock_set_flag(sk, SOCK_DEAD);
/* State-dependent close: listeners reap their children; established
 * SEQPACKET channels send a Disconnect Request and wait; half-open
 * incoming channels are refused with a Connect Response; everything
 * else is deleted outright. NOTE(review): the switch-case labels are
 * elided in this excerpt — mapping of states inferred from bodies. */
608 static void __l2cap_sock_close(struct sock *sk, int reason)
610 BT_DBG("sk %p state %d socket %p", sk, sk->sk_state, sk->sk_socket);
612 switch (sk->sk_state) {
614 l2cap_sock_cleanup_listen(sk);
/* Connected/config state: initiate an orderly disconnect. */
619 if (sk->sk_type == SOCK_SEQPACKET) {
620 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
621 struct l2cap_disconn_req req;
623 sk->sk_state = BT_DISCONN;
624 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
626 req.dcid = cpu_to_le16(l2cap_pi(sk)->dcid);
627 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
628 l2cap_send_cmd(conn, l2cap_get_ident(conn),
629 L2CAP_DISCONN_REQ, sizeof(req), &req);
631 l2cap_chan_del(sk, reason);
/* Incoming connection still pending: refuse it explicitly. */
635 if (sk->sk_type == SOCK_SEQPACKET) {
636 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
637 struct l2cap_conn_rsp rsp;
/* defer_setup rejections read as "security block" to the peer. */
640 if (bt_sk(sk)->defer_setup)
641 result = L2CAP_CR_SEC_BLOCK;
643 result = L2CAP_CR_BAD_PSM;
645 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
646 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
647 rsp.result = cpu_to_le16(result);
648 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
649 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
650 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
652 l2cap_chan_del(sk, reason);
657 l2cap_chan_del(sk, reason);
/* Default: no channel to tear down, just zap the socket. */
661 sock_set_flag(sk, SOCK_ZAPPED);
666 /* Must be called on unlocked socket. */
/* Stop the timer and close with ECONNRESET as the reason. */
667 static void l2cap_sock_close(struct sock *sk)
669 l2cap_sock_clear_timer(sk);
671 __l2cap_sock_close(sk, ECONNRESET);
/* Initialise a fresh L2CAP socket, inheriting type, defer_setup, MTUs
 * and link mode from 'parent' when one exists (incoming connection),
 * otherwise applying defaults. */
676 static void l2cap_sock_init(struct sock *sk, struct sock *parent)
678 struct l2cap_pinfo *pi = l2cap_pi(sk);
683 sk->sk_type = parent->sk_type;
684 bt_sk(sk)->defer_setup = bt_sk(parent)->defer_setup;
686 pi->imtu = l2cap_pi(parent)->imtu;
687 pi->omtu = l2cap_pi(parent)->omtu;
688 pi->link_mode = l2cap_pi(parent)->link_mode;
/* No parent: standalone socket gets the default incoming MTU. */
690 pi->imtu = L2CAP_DEFAULT_MTU;
695 /* Default config options */
697 pi->flush_to = L2CAP_DEFAULT_FLUSH_TO;
/* Protocol descriptor: obj_size makes sk_alloc() reserve room for
 * the l2cap_pinfo private area. */
700 static struct proto l2cap_proto = {
702 .owner = THIS_MODULE,
703 .obj_size = sizeof(struct l2cap_pinfo)
/* Allocate and minimally initialise an L2CAP socket: destructor, send
 * timeout, timer and registration on the global socket list. */
706 static struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock, int proto, gfp_t prio)
710 sk = sk_alloc(net, PF_BLUETOOTH, prio, &l2cap_proto);
714 sock_init_data(sock, sk);
715 INIT_LIST_HEAD(&bt_sk(sk)->accept_q);
717 sk->sk_destruct = l2cap_sock_destruct;
718 sk->sk_sndtimeo = msecs_to_jiffies(L2CAP_CONN_TIMEOUT);
720 sock_reset_flag(sk, SOCK_ZAPPED);
722 sk->sk_protocol = proto;
723 sk->sk_state = BT_OPEN;
725 setup_timer(&sk->sk_timer, l2cap_sock_timeout, (unsigned long) sk);
727 bt_sock_link(&l2cap_sk_list, sk);
/* socket(2) backend: validate the socket type, require CAP_NET_RAW
 * for raw sockets, then allocate and initialise the sock. */
731 static int l2cap_sock_create(struct net *net, struct socket *sock, int protocol)
735 BT_DBG("sock %p", sock);
737 sock->state = SS_UNCONNECTED;
739 if (sock->type != SOCK_SEQPACKET &&
740 sock->type != SOCK_DGRAM && sock->type != SOCK_RAW)
741 return -ESOCKTNOSUPPORT;
743 if (sock->type == SOCK_RAW && !capable(CAP_NET_RAW))
746 sock->ops = &l2cap_sock_ops;
748 sk = l2cap_sock_alloc(net, sock, protocol, GFP_ATOMIC);
752 l2cap_sock_init(sk, NULL);
/* bind(2): validate the address, enforce CAP_NET_BIND_SERVICE for
 * privileged PSMs (< 0x1001), reject an already-taken (PSM, bdaddr)
 * pair, then record the source address and move to BT_BOUND. */
756 static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
758 struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
759 struct sock *sk = sock->sk;
762 BT_DBG("sk %p, %s %d", sk, batostr(&la->l2_bdaddr), la->l2_psm);
764 if (!addr || addr->sa_family != AF_BLUETOOTH)
769 if (sk->sk_state != BT_OPEN) {
/* PSMs below 0x1001 are reserved for well-known services. */
774 if (la->l2_psm && btohs(la->l2_psm) < 0x1001 &&
775 !capable(CAP_NET_BIND_SERVICE)) {
780 write_lock_bh(&l2cap_sk_list.lock);
782 if (la->l2_psm && __l2cap_get_sock_by_addr(la->l2_psm, &la->l2_bdaddr)) {
785 /* Save source address */
786 bacpy(&bt_sk(sk)->src, &la->l2_bdaddr);
787 l2cap_pi(sk)->psm = la->l2_psm;
788 l2cap_pi(sk)->sport = la->l2_psm;
789 sk->sk_state = BT_BOUND;
792 write_unlock_bh(&l2cap_sk_list.lock);
/* Resolve a route to the destination, create/reuse the ACL link with
 * the security level implied by the channel's link mode, attach the
 * channel and either finish immediately (ACL already up) or arm the
 * connect timer. Returns 0 or a negative errno. */
799 static int l2cap_do_connect(struct sock *sk)
801 bdaddr_t *src = &bt_sk(sk)->src;
802 bdaddr_t *dst = &bt_sk(sk)->dst;
803 struct l2cap_conn *conn;
804 struct hci_conn *hcon;
805 struct hci_dev *hdev;
809 BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst), l2cap_pi(sk)->psm);
811 if (!(hdev = hci_get_route(dst, src)))
812 return -EHOSTUNREACH;
814 hci_dev_lock_bh(hdev);
/* Pick the HCI authentication requirement: MITM protection when any
 * security link mode is requested; PSM 0x0001 (SDP) never bonds. */
818 if (l2cap_pi(sk)->link_mode & L2CAP_LM_AUTH ||
819 l2cap_pi(sk)->link_mode & L2CAP_LM_ENCRYPT ||
820 l2cap_pi(sk)->link_mode & L2CAP_LM_SECURE) {
821 if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001))
822 auth_type = HCI_AT_NO_BONDING_MITM;
824 auth_type = HCI_AT_GENERAL_BONDING_MITM;
826 if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001))
827 auth_type = HCI_AT_NO_BONDING;
829 auth_type = HCI_AT_GENERAL_BONDING;
832 hcon = hci_connect(hdev, ACL_LINK, dst, auth_type);
836 conn = l2cap_conn_add(hcon, 0);
844 /* Update source addr of the socket */
845 bacpy(src, conn->src);
847 l2cap_chan_add(conn, sk, NULL);
849 sk->sk_state = BT_CONNECT;
850 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
/* ACL already established: non-SEQPACKET sockets are connected now;
 * SEQPACKET ones continue with channel setup elsewhere. */
852 if (hcon->state == BT_CONNECTED) {
853 if (sk->sk_type != SOCK_SEQPACKET) {
854 l2cap_sock_clear_timer(sk);
855 sk->sk_state = BT_CONNECTED;
861 hci_dev_unlock_bh(hdev);
/* connect(2): validate the address (SEQPACKET requires a PSM), check
 * the socket state, store the destination, kick off the connection
 * and optionally block until BT_CONNECTED. */
866 static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, int alen, int flags)
868 struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
869 struct sock *sk = sock->sk;
876 if (addr->sa_family != AF_BLUETOOTH || alen < sizeof(struct sockaddr_l2)) {
881 if (sk->sk_type == SOCK_SEQPACKET && !la->l2_psm) {
886 switch(sk->sk_state) {
890 /* Already connecting */
894 /* Already connected */
907 /* Set destination address and psm */
908 bacpy(&bt_sk(sk)->dst, &la->l2_bdaddr);
909 l2cap_pi(sk)->psm = la->l2_psm;
911 if ((err = l2cap_do_connect(sk)))
/* Block (honouring O_NONBLOCK) until the channel is established. */
915 err = bt_sock_wait_state(sk, BT_CONNECTED,
916 sock_sndtimeo(sk, flags & O_NONBLOCK));
/* listen(2): only bound SEQPACKET sockets may listen; if no PSM was
 * bound, auto-assign the first free odd PSM in 0x1001-0x10ff. */
922 static int l2cap_sock_listen(struct socket *sock, int backlog)
924 struct sock *sk = sock->sk;
927 BT_DBG("sk %p backlog %d", sk, backlog);
931 if (sk->sk_state != BT_BOUND || sock->type != SOCK_SEQPACKET) {
936 if (!l2cap_pi(sk)->psm) {
937 bdaddr_t *src = &bt_sk(sk)->src;
942 write_lock_bh(&l2cap_sk_list.lock);
/* Valid dynamic PSMs are odd; step by 2. */
944 for (psm = 0x1001; psm < 0x1100; psm += 2)
945 if (!__l2cap_get_sock_by_addr(htobs(psm), src)) {
946 l2cap_pi(sk)->psm = htobs(psm);
947 l2cap_pi(sk)->sport = htobs(psm);
952 write_unlock_bh(&l2cap_sk_list.lock);
958 sk->sk_max_ack_backlog = backlog;
959 sk->sk_ack_backlog = 0;
960 sk->sk_state = BT_LISTEN;
/* accept(2): sleep (wake-one, interruptible) on the listener until a
 * child connection is ready, honouring the receive timeout and
 * pending signals. */
967 static int l2cap_sock_accept(struct socket *sock, struct socket *newsock, int flags)
969 DECLARE_WAITQUEUE(wait, current);
970 struct sock *sk = sock->sk, *nsk;
974 lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
976 if (sk->sk_state != BT_LISTEN) {
981 timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
983 BT_DBG("sk %p timeo %ld", sk, timeo);
985 /* Wait for an incoming connection. (wake-one). */
986 add_wait_queue_exclusive(sk->sk_sleep, &wait);
987 while (!(nsk = bt_accept_dequeue(sk, newsock))) {
988 set_current_state(TASK_INTERRUPTIBLE);
/* Drop the socket lock while sleeping, retake it afterwards. */
995 timeo = schedule_timeout(timeo);
996 lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
/* The listener may have been closed while we slept. */
998 if (sk->sk_state != BT_LISTEN) {
1003 if (signal_pending(current)) {
1004 err = sock_intr_errno(timeo);
1008 set_current_state(TASK_RUNNING);
1009 remove_wait_queue(sk->sk_sleep, &wait);
1014 newsock->state = SS_CONNECTED;
1016 BT_DBG("new socket %p", nsk);
/* getsockname/getpeername: fill in bdaddr (peer or local, by 'peer')
 * and the channel's PSM. */
1023 static int l2cap_sock_getname(struct socket *sock, struct sockaddr *addr, int *len, int peer)
1025 struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
1026 struct sock *sk = sock->sk;
1028 BT_DBG("sock %p, sk %p", sock, sk);
1030 addr->sa_family = AF_BLUETOOTH;
1031 *len = sizeof(struct sockaddr_l2);
1034 bacpy(&la->l2_bdaddr, &bt_sk(sk)->dst);
1036 bacpy(&la->l2_bdaddr, &bt_sk(sk)->src);
1038 la->l2_psm = l2cap_pi(sk)->psm;
/* Build one outgoing L2CAP PDU from the user iovec: a head skb with
 * the basic header (plus the 2-byte PSM for connectionless sockets)
 * and a frag_list chain for data beyond the ACL MTU, then hand the
 * whole chain to hci_send_acl(). Returns bytes sent or -errno.
 * NOTE(review): error-path frees and the loop around the continuation
 * fragments are elided from this excerpt. */
1042 static inline int l2cap_do_send(struct sock *sk, struct msghdr *msg, int len)
1044 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1045 struct sk_buff *skb, **frag;
1046 int err, hlen, count, sent=0;
1047 struct l2cap_hdr *lh;
1049 BT_DBG("sk %p len %d", sk, len);
1051 /* First fragment (with L2CAP header) */
1052 if (sk->sk_type == SOCK_DGRAM)
1053 hlen = L2CAP_HDR_SIZE + 2;
1055 hlen = L2CAP_HDR_SIZE;
1057 count = min_t(unsigned int, (conn->mtu - hlen), len);
1059 skb = bt_skb_send_alloc(sk, hlen + count,
1060 msg->msg_flags & MSG_DONTWAIT, &err);
1064 /* Create L2CAP header */
1065 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1066 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1067 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
/* Connectionless PDUs carry the PSM right after the header. */
1069 if (sk->sk_type == SOCK_DGRAM)
1070 put_unaligned(l2cap_pi(sk)->psm, (__le16 *) skb_put(skb, 2));
1072 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count)) {
1080 /* Continuation fragments (no L2CAP header) */
1081 frag = &skb_shinfo(skb)->frag_list;
1083 count = min_t(unsigned int, conn->mtu, len);
1085 *frag = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err);
1089 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count)) {
1097 frag = &(*frag)->next;
1100 if ((err = hci_send_acl(conn->hcon, skb, 0)) < 0)
/* sendmsg(2): reject OOB data and over-MTU payloads (non-raw), then
 * transmit via l2cap_do_send() if the channel is connected. */
1110 static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len)
1112 struct sock *sk = sock->sk;
1115 BT_DBG("sock %p, sk %p", sock, sk);
1117 err = sock_error(sk);
1121 if (msg->msg_flags & MSG_OOB)
1124 /* Check outgoing MTU */
1125 if (sk->sk_type != SOCK_RAW && len > l2cap_pi(sk)->omtu)
1130 if (sk->sk_state == BT_CONNECTED)
1131 err = l2cap_do_send(sk, msg, len);
/* recvmsg(2): the first read on a defer_setup channel in BT_CONNECT2
 * acts as the user's acceptance — send the success Connect Response
 * and move to BT_CONFIG — then fall through to the generic receive. */
1139 static int l2cap_sock_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len, int flags)
1141 struct sock *sk = sock->sk;
1145 if (sk->sk_state == BT_CONNECT2 && bt_sk(sk)->defer_setup) {
1146 struct l2cap_conn_rsp rsp;
1148 sk->sk_state = BT_CONFIG;
1150 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
1151 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
1152 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
1153 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
1154 l2cap_send_cmd(l2cap_pi(sk)->conn, l2cap_pi(sk)->ident,
1155 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
1163 return bt_sock_recvmsg(iocb, sock, msg, len, flags);
/* Legacy SOL_L2CAP setsockopt: L2CAP_OPTIONS (MTUs, copied over the
 * current values so short user buffers keep existing settings) and
 * the link-mode bitmask. NOTE(review): option case labels are elided
 * from this excerpt. */
1166 static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __user *optval, int optlen)
1168 struct sock *sk = sock->sk;
1169 struct l2cap_options opts;
1173 BT_DBG("sk %p", sk);
/* Pre-fill with current values; a short copy_from_user below then
 * only overwrites the leading fields. */
1179 opts.imtu = l2cap_pi(sk)->imtu;
1180 opts.omtu = l2cap_pi(sk)->omtu;
1181 opts.flush_to = l2cap_pi(sk)->flush_to;
1182 opts.mode = L2CAP_MODE_BASIC;
1184 len = min_t(unsigned int, sizeof(opts), optlen);
1185 if (copy_from_user((char *) &opts, optval, len)) {
1190 l2cap_pi(sk)->imtu = opts.imtu;
1191 l2cap_pi(sk)->omtu = opts.omtu;
1195 if (get_user(opt, (u32 __user *) optval)) {
1200 l2cap_pi(sk)->link_mode = opt;
/* setsockopt(2): route SOL_L2CAP to the legacy handler; handle
 * SOL_BLUETOOTH options here (BT_DEFER_SETUP only before connect). */
1212 static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, int optlen)
1214 struct sock *sk = sock->sk;
1218 BT_DBG("sk %p", sk);
1220 if (level == SOL_L2CAP)
1221 return l2cap_sock_setsockopt_old(sock, optname, optval, optlen);
1226 case BT_DEFER_SETUP:
1227 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
1232 if (get_user(opt, (u32 __user *) optval)) {
1237 bt_sk(sk)->defer_setup = opt;
/* Legacy SOL_L2CAP getsockopt: report L2CAP_OPTIONS, the link-mode
 * bitmask, or L2CAP_CONNINFO (only once connected, or mid-setup with
 * defer_setup). */
1249 static int l2cap_sock_getsockopt_old(struct socket *sock, int optname, char __user *optval, int __user *optlen)
1251 struct sock *sk = sock->sk;
1252 struct l2cap_options opts;
1253 struct l2cap_conninfo cinfo;
1256 BT_DBG("sk %p", sk);
1258 if (get_user(len, optlen))
1265 opts.imtu = l2cap_pi(sk)->imtu;
1266 opts.omtu = l2cap_pi(sk)->omtu;
1267 opts.flush_to = l2cap_pi(sk)->flush_to;
1268 opts.mode = L2CAP_MODE_BASIC;
1270 len = min_t(unsigned int, len, sizeof(opts));
1271 if (copy_to_user(optval, (char *) &opts, len))
1277 if (put_user(l2cap_pi(sk)->link_mode, (u32 __user *) optval))
1281 case L2CAP_CONNINFO:
1282 if (sk->sk_state != BT_CONNECTED &&
1283 !(sk->sk_state == BT_CONNECT2 &&
1284 bt_sk(sk)->defer_setup)) {
1289 cinfo.hci_handle = l2cap_pi(sk)->conn->hcon->handle;
1290 memcpy(cinfo.dev_class, l2cap_pi(sk)->conn->hcon->dev_class, 3);
1292 len = min_t(unsigned int, len, sizeof(cinfo));
1293 if (copy_to_user(optval, (char *) &cinfo, len))
/* getsockopt(2): route SOL_L2CAP to the legacy handler; handle
 * SOL_BLUETOOTH's BT_DEFER_SETUP (valid only before connect). */
1307 static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen)
1309 struct sock *sk = sock->sk;
1312 BT_DBG("sk %p", sk);
1314 if (level == SOL_L2CAP)
1315 return l2cap_sock_getsockopt_old(sock, optname, optval, optlen);
1317 if (get_user(len, optlen))
1323 case BT_DEFER_SETUP:
1324 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
1329 if (put_user(bt_sk(sk)->defer_setup, (u32 __user *) optval))
/* shutdown(2): close the channel (reason 0) on first call and, if
 * SO_LINGER is set, wait up to lingertime for BT_CLOSED. */
1343 static int l2cap_sock_shutdown(struct socket *sock, int how)
1345 struct sock *sk = sock->sk;
1348 BT_DBG("sock %p, sk %p", sock, sk);
1354 if (!sk->sk_shutdown) {
1355 sk->sk_shutdown = SHUTDOWN_MASK;
1356 l2cap_sock_clear_timer(sk);
1357 __l2cap_sock_close(sk, 0);
1359 if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
1360 err = bt_sock_wait_state(sk, BT_CLOSED,
/* close(2)/release: full shutdown followed by killing the (now
 * orphaned and zapped) socket. */
1367 static int l2cap_sock_release(struct socket *sock)
1369 struct sock *sk = sock->sk;
1372 BT_DBG("sock %p, sk %p", sock, sk);
1377 err = l2cap_sock_shutdown(sock, 2);
1380 l2cap_sock_kill(sk);
/* Configuration finished: mark the channel connected and wake the
 * waiter — the connecting thread for outgoing channels, the listening
 * parent for incoming ones. L2CAP_LM_SECURE channels additionally
 * request a link-key change. */
1384 static void l2cap_chan_ready(struct sock *sk)
1386 struct sock *parent = bt_sk(sk)->parent;
1388 BT_DBG("sk %p, parent %p", sk, parent);
1390 l2cap_pi(sk)->conf_state = 0;
1391 l2cap_sock_clear_timer(sk);
1394 /* Outgoing channel.
1395 * Wake up socket sleeping on connect.
1397 sk->sk_state = BT_CONNECTED;
1398 sk->sk_state_change(sk);
1400 /* Incoming channel.
1401 * Wake up socket sleeping on accept.
1403 parent->sk_data_ready(parent, 0);
1406 if (l2cap_pi(sk)->link_mode & L2CAP_LM_SECURE) {
1407 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1408 hci_conn_change_link_key(conn->hcon);
1412 /* Copy frame to all raw sockets on that connection */
/* Clone the skb once per raw socket and queue it; clones that cannot
 * be queued are dropped, never the original. */
1413 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
1415 struct l2cap_chan_list *l = &conn->chan_list;
1416 struct sk_buff *nskb;
1419 BT_DBG("conn %p", conn);
1421 read_lock(&l->lock);
1422 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
1423 if (sk->sk_type != SOCK_RAW)
1426 /* Don't send frame to the socket it came from */
1430 if (!(nskb = skb_clone(skb, GFP_ATOMIC)))
1433 if (sock_queue_rcv_skb(sk, nskb))
1436 read_unlock(&l->lock);
1439 /* ---- L2CAP signalling commands ---- */
/* Allocate and fill a signalling PDU: basic L2CAP header on CID
 * 0x0001, command header (code/ident/len), then the payload — split
 * into frag_list continuation skbs when it exceeds the ACL MTU.
 * Returns the skb chain or NULL on allocation failure. */
1440 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
1441 u8 code, u8 ident, u16 dlen, void *data)
1443 struct sk_buff *skb, **frag;
1444 struct l2cap_cmd_hdr *cmd;
1445 struct l2cap_hdr *lh;
1448 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d", conn, code, ident, dlen);
1450 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
1451 count = min_t(unsigned int, conn->mtu, len);
1453 skb = bt_skb_alloc(count, GFP_ATOMIC);
1457 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1458 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
/* All signalling traffic travels on the fixed CID 0x0001. */
1459 lh->cid = cpu_to_le16(0x0001);
1461 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
1464 cmd->len = cpu_to_le16(dlen);
1467 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
1468 memcpy(skb_put(skb, count), data, count);
1474 /* Continuation fragments (no L2CAP header) */
1475 frag = &skb_shinfo(skb)->frag_list;
1477 count = min_t(unsigned int, conn->mtu, len);
1479 *frag = bt_skb_alloc(count, GFP_ATOMIC);
1483 memcpy(skb_put(*frag, count), data, count);
1488 frag = &(*frag)->next;
/* Decode one configuration option at *ptr: extract type and length,
 * read the value by size (1/2/4 bytes, little-endian, otherwise a
 * pointer to the raw bytes). Returns the option's total length so the
 * caller can advance through the option list. */
1498 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
1500 struct l2cap_conf_opt *opt = *ptr;
1503 len = L2CAP_CONF_OPT_SIZE + opt->len;
1511 *val = *((u8 *) opt->val);
1515 *val = __le16_to_cpu(*((__le16 *) opt->val));
1519 *val = __le32_to_cpu(*((__le32 *) opt->val));
/* Larger options: hand back a pointer instead of a scalar. */
1523 *val = (unsigned long) opt->val;
1527 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
/* Append one configuration option (type/len/value) at *ptr, encoding
 * scalar values little-endian by size and copying larger values as
 * raw bytes, then advance *ptr past the option. */
1531 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
1533 struct l2cap_conf_opt *opt = *ptr;
1535 BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
1542 *((u8 *) opt->val) = val;
1546 *((__le16 *) opt->val) = cpu_to_le16(val);
1550 *((__le32 *) opt->val) = cpu_to_le32(val);
/* Larger payloads: 'val' is a pointer to the data to copy. */
1554 memcpy(opt->val, (void *) val, len);
1558 *ptr += L2CAP_CONF_OPT_SIZE + len;
/* Build our Configure Request into 'data': only include the MTU
 * option when it differs from the default. Returns the request
 * length (via ptr arithmetic elided in this excerpt). */
1561 static int l2cap_build_conf_req(struct sock *sk, void *data)
1563 struct l2cap_pinfo *pi = l2cap_pi(sk);
1564 struct l2cap_conf_req *req = data;
1565 void *ptr = req->data;
1567 BT_DBG("sk %p", sk);
1569 if (pi->imtu != L2CAP_DEFAULT_MTU)
1570 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);
1572 /* FIXME: Need actual value of the flush timeout */
1573 //if (flush_to != L2CAP_DEFAULT_FLUSH_TO)
1574 // l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2, pi->flush_to);
1576 req->dcid = cpu_to_le16(pi->dcid);
1577 req->flags = cpu_to_le16(0);
/* Parse the peer's buffered Configure Request (pi->conf_req/conf_len)
 * and build our Configure Response into 'data': collect MTU and RFC
 * options, echo unknown non-hint options with CONF_UNKNOWN, and only
 * accept basic mode. NOTE(review): several option-handling lines
 * (hint masking, MTU bounds check) are elided in this excerpt. */
1582 static int l2cap_parse_conf_req(struct sock *sk, void *data)
1584 struct l2cap_pinfo *pi = l2cap_pi(sk);
1585 struct l2cap_conf_rsp *rsp = data;
1586 void *ptr = rsp->data;
1587 void *req = pi->conf_req;
1588 int len = pi->conf_len;
1589 int type, hint, olen;
1591 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
1592 u16 mtu = L2CAP_DEFAULT_MTU;
1593 u16 result = L2CAP_CONF_SUCCESS;
1595 BT_DBG("sk %p", sk);
1597 while (len >= L2CAP_CONF_OPT_SIZE) {
1598 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
1604 case L2CAP_CONF_MTU:
1608 case L2CAP_CONF_FLUSH_TO:
1612 case L2CAP_CONF_QOS:
1615 case L2CAP_CONF_RFC:
1616 if (olen == sizeof(rfc))
1617 memcpy(&rfc, (void *) val, olen);
/* Unknown mandatory option: reject and echo its type back. */
1624 result = L2CAP_CONF_UNKNOWN;
1625 *((u8 *) ptr++) = type;
1630 if (result == L2CAP_CONF_SUCCESS) {
1631 /* Configure output options and let the other side know
1632 * which ones we don't like. */
/* Only basic mode is implemented; anything else is unacceptable. */
1634 if (rfc.mode == L2CAP_MODE_BASIC) {
1636 result = L2CAP_CONF_UNACCEPT;
1639 pi->conf_state |= L2CAP_CONF_OUTPUT_DONE;
1642 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);
1644 result = L2CAP_CONF_UNACCEPT;
1646 memset(&rfc, 0, sizeof(rfc));
1647 rfc.mode = L2CAP_MODE_BASIC;
1649 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
1650 sizeof(rfc), (unsigned long) &rfc);
1654 rsp->scid = cpu_to_le16(pi->dcid);
1655 rsp->result = cpu_to_le16(result);
1656 rsp->flags = cpu_to_le16(0x0000);
/* Build a bare Configure Response (no options) with the given result
 * and flags. */
1661 static int l2cap_build_conf_rsp(struct sock *sk, void *data, u16 result, u16 flags)
1663 struct l2cap_conf_rsp *rsp = data;
1664 void *ptr = rsp->data;
1666 BT_DBG("sk %p", sk);
1668 rsp->scid = cpu_to_le16(l2cap_pi(sk)->dcid);
1669 rsp->result = cpu_to_le16(result);
1670 rsp->flags = cpu_to_le16(flags);
/* Handle an incoming Command Reject: if it answers our outstanding
 * Information Request, abandon the feature query and continue channel
 * establishment without the feature mask. */
1675 static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1677 struct l2cap_cmd_rej *rej = (struct l2cap_cmd_rej *) data;
/* Only "command not understood" (reason 0x0000) is interesting here. */
1679 if (rej->reason != 0x0000)
1682 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
1683 cmd->ident == conn->info_ident) {
1684 conn->info_ident = 0;
1685 del_timer(&conn->info_timer);
1686 l2cap_conn_start(conn);
/* Handle an incoming L2CAP Connection Request: locate a listening
 * socket for the requested PSM, enforce link security (except for SDP,
 * PSM 0x0001), allocate and attach a child socket/channel, then send a
 * Connection Response (success / pending / error).  If the peer's
 * feature mask is still unknown, an Information Request is also sent.
 * NOTE(review): error-path labels and some braces are elided in this
 * excerpt; the result codes set below jump to a shared response path. */
1692 static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1694 struct l2cap_chan_list *list = &conn->chan_list;
1695 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
1696 struct l2cap_conn_rsp rsp;
1697 struct sock *sk, *parent;
1698 int result, status = L2CAP_CS_NO_INFO;
1700 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
/* PSM is kept little-endian; it is compared/stored without conversion. */
1701 __le16 psm = req->psm;
1703 BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);
1705 /* Check if we have socket listening on psm */
1706 parent = l2cap_get_sock_by_psm(BT_LISTEN, psm, conn->src);
1708 result = L2CAP_CR_BAD_PSM;
1712 /* Check if the ACL is secure enough (if not SDP) */
1713 if (psm != cpu_to_le16(0x0001) &&
1714 !hci_conn_check_link_mode(conn->hcon)) {
1715 result = L2CAP_CR_SEC_BLOCK;
1719 result = L2CAP_CR_NO_MEM;
1721 /* Check for backlog size */
1722 if (sk_acceptq_is_full(parent)) {
1723 BT_DBG("backlog full %d", parent->sk_ack_backlog);
/* GFP_ATOMIC: we are in (soft)irq/bh context here, no sleeping. */
1727 sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP, GFP_ATOMIC);
1731 write_lock_bh(&list->lock);
1733 /* Check if we already have channel with that dcid */
1734 if (__l2cap_get_chan_by_dcid(list, scid)) {
1735 write_unlock_bh(&list->lock);
1736 sock_set_flag(sk, SOCK_ZAPPED);
1737 l2cap_sock_kill(sk);
/* Pin the ACL link for the lifetime of this channel. */
1741 hci_conn_hold(conn->hcon);
1743 l2cap_sock_init(sk, parent);
1744 bacpy(&bt_sk(sk)->src, conn->src);
1745 bacpy(&bt_sk(sk)->dst, conn->dst);
1746 l2cap_pi(sk)->psm = psm;
/* Peer's source CID becomes our destination CID. */
1747 l2cap_pi(sk)->dcid = scid;
1749 __l2cap_chan_add(conn, sk, parent);
1750 dcid = l2cap_pi(sk)->scid;
1752 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
1754 l2cap_pi(sk)->ident = cmd->ident;
/* Decide the response: success/authorization-pending only once the
 * peer's feature mask exchange has been initiated; otherwise pend. */
1756 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
1757 if (l2cap_check_link_mode(sk)) {
1758 if (bt_sk(sk)->defer_setup) {
1759 sk->sk_state = BT_CONNECT2;
1760 result = L2CAP_CR_PEND;
1761 status = L2CAP_CS_AUTHOR_PEND;
/* Wake the listener so userspace can accept()/authorize. */
1762 parent->sk_data_ready(parent, 0);
1764 sk->sk_state = BT_CONFIG;
1765 result = L2CAP_CR_SUCCESS;
1766 status = L2CAP_CS_NO_INFO;
1769 sk->sk_state = BT_CONNECT2;
1770 result = L2CAP_CR_PEND;
1771 status = L2CAP_CS_AUTHEN_PEND;
1774 sk->sk_state = BT_CONNECT2;
1775 result = L2CAP_CR_PEND;
1776 status = L2CAP_CS_NO_INFO;
1779 write_unlock_bh(&list->lock);
1782 bh_unlock_sock(parent);
1785 rsp.scid = cpu_to_le16(scid);
1786 rsp.dcid = cpu_to_le16(dcid);
1787 rsp.result = cpu_to_le16(result);
1788 rsp.status = cpu_to_le16(status);
1789 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
/* First connection on this ACL: query the peer's feature mask now. */
1791 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
1792 struct l2cap_info_req info;
1793 info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
1795 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
1796 conn->info_ident = l2cap_get_ident(conn);
1798 mod_timer(&conn->info_timer, jiffies +
1799 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
1801 l2cap_send_cmd(conn, conn->info_ident,
1802 L2CAP_INFO_REQ, sizeof(info), &info);
/* Handle an incoming L2CAP Connection Response.  Look the channel up by
 * our source CID (or, for non-success results, by the command ident —
 * the branch structure is partially elided here), then either move to
 * BT_CONFIG and kick off configuration, or tear the channel down. */
1808 static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1810 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
1811 u16 scid, dcid, result, status;
1815 scid = __le16_to_cpu(rsp->scid);
1816 dcid = __le16_to_cpu(rsp->dcid);
1817 result = __le16_to_cpu(rsp->result);
1818 status = __le16_to_cpu(rsp->status);
1820 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);
/* Primary lookup by scid; fallback lookup by ident (condition guarding
 * the fallback is elided in this excerpt — confirm in full source). */
1823 if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, scid)))
1826 if (!(sk = l2cap_get_chan_by_ident(&conn->chan_list, cmd->ident)))
1831 case L2CAP_CR_SUCCESS:
1832 sk->sk_state = BT_CONFIG;
1833 l2cap_pi(sk)->ident = 0;
1834 l2cap_pi(sk)->dcid = dcid;
1835 l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
/* Immediately send our Configure Request for the new channel. */
1837 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
1838 l2cap_build_conf_req(sk, req), req);
/* Non-success / refused: remove the channel. */
1845 l2cap_chan_del(sk, ECONNREFUSED);
/* Handle an incoming L2CAP Configure Request.  Options may arrive split
 * across several requests (continuation flag 0x0001); fragments are
 * accumulated in conf_req/conf_len until the final one, then parsed and
 * answered.  Once both directions are configured the channel is ready. */
1853 static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
1855 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
1861 dcid = __le16_to_cpu(req->dcid);
1862 flags = __le16_to_cpu(req->flags);
1864 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
1866 if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid)))
/* Ignore config requests on a channel already being torn down. */
1869 if (sk->sk_state == BT_DISCONN)
1872 /* Reject if config buffer is too small. */
1873 len = cmd_len - sizeof(*req);
1874 if (l2cap_pi(sk)->conf_len + len > sizeof(l2cap_pi(sk)->conf_req)) {
1875 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
1876 l2cap_build_conf_rsp(sk, rsp,
1877 L2CAP_CONF_REJECT, flags), rsp);
/* Append this fragment to the per-channel reassembly buffer. */
1882 memcpy(l2cap_pi(sk)->conf_req + l2cap_pi(sk)->conf_len, req->data, len);
1883 l2cap_pi(sk)->conf_len += len;
1885 if (flags & 0x0001) {
1886 /* Incomplete config. Send empty response. */
1887 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
1888 l2cap_build_conf_rsp(sk, rsp,
1889 L2CAP_CONF_SUCCESS, 0x0001), rsp);
1893 /* Complete config. */
1894 len = l2cap_parse_conf_req(sk, rsp);
1898 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
1900 /* Reset config buffer. */
1901 l2cap_pi(sk)->conf_len = 0;
1903 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE))
/* Both sides configured: the channel is fully connected. */
1906 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_INPUT_DONE) {
1907 sk->sk_state = BT_CONNECTED;
1908 l2cap_chan_ready(sk);
/* We accepted theirs but have not yet sent our own config request. */
1912 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)) {
1914 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
1915 l2cap_build_conf_req(sk, buf), buf);
/* Handle an incoming L2CAP Configure Response.  On UNACCEPT we resend
 * the same request a bounded number of times (qualification-testing
 * workaround); after too many retries or a hard failure we disconnect.
 * On success the input direction is marked done and the channel may
 * become fully connected. */
1923 static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1925 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
1926 u16 scid, flags, result;
1929 scid = __le16_to_cpu(rsp->scid);
1930 flags = __le16_to_cpu(rsp->flags);
1931 result = __le16_to_cpu(rsp->result);
1933 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x", scid, flags, result);
1935 if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, scid)))
1939 case L2CAP_CONF_SUCCESS:
1942 case L2CAP_CONF_UNACCEPT:
1943 if (++l2cap_pi(sk)->conf_retry < L2CAP_CONF_MAX_RETRIES) {
1945 /* It does not make sense to adjust L2CAP parameters
1946 * that are currently defined in the spec. We simply
1947 * resend config request that we sent earlier. It is
1948 * stupid, but it helps qualification testing which
1949 * expects at least some response from us. */
1950 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
1951 l2cap_build_conf_req(sk, req), req);
/* Retries exhausted (or rejection): give up and disconnect. */
1956 sk->sk_state = BT_DISCONN;
1957 sk->sk_err = ECONNRESET;
1958 l2cap_sock_set_timer(sk, HZ * 5);
1960 struct l2cap_disconn_req req;
1961 req.dcid = cpu_to_le16(l2cap_pi(sk)->dcid);
1962 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
1963 l2cap_send_cmd(conn, l2cap_get_ident(conn),
1964 L2CAP_DISCONN_REQ, sizeof(req), &req);
/* Our outgoing configuration was accepted. */
1972 l2cap_pi(sk)->conf_state |= L2CAP_CONF_INPUT_DONE;
1974 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE) {
1975 sk->sk_state = BT_CONNECTED;
1976 l2cap_chan_ready(sk);
/* Handle an incoming L2CAP Disconnection Request: acknowledge with a
 * Disconnection Response (CIDs swapped to our perspective), shut the
 * socket down and delete/kill the channel. */
1984 static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1986 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
1987 struct l2cap_disconn_rsp rsp;
1991 scid = __le16_to_cpu(req->scid);
1992 dcid = __le16_to_cpu(req->dcid);
1994 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
/* The peer's dcid is our local scid. */
1996 if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid)))
1999 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
2000 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
2001 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
2003 sk->sk_shutdown = SHUTDOWN_MASK;
2005 l2cap_chan_del(sk, ECONNRESET);
2008 l2cap_sock_kill(sk);
/* Handle an incoming L2CAP Disconnection Response to a disconnect we
 * initiated: delete the channel (no error) and kill the socket. */
2012 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2014 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
2018 scid = __le16_to_cpu(rsp->scid);
2019 dcid = __le16_to_cpu(rsp->dcid);
2021 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
2023 if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, scid)))
2026 l2cap_chan_del(sk, 0);
2029 l2cap_sock_kill(sk);
/* Handle an incoming L2CAP Information Request.  Only the feature-mask
 * query is supported: answer with our l2cap_feat_mask.  Every other
 * info type gets a NOTSUPP response. */
2033 static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2035 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
2038 type = __le16_to_cpu(req->type);
2040 BT_DBG("type 0x%4.4x", type);
2042 if (type == L2CAP_IT_FEAT_MASK) {
2044 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
2045 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
2046 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
/* rsp->data may be unaligned inside buf, hence put_unaligned(). */
2047 put_unaligned(cpu_to_le32(l2cap_feat_mask), (__le32 *) rsp->data);
2048 l2cap_send_cmd(conn, cmd->ident,
2049 L2CAP_INFO_RSP, sizeof(buf), buf);
2051 struct l2cap_info_rsp rsp;
2052 rsp.type = cpu_to_le16(type);
2053 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
2054 l2cap_send_cmd(conn, cmd->ident,
2055 L2CAP_INFO_RSP, sizeof(rsp), &rsp);
/* Handle an incoming L2CAP Information Response: record the peer's
 * feature mask (if that is what we asked for), cancel the info timer,
 * and resume any channels that were waiting on this exchange. */
2061 static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2063 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
2066 type = __le16_to_cpu(rsp->type);
2067 result = __le16_to_cpu(rsp->result);
2069 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
2071 conn->info_ident = 0;
2073 del_timer(&conn->info_timer);
2075 if (type == L2CAP_IT_FEAT_MASK)
2076 conn->feat_mask = get_unaligned_le32(rsp->data);
2078 l2cap_conn_start(conn);
/* Demultiplex the L2CAP signaling channel (CID 0x0001): iterate over
 * the command headers packed in the skb and dispatch each to its
 * handler.  A handler error triggers a Command Reject back to the peer.
 * The raw frame is also delivered to any raw (monitor) sockets first. */
2083 static inline void l2cap_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb)
2085 u8 *data = skb->data;
2087 struct l2cap_cmd_hdr cmd;
2090 l2cap_raw_recv(conn, skb);
2092 while (len >= L2CAP_CMD_HDR_SIZE) {
2094 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
2095 data += L2CAP_CMD_HDR_SIZE;
2096 len -= L2CAP_CMD_HDR_SIZE;
2098 cmd_len = le16_to_cpu(cmd.len);
2100 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);
/* Guard against a payload length exceeding the remaining buffer,
 * and against the reserved ident 0. */
2102 if (cmd_len > len || !cmd.ident) {
2103 BT_DBG("corrupted command");
2108 case L2CAP_COMMAND_REJ:
2109 l2cap_command_rej(conn, &cmd, data);
2112 case L2CAP_CONN_REQ:
2113 err = l2cap_connect_req(conn, &cmd, data);
2116 case L2CAP_CONN_RSP:
2117 err = l2cap_connect_rsp(conn, &cmd, data);
2120 case L2CAP_CONF_REQ:
2121 err = l2cap_config_req(conn, &cmd, cmd_len, data);
2124 case L2CAP_CONF_RSP:
2125 err = l2cap_config_rsp(conn, &cmd, data);
2128 case L2CAP_DISCONN_REQ:
2129 err = l2cap_disconnect_req(conn, &cmd, data);
2132 case L2CAP_DISCONN_RSP:
2133 err = l2cap_disconnect_rsp(conn, &cmd, data);
2136 case L2CAP_ECHO_REQ:
/* Echo: reflect the request payload straight back. */
2137 l2cap_send_cmd(conn, cmd.ident, L2CAP_ECHO_RSP, cmd_len, data);
2140 case L2CAP_ECHO_RSP:
2143 case L2CAP_INFO_REQ:
2144 err = l2cap_information_req(conn, &cmd, data);
2147 case L2CAP_INFO_RSP:
2148 err = l2cap_information_rsp(conn, &cmd, data);
2152 BT_ERR("Unknown signaling command 0x%2.2x", cmd.code);
/* Any handler failure: reject the offending command. */
2158 struct l2cap_cmd_rej rej;
2159 BT_DBG("error %d", err);
2161 /* FIXME: Map err to a valid reason */
2162 rej.reason = cpu_to_le16(0);
2163 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
/* Deliver a connection-oriented data frame to the channel identified by
 * its CID.  Frames for unknown CIDs, non-connected channels, or frames
 * larger than the channel's incoming MTU are dropped (drop path elided
 * in this excerpt). */
2173 static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
2177 sk = l2cap_get_chan_by_scid(&conn->chan_list, cid);
2179 BT_DBG("unknown cid 0x%4.4x", cid);
2183 BT_DBG("sk %p, len %d", sk, skb->len);
2185 if (sk->sk_state != BT_CONNECTED)
2188 if (l2cap_pi(sk)->imtu < skb->len)
2191 /* If socket recv buffers overflows we drop data here
2192 * which is *bad* because L2CAP has to be reliable.
2193 * But we don't have any other choice. L2CAP doesn't
2194 * provide flow control mechanism. */
2196 if (!sock_queue_rcv_skb(sk, skb))
/* Deliver a connectionless (CID 0x0002) frame to a socket bound to the
 * given PSM.  Accepted in BOUND or CONNECTED state, subject to the
 * incoming MTU; otherwise dropped. */
2209 static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
/* State 0: match any socket state in the PSM lookup. */
2213 sk = l2cap_get_sock_by_psm(0, psm, conn->src)
2217 BT_DBG("sk %p, len %d", sk, skb->len);
2219 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_CONNECTED)
2222 if (l2cap_pi(sk)->imtu < skb->len)
2225 if (!sock_queue_rcv_skb(sk, skb))
2232 if (sk) bh_unlock_sock(sk);
/* Route one complete L2CAP frame by CID: the signaling channel, the
 * connectionless channel (PSM-prefixed payload), or a connection-
 * oriented data channel. */
2236 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
2238 struct l2cap_hdr *lh = (void *) skb->data;
2242 skb_pull(skb, L2CAP_HDR_SIZE);
2243 cid = __le16_to_cpu(lh->cid);
2244 len = __le16_to_cpu(lh->len);
2246 BT_DBG("len %d, cid 0x%4.4x", len, cid);
2250 l2cap_sig_channel(conn, skb);
/* Connectionless frames carry the PSM in the first two payload bytes. */
2254 psm = get_unaligned((__le16 *) skb->data);
2256 l2cap_conless_channel(conn, psm, skb);
2260 l2cap_data_channel(conn, cid, skb);
2265 /* ---- L2CAP interface with lower layer (HCI) ---- */
/* HCI callback: an incoming ACL connection is being indicated.  Scan
 * all listening L2CAP sockets and build the combined link-mode mask;
 * sockets bound to this exact adapter address take precedence over
 * wildcard (BDADDR_ANY) binds. */
2267 static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
2269 int exact = 0, lm1 = 0, lm2 = 0;
2270 register struct sock *sk;
2271 struct hlist_node *node;
/* L2CAP only runs over ACL links. */
2273 if (type != ACL_LINK)
2276 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
2278 /* Find listening sockets and check their link_mode */
2279 read_lock(&l2cap_sk_list.lock);
2280 sk_for_each(sk, node, &l2cap_sk_list.head) {
2281 if (sk->sk_state != BT_LISTEN)
2284 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
2285 lm1 |= (HCI_LM_ACCEPT | l2cap_pi(sk)->link_mode);
2287 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
2288 lm2 |= (HCI_LM_ACCEPT | l2cap_pi(sk)->link_mode);
2290 read_unlock(&l2cap_sk_list.lock);
2292 return exact ? lm1 : lm2;
/* HCI callback: ACL connection attempt completed.  On success attach an
 * L2CAP connection object and mark it ready; on failure tear down any
 * state with the mapped errno. */
2295 static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
2297 struct l2cap_conn *conn;
2299 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
2301 if (hcon->type != ACL_LINK)
2305 conn = l2cap_conn_add(hcon, status);
2307 l2cap_conn_ready(conn);
2309 l2cap_conn_del(hcon, bt_err(status));
/* HCI callback: ACL link dropped — tear down the whole L2CAP connection
 * and all its channels with the mapped errno. */
2314 static int l2cap_disconn_ind(struct hci_conn *hcon, u8 reason)
2316 BT_DBG("hcon %p reason %d", hcon, reason);
2318 if (hcon->type != ACL_LINK)
2321 l2cap_conn_del(hcon, bt_err(reason));
/* HCI callback: authentication completed on the ACL link.  Walk every
 * channel on the connection; channels in BT_CONNECT now send their
 * deferred Connection Request, and channels in BT_CONNECT2 send the
 * deferred Connection Response (success or security-block depending on
 * status — the condition lines are elided in this excerpt). */
2326 static int l2cap_auth_cfm(struct hci_conn *hcon, u8 status)
2328 struct l2cap_chan_list *l;
2329 struct l2cap_conn *conn = hcon->l2cap_data;
2335 l = &conn->chan_list;
2337 BT_DBG("conn %p", conn);
2339 read_lock(&l->lock);
2341 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
2342 struct l2cap_pinfo *pi = l2cap_pi(sk);
/* Channels demanding encryption are skipped here: authentication
 * alone is not enough when the link is not yet encrypted. */
2346 if ((pi->link_mode & (L2CAP_LM_ENCRYPT | L2CAP_LM_SECURE)) &&
2347 !(hcon->link_mode & HCI_LM_ENCRYPT) &&
2353 if (sk->sk_state == BT_CONNECT) {
2355 struct l2cap_conn_req req;
2356 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
2357 req.psm = l2cap_pi(sk)->psm;
2359 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
2361 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
2362 L2CAP_CONN_REQ, sizeof(req), &req);
2364 l2cap_sock_clear_timer(sk);
2365 l2cap_sock_set_timer(sk, HZ / 10);
2367 } else if (sk->sk_state == BT_CONNECT2) {
2368 struct l2cap_conn_rsp rsp;
2372 sk->sk_state = BT_CONFIG;
2373 result = L2CAP_CR_SUCCESS;
2375 sk->sk_state = BT_DISCONN;
2376 l2cap_sock_set_timer(sk, HZ / 10);
2377 result = L2CAP_CR_SEC_BLOCK;
2380 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
2381 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
2382 rsp.result = cpu_to_le16(result);
2383 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
2384 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
2385 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
2391 read_unlock(&l->lock);
/* HCI callback: link encryption state changed.  If encryption was
 * switched OFF on an established channel that requires it, the socket
 * is closed.  Otherwise, like l2cap_auth_cfm(), channels waiting on
 * security now send their deferred Connection Request/Response
 * (success/block conditions partially elided in this excerpt). */
2396 static int l2cap_encrypt_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
2398 struct l2cap_chan_list *l;
2399 struct l2cap_conn *conn = hcon->l2cap_data;
2405 l = &conn->chan_list;
2407 BT_DBG("conn %p", conn);
2409 read_lock(&l->lock);
2411 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
2412 struct l2cap_pinfo *pi = l2cap_pi(sk);
/* Encryption required but now reported off on a live channel:
 * force-close the socket. */
2416 if ((pi->link_mode & (L2CAP_LM_ENCRYPT | L2CAP_LM_SECURE)) &&
2417 (sk->sk_state == BT_CONNECTED ||
2418 sk->sk_state == BT_CONFIG) &&
2419 !status && encrypt == 0x00) {
2420 __l2cap_sock_close(sk, ECONNREFUSED);
2425 if (sk->sk_state == BT_CONNECT) {
2427 struct l2cap_conn_req req;
2428 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
2429 req.psm = l2cap_pi(sk)->psm;
2431 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
2433 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
2434 L2CAP_CONN_REQ, sizeof(req), &req);
2436 l2cap_sock_clear_timer(sk);
2437 l2cap_sock_set_timer(sk, HZ / 10);
2439 } else if (sk->sk_state == BT_CONNECT2) {
2440 struct l2cap_conn_rsp rsp;
2444 sk->sk_state = BT_CONFIG;
2445 result = L2CAP_CR_SUCCESS;
2447 sk->sk_state = BT_DISCONN;
2448 l2cap_sock_set_timer(sk, HZ / 10);
2449 result = L2CAP_CR_SEC_BLOCK;
2452 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
2453 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
2454 rsp.result = cpu_to_le16(result);
2455 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
2456 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
2457 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
2463 read_unlock(&l->lock);
/* HCI callback: an ACL data fragment arrived.  Reassemble L2CAP frames
 * that span multiple ACL packets: ACL_START fragments begin a new frame
 * (or deliver it immediately when complete); continuation fragments are
 * appended to conn->rx_skb until rx_len reaches zero.  Any framing
 * inconsistency marks the connection unreliable (ECOMM). */
2468 static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
2470 struct l2cap_conn *conn = hcon->l2cap_data;
2472 if (!conn && !(conn = l2cap_conn_add(hcon, 0)))
2475 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
2477 if (flags & ACL_START) {
2478 struct l2cap_hdr *hdr;
/* A start fragment while a reassembly is pending: drop the
 * partial frame. */
2482 BT_ERR("Unexpected start frame (len %d)", skb->len);
2483 kfree_skb(conn->rx_skb);
2484 conn->rx_skb = NULL;
2486 l2cap_conn_unreliable(conn, ECOMM);
2490 BT_ERR("Frame is too short (len %d)", skb->len);
2491 l2cap_conn_unreliable(conn, ECOMM);
2495 hdr = (struct l2cap_hdr *) skb->data;
2496 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
2498 if (len == skb->len) {
2499 /* Complete frame received */
2500 l2cap_recv_frame(conn, skb);
2504 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
2506 if (skb->len > len) {
2507 BT_ERR("Frame is too long (len %d, expected len %d)",
2509 l2cap_conn_unreliable(conn, ECOMM);
2513 /* Allocate skb for the complete frame (with header) */
2514 if (!(conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC)))
2517 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
/* rx_len tracks how many bytes are still expected. */
2519 conn->rx_len = len - skb->len;
2521 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
2523 if (!conn->rx_len) {
2524 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
2525 l2cap_conn_unreliable(conn, ECOMM);
2529 if (skb->len > conn->rx_len) {
2530 BT_ERR("Fragment is too long (len %d, expected %d)",
2531 skb->len, conn->rx_len);
2532 kfree_skb(conn->rx_skb);
2533 conn->rx_skb = NULL;
2535 l2cap_conn_unreliable(conn, ECOMM);
2539 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
2541 conn->rx_len -= skb->len;
2543 if (!conn->rx_len) {
2544 /* Complete frame received */
2545 l2cap_recv_frame(conn, conn->rx_skb);
2546 conn->rx_skb = NULL;
/* sysfs read handler for the 'l2cap' class attribute: dump one line per
 * L2CAP socket (addresses, state, PSM, CIDs, MTUs, link mode).
 * NOTE(review): unbounded sprintf into the sysfs page buffer — relies
 * on the PAGE_SIZE limit never being hit by the socket list. */
2555 static ssize_t l2cap_sysfs_show(struct class *dev, char *buf)
2558 struct hlist_node *node;
2561 read_lock_bh(&l2cap_sk_list.lock);
2563 sk_for_each(sk, node, &l2cap_sk_list.head) {
2564 struct l2cap_pinfo *pi = l2cap_pi(sk);
2566 str += sprintf(str, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d 0x%x\n",
2567 batostr(&bt_sk(sk)->src), batostr(&bt_sk(sk)->dst),
2568 sk->sk_state, btohs(pi->psm), pi->scid, pi->dcid,
2569 pi->imtu, pi->omtu, pi->link_mode);
2572 read_unlock_bh(&l2cap_sk_list.lock);
/* Read-only attribute /sys/class/bluetooth/l2cap. */
2577 static CLASS_ATTR(l2cap, S_IRUGO, l2cap_sysfs_show, NULL);
/* Socket-layer operations for SOCK_SEQPACKET/SOCK_RAW L2CAP sockets;
 * generic bt_sock_* helpers are used for poll and ioctl. */
2579 static const struct proto_ops l2cap_sock_ops = {
2580 .family = PF_BLUETOOTH,
2581 .owner = THIS_MODULE,
2582 .release = l2cap_sock_release,
2583 .bind = l2cap_sock_bind,
2584 .connect = l2cap_sock_connect,
2585 .listen = l2cap_sock_listen,
2586 .accept = l2cap_sock_accept,
2587 .getname = l2cap_sock_getname,
2588 .sendmsg = l2cap_sock_sendmsg,
2589 .recvmsg = l2cap_sock_recvmsg,
2590 .poll = bt_sock_poll,
2591 .ioctl = bt_sock_ioctl,
2592 .mmap = sock_no_mmap,
2593 .socketpair = sock_no_socketpair,
2594 .shutdown = l2cap_sock_shutdown,
2595 .setsockopt = l2cap_sock_setsockopt,
2596 .getsockopt = l2cap_sock_getsockopt
/* Registered with the Bluetooth socket layer for BTPROTO_L2CAP. */
2599 static struct net_proto_family l2cap_sock_family_ops = {
2600 .family = PF_BLUETOOTH,
2601 .owner = THIS_MODULE,
2602 .create = l2cap_sock_create,
/* HCI protocol hooks wiring L2CAP into the lower HCI layer. */
2605 static struct hci_proto l2cap_hci_proto = {
2607 .id = HCI_PROTO_L2CAP,
2608 .connect_ind = l2cap_connect_ind,
2609 .connect_cfm = l2cap_connect_cfm,
2610 .disconn_ind = l2cap_disconn_ind,
2611 .auth_cfm = l2cap_auth_cfm,
2612 .encrypt_cfm = l2cap_encrypt_cfm,
2613 .recv_acldata = l2cap_recv_acldata
/* Module init: register the proto, the Bluetooth socket family, and the
 * HCI protocol hooks, unwinding each step on failure.  The sysfs info
 * file is best-effort (failure only logged). */
2616 static int __init l2cap_init(void)
2620 err = proto_register(&l2cap_proto, 0);
2624 err = bt_sock_register(BTPROTO_L2CAP, &l2cap_sock_family_ops);
2626 BT_ERR("L2CAP socket registration failed");
2630 err = hci_register_proto(&l2cap_hci_proto);
2632 BT_ERR("L2CAP protocol registration failed");
2633 bt_sock_unregister(BTPROTO_L2CAP);
/* Non-fatal: the stack works without the sysfs info file. */
2637 if (class_create_file(bt_class, &class_attr_l2cap) < 0)
2638 BT_ERR("Failed to create L2CAP info file");
2640 BT_INFO("L2CAP ver %s", VERSION);
2641 BT_INFO("L2CAP socket layer initialized");
/* Error-path label: undo proto_register. */
2646 proto_unregister(&l2cap_proto);
/* Module exit: unwind l2cap_init() in reverse order. */
2650 static void __exit l2cap_exit(void)
2652 class_remove_file(bt_class, &class_attr_l2cap);
2654 if (bt_sock_unregister(BTPROTO_L2CAP) < 0)
2655 BT_ERR("L2CAP socket unregistration failed");
2657 if (hci_unregister_proto(&l2cap_hci_proto) < 0)
2658 BT_ERR("L2CAP protocol unregistration failed");
2660 proto_unregister(&l2cap_proto);
2663 void l2cap_load(void)
2665 /* Dummy function to trigger automatic L2CAP module loading by
2666 * other modules that use L2CAP sockets but don't use any other
2667 * symbols from it. */
2670 EXPORT_SYMBOL(l2cap_load);
/* Module entry points and metadata. */
2672 module_init(l2cap_init);
2673 module_exit(l2cap_exit);
2675 MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
2676 MODULE_DESCRIPTION("Bluetooth L2CAP ver " VERSION);
2677 MODULE_VERSION(VERSION);
2678 MODULE_LICENSE("GPL");
/* Allows request_module("bt-proto-0") to load this module. */
2679 MODULE_ALIAS("bt-proto-0");