2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
25 /* Bluetooth L2CAP core and sockets. */
27 #include <linux/module.h>
29 #include <linux/types.h>
30 #include <linux/capability.h>
31 #include <linux/errno.h>
32 #include <linux/kernel.h>
33 #include <linux/sched.h>
34 #include <linux/slab.h>
35 #include <linux/poll.h>
36 #include <linux/fcntl.h>
37 #include <linux/init.h>
38 #include <linux/interrupt.h>
39 #include <linux/socket.h>
40 #include <linux/skbuff.h>
41 #include <linux/list.h>
42 #include <linux/device.h>
45 #include <asm/system.h>
46 #include <asm/uaccess.h>
47 #include <asm/unaligned.h>
49 #include <net/bluetooth/bluetooth.h>
50 #include <net/bluetooth/hci_core.h>
51 #include <net/bluetooth/l2cap.h>
53 #define VERSION "2.11"
55 static u32 l2cap_feat_mask = 0x0000;
57 static const struct proto_ops l2cap_sock_ops;
59 static struct bt_sock_list l2cap_sk_list = {
60 .lock = __RW_LOCK_UNLOCKED(l2cap_sk_list.lock)
63 static void __l2cap_sock_close(struct sock *sk, int reason);
64 static void l2cap_sock_close(struct sock *sk);
65 static void l2cap_sock_kill(struct sock *sk);
67 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
68 u8 code, u8 ident, u16 dlen, void *data);
70 /* ---- L2CAP timers ---- */
/*
 * Per-socket timer callback (armed via sk->sk_timer); 'arg' carries the
 * struct sock pointer.  A timeout while still in BT_CONNECT on a channel
 * that requested link-level security (AUTH/ENCRYPT/SECURE) is reported as
 * ECONNREFUSED; NOTE(review): the default 'reason' assignment (presumably
 * ETIMEDOUT) is on a line elided from this extract -- confirm upstream.
 */
71 static void l2cap_sock_timeout(unsigned long arg)
73 struct sock *sk = (struct sock *) arg;
76 BT_DBG("sock %p state %d", sk, sk->sk_state);
80 if (sk->sk_state == BT_CONNECT &&
81 (l2cap_pi(sk)->link_mode & (L2CAP_LM_AUTH |
82 L2CAP_LM_ENCRYPT | L2CAP_LM_SECURE)))
83 reason = ECONNREFUSED;
/* Tear the channel down with the reason chosen above. */
87 __l2cap_sock_close(sk, reason);
/* (Re)arm the per-socket timer to fire 'timeout' jiffies from now. */
95 static void l2cap_sock_set_timer(struct sock *sk, long timeout)
97 BT_DBG("sk %p state %d timeout %ld", sk, sk->sk_state, timeout);
98 sk_reset_timer(sk, &sk->sk_timer, jiffies + timeout);
/* Cancel a pending per-socket timer, if any. */
101 static void l2cap_sock_clear_timer(struct sock *sk)
103 BT_DBG("sock %p state %d", sk, sk->sk_state);
104 sk_stop_timer(sk, &sk->sk_timer);
107 /* ---- L2CAP channels ---- */
/*
 * Linear scan of a connection's channel list for a channel whose
 * *destination* CID matches.  Caller is expected to hold l->lock
 * (the locked wrappers below take the read lock).
 */
108 static struct sock *__l2cap_get_chan_by_dcid(struct l2cap_chan_list *l, u16 cid)
111 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
112 if (l2cap_pi(s)->dcid == cid)
/*
 * Like __l2cap_get_chan_by_dcid() but matches on the *source* CID.
 * Caller is expected to hold l->lock.
 */
118 static struct sock *__l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
121 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
122 if (l2cap_pi(s)->scid == cid)
128 /* Find channel with given SCID.
129 * Returns locked socket */
130 static inline struct sock *l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
134 s = __l2cap_get_chan_by_scid(l, cid);
/* Lock the socket *before* dropping the list lock so it cannot go away. */
135 if (s) bh_lock_sock(s);
136 read_unlock(&l->lock);
/*
 * Find the channel that owns an outstanding signalling command identifier.
 * Caller is expected to hold l->lock.
 */
140 static struct sock *__l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
143 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
144 if (l2cap_pi(s)->ident == ident)
/* Locked wrapper around __l2cap_get_chan_by_ident(); returns a bh-locked socket. */
150 static inline struct sock *l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
154 s = __l2cap_get_chan_by_ident(l, ident);
155 if (s) bh_lock_sock(s);
156 read_unlock(&l->lock);
/*
 * Allocate the first unused source CID on this connection by scanning
 * upward until a CID with no existing channel is found.
 * NOTE(review): the loop's start value is on an elided line -- presumably
 * 0x0040, the first dynamically-allocated CID per the L2CAP spec; confirm.
 */
160 static u16 l2cap_alloc_cid(struct l2cap_chan_list *l)
164 for (; cid < 0xffff; cid++) {
165 if(!__l2cap_get_chan_by_scid(l, cid))
/*
 * Push 'sk' onto the head of the connection's doubly-linked channel list.
 * Caller must hold the list write lock (see l2cap_chan_add()).
 */
172 static inline void __l2cap_chan_link(struct l2cap_chan_list *l, struct sock *sk)
177 l2cap_pi(l->head)->prev_c = sk;
179 l2cap_pi(sk)->next_c = l->head;
180 l2cap_pi(sk)->prev_c = NULL;
/* Unlink 'sk' from the channel list, taking the write lock itself. */
184 static inline void l2cap_chan_unlink(struct l2cap_chan_list *l, struct sock *sk)
186 struct sock *next = l2cap_pi(sk)->next_c, *prev = l2cap_pi(sk)->prev_c;
188 write_lock_bh(&l->lock);
/* Patch neighbours around the removed node (head update elided in this view). */
193 l2cap_pi(next)->prev_c = prev;
195 l2cap_pi(prev)->next_c = next;
196 write_unlock_bh(&l->lock);
/*
 * Attach a socket to an L2CAP connection: assign CIDs according to socket
 * type and link it into the connection's channel list.  Caller must hold
 * the channel-list write lock (see l2cap_chan_add()).
 */
201 static void __l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
203 struct l2cap_chan_list *l = &conn->chan_list;
205 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn, l2cap_pi(sk)->psm, l2cap_pi(sk)->dcid);
207 l2cap_pi(sk)->conn = conn;
209 if (sk->sk_type == SOCK_SEQPACKET) {
210 /* Alloc CID for connection-oriented socket */
211 l2cap_pi(sk)->scid = l2cap_alloc_cid(l);
212 } else if (sk->sk_type == SOCK_DGRAM) {
213 /* Connectionless socket */
/* CID 0x0002 is the fixed connectionless data channel. */
214 l2cap_pi(sk)->scid = 0x0002;
215 l2cap_pi(sk)->dcid = 0x0002;
216 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
218 /* Raw socket can send/recv signalling messages only */
/* CID 0x0001 is the fixed signalling channel. */
219 l2cap_pi(sk)->scid = 0x0001;
220 l2cap_pi(sk)->dcid = 0x0001;
221 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
224 __l2cap_chan_link(l, sk);
/* For incoming channels, queue the child on the listening parent. */
227 bt_accept_enqueue(parent, sk);
/* Detach a channel from its connection and mark the socket closed/zapped.
231 * Must be called on the locked socket. */
232 static void l2cap_chan_del(struct sock *sk, int err)
234 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
235 struct sock *parent = bt_sk(sk)->parent;
237 l2cap_sock_clear_timer(sk);
239 BT_DBG("sk %p, conn %p, err %d", sk, conn, err);
242 /* Unlink from channel list */
243 l2cap_chan_unlink(&conn->chan_list, sk);
244 l2cap_pi(sk)->conn = NULL;
/* Drop the ACL reference taken when the channel was attached. */
245 hci_conn_put(conn->hcon);
248 sk->sk_state = BT_CLOSED;
249 sock_set_flag(sk, SOCK_ZAPPED);
/* Child of a listening socket: remove from accept queue and wake parent. */
255 bt_accept_unlink(sk);
256 parent->sk_data_ready(parent, 0);
258 sk->sk_state_change(sk);
261 /* Service level security */
/*
 * Map the channel's requested link mode onto an HCI security level and
 * ask the ACL link to satisfy it.  Checks are ordered strongest-first;
 * PSM 0x0001 (SDP) gets the dedicated SDP security level.
 */
262 static inline int l2cap_check_link_mode(struct sock *sk)
264 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
266 if (l2cap_pi(sk)->link_mode & L2CAP_LM_SECURE)
267 return hci_conn_security(conn->hcon, BT_SECURITY_HIGH);
269 if (l2cap_pi(sk)->link_mode & L2CAP_LM_ENCRYPT)
270 return hci_conn_security(conn->hcon, BT_SECURITY_MEDIUM);
272 if (l2cap_pi(sk)->link_mode & L2CAP_LM_AUTH)
273 return hci_conn_security(conn->hcon, BT_SECURITY_LOW);
275 if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001))
276 return hci_conn_security(conn->hcon, BT_SECURITY_SDP);
281 static inline u8 l2cap_get_ident(struct l2cap_conn *conn)
285 /* Get next available identifier.
286 * 1 - 128 are used by kernel.
287 * 129 - 199 are reserved.
288 * 200 - 254 are used by utilities like l2ping, etc.
/* Serialize against concurrent signalling on the same connection. */
291 spin_lock_bh(&conn->lock);
/* Wrap back into the kernel range once 128 is exceeded. */
293 if (++conn->tx_ident > 128)
298 spin_unlock_bh(&conn->lock);
/*
 * Build a signalling command skb and push it down the ACL link.
 * Returns what hci_send_acl() returns.
 */
303 static inline int l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
305 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
307 BT_DBG("code 0x%2.2x", code);
312 return hci_send_acl(conn->hcon, skb, 0);
/*
 * Kick off channel establishment.  If the remote feature mask has already
 * been requested, send a Connection Request (once security allows);
 * otherwise issue an Information Request for the feature mask first and
 * arm the info timer -- the connect will be retried from l2cap_conn_start().
 */
315 static void l2cap_do_start(struct sock *sk)
317 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
319 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
320 if (l2cap_check_link_mode(sk)) {
321 struct l2cap_conn_req req;
322 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
323 req.psm = l2cap_pi(sk)->psm;
/* Remember the ident so the matching response can find this channel. */
325 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
327 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
328 L2CAP_CONN_REQ, sizeof(req), &req);
331 struct l2cap_info_req req;
332 req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
334 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
335 conn->info_ident = l2cap_get_ident(conn);
337 mod_timer(&conn->info_timer, jiffies +
338 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
340 l2cap_send_cmd(conn, conn->info_ident,
341 L2CAP_INFO_REQ, sizeof(req), &req);
345 /* ---- L2CAP connections ---- */
/*
 * Walk every SEQPACKET channel on the connection and advance its state
 * machine: BT_CONNECT channels send a Connection Request (once security
 * allows); BT_CONNECT2 channels answer the peer's pending Connection
 * Request -- success, pending-authorization (defer_setup) or
 * pending-authentication, depending on security and socket options.
 */
346 static void l2cap_conn_start(struct l2cap_conn *conn)
348 struct l2cap_chan_list *l = &conn->chan_list;
351 BT_DBG("conn %p", conn);
355 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
358 if (sk->sk_type != SOCK_SEQPACKET) {
363 if (sk->sk_state == BT_CONNECT) {
364 if (l2cap_check_link_mode(sk)) {
365 struct l2cap_conn_req req;
366 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
367 req.psm = l2cap_pi(sk)->psm;
369 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
371 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
372 L2CAP_CONN_REQ, sizeof(req), &req);
374 } else if (sk->sk_state == BT_CONNECT2) {
375 struct l2cap_conn_rsp rsp;
/* In the response, scid/dcid are from the remote's point of view. */
376 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
377 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
379 if (l2cap_check_link_mode(sk)) {
380 if (bt_sk(sk)->defer_setup) {
381 struct sock *parent = bt_sk(sk)->parent;
/* Defer the final answer to userspace: report authorization pending. */
382 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
383 rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
384 parent->sk_data_ready(parent, 0);
387 sk->sk_state = BT_CONFIG;
388 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
389 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
392 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
393 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
396 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
397 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
403 read_unlock(&l->lock);
/*
 * Called when the underlying ACL link becomes usable: non-SEQPACKET
 * channels (raw/dgram) go straight to BT_CONNECTED; connection-oriented
 * channels in BT_CONNECT continue the L2CAP handshake (call elided here).
 */
406 static void l2cap_conn_ready(struct l2cap_conn *conn)
408 struct l2cap_chan_list *l = &conn->chan_list;
411 BT_DBG("conn %p", conn);
415 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
418 if (sk->sk_type != SOCK_SEQPACKET) {
419 l2cap_sock_clear_timer(sk);
420 sk->sk_state = BT_CONNECTED;
421 sk->sk_state_change(sk);
422 } else if (sk->sk_state == BT_CONNECT)
428 read_unlock(&l->lock);
431 /* Notify sockets that we cannot guarantee reliability anymore */
/*
 * Propagate a link reliability failure: every channel that asked for
 * L2CAP_LM_RELIABLE gets notified (error assignment elided in this view).
 */
432 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
434 struct l2cap_chan_list *l = &conn->chan_list;
437 BT_DBG("conn %p", conn);
441 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
442 if (l2cap_pi(sk)->link_mode & L2CAP_LM_RELIABLE)
446 read_unlock(&l->lock);
/*
 * Information Request timer expired without a response: clear the pending
 * ident and proceed with channel establishment anyway.
 */
449 static void l2cap_info_timeout(unsigned long arg)
451 struct l2cap_conn *conn = (void *) arg;
453 conn->info_ident = 0;
455 l2cap_conn_start(conn);
/*
 * Create (or reuse -- early-return path elided) the per-ACL L2CAP
 * connection object and attach it to the hci_conn.  GFP_ATOMIC because
 * this can run from the HCI event path.
 */
458 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
460 struct l2cap_conn *conn = hcon->l2cap_data;
465 conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
469 hcon->l2cap_data = conn;
472 BT_DBG("hcon %p conn %p", hcon, conn);
/* Outgoing MTU is bounded by the controller's ACL buffer size. */
474 conn->mtu = hcon->hdev->acl_mtu;
475 conn->src = &hcon->hdev->bdaddr;
476 conn->dst = &hcon->dst;
480 setup_timer(&conn->info_timer, l2cap_info_timeout,
481 (unsigned long) conn);
483 spin_lock_init(&conn->lock);
484 rwlock_init(&conn->chan_list.lock);
/*
 * Tear down the L2CAP connection object when the ACL goes away: free any
 * partially reassembled frame, delete every remaining channel with 'err',
 * stop the info timer if it was armed, and detach from the hci_conn.
 */
489 static void l2cap_conn_del(struct hci_conn *hcon, int err)
491 struct l2cap_conn *conn = hcon->l2cap_data;
497 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
500 kfree_skb(conn->rx_skb);
/* l2cap_chan_del() unlinks the head, so this loop drains the list. */
503 while ((sk = conn->chan_list.head)) {
505 l2cap_chan_del(sk, err);
510 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
511 del_timer_sync(&conn->info_timer);
513 hcon->l2cap_data = NULL;
/* Locked wrapper around __l2cap_chan_add(). */
517 static inline void l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
519 struct l2cap_chan_list *l = &conn->chan_list;
520 write_lock_bh(&l->lock);
521 __l2cap_chan_add(conn, sk, parent);
522 write_unlock_bh(&l->lock);
525 /* ---- Socket interface ---- */
/*
 * Exact-match lookup over all L2CAP sockets: same bound source PSM and
 * same source address.  Caller must hold l2cap_sk_list.lock.
 */
526 static struct sock *__l2cap_get_sock_by_addr(__le16 psm, bdaddr_t *src)
529 struct hlist_node *node;
530 sk_for_each(sk, node, &l2cap_sk_list.head)
531 if (l2cap_pi(sk)->sport == psm && !bacmp(&bt_sk(sk)->src, src))
538 /* Find socket with psm and source bdaddr.
539 * Returns closest match.
/*
 * Exact source-address match wins; a BDADDR_ANY wildcard socket (kept in
 * sk1) is the fallback.  'state', when non-zero, filters by socket state.
 * Caller must hold l2cap_sk_list.lock.
 */
541 static struct sock *__l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
543 struct sock *sk = NULL, *sk1 = NULL;
544 struct hlist_node *node;
546 sk_for_each(sk, node, &l2cap_sk_list.head) {
547 if (state && sk->sk_state != state)
550 if (l2cap_pi(sk)->psm == psm) {
552 if (!bacmp(&bt_sk(sk)->src, src))
556 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
/* node != NULL means the loop broke early on an exact match. */
560 return node ? sk : sk1;
563 /* Find socket with given address (psm, src).
564 * Returns locked socket */
565 static inline struct sock *l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
568 read_lock(&l2cap_sk_list.lock);
569 s = __l2cap_get_sock_by_psm(state, psm, src);
/* Lock the result before releasing the list lock. */
570 if (s) bh_lock_sock(s);
571 read_unlock(&l2cap_sk_list.lock);
/* sk->sk_destruct hook: drop any queued receive/transmit skbs. */
575 static void l2cap_sock_destruct(struct sock *sk)
579 skb_queue_purge(&sk->sk_receive_queue);
580 skb_queue_purge(&sk->sk_write_queue);
/* Close every not-yet-accepted child of a listening socket, then zap it. */
583 static void l2cap_sock_cleanup_listen(struct sock *parent)
587 BT_DBG("parent %p", parent);
589 /* Close not yet accepted channels */
590 while ((sk = bt_accept_dequeue(parent, NULL)))
591 l2cap_sock_close(sk);
593 parent->sk_state = BT_CLOSED;
594 sock_set_flag(parent, SOCK_ZAPPED);
597 /* Kill socket (only if zapped and orphan)
598 * Must be called on unlocked socket.
600 static void l2cap_sock_kill(struct sock *sk)
/* Bail unless the socket is zapped AND has no attached struct socket. */
602 if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
605 BT_DBG("sk %p state %d", sk, sk->sk_state);
607 /* Kill poor orphan */
608 bt_sock_unlink(&l2cap_sk_list, sk);
609 sock_set_flag(sk, SOCK_DEAD);
/*
 * State-dependent close.  Listening sockets drain their accept queue;
 * connected SEQPACKET channels send a Disconnection Request and wait in
 * BT_DISCONN; half-open incoming channels (defer_setup) answer the
 * pending Connection Request with a rejection; everything else is simply
 * detached or zapped.  Must be called on a locked socket.
 */
613 static void __l2cap_sock_close(struct sock *sk, int reason)
615 BT_DBG("sk %p state %d socket %p", sk, sk->sk_state, sk->sk_socket);
617 switch (sk->sk_state) {
619 l2cap_sock_cleanup_listen(sk);
624 if (sk->sk_type == SOCK_SEQPACKET) {
625 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
626 struct l2cap_disconn_req req;
628 sk->sk_state = BT_DISCONN;
629 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
631 req.dcid = cpu_to_le16(l2cap_pi(sk)->dcid);
632 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
633 l2cap_send_cmd(conn, l2cap_get_ident(conn),
634 L2CAP_DISCONN_REQ, sizeof(req), &req);
636 l2cap_chan_del(sk, reason);
640 if (sk->sk_type == SOCK_SEQPACKET) {
641 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
642 struct l2cap_conn_rsp rsp;
/* Closing a deferred-setup channel blocks on security; otherwise bad PSM. */
645 if (bt_sk(sk)->defer_setup)
646 result = L2CAP_CR_SEC_BLOCK;
648 result = L2CAP_CR_BAD_PSM;
650 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
651 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
652 rsp.result = cpu_to_le16(result);
653 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
654 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
655 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
657 l2cap_chan_del(sk, reason);
662 l2cap_chan_del(sk, reason);
666 sock_set_flag(sk, SOCK_ZAPPED);
671 /* Must be called on unlocked socket. */
/* Stop the socket timer, then close with ECONNRESET (locking elided here). */
672 static void l2cap_sock_close(struct sock *sk)
674 l2cap_sock_clear_timer(sk);
676 __l2cap_sock_close(sk, ECONNRESET);
/*
 * Initialize a fresh L2CAP socket.  With a parent (incoming connection)
 * the child inherits type, defer_setup, MTUs and link mode; otherwise
 * defaults apply (imtu = L2CAP_DEFAULT_MTU; flush timeout below).
 */
681 static void l2cap_sock_init(struct sock *sk, struct sock *parent)
683 struct l2cap_pinfo *pi = l2cap_pi(sk);
688 sk->sk_type = parent->sk_type;
689 bt_sk(sk)->defer_setup = bt_sk(parent)->defer_setup;
691 pi->imtu = l2cap_pi(parent)->imtu;
692 pi->omtu = l2cap_pi(parent)->omtu;
693 pi->link_mode = l2cap_pi(parent)->link_mode;
695 pi->imtu = L2CAP_DEFAULT_MTU;
700 /* Default config options */
702 pi->flush_to = L2CAP_DEFAULT_FLUSH_TO;
/* Protocol descriptor; obj_size makes sk_alloc() reserve the l2cap_pinfo. */
705 static struct proto l2cap_proto = {
707 .owner = THIS_MODULE,
708 .obj_size = sizeof(struct l2cap_pinfo)
/*
 * Allocate and minimally initialize an L2CAP socket: generic sock data,
 * accept queue, destructor, connect timeout, state timer, and membership
 * in the global l2cap_sk_list.
 */
711 static struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock, int proto, gfp_t prio)
715 sk = sk_alloc(net, PF_BLUETOOTH, prio, &l2cap_proto);
719 sock_init_data(sock, sk);
720 INIT_LIST_HEAD(&bt_sk(sk)->accept_q);
722 sk->sk_destruct = l2cap_sock_destruct;
723 sk->sk_sndtimeo = msecs_to_jiffies(L2CAP_CONN_TIMEOUT);
725 sock_reset_flag(sk, SOCK_ZAPPED);
727 sk->sk_protocol = proto;
728 sk->sk_state = BT_OPEN;
730 setup_timer(&sk->sk_timer, l2cap_sock_timeout, (unsigned long) sk);
732 bt_sock_link(&l2cap_sk_list, sk);
/*
 * socket(2) backend: accept only SEQPACKET/DGRAM/RAW types, require
 * CAP_NET_RAW for raw sockets, then allocate and initialize the sock.
 */
736 static int l2cap_sock_create(struct net *net, struct socket *sock, int protocol)
740 BT_DBG("sock %p", sock);
742 sock->state = SS_UNCONNECTED;
744 if (sock->type != SOCK_SEQPACKET &&
745 sock->type != SOCK_DGRAM && sock->type != SOCK_RAW)
746 return -ESOCKTNOSUPPORT;
748 if (sock->type == SOCK_RAW && !capable(CAP_NET_RAW))
751 sock->ops = &l2cap_sock_ops;
753 sk = l2cap_sock_alloc(net, sock, protocol, GFP_ATOMIC);
757 l2cap_sock_init(sk, NULL);
/*
 * bind(2): validate the address family and state, require
 * CAP_NET_BIND_SERVICE for privileged PSMs (< 0x1001), reject duplicate
 * (psm, bdaddr) bindings, then record source address and PSM.
 */
761 static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
763 struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
764 struct sock *sk = sock->sk;
767 BT_DBG("sk %p, %s %d", sk, batostr(&la->l2_bdaddr), la->l2_psm);
769 if (!addr || addr->sa_family != AF_BLUETOOTH)
774 if (sk->sk_state != BT_OPEN) {
779 if (la->l2_psm && btohs(la->l2_psm) < 0x1001 &&
780 !capable(CAP_NET_BIND_SERVICE)) {
785 write_lock_bh(&l2cap_sk_list.lock);
787 if (la->l2_psm && __l2cap_get_sock_by_addr(la->l2_psm, &la->l2_bdaddr)) {
790 /* Save source address */
791 bacpy(&bt_sk(sk)->src, &la->l2_bdaddr);
792 l2cap_pi(sk)->psm = la->l2_psm;
793 l2cap_pi(sk)->sport = la->l2_psm;
794 sk->sk_state = BT_BOUND;
797 write_unlock_bh(&l2cap_sk_list.lock);
/*
 * Resolve a route to the destination, derive the HCI security level and
 * bonding/authentication requirements from the socket's link mode and
 * PSM, create the ACL (hci_connect), attach the channel, and either fire
 * the L2CAP handshake immediately (link already up) or wait for
 * l2cap_conn_ready().  Returns 0 or a negative errno.
 */
804 static int l2cap_do_connect(struct sock *sk)
806 bdaddr_t *src = &bt_sk(sk)->src;
807 bdaddr_t *dst = &bt_sk(sk)->dst;
808 struct l2cap_conn *conn;
809 struct hci_conn *hcon;
810 struct hci_dev *hdev;
815 BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst), l2cap_pi(sk)->psm);
817 if (!(hdev = hci_get_route(dst, src)))
818 return -EHOSTUNREACH;
820 hci_dev_lock_bh(hdev);
/* Security level: SECURE > SDP (PSM 0x0001) > ENCRYPT > default LOW. */
824 if (l2cap_pi(sk)->link_mode & L2CAP_LM_SECURE)
825 sec_level = BT_SECURITY_HIGH;
826 else if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001))
827 sec_level = BT_SECURITY_SDP;
828 else if (l2cap_pi(sk)->link_mode & L2CAP_LM_ENCRYPT)
829 sec_level = BT_SECURITY_MEDIUM;
831 sec_level = BT_SECURITY_LOW;
/* Auth type: raw sockets may dedicated-bond, SDP never bonds,
 * ordinary channels use general bonding when security is requested. */
833 if (sk->sk_type == SOCK_RAW) {
834 if (l2cap_pi(sk)->link_mode & L2CAP_LM_SECURE)
835 auth_type = HCI_AT_DEDICATED_BONDING_MITM;
836 else if (l2cap_pi(sk)->link_mode & L2CAP_LM_ENCRYPT)
837 auth_type = HCI_AT_DEDICATED_BONDING;
839 auth_type = HCI_AT_NO_BONDING;
840 } else if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
841 if (l2cap_pi(sk)->link_mode & L2CAP_LM_SECURE)
842 auth_type = HCI_AT_NO_BONDING_MITM;
844 auth_type = HCI_AT_NO_BONDING;
846 if (l2cap_pi(sk)->link_mode & L2CAP_LM_SECURE)
847 auth_type = HCI_AT_GENERAL_BONDING_MITM;
848 else if (l2cap_pi(sk)->link_mode & L2CAP_LM_ENCRYPT)
849 auth_type = HCI_AT_GENERAL_BONDING;
851 auth_type = HCI_AT_NO_BONDING;
854 hcon = hci_connect(hdev, ACL_LINK, dst, sec_level, auth_type);
858 conn = l2cap_conn_add(hcon, 0);
866 /* Update source addr of the socket */
867 bacpy(src, conn->src);
869 l2cap_chan_add(conn, sk, NULL);
871 sk->sk_state = BT_CONNECT;
872 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
874 if (hcon->state == BT_CONNECTED) {
875 if (sk->sk_type != SOCK_SEQPACKET) {
876 l2cap_sock_clear_timer(sk);
877 sk->sk_state = BT_CONNECTED;
883 hci_dev_unlock_bh(hdev);
/*
 * connect(2): validate address and state (SEQPACKET requires a PSM),
 * record the destination, start the connection, then block until
 * BT_CONNECTED unless O_NONBLOCK.
 */
888 static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, int alen, int flags)
890 struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
891 struct sock *sk = sock->sk;
898 if (addr->sa_family != AF_BLUETOOTH || alen < sizeof(struct sockaddr_l2)) {
903 if (sk->sk_type == SOCK_SEQPACKET && !la->l2_psm) {
908 switch(sk->sk_state) {
912 /* Already connecting */
916 /* Already connected */
929 /* Set destination address and psm */
930 bacpy(&bt_sk(sk)->dst, &la->l2_bdaddr);
931 l2cap_pi(sk)->psm = la->l2_psm;
933 if ((err = l2cap_do_connect(sk)))
937 err = bt_sock_wait_state(sk, BT_CONNECTED,
938 sock_sndtimeo(sk, flags & O_NONBLOCK));
/*
 * listen(2): only bound SEQPACKET sockets may listen.  A socket bound
 * without a PSM gets an ephemeral odd PSM auto-assigned from the
 * unprivileged range 0x1001..0x10ff.
 */
944 static int l2cap_sock_listen(struct socket *sock, int backlog)
946 struct sock *sk = sock->sk;
949 BT_DBG("sk %p backlog %d", sk, backlog);
953 if (sk->sk_state != BT_BOUND || sock->type != SOCK_SEQPACKET) {
958 if (!l2cap_pi(sk)->psm) {
959 bdaddr_t *src = &bt_sk(sk)->src;
964 write_lock_bh(&l2cap_sk_list.lock);
/* Valid dynamic PSMs are odd (LSB of the low octet set), hence += 2. */
966 for (psm = 0x1001; psm < 0x1100; psm += 2)
967 if (!__l2cap_get_sock_by_addr(htobs(psm), src)) {
968 l2cap_pi(sk)->psm = htobs(psm);
969 l2cap_pi(sk)->sport = htobs(psm);
974 write_unlock_bh(&l2cap_sk_list.lock);
980 sk->sk_max_ack_backlog = backlog;
981 sk->sk_ack_backlog = 0;
982 sk->sk_state = BT_LISTEN;
/*
 * accept(2): sleep (wake-one, interruptible) until bt_accept_dequeue()
 * yields a ready child or the timeout/signal/state-change aborts the
 * wait, then hand the child socket to 'newsock'.
 */
989 static int l2cap_sock_accept(struct socket *sock, struct socket *newsock, int flags)
991 DECLARE_WAITQUEUE(wait, current);
992 struct sock *sk = sock->sk, *nsk;
996 lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
998 if (sk->sk_state != BT_LISTEN) {
1003 timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
1005 BT_DBG("sk %p timeo %ld", sk, timeo);
1007 /* Wait for an incoming connection. (wake-one). */
1008 add_wait_queue_exclusive(sk->sk_sleep, &wait);
1009 while (!(nsk = bt_accept_dequeue(sk, newsock))) {
1010 set_current_state(TASK_INTERRUPTIBLE);
/* Socket is unlocked while we sleep (release elided in this view). */
1017 timeo = schedule_timeout(timeo);
1018 lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
/* Re-check state after reacquiring the lock: listen may have been torn down. */
1020 if (sk->sk_state != BT_LISTEN) {
1025 if (signal_pending(current)) {
1026 err = sock_intr_errno(timeo);
1030 set_current_state(TASK_RUNNING);
1031 remove_wait_queue(sk->sk_sleep, &wait);
1036 newsock->state = SS_CONNECTED;
1038 BT_DBG("new socket %p", nsk);
/*
 * getsockname/getpeername: fill a sockaddr_l2 with the peer address
 * (peer != 0) or the local address, plus the channel's PSM.
 */
1045 static int l2cap_sock_getname(struct socket *sock, struct sockaddr *addr, int *len, int peer)
1047 struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
1048 struct sock *sk = sock->sk;
1050 BT_DBG("sock %p, sk %p", sock, sk);
1052 addr->sa_family = AF_BLUETOOTH;
1053 *len = sizeof(struct sockaddr_l2);
1056 bacpy(&la->l2_bdaddr, &bt_sk(sk)->dst);
1058 bacpy(&la->l2_bdaddr, &bt_sk(sk)->src);
1060 la->l2_psm = l2cap_pi(sk)->psm;
/*
 * Copy user data into an skb chain (first fragment carries the L2CAP
 * basic header, DGRAM also prepends the 2-byte PSM; continuations are
 * linked via frag_list) sized to the connection MTU, then hand the chain
 * to hci_send_acl().  Returns bytes sent or a negative errno.
 */
1064 static inline int l2cap_do_send(struct sock *sk, struct msghdr *msg, int len)
1066 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1067 struct sk_buff *skb, **frag;
1068 int err, hlen, count, sent=0;
1069 struct l2cap_hdr *lh;
1071 BT_DBG("sk %p len %d", sk, len);
1073 /* First fragment (with L2CAP header) */
1074 if (sk->sk_type == SOCK_DGRAM)
1075 hlen = L2CAP_HDR_SIZE + 2;
1077 hlen = L2CAP_HDR_SIZE;
1079 count = min_t(unsigned int, (conn->mtu - hlen), len);
1081 skb = bt_skb_send_alloc(sk, hlen + count,
1082 msg->msg_flags & MSG_DONTWAIT, &err);
1086 /* Create L2CAP header */
1087 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1088 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1089 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
/* Connectionless frames carry the destination PSM after the header. */
1091 if (sk->sk_type == SOCK_DGRAM)
1092 put_unaligned(l2cap_pi(sk)->psm, (__le16 *) skb_put(skb, 2));
1094 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count)) {
1102 /* Continuation fragments (no L2CAP header) */
1103 frag = &skb_shinfo(skb)->frag_list;
1105 count = min_t(unsigned int, conn->mtu, len);
1107 *frag = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err);
1111 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count)) {
1119 frag = &(*frag)->next;
1122 if ((err = hci_send_acl(conn->hcon, skb, 0)) < 0)
/*
 * sendmsg(2): reject pending socket errors and MSG_OOB, enforce the
 * outgoing MTU for non-raw sockets, and dispatch to l2cap_do_send()
 * only when the channel is connected.
 */
1132 static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len)
1134 struct sock *sk = sock->sk;
1137 BT_DBG("sock %p, sk %p", sock, sk);
1139 err = sock_error(sk);
1143 if (msg->msg_flags & MSG_OOB)
1146 /* Check outgoing MTU */
1147 if (sk->sk_type != SOCK_RAW && len > l2cap_pi(sk)->omtu)
1152 if (sk->sk_state == BT_CONNECTED)
1153 err = l2cap_do_send(sk, msg, len);
/*
 * recvmsg(2): the first read on a deferred-setup channel implicitly
 * accepts the pending connection (sends the success Connection Response
 * and moves to BT_CONFIG); otherwise defer to the generic BT receive.
 */
1161 static int l2cap_sock_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len, int flags)
1163 struct sock *sk = sock->sk;
1167 if (sk->sk_state == BT_CONNECT2 && bt_sk(sk)->defer_setup) {
1168 struct l2cap_conn_rsp rsp;
1170 sk->sk_state = BT_CONFIG;
1172 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
1173 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
1174 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
1175 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
1176 l2cap_send_cmd(l2cap_pi(sk)->conn, l2cap_pi(sk)->ident,
1177 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
1185 return bt_sock_recvmsg(iocb, sock, msg, len, flags);
/*
 * Legacy SOL_L2CAP setsockopt: L2CAP_OPTIONS updates imtu/omtu (current
 * values pre-filled so a short copy keeps defaults); L2CAP_LM stores the
 * raw link-mode bitmask.  Switch/case labels are elided in this extract.
 */
1188 static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __user *optval, int optlen)
1190 struct sock *sk = sock->sk;
1191 struct l2cap_options opts;
1195 BT_DBG("sk %p", sk);
1201 opts.imtu = l2cap_pi(sk)->imtu;
1202 opts.omtu = l2cap_pi(sk)->omtu;
1203 opts.flush_to = l2cap_pi(sk)->flush_to;
1204 opts.mode = L2CAP_MODE_BASIC;
1206 len = min_t(unsigned int, sizeof(opts), optlen);
1207 if (copy_from_user((char *) &opts, optval, len)) {
1212 l2cap_pi(sk)->imtu = opts.imtu;
1213 l2cap_pi(sk)->omtu = opts.omtu;
1217 if (get_user(opt, (u32 __user *) optval)) {
1222 l2cap_pi(sk)->link_mode = opt;
/*
 * setsockopt(2): SOL_L2CAP goes to the legacy handler; BT_DEFER_SETUP is
 * only valid on bound/listening sockets and toggles deferred accept.
 */
1234 static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, int optlen)
1236 struct sock *sk = sock->sk;
1240 BT_DBG("sk %p", sk);
1242 if (level == SOL_L2CAP)
1243 return l2cap_sock_setsockopt_old(sock, optname, optval, optlen);
1248 case BT_DEFER_SETUP:
1249 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
1254 if (get_user(opt, (u32 __user *) optval)) {
1259 bt_sk(sk)->defer_setup = opt;
/*
 * Legacy SOL_L2CAP getsockopt: L2CAP_OPTIONS returns the channel MTUs and
 * mode; L2CAP_LM returns the link-mode bitmask; L2CAP_CONNINFO returns
 * HCI handle and remote device class, only once the channel is connected
 * (or a defer_setup channel is half-open).
 */
1271 static int l2cap_sock_getsockopt_old(struct socket *sock, int optname, char __user *optval, int __user *optlen)
1273 struct sock *sk = sock->sk;
1274 struct l2cap_options opts;
1275 struct l2cap_conninfo cinfo;
1278 BT_DBG("sk %p", sk);
1280 if (get_user(len, optlen))
1287 opts.imtu = l2cap_pi(sk)->imtu;
1288 opts.omtu = l2cap_pi(sk)->omtu;
1289 opts.flush_to = l2cap_pi(sk)->flush_to;
1290 opts.mode = L2CAP_MODE_BASIC;
1292 len = min_t(unsigned int, len, sizeof(opts));
1293 if (copy_to_user(optval, (char *) &opts, len))
1299 if (put_user(l2cap_pi(sk)->link_mode, (u32 __user *) optval))
1303 case L2CAP_CONNINFO:
1304 if (sk->sk_state != BT_CONNECTED &&
1305 !(sk->sk_state == BT_CONNECT2 &&
1306 bt_sk(sk)->defer_setup)) {
1311 cinfo.hci_handle = l2cap_pi(sk)->conn->hcon->handle;
1312 memcpy(cinfo.dev_class, l2cap_pi(sk)->conn->hcon->dev_class, 3);
1314 len = min_t(unsigned int, len, sizeof(cinfo));
1315 if (copy_to_user(optval, (char *) &cinfo, len))
/*
 * getsockopt(2): SOL_L2CAP goes to the legacy handler; BT_DEFER_SETUP is
 * readable only on bound/listening sockets.
 */
1329 static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen)
1331 struct sock *sk = sock->sk;
1334 BT_DBG("sk %p", sk);
1336 if (level == SOL_L2CAP)
1337 return l2cap_sock_getsockopt_old(sock, optname, optval, optlen);
1339 if (get_user(len, optlen))
1345 case BT_DEFER_SETUP:
1346 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
1351 if (put_user(bt_sk(sk)->defer_setup, (u32 __user *) optval))
/*
 * shutdown(2): idempotent -- on first call mark both directions shut,
 * cancel the timer and close the channel; with SO_LINGER set, wait for
 * BT_CLOSED up to the linger time.
 */
1365 static int l2cap_sock_shutdown(struct socket *sock, int how)
1367 struct sock *sk = sock->sk;
1370 BT_DBG("sock %p, sk %p", sock, sk);
1376 if (!sk->sk_shutdown) {
1377 sk->sk_shutdown = SHUTDOWN_MASK;
1378 l2cap_sock_clear_timer(sk);
1379 __l2cap_sock_close(sk, 0);
1381 if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
1382 err = bt_sock_wait_state(sk, BT_CLOSED,
/* release(2)/close: shut the socket down, then kill it if zapped+orphan. */
1389 static int l2cap_sock_release(struct socket *sock)
1391 struct sock *sk = sock->sk;
1394 BT_DBG("sock %p, sk %p", sock, sk);
1399 err = l2cap_sock_shutdown(sock, 2);
1402 l2cap_sock_kill(sk);
/*
 * Configuration finished on both sides: clear config state and timer,
 * then wake whoever is waiting -- the connecting thread (no parent) or
 * the accepting listener (incoming channel).
 */
1406 static void l2cap_chan_ready(struct sock *sk)
1408 struct sock *parent = bt_sk(sk)->parent;
1410 BT_DBG("sk %p, parent %p", sk, parent);
1412 l2cap_pi(sk)->conf_state = 0;
1413 l2cap_sock_clear_timer(sk);
1416 /* Outgoing channel.
1417 * Wake up socket sleeping on connect.
1419 sk->sk_state = BT_CONNECTED;
1420 sk->sk_state_change(sk);
1422 /* Incoming channel.
1423 * Wake up socket sleeping on accept.
1425 parent->sk_data_ready(parent, 0);
1429 /* Copy frame to all raw sockets on that connection */
1430 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
1432 struct l2cap_chan_list *l = &conn->chan_list;
1433 struct sk_buff *nskb;
1436 BT_DBG("conn %p", conn);
1438 read_lock(&l->lock);
1439 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
1440 if (sk->sk_type != SOCK_RAW)
1443 /* Don't send frame to the socket it came from */
/* Clone per recipient so each raw socket gets its own skb. */
1447 if (!(nskb = skb_clone(skb, GFP_ATOMIC)))
1450 if (sock_queue_rcv_skb(sk, nskb))
1453 read_unlock(&l->lock);
1456 /* ---- L2CAP signalling commands ---- */
/*
 * Build a signalling PDU on CID 0x0001: L2CAP basic header + command
 * header + payload, fragmented into an skb frag_list when the total
 * exceeds the connection MTU.  Returns the head skb or (on allocation
 * failure -- path elided) NULL.
 */
1457 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
1458 u8 code, u8 ident, u16 dlen, void *data)
1460 struct sk_buff *skb, **frag;
1461 struct l2cap_cmd_hdr *cmd;
1462 struct l2cap_hdr *lh;
1465 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d", conn, code, ident, dlen);
1467 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
1468 count = min_t(unsigned int, conn->mtu, len);
1470 skb = bt_skb_alloc(count, GFP_ATOMIC);
1474 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1475 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
1476 lh->cid = cpu_to_le16(0x0001);
1478 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
1481 cmd->len = cpu_to_le16(dlen);
/* First fragment: whatever payload fits after the two headers. */
1484 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
1485 memcpy(skb_put(skb, count), data, count);
1491 /* Continuation fragments (no L2CAP header) */
1492 frag = &skb_shinfo(skb)->frag_list;
1494 count = min_t(unsigned int, conn->mtu, len);
1496 *frag = bt_skb_alloc(count, GFP_ATOMIC);
1500 memcpy(skb_put(*frag, count), data, count);
1505 frag = &(*frag)->next;
/*
 * Decode one TLV configuration option at *ptr: 1/2/4-byte values are
 * read (little-endian) into *val; other lengths return a pointer to the
 * raw bytes.  Advances past the option (advance elided in this view) and
 * returns the consumed length.
 */
1515 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
1517 struct l2cap_conf_opt *opt = *ptr;
1520 len = L2CAP_CONF_OPT_SIZE + opt->len;
1528 *val = *((u8 *) opt->val);
1532 *val = __le16_to_cpu(*((__le16 *) opt->val));
1536 *val = __le32_to_cpu(*((__le32 *) opt->val));
1540 *val = (unsigned long) opt->val;
1544 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
/*
 * Append one TLV configuration option at *ptr: 1/2/4-byte values are
 * stored little-endian; other lengths treat 'val' as a pointer to raw
 * bytes.  *ptr advances past the written option.
 */
1548 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
1550 struct l2cap_conf_opt *opt = *ptr;
1552 BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
1559 *((u8 *) opt->val) = val;
1563 *((__le16 *) opt->val) = cpu_to_le16(val);
1567 *((__le32 *) opt->val) = cpu_to_le32(val);
1571 memcpy(opt->val, (void *) val, len);
1575 *ptr += L2CAP_CONF_OPT_SIZE + len;
/*
 * Build our Configuration Request into 'data': an MTU option only when
 * imtu differs from the default, destination CID and zero flags.
 * Returns the request length (return elided in this view).
 */
1578 static int l2cap_build_conf_req(struct sock *sk, void *data)
1580 struct l2cap_pinfo *pi = l2cap_pi(sk);
1581 struct l2cap_conf_req *req = data;
1582 void *ptr = req->data;
1584 BT_DBG("sk %p", sk);
1586 if (pi->imtu != L2CAP_DEFAULT_MTU)
1587 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);
1589 /* FIXME: Need actual value of the flush timeout */
1590 //if (flush_to != L2CAP_DEFAULT_FLUSH_TO)
1591 // l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2, pi->flush_to);
1593 req->dcid = cpu_to_le16(pi->dcid);
1594 req->flags = cpu_to_le16(0);
/*
 * Parse the peer's buffered Configuration Request (pi->conf_req/conf_len)
 * and build our Configuration Response into 'data'.  Unknown non-hint
 * options produce L2CAP_CONF_UNKNOWN with the offending types echoed
 * back; only basic mode is accepted -- any other RFC mode is rejected
 * with an explicit basic-mode RFC option.
 */
1599 static int l2cap_parse_conf_req(struct sock *sk, void *data)
1601 struct l2cap_pinfo *pi = l2cap_pi(sk);
1602 struct l2cap_conf_rsp *rsp = data;
1603 void *ptr = rsp->data;
1604 void *req = pi->conf_req;
1605 int len = pi->conf_len;
1606 int type, hint, olen;
1608 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
1609 u16 mtu = L2CAP_DEFAULT_MTU;
1610 u16 result = L2CAP_CONF_SUCCESS;
1612 BT_DBG("sk %p", sk);
1614 while (len >= L2CAP_CONF_OPT_SIZE) {
1615 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
1621 case L2CAP_CONF_MTU:
1625 case L2CAP_CONF_FLUSH_TO:
1629 case L2CAP_CONF_QOS:
1632 case L2CAP_CONF_RFC:
1633 if (olen == sizeof(rfc))
1634 memcpy(&rfc, (void *) val, olen);
/* Non-hint unknown option: reject and echo the type back to the peer. */
1641 result = L2CAP_CONF_UNKNOWN;
1642 *((u8 *) ptr++) = type;
1647 if (result == L2CAP_CONF_SUCCESS) {
1648 /* Configure output options and let the other side know
1649 * which ones we don't like. */
1651 if (rfc.mode == L2CAP_MODE_BASIC) {
/* MTU below the acceptable minimum (check elided) is unacceptable. */
1653 result = L2CAP_CONF_UNACCEPT;
1656 pi->conf_state |= L2CAP_CONF_OUTPUT_DONE;
1659 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);
1661 result = L2CAP_CONF_UNACCEPT;
1663 memset(&rfc, 0, sizeof(rfc));
1664 rfc.mode = L2CAP_MODE_BASIC;
1666 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
1667 sizeof(rfc), (unsigned long) &rfc);
1671 rsp->scid = cpu_to_le16(pi->dcid);
1672 rsp->result = cpu_to_le16(result);
1673 rsp->flags = cpu_to_le16(0x0000);
/*
 * Build a minimal Configuration Response (no options) with the given
 * result and flags; scid is the peer's channel endpoint (our dcid).
 */
1678 static int l2cap_build_conf_rsp(struct sock *sk, void *data, u16 result, u16 flags)
1680 struct l2cap_conf_rsp *rsp = data;
1681 void *ptr = rsp->data;
1683 BT_DBG("sk %p", sk);
1685 rsp->scid = cpu_to_le16(l2cap_pi(sk)->dcid);
1686 rsp->result = cpu_to_le16(result);
1687 rsp->flags = cpu_to_le16(flags);
/*
 * Handle an incoming Command Reject.  Only reason 0x0000 ("command not
 * understood") for our outstanding Information Request matters: treat it
 * as "no feature info available" and proceed with channel setup.
 */
1692 static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1694 struct l2cap_cmd_rej *rej = (struct l2cap_cmd_rej *) data;
1696 if (rej->reason != 0x0000)
1699 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
1700 cmd->ident == conn->info_ident) {
1701 conn->info_ident = 0;
1702 del_timer(&conn->info_timer);
1703 l2cap_conn_start(conn);
/* NOTE(review): numbered extract — braces, 'else' lines and gotos between
 * the visible lines are absent; the error paths (bad PSM, security block,
 * no memory) presumably jump to a common response send. */
/* Handle an incoming L2CAP Connection Request: find a listening socket
 * for the PSM, allocate and initialize a child socket, attach it to the
 * connection, and send a Connection Response (possibly pending on
 * authentication/authorization or the feature-mask exchange). */
1709 static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1711 struct l2cap_chan_list *list = &conn->chan_list;
1712 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
1713 struct l2cap_conn_rsp rsp;
1714 struct sock *sk, *parent;
1715 int result, status = L2CAP_CS_NO_INFO;
1717 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
/* psm is kept little-endian; compared below against cpu_to_le16 consts. */
1718 __le16 psm = req->psm;
1720 BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);
1722 /* Check if we have socket listening on psm */
1723 parent = l2cap_get_sock_by_psm(BT_LISTEN, psm, conn->src);
1725 result = L2CAP_CR_BAD_PSM;
1729 /* Check if the ACL is secure enough (if not SDP) */
/* 0x0001 is the SDP PSM, which is exempt from the link-mode check. */
1730 if (psm != cpu_to_le16(0x0001) &&
1731 !hci_conn_check_link_mode(conn->hcon)) {
1732 result = L2CAP_CR_SEC_BLOCK;
1736 result = L2CAP_CR_NO_MEM;
1738 /* Check for backlog size */
1739 if (sk_acceptq_is_full(parent)) {
1740 BT_DBG("backlog full %d", parent->sk_ack_backlog);
/* GFP_ATOMIC: this runs in (soft)irq/bh context. */
1744 sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP, GFP_ATOMIC);
1748 write_lock_bh(&list->lock);
1750 /* Check if we already have channel with that dcid */
/* Duplicate remote CID: kill the freshly allocated socket and bail. */
1751 if (__l2cap_get_chan_by_dcid(list, scid)) {
1752 write_unlock_bh(&list->lock);
1753 sock_set_flag(sk, SOCK_ZAPPED);
1754 l2cap_sock_kill(sk);
/* Pin the ACL while this channel exists. */
1758 hci_conn_hold(conn->hcon);
1760 l2cap_sock_init(sk, parent);
1761 bacpy(&bt_sk(sk)->src, conn->src);
1762 bacpy(&bt_sk(sk)->dst, conn->dst);
1763 l2cap_pi(sk)->psm = psm;
1764 l2cap_pi(sk)->dcid = scid;
/* Adding the channel assigns our local CID, reported back as dcid. */
1766 __l2cap_chan_add(conn, sk, parent);
1767 dcid = l2cap_pi(sk)->scid;
1769 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
1771 l2cap_pi(sk)->ident = cmd->ident;
/* Only proceed past "pending" once the feature-mask exchange started. */
1773 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
1774 if (l2cap_check_link_mode(sk)) {
/* Deferred setup: park in BT_CONNECT2 and wake the listener so
 * userspace can authorize the connection. */
1775 if (bt_sk(sk)->defer_setup) {
1776 sk->sk_state = BT_CONNECT2;
1777 result = L2CAP_CR_PEND;
1778 status = L2CAP_CS_AUTHOR_PEND;
1779 parent->sk_data_ready(parent, 0);
1781 sk->sk_state = BT_CONFIG;
1782 result = L2CAP_CR_SUCCESS;
1783 status = L2CAP_CS_NO_INFO;
/* Link mode not yet satisfied: pend on authentication. */
1786 sk->sk_state = BT_CONNECT2;
1787 result = L2CAP_CR_PEND;
1788 status = L2CAP_CS_AUTHEN_PEND;
/* Feature mask not requested yet: pend with no info (triggers the
 * Information Request below). */
1791 sk->sk_state = BT_CONNECT2;
1792 result = L2CAP_CR_PEND;
1793 status = L2CAP_CS_NO_INFO;
1796 write_unlock_bh(&list->lock);
1799 bh_unlock_sock(parent);
/* Connection Response: scid/dcid are from the initiator's viewpoint. */
1802 rsp.scid = cpu_to_le16(scid);
1803 rsp.dcid = cpu_to_le16(dcid);
1804 rsp.result = cpu_to_le16(result);
1805 rsp.status = cpu_to_le16(status);
1806 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
/* First pending connection on this link: kick off the feature-mask
 * Information Request with a timeout. */
1808 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
1809 struct l2cap_info_req info;
1810 info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
1812 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
1813 conn->info_ident = l2cap_get_ident(conn);
1815 mod_timer(&conn->info_timer, jiffies +
1816 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
1818 l2cap_send_cmd(conn, conn->info_ident,
1819 L2CAP_INFO_REQ, sizeof(info), &info);
/* Handle an incoming L2CAP Connection Response for a channel we
 * initiated.  (Numbered extract: the switch header, other case labels
 * and function tail are not visible.) */
1825 static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1827 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
1828 u16 scid, dcid, result, status;
1832 scid = __le16_to_cpu(rsp->scid);
1833 dcid = __le16_to_cpu(rsp->dcid);
1834 result = __le16_to_cpu(rsp->result);
1835 status = __le16_to_cpu(rsp->status);
1837 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);
/* Look the channel up by our CID; if the response carries no valid
 * scid, fall back to matching the signaling identifier. */
1840 if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, scid)))
1843 if (!(sk = l2cap_get_chan_by_ident(&conn->chan_list, cmd->ident)))
1848 case L2CAP_CR_SUCCESS:
/* Connection accepted: record the peer's CID and immediately send
 * our Configuration Request. */
1849 sk->sk_state = BT_CONFIG;
1850 l2cap_pi(sk)->ident = 0;
1851 l2cap_pi(sk)->dcid = dcid;
1852 l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
1854 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
1855 l2cap_build_conf_req(sk, req), req);
/* Non-success, non-pending result: tear the channel down. */
1862 l2cap_chan_del(sk, ECONNREFUSED);
/* Handle an incoming L2CAP Configuration Request.  Options may arrive
 * fragmented across several requests (continuation flag 0x0001); they
 * are accumulated in pi->conf_req until the final fragment, then parsed
 * as a whole.  (Numbered extract: gotos/braces between visible lines
 * are absent.) */
1870 static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
1872 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
1878 dcid = __le16_to_cpu(req->dcid);
1879 flags = __le16_to_cpu(req->flags);
1881 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
1883 if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid)))
/* A channel already being disconnected ignores further config. */
1886 if (sk->sk_state == BT_DISCONN)
1889 /* Reject if config buffer is too small. */
1890 len = cmd_len - sizeof(*req);
1891 if (l2cap_pi(sk)->conf_len + len > sizeof(l2cap_pi(sk)->conf_req)) {
1892 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
1893 l2cap_build_conf_rsp(sk, rsp,
1894 L2CAP_CONF_REJECT, flags), rsp);
/* Append this fragment's options to the per-channel buffer. */
1899 memcpy(l2cap_pi(sk)->conf_req + l2cap_pi(sk)->conf_len, req->data, len);
1900 l2cap_pi(sk)->conf_len += len;
1902 if (flags & 0x0001) {
1903 /* Incomplete config. Send empty response. */
1904 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
1905 l2cap_build_conf_rsp(sk, rsp,
1906 L2CAP_CONF_SUCCESS, 0x0001), rsp);
1910 /* Complete config. */
1911 len = l2cap_parse_conf_req(sk, rsp);
1915 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
1917 /* Reset config buffer. */
1918 l2cap_pi(sk)->conf_len = 0;
1920 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE))
/* Both directions configured: the channel is fully open. */
1923 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_INPUT_DONE) {
1924 sk->sk_state = BT_CONNECTED;
1925 l2cap_chan_ready(sk);
/* We haven't sent our own Configuration Request yet — send it now. */
1929 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)) {
1931 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
1932 l2cap_build_conf_req(sk, buf), buf);
/* Handle an incoming L2CAP Configuration Response to a request we sent.
 * (Numbered extract: switch header, default case and function tail are
 * not visible.) */
1940 static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1942 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
1943 u16 scid, flags, result;
1946 scid = __le16_to_cpu(rsp->scid);
1947 flags = __le16_to_cpu(rsp->flags);
1948 result = __le16_to_cpu(rsp->result);
1950 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x", scid, flags, result);
1952 if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, scid)))
1956 case L2CAP_CONF_SUCCESS:
1959 case L2CAP_CONF_UNACCEPT:
/* Bounded retry: resend the identical request a few times before
 * giving up on the channel. */
1960 if (++l2cap_pi(sk)->conf_retry < L2CAP_CONF_MAX_RETRIES) {
1962 /* It does not make sense to adjust L2CAP parameters
1963 * that are currently defined in the spec. We simply
1964 * resend config request that we sent earlier. It is
1965 * stupid, but it helps qualification testing which
1966 * expects at least some response from us. */
1967 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
1968 l2cap_build_conf_req(sk, req), req);
/* Retries exhausted (or other failure): disconnect the channel with
 * a 5 second guard timer for the response. */
1973 sk->sk_state = BT_DISCONN;
1974 sk->sk_err = ECONNRESET;
1975 l2cap_sock_set_timer(sk, HZ * 5);
1977 struct l2cap_disconn_req req;
1978 req.dcid = cpu_to_le16(l2cap_pi(sk)->dcid);
1979 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
1980 l2cap_send_cmd(conn, l2cap_get_ident(conn),
1981 L2CAP_DISCONN_REQ, sizeof(req), &req);
/* Our outgoing direction is now configured; if the peer's direction
 * is done too the channel becomes connected. */
1989 l2cap_pi(sk)->conf_state |= L2CAP_CONF_INPUT_DONE;
1991 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE) {
1992 sk->sk_state = BT_CONNECTED;
1993 l2cap_chan_ready(sk);
/* Handle an incoming L2CAP Disconnection Request: acknowledge with a
 * Disconnection Response, then tear down and kill the channel. */
2001 static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2003 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
2004 struct l2cap_disconn_rsp rsp;
2008 scid = __le16_to_cpu(req->scid);
2009 dcid = __le16_to_cpu(req->dcid);
2011 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
/* The request's dcid is our local CID. */
2013 if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid)))
2016 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
2017 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
2018 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
/* Stop further send/receive on the socket before deleting. */
2020 sk->sk_shutdown = SHUTDOWN_MASK;
2022 l2cap_chan_del(sk, ECONNRESET);
2025 l2cap_sock_kill(sk);
/* Handle an incoming L2CAP Disconnection Response: the peer confirmed
 * our disconnect, so delete (no error) and kill the channel. */
2029 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2031 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
2035 scid = __le16_to_cpu(rsp->scid);
2036 dcid = __le16_to_cpu(rsp->dcid);
2038 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
2040 if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, scid)))
2043 l2cap_chan_del(sk, 0);
2046 l2cap_sock_kill(sk);
/* Handle an incoming L2CAP Information Request.  Only the feature-mask
 * query is supported; everything else gets L2CAP_IR_NOTSUPP. */
2050 static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2052 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
2055 type = __le16_to_cpu(req->type);
2057 BT_DBG("type 0x%4.4x", type);
2059 if (type == L2CAP_IT_FEAT_MASK) {
/* 'buf' (declared on a line not visible in this extract) holds the
 * response header plus the 32-bit feature mask payload. */
2061 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
2062 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
2063 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
/* put_unaligned: rsp->data has no alignment guarantee. */
2064 put_unaligned(cpu_to_le32(l2cap_feat_mask), (__le32 *) rsp->data);
2065 l2cap_send_cmd(conn, cmd->ident,
2066 L2CAP_INFO_RSP, sizeof(buf), buf);
2068 struct l2cap_info_rsp rsp;
2069 rsp.type = cpu_to_le16(type);
2070 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
2071 l2cap_send_cmd(conn, cmd->ident,
2072 L2CAP_INFO_RSP, sizeof(rsp), &rsp);
/* Handle an incoming L2CAP Information Response: record the peer's
 * feature mask (if that is what we asked for), stop the info timer and
 * start the connections that were waiting on this exchange. */
2078 static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2080 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
2083 type = __le16_to_cpu(rsp->type);
2084 result = __le16_to_cpu(rsp->result);
2086 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
2088 conn->info_ident = 0;
2090 del_timer(&conn->info_timer);
/* Unaligned read: the mask sits directly after the response header. */
2092 if (type == L2CAP_IT_FEAT_MASK)
2093 conn->feat_mask = get_unaligned_le32(rsp->data);
2095 l2cap_conn_start(conn);
/* Demultiplex the L2CAP signaling channel: iterate over the command
 * PDUs packed in 'skb', dispatch each to its handler, and send a
 * Command Reject when a handler fails or the code is unknown.
 * (Numbered extract: switch header, break statements and the skb free
 * at the end are not visible.) */
2100 static inline void l2cap_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb)
2102 u8 *data = skb->data;
2104 struct l2cap_cmd_hdr cmd;
/* Mirror all signaling traffic to raw sockets first. */
2107 l2cap_raw_recv(conn, skb);
2109 while (len >= L2CAP_CMD_HDR_SIZE) {
2111 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
2112 data += L2CAP_CMD_HDR_SIZE;
2113 len -= L2CAP_CMD_HDR_SIZE;
2115 cmd_len = le16_to_cpu(cmd.len);
2117 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);
/* Sanity: the declared command length must fit in the remaining
 * buffer, and ident 0 is invalid per the spec. */
2119 if (cmd_len > len || !cmd.ident) {
2120 BT_DBG("corrupted command");
2125 case L2CAP_COMMAND_REJ:
2126 l2cap_command_rej(conn, &cmd, data);
2129 case L2CAP_CONN_REQ:
2130 err = l2cap_connect_req(conn, &cmd, data);
2133 case L2CAP_CONN_RSP:
2134 err = l2cap_connect_rsp(conn, &cmd, data);
2137 case L2CAP_CONF_REQ:
2138 err = l2cap_config_req(conn, &cmd, cmd_len, data);
2141 case L2CAP_CONF_RSP:
2142 err = l2cap_config_rsp(conn, &cmd, data);
2145 case L2CAP_DISCONN_REQ:
2146 err = l2cap_disconnect_req(conn, &cmd, data);
2149 case L2CAP_DISCONN_RSP:
2150 err = l2cap_disconnect_rsp(conn, &cmd, data);
2153 case L2CAP_ECHO_REQ:
/* Echo the request payload straight back. */
2154 l2cap_send_cmd(conn, cmd.ident, L2CAP_ECHO_RSP, cmd_len, data);
2157 case L2CAP_ECHO_RSP:
2160 case L2CAP_INFO_REQ:
2161 err = l2cap_information_req(conn, &cmd, data);
2164 case L2CAP_INFO_RSP:
2165 err = l2cap_information_rsp(conn, &cmd, data);
2169 BT_ERR("Unknown signaling command 0x%2.2x", cmd.code);
/* Handler error (or unknown code): answer with Command Reject. */
2175 struct l2cap_cmd_rej rej;
2176 BT_DBG("error %d", err);
2178 /* FIXME: Map err to a valid reason */
2179 rej.reason = cpu_to_le16(0);
2180 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
/* Deliver a data frame to the connection-oriented channel identified by
 * 'cid'.  Frames for unknown CIDs, non-connected channels, or frames
 * exceeding the channel's incoming MTU are dropped (drop paths are on
 * lines not visible in this extract). */
2190 static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
2194 sk = l2cap_get_chan_by_scid(&conn->chan_list, cid);
2196 BT_DBG("unknown cid 0x%4.4x", cid);
2200 BT_DBG("sk %p, len %d", sk, skb->len);
2202 if (sk->sk_state != BT_CONNECTED)
2205 if (l2cap_pi(sk)->imtu < skb->len)
2208 /* If socket recv buffers overflows we drop data here
2209 * which is *bad* because L2CAP has to be reliable.
2210 * But we don't have any other choice. L2CAP doesn't
2211 * provide flow control mechanism. */
2213 if (!sock_queue_rcv_skb(sk, skb))
/* Deliver a connectionless (group) frame to a socket bound to 'psm'.
 * Accepts sockets in BT_BOUND as well as BT_CONNECTED state; oversized
 * frames are dropped (drop paths not visible in this extract). */
2226 static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
/* State 0 means "any state" for the PSM lookup; filtered just below. */
2230 sk = l2cap_get_sock_by_psm(0, psm, conn->src);
2234 BT_DBG("sk %p, len %d", sk, skb->len);
2236 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_CONNECTED)
2239 if (l2cap_pi(sk)->imtu < skb->len)
2242 if (!sock_queue_rcv_skb(sk, skb))
/* sk may be NULL when no matching socket was found. */
2249 if (sk) bh_unlock_sock(sk);
/* Route a complete, reassembled L2CAP frame by destination CID:
 * signaling channel, connectionless channel, or a data channel.
 * (The CID comparisons/switch lines are not visible in this extract.) */
2253 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
2255 struct l2cap_hdr *lh = (void *) skb->data;
/* Strip the basic L2CAP header before dispatching the payload. */
2259 skb_pull(skb, L2CAP_HDR_SIZE);
2260 cid = __le16_to_cpu(lh->cid);
2261 len = __le16_to_cpu(lh->len);
2263 BT_DBG("len %d, cid 0x%4.4x", len, cid);
2267 l2cap_sig_channel(conn, skb);
/* Connectionless frames carry the PSM first in the payload; kept
 * little-endian and read unaligned. */
2271 psm = get_unaligned((__le16 *) skb->data);
2273 l2cap_conless_channel(conn, psm, skb);
2277 l2cap_data_channel(conn, cid, skb);
2282 /* ---- L2CAP interface with lower layer (HCI) ---- */
/* HCI callback: an incoming ACL connection is being indicated.  Scan
 * the listening L2CAP sockets and return the accept/link-mode flags —
 * preferring sockets bound exactly to this adapter's address over
 * BDADDR_ANY wildcards. */
2284 static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
2286 int exact = 0, lm1 = 0, lm2 = 0;
2287 register struct sock *sk;
2288 struct hlist_node *node;
/* L2CAP only runs over ACL links. */
2290 if (type != ACL_LINK)
2293 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
2295 /* Find listening sockets and check their link_mode */
2296 read_lock(&l2cap_sk_list.lock);
2297 sk_for_each(sk, node, &l2cap_sk_list.head) {
2298 if (sk->sk_state != BT_LISTEN)
/* lm1 accumulates flags from exact-address matches, lm2 from
 * wildcard (BDADDR_ANY) listeners. */
2301 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
2302 lm1 |= (HCI_LM_ACCEPT | l2cap_pi(sk)->link_mode);
2304 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
2305 lm2 |= (HCI_LM_ACCEPT | l2cap_pi(sk)->link_mode);
2307 read_unlock(&l2cap_sk_list.lock);
2309 return exact ? lm1 : lm2;
/* HCI callback: an ACL connection attempt completed.  On success attach
 * an l2cap_conn and mark it ready; on failure tear down any state with
 * the mapped error code. */
2312 static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
2314 struct l2cap_conn *conn;
2316 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
2318 if (hcon->type != ACL_LINK)
2322 conn = l2cap_conn_add(hcon, status);
2324 l2cap_conn_ready(conn);
/* bt_err() maps the HCI status byte to an errno value. */
2326 l2cap_conn_del(hcon, bt_err(status));
/* HCI callback: the ACL link was disconnected — drop the whole L2CAP
 * connection and all of its channels. */
2331 static int l2cap_disconn_ind(struct hci_conn *hcon, u8 reason)
2333 BT_DBG("hcon %p reason %d", hcon, reason);
2335 if (hcon->type != ACL_LINK)
2338 l2cap_conn_del(hcon, bt_err(reason));
/* HCI callback: authentication/encryption state changed on the link.
 * Walk every channel on the connection and advance or abort the ones
 * that were waiting on security.  (Numbered extract: early returns,
 * bh_lock/unlock of each sk and some braces are not visible.) */
2343 static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
2345 struct l2cap_chan_list *l;
2346 struct l2cap_conn *conn = hcon->l2cap_data;
2352 l = &conn->chan_list;
2354 BT_DBG("conn %p", conn);
2356 read_lock(&l->lock);
2358 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
2359 struct l2cap_pinfo *pi = l2cap_pi(sk);
/* Encryption dropped on a SECURE-mode channel that is already up or
 * configuring: force-close it. */
2363 if (!status && encrypt == 0x00 &&
2364 (pi->link_mode & L2CAP_LM_SECURE) &&
2365 (sk->sk_state == BT_CONNECTED ||
2366 sk->sk_state == BT_CONFIG)) {
2367 __l2cap_sock_close(sk, ECONNREFUSED);
/* Outgoing channel waiting on security: now send the Connection
 * Request (success path; failure path lines not visible here). */
2372 if (sk->sk_state == BT_CONNECT) {
2374 struct l2cap_conn_req req;
2375 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
2376 req.psm = l2cap_pi(sk)->psm;
2378 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
2380 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
2381 L2CAP_CONN_REQ, sizeof(req), &req);
2383 l2cap_sock_clear_timer(sk);
2384 l2cap_sock_set_timer(sk, HZ / 10);
/* Incoming channel that was pending on authentication: answer the
 * stored Connection Request now. */
2386 } else if (sk->sk_state == BT_CONNECT2) {
2387 struct l2cap_conn_rsp rsp;
2391 sk->sk_state = BT_CONFIG;
2392 result = L2CAP_CR_SUCCESS;
/* Security failed: short timer, then reject with SEC_BLOCK. */
2394 sk->sk_state = BT_DISCONN;
2395 l2cap_sock_set_timer(sk, HZ / 10);
2396 result = L2CAP_CR_SEC_BLOCK;
2399 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
2400 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
2401 rsp.result = cpu_to_le16(result);
2402 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
2403 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
2404 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
2410 read_unlock(&l->lock);
/* HCI callback: raw ACL data arrived.  Reassemble L2CAP frames that
 * span multiple ACL packets (ACL_START carries the L2CAP header and
 * total length; continuation packets fill conn->rx_skb until rx_len
 * reaches zero).  (Numbered extract: the else branch for continuation
 * frames, gotos and the trailing kfree_skb are not visible.) */
2415 static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
2417 struct l2cap_conn *conn = hcon->l2cap_data;
/* First data on this link may arrive before connect_cfm ran. */
2419 if (!conn && !(conn = l2cap_conn_add(hcon, 0)))
2422 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
2424 if (flags & ACL_START) {
2425 struct l2cap_hdr *hdr;
/* A new start frame while reassembly is in progress means the
 * previous frame was lost: drop the partial buffer. */
2429 BT_ERR("Unexpected start frame (len %d)", skb->len);
2430 kfree_skb(conn->rx_skb);
2431 conn->rx_skb = NULL;
2433 l2cap_conn_unreliable(conn, ECOMM);
2437 BT_ERR("Frame is too short (len %d)", skb->len);
2438 l2cap_conn_unreliable(conn, ECOMM);
2442 hdr = (struct l2cap_hdr *) skb->data;
2443 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
2445 if (len == skb->len) {
2446 /* Complete frame received */
2447 l2cap_recv_frame(conn, skb);
2451 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
2453 if (skb->len > len) {
2454 BT_ERR("Frame is too long (len %d, expected len %d)",
2456 l2cap_conn_unreliable(conn, ECOMM);
2460 /* Allocate skb for the complete frame (with header) */
2461 if (!(conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC)))
2464 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
/* rx_len tracks how many bytes are still expected. */
2466 conn->rx_len = len - skb->len;
2468 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
/* Continuation with no reassembly in progress is a protocol error. */
2470 if (!conn->rx_len) {
2471 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
2472 l2cap_conn_unreliable(conn, ECOMM);
2476 if (skb->len > conn->rx_len) {
2477 BT_ERR("Fragment is too long (len %d, expected %d)",
2478 skb->len, conn->rx_len);
2479 kfree_skb(conn->rx_skb);
2480 conn->rx_skb = NULL;
2482 l2cap_conn_unreliable(conn, ECOMM);
2486 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
2488 conn->rx_len -= skb->len;
2490 if (!conn->rx_len) {
2491 /* Complete frame received */
2492 l2cap_recv_frame(conn, conn->rx_skb);
2493 conn->rx_skb = NULL;
/* sysfs read handler: dump one line per L2CAP socket (addresses, state,
 * PSM, CIDs, MTUs, link mode) into 'buf'.  NOTE(review): sprintf into a
 * PAGE_SIZE sysfs buffer is unbounded if many sockets exist — the
 * upstream kernel later moved this to seq_file/debugfs. */
2502 static ssize_t l2cap_sysfs_show(struct class *dev, char *buf)
2505 struct hlist_node *node;
2508 read_lock_bh(&l2cap_sk_list.lock);
2510 sk_for_each(sk, node, &l2cap_sk_list.head) {
2511 struct l2cap_pinfo *pi = l2cap_pi(sk);
2513 str += sprintf(str, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d 0x%x\n",
2514 batostr(&bt_sk(sk)->src), batostr(&bt_sk(sk)->dst),
2515 sk->sk_state, btohs(pi->psm), pi->scid, pi->dcid,
2516 pi->imtu, pi->omtu, pi->link_mode);
2519 read_unlock_bh(&l2cap_sk_list.lock);
/* Read-only class attribute "l2cap" backed by the show handler above. */
2524 static CLASS_ATTR(l2cap, S_IRUGO, l2cap_sysfs_show, NULL);
/* Socket-layer operations for SOCK_SEQPACKET/RAW L2CAP sockets; generic
 * bt_sock_* and sock_no_* helpers fill the slots L2CAP has no special
 * handling for. */
2526 static const struct proto_ops l2cap_sock_ops = {
2527 .family = PF_BLUETOOTH,
2528 .owner = THIS_MODULE,
2529 .release = l2cap_sock_release,
2530 .bind = l2cap_sock_bind,
2531 .connect = l2cap_sock_connect,
2532 .listen = l2cap_sock_listen,
2533 .accept = l2cap_sock_accept,
2534 .getname = l2cap_sock_getname,
2535 .sendmsg = l2cap_sock_sendmsg,
2536 .recvmsg = l2cap_sock_recvmsg,
2537 .poll = bt_sock_poll,
2538 .ioctl = bt_sock_ioctl,
2539 .mmap = sock_no_mmap,
2540 .socketpair = sock_no_socketpair,
2541 .shutdown = l2cap_sock_shutdown,
2542 .setsockopt = l2cap_sock_setsockopt,
2543 .getsockopt = l2cap_sock_getsockopt
/* Registered with the Bluetooth socket layer so socket(PF_BLUETOOTH,
 * ..., BTPROTO_L2CAP) creates L2CAP sockets. */
2546 static struct net_proto_family l2cap_sock_family_ops = {
2547 .family = PF_BLUETOOTH,
2548 .owner = THIS_MODULE,
2549 .create = l2cap_sock_create,
/* Hooks registered with the HCI core: connection indications and
 * confirmations, disconnects, security changes and inbound ACL data. */
2552 static struct hci_proto l2cap_hci_proto = {
2554 .id = HCI_PROTO_L2CAP,
2555 .connect_ind = l2cap_connect_ind,
2556 .connect_cfm = l2cap_connect_cfm,
2557 .disconn_ind = l2cap_disconn_ind,
2558 .security_cfm = l2cap_security_cfm,
2559 .recv_acldata = l2cap_recv_acldata
/* Module init: register the proto, the BT socket family and the HCI
 * protocol hooks, unwinding in reverse order on failure.  The sysfs
 * file is best-effort — failure only logs an error. */
2562 static int __init l2cap_init(void)
2566 err = proto_register(&l2cap_proto, 0);
2570 err = bt_sock_register(BTPROTO_L2CAP, &l2cap_sock_family_ops);
2572 BT_ERR("L2CAP socket registration failed");
2576 err = hci_register_proto(&l2cap_hci_proto);
2578 BT_ERR("L2CAP protocol registration failed");
/* Unwind the socket registration done above. */
2579 bt_sock_unregister(BTPROTO_L2CAP);
2583 if (class_create_file(bt_class, &class_attr_l2cap) < 0)
2584 BT_ERR("Failed to create L2CAP info file");
2586 BT_INFO("L2CAP ver %s", VERSION);
2587 BT_INFO("L2CAP socket layer initialized");
/* Error label target (label line not visible in this extract). */
2592 proto_unregister(&l2cap_proto);
/* Module exit: tear down everything l2cap_init registered, in reverse
 * order, logging (but not failing on) unregister errors. */
2596 static void __exit l2cap_exit(void)
2598 class_remove_file(bt_class, &class_attr_l2cap);
2600 if (bt_sock_unregister(BTPROTO_L2CAP) < 0)
2601 BT_ERR("L2CAP socket unregistration failed");
2603 if (hci_unregister_proto(&l2cap_hci_proto) < 0)
2604 BT_ERR("L2CAP protocol unregistration failed");
2606 proto_unregister(&l2cap_proto);
/* Exported no-op: referencing this symbol lets dependent modules (e.g.
 * RFCOMM) pull in the L2CAP module via the module loader. */
2609 void l2cap_load(void)
2611 /* Dummy function to trigger automatic L2CAP module loading by
2612 * other modules that use L2CAP sockets but don't use any other
2613 * symbols from it. */
2616 EXPORT_SYMBOL(l2cap_load);
/* Standard module metadata; "bt-proto-0" aliases BTPROTO_L2CAP so the
 * kernel can auto-load this module on socket creation. */
2618 module_init(l2cap_init);
2619 module_exit(l2cap_exit);
2621 MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
2622 MODULE_DESCRIPTION("Bluetooth L2CAP ver " VERSION);
2623 MODULE_VERSION(VERSION);
2624 MODULE_LICENSE("GPL");
2625 MODULE_ALIAS("bt-proto-0");