2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
25 /* Bluetooth L2CAP core and sockets. */
27 #include <linux/module.h>
29 #include <linux/types.h>
30 #include <linux/capability.h>
31 #include <linux/errno.h>
32 #include <linux/kernel.h>
33 #include <linux/sched.h>
34 #include <linux/slab.h>
35 #include <linux/poll.h>
36 #include <linux/fcntl.h>
37 #include <linux/init.h>
38 #include <linux/interrupt.h>
39 #include <linux/socket.h>
40 #include <linux/skbuff.h>
41 #include <linux/list.h>
42 #include <linux/device.h>
45 #include <asm/system.h>
46 #include <asm/uaccess.h>
47 #include <asm/unaligned.h>
49 #include <net/bluetooth/bluetooth.h>
50 #include <net/bluetooth/hci_core.h>
51 #include <net/bluetooth/l2cap.h>
53 #define VERSION "2.11"
55 static u32 l2cap_feat_mask = 0x0000;
57 static const struct proto_ops l2cap_sock_ops;
59 static struct bt_sock_list l2cap_sk_list = {
60 .lock = __RW_LOCK_UNLOCKED(l2cap_sk_list.lock)
63 static void __l2cap_sock_close(struct sock *sk, int reason);
64 static void l2cap_sock_close(struct sock *sk);
65 static void l2cap_sock_kill(struct sock *sk);
67 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
68 u8 code, u8 ident, u16 dlen, void *data);
70 /* ---- L2CAP timers ---- */
/* sk_timer callback: the socket's pending operation timed out.
 * arg carries the struct sock pointer (old unsigned-long timer API).
 * A connect that already passed the SDP security stage is reported as
 * ECONNREFUSED; other states use a different reason (elided here). */
71 static void l2cap_sock_timeout(unsigned long arg)
73 	struct sock *sk = (struct sock *) arg;
76 	BT_DBG("sock %p state %d", sk, sk->sk_state);
80 	if (sk->sk_state == BT_CONNECT &&
81 			l2cap_pi(sk)->sec_level != BT_SECURITY_SDP)
82 		reason = ECONNREFUSED;
86 	__l2cap_sock_close(sk, reason);
/* (Re)arm sk->sk_timer to fire 'timeout' jiffies from now. */
94 static void l2cap_sock_set_timer(struct sock *sk, long timeout)
96 	BT_DBG("sk %p state %d timeout %ld", sk, sk->sk_state, timeout);
97 	sk_reset_timer(sk, &sk->sk_timer, jiffies + timeout);
/* Cancel a pending sk->sk_timer, dropping its reference if it was armed. */
100 static void l2cap_sock_clear_timer(struct sock *sk)
102 	BT_DBG("sock %p state %d", sk, sk->sk_state);
103 	sk_stop_timer(sk, &sk->sk_timer);
106 /* ---- L2CAP channels ---- */
/* Linear search of the connection's channel list for a matching
 * destination CID.  Presumably the caller holds l->lock (the locked
 * wrappers below take it) — TODO confirm for this variant. */
107 static struct sock *__l2cap_get_chan_by_dcid(struct l2cap_chan_list *l, u16 cid)
110 	for (s = l->head; s; s = l2cap_pi(s)->next_c) {
111 		if (l2cap_pi(s)->dcid == cid)
/* Linear search of the channel list by source CID; lock held by caller
 * (see l2cap_get_chan_by_scid below). */
117 static struct sock *__l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
120 	for (s = l->head; s; s = l2cap_pi(s)->next_c) {
121 		if (l2cap_pi(s)->scid == cid)
127 /* Find channel with given SCID.
128  * Returns locked socket */
/* Takes the list read lock for the lookup; on a hit the socket is
 * bh-locked before the list lock is dropped, so the caller must
 * bh_unlock_sock() it when done. */
129 static inline struct sock *l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
133 	s = __l2cap_get_chan_by_scid(l, cid);
134 	if (s) bh_lock_sock(s);
135 	read_unlock(&l->lock);
/* Find the channel whose outstanding signalling command used 'ident';
 * lock held by caller (see locked wrapper below). */
139 static struct sock *__l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
142 	for (s = l->head; s; s = l2cap_pi(s)->next_c) {
143 		if (l2cap_pi(s)->ident == ident)
/* Locked variant: returns the matching channel with its socket
 * bh-locked (caller must bh_unlock_sock), or NULL. */
149 static inline struct sock *l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
153 	s = __l2cap_get_chan_by_ident(l, ident);
154 	if (s) bh_lock_sock(s);
155 	read_unlock(&l->lock);
/* Allocate a free source CID for a connection-oriented channel by
 * scanning upward until an unused value below 0xffff is found.
 * (The starting value of 'cid' is initialized in an elided line —
 * dynamically allocated CIDs start at 0x0040 per the L2CAP spec;
 * TODO confirm against the full source.) */
159 static u16 l2cap_alloc_cid(struct l2cap_chan_list *l)
163 	for (; cid < 0xffff; cid++) {
164 		if(!__l2cap_get_chan_by_scid(l, cid))
/* Push sk onto the head of the doubly-linked channel list
 * (prev_c/next_c embedded in l2cap_pinfo); caller holds the list lock. */
171 static inline void __l2cap_chan_link(struct l2cap_chan_list *l, struct sock *sk)
176 		l2cap_pi(l->head)->prev_c = sk;
178 	l2cap_pi(sk)->next_c = l->head;
179 	l2cap_pi(sk)->prev_c = NULL;
/* Remove sk from the connection's channel list, stitching its
 * neighbours together under the list write lock (bh-safe). */
183 static inline void l2cap_chan_unlink(struct l2cap_chan_list *l, struct sock *sk)
185 	struct sock *next = l2cap_pi(sk)->next_c, *prev = l2cap_pi(sk)->prev_c;
187 	write_lock_bh(&l->lock);
192 		l2cap_pi(next)->prev_c = prev;
194 		l2cap_pi(prev)->next_c = next;
195 	write_unlock_bh(&l->lock);
/* Attach a channel socket to a connection: assign CIDs according to the
 * socket type, link it into the connection's channel list, and — for an
 * incoming channel — queue it on the listening parent's accept queue.
 * Caller holds the channel-list write lock (see l2cap_chan_add). */
200 static void __l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
202 	struct l2cap_chan_list *l = &conn->chan_list;
204 	BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn, l2cap_pi(sk)->psm, l2cap_pi(sk)->dcid);
206 	l2cap_pi(sk)->conn = conn;
208 	if (sk->sk_type == SOCK_SEQPACKET) {
209 		/* Alloc CID for connection-oriented socket */
210 		l2cap_pi(sk)->scid = l2cap_alloc_cid(l);
211 	} else if (sk->sk_type == SOCK_DGRAM) {
212 		/* Connectionless socket */
213 		l2cap_pi(sk)->scid = 0x0002;
214 		l2cap_pi(sk)->dcid = 0x0002;
215 		l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
217 		/* Raw socket can send/recv signalling messages only */
218 		l2cap_pi(sk)->scid = 0x0001;
219 		l2cap_pi(sk)->dcid = 0x0001;
220 		l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
223 	__l2cap_chan_link(l, sk);
226 		bt_accept_enqueue(parent, sk);
230  * Must be called on the locked socket. */
/* Tear a channel down: stop its timer, unlink it from the connection
 * (dropping the hci_conn reference taken at attach time), mark the
 * socket closed and zapped, and wake whoever is waiting — the listening
 * parent for an accepted-but-unfinished child, otherwise the socket's
 * own state-change waiters. */
231 static void l2cap_chan_del(struct sock *sk, int err)
233 	struct l2cap_conn *conn = l2cap_pi(sk)->conn;
234 	struct sock *parent = bt_sk(sk)->parent;
236 	l2cap_sock_clear_timer(sk);
238 	BT_DBG("sk %p, conn %p, err %d", sk, conn, err);
241 		/* Unlink from channel list */
242 		l2cap_chan_unlink(&conn->chan_list, sk);
243 		l2cap_pi(sk)->conn = NULL;
244 		hci_conn_put(conn->hcon);
247 	sk->sk_state = BT_CLOSED;
248 	sock_set_flag(sk, SOCK_ZAPPED);
254 		bt_accept_unlink(sk);
255 		parent->sk_data_ready(parent, 0);
257 		sk->sk_state_change(sk);
260 /* Service level security */
/* Ask the HCI layer to enforce this channel's security level on the
 * underlying ACL link; non-zero means the link already satisfies it. */
261 static inline int l2cap_check_security(struct sock *sk)
263 	struct l2cap_conn *conn = l2cap_pi(sk)->conn;
265 	return hci_conn_security(conn->hcon, l2cap_pi(sk)->sec_level);
/* Allocate the next signalling-command identifier for this connection,
 * wrapping within the kernel's 1-128 range under conn->lock. */
268 static inline u8 l2cap_get_ident(struct l2cap_conn *conn)
272 	/* Get next available identificator.
273 	 *    1 - 128 are used by kernel.
274 	 *  129 - 199 are reserved.
275 	 *  200 - 254 are used by utilities like l2ping, etc.
278 	spin_lock_bh(&conn->lock);
280 	if (++conn->tx_ident > 128)
285 	spin_unlock_bh(&conn->lock);
/* Build an L2CAP signalling command skb and hand it to the ACL
 * transmit path.  Returns hci_send_acl()'s result. */
290 static inline int l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
292 	struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
294 	BT_DBG("code 0x%2.2x", code);
299 	return hci_send_acl(conn->hcon, skb, 0);
/* Kick off channel establishment.  If the remote feature mask has
 * already been requested, send the Connection Request (once security is
 * satisfied).  Otherwise first issue an Information Request for the
 * feature mask and arm the info timer; the connect resumes when the
 * response (or timeout) arrives. */
302 static void l2cap_do_start(struct sock *sk)
304 	struct l2cap_conn *conn = l2cap_pi(sk)->conn;
306 	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
307 		if (l2cap_check_security(sk)) {
308 			struct l2cap_conn_req req;
309 			req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
310 			req.psm  = l2cap_pi(sk)->psm;
312 			l2cap_pi(sk)->ident = l2cap_get_ident(conn);
314 			l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
315 					L2CAP_CONN_REQ, sizeof(req), &req);
318 		struct l2cap_info_req req;
319 		req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
321 		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
322 		conn->info_ident = l2cap_get_ident(conn);
324 		mod_timer(&conn->info_timer, jiffies +
325 					msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
327 		l2cap_send_cmd(conn, conn->info_ident,
328 					L2CAP_INFO_REQ, sizeof(req), &req);
332 /* ---- L2CAP connections ---- */
/* Walk every connection-oriented channel on this link and advance its
 * handshake: outgoing channels in BT_CONNECT send their Connection
 * Request; incoming channels in BT_CONNECT2 answer with success,
 * authorization-pending (deferred setup) or authentication-pending,
 * depending on security state. */
333 static void l2cap_conn_start(struct l2cap_conn *conn)
335 	struct l2cap_chan_list *l = &conn->chan_list;
338 	BT_DBG("conn %p", conn);
342 	for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
345 		if (sk->sk_type != SOCK_SEQPACKET) {
350 		if (sk->sk_state == BT_CONNECT) {
351 			if (l2cap_check_security(sk)) {
352 				struct l2cap_conn_req req;
353 				req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
354 				req.psm  = l2cap_pi(sk)->psm;
356 				l2cap_pi(sk)->ident = l2cap_get_ident(conn);
358 				l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
359 					L2CAP_CONN_REQ, sizeof(req), &req);
361 		} else if (sk->sk_state == BT_CONNECT2) {
362 			struct l2cap_conn_rsp rsp;
363 			rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
364 			rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
366 			if (l2cap_check_security(sk)) {
367 				if (bt_sk(sk)->defer_setup) {
368 					struct sock *parent = bt_sk(sk)->parent;
369 					rsp.result = cpu_to_le16(L2CAP_CR_PEND);
370 					rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
371 					parent->sk_data_ready(parent, 0);
374 					sk->sk_state = BT_CONFIG;
375 					rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
376 					rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
379 				rsp.result = cpu_to_le16(L2CAP_CR_PEND);
380 				rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
383 			l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
384 					L2CAP_CONN_RSP, sizeof(rsp), &rsp);
390 	read_unlock(&l->lock);
/* ACL link is up: mark non-SEQPACKET (raw/dgram) channels connected
 * immediately; connection-oriented channels in BT_CONNECT continue
 * their L2CAP-level handshake (call elided in this view). */
393 static void l2cap_conn_ready(struct l2cap_conn *conn)
395 	struct l2cap_chan_list *l = &conn->chan_list;
398 	BT_DBG("conn %p", conn);
402 	for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
405 		if (sk->sk_type != SOCK_SEQPACKET) {
406 			l2cap_sock_clear_timer(sk);
407 			sk->sk_state = BT_CONNECTED;
408 			sk->sk_state_change(sk);
409 		} else if (sk->sk_state == BT_CONNECT)
415 	read_unlock(&l->lock);
418 /* Notify sockets that we cannot guaranty reliability anymore */
/* Walk the channel list and flag the error on sockets that demanded
 * reliable delivery (force_reliable). */
419 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
421 	struct l2cap_chan_list *l = &conn->chan_list;
424 	BT_DBG("conn %p", conn);
428 	for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
429 		if (l2cap_pi(sk)->force_reliable)
433 	read_unlock(&l->lock);
/* Information Request timed out: give up waiting for the remote
 * feature mask and resume any channels that were blocked on it. */
436 static void l2cap_info_timeout(unsigned long arg)
438 	struct l2cap_conn *conn = (void *) arg;
440 	conn->info_ident = 0;
442 	l2cap_conn_start(conn);
/* Create (or reuse) the per-ACL-link L2CAP connection object: allocate
 * it atomically, hang it off the hci_conn, record link MTU and
 * addresses, and initialise the info timer and the locks protecting
 * the ident counter and channel list. */
445 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
447 	struct l2cap_conn *conn = hcon->l2cap_data;
452 	conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
456 	hcon->l2cap_data = conn;
459 	BT_DBG("hcon %p conn %p", hcon, conn);
461 	conn->mtu = hcon->hdev->acl_mtu;
462 	conn->src = &hcon->hdev->bdaddr;
463 	conn->dst = &hcon->dst;
467 	setup_timer(&conn->info_timer, l2cap_info_timeout,
468 						(unsigned long) conn);
470 	spin_lock_init(&conn->lock);
471 	rwlock_init(&conn->chan_list.lock);
/* Destroy the L2CAP connection when the ACL link goes down: free any
 * partial reassembly skb, close every remaining channel with 'err',
 * cancel the info timer if it was ever armed, and detach from hcon. */
476 static void l2cap_conn_del(struct hci_conn *hcon, int err)
478 	struct l2cap_conn *conn = hcon->l2cap_data;
484 	BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
487 	kfree_skb(conn->rx_skb);
490 	while ((sk = conn->chan_list.head)) {
492 		l2cap_chan_del(sk, err);
497 	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
498 		del_timer_sync(&conn->info_timer);
500 	hcon->l2cap_data = NULL;
/* Locked wrapper around __l2cap_chan_add: takes the channel-list
 * write lock (bh-safe) for the attach. */
504 static inline void l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
506 	struct l2cap_chan_list *l = &conn->chan_list;
507 	write_lock_bh(&l->lock);
508 	__l2cap_chan_add(conn, sk, parent);
509 	write_unlock_bh(&l->lock);
512 /* ---- Socket interface ---- */
/* Exact-match lookup in the global socket list: bound source PSM and
 * source bdaddr must both match.  Caller holds l2cap_sk_list.lock. */
513 static struct sock *__l2cap_get_sock_by_addr(__le16 psm, bdaddr_t *src)
516 	struct hlist_node *node;
517 	sk_for_each(sk, node, &l2cap_sk_list.head)
518 		if (l2cap_pi(sk)->sport == psm && !bacmp(&bt_sk(sk)->src, src))
525 /* Find socket with psm and source bdaddr.
526  * Returns closest match.
/* Two-tier match: a socket bound to the exact source address wins
 * outright; one bound to BDADDR_ANY is remembered in sk1 as fallback.
 * On loop exhaustion 'node' is NULL, so the fallback is returned. */
528 static struct sock *__l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
530 	struct sock *sk = NULL, *sk1 = NULL;
531 	struct hlist_node *node;
533 	sk_for_each(sk, node, &l2cap_sk_list.head) {
534 		if (state && sk->sk_state != state)
537 		if (l2cap_pi(sk)->psm == psm) {
539 			if (!bacmp(&bt_sk(sk)->src, src))
543 			if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
547 	return node ? sk : sk1;
550 /* Find socket with given address (psm, src).
551  * Returns locked socket */
/* Locked wrapper: performs the PSM lookup under the global list read
 * lock and returns the socket bh-locked (caller unlocks). */
552 static inline struct sock *l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
555 	read_lock(&l2cap_sk_list.lock);
556 	s = __l2cap_get_sock_by_psm(state, psm, src);
557 	if (s) bh_lock_sock(s);
558 	read_unlock(&l2cap_sk_list.lock);
/* sk_destruct callback: drop any skbs still queued for receive or
 * transmit before the socket memory is freed. */
562 static void l2cap_sock_destruct(struct sock *sk)
566 	skb_queue_purge(&sk->sk_receive_queue);
567 	skb_queue_purge(&sk->sk_write_queue);
/* Shut down a listening socket: close every child still sitting on the
 * accept queue, then mark the parent closed and zapped. */
570 static void l2cap_sock_cleanup_listen(struct sock *parent)
574 	BT_DBG("parent %p", parent);
576 	/* Close not yet accepted channels */
577 	while ((sk = bt_accept_dequeue(parent, NULL)))
578 		l2cap_sock_close(sk);
580 	parent->sk_state = BT_CLOSED;
581 	sock_set_flag(parent, SOCK_ZAPPED);
584 /* Kill socket (only if zapped and orphan)
585  * Must be called on unlocked socket.
/* Final teardown: bail unless the socket is both zapped and detached
 * from any struct socket; otherwise unlink it from the global list and
 * mark it dead (the final sock_put is elided in this view). */
587 static void l2cap_sock_kill(struct sock *sk)
589 	if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
592 	BT_DBG("sk %p state %d", sk, sk->sk_state);
594 	/* Kill poor orphan */
595 	bt_sock_unlink(&l2cap_sk_list, sk);
596 	sock_set_flag(sk, SOCK_DEAD);
/* State-machine close, called with the socket locked:
 *  - LISTEN: tear down the accept queue;
 *  - connected SEQPACKET: enter BT_DISCONN, arm the send timer and
 *    issue a Disconnection Request (other types just delete the chan);
 *  - CONNECT2 SEQPACKET: reject the pending Connection Request with
 *    SEC_BLOCK (deferred setup) or BAD_PSM, then delete the channel;
 *  - other connecting states: delete the channel;
 *  - anything else: just zap the socket. */
600 static void __l2cap_sock_close(struct sock *sk, int reason)
602 	BT_DBG("sk %p state %d socket %p", sk, sk->sk_state, sk->sk_socket);
604 	switch (sk->sk_state) {
606 		l2cap_sock_cleanup_listen(sk);
611 		if (sk->sk_type == SOCK_SEQPACKET) {
612 			struct l2cap_conn *conn = l2cap_pi(sk)->conn;
613 			struct l2cap_disconn_req req;
615 			sk->sk_state = BT_DISCONN;
616 			l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
618 			req.dcid = cpu_to_le16(l2cap_pi(sk)->dcid);
619 			req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
620 			l2cap_send_cmd(conn, l2cap_get_ident(conn),
621 					L2CAP_DISCONN_REQ, sizeof(req), &req);
623 			l2cap_chan_del(sk, reason);
627 		if (sk->sk_type == SOCK_SEQPACKET) {
628 			struct l2cap_conn *conn = l2cap_pi(sk)->conn;
629 			struct l2cap_conn_rsp rsp;
632 			if (bt_sk(sk)->defer_setup)
633 				result = L2CAP_CR_SEC_BLOCK;
635 				result = L2CAP_CR_BAD_PSM;
637 			rsp.scid   = cpu_to_le16(l2cap_pi(sk)->dcid);
638 			rsp.dcid   = cpu_to_le16(l2cap_pi(sk)->scid);
639 			rsp.result = cpu_to_le16(result);
640 			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
641 			l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
642 					L2CAP_CONN_RSP, sizeof(rsp), &rsp);
644 		l2cap_chan_del(sk, reason);
649 		l2cap_chan_del(sk, reason);
653 		sock_set_flag(sk, SOCK_ZAPPED);
658 /* Must be called on unlocked socket. */
/* Public close path: cancel the socket timer, then run the locked
 * close state machine with ECONNRESET (locking elided in this view). */
659 static void l2cap_sock_close(struct sock *sk)
661 	l2cap_sock_clear_timer(sk);
663 	__l2cap_sock_close(sk, ECONNRESET);
/* Initialise a fresh l2cap socket.  With a listening parent, inherit
 * its type, defer_setup flag, MTUs, security level and link-policy
 * settings; otherwise fall back to protocol defaults. */
668 static void l2cap_sock_init(struct sock *sk, struct sock *parent)
670 	struct l2cap_pinfo *pi = l2cap_pi(sk);
675 		sk->sk_type = parent->sk_type;
676 		bt_sk(sk)->defer_setup = bt_sk(parent)->defer_setup;
678 		pi->imtu = l2cap_pi(parent)->imtu;
679 		pi->omtu = l2cap_pi(parent)->omtu;
680 		pi->sec_level = l2cap_pi(parent)->sec_level;
681 		pi->role_switch = l2cap_pi(parent)->role_switch;
682 		pi->force_reliable = l2cap_pi(parent)->force_reliable;
684 		pi->imtu = L2CAP_DEFAULT_MTU;
686 		pi->sec_level = BT_SECURITY_LOW;
688 		pi->force_reliable = 0;
691 	/* Default config options */
693 	pi->flush_to = L2CAP_DEFAULT_FLUSH_TO;
/* Protocol descriptor registered with the socket layer; obj_size makes
 * sk_alloc reserve room for the l2cap_pinfo tail. */
696 static struct proto l2cap_proto = {
698 	.owner		= THIS_MODULE,
699 	.obj_size	= sizeof(struct l2cap_pinfo)
/* Allocate and minimally initialise an L2CAP struct sock: generic sock
 * init, accept-queue head, destructor, default connect timeout, state
 * BT_OPEN, the per-socket timer, and linkage into the global list. */
702 static struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock, int proto, gfp_t prio)
706 	sk = sk_alloc(net, PF_BLUETOOTH, prio, &l2cap_proto);
710 	sock_init_data(sock, sk);
711 	INIT_LIST_HEAD(&bt_sk(sk)->accept_q);
713 	sk->sk_destruct = l2cap_sock_destruct;
714 	sk->sk_sndtimeo = msecs_to_jiffies(L2CAP_CONN_TIMEOUT);
716 	sock_reset_flag(sk, SOCK_ZAPPED);
718 	sk->sk_protocol = proto;
719 	sk->sk_state = BT_OPEN;
721 	setup_timer(&sk->sk_timer, l2cap_sock_timeout, (unsigned long) sk);
723 	bt_sock_link(&l2cap_sk_list, sk);
/* socket(2) backend for BTPROTO_L2CAP: only SEQPACKET, DGRAM and RAW
 * types are supported, RAW needs CAP_NET_RAW; allocate and initialise
 * the sock with no parent. */
727 static int l2cap_sock_create(struct net *net, struct socket *sock, int protocol)
731 	BT_DBG("sock %p", sock);
733 	sock->state = SS_UNCONNECTED;
735 	if (sock->type != SOCK_SEQPACKET &&
736 			sock->type != SOCK_DGRAM && sock->type != SOCK_RAW)
737 		return -ESOCKTNOSUPPORT;
739 	if (sock->type == SOCK_RAW && !capable(CAP_NET_RAW))
742 	sock->ops = &l2cap_sock_ops;
744 	sk = l2cap_sock_alloc(net, sock, protocol, GFP_ATOMIC);
748 	l2cap_sock_init(sk, NULL);
/* bind(2): validate the address and state, require
 * CAP_NET_BIND_SERVICE for reserved PSMs (< 0x1001), reject a PSM/src
 * pair already bound elsewhere, then record source address and PSM and
 * move to BT_BOUND.  Binding the SDP PSM (0x0001) implies the SDP
 * security level. */
752 static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
754 	struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
755 	struct sock *sk = sock->sk;
758 	BT_DBG("sk %p, %s %d", sk, batostr(&la->l2_bdaddr), la->l2_psm);
760 	if (!addr || addr->sa_family != AF_BLUETOOTH)
765 	if (sk->sk_state != BT_OPEN) {
770 	if (la->l2_psm && btohs(la->l2_psm) < 0x1001 &&
771 				!capable(CAP_NET_BIND_SERVICE)) {
776 	write_lock_bh(&l2cap_sk_list.lock);
778 	if (la->l2_psm && __l2cap_get_sock_by_addr(la->l2_psm, &la->l2_bdaddr)) {
781 		/* Save source address */
782 		bacpy(&bt_sk(sk)->src, &la->l2_bdaddr);
783 		l2cap_pi(sk)->psm   = la->l2_psm;
784 		l2cap_pi(sk)->sport = la->l2_psm;
785 		sk->sk_state = BT_BOUND;
787 		if (btohs(la->l2_psm) == 0x0001)
788 			l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
791 	write_unlock_bh(&l2cap_sk_list.lock);
/* Establish the outgoing link: find a routeable HCI device, pick the
 * HCI authentication type from socket type / PSM / security level
 * (dedicated bonding for raw sockets, no bonding for SDP, general
 * bonding otherwise; *_MITM for BT_SECURITY_HIGH), create the ACL
 * connection, attach the channel, and either finish immediately if the
 * ACL is already up or wait on the socket timer. */
798 static int l2cap_do_connect(struct sock *sk)
800 	bdaddr_t *src = &bt_sk(sk)->src;
801 	bdaddr_t *dst = &bt_sk(sk)->dst;
802 	struct l2cap_conn *conn;
803 	struct hci_conn *hcon;
804 	struct hci_dev *hdev;
808 	BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst), l2cap_pi(sk)->psm);
810 	if (!(hdev = hci_get_route(dst, src)))
811 		return -EHOSTUNREACH;
813 	hci_dev_lock_bh(hdev);
817 	if (sk->sk_type == SOCK_RAW) {
818 		switch (l2cap_pi(sk)->sec_level) {
819 		case BT_SECURITY_HIGH:
820 			auth_type = HCI_AT_DEDICATED_BONDING_MITM;
822 		case BT_SECURITY_MEDIUM:
823 			auth_type = HCI_AT_DEDICATED_BONDING;
826 			auth_type = HCI_AT_NO_BONDING;
829 	} else if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
830 		if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
831 			auth_type = HCI_AT_NO_BONDING_MITM;
833 			auth_type = HCI_AT_NO_BONDING;
835 		switch (l2cap_pi(sk)->sec_level) {
836 		case BT_SECURITY_HIGH:
837 			auth_type = HCI_AT_GENERAL_BONDING_MITM;
839 		case BT_SECURITY_MEDIUM:
840 			auth_type = HCI_AT_GENERAL_BONDING;
843 			auth_type = HCI_AT_NO_BONDING;
848 	hcon = hci_connect(hdev, ACL_LINK, dst,
849 					l2cap_pi(sk)->sec_level, auth_type);
853 	conn = l2cap_conn_add(hcon, 0);
861 	/* Update source addr of the socket */
862 	bacpy(src, conn->src);
864 	l2cap_chan_add(conn, sk, NULL);
866 	sk->sk_state = BT_CONNECT;
867 	l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
869 	if (hcon->state == BT_CONNECTED) {
870 		if (sk->sk_type != SOCK_SEQPACKET) {
871 			l2cap_sock_clear_timer(sk);
872 			sk->sk_state = BT_CONNECTED;
878 	hci_dev_unlock_bh(hdev);
/* connect(2): validate family/length, require a PSM for SEQPACKET,
 * check current state (already connecting / connected cases elided),
 * record the destination, start the connect, and block until
 * BT_CONNECTED subject to the send timeout and O_NONBLOCK. */
883 static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, int alen, int flags)
885 	struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
886 	struct sock *sk = sock->sk;
893 	if (addr->sa_family != AF_BLUETOOTH || alen < sizeof(struct sockaddr_l2)) {
898 	if (sk->sk_type == SOCK_SEQPACKET && !la->l2_psm) {
903 	switch(sk->sk_state) {
907 		/* Already connecting */
911 		/* Already connected */
924 	/* Set destination address and psm */
925 	bacpy(&bt_sk(sk)->dst, &la->l2_bdaddr);
926 	l2cap_pi(sk)->psm = la->l2_psm;
928 	if ((err = l2cap_do_connect(sk)))
932 	err = bt_sock_wait_state(sk, BT_CONNECTED,
933 			sock_sndtimeo(sk, flags & O_NONBLOCK));
/* listen(2): only bound SEQPACKET sockets may listen.  If no PSM was
 * bound, auto-allocate an odd dynamic PSM in 0x1001-0x10ff under the
 * global list lock, then set the backlog and enter BT_LISTEN. */
939 static int l2cap_sock_listen(struct socket *sock, int backlog)
941 	struct sock *sk = sock->sk;
944 	BT_DBG("sk %p backlog %d", sk, backlog);
948 	if (sk->sk_state != BT_BOUND || sock->type != SOCK_SEQPACKET) {
953 	if (!l2cap_pi(sk)->psm) {
954 		bdaddr_t *src = &bt_sk(sk)->src;
959 		write_lock_bh(&l2cap_sk_list.lock);
961 		for (psm = 0x1001; psm < 0x1100; psm += 2)
962 			if (!__l2cap_get_sock_by_addr(htobs(psm), src)) {
963 				l2cap_pi(sk)->psm   = htobs(psm);
964 				l2cap_pi(sk)->sport = htobs(psm);
969 		write_unlock_bh(&l2cap_sk_list.lock);
975 	sk->sk_max_ack_backlog = backlog;
976 	sk->sk_ack_backlog = 0;
977 	sk->sk_state = BT_LISTEN;
/* accept(2): sleep (wake-one, interruptible) on the listener until a
 * child appears on the accept queue, honouring the receive timeout,
 * signals, and a listener state change; on success hand the new socket
 * back in SS_CONNECTED state.  Nested lock class avoids lockdep
 * false positives between parent and child socket locks. */
984 static int l2cap_sock_accept(struct socket *sock, struct socket *newsock, int flags)
986 	DECLARE_WAITQUEUE(wait, current);
987 	struct sock *sk = sock->sk, *nsk;
991 	lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
993 	if (sk->sk_state != BT_LISTEN) {
998 	timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
1000 	BT_DBG("sk %p timeo %ld", sk, timeo);
1002 	/* Wait for an incoming connection. (wake-one). */
1003 	add_wait_queue_exclusive(sk->sk_sleep, &wait);
1004 	while (!(nsk = bt_accept_dequeue(sk, newsock))) {
1005 		set_current_state(TASK_INTERRUPTIBLE);
1012 		timeo = schedule_timeout(timeo);
1013 		lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
1015 		if (sk->sk_state != BT_LISTEN) {
1020 		if (signal_pending(current)) {
1021 			err = sock_intr_errno(timeo);
1025 	set_current_state(TASK_RUNNING);
1026 	remove_wait_queue(sk->sk_sleep, &wait);
1031 	newsock->state = SS_CONNECTED;
1033 	BT_DBG("new socket %p", nsk);
/* getsockname/getpeername: fill a sockaddr_l2 with the peer address
 * (peer != 0) or the local address, plus the channel PSM. */
1040 static int l2cap_sock_getname(struct socket *sock, struct sockaddr *addr, int *len, int peer)
1042 	struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
1043 	struct sock *sk = sock->sk;
1045 	BT_DBG("sock %p, sk %p", sock, sk);
1047 	addr->sa_family = AF_BLUETOOTH;
1048 	*len = sizeof(struct sockaddr_l2);
1051 		bacpy(&la->l2_bdaddr, &bt_sk(sk)->dst);
1053 		bacpy(&la->l2_bdaddr, &bt_sk(sk)->src);
1055 	la->l2_psm = l2cap_pi(sk)->psm;
/* Fragment a user datagram into ACL-MTU-sized skbs and transmit.
 * The first fragment carries the L2CAP basic header (CID + length),
 * plus a 2-byte PSM for connectionless (SOCK_DGRAM) channels;
 * continuation fragments are chained on frag_list with no header.
 * Returns bytes sent or a negative errno. */
1059 static inline int l2cap_do_send(struct sock *sk, struct msghdr *msg, int len)
1061 	struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1062 	struct sk_buff *skb, **frag;
1063 	int err, hlen, count, sent=0;
1064 	struct l2cap_hdr *lh;
1066 	BT_DBG("sk %p len %d", sk, len);
1068 	/* First fragment (with L2CAP header) */
1069 	if (sk->sk_type == SOCK_DGRAM)
1070 		hlen = L2CAP_HDR_SIZE + 2;
1072 		hlen = L2CAP_HDR_SIZE;
1074 	count = min_t(unsigned int, (conn->mtu - hlen), len);
1076 	skb = bt_skb_send_alloc(sk, hlen + count,
1077 			msg->msg_flags & MSG_DONTWAIT, &err);
1081 	/* Create L2CAP header */
1082 	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1083 	lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1084 	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1086 	if (sk->sk_type == SOCK_DGRAM)
1087 		put_unaligned(l2cap_pi(sk)->psm, (__le16 *) skb_put(skb, 2));
1089 	if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count)) {
1097 	/* Continuation fragments (no L2CAP header) */
1098 	frag = &skb_shinfo(skb)->frag_list;
1100 		count = min_t(unsigned int, conn->mtu, len);
1102 		*frag = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err);
1106 		if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count)) {
1114 		frag = &(*frag)->next;
1117 	if ((err = hci_send_acl(conn->hcon, skb, 0)) < 0)
/* sendmsg(2): propagate pending socket errors, reject MSG_OOB, enforce
 * the negotiated outgoing MTU for non-raw sockets, and transmit only
 * when the channel is fully connected. */
1127 static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len)
1129 	struct sock *sk = sock->sk;
1132 	BT_DBG("sock %p, sk %p", sock, sk);
1134 	err = sock_error(sk);
1138 	if (msg->msg_flags & MSG_OOB)
1141 	/* Check outgoing MTU */
1142 	if (sk->sk_type != SOCK_RAW && len > l2cap_pi(sk)->omtu)
1147 	if (sk->sk_state == BT_CONNECTED)
1148 		err = l2cap_do_send(sk, msg, len);
/* recvmsg(2): for a deferred-setup channel the first read acts as the
 * application's accept — complete the pending Connection Response with
 * SUCCESS and move to BT_CONFIG; then fall through to the generic
 * Bluetooth receive path. */
1156 static int l2cap_sock_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len, int flags)
1158 	struct sock *sk = sock->sk;
1162 	if (sk->sk_state == BT_CONNECT2 && bt_sk(sk)->defer_setup) {
1163 		struct l2cap_conn_rsp rsp;
1165 		sk->sk_state = BT_CONFIG;
1167 		rsp.scid   = cpu_to_le16(l2cap_pi(sk)->dcid);
1168 		rsp.dcid   = cpu_to_le16(l2cap_pi(sk)->scid);
1169 		rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
1170 		rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
1171 		l2cap_send_cmd(l2cap_pi(sk)->conn, l2cap_pi(sk)->ident,
1172 					L2CAP_CONN_RSP, sizeof(rsp), &rsp);
1180 	return bt_sock_recvmsg(iocb, sock, msg, len, flags);
/* Legacy SOL_L2CAP setsockopt handler.
 * L2CAP_OPTIONS: copy a (possibly truncated) l2cap_options from user
 * space on top of current values and apply imtu/omtu.
 * L2CAP_LM: map the link-mode bitmask onto a security level (highest
 * matching bit wins since later assignments override) and set the
 * role-switch / force-reliable flags. */
1183 static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __user *optval, int optlen)
1185 	struct sock *sk = sock->sk;
1186 	struct l2cap_options opts;
1190 	BT_DBG("sk %p", sk);
1196 		opts.imtu     = l2cap_pi(sk)->imtu;
1197 		opts.omtu     = l2cap_pi(sk)->omtu;
1198 		opts.flush_to = l2cap_pi(sk)->flush_to;
1199 		opts.mode     = L2CAP_MODE_BASIC;
1201 		len = min_t(unsigned int, sizeof(opts), optlen);
1202 		if (copy_from_user((char *) &opts, optval, len)) {
1207 		l2cap_pi(sk)->imtu = opts.imtu;
1208 		l2cap_pi(sk)->omtu = opts.omtu;
1212 		if (get_user(opt, (u32 __user *) optval)) {
1217 		if (opt & L2CAP_LM_AUTH)
1218 			l2cap_pi(sk)->sec_level = BT_SECURITY_LOW;
1219 		if (opt & L2CAP_LM_ENCRYPT)
1220 			l2cap_pi(sk)->sec_level = BT_SECURITY_MEDIUM;
1221 		if (opt & L2CAP_LM_SECURE)
1222 			l2cap_pi(sk)->sec_level = BT_SECURITY_HIGH;
1224 		l2cap_pi(sk)->role_switch    = (opt & L2CAP_LM_MASTER);
1225 		l2cap_pi(sk)->force_reliable = (opt & L2CAP_LM_RELIABLE);
/* SOL_BLUETOOTH setsockopt; SOL_L2CAP is routed to the legacy handler.
 * BT_SECURITY: validate and store the requested security level.
 * BT_DEFER_SETUP: only meaningful before/while listening; stores the
 * flag that delays the Connection Response until first recvmsg. */
1237 static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, int optlen)
1239 	struct sock *sk = sock->sk;
1240 	struct bt_security sec;
1244 	BT_DBG("sk %p", sk);
1246 	if (level == SOL_L2CAP)
1247 		return l2cap_sock_setsockopt_old(sock, optname, optval, optlen);
1253 		sec.level = BT_SECURITY_LOW;
1255 		len = min_t(unsigned int, sizeof(sec), optlen);
1256 		if (copy_from_user((char *) &sec, optval, len)) {
1261 		if (sec.level < BT_SECURITY_LOW ||
1262 					sec.level > BT_SECURITY_HIGH) {
1267 		l2cap_pi(sk)->sec_level = sec.level;
1270 	case BT_DEFER_SETUP:
1271 		if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
1276 		if (get_user(opt, (u32 __user *) optval)) {
1281 		bt_sk(sk)->defer_setup = opt;
/* Legacy SOL_L2CAP getsockopt handler.
 * L2CAP_OPTIONS: report current MTUs/flush timeout, mode always BASIC.
 * L2CAP_LM: reconstruct the link-mode bitmask from the stored security
 * level and the role-switch / force-reliable flags.
 * L2CAP_CONNINFO: only valid once connected (or CONNECT2 with deferred
 * setup); returns the ACL handle and remote device class. */
1293 static int l2cap_sock_getsockopt_old(struct socket *sock, int optname, char __user *optval, int __user *optlen)
1295 	struct sock *sk = sock->sk;
1296 	struct l2cap_options opts;
1297 	struct l2cap_conninfo cinfo;
1301 	BT_DBG("sk %p", sk);
1303 	if (get_user(len, optlen))
1310 		opts.imtu     = l2cap_pi(sk)->imtu;
1311 		opts.omtu     = l2cap_pi(sk)->omtu;
1312 		opts.flush_to = l2cap_pi(sk)->flush_to;
1313 		opts.mode     = L2CAP_MODE_BASIC;
1315 		len = min_t(unsigned int, len, sizeof(opts));
1316 		if (copy_to_user(optval, (char *) &opts, len))
1322 		switch (l2cap_pi(sk)->sec_level) {
1323 		case BT_SECURITY_LOW:
1324 			opt = L2CAP_LM_AUTH;
1326 		case BT_SECURITY_MEDIUM:
1327 			opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT;
1329 		case BT_SECURITY_HIGH:
1330 			opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT |
1338 		if (l2cap_pi(sk)->role_switch)
1339 			opt |= L2CAP_LM_MASTER;
1341 		if (l2cap_pi(sk)->force_reliable)
1342 			opt |= L2CAP_LM_RELIABLE;
1344 		if (put_user(opt, (u32 __user *) optval))
1348 	case L2CAP_CONNINFO:
1349 		if (sk->sk_state != BT_CONNECTED &&
1350 					!(sk->sk_state == BT_CONNECT2 &&
1351 						bt_sk(sk)->defer_setup)) {
1356 		cinfo.hci_handle = l2cap_pi(sk)->conn->hcon->handle;
1357 		memcpy(cinfo.dev_class, l2cap_pi(sk)->conn->hcon->dev_class, 3);
1359 		len = min_t(unsigned int, len, sizeof(cinfo));
1360 		if (copy_to_user(optval, (char *) &cinfo, len))
/* SOL_BLUETOOTH getsockopt; SOL_L2CAP goes to the legacy handler.
 * BT_SECURITY reports the stored security level; BT_DEFER_SETUP is
 * readable only while bound/listening. */
1374 static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen)
1376 	struct sock *sk = sock->sk;
1377 	struct bt_security sec;
1380 	BT_DBG("sk %p", sk);
1382 	if (level == SOL_L2CAP)
1383 		return l2cap_sock_getsockopt_old(sock, optname, optval, optlen);
1385 	if (get_user(len, optlen))
1392 		sec.level = l2cap_pi(sk)->sec_level;
1394 		len = min_t(unsigned int, len, sizeof(sec));
1395 		if (copy_to_user(optval, (char *) &sec, len))
1400 	case BT_DEFER_SETUP:
1401 		if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
1406 		if (put_user(bt_sk(sk)->defer_setup, (u32 __user *) optval))
/* shutdown(2): on first call, mark both directions shut, cancel the
 * timer and run the close state machine; with SO_LINGER set, wait for
 * the channel to reach BT_CLOSED up to the linger time. */
1420 static int l2cap_sock_shutdown(struct socket *sock, int how)
1422 	struct sock *sk = sock->sk;
1425 	BT_DBG("sock %p, sk %p", sock, sk);
1431 	if (!sk->sk_shutdown) {
1432 		sk->sk_shutdown = SHUTDOWN_MASK;
1433 		l2cap_sock_clear_timer(sk);
1434 		__l2cap_sock_close(sk, 0);
1436 		if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
1437 			err = bt_sock_wait_state(sk, BT_CLOSED,
/* release(2)/close: shut the socket down both ways, then kill it
 * (the kill only completes once the socket is zapped and orphaned). */
1444 static int l2cap_sock_release(struct socket *sock)
1446 	struct sock *sk = sock->sk;
1449 	BT_DBG("sock %p, sk %p", sock, sk);
1454 	err = l2cap_sock_shutdown(sock, 2);
1457 	l2cap_sock_kill(sk);
/* Configuration complete: clear config state and timer, then wake the
 * right waiter — the connect()er for an outgoing channel, or the
 * listening parent's accept queue for an incoming one. */
1461 static void l2cap_chan_ready(struct sock *sk)
1463 	struct sock *parent = bt_sk(sk)->parent;
1465 	BT_DBG("sk %p, parent %p", sk, parent);
1467 	l2cap_pi(sk)->conf_state = 0;
1468 	l2cap_sock_clear_timer(sk);
1471 		/* Outgoing channel.
1472 		 * Wake up socket sleeping on connect.
1474 		sk->sk_state = BT_CONNECTED;
1475 		sk->sk_state_change(sk);
1477 		/* Incoming channel.
1478 		 * Wake up socket sleeping on accept.
1480 		parent->sk_data_ready(parent, 0);
1484 /* Copy frame to all raw sockets on that connection */
/* Clone the incoming frame to every SOCK_RAW channel on this link
 * (except the originator), dropping the clone if the receive queue
 * rejects it. */
1485 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
1487 	struct l2cap_chan_list *l = &conn->chan_list;
1488 	struct sk_buff *nskb;
1491 	BT_DBG("conn %p", conn);
1493 	read_lock(&l->lock);
1494 	for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
1495 		if (sk->sk_type != SOCK_RAW)
1498 		/* Don't send frame to the socket it came from */
1502 		if (!(nskb = skb_clone(skb, GFP_ATOMIC)))
1505 		if (sock_queue_rcv_skb(sk, nskb))
1508 	read_unlock(&l->lock);
1511 /* ---- L2CAP signalling commands ---- */
/* Build a signalling-channel (CID 0x0001) skb: L2CAP basic header,
 * command header (code/ident/len), then the payload — fragmented onto
 * frag_list when it exceeds the link MTU.  Returns the skb or NULL on
 * allocation failure (error path elided in this view). */
1512 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
1513 					u8 code, u8 ident, u16 dlen, void *data)
1515 	struct sk_buff *skb, **frag;
1516 	struct l2cap_cmd_hdr *cmd;
1517 	struct l2cap_hdr *lh;
1520 	BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d", conn, code, ident, dlen);
1522 	len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
1523 	count = min_t(unsigned int, conn->mtu, len);
1525 	skb = bt_skb_alloc(count, GFP_ATOMIC);
1529 	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1530 	lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
1531 	lh->cid = cpu_to_le16(0x0001);
1533 	cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
1536 	cmd->len   = cpu_to_le16(dlen);
1539 		count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
1540 		memcpy(skb_put(skb, count), data, count);
1546 	/* Continuation fragments (no L2CAP header) */
1547 	frag = &skb_shinfo(skb)->frag_list;
1549 		count = min_t(unsigned int, conn->mtu, len);
1551 		*frag = bt_skb_alloc(count, GFP_ATOMIC);
1555 		memcpy(skb_put(*frag, count), data, count);
1560 		frag = &(*frag)->next;
/* Decode one configuration option at *ptr: extract type and length,
 * read the value as u8/le16/le32 by size or return a pointer for
 * larger options, and (in elided code) advance *ptr; the returned
 * length lets the caller track remaining bytes. */
1570 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
1572 	struct l2cap_conf_opt *opt = *ptr;
1575 	len = L2CAP_CONF_OPT_SIZE + opt->len;
1583 		*val = *((u8 *) opt->val);
1587 		*val = __le16_to_cpu(*((__le16 *) opt->val));
1591 		*val = __le32_to_cpu(*((__le32 *) opt->val));
1595 		*val = (unsigned long) opt->val;
1599 	BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
/* Append one configuration option at *ptr, encoding the value as
 * u8/le16/le32 by length or memcpy'ing larger payloads from the
 * pointer passed in 'val'; advances *ptr past the written option. */
1603 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
1605 	struct l2cap_conf_opt *opt = *ptr;
1607 	BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
1614 		*((u8 *) opt->val)  = val;
1618 		*((__le16 *) opt->val) = cpu_to_le16(val);
1622 		*((__le32 *) opt->val) = cpu_to_le32(val);
1626 		memcpy(opt->val, (void *) val, len);
1630 	*ptr += L2CAP_CONF_OPT_SIZE + len;
/* Build our Configuration Request into 'data': advertise a non-default
 * incoming MTU if set, fill the header (dcid, no continuation flag),
 * and return the total length (elided in this view). */
1633 static int l2cap_build_conf_req(struct sock *sk, void *data)
1635 	struct l2cap_pinfo *pi = l2cap_pi(sk);
1636 	struct l2cap_conf_req *req = data;
1637 	void *ptr = req->data;
1639 	BT_DBG("sk %p", sk);
1641 	if (pi->imtu != L2CAP_DEFAULT_MTU)
1642 		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);
1644 	/* FIXME: Need actual value of the flush timeout */
1645 	//if (flush_to != L2CAP_DEFAULT_FLUSH_TO)
1646 	//   l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2, pi->flush_to);
1648 	req->dcid  = cpu_to_le16(pi->dcid);
1649 	req->flags = cpu_to_le16(0);
/* Parse the peer's buffered Configuration Request (pi->conf_req /
 * conf_len) and build our response into 'data'.  Known options (MTU,
 * flush timeout, QoS, RFC) are collected; unknown non-hint options are
 * echoed back with result UNKNOWN.  If all options parsed, accept only
 * basic mode: a basic-mode request is acknowledged with our outgoing
 * MTU, anything else is rejected with an RFC option forcing basic. */
1654 static int l2cap_parse_conf_req(struct sock *sk, void *data)
1656 	struct l2cap_pinfo *pi = l2cap_pi(sk);
1657 	struct l2cap_conf_rsp *rsp = data;
1658 	void *ptr = rsp->data;
1659 	void *req = pi->conf_req;
1660 	int len = pi->conf_len;
1661 	int type, hint, olen;
1663 	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
1664 	u16 mtu = L2CAP_DEFAULT_MTU;
1665 	u16 result = L2CAP_CONF_SUCCESS;
1667 	BT_DBG("sk %p", sk);
1669 	while (len >= L2CAP_CONF_OPT_SIZE) {
1670 		len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
1676 		case L2CAP_CONF_MTU:
1680 		case L2CAP_CONF_FLUSH_TO:
1684 		case L2CAP_CONF_QOS:
1687 		case L2CAP_CONF_RFC:
1688 			if (olen == sizeof(rfc))
1689 				memcpy(&rfc, (void *) val, olen);
1696 			result = L2CAP_CONF_UNKNOWN;
1697 			*((u8 *) ptr++) = type;
1702 	if (result == L2CAP_CONF_SUCCESS) {
1703 		/* Configure output options and let the other side know
1704 		 * which ones we don't like. */
1706 		if (rfc.mode == L2CAP_MODE_BASIC) {
1708 				result = L2CAP_CONF_UNACCEPT;
1711 				pi->conf_state |= L2CAP_CONF_OUTPUT_DONE;
1714 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);
1716 			result = L2CAP_CONF_UNACCEPT;
1718 			memset(&rfc, 0, sizeof(rfc));
1719 			rfc.mode = L2CAP_MODE_BASIC;
1721 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
1722 						sizeof(rfc), (unsigned long) &rfc);
1726 	rsp->scid   = cpu_to_le16(pi->dcid);
1727 	rsp->result = cpu_to_le16(result);
1728 	rsp->flags  = cpu_to_le16(0x0000);
/* Build a bare Configure Response (no options) with the given result
 * and continuation flags.  NOTE(review): return is elided — presumably
 * the fixed response length. */
1733 static int l2cap_build_conf_rsp(struct sock *sk, void *data, u16 result, u16 flags)
1735 struct l2cap_conf_rsp *rsp = data;
1736 void *ptr = rsp->data;
1738 BT_DBG("sk %p", sk);
1740 rsp->scid = cpu_to_le16(l2cap_pi(sk)->dcid);
1741 rsp->result = cpu_to_le16(result);
1742 rsp->flags = cpu_to_le16(flags);
/* Handle an incoming Command Reject.  If it rejects our pending
 * Information Request (feature-mask probe), cancel the info timer and
 * kick off connection establishment anyway. */
1747 static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1749 struct l2cap_cmd_rej *rej = (struct l2cap_cmd_rej *) data;
/* only "command not understood" (0x0000) is acted upon */
1751 if (rej->reason != 0x0000)
1754 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
1755 cmd->ident == conn->info_ident) {
1756 conn->info_ident = 0;
1757 del_timer(&conn->info_timer);
1758 l2cap_conn_start(conn);
/* Handle an incoming Connection Request: find a listener for the PSM,
 * enforce link security (except for SDP), allocate and initialize a
 * child socket, add the channel, and reply with a Connection Response.
 * May also fire an Information Request to probe peer features.
 * NOTE(review): chunk elides lines — several error-path labels, locks
 * and else-branches are not visible. */
1764 static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1766 struct l2cap_chan_list *list = &conn->chan_list;
1767 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
1768 struct l2cap_conn_rsp rsp;
1769 struct sock *sk, *parent;
1770 int result, status = L2CAP_CS_NO_INFO;
1772 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
/* PSM kept little-endian for comparisons and socket lookup */
1773 __le16 psm = req->psm;
1775 BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);
1777 /* Check if we have socket listening on psm */
1778 parent = l2cap_get_sock_by_psm(BT_LISTEN, psm, conn->src);
1780 result = L2CAP_CR_BAD_PSM;
1784 /* Check if the ACL is secure enough (if not SDP) */
/* PSM 0x0001 is SDP, which is exempt from link-mode checks */
1785 if (psm != cpu_to_le16(0x0001) &&
1786 !hci_conn_check_link_mode(conn->hcon)) {
1787 result = L2CAP_CR_SEC_BLOCK;
1791 result = L2CAP_CR_NO_MEM;
1793 /* Check for backlog size */
1794 if (sk_acceptq_is_full(parent)) {
1795 BT_DBG("backlog full %d", parent->sk_ack_backlog);
1799 sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP, GFP_ATOMIC);
1803 write_lock_bh(&list->lock);
1805 /* Check if we already have channel with that dcid */
1806 if (__l2cap_get_chan_by_dcid(list, scid)) {
1807 write_unlock_bh(&list->lock);
1808 sock_set_flag(sk, SOCK_ZAPPED);
1809 l2cap_sock_kill(sk);
/* take a reference on the ACL for the lifetime of the channel */
1813 hci_conn_hold(conn->hcon);
1815 l2cap_sock_init(sk, parent);
1816 bacpy(&bt_sk(sk)->src, conn->src);
1817 bacpy(&bt_sk(sk)->dst, conn->dst);
1818 l2cap_pi(sk)->psm = psm;
/* peer's source CID becomes our destination CID */
1819 l2cap_pi(sk)->dcid = scid;
1821 __l2cap_chan_add(conn, sk, parent);
1822 dcid = l2cap_pi(sk)->scid;
1824 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
1826 l2cap_pi(sk)->ident = cmd->ident;
/* feature mask already requested: decide final vs pending result now */
1828 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
1829 if (l2cap_check_security(sk)) {
1830 if (bt_sk(sk)->defer_setup) {
/* userspace must accept first — authorization pending */
1831 sk->sk_state = BT_CONNECT2;
1832 result = L2CAP_CR_PEND;
1833 status = L2CAP_CS_AUTHOR_PEND;
1834 parent->sk_data_ready(parent, 0);
1836 sk->sk_state = BT_CONFIG;
1837 result = L2CAP_CR_SUCCESS;
1838 status = L2CAP_CS_NO_INFO;
/* security check still in flight — authentication pending */
1841 sk->sk_state = BT_CONNECT2;
1842 result = L2CAP_CR_PEND;
1843 status = L2CAP_CS_AUTHEN_PEND;
/* no feature mask yet: respond pending, probe features below */
1846 sk->sk_state = BT_CONNECT2;
1847 result = L2CAP_CR_PEND;
1848 status = L2CAP_CS_NO_INFO;
1851 write_unlock_bh(&list->lock);
1854 bh_unlock_sock(parent);
1857 rsp.scid = cpu_to_le16(scid);
1858 rsp.dcid = cpu_to_le16(dcid);
1859 rsp.result = cpu_to_le16(result);
1860 rsp.status = cpu_to_le16(status);
1861 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
/* first pending connect on this ACL: ask the peer for its features */
1863 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
1864 struct l2cap_info_req info;
1865 info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
1867 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
1868 conn->info_ident = l2cap_get_ident(conn);
1870 mod_timer(&conn->info_timer, jiffies +
1871 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
1873 l2cap_send_cmd(conn, conn->info_ident,
1874 L2CAP_INFO_REQ, sizeof(info), &info);
/* Handle an incoming Connection Response: locate our channel by the
 * echoed source CID (or by command ident while still pending), then on
 * success move to BT_CONFIG and send the first Configure Request; on
 * refusal tear the channel down.  NOTE(review): chunk elides lines —
 * the switch scaffolding and PEND case are not visible. */
1880 static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1882 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
1883 u16 scid, dcid, result, status;
1887 scid = __le16_to_cpu(rsp->scid);
1888 dcid = __le16_to_cpu(rsp->dcid);
1889 result = __le16_to_cpu(rsp->result);
1890 status = __le16_to_cpu(rsp->status);
1892 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);
/* fall back to ident lookup when the peer did not echo a valid scid */
1895 if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, scid)))
1898 if (!(sk = l2cap_get_chan_by_ident(&conn->chan_list, cmd->ident)))
1903 case L2CAP_CR_SUCCESS:
1904 sk->sk_state = BT_CONFIG;
1905 l2cap_pi(sk)->ident = 0;
1906 l2cap_pi(sk)->dcid = dcid;
1907 l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
1909 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
1910 l2cap_build_conf_req(sk, req), req);
/* refused: drop the channel and report ECONNREFUSED to the socket */
1917 l2cap_chan_del(sk, ECONNREFUSED);
/* Handle an incoming Configure Request.  Options may span several
 * requests (continuation flag 0x0001); fragments are accumulated in
 * pi->conf_req until complete, then parsed and answered.  When both
 * directions are configured the channel becomes BT_CONNECTED.
 * NOTE(review): chunk elides lines — error labels and unlock paths are
 * not visible. */
1925 static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
1927 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
1933 dcid = __le16_to_cpu(req->dcid);
1934 flags = __le16_to_cpu(req->flags);
1936 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
1938 if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid)))
/* ignore config while disconnecting */
1941 if (sk->sk_state == BT_DISCONN)
1944 /* Reject if config buffer is too small. */
1945 len = cmd_len - sizeof(*req);
1946 if (l2cap_pi(sk)->conf_len + len > sizeof(l2cap_pi(sk)->conf_req)) {
1947 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
1948 l2cap_build_conf_rsp(sk, rsp,
1949 L2CAP_CONF_REJECT, flags), rsp);
/* append this fragment's options to the accumulation buffer */
1954 memcpy(l2cap_pi(sk)->conf_req + l2cap_pi(sk)->conf_len, req->data, len);
1955 l2cap_pi(sk)->conf_len += len;
1957 if (flags & 0x0001) {
1958 /* Incomplete config. Send empty response. */
1959 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
1960 l2cap_build_conf_rsp(sk, rsp,
1961 L2CAP_CONF_SUCCESS, 0x0001), rsp);
1965 /* Complete config. */
1966 len = l2cap_parse_conf_req(sk, rsp);
1970 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
1972 /* Reset config buffer. */
1973 l2cap_pi(sk)->conf_len = 0;
1975 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE))
/* input side already done too → channel fully configured */
1978 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_INPUT_DONE) {
1979 sk->sk_state = BT_CONNECTED;
1980 l2cap_chan_ready(sk);
/* we have not yet sent our own Configure Request — do it now */
1984 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)) {
1986 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
1987 l2cap_build_conf_req(sk, buf), buf);
/* Handle an incoming Configure Response.  On UNACCEPT we re-send the
 * same request up to L2CAP_CONF_MAX_RETRIES, after which (or on any
 * other failure) the channel is disconnected.  On success the input
 * direction is marked done.  NOTE(review): chunk elides lines — switch
 * default case and unlock paths are not visible. */
1995 static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1997 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
1998 u16 scid, flags, result;
2001 scid = __le16_to_cpu(rsp->scid);
2002 flags = __le16_to_cpu(rsp->flags);
2003 result = __le16_to_cpu(rsp->result);
2005 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x", scid, flags, result);
2007 if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, scid)))
2011 case L2CAP_CONF_SUCCESS:
2014 case L2CAP_CONF_UNACCEPT:
2015 if (++l2cap_pi(sk)->conf_retry < L2CAP_CONF_MAX_RETRIES) {
2017 /* It does not make sense to adjust L2CAP parameters
2018 * that are currently defined in the spec. We simply
2019 * resend config request that we sent earlier. It is
2020 * stupid, but it helps qualification testing which
2021 * expects at least some response from us. */
2022 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2023 l2cap_build_conf_req(sk, req), req);
/* retries exhausted (or hard failure): tear the channel down */
2028 sk->sk_state = BT_DISCONN;
2029 sk->sk_err = ECONNRESET;
2030 l2cap_sock_set_timer(sk, HZ * 5);
2032 struct l2cap_disconn_req req;
2033 req.dcid = cpu_to_le16(l2cap_pi(sk)->dcid);
2034 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
2035 l2cap_send_cmd(conn, l2cap_get_ident(conn),
2036 L2CAP_DISCONN_REQ, sizeof(req), &req);
/* peer accepted our options: input direction configured */
2044 l2cap_pi(sk)->conf_state |= L2CAP_CONF_INPUT_DONE;
2046 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE) {
2047 sk->sk_state = BT_CONNECTED;
2048 l2cap_chan_ready(sk);
/* Handle an incoming Disconnection Request: acknowledge with a
 * Disconnection Response, shut the socket down, remove the channel and
 * kill the socket. */
2056 static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2058 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
2059 struct l2cap_disconn_rsp rsp;
2063 scid = __le16_to_cpu(req->scid);
2064 dcid = __le16_to_cpu(req->dcid);
2066 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
/* the peer's dcid names our local channel */
2068 if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid)))
2071 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
2072 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
2073 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
2075 sk->sk_shutdown = SHUTDOWN_MASK;
2077 l2cap_chan_del(sk, ECONNRESET);
2080 l2cap_sock_kill(sk);
/* Handle an incoming Disconnection Response to our earlier request:
 * remove the channel (no error) and kill the socket. */
2084 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2086 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
2090 scid = __le16_to_cpu(rsp->scid);
2091 dcid = __le16_to_cpu(rsp->dcid);
2093 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
2095 if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, scid)))
2098 l2cap_chan_del(sk, 0);
2101 l2cap_sock_kill(sk);
/* Handle an incoming Information Request.  Only the feature-mask query
 * is supported (answered with l2cap_feat_mask); anything else gets a
 * NOTSUPP response. */
2105 static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2107 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
2110 type = __le16_to_cpu(req->type);
2112 BT_DBG("type 0x%4.4x", type);
2114 if (type == L2CAP_IT_FEAT_MASK) {
2116 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
2117 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
2118 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
/* rsp->data may be unaligned inside buf — use put_unaligned */
2119 put_unaligned(cpu_to_le32(l2cap_feat_mask), (__le32 *) rsp->data);
2120 l2cap_send_cmd(conn, cmd->ident,
2121 L2CAP_INFO_RSP, sizeof(buf), buf);
2123 struct l2cap_info_rsp rsp;
2124 rsp.type = cpu_to_le16(type);
2125 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
2126 l2cap_send_cmd(conn, cmd->ident,
2127 L2CAP_INFO_RSP, sizeof(rsp), &rsp);
/* Handle an incoming Information Response: record the peer's feature
 * mask, cancel the probe timer, and proceed with pending connections. */
2133 static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2135 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
2138 type = __le16_to_cpu(rsp->type);
2139 result = __le16_to_cpu(rsp->result);
2141 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
2143 conn->info_ident = 0;
2145 del_timer(&conn->info_timer);
2147 if (type == L2CAP_IT_FEAT_MASK)
2148 conn->feat_mask = get_unaligned_le32(rsp->data);
2150 l2cap_conn_start(conn);
/* Demultiplex the signaling channel: iterate over the concatenated
 * command headers in the skb and dispatch each to its handler.  On a
 * handler error a Command Reject is sent back.  NOTE(review): chunk
 * elides lines — the switch scaffolding, loop advance and skb free are
 * not visible. */
2155 static inline void l2cap_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb)
2157 u8 *data = skb->data;
2159 struct l2cap_cmd_hdr cmd;
/* mirror raw signaling traffic to any raw sockets first */
2162 l2cap_raw_recv(conn, skb);
2164 while (len >= L2CAP_CMD_HDR_SIZE) {
2166 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
2167 data += L2CAP_CMD_HDR_SIZE;
2168 len -= L2CAP_CMD_HDR_SIZE;
2170 cmd_len = le16_to_cpu(cmd.len);
2172 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);
/* length must fit the remaining buffer; ident 0 is invalid */
2174 if (cmd_len > len || !cmd.ident) {
2175 BT_DBG("corrupted command");
2180 case L2CAP_COMMAND_REJ:
2181 l2cap_command_rej(conn, &cmd, data);
2184 case L2CAP_CONN_REQ:
2185 err = l2cap_connect_req(conn, &cmd, data);
2188 case L2CAP_CONN_RSP:
2189 err = l2cap_connect_rsp(conn, &cmd, data);
2192 case L2CAP_CONF_REQ:
2193 err = l2cap_config_req(conn, &cmd, cmd_len, data);
2196 case L2CAP_CONF_RSP:
2197 err = l2cap_config_rsp(conn, &cmd, data);
2200 case L2CAP_DISCONN_REQ:
2201 err = l2cap_disconnect_req(conn, &cmd, data);
2204 case L2CAP_DISCONN_RSP:
2205 err = l2cap_disconnect_rsp(conn, &cmd, data);
2208 case L2CAP_ECHO_REQ:
/* echo back the request payload verbatim */
2209 l2cap_send_cmd(conn, cmd.ident, L2CAP_ECHO_RSP, cmd_len, data);
2212 case L2CAP_ECHO_RSP:
2215 case L2CAP_INFO_REQ:
2216 err = l2cap_information_req(conn, &cmd, data);
2219 case L2CAP_INFO_RSP:
2220 err = l2cap_information_rsp(conn, &cmd, data);
2224 BT_ERR("Unknown signaling command 0x%2.2x", cmd.code);
2230 struct l2cap_cmd_rej rej;
2231 BT_DBG("error %d", err);
2233 /* FIXME: Map err to a valid reason */
2234 rej.reason = cpu_to_le16(0);
2235 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
/* Deliver a connection-oriented data frame to the socket owning `cid`.
 * Frames for unknown CIDs, non-connected sockets, or frames exceeding
 * the negotiated MTU are dropped.  NOTE(review): chunk elides lines —
 * drop/unlock labels and return values are not visible. */
2245 static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
2249 sk = l2cap_get_chan_by_scid(&conn->chan_list, cid);
2251 BT_DBG("unknown cid 0x%4.4x", cid);
2255 BT_DBG("sk %p, len %d", sk, skb->len);
2257 if (sk->sk_state != BT_CONNECTED)
2260 if (l2cap_pi(sk)->imtu < skb->len)
2263 /* If socket recv buffers overflows we drop data here
2264 * which is *bad* because L2CAP has to be reliable.
2265 * But we don't have any other choice. L2CAP doesn't
2266 * provide flow control mechanism. */
2268 if (!sock_queue_rcv_skb(sk, skb))
/* Deliver a connectionless (group) frame to the socket bound to `psm`.
 * Dropped if no such socket, wrong state, or frame exceeds the MTU. */
2281 static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
2285 sk = l2cap_get_sock_by_psm(0, psm, conn->src);
2289 BT_DBG("sk %p, len %d", sk, skb->len);
2291 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_CONNECTED)
2294 if (l2cap_pi(sk)->imtu < skb->len)
2297 if (!sock_queue_rcv_skb(sk, skb))
/* sk may be NULL when no socket matched the PSM */
2304 if (sk) bh_unlock_sock(sk);
/* Route one reassembled L2CAP frame by CID: signaling channel,
 * connectionless channel (PSM-prefixed payload), or a data channel.
 * NOTE(review): chunk elides lines — the CID switch scaffolding is not
 * visible. */
2308 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
2310 struct l2cap_hdr *lh = (void *) skb->data;
/* strip the basic L2CAP header before dispatch */
2314 skb_pull(skb, L2CAP_HDR_SIZE);
2315 cid = __le16_to_cpu(lh->cid);
2316 len = __le16_to_cpu(lh->len);
2318 BT_DBG("len %d, cid 0x%4.4x", len, cid);
2322 l2cap_sig_channel(conn, skb);
/* connectionless frames carry the PSM in the first two payload bytes */
2326 psm = get_unaligned((__le16 *) skb->data);
2328 l2cap_conless_channel(conn, psm, skb);
2332 l2cap_data_channel(conn, cid, skb);
2337 /* ---- L2CAP interface with lower layer (HCI) ---- */
/* HCI callback: an ACL connection is being requested from `bdaddr`.
 * Scan listening sockets; an exact local-address match (lm1) takes
 * precedence over BDADDR_ANY listeners (lm2).  Returns the combined
 * HCI link-mode flags (accept/master) or 0 to ignore.  NOTE(review):
 * `exact` appears set in an elided line when the bacmp matches. */
2339 static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
2341 int exact = 0, lm1 = 0, lm2 = 0;
2342 register struct sock *sk;
2343 struct hlist_node *node;
/* only ACL links can carry L2CAP */
2345 if (type != ACL_LINK)
2348 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
2350 /* Find listening sockets and check their link_mode */
2351 read_lock(&l2cap_sk_list.lock);
2352 sk_for_each(sk, node, &l2cap_sk_list.head) {
2353 if (sk->sk_state != BT_LISTEN)
/* listener bound to this adapter's own address */
2356 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
2357 lm1 |= HCI_LM_ACCEPT;
2358 if (l2cap_pi(sk)->role_switch)
2359 lm1 |= HCI_LM_MASTER;
/* wildcard listener */
2361 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
2362 lm2 |= HCI_LM_ACCEPT;
2363 if (l2cap_pi(sk)->role_switch)
2364 lm2 |= HCI_LM_MASTER;
2367 read_unlock(&l2cap_sk_list.lock);
2369 return exact ? lm1 : lm2;
/* HCI callback: ACL connect completed.  On success attach an L2CAP
 * connection object and mark it ready; on failure tear it down with a
 * mapped errno. */
2372 static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
2374 struct l2cap_conn *conn;
2376 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
2378 if (hcon->type != ACL_LINK)
2382 conn = l2cap_conn_add(hcon, status);
2384 l2cap_conn_ready(conn);
/* failure path: convert HCI status to errno and drop the conn */
2386 l2cap_conn_del(hcon, bt_err(status));
/* HCI callback: ACL link going down — destroy the L2CAP connection and
 * propagate the mapped errno to its channels. */
2391 static int l2cap_disconn_ind(struct hci_conn *hcon, u8 reason)
2393 BT_DBG("hcon %p reason %d", hcon, reason);
2395 if (hcon->type != ACL_LINK)
2398 l2cap_conn_del(hcon, bt_err(reason));
/* HCI callback: authentication/encryption change completed.  Walk every
 * channel on the connection: close HIGH-security channels that lost
 * encryption, resume pending outgoing connects (BT_CONNECT), and answer
 * deferred incoming connects (BT_CONNECT2) with success or SEC_BLOCK.
 * NOTE(review): chunk elides lines — per-socket locking and the
 * conditions guarding each branch are only partially visible. */
2403 static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
2405 struct l2cap_chan_list *l;
2406 struct l2cap_conn *conn = hcon->l2cap_data;
2412 l = &conn->chan_list;
2414 BT_DBG("conn %p", conn);
2416 read_lock(&l->lock);
2418 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
2419 struct l2cap_pinfo *pi = l2cap_pi(sk);
/* encryption dropped on a channel that requires it: force close */
2423 if (!status && encrypt == 0x00 &&
2424 pi->sec_level == BT_SECURITY_HIGH &&
2425 (sk->sk_state == BT_CONNECTED ||
2426 sk->sk_state == BT_CONFIG)) {
2427 __l2cap_sock_close(sk, ECONNREFUSED);
/* outgoing connect was waiting for security — send the request now */
2432 if (sk->sk_state == BT_CONNECT) {
2434 struct l2cap_conn_req req;
2435 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
2436 req.psm = l2cap_pi(sk)->psm;
2438 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
2440 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
2441 L2CAP_CONN_REQ, sizeof(req), &req);
2443 l2cap_sock_clear_timer(sk);
2444 l2cap_sock_set_timer(sk, HZ / 10);
/* incoming connect deferred pending authentication */
2446 } else if (sk->sk_state == BT_CONNECT2) {
2447 struct l2cap_conn_rsp rsp;
2451 sk->sk_state = BT_CONFIG;
2452 result = L2CAP_CR_SUCCESS;
/* security failed: reject and schedule teardown */
2454 sk->sk_state = BT_DISCONN;
2455 l2cap_sock_set_timer(sk, HZ / 10);
2456 result = L2CAP_CR_SEC_BLOCK;
2459 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
2460 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
2461 rsp.result = cpu_to_le16(result);
2462 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
2463 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
2464 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
2470 read_unlock(&l->lock);
/* HCI callback: inbound ACL data.  Reassembles L2CAP frames from ACL
 * fragments: an ACL_START fragment carries the L2CAP header and total
 * length; continuation fragments are appended to conn->rx_skb until
 * rx_len reaches zero, then the complete frame is dispatched.  Any
 * inconsistency marks the connection unreliable (ECOMM).
 * NOTE(review): chunk elides lines — drop labels, length checks and
 * final kfree_skb are only partially visible. */
2475 static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
2477 struct l2cap_conn *conn = hcon->l2cap_data;
/* lazily create the conn object on first inbound data */
2479 if (!conn && !(conn = l2cap_conn_add(hcon, 0)))
2482 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
2484 if (flags & ACL_START) {
2485 struct l2cap_hdr *hdr;
/* a start frame while reassembly is in progress: discard old state */
2489 BT_ERR("Unexpected start frame (len %d)", skb->len);
2490 kfree_skb(conn->rx_skb);
2491 conn->rx_skb = NULL;
2493 l2cap_conn_unreliable(conn, ECOMM);
2497 BT_ERR("Frame is too short (len %d)", skb->len);
2498 l2cap_conn_unreliable(conn, ECOMM);
2502 hdr = (struct l2cap_hdr *) skb->data;
2503 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
2505 if (len == skb->len) {
2506 /* Complete frame received */
2507 l2cap_recv_frame(conn, skb);
2511 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
2513 if (skb->len > len) {
2514 BT_ERR("Frame is too long (len %d, expected len %d)",
2516 l2cap_conn_unreliable(conn, ECOMM);
2520 /* Allocate skb for the complete frame (with header) */
2521 if (!(conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC)))
2524 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
/* remember how many bytes are still outstanding */
2526 conn->rx_len = len - skb->len;
2528 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
/* continuation with nothing pending is a protocol violation */
2530 if (!conn->rx_len) {
2531 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
2532 l2cap_conn_unreliable(conn, ECOMM);
2536 if (skb->len > conn->rx_len) {
2537 BT_ERR("Fragment is too long (len %d, expected %d)",
2538 skb->len, conn->rx_len);
2539 kfree_skb(conn->rx_skb);
2540 conn->rx_skb = NULL;
2542 l2cap_conn_unreliable(conn, ECOMM);
2546 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
2548 conn->rx_len -= skb->len;
2550 if (!conn->rx_len) {
2551 /* Complete frame received */
2552 l2cap_recv_frame(conn, conn->rx_skb);
2553 conn->rx_skb = NULL;
/* sysfs read handler: dump one line per L2CAP socket (addresses, state,
 * PSM, CIDs, MTUs, security level) into `buf`.  NOTE(review): `str`
 * initialization and the return are elided in this chunk. */
2562 static ssize_t l2cap_sysfs_show(struct class *dev, char *buf)
2565 struct hlist_node *node;
2568 read_lock_bh(&l2cap_sk_list.lock);
2570 sk_for_each(sk, node, &l2cap_sk_list.head) {
2571 struct l2cap_pinfo *pi = l2cap_pi(sk);
2573 str += sprintf(str, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d\n",
2574 batostr(&bt_sk(sk)->src), batostr(&bt_sk(sk)->dst),
2575 sk->sk_state, btohs(pi->psm), pi->scid, pi->dcid,
2576 pi->imtu, pi->omtu, pi->sec_level);
2579 read_unlock_bh(&l2cap_sk_list.lock);
/* read-only class attribute "l2cap" backed by the dump above */
2584 static CLASS_ATTR(l2cap, S_IRUGO, l2cap_sysfs_show, NULL);
/* Socket operations for SOCK_SEQPACKET/RAW L2CAP sockets; generic
 * Bluetooth helpers are used for poll/ioctl, unsupported ops stubbed
 * with sock_no_*. */
2586 static const struct proto_ops l2cap_sock_ops = {
2587 .family = PF_BLUETOOTH,
2588 .owner = THIS_MODULE,
2589 .release = l2cap_sock_release,
2590 .bind = l2cap_sock_bind,
2591 .connect = l2cap_sock_connect,
2592 .listen = l2cap_sock_listen,
2593 .accept = l2cap_sock_accept,
2594 .getname = l2cap_sock_getname,
2595 .sendmsg = l2cap_sock_sendmsg,
2596 .recvmsg = l2cap_sock_recvmsg,
2597 .poll = bt_sock_poll,
2598 .ioctl = bt_sock_ioctl,
2599 .mmap = sock_no_mmap,
2600 .socketpair = sock_no_socketpair,
2601 .shutdown = l2cap_sock_shutdown,
2602 .setsockopt = l2cap_sock_setsockopt,
2603 .getsockopt = l2cap_sock_getsockopt
/* PF_BLUETOOTH family hook for creating L2CAP sockets. */
2606 static struct net_proto_family l2cap_sock_family_ops = {
2607 .family = PF_BLUETOOTH,
2608 .owner = THIS_MODULE,
2609 .create = l2cap_sock_create,
/* HCI protocol registration: lower-layer callbacks wiring L2CAP into
 * the HCI core (connect/disconnect indications, security changes, ACL
 * data delivery). */
2612 static struct hci_proto l2cap_hci_proto = {
2614 .id = HCI_PROTO_L2CAP,
2615 .connect_ind = l2cap_connect_ind,
2616 .connect_cfm = l2cap_connect_cfm,
2617 .disconn_ind = l2cap_disconn_ind,
2618 .security_cfm = l2cap_security_cfm,
2619 .recv_acldata = l2cap_recv_acldata
/* Module init: register the proto, the BT socket family, and the HCI
 * protocol hooks, in that order; unwind on failure.  The sysfs file is
 * best-effort only.  NOTE(review): goto labels and returns are elided
 * in this chunk. */
2622 static int __init l2cap_init(void)
2626 err = proto_register(&l2cap_proto, 0);
2630 err = bt_sock_register(BTPROTO_L2CAP, &l2cap_sock_family_ops);
2632 BT_ERR("L2CAP socket registration failed");
2636 err = hci_register_proto(&l2cap_hci_proto);
2638 BT_ERR("L2CAP protocol registration failed");
/* unwind socket registration on HCI registration failure */
2639 bt_sock_unregister(BTPROTO_L2CAP);
/* non-fatal: module still works without the sysfs info file */
2643 if (class_create_file(bt_class, &class_attr_l2cap) < 0)
2644 BT_ERR("Failed to create L2CAP info file");
2646 BT_INFO("L2CAP ver %s", VERSION);
2647 BT_INFO("L2CAP socket layer initialized");
/* error label: undo proto_register */
2652 proto_unregister(&l2cap_proto);
/* Module exit: tear down in reverse order of l2cap_init — sysfs file,
 * socket family, HCI protocol, then the proto itself. */
2656 static void __exit l2cap_exit(void)
2658 class_remove_file(bt_class, &class_attr_l2cap);
2660 if (bt_sock_unregister(BTPROTO_L2CAP) < 0)
2661 BT_ERR("L2CAP socket unregistration failed");
2663 if (hci_unregister_proto(&l2cap_hci_proto) < 0)
2664 BT_ERR("L2CAP protocol unregistration failed");
2666 proto_unregister(&l2cap_proto);
/* Exported no-op: referencing this symbol from another module forces
 * automatic loading of l2cap.ko. */
2669 void l2cap_load(void)
2671 /* Dummy function to trigger automatic L2CAP module loading by
2672 * other modules that use L2CAP sockets but don't use any other
2673 * symbols from it. */
2676 EXPORT_SYMBOL(l2cap_load);
2678 module_init(l2cap_init);
2679 module_exit(l2cap_exit);
2681 MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
2682 MODULE_DESCRIPTION("Bluetooth L2CAP ver " VERSION);
2683 MODULE_VERSION(VERSION);
2684 MODULE_LICENSE("GPL");
/* autoload via bt-proto-0 (BTPROTO_L2CAP) alias */
2685 MODULE_ALIAS("bt-proto-0");