2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
25 /* Bluetooth L2CAP core and sockets. */
27 #include <linux/module.h>
29 #include <linux/types.h>
30 #include <linux/capability.h>
31 #include <linux/errno.h>
32 #include <linux/kernel.h>
33 #include <linux/sched.h>
34 #include <linux/slab.h>
35 #include <linux/poll.h>
36 #include <linux/fcntl.h>
37 #include <linux/init.h>
38 #include <linux/interrupt.h>
39 #include <linux/socket.h>
40 #include <linux/skbuff.h>
41 #include <linux/list.h>
42 #include <linux/device.h>
45 #include <asm/system.h>
46 #include <asm/uaccess.h>
47 #include <asm/unaligned.h>
49 #include <net/bluetooth/bluetooth.h>
50 #include <net/bluetooth/hci_core.h>
51 #include <net/bluetooth/l2cap.h>
/* Module version string reported to userspace. */
53 #define VERSION "2.12"
/* Local L2CAP feature mask advertised in information responses;
 * no extended features are enabled here. */
55 static u32 l2cap_feat_mask = 0x0000;
/* Socket operations table, defined later in the file. */
57 static const struct proto_ops l2cap_sock_ops;
/* Global list of all L2CAP sockets, protected by its rwlock. */
59 static struct bt_sock_list l2cap_sk_list = {
60 .lock = __RW_LOCK_UNLOCKED(l2cap_sk_list.lock)
/* Forward declarations for routines used before their definitions. */
63 static void __l2cap_sock_close(struct sock *sk, int reason);
64 static void l2cap_sock_close(struct sock *sk);
65 static void l2cap_sock_kill(struct sock *sk);
67 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
68 u8 code, u8 ident, u16 dlen, void *data);
70 /* ---- L2CAP timers ---- */
/* Channel timer expiry handler (runs from timer context; 'arg' is the
 * socket set up in l2cap_sock_alloc).  Maps the current socket state to
 * an error reason and closes the channel.
 * NOTE(review): lines between the embedded file line numbers (locking,
 * the default 'reason' initialization, sock_put) are missing from this
 * extract — confirm against the full source. */
71 static void l2cap_sock_timeout(unsigned long arg)
73 struct sock *sk = (struct sock *) arg;
76 BT_DBG("sock %p state %d", sk, sk->sk_state);
/* Timeouts on established/configuring channels, or on a connect that
 * already passed the SDP security level, are reported as ECONNREFUSED. */
80 if (sk->sk_state == BT_CONNECTED || sk->sk_state == BT_CONFIG)
81 reason = ECONNREFUSED;
82 else if (sk->sk_state == BT_CONNECT &&
83 l2cap_pi(sk)->sec_level != BT_SECURITY_SDP)
84 reason = ECONNREFUSED;
88 __l2cap_sock_close(sk, reason);
/* (Re)arm the channel timer to fire 'timeout' jiffies from now. */
96 static void l2cap_sock_set_timer(struct sock *sk, long timeout)
98 BT_DBG("sk %p state %d timeout %ld", sk, sk->sk_state, timeout);
99 sk_reset_timer(sk, &sk->sk_timer, jiffies + timeout);
/* Cancel a pending channel timer, if any. */
102 static void l2cap_sock_clear_timer(struct sock *sk)
104 BT_DBG("sock %p state %d", sk, sk->sk_state);
105 sk_stop_timer(sk, &sk->sk_timer);
108 /* ---- L2CAP channels ---- */
/* Walk the per-connection channel list looking for a destination CID.
 * Caller must hold the list lock (double-underscore = unlocked variant). */
109 static struct sock *__l2cap_get_chan_by_dcid(struct l2cap_chan_list *l, u16 cid)
112 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
113 if (l2cap_pi(s)->dcid == cid)
/* Same walk, keyed on the source CID.  Caller must hold the list lock. */
119 static struct sock *__l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
122 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
123 if (l2cap_pi(s)->scid == cid)
129 /* Find channel with given SCID.
130 * Returns locked socket */
131 static inline struct sock *l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
135 s = __l2cap_get_chan_by_scid(l, cid);
/* Lock the socket before releasing the list lock so it cannot go away. */
136 if (s) bh_lock_sock(s);
137 read_unlock(&l->lock);
/* Find channel by pending signalling command identifier.
 * Caller must hold the list lock. */
141 static struct sock *__l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
144 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
145 if (l2cap_pi(s)->ident == ident)
/* Locked wrapper around __l2cap_get_chan_by_ident; returns locked socket. */
151 static inline struct sock *l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
155 s = __l2cap_get_chan_by_ident(l, ident);
156 if (s) bh_lock_sock(s);
157 read_unlock(&l->lock);
/* Allocate the first free source CID below 0xffff for this connection.
 * NOTE(review): the starting value of 'cid' is declared on a line missing
 * from this extract — presumably the first dynamic CID; confirm. */
161 static u16 l2cap_alloc_cid(struct l2cap_chan_list *l)
165 for (; cid < 0xffff; cid++) {
166 if(!__l2cap_get_chan_by_scid(l, cid))
/* Push 'sk' onto the head of the connection's doubly linked channel list.
 * Caller must hold the list write lock. */
173 static inline void __l2cap_chan_link(struct l2cap_chan_list *l, struct sock *sk)
178 l2cap_pi(l->head)->prev_c = sk;
180 l2cap_pi(sk)->next_c = l->head;
181 l2cap_pi(sk)->prev_c = NULL;
/* Remove 'sk' from the channel list, taking the write lock itself. */
185 static inline void l2cap_chan_unlink(struct l2cap_chan_list *l, struct sock *sk)
187 struct sock *next = l2cap_pi(sk)->next_c, *prev = l2cap_pi(sk)->prev_c;
189 write_lock_bh(&l->lock);
194 l2cap_pi(next)->prev_c = prev;
196 l2cap_pi(prev)->next_c = next;
197 write_unlock_bh(&l->lock);
/* Attach a socket to a connection: assign CIDs according to socket type,
 * link it into the channel list and, for incoming channels, queue it on
 * the listening parent.  Caller must hold the channel list write lock. */
202 static void __l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
204 struct l2cap_chan_list *l = &conn->chan_list;
206 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn, l2cap_pi(sk)->psm, l2cap_pi(sk)->dcid);
208 l2cap_pi(sk)->conn = conn;
210 if (sk->sk_type == SOCK_SEQPACKET) {
211 /* Alloc CID for connection-oriented socket */
212 l2cap_pi(sk)->scid = l2cap_alloc_cid(l);
213 } else if (sk->sk_type == SOCK_DGRAM) {
214 /* Connectionless socket: fixed CID 0x0002 both ways */
215 l2cap_pi(sk)->scid = 0x0002;
216 l2cap_pi(sk)->dcid = 0x0002;
217 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
219 /* Raw socket can send/recv signalling messages only (CID 0x0001) */
220 l2cap_pi(sk)->scid = 0x0001;
221 l2cap_pi(sk)->dcid = 0x0001;
222 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
225 __l2cap_chan_link(l, sk);
228 bt_accept_enqueue(parent, sk);
/* Delete channel.
232 * Must be called on the locked socket. */
233 static void l2cap_chan_del(struct sock *sk, int err)
235 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
236 struct sock *parent = bt_sk(sk)->parent;
238 l2cap_sock_clear_timer(sk);
240 BT_DBG("sk %p, conn %p, err %d", sk, conn, err);
243 /* Unlink from channel list */
244 l2cap_chan_unlink(&conn->chan_list, sk);
245 l2cap_pi(sk)->conn = NULL;
/* Drop the HCI connection reference taken when the channel was added. */
246 hci_conn_put(conn->hcon);
249 sk->sk_state = BT_CLOSED;
250 sock_set_flag(sk, SOCK_ZAPPED);
/* For a not-yet-accepted channel, detach from the parent and wake it
 * so the accept queue is rescanned; otherwise just wake this socket. */
256 bt_accept_unlink(sk);
257 parent->sk_data_ready(parent, 0);
259 sk->sk_state_change(sk);
262 /* Service level security */
/* Translate the channel's security level into an HCI authentication
 * requirement and ask the HCI layer to enforce it on the ACL link.
 * Returns nonzero when the link already satisfies the requirement. */
263 static inline int l2cap_check_security(struct sock *sk)
265 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
268 switch (l2cap_pi(sk)->sec_level) {
269 case BT_SECURITY_HIGH:
270 auth_type = HCI_AT_GENERAL_BONDING_MITM;
272 case BT_SECURITY_MEDIUM:
273 auth_type = HCI_AT_GENERAL_BONDING;
276 auth_type = HCI_AT_NO_BONDING;
280 return hci_conn_security(conn->hcon, l2cap_pi(sk)->sec_level,
/* Allocate the next signalling command identifier for this connection. */
284 static inline u8 l2cap_get_ident(struct l2cap_conn *conn)
288 /* Get next available identifier.
289 * 1 - 128 are used by kernel.
290 * 129 - 199 are reserved.
291 * 200 - 254 are used by utilities like l2ping, etc.
294 spin_lock_bh(&conn->lock);
/* Wrap back into the kernel range once 128 is exceeded. */
296 if (++conn->tx_ident > 128)
301 spin_unlock_bh(&conn->lock);
/* Build an L2CAP signalling command and push it out on the ACL link. */
306 static inline int l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
308 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
310 BT_DBG("code 0x%2.2x", code);
315 return hci_send_acl(conn->hcon, skb, 0);
/* Kick off channel establishment.  If the remote feature mask is already
 * being (or has been) queried, send the connection request as soon as
 * security allows; otherwise first issue an information request for the
 * feature mask and arm the info timer. */
318 static void l2cap_do_start(struct sock *sk)
320 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
322 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
323 if (l2cap_check_security(sk)) {
324 struct l2cap_conn_req req;
325 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
326 req.psm = l2cap_pi(sk)->psm;
/* Remember the ident so the response can be matched to this channel. */
328 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
330 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
331 L2CAP_CONN_REQ, sizeof(req), &req);
334 struct l2cap_info_req req;
335 req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
337 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
338 conn->info_ident = l2cap_get_ident(conn);
/* Bound the wait for the info response; l2cap_info_timeout resumes
 * pending connects if the peer never answers. */
340 mod_timer(&conn->info_timer, jiffies +
341 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
343 l2cap_send_cmd(conn, conn->info_ident,
344 L2CAP_INFO_REQ, sizeof(req), &req);
348 /* ---- L2CAP connections ---- */
/* Resume channel setup for every connection-oriented channel on this
 * connection: send pending connect requests (BT_CONNECT) and answer
 * pending incoming connects (BT_CONNECT2) according to security state. */
349 static void l2cap_conn_start(struct l2cap_conn *conn)
351 struct l2cap_chan_list *l = &conn->chan_list;
354 BT_DBG("conn %p", conn);
358 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
/* Only SOCK_SEQPACKET channels take part in the connect handshake. */
361 if (sk->sk_type != SOCK_SEQPACKET) {
366 if (sk->sk_state == BT_CONNECT) {
367 if (l2cap_check_security(sk)) {
368 struct l2cap_conn_req req;
369 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
370 req.psm = l2cap_pi(sk)->psm;
372 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
374 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
375 L2CAP_CONN_REQ, sizeof(req), &req);
377 } else if (sk->sk_state == BT_CONNECT2) {
378 struct l2cap_conn_rsp rsp;
379 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
380 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
382 if (l2cap_check_security(sk)) {
/* Deferred setup: report "authorization pending" and let the
 * listener decide when it accepts the socket. */
383 if (bt_sk(sk)->defer_setup) {
384 struct sock *parent = bt_sk(sk)->parent;
385 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
386 rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
387 parent->sk_data_ready(parent, 0);
390 sk->sk_state = BT_CONFIG;
391 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
392 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
/* Security not yet satisfied: answer "pending, authentication". */
395 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
396 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
399 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
400 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
406 read_unlock(&l->lock);
/* ACL link is up: mark raw/dgram channels connected immediately and
 * start the handshake for channels still in BT_CONNECT. */
409 static void l2cap_conn_ready(struct l2cap_conn *conn)
411 struct l2cap_chan_list *l = &conn->chan_list;
414 BT_DBG("conn %p", conn);
418 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
421 if (sk->sk_type != SOCK_SEQPACKET) {
422 l2cap_sock_clear_timer(sk);
423 sk->sk_state = BT_CONNECTED;
424 sk->sk_state_change(sk);
425 } else if (sk->sk_state == BT_CONNECT)
431 read_unlock(&l->lock);
434 /* Notify sockets that we cannot guarantee reliability anymore */
435 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
437 struct l2cap_chan_list *l = &conn->chan_list;
440 BT_DBG("conn %p", conn);
/* Only channels that asked for reliable links get the error. */
444 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
445 if (l2cap_pi(sk)->force_reliable)
449 read_unlock(&l->lock);
/* Info-request timer expired: give up waiting for the feature mask and
 * resume any channel setups that were blocked on it. */
452 static void l2cap_info_timeout(unsigned long arg)
454 struct l2cap_conn *conn = (void *) arg;
456 conn->info_ident = 0;
458 l2cap_conn_start(conn);
/* Find or create the L2CAP connection object attached to an HCI link.
 * Allocated GFP_ATOMIC because this may run from the HCI event path. */
461 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
463 struct l2cap_conn *conn = hcon->l2cap_data;
468 conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
472 hcon->l2cap_data = conn;
475 BT_DBG("hcon %p conn %p", hcon, conn);
477 conn->mtu = hcon->hdev->acl_mtu;
478 conn->src = &hcon->hdev->bdaddr;
479 conn->dst = &hcon->dst;
483 setup_timer(&conn->info_timer, l2cap_info_timeout,
484 (unsigned long) conn);
486 spin_lock_init(&conn->lock);
487 rwlock_init(&conn->chan_list.lock);
/* Tear down an L2CAP connection: free any partial reassembly buffer,
 * close every remaining channel with 'err', stop the info timer and
 * detach from the HCI connection. */
492 static void l2cap_conn_del(struct hci_conn *hcon, int err)
494 struct l2cap_conn *conn = hcon->l2cap_data;
500 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
503 kfree_skb(conn->rx_skb);
506 while ((sk = conn->chan_list.head)) {
508 l2cap_chan_del(sk, err);
513 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
514 del_timer_sync(&conn->info_timer);
516 hcon->l2cap_data = NULL;
/* Locked wrapper around __l2cap_chan_add. */
520 static inline void l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
522 struct l2cap_chan_list *l = &conn->chan_list;
523 write_lock_bh(&l->lock);
524 __l2cap_chan_add(conn, sk, parent);
525 write_unlock_bh(&l->lock);
528 /* ---- Socket interface ---- */
/* Exact-match lookup of a bound socket by PSM and source address.
 * Caller must hold the l2cap_sk_list lock. */
529 static struct sock *__l2cap_get_sock_by_addr(__le16 psm, bdaddr_t *src)
532 struct hlist_node *node;
533 sk_for_each(sk, node, &l2cap_sk_list.head)
534 if (l2cap_pi(sk)->sport == psm && !bacmp(&bt_sk(sk)->src, src))
541 /* Find socket with psm and source bdaddr.
542 * Returns closest match.
544 static struct sock *__l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
546 struct sock *sk = NULL, *sk1 = NULL;
547 struct hlist_node *node;
549 sk_for_each(sk, node, &l2cap_sk_list.head) {
550 if (state && sk->sk_state != state)
553 if (l2cap_pi(sk)->psm == psm) {
/* Exact source address match wins over a BDADDR_ANY wildcard. */
555 if (!bacmp(&bt_sk(sk)->src, src))
559 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
/* 'node' non-NULL means the loop broke on an exact match; otherwise
 * fall back to the wildcard candidate (may be NULL). */
563 return node ? sk : sk1;
566 /* Find socket with given address (psm, src).
567 * Returns locked socket */
568 static inline struct sock *l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
571 read_lock(&l2cap_sk_list.lock);
572 s = __l2cap_get_sock_by_psm(state, psm, src);
573 if (s) bh_lock_sock(s);
574 read_unlock(&l2cap_sk_list.lock);
/* Socket destructor: drop any skbs still queued for receive or send. */
578 static void l2cap_sock_destruct(struct sock *sk)
582 skb_queue_purge(&sk->sk_receive_queue);
583 skb_queue_purge(&sk->sk_write_queue);
/* Close every connection that was queued on a listening socket but
 * never accepted, then mark the parent closed and zapped. */
586 static void l2cap_sock_cleanup_listen(struct sock *parent)
590 BT_DBG("parent %p", parent);
592 /* Close not yet accepted channels */
593 while ((sk = bt_accept_dequeue(parent, NULL)))
594 l2cap_sock_close(sk);
596 parent->sk_state = BT_CLOSED;
597 sock_set_flag(parent, SOCK_ZAPPED);
600 /* Kill socket (only if zapped and orphan)
601 * Must be called on unlocked socket.
603 static void l2cap_sock_kill(struct sock *sk)
/* Still attached to a struct socket or not zapped: nothing to do yet. */
605 if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
608 BT_DBG("sk %p state %d", sk, sk->sk_state);
610 /* Kill poor orphan */
611 bt_sock_unlink(&l2cap_sk_list, sk);
612 sock_set_flag(sk, SOCK_DEAD);
/* State-dependent close of an L2CAP socket.  Caller holds the socket
 * lock.  Connected SEQPACKET channels get a proper DISCONN handshake;
 * half-open incoming channels are rejected; everything else is torn
 * down directly. */
616 static void __l2cap_sock_close(struct sock *sk, int reason)
618 BT_DBG("sk %p state %d socket %p", sk, sk->sk_state, sk->sk_socket);
620 switch (sk->sk_state) {
622 l2cap_sock_cleanup_listen(sk);
/* Connected/configuring channel: send a disconnect request and wait
 * (bounded by the channel timer) for the response. */
627 if (sk->sk_type == SOCK_SEQPACKET) {
628 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
629 struct l2cap_disconn_req req;
631 sk->sk_state = BT_DISCONN;
632 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
634 req.dcid = cpu_to_le16(l2cap_pi(sk)->dcid);
635 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
636 l2cap_send_cmd(conn, l2cap_get_ident(conn),
637 L2CAP_DISCONN_REQ, sizeof(req), &req);
639 l2cap_chan_del(sk, reason);
/* Incoming channel closed before setup finished: send a negative
 * connect response so the peer is not left hanging. */
643 if (sk->sk_type == SOCK_SEQPACKET) {
644 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
645 struct l2cap_conn_rsp rsp;
648 if (bt_sk(sk)->defer_setup)
649 result = L2CAP_CR_SEC_BLOCK;
651 result = L2CAP_CR_BAD_PSM;
653 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
654 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
655 rsp.result = cpu_to_le16(result);
656 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
657 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
658 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
660 l2cap_chan_del(sk, reason);
665 l2cap_chan_del(sk, reason);
/* Default: no channel state to unwind, just mark the socket zapped. */
669 sock_set_flag(sk, SOCK_ZAPPED);
674 /* Must be called on unlocked socket. */
675 static void l2cap_sock_close(struct sock *sk)
677 l2cap_sock_clear_timer(sk);
679 __l2cap_sock_close(sk, ECONNRESET);
/* Initialize per-socket L2CAP state.  A child of a listening socket
 * inherits its parent's type, MTUs, security level and link-policy
 * flags; a fresh socket gets the documented defaults. */
684 static void l2cap_sock_init(struct sock *sk, struct sock *parent)
686 struct l2cap_pinfo *pi = l2cap_pi(sk);
691 sk->sk_type = parent->sk_type;
692 bt_sk(sk)->defer_setup = bt_sk(parent)->defer_setup;
694 pi->imtu = l2cap_pi(parent)->imtu;
695 pi->omtu = l2cap_pi(parent)->omtu;
696 pi->sec_level = l2cap_pi(parent)->sec_level;
697 pi->role_switch = l2cap_pi(parent)->role_switch;
698 pi->force_reliable = l2cap_pi(parent)->force_reliable;
700 pi->imtu = L2CAP_DEFAULT_MTU;
702 pi->sec_level = BT_SECURITY_LOW;
704 pi->force_reliable = 0;
707 /* Default config options */
709 pi->flush_to = L2CAP_DEFAULT_FLUSH_TO;
/* Protocol descriptor registered with the Bluetooth socket layer;
 * obj_size makes sk_alloc reserve room for l2cap_pinfo. */
712 static struct proto l2cap_proto = {
714 .owner = THIS_MODULE,
715 .obj_size = sizeof(struct l2cap_pinfo)
/* Allocate and initialize a new L2CAP struct sock and link it into the
 * global socket list.  Returns NULL on allocation failure. */
718 static struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock, int proto, gfp_t prio)
722 sk = sk_alloc(net, PF_BLUETOOTH, prio, &l2cap_proto);
726 sock_init_data(sock, sk);
727 INIT_LIST_HEAD(&bt_sk(sk)->accept_q);
729 sk->sk_destruct = l2cap_sock_destruct;
/* Send timeout doubles as the connect/disconnect handshake timeout. */
730 sk->sk_sndtimeo = msecs_to_jiffies(L2CAP_CONN_TIMEOUT);
732 sock_reset_flag(sk, SOCK_ZAPPED);
734 sk->sk_protocol = proto;
735 sk->sk_state = BT_OPEN;
737 setup_timer(&sk->sk_timer, l2cap_sock_timeout, (unsigned long) sk);
739 bt_sock_link(&l2cap_sk_list, sk);
/* socket(2) backend: validate the socket type, require CAP_NET_RAW for
 * raw sockets, then allocate and initialize the sock. */
743 static int l2cap_sock_create(struct net *net, struct socket *sock, int protocol)
747 BT_DBG("sock %p", sock);
749 sock->state = SS_UNCONNECTED;
751 if (sock->type != SOCK_SEQPACKET &&
752 sock->type != SOCK_DGRAM && sock->type != SOCK_RAW)
753 return -ESOCKTNOSUPPORT;
755 if (sock->type == SOCK_RAW && !capable(CAP_NET_RAW))
758 sock->ops = &l2cap_sock_ops;
760 sk = l2cap_sock_alloc(net, sock, protocol, GFP_ATOMIC);
764 l2cap_sock_init(sk, NULL);
/* bind(2) backend: validate the address, enforce the privileged-PSM
 * rule, reject duplicate (psm, bdaddr) bindings and record the source
 * address and PSM. */
768 static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
770 struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
771 struct sock *sk = sock->sk;
774 BT_DBG("sk %p, %s %d", sk, batostr(&la->l2_bdaddr), la->l2_psm);
776 if (!addr || addr->sa_family != AF_BLUETOOTH)
781 if (sk->sk_state != BT_OPEN) {
/* PSMs below 0x1001 are reserved; binding them needs privilege. */
786 if (la->l2_psm && btohs(la->l2_psm) < 0x1001 &&
787 !capable(CAP_NET_BIND_SERVICE)) {
792 write_lock_bh(&l2cap_sk_list.lock);
/* Refuse to bind a (psm, source address) pair already in use. */
794 if (la->l2_psm && __l2cap_get_sock_by_addr(la->l2_psm, &la->l2_bdaddr)) {
797 /* Save source address */
798 bacpy(&bt_sk(sk)->src, &la->l2_bdaddr);
799 l2cap_pi(sk)->psm = la->l2_psm;
800 l2cap_pi(sk)->sport = la->l2_psm;
801 sk->sk_state = BT_BOUND;
/* PSM 0x0001 is SDP: it runs before pairing, so drop security. */
803 if (btohs(la->l2_psm) == 0x0001)
804 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
807 write_unlock_bh(&l2cap_sk_list.lock);
/* Establish (or reuse) the ACL link to the destination and attach this
 * channel to it.  Chooses the HCI authentication type from the socket
 * type, PSM and security level, then starts the L2CAP handshake once
 * the link is connected. */
814 static int l2cap_do_connect(struct sock *sk)
816 bdaddr_t *src = &bt_sk(sk)->src;
817 bdaddr_t *dst = &bt_sk(sk)->dst;
818 struct l2cap_conn *conn;
819 struct hci_conn *hcon;
820 struct hci_dev *hdev;
824 BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst), l2cap_pi(sk)->psm);
826 if (!(hdev = hci_get_route(dst, src)))
827 return -EHOSTUNREACH;
829 hci_dev_lock_bh(hdev);
/* Raw sockets map security levels onto dedicated-bonding auth types. */
833 if (sk->sk_type == SOCK_RAW) {
834 switch (l2cap_pi(sk)->sec_level) {
835 case BT_SECURITY_HIGH:
836 auth_type = HCI_AT_DEDICATED_BONDING_MITM;
838 case BT_SECURITY_MEDIUM:
839 auth_type = HCI_AT_DEDICATED_BONDING;
842 auth_type = HCI_AT_NO_BONDING;
/* PSM 0x0001 (SDP) never bonds; MITM only for high security. */
845 } else if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
846 if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
847 auth_type = HCI_AT_NO_BONDING_MITM;
849 auth_type = HCI_AT_NO_BONDING;
/* Everything else uses general bonding per security level. */
851 switch (l2cap_pi(sk)->sec_level) {
852 case BT_SECURITY_HIGH:
853 auth_type = HCI_AT_GENERAL_BONDING_MITM;
855 case BT_SECURITY_MEDIUM:
856 auth_type = HCI_AT_GENERAL_BONDING;
859 auth_type = HCI_AT_NO_BONDING;
864 hcon = hci_connect(hdev, ACL_LINK, dst,
865 l2cap_pi(sk)->sec_level, auth_type);
869 conn = l2cap_conn_add(hcon, 0);
877 /* Update source addr of the socket */
878 bacpy(src, conn->src);
880 l2cap_chan_add(conn, sk, NULL);
882 sk->sk_state = BT_CONNECT;
883 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
/* Link already up: raw/dgram channels are connected immediately;
 * SEQPACKET channels continue with the L2CAP handshake elsewhere. */
885 if (hcon->state == BT_CONNECTED) {
886 if (sk->sk_type != SOCK_SEQPACKET) {
887 l2cap_sock_clear_timer(sk);
888 sk->sk_state = BT_CONNECTED;
894 hci_dev_unlock_bh(hdev);
/* connect(2) backend: validate the destination, start the connection
 * via l2cap_do_connect and optionally block until BT_CONNECTED. */
899 static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, int alen, int flags)
901 struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
902 struct sock *sk = sock->sk;
909 if (addr->sa_family != AF_BLUETOOTH || alen < sizeof(struct sockaddr_l2)) {
/* Connection-oriented sockets must name a PSM to connect to. */
914 if (sk->sk_type == SOCK_SEQPACKET && !la->l2_psm) {
919 switch(sk->sk_state) {
923 /* Already connecting */
927 /* Already connected */
940 /* Set destination address and psm */
941 bacpy(&bt_sk(sk)->dst, &la->l2_bdaddr);
942 l2cap_pi(sk)->psm = la->l2_psm;
944 if ((err = l2cap_do_connect(sk)))
/* Wait for the handshake, honoring O_NONBLOCK/sndtimeo. */
948 err = bt_sock_wait_state(sk, BT_CONNECTED,
949 sock_sndtimeo(sk, flags & O_NONBLOCK));
/* listen(2) backend: only bound SEQPACKET sockets may listen; if no PSM
 * was bound, auto-assign the first free odd dynamic PSM. */
955 static int l2cap_sock_listen(struct socket *sock, int backlog)
957 struct sock *sk = sock->sk;
960 BT_DBG("sk %p backlog %d", sk, backlog);
964 if (sk->sk_state != BT_BOUND || sock->type != SOCK_SEQPACKET) {
969 if (!l2cap_pi(sk)->psm) {
970 bdaddr_t *src = &bt_sk(sk)->src;
975 write_lock_bh(&l2cap_sk_list.lock);
/* Valid dynamic PSMs are odd values in 0x1001..0x10ff. */
977 for (psm = 0x1001; psm < 0x1100; psm += 2)
978 if (!__l2cap_get_sock_by_addr(htobs(psm), src)) {
979 l2cap_pi(sk)->psm = htobs(psm);
980 l2cap_pi(sk)->sport = htobs(psm);
985 write_unlock_bh(&l2cap_sk_list.lock);
991 sk->sk_max_ack_backlog = backlog;
992 sk->sk_ack_backlog = 0;
993 sk->sk_state = BT_LISTEN;
/* accept(2) backend: sleep (interruptibly, wake-one) until a connection
 * is available on the accept queue or the timeout/signal fires. */
1000 static int l2cap_sock_accept(struct socket *sock, struct socket *newsock, int flags)
1002 DECLARE_WAITQUEUE(wait, current);
1003 struct sock *sk = sock->sk, *nsk;
1007 lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
1009 if (sk->sk_state != BT_LISTEN) {
1014 timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
1016 BT_DBG("sk %p timeo %ld", sk, timeo);
1018 /* Wait for an incoming connection. (wake-one). */
1019 add_wait_queue_exclusive(sk->sk_sleep, &wait);
1020 while (!(nsk = bt_accept_dequeue(sk, newsock))) {
1021 set_current_state(TASK_INTERRUPTIBLE);
/* Socket lock is dropped around the schedule (release is on a line
 * missing from this extract) and re-taken below. */
1028 timeo = schedule_timeout(timeo);
1029 lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
/* Listener may have been closed while we slept. */
1031 if (sk->sk_state != BT_LISTEN) {
1036 if (signal_pending(current)) {
1037 err = sock_intr_errno(timeo);
1041 set_current_state(TASK_RUNNING);
1042 remove_wait_queue(sk->sk_sleep, &wait);
1047 newsock->state = SS_CONNECTED;
1049 BT_DBG("new socket %p", nsk);
/* getsockname/getpeername backend: 'peer' selects the destination
 * address, otherwise the source address is reported. */
1056 static int l2cap_sock_getname(struct socket *sock, struct sockaddr *addr, int *len, int peer)
1058 struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
1059 struct sock *sk = sock->sk;
1061 BT_DBG("sock %p, sk %p", sock, sk);
1063 addr->sa_family = AF_BLUETOOTH;
1064 *len = sizeof(struct sockaddr_l2);
1067 bacpy(&la->l2_bdaddr, &bt_sk(sk)->dst);
1069 bacpy(&la->l2_bdaddr, &bt_sk(sk)->src);
1071 la->l2_psm = l2cap_pi(sk)->psm;
/* Build a (possibly fragmented) outgoing L2CAP frame from the user's
 * iovec and hand it to the ACL layer.  The first skb carries the L2CAP
 * header (plus the PSM for connectionless sockets); continuation data
 * goes into frag_list skbs sized to the connection MTU. */
1075 static inline int l2cap_do_send(struct sock *sk, struct msghdr *msg, int len)
1077 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1078 struct sk_buff *skb, **frag;
1079 int err, hlen, count, sent=0;
1080 struct l2cap_hdr *lh;
1082 BT_DBG("sk %p len %d", sk, len);
1084 /* First fragment (with L2CAP header) */
1085 if (sk->sk_type == SOCK_DGRAM)
1086 hlen = L2CAP_HDR_SIZE + 2;
1088 hlen = L2CAP_HDR_SIZE;
1090 count = min_t(unsigned int, (conn->mtu - hlen), len);
1092 skb = bt_skb_send_alloc(sk, hlen + count,
1093 msg->msg_flags & MSG_DONTWAIT, &err);
1097 /* Create L2CAP header */
1098 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1099 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1100 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
/* Connectionless frames carry the destination PSM after the header. */
1102 if (sk->sk_type == SOCK_DGRAM)
1103 put_unaligned(l2cap_pi(sk)->psm, (__le16 *) skb_put(skb, 2));
1105 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count)) {
1113 /* Continuation fragments (no L2CAP header) */
1114 frag = &skb_shinfo(skb)->frag_list;
1116 count = min_t(unsigned int, conn->mtu, len);
1118 *frag = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err);
1122 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count)) {
1130 frag = &(*frag)->next;
1133 if ((err = hci_send_acl(conn->hcon, skb, 0)) < 0)
/* sendmsg(2) backend: reject OOB, enforce the outgoing MTU for
 * non-raw sockets, and require a connected state before sending. */
1143 static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len)
1145 struct sock *sk = sock->sk;
1148 BT_DBG("sock %p, sk %p", sock, sk);
1150 err = sock_error(sk);
1154 if (msg->msg_flags & MSG_OOB)
1157 /* Check outgoing MTU */
1158 if (sk->sk_type != SOCK_RAW && len > l2cap_pi(sk)->omtu)
1163 if (sk->sk_state == BT_CONNECTED)
1164 err = l2cap_do_send(sk, msg, len);
/* recvmsg(2) backend: the first read on a deferred-setup socket
 * completes the connection (sends the success CONN_RSP) before
 * falling through to the generic Bluetooth receive path. */
1172 static int l2cap_sock_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len, int flags)
1174 struct sock *sk = sock->sk;
1178 if (sk->sk_state == BT_CONNECT2 && bt_sk(sk)->defer_setup) {
1179 struct l2cap_conn_rsp rsp;
1181 sk->sk_state = BT_CONFIG;
1183 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
1184 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
1185 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
1186 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
1187 l2cap_send_cmd(l2cap_pi(sk)->conn, l2cap_pi(sk)->ident,
1188 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
1196 return bt_sock_recvmsg(iocb, sock, msg, len, flags);
/* Legacy SOL_L2CAP setsockopt handler: L2CAP_OPTIONS (MTUs, flush
 * timeout, mode) and L2CAP_LM (link-mode flags mapped onto the newer
 * security levels). */
1199 static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __user *optval, int optlen)
1201 struct sock *sk = sock->sk;
1202 struct l2cap_options opts;
1206 BT_DBG("sk %p", sk);
/* Seed with current values so a short copy keeps the rest intact. */
1212 opts.imtu = l2cap_pi(sk)->imtu;
1213 opts.omtu = l2cap_pi(sk)->omtu;
1214 opts.flush_to = l2cap_pi(sk)->flush_to;
1215 opts.mode = L2CAP_MODE_BASIC;
1217 len = min_t(unsigned int, sizeof(opts), optlen);
1218 if (copy_from_user((char *) &opts, optval, len)) {
1223 l2cap_pi(sk)->imtu = opts.imtu;
1224 l2cap_pi(sk)->omtu = opts.omtu;
1228 if (get_user(opt, (u32 __user *) optval)) {
/* Map the old link-mode bits onto security levels, strongest last
 * so SECURE overrides ENCRYPT overrides AUTH. */
1233 if (opt & L2CAP_LM_AUTH)
1234 l2cap_pi(sk)->sec_level = BT_SECURITY_LOW;
1235 if (opt & L2CAP_LM_ENCRYPT)
1236 l2cap_pi(sk)->sec_level = BT_SECURITY_MEDIUM;
1237 if (opt & L2CAP_LM_SECURE)
1238 l2cap_pi(sk)->sec_level = BT_SECURITY_HIGH;
1240 l2cap_pi(sk)->role_switch = (opt & L2CAP_LM_MASTER);
1241 l2cap_pi(sk)->force_reliable = (opt & L2CAP_LM_RELIABLE);
/* SOL_BLUETOOTH setsockopt handler: BT_SECURITY (SEQPACKET only) and
 * BT_DEFER_SETUP (bound/listening sockets only).  SOL_L2CAP requests
 * are forwarded to the legacy handler above. */
1253 static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, int optlen)
1255 struct sock *sk = sock->sk;
1256 struct bt_security sec;
1260 BT_DBG("sk %p", sk);
1262 if (level == SOL_L2CAP)
1263 return l2cap_sock_setsockopt_old(sock, optname, optval, optlen);
1265 if (level != SOL_BLUETOOTH)
1266 return -ENOPROTOOPT;
1272 if (sk->sk_type != SOCK_SEQPACKET) {
1277 sec.level = BT_SECURITY_LOW;
1279 len = min_t(unsigned int, sizeof(sec), optlen);
1280 if (copy_from_user((char *) &sec, optval, len)) {
1285 if (sec.level < BT_SECURITY_LOW ||
1286 sec.level > BT_SECURITY_HIGH) {
1291 l2cap_pi(sk)->sec_level = sec.level;
1294 case BT_DEFER_SETUP:
1295 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
1300 if (get_user(opt, (u32 __user *) optval)) {
1305 bt_sk(sk)->defer_setup = opt;
/* Legacy SOL_L2CAP getsockopt handler: L2CAP_OPTIONS, L2CAP_LM
 * (security level mapped back to the old flag bits) and
 * L2CAP_CONNINFO (HCI handle and remote device class). */
1317 static int l2cap_sock_getsockopt_old(struct socket *sock, int optname, char __user *optval, int __user *optlen)
1319 struct sock *sk = sock->sk;
1320 struct l2cap_options opts;
1321 struct l2cap_conninfo cinfo;
1325 BT_DBG("sk %p", sk);
1327 if (get_user(len, optlen))
1334 opts.imtu = l2cap_pi(sk)->imtu;
1335 opts.omtu = l2cap_pi(sk)->omtu;
1336 opts.flush_to = l2cap_pi(sk)->flush_to;
1337 opts.mode = L2CAP_MODE_BASIC;
1339 len = min_t(unsigned int, len, sizeof(opts));
1340 if (copy_to_user(optval, (char *) &opts, len))
/* Reconstruct the old link-mode bits from the security level;
 * higher levels imply the lower-level bits as well. */
1346 switch (l2cap_pi(sk)->sec_level) {
1347 case BT_SECURITY_LOW:
1348 opt = L2CAP_LM_AUTH;
1350 case BT_SECURITY_MEDIUM:
1351 opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT;
1353 case BT_SECURITY_HIGH:
1354 opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT |
1362 if (l2cap_pi(sk)->role_switch)
1363 opt |= L2CAP_LM_MASTER;
1365 if (l2cap_pi(sk)->force_reliable)
1366 opt |= L2CAP_LM_RELIABLE;
1368 if (put_user(opt, (u32 __user *) optval))
1372 case L2CAP_CONNINFO:
/* Connection info is valid once connected, or while a deferred
 * incoming setup is pending. */
1373 if (sk->sk_state != BT_CONNECTED &&
1374 !(sk->sk_state == BT_CONNECT2 &&
1375 bt_sk(sk)->defer_setup)) {
1380 cinfo.hci_handle = l2cap_pi(sk)->conn->hcon->handle;
1381 memcpy(cinfo.dev_class, l2cap_pi(sk)->conn->hcon->dev_class, 3);
1383 len = min_t(unsigned int, len, sizeof(cinfo));
1384 if (copy_to_user(optval, (char *) &cinfo, len))
/* SOL_BLUETOOTH getsockopt handler: BT_SECURITY and BT_DEFER_SETUP;
 * SOL_L2CAP requests go to the legacy handler above. */
1398 static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen)
1400 struct sock *sk = sock->sk;
1401 struct bt_security sec;
1404 BT_DBG("sk %p", sk);
1406 if (level == SOL_L2CAP)
1407 return l2cap_sock_getsockopt_old(sock, optname, optval, optlen);
1409 if (level != SOL_BLUETOOTH)
1410 return -ENOPROTOOPT;
1412 if (get_user(len, optlen))
1419 if (sk->sk_type != SOCK_SEQPACKET) {
1424 sec.level = l2cap_pi(sk)->sec_level;
1426 len = min_t(unsigned int, len, sizeof(sec));
1427 if (copy_to_user(optval, (char *) &sec, len))
1432 case BT_DEFER_SETUP:
1433 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
1438 if (put_user(bt_sk(sk)->defer_setup, (u32 __user *) optval))
/* shutdown(2) backend: close the channel once, then optionally linger
 * until the socket reaches BT_CLOSED (SO_LINGER semantics). */
1452 static int l2cap_sock_shutdown(struct socket *sock, int how)
1454 struct sock *sk = sock->sk;
1457 BT_DBG("sock %p, sk %p", sock, sk);
1463 if (!sk->sk_shutdown) {
1464 sk->sk_shutdown = SHUTDOWN_MASK;
1465 l2cap_sock_clear_timer(sk);
1466 __l2cap_sock_close(sk, 0);
1468 if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
1469 err = bt_sock_wait_state(sk, BT_CLOSED,
/* release(2)/close(2) backend: shut the socket down, then reap it if
 * it is already zapped and orphaned. */
1476 static int l2cap_sock_release(struct socket *sock)
1478 struct sock *sk = sock->sk;
1481 BT_DBG("sock %p, sk %p", sock, sk);
1486 err = l2cap_sock_shutdown(sock, 2);
1489 l2cap_sock_kill(sk);
/* Configuration finished: clear config state, stop the channel timer
 * and wake whoever is waiting (connect() for outgoing channels,
 * accept() on the parent for incoming ones). */
1493 static void l2cap_chan_ready(struct sock *sk)
1495 struct sock *parent = bt_sk(sk)->parent;
1497 BT_DBG("sk %p, parent %p", sk, parent);
1499 l2cap_pi(sk)->conf_state = 0;
1500 l2cap_sock_clear_timer(sk);
1503 /* Outgoing channel.
1504 * Wake up socket sleeping on connect.
1506 sk->sk_state = BT_CONNECTED;
1507 sk->sk_state_change(sk);
1509 /* Incoming channel.
1510 * Wake up socket sleeping on accept.
1512 parent->sk_data_ready(parent, 0);
1516 /* Copy frame to all raw sockets on that connection */
1517 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
1519 struct l2cap_chan_list *l = &conn->chan_list;
1520 struct sk_buff *nskb;
1523 BT_DBG("conn %p", conn);
1525 read_lock(&l->lock);
1526 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
1527 if (sk->sk_type != SOCK_RAW)
1530 /* Don't send frame to the socket it came from */
/* Clone rather than share: each raw socket gets its own skb. */
1534 if (!(nskb = skb_clone(skb, GFP_ATOMIC)))
1537 if (sock_queue_rcv_skb(sk, nskb))
1540 read_unlock(&l->lock);
1543 /* ---- L2CAP signalling commands ---- */
/* Assemble a signalling PDU on CID 0x0001: L2CAP header + command
 * header + payload, fragmented into frag_list skbs when the total
 * exceeds the connection MTU.  Returns NULL on allocation failure. */
1544 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
1545 u8 code, u8 ident, u16 dlen, void *data)
1547 struct sk_buff *skb, **frag;
1548 struct l2cap_cmd_hdr *cmd;
1549 struct l2cap_hdr *lh;
1552 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d", conn, code, ident, dlen);
1554 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
1555 count = min_t(unsigned int, conn->mtu, len);
1557 skb = bt_skb_alloc(count, GFP_ATOMIC);
1561 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1562 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
1563 lh->cid = cpu_to_le16(0x0001);
1565 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
1568 cmd->len = cpu_to_le16(dlen);
/* Fill the remainder of the first fragment with payload. */
1571 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
1572 memcpy(skb_put(skb, count), data, count);
1578 /* Continuation fragments (no L2CAP header) */
1579 frag = &skb_shinfo(skb)->frag_list;
1581 count = min_t(unsigned int, conn->mtu, len);
1583 *frag = bt_skb_alloc(count, GFP_ATOMIC);
1587 memcpy(skb_put(*frag, count), data, count);
1592 frag = &(*frag)->next;
/* Decode one configuration option at *ptr: extract type and length,
 * widen the 1/2/4-byte value into *val (larger options are returned
 * by pointer), and return the total bytes consumed so the caller can
 * advance through the option list. */
1602 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
1604 struct l2cap_conf_opt *opt = *ptr;
1607 len = L2CAP_CONF_OPT_SIZE + opt->len;
1615 *val = *((u8 *) opt->val);
1619 *val = __le16_to_cpu(*((__le16 *) opt->val));
1623 *val = __le32_to_cpu(*((__le32 *) opt->val));
/* Variable-length option: hand back a pointer to the raw bytes. */
1627 *val = (unsigned long) opt->val;
1631 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
/* Encode one configuration option at *ptr (inverse of the above) and
 * advance *ptr past it.  For len > 4, 'val' is a pointer to the data. */
1635 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
1637 struct l2cap_conf_opt *opt = *ptr;
1639 BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val)
1646 *((u8 *) opt->val) = val;
1650 *((__le16 *) opt->val) = cpu_to_le16(val);
1654 *((__le32 *) opt->val) = cpu_to_le32(val);
1658 memcpy(opt->val, (void *) val, len);
1662 *ptr += L2CAP_CONF_OPT_SIZE + len;
/* Build our outgoing configuration request into 'data'; only a
 * non-default incoming MTU needs to be communicated.  Returns the
 * request length (on a line missing from this extract). */
1665 static int l2cap_build_conf_req(struct sock *sk, void *data)
1667 struct l2cap_pinfo *pi = l2cap_pi(sk);
1668 struct l2cap_conf_req *req = data;
1669 void *ptr = req->data;
1671 BT_DBG("sk %p", sk);
1673 if (pi->imtu != L2CAP_DEFAULT_MTU)
1674 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);
1676 /* FIXME: Need actual value of the flush timeout */
1677 //if (flush_to != L2CAP_DEFAULT_FLUSH_TO)
1678 // l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2, pi->flush_to);
1680 req->dcid = cpu_to_le16(pi->dcid);
1681 req->flags = cpu_to_le16(0);
/* Parse the peer's accumulated Configure Request (pi->conf_req /
 * conf_len) and build our Configure Response into 'data'.  Walks the
 * option list via l2cap_get_conf_opt(); unknown non-hint options set
 * result UNKNOWN and are echoed back by type.  Only BASIC mode is
 * accepted — a non-basic RFC option yields UNACCEPT with a basic-mode
 * RFC option in the response.  On success we record OUTPUT_DONE and
 * answer with our outgoing MTU.
 * NOTE(review): many interior lines (switch cases for MTU/FLUSH_TO/QOS
 * handling, hint masking, return) are elided in this excerpt. */
1686 static int l2cap_parse_conf_req(struct sock *sk, void *data)
1688 struct l2cap_pinfo *pi = l2cap_pi(sk);
1689 struct l2cap_conf_rsp *rsp = data;
1690 void *ptr = rsp->data;
1691 void *req = pi->conf_req;
1692 int len = pi->conf_len;
1693 int type, hint, olen;
1695 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
1696 u16 mtu = L2CAP_DEFAULT_MTU;
1697 u16 result = L2CAP_CONF_SUCCESS;
1699 BT_DBG("sk %p", sk);
/* consume options until fewer than a full option header remains */
1701 while (len >= L2CAP_CONF_OPT_SIZE) {
1702 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
1708 case L2CAP_CONF_MTU:
1712 case L2CAP_CONF_FLUSH_TO:
1716 case L2CAP_CONF_QOS:
1719 case L2CAP_CONF_RFC:
1720 if (olen == sizeof(rfc))
1721 memcpy(&rfc, (void *) val, olen);
/* unknown, non-hint option: reject and echo the offending type */
1728 result = L2CAP_CONF_UNKNOWN;
1729 *((u8 *) ptr++) = type;
1734 if (result == L2CAP_CONF_SUCCESS) {
1735 /* Configure output options and let the other side know
1736 * which ones we don't like. */
/* only basic L2CAP mode is supported by this stack version */
1738 if (rfc.mode == L2CAP_MODE_BASIC) {
1740 result = L2CAP_CONF_UNACCEPT;
1743 pi->conf_state |= L2CAP_CONF_OUTPUT_DONE;
1746 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);
1748 result = L2CAP_CONF_UNACCEPT;
/* propose basic mode back to the peer */
1750 memset(&rfc, 0, sizeof(rfc));
1751 rfc.mode = L2CAP_MODE_BASIC;
1753 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
1754 sizeof(rfc), (unsigned long) &rfc);
1758 rsp->scid = cpu_to_le16(pi->dcid);
1759 rsp->result = cpu_to_le16(result);
1760 rsp->flags = cpu_to_le16(0x0000);
/* Build a minimal (option-free) Configure Response with the given
 * result and flags into 'data'.  Presumably returns the PDU length;
 * the return line is elided in this excerpt. */
1765 static int l2cap_build_conf_rsp(struct sock *sk, void *data, u16 result, u16 flags)
1767 struct l2cap_conf_rsp *rsp = data;
1768 void *ptr = rsp->data;
1770 BT_DBG("sk %p", sk);
/* scid field of the response carries OUR view of the peer's cid */
1772 rsp->scid = cpu_to_le16(l2cap_pi(sk)->dcid);
1773 rsp->result = cpu_to_le16(result);
1774 rsp->flags = cpu_to_le16(flags);
/* Handle an incoming Command Reject.  Only reason 0x0000 (command not
 * understood) matching our outstanding feature-mask Information Request
 * is acted on: cancel the info timer and kick off pending connections
 * via l2cap_conn_start(). */
1779 static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1781 struct l2cap_cmd_rej *rej = (struct l2cap_cmd_rej *) data;
1783 if (rej->reason != 0x0000)
/* reject of our own INFO_REQ: stop waiting and proceed without feat mask */
1786 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
1787 cmd->ident == conn->info_ident) {
1788 conn->info_ident = 0;
1789 del_timer(&conn->info_timer);
1790 l2cap_conn_start(conn);
/* Handle an incoming Connection Request: locate a listening socket for
 * the PSM, enforce link security (except for SDP, PSM 0x0001), check
 * backlog and dcid uniqueness, clone a child socket, add it to the
 * channel list, and reply with a Connection Response whose result/
 * status depend on security state and defer_setup.  If we have not yet
 * queried the peer's feature mask, follow up with an Information
 * Request and arm the info timer.
 * NOTE(review): locking calls, gotos, labels and several closing braces
 * are elided in this excerpt — do not compile as-is. */
1796 static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1798 struct l2cap_chan_list *list = &conn->chan_list;
1799 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
1800 struct l2cap_conn_rsp rsp;
1801 struct sock *sk, *parent;
1802 int result, status = L2CAP_CS_NO_INFO;
1804 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
1805 __le16 psm = req->psm;
1807 BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);
1809 /* Check if we have socket listening on psm */
1810 parent = l2cap_get_sock_by_psm(BT_LISTEN, psm, conn->src);
1812 result = L2CAP_CR_BAD_PSM;
1816 /* Check if the ACL is secure enough (if not SDP) */
1817 if (psm != cpu_to_le16(0x0001) &&
1818 !hci_conn_check_link_mode(conn->hcon)) {
1819 result = L2CAP_CR_SEC_BLOCK;
1823 result = L2CAP_CR_NO_MEM;
1825 /* Check for backlog size */
1826 if (sk_acceptq_is_full(parent)) {
1827 BT_DBG("backlog full %d", parent->sk_ack_backlog);
1831 sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP, GFP_ATOMIC);
1835 write_lock_bh(&list->lock);
1837 /* Check if we already have channel with that dcid */
1838 if (__l2cap_get_chan_by_dcid(list, scid)) {
1839 write_unlock_bh(&list->lock);
1840 sock_set_flag(sk, SOCK_ZAPPED);
1841 l2cap_sock_kill(sk);
/* take a ref on the ACL for the lifetime of the channel */
1845 hci_conn_hold(conn->hcon);
1847 l2cap_sock_init(sk, parent);
1848 bacpy(&bt_sk(sk)->src, conn->src);
1849 bacpy(&bt_sk(sk)->dst, conn->dst);
1850 l2cap_pi(sk)->psm = psm;
/* remote's scid becomes our dcid for this channel */
1851 l2cap_pi(sk)->dcid = scid;
1853 __l2cap_chan_add(conn, sk, parent);
1854 dcid = l2cap_pi(sk)->scid;
1856 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
1858 l2cap_pi(sk)->ident = cmd->ident;
/* only answer definitively once the peer's feature mask was requested */
1860 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
1861 if (l2cap_check_security(sk)) {
1862 if (bt_sk(sk)->defer_setup) {
/* userspace must accept first: report authorization pending */
1863 sk->sk_state = BT_CONNECT2;
1864 result = L2CAP_CR_PEND;
1865 status = L2CAP_CS_AUTHOR_PEND;
1866 parent->sk_data_ready(parent, 0);
1868 sk->sk_state = BT_CONFIG;
1869 result = L2CAP_CR_SUCCESS;
1870 status = L2CAP_CS_NO_INFO;
/* security procedure still running: authentication pending */
1873 sk->sk_state = BT_CONNECT2;
1874 result = L2CAP_CR_PEND;
1875 status = L2CAP_CS_AUTHEN_PEND;
/* feature mask not yet requested: defer with generic pending */
1878 sk->sk_state = BT_CONNECT2;
1879 result = L2CAP_CR_PEND;
1880 status = L2CAP_CS_NO_INFO;
1883 write_unlock_bh(&list->lock);
1886 bh_unlock_sock(parent);
1889 rsp.scid = cpu_to_le16(scid);
1890 rsp.dcid = cpu_to_le16(dcid);
1891 rsp.result = cpu_to_le16(result);
1892 rsp.status = cpu_to_le16(status);
1893 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
/* first pending connection triggers the feature-mask exchange */
1895 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
1896 struct l2cap_info_req info;
1897 info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
1899 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
1900 conn->info_ident = l2cap_get_ident(conn);
1902 mod_timer(&conn->info_timer, jiffies +
1903 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
1905 l2cap_send_cmd(conn, conn->info_ident,
1906 L2CAP_INFO_REQ, sizeof(info), &info);
1912 static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1914 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
1915 u16 scid, dcid, result, status;
1919 scid = __le16_to_cpu(rsp->scid);
1920 dcid = __le16_to_cpu(rsp->dcid);
1921 result = __le16_to_cpu(rsp->result);
1922 status = __le16_to_cpu(rsp->status);
1924 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);
1927 if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, scid)))
1930 if (!(sk = l2cap_get_chan_by_ident(&conn->chan_list, cmd->ident)))
1935 case L2CAP_CR_SUCCESS:
1936 sk->sk_state = BT_CONFIG;
1937 l2cap_pi(sk)->ident = 0;
1938 l2cap_pi(sk)->dcid = dcid;
1939 l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
1941 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
1942 l2cap_build_conf_req(sk, req), req);
1949 l2cap_chan_del(sk, ECONNREFUSED);
1957 static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
1959 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
1965 dcid = __le16_to_cpu(req->dcid);
1966 flags = __le16_to_cpu(req->flags);
1968 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
1970 if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid)))
1973 if (sk->sk_state == BT_DISCONN)
1976 /* Reject if config buffer is too small. */
1977 len = cmd_len - sizeof(*req);
1978 if (l2cap_pi(sk)->conf_len + len > sizeof(l2cap_pi(sk)->conf_req)) {
1979 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
1980 l2cap_build_conf_rsp(sk, rsp,
1981 L2CAP_CONF_REJECT, flags), rsp);
1986 memcpy(l2cap_pi(sk)->conf_req + l2cap_pi(sk)->conf_len, req->data, len);
1987 l2cap_pi(sk)->conf_len += len;
1989 if (flags & 0x0001) {
1990 /* Incomplete config. Send empty response. */
1991 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
1992 l2cap_build_conf_rsp(sk, rsp,
1993 L2CAP_CONF_SUCCESS, 0x0001), rsp);
1997 /* Complete config. */
1998 len = l2cap_parse_conf_req(sk, rsp);
2002 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
2004 /* Reset config buffer. */
2005 l2cap_pi(sk)->conf_len = 0;
2007 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE))
2010 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_INPUT_DONE) {
2011 sk->sk_state = BT_CONNECTED;
2012 l2cap_chan_ready(sk);
2016 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)) {
2018 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2019 l2cap_build_conf_req(sk, buf), buf);
2027 static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2029 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
2030 u16 scid, flags, result;
2033 scid = __le16_to_cpu(rsp->scid);
2034 flags = __le16_to_cpu(rsp->flags);
2035 result = __le16_to_cpu(rsp->result);
2037 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x", scid, flags, result);
2039 if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, scid)))
2043 case L2CAP_CONF_SUCCESS:
2046 case L2CAP_CONF_UNACCEPT:
2047 if (++l2cap_pi(sk)->conf_retry < L2CAP_CONF_MAX_RETRIES) {
2049 /* It does not make sense to adjust L2CAP parameters
2050 * that are currently defined in the spec. We simply
2051 * resend config request that we sent earlier. It is
2052 * stupid, but it helps qualification testing which
2053 * expects at least some response from us. */
2054 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2055 l2cap_build_conf_req(sk, req), req);
2060 sk->sk_state = BT_DISCONN;
2061 sk->sk_err = ECONNRESET;
2062 l2cap_sock_set_timer(sk, HZ * 5);
2064 struct l2cap_disconn_req req;
2065 req.dcid = cpu_to_le16(l2cap_pi(sk)->dcid);
2066 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
2067 l2cap_send_cmd(conn, l2cap_get_ident(conn),
2068 L2CAP_DISCONN_REQ, sizeof(req), &req);
2076 l2cap_pi(sk)->conf_state |= L2CAP_CONF_INPUT_DONE;
2078 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE) {
2079 sk->sk_state = BT_CONNECTED;
2080 l2cap_chan_ready(sk);
2088 static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2090 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
2091 struct l2cap_disconn_rsp rsp;
2095 scid = __le16_to_cpu(req->scid);
2096 dcid = __le16_to_cpu(req->dcid);
2098 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
2100 if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid)))
2103 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
2104 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
2105 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
2107 sk->sk_shutdown = SHUTDOWN_MASK;
2109 l2cap_chan_del(sk, ECONNRESET);
2112 l2cap_sock_kill(sk);
2116 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2118 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
2122 scid = __le16_to_cpu(rsp->scid);
2123 dcid = __le16_to_cpu(rsp->dcid);
2125 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
2127 if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, scid)))
2130 l2cap_chan_del(sk, 0);
2133 l2cap_sock_kill(sk);
2137 static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2139 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
2142 type = __le16_to_cpu(req->type);
2144 BT_DBG("type 0x%4.4x", type);
2146 if (type == L2CAP_IT_FEAT_MASK) {
2148 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
2149 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
2150 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
2151 put_unaligned(cpu_to_le32(l2cap_feat_mask), (__le32 *) rsp->data);
2152 l2cap_send_cmd(conn, cmd->ident,
2153 L2CAP_INFO_RSP, sizeof(buf), buf);
2155 struct l2cap_info_rsp rsp;
2156 rsp.type = cpu_to_le16(type);
2157 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
2158 l2cap_send_cmd(conn, cmd->ident,
2159 L2CAP_INFO_RSP, sizeof(rsp), &rsp);
2165 static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2167 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
2170 type = __le16_to_cpu(rsp->type);
2171 result = __le16_to_cpu(rsp->result);
2173 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
2175 conn->info_ident = 0;
2177 del_timer(&conn->info_timer);
2179 if (type == L2CAP_IT_FEAT_MASK)
2180 conn->feat_mask = get_unaligned_le32(rsp->data);
2182 l2cap_conn_start(conn);
/* Demultiplex the L2CAP signaling channel (CID 0x0001): first mirror
 * the skb to raw sockets, then iterate over the concatenated command
 * PDUs, validating each header (length fits, nonzero ident) and
 * dispatching on the command code.  Any handler error is answered with
 * a Command Reject.  NOTE(review): break statements, the data/len
 * advance at loop bottom, and kfree_skb are elided in this excerpt. */
2187 static inline void l2cap_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb)
2189 u8 *data = skb->data;
2191 struct l2cap_cmd_hdr cmd;
/* give raw (sniffer) sockets a copy of the signaling traffic */
2194 l2cap_raw_recv(conn, skb);
2196 while (len >= L2CAP_CMD_HDR_SIZE) {
2198 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
2199 data += L2CAP_CMD_HDR_SIZE;
2200 len -= L2CAP_CMD_HDR_SIZE;
2202 cmd_len = le16_to_cpu(cmd.len);
2204 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);
/* malformed PDU: declared length overruns buffer, or ident is 0 */
2206 if (cmd_len > len || !cmd.ident) {
2207 BT_DBG("corrupted command");
2212 case L2CAP_COMMAND_REJ:
2213 l2cap_command_rej(conn, &cmd, data);
2216 case L2CAP_CONN_REQ:
2217 err = l2cap_connect_req(conn, &cmd, data);
2220 case L2CAP_CONN_RSP:
2221 err = l2cap_connect_rsp(conn, &cmd, data);
2224 case L2CAP_CONF_REQ:
2225 err = l2cap_config_req(conn, &cmd, cmd_len, data);
2228 case L2CAP_CONF_RSP:
2229 err = l2cap_config_rsp(conn, &cmd, data);
2232 case L2CAP_DISCONN_REQ:
2233 err = l2cap_disconnect_req(conn, &cmd, data);
2236 case L2CAP_DISCONN_RSP:
2237 err = l2cap_disconnect_rsp(conn, &cmd, data);
2240 case L2CAP_ECHO_REQ:
/* echo the payload straight back */
2241 l2cap_send_cmd(conn, cmd.ident, L2CAP_ECHO_RSP, cmd_len, data);
2244 case L2CAP_ECHO_RSP:
2247 case L2CAP_INFO_REQ:
2248 err = l2cap_information_req(conn, &cmd, data);
2251 case L2CAP_INFO_RSP:
2252 err = l2cap_information_rsp(conn, &cmd, data);
2256 BT_ERR("Unknown signaling command 0x%2.2x", cmd.code);
2262 struct l2cap_cmd_rej rej;
2263 BT_DBG("error %d", err);
2265 /* FIXME: Map err to a valid reason */
2266 rej.reason = cpu_to_le16(0);
2267 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
/* Deliver a data frame to the connection-oriented channel identified
 * by 'cid'.  Frames for unknown cids, non-connected sockets, or frames
 * exceeding our incoming MTU are dropped (drop paths elided here). */
2277 static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
2281 sk = l2cap_get_chan_by_scid(&conn->chan_list, cid);
2283 BT_DBG("unknown cid 0x%4.4x", cid);
2287 BT_DBG("sk %p, len %d", sk, skb->len);
2289 if (sk->sk_state != BT_CONNECTED)
/* enforce the negotiated incoming MTU */
2292 if (l2cap_pi(sk)->imtu < skb->len)
2295 /* If socket recv buffers overflows we drop data here
2296 * which is *bad* because L2CAP has to be reliable.
2297 * But we don't have any other choice. L2CAP doesn't
2298 * provide flow control mechanism. */
2300 if (!sock_queue_rcv_skb(sk, skb))
/* Deliver a connectionless (CID 0x0002) frame to a socket bound to the
 * given PSM.  Socket must be BT_BOUND or BT_CONNECTED and the frame
 * must fit the incoming MTU; otherwise it is dropped. */
2313 static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
2317 sk = l2cap_get_sock_by_psm(0, psm, conn->src);
2321 BT_DBG("sk %p, len %d", sk, skb->len);
2323 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_CONNECTED)
2326 if (l2cap_pi(sk)->imtu < skb->len)
2329 if (!sock_queue_rcv_skb(sk, skb))
/* 'sk' may be NULL when no PSM match was found */
2336 if (sk) bh_unlock_sock(sk);
/* Route one reassembled L2CAP frame by destination CID: 0x0001 goes to
 * the signaling handler, 0x0002 (connectionless) is demuxed by the PSM
 * prefix in its payload, everything else is channel data. */
2340 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
2342 struct l2cap_hdr *lh = (void *) skb->data;
/* strip the basic L2CAP header before dispatch */
2346 skb_pull(skb, L2CAP_HDR_SIZE);
2347 cid = __le16_to_cpu(lh->cid);
2348 len = __le16_to_cpu(lh->len);
2350 BT_DBG("len %d, cid 0x%4.4x", len, cid);
2354 l2cap_sig_channel(conn, skb);
/* connectionless frames carry the PSM as the first payload bytes */
2358 psm = get_unaligned((__le16 *) skb->data);
2360 l2cap_conless_channel(conn, psm, skb);
2364 l2cap_data_channel(conn, cid, skb);
2369 /* ---- L2CAP interface with lower layer (HCI) ---- */
/* HCI callback: should we accept an incoming ACL connection from
 * 'bdaddr'?  Scans listening L2CAP sockets; an exact local-address
 * match (lm1) wins over a BDADDR_ANY match (lm2).  Returns HCI_LM_*
 * flags (ACCEPT, plus MASTER when the socket asked for role switch).
 * NOTE(review): the line setting 'exact' on a local-address match is
 * elided in this excerpt. */
2371 static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
2373 int exact = 0, lm1 = 0, lm2 = 0;
2374 register struct sock *sk;
2375 struct hlist_node *node;
/* only ACL links can carry L2CAP */
2377 if (type != ACL_LINK)
2380 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
2382 /* Find listening sockets and check their link_mode */
2383 read_lock(&l2cap_sk_list.lock);
2384 sk_for_each(sk, node, &l2cap_sk_list.head) {
2385 if (sk->sk_state != BT_LISTEN)
2388 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
2389 lm1 |= HCI_LM_ACCEPT;
2390 if (l2cap_pi(sk)->role_switch)
2391 lm1 |= HCI_LM_MASTER;
2393 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
2394 lm2 |= HCI_LM_ACCEPT;
2395 if (l2cap_pi(sk)->role_switch)
2396 lm2 |= HCI_LM_MASTER;
2399 read_unlock(&l2cap_sk_list.lock);
2401 return exact ? lm1 : lm2;
/* HCI callback: ACL connect attempt completed.  On success attach an
 * l2cap_conn to the hcon and mark it ready; on failure tear the
 * connection state down with the mapped errno. */
2404 static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
2406 struct l2cap_conn *conn;
2408 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
2410 if (hcon->type != ACL_LINK)
2414 conn = l2cap_conn_add(hcon, status);
2416 l2cap_conn_ready(conn);
/* failure path: translate HCI status into an errno for sockets */
2418 l2cap_conn_del(hcon, bt_err(status));
/* HCI callback: ACL link went down — tear down the L2CAP connection
 * and propagate the mapped errno to all its channels. */
2423 static int l2cap_disconn_ind(struct hci_conn *hcon, u8 reason)
2425 BT_DBG("hcon %p reason %d", hcon, reason);
2427 if (hcon->type != ACL_LINK)
2430 l2cap_conn_del(hcon, bt_err(reason));
/* React to an encryption change on a SEQPACKET channel.  Encryption
 * dropped: MEDIUM security gets a 5 s grace timer, HIGH security
 * closes the socket immediately.  Encryption (re)established: MEDIUM
 * security cancels the grace timer. */
2435 static inline void l2cap_check_encryption(struct sock *sk, u8 encrypt)
2437 if (sk->sk_type != SOCK_SEQPACKET)
2440 if (encrypt == 0x00) {
2441 if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM) {
/* restart the 5 second grace period */
2442 l2cap_sock_clear_timer(sk);
2443 l2cap_sock_set_timer(sk, HZ * 5);
2444 } else if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
2445 __l2cap_sock_close(sk, ECONNREFUSED);
2447 if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM)
2448 l2cap_sock_clear_timer(sk);
/* HCI callback: an authentication/encryption procedure finished with
 * 'status'.  Walk every channel on the connection: established
 * channels get their encryption state rechecked; channels in
 * BT_CONNECT now send their deferred Connection Request; channels in
 * BT_CONNECT2 answer the peer with SUCCESS or SEC_BLOCK depending on
 * status.  NOTE(review): per-socket bh_lock/unlock and the status
 * check in the CONNECT2 branch are elided in this excerpt. */
2452 static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
2454 struct l2cap_chan_list *l;
2455 struct l2cap_conn *conn = hcon->l2cap_data;
2461 l = &conn->chan_list;
2463 BT_DBG("conn %p", conn);
2465 read_lock(&l->lock);
2467 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
2470 if (!status && (sk->sk_state == BT_CONNECTED ||
2471 sk->sk_state == BT_CONFIG)) {
2472 l2cap_check_encryption(sk, encrypt);
/* security just completed for an outgoing connect: send CONN_REQ */
2477 if (sk->sk_state == BT_CONNECT) {
2479 struct l2cap_conn_req req;
2480 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
2481 req.psm = l2cap_pi(sk)->psm;
2483 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
2485 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
2486 L2CAP_CONN_REQ, sizeof(req), &req);
2488 l2cap_sock_clear_timer(sk);
2489 l2cap_sock_set_timer(sk, HZ / 10);
2491 } else if (sk->sk_state == BT_CONNECT2) {
2492 struct l2cap_conn_rsp rsp;
2496 sk->sk_state = BT_CONFIG;
2497 result = L2CAP_CR_SUCCESS;
/* security failed: block the incoming connection */
2499 sk->sk_state = BT_DISCONN;
2500 l2cap_sock_set_timer(sk, HZ / 10);
2501 result = L2CAP_CR_SEC_BLOCK;
2504 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
2505 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
2506 rsp.result = cpu_to_le16(result);
2507 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
2508 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
2509 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
2515 read_unlock(&l->lock);
/* HCI callback: reassemble L2CAP frames from ACL fragments.
 * ACL_START fragments: validate the L2CAP header, deliver immediately
 * when the fragment already holds the whole frame, otherwise allocate
 * conn->rx_skb sized to the full frame and remember the remaining
 * byte count in conn->rx_len.  Continuation fragments: append into
 * rx_skb, and deliver once rx_len reaches zero.  Malformed sequences
 * mark the connection unreliable (ECOMM) and discard partial state.
 * NOTE(review): several goto/drop/kfree_skb/return lines are elided
 * in this excerpt. */
2520 static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
2522 struct l2cap_conn *conn = hcon->l2cap_data;
/* lazily create the l2cap_conn on first inbound data */
2524 if (!conn && !(conn = l2cap_conn_add(hcon, 0)))
2527 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
2529 if (flags & ACL_START) {
2530 struct l2cap_hdr *hdr;
/* a START while reassembly was in progress: drop the stale partial */
2534 BT_ERR("Unexpected start frame (len %d)", skb->len);
2535 kfree_skb(conn->rx_skb);
2536 conn->rx_skb = NULL;
2538 l2cap_conn_unreliable(conn, ECOMM);
2542 BT_ERR("Frame is too short (len %d)", skb->len);
2543 l2cap_conn_unreliable(conn, ECOMM);
2547 hdr = (struct l2cap_hdr *) skb->data;
2548 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
2550 if (len == skb->len) {
2551 /* Complete frame received */
2552 l2cap_recv_frame(conn, skb);
2556 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
2558 if (skb->len > len) {
2559 BT_ERR("Frame is too long (len %d, expected len %d)",
2561 l2cap_conn_unreliable(conn, ECOMM);
2565 /* Allocate skb for the complete frame (with header) */
2566 if (!(conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC)))
2569 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
2571 conn->rx_len = len - skb->len;
2573 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
/* continuation with no reassembly pending */
2575 if (!conn->rx_len) {
2576 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
2577 l2cap_conn_unreliable(conn, ECOMM);
2581 if (skb->len > conn->rx_len) {
2582 BT_ERR("Fragment is too long (len %d, expected %d)",
2583 skb->len, conn->rx_len);
2584 kfree_skb(conn->rx_skb);
2585 conn->rx_skb = NULL;
2587 l2cap_conn_unreliable(conn, ECOMM);
2591 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
2593 conn->rx_len -= skb->len;
2595 if (!conn->rx_len) {
2596 /* Complete frame received */
2597 l2cap_recv_frame(conn, conn->rx_skb);
2598 conn->rx_skb = NULL;
/* sysfs 'l2cap' class attribute: dump one line per L2CAP socket
 * (src dst state psm scid dcid imtu omtu sec_level) into 'buf'.
 * NOTE(review): 'str' initialization and the final return (str - buf)
 * are elided in this excerpt.  Unbounded sprintf into a PAGE_SIZE
 * sysfs buffer is a known limitation of this legacy interface. */
2607 static ssize_t l2cap_sysfs_show(struct class *dev, char *buf)
2610 struct hlist_node *node;
2613 read_lock_bh(&l2cap_sk_list.lock);
2615 sk_for_each(sk, node, &l2cap_sk_list.head) {
2616 struct l2cap_pinfo *pi = l2cap_pi(sk);
2618 str += sprintf(str, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d\n",
2619 batostr(&bt_sk(sk)->src), batostr(&bt_sk(sk)->dst),
2620 sk->sk_state, btohs(pi->psm), pi->scid, pi->dcid,
2621 pi->imtu, pi->omtu, pi->sec_level);
2624 read_unlock_bh(&l2cap_sk_list.lock);
/* read-only class attribute backing /sys/class/bluetooth/l2cap */
2629 static CLASS_ATTR(l2cap, S_IRUGO, l2cap_sysfs_show, NULL);
/* proto_ops for PF_BLUETOOTH/BTPROTO_L2CAP sockets; poll and ioctl are
 * shared generic Bluetooth implementations, mmap/socketpair are
 * unsupported stubs. */
2631 static const struct proto_ops l2cap_sock_ops = {
2632 .family = PF_BLUETOOTH,
2633 .owner = THIS_MODULE,
2634 .release = l2cap_sock_release,
2635 .bind = l2cap_sock_bind,
2636 .connect = l2cap_sock_connect,
2637 .listen = l2cap_sock_listen,
2638 .accept = l2cap_sock_accept,
2639 .getname = l2cap_sock_getname,
2640 .sendmsg = l2cap_sock_sendmsg,
2641 .recvmsg = l2cap_sock_recvmsg,
2642 .poll = bt_sock_poll,
2643 .ioctl = bt_sock_ioctl,
2644 .mmap = sock_no_mmap,
2645 .socketpair = sock_no_socketpair,
2646 .shutdown = l2cap_sock_shutdown,
2647 .setsockopt = l2cap_sock_setsockopt,
2648 .getsockopt = l2cap_sock_getsockopt
/* socket-family hooks registered with the Bluetooth core for
 * BTPROTO_L2CAP socket creation */
2651 static struct net_proto_family l2cap_sock_family_ops = {
2652 .family = PF_BLUETOOTH,
2653 .owner = THIS_MODULE,
2654 .create = l2cap_sock_create,
/* HCI protocol descriptor wiring L2CAP's callbacks into the HCI core:
 * connection accept policy, connect/disconnect notifications, security
 * completion, and inbound ACL data */
2657 static struct hci_proto l2cap_hci_proto = {
2659 .id = HCI_PROTO_L2CAP,
2660 .connect_ind = l2cap_connect_ind,
2661 .connect_cfm = l2cap_connect_cfm,
2662 .disconn_ind = l2cap_disconn_ind,
2663 .security_cfm = l2cap_security_cfm,
2664 .recv_acldata = l2cap_recv_acldata
/* Module init: register the proto, the Bluetooth socket family, and
 * the HCI protocol — unwinding in reverse on failure — then create the
 * (non-fatal) sysfs info file.  NOTE(review): error-path gotos/labels
 * and the success return are elided in this excerpt. */
2667 static int __init l2cap_init(void)
2671 err = proto_register(&l2cap_proto, 0);
2675 err = bt_sock_register(BTPROTO_L2CAP, &l2cap_sock_family_ops);
2677 BT_ERR("L2CAP socket registration failed");
2681 err = hci_register_proto(&l2cap_hci_proto);
2683 BT_ERR("L2CAP protocol registration failed");
/* unwind the socket registration before bailing out */
2684 bt_sock_unregister(BTPROTO_L2CAP);
/* sysfs file is best-effort: failure only logs, init still succeeds */
2688 if (class_create_file(bt_class, &class_attr_l2cap) < 0)
2689 BT_ERR("Failed to create L2CAP info file");
2691 BT_INFO("L2CAP ver %s", VERSION);
2692 BT_INFO("L2CAP socket layer initialized");
2697 proto_unregister(&l2cap_proto);
/* Module exit: remove the sysfs file and unregister everything that
 * l2cap_init() registered, in reverse order. */
2701 static void __exit l2cap_exit(void)
2703 class_remove_file(bt_class, &class_attr_l2cap);
2705 if (bt_sock_unregister(BTPROTO_L2CAP) < 0)
2706 BT_ERR("L2CAP socket unregistration failed");
2708 if (hci_unregister_proto(&l2cap_hci_proto) < 0)
2709 BT_ERR("L2CAP protocol unregistration failed");
2711 proto_unregister(&l2cap_proto);
/* Exported no-op: referencing this symbol lets dependent modules pull
 * in the L2CAP module via the module loader's symbol resolution. */
2714 void l2cap_load(void)
2716 /* Dummy function to trigger automatic L2CAP module loading by
2717 * other modules that use L2CAP sockets but don't use any other
2718 * symbols from it. */
2721 EXPORT_SYMBOL(l2cap_load);
/* Standard module metadata; the bt-proto-0 alias maps BTPROTO_L2CAP
 * socket creation requests to this module for autoloading. */
2723 module_init(l2cap_init);
2724 module_exit(l2cap_exit);
2726 MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
2727 MODULE_DESCRIPTION("Bluetooth L2CAP ver " VERSION);
2728 MODULE_VERSION(VERSION);
2729 MODULE_LICENSE("GPL");
2730 MODULE_ALIAS("bt-proto-0");