2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
25 /* Bluetooth L2CAP core and sockets. */
27 #include <linux/module.h>
29 #include <linux/types.h>
30 #include <linux/capability.h>
31 #include <linux/errno.h>
32 #include <linux/kernel.h>
33 #include <linux/sched.h>
34 #include <linux/slab.h>
35 #include <linux/poll.h>
36 #include <linux/fcntl.h>
37 #include <linux/init.h>
38 #include <linux/interrupt.h>
39 #include <linux/socket.h>
40 #include <linux/skbuff.h>
41 #include <linux/list.h>
42 #include <linux/device.h>
45 #include <asm/system.h>
46 #include <asm/uaccess.h>
47 #include <asm/unaligned.h>
49 #include <net/bluetooth/bluetooth.h>
50 #include <net/bluetooth/hci_core.h>
51 #include <net/bluetooth/l2cap.h>
/* Module-wide state: driver version string, the L2CAP feature mask we
 * advertise (none), the global list of L2CAP sockets, and forward
 * declarations for routines referenced before their definitions. */
53 #define VERSION "2.12"
55 static u32 l2cap_feat_mask = 0x0000;
57 static const struct proto_ops l2cap_sock_ops;
59 static struct bt_sock_list l2cap_sk_list = {
60 .lock = __RW_LOCK_UNLOCKED(l2cap_sk_list.lock)
63 static void __l2cap_sock_close(struct sock *sk, int reason);
64 static void l2cap_sock_close(struct sock *sk);
65 static void l2cap_sock_kill(struct sock *sk);
67 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
68 u8 code, u8 ident, u16 dlen, void *data);
70 /* ---- L2CAP timers ---- */
/* sk_timer expiry callback: derive an error reason from the socket state
 * and close the socket. NOTE(review): listing drops lines (numbering skips
 * 72, 74-75, 77-79, 85-87) — lock/unlock and the default reason init are
 * not visible; verify against the original source. */
71 static void l2cap_sock_timeout(unsigned long arg)
73 struct sock *sk = (struct sock *) arg;
76 BT_DBG("sock %p state %d", sk, sk->sk_state);
/* A connection that timed out while connected/configuring or while
 * connecting at above-SDP security is reported as refused. */
80 if (sk->sk_state == BT_CONNECTED || sk->sk_state == BT_CONFIG)
81 reason = ECONNREFUSED;
82 else if (sk->sk_state == BT_CONNECT &&
83 l2cap_pi(sk)->sec_level != BT_SECURITY_SDP)
84 reason = ECONNREFUSED;
88 __l2cap_sock_close(sk, reason);
/* Arm the socket timer to fire `timeout` jiffies from now. */
96 static void l2cap_sock_set_timer(struct sock *sk, long timeout)
98 BT_DBG("sk %p state %d timeout %ld", sk, sk->sk_state, timeout);
99 sk_reset_timer(sk, &sk->sk_timer, jiffies + timeout);
/* Cancel a pending socket timer, if any. */
102 static void l2cap_sock_clear_timer(struct sock *sk)
104 BT_DBG("sock %p state %d", sk, sk->sk_state);
105 sk_stop_timer(sk, &sk->sk_timer);
108 /* ---- L2CAP channels ---- */
/* Linear scan of the per-connection channel list for a channel with the
 * given destination CID. Caller must hold the list lock.
 * NOTE(review): the break/return lines are missing from this listing. */
109 static struct sock *__l2cap_get_chan_by_dcid(struct l2cap_chan_list *l, u16 cid)
112 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
113 if (l2cap_pi(s)->dcid == cid)
/* As __l2cap_get_chan_by_dcid, but matching the source CID.
 * Caller must hold the list lock. */
119 static struct sock *__l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
122 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
123 if (l2cap_pi(s)->scid == cid)
129 /* Find channel with given SCID.
130 * Returns locked socket */
/* SCID lookup wrapper: searches under the list read lock and returns the
 * socket bh-locked (caller must bh_unlock_sock). NOTE(review): the
 * read_lock() acquisition line is not visible in this listing. */
131 static inline struct sock *l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
135 s = __l2cap_get_chan_by_scid(l, cid);
136 if (s) bh_lock_sock(s);
137 read_unlock(&l->lock);
/* Find the channel whose outstanding signalling request used `ident`.
 * Caller must hold the list lock. */
141 static struct sock *__l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
144 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
145 if (l2cap_pi(s)->ident == ident)
/* Ident lookup wrapper: search under the read lock, return bh-locked
 * socket (or NULL). */
151 static inline struct sock *l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
155 s = __l2cap_get_chan_by_ident(l, ident);
156 if (s) bh_lock_sock(s);
157 read_unlock(&l->lock);
/* Allocate an unused source CID for a connection-oriented channel by
 * scanning upward until a free one is found. NOTE(review): the starting
 * value of `cid` (presumably the first dynamic CID) is on a dropped line. */
161 static u16 l2cap_alloc_cid(struct l2cap_chan_list *l)
165 for (; cid < 0xffff; cid++) {
166 if(!__l2cap_get_chan_by_scid(l, cid))
/* Insert sk at the head of the doubly-linked channel list. Caller holds
 * the write lock. NOTE(review): the l->head assignment and the sock_hold
 * (if any) are on lines dropped from this listing. */
173 static inline void __l2cap_chan_link(struct l2cap_chan_list *l, struct sock *sk)
178 l2cap_pi(l->head)->prev_c = sk;
180 l2cap_pi(sk)->next_c = l->head;
181 l2cap_pi(sk)->prev_c = NULL;
/* Remove sk from the channel list under the write lock, stitching its
 * neighbours together. NOTE(review): the head-update branch is on dropped
 * lines between 189 and 194. */
185 static inline void l2cap_chan_unlink(struct l2cap_chan_list *l, struct sock *sk)
187 struct sock *next = l2cap_pi(sk)->next_c, *prev = l2cap_pi(sk)->prev_c;
189 write_lock_bh(&l->lock);
194 l2cap_pi(next)->prev_c = prev;
196 l2cap_pi(prev)->next_c = next;
197 write_unlock_bh(&l->lock);
/* Attach a socket to an L2CAP connection: assign CIDs by socket type and
 * link it into the connection's channel list. Caller holds the list
 * write lock (see l2cap_chan_add). */
202 static void __l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
204 struct l2cap_chan_list *l = &conn->chan_list;
206 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn, l2cap_pi(sk)->psm, l2cap_pi(sk)->dcid);
208 l2cap_pi(sk)->conn = conn;
210 if (sk->sk_type == SOCK_SEQPACKET) {
211 /* Alloc CID for connection-oriented socket */
212 l2cap_pi(sk)->scid = l2cap_alloc_cid(l);
213 } else if (sk->sk_type == SOCK_DGRAM) {
214 /* Connectionless socket */
215 l2cap_pi(sk)->scid = 0x0002;
216 l2cap_pi(sk)->dcid = 0x0002;
217 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
219 /* Raw socket can send/recv signalling messages only */
220 l2cap_pi(sk)->scid = 0x0001;
221 l2cap_pi(sk)->dcid = 0x0001;
222 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
225 __l2cap_chan_link(l, sk);
/* If this channel was spawned by a listening socket, queue it on the
 * parent's accept queue. */
228 bt_accept_enqueue(parent, sk);
232 * Must be called on the locked socket. */
/* Detach a channel from its connection and mark the socket closed.
 * Must be called on the locked socket. If the channel belonged to a
 * listening parent, unlink it from the accept queue and wake the parent;
 * otherwise wake the socket's own waiters. */
233 static void l2cap_chan_del(struct sock *sk, int err)
235 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
236 struct sock *parent = bt_sk(sk)->parent;
238 l2cap_sock_clear_timer(sk);
240 BT_DBG("sk %p, conn %p, err %d", sk, conn, err);
243 /* Unlink from channel list */
244 l2cap_chan_unlink(&conn->chan_list, sk);
245 l2cap_pi(sk)->conn = NULL;
/* Drop the ACL link reference this channel held. */
246 hci_conn_put(conn->hcon);
249 sk->sk_state = BT_CLOSED;
250 sock_set_flag(sk, SOCK_ZAPPED);
256 bt_accept_unlink(sk);
257 parent->sk_data_ready(parent, 0);
259 sk->sk_state_change(sk);
262 /* Service level security */
/* Map the channel's security level onto an HCI authentication type and
 * ask the HCI layer to enforce it on the underlying ACL link.
 * Returns nonzero when security is already satisfied (per
 * hci_conn_security semantics). */
263 static inline int l2cap_check_security(struct sock *sk)
265 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
268 switch (l2cap_pi(sk)->sec_level) {
269 case BT_SECURITY_HIGH:
270 auth_type = HCI_AT_GENERAL_BONDING_MITM;
272 case BT_SECURITY_MEDIUM:
273 auth_type = HCI_AT_GENERAL_BONDING;
276 auth_type = HCI_AT_NO_BONDING;
280 return hci_conn_security(conn->hcon, l2cap_pi(sk)->sec_level,
/* Allocate the next signalling-command identifier for this connection,
 * wrapping within the kernel-reserved 1-128 range, under conn->lock. */
284 static inline u8 l2cap_get_ident(struct l2cap_conn *conn)
288 /* Get next available identificator.
289 * 1 - 128 are used by kernel.
290 * 129 - 199 are reserved.
291 * 200 - 254 are used by utilities like l2ping, etc.
294 spin_lock_bh(&conn->lock);
296 if (++conn->tx_ident > 128)
301 spin_unlock_bh(&conn->lock);
/* Build a signalling command skb and push it onto the ACL link.
 * NOTE(review): the NULL-check on the built skb (line ~312) is dropped
 * from this listing. */
306 static inline int l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
308 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
310 BT_DBG("code 0x%2.2x", code);
315 return hci_send_acl(conn->hcon, skb, 0);
/* Kick off channel establishment: if the remote feature mask is already
 * known (info exchange done) and security passes, send a Connection
 * Request; otherwise first issue an Information Request for the feature
 * mask and start the info timer. */
318 static void l2cap_do_start(struct sock *sk)
320 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
322 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
/* Feature-mask request sent but not yet answered: wait for it. */
323 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
326 if (l2cap_check_security(sk)) {
327 struct l2cap_conn_req req;
328 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
329 req.psm = l2cap_pi(sk)->psm;
331 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
333 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
334 L2CAP_CONN_REQ, sizeof(req), &req);
337 struct l2cap_info_req req;
338 req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
340 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
341 conn->info_ident = l2cap_get_ident(conn);
343 mod_timer(&conn->info_timer, jiffies +
344 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
346 l2cap_send_cmd(conn, conn->info_ident,
347 L2CAP_INFO_REQ, sizeof(req), &req);
351 /* ---- L2CAP connections ---- */
/* Walk every channel on the connection and advance its state machine:
 * BT_CONNECT channels (outgoing) send a Connection Request once security
 * passes; BT_CONNECT2 channels (incoming) answer with a Connection
 * Response — pending+authorization-pending when setup is deferred to
 * userspace, success when ready, or pending+authentication-pending while
 * security is still being established. Non-SEQPACKET sockets are skipped. */
352 static void l2cap_conn_start(struct l2cap_conn *conn)
354 struct l2cap_chan_list *l = &conn->chan_list;
357 BT_DBG("conn %p", conn);
361 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
364 if (sk->sk_type != SOCK_SEQPACKET) {
369 if (sk->sk_state == BT_CONNECT) {
370 if (l2cap_check_security(sk)) {
371 struct l2cap_conn_req req;
372 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
373 req.psm = l2cap_pi(sk)->psm;
375 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
377 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
378 L2CAP_CONN_REQ, sizeof(req), &req);
380 } else if (sk->sk_state == BT_CONNECT2) {
381 struct l2cap_conn_rsp rsp;
/* Note: response scid/dcid are from the remote's point of view,
 * hence the swap against our local dcid/scid. */
382 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
383 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
385 if (l2cap_check_security(sk)) {
386 if (bt_sk(sk)->defer_setup) {
387 struct sock *parent = bt_sk(sk)->parent;
388 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
389 rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
390 parent->sk_data_ready(parent, 0);
393 sk->sk_state = BT_CONFIG;
394 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
395 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
398 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
399 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
402 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
403 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
409 read_unlock(&l->lock);
/* Called once the underlying ACL link is up: raw/dgram channels become
 * connected immediately; SEQPACKET channels in BT_CONNECT proceed with
 * channel setup. NOTE(review): the action taken for the BT_CONNECT case
 * (presumably l2cap_do_start) is on a dropped line. */
412 static void l2cap_conn_ready(struct l2cap_conn *conn)
414 struct l2cap_chan_list *l = &conn->chan_list;
417 BT_DBG("conn %p", conn);
421 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
424 if (sk->sk_type != SOCK_SEQPACKET) {
425 l2cap_sock_clear_timer(sk);
426 sk->sk_state = BT_CONNECTED;
427 sk->sk_state_change(sk);
428 } else if (sk->sk_state == BT_CONNECT)
434 read_unlock(&l->lock);
437 /* Notify sockets that we cannot guaranty reliability anymore */
/* Notify sockets that reliability can no longer be guaranteed: channels
 * that asked for force_reliable get the error. NOTE(review): the actual
 * error delivery (presumably sk->sk_err = err) is on a dropped line. */
438 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
440 struct l2cap_chan_list *l = &conn->chan_list;
443 BT_DBG("conn %p", conn);
447 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
448 if (l2cap_pi(sk)->force_reliable)
452 read_unlock(&l->lock);
/* Feature-mask Information Request timed out: give up waiting, mark the
 * exchange as done and let pending channels proceed anyway. */
455 static void l2cap_info_timeout(unsigned long arg)
457 struct l2cap_conn *conn = (void *) arg;
459 conn->info_ident = 0;
461 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
463 l2cap_conn_start(conn);
/* Create (or return the existing) L2CAP connection object for an ACL
 * link: allocate, attach to hcon, record MTU and addresses, set up the
 * info timer, and init locks. NOTE(review): the early-return for an
 * already-existing conn and the kzalloc failure check are on dropped
 * lines. */
466 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
468 struct l2cap_conn *conn = hcon->l2cap_data;
473 conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
477 hcon->l2cap_data = conn;
480 BT_DBG("hcon %p conn %p", hcon, conn);
482 conn->mtu = hcon->hdev->acl_mtu;
483 conn->src = &hcon->hdev->bdaddr;
484 conn->dst = &hcon->dst;
488 setup_timer(&conn->info_timer, l2cap_info_timeout,
489 (unsigned long) conn);
491 spin_lock_init(&conn->lock);
492 rwlock_init(&conn->chan_list.lock);
/* Tear down an L2CAP connection: free any partial reassembly skb, close
 * every remaining channel with `err`, cancel the info timer if it was
 * armed, and detach from the hcon. NOTE(review): the per-channel
 * bh_lock/unlock and kill calls around line 512-514, and the final
 * kfree(conn), are on dropped lines. */
497 static void l2cap_conn_del(struct hci_conn *hcon, int err)
499 struct l2cap_conn *conn = hcon->l2cap_data;
505 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
508 kfree_skb(conn->rx_skb);
511 while ((sk = conn->chan_list.head)) {
513 l2cap_chan_del(sk, err);
518 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
519 del_timer_sync(&conn->info_timer);
521 hcon->l2cap_data = NULL;
/* Locked wrapper around __l2cap_chan_add: takes the channel-list write
 * lock for the duration of the insert. */
525 static inline void l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
527 struct l2cap_chan_list *l = &conn->chan_list;
528 write_lock_bh(&l->lock);
529 __l2cap_chan_add(conn, sk, parent);
530 write_unlock_bh(&l->lock);
533 /* ---- Socket interface ---- */
/* Exact-match lookup in the global socket list by bound PSM (sport) and
 * source bdaddr. Caller must hold l2cap_sk_list.lock. */
534 static struct sock *__l2cap_get_sock_by_addr(__le16 psm, bdaddr_t *src)
537 struct hlist_node *node;
538 sk_for_each(sk, node, &l2cap_sk_list.head)
539 if (l2cap_pi(sk)->sport == psm && !bacmp(&bt_sk(sk)->src, src))
546 /* Find socket with psm and source bdaddr.
547 * Returns closest match.
/* Closest-match lookup by PSM and source address: an exact source-address
 * match wins outright; a BDADDR_ANY wildcard socket (kept in sk1) is the
 * fallback. `state` filters by socket state when nonzero. */
549 static struct sock *__l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
551 struct sock *sk = NULL, *sk1 = NULL;
552 struct hlist_node *node;
554 sk_for_each(sk, node, &l2cap_sk_list.head) {
555 if (state && sk->sk_state != state)
558 if (l2cap_pi(sk)->psm == psm) {
560 if (!bacmp(&bt_sk(sk)->src, src))
564 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
/* node non-NULL means the loop broke on an exact match. */
568 return node ? sk : sk1;
571 /* Find socket with given address (psm, src).
572 * Returns locked socket */
/* Locked wrapper: search the global list under its read lock and return
 * the socket bh-locked (caller must bh_unlock_sock). */
573 static inline struct sock *l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
576 read_lock(&l2cap_sk_list.lock);
577 s = __l2cap_get_sock_by_psm(state, psm, src);
578 if (s) bh_lock_sock(s);
579 read_unlock(&l2cap_sk_list.lock);
/* sk_destruct callback: drop any skbs still queued on the socket. */
583 static void l2cap_sock_destruct(struct sock *sk)
587 skb_queue_purge(&sk->sk_receive_queue);
588 skb_queue_purge(&sk->sk_write_queue);
/* Close every not-yet-accepted child of a listening socket, then mark
 * the parent closed and zapped. */
591 static void l2cap_sock_cleanup_listen(struct sock *parent)
595 BT_DBG("parent %p", parent);
597 /* Close not yet accepted channels */
598 while ((sk = bt_accept_dequeue(parent, NULL)))
599 l2cap_sock_close(sk);
601 parent->sk_state = BT_CLOSED;
602 sock_set_flag(parent, SOCK_ZAPPED);
605 /* Kill socket (only if zapped and orphan)
606 * Must be called on unlocked socket.
/* Kill socket (only if zapped and orphan). Must be called on an unlocked
 * socket: unlink it from the global list and mark it dead.
 * NOTE(review): the final sock_put() is on a dropped line. */
608 static void l2cap_sock_kill(struct sock *sk)
610 if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
613 BT_DBG("sk %p state %d", sk, sk->sk_state);
615 /* Kill poor orphan */
616 bt_sock_unlink(&l2cap_sk_list, sk);
617 sock_set_flag(sk, SOCK_DEAD);
/* Core close state machine (socket must be locked):
 * - BT_LISTEN: tear down the accept queue.
 * - connected/config SEQPACKET: send a Disconnect Request and arm the
 *   timer; other types just delete the channel.
 * - BT_CONNECT2 SEQPACKET: reject the pending Connection Request
 *   (security-block when setup was deferred, bad-PSM otherwise).
 * - anything else: just zap the socket. */
621 static void __l2cap_sock_close(struct sock *sk, int reason)
623 BT_DBG("sk %p state %d socket %p", sk, sk->sk_state, sk->sk_socket);
625 switch (sk->sk_state) {
627 l2cap_sock_cleanup_listen(sk);
632 if (sk->sk_type == SOCK_SEQPACKET) {
633 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
634 struct l2cap_disconn_req req;
636 sk->sk_state = BT_DISCONN;
637 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
639 req.dcid = cpu_to_le16(l2cap_pi(sk)->dcid);
640 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
641 l2cap_send_cmd(conn, l2cap_get_ident(conn),
642 L2CAP_DISCONN_REQ, sizeof(req), &req);
644 l2cap_chan_del(sk, reason);
648 if (sk->sk_type == SOCK_SEQPACKET) {
649 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
650 struct l2cap_conn_rsp rsp;
653 if (bt_sk(sk)->defer_setup)
654 result = L2CAP_CR_SEC_BLOCK;
656 result = L2CAP_CR_BAD_PSM;
658 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
659 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
660 rsp.result = cpu_to_le16(result);
661 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
662 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
663 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
665 l2cap_chan_del(sk, reason);
670 l2cap_chan_del(sk, reason);
674 sock_set_flag(sk, SOCK_ZAPPED);
679 /* Must be called on unlocked socket. */
/* Close entry point for an unlocked socket: cancel the timer and run the
 * close state machine with ECONNRESET. NOTE(review): the surrounding
 * lock_sock/release_sock and l2cap_sock_kill calls are on dropped lines. */
680 static void l2cap_sock_close(struct sock *sk)
682 l2cap_sock_clear_timer(sk);
684 __l2cap_sock_close(sk, ECONNRESET);
/* Initialise a new L2CAP socket: inherit type, defer_setup, MTUs and
 * security settings from the listening parent when one exists, otherwise
 * apply module defaults. */
689 static void l2cap_sock_init(struct sock *sk, struct sock *parent)
691 struct l2cap_pinfo *pi = l2cap_pi(sk);
696 sk->sk_type = parent->sk_type;
697 bt_sk(sk)->defer_setup = bt_sk(parent)->defer_setup;
699 pi->imtu = l2cap_pi(parent)->imtu;
700 pi->omtu = l2cap_pi(parent)->omtu;
701 pi->sec_level = l2cap_pi(parent)->sec_level;
702 pi->role_switch = l2cap_pi(parent)->role_switch;
703 pi->force_reliable = l2cap_pi(parent)->force_reliable;
705 pi->imtu = L2CAP_DEFAULT_MTU;
707 pi->sec_level = BT_SECURITY_LOW;
709 pi->force_reliable = 0;
712 /* Default config options */
714 pi->flush_to = L2CAP_DEFAULT_FLUSH_TO;
/* Protocol descriptor: per-socket private data is struct l2cap_pinfo. */
717 static struct proto l2cap_proto = {
719 .owner = THIS_MODULE,
720 .obj_size = sizeof(struct l2cap_pinfo)
/* Allocate and initialise a bare L2CAP socket: generic sock init, accept
 * queue, destructor, connect timeout, timer, and registration on the
 * global socket list. NOTE(review): the NULL check after sk_alloc is on
 * a dropped line. */
723 static struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock, int proto, gfp_t prio)
727 sk = sk_alloc(net, PF_BLUETOOTH, prio, &l2cap_proto);
731 sock_init_data(sock, sk);
732 INIT_LIST_HEAD(&bt_sk(sk)->accept_q);
734 sk->sk_destruct = l2cap_sock_destruct;
735 sk->sk_sndtimeo = msecs_to_jiffies(L2CAP_CONN_TIMEOUT);
737 sock_reset_flag(sk, SOCK_ZAPPED);
739 sk->sk_protocol = proto;
740 sk->sk_state = BT_OPEN;
742 setup_timer(&sk->sk_timer, l2cap_sock_timeout, (unsigned long) sk);
744 bt_sock_link(&l2cap_sk_list, sk);
/* socket(2) backend: only SEQPACKET/DGRAM/RAW types are supported, RAW
 * additionally requires CAP_NET_RAW. Allocates and initialises the
 * socket. NOTE(review): the -EPERM return and the allocation-failure
 * return are on dropped lines. */
748 static int l2cap_sock_create(struct net *net, struct socket *sock, int protocol)
752 BT_DBG("sock %p", sock);
754 sock->state = SS_UNCONNECTED;
756 if (sock->type != SOCK_SEQPACKET &&
757 sock->type != SOCK_DGRAM && sock->type != SOCK_RAW)
758 return -ESOCKTNOSUPPORT;
760 if (sock->type == SOCK_RAW && !capable(CAP_NET_RAW))
763 sock->ops = &l2cap_sock_ops;
765 sk = l2cap_sock_alloc(net, sock, protocol, GFP_ATOMIC);
769 l2cap_sock_init(sk, NULL);
/* bind(2): validate the address family and BT_OPEN state, require
 * CAP_NET_BIND_SERVICE for privileged PSMs (< 0x1001), reject an
 * already-bound psm/src pair, then record the source address and PSM.
 * Binding to PSM 0x0001 (SDP) lowers the security level to SDP. */
773 static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
775 struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
776 struct sock *sk = sock->sk;
779 BT_DBG("sk %p, %s %d", sk, batostr(&la->l2_bdaddr), la->l2_psm);
781 if (!addr || addr->sa_family != AF_BLUETOOTH)
786 if (sk->sk_state != BT_OPEN) {
791 if (la->l2_psm && btohs(la->l2_psm) < 0x1001 &&
792 !capable(CAP_NET_BIND_SERVICE)) {
797 write_lock_bh(&l2cap_sk_list.lock);
799 if (la->l2_psm && __l2cap_get_sock_by_addr(la->l2_psm, &la->l2_bdaddr)) {
802 /* Save source address */
803 bacpy(&bt_sk(sk)->src, &la->l2_bdaddr);
804 l2cap_pi(sk)->psm = la->l2_psm;
805 l2cap_pi(sk)->sport = la->l2_psm;
806 sk->sk_state = BT_BOUND;
808 if (btohs(la->l2_psm) == 0x0001)
809 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
812 write_unlock_bh(&l2cap_sk_list.lock);
/* Establish the outgoing connection: route to an HCI device, select the
 * HCI auth type from socket type / PSM / security level (RAW gets
 * dedicated bonding, PSM 0x0001 no bonding, everything else general
 * bonding), create the ACL link and L2CAP connection, attach the
 * channel, and arm the connect timer. If the ACL is already up,
 * non-SEQPACKET sockets become connected immediately.
 * NOTE(review): the hci_conn NULL check and the SEQPACKET
 * l2cap_do_start path around lines 871-897 are on dropped lines. */
819 static int l2cap_do_connect(struct sock *sk)
821 bdaddr_t *src = &bt_sk(sk)->src;
822 bdaddr_t *dst = &bt_sk(sk)->dst;
823 struct l2cap_conn *conn;
824 struct hci_conn *hcon;
825 struct hci_dev *hdev;
829 BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst), l2cap_pi(sk)->psm);
831 if (!(hdev = hci_get_route(dst, src)))
832 return -EHOSTUNREACH;
834 hci_dev_lock_bh(hdev);
838 if (sk->sk_type == SOCK_RAW) {
839 switch (l2cap_pi(sk)->sec_level) {
840 case BT_SECURITY_HIGH:
841 auth_type = HCI_AT_DEDICATED_BONDING_MITM;
843 case BT_SECURITY_MEDIUM:
844 auth_type = HCI_AT_DEDICATED_BONDING;
847 auth_type = HCI_AT_NO_BONDING;
850 } else if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
851 if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
852 auth_type = HCI_AT_NO_BONDING_MITM;
854 auth_type = HCI_AT_NO_BONDING;
856 switch (l2cap_pi(sk)->sec_level) {
857 case BT_SECURITY_HIGH:
858 auth_type = HCI_AT_GENERAL_BONDING_MITM;
860 case BT_SECURITY_MEDIUM:
861 auth_type = HCI_AT_GENERAL_BONDING;
864 auth_type = HCI_AT_NO_BONDING;
869 hcon = hci_connect(hdev, ACL_LINK, dst,
870 l2cap_pi(sk)->sec_level, auth_type);
874 conn = l2cap_conn_add(hcon, 0);
882 /* Update source addr of the socket */
883 bacpy(src, conn->src);
885 l2cap_chan_add(conn, sk, NULL);
887 sk->sk_state = BT_CONNECT;
888 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
890 if (hcon->state == BT_CONNECTED) {
891 if (sk->sk_type != SOCK_SEQPACKET) {
892 l2cap_sock_clear_timer(sk);
893 sk->sk_state = BT_CONNECTED;
899 hci_dev_unlock_bh(hdev);
/* connect(2): validate the sockaddr (SEQPACKET requires a PSM), reject
 * states that are already connecting/connected, record the destination
 * address and PSM, start the connection, and optionally block until
 * BT_CONNECTED per the socket's timeout/O_NONBLOCK. */
904 static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, int alen, int flags)
906 struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
907 struct sock *sk = sock->sk;
914 if (addr->sa_family != AF_BLUETOOTH || alen < sizeof(struct sockaddr_l2)) {
919 if (sk->sk_type == SOCK_SEQPACKET && !la->l2_psm) {
924 switch(sk->sk_state) {
928 /* Already connecting */
932 /* Already connected */
945 /* Set destination address and psm */
946 bacpy(&bt_sk(sk)->dst, &la->l2_bdaddr);
947 l2cap_pi(sk)->psm = la->l2_psm;
949 if ((err = l2cap_do_connect(sk)))
953 err = bt_sock_wait_state(sk, BT_CONNECTED,
954 sock_sndtimeo(sk, flags & O_NONBLOCK));
/* listen(2): only bound SEQPACKET sockets may listen. If no PSM was
 * bound, autobind the first free odd dynamic PSM in [0x1001, 0x1100)
 * (valid PSMs have an odd least-significant octet). */
960 static int l2cap_sock_listen(struct socket *sock, int backlog)
962 struct sock *sk = sock->sk;
965 BT_DBG("sk %p backlog %d", sk, backlog);
969 if (sk->sk_state != BT_BOUND || sock->type != SOCK_SEQPACKET) {
974 if (!l2cap_pi(sk)->psm) {
975 bdaddr_t *src = &bt_sk(sk)->src;
980 write_lock_bh(&l2cap_sk_list.lock);
982 for (psm = 0x1001; psm < 0x1100; psm += 2)
983 if (!__l2cap_get_sock_by_addr(htobs(psm), src)) {
984 l2cap_pi(sk)->psm = htobs(psm);
985 l2cap_pi(sk)->sport = htobs(psm);
990 write_unlock_bh(&l2cap_sk_list.lock);
996 sk->sk_max_ack_backlog = backlog;
997 sk->sk_ack_backlog = 0;
998 sk->sk_state = BT_LISTEN;
/* accept(2): standard wake-one wait loop on the listening socket's wait
 * queue — sleep (dropping the socket lock via release_sock on a dropped
 * line), re-check listen state and signals each iteration, and return
 * the first dequeued child attached to newsock. */
1005 static int l2cap_sock_accept(struct socket *sock, struct socket *newsock, int flags)
1007 DECLARE_WAITQUEUE(wait, current);
1008 struct sock *sk = sock->sk, *nsk;
1012 lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
1014 if (sk->sk_state != BT_LISTEN) {
1019 timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
1021 BT_DBG("sk %p timeo %ld", sk, timeo);
1023 /* Wait for an incoming connection. (wake-one). */
1024 add_wait_queue_exclusive(sk->sk_sleep, &wait);
1025 while (!(nsk = bt_accept_dequeue(sk, newsock))) {
1026 set_current_state(TASK_INTERRUPTIBLE);
1033 timeo = schedule_timeout(timeo);
1034 lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
1036 if (sk->sk_state != BT_LISTEN) {
1041 if (signal_pending(current)) {
1042 err = sock_intr_errno(timeo);
1046 set_current_state(TASK_RUNNING);
1047 remove_wait_queue(sk->sk_sleep, &wait);
1052 newsock->state = SS_CONNECTED;
1054 BT_DBG("new socket %p", nsk);
/* getsockname/getpeername: fill a sockaddr_l2 with the peer's address
 * when `peer` is set, otherwise the local address, plus the PSM. */
1061 static int l2cap_sock_getname(struct socket *sock, struct sockaddr *addr, int *len, int peer)
1063 struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
1064 struct sock *sk = sock->sk;
1066 BT_DBG("sock %p, sk %p", sock, sk);
1068 addr->sa_family = AF_BLUETOOTH;
1069 *len = sizeof(struct sockaddr_l2);
1072 bacpy(&la->l2_bdaddr, &bt_sk(sk)->dst);
1074 bacpy(&la->l2_bdaddr, &bt_sk(sk)->src);
1076 la->l2_psm = l2cap_pi(sk)->psm;
/* Copy user data into an skb chain sized by the connection's ACL MTU and
 * hand it to hci_send_acl. The first fragment carries the L2CAP basic
 * header (plus a 2-byte PSM for connectionless DGRAM sockets); the
 * continuation fragments hang off frag_list with no header.
 * NOTE(review): the allocation-failure paths and the sent/len loop
 * bookkeeping fall on dropped lines. */
1080 static inline int l2cap_do_send(struct sock *sk, struct msghdr *msg, int len)
1082 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1083 struct sk_buff *skb, **frag;
1084 int err, hlen, count, sent=0;
1085 struct l2cap_hdr *lh;
1087 BT_DBG("sk %p len %d", sk, len);
1089 /* First fragment (with L2CAP header) */
1090 if (sk->sk_type == SOCK_DGRAM)
1091 hlen = L2CAP_HDR_SIZE + 2;
1093 hlen = L2CAP_HDR_SIZE;
1095 count = min_t(unsigned int, (conn->mtu - hlen), len);
1097 skb = bt_skb_send_alloc(sk, hlen + count,
1098 msg->msg_flags & MSG_DONTWAIT, &err);
1102 /* Create L2CAP header */
1103 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1104 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1105 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1107 if (sk->sk_type == SOCK_DGRAM)
1108 put_unaligned(l2cap_pi(sk)->psm, (__le16 *) skb_put(skb, 2));
1110 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count)) {
1118 /* Continuation fragments (no L2CAP header) */
1119 frag = &skb_shinfo(skb)->frag_list;
1121 count = min_t(unsigned int, conn->mtu, len);
1123 *frag = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err);
1127 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count)) {
1135 frag = &(*frag)->next;
1138 if ((err = hci_send_acl(conn->hcon, skb, 0)) < 0)
/* sendmsg(2): reject pending socket errors, MSG_OOB, and payloads over
 * the negotiated outgoing MTU (RAW exempt), then send when connected. */
1148 static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len)
1150 struct sock *sk = sock->sk;
1153 BT_DBG("sock %p, sk %p", sock, sk);
1155 err = sock_error(sk);
1159 if (msg->msg_flags & MSG_OOB)
1162 /* Check outgoing MTU */
1163 if (sk->sk_type != SOCK_RAW && len > l2cap_pi(sk)->omtu)
1168 if (sk->sk_state == BT_CONNECTED)
1169 err = l2cap_do_send(sk, msg, len);
/* recvmsg(2): the first read on a defer_setup socket still in
 * BT_CONNECT2 completes the deferred accept — move to BT_CONFIG and
 * send the success Connection Response — before delegating to the
 * generic Bluetooth recvmsg. */
1177 static int l2cap_sock_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len, int flags)
1179 struct sock *sk = sock->sk;
1183 if (sk->sk_state == BT_CONNECT2 && bt_sk(sk)->defer_setup) {
1184 struct l2cap_conn_rsp rsp;
1186 sk->sk_state = BT_CONFIG;
1188 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
1189 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
1190 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
1191 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
1192 l2cap_send_cmd(l2cap_pi(sk)->conn, l2cap_pi(sk)->ident,
1193 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
1201 return bt_sock_recvmsg(iocb, sock, msg, len, flags);
/* Legacy SOL_L2CAP setsockopt: L2CAP_OPTIONS updates imtu/omtu (existing
 * values are the defaults for a short copy); L2CAP_LM maps the legacy
 * link-mode bits onto the new sec_level / role_switch / force_reliable
 * fields. NOTE(review): the switch(optname) and case labels fall on
 * dropped lines. */
1204 static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __user *optval, int optlen)
1206 struct sock *sk = sock->sk;
1207 struct l2cap_options opts;
1211 BT_DBG("sk %p", sk);
1217 opts.imtu = l2cap_pi(sk)->imtu;
1218 opts.omtu = l2cap_pi(sk)->omtu;
1219 opts.flush_to = l2cap_pi(sk)->flush_to;
1220 opts.mode = L2CAP_MODE_BASIC;
1222 len = min_t(unsigned int, sizeof(opts), optlen);
1223 if (copy_from_user((char *) &opts, optval, len)) {
1228 l2cap_pi(sk)->imtu = opts.imtu;
1229 l2cap_pi(sk)->omtu = opts.omtu;
1233 if (get_user(opt, (u32 __user *) optval)) {
/* Highest requested legacy bit wins (SECURE > ENCRYPT > AUTH). */
1238 if (opt & L2CAP_LM_AUTH)
1239 l2cap_pi(sk)->sec_level = BT_SECURITY_LOW;
1240 if (opt & L2CAP_LM_ENCRYPT)
1241 l2cap_pi(sk)->sec_level = BT_SECURITY_MEDIUM;
1242 if (opt & L2CAP_LM_SECURE)
1243 l2cap_pi(sk)->sec_level = BT_SECURITY_HIGH;
1245 l2cap_pi(sk)->role_switch = (opt & L2CAP_LM_MASTER);
1246 l2cap_pi(sk)->force_reliable = (opt & L2CAP_LM_RELIABLE);
/* setsockopt(2): legacy SOL_L2CAP goes to the _old handler; SOL_BLUETOOTH
 * supports BT_SECURITY (SEQPACKET only, level range-checked) and
 * BT_DEFER_SETUP (only while bound/listening). */
1258 static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, int optlen)
1260 struct sock *sk = sock->sk;
1261 struct bt_security sec;
1265 BT_DBG("sk %p", sk);
1267 if (level == SOL_L2CAP)
1268 return l2cap_sock_setsockopt_old(sock, optname, optval, optlen);
1270 if (level != SOL_BLUETOOTH)
1271 return -ENOPROTOOPT;
1277 if (sk->sk_type != SOCK_SEQPACKET) {
1282 sec.level = BT_SECURITY_LOW;
1284 len = min_t(unsigned int, sizeof(sec), optlen);
1285 if (copy_from_user((char *) &sec, optval, len)) {
1290 if (sec.level < BT_SECURITY_LOW ||
1291 sec.level > BT_SECURITY_HIGH) {
1296 l2cap_pi(sk)->sec_level = sec.level;
1299 case BT_DEFER_SETUP:
1300 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
1305 if (get_user(opt, (u32 __user *) optval)) {
1310 bt_sk(sk)->defer_setup = opt;
/* Legacy SOL_L2CAP getsockopt: L2CAP_OPTIONS returns the negotiated
 * MTUs/flush timeout (mode always basic); L2CAP_LM reconstructs the
 * legacy link-mode bitmask from sec_level/role_switch/force_reliable;
 * L2CAP_CONNINFO returns the ACL handle and device class once the
 * channel is connected (or in deferred-setup BT_CONNECT2). */
1322 static int l2cap_sock_getsockopt_old(struct socket *sock, int optname, char __user *optval, int __user *optlen)
1324 struct sock *sk = sock->sk;
1325 struct l2cap_options opts;
1326 struct l2cap_conninfo cinfo;
1330 BT_DBG("sk %p", sk);
1332 if (get_user(len, optlen))
1339 opts.imtu = l2cap_pi(sk)->imtu;
1340 opts.omtu = l2cap_pi(sk)->omtu;
1341 opts.flush_to = l2cap_pi(sk)->flush_to;
1342 opts.mode = L2CAP_MODE_BASIC;
1344 len = min_t(unsigned int, len, sizeof(opts));
1345 if (copy_to_user(optval, (char *) &opts, len))
1351 switch (l2cap_pi(sk)->sec_level) {
1352 case BT_SECURITY_LOW:
1353 opt = L2CAP_LM_AUTH;
1355 case BT_SECURITY_MEDIUM:
1356 opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT;
1358 case BT_SECURITY_HIGH:
1359 opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT |
1367 if (l2cap_pi(sk)->role_switch)
1368 opt |= L2CAP_LM_MASTER;
1370 if (l2cap_pi(sk)->force_reliable)
1371 opt |= L2CAP_LM_RELIABLE;
1373 if (put_user(opt, (u32 __user *) optval))
1377 case L2CAP_CONNINFO:
1378 if (sk->sk_state != BT_CONNECTED &&
1379 !(sk->sk_state == BT_CONNECT2 &&
1380 bt_sk(sk)->defer_setup)) {
1385 cinfo.hci_handle = l2cap_pi(sk)->conn->hcon->handle;
1386 memcpy(cinfo.dev_class, l2cap_pi(sk)->conn->hcon->dev_class, 3);
1388 len = min_t(unsigned int, len, sizeof(cinfo));
1389 if (copy_to_user(optval, (char *) &cinfo, len))
/* getsockopt(2): legacy SOL_L2CAP delegates to the _old handler;
 * SOL_BLUETOOTH supports BT_SECURITY (SEQPACKET only) and
 * BT_DEFER_SETUP (only while bound/listening). */
1403 static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen)
1405 struct sock *sk = sock->sk;
1406 struct bt_security sec;
1409 BT_DBG("sk %p", sk);
1411 if (level == SOL_L2CAP)
1412 return l2cap_sock_getsockopt_old(sock, optname, optval, optlen);
1414 if (level != SOL_BLUETOOTH)
1415 return -ENOPROTOOPT;
1417 if (get_user(len, optlen))
1424 if (sk->sk_type != SOCK_SEQPACKET) {
1429 sec.level = l2cap_pi(sk)->sec_level;
1431 len = min_t(unsigned int, len, sizeof(sec));
1432 if (copy_to_user(optval, (char *) &sec, len))
1437 case BT_DEFER_SETUP:
1438 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
1443 if (put_user(bt_sk(sk)->defer_setup, (u32 __user *) optval))
/* shutdown(2): always full shutdown — close the channel immediately
 * and, when SO_LINGER is set with a nonzero timeout, wait for the
 * socket to reach BT_CLOSED. */
1457 static int l2cap_sock_shutdown(struct socket *sock, int how)
1459 struct sock *sk = sock->sk;
1462 BT_DBG("sock %p, sk %p", sock, sk);
1468 if (!sk->sk_shutdown) {
1469 sk->sk_shutdown = SHUTDOWN_MASK;
1470 l2cap_sock_clear_timer(sk);
1471 __l2cap_sock_close(sk, 0);
1473 if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
1474 err = bt_sock_wait_state(sk, BT_CLOSED,
/* close(2)/release: shut the socket down, then kill it once it is a
 * zapped orphan. NOTE(review): the sock_orphan() call between shutdown
 * and kill is on a dropped line. */
1481 static int l2cap_sock_release(struct socket *sock)
1483 struct sock *sk = sock->sk;
1486 BT_DBG("sock %p, sk %p", sock, sk);
1491 err = l2cap_sock_shutdown(sock, 2);
1494 l2cap_sock_kill(sk);
/* Configuration finished: clear config state and the setup timer, then
 * wake whoever is waiting — the connecting thread for an outgoing
 * channel (no parent) or the listener's accept queue for an incoming
 * one. */
1498 static void l2cap_chan_ready(struct sock *sk)
1500 struct sock *parent = bt_sk(sk)->parent;
1502 BT_DBG("sk %p, parent %p", sk, parent);
1504 l2cap_pi(sk)->conf_state = 0;
1505 l2cap_sock_clear_timer(sk);
1508 /* Outgoing channel.
1509 * Wake up socket sleeping on connect.
1511 sk->sk_state = BT_CONNECTED;
1512 sk->sk_state_change(sk);
1514 /* Incoming channel.
1515 * Wake up socket sleeping on accept.
1517 parent->sk_data_ready(parent, 0);
1521 /* Copy frame to all raw sockets on that connection */
/* Copy frame to all raw sockets on that connection: clone the skb per
 * raw socket (skipping the originator) and queue it; a clone that the
 * receive queue rejects is presumably freed on a dropped line. */
1522 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
1524 struct l2cap_chan_list *l = &conn->chan_list;
1525 struct sk_buff *nskb;
1528 BT_DBG("conn %p", conn);
1530 read_lock(&l->lock);
1531 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
1532 if (sk->sk_type != SOCK_RAW)
1535 /* Don't send frame to the socket it came from */
1539 if (!(nskb = skb_clone(skb, GFP_ATOMIC)))
1542 if (sock_queue_rcv_skb(sk, nskb))
1545 read_unlock(&l->lock);
1548 /* ---- L2CAP signalling commands ---- */
/* Build a signalling PDU on CID 0x0001: basic L2CAP header + command
 * header + payload, fragmented across frag_list entries when the total
 * exceeds the connection MTU. Returns the head skb (NULL on allocation
 * failure — the checks fall on dropped lines). */
1549 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
1550 u8 code, u8 ident, u16 dlen, void *data)
1552 struct sk_buff *skb, **frag;
1553 struct l2cap_cmd_hdr *cmd;
1554 struct l2cap_hdr *lh;
1557 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d", conn, code, ident, dlen);
1559 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
1560 count = min_t(unsigned int, conn->mtu, len);
1562 skb = bt_skb_alloc(count, GFP_ATOMIC);
1566 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1567 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
/* All signalling traffic travels on the fixed signalling channel. */
1568 lh->cid = cpu_to_le16(0x0001);
1570 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
1573 cmd->len = cpu_to_le16(dlen);
1576 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
1577 memcpy(skb_put(skb, count), data, count);
1583 /* Continuation fragments (no L2CAP header) */
1584 frag = &skb_shinfo(skb)->frag_list;
1586 count = min_t(unsigned int, conn->mtu, len);
1588 *frag = bt_skb_alloc(count, GFP_ATOMIC);
1592 memcpy(skb_put(*frag, count), data, count);
1597 frag = &(*frag)->next;
/* Decode one configuration option (TLV) at *ptr: report its type and
 * length, extract the value by width (1/2/4 bytes byte-swapped, larger
 * values returned as a pointer), and return the bytes consumed so the
 * caller can advance. */
1607 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
1609 struct l2cap_conf_opt *opt = *ptr;
1612 len = L2CAP_CONF_OPT_SIZE + opt->len;
1620 *val = *((u8 *) opt->val);
1624 *val = __le16_to_cpu(*((__le16 *) opt->val));
1628 *val = __le32_to_cpu(*((__le32 *) opt->val));
1632 *val = (unsigned long) opt->val;
1636 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
/* Append one configuration option (TLV) at *ptr, encoding the value by
 * its declared length (1/2/4 bytes little-endian, otherwise raw copy
 * from the pointer in `val`), and advance *ptr past it. */
1640 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
1642 struct l2cap_conf_opt *opt = *ptr;
1644 BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
1651 *((u8 *) opt->val) = val;
1655 *((__le16 *) opt->val) = cpu_to_le16(val);
1659 *((__le32 *) opt->val) = cpu_to_le32(val);
1663 memcpy(opt->val, (void *) val, len);
1667 *ptr += L2CAP_CONF_OPT_SIZE + len;
/* Build our Configuration Request into `data`: include an MTU option
 * only when our incoming MTU differs from the default, fill the header
 * fields, and (on a dropped line) return the total request length. */
1670 static int l2cap_build_conf_req(struct sock *sk, void *data)
1672 struct l2cap_pinfo *pi = l2cap_pi(sk);
1673 struct l2cap_conf_req *req = data;
1674 void *ptr = req->data;
1676 BT_DBG("sk %p", sk);
1678 if (pi->imtu != L2CAP_DEFAULT_MTU)
1679 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);
1681 /* FIXME: Need actual value of the flush timeout */
1682 //if (flush_to != L2CAP_DEFAULT_FLUSH_TO)
1683 // l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2, pi->flush_to);
1685 req->dcid = cpu_to_le16(pi->dcid);
1686 req->flags = cpu_to_le16(0);
/* Parse the peer's stored Configuration Request (pi->conf_req/conf_len)
 * and build our Configuration Response into `data`. Recognised options:
 * MTU, flush timeout, QoS, RFC; unknown non-hint options produce an
 * UNKNOWN result listing the offending types. Only basic mode is
 * accepted — any other RFC mode is answered UNACCEPT with a basic-mode
 * RFC option. Returns (on a dropped line) the response length. */
1691 static int l2cap_parse_conf_req(struct sock *sk, void *data)
1693 struct l2cap_pinfo *pi = l2cap_pi(sk);
1694 struct l2cap_conf_rsp *rsp = data;
1695 void *ptr = rsp->data;
1696 void *req = pi->conf_req;
1697 int len = pi->conf_len;
1698 int type, hint, olen;
1700 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
1701 u16 mtu = L2CAP_DEFAULT_MTU;
1702 u16 result = L2CAP_CONF_SUCCESS;
1704 BT_DBG("sk %p", sk);
1706 while (len >= L2CAP_CONF_OPT_SIZE) {
1707 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
1713 case L2CAP_CONF_MTU:
1717 case L2CAP_CONF_FLUSH_TO:
1721 case L2CAP_CONF_QOS:
1724 case L2CAP_CONF_RFC:
1725 if (olen == sizeof(rfc))
1726 memcpy(&rfc, (void *) val, olen);
1733 result = L2CAP_CONF_UNKNOWN;
1734 *((u8 *) ptr++) = type;
1739 if (result == L2CAP_CONF_SUCCESS) {
1740 /* Configure output options and let the other side know
1741 * which ones we don't like. */
1743 if (rfc.mode == L2CAP_MODE_BASIC) {
1745 result = L2CAP_CONF_UNACCEPT;
1748 pi->conf_state |= L2CAP_CONF_OUTPUT_DONE;
1751 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);
1753 result = L2CAP_CONF_UNACCEPT;
1755 memset(&rfc, 0, sizeof(rfc));
1756 rfc.mode = L2CAP_MODE_BASIC;
1758 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
1759 sizeof(rfc), (unsigned long) &rfc);
1763 rsp->scid = cpu_to_le16(pi->dcid);
1764 rsp->result = cpu_to_le16(result);
1765 rsp->flags = cpu_to_le16(0x0000);
/* Build a minimal (option-less) Configure Response with the given
 * result and flags.  NOTE(review): the returned length (presumably
 * ptr - data) is elided in this excerpt. */
1770 static int l2cap_build_conf_rsp(struct sock *sk, void *data, u16 result, u16 flags)
1772 struct l2cap_conf_rsp *rsp = data;
1773 void *ptr = rsp->data;
1775 BT_DBG("sk %p", sk);
/* 'scid' in the response is the peer's source CID, i.e. our dcid. */
1777 rsp->scid = cpu_to_le16(l2cap_pi(sk)->dcid);
1778 rsp->result = cpu_to_le16(result);
1779 rsp->flags = cpu_to_le16(flags);
/* Handle an incoming Command Reject.  Only acted on when it matches
 * our outstanding Information Request: treat it as "feature mask not
 * supported", stop the info timer and resume pending connections. */
1784 static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1786 struct l2cap_cmd_rej *rej = (struct l2cap_cmd_rej *) data;
/* Reason 0x0000 = "command not understood"; anything else ignored. */
1788 if (rej->reason != 0x0000)
1791 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
1792 cmd->ident == conn->info_ident) {
1793 conn->info_ident = 0;
1794 del_timer(&conn->info_timer);
1796 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
/* Kick off any channels that were waiting for the info exchange. */
1798 l2cap_conn_start(conn);
/* Handle an incoming Connection Request: find a listening socket for
 * the PSM, enforce link security (except for SDP), allocate and set up
 * a child socket/channel, then send a Connection Response.  If the
 * remote feature mask is not yet known, also fire an Information
 * Request and answer with result PENDING.
 * NOTE(review): excerpt is elided -- the error-path labels/gotos and
 * several closing braces are not visible here. */
1804 static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1806 struct l2cap_chan_list *list = &conn->chan_list;
1807 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
1808 struct l2cap_conn_rsp rsp;
1809 struct sock *sk, *parent;
1810 int result, status = L2CAP_CS_NO_INFO;
1812 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
/* PSM kept in wire (little-endian) byte order throughout. */
1813 __le16 psm = req->psm;
1815 BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);
1817 /* Check if we have socket listening on psm */
1818 parent = l2cap_get_sock_by_psm(BT_LISTEN, psm, conn->src);
1820 result = L2CAP_CR_BAD_PSM;
1824 /* Check if the ACL is secure enough (if not SDP) */
/* PSM 0x0001 is SDP, which is exempt from the link-mode check. */
1825 if (psm != cpu_to_le16(0x0001) &&
1826 !hci_conn_check_link_mode(conn->hcon)) {
1827 result = L2CAP_CR_SEC_BLOCK;
1831 result = L2CAP_CR_NO_MEM;
1833 /* Check for backlog size */
1834 if (sk_acceptq_is_full(parent)) {
1835 BT_DBG("backlog full %d", parent->sk_ack_backlog);
/* Child socket for the new channel (atomic: softirq context). */
1839 sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP, GFP_ATOMIC);
1843 write_lock_bh(&list->lock);
1845 /* Check if we already have channel with that dcid */
1846 if (__l2cap_get_chan_by_dcid(list, scid)) {
1847 write_unlock_bh(&list->lock);
1848 sock_set_flag(sk, SOCK_ZAPPED);
1849 l2cap_sock_kill(sk);
/* Hold the ACL link for the lifetime of this channel. */
1853 hci_conn_hold(conn->hcon);
1855 l2cap_sock_init(sk, parent);
1856 bacpy(&bt_sk(sk)->src, conn->src);
1857 bacpy(&bt_sk(sk)->dst, conn->dst);
1858 l2cap_pi(sk)->psm = psm;
1859 l2cap_pi(sk)->dcid = scid;
1861 __l2cap_chan_add(conn, sk, parent);
/* Our local CID, assigned by __l2cap_chan_add, goes back as dcid. */
1862 dcid = l2cap_pi(sk)->scid;
1864 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
1866 l2cap_pi(sk)->ident = cmd->ident;
1868 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
1869 if (l2cap_check_security(sk)) {
1870 if (bt_sk(sk)->defer_setup) {
/* Userspace must accept() before we complete the setup. */
1871 sk->sk_state = BT_CONNECT2;
1872 result = L2CAP_CR_PEND;
1873 status = L2CAP_CS_AUTHOR_PEND;
1874 parent->sk_data_ready(parent, 0);
1876 sk->sk_state = BT_CONFIG;
1877 result = L2CAP_CR_SUCCESS;
1878 status = L2CAP_CS_NO_INFO;
/* Security still pending: answer PEND/authentication pending. */
1881 sk->sk_state = BT_CONNECT2;
1882 result = L2CAP_CR_PEND;
1883 status = L2CAP_CS_AUTHEN_PEND;
/* Feature-mask exchange not done yet: hold the connect pending. */
1886 sk->sk_state = BT_CONNECT2;
1887 result = L2CAP_CR_PEND;
1888 status = L2CAP_CS_NO_INFO;
1891 write_unlock_bh(&list->lock);
1894 bh_unlock_sock(parent);
1897 rsp.scid = cpu_to_le16(scid);
1898 rsp.dcid = cpu_to_le16(dcid);
1899 rsp.result = cpu_to_le16(result);
1900 rsp.status = cpu_to_le16(status);
1901 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
/* First connect on this ACL: start the feature-mask exchange. */
1903 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
1904 struct l2cap_info_req info;
1905 info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
1907 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
1908 conn->info_ident = l2cap_get_ident(conn);
1910 mod_timer(&conn->info_timer, jiffies +
1911 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
1913 l2cap_send_cmd(conn, conn->info_ident,
1914 L2CAP_INFO_REQ, sizeof(info), &info);
/* Handle a Connection Response for a channel we initiated.  Looks the
 * channel up by scid (or, failing that, by the command ident), then on
 * SUCCESS enters BT_CONFIG and sends our Configure Request; on PEND
 * just marks the channel connect-pending; otherwise tears it down.
 * NOTE(review): excerpt is elided -- the switch statement, the 'req'
 * buffer declaration and the unlock/return paths are not visible. */
1920 static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1922 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
1923 u16 scid, dcid, result, status;
1927 scid = __le16_to_cpu(rsp->scid);
1928 dcid = __le16_to_cpu(rsp->dcid);
1929 result = __le16_to_cpu(rsp->result);
1930 status = __le16_to_cpu(rsp->status);
1932 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);
/* A PEND response may arrive before we learned the scid, so fall
 * back to matching by the request ident. */
1935 if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, scid)))
1938 if (!(sk = l2cap_get_chan_by_ident(&conn->chan_list, cmd->ident)))
1943 case L2CAP_CR_SUCCESS:
1944 sk->sk_state = BT_CONFIG;
1945 l2cap_pi(sk)->ident = 0;
1946 l2cap_pi(sk)->dcid = dcid;
1947 l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
1949 l2cap_pi(sk)->conf_state &= ~L2CAP_CONF_CONNECT_PEND;
1951 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
1952 l2cap_build_conf_req(sk, req), req);
/* L2CAP_CR_PEND: remember that the connect is still in flight. */
1956 l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
/* Any other result: refuse and delete the channel. */
1960 l2cap_chan_del(sk, ECONNREFUSED);
/* Handle an incoming Configure Request.  Fragmented requests (flag bit
 * 0x0001 set) are accumulated in pi->conf_req until complete, then
 * parsed into a response.  When both directions are configured the
 * channel goes to BT_CONNECTED; if we have not yet sent our own
 * Configure Request, send it now.
 * NOTE(review): excerpt is elided -- the rsp/buf declarations, the
 * REJECT 'goto' targets and the unlock/return are not visible. */
1968 static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
1970 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
1976 dcid = __le16_to_cpu(req->dcid);
1977 flags = __le16_to_cpu(req->flags);
1979 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
1981 if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid)))
1984 if (sk->sk_state == BT_DISCONN)
1987 /* Reject if config buffer is too small. */
1988 len = cmd_len - sizeof(*req);
1989 if (l2cap_pi(sk)->conf_len + len > sizeof(l2cap_pi(sk)->conf_req)) {
1990 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
1991 l2cap_build_conf_rsp(sk, rsp,
1992 L2CAP_CONF_REJECT, flags), rsp);
/* Append this fragment to the per-channel config buffer. */
1997 memcpy(l2cap_pi(sk)->conf_req + l2cap_pi(sk)->conf_len, req->data, len);
1998 l2cap_pi(sk)->conf_len += len;
2000 if (flags & 0x0001) {
2001 /* Incomplete config. Send empty response. */
2002 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2003 l2cap_build_conf_rsp(sk, rsp,
2004 L2CAP_CONF_SUCCESS, 0x0001), rsp);
2008 /* Complete config. */
2009 len = l2cap_parse_conf_req(sk, rsp);
2013 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
2015 /* Reset config buffer. */
2016 l2cap_pi(sk)->conf_len = 0;
2018 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE))
/* Both sides configured: the channel is now usable. */
2021 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_INPUT_DONE) {
2022 sk->sk_state = BT_CONNECTED;
2023 l2cap_chan_ready(sk);
2027 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)) {
2029 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2030 l2cap_build_conf_req(sk, buf), buf);
/* Handle a Configure Response to our request.  SUCCESS marks our
 * input direction done; UNACCEPT is retried a bounded number of times
 * by resending the same request, after which the channel is torn down
 * with a Disconnection Request.
 * NOTE(review): excerpt is elided -- the switch statement, the 'req'
 * buffer, the retry-exhausted branch structure and unlock/return are
 * not visible here. */
2038 static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2040 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
2041 u16 scid, flags, result;
2044 scid = __le16_to_cpu(rsp->scid);
2045 flags = __le16_to_cpu(rsp->flags);
2046 result = __le16_to_cpu(rsp->result);
2048 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x", scid, flags, result);
2050 if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, scid)))
2054 case L2CAP_CONF_SUCCESS:
2057 case L2CAP_CONF_UNACCEPT:
2058 if (++l2cap_pi(sk)->conf_retry < L2CAP_CONF_MAX_RETRIES) {
2060 /* It does not make sense to adjust L2CAP parameters
2061 * that are currently defined in the spec. We simply
2062 * resend config request that we sent earlier. It is
2063 * stupid, but it helps qualification testing which
2064 * expects at least some response from us. */
2065 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2066 l2cap_build_conf_req(sk, req), req);
/* Retries exhausted (or other failure): disconnect the channel. */
2071 sk->sk_state = BT_DISCONN;
2072 sk->sk_err = ECONNRESET;
2073 l2cap_sock_set_timer(sk, HZ * 5);
2075 struct l2cap_disconn_req req;
2076 req.dcid = cpu_to_le16(l2cap_pi(sk)->dcid);
2077 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
2078 l2cap_send_cmd(conn, l2cap_get_ident(conn),
2079 L2CAP_DISCONN_REQ, sizeof(req), &req);
/* Our outgoing configuration was accepted. */
2087 l2cap_pi(sk)->conf_state |= L2CAP_CONF_INPUT_DONE;
2089 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE) {
2090 sk->sk_state = BT_CONNECTED;
2091 l2cap_chan_ready(sk);
/* Handle an incoming Disconnection Request: acknowledge with a
 * Disconnection Response, shut the socket down, delete the channel
 * and kill the (now zapped) socket. */
2099 static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2101 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
2102 struct l2cap_disconn_rsp rsp;
2106 scid = __le16_to_cpu(req->scid);
2107 dcid = __le16_to_cpu(req->dcid);
2109 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
/* The peer's dcid is our local scid: look up by it. */
2111 if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid)))
2114 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
2115 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
2116 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
2118 sk->sk_shutdown = SHUTDOWN_MASK;
2120 l2cap_chan_del(sk, ECONNRESET);
2123 l2cap_sock_kill(sk);
/* Handle a Disconnection Response to our request: remove the channel
 * (no error reported to the user) and kill the socket. */
2127 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2129 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
2133 scid = __le16_to_cpu(rsp->scid);
2134 dcid = __le16_to_cpu(rsp->dcid);
2136 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
2138 if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, scid)))
/* err == 0: clean, locally initiated disconnect. */
2141 l2cap_chan_del(sk, 0);
2144 l2cap_sock_kill(sk);
/* Handle an incoming Information Request.  Only the feature-mask
 * query is supported (answered with l2cap_feat_mask); anything else
 * gets result NOT_SUPPORTED. */
2148 static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2150 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
2153 type = __le16_to_cpu(req->type);
2155 BT_DBG("type 0x%4.4x", type);
2157 if (type == L2CAP_IT_FEAT_MASK) {
2159 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
2160 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
2161 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
/* rsp->data may be unaligned inside the stack buffer. */
2162 put_unaligned(cpu_to_le32(l2cap_feat_mask), (__le32 *) rsp->data);
2163 l2cap_send_cmd(conn, cmd->ident,
2164 L2CAP_INFO_RSP, sizeof(buf), buf);
2166 struct l2cap_info_rsp rsp;
2167 rsp.type = cpu_to_le16(type);
2168 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
2169 l2cap_send_cmd(conn, cmd->ident,
2170 L2CAP_INFO_RSP, sizeof(rsp), &rsp);
/* Handle an Information Response: stop the info timer, record the
 * remote feature mask when present, mark the exchange done and start
 * any channels that were waiting on it. */
2176 static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2178 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
2181 type = __le16_to_cpu(rsp->type);
2182 result = __le16_to_cpu(rsp->result);
2184 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
2186 conn->info_ident = 0;
2188 del_timer(&conn->info_timer);
2190 if (type == L2CAP_IT_FEAT_MASK) {
/* Unaligned little-endian read straight from the response data. */
2191 conn->feat_mask = get_unaligned_le32(rsp->data);
2193 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2195 l2cap_conn_start(conn);
/* Demultiplex the signalling channel (CID 0x0001): iterate over the
 * concatenated commands in the skb, copy each header out, dispatch by
 * command code, and answer any handler failure or unknown code with a
 * Command Reject.  The raw skb is also delivered to raw sockets first.
 * NOTE(review): excerpt is elided -- 'len'/'err' declarations, the
 * per-iteration data/len advance past cmd_len and the final kfree_skb
 * are not visible here. */
2201 static inline void l2cap_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb)
2203 u8 *data = skb->data;
2205 struct l2cap_cmd_hdr cmd;
/* Give raw (sniffer) sockets a copy of the signalling traffic. */
2208 l2cap_raw_recv(conn, skb);
2210 while (len >= L2CAP_CMD_HDR_SIZE) {
2212 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
2213 data += L2CAP_CMD_HDR_SIZE;
2214 len -= L2CAP_CMD_HDR_SIZE;
2216 cmd_len = le16_to_cpu(cmd.len);
2218 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);
/* Bounds check: a command must fit in the remaining payload and
 * carry a non-zero ident. */
2220 if (cmd_len > len || !cmd.ident) {
2221 BT_DBG("corrupted command");
2226 case L2CAP_COMMAND_REJ:
2227 l2cap_command_rej(conn, &cmd, data);
2230 case L2CAP_CONN_REQ:
2231 err = l2cap_connect_req(conn, &cmd, data);
2234 case L2CAP_CONN_RSP:
2235 err = l2cap_connect_rsp(conn, &cmd, data);
2238 case L2CAP_CONF_REQ:
2239 err = l2cap_config_req(conn, &cmd, cmd_len, data);
2242 case L2CAP_CONF_RSP:
2243 err = l2cap_config_rsp(conn, &cmd, data);
2246 case L2CAP_DISCONN_REQ:
2247 err = l2cap_disconnect_req(conn, &cmd, data);
2250 case L2CAP_DISCONN_RSP:
2251 err = l2cap_disconnect_rsp(conn, &cmd, data);
2254 case L2CAP_ECHO_REQ:
/* Echo: bounce the payload straight back. */
2255 l2cap_send_cmd(conn, cmd.ident, L2CAP_ECHO_RSP, cmd_len, data);
2258 case L2CAP_ECHO_RSP:
2261 case L2CAP_INFO_REQ:
2262 err = l2cap_information_req(conn, &cmd, data);
2265 case L2CAP_INFO_RSP:
2266 err = l2cap_information_rsp(conn, &cmd, data);
2270 BT_ERR("Unknown signaling command 0x%2.2x", cmd.code);
2276 struct l2cap_cmd_rej rej;
2277 BT_DBG("error %d", err);
2279 /* FIXME: Map err to a valid reason */
2280 rej.reason = cpu_to_le16(0);
2281 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
/* Deliver a data frame to the connection-oriented channel identified
 * by 'cid'.  Drops the frame if the channel is unknown, not connected,
 * larger than our MTU, or the socket receive buffer is full.
 * NOTE(review): excerpt is elided -- the drop/done paths (kfree_skb,
 * bh_unlock_sock, return) are not visible here. */
2291 static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
2295 sk = l2cap_get_chan_by_scid(&conn->chan_list, cid);
2297 BT_DBG("unknown cid 0x%4.4x", cid);
2301 BT_DBG("sk %p, len %d", sk, skb->len);
2303 if (sk->sk_state != BT_CONNECTED)
/* Enforce our advertised incoming MTU. */
2306 if (l2cap_pi(sk)->imtu < skb->len)
2309 /* If socket recv buffers overflows we drop data here
2310 * which is *bad* because L2CAP has to be reliable.
2311 * But we don't have any other choice. L2CAP doesn't
2312 * provide flow control mechanism. */
2314 if (!sock_queue_rcv_skb(sk, skb))
/* Deliver a connectionless (CID 0x0002) frame to a socket bound to
 * the given PSM, subject to the same state/MTU/queue checks as the
 * connection-oriented path.
 * NOTE(review): excerpt is elided -- the drop path (kfree_skb) is not
 * visible here. */
2327 static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
2331 sk = l2cap_get_sock_by_psm(0, psm, conn->src);
2335 BT_DBG("sk %p, len %d", sk, skb->len);
2337 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_CONNECTED)
2340 if (l2cap_pi(sk)->imtu < skb->len)
2343 if (!sock_queue_rcv_skb(sk, skb))
/* sk may be NULL when no socket is bound to the PSM. */
2350 if (sk) bh_unlock_sock(sk);
/* Route one complete, reassembled L2CAP frame by its CID: the
 * signalling channel, the connectionless channel (PSM-prefixed
 * payload), or a connection-oriented data channel. */
2354 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
2356 struct l2cap_hdr *lh = (void *) skb->data;
/* Strip the basic L2CAP header before dispatch. */
2360 skb_pull(skb, L2CAP_HDR_SIZE);
2361 cid = __le16_to_cpu(lh->cid);
2362 len = __le16_to_cpu(lh->len);
2364 BT_DBG("len %d, cid 0x%4.4x", len, cid);
2368 l2cap_sig_channel(conn, skb);
/* Connectionless frames carry the PSM in the first two bytes. */
2372 psm = get_unaligned((__le16 *) skb->data);
2374 l2cap_conless_channel(conn, psm, skb);
2378 l2cap_data_channel(conn, cid, skb);
2383 /* ---- L2CAP interface with lower layer (HCI) ---- */
/* HCI callback: should we accept an incoming ACL connection from
 * 'bdaddr'?  Scans listening L2CAP sockets; an exact local-address
 * match (lm1) takes precedence over wildcard BDADDR_ANY listeners
 * (lm2).  Returns the accept/master link-mode bits, 0 to refuse.
 * NOTE(review): the statement setting 'exact' is elided in this
 * excerpt. */
2385 static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
2387 int exact = 0, lm1 = 0, lm2 = 0;
2388 register struct sock *sk;
2389 struct hlist_node *node;
/* Only ACL links carry L2CAP. */
2391 if (type != ACL_LINK)
2394 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
2396 /* Find listening sockets and check their link_mode */
2397 read_lock(&l2cap_sk_list.lock);
2398 sk_for_each(sk, node, &l2cap_sk_list.head) {
2399 if (sk->sk_state != BT_LISTEN)
2402 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
2403 lm1 |= HCI_LM_ACCEPT;
2404 if (l2cap_pi(sk)->role_switch)
2405 lm1 |= HCI_LM_MASTER;
2407 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
2408 lm2 |= HCI_LM_ACCEPT;
2409 if (l2cap_pi(sk)->role_switch)
2410 lm2 |= HCI_LM_MASTER;
2413 read_unlock(&l2cap_sk_list.lock);
2415 return exact ? lm1 : lm2;
/* HCI callback: ACL connection attempt completed.  On success create
 * the L2CAP connection object and mark it ready; on failure tear it
 * down with the mapped errno. */
2418 static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
2420 struct l2cap_conn *conn;
2422 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
2424 if (hcon->type != ACL_LINK)
2428 conn = l2cap_conn_add(hcon, status);
2430 l2cap_conn_ready(conn);
/* Failure path: bt_err() maps the HCI status to an errno. */
2432 l2cap_conn_del(hcon, bt_err(status));
/* HCI callback: ACL link went down.  Destroy the L2CAP connection and
 * propagate the mapped errno to its channels. */
2437 static int l2cap_disconn_ind(struct hci_conn *hcon, u8 reason)
2439 BT_DBG("hcon %p reason %d", hcon, reason);
2441 if (hcon->type != ACL_LINK)
2444 l2cap_conn_del(hcon, bt_err(reason));
/* React to an encryption change on a SEQPACKET channel.  Encryption
 * lost: MEDIUM security gets a 5 s grace timer before teardown, HIGH
 * security is closed immediately.  Encryption (re)gained: clear the
 * pending timer for MEDIUM security channels. */
2449 static inline void l2cap_check_encryption(struct sock *sk, u8 encrypt)
2451 if (sk->sk_type != SOCK_SEQPACKET)
2454 if (encrypt == 0x00) {
2455 if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM) {
2456 l2cap_sock_clear_timer(sk);
2457 l2cap_sock_set_timer(sk, HZ * 5);
2458 } else if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
2459 __l2cap_sock_close(sk, ECONNREFUSED);
2461 if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM)
2462 l2cap_sock_clear_timer(sk);
/* HCI callback: an authentication/encryption request on this ACL has
 * completed.  Walk every channel on the connection: established ones
 * get an encryption check; a BT_CONNECT channel (we initiated) now
 * sends its deferred Connection Request; a BT_CONNECT2 channel (peer
 * initiated, security was pending) now sends its deferred Connection
 * Response with SUCCESS or SEC_BLOCK.
 * NOTE(review): excerpt is elided -- per-socket bh_lock/unlock, the
 * 'status' checks guarding the req/rsp branches and the return are
 * not visible here. */
2466 static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
2468 struct l2cap_chan_list *l;
2469 struct l2cap_conn *conn = hcon->l2cap_data;
2475 l = &conn->chan_list;
2477 BT_DBG("conn %p", conn);
2479 read_lock(&l->lock);
2481 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
/* Skip channels whose connect is still pending elsewhere. */
2484 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_CONNECT_PEND) {
2489 if (!status && (sk->sk_state == BT_CONNECTED ||
2490 sk->sk_state == BT_CONFIG)) {
2491 l2cap_check_encryption(sk, encrypt);
2496 if (sk->sk_state == BT_CONNECT) {
2498 struct l2cap_conn_req req;
2499 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
2500 req.psm = l2cap_pi(sk)->psm;
2502 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
2504 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
2505 L2CAP_CONN_REQ, sizeof(req), &req);
2507 l2cap_sock_clear_timer(sk);
2508 l2cap_sock_set_timer(sk, HZ / 10);
2510 } else if (sk->sk_state == BT_CONNECT2) {
2511 struct l2cap_conn_rsp rsp;
2515 sk->sk_state = BT_CONFIG;
2516 result = L2CAP_CR_SUCCESS;
/* Security failed: short timer, reject with SEC_BLOCK. */
2518 sk->sk_state = BT_DISCONN;
2519 l2cap_sock_set_timer(sk, HZ / 10);
2520 result = L2CAP_CR_SEC_BLOCK;
2523 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
2524 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
2525 rsp.result = cpu_to_le16(result);
2526 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
2527 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
2528 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
2534 read_unlock(&l->lock);
/* HCI callback: raw ACL data arrived.  Reassembles L2CAP frames from
 * ACL fragments: an ACL_START fragment carries the L2CAP header giving
 * the total length; continuation fragments are appended to conn->rx_skb
 * until conn->rx_len reaches zero, then the complete frame is passed
 * to l2cap_recv_frame().  Inconsistent lengths mark the connection
 * unreliable and drop the partial frame.
 * NOTE(review): excerpt is elided -- several if-conditions, goto/drop
 * labels, the final kfree_skb of the fragment and the return are not
 * visible here. */
2539 static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
2541 struct l2cap_conn *conn = hcon->l2cap_data;
2543 if (!conn && !(conn = l2cap_conn_add(hcon, 0)))
2546 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
2548 if (flags & ACL_START) {
2549 struct l2cap_hdr *hdr;
/* A start fragment while reassembly is in progress means the
 * previous frame was truncated: drop it. */
2553 BT_ERR("Unexpected start frame (len %d)", skb->len);
2554 kfree_skb(conn->rx_skb);
2555 conn->rx_skb = NULL;
2557 l2cap_conn_unreliable(conn, ECOMM);
2561 BT_ERR("Frame is too short (len %d)", skb->len);
2562 l2cap_conn_unreliable(conn, ECOMM);
2566 hdr = (struct l2cap_hdr *) skb->data;
2567 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
2569 if (len == skb->len) {
2570 /* Complete frame received */
2571 l2cap_recv_frame(conn, skb);
2575 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
2577 if (skb->len > len) {
2578 BT_ERR("Frame is too long (len %d, expected len %d)",
2580 l2cap_conn_unreliable(conn, ECOMM);
2584 /* Allocate skb for the complete frame (with header) */
2585 if (!(conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC)))
2588 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
/* Bytes still expected from continuation fragments. */
2590 conn->rx_len = len - skb->len;
2592 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
2594 if (!conn->rx_len) {
2595 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
2596 l2cap_conn_unreliable(conn, ECOMM);
2600 if (skb->len > conn->rx_len) {
2601 BT_ERR("Fragment is too long (len %d, expected %d)",
2602 skb->len, conn->rx_len);
2603 kfree_skb(conn->rx_skb);
2604 conn->rx_skb = NULL;
2606 l2cap_conn_unreliable(conn, ECOMM);
2610 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
2612 conn->rx_len -= skb->len;
2614 if (!conn->rx_len) {
2615 /* Complete frame received */
2616 l2cap_recv_frame(conn, conn->rx_skb);
2617 conn->rx_skb = NULL;
/* sysfs read handler: dump one line per L2CAP socket (addresses,
 * state, PSM, CIDs, MTUs, security level) into 'buf'.
 * NOTE(review): the 'str' initialization and the returned length are
 * elided in this excerpt. */
2626 static ssize_t l2cap_sysfs_show(struct class *dev, char *buf)
2629 struct hlist_node *node;
2632 read_lock_bh(&l2cap_sk_list.lock);
2634 sk_for_each(sk, node, &l2cap_sk_list.head) {
2635 struct l2cap_pinfo *pi = l2cap_pi(sk);
2637 str += sprintf(str, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d\n",
2638 batostr(&bt_sk(sk)->src), batostr(&bt_sk(sk)->dst),
2639 sk->sk_state, btohs(pi->psm), pi->scid, pi->dcid,
2640 pi->imtu, pi->omtu, pi->sec_level);
2643 read_unlock_bh(&l2cap_sk_list.lock);
/* Read-only class attribute: /sys/class/bluetooth/l2cap. */
2648 static CLASS_ATTR(l2cap, S_IRUGO, l2cap_sysfs_show, NULL);
/* Socket-layer operations for PF_BLUETOOTH/BTPROTO_L2CAP sockets.
 * poll and ioctl use the generic Bluetooth helpers; mmap and
 * socketpair are unsupported. */
2650 static const struct proto_ops l2cap_sock_ops = {
2651 .family = PF_BLUETOOTH,
2652 .owner = THIS_MODULE,
2653 .release = l2cap_sock_release,
2654 .bind = l2cap_sock_bind,
2655 .connect = l2cap_sock_connect,
2656 .listen = l2cap_sock_listen,
2657 .accept = l2cap_sock_accept,
2658 .getname = l2cap_sock_getname,
2659 .sendmsg = l2cap_sock_sendmsg,
2660 .recvmsg = l2cap_sock_recvmsg,
2661 .poll = bt_sock_poll,
2662 .ioctl = bt_sock_ioctl,
2663 .mmap = sock_no_mmap,
2664 .socketpair = sock_no_socketpair,
2665 .shutdown = l2cap_sock_shutdown,
2666 .setsockopt = l2cap_sock_setsockopt,
2667 .getsockopt = l2cap_sock_getsockopt
/* Registered with bt_sock_register() so socket(PF_BLUETOOTH,
 * BTPROTO_L2CAP) reaches l2cap_sock_create(). */
2670 static struct net_proto_family l2cap_sock_family_ops = {
2671 .family = PF_BLUETOOTH,
2672 .owner = THIS_MODULE,
2673 .create = l2cap_sock_create,
/* HCI protocol hooks: how the HCI core delivers ACL events and data
 * to the L2CAP layer. */
2676 static struct hci_proto l2cap_hci_proto = {
2678 .id = HCI_PROTO_L2CAP,
2679 .connect_ind = l2cap_connect_ind,
2680 .connect_cfm = l2cap_connect_cfm,
2681 .disconn_ind = l2cap_disconn_ind,
2682 .security_cfm = l2cap_security_cfm,
2683 .recv_acldata = l2cap_recv_acldata
/* Module init: register the proto, the Bluetooth socket family and
 * the HCI protocol hooks, in that order, unwinding on failure.  The
 * sysfs attribute is best-effort (failure only logged).
 * NOTE(review): excerpt is elided -- the 'err' checks between steps,
 * the success return and the error-label structure are not fully
 * visible here. */
2686 static int __init l2cap_init(void)
2690 err = proto_register(&l2cap_proto, 0);
2694 err = bt_sock_register(BTPROTO_L2CAP, &l2cap_sock_family_ops);
2696 BT_ERR("L2CAP socket registration failed");
2700 err = hci_register_proto(&l2cap_hci_proto);
2702 BT_ERR("L2CAP protocol registration failed");
/* Unwind the socket-family registration on HCI failure. */
2703 bt_sock_unregister(BTPROTO_L2CAP);
2707 if (class_create_file(bt_class, &class_attr_l2cap) < 0)
2708 BT_ERR("Failed to create L2CAP info file");
2710 BT_INFO("L2CAP ver %s", VERSION);
2711 BT_INFO("L2CAP socket layer initialized");
/* Error path: undo the initial proto_register. */
2716 proto_unregister(&l2cap_proto);
/* Module exit: tear down in reverse order of l2cap_init(), logging
 * (but not failing on) unregister errors. */
2720 static void __exit l2cap_exit(void)
2722 class_remove_file(bt_class, &class_attr_l2cap);
2724 if (bt_sock_unregister(BTPROTO_L2CAP) < 0)
2725 BT_ERR("L2CAP socket unregistration failed");
2727 if (hci_unregister_proto(&l2cap_hci_proto) < 0)
2728 BT_ERR("L2CAP protocol unregistration failed");
2730 proto_unregister(&l2cap_proto);
/* Exported no-op: referencing this symbol lets dependent modules pull
 * in l2cap via the normal module dependency mechanism. */
2733 void l2cap_load(void)
2735 /* Dummy function to trigger automatic L2CAP module loading by
2736 * other modules that use L2CAP sockets but don't use any other
2737 * symbols from it. */
2740 EXPORT_SYMBOL(l2cap_load);
/* Module entry points and metadata; the bt-proto-0 alias enables
 * on-demand loading when an L2CAP socket is first requested. */
2742 module_init(l2cap_init);
2743 module_exit(l2cap_exit);
2745 MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
2746 MODULE_DESCRIPTION("Bluetooth L2CAP ver " VERSION);
2747 MODULE_VERSION(VERSION);
2748 MODULE_LICENSE("GPL");
2749 MODULE_ALIAS("bt-proto-0");