/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/
25 /* Bluetooth L2CAP core and sockets. */
27 #include <linux/module.h>
29 #include <linux/types.h>
30 #include <linux/capability.h>
31 #include <linux/errno.h>
32 #include <linux/kernel.h>
33 #include <linux/sched.h>
34 #include <linux/slab.h>
35 #include <linux/poll.h>
36 #include <linux/fcntl.h>
37 #include <linux/init.h>
38 #include <linux/interrupt.h>
39 #include <linux/socket.h>
40 #include <linux/skbuff.h>
41 #include <linux/list.h>
42 #include <linux/device.h>
45 #include <asm/system.h>
46 #include <asm/uaccess.h>
47 #include <asm/unaligned.h>
49 #include <net/bluetooth/bluetooth.h>
50 #include <net/bluetooth/hci_core.h>
51 #include <net/bluetooth/l2cap.h>
53 #define VERSION "2.13"
55 static u32 l2cap_feat_mask = 0x0080;
56 static u8 l2cap_fixed_chan[8] = { 0x02, };
58 static const struct proto_ops l2cap_sock_ops;
60 static struct bt_sock_list l2cap_sk_list = {
61 .lock = __RW_LOCK_UNLOCKED(l2cap_sk_list.lock)
64 static void __l2cap_sock_close(struct sock *sk, int reason);
65 static void l2cap_sock_close(struct sock *sk);
66 static void l2cap_sock_kill(struct sock *sk);
68 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
69 u8 code, u8 ident, u16 dlen, void *data);
/* ---- L2CAP timers ---- */
/* Socket timer expiry: pick an error reason from the socket state and
 * tear the channel down. Runs in timer (softirq) context. */
static void l2cap_sock_timeout(unsigned long arg)
{
	struct sock *sk = (struct sock *) arg;
	int reason;

	BT_DBG("sock %p state %d", sk, sk->sk_state);

	bh_lock_sock(sk);

	if (sk->sk_state == BT_CONNECTED || sk->sk_state == BT_CONFIG)
		reason = ECONNREFUSED;
	else if (sk->sk_state == BT_CONNECT &&
			l2cap_pi(sk)->sec_level != BT_SECURITY_SDP)
		reason = ECONNREFUSED;
	else
		reason = ETIMEDOUT;

	__l2cap_sock_close(sk, reason);

	bh_unlock_sock(sk);

	l2cap_sock_kill(sk);
	sock_put(sk);
}
97 static void l2cap_sock_set_timer(struct sock *sk, long timeout)
99 BT_DBG("sk %p state %d timeout %ld", sk, sk->sk_state, timeout);
100 sk_reset_timer(sk, &sk->sk_timer, jiffies + timeout);
103 static void l2cap_sock_clear_timer(struct sock *sk)
105 BT_DBG("sock %p state %d", sk, sk->sk_state);
106 sk_stop_timer(sk, &sk->sk_timer);
109 /* ---- L2CAP channels ---- */
110 static struct sock *__l2cap_get_chan_by_dcid(struct l2cap_chan_list *l, u16 cid)
113 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
114 if (l2cap_pi(s)->dcid == cid)
120 static struct sock *__l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
123 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
124 if (l2cap_pi(s)->scid == cid)
130 /* Find channel with given SCID.
131 * Returns locked socket */
132 static inline struct sock *l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
136 s = __l2cap_get_chan_by_scid(l, cid);
137 if (s) bh_lock_sock(s);
138 read_unlock(&l->lock);
142 static struct sock *__l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
145 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
146 if (l2cap_pi(s)->ident == ident)
152 static inline struct sock *l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
156 s = __l2cap_get_chan_by_ident(l, ident);
157 if (s) bh_lock_sock(s);
158 read_unlock(&l->lock);
162 static u16 l2cap_alloc_cid(struct l2cap_chan_list *l)
166 for (; cid < 0xffff; cid++) {
167 if(!__l2cap_get_chan_by_scid(l, cid))
174 static inline void __l2cap_chan_link(struct l2cap_chan_list *l, struct sock *sk)
179 l2cap_pi(l->head)->prev_c = sk;
181 l2cap_pi(sk)->next_c = l->head;
182 l2cap_pi(sk)->prev_c = NULL;
186 static inline void l2cap_chan_unlink(struct l2cap_chan_list *l, struct sock *sk)
188 struct sock *next = l2cap_pi(sk)->next_c, *prev = l2cap_pi(sk)->prev_c;
190 write_lock_bh(&l->lock);
195 l2cap_pi(next)->prev_c = prev;
197 l2cap_pi(prev)->next_c = next;
198 write_unlock_bh(&l->lock);
203 static void __l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
205 struct l2cap_chan_list *l = &conn->chan_list;
207 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn, l2cap_pi(sk)->psm, l2cap_pi(sk)->dcid);
209 l2cap_pi(sk)->conn = conn;
211 if (sk->sk_type == SOCK_SEQPACKET) {
212 /* Alloc CID for connection-oriented socket */
213 l2cap_pi(sk)->scid = l2cap_alloc_cid(l);
214 } else if (sk->sk_type == SOCK_DGRAM) {
215 /* Connectionless socket */
216 l2cap_pi(sk)->scid = 0x0002;
217 l2cap_pi(sk)->dcid = 0x0002;
218 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
220 /* Raw socket can send/recv signalling messages only */
221 l2cap_pi(sk)->scid = 0x0001;
222 l2cap_pi(sk)->dcid = 0x0001;
223 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
226 __l2cap_chan_link(l, sk);
229 bt_accept_enqueue(parent, sk);
233 * Must be called on the locked socket. */
234 static void l2cap_chan_del(struct sock *sk, int err)
236 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
237 struct sock *parent = bt_sk(sk)->parent;
239 l2cap_sock_clear_timer(sk);
241 BT_DBG("sk %p, conn %p, err %d", sk, conn, err);
244 /* Unlink from channel list */
245 l2cap_chan_unlink(&conn->chan_list, sk);
246 l2cap_pi(sk)->conn = NULL;
247 hci_conn_put(conn->hcon);
250 sk->sk_state = BT_CLOSED;
251 sock_set_flag(sk, SOCK_ZAPPED);
257 bt_accept_unlink(sk);
258 parent->sk_data_ready(parent, 0);
260 sk->sk_state_change(sk);
263 /* Service level security */
264 static inline int l2cap_check_security(struct sock *sk)
266 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
269 switch (l2cap_pi(sk)->sec_level) {
270 case BT_SECURITY_HIGH:
271 auth_type = HCI_AT_GENERAL_BONDING_MITM;
273 case BT_SECURITY_MEDIUM:
274 auth_type = HCI_AT_GENERAL_BONDING;
277 auth_type = HCI_AT_NO_BONDING;
281 return hci_conn_security(conn->hcon, l2cap_pi(sk)->sec_level,
285 static inline u8 l2cap_get_ident(struct l2cap_conn *conn)
289 /* Get next available identificator.
290 * 1 - 128 are used by kernel.
291 * 129 - 199 are reserved.
292 * 200 - 254 are used by utilities like l2ping, etc.
295 spin_lock_bh(&conn->lock);
297 if (++conn->tx_ident > 128)
302 spin_unlock_bh(&conn->lock);
307 static inline int l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
309 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
311 BT_DBG("code 0x%2.2x", code);
316 return hci_send_acl(conn->hcon, skb, 0);
319 static void l2cap_do_start(struct sock *sk)
321 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
323 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
324 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
327 if (l2cap_check_security(sk)) {
328 struct l2cap_conn_req req;
329 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
330 req.psm = l2cap_pi(sk)->psm;
332 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
334 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
335 L2CAP_CONN_REQ, sizeof(req), &req);
338 struct l2cap_info_req req;
339 req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
341 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
342 conn->info_ident = l2cap_get_ident(conn);
344 mod_timer(&conn->info_timer, jiffies +
345 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
347 l2cap_send_cmd(conn, conn->info_ident,
348 L2CAP_INFO_REQ, sizeof(req), &req);
352 /* ---- L2CAP connections ---- */
353 static void l2cap_conn_start(struct l2cap_conn *conn)
355 struct l2cap_chan_list *l = &conn->chan_list;
358 BT_DBG("conn %p", conn);
362 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
365 if (sk->sk_type != SOCK_SEQPACKET) {
370 if (sk->sk_state == BT_CONNECT) {
371 if (l2cap_check_security(sk)) {
372 struct l2cap_conn_req req;
373 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
374 req.psm = l2cap_pi(sk)->psm;
376 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
378 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
379 L2CAP_CONN_REQ, sizeof(req), &req);
381 } else if (sk->sk_state == BT_CONNECT2) {
382 struct l2cap_conn_rsp rsp;
383 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
384 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
386 if (l2cap_check_security(sk)) {
387 if (bt_sk(sk)->defer_setup) {
388 struct sock *parent = bt_sk(sk)->parent;
389 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
390 rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
391 parent->sk_data_ready(parent, 0);
394 sk->sk_state = BT_CONFIG;
395 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
396 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
399 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
400 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
403 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
404 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
410 read_unlock(&l->lock);
413 static void l2cap_conn_ready(struct l2cap_conn *conn)
415 struct l2cap_chan_list *l = &conn->chan_list;
418 BT_DBG("conn %p", conn);
422 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
425 if (sk->sk_type != SOCK_SEQPACKET) {
426 l2cap_sock_clear_timer(sk);
427 sk->sk_state = BT_CONNECTED;
428 sk->sk_state_change(sk);
429 } else if (sk->sk_state == BT_CONNECT)
435 read_unlock(&l->lock);
438 /* Notify sockets that we cannot guaranty reliability anymore */
439 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
441 struct l2cap_chan_list *l = &conn->chan_list;
444 BT_DBG("conn %p", conn);
448 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
449 if (l2cap_pi(sk)->force_reliable)
453 read_unlock(&l->lock);
456 static void l2cap_info_timeout(unsigned long arg)
458 struct l2cap_conn *conn = (void *) arg;
460 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
461 conn->info_ident = 0;
463 l2cap_conn_start(conn);
466 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
468 struct l2cap_conn *conn = hcon->l2cap_data;
473 conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
477 hcon->l2cap_data = conn;
480 BT_DBG("hcon %p conn %p", hcon, conn);
482 conn->mtu = hcon->hdev->acl_mtu;
483 conn->src = &hcon->hdev->bdaddr;
484 conn->dst = &hcon->dst;
488 setup_timer(&conn->info_timer, l2cap_info_timeout,
489 (unsigned long) conn);
491 spin_lock_init(&conn->lock);
492 rwlock_init(&conn->chan_list.lock);
497 static void l2cap_conn_del(struct hci_conn *hcon, int err)
499 struct l2cap_conn *conn = hcon->l2cap_data;
505 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
508 kfree_skb(conn->rx_skb);
511 while ((sk = conn->chan_list.head)) {
513 l2cap_chan_del(sk, err);
518 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
519 del_timer_sync(&conn->info_timer);
521 hcon->l2cap_data = NULL;
525 static inline void l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
527 struct l2cap_chan_list *l = &conn->chan_list;
528 write_lock_bh(&l->lock);
529 __l2cap_chan_add(conn, sk, parent);
530 write_unlock_bh(&l->lock);
533 /* ---- Socket interface ---- */
534 static struct sock *__l2cap_get_sock_by_addr(__le16 psm, bdaddr_t *src)
537 struct hlist_node *node;
538 sk_for_each(sk, node, &l2cap_sk_list.head)
539 if (l2cap_pi(sk)->sport == psm && !bacmp(&bt_sk(sk)->src, src))
546 /* Find socket with psm and source bdaddr.
547 * Returns closest match.
549 static struct sock *__l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
551 struct sock *sk = NULL, *sk1 = NULL;
552 struct hlist_node *node;
554 sk_for_each(sk, node, &l2cap_sk_list.head) {
555 if (state && sk->sk_state != state)
558 if (l2cap_pi(sk)->psm == psm) {
560 if (!bacmp(&bt_sk(sk)->src, src))
564 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
568 return node ? sk : sk1;
571 /* Find socket with given address (psm, src).
572 * Returns locked socket */
573 static inline struct sock *l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
576 read_lock(&l2cap_sk_list.lock);
577 s = __l2cap_get_sock_by_psm(state, psm, src);
578 if (s) bh_lock_sock(s);
579 read_unlock(&l2cap_sk_list.lock);
583 static void l2cap_sock_destruct(struct sock *sk)
587 skb_queue_purge(&sk->sk_receive_queue);
588 skb_queue_purge(&sk->sk_write_queue);
591 static void l2cap_sock_cleanup_listen(struct sock *parent)
595 BT_DBG("parent %p", parent);
597 /* Close not yet accepted channels */
598 while ((sk = bt_accept_dequeue(parent, NULL)))
599 l2cap_sock_close(sk);
601 parent->sk_state = BT_CLOSED;
602 sock_set_flag(parent, SOCK_ZAPPED);
605 /* Kill socket (only if zapped and orphan)
606 * Must be called on unlocked socket.
608 static void l2cap_sock_kill(struct sock *sk)
610 if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
613 BT_DBG("sk %p state %d", sk, sk->sk_state);
615 /* Kill poor orphan */
616 bt_sock_unlink(&l2cap_sk_list, sk);
617 sock_set_flag(sk, SOCK_DEAD);
621 static void __l2cap_sock_close(struct sock *sk, int reason)
623 BT_DBG("sk %p state %d socket %p", sk, sk->sk_state, sk->sk_socket);
625 switch (sk->sk_state) {
627 l2cap_sock_cleanup_listen(sk);
632 if (sk->sk_type == SOCK_SEQPACKET) {
633 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
634 struct l2cap_disconn_req req;
636 sk->sk_state = BT_DISCONN;
637 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
639 req.dcid = cpu_to_le16(l2cap_pi(sk)->dcid);
640 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
641 l2cap_send_cmd(conn, l2cap_get_ident(conn),
642 L2CAP_DISCONN_REQ, sizeof(req), &req);
644 l2cap_chan_del(sk, reason);
648 if (sk->sk_type == SOCK_SEQPACKET) {
649 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
650 struct l2cap_conn_rsp rsp;
653 if (bt_sk(sk)->defer_setup)
654 result = L2CAP_CR_SEC_BLOCK;
656 result = L2CAP_CR_BAD_PSM;
658 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
659 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
660 rsp.result = cpu_to_le16(result);
661 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
662 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
663 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
665 l2cap_chan_del(sk, reason);
670 l2cap_chan_del(sk, reason);
674 sock_set_flag(sk, SOCK_ZAPPED);
679 /* Must be called on unlocked socket. */
680 static void l2cap_sock_close(struct sock *sk)
682 l2cap_sock_clear_timer(sk);
684 __l2cap_sock_close(sk, ECONNRESET);
689 static void l2cap_sock_init(struct sock *sk, struct sock *parent)
691 struct l2cap_pinfo *pi = l2cap_pi(sk);
696 sk->sk_type = parent->sk_type;
697 bt_sk(sk)->defer_setup = bt_sk(parent)->defer_setup;
699 pi->imtu = l2cap_pi(parent)->imtu;
700 pi->omtu = l2cap_pi(parent)->omtu;
701 pi->sec_level = l2cap_pi(parent)->sec_level;
702 pi->role_switch = l2cap_pi(parent)->role_switch;
703 pi->force_reliable = l2cap_pi(parent)->force_reliable;
705 pi->imtu = L2CAP_DEFAULT_MTU;
707 pi->sec_level = BT_SECURITY_LOW;
709 pi->force_reliable = 0;
712 /* Default config options */
714 pi->flush_to = L2CAP_DEFAULT_FLUSH_TO;
717 static struct proto l2cap_proto = {
719 .owner = THIS_MODULE,
720 .obj_size = sizeof(struct l2cap_pinfo)
723 static struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock, int proto, gfp_t prio)
727 sk = sk_alloc(net, PF_BLUETOOTH, prio, &l2cap_proto);
731 sock_init_data(sock, sk);
732 INIT_LIST_HEAD(&bt_sk(sk)->accept_q);
734 sk->sk_destruct = l2cap_sock_destruct;
735 sk->sk_sndtimeo = msecs_to_jiffies(L2CAP_CONN_TIMEOUT);
737 sock_reset_flag(sk, SOCK_ZAPPED);
739 sk->sk_protocol = proto;
740 sk->sk_state = BT_OPEN;
742 setup_timer(&sk->sk_timer, l2cap_sock_timeout, (unsigned long) sk);
744 bt_sock_link(&l2cap_sk_list, sk);
748 static int l2cap_sock_create(struct net *net, struct socket *sock, int protocol)
752 BT_DBG("sock %p", sock);
754 sock->state = SS_UNCONNECTED;
756 if (sock->type != SOCK_SEQPACKET &&
757 sock->type != SOCK_DGRAM && sock->type != SOCK_RAW)
758 return -ESOCKTNOSUPPORT;
760 if (sock->type == SOCK_RAW && !capable(CAP_NET_RAW))
763 sock->ops = &l2cap_sock_ops;
765 sk = l2cap_sock_alloc(net, sock, protocol, GFP_ATOMIC);
769 l2cap_sock_init(sk, NULL);
773 static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
775 struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
776 struct sock *sk = sock->sk;
779 BT_DBG("sk %p, %s %d", sk, batostr(&la->l2_bdaddr), la->l2_psm);
781 if (!addr || addr->sa_family != AF_BLUETOOTH)
786 if (sk->sk_state != BT_OPEN) {
791 if (la->l2_psm && btohs(la->l2_psm) < 0x1001 &&
792 !capable(CAP_NET_BIND_SERVICE)) {
797 write_lock_bh(&l2cap_sk_list.lock);
799 if (la->l2_psm && __l2cap_get_sock_by_addr(la->l2_psm, &la->l2_bdaddr)) {
802 /* Save source address */
803 bacpy(&bt_sk(sk)->src, &la->l2_bdaddr);
804 l2cap_pi(sk)->psm = la->l2_psm;
805 l2cap_pi(sk)->sport = la->l2_psm;
806 sk->sk_state = BT_BOUND;
808 if (btohs(la->l2_psm) == 0x0001 || btohs(la->l2_psm) == 0x0003)
809 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
812 write_unlock_bh(&l2cap_sk_list.lock);
819 static int l2cap_do_connect(struct sock *sk)
821 bdaddr_t *src = &bt_sk(sk)->src;
822 bdaddr_t *dst = &bt_sk(sk)->dst;
823 struct l2cap_conn *conn;
824 struct hci_conn *hcon;
825 struct hci_dev *hdev;
829 BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst), l2cap_pi(sk)->psm);
831 if (!(hdev = hci_get_route(dst, src)))
832 return -EHOSTUNREACH;
834 hci_dev_lock_bh(hdev);
838 if (sk->sk_type == SOCK_RAW) {
839 switch (l2cap_pi(sk)->sec_level) {
840 case BT_SECURITY_HIGH:
841 auth_type = HCI_AT_DEDICATED_BONDING_MITM;
843 case BT_SECURITY_MEDIUM:
844 auth_type = HCI_AT_DEDICATED_BONDING;
847 auth_type = HCI_AT_NO_BONDING;
850 } else if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
851 if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
852 auth_type = HCI_AT_NO_BONDING_MITM;
854 auth_type = HCI_AT_NO_BONDING;
856 if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW)
857 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
859 switch (l2cap_pi(sk)->sec_level) {
860 case BT_SECURITY_HIGH:
861 auth_type = HCI_AT_GENERAL_BONDING_MITM;
863 case BT_SECURITY_MEDIUM:
864 auth_type = HCI_AT_GENERAL_BONDING;
867 auth_type = HCI_AT_NO_BONDING;
872 hcon = hci_connect(hdev, ACL_LINK, dst,
873 l2cap_pi(sk)->sec_level, auth_type);
877 conn = l2cap_conn_add(hcon, 0);
885 /* Update source addr of the socket */
886 bacpy(src, conn->src);
888 l2cap_chan_add(conn, sk, NULL);
890 sk->sk_state = BT_CONNECT;
891 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
893 if (hcon->state == BT_CONNECTED) {
894 if (sk->sk_type != SOCK_SEQPACKET) {
895 l2cap_sock_clear_timer(sk);
896 sk->sk_state = BT_CONNECTED;
902 hci_dev_unlock_bh(hdev);
907 static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, int alen, int flags)
909 struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
910 struct sock *sk = sock->sk;
917 if (addr->sa_family != AF_BLUETOOTH || alen < sizeof(struct sockaddr_l2)) {
922 if (sk->sk_type == SOCK_SEQPACKET && !la->l2_psm) {
927 switch(sk->sk_state) {
931 /* Already connecting */
935 /* Already connected */
948 /* Set destination address and psm */
949 bacpy(&bt_sk(sk)->dst, &la->l2_bdaddr);
950 l2cap_pi(sk)->psm = la->l2_psm;
952 if ((err = l2cap_do_connect(sk)))
956 err = bt_sock_wait_state(sk, BT_CONNECTED,
957 sock_sndtimeo(sk, flags & O_NONBLOCK));
963 static int l2cap_sock_listen(struct socket *sock, int backlog)
965 struct sock *sk = sock->sk;
968 BT_DBG("sk %p backlog %d", sk, backlog);
972 if (sk->sk_state != BT_BOUND || sock->type != SOCK_SEQPACKET) {
977 if (!l2cap_pi(sk)->psm) {
978 bdaddr_t *src = &bt_sk(sk)->src;
983 write_lock_bh(&l2cap_sk_list.lock);
985 for (psm = 0x1001; psm < 0x1100; psm += 2)
986 if (!__l2cap_get_sock_by_addr(htobs(psm), src)) {
987 l2cap_pi(sk)->psm = htobs(psm);
988 l2cap_pi(sk)->sport = htobs(psm);
993 write_unlock_bh(&l2cap_sk_list.lock);
999 sk->sk_max_ack_backlog = backlog;
1000 sk->sk_ack_backlog = 0;
1001 sk->sk_state = BT_LISTEN;
1008 static int l2cap_sock_accept(struct socket *sock, struct socket *newsock, int flags)
1010 DECLARE_WAITQUEUE(wait, current);
1011 struct sock *sk = sock->sk, *nsk;
1015 lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
1017 if (sk->sk_state != BT_LISTEN) {
1022 timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
1024 BT_DBG("sk %p timeo %ld", sk, timeo);
1026 /* Wait for an incoming connection. (wake-one). */
1027 add_wait_queue_exclusive(sk->sk_sleep, &wait);
1028 while (!(nsk = bt_accept_dequeue(sk, newsock))) {
1029 set_current_state(TASK_INTERRUPTIBLE);
1036 timeo = schedule_timeout(timeo);
1037 lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
1039 if (sk->sk_state != BT_LISTEN) {
1044 if (signal_pending(current)) {
1045 err = sock_intr_errno(timeo);
1049 set_current_state(TASK_RUNNING);
1050 remove_wait_queue(sk->sk_sleep, &wait);
1055 newsock->state = SS_CONNECTED;
1057 BT_DBG("new socket %p", nsk);
1064 static int l2cap_sock_getname(struct socket *sock, struct sockaddr *addr, int *len, int peer)
1066 struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
1067 struct sock *sk = sock->sk;
1069 BT_DBG("sock %p, sk %p", sock, sk);
1071 addr->sa_family = AF_BLUETOOTH;
1072 *len = sizeof(struct sockaddr_l2);
1075 bacpy(&la->l2_bdaddr, &bt_sk(sk)->dst);
1077 bacpy(&la->l2_bdaddr, &bt_sk(sk)->src);
1079 la->l2_psm = l2cap_pi(sk)->psm;
1083 static inline int l2cap_do_send(struct sock *sk, struct msghdr *msg, int len)
1085 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1086 struct sk_buff *skb, **frag;
1087 int err, hlen, count, sent=0;
1088 struct l2cap_hdr *lh;
1090 BT_DBG("sk %p len %d", sk, len);
1092 /* First fragment (with L2CAP header) */
1093 if (sk->sk_type == SOCK_DGRAM)
1094 hlen = L2CAP_HDR_SIZE + 2;
1096 hlen = L2CAP_HDR_SIZE;
1098 count = min_t(unsigned int, (conn->mtu - hlen), len);
1100 skb = bt_skb_send_alloc(sk, hlen + count,
1101 msg->msg_flags & MSG_DONTWAIT, &err);
1105 /* Create L2CAP header */
1106 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1107 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1108 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1110 if (sk->sk_type == SOCK_DGRAM)
1111 put_unaligned(l2cap_pi(sk)->psm, (__le16 *) skb_put(skb, 2));
1113 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count)) {
1121 /* Continuation fragments (no L2CAP header) */
1122 frag = &skb_shinfo(skb)->frag_list;
1124 count = min_t(unsigned int, conn->mtu, len);
1126 *frag = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err);
1130 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count)) {
1138 frag = &(*frag)->next;
1141 if ((err = hci_send_acl(conn->hcon, skb, 0)) < 0)
1151 static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len)
1153 struct sock *sk = sock->sk;
1156 BT_DBG("sock %p, sk %p", sock, sk);
1158 err = sock_error(sk);
1162 if (msg->msg_flags & MSG_OOB)
1165 /* Check outgoing MTU */
1166 if (sk->sk_type != SOCK_RAW && len > l2cap_pi(sk)->omtu)
1171 if (sk->sk_state == BT_CONNECTED)
1172 err = l2cap_do_send(sk, msg, len);
1180 static int l2cap_sock_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len, int flags)
1182 struct sock *sk = sock->sk;
1186 if (sk->sk_state == BT_CONNECT2 && bt_sk(sk)->defer_setup) {
1187 struct l2cap_conn_rsp rsp;
1189 sk->sk_state = BT_CONFIG;
1191 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
1192 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
1193 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
1194 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
1195 l2cap_send_cmd(l2cap_pi(sk)->conn, l2cap_pi(sk)->ident,
1196 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
1204 return bt_sock_recvmsg(iocb, sock, msg, len, flags);
1207 static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __user *optval, int optlen)
1209 struct sock *sk = sock->sk;
1210 struct l2cap_options opts;
1214 BT_DBG("sk %p", sk);
1220 opts.imtu = l2cap_pi(sk)->imtu;
1221 opts.omtu = l2cap_pi(sk)->omtu;
1222 opts.flush_to = l2cap_pi(sk)->flush_to;
1223 opts.mode = L2CAP_MODE_BASIC;
1225 len = min_t(unsigned int, sizeof(opts), optlen);
1226 if (copy_from_user((char *) &opts, optval, len)) {
1231 l2cap_pi(sk)->imtu = opts.imtu;
1232 l2cap_pi(sk)->omtu = opts.omtu;
1236 if (get_user(opt, (u32 __user *) optval)) {
1241 if (opt & L2CAP_LM_AUTH)
1242 l2cap_pi(sk)->sec_level = BT_SECURITY_LOW;
1243 if (opt & L2CAP_LM_ENCRYPT)
1244 l2cap_pi(sk)->sec_level = BT_SECURITY_MEDIUM;
1245 if (opt & L2CAP_LM_SECURE)
1246 l2cap_pi(sk)->sec_level = BT_SECURITY_HIGH;
1248 l2cap_pi(sk)->role_switch = (opt & L2CAP_LM_MASTER);
1249 l2cap_pi(sk)->force_reliable = (opt & L2CAP_LM_RELIABLE);
1261 static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, int optlen)
1263 struct sock *sk = sock->sk;
1264 struct bt_security sec;
1268 BT_DBG("sk %p", sk);
1270 if (level == SOL_L2CAP)
1271 return l2cap_sock_setsockopt_old(sock, optname, optval, optlen);
1273 if (level != SOL_BLUETOOTH)
1274 return -ENOPROTOOPT;
1280 if (sk->sk_type != SOCK_SEQPACKET) {
1285 sec.level = BT_SECURITY_LOW;
1287 len = min_t(unsigned int, sizeof(sec), optlen);
1288 if (copy_from_user((char *) &sec, optval, len)) {
1293 if (sec.level < BT_SECURITY_LOW ||
1294 sec.level > BT_SECURITY_HIGH) {
1299 l2cap_pi(sk)->sec_level = sec.level;
1302 case BT_DEFER_SETUP:
1303 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
1308 if (get_user(opt, (u32 __user *) optval)) {
1313 bt_sk(sk)->defer_setup = opt;
1325 static int l2cap_sock_getsockopt_old(struct socket *sock, int optname, char __user *optval, int __user *optlen)
1327 struct sock *sk = sock->sk;
1328 struct l2cap_options opts;
1329 struct l2cap_conninfo cinfo;
1333 BT_DBG("sk %p", sk);
1335 if (get_user(len, optlen))
1342 opts.imtu = l2cap_pi(sk)->imtu;
1343 opts.omtu = l2cap_pi(sk)->omtu;
1344 opts.flush_to = l2cap_pi(sk)->flush_to;
1345 opts.mode = L2CAP_MODE_BASIC;
1347 len = min_t(unsigned int, len, sizeof(opts));
1348 if (copy_to_user(optval, (char *) &opts, len))
1354 switch (l2cap_pi(sk)->sec_level) {
1355 case BT_SECURITY_LOW:
1356 opt = L2CAP_LM_AUTH;
1358 case BT_SECURITY_MEDIUM:
1359 opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT;
1361 case BT_SECURITY_HIGH:
1362 opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT |
1370 if (l2cap_pi(sk)->role_switch)
1371 opt |= L2CAP_LM_MASTER;
1373 if (l2cap_pi(sk)->force_reliable)
1374 opt |= L2CAP_LM_RELIABLE;
1376 if (put_user(opt, (u32 __user *) optval))
1380 case L2CAP_CONNINFO:
1381 if (sk->sk_state != BT_CONNECTED &&
1382 !(sk->sk_state == BT_CONNECT2 &&
1383 bt_sk(sk)->defer_setup)) {
1388 cinfo.hci_handle = l2cap_pi(sk)->conn->hcon->handle;
1389 memcpy(cinfo.dev_class, l2cap_pi(sk)->conn->hcon->dev_class, 3);
1391 len = min_t(unsigned int, len, sizeof(cinfo));
1392 if (copy_to_user(optval, (char *) &cinfo, len))
1406 static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen)
1408 struct sock *sk = sock->sk;
1409 struct bt_security sec;
1412 BT_DBG("sk %p", sk);
1414 if (level == SOL_L2CAP)
1415 return l2cap_sock_getsockopt_old(sock, optname, optval, optlen);
1417 if (level != SOL_BLUETOOTH)
1418 return -ENOPROTOOPT;
1420 if (get_user(len, optlen))
1427 if (sk->sk_type != SOCK_SEQPACKET) {
1432 sec.level = l2cap_pi(sk)->sec_level;
1434 len = min_t(unsigned int, len, sizeof(sec));
1435 if (copy_to_user(optval, (char *) &sec, len))
1440 case BT_DEFER_SETUP:
1441 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
1446 if (put_user(bt_sk(sk)->defer_setup, (u32 __user *) optval))
1460 static int l2cap_sock_shutdown(struct socket *sock, int how)
1462 struct sock *sk = sock->sk;
1465 BT_DBG("sock %p, sk %p", sock, sk);
1471 if (!sk->sk_shutdown) {
1472 sk->sk_shutdown = SHUTDOWN_MASK;
1473 l2cap_sock_clear_timer(sk);
1474 __l2cap_sock_close(sk, 0);
1476 if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
1477 err = bt_sock_wait_state(sk, BT_CLOSED,
1484 static int l2cap_sock_release(struct socket *sock)
1486 struct sock *sk = sock->sk;
1489 BT_DBG("sock %p, sk %p", sock, sk);
1494 err = l2cap_sock_shutdown(sock, 2);
1497 l2cap_sock_kill(sk);
1501 static void l2cap_chan_ready(struct sock *sk)
1503 struct sock *parent = bt_sk(sk)->parent;
1505 BT_DBG("sk %p, parent %p", sk, parent);
1507 l2cap_pi(sk)->conf_state = 0;
1508 l2cap_sock_clear_timer(sk);
1511 /* Outgoing channel.
1512 * Wake up socket sleeping on connect.
1514 sk->sk_state = BT_CONNECTED;
1515 sk->sk_state_change(sk);
1517 /* Incoming channel.
1518 * Wake up socket sleeping on accept.
1520 parent->sk_data_ready(parent, 0);
1524 /* Copy frame to all raw sockets on that connection */
1525 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
1527 struct l2cap_chan_list *l = &conn->chan_list;
1528 struct sk_buff *nskb;
1531 BT_DBG("conn %p", conn);
1533 read_lock(&l->lock);
1534 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
1535 if (sk->sk_type != SOCK_RAW)
1538 /* Don't send frame to the socket it came from */
1542 if (!(nskb = skb_clone(skb, GFP_ATOMIC)))
1545 if (sock_queue_rcv_skb(sk, nskb))
1548 read_unlock(&l->lock);
1551 /* ---- L2CAP signalling commands ---- */
1552 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
1553 u8 code, u8 ident, u16 dlen, void *data)
1555 struct sk_buff *skb, **frag;
1556 struct l2cap_cmd_hdr *cmd;
1557 struct l2cap_hdr *lh;
1560 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d", conn, code, ident, dlen);
1562 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
1563 count = min_t(unsigned int, conn->mtu, len);
1565 skb = bt_skb_alloc(count, GFP_ATOMIC);
1569 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1570 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
1571 lh->cid = cpu_to_le16(0x0001);
1573 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
1576 cmd->len = cpu_to_le16(dlen);
1579 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
1580 memcpy(skb_put(skb, count), data, count);
1586 /* Continuation fragments (no L2CAP header) */
1587 frag = &skb_shinfo(skb)->frag_list;
1589 count = min_t(unsigned int, conn->mtu, len);
1591 *frag = bt_skb_alloc(count, GFP_ATOMIC);
1595 memcpy(skb_put(*frag, count), data, count);
1600 frag = &(*frag)->next;
1610 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
1612 struct l2cap_conf_opt *opt = *ptr;
1615 len = L2CAP_CONF_OPT_SIZE + opt->len;
1623 *val = *((u8 *) opt->val);
1627 *val = __le16_to_cpu(*((__le16 *) opt->val));
1631 *val = __le32_to_cpu(*((__le32 *) opt->val));
1635 *val = (unsigned long) opt->val;
1639 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
1643 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
1645 struct l2cap_conf_opt *opt = *ptr;
1647 BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
1654 *((u8 *) opt->val) = val;
1658 *((__le16 *) opt->val) = cpu_to_le16(val);
1662 *((__le32 *) opt->val) = cpu_to_le32(val);
1666 memcpy(opt->val, (void *) val, len);
1670 *ptr += L2CAP_CONF_OPT_SIZE + len;
1673 static int l2cap_build_conf_req(struct sock *sk, void *data)
1675 struct l2cap_pinfo *pi = l2cap_pi(sk);
1676 struct l2cap_conf_req *req = data;
1677 void *ptr = req->data;
1679 BT_DBG("sk %p", sk);
1681 if (pi->imtu != L2CAP_DEFAULT_MTU)
1682 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);
1684 /* FIXME: Need actual value of the flush timeout */
1685 //if (flush_to != L2CAP_DEFAULT_FLUSH_TO)
1686 // l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2, pi->flush_to);
1688 req->dcid = cpu_to_le16(pi->dcid);
1689 req->flags = cpu_to_le16(0);
1694 static int l2cap_parse_conf_req(struct sock *sk, void *data)
1696 struct l2cap_pinfo *pi = l2cap_pi(sk);
1697 struct l2cap_conf_rsp *rsp = data;
1698 void *ptr = rsp->data;
1699 void *req = pi->conf_req;
1700 int len = pi->conf_len;
1701 int type, hint, olen;
1703 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
1704 u16 mtu = L2CAP_DEFAULT_MTU;
1705 u16 result = L2CAP_CONF_SUCCESS;
1707 BT_DBG("sk %p", sk);
1709 while (len >= L2CAP_CONF_OPT_SIZE) {
1710 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
1716 case L2CAP_CONF_MTU:
1720 case L2CAP_CONF_FLUSH_TO:
1724 case L2CAP_CONF_QOS:
1727 case L2CAP_CONF_RFC:
1728 if (olen == sizeof(rfc))
1729 memcpy(&rfc, (void *) val, olen);
1736 result = L2CAP_CONF_UNKNOWN;
1737 *((u8 *) ptr++) = type;
1742 if (result == L2CAP_CONF_SUCCESS) {
1743 /* Configure output options and let the other side know
1744 * which ones we don't like. */
1746 if (rfc.mode == L2CAP_MODE_BASIC) {
1748 result = L2CAP_CONF_UNACCEPT;
1751 pi->conf_state |= L2CAP_CONF_OUTPUT_DONE;
1754 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);
1756 result = L2CAP_CONF_UNACCEPT;
1758 memset(&rfc, 0, sizeof(rfc));
1759 rfc.mode = L2CAP_MODE_BASIC;
1761 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
1762 sizeof(rfc), (unsigned long) &rfc);
1766 rsp->scid = cpu_to_le16(pi->dcid);
1767 rsp->result = cpu_to_le16(result);
1768 rsp->flags = cpu_to_le16(0x0000);
1773 static int l2cap_build_conf_rsp(struct sock *sk, void *data, u16 result, u16 flags)
1775 struct l2cap_conf_rsp *rsp = data;
1776 void *ptr = rsp->data;
1778 BT_DBG("sk %p", sk);
1780 rsp->scid = cpu_to_le16(l2cap_pi(sk)->dcid);
1781 rsp->result = cpu_to_le16(result);
1782 rsp->flags = cpu_to_le16(flags);
1787 static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1789 struct l2cap_cmd_rej *rej = (struct l2cap_cmd_rej *) data;
1791 if (rej->reason != 0x0000)
1794 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
1795 cmd->ident == conn->info_ident) {
1796 del_timer(&conn->info_timer);
1798 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
1799 conn->info_ident = 0;
1801 l2cap_conn_start(conn);
1807 static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1809 struct l2cap_chan_list *list = &conn->chan_list;
1810 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
1811 struct l2cap_conn_rsp rsp;
1812 struct sock *sk, *parent;
1813 int result, status = L2CAP_CS_NO_INFO;
1815 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
1816 __le16 psm = req->psm;
1818 BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);
1820 /* Check if we have socket listening on psm */
1821 parent = l2cap_get_sock_by_psm(BT_LISTEN, psm, conn->src);
1823 result = L2CAP_CR_BAD_PSM;
1827 /* Check if the ACL is secure enough (if not SDP) */
1828 if (psm != cpu_to_le16(0x0001) &&
1829 !hci_conn_check_link_mode(conn->hcon)) {
1830 result = L2CAP_CR_SEC_BLOCK;
1834 result = L2CAP_CR_NO_MEM;
1836 /* Check for backlog size */
1837 if (sk_acceptq_is_full(parent)) {
1838 BT_DBG("backlog full %d", parent->sk_ack_backlog);
1842 sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP, GFP_ATOMIC);
1846 write_lock_bh(&list->lock);
1848 /* Check if we already have channel with that dcid */
1849 if (__l2cap_get_chan_by_dcid(list, scid)) {
1850 write_unlock_bh(&list->lock);
1851 sock_set_flag(sk, SOCK_ZAPPED);
1852 l2cap_sock_kill(sk);
1856 hci_conn_hold(conn->hcon);
1858 l2cap_sock_init(sk, parent);
1859 bacpy(&bt_sk(sk)->src, conn->src);
1860 bacpy(&bt_sk(sk)->dst, conn->dst);
1861 l2cap_pi(sk)->psm = psm;
1862 l2cap_pi(sk)->dcid = scid;
1864 __l2cap_chan_add(conn, sk, parent);
1865 dcid = l2cap_pi(sk)->scid;
1867 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
1869 l2cap_pi(sk)->ident = cmd->ident;
1871 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
1872 if (l2cap_check_security(sk)) {
1873 if (bt_sk(sk)->defer_setup) {
1874 sk->sk_state = BT_CONNECT2;
1875 result = L2CAP_CR_PEND;
1876 status = L2CAP_CS_AUTHOR_PEND;
1877 parent->sk_data_ready(parent, 0);
1879 sk->sk_state = BT_CONFIG;
1880 result = L2CAP_CR_SUCCESS;
1881 status = L2CAP_CS_NO_INFO;
1884 sk->sk_state = BT_CONNECT2;
1885 result = L2CAP_CR_PEND;
1886 status = L2CAP_CS_AUTHEN_PEND;
1889 sk->sk_state = BT_CONNECT2;
1890 result = L2CAP_CR_PEND;
1891 status = L2CAP_CS_NO_INFO;
1894 write_unlock_bh(&list->lock);
1897 bh_unlock_sock(parent);
1900 rsp.scid = cpu_to_le16(scid);
1901 rsp.dcid = cpu_to_le16(dcid);
1902 rsp.result = cpu_to_le16(result);
1903 rsp.status = cpu_to_le16(status);
1904 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
1906 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
1907 struct l2cap_info_req info;
1908 info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
1910 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
1911 conn->info_ident = l2cap_get_ident(conn);
1913 mod_timer(&conn->info_timer, jiffies +
1914 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
1916 l2cap_send_cmd(conn, conn->info_ident,
1917 L2CAP_INFO_REQ, sizeof(info), &info);
1923 static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1925 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
1926 u16 scid, dcid, result, status;
1930 scid = __le16_to_cpu(rsp->scid);
1931 dcid = __le16_to_cpu(rsp->dcid);
1932 result = __le16_to_cpu(rsp->result);
1933 status = __le16_to_cpu(rsp->status);
1935 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);
1938 if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, scid)))
1941 if (!(sk = l2cap_get_chan_by_ident(&conn->chan_list, cmd->ident)))
1946 case L2CAP_CR_SUCCESS:
1947 sk->sk_state = BT_CONFIG;
1948 l2cap_pi(sk)->ident = 0;
1949 l2cap_pi(sk)->dcid = dcid;
1950 l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
1952 l2cap_pi(sk)->conf_state &= ~L2CAP_CONF_CONNECT_PEND;
1954 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
1955 l2cap_build_conf_req(sk, req), req);
1959 l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
1963 l2cap_chan_del(sk, ECONNREFUSED);
1971 static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
1973 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
1979 dcid = __le16_to_cpu(req->dcid);
1980 flags = __le16_to_cpu(req->flags);
1982 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
1984 if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid)))
1987 if (sk->sk_state == BT_DISCONN)
1990 /* Reject if config buffer is too small. */
1991 len = cmd_len - sizeof(*req);
1992 if (l2cap_pi(sk)->conf_len + len > sizeof(l2cap_pi(sk)->conf_req)) {
1993 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
1994 l2cap_build_conf_rsp(sk, rsp,
1995 L2CAP_CONF_REJECT, flags), rsp);
2000 memcpy(l2cap_pi(sk)->conf_req + l2cap_pi(sk)->conf_len, req->data, len);
2001 l2cap_pi(sk)->conf_len += len;
2003 if (flags & 0x0001) {
2004 /* Incomplete config. Send empty response. */
2005 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2006 l2cap_build_conf_rsp(sk, rsp,
2007 L2CAP_CONF_SUCCESS, 0x0001), rsp);
2011 /* Complete config. */
2012 len = l2cap_parse_conf_req(sk, rsp);
2016 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
2018 /* Reset config buffer. */
2019 l2cap_pi(sk)->conf_len = 0;
2021 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE))
2024 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_INPUT_DONE) {
2025 sk->sk_state = BT_CONNECTED;
2026 l2cap_chan_ready(sk);
2030 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)) {
2032 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2033 l2cap_build_conf_req(sk, buf), buf);
2041 static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2043 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
2044 u16 scid, flags, result;
2047 scid = __le16_to_cpu(rsp->scid);
2048 flags = __le16_to_cpu(rsp->flags);
2049 result = __le16_to_cpu(rsp->result);
2051 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x", scid, flags, result);
2053 if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, scid)))
2057 case L2CAP_CONF_SUCCESS:
2060 case L2CAP_CONF_UNACCEPT:
2061 if (++l2cap_pi(sk)->conf_retry < L2CAP_CONF_MAX_RETRIES) {
2063 /* It does not make sense to adjust L2CAP parameters
2064 * that are currently defined in the spec. We simply
2065 * resend config request that we sent earlier. It is
2066 * stupid, but it helps qualification testing which
2067 * expects at least some response from us. */
2068 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2069 l2cap_build_conf_req(sk, req), req);
2074 sk->sk_state = BT_DISCONN;
2075 sk->sk_err = ECONNRESET;
2076 l2cap_sock_set_timer(sk, HZ * 5);
2078 struct l2cap_disconn_req req;
2079 req.dcid = cpu_to_le16(l2cap_pi(sk)->dcid);
2080 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
2081 l2cap_send_cmd(conn, l2cap_get_ident(conn),
2082 L2CAP_DISCONN_REQ, sizeof(req), &req);
2090 l2cap_pi(sk)->conf_state |= L2CAP_CONF_INPUT_DONE;
2092 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE) {
2093 sk->sk_state = BT_CONNECTED;
2094 l2cap_chan_ready(sk);
2102 static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2104 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
2105 struct l2cap_disconn_rsp rsp;
2109 scid = __le16_to_cpu(req->scid);
2110 dcid = __le16_to_cpu(req->dcid);
2112 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
2114 if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid)))
2117 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
2118 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
2119 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
2121 sk->sk_shutdown = SHUTDOWN_MASK;
2123 l2cap_chan_del(sk, ECONNRESET);
2126 l2cap_sock_kill(sk);
2130 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2132 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
2136 scid = __le16_to_cpu(rsp->scid);
2137 dcid = __le16_to_cpu(rsp->dcid);
2139 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
2141 if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, scid)))
2144 l2cap_chan_del(sk, 0);
2147 l2cap_sock_kill(sk);
2151 static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2153 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
2156 type = __le16_to_cpu(req->type);
2158 BT_DBG("type 0x%4.4x", type);
2160 if (type == L2CAP_IT_FEAT_MASK) {
2162 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
2163 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
2164 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
2165 put_unaligned(cpu_to_le32(l2cap_feat_mask), (__le32 *) rsp->data);
2166 l2cap_send_cmd(conn, cmd->ident,
2167 L2CAP_INFO_RSP, sizeof(buf), buf);
2168 } else if (type == L2CAP_IT_FIXED_CHAN) {
2170 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
2171 rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
2172 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
2173 memcpy(buf + 4, l2cap_fixed_chan, 8);
2174 l2cap_send_cmd(conn, cmd->ident,
2175 L2CAP_INFO_RSP, sizeof(buf), buf);
2177 struct l2cap_info_rsp rsp;
2178 rsp.type = cpu_to_le16(type);
2179 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
2180 l2cap_send_cmd(conn, cmd->ident,
2181 L2CAP_INFO_RSP, sizeof(rsp), &rsp);
2187 static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2189 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
2192 type = __le16_to_cpu(rsp->type);
2193 result = __le16_to_cpu(rsp->result);
2195 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
2197 del_timer(&conn->info_timer);
2199 if (type == L2CAP_IT_FEAT_MASK) {
2200 conn->feat_mask = get_unaligned_le32(rsp->data);
2202 if (conn->feat_mask & 0x0080) {
2203 struct l2cap_info_req req;
2204 req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
2206 conn->info_ident = l2cap_get_ident(conn);
2208 l2cap_send_cmd(conn, conn->info_ident,
2209 L2CAP_INFO_REQ, sizeof(req), &req);
2211 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2212 conn->info_ident = 0;
2214 l2cap_conn_start(conn);
2216 } else if (type == L2CAP_IT_FIXED_CHAN) {
2217 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2218 conn->info_ident = 0;
2220 l2cap_conn_start(conn);
2226 static inline void l2cap_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb)
2228 u8 *data = skb->data;
2230 struct l2cap_cmd_hdr cmd;
2233 l2cap_raw_recv(conn, skb);
2235 while (len >= L2CAP_CMD_HDR_SIZE) {
2237 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
2238 data += L2CAP_CMD_HDR_SIZE;
2239 len -= L2CAP_CMD_HDR_SIZE;
2241 cmd_len = le16_to_cpu(cmd.len);
2243 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);
2245 if (cmd_len > len || !cmd.ident) {
2246 BT_DBG("corrupted command");
2251 case L2CAP_COMMAND_REJ:
2252 l2cap_command_rej(conn, &cmd, data);
2255 case L2CAP_CONN_REQ:
2256 err = l2cap_connect_req(conn, &cmd, data);
2259 case L2CAP_CONN_RSP:
2260 err = l2cap_connect_rsp(conn, &cmd, data);
2263 case L2CAP_CONF_REQ:
2264 err = l2cap_config_req(conn, &cmd, cmd_len, data);
2267 case L2CAP_CONF_RSP:
2268 err = l2cap_config_rsp(conn, &cmd, data);
2271 case L2CAP_DISCONN_REQ:
2272 err = l2cap_disconnect_req(conn, &cmd, data);
2275 case L2CAP_DISCONN_RSP:
2276 err = l2cap_disconnect_rsp(conn, &cmd, data);
2279 case L2CAP_ECHO_REQ:
2280 l2cap_send_cmd(conn, cmd.ident, L2CAP_ECHO_RSP, cmd_len, data);
2283 case L2CAP_ECHO_RSP:
2286 case L2CAP_INFO_REQ:
2287 err = l2cap_information_req(conn, &cmd, data);
2290 case L2CAP_INFO_RSP:
2291 err = l2cap_information_rsp(conn, &cmd, data);
2295 BT_ERR("Unknown signaling command 0x%2.2x", cmd.code);
2301 struct l2cap_cmd_rej rej;
2302 BT_DBG("error %d", err);
2304 /* FIXME: Map err to a valid reason */
2305 rej.reason = cpu_to_le16(0);
2306 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
2316 static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
2320 sk = l2cap_get_chan_by_scid(&conn->chan_list, cid);
2322 BT_DBG("unknown cid 0x%4.4x", cid);
2326 BT_DBG("sk %p, len %d", sk, skb->len);
2328 if (sk->sk_state != BT_CONNECTED)
2331 if (l2cap_pi(sk)->imtu < skb->len)
2334 /* If socket recv buffers overflows we drop data here
2335 * which is *bad* because L2CAP has to be reliable.
2336 * But we don't have any other choice. L2CAP doesn't
2337 * provide flow control mechanism. */
2339 if (!sock_queue_rcv_skb(sk, skb))
2352 static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
2356 sk = l2cap_get_sock_by_psm(0, psm, conn->src);
2360 BT_DBG("sk %p, len %d", sk, skb->len);
2362 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_CONNECTED)
2365 if (l2cap_pi(sk)->imtu < skb->len)
2368 if (!sock_queue_rcv_skb(sk, skb))
2375 if (sk) bh_unlock_sock(sk);
2379 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
2381 struct l2cap_hdr *lh = (void *) skb->data;
2385 skb_pull(skb, L2CAP_HDR_SIZE);
2386 cid = __le16_to_cpu(lh->cid);
2387 len = __le16_to_cpu(lh->len);
2389 BT_DBG("len %d, cid 0x%4.4x", len, cid);
2393 l2cap_sig_channel(conn, skb);
2397 psm = get_unaligned((__le16 *) skb->data);
2399 l2cap_conless_channel(conn, psm, skb);
2403 l2cap_data_channel(conn, cid, skb);
2408 /* ---- L2CAP interface with lower layer (HCI) ---- */
2410 static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
2412 int exact = 0, lm1 = 0, lm2 = 0;
2413 register struct sock *sk;
2414 struct hlist_node *node;
2416 if (type != ACL_LINK)
2419 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
2421 /* Find listening sockets and check their link_mode */
2422 read_lock(&l2cap_sk_list.lock);
2423 sk_for_each(sk, node, &l2cap_sk_list.head) {
2424 if (sk->sk_state != BT_LISTEN)
2427 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
2428 lm1 |= HCI_LM_ACCEPT;
2429 if (l2cap_pi(sk)->role_switch)
2430 lm1 |= HCI_LM_MASTER;
2432 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
2433 lm2 |= HCI_LM_ACCEPT;
2434 if (l2cap_pi(sk)->role_switch)
2435 lm2 |= HCI_LM_MASTER;
2438 read_unlock(&l2cap_sk_list.lock);
2440 return exact ? lm1 : lm2;
2443 static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
2445 struct l2cap_conn *conn;
2447 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
2449 if (hcon->type != ACL_LINK)
2453 conn = l2cap_conn_add(hcon, status);
2455 l2cap_conn_ready(conn);
2457 l2cap_conn_del(hcon, bt_err(status));
2462 static int l2cap_disconn_ind(struct hci_conn *hcon, u8 reason)
2464 BT_DBG("hcon %p reason %d", hcon, reason);
2466 if (hcon->type != ACL_LINK)
2469 l2cap_conn_del(hcon, bt_err(reason));
2474 static inline void l2cap_check_encryption(struct sock *sk, u8 encrypt)
2476 if (sk->sk_type != SOCK_SEQPACKET)
2479 if (encrypt == 0x00) {
2480 if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM) {
2481 l2cap_sock_clear_timer(sk);
2482 l2cap_sock_set_timer(sk, HZ * 5);
2483 } else if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
2484 __l2cap_sock_close(sk, ECONNREFUSED);
2486 if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM)
2487 l2cap_sock_clear_timer(sk);
2491 static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
2493 struct l2cap_chan_list *l;
2494 struct l2cap_conn *conn = hcon->l2cap_data;
2500 l = &conn->chan_list;
2502 BT_DBG("conn %p", conn);
2504 read_lock(&l->lock);
2506 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
2509 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_CONNECT_PEND) {
2514 if (!status && (sk->sk_state == BT_CONNECTED ||
2515 sk->sk_state == BT_CONFIG)) {
2516 l2cap_check_encryption(sk, encrypt);
2521 if (sk->sk_state == BT_CONNECT) {
2523 struct l2cap_conn_req req;
2524 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
2525 req.psm = l2cap_pi(sk)->psm;
2527 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
2529 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
2530 L2CAP_CONN_REQ, sizeof(req), &req);
2532 l2cap_sock_clear_timer(sk);
2533 l2cap_sock_set_timer(sk, HZ / 10);
2535 } else if (sk->sk_state == BT_CONNECT2) {
2536 struct l2cap_conn_rsp rsp;
2540 sk->sk_state = BT_CONFIG;
2541 result = L2CAP_CR_SUCCESS;
2543 sk->sk_state = BT_DISCONN;
2544 l2cap_sock_set_timer(sk, HZ / 10);
2545 result = L2CAP_CR_SEC_BLOCK;
2548 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
2549 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
2550 rsp.result = cpu_to_le16(result);
2551 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
2552 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
2553 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
2559 read_unlock(&l->lock);
2564 static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
2566 struct l2cap_conn *conn = hcon->l2cap_data;
2568 if (!conn && !(conn = l2cap_conn_add(hcon, 0)))
2571 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
2573 if (flags & ACL_START) {
2574 struct l2cap_hdr *hdr;
2578 BT_ERR("Unexpected start frame (len %d)", skb->len);
2579 kfree_skb(conn->rx_skb);
2580 conn->rx_skb = NULL;
2582 l2cap_conn_unreliable(conn, ECOMM);
2586 BT_ERR("Frame is too short (len %d)", skb->len);
2587 l2cap_conn_unreliable(conn, ECOMM);
2591 hdr = (struct l2cap_hdr *) skb->data;
2592 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
2594 if (len == skb->len) {
2595 /* Complete frame received */
2596 l2cap_recv_frame(conn, skb);
2600 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
2602 if (skb->len > len) {
2603 BT_ERR("Frame is too long (len %d, expected len %d)",
2605 l2cap_conn_unreliable(conn, ECOMM);
2609 /* Allocate skb for the complete frame (with header) */
2610 if (!(conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC)))
2613 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
2615 conn->rx_len = len - skb->len;
2617 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
2619 if (!conn->rx_len) {
2620 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
2621 l2cap_conn_unreliable(conn, ECOMM);
2625 if (skb->len > conn->rx_len) {
2626 BT_ERR("Fragment is too long (len %d, expected %d)",
2627 skb->len, conn->rx_len);
2628 kfree_skb(conn->rx_skb);
2629 conn->rx_skb = NULL;
2631 l2cap_conn_unreliable(conn, ECOMM);
2635 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
2637 conn->rx_len -= skb->len;
2639 if (!conn->rx_len) {
2640 /* Complete frame received */
2641 l2cap_recv_frame(conn, conn->rx_skb);
2642 conn->rx_skb = NULL;
2651 static ssize_t l2cap_sysfs_show(struct class *dev, char *buf)
2654 struct hlist_node *node;
2657 read_lock_bh(&l2cap_sk_list.lock);
2659 sk_for_each(sk, node, &l2cap_sk_list.head) {
2660 struct l2cap_pinfo *pi = l2cap_pi(sk);
2662 str += sprintf(str, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d\n",
2663 batostr(&bt_sk(sk)->src), batostr(&bt_sk(sk)->dst),
2664 sk->sk_state, btohs(pi->psm), pi->scid, pi->dcid,
2665 pi->imtu, pi->omtu, pi->sec_level);
2668 read_unlock_bh(&l2cap_sk_list.lock);
2673 static CLASS_ATTR(l2cap, S_IRUGO, l2cap_sysfs_show, NULL);
2675 static const struct proto_ops l2cap_sock_ops = {
2676 .family = PF_BLUETOOTH,
2677 .owner = THIS_MODULE,
2678 .release = l2cap_sock_release,
2679 .bind = l2cap_sock_bind,
2680 .connect = l2cap_sock_connect,
2681 .listen = l2cap_sock_listen,
2682 .accept = l2cap_sock_accept,
2683 .getname = l2cap_sock_getname,
2684 .sendmsg = l2cap_sock_sendmsg,
2685 .recvmsg = l2cap_sock_recvmsg,
2686 .poll = bt_sock_poll,
2687 .ioctl = bt_sock_ioctl,
2688 .mmap = sock_no_mmap,
2689 .socketpair = sock_no_socketpair,
2690 .shutdown = l2cap_sock_shutdown,
2691 .setsockopt = l2cap_sock_setsockopt,
2692 .getsockopt = l2cap_sock_getsockopt
2695 static struct net_proto_family l2cap_sock_family_ops = {
2696 .family = PF_BLUETOOTH,
2697 .owner = THIS_MODULE,
2698 .create = l2cap_sock_create,
2701 static struct hci_proto l2cap_hci_proto = {
2703 .id = HCI_PROTO_L2CAP,
2704 .connect_ind = l2cap_connect_ind,
2705 .connect_cfm = l2cap_connect_cfm,
2706 .disconn_ind = l2cap_disconn_ind,
2707 .security_cfm = l2cap_security_cfm,
2708 .recv_acldata = l2cap_recv_acldata
2711 static int __init l2cap_init(void)
2715 err = proto_register(&l2cap_proto, 0);
2719 err = bt_sock_register(BTPROTO_L2CAP, &l2cap_sock_family_ops);
2721 BT_ERR("L2CAP socket registration failed");
2725 err = hci_register_proto(&l2cap_hci_proto);
2727 BT_ERR("L2CAP protocol registration failed");
2728 bt_sock_unregister(BTPROTO_L2CAP);
2732 if (class_create_file(bt_class, &class_attr_l2cap) < 0)
2733 BT_ERR("Failed to create L2CAP info file");
2735 BT_INFO("L2CAP ver %s", VERSION);
2736 BT_INFO("L2CAP socket layer initialized");
2741 proto_unregister(&l2cap_proto);
2745 static void __exit l2cap_exit(void)
2747 class_remove_file(bt_class, &class_attr_l2cap);
2749 if (bt_sock_unregister(BTPROTO_L2CAP) < 0)
2750 BT_ERR("L2CAP socket unregistration failed");
2752 if (hci_unregister_proto(&l2cap_hci_proto) < 0)
2753 BT_ERR("L2CAP protocol unregistration failed");
2755 proto_unregister(&l2cap_proto);
2758 void l2cap_load(void)
2760 /* Dummy function to trigger automatic L2CAP module loading by
2761 * other modules that use L2CAP sockets but don't use any other
2762 * symbols from it. */
2765 EXPORT_SYMBOL(l2cap_load);
2767 module_init(l2cap_init);
2768 module_exit(l2cap_exit);
2770 MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
2771 MODULE_DESCRIPTION("Bluetooth L2CAP ver " VERSION);
2772 MODULE_VERSION(VERSION);
2773 MODULE_LICENSE("GPL");
2774 MODULE_ALIAS("bt-proto-0");