2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
7 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License version 2 as
11 published by the Free Software Foundation;
13 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
14 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
16 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
17 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
18 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
19 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
20 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
22 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
23 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
24 SOFTWARE IS DISCLAIMED.
27 /* Bluetooth L2CAP core. */
29 #include <linux/module.h>
31 #include <linux/types.h>
32 #include <linux/capability.h>
33 #include <linux/errno.h>
34 #include <linux/kernel.h>
35 #include <linux/sched.h>
36 #include <linux/slab.h>
37 #include <linux/poll.h>
38 #include <linux/fcntl.h>
39 #include <linux/init.h>
40 #include <linux/interrupt.h>
41 #include <linux/socket.h>
42 #include <linux/skbuff.h>
43 #include <linux/list.h>
44 #include <linux/device.h>
45 #include <linux/debugfs.h>
46 #include <linux/seq_file.h>
47 #include <linux/uaccess.h>
48 #include <linux/crc16.h>
51 #include <asm/system.h>
52 #include <asm/unaligned.h>
54 #include <net/bluetooth/bluetooth.h>
55 #include <net/bluetooth/hci_core.h>
56 #include <net/bluetooth/l2cap.h>
60 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
61 static u8 l2cap_fixed_chan[8] = { 0x02, };
63 static struct workqueue_struct *_busy_wq;
65 struct bt_sock_list l2cap_sk_list = {
66 .lock = __RW_LOCK_UNLOCKED(l2cap_sk_list.lock)
69 static void l2cap_busy_work(struct work_struct *work);
71 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
72 u8 code, u8 ident, u16 dlen, void *data);
73 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
75 static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb);
77 /* ---- L2CAP channels ---- */
/* Look up the channel on @conn whose remote (destination) CID equals @cid.
 * "__" prefix: callers hold conn->chan_lock (see the locked wrappers below).
 * NOTE(review): this dump is missing interior lines (decl of 'c', returns). */
78 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn, u16 cid)
82 list_for_each_entry(c, &conn->chan_l, list) {
83 struct sock *s = c->sk;
84 if (l2cap_pi(s)->dcid == cid)
/* Look up the channel on @conn whose local (source) CID equals @cid.
 * "__" prefix: callers hold conn->chan_lock.
 * NOTE(review): dump is missing interior lines (decl of 'c', returns). */
91 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
95 list_for_each_entry(c, &conn->chan_l, list) {
96 struct sock *s = c->sk;
97 if (l2cap_pi(s)->scid == cid)
103 /* Find channel with given SCID.
104 * Returns locked socket */
/* Locked wrapper: performs the SCID lookup under the chan_lock read lock.
 * NOTE(review): the lines that lock the found socket before returning are
 * missing from this dump. */
105 static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
107 struct l2cap_chan *c;
109 read_lock(&conn->chan_lock);
110 c = __l2cap_get_chan_by_scid(conn, cid);
113 read_unlock(&conn->chan_lock);
/* Look up the channel on @conn that owns the signalling identifier @ident.
 * "__" prefix: callers hold conn->chan_lock. */
117 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
119 struct l2cap_chan *c;
121 list_for_each_entry(c, &conn->chan_l, list) {
122 if (c->ident == ident)
/* Locked wrapper around __l2cap_get_chan_by_ident(): takes the chan_lock
 * read lock around the list walk. */
128 static inline struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
130 struct l2cap_chan *c;
132 read_lock(&conn->chan_lock);
133 c = __l2cap_get_chan_by_ident(conn, ident);
136 read_unlock(&conn->chan_lock);
/* Allocate a free dynamic source CID on @conn by linear scan of the
 * dynamic range; a CID with no channel registered is considered free.
 * NOTE(review): the return statements are missing from this dump. */
140 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
142 u16 cid = L2CAP_CID_DYN_START;
144 for (; cid < L2CAP_CID_DYN_END; cid++) {
145 if (!__l2cap_get_chan_by_scid(conn, cid))
/* Allocate a zeroed l2cap_chan for socket @sk (atomic context: GFP_ATOMIC).
 * NOTE(review): the NULL check and chan->sk assignment are not visible in
 * this dump — presumably present in the omitted lines; verify. */
152 static struct l2cap_chan *l2cap_chan_alloc(struct sock *sk)
154 struct l2cap_chan *chan;
156 chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
/* Attach @chan to @conn: assign CIDs/MTU according to socket type
 * (connection-oriented, connectionless, or raw/signalling) and link the
 * channel onto conn->chan_l.  "__" prefix: callers hold conn->chan_lock
 * for writing (see l2cap_chan_add()). */
165 static void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
167 struct sock *sk = chan->sk;
169 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
170 l2cap_pi(sk)->psm, l2cap_pi(sk)->dcid);
/* 0x13 = "remote user terminated connection"; default disconnect reason */
172 conn->disc_reason = 0x13;
174 l2cap_pi(sk)->conn = conn;
176 if (sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM) {
177 if (conn->hcon->type == LE_LINK) {
/* LE links use the fixed LE data channel and LE default MTU */
179 l2cap_pi(sk)->omtu = L2CAP_LE_DEFAULT_MTU;
180 l2cap_pi(sk)->scid = L2CAP_CID_LE_DATA;
181 l2cap_pi(sk)->dcid = L2CAP_CID_LE_DATA;
183 /* Alloc CID for connection-oriented socket */
184 l2cap_pi(sk)->scid = l2cap_alloc_cid(conn);
185 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
187 } else if (sk->sk_type == SOCK_DGRAM) {
188 /* Connectionless socket */
189 l2cap_pi(sk)->scid = L2CAP_CID_CONN_LESS;
190 l2cap_pi(sk)->dcid = L2CAP_CID_CONN_LESS;
191 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
193 /* Raw socket can send/recv signalling messages only */
194 l2cap_pi(sk)->scid = L2CAP_CID_SIGNALING;
195 l2cap_pi(sk)->dcid = L2CAP_CID_SIGNALING;
196 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
201 list_add(&chan->list, &conn->chan_l);
/* Tear down @chan: unlink it from the connection, mark the socket closed
 * (with @err as the error), wake any accepting parent, and purge all ERTM
 * state (queues, SREJ list, timers). */
205 * Must be called on the locked socket. */
206 void l2cap_chan_del(struct l2cap_chan *chan, int err)
208 struct sock *sk = chan->sk;
209 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
210 struct sock *parent = bt_sk(sk)->parent;
212 l2cap_sock_clear_timer(sk);
214 BT_DBG("sk %p, conn %p, err %d", sk, conn, err);
217 /* Delete from channel list */
218 write_lock_bh(&conn->chan_lock);
219 list_del(&chan->list);
220 write_unlock_bh(&conn->chan_lock);
222 l2cap_pi(sk)->conn = NULL;
/* Drop the reference taken when the channel was attached to the hcon */
224 hci_conn_put(conn->hcon);
227 sk->sk_state = BT_CLOSED;
228 sock_set_flag(sk, SOCK_ZAPPED);
/* If this was a pending child of a listening socket, detach and notify
 * the parent; otherwise just signal the state change. */
234 bt_accept_unlink(sk);
235 parent->sk_data_ready(parent, 0);
237 sk->sk_state_change(sk);
239 skb_queue_purge(TX_QUEUE(sk));
241 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
242 struct srej_list *l, *tmp;
244 del_timer(&chan->retrans_timer);
245 del_timer(&chan->monitor_timer);
246 del_timer(&chan->ack_timer);
248 skb_queue_purge(SREJ_QUEUE(sk));
249 skb_queue_purge(BUSY_QUEUE(sk));
/* Free every outstanding SREJ entry (kfree of each node is in the
 * omitted loop body). */
251 list_for_each_entry_safe(l, tmp, SREJ_LIST(sk), list) {
/* Map the socket type / PSM / security level to an HCI authentication
 * requirement: raw sockets get dedicated bonding, SDP (PSM 0x0001) never
 * bonds, everything else gets general bonding (MITM at BT_SECURITY_HIGH). */
260 static inline u8 l2cap_get_auth_type(struct sock *sk)
262 if (sk->sk_type == SOCK_RAW) {
263 switch (l2cap_pi(sk)->sec_level) {
264 case BT_SECURITY_HIGH:
265 return HCI_AT_DEDICATED_BONDING_MITM;
266 case BT_SECURITY_MEDIUM:
267 return HCI_AT_DEDICATED_BONDING;
269 return HCI_AT_NO_BONDING;
/* PSM 0x0001 is SDP: downgrade LOW to the special SDP level */
271 } else if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
272 if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW)
273 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
275 if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
276 return HCI_AT_NO_BONDING_MITM;
278 return HCI_AT_NO_BONDING;
280 switch (l2cap_pi(sk)->sec_level) {
281 case BT_SECURITY_HIGH:
282 return HCI_AT_GENERAL_BONDING_MITM;
283 case BT_SECURITY_MEDIUM:
284 return HCI_AT_GENERAL_BONDING;
286 return HCI_AT_NO_BONDING;
291 /* Service level security */
/* Ask the HCI layer to enforce the channel's security level on the
 * underlying ACL link; returns hci_conn_security()'s result.
 * NOTE(review): the auth_type declaration line is omitted in this dump. */
292 static inline int l2cap_check_security(struct sock *sk)
294 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
297 auth_type = l2cap_get_auth_type(sk);
299 return hci_conn_security(conn->hcon, l2cap_pi(sk)->sec_level,
/* Allocate the next signalling command identifier for @conn, wrapping
 * within the kernel-reserved range under conn->lock. */
303 u8 l2cap_get_ident(struct l2cap_conn *conn)
307 /* Get next available identificator.
308 * 1 - 128 are used by kernel.
309 * 129 - 199 are reserved.
310 * 200 - 254 are used by utilities like l2ping, etc.
313 spin_lock_bh(&conn->lock);
/* Wrap back to the start of the kernel range (reset line omitted here) */
315 if (++conn->tx_ident > 128)
320 spin_unlock_bh(&conn->lock);
/* Build a signalling command PDU and transmit it on @conn's ACL link,
 * requesting no-flush ACL start when the controller supports it. */
325 void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
327 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
330 BT_DBG("code 0x%2.2x", code);
335 if (lmp_no_flush_capable(conn->hcon->hdev))
336 flags = ACL_START_NO_FLUSH;
340 hci_send_acl(conn->hcon, skb, flags);
/* Build and send a single ERTM S-frame carrying @control on @chan,
 * folding in any pending Final/Poll bits and appending a CRC16 FCS when
 * the channel is configured for it. */
343 static inline void l2cap_send_sframe(struct l2cap_chan *chan, u16 control)
346 struct l2cap_hdr *lh;
347 struct l2cap_pinfo *pi = l2cap_pi(chan->sk);
348 struct l2cap_conn *conn = pi->conn;
349 struct sock *sk = (struct sock *)pi;
/* header + 2-byte control field; FCS adds 2 more below when enabled */
350 int count, hlen = L2CAP_HDR_SIZE + 2;
353 if (sk->sk_state != BT_CONNECTED)
356 if (pi->fcs == L2CAP_FCS_CRC16)
359 BT_DBG("pi %p, control 0x%2.2x", pi, control);
361 count = min_t(unsigned int, conn->mtu, hlen);
362 control |= L2CAP_CTRL_FRAME_TYPE;
/* Consume a pending F-bit, then a pending P-bit, into this frame */
364 if (chan->conn_state & L2CAP_CONN_SEND_FBIT) {
365 control |= L2CAP_CTRL_FINAL;
366 chan->conn_state &= ~L2CAP_CONN_SEND_FBIT;
369 if (chan->conn_state & L2CAP_CONN_SEND_PBIT) {
370 control |= L2CAP_CTRL_POLL;
371 chan->conn_state &= ~L2CAP_CONN_SEND_PBIT;
374 skb = bt_skb_alloc(count, GFP_ATOMIC);
378 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
379 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
380 lh->cid = cpu_to_le16(pi->dcid);
381 put_unaligned_le16(control, skb_put(skb, 2));
/* FCS covers everything before the 2-byte FCS field itself */
383 if (pi->fcs == L2CAP_FCS_CRC16) {
384 u16 fcs = crc16(0, (u8 *)lh, count - 2);
385 put_unaligned_le16(fcs, skb_put(skb, 2));
388 if (lmp_no_flush_capable(conn->hcon->hdev))
389 flags = ACL_START_NO_FLUSH;
393 hci_send_acl(pi->conn->hcon, skb, flags);
/* Send a Receiver-Ready S-frame, or Receiver-Not-Ready when we are
 * locally busy (recording that an RNR went out), acknowledging up to
 * buffer_seq. */
396 static inline void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, u16 control)
398 if (chan->conn_state & L2CAP_CONN_LOCAL_BUSY) {
399 control |= L2CAP_SUPER_RCV_NOT_READY;
400 chan->conn_state |= L2CAP_CONN_RNR_SENT;
402 control |= L2CAP_SUPER_RCV_READY;
404 control |= chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
406 l2cap_send_sframe(chan, control);
/* True when no connect request is outstanding for this socket. */
409 static inline int __l2cap_no_conn_pending(struct sock *sk)
411 return !(l2cap_pi(sk)->conf_state & L2CAP_CONF_CONNECT_PEND);
/* Kick off channel establishment: if the feature-mask exchange is done,
 * send a Connection Request (security permitting); otherwise start the
 * feature-mask Information Request first and arm the info timer. */
414 static void l2cap_do_start(struct l2cap_chan *chan)
416 struct sock *sk = chan->sk;
417 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
419 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
/* Feature request sent but answer not in yet: wait for it */
420 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
423 if (l2cap_check_security(sk) && __l2cap_no_conn_pending(sk)) {
424 struct l2cap_conn_req req;
425 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
426 req.psm = l2cap_pi(sk)->psm;
428 chan->ident = l2cap_get_ident(conn);
429 l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
431 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ,
435 struct l2cap_info_req req;
436 req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
438 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
439 conn->info_ident = l2cap_get_ident(conn);
441 mod_timer(&conn->info_timer, jiffies +
442 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
444 l2cap_send_cmd(conn, conn->info_ident,
445 L2CAP_INFO_REQ, sizeof(req), &req);
/* True when @mode (ERTM/streaming) is supported by both the remote
 * feature mask and our local one (local mask force-enables both here). */
449 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
451 u32 local_feat_mask = l2cap_feat_mask;
453 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
456 case L2CAP_MODE_ERTM:
457 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
458 case L2CAP_MODE_STREAMING:
459 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
/* Abort @chan with error @err: drop queued TX data and ERTM timers, send
 * an L2CAP Disconnection Request for the channel's CID pair, and move the
 * socket to BT_DISCONN. */
465 void l2cap_send_disconn_req(struct l2cap_conn *conn, struct l2cap_chan *chan, int err)
468 struct l2cap_disconn_req req;
475 skb_queue_purge(TX_QUEUE(sk));
477 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
478 del_timer(&chan->retrans_timer);
479 del_timer(&chan->monitor_timer);
480 del_timer(&chan->ack_timer);
483 req.dcid = cpu_to_le16(l2cap_pi(sk)->dcid);
484 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
485 l2cap_send_cmd(conn, l2cap_get_ident(conn),
486 L2CAP_DISCONN_REQ, sizeof(req), &req);
488 sk->sk_state = BT_DISCONN;
492 /* ---- L2CAP connections ---- */
/* Walk every channel on @conn once the link is usable: issue Connection
 * Requests for sockets in BT_CONNECT and Connection Responses (possibly
 * pending on security/defer_setup) for sockets in BT_CONNECT2, followed
 * by the first Configuration Request. */
493 static void l2cap_conn_start(struct l2cap_conn *conn)
495 struct l2cap_chan *chan, *tmp;
497 BT_DBG("conn %p", conn);
499 read_lock(&conn->chan_lock);
501 list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
502 struct sock *sk = chan->sk;
/* Only connection-oriented sockets take part in this state machine */
506 if (sk->sk_type != SOCK_SEQPACKET &&
507 sk->sk_type != SOCK_STREAM) {
512 if (sk->sk_state == BT_CONNECT) {
513 struct l2cap_conn_req req;
515 if (!l2cap_check_security(sk) ||
516 !__l2cap_no_conn_pending(sk)) {
/* Required mode unsupported by the peer and mandated by the device:
 * close the socket instead of connecting */
521 if (!l2cap_mode_supported(l2cap_pi(sk)->mode,
523 && l2cap_pi(sk)->conf_state &
524 L2CAP_CONF_STATE2_DEVICE) {
525 /* __l2cap_sock_close() calls list_del(chan)
526 * so release the lock */
/* NOTE(review): lock taken above with read_lock() but dropped/retaken
 * here with the _bh variants — mismatched lock flavors; confirm against
 * the callers' context. */
527 read_unlock_bh(&conn->chan_lock);
528 __l2cap_sock_close(sk, ECONNRESET);
529 read_lock_bh(&conn->chan_lock);
534 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
535 req.psm = l2cap_pi(sk)->psm;
537 chan->ident = l2cap_get_ident(conn);
538 l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
540 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ,
543 } else if (sk->sk_state == BT_CONNECT2) {
544 struct l2cap_conn_rsp rsp;
546 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
547 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
549 if (l2cap_check_security(sk)) {
/* Authorization deferred to userspace: answer "pending" and wake the
 * listening parent */
550 if (bt_sk(sk)->defer_setup) {
551 struct sock *parent = bt_sk(sk)->parent;
552 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
553 rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
554 parent->sk_data_ready(parent, 0);
557 sk->sk_state = BT_CONFIG;
558 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
559 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
562 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
563 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
566 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
/* Don't start configuration twice, or at all on a non-success reply */
569 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT ||
570 rsp.result != L2CAP_CR_SUCCESS) {
575 l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
576 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
577 l2cap_build_conf_req(chan, buf), buf);
578 chan->num_conf_req++;
584 read_unlock(&conn->chan_lock);
587 /* Find socket with cid and source bdaddr.
588 * Returns closest match, locked.
/* Exact source-address match wins; a BDADDR_ANY listener is kept in sk1
 * as the fallback.  NOTE(review): the return statement is omitted in this
 * dump. */
590 static struct sock *l2cap_get_sock_by_scid(int state, __le16 cid, bdaddr_t *src)
592 struct sock *s, *sk = NULL, *sk1 = NULL;
593 struct hlist_node *node;
595 read_lock(&l2cap_sk_list.lock);
597 sk_for_each(sk, node, &l2cap_sk_list.head) {
598 if (state && sk->sk_state != state)
601 if (l2cap_pi(sk)->scid == cid) {
603 if (!bacmp(&bt_sk(sk)->src, src))
607 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
614 read_unlock(&l2cap_sk_list.lock);
/* Incoming LE connection: if a socket is listening on the LE data CID,
 * clone a child socket/channel for it, attach it to @conn, and signal the
 * parent that a connection is ready to accept. */
619 static void l2cap_le_conn_ready(struct l2cap_conn *conn)
621 struct sock *parent, *uninitialized_var(sk);
622 struct l2cap_chan *chan;
626 /* Check if we have socket listening on cid */
627 parent = l2cap_get_sock_by_scid(BT_LISTEN, L2CAP_CID_LE_DATA,
632 /* Check for backlog size */
633 if (sk_acceptq_is_full(parent)) {
634 BT_DBG("backlog full %d", parent->sk_ack_backlog);
638 sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP, GFP_ATOMIC);
642 chan = l2cap_chan_alloc(sk);
648 write_lock_bh(&conn->chan_lock);
/* Keep the ACL link alive while the channel exists */
650 hci_conn_hold(conn->hcon);
652 l2cap_sock_init(sk, parent);
654 bacpy(&bt_sk(sk)->src, conn->src);
655 bacpy(&bt_sk(sk)->dst, conn->dst);
657 bt_accept_enqueue(parent, sk);
659 __l2cap_chan_add(conn, chan);
661 l2cap_pi(sk)->chan = chan;
663 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
665 sk->sk_state = BT_CONNECTED;
666 parent->sk_data_ready(parent, 0);
668 write_unlock_bh(&conn->chan_lock);
671 bh_unlock_sock(parent);
/* The underlying link is up: accept incoming LE channels, mark
 * LE / non-connection-oriented sockets connected, and start channel
 * establishment for outgoing sockets in BT_CONNECT. */
674 static void l2cap_conn_ready(struct l2cap_conn *conn)
676 struct l2cap_chan *chan;
678 BT_DBG("conn %p", conn);
680 if (!conn->hcon->out && conn->hcon->type == LE_LINK)
681 l2cap_le_conn_ready(conn);
683 read_lock(&conn->chan_lock);
685 list_for_each_entry(chan, &conn->chan_l, list) {
686 struct sock *sk = chan->sk;
690 if (conn->hcon->type == LE_LINK) {
691 l2cap_sock_clear_timer(sk);
692 sk->sk_state = BT_CONNECTED;
693 sk->sk_state_change(sk);
/* Connectionless/raw sockets are "connected" as soon as the link is */
696 if (sk->sk_type != SOCK_SEQPACKET &&
697 sk->sk_type != SOCK_STREAM) {
698 l2cap_sock_clear_timer(sk);
699 sk->sk_state = BT_CONNECTED;
700 sk->sk_state_change(sk);
701 } else if (sk->sk_state == BT_CONNECT)
702 l2cap_do_start(chan);
707 read_unlock(&conn->chan_lock);
710 /* Notify sockets that we cannot guaranty reliability anymore */
/* Propagate @err to every channel on @conn that requested reliability
 * (force_reliable); the error assignment itself is in an omitted line. */
711 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
713 struct l2cap_chan *chan;
715 BT_DBG("conn %p", conn);
717 read_lock(&conn->chan_lock);
719 list_for_each_entry(chan, &conn->chan_l, list) {
720 struct sock *sk = chan->sk;
722 if (l2cap_pi(sk)->force_reliable)
726 read_unlock(&conn->chan_lock);
/* Info-request timer expired: treat the feature-mask exchange as done
 * (peer never answered) and proceed with channel establishment. */
729 static void l2cap_info_timeout(unsigned long arg)
731 struct l2cap_conn *conn = (void *) arg;
733 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
734 conn->info_ident = 0;
736 l2cap_conn_start(conn);
/* Get or create the l2cap_conn attached to @hcon: allocate it, pick the
 * MTU from the controller (LE MTU for LE links), and initialise locks,
 * channel list and (for BR/EDR) the info timer. */
739 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
741 struct l2cap_conn *conn = hcon->l2cap_data;
746 conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
750 hcon->l2cap_data = conn;
753 BT_DBG("hcon %p conn %p", hcon, conn);
755 if (hcon->hdev->le_mtu && hcon->type == LE_LINK)
756 conn->mtu = hcon->hdev->le_mtu;
758 conn->mtu = hcon->hdev->acl_mtu;
760 conn->src = &hcon->hdev->bdaddr;
761 conn->dst = &hcon->dst;
765 spin_lock_init(&conn->lock);
766 rwlock_init(&conn->chan_lock);
768 INIT_LIST_HEAD(&conn->chan_l);
/* LE links have no feature-mask exchange, so no info timer */
770 if (hcon->type != LE_LINK)
771 setup_timer(&conn->info_timer, l2cap_info_timeout,
772 (unsigned long) conn);
/* 0x13 = "remote user terminated connection" default reason */
774 conn->disc_reason = 0x13;
/* Destroy @hcon's L2CAP state: free any partial RX skb, tear down every
 * channel with @err, stop the info timer, and detach from the hcon. */
779 static void l2cap_conn_del(struct hci_conn *hcon, int err)
781 struct l2cap_conn *conn = hcon->l2cap_data;
782 struct l2cap_chan *chan, *l;
788 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
790 kfree_skb(conn->rx_skb);
793 list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
796 l2cap_chan_del(chan, err);
/* del_timer_sync: the timer handler may be running on another CPU */
801 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
802 del_timer_sync(&conn->info_timer);
804 hcon->l2cap_data = NULL;
/* Locked wrapper: attach @chan to @conn under the chan_lock write lock. */
808 static inline void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
810 write_lock_bh(&conn->chan_lock);
811 __l2cap_chan_add(conn, chan);
812 write_unlock_bh(&conn->chan_lock);
815 /* ---- Socket interface ---- */
817 /* Find socket with psm and source bdaddr.
818 * Returns closest match.
/* Exact source-address match returns immediately; a BDADDR_ANY listener
 * is remembered in sk1 and returned when the loop completes (node NULL). */
820 static struct sock *l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
822 struct sock *sk = NULL, *sk1 = NULL;
823 struct hlist_node *node;
825 read_lock(&l2cap_sk_list.lock);
827 sk_for_each(sk, node, &l2cap_sk_list.head) {
828 if (state && sk->sk_state != state)
831 if (l2cap_pi(sk)->psm == psm) {
833 if (!bacmp(&bt_sk(sk)->src, src))
837 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
842 read_unlock(&l2cap_sk_list.lock);
/* node non-NULL means the loop broke early on an exact match */
844 return node ? sk : sk1;
/* Initiate an outgoing L2CAP connection for @sk: route to an adapter,
 * create the ACL or LE link, attach a channel to the resulting conn, and
 * either finish immediately (link already up) or wait in BT_CONNECT. */
847 int l2cap_do_connect(struct sock *sk)
849 bdaddr_t *src = &bt_sk(sk)->src;
850 bdaddr_t *dst = &bt_sk(sk)->dst;
851 struct l2cap_conn *conn;
852 struct l2cap_chan *chan;
853 struct hci_conn *hcon;
854 struct hci_dev *hdev;
858 BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst),
861 hdev = hci_get_route(dst, src);
863 return -EHOSTUNREACH;
865 hci_dev_lock_bh(hdev);
867 auth_type = l2cap_get_auth_type(sk);
/* The LE data CID selects an LE link instead of a BR/EDR ACL link */
869 if (l2cap_pi(sk)->dcid == L2CAP_CID_LE_DATA)
870 hcon = hci_connect(hdev, LE_LINK, dst,
871 l2cap_pi(sk)->sec_level, auth_type);
873 hcon = hci_connect(hdev, ACL_LINK, dst,
874 l2cap_pi(sk)->sec_level, auth_type);
881 conn = l2cap_conn_add(hcon, 0);
888 chan = l2cap_chan_alloc(sk);
895 /* Update source addr of the socket */
896 bacpy(src, conn->src);
898 l2cap_chan_add(conn, chan);
900 l2cap_pi(sk)->chan = chan;
902 sk->sk_state = BT_CONNECT;
903 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
/* Link already up: raw/dgram sockets connect instantly, others start the
 * L2CAP channel state machine now */
905 if (hcon->state == BT_CONNECTED) {
906 if (sk->sk_type != SOCK_SEQPACKET &&
907 sk->sk_type != SOCK_STREAM) {
908 l2cap_sock_clear_timer(sk);
909 if (l2cap_check_security(sk))
910 sk->sk_state = BT_CONNECTED;
912 l2cap_do_start(chan);
918 hci_dev_unlock_bh(hdev);
/* Sleep (interruptibly) until every transmitted ERTM frame has been
 * acked or the timeout / a signal / a socket error ends the wait.
 * NOTE(review): timeo initialisation is in an omitted line. */
923 int __l2cap_wait_ack(struct sock *sk)
925 DECLARE_WAITQUEUE(wait, current);
929 add_wait_queue(sk_sleep(sk), &wait);
930 while ((l2cap_pi(sk)->chan->unacked_frames > 0 && l2cap_pi(sk)->conn)) {
931 set_current_state(TASK_INTERRUPTIBLE);
936 if (signal_pending(current)) {
937 err = sock_intr_errno(timeo);
942 timeo = schedule_timeout(timeo);
945 err = sock_error(sk);
949 set_current_state(TASK_RUNNING);
950 remove_wait_queue(sk_sleep(sk), &wait);
/* ERTM monitor timer: the peer failed to answer a poll.  Give up with a
 * disconnect once remote_max_tx retries are spent, otherwise re-arm and
 * poll again with RR/RNR + P-bit. */
954 static void l2cap_monitor_timeout(unsigned long arg)
956 struct l2cap_chan *chan = (void *) arg;
957 struct sock *sk = chan->sk;
959 BT_DBG("chan %p", chan);
962 if (chan->retry_count >= chan->remote_max_tx) {
963 l2cap_send_disconn_req(l2cap_pi(sk)->conn, chan, ECONNABORTED);
969 __mod_monitor_timer();
971 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
/* ERTM retransmission timer: start the monitor phase — poll the peer
 * with the P-bit and wait for a frame with the F-bit set. */
975 static void l2cap_retrans_timeout(unsigned long arg)
977 struct l2cap_chan *chan = (void *) arg;
978 struct sock *sk = chan->sk;
983 chan->retry_count = 1;
984 __mod_monitor_timer();
986 chan->conn_state |= L2CAP_CONN_WAIT_F;
988 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
/* Free frames from the head of the TX queue that the peer has now acked
 * (i.e. until expected_ack_seq is reached), and stop the retransmission
 * timer once nothing is outstanding. */
992 static void l2cap_drop_acked_frames(struct l2cap_chan *chan)
994 struct sock *sk = chan->sk;
997 while ((skb = skb_peek(TX_QUEUE(sk))) &&
998 chan->unacked_frames) {
999 if (bt_cb(skb)->tx_seq == chan->expected_ack_seq)
1002 skb = skb_dequeue(TX_QUEUE(sk));
1005 chan->unacked_frames--;
1008 if (!chan->unacked_frames)
1009 del_timer(&chan->retrans_timer);
/* Hand a fully-built L2CAP frame to the HCI layer, using a no-flush ACL
 * start packet when the socket isn't flushable and the controller
 * supports it. */
1012 void l2cap_do_send(struct sock *sk, struct sk_buff *skb)
1014 struct l2cap_pinfo *pi = l2cap_pi(sk);
1015 struct hci_conn *hcon = pi->conn->hcon;
1018 BT_DBG("sk %p, skb %p len %d", sk, skb, skb->len);
1020 if (!pi->flushable && lmp_no_flush_capable(hcon->hdev))
1021 flags = ACL_START_NO_FLUSH;
1025 hci_send_acl(hcon, skb, flags);
/* Streaming mode TX: drain the whole TX queue, stamping each I-frame's
 * control field with the next TxSeq (mod-64) and recomputing the CRC16
 * FCS in place when enabled.  No retransmission state is kept. */
1028 void l2cap_streaming_send(struct l2cap_chan *chan)
1030 struct sock *sk = chan->sk;
1031 struct sk_buff *skb;
1032 struct l2cap_pinfo *pi = l2cap_pi(sk);
1035 while ((skb = skb_dequeue(TX_QUEUE(sk)))) {
1036 control = get_unaligned_le16(skb->data + L2CAP_HDR_SIZE);
1037 control |= chan->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT;
1038 put_unaligned_le16(control, skb->data + L2CAP_HDR_SIZE);
/* FCS covers everything up to (not including) the trailing FCS field */
1040 if (pi->fcs == L2CAP_FCS_CRC16) {
1041 fcs = crc16(0, (u8 *)skb->data, skb->len - 2);
1042 put_unaligned_le16(fcs, skb->data + skb->len - 2);
1045 l2cap_do_send(sk, skb);
/* TxSeq is a 6-bit sequence number: wrap at 64 */
1047 chan->next_tx_seq = (chan->next_tx_seq + 1) % 64;
/* Retransmit the queued I-frame whose TxSeq equals @tx_seq: locate it in
 * the TX queue, clone it, rewrite ReqSeq/F-bit in the clone's control
 * field, recompute the FCS on the clone, and send it.  Disconnects if
 * the frame already hit remote_max_tx retries. */
1051 static void l2cap_retransmit_one_frame(struct l2cap_chan *chan, u8 tx_seq)
1053 struct sock *sk = chan->sk;
1054 struct l2cap_pinfo *pi = l2cap_pi(sk);
1055 struct sk_buff *skb, *tx_skb;
1058 skb = skb_peek(TX_QUEUE(sk));
1063 if (bt_cb(skb)->tx_seq == tx_seq)
1066 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1069 } while ((skb = skb_queue_next(TX_QUEUE(sk), skb)));
1071 if (chan->remote_max_tx &&
1072 bt_cb(skb)->retries == chan->remote_max_tx) {
1073 l2cap_send_disconn_req(pi->conn, chan, ECONNABORTED);
1077 tx_skb = skb_clone(skb, GFP_ATOMIC);
1078 bt_cb(skb)->retries++;
1079 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1081 if (chan->conn_state & L2CAP_CONN_SEND_FBIT) {
1082 control |= L2CAP_CTRL_FINAL;
1083 chan->conn_state &= ~L2CAP_CONN_SEND_FBIT;
1086 control |= (chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1087 | (tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1089 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
/* FCS is correctly computed over and written into the clone here */
1091 if (pi->fcs == L2CAP_FCS_CRC16) {
1092 fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
1093 put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
1096 l2cap_do_send(sk, tx_skb);
1099 int l2cap_ertm_send(struct l2cap_chan *chan)
1101 struct sk_buff *skb, *tx_skb;
1102 struct sock *sk = chan->sk;
1103 struct l2cap_pinfo *pi = l2cap_pi(sk);
1107 if (sk->sk_state != BT_CONNECTED)
1110 while ((skb = sk->sk_send_head) && (!l2cap_tx_window_full(chan))) {
1112 if (chan->remote_max_tx &&
1113 bt_cb(skb)->retries == chan->remote_max_tx) {
1114 l2cap_send_disconn_req(pi->conn, chan, ECONNABORTED);
1118 tx_skb = skb_clone(skb, GFP_ATOMIC);
1120 bt_cb(skb)->retries++;
1122 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1123 control &= L2CAP_CTRL_SAR;
1125 if (chan->conn_state & L2CAP_CONN_SEND_FBIT) {
1126 control |= L2CAP_CTRL_FINAL;
1127 chan->conn_state &= ~L2CAP_CONN_SEND_FBIT;
1129 control |= (chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1130 | (chan->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1131 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1134 if (pi->fcs == L2CAP_FCS_CRC16) {
1135 fcs = crc16(0, (u8 *)skb->data, tx_skb->len - 2);
1136 put_unaligned_le16(fcs, skb->data + tx_skb->len - 2);
1139 l2cap_do_send(sk, tx_skb);
1141 __mod_retrans_timer();
1143 bt_cb(skb)->tx_seq = chan->next_tx_seq;
1144 chan->next_tx_seq = (chan->next_tx_seq + 1) % 64;
1146 if (bt_cb(skb)->retries == 1)
1147 chan->unacked_frames++;
1149 chan->frames_sent++;
1151 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1152 sk->sk_send_head = NULL;
1154 sk->sk_send_head = skb_queue_next(TX_QUEUE(sk), skb);
/* Go-Back-N retransmission: rewind sk_send_head to the front of the TX
 * queue, reset next_tx_seq to the last acked sequence number, and resend
 * via l2cap_ertm_send(). */
1162 static int l2cap_retransmit_frames(struct l2cap_chan *chan)
1164 struct sock *sk = chan->sk;
1167 if (!skb_queue_empty(TX_QUEUE(sk)))
1168 sk->sk_send_head = TX_QUEUE(sk)->next;
1170 chan->next_tx_seq = chan->expected_ack_seq;
1171 ret = l2cap_ertm_send(chan);
/* Acknowledge received frames: send RNR when locally busy; otherwise
 * piggyback the ack on pending I-frames if any were sent, else emit an
 * explicit RR S-frame. */
1175 static void l2cap_send_ack(struct l2cap_chan *chan)
1179 control |= chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
1181 if (chan->conn_state & L2CAP_CONN_LOCAL_BUSY) {
1182 control |= L2CAP_SUPER_RCV_NOT_READY;
1183 chan->conn_state |= L2CAP_CONN_RNR_SENT;
1184 l2cap_send_sframe(chan, control);
/* I-frames went out and already carry the ReqSeq ack */
1188 if (l2cap_ertm_send(chan) > 0)
1191 control |= L2CAP_SUPER_RCV_READY;
1192 l2cap_send_sframe(chan, control);
/* Send a SREJ S-frame (with F-bit) for the last entry on the SREJ list,
 * i.e. the most recently recorded missing TxSeq. */
1195 static void l2cap_send_srejtail(struct l2cap_chan *chan)
1197 struct srej_list *tail;
1200 control = L2CAP_SUPER_SELECT_REJECT;
1201 control |= L2CAP_CTRL_FINAL;
1203 tail = list_entry(SREJ_LIST(chan->sk)->prev, struct srej_list, list);
1204 control |= tail->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
1206 l2cap_send_sframe(chan, control);
/* Copy @len bytes of user iovec data into @skb, spilling anything beyond
 * @count into a chain of MTU-sized fragment skbs on skb's frag_list.
 * NOTE(review): allocation-failure and copy-failure return paths are in
 * lines omitted from this dump. */
1209 static inline int l2cap_skbuff_fromiovec(struct sock *sk, struct msghdr *msg, int len, int count, struct sk_buff *skb)
1211 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1212 struct sk_buff **frag;
1215 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
1221 /* Continuation fragments (no L2CAP header) */
1222 frag = &skb_shinfo(skb)->frag_list;
1224 count = min_t(unsigned int, conn->mtu, len);
1226 *frag = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err);
1229 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
1235 frag = &(*frag)->next;
/* Build a connectionless (G-frame) PDU from user data: L2CAP header plus
 * a 2-byte PSM field, then the payload copied from the iovec.  Returns
 * the skb or an ERR_PTR. */
1241 struct sk_buff *l2cap_create_connless_pdu(struct sock *sk, struct msghdr *msg, size_t len)
1243 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1244 struct sk_buff *skb;
/* header + 2 bytes for the PSM field */
1245 int err, count, hlen = L2CAP_HDR_SIZE + 2;
1246 struct l2cap_hdr *lh;
1248 BT_DBG("sk %p len %d", sk, (int)len);
1250 count = min_t(unsigned int, (conn->mtu - hlen), len);
1251 skb = bt_skb_send_alloc(sk, count + hlen,
1252 msg->msg_flags & MSG_DONTWAIT, &err);
1254 return ERR_PTR(err);
1256 /* Create L2CAP header */
1257 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1258 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1259 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1260 put_unaligned_le16(l2cap_pi(sk)->psm, skb_put(skb, 2));
1262 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1263 if (unlikely(err < 0)) {
1265 return ERR_PTR(err);
/* Build a basic-mode (B-frame) PDU from user data: bare L2CAP header
 * followed by the payload copied from the iovec.  Returns the skb or an
 * ERR_PTR. */
1270 struct sk_buff *l2cap_create_basic_pdu(struct sock *sk, struct msghdr *msg, size_t len)
1272 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1273 struct sk_buff *skb;
1274 int err, count, hlen = L2CAP_HDR_SIZE;
1275 struct l2cap_hdr *lh;
1277 BT_DBG("sk %p len %d", sk, (int)len);
1279 count = min_t(unsigned int, (conn->mtu - hlen), len);
1280 skb = bt_skb_send_alloc(sk, count + hlen,
1281 msg->msg_flags & MSG_DONTWAIT, &err);
1283 return ERR_PTR(err);
1285 /* Create L2CAP header */
1286 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1287 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1288 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1290 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1291 if (unlikely(err < 0)) {
1293 return ERR_PTR(err);
/* Build an ERTM/streaming I-frame PDU: L2CAP header, 2-byte control
 * field, optional 2-byte SDU-length field (when @sdulen is nonzero),
 * payload, and a zero FCS placeholder (filled in at transmit time).
 * Returns the skb or an ERR_PTR. */
1298 struct sk_buff *l2cap_create_iframe_pdu(struct sock *sk, struct msghdr *msg, size_t len, u16 control, u16 sdulen)
1300 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1301 struct sk_buff *skb;
/* header + 2-byte control; sdulen/FCS adjustments are in omitted lines */
1302 int err, count, hlen = L2CAP_HDR_SIZE + 2;
1303 struct l2cap_hdr *lh;
1305 BT_DBG("sk %p len %d", sk, (int)len);
1308 return ERR_PTR(-ENOTCONN);
1313 if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16)
1316 count = min_t(unsigned int, (conn->mtu - hlen), len);
1317 skb = bt_skb_send_alloc(sk, count + hlen,
1318 msg->msg_flags & MSG_DONTWAIT, &err);
1320 return ERR_PTR(err);
1322 /* Create L2CAP header */
1323 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1324 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1325 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1326 put_unaligned_le16(control, skb_put(skb, 2));
1328 put_unaligned_le16(sdulen, skb_put(skb, 2));
1330 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1331 if (unlikely(err < 0)) {
1333 return ERR_PTR(err);
/* FCS placeholder; the real value is computed when the frame is sent */
1336 if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16)
1337 put_unaligned_le16(0, skb_put(skb, 2));
1339 bt_cb(skb)->retries = 0;
/* Segment an SDU larger than remote_mps into a START frame (carrying the
 * total SDU length), CONTINUE frames, and a final END frame, building
 * them on a local queue and splicing onto the socket's TX queue only if
 * every segment allocation succeeds. */
1343 int l2cap_sar_segment_sdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
1345 struct sock *sk = chan->sk;
1346 struct sk_buff *skb;
1347 struct sk_buff_head sar_queue;
1351 skb_queue_head_init(&sar_queue);
1352 control = L2CAP_SDU_START;
/* START frame carries the full SDU length in its sdulen field */
1353 skb = l2cap_create_iframe_pdu(sk, msg, chan->remote_mps, control, len);
1355 return PTR_ERR(skb);
1357 __skb_queue_tail(&sar_queue, skb);
1358 len -= chan->remote_mps;
1359 size += chan->remote_mps;
1364 if (len > chan->remote_mps) {
1365 control = L2CAP_SDU_CONTINUE;
1366 buflen = chan->remote_mps;
1368 control = L2CAP_SDU_END;
1372 skb = l2cap_create_iframe_pdu(sk, msg, buflen, control, 0);
/* On failure drop everything built so far — all-or-nothing */
1374 skb_queue_purge(&sar_queue);
1375 return PTR_ERR(skb);
1378 __skb_queue_tail(&sar_queue, skb);
1382 skb_queue_splice_tail(&sar_queue, TX_QUEUE(sk));
1383 if (sk->sk_send_head == NULL)
1384 sk->sk_send_head = sar_queue.next;
/* Configuration complete: clear config state and timers, then wake
 * either the connecting socket (outgoing) or the accepting parent
 * (incoming). */
1389 static void l2cap_chan_ready(struct sock *sk)
1391 struct sock *parent = bt_sk(sk)->parent;
1393 BT_DBG("sk %p, parent %p", sk, parent);
1395 l2cap_pi(sk)->conf_state = 0;
1396 l2cap_sock_clear_timer(sk);
1399 /* Outgoing channel.
1400 * Wake up socket sleeping on connect.
1402 sk->sk_state = BT_CONNECTED;
1403 sk->sk_state_change(sk);
1405 /* Incoming channel.
1406 * Wake up socket sleeping on accept.
1408 parent->sk_data_ready(parent, 0);
1412 /* Copy frame to all raw sockets on that connection */
/* Clone @skb onto the receive queue of every SOCK_RAW channel on @conn,
 * skipping the originating socket; a failed clone or full receive queue
 * silently drops the copy for that socket. */
1413 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
1415 struct sk_buff *nskb;
1416 struct l2cap_chan *chan;
1418 BT_DBG("conn %p", conn);
1420 read_lock(&conn->chan_lock);
1421 list_for_each_entry(chan, &conn->chan_l, list) {
1422 struct sock *sk = chan->sk;
1423 if (sk->sk_type != SOCK_RAW)
1426 /* Don't send frame to the socket it came from */
1429 nskb = skb_clone(skb, GFP_ATOMIC);
/* kfree of the rejected clone is in an omitted line */
1433 if (sock_queue_rcv_skb(sk, nskb))
1436 read_unlock(&conn->chan_lock);
1439 /* ---- L2CAP signalling commands ---- */
/* Allocate and fill a signalling PDU: L2CAP header on the signalling CID
 * (LE or BR/EDR), command header (@code/@ident/@dlen), then @data —
 * fragmented across frag_list skbs when it exceeds the connection MTU. */
1440 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
1441 u8 code, u8 ident, u16 dlen, void *data)
1443 struct sk_buff *skb, **frag;
1444 struct l2cap_cmd_hdr *cmd;
1445 struct l2cap_hdr *lh;
1448 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
1449 conn, code, ident, dlen);
1451 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
1452 count = min_t(unsigned int, conn->mtu, len);
1454 skb = bt_skb_alloc(count, GFP_ATOMIC);
1458 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1459 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
1461 if (conn->hcon->type == LE_LINK)
1462 lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
1464 lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);
1466 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
1469 cmd->len = cpu_to_le16(dlen);
/* First fragment: whatever fits after the two headers */
1472 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
1473 memcpy(skb_put(skb, count), data, count);
1479 /* Continuation fragments (no L2CAP header) */
1480 frag = &skb_shinfo(skb)->frag_list;
1482 count = min_t(unsigned int, conn->mtu, len);
1484 *frag = bt_skb_alloc(count, GFP_ATOMIC);
1488 memcpy(skb_put(*frag, count), data, count);
1493 frag = &(*frag)->next;
/* Decode one configuration option at *ptr into *type/*olen/*val,
 * widening 1/2/4-byte values to unsigned long and passing larger options
 * by pointer; returns the total option length consumed. */
1503 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
1505 struct l2cap_conf_opt *opt = *ptr;
1508 len = L2CAP_CONF_OPT_SIZE + opt->len;
1516 *val = *((u8 *) opt->val);
1520 *val = get_unaligned_le16(opt->val);
1524 *val = get_unaligned_le32(opt->val);
/* Larger options: hand back a pointer to the raw bytes */
1528 *val = (unsigned long) opt->val;
1532 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
/*
 * Serialize one configuration option (type/len/value) at *ptr and
 * advance *ptr past it.  1/2/4-byte values are stored inline
 * (unaligned-safe); anything else treats @val as a pointer to @len
 * raw bytes.  Counterpart of l2cap_get_conf_opt().
 */
1536 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
1538 struct l2cap_conf_opt *opt = *ptr;
1540 BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
1547 *((u8 *) opt->val) = val;
1551 put_unaligned_le16(val, opt->val);
1555 put_unaligned_le32(val, opt->val);
/* len > 4: val is really a pointer to the option payload. */
1559 memcpy(opt->val, (void *) val, len);
1563 *ptr += L2CAP_CONF_OPT_SIZE + len;
1566 static void l2cap_ack_timeout(unsigned long arg)
1568 struct l2cap_chan *chan = (void *) arg;
1570 bh_lock_sock(chan->sk);
1571 l2cap_send_ack(chan);
1572 bh_unlock_sock(chan->sk);
/*
 * Initialise ERTM (Enhanced Retransmission Mode) state for a channel:
 * reset sequence/ack bookkeeping, arm the retransmission, monitor and
 * ack timers, set up the SREJ and local-busy queues plus the busy work
 * item, and route socket backlog packets through the ERTM receive path.
 */
1575 static inline void l2cap_ertm_init(struct l2cap_chan *chan)
1577 struct sock *sk = chan->sk;
1579 chan->expected_ack_seq = 0;
1580 chan->unacked_frames = 0;
1581 chan->buffer_seq = 0;
1582 chan->num_acked = 0;
1583 chan->frames_sent = 0;
/* Each timer gets the channel pointer back as its (unsigned long) arg. */
1585 setup_timer(&chan->retrans_timer, l2cap_retrans_timeout,
1586 (unsigned long) chan);
1587 setup_timer(&chan->monitor_timer, l2cap_monitor_timeout,
1588 (unsigned long) chan);
1589 setup_timer(&chan->ack_timer, l2cap_ack_timeout, (unsigned long) chan);
1591 __skb_queue_head_init(SREJ_QUEUE(sk));
1592 __skb_queue_head_init(BUSY_QUEUE(sk));
1594 INIT_WORK(&l2cap_pi(sk)->busy_work, l2cap_busy_work);
/* Backlogged frames are replayed through the ERTM state machine. */
1596 sk->sk_backlog_rcv = l2cap_ertm_data_rcv;
/*
 * Pick the channel mode to request: keep STREAMING/ERTM when the
 * remote's feature mask advertises support for it, otherwise fall back
 * to basic mode.  (The switch header, supported-return and default
 * lines are not visible in this extract.)
 */
1599 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
1602 case L2CAP_MODE_STREAMING:
1603 case L2CAP_MODE_ERTM:
1604 if (l2cap_mode_supported(mode, remote_feat_mask))
1608 return L2CAP_MODE_BASIC;
/*
 * Build our outgoing Configure Request into @data: optionally an MTU
 * option (only when it differs from the default), then a mode-specific
 * RFC option, and for ERTM/streaming an FCS opt-out option when allowed.
 * Returns the encoded length (return statement not visible here).
 * NOTE(review): several switch headers / break lines are missing from
 * this extract; comments describe the visible code only.
 */
1612 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
1614 struct sock *sk = chan->sk;
1615 struct l2cap_pinfo *pi = l2cap_pi(sk);
1616 struct l2cap_conf_req *req = data;
1617 struct l2cap_conf_rfc rfc = { .mode = pi->mode };
1618 void *ptr = req->data;
1620 BT_DBG("sk %p", sk);
/* Mode (re)selection happens only on the very first config exchange. */
1622 if (chan->num_conf_req || chan->num_conf_rsp)
1626 case L2CAP_MODE_STREAMING:
1627 case L2CAP_MODE_ERTM:
/* STATE2 devices keep the user-requested mode unconditionally. */
1628 if (pi->conf_state & L2CAP_CONF_STATE2_DEVICE)
1633 pi->mode = l2cap_select_mode(rfc.mode, pi->conn->feat_mask);
/* MTU option is only sent when we deviate from the spec default. */
1638 if (pi->imtu != L2CAP_DEFAULT_MTU)
1639 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);
1642 case L2CAP_MODE_BASIC:
/* If the peer supports neither ERTM nor streaming, basic mode is
 * implicit and no RFC option is needed at all. */
1643 if (!(pi->conn->feat_mask & L2CAP_FEAT_ERTM) &&
1644 !(pi->conn->feat_mask & L2CAP_FEAT_STREAMING))
1647 rfc.mode = L2CAP_MODE_BASIC;
1649 rfc.max_transmit = 0;
1650 rfc.retrans_timeout = 0;
1651 rfc.monitor_timeout = 0;
1652 rfc.max_pdu_size = 0;
1654 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
1655 (unsigned long) &rfc);
1658 case L2CAP_MODE_ERTM:
1659 rfc.mode = L2CAP_MODE_ERTM;
1660 rfc.txwin_size = pi->tx_win;
1661 rfc.max_transmit = pi->max_tx;
/* Timeouts are filled in by the responder, not the requester. */
1662 rfc.retrans_timeout = 0;
1663 rfc.monitor_timeout = 0;
/* Cap the PDU size so header + payload + FCS fit in the ACL MTU
 * (the 10 bytes cover the extended header and checksum overhead). */
1664 rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
1665 if (L2CAP_DEFAULT_MAX_PDU_SIZE > pi->conn->mtu - 10)
1666 rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);
1668 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
1669 (unsigned long) &rfc);
1671 if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
/* Explicitly opt out of FCS when we don't want it and the peer
 * either agrees or already told us it doesn't want it. */
1674 if (pi->fcs == L2CAP_FCS_NONE ||
1675 pi->conf_state & L2CAP_CONF_NO_FCS_RECV) {
1676 pi->fcs = L2CAP_FCS_NONE;
1677 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);
1681 case L2CAP_MODE_STREAMING:
1682 rfc.mode = L2CAP_MODE_STREAMING;
1684 rfc.max_transmit = 0;
1685 rfc.retrans_timeout = 0;
1686 rfc.monitor_timeout = 0;
1687 rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
1688 if (L2CAP_DEFAULT_MAX_PDU_SIZE > pi->conn->mtu - 10)
1689 rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);
1691 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
1692 (unsigned long) &rfc);
1694 if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
1697 if (pi->fcs == L2CAP_FCS_NONE ||
1698 pi->conf_state & L2CAP_CONF_NO_FCS_RECV) {
1699 pi->fcs = L2CAP_FCS_NONE;
1700 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);
1705 req->dcid = cpu_to_le16(pi->dcid);
1706 req->flags = cpu_to_le16(0);
1711 static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
1713 struct l2cap_pinfo *pi = l2cap_pi(chan->sk);
1714 struct l2cap_conf_rsp *rsp = data;
1715 void *ptr = rsp->data;
1716 void *req = chan->conf_req;
1717 int len = chan->conf_len;
1718 int type, hint, olen;
1720 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
1721 u16 mtu = L2CAP_DEFAULT_MTU;
1722 u16 result = L2CAP_CONF_SUCCESS;
1724 BT_DBG("chan %p", chan);
1726 while (len >= L2CAP_CONF_OPT_SIZE) {
1727 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
1729 hint = type & L2CAP_CONF_HINT;
1730 type &= L2CAP_CONF_MASK;
1733 case L2CAP_CONF_MTU:
1737 case L2CAP_CONF_FLUSH_TO:
1741 case L2CAP_CONF_QOS:
1744 case L2CAP_CONF_RFC:
1745 if (olen == sizeof(rfc))
1746 memcpy(&rfc, (void *) val, olen);
1749 case L2CAP_CONF_FCS:
1750 if (val == L2CAP_FCS_NONE)
1751 pi->conf_state |= L2CAP_CONF_NO_FCS_RECV;
1759 result = L2CAP_CONF_UNKNOWN;
1760 *((u8 *) ptr++) = type;
1765 if (chan->num_conf_rsp || chan->num_conf_req > 1)
1769 case L2CAP_MODE_STREAMING:
1770 case L2CAP_MODE_ERTM:
1771 if (!(pi->conf_state & L2CAP_CONF_STATE2_DEVICE)) {
1772 pi->mode = l2cap_select_mode(rfc.mode,
1773 pi->conn->feat_mask);
1777 if (pi->mode != rfc.mode)
1778 return -ECONNREFUSED;
1784 if (pi->mode != rfc.mode) {
1785 result = L2CAP_CONF_UNACCEPT;
1786 rfc.mode = pi->mode;
1788 if (chan->num_conf_rsp == 1)
1789 return -ECONNREFUSED;
1791 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
1792 sizeof(rfc), (unsigned long) &rfc);
1796 if (result == L2CAP_CONF_SUCCESS) {
1797 /* Configure output options and let the other side know
1798 * which ones we don't like. */
1800 if (mtu < L2CAP_DEFAULT_MIN_MTU)
1801 result = L2CAP_CONF_UNACCEPT;
1804 pi->conf_state |= L2CAP_CONF_MTU_DONE;
1806 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);
1809 case L2CAP_MODE_BASIC:
1810 pi->fcs = L2CAP_FCS_NONE;
1811 pi->conf_state |= L2CAP_CONF_MODE_DONE;
1814 case L2CAP_MODE_ERTM:
1815 chan->remote_tx_win = rfc.txwin_size;
1816 chan->remote_max_tx = rfc.max_transmit;
1818 if (le16_to_cpu(rfc.max_pdu_size) > pi->conn->mtu - 10)
1819 rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);
1821 chan->remote_mps = le16_to_cpu(rfc.max_pdu_size);
1823 rfc.retrans_timeout =
1824 le16_to_cpu(L2CAP_DEFAULT_RETRANS_TO);
1825 rfc.monitor_timeout =
1826 le16_to_cpu(L2CAP_DEFAULT_MONITOR_TO);
1828 pi->conf_state |= L2CAP_CONF_MODE_DONE;
1830 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
1831 sizeof(rfc), (unsigned long) &rfc);
1835 case L2CAP_MODE_STREAMING:
1836 if (le16_to_cpu(rfc.max_pdu_size) > pi->conn->mtu - 10)
1837 rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);
1839 chan->remote_mps = le16_to_cpu(rfc.max_pdu_size);
1841 pi->conf_state |= L2CAP_CONF_MODE_DONE;
1843 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
1844 sizeof(rfc), (unsigned long) &rfc);
1849 result = L2CAP_CONF_UNACCEPT;
1851 memset(&rfc, 0, sizeof(rfc));
1852 rfc.mode = pi->mode;
1855 if (result == L2CAP_CONF_SUCCESS)
1856 pi->conf_state |= L2CAP_CONF_OUTPUT_DONE;
1858 rsp->scid = cpu_to_le16(pi->dcid);
1859 rsp->result = cpu_to_le16(result);
1860 rsp->flags = cpu_to_le16(0x0000);
/*
 * Parse the peer's Configure Response (@rsp, @len bytes) and build our
 * follow-up Configure Request into @data, adjusting our own parameters
 * to whatever the peer counter-proposed.  @result may be downgraded to
 * UNACCEPT (e.g. MTU below spec minimum).  Returns the encoded request
 * length or -ECONNREFUSED on an impossible mode.  Switch headers and
 * break lines are missing from this extract.
 */
1865 static int l2cap_parse_conf_rsp(struct sock *sk, void *rsp, int len, void *data, u16 *result)
1867 struct l2cap_pinfo *pi = l2cap_pi(sk);
1868 struct l2cap_conf_req *req = data;
1869 void *ptr = req->data;
1872 struct l2cap_conf_rfc rfc;
1874 BT_DBG("sk %p, rsp %p, len %d, req %p", sk, rsp, len, data);
1876 while (len >= L2CAP_CONF_OPT_SIZE) {
1877 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
1880 case L2CAP_CONF_MTU:
/* Peer proposed an MTU below the spec minimum: refuse it but
 * keep the minimum locally so we can still respond. */
1881 if (val < L2CAP_DEFAULT_MIN_MTU) {
1882 *result = L2CAP_CONF_UNACCEPT;
1883 pi->imtu = L2CAP_DEFAULT_MIN_MTU;
1886 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);
1889 case L2CAP_CONF_FLUSH_TO:
1891 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
1895 case L2CAP_CONF_RFC:
1896 if (olen == sizeof(rfc))
1897 memcpy(&rfc, (void *)val, olen);
/* STATE2 devices never change mode, whatever the peer says. */
1899 if ((pi->conf_state & L2CAP_CONF_STATE2_DEVICE) &&
1900 rfc.mode != pi->mode)
1901 return -ECONNREFUSED;
1905 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
1906 sizeof(rfc), (unsigned long) &rfc);
/* Basic mode cannot be upgraded by the peer's response. */
1911 if (pi->mode == L2CAP_MODE_BASIC && pi->mode != rfc.mode)
1912 return -ECONNREFUSED;
1914 pi->mode = rfc.mode;
/* On success, latch the negotiated ERTM/streaming parameters. */
1916 if (*result == L2CAP_CONF_SUCCESS) {
1918 case L2CAP_MODE_ERTM:
1919 pi->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
1920 pi->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
1921 pi->mps = le16_to_cpu(rfc.max_pdu_size);
1923 case L2CAP_MODE_STREAMING:
1924 pi->mps = le16_to_cpu(rfc.max_pdu_size);
1928 req->dcid = cpu_to_le16(pi->dcid);
1929 req->flags = cpu_to_le16(0x0000);
/*
 * Fill in a minimal Configure Response header (scid/result/flags) at
 * @data.  Returns the encoded length (the return statement is not
 * visible in this extract).
 */
1934 static int l2cap_build_conf_rsp(struct sock *sk, void *data, u16 result, u16 flags)
1936 struct l2cap_conf_rsp *rsp = data;
1937 void *ptr = rsp->data;
1939 BT_DBG("sk %p", sk);
1941 rsp->scid = cpu_to_le16(l2cap_pi(sk)->dcid);
1942 rsp->result = cpu_to_le16(result);
1943 rsp->flags = cpu_to_le16(flags);
/*
 * Complete a deferred incoming connection (defer_setup): send the
 * success Connect Response the peer is still waiting for, then kick off
 * configuration with our first Configure Request unless one was already
 * sent.  (The `buf` declaration used below is not visible here.)
 */
1948 void __l2cap_connect_rsp_defer(struct sock *sk)
1950 struct l2cap_conn_rsp rsp;
1951 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1952 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
1955 sk->sk_state = BT_CONFIG;
1957 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
1958 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
1959 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
1960 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
/* chan->ident still holds the ident of the original Connect Request. */
1961 l2cap_send_cmd(conn, chan->ident,
1962 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
1964 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)
1967 l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
1968 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
1969 l2cap_build_conf_req(chan, buf), buf);
1970 chan->num_conf_req++;
/*
 * Scan a successful Configure Response for an RFC option and latch the
 * final ERTM/streaming parameters (timeouts, MPS) into the channel.
 * No-op for basic-mode channels.  Switch headers and breaks are not
 * visible in this extract.
 */
1973 static void l2cap_conf_rfc_get(struct sock *sk, void *rsp, int len)
1975 struct l2cap_pinfo *pi = l2cap_pi(sk);
1978 struct l2cap_conf_rfc rfc;
1980 BT_DBG("sk %p, rsp %p, len %d", sk, rsp, len);
/* Only ERTM and streaming mode carry RFC parameters worth keeping. */
1982 if ((pi->mode != L2CAP_MODE_ERTM) && (pi->mode != L2CAP_MODE_STREAMING))
1985 while (len >= L2CAP_CONF_OPT_SIZE) {
1986 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
1989 case L2CAP_CONF_RFC:
1990 if (olen == sizeof(rfc))
1991 memcpy(&rfc, (void *)val, olen);
1998 case L2CAP_MODE_ERTM:
1999 pi->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2000 pi->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2001 pi->mps = le16_to_cpu(rfc.max_pdu_size);
2003 case L2CAP_MODE_STREAMING:
2004 pi->mps = le16_to_cpu(rfc.max_pdu_size);
/*
 * Handle a Command Reject.  If it rejects our outstanding Information
 * Request (matching ident), treat the feature-mask exchange as finished
 * and proceed with starting queued connections anyway.
 */
2008 static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2010 struct l2cap_cmd_rej *rej = (struct l2cap_cmd_rej *) data;
/* 0x0000 = "command not understood"; other reasons are ignored. */
2012 if (rej->reason != 0x0000)
2015 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
2016 cmd->ident == conn->info_ident) {
2017 del_timer(&conn->info_timer);
2019 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2020 conn->info_ident = 0;
2022 l2cap_conn_start(conn);
/*
 * Handle an incoming Connect Request: find a listening socket for the
 * PSM, enforce link security (SDP exempt), check accept backlog, create
 * the child socket + channel, guard against duplicate source CIDs, and
 * answer with success / pending / error.  On success (and no pending
 * info exchange) the first Configure Request is sent immediately.
 * NOTE(review): goto labels and some error-path lines are missing from
 * this extract; comments describe the visible code only.
 */
2028 static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2030 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
2031 struct l2cap_conn_rsp rsp;
2032 struct l2cap_chan *chan = NULL;
2033 struct sock *parent, *sk = NULL;
2034 int result, status = L2CAP_CS_NO_INFO;
2036 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
2037 __le16 psm = req->psm;
2039 BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);
2041 /* Check if we have socket listening on psm */
2042 parent = l2cap_get_sock_by_psm(BT_LISTEN, psm, conn->src);
2044 result = L2CAP_CR_BAD_PSM;
2048 bh_lock_sock(parent);
2050 /* Check if the ACL is secure enough (if not SDP) */
/* PSM 0x0001 is SDP, which the spec allows without security. */
2051 if (psm != cpu_to_le16(0x0001) &&
2052 !hci_conn_check_link_mode(conn->hcon)) {
2053 conn->disc_reason = 0x05;
2054 result = L2CAP_CR_SEC_BLOCK;
2058 result = L2CAP_CR_NO_MEM;
2060 /* Check for backlog size */
2061 if (sk_acceptq_is_full(parent)) {
2062 BT_DBG("backlog full %d", parent->sk_ack_backlog)
2066 sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP, GFP_ATOMIC);
2070 chan = l2cap_chan_alloc(sk);
2072 l2cap_sock_kill(sk);
2076 write_lock_bh(&conn->chan_lock);
2078 /* Check if we already have channel with that dcid */
2079 if (__l2cap_get_chan_by_dcid(conn, scid)) {
2080 write_unlock_bh(&conn->chan_lock);
2081 sock_set_flag(sk, SOCK_ZAPPED);
2082 l2cap_sock_kill(sk);
2086 hci_conn_hold(conn->hcon);
2088 l2cap_sock_init(sk, parent);
2089 bacpy(&bt_sk(sk)->src, conn->src);
2090 bacpy(&bt_sk(sk)->dst, conn->dst);
2091 l2cap_pi(sk)->psm = psm;
2092 l2cap_pi(sk)->dcid = scid;
2094 bt_accept_enqueue(parent, sk);
2096 __l2cap_chan_add(conn, chan);
2098 l2cap_pi(sk)->chan = chan;
/* Our local source CID becomes the peer's destination CID. */
2100 dcid = l2cap_pi(sk)->scid;
2102 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
2104 chan->ident = cmd->ident;
2106 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
2107 if (l2cap_check_security(sk)) {
/* Userspace asked to approve connections explicitly. */
2108 if (bt_sk(sk)->defer_setup) {
2109 sk->sk_state = BT_CONNECT2;
2110 result = L2CAP_CR_PEND;
2111 status = L2CAP_CS_AUTHOR_PEND;
2112 parent->sk_data_ready(parent, 0);
2114 sk->sk_state = BT_CONFIG;
2115 result = L2CAP_CR_SUCCESS;
2116 status = L2CAP_CS_NO_INFO;
2119 sk->sk_state = BT_CONNECT2;
2120 result = L2CAP_CR_PEND;
2121 status = L2CAP_CS_AUTHEN_PEND;
/* Feature exchange still pending: answer "pending" for now. */
2124 sk->sk_state = BT_CONNECT2;
2125 result = L2CAP_CR_PEND;
2126 status = L2CAP_CS_NO_INFO;
2129 write_unlock_bh(&conn->chan_lock);
2132 bh_unlock_sock(parent);
2135 rsp.scid = cpu_to_le16(scid);
2136 rsp.dcid = cpu_to_le16(dcid);
2137 rsp.result = cpu_to_le16(result);
2138 rsp.status = cpu_to_le16(status);
2139 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
/* First connection while features are unknown: start the info
 * request / timer now so configuration can follow later. */
2141 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
2142 struct l2cap_info_req info;
2143 info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
2145 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
2146 conn->info_ident = l2cap_get_ident(conn);
2148 mod_timer(&conn->info_timer, jiffies +
2149 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
2151 l2cap_send_cmd(conn, conn->info_ident,
2152 L2CAP_INFO_REQ, sizeof(info), &info);
2155 if (chan && !(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT) &&
2156 result == L2CAP_CR_SUCCESS) {
2158 l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
2159 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2160 l2cap_build_conf_req(chan, buf), buf);
2161 chan->num_conf_req++;
/*
 * Handle a Connect Response to our outgoing connection: look up the
 * channel by source CID (or by command ident while still pending), then
 * on success move to BT_CONFIG and send the first Configure Request; on
 * pending just flag the state; otherwise tear the channel down.  (The
 * `req` buffer declaration and default case are not visible here.)
 */
2167 static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2169 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
2170 u16 scid, dcid, result, status;
2171 struct l2cap_chan *chan;
2175 scid = __le16_to_cpu(rsp->scid);
2176 dcid = __le16_to_cpu(rsp->dcid);
2177 result = __le16_to_cpu(rsp->result);
2178 status = __le16_to_cpu(rsp->status);
2180 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);
/* scid == 0 means the request was refused before a CID was assigned,
 * so fall back to matching by the command identifier. */
2183 chan = l2cap_get_chan_by_scid(conn, scid);
2187 chan = l2cap_get_chan_by_ident(conn, cmd->ident);
2195 case L2CAP_CR_SUCCESS:
2196 sk->sk_state = BT_CONFIG;
2198 l2cap_pi(sk)->dcid = dcid;
2199 l2cap_pi(sk)->conf_state &= ~L2CAP_CONF_CONNECT_PEND;
2201 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)
2204 l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
2206 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2207 l2cap_build_conf_req(chan, req), req);
2208 chan->num_conf_req++;
2212 l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
2216 /* don't delete l2cap channel if sk is owned by user */
/* Userspace holds the socket: defer teardown via a short timer. */
2217 if (sock_owned_by_user(sk)) {
2218 sk->sk_state = BT_DISCONN;
2219 l2cap_sock_clear_timer(sk);
2220 l2cap_sock_set_timer(sk, HZ / 5);
2224 l2cap_chan_del(chan, ECONNREFUSED);
2232 static inline void set_default_fcs(struct l2cap_pinfo *pi)
2234 /* FCS is enabled only in ERTM or streaming mode, if one or both
2237 if (pi->mode != L2CAP_MODE_ERTM && pi->mode != L2CAP_MODE_STREAMING)
2238 pi->fcs = L2CAP_FCS_NONE;
2239 else if (!(pi->conf_state & L2CAP_CONF_NO_FCS_RECV))
2240 pi->fcs = L2CAP_FCS_CRC16;
/*
 * Handle a Configure Request for one of our channels: reject if the
 * channel is not in BT_CONFIG, accumulate fragments in chan->conf_req
 * (flag bit 0 = "continuation"), and once complete parse the options,
 * send our response, and -- when both directions are done -- bring the
 * channel up (ERTM init + ready).  Some error-path and label lines are
 * missing from this extract.
 */
2243 static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
2245 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
2248 struct l2cap_chan *chan;
2252 dcid = __le16_to_cpu(req->dcid);
2253 flags = __le16_to_cpu(req->flags);
2255 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
2257 chan = l2cap_get_chan_by_scid(conn, dcid);
/* Config only makes sense while the channel is being configured. */
2263 if (sk->sk_state != BT_CONFIG) {
2264 struct l2cap_cmd_rej rej;
2266 rej.reason = cpu_to_le16(0x0002);
2267 l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
2272 /* Reject if config buffer is too small. */
2273 len = cmd_len - sizeof(*req);
2274 if (chan->conf_len + len > sizeof(chan->conf_req)) {
2275 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2276 l2cap_build_conf_rsp(sk, rsp,
2277 L2CAP_CONF_REJECT, flags), rsp);
/* Accumulate this fragment of the request. */
2282 memcpy(chan->conf_req + chan->conf_len, req->data, len);
2283 chan->conf_len += len;
2285 if (flags & 0x0001) {
2286 /* Incomplete config. Send empty response. */
2287 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2288 l2cap_build_conf_rsp(sk, rsp,
2289 L2CAP_CONF_SUCCESS, 0x0001), rsp);
2293 /* Complete config. */
2294 len = l2cap_parse_conf_req(chan, rsp);
/* Negative length means the mode negotiation failed hard. */
2296 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2300 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
2301 chan->num_conf_rsp++;
2303 /* Reset config buffer. */
2306 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE))
/* Both directions configured: the channel is fully connected. */
2309 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_INPUT_DONE) {
2310 set_default_fcs(l2cap_pi(sk));
2312 sk->sk_state = BT_CONNECTED;
2314 chan->next_tx_seq = 0;
2315 chan->expected_tx_seq = 0;
2316 __skb_queue_head_init(TX_QUEUE(sk));
2317 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
2318 l2cap_ertm_init(chan);
2320 l2cap_chan_ready(sk);
/* We answered the peer but have not yet sent our own request. */
2324 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)) {
2326 l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
2327 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2328 l2cap_build_conf_req(chan, buf), buf);
2329 chan->num_conf_req++;
/*
 * Handle a Configure Response to our request.  SUCCESS latches the RFC
 * parameters; UNACCEPT triggers a renegotiation attempt (bounded by
 * L2CAP_CONF_MAX_CONF_RSP); anything else disconnects.  When both sides
 * are done the channel goes BT_CONNECTED.  Labels and the `req` buffer
 * declaration are not visible in this extract.
 */
2337 static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2339 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
2340 u16 scid, flags, result;
2341 struct l2cap_chan *chan;
/* NOTE(review): cmd->len is a little-endian wire field; this should be
 * le16_to_cpu(cmd->len) to be correct on big-endian hosts -- confirm
 * against struct l2cap_cmd_hdr and upstream history. */
2343 int len = cmd->len - sizeof(*rsp);
2345 scid = __le16_to_cpu(rsp->scid);
2346 flags = __le16_to_cpu(rsp->flags);
2347 result = __le16_to_cpu(rsp->result);
2349 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
2350 scid, flags, result);
2352 chan = l2cap_get_chan_by_scid(conn, scid);
2359 case L2CAP_CONF_SUCCESS:
2360 l2cap_conf_rfc_get(sk, rsp->data, len);
2363 case L2CAP_CONF_UNACCEPT:
/* Retry negotiation a bounded number of times. */
2364 if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
2367 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
2368 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2372 /* throw out any old stored conf requests */
2373 result = L2CAP_CONF_SUCCESS;
2374 len = l2cap_parse_conf_rsp(sk, rsp->data,
2377 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2381 l2cap_send_cmd(conn, l2cap_get_ident(conn),
2382 L2CAP_CONF_REQ, len, req);
2383 chan->num_conf_req++;
2384 if (result != L2CAP_CONF_SUCCESS)
/* Any other result: give up and disconnect the channel. */
2390 sk->sk_err = ECONNRESET;
2391 l2cap_sock_set_timer(sk, HZ * 5);
2392 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2399 l2cap_pi(sk)->conf_state |= L2CAP_CONF_INPUT_DONE;
/* Both directions configured: the channel is fully connected. */
2401 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE) {
2402 set_default_fcs(l2cap_pi(sk));
2404 sk->sk_state = BT_CONNECTED;
2405 chan->next_tx_seq = 0;
2406 chan->expected_tx_seq = 0;
2407 __skb_queue_head_init(TX_QUEUE(sk));
2408 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
2409 l2cap_ertm_init(chan);
2411 l2cap_chan_ready(sk);
/*
 * Handle a Disconnect Request: acknowledge with a Disconnect Response,
 * shut the socket down, and delete the channel -- unless userspace
 * currently owns the socket, in which case teardown is deferred via a
 * short timer.
 */
2419 static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2421 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
2422 struct l2cap_disconn_rsp rsp;
2424 struct l2cap_chan *chan;
2427 scid = __le16_to_cpu(req->scid);
2428 dcid = __le16_to_cpu(req->dcid);
2430 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
/* The peer's dcid is our scid, so look up by our local CID. */
2432 chan = l2cap_get_chan_by_scid(conn, dcid);
2438 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
2439 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
2440 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
2442 sk->sk_shutdown = SHUTDOWN_MASK;
2444 /* don't delete l2cap channel if sk is owned by user */
2445 if (sock_owned_by_user(sk)) {
2446 sk->sk_state = BT_DISCONN;
2447 l2cap_sock_clear_timer(sk);
2448 l2cap_sock_set_timer(sk, HZ / 5);
2453 l2cap_chan_del(chan, ECONNRESET);
2456 l2cap_sock_kill(sk);
/*
 * Handle a Disconnect Response to our own disconnect: delete the
 * channel (error 0 -- clean close), deferring via timer when userspace
 * owns the socket, mirroring l2cap_disconnect_req().
 */
2460 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2462 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
2464 struct l2cap_chan *chan;
2467 scid = __le16_to_cpu(rsp->scid);
2468 dcid = __le16_to_cpu(rsp->dcid);
2470 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
2472 chan = l2cap_get_chan_by_scid(conn, scid);
2478 /* don't delete l2cap channel if sk is owned by user */
2479 if (sock_owned_by_user(sk)) {
2480 sk->sk_state = BT_DISCONN;
2481 l2cap_sock_clear_timer(sk);
2482 l2cap_sock_set_timer(sk, HZ / 5);
2487 l2cap_chan_del(chan, 0);
2490 l2cap_sock_kill(sk);
/*
 * Handle an Information Request: answer FEAT_MASK with our feature mask
 * (plus ERTM/streaming bits), FIXED_CHAN with the fixed-channel bitmap,
 * and anything else with NOTSUPP.  (The `buf` declarations for the two
 * success replies are not visible in this extract.)
 */
2494 static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2496 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
2499 type = __le16_to_cpu(req->type);
2501 BT_DBG("type 0x%4.4x", type);
2503 if (type == L2CAP_IT_FEAT_MASK) {
2505 u32 feat_mask = l2cap_feat_mask;
2506 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
2507 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
2508 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
/* Advertise the reliable-mode features we implement. */
2510 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
2512 put_unaligned_le32(feat_mask, rsp->data);
2513 l2cap_send_cmd(conn, cmd->ident,
2514 L2CAP_INFO_RSP, sizeof(buf), buf);
2515 } else if (type == L2CAP_IT_FIXED_CHAN) {
2517 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
2518 rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
2519 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
/* 8-byte fixed channel map follows the 4-byte rsp header. */
2520 memcpy(buf + 4, l2cap_fixed_chan, 8);
2521 l2cap_send_cmd(conn, cmd->ident,
2522 L2CAP_INFO_RSP, sizeof(buf), buf);
2524 struct l2cap_info_rsp rsp;
2525 rsp.type = cpu_to_le16(type);
2526 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
2527 l2cap_send_cmd(conn, cmd->ident,
2528 L2CAP_INFO_RSP, sizeof(rsp), &rsp);
/*
 * Handle an Information Response.  On the feature-mask reply, chain a
 * fixed-channel query when the peer advertises fixed channels;
 * otherwise (or after the fixed-channel reply, or on failure) mark the
 * info exchange done and start any queued connections.
 */
2534 static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2536 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
2539 type = __le16_to_cpu(rsp->type);
2540 result = __le16_to_cpu(rsp->result);
2542 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
2544 /* L2CAP Info req/rsp are unbound to channels, add extra checks */
2545 if (cmd->ident != conn->info_ident ||
2546 conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
2549 del_timer(&conn->info_timer);
2551 if (result != L2CAP_IR_SUCCESS) {
2552 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2553 conn->info_ident = 0;
2555 l2cap_conn_start(conn);
2560 if (type == L2CAP_IT_FEAT_MASK) {
2561 conn->feat_mask = get_unaligned_le32(rsp->data);
/* Peer has fixed channels: query the fixed-channel map next. */
2563 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
2564 struct l2cap_info_req req;
2565 req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
2567 conn->info_ident = l2cap_get_ident(conn);
2569 l2cap_send_cmd(conn, conn->info_ident,
2570 L2CAP_INFO_REQ, sizeof(req), &req);
2572 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2573 conn->info_ident = 0;
2575 l2cap_conn_start(conn);
2577 } else if (type == L2CAP_IT_FIXED_CHAN) {
2578 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2579 conn->info_ident = 0;
2581 l2cap_conn_start(conn);
/*
 * Validate LE connection-update parameters against the Core spec
 * ranges: interval min/max (1.25 ms units, 6..3200, min <= max),
 * supervision timeout multiplier (10 ms units, 10..3200, and strictly
 * larger than max interval * 8), and slave latency (<= 499 and small
 * enough that the timeout still covers latency+1 intervals).  The
 * error-return lines and max_latency declaration are not visible here.
 */
2587 static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency,
2592 if (min > max || min < 6 || max > 3200)
2595 if (to_multiplier < 10 || to_multiplier > 3200)
/* Timeout (10ms units) must exceed the max interval (1.25ms units):
 * to_multiplier*10 > max*1.25  <=>  max < to_multiplier*8. */
2598 if (max >= to_multiplier * 8)
2601 max_latency = (to_multiplier * 8 / max) - 1;
2602 if (latency > 499 || latency > max_latency)
/*
 * Handle an LE Connection Parameter Update Request (slave-initiated).
 * Only valid when we are master; validates the proposed parameters,
 * always answers with accept/reject, and on accept pushes the new
 * parameters to the controller via hci_le_conn_update().
 */
2608 static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
2609 struct l2cap_cmd_hdr *cmd, u8 *data)
2611 struct hci_conn *hcon = conn->hcon;
2612 struct l2cap_conn_param_update_req *req;
2613 struct l2cap_conn_param_update_rsp rsp;
2614 u16 min, max, latency, to_multiplier, cmd_len;
/* Only the master may apply connection parameter updates. */
2617 if (!(hcon->link_mode & HCI_LM_MASTER))
2620 cmd_len = __le16_to_cpu(cmd->len);
2621 if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
2624 req = (struct l2cap_conn_param_update_req *) data;
2625 min = __le16_to_cpu(req->min);
2626 max = __le16_to_cpu(req->max);
2627 latency = __le16_to_cpu(req->latency);
2628 to_multiplier = __le16_to_cpu(req->to_multiplier);
2630 BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
2631 min, max, latency, to_multiplier);
2633 memset(&rsp, 0, sizeof(rsp));
2635 err = l2cap_check_conn_param(min, max, latency, to_multiplier);
2637 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
2639 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
2641 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
/* Only program the controller when the parameters were accepted. */
2645 hci_le_conn_update(hcon, min, max, latency, to_multiplier);
/*
 * Dispatch one BR/EDR signalling command to its handler.  Echo Requests
 * are answered inline; unknown codes are logged and reported so the
 * caller can send a Command Reject.
 */
2650 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
2651 struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
2655 switch (cmd->code) {
2656 case L2CAP_COMMAND_REJ:
2657 l2cap_command_rej(conn, cmd, data);
2660 case L2CAP_CONN_REQ:
2661 err = l2cap_connect_req(conn, cmd, data);
2664 case L2CAP_CONN_RSP:
2665 err = l2cap_connect_rsp(conn, cmd, data);
2668 case L2CAP_CONF_REQ:
2669 err = l2cap_config_req(conn, cmd, cmd_len, data);
2672 case L2CAP_CONF_RSP:
2673 err = l2cap_config_rsp(conn, cmd, data);
2676 case L2CAP_DISCONN_REQ:
2677 err = l2cap_disconnect_req(conn, cmd, data);
2680 case L2CAP_DISCONN_RSP:
2681 err = l2cap_disconnect_rsp(conn, cmd, data);
2684 case L2CAP_ECHO_REQ:
/* Echo back the payload unchanged, same ident. */
2685 l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
2688 case L2CAP_ECHO_RSP:
2691 case L2CAP_INFO_REQ:
2692 err = l2cap_information_req(conn, cmd, data);
2695 case L2CAP_INFO_RSP:
2696 err = l2cap_information_rsp(conn, cmd, data);
2700 BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
/*
 * Dispatch one LE signalling command.  Only the Connection Parameter
 * Update Request needs real handling here; rejects and update
 * responses are accepted silently, anything else is an error.
 */
2708 static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
2709 struct l2cap_cmd_hdr *cmd, u8 *data)
2711 switch (cmd->code) {
2712 case L2CAP_COMMAND_REJ:
2715 case L2CAP_CONN_PARAM_UPDATE_REQ:
2716 return l2cap_conn_param_update_req(conn, cmd, data);
2718 case L2CAP_CONN_PARAM_UPDATE_RSP:
2722 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
/*
 * Process every signalling command packed into one skb on the
 * signalling channel: copy raw traffic to SOCK_RAW listeners, then walk
 * the command headers, validating length and ident, dispatching to the
 * LE or BR/EDR handler, and answering failures with a Command Reject.
 * (len/cmd_len/err declarations are not visible in this extract.)
 */
2727 static inline void l2cap_sig_channel(struct l2cap_conn *conn,
2728 struct sk_buff *skb)
2730 u8 *data = skb->data;
2732 struct l2cap_cmd_hdr cmd;
2735 l2cap_raw_recv(conn, skb);
2737 while (len >= L2CAP_CMD_HDR_SIZE) {
2739 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
2740 data += L2CAP_CMD_HDR_SIZE;
2741 len -= L2CAP_CMD_HDR_SIZE;
2743 cmd_len = le16_to_cpu(cmd.len);
2745 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);
/* Ident 0 is reserved; an over-long command means a corrupt frame. */
2747 if (cmd_len > len || !cmd.ident) {
2748 BT_DBG("corrupted command");
2752 if (conn->hcon->type == LE_LINK)
2753 err = l2cap_le_sig_cmd(conn, &cmd, data);
2755 err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);
2758 struct l2cap_cmd_rej rej;
2760 BT_ERR("Wrong link type (%d)", err);
2762 /* FIXME: Map err to a valid reason */
2763 rej.reason = cpu_to_le16(0);
2764 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
/*
 * Verify the trailing CRC16 FCS of a received frame when enabled.
 * skb_trim() only shrinks skb->len, so the 2 FCS bytes are still
 * readable at skb->data + skb->len right after the trim; the checksum
 * covers the L2CAP header (located just before skb->data) plus the
 * remaining payload.  Error/success returns are not visible here.
 */
2774 static int l2cap_check_fcs(struct l2cap_pinfo *pi, struct sk_buff *skb)
2776 u16 our_fcs, rcv_fcs;
2777 int hdr_size = L2CAP_HDR_SIZE + 2;
2779 if (pi->fcs == L2CAP_FCS_CRC16) {
2780 skb_trim(skb, skb->len - 2);
2781 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
2782 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
2784 if (our_fcs != rcv_fcs)
/*
 * After an F-bit poll, send whatever best reports our receive state:
 * RNR when locally busy, otherwise (re)transmit pending I-frames, and
 * finally an RR if nothing at all was sent.  (The `control` declaration
 * is not visible in this extract.)
 */
2790 static inline void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
2794 chan->frames_sent = 0;
2796 control |= chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
2798 if (chan->conn_state & L2CAP_CONN_LOCAL_BUSY) {
2799 control |= L2CAP_SUPER_RCV_NOT_READY;
2800 l2cap_send_sframe(chan, control);
2801 chan->conn_state |= L2CAP_CONN_RNR_SENT;
2804 if (chan->conn_state & L2CAP_CONN_REMOTE_BUSY)
2805 l2cap_retransmit_frames(chan);
2807 l2cap_ertm_send(chan);
/* Nothing was transmitted above: acknowledge with a plain RR. */
2809 if (!(chan->conn_state & L2CAP_CONN_LOCAL_BUSY) &&
2810 chan->frames_sent == 0) {
2811 control |= L2CAP_SUPER_RCV_READY;
2812 l2cap_send_sframe(chan, control);
/*
 * Insert an out-of-sequence I-frame into the SREJ queue, keeping the
 * queue ordered by tx_seq distance from buffer_seq (modulo-64 sequence
 * space).  A frame whose tx_seq is already queued is a duplicate
 * (handled on lines not visible here).
 */
2816 static int l2cap_add_to_srej_queue(struct l2cap_chan *chan, struct sk_buff *skb, u8 tx_seq, u8 sar)
2818 struct sock *sk = chan->sk;
2819 struct sk_buff *next_skb;
2820 int tx_seq_offset, next_tx_seq_offset;
2822 bt_cb(skb)->tx_seq = tx_seq;
2823 bt_cb(skb)->sar = sar;
/* Empty queue: just append. */
2825 next_skb = skb_peek(SREJ_QUEUE(sk));
2827 __skb_queue_tail(SREJ_QUEUE(sk), skb);
/* Distance of this frame from the current buffer position, wrapped
 * into the 0..63 sequence window. */
2831 tx_seq_offset = (tx_seq - chan->buffer_seq) % 64;
2832 if (tx_seq_offset < 0)
2833 tx_seq_offset += 64;
/* Same tx_seq already queued: duplicate frame. */
2836 if (bt_cb(next_skb)->tx_seq == tx_seq)
2839 next_tx_seq_offset = (bt_cb(next_skb)->tx_seq -
2840 chan->buffer_seq) % 64;
2841 if (next_tx_seq_offset < 0)
2842 next_tx_seq_offset += 64;
/* First queued frame that is further away: insert before it. */
2844 if (next_tx_seq_offset > tx_seq_offset) {
2845 __skb_queue_before(SREJ_QUEUE(sk), next_skb, skb);
2849 if (skb_queue_is_last(SREJ_QUEUE(sk), next_skb))
2852 } while ((next_skb = skb_queue_next(SREJ_QUEUE(sk), next_skb)));
2854 __skb_queue_tail(SREJ_QUEUE(sk), skb);
/*
 * Reassemble one ERTM I-frame into an SDU according to its SAR bits:
 * unsegmented frames are delivered directly; START allocates the
 * reassembly buffer from the 2-byte SDU-length prefix; CONTINUE/END
 * append, with END cloning and delivering the completed SDU (with a
 * retry flag so Local Busy replays don't double-append).  Errors fall
 * through to a disconnect.  Several break/goto/label lines are missing
 * from this extract.
 */
2859 static int l2cap_ertm_reassembly_sdu(struct l2cap_chan *chan, struct sk_buff *skb, u16 control)
2861 struct l2cap_pinfo *pi = l2cap_pi(chan->sk);
2862 struct sk_buff *_skb;
2865 switch (control & L2CAP_CTRL_SAR) {
2866 case L2CAP_SDU_UNSEGMENTED:
/* An unsegmented frame in the middle of reassembly is a
 * protocol violation. */
2867 if (chan->conn_state & L2CAP_CONN_SAR_SDU)
2870 err = sock_queue_rcv_skb(chan->sk, skb);
2876 case L2CAP_SDU_START:
2877 if (chan->conn_state & L2CAP_CONN_SAR_SDU)
/* The first two payload bytes carry the total SDU length. */
2880 chan->sdu_len = get_unaligned_le16(skb->data);
2882 if (chan->sdu_len > pi->imtu)
2885 chan->sdu = bt_skb_alloc(chan->sdu_len, GFP_ATOMIC);
2889 /* pull sdu_len bytes only after alloc, because of Local Busy
2890 * condition we have to be sure that this will be executed
2891 * only once, i.e., when alloc does not fail */
2894 memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);
2896 chan->conn_state |= L2CAP_CONN_SAR_SDU;
2897 chan->partial_sdu_len = skb->len;
2900 case L2CAP_SDU_CONTINUE:
2901 if (!(chan->conn_state & L2CAP_CONN_SAR_SDU))
2907 chan->partial_sdu_len += skb->len;
2908 if (chan->partial_sdu_len > chan->sdu_len)
2911 memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);
/* L2CAP_SDU_END case (label not visible in this extract). */
2916 if (!(chan->conn_state & L2CAP_CONN_SAR_SDU))
/* SAR_RETRY means this END frame was already appended before a
 * failed delivery; skip straight to the re-delivery attempt. */
2922 if (!(chan->conn_state & L2CAP_CONN_SAR_RETRY)) {
2923 chan->partial_sdu_len += skb->len;
2925 if (chan->partial_sdu_len > pi->imtu)
2928 if (chan->partial_sdu_len != chan->sdu_len)
2931 memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);
2934 _skb = skb_clone(chan->sdu, GFP_ATOMIC);
2936 chan->conn_state |= L2CAP_CONN_SAR_RETRY;
2940 err = sock_queue_rcv_skb(chan->sk, _skb);
2943 chan->conn_state |= L2CAP_CONN_SAR_RETRY;
2947 chan->conn_state &= ~L2CAP_CONN_SAR_RETRY;
2948 chan->conn_state &= ~L2CAP_CONN_SAR_SDU;
2950 kfree_skb(chan->sdu);
/* Error path: drop any partial SDU and reset the connection. */
2958 kfree_skb(chan->sdu);
2962 l2cap_send_disconn_req(pi->conn, chan, ECONNRESET);
/*
 * Try to drain the Local Busy queue.  Frames that still cannot be
 * delivered are pushed back and the busy condition persists; once the
 * queue empties, exit Local Busy -- if we had sent RNR, poll the peer
 * with an RR+P and arm the monitor timer to await the F-bit response.
 * (control/err declarations and a goto label are not visible here.)
 */
2967 static int l2cap_try_push_rx_skb(struct l2cap_chan *chan)
2969 struct sock *sk = chan->sk;
2970 struct sk_buff *skb;
2974 while ((skb = skb_dequeue(BUSY_QUEUE(sk)))) {
2975 control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
2976 err = l2cap_ertm_reassembly_sdu(chan, skb, control);
/* Still failing: requeue at the head and stay busy. */
2978 skb_queue_head(BUSY_QUEUE(sk), skb);
2982 chan->buffer_seq = (chan->buffer_seq + 1) % 64;
2985 if (!(chan->conn_state & L2CAP_CONN_RNR_SENT))
/* We told the peer we were busy: poll it so it resumes sending. */
2988 control = chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
2989 control |= L2CAP_SUPER_RCV_READY | L2CAP_CTRL_POLL;
2990 l2cap_send_sframe(chan, control);
2991 chan->retry_count = 1;
2993 del_timer(&chan->retrans_timer);
2994 __mod_monitor_timer();
2996 chan->conn_state |= L2CAP_CONN_WAIT_F;
2999 chan->conn_state &= ~L2CAP_CONN_LOCAL_BUSY;
3000 chan->conn_state &= ~L2CAP_CONN_RNR_SENT;
3002 BT_DBG("sk %p, Exit local busy", sk);
/*
 * Workqueue handler for the Local Busy condition: repeatedly retry
 * pushing queued receive frames, sleeping ~HZ/5 between attempts, until
 * the queue drains, a signal/socket error occurs, or the retry budget
 * (L2CAP_LOCAL_BUSY_TRIES) is exhausted -- which disconnects the
 * channel.  Lock/unlock and some exit lines are not visible here.
 */
3007 static void l2cap_busy_work(struct work_struct *work)
3009 DECLARE_WAITQUEUE(wait, current);
3010 struct l2cap_pinfo *pi =
3011 container_of(work, struct l2cap_pinfo, busy_work);
/* struct sock is the first member of the pinfo, so this cast works. */
3012 struct sock *sk = (struct sock *)pi;
3013 int n_tries = 0, timeo = HZ/5, err;
3014 struct sk_buff *skb;
3018 add_wait_queue(sk_sleep(sk), &wait);
3019 while ((skb = skb_peek(BUSY_QUEUE(sk)))) {
3020 set_current_state(TASK_INTERRUPTIBLE);
/* Give up after too many attempts: reset the connection. */
3022 if (n_tries++ > L2CAP_LOCAL_BUSY_TRIES) {
3024 l2cap_send_disconn_req(pi->conn, pi->chan, EBUSY);
3031 if (signal_pending(current)) {
3032 err = sock_intr_errno(timeo);
3037 timeo = schedule_timeout(timeo);
3040 err = sock_error(sk);
3044 if (l2cap_try_push_rx_skb(l2cap_pi(sk)->chan) == 0)
3048 set_current_state(TASK_RUNNING);
3049 remove_wait_queue(sk_sleep(sk), &wait);
/* Push one in-sequence ERTM I-frame toward the socket.  If the channel
 * is already in local busy the frame is appended to the busy queue and
 * a drain is attempted; otherwise it is reassembled directly.  When the
 * socket receive buffer overflows, enter local busy: defer the frame,
 * send RNR to the peer, stop the ack timer and kick the busy worker. */
3054 static int l2cap_push_rx_skb(struct l2cap_chan *chan, struct sk_buff *skb, u16 control)
3056 struct sock *sk = chan->sk;
3057 struct l2cap_pinfo *pi = l2cap_pi(sk);
3060 if (chan->conn_state & L2CAP_CONN_LOCAL_BUSY) {
/* Preserve the SAR bits for later reassembly and keep FIFO order. */
3061 bt_cb(skb)->sar = control >> L2CAP_CTRL_SAR_SHIFT;
3062 __skb_queue_tail(BUSY_QUEUE(sk), skb);
3063 return l2cap_try_push_rx_skb(chan);
/* Fast path: not busy, reassemble and advance the window on success. */
3068 err = l2cap_ertm_reassembly_sdu(chan, skb, control);
3070 chan->buffer_seq = (chan->buffer_seq + 1) % 64;
3074 /* Busy Condition */
3075 BT_DBG("sk %p, Enter local busy", sk);
3077 chan->conn_state |= L2CAP_CONN_LOCAL_BUSY;
3078 bt_cb(skb)->sar = control >> L2CAP_CTRL_SAR_SHIFT;
3079 __skb_queue_tail(BUSY_QUEUE(sk), skb);
/* Tell the peer to stop sending: RNR with our current buffer_seq. */
3081 sctrl = chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3082 sctrl |= L2CAP_SUPER_RCV_NOT_READY;
3083 l2cap_send_sframe(chan, sctrl);
3085 chan->conn_state |= L2CAP_CONN_RNR_SENT;
/* No point acking while busy; the worker will retry delivery. */
3087 del_timer(&chan->ack_timer);
3089 queue_work(_busy_wq, &pi->busy_work);
/* SAR reassembly for Streaming mode.  Unlike ERTM, lost frames are
 * tolerated: a START while a partial SDU is pending simply discards
 * the stale partial SDU.  The complete SDU is delivered to the socket
 * via sock_queue_rcv_skb(). */
3094 static int l2cap_streaming_reassembly_sdu(struct l2cap_chan *chan, struct sk_buff *skb, u16 control)
3096 struct l2cap_pinfo *pi = l2cap_pi(chan->sk);
3097 struct sk_buff *_skb;
3101 * TODO: We have to notify the userland if some data is lost with the
3105 switch (control & L2CAP_CTRL_SAR) {
3106 case L2CAP_SDU_UNSEGMENTED:
/* A dangling partial SDU means frames were lost; drop it. */
3107 if (chan->conn_state & L2CAP_CONN_SAR_SDU) {
3108 kfree_skb(chan->sdu);
3112 err = sock_queue_rcv_skb(chan->sk, skb);
3118 case L2CAP_SDU_START:
3119 if (chan->conn_state & L2CAP_CONN_SAR_SDU) {
3120 kfree_skb(chan->sdu);
/* START frame carries the total SDU length in its first two octets. */
3124 chan->sdu_len = get_unaligned_le16(skb->data);
/* Peer claims an SDU bigger than our MTU — reject it. */
3127 if (chan->sdu_len > pi->imtu) {
3132 chan->sdu = bt_skb_alloc(chan->sdu_len, GFP_ATOMIC);
3138 memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);
3140 chan->conn_state |= L2CAP_CONN_SAR_SDU;
3141 chan->partial_sdu_len = skb->len;
3145 case L2CAP_SDU_CONTINUE:
/* CONTINUE without a preceding START is a protocol violation. */
3146 if (!(chan->conn_state & L2CAP_CONN_SAR_SDU))
3149 memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);
3151 chan->partial_sdu_len += skb->len;
/* NOTE(review): on overflow chan->sdu is freed but the pointer and the
 * SAR_SDU flag appear to be left set here — verify against the elided
 * lines that the stale pointer cannot be used afterwards. */
3152 if (chan->partial_sdu_len > chan->sdu_len)
3153 kfree_skb(chan->sdu)
3160 if (!(chan->conn_state & L2CAP_CONN_SAR_SDU))
3163 memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);
3165 chan->conn_state &= ~L2CAP_CONN_SAR_SDU;
3166 chan->partial_sdu_len += skb->len;
3168 if (chan->partial_sdu_len > pi->imtu)
/* Deliver only when the reassembled length matches the announced one. */
3171 if (chan->partial_sdu_len == chan->sdu_len) {
/* NOTE(review): skb_clone() may return NULL; confirm the elided lines
 * handle _skb == NULL before/inside sock_queue_rcv_skb(). */
3172 _skb = skb_clone(chan->sdu, GFP_ATOMIC);
3173 err = sock_queue_rcv_skb(chan->sk, _skb);
3180 kfree_skb(chan->sdu);
/* After a missing I-frame arrives, deliver any consecutively-numbered
 * frames that were parked on the SREJ queue, advancing both the SREJ
 * buffer sequence and tx_seq (modulo 64) for each frame delivered.
 * Stops at the first gap in sequence numbers. */
3188 static void l2cap_check_srej_gap(struct l2cap_chan *chan, u8 tx_seq)
3190 struct sock *sk = chan->sk;
3191 struct sk_buff *skb;
3194 while ((skb = skb_peek(SREJ_QUEUE(sk)))) {
/* Queue head is not the next expected frame: gap still open, stop. */
3195 if (bt_cb(skb)->tx_seq != tx_seq)
3198 skb = skb_dequeue(SREJ_QUEUE(sk));
3199 control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
3200 l2cap_ertm_reassembly_sdu(chan, skb, control);
3201 chan->buffer_seq_srej =
3202 (chan->buffer_seq_srej + 1) % 64;
3203 tx_seq = (tx_seq + 1) % 64;
/* Re-send SREJ S-frames for every outstanding entry up to and including
 * tx_seq.  The matching entry is removed (frame was received); every
 * entry before it is re-requested and rotated to the tail of the SREJ
 * list so the list keeps request order. */
3207 static void l2cap_resend_srejframe(struct l2cap_chan *chan, u8 tx_seq)
3209 struct sock *sk = chan->sk;
3210 struct srej_list *l, *tmp;
3213 list_for_each_entry_safe(l, tmp, SREJ_LIST(sk), list) {
/* Found the entry for the just-received frame — done (entry freed in
 * the elided lines). */
3214 if (l->tx_seq == tx_seq) {
/* Still missing: ask for it again and move it to the list tail. */
3219 control = L2CAP_SUPER_SELECT_REJECT;
3220 control |= l->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3221 l2cap_send_sframe(chan, control);
3223 list_add_tail(&l->list, SREJ_LIST(sk));
3227 static void l2cap_send_srejframe(struct l2cap_chan *chan, u8 tx_seq)
3229 struct sock *sk = chan->sk;
3230 struct srej_list *new;
3233 while (tx_seq != chan->expected_tx_seq) {
3234 control = L2CAP_SUPER_SELECT_REJECT;
3235 control |= chan->expected_tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3236 l2cap_send_sframe(chan, control);
3238 new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC);
3239 new->tx_seq = chan->expected_tx_seq;
3240 chan->expected_tx_seq = (chan->expected_tx_seq + 1) % 64;
3241 list_add_tail(&new->list, SREJ_LIST(sk));
3243 chan->expected_tx_seq = (chan->expected_tx_seq + 1) % 64;
/* Core ERTM receive state machine for I-frames.  Validates tx_seq
 * against the receive window, handles the SREJ_SENT recovery state
 * (out-of-order frames parked on the SREJ queue), detects new gaps
 * (entering SREJ), drops duplicates, and finally pushes in-sequence
 * frames to the socket, acking every (tx_win/6 + 1) frames. */
3246 static inline int l2cap_data_channel_iframe(struct l2cap_chan *chan, u16 rx_control, struct sk_buff *skb)
3248 struct sock *sk = chan->sk;
3249 struct l2cap_pinfo *pi = l2cap_pi(sk);
3250 u8 tx_seq = __get_txseq(rx_control);
3251 u8 req_seq = __get_reqseq(rx_control);
3252 u8 sar = rx_control >> L2CAP_CTRL_SAR_SHIFT;
3253 int tx_seq_offset, expected_tx_seq_offset;
/* Ack cadence: acknowledge after roughly a sixth of the tx window. */
3254 int num_to_ack = (pi->tx_win/6) + 1;
3257 BT_DBG("chan %p len %d tx_seq %d rx_control 0x%4.4x", chan, skb->len,
3258 tx_seq, rx_control);
/* F-bit answering our poll: stop the monitor timer, resume retransmit
 * timing if frames are still unacked, and leave WAIT_F. */
3260 if (L2CAP_CTRL_FINAL & rx_control &&
3261 chan->conn_state & L2CAP_CONN_WAIT_F) {
3262 del_timer(&chan->monitor_timer);
3263 if (chan->unacked_frames > 0)
3264 __mod_retrans_timer();
3265 chan->conn_state &= ~L2CAP_CONN_WAIT_F;
/* Piggybacked acknowledgement: drop everything up to req_seq. */
3268 chan->expected_ack_seq = req_seq;
3269 l2cap_drop_acked_frames(chan);
3271 if (tx_seq == chan->expected_tx_seq)
/* Distance of tx_seq from the window base, normalized to 0..63. */
3274 tx_seq_offset = (tx_seq - chan->buffer_seq) % 64;
3275 if (tx_seq_offset < 0)
3276 tx_seq_offset += 64;
3278 /* invalid tx_seq */
3279 if (tx_seq_offset >= pi->tx_win) {
3280 l2cap_send_disconn_req(pi->conn, chan, ECONNRESET);
/* NOTE(review): '==' here (vs '&' elsewhere) only matches when
 * LOCAL_BUSY is the sole flag set — confirm this is intentional. */
3284 if (chan->conn_state == L2CAP_CONN_LOCAL_BUSY)
/* --- Already in SREJ recovery --- */
3287 if (chan->conn_state & L2CAP_CONN_SREJ_SENT) {
3288 struct srej_list *first;
3290 first = list_first_entry(SREJ_LIST(sk),
3291 struct srej_list, list);
/* The oldest missing frame arrived: park it, deliver the run of
 * consecutive frames, and retire the SREJ list entry. */
3292 if (tx_seq == first->tx_seq) {
3293 l2cap_add_to_srej_queue(chan, skb, tx_seq, sar);
3294 l2cap_check_srej_gap(chan, tx_seq);
3296 list_del(&first->list);
/* All gaps filled: leave SREJ_SENT and ack the peer. */
3299 if (list_empty(SREJ_LIST(sk))) {
3300 chan->buffer_seq = chan->buffer_seq_srej;
3301 chan->conn_state &= ~L2CAP_CONN_SREJ_SENT;
3302 l2cap_send_ack(chan);
3303 BT_DBG("sk %p, Exit SREJ_SENT", sk);
3306 struct srej_list *l;
3308 /* duplicated tx_seq */
3309 if (l2cap_add_to_srej_queue(chan, skb, tx_seq, sar) < 0)
/* A frame we already SREJ'd arrived out of order: re-request the
 * ones still missing before it. */
3312 list_for_each_entry(l, SREJ_LIST(sk), list) {
3313 if (l->tx_seq == tx_seq) {
3314 l2cap_resend_srejframe(chan, tx_seq);
3318 l2cap_send_srejframe(chan, tx_seq);
/* --- Not yet in SREJ: decide duplicate vs new gap --- */
3321 expected_tx_seq_offset =
3322 (chan->expected_tx_seq - chan->buffer_seq) % 64;
3323 if (expected_tx_seq_offset < 0)
3324 expected_tx_seq_offset += 64;
3326 /* duplicated tx_seq */
3327 if (tx_seq_offset < expected_tx_seq_offset)
/* New gap detected: enter SREJ recovery. */
3330 chan->conn_state |= L2CAP_CONN_SREJ_SENT;
3332 BT_DBG("sk %p, Enter SREJ", sk);
3334 INIT_LIST_HEAD(SREJ_LIST(sk));
3335 chan->buffer_seq_srej = chan->buffer_seq;
3337 __skb_queue_head_init(SREJ_QUEUE(sk));
3338 __skb_queue_head_init(BUSY_QUEUE(sk));
3339 l2cap_add_to_srej_queue(chan, skb, tx_seq, sar);
3341 chan->conn_state |= L2CAP_CONN_SEND_PBIT;
3343 l2cap_send_srejframe(chan, tx_seq);
3345 del_timer(&chan->ack_timer);
/* --- In-sequence frame --- */
3350 chan->expected_tx_seq = (chan->expected_tx_seq + 1) % 64;
/* While in SREJ recovery, in-sequence frames still queue behind the
 * gap so delivery order is preserved. */
3352 if (chan->conn_state & L2CAP_CONN_SREJ_SENT) {
3353 bt_cb(skb)->tx_seq = tx_seq;
3354 bt_cb(skb)->sar = sar;
3355 __skb_queue_tail(SREJ_QUEUE(sk), skb);
3359 err = l2cap_push_rx_skb(chan, skb, rx_control);
/* F-bit on an I-frame acts like a REJ response: retransmit unless a
 * REJ was already acted on. */
3363 if (rx_control & L2CAP_CTRL_FINAL) {
3364 if (chan->conn_state & L2CAP_CONN_REJ_ACT)
3365 chan->conn_state &= ~L2CAP_CONN_REJ_ACT;
3367 l2cap_retransmit_frames(chan);
3372 chan->num_acked = (chan->num_acked + 1) % num_to_ack;
3373 if (chan->num_acked == num_to_ack - 1)
3374 l2cap_send_ack(chan);
/* Handle a Receiver Ready (RR) S-frame: acknowledge frames up to
 * req_seq, then react to the P/F bits — a poll demands an immediate
 * response (SREJ tail while in recovery, else I/RR/RNR), a final bit
 * closes an outstanding REJ exchange, and a plain RR simply resumes
 * transmission. */
3383 static inline void l2cap_data_channel_rrframe(struct l2cap_chan *chan, u16 rx_control)
3385 struct sock *sk = chan->sk;
3387 BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk, __get_reqseq(rx_control),
3390 chan->expected_ack_seq = __get_reqseq(rx_control);
3391 l2cap_drop_acked_frames(chan);
/* Peer polled us (P-bit): we must answer with the F-bit set. */
3393 if (rx_control & L2CAP_CTRL_POLL) {
3394 chan->conn_state |= L2CAP_CONN_SEND_FBIT;
3395 if (chan->conn_state & L2CAP_CONN_SREJ_SENT) {
3396 if ((chan->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
3397 (chan->unacked_frames > 0))
3398 __mod_retrans_timer();
3400 chan->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3401 l2cap_send_srejtail(chan);
3403 l2cap_send_i_or_rr_or_rnr(chan);
/* F-bit: response to our earlier poll; retransmit if a REJ is still
 * outstanding. */
3406 } else if (rx_control & L2CAP_CTRL_FINAL) {
3407 chan->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3409 if (chan->conn_state & L2CAP_CONN_REJ_ACT)
3410 chan->conn_state &= ~L2CAP_CONN_REJ_ACT;
3412 l2cap_retransmit_frames(chan);
/* Plain RR: peer is no longer busy, resume sending. */
3415 if ((chan->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
3416 (chan->unacked_frames > 0))
3417 __mod_retrans_timer();
3419 chan->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3420 if (chan->conn_state & L2CAP_CONN_SREJ_SENT)
3421 l2cap_send_ack(chan);
3423 l2cap_ertm_send(chan);
/* Handle a Reject (REJ) S-frame: the peer is asking for retransmission
 * starting at req_seq.  Acked frames before it are dropped, then the
 * unacked run is retransmitted; REJ_ACT is latched while waiting for
 * an F-bit so a later F-bit response is not treated as a second REJ. */
3427 static inline void l2cap_data_channel_rejframe(struct l2cap_chan *chan, u16 rx_control)
3429 u8 tx_seq = __get_reqseq(rx_control);
3431 BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, tx_seq, rx_control);
3433 chan->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3435 chan->expected_ack_seq = tx_seq;
3436 l2cap_drop_acked_frames(chan);
/* REJ with F-bit: only retransmit if a prior REJ was not already
 * acted upon. */
3438 if (rx_control & L2CAP_CTRL_FINAL) {
3439 if (chan->conn_state & L2CAP_CONN_REJ_ACT)
3440 chan->conn_state &= ~L2CAP_CONN_REJ_ACT;
3442 l2cap_retransmit_frames(chan);
3444 l2cap_retransmit_frames(chan);
/* If we are mid-poll (WAIT_F), remember we already serviced this REJ. */
3446 if (chan->conn_state & L2CAP_CONN_WAIT_F)
3447 chan->conn_state |= L2CAP_CONN_REJ_ACT;
/* Handle a Select Reject (SREJ) S-frame: retransmit exactly the single
 * frame with sequence number req_seq.  P-bit additionally acks prior
 * frames and demands an F-bit reply; F-bit closes a matching earlier
 * SREJ exchange (SREJ_ACT latched around WAIT_F, as with REJ). */
3450 static inline void l2cap_data_channel_srejframe(struct l2cap_chan *chan, u16 rx_control)
3452 u8 tx_seq = __get_reqseq(rx_control);
3454 BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, tx_seq, rx_control);
3456 chan->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3458 if (rx_control & L2CAP_CTRL_POLL) {
3459 chan->expected_ack_seq = tx_seq;
3460 l2cap_drop_acked_frames(chan);
3462 chan->conn_state |= L2CAP_CONN_SEND_FBIT;
3463 l2cap_retransmit_one_frame(chan, tx_seq);
3465 l2cap_ertm_send(chan);
/* Remember which seq this SREJ(P) covered so the matching F-bit
 * response can be recognized. */
3467 if (chan->conn_state & L2CAP_CONN_WAIT_F) {
3468 chan->srej_save_reqseq = tx_seq;
3469 chan->conn_state |= L2CAP_CONN_SREJ_ACT;
3471 } else if (rx_control & L2CAP_CTRL_FINAL) {
/* F-bit matching the saved SREJ: already handled, just clear state. */
3472 if ((chan->conn_state & L2CAP_CONN_SREJ_ACT) &&
3473 chan->srej_save_reqseq == tx_seq)
3474 chan->conn_state &= ~L2CAP_CONN_SREJ_ACT;
3476 l2cap_retransmit_one_frame(chan, tx_seq);
3478 l2cap_retransmit_one_frame(chan, tx_seq);
3479 if (chan->conn_state & L2CAP_CONN_WAIT_F) {
3480 chan->srej_save_reqseq = tx_seq;
3481 chan->conn_state |= L2CAP_CONN_SREJ_ACT;
/* Handle a Receiver Not Ready (RNR) S-frame: mark the peer busy, ack
 * frames up to req_seq, stop retransmitting, and answer a poll with
 * RR(F) — or, while in SREJ recovery, with the SREJ tail / a plain RR. */
3486 static inline void l2cap_data_channel_rnrframe(struct l2cap_chan *chan, u16 rx_control)
3488 u8 tx_seq = __get_reqseq(rx_control);
3490 BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, tx_seq, rx_control);
3492 chan->conn_state |= L2CAP_CONN_REMOTE_BUSY;
3493 chan->expected_ack_seq = tx_seq;
3494 l2cap_drop_acked_frames(chan);
3496 if (rx_control & L2CAP_CTRL_POLL)
3497 chan->conn_state |= L2CAP_CONN_SEND_FBIT;
/* Not in SREJ recovery: halt retransmission while the peer is busy. */
3499 if (!(chan->conn_state & L2CAP_CONN_SREJ_SENT)) {
3500 del_timer(&chan->retrans_timer);
3501 if (rx_control & L2CAP_CTRL_POLL)
3502 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_FINAL);
/* In SREJ recovery: a poll gets the SREJ tail, otherwise plain RR. */
3506 if (rx_control & L2CAP_CTRL_POLL)
3507 l2cap_send_srejtail(chan);
3509 l2cap_send_sframe(chan, L2CAP_SUPER_RCV_READY);
/* Dispatch a received ERTM S-frame to the per-type handler (RR / REJ /
 * SREJ / RNR).  An F-bit answering our poll first clears WAIT_F and
 * restarts the retransmit timer if frames remain unacked. */
3512 static inline int l2cap_data_channel_sframe(struct l2cap_chan *chan, u16 rx_control, struct sk_buff *skb)
3514 BT_DBG("chan %p rx_control 0x%4.4x len %d", chan, rx_control, skb->len);
3516 if (L2CAP_CTRL_FINAL & rx_control &&
3517 chan->conn_state & L2CAP_CONN_WAIT_F) {
3518 del_timer(&chan->monitor_timer);
3519 if (chan->unacked_frames > 0)
3520 __mod_retrans_timer();
3521 chan->conn_state &= ~L2CAP_CONN_WAIT_F;
3524 switch (rx_control & L2CAP_CTRL_SUPERVISE) {
3525 case L2CAP_SUPER_RCV_READY:
3526 l2cap_data_channel_rrframe(chan, rx_control);
3529 case L2CAP_SUPER_REJECT:
3530 l2cap_data_channel_rejframe(chan, rx_control);
3533 case L2CAP_SUPER_SELECT_REJECT:
3534 l2cap_data_channel_srejframe(chan, rx_control);
3537 case L2CAP_SUPER_RCV_NOT_READY:
3538 l2cap_data_channel_rnrframe(chan, rx_control);
/* Entry point for an ERTM PDU on a connected channel: strip and verify
 * FCS, validate payload length against MPS and req_seq against the
 * window of unacked frames, then hand the frame to the I-frame or
 * S-frame path.  Invalid frames tear the channel down (ECONNRESET). */
3546 static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb)
3548 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
3549 struct l2cap_pinfo *pi = l2cap_pi(sk);
3552 int len, next_tx_seq_offset, req_seq_offset;
3554 control = get_unaligned_le16(skb->data);
3559 * We can just drop the corrupted I-frame here.
3560 * Receiver will miss it and start proper recovery
3561 * procedures and ask retransmission.
3563 if (l2cap_check_fcs(pi, skb))
/* A SAR start fragment carries an extra 2-byte SDU-length field. */
3566 if (__is_sar_start(control) && __is_iframe(control))
3569 if (pi->fcs == L2CAP_FCS_CRC16)
/* Payload larger than the negotiated MPS: protocol violation. */
3572 if (len > pi->mps) {
3573 l2cap_send_disconn_req(pi->conn, chan, ECONNRESET);
/* req_seq must fall within [expected_ack_seq, next_tx_seq] mod 64;
 * anything else acknowledges frames we never sent. */
3577 req_seq = __get_reqseq(control);
3578 req_seq_offset = (req_seq - chan->expected_ack_seq) % 64;
3579 if (req_seq_offset < 0)
3580 req_seq_offset += 64;
3582 next_tx_seq_offset =
3583 (chan->next_tx_seq - chan->expected_ack_seq) % 64;
3584 if (next_tx_seq_offset < 0)
3585 next_tx_seq_offset += 64;
3587 /* check for invalid req-seq */
3588 if (req_seq_offset > next_tx_seq_offset) {
3589 l2cap_send_disconn_req(pi->conn, chan, ECONNRESET);
3593 if (__is_iframe(control)) {
3595 l2cap_send_disconn_req(pi->conn, chan, ECONNRESET);
3599 l2cap_data_channel_iframe(chan, control, skb);
/* S-frames must carry no payload (len check in the elided lines). */
3603 l2cap_send_disconn_req(pi->conn, chan, ECONNRESET);
3607 l2cap_data_channel_sframe(chan, control, skb);
/* Deliver an L2CAP data PDU to the channel identified by scid, routing
 * by the channel's operating mode: Basic (direct socket queue), ERTM
 * (full state machine, via backlog if the socket is owned by user
 * context), or Streaming (SAR reassembly, lost frames skipped). */
3617 static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
3619 struct l2cap_chan *chan;
3621 struct l2cap_pinfo *pi;
3626 chan = l2cap_get_chan_by_scid(conn, cid);
3628 BT_DBG("unknown cid 0x%4.4x", cid);
3635 BT_DBG("sk %p, len %d", sk, skb->len);
3637 if (sk->sk_state != BT_CONNECTED)
3641 case L2CAP_MODE_BASIC:
3642 /* If socket recv buffers overflows we drop data here
3643 * which is *bad* because L2CAP has to be reliable.
3644 * But we don't have any other choice. L2CAP doesn't
3645 * provide flow control mechanism. */
3647 if (pi->imtu < skb->len)
3650 if (!sock_queue_rcv_skb(sk, skb))
3654 case L2CAP_MODE_ERTM:
/* Socket locked by userspace: defer processing to the backlog so
 * the ERTM state machine runs in a single context. */
3655 if (!sock_owned_by_user(sk)) {
3656 l2cap_ertm_data_rcv(sk, skb);
3658 if (sk_add_backlog(sk, skb))
3664 case L2CAP_MODE_STREAMING:
3665 control = get_unaligned_le16(skb->data);
3669 if (l2cap_check_fcs(pi, skb))
3672 if (__is_sar_start(control))
3675 if (pi->fcs == L2CAP_FCS_CRC16)
/* Streaming carries I-frames only; bad length or an S-frame is
 * silently dropped. */
3678 if (len > pi->mps || len < 0 || __is_sframe(control))
3681 tx_seq = __get_txseq(control);
/* Lost frames are tolerated: resync expected_tx_seq to whatever
 * actually arrived. */
3683 if (chan->expected_tx_seq == tx_seq)
3684 chan->expected_tx_seq = (chan->expected_tx_seq + 1) % 64;
3686 chan->expected_tx_seq = (tx_seq + 1) % 64;
3688 l2cap_streaming_reassembly_sdu(chan, skb, control);
3693 BT_DBG("sk %p: bad mode 0x%2.2x", sk, pi->mode);
/* Deliver a connectionless-channel (CID 0x0002) PDU to the socket
 * bound to the given PSM on the local address, dropping it if no
 * suitable socket exists, it is in the wrong state, or the payload
 * exceeds its incoming MTU. */
3707 static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
3711 sk = l2cap_get_sock_by_psm(0, psm, conn->src);
3717 BT_DBG("sk %p, len %d", sk, skb->len);
3719 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_CONNECTED)
3722 if (l2cap_pi(sk)->imtu < skb->len)
3725 if (!sock_queue_rcv_skb(sk, skb))
/* Top-level demultiplexer for a fully reassembled L2CAP frame: strip
 * the basic header, verify the advertised length, and route by CID —
 * signalling (BR/EDR and LE), connectionless (PSM-addressed), or a
 * regular data channel. */
3737 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
3739 struct l2cap_hdr *lh = (void *) skb->data;
3743 skb_pull(skb, L2CAP_HDR_SIZE);
3744 cid = __le16_to_cpu(lh->cid);
3745 len = __le16_to_cpu(lh->len);
/* Header length must match the reassembled payload exactly. */
3747 if (len != skb->len) {
3752 BT_DBG("len %d, cid 0x%4.4x", len, cid);
3755 case L2CAP_CID_LE_SIGNALING:
3756 case L2CAP_CID_SIGNALING:
3757 l2cap_sig_channel(conn, skb);
3760 case L2CAP_CID_CONN_LESS:
/* Connectionless frames carry the PSM in the first two octets. */
3761 psm = get_unaligned_le16(skb->data);
3763 l2cap_conless_channel(conn, psm, skb);
3767 l2cap_data_channel(conn, cid, skb);
3772 /* ---- L2CAP interface with lower layer (HCI) ---- */
/* HCI callback for an incoming ACL connection request: scan listening
 * L2CAP sockets and build the link-mode mask (accept / master role).
 * A socket bound to the exact local bdaddr takes precedence over one
 * bound to BDADDR_ANY.  Returns the chosen mask, 0 to ignore. */
3774 static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
3776 int exact = 0, lm1 = 0, lm2 = 0;
3777 register struct sock *sk;
3778 struct hlist_node *node;
/* Only ACL links carry L2CAP here. */
3780 if (type != ACL_LINK)
3783 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
3785 /* Find listening sockets and check their link_mode */
3786 read_lock(&l2cap_sk_list.lock);
3787 sk_for_each(sk, node, &l2cap_sk_list.head) {
3788 if (sk->sk_state != BT_LISTEN)
/* Exact local-address match wins over wildcard binding. */
3791 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
3792 lm1 |= HCI_LM_ACCEPT;
3793 if (l2cap_pi(sk)->role_switch)
3794 lm1 |= HCI_LM_MASTER;
3796 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
3797 lm2 |= HCI_LM_ACCEPT;
3798 if (l2cap_pi(sk)->role_switch)
3799 lm2 |= HCI_LM_MASTER;
3802 read_unlock(&l2cap_sk_list.lock);
3804 return exact ? lm1 : lm2;
/* HCI callback when an ACL/LE link completes: on success create the
 * L2CAP connection object and run pending channel setup; on failure
 * tear down anything associated with the link. */
3807 static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
3809 struct l2cap_conn *conn;
3811 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
3813 if (!(hcon->type == ACL_LINK || hcon->type == LE_LINK))
3817 conn = l2cap_conn_add(hcon, status);
3819 l2cap_conn_ready(conn);
3821 l2cap_conn_del(hcon, bt_err(status));
/* HCI callback asking which reason code to use when disconnecting the
 * link; reports the reason recorded on the L2CAP connection. */
3826 static int l2cap_disconn_ind(struct hci_conn *hcon)
3828 struct l2cap_conn *conn = hcon->l2cap_data;
3830 BT_DBG("hcon %p", hcon);
3832 if (hcon->type != ACL_LINK || !conn)
3835 return conn->disc_reason;
/* HCI callback when the link has actually gone down: destroy the
 * L2CAP connection and close all of its channels. */
3838 static int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
3840 BT_DBG("hcon %p reason %d", hcon, reason);
3842 if (!(hcon->type == ACL_LINK || hcon->type == LE_LINK))
3845 l2cap_conn_del(hcon, bt_err(reason));
/* React to an encryption change on a connected SEQPACKET/STREAM socket:
 * if encryption dropped, MEDIUM-security channels get a 5 s grace timer
 * (re-encryption may follow) while HIGH-security channels are closed;
 * if encryption came up, clear any pending MEDIUM-security timer. */
3850 static inline void l2cap_check_encryption(struct sock *sk, u8 encrypt)
3852 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM)
3855 if (encrypt == 0x00) {
3856 if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM) {
3857 l2cap_sock_clear_timer(sk);
3858 l2cap_sock_set_timer(sk, HZ * 5);
3859 } else if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
3860 __l2cap_sock_close(sk, ECONNREFUSED);
3862 if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM)
3863 l2cap_sock_clear_timer(sk);
/* HCI callback when an authentication/encryption procedure finishes.
 * Walks every channel on the connection: established channels get the
 * encryption-change handling above; channels blocked in BT_CONNECT now
 * send their Connection Request; channels in BT_CONNECT2 answer the
 * peer's pending request with success or a security block. */
3867 static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
3869 struct l2cap_conn *conn = hcon->l2cap_data;
3870 struct l2cap_chan *chan;
3875 BT_DBG("conn %p", conn);
3877 read_lock(&conn->chan_lock);
3879 list_for_each_entry(chan, &conn->chan_l, list) {
3880 struct sock *sk = chan->sk;
/* Channel is still waiting for a pending connect; skip for now. */
3884 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_CONNECT_PEND) {
3889 if (!status && (sk->sk_state == BT_CONNECTED ||
3890 sk->sk_state == BT_CONFIG)) {
3891 l2cap_check_encryption(sk, encrypt);
/* Outgoing channel was waiting on security: fire the Connection
 * Request now that the procedure succeeded. */
3896 if (sk->sk_state == BT_CONNECT) {
3898 struct l2cap_conn_req req;
3899 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
3900 req.psm = l2cap_pi(sk)->psm;
3902 chan->ident = l2cap_get_ident(conn);
3903 l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
3905 l2cap_send_cmd(conn, chan->ident,
3906 L2CAP_CONN_REQ, sizeof(req), &req);
3908 l2cap_sock_clear_timer(sk);
3909 l2cap_sock_set_timer(sk, HZ / 10);
/* Incoming channel held in CONNECT2: answer the peer according to
 * whether security completed successfully. */
3911 } else if (sk->sk_state == BT_CONNECT2) {
3912 struct l2cap_conn_rsp rsp;
3916 sk->sk_state = BT_CONFIG;
3917 result = L2CAP_CR_SUCCESS;
3919 sk->sk_state = BT_DISCONN;
3920 l2cap_sock_set_timer(sk, HZ / 10);
3921 result = L2CAP_CR_SEC_BLOCK;
3924 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
3925 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
3926 rsp.result = cpu_to_le16(result);
3927 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
3928 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
3935 read_unlock(&conn->chan_lock);
/* HCI callback receiving raw ACL data.  Reassembles L2CAP frames that
 * were fragmented across ACL packets: a start fragment (no ACL_CONT)
 * allocates conn->rx_skb sized from the L2CAP header, continuation
 * fragments are appended until rx_len reaches zero, then the complete
 * frame is handed to l2cap_recv_frame().  Any length inconsistency
 * marks the connection unreliable (ECOMM). */
3940 static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
3942 struct l2cap_conn *conn = hcon->l2cap_data;
3945 conn = l2cap_conn_add(hcon, 0);
3950 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
3952 if (!(flags & ACL_CONT)) {
3953 struct l2cap_hdr *hdr;
3954 struct l2cap_chan *chan;
/* A start fragment while reassembly is in progress means the
 * previous frame was truncated — discard it. */
3959 BT_ERR("Unexpected start frame (len %d)", skb->len);
3960 kfree_skb(conn->rx_skb);
3961 conn->rx_skb = NULL;
3963 l2cap_conn_unreliable(conn, ECOMM);
3966 /* Start fragment always begin with Basic L2CAP header */
3967 if (skb->len < L2CAP_HDR_SIZE) {
3968 BT_ERR("Frame is too short (len %d)", skb->len);
3969 l2cap_conn_unreliable(conn, ECOMM);
3973 hdr = (struct l2cap_hdr *) skb->data;
3974 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
3975 cid = __le16_to_cpu(hdr->cid);
3977 if (len == skb->len) {
3978 /* Complete frame received */
3979 l2cap_recv_frame(conn, skb);
3983 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
3985 if (skb->len > len) {
3986 BT_ERR("Frame is too long (len %d, expected len %d)",
3988 l2cap_conn_unreliable(conn, ECOMM);
/* Pre-check the destination channel's MTU so we can reject an
 * oversized frame before allocating the reassembly buffer. */
3992 chan = l2cap_get_chan_by_scid(conn, cid);
3994 if (chan && chan->sk) {
3995 struct sock *sk = chan->sk;
3997 if (l2cap_pi(sk)->imtu < len - L2CAP_HDR_SIZE) {
3998 BT_ERR("Frame exceeding recv MTU (len %d, "
4000 l2cap_pi(sk)->imtu);
4002 l2cap_conn_unreliable(conn, ECOMM);
4008 /* Allocate skb for the complete frame (with header) */
4009 conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
4013 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
4015 conn->rx_len = len - skb->len;
4017 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
/* Continuation with nothing outstanding: protocol error. */
4019 if (!conn->rx_len) {
4020 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
4021 l2cap_conn_unreliable(conn, ECOMM);
4025 if (skb->len > conn->rx_len) {
4026 BT_ERR("Fragment is too long (len %d, expected %d)",
4027 skb->len, conn->rx_len);
4028 kfree_skb(conn->rx_skb);
4029 conn->rx_skb = NULL;
4031 l2cap_conn_unreliable(conn, ECOMM);
4035 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
4037 conn->rx_len -= skb->len;
4039 if (!conn->rx_len) {
4040 /* Complete frame received */
4041 l2cap_recv_frame(conn, conn->rx_skb);
4042 conn->rx_skb = NULL;
/* seq_file show handler for /sys/kernel/debug/bluetooth/l2cap: dumps
 * one line per L2CAP socket (addresses, state, PSM, CIDs, MTUs,
 * security level) under the socket-list read lock. */
4051 static int l2cap_debugfs_show(struct seq_file *f, void *p)
4054 struct hlist_node *node;
4056 read_lock_bh(&l2cap_sk_list.lock);
4058 sk_for_each(sk, node, &l2cap_sk_list.head) {
4059 struct l2cap_pinfo *pi = l2cap_pi(sk);
4061 seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
4062 batostr(&bt_sk(sk)->src),
4063 batostr(&bt_sk(sk)->dst),
4064 sk->sk_state, __le16_to_cpu(pi->psm),
4066 pi->imtu, pi->omtu, pi->sec_level,
4070 read_unlock_bh(&l2cap_sk_list.lock);
/* debugfs open: bind the seq_file single-show handler. */
4075 static int l2cap_debugfs_open(struct inode *inode, struct file *file)
4077 return single_open(file, l2cap_debugfs_show, inode->i_private);
/* File operations for the l2cap debugfs entry (seq_file based). */
4080 static const struct file_operations l2cap_debugfs_fops = {
4081 .open = l2cap_debugfs_open,
4083 .llseek = seq_lseek,
4084 .release = single_release,
/* Dentry of the debugfs file, kept for removal in l2cap_exit(). */
4087 static struct dentry *l2cap_debugfs;
/* L2CAP's registration with the HCI core: callbacks invoked on link
 * events and incoming ACL data. */
4089 static struct hci_proto l2cap_hci_proto = {
4091 .id = HCI_PROTO_L2CAP,
4092 .connect_ind = l2cap_connect_ind,
4093 .connect_cfm = l2cap_connect_cfm,
4094 .disconn_ind = l2cap_disconn_ind,
4095 .disconn_cfm = l2cap_disconn_cfm,
4096 .security_cfm = l2cap_security_cfm,
4097 .recv_acldata = l2cap_recv_acldata
/* Module init: register the socket family, create the local-busy
 * workqueue, register with the HCI core, and create the debugfs entry
 * (debugfs failure is non-fatal).  Unwinds in reverse order on error. */
4100 int __init l2cap_init(void)
4104 err = l2cap_init_sockets();
4108 _busy_wq = create_singlethread_workqueue("l2cap");
4114 err = hci_register_proto(&l2cap_hci_proto);
4116 BT_ERR("L2CAP protocol registration failed");
4117 bt_sock_unregister(BTPROTO_L2CAP);
4122 l2cap_debugfs = debugfs_create_file("l2cap", 0444,
4123 bt_debugfs, NULL, &l2cap_debugfs_fops);
4125 BT_ERR("Failed to create L2CAP debug file");
/* Error path: tear down the workqueue and socket registration. */
4131 destroy_workqueue(_busy_wq);
4132 l2cap_cleanup_sockets();
/* Module exit: remove debugfs, flush and destroy the busy workqueue,
 * unregister from HCI, and release the socket family. */
4136 void l2cap_exit(void)
4138 debugfs_remove(l2cap_debugfs);
/* Flush before destroy so queued busy work cannot run after teardown. */
4140 flush_workqueue(_busy_wq);
4141 destroy_workqueue(_busy_wq);
4143 if (hci_unregister_proto(&l2cap_hci_proto) < 0)
4144 BT_ERR("L2CAP protocol unregistration failed");
4146 l2cap_cleanup_sockets();
/* Runtime knob (0644): force Basic mode by disabling ERTM negotiation. */
4149 module_param(disable_ertm, bool, 0644);
4150 MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");