2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
7 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License version 2 as
11 published by the Free Software Foundation;
13 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
14 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
16 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
17 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
18 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
19 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
20 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
22 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
23 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
24 SOFTWARE IS DISCLAIMED.
27 /* Bluetooth L2CAP core. */
29 #include <linux/module.h>
31 #include <linux/types.h>
32 #include <linux/capability.h>
33 #include <linux/errno.h>
34 #include <linux/kernel.h>
35 #include <linux/sched.h>
36 #include <linux/slab.h>
37 #include <linux/poll.h>
38 #include <linux/fcntl.h>
39 #include <linux/init.h>
40 #include <linux/interrupt.h>
41 #include <linux/socket.h>
42 #include <linux/skbuff.h>
43 #include <linux/list.h>
44 #include <linux/device.h>
45 #include <linux/debugfs.h>
46 #include <linux/seq_file.h>
47 #include <linux/uaccess.h>
48 #include <linux/crc16.h>
51 #include <asm/system.h>
52 #include <asm/unaligned.h>
54 #include <net/bluetooth/bluetooth.h>
55 #include <net/bluetooth/hci_core.h>
56 #include <net/bluetooth/l2cap.h>
/* Module-wide state and forward declarations.
 * NOTE(review): the left-hand digits are the original file's line numbers
 * and they skip values, so lines are missing from this dump everywhere
 * below; visible code is kept byte-identical. */
/* Feature mask advertised in information responses; fixed-channel support
 * is always claimed. */
60 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
/* Fixed-channel bitmap; 0x02 presumably marks the signalling channel —
 * TODO confirm against the L2CAP spec bit layout. */
61 static u8 l2cap_fixed_chan[8] = { 0x02, };
/* Workqueue servicing deferred "local busy" processing (l2cap_busy_work). */
63 static struct workqueue_struct *_busy_wq;
65 struct bt_sock_list l2cap_sk_list = {
66 .lock = __RW_LOCK_UNLOCKED(l2cap_sk_list.lock)
/* Forward declarations for routines used before their definitions. */
69 static void l2cap_busy_work(struct work_struct *work);
71 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
72 u8 code, u8 ident, u16 dlen, void *data);
73 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
75 static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb);
77 /* ---- L2CAP channels ---- */
/* Channel lookup helpers. The __-prefixed variants walk conn->chan_l and
 * expect the caller to hold conn->chan_lock; the wrappers take the read
 * lock themselves. Tails of these functions (return statements, braces)
 * are missing from this dump. */
/* Find the channel on @conn whose destination CID matches @cid. */
78 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn, u16 cid)
82 list_for_each_entry(c, &conn->chan_l, list) {
83 struct sock *s = c->sk;
84 if (l2cap_pi(s)->dcid == cid)
/* Find the channel on @conn whose source CID matches @cid. */
91 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
95 list_for_each_entry(c, &conn->chan_l, list) {
96 struct sock *s = c->sk;
97 if (l2cap_pi(s)->scid == cid)
103 /* Find channel with given SCID.
104 * Returns locked socket */
105 static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
107 struct l2cap_chan *c;
109 read_lock(&conn->chan_lock);
110 c = __l2cap_get_chan_by_scid(conn, cid);
/* Missing lines here presumably lock the found socket before unlocking
 * the list — TODO confirm against the full source. */
113 read_unlock(&conn->chan_lock);
/* Find the channel whose pending signalling command ident matches @ident. */
117 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
119 struct l2cap_chan *c;
121 list_for_each_entry(c, &conn->chan_l, list) {
122 if (c->ident == ident)
/* Locked wrapper around __l2cap_get_chan_by_ident(). */
128 static inline struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
130 struct l2cap_chan *c;
132 read_lock(&conn->chan_lock);
133 c = __l2cap_get_chan_by_ident(conn, ident);
136 read_unlock(&conn->chan_lock);
/* Allocate the first free dynamic source CID on @conn by linear scan of
 * the dynamic range; returns it (return statement missing from dump). */
140 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
142 u16 cid = L2CAP_CID_DYN_START;
144 for (; cid < L2CAP_CID_DYN_END; cid++) {
145 if (!__l2cap_get_chan_by_scid(conn, cid))
/* Allocate a zeroed l2cap_chan bound to @sk. GFP_ATOMIC because callers
 * may run in softirq/locked context. */
152 static struct l2cap_chan *l2cap_chan_alloc(struct sock *sk)
154 struct l2cap_chan *chan;
156 chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
/* Attach @chan to @conn: assign CIDs/MTU according to socket type and
 * link type, then link the channel into conn->chan_l.
 * Caller must hold conn->chan_lock for writing (see l2cap_chan_add). */
165 static void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
167 struct sock *sk = chan->sk;
169 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
170 l2cap_pi(sk)->psm, l2cap_pi(sk)->dcid);
/* 0x13: default disconnect reason until the link proves itself —
 * presumably "remote user terminated"; TODO confirm code meaning. */
172 conn->disc_reason = 0x13;
174 l2cap_pi(sk)->conn = conn;
176 if (sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM) {
177 if (conn->hcon->type == LE_LINK) {
/* LE links use the fixed LE data channel and LE default MTU. */
179 l2cap_pi(sk)->omtu = L2CAP_LE_DEFAULT_MTU;
180 l2cap_pi(sk)->scid = L2CAP_CID_LE_DATA;
181 l2cap_pi(sk)->dcid = L2CAP_CID_LE_DATA;
183 /* Alloc CID for connection-oriented socket */
184 l2cap_pi(sk)->scid = l2cap_alloc_cid(conn);
185 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
187 } else if (sk->sk_type == SOCK_DGRAM) {
188 /* Connectionless socket */
189 l2cap_pi(sk)->scid = L2CAP_CID_CONN_LESS;
190 l2cap_pi(sk)->dcid = L2CAP_CID_CONN_LESS;
191 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
193 /* Raw socket can send/recv signalling messages only */
194 l2cap_pi(sk)->scid = L2CAP_CID_SIGNALING;
195 l2cap_pi(sk)->dcid = L2CAP_CID_SIGNALING;
196 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
201 list_add(&chan->list, &conn->chan_l);
/* Tear down @chan: unlink it from its connection, mark the socket closed
 * (with @err), wake any accepting parent, and purge all ERTM state
 * (queues, retransmit/monitor/ack timers, SREJ list). */
205 * Must be called on the locked socket. */
206 void l2cap_chan_del(struct l2cap_chan *chan, int err)
208 struct sock *sk = chan->sk;
209 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
210 struct sock *parent = bt_sk(sk)->parent;
212 l2cap_sock_clear_timer(sk);
214 BT_DBG("sk %p, conn %p, err %d", sk, conn, err);
217 /* Delete from channel list */
218 write_lock_bh(&conn->chan_lock);
219 list_del(&chan->list);
220 write_unlock_bh(&conn->chan_lock);
222 l2cap_pi(sk)->conn = NULL;
/* Drop the hci_conn reference taken when the channel was added. */
224 hci_conn_put(conn->hcon);
227 sk->sk_state = BT_CLOSED;
228 sock_set_flag(sk, SOCK_ZAPPED);
/* If this was an un-accepted child, detach it and notify the listener. */
234 bt_accept_unlink(sk);
235 parent->sk_data_ready(parent, 0);
237 sk->sk_state_change(sk);
239 skb_queue_purge(TX_QUEUE(sk));
241 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
242 struct srej_list *l, *tmp;
244 del_timer(&l2cap_pi(sk)->retrans_timer);
245 del_timer(&l2cap_pi(sk)->monitor_timer);
246 del_timer(&l2cap_pi(sk)->ack_timer);
248 skb_queue_purge(SREJ_QUEUE(sk));
249 skb_queue_purge(BUSY_QUEUE(sk));
/* Free each outstanding selective-reject entry (freeing code is
 * missing from this dump). */
251 list_for_each_entry_safe(l, tmp, SREJ_LIST(sk), list) {
/* Map a socket's type/PSM/security level to the HCI authentication
 * requirement used when establishing the ACL link. */
260 static inline u8 l2cap_get_auth_type(struct sock *sk)
262 if (sk->sk_type == SOCK_RAW) {
/* Raw sockets (e.g. dedicated-bonding tools) request dedicated
 * bonding according to their security level. */
263 switch (l2cap_pi(sk)->sec_level) {
264 case BT_SECURITY_HIGH:
265 return HCI_AT_DEDICATED_BONDING_MITM;
266 case BT_SECURITY_MEDIUM:
267 return HCI_AT_DEDICATED_BONDING;
269 return HCI_AT_NO_BONDING;
/* PSM 0x0001 is SDP: never require more than SDP-level security. */
271 } else if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
272 if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW)
273 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
275 if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
276 return HCI_AT_NO_BONDING_MITM;
278 return HCI_AT_NO_BONDING;
/* All other sockets use general bonding. */
280 switch (l2cap_pi(sk)->sec_level) {
281 case BT_SECURITY_HIGH:
282 return HCI_AT_GENERAL_BONDING_MITM;
283 case BT_SECURITY_MEDIUM:
284 return HCI_AT_GENERAL_BONDING;
286 return HCI_AT_NO_BONDING;
291 /* Service level security */
292 static inline int l2cap_check_security(struct sock *sk)
294 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
297 auth_type = l2cap_get_auth_type(sk);
/* Delegate the actual check/upgrade to the HCI layer. */
299 return hci_conn_security(conn->hcon, l2cap_pi(sk)->sec_level,
/* Allocate the next signalling-command identifier for @conn.
 * Wraps within the kernel-reserved 1-128 range under conn->lock. */
303 u8 l2cap_get_ident(struct l2cap_conn *conn)
307 /* Get next available identificator.
308 * 1 - 128 are used by kernel.
309 * 129 - 199 are reserved.
310 * 200 - 254 are used by utilities like l2ping, etc.
313 spin_lock_bh(&conn->lock);
315 if (++conn->tx_ident > 128)
320 spin_unlock_bh(&conn->lock);
/* Build a signalling command and ship it on the ACL link; requests
 * no-flush framing when the controller supports it. */
325 void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
327 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
330 BT_DBG("code 0x%2.2x", code);
335 if (lmp_no_flush_capable(conn->hcon->hdev))
336 flags = ACL_START_NO_FLUSH;
340 hci_send_acl(conn->hcon, skb, flags);
/* Build and transmit an ERTM S-frame carrying @control on @chan.
 * Consumes any pending F-bit/P-bit state, appends an FCS when the
 * channel uses CRC16, and sends via the ACL link. */
343 static inline void l2cap_send_sframe(struct l2cap_chan *chan, u16 control)
346 struct l2cap_hdr *lh;
347 struct l2cap_pinfo *pi = l2cap_pi(chan->sk);
348 struct l2cap_conn *conn = pi->conn;
/* NOTE(review): casts l2cap_pinfo* to sock* — relies on pinfo
 * embedding the sock at offset 0; confirm against struct layout. */
349 struct sock *sk = (struct sock *)pi;
350 int count, hlen = L2CAP_HDR_SIZE + 2;
353 if (sk->sk_state != BT_CONNECTED)
/* With CRC16 the header length grows by the 2-byte FCS (the
 * hlen += 2 line is missing from this dump). */
356 if (pi->fcs == L2CAP_FCS_CRC16)
359 BT_DBG("pi %p, control 0x%2.2x", pi, control);
361 count = min_t(unsigned int, conn->mtu, hlen);
362 control |= L2CAP_CTRL_FRAME_TYPE;
364 if (chan->conn_state & L2CAP_CONN_SEND_FBIT) {
365 control |= L2CAP_CTRL_FINAL;
366 chan->conn_state &= ~L2CAP_CONN_SEND_FBIT;
369 if (chan->conn_state & L2CAP_CONN_SEND_PBIT) {
370 control |= L2CAP_CTRL_POLL;
371 chan->conn_state &= ~L2CAP_CONN_SEND_PBIT;
374 skb = bt_skb_alloc(count, GFP_ATOMIC);
378 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
379 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
380 lh->cid = cpu_to_le16(pi->dcid);
381 put_unaligned_le16(control, skb_put(skb, 2));
383 if (pi->fcs == L2CAP_FCS_CRC16) {
/* FCS covers header + control (everything but the FCS itself). */
384 u16 fcs = crc16(0, (u8 *)lh, count - 2);
385 put_unaligned_le16(fcs, skb_put(skb, 2));
388 if (lmp_no_flush_capable(conn->hcon->hdev))
389 flags = ACL_START_NO_FLUSH;
393 hci_send_acl(pi->conn->hcon, skb, flags);
/* Send RR, or RNR when we are locally busy; tags the frame with the
 * current buffer_seq as ReqSeq. */
396 static inline void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, u16 control)
398 if (chan->conn_state & L2CAP_CONN_LOCAL_BUSY) {
399 control |= L2CAP_SUPER_RCV_NOT_READY;
400 chan->conn_state |= L2CAP_CONN_RNR_SENT;
402 control |= L2CAP_SUPER_RCV_READY;
404 control |= chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
406 l2cap_send_sframe(chan, control);
/* True when no connect request is already outstanding for @sk. */
409 static inline int __l2cap_no_conn_pending(struct sock *sk)
411 return !(l2cap_pi(sk)->conf_state & L2CAP_CONF_CONNECT_PEND);
/* Kick off channel establishment: if the remote feature mask is already
 * known, send a connect request (security permitting); otherwise send
 * an information request first and arm the info timer. */
414 static void l2cap_do_start(struct l2cap_chan *chan)
416 struct sock *sk = chan->sk;
417 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
419 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
/* Feature exchange still in flight — wait for it to finish. */
420 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
423 if (l2cap_check_security(sk) && __l2cap_no_conn_pending(sk)) {
424 struct l2cap_conn_req req;
425 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
426 req.psm = l2cap_pi(sk)->psm;
428 chan->ident = l2cap_get_ident(conn);
429 l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
431 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ,
435 struct l2cap_info_req req;
436 req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
438 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
439 conn->info_ident = l2cap_get_ident(conn);
441 mod_timer(&conn->info_timer, jiffies +
442 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
444 l2cap_send_cmd(conn, conn->info_ident,
445 L2CAP_INFO_REQ, sizeof(req), &req);
/* Non-zero when @mode (ERTM/streaming) is supported by both the local
 * feature mask and the remote @feat_mask. */
449 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
451 u32 local_feat_mask = l2cap_feat_mask;
/* Local ERTM/streaming enablement is conditional; the guarding test
 * is missing from this dump. */
453 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
456 case L2CAP_MODE_ERTM:
457 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
458 case L2CAP_MODE_STREAMING:
459 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
/* Abort the channel with a disconnect request: drop pending TX, stop
 * ERTM timers, send L2CAP_DISCONN_REQ and move the socket to
 * BT_DISCONN with @err. */
465 void l2cap_send_disconn_req(struct l2cap_conn *conn, struct sock *sk, int err)
467 struct l2cap_disconn_req req;
472 skb_queue_purge(TX_QUEUE(sk));
474 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
475 del_timer(&l2cap_pi(sk)->retrans_timer);
476 del_timer(&l2cap_pi(sk)->monitor_timer);
477 del_timer(&l2cap_pi(sk)->ack_timer);
480 req.dcid = cpu_to_le16(l2cap_pi(sk)->dcid);
481 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
482 l2cap_send_cmd(conn, l2cap_get_ident(conn),
483 L2CAP_DISCONN_REQ, sizeof(req), &req);
485 sk->sk_state = BT_DISCONN;
489 /* ---- L2CAP connections ---- */
/* Walk every channel on @conn and advance its state machine: issue
 * connect requests for BT_CONNECT channels, answer BT_CONNECT2 channels
 * with a connect response (success, pending-authorization, or
 * pending-authentication), and start configuration when appropriate. */
490 static void l2cap_conn_start(struct l2cap_conn *conn)
492 struct l2cap_chan *chan, *tmp;
494 BT_DBG("conn %p", conn);
496 read_lock(&conn->chan_lock);
498 list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
499 struct sock *sk = chan->sk;
/* Only connection-oriented sockets take part in connect/config. */
503 if (sk->sk_type != SOCK_SEQPACKET &&
504 sk->sk_type != SOCK_STREAM) {
509 if (sk->sk_state == BT_CONNECT) {
510 struct l2cap_conn_req req;
512 if (!l2cap_check_security(sk) ||
513 !__l2cap_no_conn_pending(sk)) {
/* Required mode unsupported by the peer and mandated by the
 * device: close the channel instead of connecting. */
518 if (!l2cap_mode_supported(l2cap_pi(sk)->mode,
520 && l2cap_pi(sk)->conf_state &
521 L2CAP_CONF_STATE2_DEVICE) {
522 /* __l2cap_sock_close() calls list_del(chan)
523 * so release the lock */
524 read_unlock_bh(&conn->chan_lock);
525 __l2cap_sock_close(sk, ECONNRESET);
526 read_lock_bh(&conn->chan_lock);
531 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
532 req.psm = l2cap_pi(sk)->psm;
534 chan->ident = l2cap_get_ident(conn);
535 l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
537 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ,
540 } else if (sk->sk_state == BT_CONNECT2) {
541 struct l2cap_conn_rsp rsp;
543 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
544 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
546 if (l2cap_check_security(sk)) {
547 if (bt_sk(sk)->defer_setup) {
/* Userspace must accept() first: report "pending,
 * authorization pending" and wake the listener. */
548 struct sock *parent = bt_sk(sk)->parent;
549 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
550 rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
551 parent->sk_data_ready(parent, 0);
554 sk->sk_state = BT_CONFIG;
555 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
556 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
559 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
560 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
563 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
/* Skip configuration if already requested or connect failed. */
566 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT ||
567 rsp.result != L2CAP_CR_SUCCESS) {
572 l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
573 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
574 l2cap_build_conf_req(chan, buf), buf);
575 chan->num_conf_req++;
581 read_unlock(&conn->chan_lock);
584 /* Find socket with cid and source bdaddr.
585 * Returns closest match, locked.
/* Prefers an exact source-address match over a BDADDR_ANY wildcard
 * listener (sk1 holds the wildcard fallback). */
587 static struct sock *l2cap_get_sock_by_scid(int state, __le16 cid, bdaddr_t *src)
589 struct sock *s, *sk = NULL, *sk1 = NULL;
590 struct hlist_node *node;
592 read_lock(&l2cap_sk_list.lock);
594 sk_for_each(sk, node, &l2cap_sk_list.head) {
595 if (state && sk->sk_state != state)
598 if (l2cap_pi(sk)->scid == cid) {
600 if (!bacmp(&bt_sk(sk)->src, src))
604 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
611 read_unlock(&l2cap_sk_list.lock);
/* Accept an incoming LE connection: find the LE-data listener, create a
 * child socket + channel, attach them to @conn and mark connected. */
616 static void l2cap_le_conn_ready(struct l2cap_conn *conn)
618 struct sock *parent, *uninitialized_var(sk);
619 struct l2cap_chan *chan;
623 /* Check if we have socket listening on cid */
624 parent = l2cap_get_sock_by_scid(BT_LISTEN, L2CAP_CID_LE_DATA,
629 /* Check for backlog size */
630 if (sk_acceptq_is_full(parent)) {
631 BT_DBG("backlog full %d", parent->sk_ack_backlog);
635 sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP, GFP_ATOMIC);
639 chan = l2cap_chan_alloc(sk);
645 write_lock_bh(&conn->chan_lock);
647 hci_conn_hold(conn->hcon);
649 l2cap_sock_init(sk, parent);
651 bacpy(&bt_sk(sk)->src, conn->src);
652 bacpy(&bt_sk(sk)->dst, conn->dst);
654 bt_accept_enqueue(parent, sk);
656 __l2cap_chan_add(conn, chan);
658 l2cap_pi(sk)->chan = chan;
660 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
662 sk->sk_state = BT_CONNECTED;
/* Wake the listener so accept() can pick up the new child. */
663 parent->sk_data_ready(parent, 0);
665 write_unlock_bh(&conn->chan_lock);
668 bh_unlock_sock(parent);
/* The underlying HCI link is up: handle LE incoming connections, mark
 * LE and non-connection-oriented channels connected, and start
 * establishment for channels waiting in BT_CONNECT. */
671 static void l2cap_conn_ready(struct l2cap_conn *conn)
673 struct l2cap_chan *chan;
675 BT_DBG("conn %p", conn);
677 if (!conn->hcon->out && conn->hcon->type == LE_LINK)
678 l2cap_le_conn_ready(conn);
680 read_lock(&conn->chan_lock);
682 list_for_each_entry(chan, &conn->chan_l, list) {
683 struct sock *sk = chan->sk;
687 if (conn->hcon->type == LE_LINK) {
/* LE has no connect/config phase — connected immediately. */
688 l2cap_sock_clear_timer(sk);
689 sk->sk_state = BT_CONNECTED;
690 sk->sk_state_change(sk);
693 if (sk->sk_type != SOCK_SEQPACKET &&
694 sk->sk_type != SOCK_STREAM) {
695 l2cap_sock_clear_timer(sk);
696 sk->sk_state = BT_CONNECTED;
697 sk->sk_state_change(sk);
698 } else if (sk->sk_state == BT_CONNECT)
699 l2cap_do_start(chan);
704 read_unlock(&conn->chan_lock);
707 /* Notify sockets that we cannot guaranty reliability anymore */
/* Delivers @err to every channel with force_reliable set (the error
 * propagation line is missing from this dump). */
708 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
710 struct l2cap_chan *chan;
712 BT_DBG("conn %p", conn);
714 read_lock(&conn->chan_lock);
716 list_for_each_entry(chan, &conn->chan_l, list) {
717 struct sock *sk = chan->sk;
719 if (l2cap_pi(sk)->force_reliable)
723 read_unlock(&conn->chan_lock);
/* Info-request timer expired: give up on the feature exchange and let
 * pending channels proceed with defaults. */
726 static void l2cap_info_timeout(unsigned long arg)
728 struct l2cap_conn *conn = (void *) arg;
730 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
731 conn->info_ident = 0;
733 l2cap_conn_start(conn);
/* Get-or-create the l2cap_conn bound to @hcon: allocate it, pick the
 * link MTU, initialise locks/channel list, and arm the info timer for
 * BR/EDR links. Returns the connection (return missing from dump). */
736 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
738 struct l2cap_conn *conn = hcon->l2cap_data;
743 conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
747 hcon->l2cap_data = conn;
750 BT_DBG("hcon %p conn %p", hcon, conn);
/* Use the controller's LE-specific MTU when available on LE links. */
752 if (hcon->hdev->le_mtu && hcon->type == LE_LINK)
753 conn->mtu = hcon->hdev->le_mtu;
755 conn->mtu = hcon->hdev->acl_mtu;
757 conn->src = &hcon->hdev->bdaddr;
758 conn->dst = &hcon->dst;
762 spin_lock_init(&conn->lock);
763 rwlock_init(&conn->chan_lock);
765 INIT_LIST_HEAD(&conn->chan_l);
/* LE links skip the feature-mask exchange, so no info timer. */
767 if (hcon->type != LE_LINK)
768 setup_timer(&conn->info_timer, l2cap_info_timeout,
769 (unsigned long) conn);
771 conn->disc_reason = 0x13;
/* Destroy the l2cap_conn on @hcon: free pending rx, delete every
 * channel with @err, stop the info timer and detach from the hcon. */
776 static void l2cap_conn_del(struct hci_conn *hcon, int err)
778 struct l2cap_conn *conn = hcon->l2cap_data;
779 struct l2cap_chan *chan, *l;
785 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
787 kfree_skb(conn->rx_skb);
790 list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
793 l2cap_chan_del(chan, err);
798 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
799 del_timer_sync(&conn->info_timer);
801 hcon->l2cap_data = NULL;
/* Locked wrapper adding @chan to @conn's channel list. */
805 static inline void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
807 write_lock_bh(&conn->chan_lock);
808 __l2cap_chan_add(conn, chan);
809 write_unlock_bh(&conn->chan_lock);
812 /* ---- Socket interface ---- */
814 /* Find socket with psm and source bdaddr.
815 * Returns closest match.
/* Exact source-address match wins; sk1 keeps a BDADDR_ANY wildcard
 * fallback. The loop's hlist node being non-NULL signals an exact hit. */
817 static struct sock *l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
819 struct sock *sk = NULL, *sk1 = NULL;
820 struct hlist_node *node;
822 read_lock(&l2cap_sk_list.lock);
824 sk_for_each(sk, node, &l2cap_sk_list.head) {
825 if (state && sk->sk_state != state)
828 if (l2cap_pi(sk)->psm == psm) {
830 if (!bacmp(&bt_sk(sk)->src, src))
834 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
839 read_unlock(&l2cap_sk_list.lock);
841 return node ? sk : sk1;
/* Initiate an outgoing L2CAP connection for @sk: resolve the route,
 * create the HCI link (LE or ACL based on dcid), attach a channel, and
 * either finish immediately (link already up) or wait in BT_CONNECT. */
844 int l2cap_do_connect(struct sock *sk)
846 bdaddr_t *src = &bt_sk(sk)->src;
847 bdaddr_t *dst = &bt_sk(sk)->dst;
848 struct l2cap_conn *conn;
849 struct l2cap_chan *chan;
850 struct hci_conn *hcon;
851 struct hci_dev *hdev;
855 BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst),
858 hdev = hci_get_route(dst, src);
860 return -EHOSTUNREACH;
862 hci_dev_lock_bh(hdev);
864 auth_type = l2cap_get_auth_type(sk);
/* LE data CID selects an LE link; everything else goes over ACL. */
866 if (l2cap_pi(sk)->dcid == L2CAP_CID_LE_DATA)
867 hcon = hci_connect(hdev, LE_LINK, dst,
868 l2cap_pi(sk)->sec_level, auth_type);
870 hcon = hci_connect(hdev, ACL_LINK, dst,
871 l2cap_pi(sk)->sec_level, auth_type);
878 conn = l2cap_conn_add(hcon, 0);
885 chan = l2cap_chan_alloc(sk);
892 /* Update source addr of the socket */
893 bacpy(src, conn->src);
895 l2cap_chan_add(conn, chan);
897 l2cap_pi(sk)->chan = chan;
899 sk->sk_state = BT_CONNECT;
900 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
902 if (hcon->state == BT_CONNECTED) {
903 if (sk->sk_type != SOCK_SEQPACKET &&
904 sk->sk_type != SOCK_STREAM) {
/* Raw/dgram sockets need no L2CAP connect handshake. */
905 l2cap_sock_clear_timer(sk);
906 if (l2cap_check_security(sk))
907 sk->sk_state = BT_CONNECTED;
909 l2cap_do_start(chan);
915 hci_dev_unlock_bh(hdev);
/* Sleep (interruptibly) until every outstanding ERTM I-frame has been
 * acknowledged or the connection goes away; used on socket shutdown.
 * Returns 0 or a signal/socket error. */
920 int __l2cap_wait_ack(struct sock *sk)
922 DECLARE_WAITQUEUE(wait, current);
926 add_wait_queue(sk_sleep(sk), &wait);
927 while ((l2cap_pi(sk)->unacked_frames > 0 && l2cap_pi(sk)->conn)) {
928 set_current_state(TASK_INTERRUPTIBLE);
933 if (signal_pending(current)) {
934 err = sock_intr_errno(timeo);
939 timeo = schedule_timeout(timeo);
942 err = sock_error(sk);
946 set_current_state(TASK_RUNNING);
947 remove_wait_queue(sk_sleep(sk), &wait);
/* Monitor timer expired with no response to our poll: retry up to
 * remote_max_tx times, then abort the channel with ECONNABORTED. */
951 static void l2cap_monitor_timeout(unsigned long arg)
953 struct l2cap_chan *chan = (void *) arg;
954 struct sock *sk = chan->sk;
956 BT_DBG("chan %p", chan);
959 if (l2cap_pi(sk)->retry_count >= l2cap_pi(sk)->remote_max_tx) {
960 l2cap_send_disconn_req(l2cap_pi(sk)->conn, sk, ECONNABORTED);
965 l2cap_pi(sk)->retry_count++;
966 __mod_monitor_timer();
968 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
/* Retransmission timer expired: enter the WAIT_F state and poll the
 * peer with an RR/RNR carrying the P bit. */
972 static void l2cap_retrans_timeout(unsigned long arg)
974 struct l2cap_chan *chan = (void *) arg;
975 struct sock *sk = chan->sk;
980 l2cap_pi(sk)->retry_count = 1;
981 __mod_monitor_timer();
983 chan->conn_state |= L2CAP_CONN_WAIT_F;
985 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
/* Drop acknowledged frames from the head of the TX queue (everything
 * before expected_ack_seq) and stop the retransmit timer once empty. */
989 static void l2cap_drop_acked_frames(struct l2cap_chan *chan)
991 struct sock *sk = chan->sk;
994 while ((skb = skb_peek(TX_QUEUE(sk))) &&
995 l2cap_pi(sk)->unacked_frames) {
996 if (bt_cb(skb)->tx_seq == chan->expected_ack_seq)
999 skb = skb_dequeue(TX_QUEUE(sk));
1002 l2cap_pi(sk)->unacked_frames--;
1005 if (!l2cap_pi(sk)->unacked_frames)
1006 del_timer(&l2cap_pi(sk)->retrans_timer);
/* Hand a fully-built L2CAP frame to the ACL layer, selecting no-flush
 * framing unless the socket opted into flushable packets. */
1009 void l2cap_do_send(struct sock *sk, struct sk_buff *skb)
1011 struct l2cap_pinfo *pi = l2cap_pi(sk);
1012 struct hci_conn *hcon = pi->conn->hcon;
1015 BT_DBG("sk %p, skb %p len %d", sk, skb, skb->len);
1017 if (!pi->flushable && lmp_no_flush_capable(hcon->hdev))
1018 flags = ACL_START_NO_FLUSH;
1022 hci_send_acl(hcon, skb, flags);
/* Streaming mode TX: drain the TX queue, stamping each frame's TxSeq
 * (mod 64) and recomputing the CRC16 FCS when enabled. No
 * retransmission state is kept in this mode. */
1025 void l2cap_streaming_send(struct l2cap_chan *chan)
1027 struct sock *sk = chan->sk;
1028 struct sk_buff *skb;
1029 struct l2cap_pinfo *pi = l2cap_pi(sk);
1032 while ((skb = skb_dequeue(TX_QUEUE(sk)))) {
1033 control = get_unaligned_le16(skb->data + L2CAP_HDR_SIZE);
1034 control |= chan->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT;
1035 put_unaligned_le16(control, skb->data + L2CAP_HDR_SIZE);
1037 if (pi->fcs == L2CAP_FCS_CRC16) {
1038 fcs = crc16(0, (u8 *)skb->data, skb->len - 2);
1039 put_unaligned_le16(fcs, skb->data + skb->len - 2);
1042 l2cap_do_send(sk, skb);
1044 chan->next_tx_seq = (chan->next_tx_seq + 1) % 64;
/* Retransmit the single I-frame with sequence number @tx_seq: locate it
 * in the TX queue, clone it, refresh control field (F bit, ReqSeq,
 * TxSeq) and FCS, then resend. Aborts the channel if the frame has
 * already been sent remote_max_tx times. */
1048 static void l2cap_retransmit_one_frame(struct l2cap_chan *chan, u8 tx_seq)
1050 struct sock *sk = chan->sk;
1051 struct l2cap_pinfo *pi = l2cap_pi(sk);
1052 struct sk_buff *skb, *tx_skb;
1055 skb = skb_peek(TX_QUEUE(sk));
1060 if (bt_cb(skb)->tx_seq == tx_seq)
1063 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1066 } while ((skb = skb_queue_next(TX_QUEUE(sk), skb)));
1068 if (pi->remote_max_tx &&
1069 bt_cb(skb)->retries == pi->remote_max_tx) {
1070 l2cap_send_disconn_req(pi->conn, sk, ECONNABORTED);
/* Clone shares the data buffer; the original keeps the retry count. */
1074 tx_skb = skb_clone(skb, GFP_ATOMIC);
1075 bt_cb(skb)->retries++;
1076 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1078 if (chan->conn_state & L2CAP_CONN_SEND_FBIT) {
1079 control |= L2CAP_CTRL_FINAL;
1080 chan->conn_state &= ~L2CAP_CONN_SEND_FBIT;
1083 control |= (chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1084 | (tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1086 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1088 if (pi->fcs == L2CAP_FCS_CRC16) {
1089 fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
1090 put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
1093 l2cap_do_send(sk, tx_skb);
/* ERTM TX path: send frames from sk_send_head while the transmit window
 * has room, stamping control/FCS on a clone of each frame, arming the
 * retransmit timer, and accounting unacked_frames.
 * Returns the number of frames sent (return missing from dump). */
1096 int l2cap_ertm_send(struct l2cap_chan *chan)
1098 struct sk_buff *skb, *tx_skb;
1099 struct sock *sk = chan->sk;
1100 struct l2cap_pinfo *pi = l2cap_pi(sk);
1104 if (sk->sk_state != BT_CONNECTED)
1107 while ((skb = sk->sk_send_head) && (!l2cap_tx_window_full(chan))) {
1109 if (pi->remote_max_tx &&
1110 bt_cb(skb)->retries == pi->remote_max_tx) {
1111 l2cap_send_disconn_req(pi->conn, sk, ECONNABORTED);
1115 tx_skb = skb_clone(skb, GFP_ATOMIC);
1117 bt_cb(skb)->retries++;
1119 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
/* Keep only the SAR bits; sequence/ack bits are rebuilt below. */
1120 control &= L2CAP_CTRL_SAR;
1122 if (chan->conn_state & L2CAP_CONN_SEND_FBIT) {
1123 control |= L2CAP_CTRL_FINAL;
1124 chan->conn_state &= ~L2CAP_CONN_SEND_FBIT;
1126 control |= (chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1127 | (chan->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1128 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1131 if (pi->fcs == L2CAP_FCS_CRC16) {
/* skb and tx_skb share the same data buffer (skb_clone), so
 * writing via skb->data updates the clone too. */
1132 fcs = crc16(0, (u8 *)skb->data, tx_skb->len - 2);
1133 put_unaligned_le16(fcs, skb->data + tx_skb->len - 2);
1136 l2cap_do_send(sk, tx_skb);
1138 __mod_retrans_timer();
1140 bt_cb(skb)->tx_seq = chan->next_tx_seq;
1141 chan->next_tx_seq = (chan->next_tx_seq + 1) % 64;
/* Count a frame as unacked only on its first transmission. */
1143 if (bt_cb(skb)->retries == 1)
1144 pi->unacked_frames++;
1148 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1149 sk->sk_send_head = NULL;
1151 sk->sk_send_head = skb_queue_next(TX_QUEUE(sk), skb);
/* Rewind the send pointer to the start of the TX queue and resend from
 * the last acknowledged sequence number (go-back-N style). */
1159 static int l2cap_retransmit_frames(struct l2cap_chan *chan)
1161 struct sock *sk = chan->sk;
1164 if (!skb_queue_empty(TX_QUEUE(sk)))
1165 sk->sk_send_head = TX_QUEUE(sk)->next;
1167 chan->next_tx_seq = chan->expected_ack_seq;
1168 ret = l2cap_ertm_send(chan);
/* Acknowledge received I-frames: send RNR when locally busy, otherwise
 * piggyback the ack on pending data if possible, else send an RR. */
1172 static void l2cap_send_ack(struct l2cap_chan *chan)
1176 control |= chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
1178 if (chan->conn_state & L2CAP_CONN_LOCAL_BUSY) {
1179 control |= L2CAP_SUPER_RCV_NOT_READY;
1180 chan->conn_state |= L2CAP_CONN_RNR_SENT;
1181 l2cap_send_sframe(chan, control);
/* Data was sent and carries the ack — no explicit S-frame needed. */
1185 if (l2cap_ertm_send(chan) > 0)
1188 control |= L2CAP_SUPER_RCV_READY;
1189 l2cap_send_sframe(chan, control);
/* Send a SREJ S-frame (with F bit) for the last entry on the SREJ list. */
1192 static void l2cap_send_srejtail(struct l2cap_chan *chan)
1194 struct srej_list *tail;
1197 control = L2CAP_SUPER_SELECT_REJECT;
1198 control |= L2CAP_CTRL_FINAL;
1200 tail = list_entry(SREJ_LIST(chan->sk)->prev, struct srej_list, list);
1201 control |= tail->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
1203 l2cap_send_sframe(chan, control);
/* Copy @len bytes of user iovec data into @skb: @count bytes into the
 * linear area, the remainder into MTU-sized fragments chained on
 * frag_list. Returns 0 or a negative errno (returns missing from dump). */
1206 static inline int l2cap_skbuff_fromiovec(struct sock *sk, struct msghdr *msg, int len, int count, struct sk_buff *skb)
1208 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1209 struct sk_buff **frag;
1212 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
1218 /* Continuation fragments (no L2CAP header) */
1219 frag = &skb_shinfo(skb)->frag_list;
1221 count = min_t(unsigned int, conn->mtu, len);
1223 *frag = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err);
1226 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
1232 frag = &(*frag)->next;
/* Build a connectionless (G-frame) PDU: L2CAP header + 2-byte PSM,
 * then the user payload. Returns the skb or ERR_PTR on failure. */
1238 struct sk_buff *l2cap_create_connless_pdu(struct sock *sk, struct msghdr *msg, size_t len)
1240 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1241 struct sk_buff *skb;
1242 int err, count, hlen = L2CAP_HDR_SIZE + 2;
1243 struct l2cap_hdr *lh;
1245 BT_DBG("sk %p len %d", sk, (int)len);
1247 count = min_t(unsigned int, (conn->mtu - hlen), len);
1248 skb = bt_skb_send_alloc(sk, count + hlen,
1249 msg->msg_flags & MSG_DONTWAIT, &err);
1251 return ERR_PTR(err);
1253 /* Create L2CAP header */
1254 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1255 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1256 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1257 put_unaligned_le16(l2cap_pi(sk)->psm, skb_put(skb, 2));
1259 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1260 if (unlikely(err < 0)) {
1262 return ERR_PTR(err);
/* Build a basic-mode B-frame: plain L2CAP header followed by the user
 * payload. Returns the skb or ERR_PTR on failure. */
1267 struct sk_buff *l2cap_create_basic_pdu(struct sock *sk, struct msghdr *msg, size_t len)
1269 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1270 struct sk_buff *skb;
1271 int err, count, hlen = L2CAP_HDR_SIZE;
1272 struct l2cap_hdr *lh;
1274 BT_DBG("sk %p len %d", sk, (int)len);
1276 count = min_t(unsigned int, (conn->mtu - hlen), len);
1277 skb = bt_skb_send_alloc(sk, count + hlen,
1278 msg->msg_flags & MSG_DONTWAIT, &err);
1280 return ERR_PTR(err);
1282 /* Create L2CAP header */
1283 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1284 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1285 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1287 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1288 if (unlikely(err < 0)) {
1290 return ERR_PTR(err);
/* Build an ERTM/streaming I-frame: header + 16-bit control, optional
 * 2-byte SDU length (SAR start), payload, and room for the FCS which
 * is zero-filled here and computed at transmit time. */
1295 struct sk_buff *l2cap_create_iframe_pdu(struct sock *sk, struct msghdr *msg, size_t len, u16 control, u16 sdulen)
1297 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1298 struct sk_buff *skb;
1299 int err, count, hlen = L2CAP_HDR_SIZE + 2;
1300 struct l2cap_hdr *lh;
1302 BT_DBG("sk %p len %d", sk, (int)len);
1305 return ERR_PTR(-ENOTCONN);
/* hlen adjustments for sdulen/FCS are in lines missing from dump. */
1310 if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16)
1313 count = min_t(unsigned int, (conn->mtu - hlen), len);
1314 skb = bt_skb_send_alloc(sk, count + hlen,
1315 msg->msg_flags & MSG_DONTWAIT, &err);
1317 return ERR_PTR(err);
1319 /* Create L2CAP header */
1320 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1321 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1322 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1323 put_unaligned_le16(control, skb_put(skb, 2));
1325 put_unaligned_le16(sdulen, skb_put(skb, 2));
1327 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1328 if (unlikely(err < 0)) {
1330 return ERR_PTR(err);
1333 if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16)
1334 put_unaligned_le16(0, skb_put(skb, 2));
1336 bt_cb(skb)->retries = 0;
/* Segment an SDU larger than the remote MPS into a START frame
 * (carrying the total SDU length), CONTINUE frames, and an END frame,
 * then splice the whole sequence onto the socket's TX queue. */
1340 int l2cap_sar_segment_sdu(struct sock *sk, struct msghdr *msg, size_t len)
1342 struct l2cap_pinfo *pi = l2cap_pi(sk);
1343 struct sk_buff *skb;
1344 struct sk_buff_head sar_queue;
1348 skb_queue_head_init(&sar_queue);
1349 control = L2CAP_SDU_START;
/* The START frame carries the full SDU length (@len) as sdulen. */
1350 skb = l2cap_create_iframe_pdu(sk, msg, pi->remote_mps, control, len);
1352 return PTR_ERR(skb);
1354 __skb_queue_tail(&sar_queue, skb);
1355 len -= pi->remote_mps;
1356 size += pi->remote_mps;
1361 if (len > pi->remote_mps) {
1362 control = L2CAP_SDU_CONTINUE;
1363 buflen = pi->remote_mps;
1365 control = L2CAP_SDU_END;
1369 skb = l2cap_create_iframe_pdu(sk, msg, buflen, control, 0);
/* On failure drop every fragment built so far. */
1371 skb_queue_purge(&sar_queue);
1372 return PTR_ERR(skb);
1375 __skb_queue_tail(&sar_queue, skb);
1379 skb_queue_splice_tail(&sar_queue, TX_QUEUE(sk));
1380 if (sk->sk_send_head == NULL)
1381 sk->sk_send_head = sar_queue.next;
/* Configuration finished: clear config state and wake whichever side is
 * waiting — the connecting socket or the accepting parent. */
1386 static void l2cap_chan_ready(struct sock *sk)
1388 struct sock *parent = bt_sk(sk)->parent;
1390 BT_DBG("sk %p, parent %p", sk, parent);
1392 l2cap_pi(sk)->conf_state = 0;
1393 l2cap_sock_clear_timer(sk);
1396 /* Outgoing channel.
1397 * Wake up socket sleeping on connect.
1399 sk->sk_state = BT_CONNECTED;
1400 sk->sk_state_change(sk);
1402 /* Incoming channel.
1403 * Wake up socket sleeping on accept.
1405 parent->sk_data_ready(parent, 0);
1409 /* Copy frame to all raw sockets on that connection */
1410 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
1412 struct sk_buff *nskb;
1413 struct l2cap_chan *chan;
1415 BT_DBG("conn %p", conn);
1417 read_lock(&conn->chan_lock);
1418 list_for_each_entry(chan, &conn->chan_l, list) {
1419 struct sock *sk = chan->sk;
1420 if (sk->sk_type != SOCK_RAW)
1423 /* Don't send frame to the socket it came from */
1426 nskb = skb_clone(skb, GFP_ATOMIC);
/* Clone freed if the receive queue rejects it (free line missing). */
1430 if (sock_queue_rcv_skb(sk, nskb))
1433 read_unlock(&conn->chan_lock);
1436 /* ---- L2CAP signalling commands ---- */
/* Allocate and fill an skb for a signalling command: L2CAP header on
 * the (LE or BR/EDR) signalling CID, command header, then @dlen bytes
 * of @data, fragmenting past the link MTU via frag_list. */
1437 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
1438 u8 code, u8 ident, u16 dlen, void *data)
1440 struct sk_buff *skb, **frag;
1441 struct l2cap_cmd_hdr *cmd;
1442 struct l2cap_hdr *lh;
1445 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
1446 conn, code, ident, dlen);
1448 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
1449 count = min_t(unsigned int, conn->mtu, len);
1451 skb = bt_skb_alloc(count, GFP_ATOMIC);
1455 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1456 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
1458 if (conn->hcon->type == LE_LINK)
1459 lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
1461 lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);
1463 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
1466 cmd->len = cpu_to_le16(dlen);
1469 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
1470 memcpy(skb_put(skb, count), data, count);
1476 /* Continuation fragments (no L2CAP header) */
1477 frag = &skb_shinfo(skb)->frag_list;
1479 count = min_t(unsigned int, conn->mtu, len);
1481 *frag = bt_skb_alloc(count, GFP_ATOMIC);
1485 memcpy(skb_put(*frag, count), data, count);
1490 frag = &(*frag)->next;
/* Decode one configuration option at *ptr.  Fills *type and *olen from the
 * option header and *val with the option value: 1/2/4-byte values are read
 * inline (little-endian), anything else is returned as a pointer into the
 * buffer.  Returns the total encoded length (header + value); the caller
 * uses it to advance through the option list. */
1500 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
1502 struct l2cap_conf_opt *opt = *ptr;
1505 len = L2CAP_CONF_OPT_SIZE + opt->len;
1513 *val = *((u8 *) opt->val);
1517 *val = get_unaligned_le16(opt->val);
1521 *val = get_unaligned_le32(opt->val);
/* Variable-length option: hand back a pointer, not a copy. */
1525 *val = (unsigned long) opt->val;
1529 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
/* Append one configuration option at *ptr: writes the option header, then
 * the value (1/2/4-byte values stored little-endian; larger values copied
 * from the pointer passed in val), and advances *ptr past the option. */
1533 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
1535 struct l2cap_conf_opt *opt = *ptr;
1537 BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
1544 *((u8 *) opt->val) = val;
1548 put_unaligned_le16(val, opt->val);
1552 put_unaligned_le32(val, opt->val);
/* For other sizes, val is a pointer to the raw option payload. */
1556 memcpy(opt->val, (void *) val, len);
1560 *ptr += L2CAP_CONF_OPT_SIZE + len;
/* ERTM ack timer callback: when the timer fires, acknowledge received
 * I-frames.  arg is the l2cap_chan pointer (see l2cap_ertm_init). */
1563 static void l2cap_ack_timeout(unsigned long arg)
1565 struct l2cap_chan *chan = (void *) arg;
/* Runs in timer (softirq) context, so use the bh socket lock. */
1567 bh_lock_sock(chan->sk);
1568 l2cap_send_ack(chan);
1569 bh_unlock_sock(chan->sk);
/* Reset per-channel ERTM state: zero the sequence/ack bookkeeping, arm the
 * retransmission, monitor and ack timers (all taking the chan pointer as
 * their timer argument), initialise the SREJ and busy queues, and route
 * backlogged packets through the ERTM receive path. */
1572 static inline void l2cap_ertm_init(struct l2cap_chan *chan)
1574 struct sock *sk = chan->sk;
1576 chan->expected_ack_seq = 0;
1577 l2cap_pi(sk)->unacked_frames = 0;
1578 chan->buffer_seq = 0;
1579 l2cap_pi(sk)->num_acked = 0;
1580 l2cap_pi(sk)->frames_sent = 0;
1582 setup_timer(&l2cap_pi(sk)->retrans_timer,
1583 l2cap_retrans_timeout, (unsigned long) chan);
1584 setup_timer(&l2cap_pi(sk)->monitor_timer,
1585 l2cap_monitor_timeout, (unsigned long) chan);
1586 setup_timer(&l2cap_pi(sk)->ack_timer,
1587 l2cap_ack_timeout, (unsigned long) chan);
1589 __skb_queue_head_init(SREJ_QUEUE(sk));
1590 __skb_queue_head_init(BUSY_QUEUE(sk));
1592 INIT_WORK(&l2cap_pi(sk)->busy_work, l2cap_busy_work);
/* Packets queued while the socket is owned by user context go through
 * the ERTM state machine instead of the default receive handler. */
1594 sk->sk_backlog_rcv = l2cap_ertm_data_rcv;
/* Pick the channel mode to use: keep ERTM/streaming only if the remote's
 * feature mask advertises support for it, otherwise fall back to basic
 * mode. */
1597 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
1600 case L2CAP_MODE_STREAMING:
1601 case L2CAP_MODE_ERTM:
1602 if (l2cap_mode_supported(mode, remote_feat_mask))
1606 return L2CAP_MODE_BASIC;
/* Build the outgoing Configure Request for this channel into data:
 * optionally an MTU option, then a mode-specific RFC option, and an FCS
 * option when the remote supports FCS negotiation and we want it off.
 * Returns the total request length (return statement not visible in this
 * extract). */
1610 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
1612 struct sock *sk = chan->sk;
1613 struct l2cap_pinfo *pi = l2cap_pi(sk);
1614 struct l2cap_conf_req *req = data;
1615 struct l2cap_conf_rfc rfc = { .mode = pi->mode };
1616 void *ptr = req->data;
1618 BT_DBG("sk %p", sk);
/* Only renegotiate the mode on the very first request/response. */
1620 if (chan->num_conf_req || chan->num_conf_rsp)
1624 case L2CAP_MODE_STREAMING:
1625 case L2CAP_MODE_ERTM:
1626 if (pi->conf_state & L2CAP_CONF_STATE2_DEVICE)
1631 pi->mode = l2cap_select_mode(rfc.mode, pi->conn->feat_mask);
/* Only send an MTU option when it differs from the spec default. */
1636 if (pi->imtu != L2CAP_DEFAULT_MTU)
1637 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);
1640 case L2CAP_MODE_BASIC:
1641 if (!(pi->conn->feat_mask & L2CAP_FEAT_ERTM) &&
1642 !(pi->conn->feat_mask & L2CAP_FEAT_STREAMING))
1645 rfc.mode = L2CAP_MODE_BASIC;
1647 rfc.max_transmit = 0;
1648 rfc.retrans_timeout = 0;
1649 rfc.monitor_timeout = 0;
1650 rfc.max_pdu_size = 0;
1652 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
1653 (unsigned long) &rfc);
1656 case L2CAP_MODE_ERTM:
1657 rfc.mode = L2CAP_MODE_ERTM;
1658 rfc.txwin_size = pi->tx_win;
1659 rfc.max_transmit = pi->max_tx;
1660 rfc.retrans_timeout = 0;
1661 rfc.monitor_timeout = 0;
/* Clamp the PDU size so an I-frame plus ERTM overhead (10 bytes of
 * headers/FCS) still fits in the connection MTU. */
1662 rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
1663 if (L2CAP_DEFAULT_MAX_PDU_SIZE > pi->conn->mtu - 10)
1664 rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);
1666 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
1667 (unsigned long) &rfc);
1669 if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
/* Request "no FCS" only when we want it off and the peer agreed. */
1672 if (pi->fcs == L2CAP_FCS_NONE ||
1673 pi->conf_state & L2CAP_CONF_NO_FCS_RECV) {
1674 pi->fcs = L2CAP_FCS_NONE;
1675 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);
1679 case L2CAP_MODE_STREAMING:
1680 rfc.mode = L2CAP_MODE_STREAMING;
1682 rfc.max_transmit = 0;
1683 rfc.retrans_timeout = 0;
1684 rfc.monitor_timeout = 0;
1685 rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
1686 if (L2CAP_DEFAULT_MAX_PDU_SIZE > pi->conn->mtu - 10)
1687 rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);
1689 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
1690 (unsigned long) &rfc);
1692 if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
1695 if (pi->fcs == L2CAP_FCS_NONE ||
1696 pi->conf_state & L2CAP_CONF_NO_FCS_RECV) {
1697 pi->fcs = L2CAP_FCS_NONE;
1698 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);
1703 req->dcid = cpu_to_le16(pi->dcid);
1704 req->flags = cpu_to_le16(0);
/* Parse the peer's accumulated Configure Request (chan->conf_req /
 * chan->conf_len) and build our Configure Response into data.  Unknown
 * non-hint options are echoed back with L2CAP_CONF_UNKNOWN; mode, MTU and
 * RFC parameters are validated and counter-proposed where unacceptable.
 * Returns the response length, or -ECONNREFUSED when the modes cannot be
 * reconciled. */
1709 static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
1711 struct l2cap_pinfo *pi = l2cap_pi(chan->sk);
1712 struct l2cap_conf_rsp *rsp = data;
1713 void *ptr = rsp->data;
1714 void *req = chan->conf_req;
1715 int len = chan->conf_len;
1716 int type, hint, olen;
1718 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
1719 u16 mtu = L2CAP_DEFAULT_MTU;
1720 u16 result = L2CAP_CONF_SUCCESS;
1722 BT_DBG("chan %p", chan);
1724 while (len >= L2CAP_CONF_OPT_SIZE) {
1725 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
/* The hint bit marks options we may silently ignore if unknown. */
1727 hint = type & L2CAP_CONF_HINT;
1728 type &= L2CAP_CONF_MASK;
1731 case L2CAP_CONF_MTU:
1735 case L2CAP_CONF_FLUSH_TO:
1739 case L2CAP_CONF_QOS:
1742 case L2CAP_CONF_RFC:
1743 if (olen == sizeof(rfc))
1744 memcpy(&rfc, (void *) val, olen);
1747 case L2CAP_CONF_FCS:
1748 if (val == L2CAP_FCS_NONE)
1749 pi->conf_state |= L2CAP_CONF_NO_FCS_RECV;
/* Unknown non-hint option: reject and list the offending type. */
1757 result = L2CAP_CONF_UNKNOWN;
1758 *((u8 *) ptr++) = type;
1763 if (chan->num_conf_rsp || chan->num_conf_req > 1)
1767 case L2CAP_MODE_STREAMING:
1768 case L2CAP_MODE_ERTM:
1769 if (!(pi->conf_state & L2CAP_CONF_STATE2_DEVICE)) {
1770 pi->mode = l2cap_select_mode(rfc.mode,
1771 pi->conn->feat_mask);
1775 if (pi->mode != rfc.mode)
1776 return -ECONNREFUSED;
/* Modes still disagree: counter-propose ours, but give up after the
 * first unaccepted round. */
1782 if (pi->mode != rfc.mode) {
1783 result = L2CAP_CONF_UNACCEPT;
1784 rfc.mode = pi->mode;
1786 if (chan->num_conf_rsp == 1)
1787 return -ECONNREFUSED;
1789 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
1790 sizeof(rfc), (unsigned long) &rfc);
1794 if (result == L2CAP_CONF_SUCCESS) {
1795 /* Configure output options and let the other side know
1796 * which ones we don't like. */
1798 if (mtu < L2CAP_DEFAULT_MIN_MTU)
1799 result = L2CAP_CONF_UNACCEPT;
1802 pi->conf_state |= L2CAP_CONF_MTU_DONE;
1804 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);
1807 case L2CAP_MODE_BASIC:
1808 pi->fcs = L2CAP_FCS_NONE;
1809 pi->conf_state |= L2CAP_CONF_MODE_DONE;
1812 case L2CAP_MODE_ERTM:
1813 pi->remote_tx_win = rfc.txwin_size;
1814 pi->remote_max_tx = rfc.max_transmit;
1816 if (le16_to_cpu(rfc.max_pdu_size) > pi->conn->mtu - 10)
1817 rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);
1819 pi->remote_mps = le16_to_cpu(rfc.max_pdu_size);
/* NOTE(review): le16_to_cpu() on a CPU-order constant looks like it
 * should be cpu_to_le16() — the two only coincide on little-endian
 * hosts.  Verify against upstream history before relying on this on
 * big-endian machines. */
1821 rfc.retrans_timeout =
1822 le16_to_cpu(L2CAP_DEFAULT_RETRANS_TO);
1823 rfc.monitor_timeout =
1824 le16_to_cpu(L2CAP_DEFAULT_MONITOR_TO);
1826 pi->conf_state |= L2CAP_CONF_MODE_DONE;
1828 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
1829 sizeof(rfc), (unsigned long) &rfc);
1833 case L2CAP_MODE_STREAMING:
1834 if (le16_to_cpu(rfc.max_pdu_size) > pi->conn->mtu - 10)
1835 rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);
1837 pi->remote_mps = le16_to_cpu(rfc.max_pdu_size);
1839 pi->conf_state |= L2CAP_CONF_MODE_DONE;
1841 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
1842 sizeof(rfc), (unsigned long) &rfc);
1847 result = L2CAP_CONF_UNACCEPT;
1849 memset(&rfc, 0, sizeof(rfc));
1850 rfc.mode = pi->mode;
1853 if (result == L2CAP_CONF_SUCCESS)
1854 pi->conf_state |= L2CAP_CONF_OUTPUT_DONE;
1856 rsp->scid = cpu_to_le16(pi->dcid);
1857 rsp->result = cpu_to_le16(result);
1858 rsp->flags = cpu_to_le16(0x0000);
/* Parse the peer's Configure Response (rsp/len) and build our follow-up
 * Configure Request into data.  Adjusts *result if an option must be
 * renegotiated; on success records the negotiated ERTM/streaming timers
 * and MPS.  Returns the new request length or -ECONNREFUSED on a mode
 * mismatch. */
1863 static int l2cap_parse_conf_rsp(struct sock *sk, void *rsp, int len, void *data, u16 *result)
1865 struct l2cap_pinfo *pi = l2cap_pi(sk);
1866 struct l2cap_conf_req *req = data;
1867 void *ptr = req->data;
1870 struct l2cap_conf_rfc rfc;
1872 BT_DBG("sk %p, rsp %p, len %d, req %p", sk, rsp, len, data);
1874 while (len >= L2CAP_CONF_OPT_SIZE) {
1875 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
1878 case L2CAP_CONF_MTU:
/* Peer proposed an MTU below the spec minimum: refuse and fall back
 * to the minimum ourselves. */
1879 if (val < L2CAP_DEFAULT_MIN_MTU) {
1880 *result = L2CAP_CONF_UNACCEPT;
1881 pi->imtu = L2CAP_DEFAULT_MIN_MTU;
1884 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);
1887 case L2CAP_CONF_FLUSH_TO:
1889 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
1893 case L2CAP_CONF_RFC:
1894 if (olen == sizeof(rfc))
1895 memcpy(&rfc, (void *)val, olen);
/* A STATE2 device never changes its locally mandated mode. */
1897 if ((pi->conf_state & L2CAP_CONF_STATE2_DEVICE) &&
1898 rfc.mode != pi->mode)
1899 return -ECONNREFUSED;
1903 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
1904 sizeof(rfc), (unsigned long) &rfc);
1909 if (pi->mode == L2CAP_MODE_BASIC && pi->mode != rfc.mode)
1910 return -ECONNREFUSED;
1912 pi->mode = rfc.mode;
1914 if (*result == L2CAP_CONF_SUCCESS) {
1916 case L2CAP_MODE_ERTM:
1917 pi->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
1918 pi->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
1919 pi->mps = le16_to_cpu(rfc.max_pdu_size);
1921 case L2CAP_MODE_STREAMING:
1922 pi->mps = le16_to_cpu(rfc.max_pdu_size);
1926 req->dcid = cpu_to_le16(pi->dcid);
1927 req->flags = cpu_to_le16(0x0000);
/* Fill in a minimal Configure Response header (scid, result, flags) for
 * this socket; used for empty/continuation/reject responses with no
 * options. */
1932 static int l2cap_build_conf_rsp(struct sock *sk, void *data, u16 result, u16 flags)
1934 struct l2cap_conf_rsp *rsp = data;
1935 void *ptr = rsp->data;
1937 BT_DBG("sk %p", sk);
1939 rsp->scid = cpu_to_le16(l2cap_pi(sk)->dcid);
1940 rsp->result = cpu_to_le16(result);
1941 rsp->flags = cpu_to_le16(flags);
/* Complete a deferred incoming connection: send the (previously withheld)
 * successful Connect Response, then kick off configuration by sending our
 * first Configure Request if one has not been sent yet. */
1946 void __l2cap_connect_rsp_defer(struct sock *sk)
1948 struct l2cap_conn_rsp rsp;
1949 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1950 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
1953 sk->sk_state = BT_CONFIG;
1955 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
1956 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
1957 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
1958 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
/* Reuse the ident saved from the original Connect Request. */
1959 l2cap_send_cmd(conn, chan->ident,
1960 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
1962 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)
1965 l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
1966 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
1967 l2cap_build_conf_req(chan, buf), buf);
1968 chan->num_conf_req++;
/* Extract the RFC option from a successful Configure Response and record
 * the negotiated ERTM timers / MPS.  No-op for basic-mode channels. */
1971 static void l2cap_conf_rfc_get(struct sock *sk, void *rsp, int len)
1973 struct l2cap_pinfo *pi = l2cap_pi(sk);
1976 struct l2cap_conf_rfc rfc;
1978 BT_DBG("sk %p, rsp %p, len %d", sk, rsp, len);
1980 if ((pi->mode != L2CAP_MODE_ERTM) && (pi->mode != L2CAP_MODE_STREAMING))
1983 while (len >= L2CAP_CONF_OPT_SIZE) {
1984 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
1987 case L2CAP_CONF_RFC:
1988 if (olen == sizeof(rfc))
1989 memcpy(&rfc, (void *)val, olen);
1996 case L2CAP_MODE_ERTM:
1997 pi->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
1998 pi->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
1999 pi->mps = le16_to_cpu(rfc.max_pdu_size);
2001 case L2CAP_MODE_STREAMING:
2002 pi->mps = le16_to_cpu(rfc.max_pdu_size);
/* Handle an incoming Command Reject.  If it rejects our outstanding
 * Information Request (matching ident), stop waiting for the peer's
 * feature mask and proceed with connection setup anyway. */
2006 static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2008 struct l2cap_cmd_rej *rej = (struct l2cap_cmd_rej *) data;
/* 0x0000 = "command not understood"; other reasons are ignored here. */
2010 if (rej->reason != 0x0000)
2013 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
2014 cmd->ident == conn->info_ident) {
2015 del_timer(&conn->info_timer);
2017 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2018 conn->info_ident = 0;
2020 l2cap_conn_start(conn);
/* Handle an incoming Connect Request: find a listening socket for the PSM,
 * enforce link security (except for SDP), allocate a child socket/channel,
 * register it on the connection, and reply with success, pending
 * (authentication/authorization or feature-mask exchange outstanding) or
 * an error result.  May also trigger the initial Information Request and
 * our first Configure Request. */
2026 static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2028 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
2029 struct l2cap_conn_rsp rsp;
2030 struct l2cap_chan *chan = NULL;
2031 struct sock *parent, *sk = NULL;
2032 int result, status = L2CAP_CS_NO_INFO;
2034 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
2035 __le16 psm = req->psm;
2037 BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);
2039 /* Check if we have socket listening on psm */
2040 parent = l2cap_get_sock_by_psm(BT_LISTEN, psm, conn->src);
2042 result = L2CAP_CR_BAD_PSM;
2046 bh_lock_sock(parent);
2048 /* Check if the ACL is secure enough (if not SDP) */
/* PSM 0x0001 is SDP, which is exempt from the link-mode check. */
2049 if (psm != cpu_to_le16(0x0001) &&
2050 !hci_conn_check_link_mode(conn->hcon)) {
2051 conn->disc_reason = 0x05;
2052 result = L2CAP_CR_SEC_BLOCK;
2056 result = L2CAP_CR_NO_MEM;
2058 /* Check for backlog size */
2059 if (sk_acceptq_is_full(parent)) {
2060 BT_DBG("backlog full %d", parent->sk_ack_backlog);
2064 sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP, GFP_ATOMIC);
2068 chan = l2cap_chan_alloc(sk);
2070 l2cap_sock_kill(sk);
2074 write_lock_bh(&conn->chan_lock);
2076 /* Check if we already have channel with that dcid */
2077 if (__l2cap_get_chan_by_dcid(conn, scid)) {
2078 write_unlock_bh(&conn->chan_lock);
2079 sock_set_flag(sk, SOCK_ZAPPED);
2080 l2cap_sock_kill(sk);
2084 hci_conn_hold(conn->hcon);
2086 l2cap_sock_init(sk, parent);
2087 bacpy(&bt_sk(sk)->src, conn->src);
2088 bacpy(&bt_sk(sk)->dst, conn->dst);
2089 l2cap_pi(sk)->psm = psm;
/* The peer's source CID becomes our destination CID and vice versa. */
2090 l2cap_pi(sk)->dcid = scid;
2092 bt_accept_enqueue(parent, sk);
2094 __l2cap_chan_add(conn, chan);
2096 l2cap_pi(sk)->chan = chan;
2098 dcid = l2cap_pi(sk)->scid;
2100 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
2102 chan->ident = cmd->ident;
2104 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
2105 if (l2cap_check_security(sk)) {
/* defer_setup: user space must authorize before we answer success. */
2106 if (bt_sk(sk)->defer_setup) {
2107 sk->sk_state = BT_CONNECT2;
2108 result = L2CAP_CR_PEND;
2109 status = L2CAP_CS_AUTHOR_PEND;
2110 parent->sk_data_ready(parent, 0);
2112 sk->sk_state = BT_CONFIG;
2113 result = L2CAP_CR_SUCCESS;
2114 status = L2CAP_CS_NO_INFO;
2117 sk->sk_state = BT_CONNECT2;
2118 result = L2CAP_CR_PEND;
2119 status = L2CAP_CS_AUTHEN_PEND;
2122 sk->sk_state = BT_CONNECT2;
2123 result = L2CAP_CR_PEND;
2124 status = L2CAP_CS_NO_INFO;
2127 write_unlock_bh(&conn->chan_lock);
2130 bh_unlock_sock(parent);
2133 rsp.scid = cpu_to_le16(scid);
2134 rsp.dcid = cpu_to_le16(dcid);
2135 rsp.result = cpu_to_le16(result);
2136 rsp.status = cpu_to_le16(status);
2137 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
/* Pending with no info means we still owe the peer a feature-mask
 * exchange: fire the Information Request and its timeout now. */
2139 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
2140 struct l2cap_info_req info;
2141 info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
2143 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
2144 conn->info_ident = l2cap_get_ident(conn);
2146 mod_timer(&conn->info_timer, jiffies +
2147 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
2149 l2cap_send_cmd(conn, conn->info_ident,
2150 L2CAP_INFO_REQ, sizeof(info), &info);
2153 if (chan && !(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT) &&
2154 result == L2CAP_CR_SUCCESS) {
2156 l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
2157 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2158 l2cap_build_conf_req(chan, buf), buf);
2159 chan->num_conf_req++;
/* Handle an incoming Connect Response: locate our channel (by scid, or by
 * ident when the peer has not assigned us a CID yet), then on success move
 * to BT_CONFIG and send the first Configure Request; on pending just mark
 * the channel; otherwise tear the channel down with ECONNREFUSED. */
2165 static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2167 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
2168 u16 scid, dcid, result, status;
2169 struct l2cap_chan *chan;
2173 scid = __le16_to_cpu(rsp->scid);
2174 dcid = __le16_to_cpu(rsp->dcid);
2175 result = __le16_to_cpu(rsp->result);
2176 status = __le16_to_cpu(rsp->status);
2178 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);
2181 chan = l2cap_get_chan_by_scid(conn, scid);
2185 chan = l2cap_get_chan_by_ident(conn, cmd->ident);
2193 case L2CAP_CR_SUCCESS:
2194 sk->sk_state = BT_CONFIG;
2196 l2cap_pi(sk)->dcid = dcid;
2197 l2cap_pi(sk)->conf_state &= ~L2CAP_CONF_CONNECT_PEND;
2199 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)
2202 l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
2204 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2205 l2cap_build_conf_req(chan, req), req);
2206 chan->num_conf_req++;
2210 l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
2214 /* don't delete l2cap channel if sk is owned by user */
/* Defer teardown with a short timer so user context can finish first. */
2215 if (sock_owned_by_user(sk)) {
2216 sk->sk_state = BT_DISCONN;
2217 l2cap_sock_clear_timer(sk);
2218 l2cap_sock_set_timer(sk, HZ / 5);
2222 l2cap_chan_del(chan, ECONNREFUSED);
/* Choose the channel's FCS setting once configuration is complete: no FCS
 * outside ERTM/streaming; CRC16 otherwise, unless the peer asked for no
 * FCS during negotiation. */
2230 static inline void set_default_fcs(struct l2cap_pinfo *pi)
2232 /* FCS is enabled only in ERTM or streaming mode, if one or both
2235 if (pi->mode != L2CAP_MODE_ERTM && pi->mode != L2CAP_MODE_STREAMING)
2236 pi->fcs = L2CAP_FCS_NONE;
2237 else if (!(pi->conf_state & L2CAP_CONF_NO_FCS_RECV))
2238 pi->fcs = L2CAP_FCS_CRC16;
/* Handle an incoming Configure Request.  Options may arrive split across
 * several requests (continuation flag 0x0001): fragments are accumulated
 * in chan->conf_req, and only a complete request is parsed and answered.
 * When both directions are configured the channel goes BT_CONNECTED and
 * ERTM state is initialised if needed. */
2241 static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
2243 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
2246 struct l2cap_chan *chan;
2250 dcid = __le16_to_cpu(req->dcid);
2251 flags = __le16_to_cpu(req->flags);
2253 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
2255 chan = l2cap_get_chan_by_scid(conn, dcid);
/* Config is only legal in BT_CONFIG; otherwise reject (reason 0x0002 =
 * invalid CID). */
2261 if (sk->sk_state != BT_CONFIG) {
2262 struct l2cap_cmd_rej rej;
2264 rej.reason = cpu_to_le16(0x0002);
2265 l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
2270 /* Reject if config buffer is too small. */
2271 len = cmd_len - sizeof(*req);
2272 if (chan->conf_len + len > sizeof(chan->conf_req)) {
2273 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2274 l2cap_build_conf_rsp(sk, rsp,
2275 L2CAP_CONF_REJECT, flags), rsp);
2280 memcpy(chan->conf_req + chan->conf_len, req->data, len);
2281 chan->conf_len += len;
2283 if (flags & 0x0001) {
2284 /* Incomplete config. Send empty response. */
2285 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2286 l2cap_build_conf_rsp(sk, rsp,
2287 L2CAP_CONF_SUCCESS, 0x0001), rsp);
2291 /* Complete config. */
2292 len = l2cap_parse_conf_req(chan, rsp);
2294 l2cap_send_disconn_req(conn, sk, ECONNRESET);
2298 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
2299 chan->num_conf_rsp++;
2301 /* Reset config buffer. */
2304 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE))
/* Both input and output configured: the channel is fully open. */
2307 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_INPUT_DONE) {
2308 set_default_fcs(l2cap_pi(sk));
2310 sk->sk_state = BT_CONNECTED;
2312 chan->next_tx_seq = 0;
2313 chan->expected_tx_seq = 0;
2314 __skb_queue_head_init(TX_QUEUE(sk));
2315 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
2316 l2cap_ertm_init(chan);
2318 l2cap_chan_ready(sk);
2322 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)) {
2324 l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
2325 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2326 l2cap_build_conf_req(chan, buf), buf);
2327 chan->num_conf_req++;
/* Handle an incoming Configure Response.  On success record the RFC
 * parameters; on "unaccept" renegotiate (bounded by
 * L2CAP_CONF_MAX_CONF_RSP rounds); on anything else disconnect.  When
 * both sides are done the channel goes BT_CONNECTED. */
2335 static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2337 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
2338 u16 scid, flags, result;
2339 struct l2cap_chan *chan;
/* NOTE(review): cmd->len is wire little-endian elsewhere in this file
 * (see __le16_to_cpu(cmd->len) in l2cap_conn_param_update_req); using it
 * raw here looks like a missing byte-order conversion on big-endian
 * hosts — verify against upstream. */
2341 int len = cmd->len - sizeof(*rsp);
2343 scid = __le16_to_cpu(rsp->scid);
2344 flags = __le16_to_cpu(rsp->flags);
2345 result = __le16_to_cpu(rsp->result);
2347 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
2348 scid, flags, result);
2350 chan = l2cap_get_chan_by_scid(conn, scid);
2357 case L2CAP_CONF_SUCCESS:
2358 l2cap_conf_rfc_get(sk, rsp->data, len);
2361 case L2CAP_CONF_UNACCEPT:
2362 if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
2365 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
2366 l2cap_send_disconn_req(conn, sk, ECONNRESET);
2370 /* throw out any old stored conf requests */
2371 result = L2CAP_CONF_SUCCESS;
2372 len = l2cap_parse_conf_rsp(sk, rsp->data,
2375 l2cap_send_disconn_req(conn, sk, ECONNRESET);
2379 l2cap_send_cmd(conn, l2cap_get_ident(conn),
2380 L2CAP_CONF_REQ, len, req);
2381 chan->num_conf_req++;
2382 if (result != L2CAP_CONF_SUCCESS)
/* Unrecoverable configuration failure: report reset and disconnect. */
2388 sk->sk_err = ECONNRESET;
2389 l2cap_sock_set_timer(sk, HZ * 5);
2390 l2cap_send_disconn_req(conn, sk, ECONNRESET);
2397 l2cap_pi(sk)->conf_state |= L2CAP_CONF_INPUT_DONE;
2399 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE) {
2400 set_default_fcs(l2cap_pi(sk));
2402 sk->sk_state = BT_CONNECTED;
2403 chan->next_tx_seq = 0;
2404 chan->expected_tx_seq = 0;
2405 __skb_queue_head_init(TX_QUEUE(sk));
2406 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
2407 l2cap_ertm_init(chan);
2409 l2cap_chan_ready(sk);
/* Handle an incoming Disconnect Request: acknowledge with a Disconnect
 * Response, shut the socket down, and delete the channel — unless the
 * socket is currently owned by user context, in which case teardown is
 * deferred via a short timer. */
2417 static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2419 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
2420 struct l2cap_disconn_rsp rsp;
2422 struct l2cap_chan *chan;
2425 scid = __le16_to_cpu(req->scid);
2426 dcid = __le16_to_cpu(req->dcid);
2428 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
/* The peer's dcid names OUR source CID. */
2430 chan = l2cap_get_chan_by_scid(conn, dcid);
2436 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
2437 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
2438 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
2440 sk->sk_shutdown = SHUTDOWN_MASK;
2442 /* don't delete l2cap channel if sk is owned by user */
2443 if (sock_owned_by_user(sk)) {
2444 sk->sk_state = BT_DISCONN;
2445 l2cap_sock_clear_timer(sk);
2446 l2cap_sock_set_timer(sk, HZ / 5);
2451 l2cap_chan_del(chan, ECONNRESET);
2454 l2cap_sock_kill(sk);
/* Handle an incoming Disconnect Response to our own Disconnect Request:
 * delete the channel (err 0 — local, voluntary close), deferring via a
 * short timer if the socket is owned by user context. */
2458 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2460 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
2462 struct l2cap_chan *chan;
2465 scid = __le16_to_cpu(rsp->scid);
2466 dcid = __le16_to_cpu(rsp->dcid);
2468 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
2470 chan = l2cap_get_chan_by_scid(conn, scid);
2476 /* don't delete l2cap channel if sk is owned by user */
2477 if (sock_owned_by_user(sk)) {
2478 sk->sk_state = BT_DISCONN;
2479 l2cap_sock_clear_timer(sk);
2480 l2cap_sock_set_timer(sk, HZ / 5);
2485 l2cap_chan_del(chan, 0);
2488 l2cap_sock_kill(sk);
/* Handle an incoming Information Request: answer feature-mask queries
 * with our capabilities (ERTM/streaming added on top of l2cap_feat_mask),
 * fixed-channel queries with l2cap_fixed_chan, and anything else with
 * NOTSUPP. */
2492 static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2494 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
2497 type = __le16_to_cpu(req->type);
2499 BT_DBG("type 0x%4.4x", type);
2501 if (type == L2CAP_IT_FEAT_MASK) {
2503 u32 feat_mask = l2cap_feat_mask;
2504 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
2505 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
2506 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
2508 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
2510 put_unaligned_le32(feat_mask, rsp->data);
2511 l2cap_send_cmd(conn, cmd->ident,
2512 L2CAP_INFO_RSP, sizeof(buf), buf);
2513 } else if (type == L2CAP_IT_FIXED_CHAN) {
2515 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
2516 rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
2517 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
/* Fixed-channel bitmap is 8 bytes, placed after the 4-byte rsp header. */
2518 memcpy(buf + 4, l2cap_fixed_chan, 8);
2519 l2cap_send_cmd(conn, cmd->ident,
2520 L2CAP_INFO_RSP, sizeof(buf), buf);
2522 struct l2cap_info_rsp rsp;
2523 rsp.type = cpu_to_le16(type);
2524 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
2525 l2cap_send_cmd(conn, cmd->ident,
2526 L2CAP_INFO_RSP, sizeof(rsp), &rsp);
/* Handle an incoming Information Response.  Only accepted if it matches
 * our outstanding request ident.  A feature-mask answer that advertises
 * fixed channels triggers a follow-up fixed-channel query; otherwise the
 * info exchange is marked done and pending connections are started. */
2532 static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2534 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
2537 type = __le16_to_cpu(rsp->type);
2538 result = __le16_to_cpu(rsp->result);
2540 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
2542 /* L2CAP Info req/rsp are unbound to channels, add extra checks */
2543 if (cmd->ident != conn->info_ident ||
2544 conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
2547 del_timer(&conn->info_timer);
2549 if (result != L2CAP_IR_SUCCESS) {
2550 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2551 conn->info_ident = 0;
2553 l2cap_conn_start(conn);
2558 if (type == L2CAP_IT_FEAT_MASK) {
2559 conn->feat_mask = get_unaligned_le32(rsp->data);
/* Peer supports fixed channels: ask which ones before proceeding. */
2561 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
2562 struct l2cap_info_req req;
2563 req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
2565 conn->info_ident = l2cap_get_ident(conn);
2567 l2cap_send_cmd(conn, conn->info_ident,
2568 L2CAP_INFO_REQ, sizeof(req), &req);
2570 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2571 conn->info_ident = 0;
2573 l2cap_conn_start(conn);
2575 } else if (type == L2CAP_IT_FIXED_CHAN) {
2576 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2577 conn->info_ident = 0;
2579 l2cap_conn_start(conn);
/* Validate LE connection parameter update values against the ranges the
 * spec allows: interval 6..3200 with min <= max, supervision timeout
 * multiplier 10..3200 and strictly larger than 8*max interval, latency
 * at most 499 and below the ceiling implied by the timeout. */
2585 static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency,
2590 if (min > max || min < 6 || max > 3200)
2593 if (to_multiplier < 10 || to_multiplier > 3200)
2596 if (max >= to_multiplier * 8)
/* Highest latency that still lets the link survive the timeout. */
2599 max_latency = (to_multiplier * 8 / max) - 1;
2600 if (latency > 499 || latency > max_latency)
/* Handle an LE Connection Parameter Update Request.  Only valid when we
 * are the link master and the PDU is exactly the expected size.  Replies
 * accepted/rejected based on l2cap_check_conn_param, and on acceptance
 * pushes the new parameters down to the controller. */
2606 static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
2607 struct l2cap_cmd_hdr *cmd, u8 *data)
2609 struct hci_conn *hcon = conn->hcon;
2610 struct l2cap_conn_param_update_req *req;
2611 struct l2cap_conn_param_update_rsp rsp;
2612 u16 min, max, latency, to_multiplier, cmd_len;
2615 if (!(hcon->link_mode & HCI_LM_MASTER))
2618 cmd_len = __le16_to_cpu(cmd->len);
2619 if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
2622 req = (struct l2cap_conn_param_update_req *) data;
2623 min = __le16_to_cpu(req->min);
2624 max = __le16_to_cpu(req->max);
2625 latency = __le16_to_cpu(req->latency);
2626 to_multiplier = __le16_to_cpu(req->to_multiplier);
2628 BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
2629 min, max, latency, to_multiplier);
2631 memset(&rsp, 0, sizeof(rsp));
2633 err = l2cap_check_conn_param(min, max, latency, to_multiplier);
2635 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
2637 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
2639 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
/* Only issue the controller update when the parameters were accepted. */
2643 hci_le_conn_update(hcon, min, max, latency, to_multiplier);
/* Dispatch one BR/EDR signalling command to its handler by opcode.
 * Echo requests are answered inline; unknown opcodes are logged (the
 * error/reject path is handled by the caller). */
2648 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
2649 struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
2653 switch (cmd->code) {
2654 case L2CAP_COMMAND_REJ:
2655 l2cap_command_rej(conn, cmd, data);
2658 case L2CAP_CONN_REQ:
2659 err = l2cap_connect_req(conn, cmd, data);
2662 case L2CAP_CONN_RSP:
2663 err = l2cap_connect_rsp(conn, cmd, data);
2666 case L2CAP_CONF_REQ:
2667 err = l2cap_config_req(conn, cmd, cmd_len, data);
2670 case L2CAP_CONF_RSP:
2671 err = l2cap_config_rsp(conn, cmd, data);
2674 case L2CAP_DISCONN_REQ:
2675 err = l2cap_disconnect_req(conn, cmd, data);
2678 case L2CAP_DISCONN_RSP:
2679 err = l2cap_disconnect_rsp(conn, cmd, data);
2682 case L2CAP_ECHO_REQ:
/* Echo: bounce the payload straight back with the same ident. */
2683 l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
2686 case L2CAP_ECHO_RSP:
2689 case L2CAP_INFO_REQ:
2690 err = l2cap_information_req(conn, cmd, data);
2693 case L2CAP_INFO_RSP:
2694 err = l2cap_information_rsp(conn, cmd, data);
2698 BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
/* Dispatch one LE signalling command.  Only the connection parameter
 * update request does real work here; rejects and update responses are
 * accepted silently, everything else is logged as unknown. */
2706 static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
2707 struct l2cap_cmd_hdr *cmd, u8 *data)
2709 switch (cmd->code) {
2710 case L2CAP_COMMAND_REJ:
2713 case L2CAP_CONN_PARAM_UPDATE_REQ:
2714 return l2cap_conn_param_update_req(conn, cmd, data);
2716 case L2CAP_CONN_PARAM_UPDATE_RSP:
2720 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
/* Process an skb received on the signalling channel: mirror it to raw
 * sockets, then walk the packet, decoding one command header + payload at
 * a time and dispatching to the LE or BR/EDR handler.  A handler error
 * produces a Command Reject back to the peer. */
2725 static inline void l2cap_sig_channel(struct l2cap_conn *conn,
2726 struct sk_buff *skb)
2728 u8 *data = skb->data;
2730 struct l2cap_cmd_hdr cmd;
2733 l2cap_raw_recv(conn, skb);
2735 while (len >= L2CAP_CMD_HDR_SIZE) {
2737 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
2738 data += L2CAP_CMD_HDR_SIZE;
2739 len -= L2CAP_CMD_HDR_SIZE;
2741 cmd_len = le16_to_cpu(cmd.len);
2743 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);
/* A command claiming more payload than remains, or ident 0 (reserved),
 * means the packet is corrupt: stop parsing. */
2745 if (cmd_len > len || !cmd.ident) {
2746 BT_DBG("corrupted command");
2750 if (conn->hcon->type == LE_LINK)
2751 err = l2cap_le_sig_cmd(conn, &cmd, data);
2753 err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);
2756 struct l2cap_cmd_rej rej;
2758 BT_ERR("Wrong link type (%d)", err);
2760 /* FIXME: Map err to a valid reason */
2761 rej.reason = cpu_to_le16(0);
2762 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
/* Verify the CRC16 FCS trailer of a received frame when the channel uses
 * L2CAP_FCS_CRC16: trim the 2 trailing FCS bytes, read them, and compare
 * with a CRC computed from the L2CAP header (skb->data - hdr_size) through
 * the trimmed payload. */
2772 static int l2cap_check_fcs(struct l2cap_pinfo *pi, struct sk_buff *skb)
2774 u16 our_fcs, rcv_fcs;
2775 int hdr_size = L2CAP_HDR_SIZE + 2;
2777 if (pi->fcs == L2CAP_FCS_CRC16) {
/* After the trim, skb->data + skb->len points at the stored FCS. */
2778 skb_trim(skb, skb->len - 2);
2779 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
2780 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
2782 if (our_fcs != rcv_fcs)
/* After a poll/final exchange, resume transmission: send RNR if we are
 * locally busy, otherwise retransmit (if the remote was busy) and push
 * pending I-frames; if nothing went out, send a plain RR so the peer's
 * poll is still answered. */
2788 static inline void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
2790 struct l2cap_pinfo *pi = l2cap_pi(chan->sk);
2793 pi->frames_sent = 0;
2795 control |= chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
2797 if (chan->conn_state & L2CAP_CONN_LOCAL_BUSY) {
2798 control |= L2CAP_SUPER_RCV_NOT_READY;
2799 l2cap_send_sframe(chan, control);
2800 chan->conn_state |= L2CAP_CONN_RNR_SENT;
2803 if (chan->conn_state & L2CAP_CONN_REMOTE_BUSY)
2804 l2cap_retransmit_frames(chan);
2806 l2cap_ertm_send(chan);
/* No I-frames were sent and we're not busy: answer with RR instead. */
2808 if (!(chan->conn_state & L2CAP_CONN_LOCAL_BUSY) &&
2809 pi->frames_sent == 0) {
2810 control |= L2CAP_SUPER_RCV_READY;
2811 l2cap_send_sframe(chan, control);
/* Insert an out-of-order I-frame into the SREJ queue, keeping the queue
 * ordered by tx_seq distance from buffer_seq (modulo-64 sequence space).
 * A frame with a tx_seq already present is treated as a duplicate (return
 * path not visible in this extract). */
2815 static int l2cap_add_to_srej_queue(struct l2cap_chan *chan, struct sk_buff *skb, u8 tx_seq, u8 sar)
2817 struct sock *sk = chan->sk;
2818 struct sk_buff *next_skb;
2819 int tx_seq_offset, next_tx_seq_offset;
2821 bt_cb(skb)->tx_seq = tx_seq;
2822 bt_cb(skb)->sar = sar;
2824 next_skb = skb_peek(SREJ_QUEUE(sk));
/* Empty queue: the frame simply goes to the tail. */
2826 __skb_queue_tail(SREJ_QUEUE(sk), skb);
/* Offsets are distances from buffer_seq in the 6-bit sequence space. */
2830 tx_seq_offset = (tx_seq - chan->buffer_seq) % 64;
2831 if (tx_seq_offset < 0)
2832 tx_seq_offset += 64;
2835 if (bt_cb(next_skb)->tx_seq == tx_seq)
2838 next_tx_seq_offset = (bt_cb(next_skb)->tx_seq -
2839 chan->buffer_seq) % 64;
2840 if (next_tx_seq_offset < 0)
2841 next_tx_seq_offset += 64;
2843 if (next_tx_seq_offset > tx_seq_offset) {
2844 __skb_queue_before(SREJ_QUEUE(sk), next_skb, skb);
2848 if (skb_queue_is_last(SREJ_QUEUE(sk), next_skb))
2851 } while ((next_skb = skb_queue_next(SREJ_QUEUE(sk), next_skb)));
2853 __skb_queue_tail(SREJ_QUEUE(sk), skb);
/* Reassemble one received ERTM I-frame into an SDU according to its SAR
 * bits: unsegmented frames are queued directly; START allocates the SDU
 * buffer from the announced sdu_len; CONTINUE appends; END finishes the
 * SDU, clones it and queues it to the socket, retrying later (SAR_RETRY)
 * if the receive queue rejects it.  Protocol violations disconnect the
 * channel (goto targets not visible in this extract). */
2858 static int l2cap_ertm_reassembly_sdu(struct l2cap_chan *chan, struct sk_buff *skb, u16 control)
2860 struct l2cap_pinfo *pi = l2cap_pi(chan->sk);
2861 struct sk_buff *_skb;
2864 switch (control & L2CAP_CTRL_SAR) {
2865 case L2CAP_SDU_UNSEGMENTED:
/* An unsegmented frame while a segmented SDU is in progress is a
 * protocol violation. */
2866 if (chan->conn_state & L2CAP_CONN_SAR_SDU)
2869 err = sock_queue_rcv_skb(chan->sk, skb);
2875 case L2CAP_SDU_START:
2876 if (chan->conn_state & L2CAP_CONN_SAR_SDU)
2879 pi->sdu_len = get_unaligned_le16(skb->data);
2881 if (pi->sdu_len > pi->imtu)
2884 pi->sdu = bt_skb_alloc(pi->sdu_len, GFP_ATOMIC);
2888 /* pull sdu_len bytes only after alloc, because of Local Busy
2889 * condition we have to be sure that this will be executed
2890 * only once, i.e., when alloc does not fail */
2893 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
2895 chan->conn_state |= L2CAP_CONN_SAR_SDU;
2896 pi->partial_sdu_len = skb->len;
2899 case L2CAP_SDU_CONTINUE:
2900 if (!(chan->conn_state & L2CAP_CONN_SAR_SDU))
2906 pi->partial_sdu_len += skb->len;
2907 if (pi->partial_sdu_len > pi->sdu_len)
2910 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
/* L2CAP_SDU_END: finish the SDU and deliver it. */
2915 if (!(chan->conn_state & L2CAP_CONN_SAR_SDU))
/* On a retry the final fragment was already appended; skip the
 * length accounting and copy. */
2921 if (!(chan->conn_state & L2CAP_CONN_SAR_RETRY)) {
2922 pi->partial_sdu_len += skb->len;
2924 if (pi->partial_sdu_len > pi->imtu)
2927 if (pi->partial_sdu_len != pi->sdu_len)
2930 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
2933 _skb = skb_clone(pi->sdu, GFP_ATOMIC);
2935 chan->conn_state |= L2CAP_CONN_SAR_RETRY;
2939 err = sock_queue_rcv_skb(chan->sk, _skb);
2942 chan->conn_state |= L2CAP_CONN_SAR_RETRY;
2946 chan->conn_state &= ~L2CAP_CONN_SAR_RETRY;
2947 chan->conn_state &= ~L2CAP_CONN_SAR_SDU;
2961 l2cap_send_disconn_req(pi->conn, chan->sk, ECONNRESET);
/* Try to drain the local-busy backlog: feed queued frames through SDU
 * reassembly, re-queueing and bailing out on the first failure.  Once the
 * queue empties, exit the local-busy state — if we had sent RNR, poll the
 * peer with an RR(P=1) and arm the monitor timer to recover the window. */
2966 static int l2cap_try_push_rx_skb(struct l2cap_chan *chan)
2968 struct sock *sk = chan->sk;
2969 struct l2cap_pinfo *pi = l2cap_pi(sk);
2970 struct sk_buff *skb;
2974 while ((skb = skb_dequeue(BUSY_QUEUE(sk)))) {
2975 control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
2976 err = l2cap_ertm_reassembly_sdu(chan, skb, control);
/* Still busy: put the frame back at the head and stop. */
2978 skb_queue_head(BUSY_QUEUE(sk), skb);
2982 chan->buffer_seq = (chan->buffer_seq + 1) % 64;
2985 if (!(chan->conn_state & L2CAP_CONN_RNR_SENT))
2988 control = chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
2989 control |= L2CAP_SUPER_RCV_READY | L2CAP_CTRL_POLL;
2990 l2cap_send_sframe(chan, control);
2991 l2cap_pi(sk)->retry_count = 1;
2993 del_timer(&pi->retrans_timer);
2994 __mod_monitor_timer();
2996 chan->conn_state |= L2CAP_CONN_WAIT_F;
2999 chan->conn_state &= ~L2CAP_CONN_LOCAL_BUSY;
3000 chan->conn_state &= ~L2CAP_CONN_RNR_SENT;
3002 BT_DBG("sk %p, Exit local busy", sk);
/* Deferred-work handler for the local-busy condition.  Sleeps on the
 * socket wait queue in HZ/5 slices, repeatedly trying to push the queued
 * rx skbs; gives up (disconnecting with EBUSY) after
 * L2CAP_LOCAL_BUSY_TRIES attempts, or aborts on a pending signal or a
 * socket error.  Runs from the _busy_wq workqueue. */
3007 static void l2cap_busy_work(struct work_struct *work)
3009 DECLARE_WAITQUEUE(wait, current);
3010 struct l2cap_pinfo *pi =
3011 container_of(work, struct l2cap_pinfo, busy_work);
/* struct sock is the first member of l2cap_pinfo, so this cast is valid. */
3012 struct sock *sk = (struct sock *)pi;
3013 int n_tries = 0, timeo = HZ/5, err;
3014 struct sk_buff *skb;
3018 add_wait_queue(sk_sleep(sk), &wait);
3019 while ((skb = skb_peek(BUSY_QUEUE(sk)))) {
3020 set_current_state(TASK_INTERRUPTIBLE);
/* Bound how long we stay busy before tearing the channel down. */
3022 if (n_tries++ > L2CAP_LOCAL_BUSY_TRIES) {
3024 l2cap_send_disconn_req(pi->conn, sk, EBUSY);
3031 if (signal_pending(current)) {
3032 err = sock_intr_errno(timeo);
3037 timeo = schedule_timeout(timeo);
3040 err = sock_error(sk);
/* A return of 0 means the busy queue fully drained. */
3044 if (l2cap_try_push_rx_skb(l2cap_pi(sk)->chan) == 0)
3048 set_current_state(TASK_RUNNING);
3049 remove_wait_queue(sk_sleep(sk), &wait);
/* Deliver a received I-frame for reassembly, entering the local-busy
 * state if reassembly reports backpressure: the skb is queued on the busy
 * queue, an RNR S-frame is sent to the peer, the ack timer is stopped,
 * and busy_work is scheduled to retry delivery later. */
3054 static int l2cap_push_rx_skb(struct l2cap_chan *chan, struct sk_buff *skb, u16 control)
3056 struct sock *sk = chan->sk;
3057 struct l2cap_pinfo *pi = l2cap_pi(sk);
/* Already busy: just append and let the drain path handle it. */
3060 if (chan->conn_state & L2CAP_CONN_LOCAL_BUSY) {
3061 bt_cb(skb)->sar = control >> L2CAP_CTRL_SAR_SHIFT;
3062 __skb_queue_tail(BUSY_QUEUE(sk), skb);
3063 return l2cap_try_push_rx_skb(chan);
3068 err = l2cap_ertm_reassembly_sdu(chan, skb, control);
3070 chan->buffer_seq = (chan->buffer_seq + 1) % 64;
3074 /* Busy Condition */
3075 BT_DBG("sk %p, Enter local busy", sk);
3077 chan->conn_state |= L2CAP_CONN_LOCAL_BUSY;
/* Stash the SAR bits in the skb control block for later reassembly. */
3078 bt_cb(skb)->sar = control >> L2CAP_CTRL_SAR_SHIFT;
3079 __skb_queue_tail(BUSY_QUEUE(sk), skb);
/* Tell the peer to stop sending: RNR carrying our current buffer_seq. */
3081 sctrl = chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3082 sctrl |= L2CAP_SUPER_RCV_NOT_READY;
3083 l2cap_send_sframe(chan, sctrl);
3085 chan->conn_state |= L2CAP_CONN_RNR_SENT;
3087 del_timer(&pi->ack_timer);
3089 queue_work(_busy_wq, &pi->busy_work);
/* Reassemble SDUs for a channel in streaming mode, dispatching on the SAR
 * (segmentation and reassembly) bits of the frame control field.
 * Unsegmented frames are queued straight to the socket; START allocates
 * pi->sdu from the advertised SDU length, CONTINUE appends, and END (the
 * final case) appends, validates the accumulated length, then clones and
 * queues the completed SDU.  Streaming mode has no retransmission, so a
 * SAR-state mismatch means frames were lost. */
3094 static int l2cap_streaming_reassembly_sdu(struct l2cap_chan *chan, struct sk_buff *skb, u16 control)
3096 struct l2cap_pinfo *pi = l2cap_pi(chan->sk);
3097 struct sk_buff *_skb;
3101 * TODO: We have to notify the userland if some data is lost with the
3105 switch (control & L2CAP_CTRL_SAR) {
3106 case L2CAP_SDU_UNSEGMENTED:
/* An in-progress segmented SDU should not exist here; losing frames
 * in streaming mode can leave SAR_SDU set. */
3107 if (chan->conn_state & L2CAP_CONN_SAR_SDU) {
3112 err = sock_queue_rcv_skb(chan->sk, skb);
3118 case L2CAP_SDU_START:
3119 if (chan->conn_state & L2CAP_CONN_SAR_SDU) {
/* First two payload bytes of a START frame carry the total SDU length. */
3124 pi->sdu_len = get_unaligned_le16(skb->data);
3127 if (pi->sdu_len > pi->imtu) {
3132 pi->sdu = bt_skb_alloc(pi->sdu_len, GFP_ATOMIC);
3138 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3140 chan->conn_state |= L2CAP_CONN_SAR_SDU;
3141 pi->partial_sdu_len = skb->len;
3145 case L2CAP_SDU_CONTINUE:
3146 if (!(chan->conn_state & L2CAP_CONN_SAR_SDU))
3149 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3151 pi->partial_sdu_len += skb->len;
3152 if (pi->partial_sdu_len > pi->sdu_len)
/* END segment (visible lines suggest this is the final SAR case). */
3160 if (!(chan->conn_state & L2CAP_CONN_SAR_SDU))
3163 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3165 chan->conn_state &= ~L2CAP_CONN_SAR_SDU;
3166 pi->partial_sdu_len += skb->len;
3168 if (pi->partial_sdu_len > pi->imtu)
/* Only deliver if the accumulated length matches what START promised. */
3171 if (pi->partial_sdu_len == pi->sdu_len) {
3172 _skb = skb_clone(pi->sdu, GFP_ATOMIC);
3173 err = sock_queue_rcv_skb(chan->sk, _skb);
/* After a selectively-rejected frame arrives, release any consecutive
 * in-order frames parked on the SREJ queue: dequeue each skb whose
 * stored tx_seq matches the expected sequence, reassemble it, and
 * advance both buffer_seq_srej and tx_seq (modulo 64).  Stops at the
 * first gap. */
3188 static void l2cap_check_srej_gap(struct l2cap_chan *chan, u8 tx_seq)
3190 struct sock *sk = chan->sk;
3191 struct sk_buff *skb;
3194 while ((skb = skb_peek(SREJ_QUEUE(sk)))) {
3195 if (bt_cb(skb)->tx_seq != tx_seq)
3198 skb = skb_dequeue(SREJ_QUEUE(sk));
3199 control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
3200 l2cap_ertm_reassembly_sdu(chan, skb, control);
3201 chan->buffer_seq_srej =
3202 (chan->buffer_seq_srej + 1) % 64;
3203 tx_seq = (tx_seq + 1) % 64;
/* Re-send SREJ S-frames from the pending SREJ list up to (and including,
 * per the visible loop) the entry matching tx_seq; each resent entry is
 * moved to the tail of the list.  Entries that precede the match appear
 * to be handled by the invisible lines around 3215-3218. */
3207 static void l2cap_resend_srejframe(struct l2cap_chan *chan, u8 tx_seq)
3209 struct sock *sk = chan->sk;
3210 struct srej_list *l, *tmp;
3213 list_for_each_entry_safe(l, tmp, SREJ_LIST(sk), list) {
3214 if (l->tx_seq == tx_seq) {
3219 control = L2CAP_SUPER_SELECT_REJECT;
3220 control |= l->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3221 l2cap_send_sframe(chan, control);
/* Keep the entry pending by re-appending it at the tail. */
3223 list_add_tail(&l->list, SREJ_LIST(sk));
/* Send a SREJ S-frame for every sequence number between the expected
 * tx_seq and the one actually received, recording each missing sequence
 * on the SREJ list, then step expected_tx_seq past the received frame.
 * NOTE(review): the kzalloc() result 'new' is dereferenced without a
 * NULL check -- possible NULL-pointer dereference under memory pressure
 * (GFP_ATOMIC can fail); verify against upstream fixes. */
3227 static void l2cap_send_srejframe(struct l2cap_chan *chan, u8 tx_seq)
3229 struct sock *sk = chan->sk;
3230 struct srej_list *new;
3233 while (tx_seq != chan->expected_tx_seq) {
3234 control = L2CAP_SUPER_SELECT_REJECT;
3235 control |= chan->expected_tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3236 l2cap_send_sframe(chan, control);
3238 new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC);
3239 new->tx_seq = chan->expected_tx_seq;
3240 chan->expected_tx_seq = (chan->expected_tx_seq + 1) % 64;
3241 list_add_tail(&new->list, SREJ_LIST(sk));
/* Skip over the frame we just received. */
3243 chan->expected_tx_seq = (chan->expected_tx_seq + 1) % 64;
/* Core ERTM receive path for I-frames.  Validates tx_seq against the
 * transmit window, handles the F-bit/WAIT_F handshake, drives the SREJ
 * (selective reject) recovery state machine for out-of-order frames,
 * and for in-order frames pushes the skb to reassembly and sends an ACK
 * every tx_win/6 + 1 frames. */
3246 static inline int l2cap_data_channel_iframe(struct l2cap_chan *chan, u16 rx_control, struct sk_buff *skb)
3248 struct sock *sk = chan->sk;
3249 struct l2cap_pinfo *pi = l2cap_pi(sk);
3250 u8 tx_seq = __get_txseq(rx_control);
3251 u8 req_seq = __get_reqseq(rx_control);
3252 u8 sar = rx_control >> L2CAP_CTRL_SAR_SHIFT;
3253 int tx_seq_offset, expected_tx_seq_offset;
/* Acknowledge after roughly a sixth of the window has been received. */
3254 int num_to_ack = (pi->tx_win/6) + 1;
3257 BT_DBG("chan %p len %d tx_seq %d rx_control 0x%4.4x", chan, skb->len,
3258 tx_seq, rx_control);
/* F-bit answering our earlier Poll: stop the monitor timer and resume
 * retransmission if frames are still unacked. */
3260 if (L2CAP_CTRL_FINAL & rx_control &&
3261 chan->conn_state & L2CAP_CONN_WAIT_F) {
3262 del_timer(&pi->monitor_timer);
3263 if (pi->unacked_frames > 0)
3264 __mod_retrans_timer();
3265 chan->conn_state &= ~L2CAP_CONN_WAIT_F;
3268 chan->expected_ack_seq = req_seq;
3269 l2cap_drop_acked_frames(chan);
3271 if (tx_seq == chan->expected_tx_seq)
/* Distance of tx_seq from buffer_seq in modulo-64 sequence space. */
3274 tx_seq_offset = (tx_seq - chan->buffer_seq) % 64;
3275 if (tx_seq_offset < 0)
3276 tx_seq_offset += 64;
3278 /* invalid tx_seq */
3279 if (tx_seq_offset >= pi->tx_win) {
3280 l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
/* NOTE(review): '==' compares the whole conn_state word against a single
 * flag rather than testing the bit with '&' -- only matches when
 * LOCAL_BUSY is the sole flag set; confirm this is intentional. */
3284 if (chan->conn_state == L2CAP_CONN_LOCAL_BUSY)
3287 if (chan->conn_state & L2CAP_CONN_SREJ_SENT) {
3288 struct srej_list *first;
3290 first = list_first_entry(SREJ_LIST(sk),
3291 struct srej_list, list);
/* This frame fills the oldest outstanding SREJ gap. */
3292 if (tx_seq == first->tx_seq) {
3293 l2cap_add_to_srej_queue(chan, skb, tx_seq, sar);
3294 l2cap_check_srej_gap(chan, tx_seq);
3296 list_del(&first->list);
/* All gaps filled: leave SREJ_SENT state and ack the peer. */
3299 if (list_empty(SREJ_LIST(sk))) {
3300 chan->buffer_seq = chan->buffer_seq_srej;
3301 chan->conn_state &= ~L2CAP_CONN_SREJ_SENT;
3302 l2cap_send_ack(chan);
3303 BT_DBG("sk %p, Exit SREJ_SENT", sk);
3306 struct srej_list *l;
3308 /* duplicated tx_seq */
3309 if (l2cap_add_to_srej_queue(chan, skb, tx_seq, sar) < 0)
/* Already asked for this frame once: resend that SREJ. */
3312 list_for_each_entry(l, SREJ_LIST(sk), list) {
3313 if (l->tx_seq == tx_seq) {
3314 l2cap_resend_srejframe(chan, tx_seq);
3318 l2cap_send_srejframe(chan, tx_seq);
3321 expected_tx_seq_offset =
3322 (chan->expected_tx_seq - chan->buffer_seq) % 64;
3323 if (expected_tx_seq_offset < 0)
3324 expected_tx_seq_offset += 64;
3326 /* duplicated tx_seq */
3327 if (tx_seq_offset < expected_tx_seq_offset)
/* First out-of-order frame: enter SREJ recovery. */
3330 chan->conn_state |= L2CAP_CONN_SREJ_SENT;
3332 BT_DBG("sk %p, Enter SREJ", sk);
3334 INIT_LIST_HEAD(SREJ_LIST(sk));
3335 chan->buffer_seq_srej = chan->buffer_seq;
3337 __skb_queue_head_init(SREJ_QUEUE(sk));
3338 __skb_queue_head_init(BUSY_QUEUE(sk));
3339 l2cap_add_to_srej_queue(chan, skb, tx_seq, sar);
3341 chan->conn_state |= L2CAP_CONN_SEND_PBIT;
3343 l2cap_send_srejframe(chan, tx_seq);
3345 del_timer(&pi->ack_timer);
/* In-order path (expected label around here in the full source). */
3350 chan->expected_tx_seq = (chan->expected_tx_seq + 1) % 64;
3352 if (chan->conn_state & L2CAP_CONN_SREJ_SENT) {
3353 bt_cb(skb)->tx_seq = tx_seq;
3354 bt_cb(skb)->sar = sar;
3355 __skb_queue_tail(SREJ_QUEUE(sk), skb);
3359 err = l2cap_push_rx_skb(chan, skb, rx_control);
3363 if (rx_control & L2CAP_CTRL_FINAL) {
3364 if (chan->conn_state & L2CAP_CONN_REJ_ACT)
3365 chan->conn_state &= ~L2CAP_CONN_REJ_ACT;
3367 l2cap_retransmit_frames(chan);
3372 pi->num_acked = (pi->num_acked + 1) % num_to_ack;
3373 if (pi->num_acked == num_to_ack - 1)
3374 l2cap_send_ack(chan);
/* Handle a Receiver Ready (RR) S-frame: drop the frames it acknowledges,
 * then branch on the Poll/Final bits -- a Poll demands an F-bit reply
 * (SREJ tail or RR/I-frame), a Final may trigger retransmission after a
 * REJ exchange, and a plain RR clears remote-busy and resumes sending. */
3383 static inline void l2cap_data_channel_rrframe(struct l2cap_chan *chan, u16 rx_control)
3385 struct sock *sk = chan->sk;
3386 struct l2cap_pinfo *pi = l2cap_pi(sk);
3388 BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk, __get_reqseq(rx_control),
3391 chan->expected_ack_seq = __get_reqseq(rx_control);
3392 l2cap_drop_acked_frames(chan);
3394 if (rx_control & L2CAP_CTRL_POLL) {
/* Peer polled us: our next frame must carry the F bit. */
3395 chan->conn_state |= L2CAP_CONN_SEND_FBIT;
3396 if (chan->conn_state & L2CAP_CONN_SREJ_SENT) {
3397 if ((chan->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
3398 (pi->unacked_frames > 0))
3399 __mod_retrans_timer();
3401 chan->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3402 l2cap_send_srejtail(chan);
3404 l2cap_send_i_or_rr_or_rnr(chan);
3407 } else if (rx_control & L2CAP_CTRL_FINAL) {
3408 chan->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
/* First Final after a REJ consumes REJ_ACT; otherwise retransmit. */
3410 if (chan->conn_state & L2CAP_CONN_REJ_ACT)
3411 chan->conn_state &= ~L2CAP_CONN_REJ_ACT;
3413 l2cap_retransmit_frames(chan);
3416 if ((chan->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
3417 (pi->unacked_frames > 0))
3418 __mod_retrans_timer();
3420 chan->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3421 if (chan->conn_state & L2CAP_CONN_SREJ_SENT)
3422 l2cap_send_ack(chan);
3424 l2cap_ertm_send(chan);
/* Handle a Reject (REJ) S-frame: acknowledge up to req_seq and
 * retransmit from there.  With the Final bit set, a pending REJ_ACT is
 * consumed instead of retransmitting again; while waiting for an F bit
 * (WAIT_F) the retransmission is remembered via REJ_ACT. */
3428 static inline void l2cap_data_channel_rejframe(struct l2cap_chan *chan, u16 rx_control)
3430 u8 tx_seq = __get_reqseq(rx_control);
3432 BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, tx_seq, rx_control);
3434 chan->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3436 chan->expected_ack_seq = tx_seq;
3437 l2cap_drop_acked_frames(chan);
3439 if (rx_control & L2CAP_CTRL_FINAL) {
3440 if (chan->conn_state & L2CAP_CONN_REJ_ACT)
3441 chan->conn_state &= ~L2CAP_CONN_REJ_ACT;
3443 l2cap_retransmit_frames(chan);
3445 l2cap_retransmit_frames(chan);
3447 if (chan->conn_state & L2CAP_CONN_WAIT_F)
3448 chan->conn_state |= L2CAP_CONN_REJ_ACT;
/* Handle a Selective Reject (SREJ) S-frame asking for one specific frame.
 * Poll: ack, retransmit the requested frame with the F bit pending, and
 * keep sending.  Final: only skip the retransmit if this SREJ was already
 * acted upon (SREJ_ACT with a matching saved req_seq).  Neither: just
 * retransmit, remembering the request while in WAIT_F. */
3451 static inline void l2cap_data_channel_srejframe(struct l2cap_chan *chan, u16 rx_control)
3453 struct l2cap_pinfo *pi = l2cap_pi(chan->sk);
3454 u8 tx_seq = __get_reqseq(rx_control);
3456 BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, tx_seq, rx_control);
3458 chan->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3460 if (rx_control & L2CAP_CTRL_POLL) {
3461 chan->expected_ack_seq = tx_seq;
3462 l2cap_drop_acked_frames(chan);
3464 chan->conn_state |= L2CAP_CONN_SEND_FBIT;
3465 l2cap_retransmit_one_frame(chan, tx_seq);
3467 l2cap_ertm_send(chan);
3469 if (chan->conn_state & L2CAP_CONN_WAIT_F) {
3470 pi->srej_save_reqseq = tx_seq;
3471 chan->conn_state |= L2CAP_CONN_SREJ_ACT;
3473 } else if (rx_control & L2CAP_CTRL_FINAL) {
/* F-bit answer to a SREJ we already serviced: consume the marker. */
3474 if ((chan->conn_state & L2CAP_CONN_SREJ_ACT) &&
3475 pi->srej_save_reqseq == tx_seq)
3476 chan->conn_state &= ~L2CAP_CONN_SREJ_ACT;
3478 l2cap_retransmit_one_frame(chan, tx_seq);
3480 l2cap_retransmit_one_frame(chan, tx_seq);
3481 if (chan->conn_state & L2CAP_CONN_WAIT_F) {
3482 pi->srej_save_reqseq = tx_seq;
3483 chan->conn_state |= L2CAP_CONN_SREJ_ACT;
/* Handle a Receiver Not Ready (RNR) S-frame: mark the remote busy, ack
 * what it acknowledges, and stop the retransmission timer.  A Poll bit
 * obliges an F-bit reply (RR with Final, or the SREJ tail when SREJ
 * recovery is active). */
3488 static inline void l2cap_data_channel_rnrframe(struct l2cap_chan *chan, u16 rx_control)
3490 struct l2cap_pinfo *pi = l2cap_pi(chan->sk);
3491 u8 tx_seq = __get_reqseq(rx_control);
3493 BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, tx_seq, rx_control);
3495 chan->conn_state |= L2CAP_CONN_REMOTE_BUSY;
3496 chan->expected_ack_seq = tx_seq;
3497 l2cap_drop_acked_frames(chan);
3499 if (rx_control & L2CAP_CTRL_POLL)
3500 chan->conn_state |= L2CAP_CONN_SEND_FBIT;
3502 if (!(chan->conn_state & L2CAP_CONN_SREJ_SENT)) {
/* Remote can't receive: no point retransmitting for now. */
3503 del_timer(&pi->retrans_timer);
3504 if (rx_control & L2CAP_CTRL_POLL)
3505 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_FINAL);
3509 if (rx_control & L2CAP_CTRL_POLL)
3510 l2cap_send_srejtail(chan);
3512 l2cap_send_sframe(chan, L2CAP_SUPER_RCV_READY);
/* Dispatch a received S-frame to the RR/REJ/SREJ/RNR handler according
 * to its supervisory bits.  First resolves a pending Poll/Final (WAIT_F)
 * handshake: stop the monitor timer and restart retransmission if frames
 * remain unacked. */
3515 static inline int l2cap_data_channel_sframe(struct l2cap_chan *chan, u16 rx_control, struct sk_buff *skb)
3517 struct sock *sk = chan->sk;
3519 BT_DBG("chan %p rx_control 0x%4.4x len %d", chan, rx_control, skb->len);
3521 if (L2CAP_CTRL_FINAL & rx_control &&
3522 chan->conn_state & L2CAP_CONN_WAIT_F) {
3523 del_timer(&l2cap_pi(sk)->monitor_timer);
3524 if (l2cap_pi(sk)->unacked_frames > 0)
3525 __mod_retrans_timer();
3526 chan->conn_state &= ~L2CAP_CONN_WAIT_F;
3529 switch (rx_control & L2CAP_CTRL_SUPERVISE) {
3530 case L2CAP_SUPER_RCV_READY:
3531 l2cap_data_channel_rrframe(chan, rx_control);
3534 case L2CAP_SUPER_REJECT:
3535 l2cap_data_channel_rejframe(chan, rx_control);
3538 case L2CAP_SUPER_SELECT_REJECT:
3539 l2cap_data_channel_srejframe(chan, rx_control);
3542 case L2CAP_SUPER_RCV_NOT_READY:
3543 l2cap_data_channel_rnrframe(chan, rx_control);
/* Entry point for a raw ERTM frame on a connected channel: verify the
 * FCS, bound the payload against the negotiated MPS, validate req_seq
 * against the window of frames we actually have outstanding, then route
 * the frame to the I-frame or S-frame handler.  Any validation failure
 * tears the channel down with ECONNRESET. */
3551 static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb)
3553 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
3554 struct l2cap_pinfo *pi = l2cap_pi(sk);
3557 int len, next_tx_seq_offset, req_seq_offset;
/* Control field is the first two little-endian bytes of the payload. */
3559 control = get_unaligned_le16(skb->data);
3564 * We can just drop the corrupted I-frame here.
3565 * Receiver will miss it and start proper recovery
3566 * procedures and ask retransmission.
3568 if (l2cap_check_fcs(pi, skb))
/* SAR start frames carry a 2-byte SDU length; account for it in len
 * (adjustment lines not visible in this view). */
3571 if (__is_sar_start(control) && __is_iframe(control))
3574 if (pi->fcs == L2CAP_FCS_CRC16)
3577 if (len > pi->mps) {
3578 l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
3582 req_seq = __get_reqseq(control);
3583 req_seq_offset = (req_seq - chan->expected_ack_seq) % 64;
3584 if (req_seq_offset < 0)
3585 req_seq_offset += 64;
3587 next_tx_seq_offset =
3588 (chan->next_tx_seq - chan->expected_ack_seq) % 64;
3589 if (next_tx_seq_offset < 0)
3590 next_tx_seq_offset += 64;
3592 /* check for invalid req-seq */
/* Peer cannot ack frames we have not sent yet. */
3593 if (req_seq_offset > next_tx_seq_offset) {
3594 l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
3598 if (__is_iframe(control)) {
3600 l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
3604 l2cap_data_channel_iframe(chan, control, skb);
3608 l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
3612 l2cap_data_channel_sframe(chan, control, skb);
/* Deliver an incoming frame to the channel identified by its source CID,
 * per channel mode: basic mode queues straight to the socket (bounded by
 * the incoming MTU), ERTM goes through l2cap_ertm_data_rcv (or the socket
 * backlog when the socket is owned by user context), and streaming mode
 * does FCS/length checks and lossy modulo-64 sequence tracking before
 * reassembly. */
3622 static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
3624 struct l2cap_chan *chan;
3626 struct l2cap_pinfo *pi;
3631 chan = l2cap_get_chan_by_scid(conn, cid);
3633 BT_DBG("unknown cid 0x%4.4x", cid);
3640 BT_DBG("sk %p, len %d", sk, skb->len);
3642 if (sk->sk_state != BT_CONNECTED)
3646 case L2CAP_MODE_BASIC:
3647 /* If socket recv buffers overflows we drop data here
3648 * which is *bad* because L2CAP has to be reliable.
3649 * But we don't have any other choice. L2CAP doesn't
3650 * provide flow control mechanism. */
3652 if (pi->imtu < skb->len)
3655 if (!sock_queue_rcv_skb(sk, skb))
3659 case L2CAP_MODE_ERTM:
/* User context holds the socket: defer via the backlog to avoid
 * touching ERTM state concurrently. */
3660 if (!sock_owned_by_user(sk)) {
3661 l2cap_ertm_data_rcv(sk, skb);
3663 if (sk_add_backlog(sk, skb))
3669 case L2CAP_MODE_STREAMING:
3670 control = get_unaligned_le16(skb->data);
3674 if (l2cap_check_fcs(pi, skb))
3677 if (__is_sar_start(control))
3680 if (pi->fcs == L2CAP_FCS_CRC16)
3683 if (len > pi->mps || len < 0 || __is_sframe(control))
3686 tx_seq = __get_txseq(control);
/* Streaming is lossy: just resynchronize on whatever arrives. */
3688 if (chan->expected_tx_seq == tx_seq)
3689 chan->expected_tx_seq = (chan->expected_tx_seq + 1) % 64;
3691 chan->expected_tx_seq = (tx_seq + 1) % 64;
3693 l2cap_streaming_reassembly_sdu(chan, skb, control);
3698 BT_DBG("sk %p: bad mode 0x%2.2x", sk, pi->mode);
/* Deliver a connectionless (G-frame) payload to a socket bound to the
 * given PSM on the connection's source address; queued only if the socket
 * is bound/connected and the payload fits its incoming MTU. */
3712 static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
3716 sk = l2cap_get_sock_by_psm(0, psm, conn->src);
3722 BT_DBG("sk %p, len %d", sk, skb->len);
3724 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_CONNECTED)
3727 if (l2cap_pi(sk)->imtu < skb->len)
3730 if (!sock_queue_rcv_skb(sk, skb))
/* Demultiplex a complete L2CAP frame by destination CID after stripping
 * and validating the basic header: signaling CIDs go to the signaling
 * handler, the connectionless CID is routed by PSM, and everything else
 * is treated as a data channel. */
3742 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
3744 struct l2cap_hdr *lh = (void *) skb->data;
3748 skb_pull(skb, L2CAP_HDR_SIZE);
3749 cid = __le16_to_cpu(lh->cid);
3750 len = __le16_to_cpu(lh->len);
/* Header length must match the remaining payload exactly. */
3752 if (len != skb->len) {
3757 BT_DBG("len %d, cid 0x%4.4x", len, cid);
3760 case L2CAP_CID_LE_SIGNALING:
3761 case L2CAP_CID_SIGNALING:
3762 l2cap_sig_channel(conn, skb);
3765 case L2CAP_CID_CONN_LESS:
/* Connectionless frames carry the PSM in the first payload bytes. */
3766 psm = get_unaligned_le16(skb->data);
3768 l2cap_conless_channel(conn, psm, skb);
3772 l2cap_data_channel(conn, cid, skb);
3777 /* ---- L2CAP interface with lower layer (HCI) ---- */
/* HCI callback for an incoming ACL connection request: scan listening
 * L2CAP sockets and compute the accept/role-switch link-mode mask.  A
 * socket bound to this adapter's own address ("exact" match) takes
 * precedence over wildcard (BDADDR_ANY) listeners. */
3779 static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
3781 int exact = 0, lm1 = 0, lm2 = 0;
3782 register struct sock *sk;
3783 struct hlist_node *node;
/* Only BR/EDR ACL links are accepted here. */
3785 if (type != ACL_LINK)
3788 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
3790 /* Find listening sockets and check their link_mode */
3791 read_lock(&l2cap_sk_list.lock);
3792 sk_for_each(sk, node, &l2cap_sk_list.head) {
3793 if (sk->sk_state != BT_LISTEN)
3796 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
3797 lm1 |= HCI_LM_ACCEPT;
3798 if (l2cap_pi(sk)->role_switch)
3799 lm1 |= HCI_LM_MASTER;
3801 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
3802 lm2 |= HCI_LM_ACCEPT;
3803 if (l2cap_pi(sk)->role_switch)
3804 lm2 |= HCI_LM_MASTER;
3807 read_unlock(&l2cap_sk_list.lock);
3809 return exact ? lm1 : lm2;
/* HCI callback confirming (or failing) an ACL/LE link: on success create
 * the L2CAP connection object and mark it ready; on failure tear down any
 * existing connection state with the mapped error. */
3812 static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
3814 struct l2cap_conn *conn;
3816 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
3818 if (!(hcon->type == ACL_LINK || hcon->type == LE_LINK))
3822 conn = l2cap_conn_add(hcon, status);
3824 l2cap_conn_ready(conn);
3826 l2cap_conn_del(hcon, bt_err(status));
/* HCI callback asking which disconnect reason L2CAP recorded for this
 * link; only meaningful for ACL links that have L2CAP state attached. */
3831 static int l2cap_disconn_ind(struct hci_conn *hcon)
3833 struct l2cap_conn *conn = hcon->l2cap_data;
3835 BT_DBG("hcon %p", hcon);
3837 if (hcon->type != ACL_LINK || !conn)
3840 return conn->disc_reason;
/* HCI callback confirming link teardown: destroy the L2CAP connection
 * state for ACL/LE links, translating the HCI reason to an errno. */
3843 static int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
3845 BT_DBG("hcon %p reason %d", hcon, reason);
3847 if (!(hcon->type == ACL_LINK || hcon->type == LE_LINK))
3850 l2cap_conn_del(hcon, bt_err(reason));
/* React to an encryption change on a SEQPACKET/STREAM socket: when
 * encryption drops, MEDIUM-security sockets get a 5 s grace timer before
 * closing while HIGH-security sockets are closed immediately; when
 * encryption comes up, a pending MEDIUM-security timer is cancelled. */
3855 static inline void l2cap_check_encryption(struct sock *sk, u8 encrypt)
3857 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM)
3860 if (encrypt == 0x00) {
3861 if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM) {
3862 l2cap_sock_clear_timer(sk);
3863 l2cap_sock_set_timer(sk, HZ * 5);
3864 } else if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
3865 __l2cap_sock_close(sk, ECONNREFUSED);
3867 if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM)
3868 l2cap_sock_clear_timer(sk);
/* HCI security-procedure completion callback.  Walks every channel on
 * the connection: established channels get their encryption state
 * re-checked; channels in BT_CONNECT that were waiting on security now
 * send their deferred Connection Request; channels in BT_CONNECT2 answer
 * the peer's pending Connection Request with success or a
 * security-block, depending on the status. */
3872 static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
3874 struct l2cap_conn *conn = hcon->l2cap_data;
3875 struct l2cap_chan *chan;
3880 BT_DBG("conn %p", conn);
3882 read_lock(&conn->chan_lock);
3884 list_for_each_entry(chan, &conn->chan_l, list) {
3885 struct sock *sk = chan->sk;
/* Channel already waiting on a connect in flight -- handled by the
 * invisible branch around line 3890. */
3889 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_CONNECT_PEND) {
3894 if (!status && (sk->sk_state == BT_CONNECTED ||
3895 sk->sk_state == BT_CONFIG)) {
3896 l2cap_check_encryption(sk, encrypt);
3901 if (sk->sk_state == BT_CONNECT) {
/* Security done: now issue the deferred L2CAP Connection Request. */
3903 struct l2cap_conn_req req;
3904 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
3905 req.psm = l2cap_pi(sk)->psm;
3907 chan->ident = l2cap_get_ident(conn);
3908 l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
3910 l2cap_send_cmd(conn, chan->ident,
3911 L2CAP_CONN_REQ, sizeof(req), &req);
3913 l2cap_sock_clear_timer(sk);
3914 l2cap_sock_set_timer(sk, HZ / 10);
3916 } else if (sk->sk_state == BT_CONNECT2) {
3917 struct l2cap_conn_rsp rsp;
3921 sk->sk_state = BT_CONFIG;
3922 result = L2CAP_CR_SUCCESS;
3924 sk->sk_state = BT_DISCONN;
3925 l2cap_sock_set_timer(sk, HZ / 10);
3926 result = L2CAP_CR_SEC_BLOCK;
3929 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
3930 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
3931 rsp.result = cpu_to_le16(result);
3932 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
3933 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
3940 read_unlock(&conn->chan_lock);
/* HCI callback delivering ACL data.  Reassembles fragmented L2CAP frames:
 * a start fragment (!ACL_CONT) validates the basic header, either hands a
 * complete frame straight to l2cap_recv_frame or allocates conn->rx_skb
 * sized for the full frame; continuation fragments are appended until
 * rx_len reaches zero, then the assembled frame is dispatched.  Length
 * inconsistencies mark the connection unreliable (ECOMM). */
3945 static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
3947 struct l2cap_conn *conn = hcon->l2cap_data;
3950 conn = l2cap_conn_add(hcon, 0);
3955 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
3957 if (!(flags & ACL_CONT)) {
3958 struct l2cap_hdr *hdr;
3959 struct l2cap_chan *chan;
/* A new start fragment while reassembly is in progress: discard the
 * half-built frame. */
3964 BT_ERR("Unexpected start frame (len %d)", skb->len);
3965 kfree_skb(conn->rx_skb);
3966 conn->rx_skb = NULL;
3968 l2cap_conn_unreliable(conn, ECOMM);
3971 /* Start fragment always begin with Basic L2CAP header */
3972 if (skb->len < L2CAP_HDR_SIZE) {
3973 BT_ERR("Frame is too short (len %d)", skb->len);
3974 l2cap_conn_unreliable(conn, ECOMM);
3978 hdr = (struct l2cap_hdr *) skb->data;
3979 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
3980 cid = __le16_to_cpu(hdr->cid);
3982 if (len == skb->len) {
3983 /* Complete frame received */
3984 l2cap_recv_frame(conn, skb);
3988 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
3990 if (skb->len > len) {
3991 BT_ERR("Frame is too long (len %d, expected len %d)",
3993 l2cap_conn_unreliable(conn, ECOMM);
/* Pre-check the target channel's MTU so we don't buffer a frame that
 * would be dropped anyway. */
3997 chan = l2cap_get_chan_by_scid(conn, cid);
3999 if (chan && chan->sk) {
4000 struct sock *sk = chan->sk;
4002 if (l2cap_pi(sk)->imtu < len - L2CAP_HDR_SIZE) {
4003 BT_ERR("Frame exceeding recv MTU (len %d, "
4005 l2cap_pi(sk)->imtu);
4007 l2cap_conn_unreliable(conn, ECOMM);
4013 /* Allocate skb for the complete frame (with header) */
4014 conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
4018 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
4020 conn->rx_len = len - skb->len;
4022 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
/* Continuation with no reassembly in progress is a protocol error. */
4024 if (!conn->rx_len) {
4025 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
4026 l2cap_conn_unreliable(conn, ECOMM);
4030 if (skb->len > conn->rx_len) {
4031 BT_ERR("Fragment is too long (len %d, expected %d)",
4032 skb->len, conn->rx_len);
4033 kfree_skb(conn->rx_skb);
4034 conn->rx_skb = NULL;
4036 l2cap_conn_unreliable(conn, ECOMM);
4040 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
4042 conn->rx_len -= skb->len;
4044 if (!conn->rx_len) {
4045 /* Complete frame received */
4046 l2cap_recv_frame(conn, conn->rx_skb);
4047 conn->rx_skb = NULL;
/* seq_file show callback: dump one line per L2CAP socket (addresses,
 * state, PSM, MTUs, security level) under the socket-list read lock. */
4056 static int l2cap_debugfs_show(struct seq_file *f, void *p)
4059 struct hlist_node *node;
4061 read_lock_bh(&l2cap_sk_list.lock);
4063 sk_for_each(sk, node, &l2cap_sk_list.head) {
4064 struct l2cap_pinfo *pi = l2cap_pi(sk);
4066 seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
4067 batostr(&bt_sk(sk)->src),
4068 batostr(&bt_sk(sk)->dst),
4069 sk->sk_state, __le16_to_cpu(pi->psm),
4071 pi->imtu, pi->omtu, pi->sec_level,
4075 read_unlock_bh(&l2cap_sk_list.lock);
/* debugfs open: bind the single-record seq_file show routine. */
4080 static int l2cap_debugfs_open(struct inode *inode, struct file *file)
4082 return single_open(file, l2cap_debugfs_show, inode->i_private);
/* File operations for the /sys/kernel/debug/bluetooth l2cap entry. */
4085 static const struct file_operations l2cap_debugfs_fops = {
4086 .open = l2cap_debugfs_open,
4088 .llseek = seq_lseek,
4089 .release = single_release,
4092 static struct dentry *l2cap_debugfs;
/* Registration descriptor binding L2CAP's callbacks into the HCI core. */
4094 static struct hci_proto l2cap_hci_proto = {
4096 .id = HCI_PROTO_L2CAP,
4097 .connect_ind = l2cap_connect_ind,
4098 .connect_cfm = l2cap_connect_cfm,
4099 .disconn_ind = l2cap_disconn_ind,
4100 .disconn_cfm = l2cap_disconn_cfm,
4101 .security_cfm = l2cap_security_cfm,
4102 .recv_acldata = l2cap_recv_acldata
/* Module init: register L2CAP sockets, create the single-threaded
 * busy-state workqueue, hook into HCI, and expose the debugfs file.
 * Error paths unwind the steps already taken (workqueue destroyed,
 * sockets cleaned up). */
4105 int __init l2cap_init(void)
4109 err = l2cap_init_sockets();
4113 _busy_wq = create_singlethread_workqueue("l2cap");
4119 err = hci_register_proto(&l2cap_hci_proto);
4121 BT_ERR("L2CAP protocol registration failed");
4122 bt_sock_unregister(BTPROTO_L2CAP);
/* debugfs failure is non-fatal: only log it. */
4127 l2cap_debugfs = debugfs_create_file("l2cap", 0444,
4128 bt_debugfs, NULL, &l2cap_debugfs_fops);
4130 BT_ERR("Failed to create L2CAP debug file");
4136 destroy_workqueue(_busy_wq);
4137 l2cap_cleanup_sockets();
/* Module exit: tear down in reverse of l2cap_init -- remove debugfs,
 * flush and destroy the busy workqueue, unregister from HCI, and clean
 * up the socket layer. */
4141 void l2cap_exit(void)
4143 debugfs_remove(l2cap_debugfs);
/* Flush before destroy so no busy_work runs against freed state. */
4145 flush_workqueue(_busy_wq);
4146 destroy_workqueue(_busy_wq);
4148 if (hci_unregister_proto(&l2cap_hci_proto) < 0)
4149 BT_ERR("L2CAP protocol unregistration failed");
4151 l2cap_cleanup_sockets();
/* Runtime knob (writable via sysfs, 0644) to turn off ERTM support. */
4154 module_param(disable_ertm, bool, 0644);
4155 MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");