2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
7 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License version 2 as
11 published by the Free Software Foundation;
13 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
14 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
16 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
17 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
18 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
19 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
20 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
22 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
23 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
24 SOFTWARE IS DISCLAIMED.
27 /* Bluetooth L2CAP core. */
29 #include <linux/module.h>
31 #include <linux/types.h>
32 #include <linux/capability.h>
33 #include <linux/errno.h>
34 #include <linux/kernel.h>
35 #include <linux/sched.h>
36 #include <linux/slab.h>
37 #include <linux/poll.h>
38 #include <linux/fcntl.h>
39 #include <linux/init.h>
40 #include <linux/interrupt.h>
41 #include <linux/socket.h>
42 #include <linux/skbuff.h>
43 #include <linux/list.h>
44 #include <linux/device.h>
45 #include <linux/debugfs.h>
46 #include <linux/seq_file.h>
47 #include <linux/uaccess.h>
48 #include <linux/crc16.h>
51 #include <asm/system.h>
52 #include <asm/unaligned.h>
54 #include <net/bluetooth/bluetooth.h>
55 #include <net/bluetooth/hci_core.h>
56 #include <net/bluetooth/l2cap.h>
/* Module-wide state and forward declarations for the L2CAP core. */
/* Locally advertised L2CAP feature mask (fixed channels supported). */
60 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
/* Fixed-channel bitmap sent in information responses.
 * NOTE(review): meaning of the 0x02 bit is inferred from context
 * (signalling channel) — confirm against the Core spec. */
61 static u8 l2cap_fixed_chan[8] = { 0x02, };
/* Workqueue used to defer "local busy" processing (l2cap_busy_work). */
63 static struct workqueue_struct *_busy_wq;
/* Global list of all L2CAP sockets, guarded by its own rwlock. */
65 struct bt_sock_list l2cap_sk_list = {
66 .lock = __RW_LOCK_UNLOCKED(l2cap_sk_list.lock)
/* Forward declarations for helpers defined later in this file. */
69 static void l2cap_busy_work(struct work_struct *work);
71 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
72 u8 code, u8 ident, u16 dlen, void *data);
73 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
75 static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb);
77 /* ---- L2CAP channels ---- */
/* NOTE(review): this excerpt has lines elided (braces, returns, breaks);
 * the comments below describe only the statements that are visible. */
/* Find the channel on @conn whose destination CID matches @cid.
 * Caller must hold conn->chan_lock. */
78 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn, u16 cid)
82 list_for_each_entry(c, &conn->chan_l, list) {
83 struct sock *s = c->sk;
84 if (l2cap_pi(s)->dcid == cid)
/* Find the channel on @conn whose source CID matches @cid.
 * Caller must hold conn->chan_lock. */
91 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
95 list_for_each_entry(c, &conn->chan_l, list) {
96 struct sock *s = c->sk;
97 if (l2cap_pi(s)->scid == cid)
103 /* Find channel with given SCID.
104 * Returns locked socket */
/* Locking wrapper around __l2cap_get_chan_by_scid(). */
105 static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
107 struct l2cap_chan *c;
109 read_lock(&conn->chan_lock);
110 c = __l2cap_get_chan_by_scid(conn, cid);
113 read_unlock(&conn->chan_lock);
/* Find the channel whose pending signalling-command identifier is
 * @ident.  Caller must hold conn->chan_lock. */
117 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
119 struct l2cap_chan *c;
121 list_for_each_entry(c, &conn->chan_l, list) {
122 if (c->ident == ident)
/* Locking wrapper around __l2cap_get_chan_by_ident(). */
128 static inline struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
130 struct l2cap_chan *c;
132 read_lock(&conn->chan_lock);
133 c = __l2cap_get_chan_by_ident(conn, ident);
136 read_unlock(&conn->chan_lock);
/* Pick the first unused dynamic CID in [L2CAP_CID_DYN_START,
 * L2CAP_CID_DYN_END) for a new outgoing channel on @conn. */
140 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
142 u16 cid = L2CAP_CID_DYN_START;
144 for (; cid < L2CAP_CID_DYN_END; cid++) {
145 if (!__l2cap_get_chan_by_scid(conn, cid))
/* Allocate a new l2cap_chan bound to socket @sk (GFP_ATOMIC context). */
152 struct l2cap_chan *l2cap_chan_alloc(struct sock *sk)
154 struct l2cap_chan *chan;
156 chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
/* Attach @chan to @conn: choose CIDs/MTU according to the socket type
 * and link type, then link it onto the connection's channel list.
 * Caller must hold conn->chan_lock for writing (see l2cap_chan_add). */
165 static void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
167 struct sock *sk = chan->sk;
169 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
170 l2cap_pi(sk)->psm, l2cap_pi(sk)->dcid);
/* Default disconnect reason; 0x13 is presumably the HCI "remote user
 * terminated connection" status — confirm against the spec. */
172 conn->disc_reason = 0x13;
174 l2cap_pi(sk)->conn = conn;
176 if (sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM) {
/* LE links use the fixed LE data channel CID on both sides. */
177 if (conn->hcon->type == LE_LINK) {
179 l2cap_pi(sk)->omtu = L2CAP_LE_DEFAULT_MTU;
180 l2cap_pi(sk)->scid = L2CAP_CID_LE_DATA;
181 l2cap_pi(sk)->dcid = L2CAP_CID_LE_DATA;
183 /* Alloc CID for connection-oriented socket */
184 l2cap_pi(sk)->scid = l2cap_alloc_cid(conn);
185 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
187 } else if (sk->sk_type == SOCK_DGRAM) {
188 /* Connectionless socket */
189 l2cap_pi(sk)->scid = L2CAP_CID_CONN_LESS;
190 l2cap_pi(sk)->dcid = L2CAP_CID_CONN_LESS;
191 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
193 /* Raw socket can send/recv signalling messages only */
194 l2cap_pi(sk)->scid = L2CAP_CID_SIGNALING;
195 l2cap_pi(sk)->dcid = L2CAP_CID_SIGNALING;
196 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
201 list_add(&chan->list, &conn->chan_l);
205 * Must be called on the locked socket. */
/* Detach @chan from its connection and tear it down: stop the socket
 * timer, unlink from the channel list, drop the HCI reference, mark
 * the socket closed/zapped, and for ERTM channels purge queued frames
 * and SREJ bookkeeping.  @err: error propagated to the socket. */
206 void l2cap_chan_del(struct l2cap_chan *chan, int err)
208 struct sock *sk = chan->sk;
209 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
210 struct sock *parent = bt_sk(sk)->parent;
212 l2cap_sock_clear_timer(sk);
214 BT_DBG("chan %p, conn %p, err %d", chan, conn, err);
217 /* Delete from channel list */
218 write_lock_bh(&conn->chan_lock);
219 list_del(&chan->list);
220 write_unlock_bh(&conn->chan_lock);
223 l2cap_pi(sk)->conn = NULL;
/* Release the reference on the underlying HCI connection. */
224 hci_conn_put(conn->hcon);
227 sk->sk_state = BT_CLOSED;
228 sock_set_flag(sk, SOCK_ZAPPED);
/* Child of a listening socket: unlink it and wake the listener. */
234 bt_accept_unlink(sk);
235 parent->sk_data_ready(parent, 0);
237 sk->sk_state_change(sk);
/* tx_q and ERTM state only become live once configuration finished
 * in both directions; otherwise there is nothing more to purge. */
239 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE &&
240 l2cap_pi(sk)->conf_state & L2CAP_CONF_INPUT_DONE))
243 skb_queue_purge(&chan->tx_q);
245 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
246 struct srej_list *l, *tmp;
248 del_timer(&chan->retrans_timer);
249 del_timer(&chan->monitor_timer);
250 del_timer(&chan->ack_timer);
252 skb_queue_purge(&chan->srej_q);
253 skb_queue_purge(&chan->busy_q);
/* Free any outstanding SREJ list entries (body elided here). */
255 list_for_each_entry_safe(l, tmp, &chan->srej_l, list) {
/* Map the socket type / PSM / requested security level to an HCI
 * authentication requirement (bonding level + MITM protection). */
265 static inline u8 l2cap_get_auth_type(struct sock *sk)
267 if (sk->sk_type == SOCK_RAW) {
268 switch (l2cap_pi(sk)->sec_level) {
269 case BT_SECURITY_HIGH:
270 return HCI_AT_DEDICATED_BONDING_MITM;
271 case BT_SECURITY_MEDIUM:
272 return HCI_AT_DEDICATED_BONDING;
274 return HCI_AT_NO_BONDING;
/* PSM 0x0001 is SDP: it gets its own reduced security floor. */
276 } else if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
277 if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW)
278 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
280 if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
281 return HCI_AT_NO_BONDING_MITM;
283 return HCI_AT_NO_BONDING;
/* All other connection-oriented sockets: general bonding. */
285 switch (l2cap_pi(sk)->sec_level) {
286 case BT_SECURITY_HIGH:
287 return HCI_AT_GENERAL_BONDING_MITM;
288 case BT_SECURITY_MEDIUM:
289 return HCI_AT_GENERAL_BONDING;
291 return HCI_AT_NO_BONDING;
296 /* Service level security */
/* Ask the HCI layer to enforce this channel's security level on the
 * underlying link; returns hci_conn_security()'s result. */
297 static inline int l2cap_check_security(struct sock *sk)
299 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
302 auth_type = l2cap_get_auth_type(sk);
304 return hci_conn_security(conn->hcon, l2cap_pi(sk)->sec_level,
/* Allocate the next signalling-command identifier for @conn under the
 * connection spinlock.  Kernel-owned identifiers wrap within 1..128
 * (identifier 0 is never used). */
308 u8 l2cap_get_ident(struct l2cap_conn *conn)
312 /* Get next available identificator.
313 * 1 - 128 are used by kernel.
314 * 129 - 199 are reserved.
315 * 200 - 254 are used by utilities like l2ping, etc.
318 spin_lock_bh(&conn->lock);
320 if (++conn->tx_ident > 128)
325 spin_unlock_bh(&conn->lock);
/* Build a signalling command skb and transmit it on the ACL link.
 * A non-flushable packet boundary flag is used when the controller
 * supports it. */
330 void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
332 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
335 BT_DBG("code 0x%2.2x", code);
340 if (lmp_no_flush_capable(conn->hcon->hdev))
341 flags = ACL_START_NO_FLUSH;
345 hci_send_acl(conn->hcon, skb, flags);
/* Build and send an ERTM/streaming S-frame carrying @control on @chan.
 * Merges in any pending Final/Poll bits and, when FCS is enabled,
 * appends a CRC-16 over the frame. */
348 static inline void l2cap_send_sframe(struct l2cap_chan *chan, u16 control)
351 struct l2cap_hdr *lh;
352 struct l2cap_pinfo *pi = l2cap_pi(chan->sk);
353 struct l2cap_conn *conn = pi->conn;
/* l2cap_pinfo embeds the sock, so the cast recovers the socket. */
354 struct sock *sk = (struct sock *)pi;
355 int count, hlen = L2CAP_HDR_SIZE + 2;
358 if (sk->sk_state != BT_CONNECTED)
361 if (pi->fcs == L2CAP_FCS_CRC16)
364 BT_DBG("chan %p, control 0x%2.2x", chan, control);
366 count = min_t(unsigned int, conn->mtu, hlen);
367 control |= L2CAP_CTRL_FRAME_TYPE;
/* Piggy-back a pending Final bit, then clear the request. */
369 if (chan->conn_state & L2CAP_CONN_SEND_FBIT) {
370 control |= L2CAP_CTRL_FINAL;
371 chan->conn_state &= ~L2CAP_CONN_SEND_FBIT;
/* Likewise for a pending Poll bit. */
374 if (chan->conn_state & L2CAP_CONN_SEND_PBIT) {
375 control |= L2CAP_CTRL_POLL;
376 chan->conn_state &= ~L2CAP_CONN_SEND_PBIT;
379 skb = bt_skb_alloc(count, GFP_ATOMIC);
383 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
384 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
385 lh->cid = cpu_to_le16(pi->dcid);
386 put_unaligned_le16(control, skb_put(skb, 2));
388 if (pi->fcs == L2CAP_FCS_CRC16) {
/* CRC covers everything except the trailing 2-byte FCS itself. */
389 u16 fcs = crc16(0, (u8 *)lh, count - 2);
390 put_unaligned_le16(fcs, skb_put(skb, 2));
393 if (lmp_no_flush_capable(conn->hcon->hdev))
394 flags = ACL_START_NO_FLUSH;
398 hci_send_acl(pi->conn->hcon, skb, flags);
/* Send RR (receiver ready) or RNR (receiver not ready) depending on
 * the local-busy state; always carries our current ReqSeq. */
401 static inline void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, u16 control)
403 if (chan->conn_state & L2CAP_CONN_LOCAL_BUSY) {
404 control |= L2CAP_SUPER_RCV_NOT_READY;
405 chan->conn_state |= L2CAP_CONN_RNR_SENT;
407 control |= L2CAP_SUPER_RCV_READY;
409 control |= chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
411 l2cap_send_sframe(chan, control);
/* True when no connect request is outstanding for this socket. */
414 static inline int __l2cap_no_conn_pending(struct sock *sk)
416 return !(l2cap_pi(sk)->conf_state & L2CAP_CONF_CONNECT_PEND);
/* Kick off channel establishment: once the remote feature mask is
 * known, send an L2CAP connect request; otherwise request the feature
 * mask first and arm the info timer. */
419 static void l2cap_do_start(struct l2cap_chan *chan)
421 struct sock *sk = chan->sk;
422 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
424 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
/* Feature query still in flight: wait for its answer. */
425 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
428 if (l2cap_check_security(sk) && __l2cap_no_conn_pending(sk)) {
429 struct l2cap_conn_req req;
430 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
431 req.psm = l2cap_pi(sk)->psm;
433 chan->ident = l2cap_get_ident(conn);
434 l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
436 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ,
/* Feature mask not requested yet: ask for it before connecting. */
440 struct l2cap_info_req req;
441 req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
443 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
444 conn->info_ident = l2cap_get_ident(conn);
446 mod_timer(&conn->info_timer, jiffies +
447 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
449 l2cap_send_cmd(conn, conn->info_ident,
450 L2CAP_INFO_REQ, sizeof(req), &req);
/* Is @mode usable given both the remote (@feat_mask) and local feature
 * masks?  The default/basic-mode path is elided in this excerpt. */
454 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
456 u32 local_feat_mask = l2cap_feat_mask;
458 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
461 case L2CAP_MODE_ERTM:
462 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
463 case L2CAP_MODE_STREAMING:
464 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
/* Send a disconnect request for @chan, stopping the ERTM timers first,
 * and move the socket to BT_DISCONN.  The use of @err (socket error
 * reporting) is elided in this excerpt. */
470 void l2cap_send_disconn_req(struct l2cap_conn *conn, struct l2cap_chan *chan, int err)
473 struct l2cap_disconn_req req;
480 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
481 del_timer(&chan->retrans_timer);
482 del_timer(&chan->monitor_timer);
483 del_timer(&chan->ack_timer);
486 req.dcid = cpu_to_le16(l2cap_pi(sk)->dcid);
487 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
488 l2cap_send_cmd(conn, l2cap_get_ident(conn),
489 L2CAP_DISCONN_REQ, sizeof(req), &req);
491 sk->sk_state = BT_DISCONN;
495 /* ---- L2CAP connections ---- */
/* Walk every channel on @conn and advance its connection state
 * machine: send pending connect requests, answer held connect
 * responses, and fire the first configure request when appropriate. */
496 static void l2cap_conn_start(struct l2cap_conn *conn)
498 struct l2cap_chan *chan, *tmp;
500 BT_DBG("conn %p", conn);
502 read_lock(&conn->chan_lock);
504 list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
505 struct sock *sk = chan->sk;
/* Only connection-oriented sockets take part in the handshake. */
509 if (sk->sk_type != SOCK_SEQPACKET &&
510 sk->sk_type != SOCK_STREAM) {
515 if (sk->sk_state == BT_CONNECT) {
516 struct l2cap_conn_req req;
518 if (!l2cap_check_security(sk) ||
519 !__l2cap_no_conn_pending(sk)) {
/* Mandatory mode unsupported by the peer: close the socket. */
524 if (!l2cap_mode_supported(l2cap_pi(sk)->mode,
526 && l2cap_pi(sk)->conf_state &
527 L2CAP_CONF_STATE2_DEVICE) {
528 /* __l2cap_sock_close() calls list_del(chan)
529 * so release the lock */
530 read_unlock_bh(&conn->chan_lock);
531 __l2cap_sock_close(sk, ECONNRESET);
532 read_lock_bh(&conn->chan_lock);
537 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
538 req.psm = l2cap_pi(sk)->psm;
540 chan->ident = l2cap_get_ident(conn);
541 l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
543 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ,
546 } else if (sk->sk_state == BT_CONNECT2) {
547 struct l2cap_conn_rsp rsp;
/* Note the swap: our dcid is the peer's scid and vice versa. */
549 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
550 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
552 if (l2cap_check_security(sk)) {
/* Defer-setup: answer "pending / authorization" and wake the
 * listening parent so userspace can decide. */
553 if (bt_sk(sk)->defer_setup) {
554 struct sock *parent = bt_sk(sk)->parent;
555 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
556 rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
557 parent->sk_data_ready(parent, 0);
560 sk->sk_state = BT_CONFIG;
561 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
562 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
/* Security still being established: answer "pending". */
565 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
566 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
569 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
/* Start configuration only once, and only after success. */
572 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT ||
573 rsp.result != L2CAP_CR_SUCCESS) {
578 l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
579 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
580 l2cap_build_conf_req(chan, buf), buf);
581 chan->num_conf_req++;
587 read_unlock(&conn->chan_lock);
590 /* Find socket with cid and source bdaddr.
591 * Returns closest match, locked.
/* Search the global socket list for a socket in @state bound to fixed
 * channel @cid.  An exact source-address match wins immediately; a
 * socket bound to BDADDR_ANY is kept as the fallback (sk1). */
593 static struct sock *l2cap_get_sock_by_scid(int state, __le16 cid, bdaddr_t *src)
595 struct sock *sk = NULL, *sk1 = NULL;
596 struct hlist_node *node;
598 read_lock(&l2cap_sk_list.lock);
600 sk_for_each(sk, node, &l2cap_sk_list.head) {
601 if (state && sk->sk_state != state)
604 if (l2cap_pi(sk)->scid == cid) {
/* Exact match on the source address. */
606 if (!bacmp(&bt_sk(sk)->src, src))
/* Remember the wildcard-bound candidate. */
610 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
615 read_unlock(&l2cap_sk_list.lock);
/* node != NULL means the loop broke early on an exact match. */
617 return node ? sk : sk1;
/* Incoming LE connection is up: clone a child socket off the LE data
 * channel listener, attach a channel to it and notify the parent. */
620 static void l2cap_le_conn_ready(struct l2cap_conn *conn)
622 struct sock *parent, *sk;
623 struct l2cap_chan *chan;
627 /* Check if we have socket listening on cid */
628 parent = l2cap_get_sock_by_scid(BT_LISTEN, L2CAP_CID_LE_DATA,
633 bh_lock_sock(parent);
635 /* Check for backlog size */
636 if (sk_acceptq_is_full(parent)) {
637 BT_DBG("backlog full %d", parent->sk_ack_backlog);
641 sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP, GFP_ATOMIC);
645 chan = l2cap_chan_alloc(sk);
651 l2cap_pi(sk)->chan = chan;
653 write_lock_bh(&conn->chan_lock);
655 hci_conn_hold(conn->hcon);
657 l2cap_sock_init(sk, parent);
659 bacpy(&bt_sk(sk)->src, conn->src);
660 bacpy(&bt_sk(sk)->dst, conn->dst);
662 bt_accept_enqueue(parent, sk);
664 __l2cap_chan_add(conn, chan);
666 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
668 sk->sk_state = BT_CONNECTED;
669 parent->sk_data_ready(parent, 0);
671 write_unlock_bh(&conn->chan_lock);
674 bh_unlock_sock(parent);
/* The underlying HCI link came up: notify or advance every channel. */
677 static void l2cap_conn_ready(struct l2cap_conn *conn)
679 struct l2cap_chan *chan;
681 BT_DBG("conn %p", conn);
/* Incoming LE links get their child socket created first. */
683 if (!conn->hcon->out && conn->hcon->type == LE_LINK)
684 l2cap_le_conn_ready(conn);
686 read_lock(&conn->chan_lock);
688 list_for_each_entry(chan, &conn->chan_l, list) {
689 struct sock *sk = chan->sk;
/* LE data channels are connected as soon as the link is. */
693 if (conn->hcon->type == LE_LINK) {
694 l2cap_sock_clear_timer(sk);
695 sk->sk_state = BT_CONNECTED;
696 sk->sk_state_change(sk);
/* Raw/dgram sockets need no further L2CAP handshake either. */
699 if (sk->sk_type != SOCK_SEQPACKET &&
700 sk->sk_type != SOCK_STREAM) {
701 l2cap_sock_clear_timer(sk);
702 sk->sk_state = BT_CONNECTED;
703 sk->sk_state_change(sk);
704 } else if (sk->sk_state == BT_CONNECT)
705 l2cap_do_start(chan);
710 read_unlock(&conn->chan_lock);
713 /* Notify sockets that we cannot guaranty reliability anymore */
714 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
716 struct l2cap_chan *chan;
718 BT_DBG("conn %p", conn);
720 read_lock(&conn->chan_lock);
722 list_for_each_entry(chan, &conn->chan_l, list) {
723 struct sock *sk = chan->sk;
/* Only sockets that demanded reliability are notified (the error
 * assignment is elided in this excerpt). */
725 if (l2cap_pi(sk)->force_reliable)
729 read_unlock(&conn->chan_lock);
/* Info-request timer expired: stop waiting for the remote feature
 * mask and proceed with connection setup anyway. */
732 static void l2cap_info_timeout(unsigned long arg)
734 struct l2cap_conn *conn = (void *) arg;
736 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
737 conn->info_ident = 0;
739 l2cap_conn_start(conn);
/* Create (or return the already attached) l2cap_conn for @hcon. */
742 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
744 struct l2cap_conn *conn = hcon->l2cap_data;
749 conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
753 hcon->l2cap_data = conn;
756 BT_DBG("hcon %p conn %p", hcon, conn);
/* LE links use the controller's dedicated LE MTU when advertised. */
758 if (hcon->hdev->le_mtu && hcon->type == LE_LINK)
759 conn->mtu = hcon->hdev->le_mtu;
761 conn->mtu = hcon->hdev->acl_mtu;
763 conn->src = &hcon->hdev->bdaddr;
764 conn->dst = &hcon->dst;
768 spin_lock_init(&conn->lock);
769 rwlock_init(&conn->chan_lock);
771 INIT_LIST_HEAD(&conn->chan_l);
/* LE has no information request procedure, so no info timer. */
773 if (hcon->type != LE_LINK)
774 setup_timer(&conn->info_timer, l2cap_info_timeout,
775 (unsigned long) conn);
777 conn->disc_reason = 0x13;
/* Tear down the l2cap_conn attached to @hcon: delete every channel
 * with error @err and release connection-level resources. */
782 static void l2cap_conn_del(struct hci_conn *hcon, int err)
784 struct l2cap_conn *conn = hcon->l2cap_data;
785 struct l2cap_chan *chan, *l;
791 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
/* Drop a partially reassembled incoming frame, if any. */
793 kfree_skb(conn->rx_skb);
796 list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
799 l2cap_chan_del(chan, err);
804 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
805 del_timer_sync(&conn->info_timer);
807 hcon->l2cap_data = NULL;
/* Locked wrapper around __l2cap_chan_add(). */
811 static inline void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
813 write_lock_bh(&conn->chan_lock);
814 __l2cap_chan_add(conn, chan);
815 write_unlock_bh(&conn->chan_lock);
818 /* ---- Socket interface ---- */
820 /* Find socket with psm and source bdaddr.
821 * Returns closest match.
/* Same closest-match scheme as l2cap_get_sock_by_scid(), keyed on
 * PSM: exact source address wins, BDADDR_ANY socket is the fallback. */
823 static struct sock *l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
825 struct sock *sk = NULL, *sk1 = NULL;
826 struct hlist_node *node;
828 read_lock(&l2cap_sk_list.lock);
830 sk_for_each(sk, node, &l2cap_sk_list.head) {
831 if (state && sk->sk_state != state)
834 if (l2cap_pi(sk)->psm == psm) {
836 if (!bacmp(&bt_sk(sk)->src, src))
840 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
845 read_unlock(&l2cap_sk_list.lock);
847 return node ? sk : sk1;
/* Resolve a route to the destination, create or reuse the HCI link
 * (LE vs. ACL chosen by the destination CID), attach the channel, and
 * start the L2CAP handshake immediately if the link is already up. */
850 int l2cap_do_connect(struct l2cap_chan *chan)
852 struct sock *sk = chan->sk;
853 bdaddr_t *src = &bt_sk(sk)->src;
854 bdaddr_t *dst = &bt_sk(sk)->dst;
855 struct l2cap_conn *conn;
856 struct hci_conn *hcon;
857 struct hci_dev *hdev;
861 BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst),
864 hdev = hci_get_route(dst, src);
866 return -EHOSTUNREACH;
868 hci_dev_lock_bh(hdev);
870 auth_type = l2cap_get_auth_type(sk);
/* The LE data CID selects an LE link; everything else uses ACL. */
872 if (l2cap_pi(sk)->dcid == L2CAP_CID_LE_DATA)
873 hcon = hci_connect(hdev, LE_LINK, dst,
874 l2cap_pi(sk)->sec_level, auth_type);
876 hcon = hci_connect(hdev, ACL_LINK, dst,
877 l2cap_pi(sk)->sec_level, auth_type);
884 conn = l2cap_conn_add(hcon, 0);
891 /* Update source addr of the socket */
892 bacpy(src, conn->src);
894 l2cap_chan_add(conn, chan);
896 sk->sk_state = BT_CONNECT;
897 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
/* Link already connected: raw/dgram sockets finish right away,
 * connection-oriented ones proceed to l2cap_do_start(). */
899 if (hcon->state == BT_CONNECTED) {
900 if (sk->sk_type != SOCK_SEQPACKET &&
901 sk->sk_type != SOCK_STREAM) {
902 l2cap_sock_clear_timer(sk);
903 if (l2cap_check_security(sk))
904 sk->sk_state = BT_CONNECTED;
906 l2cap_do_start(chan);
912 hci_dev_unlock_bh(hdev);
/* Sleep until every outstanding ERTM frame is acknowledged, the
 * socket errors out, or a signal arrives.  Returns 0 or -errno. */
917 int __l2cap_wait_ack(struct sock *sk)
919 DECLARE_WAITQUEUE(wait, current);
923 add_wait_queue(sk_sleep(sk), &wait);
924 while ((l2cap_pi(sk)->chan->unacked_frames > 0 && l2cap_pi(sk)->conn)) {
925 set_current_state(TASK_INTERRUPTIBLE);
930 if (signal_pending(current)) {
931 err = sock_intr_errno(timeo);
936 timeo = schedule_timeout(timeo);
939 err = sock_error(sk);
943 set_current_state(TASK_RUNNING);
944 remove_wait_queue(sk_sleep(sk), &wait);
/* ERTM monitor timer: the peer stayed silent.  Give up and disconnect
 * after remote_max_tx attempts, otherwise re-arm and poll again. */
948 static void l2cap_monitor_timeout(unsigned long arg)
950 struct l2cap_chan *chan = (void *) arg;
951 struct sock *sk = chan->sk;
953 BT_DBG("chan %p", chan);
956 if (chan->retry_count >= chan->remote_max_tx) {
957 l2cap_send_disconn_req(l2cap_pi(sk)->conn, chan, ECONNABORTED);
963 __mod_monitor_timer();
965 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
/* ERTM retransmission timer: enter the WAIT_F state, switch to the
 * monitor timer and poll the peer. */
969 static void l2cap_retrans_timeout(unsigned long arg)
971 struct l2cap_chan *chan = (void *) arg;
972 struct sock *sk = chan->sk;
974 BT_DBG("chan %p", chan);
977 chan->retry_count = 1;
978 __mod_monitor_timer();
980 chan->conn_state |= L2CAP_CONN_WAIT_F;
982 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
/* Drop acknowledged frames from the head of tx_q up to
 * expected_ack_seq; stop the retransmission timer once nothing
 * remains unacknowledged. */
986 static void l2cap_drop_acked_frames(struct l2cap_chan *chan)
990 while ((skb = skb_peek(&chan->tx_q)) &&
991 chan->unacked_frames) {
992 if (bt_cb(skb)->tx_seq == chan->expected_ack_seq)
995 skb = skb_dequeue(&chan->tx_q);
998 chan->unacked_frames--;
1001 if (!chan->unacked_frames)
1002 del_timer(&chan->retrans_timer);
/* Push one fully built L2CAP frame out on the ACL link, honouring the
 * socket's flushable setting when the controller supports it. */
1005 void l2cap_do_send(struct sock *sk, struct sk_buff *skb)
1007 struct l2cap_pinfo *pi = l2cap_pi(sk);
1008 struct hci_conn *hcon = pi->conn->hcon;
1011 BT_DBG("sk %p, skb %p len %d", sk, skb, skb->len);
1013 if (!pi->flushable && lmp_no_flush_capable(hcon->hdev))
1014 flags = ACL_START_NO_FLUSH;
1018 hci_send_acl(hcon, skb, flags);
/* Streaming-mode transmit: stamp each queued I-frame with the next
 * TxSeq (mod 64), fill in the FCS when enabled, and send it. */
1021 void l2cap_streaming_send(struct l2cap_chan *chan)
1023 struct sock *sk = chan->sk;
1024 struct sk_buff *skb;
1025 struct l2cap_pinfo *pi = l2cap_pi(sk);
1028 while ((skb = skb_dequeue(&chan->tx_q))) {
1029 control = get_unaligned_le16(skb->data + L2CAP_HDR_SIZE);
1030 control |= chan->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT;
1031 put_unaligned_le16(control, skb->data + L2CAP_HDR_SIZE);
1033 if (pi->fcs == L2CAP_FCS_CRC16) {
/* FCS lives in the last two bytes, computed over the rest. */
1034 fcs = crc16(0, (u8 *)skb->data, skb->len - 2);
1035 put_unaligned_le16(fcs, skb->data + skb->len - 2);
1038 l2cap_do_send(sk, skb);
1040 chan->next_tx_seq = (chan->next_tx_seq + 1) % 64;
/* Retransmit the single I-frame with sequence number @tx_seq (SREJ
 * recovery).  A clone is sent so the original stays queued. */
1044 static void l2cap_retransmit_one_frame(struct l2cap_chan *chan, u8 tx_seq)
1046 struct sock *sk = chan->sk;
1047 struct l2cap_pinfo *pi = l2cap_pi(sk);
1048 struct sk_buff *skb, *tx_skb;
1051 skb = skb_peek(&chan->tx_q);
/* Walk tx_q looking for the frame with the requested TxSeq. */
1056 if (bt_cb(skb)->tx_seq == tx_seq)
1059 if (skb_queue_is_last(&chan->tx_q, skb))
1062 } while ((skb = skb_queue_next(&chan->tx_q, skb)));
/* Too many retransmissions of this frame: drop the channel. */
1064 if (chan->remote_max_tx &&
1065 bt_cb(skb)->retries == chan->remote_max_tx) {
1066 l2cap_send_disconn_req(pi->conn, chan, ECONNABORTED);
1070 tx_skb = skb_clone(skb, GFP_ATOMIC);
1071 bt_cb(skb)->retries++;
1072 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
/* Keep only the SAR bits; ReqSeq/TxSeq/Final are rebuilt below. */
1073 control &= L2CAP_CTRL_SAR;
1075 if (chan->conn_state & L2CAP_CONN_SEND_FBIT) {
1076 control |= L2CAP_CTRL_FINAL;
1077 chan->conn_state &= ~L2CAP_CONN_SEND_FBIT;
1080 control |= (chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1081 | (tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1083 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1085 if (pi->fcs == L2CAP_FCS_CRC16) {
1086 fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
1087 put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
1090 l2cap_do_send(sk, tx_skb);
/* ERTM transmit path: send queued I-frames while the transmit window
 * has room, cloning each frame and updating sequence/retry state.
 * (The return statement is elided in this excerpt.) */
1093 int l2cap_ertm_send(struct l2cap_chan *chan)
1095 struct sk_buff *skb, *tx_skb;
1096 struct sock *sk = chan->sk;
1097 struct l2cap_pinfo *pi = l2cap_pi(sk);
1101 if (sk->sk_state != BT_CONNECTED)
1104 while ((skb = chan->tx_send_head) && (!l2cap_tx_window_full(chan))) {
/* Frame exhausted its retransmission budget: drop the channel. */
1106 if (chan->remote_max_tx &&
1107 bt_cb(skb)->retries == chan->remote_max_tx) {
1108 l2cap_send_disconn_req(pi->conn, chan, ECONNABORTED);
1112 tx_skb = skb_clone(skb, GFP_ATOMIC);
1114 bt_cb(skb)->retries++;
1116 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
/* Keep only the SAR bits; ReqSeq/TxSeq/Final are rebuilt below. */
1117 control &= L2CAP_CTRL_SAR;
1119 if (chan->conn_state & L2CAP_CONN_SEND_FBIT) {
1120 control |= L2CAP_CTRL_FINAL;
1121 chan->conn_state &= ~L2CAP_CONN_SEND_FBIT;
1123 control |= (chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1124 | (chan->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1125 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1128 if (pi->fcs == L2CAP_FCS_CRC16) {
/* NOTE(review): the FCS is computed over and written through
 * skb->data while tx_skb is what gets sent; skb_clone() shares
 * the data buffer so both see the same bytes — confirm before
 * "fixing" this to use tx_skb->data. */
1129 fcs = crc16(0, (u8 *)skb->data, tx_skb->len - 2);
1130 put_unaligned_le16(fcs, skb->data + tx_skb->len - 2);
1133 l2cap_do_send(sk, tx_skb);
1135 __mod_retrans_timer();
1137 bt_cb(skb)->tx_seq = chan->next_tx_seq;
1138 chan->next_tx_seq = (chan->next_tx_seq + 1) % 64;
/* Count a frame as unacked only on its first transmission. */
1140 if (bt_cb(skb)->retries == 1)
1141 chan->unacked_frames++;
1143 chan->frames_sent++;
1145 if (skb_queue_is_last(&chan->tx_q, skb))
1146 chan->tx_send_head = NULL;
1148 chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);
/* Rewind the send pointer to the oldest unacknowledged frame and
 * resend the whole window (REJ recovery). */
1156 static int l2cap_retransmit_frames(struct l2cap_chan *chan)
1160 if (!skb_queue_empty(&chan->tx_q))
1161 chan->tx_send_head = chan->tx_q.next;
1163 chan->next_tx_seq = chan->expected_ack_seq;
1164 ret = l2cap_ertm_send(chan);
/* Acknowledge received data: send RNR when locally busy, otherwise
 * try to piggy-back the ack on pending I-frames, falling back to RR. */
1168 static void l2cap_send_ack(struct l2cap_chan *chan)
1172 control |= chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
1174 if (chan->conn_state & L2CAP_CONN_LOCAL_BUSY) {
1175 control |= L2CAP_SUPER_RCV_NOT_READY;
1176 chan->conn_state |= L2CAP_CONN_RNR_SENT;
1177 l2cap_send_sframe(chan, control);
/* I-frames carry the ReqSeq themselves; no S-frame is needed. */
1181 if (l2cap_ertm_send(chan) > 0)
1184 control |= L2CAP_SUPER_RCV_READY;
1185 l2cap_send_sframe(chan, control);
/* Send an SREJ S-frame carrying the Final bit for the last entry on
 * the channel's SREJ list. */
1188 static void l2cap_send_srejtail(struct l2cap_chan *chan)
1190 struct srej_list *tail;
1193 control = L2CAP_SUPER_SELECT_REJECT;
1194 control |= L2CAP_CTRL_FINAL;
1196 tail = list_entry((&chan->srej_l)->prev, struct srej_list, list);
1197 control |= tail->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
1199 l2cap_send_sframe(chan, control);
/* Copy @len bytes of user data from @msg into @skb, allocating
 * MTU-sized continuation fragments on skb's frag_list as needed.
 * @count: how much fits in the head skb. */
1202 static inline int l2cap_skbuff_fromiovec(struct sock *sk, struct msghdr *msg, int len, int count, struct sk_buff *skb)
1204 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1205 struct sk_buff **frag;
1208 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
1214 /* Continuation fragments (no L2CAP header) */
1215 frag = &skb_shinfo(skb)->frag_list;
1217 count = min_t(unsigned int, conn->mtu, len);
1219 *frag = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err);
1222 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
1228 frag = &(*frag)->next;
/* Build a connectionless PDU: L2CAP header + 2-byte PSM + payload. */
1234 struct sk_buff *l2cap_create_connless_pdu(struct sock *sk, struct msghdr *msg, size_t len)
1236 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1237 struct sk_buff *skb;
1238 int err, count, hlen = L2CAP_HDR_SIZE + 2;
1239 struct l2cap_hdr *lh;
1241 BT_DBG("sk %p len %d", sk, (int)len);
1243 count = min_t(unsigned int, (conn->mtu - hlen), len);
1244 skb = bt_skb_send_alloc(sk, count + hlen,
1245 msg->msg_flags & MSG_DONTWAIT, &err);
1247 return ERR_PTR(err);
1249 /* Create L2CAP header */
1250 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1251 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1252 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1253 put_unaligned_le16(l2cap_pi(sk)->psm, skb_put(skb, 2));
1255 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1256 if (unlikely(err < 0)) {
1258 return ERR_PTR(err);
/* Build a basic-mode PDU: plain L2CAP header + payload. */
1263 struct sk_buff *l2cap_create_basic_pdu(struct sock *sk, struct msghdr *msg, size_t len)
1265 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1266 struct sk_buff *skb;
1267 int err, count, hlen = L2CAP_HDR_SIZE;
1268 struct l2cap_hdr *lh;
1270 BT_DBG("sk %p len %d", sk, (int)len);
1272 count = min_t(unsigned int, (conn->mtu - hlen), len);
1273 skb = bt_skb_send_alloc(sk, count + hlen,
1274 msg->msg_flags & MSG_DONTWAIT, &err);
1276 return ERR_PTR(err);
1278 /* Create L2CAP header */
1279 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1280 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1281 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1283 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1284 if (unlikely(err < 0)) {
1286 return ERR_PTR(err);
/* Build an ERTM/streaming I-frame PDU: header + 16-bit control word,
 * optional 16-bit SDU length (SAR start only), payload, and room for
 * the FCS.  @sdulen: total SDU length for SDU-start frames. */
1291 struct sk_buff *l2cap_create_iframe_pdu(struct sock *sk, struct msghdr *msg, size_t len, u16 control, u16 sdulen)
1293 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1294 struct sk_buff *skb;
1295 int err, count, hlen = L2CAP_HDR_SIZE + 2;
1296 struct l2cap_hdr *lh;
1298 BT_DBG("sk %p len %d", sk, (int)len);
1301 return ERR_PTR(-ENOTCONN);
1306 if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16)
1309 count = min_t(unsigned int, (conn->mtu - hlen), len);
1310 skb = bt_skb_send_alloc(sk, count + hlen,
1311 msg->msg_flags & MSG_DONTWAIT, &err);
1313 return ERR_PTR(err);
1315 /* Create L2CAP header */
1316 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1317 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1318 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1319 put_unaligned_le16(control, skb_put(skb, 2));
1321 put_unaligned_le16(sdulen, skb_put(skb, 2));
1323 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1324 if (unlikely(err < 0)) {
1326 return ERR_PTR(err);
/* Reserve the FCS slot; the real value is stamped at send time. */
1329 if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16)
1330 put_unaligned_le16(0, skb_put(skb, 2));
1332 bt_cb(skb)->retries = 0;
/* Segment an SDU larger than the remote MPS into a start / continue /
 * end sequence of I-frames, then splice them onto the channel's tx
 * queue atomically. */
1336 int l2cap_sar_segment_sdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
1338 struct sock *sk = chan->sk;
1339 struct sk_buff *skb;
1340 struct sk_buff_head sar_queue;
1344 skb_queue_head_init(&sar_queue);
/* First fragment carries the SAR-start bit and the total SDU length. */
1345 control = L2CAP_SDU_START;
1346 skb = l2cap_create_iframe_pdu(sk, msg, chan->remote_mps, control, len);
1348 return PTR_ERR(skb);
1350 __skb_queue_tail(&sar_queue, skb);
1351 len -= chan->remote_mps;
1352 size += chan->remote_mps;
/* More than one fragment still to go: continue; otherwise end. */
1357 if (len > chan->remote_mps) {
1358 control = L2CAP_SDU_CONTINUE;
1359 buflen = chan->remote_mps;
1361 control = L2CAP_SDU_END;
1365 skb = l2cap_create_iframe_pdu(sk, msg, buflen, control, 0);
/* Any failure drops all fragments built so far. */
1367 skb_queue_purge(&sar_queue);
1368 return PTR_ERR(skb);
1371 __skb_queue_tail(&sar_queue, skb);
1375 skb_queue_splice_tail(&sar_queue, &chan->tx_q);
1376 if (chan->tx_send_head == NULL)
1377 chan->tx_send_head = sar_queue.next;
/* Configuration finished in both directions: mark the socket
 * connected and wake whoever is waiting (connect() or the accepting
 * parent). */
1382 static void l2cap_chan_ready(struct sock *sk)
1384 struct sock *parent = bt_sk(sk)->parent;
1386 BT_DBG("sk %p, parent %p", sk, parent);
1388 l2cap_pi(sk)->conf_state = 0;
1389 l2cap_sock_clear_timer(sk);
1392 /* Outgoing channel.
1393 * Wake up socket sleeping on connect.
1395 sk->sk_state = BT_CONNECTED;
1396 sk->sk_state_change(sk);
1398 /* Incoming channel.
1399 * Wake up socket sleeping on accept.
1401 parent->sk_data_ready(parent, 0);
1405 /* Copy frame to all raw sockets on that connection */
1406 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
1408 struct sk_buff *nskb;
1409 struct l2cap_chan *chan;
1411 BT_DBG("conn %p", conn);
1413 read_lock(&conn->chan_lock);
1414 list_for_each_entry(chan, &conn->chan_l, list) {
1415 struct sock *sk = chan->sk;
1416 if (sk->sk_type != SOCK_RAW)
1419 /* Don't send frame to the socket it came from */
/* Each raw socket receives its own clone of the frame. */
1422 nskb = skb_clone(skb, GFP_ATOMIC);
/* Non-zero means the receive queue rejected the clone (the drop
 * path is elided in this excerpt). */
1426 if (sock_queue_rcv_skb(sk, nskb))
1429 read_unlock(&conn->chan_lock);
1432 /* ---- L2CAP signalling commands ---- */
1433 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
1434 u8 code, u8 ident, u16 dlen, void *data)
1436 struct sk_buff *skb, **frag;
1437 struct l2cap_cmd_hdr *cmd;
1438 struct l2cap_hdr *lh;
1441 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
1442 conn, code, ident, dlen);
1444 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
1445 count = min_t(unsigned int, conn->mtu, len);
1447 skb = bt_skb_alloc(count, GFP_ATOMIC);
1451 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1452 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
1454 if (conn->hcon->type == LE_LINK)
1455 lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
1457 lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);
1459 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
1462 cmd->len = cpu_to_le16(dlen);
1465 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
1466 memcpy(skb_put(skb, count), data, count);
1472 /* Continuation fragments (no L2CAP header) */
1473 frag = &skb_shinfo(skb)->frag_list;
1475 count = min_t(unsigned int, conn->mtu, len);
1477 *frag = bt_skb_alloc(count, GFP_ATOMIC);
1481 memcpy(skb_put(*frag, count), data, count);
1486 frag = &(*frag)->next;
/* Parse one configuration option at *ptr into *type/*olen/*val and
 * return its total on-wire size so the caller can walk the option list.
 * 1-, 2- and 4-byte values are copied by value (with unaligned-safe
 * little-endian reads); anything larger is returned as a pointer into
 * the buffer.  Switch/case header lines are elided in this excerpt. */
1496 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
1498 struct l2cap_conf_opt *opt = *ptr;
1501 len = L2CAP_CONF_OPT_SIZE + opt->len;
1509 *val = *((u8 *) opt->val);
1513 *val = get_unaligned_le16(opt->val);
1517 *val = get_unaligned_le32(opt->val);
/* Variable-length option: hand back a pointer, not a copy. */
1521 *val = (unsigned long) opt->val;
1525 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
/* Append one configuration option (type/len/val) at *ptr and advance
 * *ptr past it.  1-, 2- and 4-byte values are stored with unaligned-
 * safe little-endian writes; larger values are memcpy'd from the
 * pointer passed in @val.  Case labels elided in this excerpt. */
1529 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
1531 struct l2cap_conf_opt *opt = *ptr;
1533 BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
1540 *((u8 *) opt->val) = val;
1544 put_unaligned_le16(val, opt->val);
1548 put_unaligned_le32(val, opt->val);
/* Larger options: @val is actually a pointer to the payload. */
1552 memcpy(opt->val, (void *) val, len);
/* Advance the cursor past header + payload. */
1556 *ptr += L2CAP_CONF_OPT_SIZE + len;
/* Ack timer callback: flush a pending acknowledgement for the channel
 * encoded in @arg.  Runs in timer (softirq) context, hence the
 * bh_lock_sock()/bh_unlock_sock() pair around the send. */
1559 static void l2cap_ack_timeout(unsigned long arg)
1561 struct l2cap_chan *chan = (void *) arg;
1563 bh_lock_sock(chan->sk);
1564 l2cap_send_ack(chan);
1565 bh_unlock_sock(chan->sk);
/* Initialise ERTM state for @chan: zero the sequence/ack counters, arm
 * the retransmission, monitor and ack timers, set up the SREJ/busy
 * queues and busy work item, and route backlogged packets through the
 * ERTM receive path. */
1568 static inline void l2cap_ertm_init(struct l2cap_chan *chan)
1570 struct sock *sk = chan->sk;
1572 chan->expected_ack_seq = 0;
1573 chan->unacked_frames = 0;
1574 chan->buffer_seq = 0;
1575 chan->num_acked = 0;
1576 chan->frames_sent = 0;
/* Timers take the channel pointer as their opaque argument. */
1578 setup_timer(&chan->retrans_timer, l2cap_retrans_timeout,
1579 (unsigned long) chan);
1580 setup_timer(&chan->monitor_timer, l2cap_monitor_timeout,
1581 (unsigned long) chan);
1582 setup_timer(&chan->ack_timer, l2cap_ack_timeout, (unsigned long) chan);
1584 skb_queue_head_init(&chan->srej_q);
1585 skb_queue_head_init(&chan->busy_q);
1587 INIT_LIST_HEAD(&chan->srej_l);
1589 INIT_WORK(&chan->busy_work, l2cap_busy_work);
/* Socket backlog packets must go through ERTM reassembly too. */
1591 sk->sk_backlog_rcv = l2cap_ertm_data_rcv;
/* Pick the channel mode to use: keep ERTM/streaming only if the remote
 * advertises support for it in @remote_feat_mask, otherwise fall back
 * to basic mode.  (Switch header and the supported-return line are
 * elided in this excerpt.) */
1594 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
1597 case L2CAP_MODE_STREAMING:
1598 case L2CAP_MODE_ERTM:
1599 if (l2cap_mode_supported(mode, remote_feat_mask))
1603 return L2CAP_MODE_BASIC;
/* Build an outgoing configuration request into @data for @chan: MTU
 * option if non-default, then a mode-specific RFC option (basic, ERTM
 * or streaming), plus an FCS option when the peer supports FCS and we
 * want it off.  Returns the request length (return statement elided in
 * this excerpt).  MPS is clamped to conn->mtu - 10 to leave room for
 * the ERTM/FCS overhead. */
1607 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
1609 struct l2cap_pinfo *pi = l2cap_pi(chan->sk);
1610 struct l2cap_conf_req *req = data;
1611 struct l2cap_conf_rfc rfc = { .mode = pi->mode };
1612 void *ptr = req->data;
1614 BT_DBG("chan %p", chan);
/* Only (re)select the mode on the very first request. */
1616 if (chan->num_conf_req || chan->num_conf_rsp)
1620 case L2CAP_MODE_STREAMING:
1621 case L2CAP_MODE_ERTM:
1622 if (pi->conf_state & L2CAP_CONF_STATE2_DEVICE)
1627 pi->mode = l2cap_select_mode(rfc.mode, pi->conn->feat_mask);
1632 if (pi->imtu != L2CAP_DEFAULT_MTU)
1633 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);
1636 case L2CAP_MODE_BASIC:
/* If the peer supports neither ERTM nor streaming there is no point
 * sending an RFC option at all. */
1637 if (!(pi->conn->feat_mask & L2CAP_FEAT_ERTM) &&
1638 !(pi->conn->feat_mask & L2CAP_FEAT_STREAMING))
1641 rfc.mode = L2CAP_MODE_BASIC;
1643 rfc.max_transmit = 0;
1644 rfc.retrans_timeout = 0;
1645 rfc.monitor_timeout = 0;
1646 rfc.max_pdu_size = 0;
1648 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
1649 (unsigned long) &rfc);
1652 case L2CAP_MODE_ERTM:
1653 rfc.mode = L2CAP_MODE_ERTM;
1654 rfc.txwin_size = pi->tx_win;
1655 rfc.max_transmit = pi->max_tx;
/* Timeouts are left 0 in the request; the responder fills them in. */
1656 rfc.retrans_timeout = 0;
1657 rfc.monitor_timeout = 0;
1658 rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
1659 if (L2CAP_DEFAULT_MAX_PDU_SIZE > pi->conn->mtu - 10)
1660 rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);
1662 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
1663 (unsigned long) &rfc);
1665 if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
/* Request FCS off if we don't want it or the peer offered none. */
1668 if (pi->fcs == L2CAP_FCS_NONE ||
1669 pi->conf_state & L2CAP_CONF_NO_FCS_RECV) {
1670 pi->fcs = L2CAP_FCS_NONE;
1671 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);
1675 case L2CAP_MODE_STREAMING:
1676 rfc.mode = L2CAP_MODE_STREAMING;
1678 rfc.max_transmit = 0;
1679 rfc.retrans_timeout = 0;
1680 rfc.monitor_timeout = 0;
1681 rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
1682 if (L2CAP_DEFAULT_MAX_PDU_SIZE > pi->conn->mtu - 10)
1683 rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);
1685 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
1686 (unsigned long) &rfc);
1688 if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
1691 if (pi->fcs == L2CAP_FCS_NONE ||
1692 pi->conf_state & L2CAP_CONF_NO_FCS_RECV) {
1693 pi->fcs = L2CAP_FCS_NONE;
1694 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);
1699 req->dcid = cpu_to_le16(pi->dcid);
1700 req->flags = cpu_to_le16(0);
/* Parse the peer's accumulated configuration request (chan->conf_req)
 * and build our response options into @data.  Unknown non-hint options
 * are listed back with L2CAP_CONF_UNKNOWN; a mode mismatch yields
 * L2CAP_CONF_UNACCEPT or -ECONNREFUSED after the second round.  On
 * success the MTU/MODE/OUTPUT done flags are set in pi->conf_state.
 * Returns the response length (return statement elided in this
 * excerpt).
 *
 * FIX: rfc.retrans_timeout/rfc.monitor_timeout are little-endian wire
 * fields being assigned from host-order constants, so the correct
 * conversion is cpu_to_le16(), not le16_to_cpu().  The two are only
 * coincidentally identical on little-endian hosts; on big-endian the
 * original sent byte-swapped timeout values. */
1705 static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
1707 struct l2cap_pinfo *pi = l2cap_pi(chan->sk);
1708 struct l2cap_conf_rsp *rsp = data;
1709 void *ptr = rsp->data;
1710 void *req = chan->conf_req;
1711 int len = chan->conf_len;
1712 int type, hint, olen;
1714 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
1715 u16 mtu = L2CAP_DEFAULT_MTU;
1716 u16 result = L2CAP_CONF_SUCCESS;
1718 BT_DBG("chan %p", chan);
/* Walk every option the peer sent. */
1720 while (len >= L2CAP_CONF_OPT_SIZE) {
1721 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
1723 hint = type & L2CAP_CONF_HINT;
1724 type &= L2CAP_CONF_MASK;
1727 case L2CAP_CONF_MTU:
1731 case L2CAP_CONF_FLUSH_TO:
1735 case L2CAP_CONF_QOS:
1738 case L2CAP_CONF_RFC:
1739 if (olen == sizeof(rfc))
1740 memcpy(&rfc, (void *) val, olen);
1743 case L2CAP_CONF_FCS:
1744 if (val == L2CAP_FCS_NONE)
1745 pi->conf_state |= L2CAP_CONF_NO_FCS_RECV;
/* Unknown, non-hint option: echo its type back as unacceptable. */
1753 result = L2CAP_CONF_UNKNOWN;
1754 *((u8 *) ptr++) = type;
/* Mode is negotiated only in the first exchange. */
1759 if (chan->num_conf_rsp || chan->num_conf_req > 1)
1763 case L2CAP_MODE_STREAMING:
1764 case L2CAP_MODE_ERTM:
1765 if (!(pi->conf_state & L2CAP_CONF_STATE2_DEVICE)) {
1766 pi->mode = l2cap_select_mode(rfc.mode,
1767 pi->conn->feat_mask);
1771 if (pi->mode != rfc.mode)
1772 return -ECONNREFUSED;
1778 if (pi->mode != rfc.mode) {
1779 result = L2CAP_CONF_UNACCEPT;
1780 rfc.mode = pi->mode;
/* Second unacceptable response means negotiation failed. */
1782 if (chan->num_conf_rsp == 1)
1783 return -ECONNREFUSED;
1785 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
1786 sizeof(rfc), (unsigned long) &rfc);
1790 if (result == L2CAP_CONF_SUCCESS) {
1791 /* Configure output options and let the other side know
1792 * which ones we don't like. */
1794 if (mtu < L2CAP_DEFAULT_MIN_MTU)
1795 result = L2CAP_CONF_UNACCEPT;
1798 pi->conf_state |= L2CAP_CONF_MTU_DONE;
1800 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);
1803 case L2CAP_MODE_BASIC:
1804 pi->fcs = L2CAP_FCS_NONE;
1805 pi->conf_state |= L2CAP_CONF_MODE_DONE;
1808 case L2CAP_MODE_ERTM:
1809 chan->remote_tx_win = rfc.txwin_size;
1810 chan->remote_max_tx = rfc.max_transmit;
/* Clamp the peer's MPS to what fits our ACL MTU minus overhead. */
1812 if (le16_to_cpu(rfc.max_pdu_size) > pi->conn->mtu - 10)
1813 rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);
1815 chan->remote_mps = le16_to_cpu(rfc.max_pdu_size);
/* Host-order constants into __le16 wire fields: cpu_to_le16(). */
1817 rfc.retrans_timeout =
1818 cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
1819 rfc.monitor_timeout =
1820 cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
1822 pi->conf_state |= L2CAP_CONF_MODE_DONE;
1824 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
1825 sizeof(rfc), (unsigned long) &rfc);
1829 case L2CAP_MODE_STREAMING:
1830 if (le16_to_cpu(rfc.max_pdu_size) > pi->conn->mtu - 10)
1831 rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);
1833 chan->remote_mps = le16_to_cpu(rfc.max_pdu_size);
1835 pi->conf_state |= L2CAP_CONF_MODE_DONE;
1837 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
1838 sizeof(rfc), (unsigned long) &rfc);
/* Unsupported mode requested: reject with our own mode. */
1843 result = L2CAP_CONF_UNACCEPT;
1845 memset(&rfc, 0, sizeof(rfc));
1846 rfc.mode = pi->mode;
1849 if (result == L2CAP_CONF_SUCCESS)
1850 pi->conf_state |= L2CAP_CONF_OUTPUT_DONE;
1852 rsp->scid = cpu_to_le16(pi->dcid);
1853 rsp->result = cpu_to_le16(result);
1854 rsp->flags = cpu_to_le16(0x0000);
/* Parse the peer's configuration *response* and build our follow-up
 * request into @data.  MTU-too-small is downgraded to the minimum and
 * flagged via *result; an RFC mode that conflicts with a fixed-mode
 * device yields -ECONNREFUSED.  On success the negotiated timeouts and
 * MPS are latched into the pinfo.  Returns the new request length
 * (return statement elided in this excerpt). */
1859 static int l2cap_parse_conf_rsp(struct sock *sk, void *rsp, int len, void *data, u16 *result)
1861 struct l2cap_pinfo *pi = l2cap_pi(sk);
1862 struct l2cap_conf_req *req = data;
1863 void *ptr = req->data;
1866 struct l2cap_conf_rfc rfc;
1868 BT_DBG("sk %p, rsp %p, len %d, req %p", sk, rsp, len, data);
1870 while (len >= L2CAP_CONF_OPT_SIZE) {
1871 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
1874 case L2CAP_CONF_MTU:
/* Peer offered less than the minimum: mark unacceptable but still
 * propose the minimum ourselves. */
1875 if (val < L2CAP_DEFAULT_MIN_MTU) {
1876 *result = L2CAP_CONF_UNACCEPT;
1877 pi->imtu = L2CAP_DEFAULT_MIN_MTU;
1880 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);
1883 case L2CAP_CONF_FLUSH_TO:
1885 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
1889 case L2CAP_CONF_RFC:
1890 if (olen == sizeof(rfc))
1891 memcpy(&rfc, (void *)val, olen);
/* A fixed-mode device cannot accept a different mode from the peer. */
1893 if ((pi->conf_state & L2CAP_CONF_STATE2_DEVICE) &&
1894 rfc.mode != pi->mode)
1895 return -ECONNREFUSED;
1899 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
1900 sizeof(rfc), (unsigned long) &rfc);
1905 if (pi->mode == L2CAP_MODE_BASIC && pi->mode != rfc.mode)
1906 return -ECONNREFUSED;
1908 pi->mode = rfc.mode;
1910 if (*result == L2CAP_CONF_SUCCESS) {
/* Latch the negotiated ERTM/streaming parameters. */
1912 case L2CAP_MODE_ERTM:
1913 pi->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
1914 pi->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
1915 pi->mps = le16_to_cpu(rfc.max_pdu_size);
1917 case L2CAP_MODE_STREAMING:
1918 pi->mps = le16_to_cpu(rfc.max_pdu_size);
1922 req->dcid = cpu_to_le16(pi->dcid);
1923 req->flags = cpu_to_le16(0x0000);
/* Fill in a minimal (option-less) configuration response header for
 * @sk with the given @result and @flags.  Returns the response length
 * (return statement elided in this excerpt). */
1928 static int l2cap_build_conf_rsp(struct sock *sk, void *data, u16 result, u16 flags)
1930 struct l2cap_conf_rsp *rsp = data;
1931 void *ptr = rsp->data;
1933 BT_DBG("sk %p", sk);
/* scid in the response is the peer's source CID, i.e. our dcid. */
1935 rsp->scid = cpu_to_le16(l2cap_pi(sk)->dcid);
1936 rsp->result = cpu_to_le16(result);
1937 rsp->flags = cpu_to_le16(flags);
/* Complete a previously deferred connect: send the success connect
 * response for @sk and, if no configuration request is outstanding,
 * kick off configuration.  Moves the socket to BT_CONFIG. */
1942 void __l2cap_connect_rsp_defer(struct sock *sk)
1944 struct l2cap_conn_rsp rsp;
1945 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1946 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
1949 sk->sk_state = BT_CONFIG;
1951 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
1952 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
1953 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
1954 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
/* Reply with the ident saved from the original connect request. */
1955 l2cap_send_cmd(conn, chan->ident,
1956 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
/* Only send one configuration request per channel. */
1958 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)
1961 l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
1962 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
1963 l2cap_build_conf_req(chan, buf), buf);
1964 chan->num_conf_req++;
/* Extract the RFC option from an already-accepted configuration
 * response @rsp and latch the negotiated ERTM/streaming parameters
 * into the pinfo.  No-op for basic mode. */
1967 static void l2cap_conf_rfc_get(struct sock *sk, void *rsp, int len)
1969 struct l2cap_pinfo *pi = l2cap_pi(sk);
1972 struct l2cap_conf_rfc rfc;
1974 BT_DBG("sk %p, rsp %p, len %d", sk, rsp, len);
/* Only ERTM/streaming carry meaningful RFC parameters. */
1976 if ((pi->mode != L2CAP_MODE_ERTM) && (pi->mode != L2CAP_MODE_STREAMING))
1979 while (len >= L2CAP_CONF_OPT_SIZE) {
1980 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
1983 case L2CAP_CONF_RFC:
1984 if (olen == sizeof(rfc))
1985 memcpy(&rfc, (void *)val, olen);
1992 case L2CAP_MODE_ERTM:
1993 pi->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
1994 pi->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
1995 pi->mps = le16_to_cpu(rfc.max_pdu_size);
1997 case L2CAP_MODE_STREAMING:
1998 pi->mps = le16_to_cpu(rfc.max_pdu_size);
/* Handle an incoming Command Reject.  If it rejects our outstanding
 * information request (matching ident), stop the info timer, mark the
 * feature exchange as done and resume channel establishment. */
2002 static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2004 struct l2cap_cmd_rej *rej = (struct l2cap_cmd_rej *) data;
/* Only "command not understood" (0x0000) is of interest here. */
2006 if (rej->reason != 0x0000)
2009 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
2010 cmd->ident == conn->info_ident) {
2011 del_timer(&conn->info_timer);
2013 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2014 conn->info_ident = 0;
2016 l2cap_conn_start(conn);
/* Handle an incoming Connection Request: locate a listening socket for
 * the PSM, enforce link security (except for SDP), allocate a child
 * socket + channel, add it to the connection and reply with success,
 * pending or an error result.  May also trigger the information
 * request exchange and, on immediate success, the first configuration
 * request.  NOTE(review): error-path labels/gotos are elided in this
 * excerpt — confirm control flow against full source. */
2022 static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2024 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
2025 struct l2cap_conn_rsp rsp;
2026 struct l2cap_chan *chan = NULL;
2027 struct sock *parent, *sk = NULL;
2028 int result, status = L2CAP_CS_NO_INFO;
2030 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
2031 __le16 psm = req->psm;
2033 BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);
2035 /* Check if we have socket listening on psm */
2036 parent = l2cap_get_sock_by_psm(BT_LISTEN, psm, conn->src);
2038 result = L2CAP_CR_BAD_PSM;
2042 bh_lock_sock(parent);
2044 /* Check if the ACL is secure enough (if not SDP) */
/* PSM 0x0001 is SDP, which is exempt from the link-mode check. */
2045 if (psm != cpu_to_le16(0x0001) &&
2046 !hci_conn_check_link_mode(conn->hcon)) {
2047 conn->disc_reason = 0x05;
2048 result = L2CAP_CR_SEC_BLOCK;
2052 result = L2CAP_CR_NO_MEM;
2054 /* Check for backlog size */
2055 if (sk_acceptq_is_full(parent)) {
2056 BT_DBG("backlog full %d", parent->sk_ack_backlog);
2060 sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP, GFP_ATOMIC);
2064 chan = l2cap_chan_alloc(sk);
2066 l2cap_sock_kill(sk);
2070 l2cap_pi(sk)->chan = chan;
2072 write_lock_bh(&conn->chan_lock);
2074 /* Check if we already have channel with that dcid */
/* The remote's scid is our dcid — reject duplicates. */
2075 if (__l2cap_get_chan_by_dcid(conn, scid)) {
2076 write_unlock_bh(&conn->chan_lock);
2077 sock_set_flag(sk, SOCK_ZAPPED);
2078 l2cap_sock_kill(sk);
2082 hci_conn_hold(conn->hcon);
2084 l2cap_sock_init(sk, parent);
2085 bacpy(&bt_sk(sk)->src, conn->src);
2086 bacpy(&bt_sk(sk)->dst, conn->dst);
2087 l2cap_pi(sk)->psm = psm;
2088 l2cap_pi(sk)->dcid = scid;
2090 bt_accept_enqueue(parent, sk);
2092 __l2cap_chan_add(conn, chan);
2094 dcid = l2cap_pi(sk)->scid;
2096 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
/* Remember the request ident for a possibly deferred response. */
2098 chan->ident = cmd->ident;
2100 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
2101 if (l2cap_check_security(sk)) {
2102 if (bt_sk(sk)->defer_setup) {
2103 sk->sk_state = BT_CONNECT2;
2104 result = L2CAP_CR_PEND;
2105 status = L2CAP_CS_AUTHOR_PEND;
2106 parent->sk_data_ready(parent, 0);
2108 sk->sk_state = BT_CONFIG;
2109 result = L2CAP_CR_SUCCESS;
2110 status = L2CAP_CS_NO_INFO;
2113 sk->sk_state = BT_CONNECT2;
2114 result = L2CAP_CR_PEND;
2115 status = L2CAP_CS_AUTHEN_PEND;
2118 sk->sk_state = BT_CONNECT2;
2119 result = L2CAP_CR_PEND;
2120 status = L2CAP_CS_NO_INFO;
2123 write_unlock_bh(&conn->chan_lock);
2126 bh_unlock_sock(parent);
2129 rsp.scid = cpu_to_le16(scid);
2130 rsp.dcid = cpu_to_le16(dcid);
2131 rsp.result = cpu_to_le16(result);
2132 rsp.status = cpu_to_le16(status);
2133 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
/* Feature mask not yet known: start the information exchange. */
2135 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
2136 struct l2cap_info_req info;
2137 info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
2139 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
2140 conn->info_ident = l2cap_get_ident(conn);
2142 mod_timer(&conn->info_timer, jiffies +
2143 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
2145 l2cap_send_cmd(conn, conn->info_ident,
2146 L2CAP_INFO_REQ, sizeof(info), &info);
/* Immediate success: start configuration right away. */
2149 if (chan && !(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT) &&
2150 result == L2CAP_CR_SUCCESS) {
2152 l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
2153 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2154 l2cap_build_conf_req(chan, buf), buf);
2155 chan->num_conf_req++;
/* Handle an incoming Connection Response: find the channel by scid (or
 * by ident while still pending), then either proceed to configuration
 * on success, stay pending, or tear the channel down on refusal.
 * NOTE(review): the switch header, pending case and error labels are
 * elided in this excerpt — confirm against full source. */
2162 static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2163 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
2164 u16 scid, dcid, result, status;
2165 struct l2cap_chan *chan;
2169 scid = __le16_to_cpu(rsp->scid);
2170 dcid = __le16_to_cpu(rsp->dcid);
2171 result = __le16_to_cpu(rsp->result);
2172 status = __le16_to_cpu(rsp->status);
2174 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);
2177 chan = l2cap_get_chan_by_scid(conn, scid);
/* scid may be 0 while pending: fall back to matching by ident. */
2181 chan = l2cap_get_chan_by_ident(conn, cmd->ident);
2189 case L2CAP_CR_SUCCESS:
2190 sk->sk_state = BT_CONFIG;
2192 l2cap_pi(sk)->dcid = dcid;
2193 l2cap_pi(sk)->conf_state &= ~L2CAP_CONF_CONNECT_PEND;
2195 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)
2198 l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
2200 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2201 l2cap_build_conf_req(chan, req), req);
2202 chan->num_conf_req++;
2206 l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
2210 /* don't delete l2cap channel if sk is owned by user */
2211 if (sock_owned_by_user(sk)) {
2212 sk->sk_state = BT_DISCONN;
2213 l2cap_sock_clear_timer(sk);
/* Short timer lets the owner release the socket first. */
2214 l2cap_sock_set_timer(sk, HZ / 5);
2218 l2cap_chan_del(chan, ECONNREFUSED);
2226 static inline void set_default_fcs(struct l2cap_pinfo *pi)
2228 /* FCS is enabled only in ERTM or streaming mode, if one or both
/* (continuation of the original comment is elided in this excerpt) */
2231 if (pi->mode != L2CAP_MODE_ERTM && pi->mode != L2CAP_MODE_STREAMING)
2232 pi->fcs = L2CAP_FCS_NONE;
/* Default to CRC16 unless the peer explicitly asked for no FCS. */
2233 else if (!(pi->conf_state & L2CAP_CONF_NO_FCS_RECV))
2234 pi->fcs = L2CAP_FCS_CRC16;
/* Handle an incoming Configuration Request: accumulate (possibly
 * multi-fragment) option data into chan->conf_req, and once the
 * continuation flag clears, parse it, send the response and — when
 * both directions are configured — bring the channel up (initialising
 * ERTM if negotiated).  Error labels/gotos are elided in this
 * excerpt. */
2237 static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
2239 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
2242 struct l2cap_chan *chan;
2246 dcid = __le16_to_cpu(req->dcid);
2247 flags = __le16_to_cpu(req->flags);
2249 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
2251 chan = l2cap_get_chan_by_scid(conn, dcid);
/* Config is only valid in BT_CONFIG; otherwise reject (invalid CID). */
2257 if (sk->sk_state != BT_CONFIG) {
2258 struct l2cap_cmd_rej rej;
2260 rej.reason = cpu_to_le16(0x0002);
2261 l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
2266 /* Reject if config buffer is too small. */
2267 len = cmd_len - sizeof(*req);
2268 if (chan->conf_len + len > sizeof(chan->conf_req)) {
2269 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2270 l2cap_build_conf_rsp(sk, rsp,
2271 L2CAP_CONF_REJECT, flags), rsp);
/* Accumulate this fragment of option data. */
2276 memcpy(chan->conf_req + chan->conf_len, req->data, len);
2277 chan->conf_len += len;
2279 if (flags & 0x0001) {
2280 /* Incomplete config. Send empty response. */
2281 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2282 l2cap_build_conf_rsp(sk, rsp,
2283 L2CAP_CONF_SUCCESS, 0x0001), rsp);
2287 /* Complete config. */
2288 len = l2cap_parse_conf_req(chan, rsp);
/* Negative length means negotiation failed irrecoverably. */
2290 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2294 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
2295 chan->num_conf_rsp++;
2297 /* Reset config buffer. */
2300 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE))
/* Both directions configured: channel becomes operational. */
2303 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_INPUT_DONE) {
2304 set_default_fcs(l2cap_pi(sk));
2306 sk->sk_state = BT_CONNECTED;
2308 chan->next_tx_seq = 0;
2309 chan->expected_tx_seq = 0;
2310 skb_queue_head_init(&chan->tx_q);
2311 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
2312 l2cap_ertm_init(chan);
2314 l2cap_chan_ready(sk);
/* We haven't sent our own config request yet: do it now. */
2318 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)) {
2320 l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
2321 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2322 l2cap_build_conf_req(chan, buf), buf);
2323 chan->num_conf_req++;
/* Handle an incoming Configuration Response.  On success latch the RFC
 * parameters; on "unacceptable parameters" re-negotiate (bounded by
 * L2CAP_CONF_MAX_CONF_RSP); otherwise disconnect.  When both
 * directions are done, bring the channel up.  Switch header and some
 * labels elided in this excerpt. */
2331 static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2333 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
2334 u16 scid, flags, result;
2335 struct l2cap_chan *chan;
2337 int len = cmd->len - sizeof(*rsp);
2339 scid = __le16_to_cpu(rsp->scid);
2340 flags = __le16_to_cpu(rsp->flags);
2341 result = __le16_to_cpu(rsp->result);
2343 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
2344 scid, flags, result);
2346 chan = l2cap_get_chan_by_scid(conn, scid);
2353 case L2CAP_CONF_SUCCESS:
2354 l2cap_conf_rfc_get(sk, rsp->data, len);
2357 case L2CAP_CONF_UNACCEPT:
/* Retry with adjusted parameters, but only a bounded number of
 * times to avoid an endless negotiation loop. */
2358 if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
2361 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
2362 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2366 /* throw out any old stored conf requests */
2367 result = L2CAP_CONF_SUCCESS;
2368 len = l2cap_parse_conf_rsp(sk, rsp->data,
2371 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2375 l2cap_send_cmd(conn, l2cap_get_ident(conn),
2376 L2CAP_CONF_REQ, len, req);
2377 chan->num_conf_req++;
2378 if (result != L2CAP_CONF_SUCCESS)
/* Any other result: give up and disconnect. */
2384 sk->sk_err = ECONNRESET;
2385 l2cap_sock_set_timer(sk, HZ * 5);
2386 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2393 l2cap_pi(sk)->conf_state |= L2CAP_CONF_INPUT_DONE;
/* Both directions configured: channel becomes operational. */
2395 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE) {
2396 set_default_fcs(l2cap_pi(sk));
2398 sk->sk_state = BT_CONNECTED;
2399 chan->next_tx_seq = 0;
2400 chan->expected_tx_seq = 0;
2401 skb_queue_head_init(&chan->tx_q);
2402 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
2403 l2cap_ertm_init(chan);
2405 l2cap_chan_ready(sk);
/* Handle an incoming Disconnection Request: acknowledge it with a
 * Disconnection Response, shut the socket down, and delete the channel
 * unless user space currently owns the socket lock (then defer via a
 * short timer). */
2413 static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2415 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
2416 struct l2cap_disconn_rsp rsp;
2418 struct l2cap_chan *chan;
2421 scid = __le16_to_cpu(req->scid);
2422 dcid = __le16_to_cpu(req->dcid);
2424 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
/* The peer's dcid is our scid: look the channel up by it. */
2426 chan = l2cap_get_chan_by_scid(conn, dcid);
2432 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
2433 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
2434 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
2436 sk->sk_shutdown = SHUTDOWN_MASK;
2438 /* don't delete l2cap channel if sk is owned by user */
2439 if (sock_owned_by_user(sk)) {
2440 sk->sk_state = BT_DISCONN;
2441 l2cap_sock_clear_timer(sk);
/* Retry teardown shortly, once the lock owner releases the sock. */
2442 l2cap_sock_set_timer(sk, HZ / 5);
2447 l2cap_chan_del(chan, ECONNRESET);
2450 l2cap_sock_kill(sk);
/* Handle an incoming Disconnection Response: the peer confirmed our
 * disconnect, so delete the channel (deferring if user space owns the
 * socket lock) and kill the socket. */
2454 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2456 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
2458 struct l2cap_chan *chan;
2461 scid = __le16_to_cpu(rsp->scid);
2462 dcid = __le16_to_cpu(rsp->dcid);
2464 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
2466 chan = l2cap_get_chan_by_scid(conn, scid);
2472 /* don't delete l2cap channel if sk is owned by user */
2473 if (sock_owned_by_user(sk)) {
2474 sk->sk_state = BT_DISCONN;
2475 l2cap_sock_clear_timer(sk);
/* Retry teardown shortly, once the lock owner releases the sock. */
2476 l2cap_sock_set_timer(sk, HZ / 5);
2481 l2cap_chan_del(chan, 0);
2484 l2cap_sock_kill(sk);
/* Handle an incoming Information Request: answer feature-mask and
 * fixed-channel queries with success, anything else with NOTSUPP. */
2488 static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2490 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
2493 type = __le16_to_cpu(req->type);
2495 BT_DBG("type 0x%4.4x", type);
2497 if (type == L2CAP_IT_FEAT_MASK) {
2499 u32 feat_mask = l2cap_feat_mask;
2500 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
2501 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
2502 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
/* Advertise ERTM/streaming on top of the static feature mask
 * (conditional guarding this line is elided in this excerpt). */
2504 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
2506 put_unaligned_le32(feat_mask, rsp->data);
2507 l2cap_send_cmd(conn, cmd->ident,
2508 L2CAP_INFO_RSP, sizeof(buf), buf);
2509 } else if (type == L2CAP_IT_FIXED_CHAN) {
2511 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
2512 rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
2513 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
/* Fixed-channel bitmap follows the 4-byte response header. */
2514 memcpy(buf + 4, l2cap_fixed_chan, 8);
2515 l2cap_send_cmd(conn, cmd->ident,
2516 L2CAP_INFO_RSP, sizeof(buf), buf);
2518 struct l2cap_info_rsp rsp;
2519 rsp.type = cpu_to_le16(type);
2520 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
2521 l2cap_send_cmd(conn, cmd->ident,
2522 L2CAP_INFO_RSP, sizeof(rsp), &rsp);
/* Handle an incoming Information Response to our request: record the
 * remote feature mask, chain a fixed-channel query if advertised, and
 * once the exchange completes resume channel establishment. */
2528 static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2530 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
2533 type = __le16_to_cpu(rsp->type);
2534 result = __le16_to_cpu(rsp->result);
2536 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
2538 /* L2CAP Info req/rsp are unbound to channels, add extra checks */
2539 if (cmd->ident != conn->info_ident ||
2540 conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
2543 del_timer(&conn->info_timer);
/* Failure: mark the exchange done and proceed without features. */
2545 if (result != L2CAP_IR_SUCCESS) {
2546 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2547 conn->info_ident = 0;
2549 l2cap_conn_start(conn);
2554 if (type == L2CAP_IT_FEAT_MASK) {
2555 conn->feat_mask = get_unaligned_le32(rsp->data);
/* Peer supports fixed channels: ask which ones before starting. */
2557 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
2558 struct l2cap_info_req req;
2559 req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
2561 conn->info_ident = l2cap_get_ident(conn);
2563 l2cap_send_cmd(conn, conn->info_ident,
2564 L2CAP_INFO_REQ, sizeof(req), &req);
2566 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2567 conn->info_ident = 0;
2569 l2cap_conn_start(conn);
2571 } else if (type == L2CAP_IT_FIXED_CHAN) {
2572 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2573 conn->info_ident = 0;
2575 l2cap_conn_start(conn);
/* Validate LE connection-parameter-update values against the ranges
 * allowed by the specification (intervals, supervision timeout and
 * slave latency).  Non-zero return rejects the request; specific
 * return statements are elided in this excerpt. */
2581 static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency,
2586 if (min > max || min < 6 || max > 3200)
2589 if (to_multiplier < 10 || to_multiplier > 3200)
/* Supervision timeout must exceed the maximum connection interval. */
2592 if (max >= to_multiplier * 8)
2595 max_latency = (to_multiplier * 8 / max) - 1;
2596 if (latency > 499 || latency > max_latency)
/* Handle an LE Connection Parameter Update Request: only valid on a
 * link where we are master and the command length matches; validate
 * the parameters, respond accepted/rejected and, when accepted, apply
 * them via hci_le_conn_update(). */
2602 static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
2603 struct l2cap_cmd_hdr *cmd, u8 *data)
2605 struct hci_conn *hcon = conn->hcon;
2606 struct l2cap_conn_param_update_req *req;
2607 struct l2cap_conn_param_update_rsp rsp;
2608 u16 min, max, latency, to_multiplier, cmd_len;
/* Only the master may act on this request. */
2611 if (!(hcon->link_mode & HCI_LM_MASTER))
2614 cmd_len = __le16_to_cpu(cmd->len);
2615 if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
2618 req = (struct l2cap_conn_param_update_req *) data;
2619 min = __le16_to_cpu(req->min);
2620 max = __le16_to_cpu(req->max);
2621 latency = __le16_to_cpu(req->latency);
2622 to_multiplier = __le16_to_cpu(req->to_multiplier);
2624 BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
2625 min, max, latency, to_multiplier);
2627 memset(&rsp, 0, sizeof(rsp));
2629 err = l2cap_check_conn_param(min, max, latency, to_multiplier);
2631 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
2633 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
2635 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
/* Apply the new parameters only when validation passed (the guard
 * line is elided in this excerpt). */
2639 hci_le_conn_update(hcon, min, max, latency, to_multiplier);
/* Dispatch one BR/EDR signalling command to its handler.  Echo
 * requests are answered inline; unknown opcodes are logged (and
 * rejected — the error-return line is elided in this excerpt). */
2644 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
2645 struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
2649 switch (cmd->code) {
2650 case L2CAP_COMMAND_REJ:
2651 l2cap_command_rej(conn, cmd, data);
2654 case L2CAP_CONN_REQ:
2655 err = l2cap_connect_req(conn, cmd, data);
2658 case L2CAP_CONN_RSP:
2659 err = l2cap_connect_rsp(conn, cmd, data);
2662 case L2CAP_CONF_REQ:
2663 err = l2cap_config_req(conn, cmd, cmd_len, data);
2666 case L2CAP_CONF_RSP:
2667 err = l2cap_config_rsp(conn, cmd, data);
2670 case L2CAP_DISCONN_REQ:
2671 err = l2cap_disconnect_req(conn, cmd, data);
2674 case L2CAP_DISCONN_RSP:
2675 err = l2cap_disconnect_rsp(conn, cmd, data);
2678 case L2CAP_ECHO_REQ:
/* Echo: reflect the payload straight back with the same ident. */
2679 l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
2682 case L2CAP_ECHO_RSP:
2685 case L2CAP_INFO_REQ:
2686 err = l2cap_information_req(conn, cmd, data);
2689 case L2CAP_INFO_RSP:
2690 err = l2cap_information_rsp(conn, cmd, data);
2694 BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
/* Dispatch one LE signalling command: only the connection-parameter
 * update request needs real handling; rejects/responses are ignored
 * and unknown opcodes are logged (return lines elided in this
 * excerpt). */
2702 static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
2703 struct l2cap_cmd_hdr *cmd, u8 *data)
2705 switch (cmd->code) {
2706 case L2CAP_COMMAND_REJ:
2709 case L2CAP_CONN_PARAM_UPDATE_REQ:
2710 return l2cap_conn_param_update_req(conn, cmd, data);
2712 case L2CAP_CONN_PARAM_UPDATE_RSP:
2716 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
/* Process a signalling-channel skb: mirror it to raw sockets, then
 * iterate over every embedded command, validating lengths and idents,
 * dispatching to the LE or BR/EDR handler, and sending a Command
 * Reject when a handler fails. */
2721 static inline void l2cap_sig_channel(struct l2cap_conn *conn,
2722 struct sk_buff *skb)
2724 u8 *data = skb->data;
2726 struct l2cap_cmd_hdr cmd;
2729 l2cap_raw_recv(conn, skb);
/* A single skb may carry several concatenated commands. */
2731 while (len >= L2CAP_CMD_HDR_SIZE) {
2733 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
2734 data += L2CAP_CMD_HDR_SIZE;
2735 len -= L2CAP_CMD_HDR_SIZE;
2737 cmd_len = le16_to_cpu(cmd.len);
2739 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);
/* Reject truncated payloads and the reserved ident 0. */
2741 if (cmd_len > len || !cmd.ident) {
2742 BT_DBG("corrupted command");
2746 if (conn->hcon->type == LE_LINK)
2747 err = l2cap_le_sig_cmd(conn, &cmd, data);
2749 err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);
2752 struct l2cap_cmd_rej rej;
2754 BT_ERR("Wrong link type (%d)", err);
2756 /* FIXME: Map err to a valid reason */
2757 rej.reason = cpu_to_le16(0);
2758 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
/* Verify the trailing CRC16 FCS of a received frame when FCS is
 * enabled: trim the 2 FCS bytes off the skb, read them from just past
 * the new tail, and compare against a CRC computed over the header
 * (hdr_size bytes before skb->data) plus the remaining payload.
 * No-op when pi->fcs is not CRC16; the error/success returns are
 * elided in this excerpt. */
2768 static int l2cap_check_fcs(struct l2cap_pinfo *pi, struct sk_buff *skb)
2770 u16 our_fcs, rcv_fcs;
2771 int hdr_size = L2CAP_HDR_SIZE + 2;
2773 if (pi->fcs == L2CAP_FCS_CRC16) {
/* After the trim, data + len points at the (still present) FCS. */
2774 skb_trim(skb, skb->len - 2);
2775 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
2776 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
2778 if (our_fcs != rcv_fcs)
/* After local processing, tell the peer our receive state: send RNR if
 * we are locally busy, retransmit/send pending I-frames otherwise, and
 * fall back to an RR if nothing at all was transmitted. */
2784 static inline void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
2788 chan->frames_sent = 0;
2790 control |= chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
2792 if (chan->conn_state & L2CAP_CONN_LOCAL_BUSY) {
2793 control |= L2CAP_SUPER_RCV_NOT_READY;
2794 l2cap_send_sframe(chan, control);
2795 chan->conn_state |= L2CAP_CONN_RNR_SENT;
2798 if (chan->conn_state & L2CAP_CONN_REMOTE_BUSY)
2799 l2cap_retransmit_frames(chan);
2801 l2cap_ertm_send(chan);
/* Nothing went out and we are not busy: ack with a plain RR. */
2803 if (!(chan->conn_state & L2CAP_CONN_LOCAL_BUSY) &&
2804 chan->frames_sent == 0) {
2805 control |= L2CAP_SUPER_RCV_READY;
2806 l2cap_send_sframe(chan, control);
/* Insert an out-of-sequence I-frame into the SREJ queue, keeping the
 * queue ordered by tx_seq offset from buffer_seq (modulo-64 sequence
 * space).  Duplicate tx_seq handling and return statements are elided
 * in this excerpt. */
2810 static int l2cap_add_to_srej_queue(struct l2cap_chan *chan, struct sk_buff *skb, u8 tx_seq, u8 sar)
2812 struct sk_buff *next_skb;
2813 int tx_seq_offset, next_tx_seq_offset;
2815 bt_cb(skb)->tx_seq = tx_seq;
2816 bt_cb(skb)->sar = sar;
/* Empty queue: trivially append. */
2818 next_skb = skb_peek(&chan->srej_q);
2820 __skb_queue_tail(&chan->srej_q, skb);
/* Offsets are taken relative to buffer_seq, normalised into [0,64). */
2824 tx_seq_offset = (tx_seq - chan->buffer_seq) % 64;
2825 if (tx_seq_offset < 0)
2826 tx_seq_offset += 64;
2829 if (bt_cb(next_skb)->tx_seq == tx_seq)
2832 next_tx_seq_offset = (bt_cb(next_skb)->tx_seq -
2833 chan->buffer_seq) % 64;
2834 if (next_tx_seq_offset < 0)
2835 next_tx_seq_offset += 64;
/* Found the first queued frame that follows us: insert before it. */
2837 if (next_tx_seq_offset > tx_seq_offset) {
2838 __skb_queue_before(&chan->srej_q, next_skb, skb);
2842 if (skb_queue_is_last(&chan->srej_q, next_skb))
2845 } while ((next_skb = skb_queue_next(&chan->srej_q, next_skb)));
2847 __skb_queue_tail(&chan->srej_q, skb);
/* Reassemble an ERTM SDU from I-frames according to the SAR bits in
 * @control: unsegmented frames are queued directly; START allocates
 * chan->sdu and begins accumulation; CONTINUE/END append and, on END,
 * a clone of the completed SDU is queued to the socket (with a retry
 * flag when queueing fails).  Error labels, break statements and the
 * END case header are elided in this excerpt. */
2852 static int l2cap_ertm_reassembly_sdu(struct l2cap_chan *chan, struct sk_buff *skb, u16 control)
2854 struct l2cap_pinfo *pi = l2cap_pi(chan->sk);
2855 struct sk_buff *_skb;
2858 switch (control & L2CAP_CTRL_SAR) {
2859 case L2CAP_SDU_UNSEGMENTED:
/* A whole SDU while one is mid-reassembly is a protocol error. */
2860 if (chan->conn_state & L2CAP_CONN_SAR_SDU)
2863 err = sock_queue_rcv_skb(chan->sk, skb);
2869 case L2CAP_SDU_START:
2870 if (chan->conn_state & L2CAP_CONN_SAR_SDU)
/* First two payload bytes of a START frame carry the SDU length. */
2873 chan->sdu_len = get_unaligned_le16(skb->data);
2875 if (chan->sdu_len > pi->imtu)
2878 chan->sdu = bt_skb_alloc(chan->sdu_len, GFP_ATOMIC);
2882 /* pull sdu_len bytes only after alloc, because of Local Busy
2883 * condition we have to be sure that this will be executed
2884 * only once, i.e., when alloc does not fail */
2887 memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);
2889 chan->conn_state |= L2CAP_CONN_SAR_SDU;
2890 chan->partial_sdu_len = skb->len;
2893 case L2CAP_SDU_CONTINUE:
2894 if (!(chan->conn_state & L2CAP_CONN_SAR_SDU))
2900 chan->partial_sdu_len += skb->len;
/* Overflow of the announced SDU length is a protocol error. */
2901 if (chan->partial_sdu_len > chan->sdu_len)
2904 memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);
/* (L2CAP_SDU_END case — header line elided in this excerpt) */
2909 if (!(chan->conn_state & L2CAP_CONN_SAR_SDU))
/* SAR_RETRY means a previous END already appended this data; skip
 * the append and only retry the queueing below. */
2915 if (!(chan->conn_state & L2CAP_CONN_SAR_RETRY)) {
2916 chan->partial_sdu_len += skb->len;
2918 if (chan->partial_sdu_len > pi->imtu)
2921 if (chan->partial_sdu_len != chan->sdu_len)
2924 memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);
2927 _skb = skb_clone(chan->sdu, GFP_ATOMIC);
2929 chan->conn_state |= L2CAP_CONN_SAR_RETRY;
2933 err = sock_queue_rcv_skb(chan->sk, _skb);
2936 chan->conn_state |= L2CAP_CONN_SAR_RETRY;
2940 chan->conn_state &= ~L2CAP_CONN_SAR_RETRY;
2941 chan->conn_state &= ~L2CAP_CONN_SAR_SDU;
2943 kfree_skb(chan->sdu);
/* Error path: drop the partial SDU and disconnect the channel. */
2951 kfree_skb(chan->sdu);
2955 l2cap_send_disconn_req(pi->conn, chan, ECONNRESET);
/* Try to drain the busy queue: push each buffered frame through ERTM
 * reassembly, putting a frame back and stopping on failure.  On full
 * drain, exit the local-busy state, poll the peer with an RR+P frame
 * and switch from the retransmission to the monitor timer. */
2960 static int l2cap_try_push_rx_skb(struct l2cap_chan *chan)
2962 struct sk_buff *skb;
2966 while ((skb = skb_dequeue(&chan->busy_q))) {
2967 control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
2968 err = l2cap_ertm_reassembly_sdu(chan, skb, control);
/* Still busy: put the frame back at the head and bail out. */
2970 skb_queue_head(&chan->busy_q, skb);
2974 chan->buffer_seq = (chan->buffer_seq + 1) % 64;
2977 if (!(chan->conn_state & L2CAP_CONN_RNR_SENT))
/* We previously sent RNR: poll the peer to resume its sender. */
2980 control = chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
2981 control |= L2CAP_SUPER_RCV_READY | L2CAP_CTRL_POLL;
2982 l2cap_send_sframe(chan, control);
2983 chan->retry_count = 1;
2985 del_timer(&chan->retrans_timer);
2986 __mod_monitor_timer();
2988 chan->conn_state |= L2CAP_CONN_WAIT_F;
2991 chan->conn_state &= ~L2CAP_CONN_LOCAL_BUSY;
2992 chan->conn_state &= ~L2CAP_CONN_RNR_SENT;
2994 BT_DBG("chan %p, Exit local busy", chan);
/* Workqueue item run while the channel is locally busy: repeatedly
 * sleep-and-retry pushing queued rx frames until the busy queue
 * drains, a signal/socket error occurs, or the retry budget
 * (L2CAP_LOCAL_BUSY_TRIES) is exhausted — in which case the channel is
 * disconnected with EBUSY.  Some break/goto lines are elided in this
 * excerpt. */
2999 static void l2cap_busy_work(struct work_struct *work)
3001 DECLARE_WAITQUEUE(wait, current);
3002 struct l2cap_chan *chan =
3003 container_of(work, struct l2cap_chan, busy_work);
3004 struct sock *sk = chan->sk;
3005 int n_tries = 0, timeo = HZ/5, err;
3006 struct sk_buff *skb;
3010 add_wait_queue(sk_sleep(sk), &wait);
3011 while ((skb = skb_peek(&chan->busy_q))) {
3012 set_current_state(TASK_INTERRUPTIBLE);
/* Too many retries: give up and tear the channel down. */
3014 if (n_tries++ > L2CAP_LOCAL_BUSY_TRIES) {
3016 l2cap_send_disconn_req(l2cap_pi(sk)->conn, chan, EBUSY);
3023 if (signal_pending(current)) {
3024 err = sock_intr_errno(timeo);
3029 timeo = schedule_timeout(timeo);
3032 err = sock_error(sk);
/* Queue drained successfully: we are done. */
3036 if (l2cap_try_push_rx_skb(chan) == 0)
3040 set_current_state(TASK_RUNNING);
3041 remove_wait_queue(sk_sleep(sk), &wait);
/* Deliver a received ERTM I-frame towards SDU reassembly.  If the channel
 * is already local-busy the frame is appended to busy_q and a drain is
 * attempted.  Otherwise a reassembly attempt that signals busy flips the
 * channel into the local-busy state: the frame is parked, an RNR S-frame
 * tells the remote to stop sending, and the busy worker is scheduled.
 * (Interior lines elided in this fragment.)
 */
3046 static int l2cap_push_rx_skb(struct l2cap_chan *chan, struct sk_buff *skb, u16 control)
3050 if (chan->conn_state & L2CAP_CONN_LOCAL_BUSY) {
/* stash the SAR bits so reassembly can be replayed later */
3051 bt_cb(skb)->sar = control >> L2CAP_CTRL_SAR_SHIFT;
3052 __skb_queue_tail(&chan->busy_q, skb);
3053 return l2cap_try_push_rx_skb(chan);
3058 err = l2cap_ertm_reassembly_sdu(chan, skb, control);
3060 chan->buffer_seq = (chan->buffer_seq + 1) % 64;
3064 /* Busy Condition */
3065 BT_DBG("chan %p, Enter local busy", chan);
3067 chan->conn_state |= L2CAP_CONN_LOCAL_BUSY;
3068 bt_cb(skb)->sar = control >> L2CAP_CTRL_SAR_SHIFT;
3069 __skb_queue_tail(&chan->busy_q, skb);
/* tell the remote to stop: RNR carrying our current buffer_seq */
3071 sctrl = chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3072 sctrl |= L2CAP_SUPER_RCV_NOT_READY;
3073 l2cap_send_sframe(chan, sctrl);
3075 chan->conn_state |= L2CAP_CONN_RNR_SENT;
3077 del_timer(&chan->ack_timer);
3079 queue_work(_busy_wq, &chan->busy_work);
/* Reassemble SDUs on a Streaming-mode channel.  Streaming mode has no
 * retransmission: lost, duplicate-START or oversized fragments simply
 * reset the partial-SDU state instead of triggering recovery.
 * (Interior lines elided in this fragment.)
 */
3084 static int l2cap_streaming_reassembly_sdu(struct l2cap_chan *chan, struct sk_buff *skb, u16 control)
3086 struct l2cap_pinfo *pi = l2cap_pi(chan->sk);
3087 struct sk_buff *_skb;
3091 * TODO: We have to notify the userland if some data is lost with the
3095 switch (control & L2CAP_CTRL_SAR) {
3096 case L2CAP_SDU_UNSEGMENTED:
/* a segmented SDU was in flight: discard the partial data */
3097 if (chan->conn_state & L2CAP_CONN_SAR_SDU) {
3098 kfree_skb(chan->sdu);
3102 err = sock_queue_rcv_skb(chan->sk, skb);
3108 case L2CAP_SDU_START:
3109 if (chan->conn_state & L2CAP_CONN_SAR_SDU) {
3110 kfree_skb(chan->sdu);
/* first two payload bytes of a START frame carry the total SDU length */
3114 chan->sdu_len = get_unaligned_le16(skb->data);
3117 if (chan->sdu_len > pi->imtu) {
3122 chan->sdu = bt_skb_alloc(chan->sdu_len, GFP_ATOMIC);
3128 memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);
3130 chan->conn_state |= L2CAP_CONN_SAR_SDU;
3131 chan->partial_sdu_len = skb->len;
3135 case L2CAP_SDU_CONTINUE:
/* CONTINUE without a preceding START is a protocol violation */
3136 if (!(chan->conn_state & L2CAP_CONN_SAR_SDU))
3139 memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);
3141 chan->partial_sdu_len += skb->len;
3142 if (chan->partial_sdu_len > chan->sdu_len)
3143 kfree_skb(chan->sdu);
3150 if (!(chan->conn_state & L2CAP_CONN_SAR_SDU))
3153 memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);
3155 chan->conn_state &= ~L2CAP_CONN_SAR_SDU;
3156 chan->partial_sdu_len += skb->len;
3158 if (chan->partial_sdu_len > pi->imtu)
/* deliver only when the reassembled length matches exactly */
3161 if (chan->partial_sdu_len == chan->sdu_len) {
/* NOTE(review): skb_clone(GFP_ATOMIC) may return NULL under memory
 * pressure; confirm the elided code checks _skb before queueing. */
3162 _skb = skb_clone(chan->sdu, GFP_ATOMIC);
3163 err = sock_queue_rcv_skb(chan->sk, _skb);
3170 kfree_skb(chan->sdu);
/* After a selectively-rejected frame finally arrives, flush any frames
 * buffered out-of-order in srej_q that are now consecutive (matched by
 * tx_seq) through ERTM reassembly, advancing buffer_seq_srej and tx_seq
 * modulo 64.  (Interior lines elided in this fragment.)
 */
3178 static void l2cap_check_srej_gap(struct l2cap_chan *chan, u8 tx_seq)
3180 struct sk_buff *skb;
/* stop at the first gap: peek, only dequeue on a tx_seq match */
3183 while ((skb = skb_peek(&chan->srej_q))) {
3184 if (bt_cb(skb)->tx_seq != tx_seq)
3187 skb = skb_dequeue(&chan->srej_q);
3188 control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
3189 l2cap_ertm_reassembly_sdu(chan, skb, control);
3190 chan->buffer_seq_srej =
3191 (chan->buffer_seq_srej + 1) % 64;
3192 tx_seq = (tx_seq + 1) % 64;
/* Walk the outstanding-SREJ list and re-send a SREJ S-frame for each
 * entry, rotating entries back to the list tail; the entry matching
 * tx_seq presumably terminates the walk (handling elided in this
 * fragment — verify against the full source).
 */
3196 static void l2cap_resend_srejframe(struct l2cap_chan *chan, u8 tx_seq)
3198 struct srej_list *l, *tmp;
3201 list_for_each_entry_safe(l, tmp, &chan->srej_l, list) {
3202 if (l->tx_seq == tx_seq) {
3207 control = L2CAP_SUPER_SELECT_REJECT;
3208 control |= l->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3209 l2cap_send_sframe(chan, control);
/* keep the entry pending by moving it to the tail */
3211 list_add_tail(&l->list, &chan->srej_l);
/* Send a SREJ S-frame for every sequence number between expected_tx_seq
 * and the received tx_seq (exclusive), recording each in srej_l so the
 * retransmissions can be matched later; finally skip past tx_seq itself.
 * (Interior lines elided in this fragment.)
 */
3215 static void l2cap_send_srejframe(struct l2cap_chan *chan, u8 tx_seq)
3217 struct srej_list *new;
3220 while (tx_seq != chan->expected_tx_seq) {
3221 control = L2CAP_SUPER_SELECT_REJECT;
3222 control |= chan->expected_tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3223 l2cap_send_sframe(chan, control);
/* NOTE(review): kzalloc(GFP_ATOMIC) result is not checked before the
 * new->tx_seq store below — potential NULL dereference under memory
 * pressure; upstream later reworked this to propagate -ENOMEM. */
3225 new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC);
3226 new->tx_seq = chan->expected_tx_seq;
3227 chan->expected_tx_seq = (chan->expected_tx_seq + 1) % 64;
3228 list_add_tail(&new->list, &chan->srej_l);
3230 chan->expected_tx_seq = (chan->expected_tx_seq + 1) % 64;
/* Core ERTM I-frame receive handler: process the piggybacked ack
 * (req_seq), validate tx_seq against the receive window, handle the
 * in-sequence fast path, and manage SREJ-based recovery for
 * out-of-sequence frames.  (Interior lines elided in this fragment.)
 */
3233 static inline int l2cap_data_channel_iframe(struct l2cap_chan *chan, u16 rx_control, struct sk_buff *skb)
3235 struct l2cap_pinfo *pi = l2cap_pi(chan->sk);
3236 u8 tx_seq = __get_txseq(rx_control);
3237 u8 req_seq = __get_reqseq(rx_control);
3238 u8 sar = rx_control >> L2CAP_CTRL_SAR_SHIFT;
3239 int tx_seq_offset, expected_tx_seq_offset;
/* send an ack roughly every sixth of the transmit window */
3240 int num_to_ack = (pi->tx_win/6) + 1;
3243 BT_DBG("chan %p len %d tx_seq %d rx_control 0x%4.4x", chan, skb->len,
3244 tx_seq, rx_control);
/* F-bit answers our earlier poll: stop the monitor timer */
3246 if (L2CAP_CTRL_FINAL & rx_control &&
3247 chan->conn_state & L2CAP_CONN_WAIT_F) {
3248 del_timer(&chan->monitor_timer);
3249 if (chan->unacked_frames > 0)
3250 __mod_retrans_timer();
3251 chan->conn_state &= ~L2CAP_CONN_WAIT_F;
/* piggybacked acknowledgement */
3254 chan->expected_ack_seq = req_seq;
3255 l2cap_drop_acked_frames(chan);
3257 if (tx_seq == chan->expected_tx_seq)
/* window arithmetic is modulo 64 */
3260 tx_seq_offset = (tx_seq - chan->buffer_seq) % 64;
3261 if (tx_seq_offset < 0)
3262 tx_seq_offset += 64;
3264 /* invalid tx_seq */
3265 if (tx_seq_offset >= pi->tx_win) {
3266 l2cap_send_disconn_req(pi->conn, chan, ECONNRESET);
/* NOTE(review): conn_state is a bitmask elsewhere in this file; '=='
 * only matches when LOCAL_BUSY is the sole flag set — this probably
 * should be a '&' test.  Verify against upstream history. */
3270 if (chan->conn_state == L2CAP_CONN_LOCAL_BUSY)
3273 if (chan->conn_state & L2CAP_CONN_SREJ_SENT) {
3274 struct srej_list *first;
3276 first = list_first_entry(&chan->srej_l,
3277 struct srej_list, list);
/* the frame we were waiting for arrived: close the gap */
3278 if (tx_seq == first->tx_seq) {
3279 l2cap_add_to_srej_queue(chan, skb, tx_seq, sar);
3280 l2cap_check_srej_gap(chan, tx_seq);
3282 list_del(&first->list);
3285 if (list_empty(&chan->srej_l)) {
3286 chan->buffer_seq = chan->buffer_seq_srej;
3287 chan->conn_state &= ~L2CAP_CONN_SREJ_SENT;
3288 l2cap_send_ack(chan);
3289 BT_DBG("chan %p, Exit SREJ_SENT", chan);
3292 struct srej_list *l;
3294 /* duplicated tx_seq */
3295 if (l2cap_add_to_srej_queue(chan, skb, tx_seq, sar) < 0)
3298 list_for_each_entry(l, &chan->srej_l, list) {
3299 if (l->tx_seq == tx_seq) {
3300 l2cap_resend_srejframe(chan, tx_seq);
3304 l2cap_send_srejframe(chan, tx_seq);
3307 expected_tx_seq_offset =
3308 (chan->expected_tx_seq - chan->buffer_seq) % 64;
3309 if (expected_tx_seq_offset < 0)
3310 expected_tx_seq_offset += 64;
3312 /* duplicated tx_seq */
3313 if (tx_seq_offset < expected_tx_seq_offset)
/* first out-of-sequence frame: enter SREJ recovery */
3316 chan->conn_state |= L2CAP_CONN_SREJ_SENT;
3318 BT_DBG("chan %p, Enter SREJ", chan);
3320 INIT_LIST_HEAD(&chan->srej_l);
3321 chan->buffer_seq_srej = chan->buffer_seq;
3323 __skb_queue_head_init(&chan->srej_q);
3324 __skb_queue_head_init(&chan->busy_q);
3325 l2cap_add_to_srej_queue(chan, skb, tx_seq, sar);
3327 chan->conn_state |= L2CAP_CONN_SEND_PBIT;
3329 l2cap_send_srejframe(chan, tx_seq);
3331 del_timer(&chan->ack_timer);
/* in-sequence fast path */
3336 chan->expected_tx_seq = (chan->expected_tx_seq + 1) % 64;
3338 if (chan->conn_state & L2CAP_CONN_SREJ_SENT) {
3339 bt_cb(skb)->tx_seq = tx_seq;
3340 bt_cb(skb)->sar = sar;
3341 __skb_queue_tail(&chan->srej_q, skb);
3345 err = l2cap_push_rx_skb(chan, skb, rx_control);
3349 if (rx_control & L2CAP_CTRL_FINAL) {
3350 if (chan->conn_state & L2CAP_CONN_REJ_ACT)
3351 chan->conn_state &= ~L2CAP_CONN_REJ_ACT;
3353 l2cap_retransmit_frames(chan);
3358 chan->num_acked = (chan->num_acked + 1) % num_to_ack;
3359 if (chan->num_acked == num_to_ack - 1)
3360 l2cap_send_ack(chan);
/* Handle a Receiver-Ready (RR) S-frame: process the ack, answer a Poll
 * with an F-bit response (SREJ tail if recovery is active), handle a
 * Final bit that closes a pending REJ, and otherwise resume normal
 * transmission.  (Interior lines elided in this fragment.)
 */
3369 static inline void l2cap_data_channel_rrframe(struct l2cap_chan *chan, u16 rx_control)
3371 BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, __get_reqseq(rx_control),
3374 chan->expected_ack_seq = __get_reqseq(rx_control);
3375 l2cap_drop_acked_frames(chan);
3377 if (rx_control & L2CAP_CTRL_POLL) {
/* peer polled us: our next frame must carry the F-bit */
3378 chan->conn_state |= L2CAP_CONN_SEND_FBIT;
3379 if (chan->conn_state & L2CAP_CONN_SREJ_SENT) {
3380 if ((chan->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
3381 (chan->unacked_frames > 0))
3382 __mod_retrans_timer();
3384 chan->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3385 l2cap_send_srejtail(chan);
3387 l2cap_send_i_or_rr_or_rnr(chan);
3390 } else if (rx_control & L2CAP_CTRL_FINAL) {
3391 chan->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
/* F-bit closes an active REJ exchange; otherwise retransmit */
3393 if (chan->conn_state & L2CAP_CONN_REJ_ACT)
3394 chan->conn_state &= ~L2CAP_CONN_REJ_ACT;
3396 l2cap_retransmit_frames(chan);
3399 if ((chan->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
3400 (chan->unacked_frames > 0))
3401 __mod_retrans_timer();
3403 chan->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3404 if (chan->conn_state & L2CAP_CONN_SREJ_SENT)
3405 l2cap_send_ack(chan);
3407 l2cap_ertm_send(chan);
/* Handle a Reject (REJ) S-frame: ack up to req_seq and retransmit from
 * there.  A retransmission triggered while we await an F-bit is latched
 * via REJ_ACT so the eventual F-bit does not retransmit twice.
 * (Interior lines elided in this fragment.)
 */
3411 static inline void l2cap_data_channel_rejframe(struct l2cap_chan *chan, u16 rx_control)
3413 u8 tx_seq = __get_reqseq(rx_control);
3415 BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, tx_seq, rx_control);
3417 chan->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3419 chan->expected_ack_seq = tx_seq;
3420 l2cap_drop_acked_frames(chan);
3422 if (rx_control & L2CAP_CTRL_FINAL) {
/* only retransmit on F-bit if a REJ retransmit is not already done */
3423 if (chan->conn_state & L2CAP_CONN_REJ_ACT)
3424 chan->conn_state &= ~L2CAP_CONN_REJ_ACT;
3426 l2cap_retransmit_frames(chan);
3428 l2cap_retransmit_frames(chan);
3430 if (chan->conn_state & L2CAP_CONN_WAIT_F)
3431 chan->conn_state |= L2CAP_CONN_REJ_ACT;
/* Handle a Selective Reject (SREJ) S-frame: retransmit exactly the
 * requested frame.  Poll/Final handling mirrors REJ, with SREJ_ACT and
 * srej_save_reqseq latching a retransmission done while awaiting the
 * F-bit.  (Interior lines elided in this fragment.)
 */
3434 static inline void l2cap_data_channel_srejframe(struct l2cap_chan *chan, u16 rx_control)
3436 u8 tx_seq = __get_reqseq(rx_control);
3438 BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, tx_seq, rx_control);
3440 chan->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3442 if (rx_control & L2CAP_CTRL_POLL) {
/* SREJ with P-bit also acknowledges everything before tx_seq */
3443 chan->expected_ack_seq = tx_seq;
3444 l2cap_drop_acked_frames(chan);
3446 chan->conn_state |= L2CAP_CONN_SEND_FBIT;
3447 l2cap_retransmit_one_frame(chan, tx_seq);
3449 l2cap_ertm_send(chan);
3451 if (chan->conn_state & L2CAP_CONN_WAIT_F) {
3452 chan->srej_save_reqseq = tx_seq;
3453 chan->conn_state |= L2CAP_CONN_SREJ_ACT;
3455 } else if (rx_control & L2CAP_CTRL_FINAL) {
/* skip the retransmit if this same frame was already resent */
3456 if ((chan->conn_state & L2CAP_CONN_SREJ_ACT) &&
3457 chan->srej_save_reqseq == tx_seq)
3458 chan->conn_state &= ~L2CAP_CONN_SREJ_ACT;
3460 l2cap_retransmit_one_frame(chan, tx_seq);
3462 l2cap_retransmit_one_frame(chan, tx_seq);
3463 if (chan->conn_state & L2CAP_CONN_WAIT_F) {
3464 chan->srej_save_reqseq = tx_seq;
3465 chan->conn_state |= L2CAP_CONN_SREJ_ACT;
/* Handle a Receiver-Not-Ready (RNR) S-frame: mark the remote busy, ack
 * up to req_seq, stop retransmitting, and answer any Poll bit.
 * (Interior lines elided in this fragment.)
 */
3470 static inline void l2cap_data_channel_rnrframe(struct l2cap_chan *chan, u16 rx_control)
3472 u8 tx_seq = __get_reqseq(rx_control);
3474 BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, tx_seq, rx_control);
3476 chan->conn_state |= L2CAP_CONN_REMOTE_BUSY;
3477 chan->expected_ack_seq = tx_seq;
3478 l2cap_drop_acked_frames(chan);
3480 if (rx_control & L2CAP_CTRL_POLL)
3481 chan->conn_state |= L2CAP_CONN_SEND_FBIT;
3483 if (!(chan->conn_state & L2CAP_CONN_SREJ_SENT)) {
/* remote is busy: pointless to keep the retransmission timer alive */
3484 del_timer(&chan->retrans_timer);
3485 if (rx_control & L2CAP_CTRL_POLL)
3486 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_FINAL);
3490 if (rx_control & L2CAP_CTRL_POLL)
3491 l2cap_send_srejtail(chan);
3493 l2cap_send_sframe(chan, L2CAP_SUPER_RCV_READY);
/* S-frame dispatcher: common F-bit/monitor-timer handling, then route by
 * supervisory function (RR / REJ / SREJ / RNR).  (Interior lines elided
 * in this fragment.)
 */
3496 static inline int l2cap_data_channel_sframe(struct l2cap_chan *chan, u16 rx_control, struct sk_buff *skb)
3498 BT_DBG("chan %p rx_control 0x%4.4x len %d", chan, rx_control, skb->len);
/* F-bit answers our poll: stop the monitor timer, resume retransmits */
3500 if (L2CAP_CTRL_FINAL & rx_control &&
3501 chan->conn_state & L2CAP_CONN_WAIT_F) {
3502 del_timer(&chan->monitor_timer);
3503 if (chan->unacked_frames > 0)
3504 __mod_retrans_timer();
3505 chan->conn_state &= ~L2CAP_CONN_WAIT_F;
3508 switch (rx_control & L2CAP_CTRL_SUPERVISE) {
3509 case L2CAP_SUPER_RCV_READY:
3510 l2cap_data_channel_rrframe(chan, rx_control);
3513 case L2CAP_SUPER_REJECT:
3514 l2cap_data_channel_rejframe(chan, rx_control);
3517 case L2CAP_SUPER_SELECT_REJECT:
3518 l2cap_data_channel_srejframe(chan, rx_control);
3521 case L2CAP_SUPER_RCV_NOT_READY:
3522 l2cap_data_channel_rnrframe(chan, rx_control);
/* Validate and dispatch one received ERTM PDU: check the FCS, bound the
 * payload by MPS, validate req_seq against the unacked window, then hand
 * off to the I-frame or S-frame handler.  Protocol violations tear the
 * channel down with ECONNRESET.  (Interior lines elided in this
 * fragment.)
 */
3530 static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb)
3532 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
3533 struct l2cap_pinfo *pi = l2cap_pi(sk);
3536 int len, next_tx_seq_offset, req_seq_offset;
3538 control = get_unaligned_le16(skb->data);
3543 * We can just drop the corrupted I-frame here.
3544 * Receiver will miss it and start proper recovery
3545 * procedures and ask retransmission.
3547 if (l2cap_check_fcs(pi, skb))
/* a SAR-start I-frame carries a 2-byte SDU length header */
3550 if (__is_sar_start(control) && __is_iframe(control))
3553 if (pi->fcs == L2CAP_FCS_CRC16)
3556 if (len > pi->mps) {
3557 l2cap_send_disconn_req(pi->conn, chan, ECONNRESET);
/* req_seq must fall inside [expected_ack_seq, next_tx_seq], mod 64 */
3561 req_seq = __get_reqseq(control);
3562 req_seq_offset = (req_seq - chan->expected_ack_seq) % 64;
3563 if (req_seq_offset < 0)
3564 req_seq_offset += 64;
3566 next_tx_seq_offset =
3567 (chan->next_tx_seq - chan->expected_ack_seq) % 64;
3568 if (next_tx_seq_offset < 0)
3569 next_tx_seq_offset += 64;
3571 /* check for invalid req-seq */
3572 if (req_seq_offset > next_tx_seq_offset) {
3573 l2cap_send_disconn_req(pi->conn, chan, ECONNRESET);
3577 if (__is_iframe(control)) {
3579 l2cap_send_disconn_req(pi->conn, chan, ECONNRESET);
3583 l2cap_data_channel_iframe(chan, control, skb);
3587 l2cap_send_disconn_req(pi->conn, chan, ECONNRESET);
3591 l2cap_data_channel_sframe(chan, control, skb);
/* Route an incoming data PDU to the connection-oriented channel matching
 * the destination CID, then process it per channel mode: Basic (queue
 * straight to the socket), ERTM (direct receive or socket backlog), or
 * Streaming (FCS/length check then SDU reassembly).  (Interior lines
 * elided in this fragment.)
 */
3601 static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
3603 struct l2cap_chan *chan;
3605 struct l2cap_pinfo *pi;
3610 chan = l2cap_get_chan_by_scid(conn, cid);
3612 BT_DBG("unknown cid 0x%4.4x", cid);
3619 BT_DBG("chan %p, len %d", chan, skb->len);
3621 if (sk->sk_state != BT_CONNECTED)
3625 case L2CAP_MODE_BASIC:
3626 /* If socket recv buffers overflows we drop data here
3627 * which is *bad* because L2CAP has to be reliable.
3628 * But we don't have any other choice. L2CAP doesn't
3629 * provide flow control mechanism. */
3631 if (pi->imtu < skb->len)
3634 if (!sock_queue_rcv_skb(sk, skb))
3638 case L2CAP_MODE_ERTM:
/* if userspace holds the socket lock, defer via the backlog */
3639 if (!sock_owned_by_user(sk)) {
3640 l2cap_ertm_data_rcv(sk, skb);
3642 if (sk_add_backlog(sk, skb))
3648 case L2CAP_MODE_STREAMING:
3649 control = get_unaligned_le16(skb->data);
3653 if (l2cap_check_fcs(pi, skb))
3656 if (__is_sar_start(control))
3659 if (pi->fcs == L2CAP_FCS_CRC16)
/* S-frames are not valid in streaming mode */
3662 if (len > pi->mps || len < 0 || __is_sframe(control))
3665 tx_seq = __get_txseq(control);
/* streaming mode never recovers: just resync expected_tx_seq */
3667 if (chan->expected_tx_seq == tx_seq)
3668 chan->expected_tx_seq = (chan->expected_tx_seq + 1) % 64;
3670 chan->expected_tx_seq = (tx_seq + 1) % 64;
3672 l2cap_streaming_reassembly_sdu(chan, skb, control);
3677 BT_DBG("chan %p: bad mode 0x%2.2x", chan, pi->mode);
/* Deliver a connectionless-channel PDU to a socket bound to the given
 * PSM, subject to state and incoming-MTU checks.  (Interior lines
 * elided in this fragment.)
 */
3691 static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
3695 sk = l2cap_get_sock_by_psm(0, psm, conn->src);
3701 BT_DBG("sk %p, len %d", sk, skb->len);
3703 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_CONNECTED)
3706 if (l2cap_pi(sk)->imtu < skb->len)
3709 if (!sock_queue_rcv_skb(sk, skb))
/* Deliver an LE attribute-protocol (fixed CID) PDU to the matching
 * socket, subject to state and incoming-MTU checks.  (Interior lines
 * elided in this fragment.)
 */
3721 static inline int l2cap_att_channel(struct l2cap_conn *conn, __le16 cid, struct sk_buff *skb)
3725 sk = l2cap_get_sock_by_scid(0, cid, conn->src);
3731 BT_DBG("sk %p, len %d", sk, skb->len);
3733 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_CONNECTED)
3736 if (l2cap_pi(sk)->imtu < skb->len)
3739 if (!sock_queue_rcv_skb(sk, skb))
/* Parse the basic L2CAP header of a complete frame and dispatch by CID:
 * signaling, connectionless, LE data, or a connection-oriented data
 * channel.  (Interior lines elided in this fragment.)
 */
3751 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
3753 struct l2cap_hdr *lh = (void *) skb->data;
3757 skb_pull(skb, L2CAP_HDR_SIZE);
3758 cid = __le16_to_cpu(lh->cid);
3759 len = __le16_to_cpu(lh->len);
/* header length must match the actual payload length */
3761 if (len != skb->len) {
3766 BT_DBG("len %d, cid 0x%4.4x", len, cid);
3769 case L2CAP_CID_LE_SIGNALING:
3770 case L2CAP_CID_SIGNALING:
3771 l2cap_sig_channel(conn, skb);
3774 case L2CAP_CID_CONN_LESS:
/* connectionless payloads start with a 2-byte PSM */
3775 psm = get_unaligned_le16(skb->data);
3777 l2cap_conless_channel(conn, psm, skb);
3780 case L2CAP_CID_LE_DATA:
3781 l2cap_att_channel(conn, cid, skb);
3785 l2cap_data_channel(conn, cid, skb);
3790 /* ---- L2CAP interface with lower layer (HCI) ---- */
/* HCI callback for an incoming ACL connection request: scan listening
 * L2CAP sockets and build the accept/role-switch link-mode mask; an
 * exact local-address match takes precedence over BDADDR_ANY listeners.
 * (Interior lines elided in this fragment.)
 */
3792 static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
3794 int exact = 0, lm1 = 0, lm2 = 0;
/* NOTE(review): 'register' is a no-op on modern compilers */
3795 register struct sock *sk;
3796 struct hlist_node *node;
3798 if (type != ACL_LINK)
3801 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
3803 /* Find listening sockets and check their link_mode */
3804 read_lock(&l2cap_sk_list.lock);
3805 sk_for_each(sk, node, &l2cap_sk_list.head) {
3806 if (sk->sk_state != BT_LISTEN)
/* exact match on the local adapter address */
3809 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
3810 lm1 |= HCI_LM_ACCEPT;
3811 if (l2cap_pi(sk)->role_switch)
3812 lm1 |= HCI_LM_MASTER;
3814 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
3815 lm2 |= HCI_LM_ACCEPT;
3816 if (l2cap_pi(sk)->role_switch)
3817 lm2 |= HCI_LM_MASTER;
3820 read_unlock(&l2cap_sk_list.lock);
3822 return exact ? lm1 : lm2;
/* HCI callback when an ACL/LE link completes: on success attach an
 * l2cap_conn and mark it ready; on failure tear the connection down.
 * (Interior lines elided in this fragment.)
 */
3825 static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
3827 struct l2cap_conn *conn;
3829 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
3831 if (!(hcon->type == ACL_LINK || hcon->type == LE_LINK))
3835 conn = l2cap_conn_add(hcon, status);
3837 l2cap_conn_ready(conn);
3839 l2cap_conn_del(hcon, bt_err(status));
/* HCI callback asking which reason to use when disconnecting this link;
 * returns the reason recorded on the l2cap_conn.  (Interior lines
 * elided in this fragment.)
 */
3844 static int l2cap_disconn_ind(struct hci_conn *hcon)
3846 struct l2cap_conn *conn = hcon->l2cap_data;
3848 BT_DBG("hcon %p", hcon);
3850 if (hcon->type != ACL_LINK || !conn)
3853 return conn->disc_reason;
/* HCI callback when the link is gone: destroy the l2cap_conn with the
 * translated error code.  (Interior lines elided in this fragment.)
 */
3856 static int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
3858 BT_DBG("hcon %p reason %d", hcon, reason);
3860 if (!(hcon->type == ACL_LINK || hcon->type == LE_LINK))
3863 l2cap_conn_del(hcon, bt_err(reason));
/* React to a link-encryption change for a connected SEQPACKET/STREAM
 * socket: losing encryption gives a medium-security socket a 5 s grace
 * timer but immediately closes a high-security one; regaining it clears
 * the grace timer.  (Interior lines elided in this fragment.)
 */
3868 static inline void l2cap_check_encryption(struct sock *sk, u8 encrypt)
3870 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM)
3873 if (encrypt == 0x00) {
3874 if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM) {
3875 l2cap_sock_clear_timer(sk);
3876 l2cap_sock_set_timer(sk, HZ * 5);
3877 } else if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
3878 __l2cap_sock_close(sk, ECONNREFUSED);
3880 if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM)
3881 l2cap_sock_clear_timer(sk);
/* HCI security (authentication/encryption) result callback: walk every
 * channel on the connection and advance its state machine — established
 * channels get the encryption check, BT_CONNECT sockets send the pending
 * Connection Request, and BT_CONNECT2 sockets answer the peer with
 * success or a security block.  (Interior lines elided in this
 * fragment.)
 */
3885 static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
3887 struct l2cap_conn *conn = hcon->l2cap_data;
3888 struct l2cap_chan *chan;
3893 BT_DBG("conn %p", conn);
3895 read_lock(&conn->chan_lock);
3897 list_for_each_entry(chan, &conn->chan_l, list) {
3898 struct sock *sk = chan->sk;
/* channels still waiting on connect completion are handled elsewhere */
3902 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_CONNECT_PEND) {
3907 if (!status && (sk->sk_state == BT_CONNECTED ||
3908 sk->sk_state == BT_CONFIG)) {
3909 l2cap_check_encryption(sk, encrypt);
3914 if (sk->sk_state == BT_CONNECT) {
/* security done: fire the deferred L2CAP Connection Request */
3916 struct l2cap_conn_req req;
3917 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
3918 req.psm = l2cap_pi(sk)->psm;
3920 chan->ident = l2cap_get_ident(conn);
3921 l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
3923 l2cap_send_cmd(conn, chan->ident,
3924 L2CAP_CONN_REQ, sizeof(req), &req);
3926 l2cap_sock_clear_timer(sk);
3927 l2cap_sock_set_timer(sk, HZ / 10);
3929 } else if (sk->sk_state == BT_CONNECT2) {
/* we were the acceptor: answer the peer's pending request */
3930 struct l2cap_conn_rsp rsp;
3934 sk->sk_state = BT_CONFIG;
3935 result = L2CAP_CR_SUCCESS;
3937 sk->sk_state = BT_DISCONN;
3938 l2cap_sock_set_timer(sk, HZ / 10);
3939 result = L2CAP_CR_SEC_BLOCK;
3942 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
3943 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
3944 rsp.result = cpu_to_le16(result);
3945 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
3946 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
3953 read_unlock(&conn->chan_lock);
/* HCI ACL data receive callback: reassemble fragmented L2CAP frames.
 * A start fragment (no ACL_CONT) must carry at least the basic header;
 * a complete frame is dispatched immediately, otherwise conn->rx_skb
 * accumulates fragments until conn->rx_len reaches zero.  Any framing
 * inconsistency marks the connection unreliable (ECOMM).  (Interior
 * lines elided in this fragment.)
 */
3958 static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
3960 struct l2cap_conn *conn = hcon->l2cap_data;
3963 conn = l2cap_conn_add(hcon, 0);
3968 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
3970 if (!(flags & ACL_CONT)) {
3971 struct l2cap_hdr *hdr;
3972 struct l2cap_chan *chan;
/* a start while reassembly is in progress: drop the stale buffer */
3977 BT_ERR("Unexpected start frame (len %d)", skb->len);
3978 kfree_skb(conn->rx_skb);
3979 conn->rx_skb = NULL;
3981 l2cap_conn_unreliable(conn, ECOMM);
3984 /* Start fragment always begin with Basic L2CAP header */
3985 if (skb->len < L2CAP_HDR_SIZE) {
3986 BT_ERR("Frame is too short (len %d)", skb->len);
3987 l2cap_conn_unreliable(conn, ECOMM);
3991 hdr = (struct l2cap_hdr *) skb->data;
3992 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
3993 cid = __le16_to_cpu(hdr->cid);
3995 if (len == skb->len) {
3996 /* Complete frame received */
3997 l2cap_recv_frame(conn, skb);
4001 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
4003 if (skb->len > len) {
4004 BT_ERR("Frame is too long (len %d, expected len %d)",
4006 l2cap_conn_unreliable(conn, ECOMM);
/* early MTU check against the destination channel, if it exists */
4010 chan = l2cap_get_chan_by_scid(conn, cid);
4012 if (chan && chan->sk) {
4013 struct sock *sk = chan->sk;
4015 if (l2cap_pi(sk)->imtu < len - L2CAP_HDR_SIZE) {
4016 BT_ERR("Frame exceeding recv MTU (len %d, "
4018 l2cap_pi(sk)->imtu);
4020 l2cap_conn_unreliable(conn, ECOMM);
4026 /* Allocate skb for the complete frame (with header) */
4027 conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
4031 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
4033 conn->rx_len = len - skb->len;
4035 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
/* continuation fragment with nothing outstanding */
4037 if (!conn->rx_len) {
4038 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
4039 l2cap_conn_unreliable(conn, ECOMM);
4043 if (skb->len > conn->rx_len) {
4044 BT_ERR("Fragment is too long (len %d, expected %d)",
4045 skb->len, conn->rx_len);
4046 kfree_skb(conn->rx_skb);
4047 conn->rx_skb = NULL;
4049 l2cap_conn_unreliable(conn, ECOMM);
4053 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
4055 conn->rx_len -= skb->len;
4057 if (!conn->rx_len) {
4058 /* Complete frame received */
4059 l2cap_recv_frame(conn, conn->rx_skb);
4060 conn->rx_skb = NULL;
/* debugfs seq_file dump: one line per L2CAP socket with addresses,
 * state, PSM, MTUs and security level.  (Interior lines elided in this
 * fragment.)
 */
4069 static int l2cap_debugfs_show(struct seq_file *f, void *p)
4072 struct hlist_node *node;
4074 read_lock_bh(&l2cap_sk_list.lock);
4076 sk_for_each(sk, node, &l2cap_sk_list.head) {
4077 struct l2cap_pinfo *pi = l2cap_pi(sk);
4079 seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
4080 batostr(&bt_sk(sk)->src),
4081 batostr(&bt_sk(sk)->dst),
4082 sk->sk_state, __le16_to_cpu(pi->psm),
4084 pi->imtu, pi->omtu, pi->sec_level,
4088 read_unlock_bh(&l2cap_sk_list.lock);
/* debugfs open hook: bind the seq_file single-show helper. */
4093 static int l2cap_debugfs_open(struct inode *inode, struct file *file)
4095 return single_open(file, l2cap_debugfs_show, inode->i_private);
/* File operations for the l2cap debugfs entry (seq_file based). */
4098 static const struct file_operations l2cap_debugfs_fops = {
4099 .open = l2cap_debugfs_open,
4101 .llseek = seq_lseek,
4102 .release = single_release,
/* Dentry of the debugfs "l2cap" file, created in l2cap_init(). */
4105 static struct dentry *l2cap_debugfs;
/* Registration descriptor hooking L2CAP into the HCI core callbacks. */
4107 static struct hci_proto l2cap_hci_proto = {
4109 .id = HCI_PROTO_L2CAP,
4110 .connect_ind = l2cap_connect_ind,
4111 .connect_cfm = l2cap_connect_cfm,
4112 .disconn_ind = l2cap_disconn_ind,
4113 .disconn_cfm = l2cap_disconn_cfm,
4114 .security_cfm = l2cap_security_cfm,
4115 .recv_acldata = l2cap_recv_acldata
/* Module init: register the socket family, create the single-threaded
 * busy workqueue, register with HCI, and create the debugfs entry
 * (debugfs failure is non-fatal — only logged).  Error paths unwind in
 * reverse order.  (Interior lines elided in this fragment.)
 */
4118 int __init l2cap_init(void)
4122 err = l2cap_init_sockets();
4126 _busy_wq = create_singlethread_workqueue("l2cap");
4132 err = hci_register_proto(&l2cap_hci_proto);
4134 BT_ERR("L2CAP protocol registration failed");
4135 bt_sock_unregister(BTPROTO_L2CAP);
4140 l2cap_debugfs = debugfs_create_file("l2cap", 0444,
4141 bt_debugfs, NULL, &l2cap_debugfs_fops);
4143 BT_ERR("Failed to create L2CAP debug file");
4149 destroy_workqueue(_busy_wq);
4150 l2cap_cleanup_sockets();
/* Module teardown: remove debugfs, flush and destroy the busy workqueue,
 * unregister from HCI, and clean up the socket family.  (Interior lines
 * elided in this fragment.)
 */
4154 void l2cap_exit(void)
4156 debugfs_remove(l2cap_debugfs);
/* flush before destroy so no busy_work runs on freed channels */
4158 flush_workqueue(_busy_wq);
4159 destroy_workqueue(_busy_wq);
4161 if (hci_unregister_proto(&l2cap_hci_proto) < 0)
4162 BT_ERR("L2CAP protocol unregistration failed");
4164 l2cap_cleanup_sockets();
/* Runtime knob: disable ERTM support (bool, writable via sysfs 0644). */
4167 module_param(disable_ertm, bool, 0644);
4168 MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");