2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
7 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License version 2 as
11 published by the Free Software Foundation;
13 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
14 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
16 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
17 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
18 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
19 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
20 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
22 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
23 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
24 SOFTWARE IS DISCLAIMED.
27 /* Bluetooth L2CAP core. */
29 #include <linux/module.h>
31 #include <linux/types.h>
32 #include <linux/capability.h>
33 #include <linux/errno.h>
34 #include <linux/kernel.h>
35 #include <linux/sched.h>
36 #include <linux/slab.h>
37 #include <linux/poll.h>
38 #include <linux/fcntl.h>
39 #include <linux/init.h>
40 #include <linux/interrupt.h>
41 #include <linux/socket.h>
42 #include <linux/skbuff.h>
43 #include <linux/list.h>
44 #include <linux/device.h>
45 #include <linux/debugfs.h>
46 #include <linux/seq_file.h>
47 #include <linux/uaccess.h>
48 #include <linux/crc16.h>
51 #include <asm/system.h>
52 #include <asm/unaligned.h>
54 #include <net/bluetooth/bluetooth.h>
55 #include <net/bluetooth/hci_core.h>
56 #include <net/bluetooth/l2cap.h>
/* Feature mask advertised to peers; consumed by l2cap_mode_supported(). */
60 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
/* Bitmap of supported fixed channels (0x02 = L2CAP signalling). */
61 static u8 l2cap_fixed_chan[8] = { 0x02, };
/* Workqueue backing l2cap_busy_work() for deferred local-busy processing. */
63 static struct workqueue_struct *_busy_wq;
/* Global list of all L2CAP sockets, guarded by its rwlock. */
65 struct bt_sock_list l2cap_sk_list = {
66 .lock = __RW_LOCK_UNLOCKED(l2cap_sk_list.lock)
/* Forward declarations for helpers defined later in this file. */
69 static void l2cap_busy_work(struct work_struct *work);
71 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
72 u8 code, u8 ident, u16 dlen, void *data);
74 static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb);
76 /* ---- L2CAP channels ---- */
/* Linear scan of the connection's channel list by destination CID.
 * Caller must hold the list lock (unlocked variant, "__" prefix). */
77 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_chan_list *l, u16 cid)
80 for (c = l->head; c; c = c->next_c) {
81 if (l2cap_pi(c->sk)->dcid == cid)
/* Same scan keyed on the source (local) CID; caller holds the lock. */
87 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
90 for (c = l->head; c; c = c->next_c) {
91 if (l2cap_pi(c->sk)->scid == cid)
97 /* Find channel with given SCID.
98 * Returns locked socket */
99 static inline struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
101 struct l2cap_chan *c;
103 c = __l2cap_get_chan_by_scid(l, cid);
/* NOTE(review): matching read_lock() is in an elided line above — verify. */
106 read_unlock(&l->lock);
/* Lookup by pending signalling-command identifier (see l2cap_get_ident()). */
110 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
112 struct l2cap_chan *c;
113 for (c = l->head; c; c = c->next_c) {
114 if (l2cap_pi(c->sk)->ident == ident)
/* Locking wrapper around __l2cap_get_chan_by_ident(). */
120 static inline struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
122 struct l2cap_chan *c;
124 c = __l2cap_get_chan_by_ident(l, ident);
127 read_unlock(&l->lock);
/* Allocate the first free CID in the dynamic range for a new channel. */
131 static u16 l2cap_alloc_cid(struct l2cap_chan_list *l)
133 u16 cid = L2CAP_CID_DYN_START;
135 for (; cid < L2CAP_CID_DYN_END; cid++) {
136 if (!__l2cap_get_chan_by_scid(l, cid))
/* Allocate a channel structure bound to @sk.
 * GFP_ATOMIC: may be called from softirq/BH context. */
143 static struct l2cap_chan *l2cap_chan_alloc(struct sock *sk)
145 struct l2cap_chan *chan;
147 chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
/* Push @chan at the head of the connection's doubly-linked channel list.
 * Caller must hold the list write lock (see l2cap_chan_add()). */
156 static inline void __l2cap_chan_link(struct l2cap_chan_list *l, struct l2cap_chan *chan)
161 l->head->prev_c = chan;
163 chan->next_c = l->head;
/* Remove @chan from the list and drop the socket reference taken at link
 * time. Takes the write lock itself (BH-safe variant). */
168 static inline void l2cap_chan_unlink(struct l2cap_chan_list *l, struct l2cap_chan *chan)
170 struct l2cap_chan *next = chan->next_c, *prev = chan->prev_c;
172 write_lock_bh(&l->lock);
180 write_unlock_bh(&l->lock);
182 __sock_put(chan->sk);
/* Attach @chan to @conn and assign CIDs/MTU according to socket type:
 * SEQPACKET/STREAM get a dynamic CID (or the LE data CID on LE links),
 * DGRAM uses the connectionless CID, and raw sockets ride the signalling
 * channel. Caller must hold the channel-list write lock. */
185 static void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
187 struct l2cap_chan_list *l = &conn->chan_list;
188 struct sock *sk = chan->sk;
190 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
191 l2cap_pi(sk)->psm, l2cap_pi(sk)->dcid);
/* 0x13: "remote user terminated connection" as default disconnect reason. */
193 conn->disc_reason = 0x13;
195 l2cap_pi(sk)->conn = conn;
197 if (sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM) {
198 if (conn->hcon->type == LE_LINK) {
/* LE links use the fixed LE data channel in both directions. */
200 l2cap_pi(sk)->omtu = L2CAP_LE_DEFAULT_MTU;
201 l2cap_pi(sk)->scid = L2CAP_CID_LE_DATA;
202 l2cap_pi(sk)->dcid = L2CAP_CID_LE_DATA;
204 /* Alloc CID for connection-oriented socket */
205 l2cap_pi(sk)->scid = l2cap_alloc_cid(l);
206 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
208 } else if (sk->sk_type == SOCK_DGRAM) {
209 /* Connectionless socket */
210 l2cap_pi(sk)->scid = L2CAP_CID_CONN_LESS;
211 l2cap_pi(sk)->dcid = L2CAP_CID_CONN_LESS;
212 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
214 /* Raw socket can send/recv signalling messages only */
215 l2cap_pi(sk)->scid = L2CAP_CID_SIGNALING;
216 l2cap_pi(sk)->dcid = L2CAP_CID_SIGNALING;
217 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
220 __l2cap_chan_link(l, chan);
/* Tear down a channel: unlink it from its connection, mark the socket
 * closed/zapped, notify a listening parent if any, and flush all ERTM
 * state (timers, SREJ/busy queues, pending TX). */
224 * Must be called on the locked socket. */
225 void l2cap_chan_del(struct l2cap_chan *chan, int err)
227 struct sock *sk = chan->sk;
228 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
229 struct sock *parent = bt_sk(sk)->parent;
231 l2cap_sock_clear_timer(sk);
233 BT_DBG("sk %p, conn %p, err %d", sk, conn, err);
236 /* Unlink from channel list */
237 l2cap_chan_unlink(&conn->chan_list, chan);
238 l2cap_pi(sk)->conn = NULL;
/* Drop the hci_conn reference taken when the channel was added. */
239 hci_conn_put(conn->hcon);
242 sk->sk_state = BT_CLOSED;
243 sock_set_flag(sk, SOCK_ZAPPED);
/* Pending accept: detach from the parent and wake it up. */
249 bt_accept_unlink(sk);
250 parent->sk_data_ready(parent, 0);
252 sk->sk_state_change(sk);
254 skb_queue_purge(TX_QUEUE(sk));
256 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
257 struct srej_list *l, *tmp;
/* Stop all three ERTM timers before freeing their queues. */
259 del_timer(&l2cap_pi(sk)->retrans_timer);
260 del_timer(&l2cap_pi(sk)->monitor_timer);
261 del_timer(&l2cap_pi(sk)->ack_timer);
263 skb_queue_purge(SREJ_QUEUE(sk));
264 skb_queue_purge(BUSY_QUEUE(sk));
/* Free every outstanding SREJ bookkeeping entry. */
266 list_for_each_entry_safe(l, tmp, SREJ_LIST(sk), list) {
/* Map the socket's security level to an HCI authentication requirement.
 * Raw sockets request dedicated bonding, PSM 0x0001 (SDP) never bonds,
 * and everything else uses general bonding. */
275 static inline u8 l2cap_get_auth_type(struct sock *sk)
277 if (sk->sk_type == SOCK_RAW) {
278 switch (l2cap_pi(sk)->sec_level) {
279 case BT_SECURITY_HIGH:
280 return HCI_AT_DEDICATED_BONDING_MITM;
281 case BT_SECURITY_MEDIUM:
282 return HCI_AT_DEDICATED_BONDING;
284 return HCI_AT_NO_BONDING;
286 } else if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
/* SDP: downgrade LOW to the dedicated SDP security level. */
287 if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW)
288 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
290 if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
291 return HCI_AT_NO_BONDING_MITM;
293 return HCI_AT_NO_BONDING;
295 switch (l2cap_pi(sk)->sec_level) {
296 case BT_SECURITY_HIGH:
297 return HCI_AT_GENERAL_BONDING_MITM;
298 case BT_SECURITY_MEDIUM:
299 return HCI_AT_GENERAL_BONDING;
301 return HCI_AT_NO_BONDING;
306 /* Service level security */
/* Ask the HCI layer to enforce the socket's security level on the link. */
307 static inline int l2cap_check_security(struct sock *sk)
309 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
312 auth_type = l2cap_get_auth_type(sk);
314 return hci_conn_security(conn->hcon, l2cap_pi(sk)->sec_level,
/* Allocate the next signalling-command identifier for @conn. */
318 u8 l2cap_get_ident(struct l2cap_conn *conn)
322 /* Get next available identificator.
323 * 1 - 128 are used by kernel.
324 * 324 - 199 are reserved.
325 * 200 - 254 are used by utilities like l2ping, etc.
328 spin_lock_bh(&conn->lock);
/* Wrap back into the kernel range 1..128; 0 is never a valid ident. */
330 if (++conn->tx_ident > 128)
335 spin_unlock_bh(&conn->lock);
/* Build and transmit an L2CAP signalling command on @conn.
 * Marks the ACL packet non-flushable when the controller supports it. */
340 void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
342 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
345 BT_DBG("code 0x%2.2x", code);
350 if (lmp_no_flush_capable(conn->hcon->hdev))
351 flags = ACL_START_NO_FLUSH;
355 hci_send_acl(conn->hcon, skb, flags);
/* Build and send an ERTM supervisory frame (S-frame) carrying @control.
 * Consumes and clears any pending F-bit/P-bit state, and appends an FCS
 * when the channel negotiated CRC16. */
358 static inline void l2cap_send_sframe(struct l2cap_pinfo *pi, u16 control)
361 struct l2cap_hdr *lh;
362 struct l2cap_conn *conn = pi->conn;
363 struct sock *sk = (struct sock *)pi;
364 int count, hlen = L2CAP_HDR_SIZE + 2;
367 if (sk->sk_state != BT_CONNECTED)
/* NOTE(review): hlen += 2 for the FCS is on an elided line — verify. */
370 if (pi->fcs == L2CAP_FCS_CRC16)
373 BT_DBG("pi %p, control 0x%2.2x", pi, control);
375 count = min_t(unsigned int, conn->mtu, hlen);
376 control |= L2CAP_CTRL_FRAME_TYPE;
378 if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
379 control |= L2CAP_CTRL_FINAL;
380 pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
383 if (pi->conn_state & L2CAP_CONN_SEND_PBIT) {
384 control |= L2CAP_CTRL_POLL;
385 pi->conn_state &= ~L2CAP_CONN_SEND_PBIT;
388 skb = bt_skb_alloc(count, GFP_ATOMIC);
392 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
393 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
394 lh->cid = cpu_to_le16(pi->dcid);
395 put_unaligned_le16(control, skb_put(skb, 2));
397 if (pi->fcs == L2CAP_FCS_CRC16) {
/* FCS covers header + control, i.e. everything before the FCS itself. */
398 u16 fcs = crc16(0, (u8 *)lh, count - 2);
399 put_unaligned_le16(fcs, skb_put(skb, 2));
402 if (lmp_no_flush_capable(conn->hcon->hdev))
403 flags = ACL_START_NO_FLUSH;
407 hci_send_acl(pi->conn->hcon, skb, flags);
/* Send RR (receiver ready) or RNR (receiver not ready) depending on
 * whether we are locally busy; always acks with our buffer_seq. */
410 static inline void l2cap_send_rr_or_rnr(struct l2cap_pinfo *pi, u16 control)
412 if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
413 control |= L2CAP_SUPER_RCV_NOT_READY;
414 pi->conn_state |= L2CAP_CONN_RNR_SENT;
416 control |= L2CAP_SUPER_RCV_READY;
418 control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
420 l2cap_send_sframe(pi, control);
/* True when no Connection Request is already outstanding on this socket. */
423 static inline int __l2cap_no_conn_pending(struct sock *sk)
425 return !(l2cap_pi(sk)->conf_state & L2CAP_CONF_CONNECT_PEND);
/* Kick off connection establishment for @sk. If the remote feature mask
 * is already known, send the Connection Request; otherwise first issue an
 * Information Request (feature mask) and arm the info timer. */
428 static void l2cap_do_start(struct sock *sk)
430 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
432 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
/* Feature exchange still in flight — wait for it to finish. */
433 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
436 if (l2cap_check_security(sk) && __l2cap_no_conn_pending(sk)) {
437 struct l2cap_conn_req req;
438 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
439 req.psm = l2cap_pi(sk)->psm;
441 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
442 l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
444 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
445 L2CAP_CONN_REQ, sizeof(req), &req);
448 struct l2cap_info_req req;
449 req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
451 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
452 conn->info_ident = l2cap_get_ident(conn);
454 mod_timer(&conn->info_timer, jiffies +
455 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
457 l2cap_send_cmd(conn, conn->info_ident,
458 L2CAP_INFO_REQ, sizeof(req), &req);
/* Check @mode (ERTM/streaming) against both the local and remote feature
 * masks; basic mode is handled by the (elided) default path. */
462 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
464 u32 local_feat_mask = l2cap_feat_mask;
466 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
469 case L2CAP_MODE_ERTM:
470 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
471 case L2CAP_MODE_STREAMING:
472 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
/* Send a Disconnection Request for @sk's channel, flushing pending TX and
 * stopping ERTM timers first; leaves the socket in BT_DISCONN. */
478 void l2cap_send_disconn_req(struct l2cap_conn *conn, struct sock *sk, int err)
480 struct l2cap_disconn_req req;
485 skb_queue_purge(TX_QUEUE(sk));
487 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
488 del_timer(&l2cap_pi(sk)->retrans_timer);
489 del_timer(&l2cap_pi(sk)->monitor_timer);
490 del_timer(&l2cap_pi(sk)->ack_timer);
493 req.dcid = cpu_to_le16(l2cap_pi(sk)->dcid);
494 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
495 l2cap_send_cmd(conn, l2cap_get_ident(conn),
496 L2CAP_DISCONN_REQ, sizeof(req), &req);
498 sk->sk_state = BT_DISCONN;
502 /* ---- L2CAP connections ---- */
/* Walk every channel on @conn after the feature exchange completes:
 * BT_CONNECT channels get their Connection Request sent (or are queued
 * for closing when their mode is unsupported), BT_CONNECT2 channels get
 * their deferred Connection Response. Sockets that must be closed are
 * collected on a local list and closed after the list lock is dropped. */
503 static void l2cap_conn_start(struct l2cap_conn *conn)
505 struct l2cap_chan_list *l = &conn->chan_list;
506 struct sock_del_list del, *tmp1, *tmp2;
507 struct l2cap_chan *chan;
509 BT_DBG("conn %p", conn);
511 INIT_LIST_HEAD(&del.list);
515 for (chan = l->head; chan; chan = chan->next_c) {
516 struct sock *sk = chan->sk;
/* Only connection-oriented sockets take part in connect/config. */
519 if (sk->sk_type != SOCK_SEQPACKET &&
520 sk->sk_type != SOCK_STREAM) {
525 if (sk->sk_state == BT_CONNECT) {
526 struct l2cap_conn_req req;
528 if (!l2cap_check_security(sk) ||
529 !__l2cap_no_conn_pending(sk)) {
/* Requested mode unsupported by the peer and the socket insists on
 * it (STATE2_DEVICE): defer-close instead of connecting. */
534 if (!l2cap_mode_supported(l2cap_pi(sk)->mode,
536 && l2cap_pi(sk)->conf_state &
537 L2CAP_CONF_STATE2_DEVICE) {
538 tmp1 = kzalloc(sizeof(struct sock_del_list),
541 list_add_tail(&tmp1->list, &del.list);
546 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
547 req.psm = l2cap_pi(sk)->psm;
549 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
550 l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
552 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
553 L2CAP_CONN_REQ, sizeof(req), &req);
555 } else if (sk->sk_state == BT_CONNECT2) {
556 struct l2cap_conn_rsp rsp;
558 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
559 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
561 if (l2cap_check_security(sk)) {
562 if (bt_sk(sk)->defer_setup) {
/* Userspace wants to accept explicitly: answer "pending,
 * authorization pending" and wake the listener. */
563 struct sock *parent = bt_sk(sk)->parent;
564 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
565 rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
566 parent->sk_data_ready(parent, 0);
569 sk->sk_state = BT_CONFIG;
570 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
571 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
574 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
575 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
578 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
579 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
/* Config already started or the response was not a success —
 * nothing more to do for this channel now. */
581 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT ||
582 rsp.result != L2CAP_CR_SUCCESS) {
587 l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
588 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
589 l2cap_build_conf_req(sk, buf), buf);
590 l2cap_pi(sk)->num_conf_req++;
596 read_unlock(&l->lock);
/* Now safe to close the deferred sockets without holding the list lock. */
598 list_for_each_entry_safe(tmp1, tmp2, &del.list, list) {
599 bh_lock_sock(tmp1->sk);
600 __l2cap_sock_close(tmp1->sk, ECONNRESET);
601 bh_unlock_sock(tmp1->sk);
602 list_del(&tmp1->list);
607 /* Find socket with cid and source bdaddr.
608 * Returns closest match, locked.
/* Exact source-address match wins; a BDADDR_ANY listener is kept in @sk1
 * as the fallback "closest match". */
610 static struct sock *l2cap_get_sock_by_scid(int state, __le16 cid, bdaddr_t *src)
612 struct sock *s, *sk = NULL, *sk1 = NULL;
613 struct hlist_node *node;
615 read_lock(&l2cap_sk_list.lock);
617 sk_for_each(sk, node, &l2cap_sk_list.head) {
618 if (state && sk->sk_state != state)
621 if (l2cap_pi(sk)->scid == cid) {
623 if (!bacmp(&bt_sk(sk)->src, src))
627 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
634 read_unlock(&l2cap_sk_list.lock);
/* An LE link came up: if a socket is listening on the LE data CID, clone
 * a child socket, attach a channel for it on @conn, queue it on the
 * listener's accept queue and mark it connected. */
639 static void l2cap_le_conn_ready(struct l2cap_conn *conn)
641 struct l2cap_chan_list *list = &conn->chan_list;
642 struct sock *parent, *uninitialized_var(sk);
643 struct l2cap_chan *chan;
647 /* Check if we have socket listening on cid */
648 parent = l2cap_get_sock_by_scid(BT_LISTEN, L2CAP_CID_LE_DATA,
653 /* Check for backlog size */
654 if (sk_acceptq_is_full(parent)) {
655 BT_DBG("backlog full %d", parent->sk_ack_backlog);
659 sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP, GFP_ATOMIC);
663 chan = l2cap_chan_alloc(sk);
669 write_lock_bh(&list->lock);
/* Hold the ACL link for the lifetime of the new channel. */
671 hci_conn_hold(conn->hcon);
673 l2cap_sock_init(sk, parent);
674 bacpy(&bt_sk(sk)->src, conn->src);
675 bacpy(&bt_sk(sk)->dst, conn->dst);
677 bt_accept_enqueue(parent, sk);
679 __l2cap_chan_add(conn, chan);
681 l2cap_pi(sk)->chan = chan;
683 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
685 sk->sk_state = BT_CONNECTED;
/* Wake the listener so accept() can pick up the new child. */
686 parent->sk_data_ready(parent, 0);
688 write_unlock_bh(&list->lock);
691 bh_unlock_sock(parent);
/* The underlying ACL/LE link is fully established: promote LE and
 * non-connection-oriented channels straight to BT_CONNECTED, and start
 * the L2CAP connect sequence for channels waiting in BT_CONNECT. */
694 static void l2cap_conn_ready(struct l2cap_conn *conn)
696 struct l2cap_chan_list *l = &conn->chan_list;
697 struct l2cap_chan *chan;
699 BT_DBG("conn %p", conn);
/* Incoming LE link: hand off to the LE accept path first. */
701 if (!conn->hcon->out && conn->hcon->type == LE_LINK)
702 l2cap_le_conn_ready(conn);
706 for (chan = l->head; chan; chan = chan->next_c) {
707 struct sock *sk = chan->sk;
710 if (conn->hcon->type == LE_LINK) {
711 l2cap_sock_clear_timer(sk);
712 sk->sk_state = BT_CONNECTED;
713 sk->sk_state_change(sk);
716 if (sk->sk_type != SOCK_SEQPACKET &&
717 sk->sk_type != SOCK_STREAM) {
/* Raw/DGRAM sockets need no L2CAP-level handshake. */
718 l2cap_sock_clear_timer(sk);
719 sk->sk_state = BT_CONNECTED;
720 sk->sk_state_change(sk);
721 } else if (sk->sk_state == BT_CONNECT)
727 read_unlock(&l->lock);
730 /* Notify sockets that we cannot guaranty reliability anymore */
/* Push @err to every channel that asked for force_reliable semantics. */
731 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
733 struct l2cap_chan_list *l = &conn->chan_list;
734 struct l2cap_chan *chan;
736 BT_DBG("conn %p", conn);
740 for (chan = l->head; chan; chan = chan->next_c) {
741 struct sock *sk = chan->sk;
742 if (l2cap_pi(sk)->force_reliable)
746 read_unlock(&l->lock);
/* Info-request timer expired: give up on the feature exchange and
 * proceed with connection setup using whatever we know. */
749 static void l2cap_info_timeout(unsigned long arg)
751 struct l2cap_conn *conn = (void *) arg;
753 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
754 conn->info_ident = 0;
756 l2cap_conn_start(conn);
/* Get-or-create the l2cap_conn attached to @hcon. Allocates with
 * GFP_ATOMIC, copies the link MTU/addresses, and arms the info timer
 * for BR/EDR links only (LE has no feature exchange). */
759 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
761 struct l2cap_conn *conn = hcon->l2cap_data;
766 conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
770 hcon->l2cap_data = conn;
773 BT_DBG("hcon %p conn %p", hcon, conn);
775 if (hcon->hdev->le_mtu && hcon->type == LE_LINK)
776 conn->mtu = hcon->hdev->le_mtu;
778 conn->mtu = hcon->hdev->acl_mtu;
780 conn->src = &hcon->hdev->bdaddr;
781 conn->dst = &hcon->dst;
785 spin_lock_init(&conn->lock);
786 rwlock_init(&conn->chan_list.lock);
788 if (hcon->type != LE_LINK)
789 setup_timer(&conn->info_timer, l2cap_info_timeout,
790 (unsigned long) conn);
/* 0x13: default "remote user terminated" disconnect reason. */
792 conn->disc_reason = 0x13;
/* Destroy the l2cap_conn on @hcon: free any partial reassembly skb,
 * delete every remaining channel with @err, and stop the info timer. */
797 static void l2cap_conn_del(struct hci_conn *hcon, int err)
799 struct l2cap_conn *conn = hcon->l2cap_data;
800 struct l2cap_chan *chan;
806 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
808 kfree_skb(conn->rx_skb);
811 while ((chan = conn->chan_list.head)) {
814 l2cap_chan_del(chan, err);
819 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
820 del_timer_sync(&conn->info_timer);
822 hcon->l2cap_data = NULL;
/* Locked wrapper around __l2cap_chan_add(). */
826 static inline void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
828 struct l2cap_chan_list *l = &conn->chan_list;
829 write_lock_bh(&l->lock);
830 __l2cap_chan_add(conn, chan);
831 write_unlock_bh(&l->lock);
834 /* ---- Socket interface ---- */
836 /* Find socket with psm and source bdaddr.
837 * Returns closest match.
/* Exact source address wins; a BDADDR_ANY listener (@sk1) is the fallback. */
839 static struct sock *l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
841 struct sock *sk = NULL, *sk1 = NULL;
842 struct hlist_node *node;
844 read_lock(&l2cap_sk_list.lock);
846 sk_for_each(sk, node, &l2cap_sk_list.head) {
847 if (state && sk->sk_state != state)
850 if (l2cap_pi(sk)->psm == psm) {
852 if (!bacmp(&bt_sk(sk)->src, src))
856 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
861 read_unlock(&l2cap_sk_list.lock);
/* node != NULL means the loop broke on an exact match. */
863 return node ? sk : sk1;
/* Resolve a route to the peer, create (or reuse) the HCI link — LE when
 * the socket targets the LE data CID, ACL otherwise — attach a channel,
 * and either complete immediately (link already up) or arm the connect
 * timer and wait. Returns 0 or a negative errno. */
866 int l2cap_do_connect(struct sock *sk)
868 bdaddr_t *src = &bt_sk(sk)->src;
869 bdaddr_t *dst = &bt_sk(sk)->dst;
870 struct l2cap_conn *conn;
871 struct l2cap_chan *chan;
872 struct hci_conn *hcon;
873 struct hci_dev *hdev;
877 BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst),
880 hdev = hci_get_route(dst, src);
882 return -EHOSTUNREACH;
884 hci_dev_lock_bh(hdev);
886 auth_type = l2cap_get_auth_type(sk);
888 if (l2cap_pi(sk)->dcid == L2CAP_CID_LE_DATA)
889 hcon = hci_connect(hdev, LE_LINK, dst,
890 l2cap_pi(sk)->sec_level, auth_type);
892 hcon = hci_connect(hdev, ACL_LINK, dst,
893 l2cap_pi(sk)->sec_level, auth_type);
900 conn = l2cap_conn_add(hcon, 0);
907 chan = l2cap_chan_alloc(sk);
914 /* Update source addr of the socket */
915 bacpy(src, conn->src);
917 l2cap_chan_add(conn, chan);
919 l2cap_pi(sk)->chan = chan;
921 sk->sk_state = BT_CONNECT;
922 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
924 if (hcon->state == BT_CONNECTED) {
925 if (sk->sk_type != SOCK_SEQPACKET &&
926 sk->sk_type != SOCK_STREAM) {
/* No L2CAP handshake needed; connected once security passes. */
927 l2cap_sock_clear_timer(sk);
928 if (l2cap_check_security(sk))
929 sk->sk_state = BT_CONNECTED;
937 hci_dev_unlock_bh(hdev);
/* Sleep (interruptibly) until every outstanding ERTM I-frame has been
 * acknowledged or the connection goes away; returns 0, a signal errno,
 * or the socket error. */
942 int __l2cap_wait_ack(struct sock *sk)
944 DECLARE_WAITQUEUE(wait, current);
948 add_wait_queue(sk_sleep(sk), &wait);
949 while ((l2cap_pi(sk)->unacked_frames > 0 && l2cap_pi(sk)->conn)) {
950 set_current_state(TASK_INTERRUPTIBLE);
955 if (signal_pending(current)) {
956 err = sock_intr_errno(timeo);
961 timeo = schedule_timeout(timeo);
964 err = sock_error(sk);
968 set_current_state(TASK_RUNNING);
969 remove_wait_queue(sk_sleep(sk), &wait);
/* Monitor timer fired: peer did not answer our poll. Give up after
 * remote_max_tx retries, otherwise re-poll with the P bit set. */
973 static void l2cap_monitor_timeout(unsigned long arg)
975 struct sock *sk = (void *) arg;
980 if (l2cap_pi(sk)->retry_count >= l2cap_pi(sk)->remote_max_tx) {
981 l2cap_send_disconn_req(l2cap_pi(sk)->conn, sk, ECONNABORTED);
986 l2cap_pi(sk)->retry_count++;
987 __mod_monitor_timer();
989 l2cap_send_rr_or_rnr(l2cap_pi(sk), L2CAP_CTRL_POLL);
/* Retransmission timer fired: enter the WAIT_F state and poll the peer. */
993 static void l2cap_retrans_timeout(unsigned long arg)
995 struct sock *sk = (void *) arg;
1000 l2cap_pi(sk)->retry_count = 1;
1001 __mod_monitor_timer();
1003 l2cap_pi(sk)->conn_state |= L2CAP_CONN_WAIT_F;
1005 l2cap_send_rr_or_rnr(l2cap_pi(sk), L2CAP_CTRL_POLL);
/* Pop acknowledged I-frames off the TX queue up to expected_ack_seq and
 * stop the retransmission timer once nothing is outstanding. */
1009 static void l2cap_drop_acked_frames(struct sock *sk)
1011 struct sk_buff *skb;
1013 while ((skb = skb_peek(TX_QUEUE(sk))) &&
1014 l2cap_pi(sk)->unacked_frames) {
1015 if (bt_cb(skb)->tx_seq == l2cap_pi(sk)->expected_ack_seq)
1018 skb = skb_dequeue(TX_QUEUE(sk));
1021 l2cap_pi(sk)->unacked_frames--;
1024 if (!l2cap_pi(sk)->unacked_frames)
1025 del_timer(&l2cap_pi(sk)->retrans_timer);
/* Hand one finished skb to the HCI layer, honouring the channel's
 * flushable setting on controllers that support non-flushable packets. */
1028 void l2cap_do_send(struct sock *sk, struct sk_buff *skb)
1030 struct l2cap_pinfo *pi = l2cap_pi(sk);
1031 struct hci_conn *hcon = pi->conn->hcon;
1034 BT_DBG("sk %p, skb %p len %d", sk, skb, skb->len);
1036 if (!pi->flushable && lmp_no_flush_capable(hcon->hdev))
1037 flags = ACL_START_NO_FLUSH;
1041 hci_send_acl(hcon, skb, flags);
/* Streaming mode: drain the TX queue, stamping each frame with the next
 * TxSeq (mod 64) and recomputing the FCS in place when enabled. No
 * retransmission state is kept. */
1044 void l2cap_streaming_send(struct sock *sk)
1046 struct sk_buff *skb;
1047 struct l2cap_pinfo *pi = l2cap_pi(sk);
1050 while ((skb = skb_dequeue(TX_QUEUE(sk)))) {
1051 control = get_unaligned_le16(skb->data + L2CAP_HDR_SIZE);
1052 control |= pi->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT;
1053 put_unaligned_le16(control, skb->data + L2CAP_HDR_SIZE);
1055 if (pi->fcs == L2CAP_FCS_CRC16) {
1056 fcs = crc16(0, (u8 *)skb->data, skb->len - 2);
1057 put_unaligned_le16(fcs, skb->data + skb->len - 2);
1060 l2cap_do_send(sk, skb);
1062 pi->next_tx_seq = (pi->next_tx_seq + 1) % 64;
/* Retransmit the single queued I-frame whose TxSeq equals @tx_seq (SREJ
 * recovery). Clones the skb so the original stays queued; disconnects if
 * the frame already hit remote_max_tx retries. */
1066 static void l2cap_retransmit_one_frame(struct sock *sk, u8 tx_seq)
1068 struct l2cap_pinfo *pi = l2cap_pi(sk);
1069 struct sk_buff *skb, *tx_skb;
1072 skb = skb_peek(TX_QUEUE(sk));
1077 if (bt_cb(skb)->tx_seq == tx_seq)
1080 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1083 } while ((skb = skb_queue_next(TX_QUEUE(sk), skb)));
1085 if (pi->remote_max_tx &&
1086 bt_cb(skb)->retries == pi->remote_max_tx) {
1087 l2cap_send_disconn_req(pi->conn, sk, ECONNABORTED);
1091 tx_skb = skb_clone(skb, GFP_ATOMIC);
1092 bt_cb(skb)->retries++;
/* Rebuild the control field: carry the F bit if pending, ack with our
 * buffer_seq, and restore the original TxSeq. */
1093 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1095 if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
1096 control |= L2CAP_CTRL_FINAL;
1097 pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
1100 control |= (pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1101 | (tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1103 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1105 if (pi->fcs == L2CAP_FCS_CRC16) {
1106 fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
1107 put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
1110 l2cap_do_send(sk, tx_skb);
/* ERTM transmit loop: send queued I-frames from sk_send_head while the
 * TX window has room. Each frame is cloned, stamped with ReqSeq/TxSeq
 * (and F bit if pending), FCS'd, and counted against unacked_frames.
 * Returns the number of frames sent (via elided return path). */
1113 int l2cap_ertm_send(struct sock *sk)
1115 struct sk_buff *skb, *tx_skb;
1116 struct l2cap_pinfo *pi = l2cap_pi(sk);
1120 if (sk->sk_state != BT_CONNECTED)
1123 while ((skb = sk->sk_send_head) && (!l2cap_tx_window_full(sk))) {
1125 if (pi->remote_max_tx &&
1126 bt_cb(skb)->retries == pi->remote_max_tx) {
1127 l2cap_send_disconn_req(pi->conn, sk, ECONNABORTED);
1131 tx_skb = skb_clone(skb, GFP_ATOMIC);
1133 bt_cb(skb)->retries++;
/* Keep only the SAR bits from the pre-built control field. */
1135 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1136 control &= L2CAP_CTRL_SAR;
1138 if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
1139 control |= L2CAP_CTRL_FINAL;
1140 pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
1142 control |= (pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1143 | (pi->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1144 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
/* NOTE(review): FCS is computed over skb->data but written back via
 * skb->data rather than tx_skb->data here — looks suspicious compared
 * with l2cap_retransmit_one_frame(); cannot confirm from this
 * truncated view, flag for verification upstream. */
1147 if (pi->fcs == L2CAP_FCS_CRC16) {
1148 fcs = crc16(0, (u8 *)skb->data, tx_skb->len - 2);
1149 put_unaligned_le16(fcs, skb->data + tx_skb->len - 2);
1152 l2cap_do_send(sk, tx_skb);
1154 __mod_retrans_timer();
1156 bt_cb(skb)->tx_seq = pi->next_tx_seq;
1157 pi->next_tx_seq = (pi->next_tx_seq + 1) % 64;
/* First transmission of this frame: it becomes outstanding. */
1159 if (bt_cb(skb)->retries == 1)
1160 pi->unacked_frames++;
1164 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1165 sk->sk_send_head = NULL;
1167 sk->sk_send_head = skb_queue_next(TX_QUEUE(sk), skb);
/* REJ recovery: rewind sk_send_head to the start of the TX queue, reset
 * next_tx_seq to the last acked sequence, and resend everything. */
1175 static int l2cap_retransmit_frames(struct sock *sk)
1177 struct l2cap_pinfo *pi = l2cap_pi(sk);
1180 if (!skb_queue_empty(TX_QUEUE(sk)))
1181 sk->sk_send_head = TX_QUEUE(sk)->next;
1183 pi->next_tx_seq = pi->expected_ack_seq;
1184 ret = l2cap_ertm_send(sk);
/* Acknowledge received frames: send RNR if locally busy, otherwise try to
 * piggyback the ack on pending I-frames and fall back to an RR S-frame. */
1188 static void l2cap_send_ack(struct l2cap_pinfo *pi)
1190 struct sock *sk = (struct sock *)pi;
1193 control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
1195 if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
1196 control |= L2CAP_SUPER_RCV_NOT_READY;
1197 pi->conn_state |= L2CAP_CONN_RNR_SENT;
1198 l2cap_send_sframe(pi, control);
/* I-frames went out — they carry the ack, no S-frame needed. */
1202 if (l2cap_ertm_send(sk) > 0)
1205 control |= L2CAP_SUPER_RCV_READY;
1206 l2cap_send_sframe(pi, control);
/* Send a final SREJ S-frame for the last entry on the SREJ list. */
1209 static void l2cap_send_srejtail(struct sock *sk)
1211 struct srej_list *tail;
1214 control = L2CAP_SUPER_SELECT_REJECT;
1215 control |= L2CAP_CTRL_FINAL;
1217 tail = list_entry(SREJ_LIST(sk)->prev, struct srej_list, list);
1218 control |= tail->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
1220 l2cap_send_sframe(l2cap_pi(sk), control);
/* Copy @len bytes of user iovec data into @skb: @count bytes go into the
 * linear part, the remainder into newly allocated frag_list skbs capped
 * at the connection MTU each. */
1223 static inline int l2cap_skbuff_fromiovec(struct sock *sk, struct msghdr *msg, int len, int count, struct sk_buff *skb)
1225 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1226 struct sk_buff **frag;
1229 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
1235 /* Continuation fragments (no L2CAP header) */
1236 frag = &skb_shinfo(skb)->frag_list;
1238 count = min_t(unsigned int, conn->mtu, len);
1240 *frag = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err);
1243 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
1249 frag = &(*frag)->next;
/* Build a connectionless (G-frame) PDU: basic L2CAP header plus the
 * 2-byte PSM, then the user payload via l2cap_skbuff_fromiovec(). */
1255 struct sk_buff *l2cap_create_connless_pdu(struct sock *sk, struct msghdr *msg, size_t len)
1257 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1258 struct sk_buff *skb;
1259 int err, count, hlen = L2CAP_HDR_SIZE + 2;
1260 struct l2cap_hdr *lh;
1262 BT_DBG("sk %p len %d", sk, (int)len);
1264 count = min_t(unsigned int, (conn->mtu - hlen), len);
1265 skb = bt_skb_send_alloc(sk, count + hlen,
1266 msg->msg_flags & MSG_DONTWAIT, &err);
1268 return ERR_PTR(err);
1270 /* Create L2CAP header */
1271 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1272 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1273 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1274 put_unaligned_le16(l2cap_pi(sk)->psm, skb_put(skb, 2));
1276 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1277 if (unlikely(err < 0)) {
1279 return ERR_PTR(err);
/* Build a basic-mode B-frame: plain L2CAP header + payload, no control
 * field and no FCS. */
1284 struct sk_buff *l2cap_create_basic_pdu(struct sock *sk, struct msghdr *msg, size_t len)
1286 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1287 struct sk_buff *skb;
1288 int err, count, hlen = L2CAP_HDR_SIZE;
1289 struct l2cap_hdr *lh;
1291 BT_DBG("sk %p len %d", sk, (int)len);
1293 count = min_t(unsigned int, (conn->mtu - hlen), len);
1294 skb = bt_skb_send_alloc(sk, count + hlen,
1295 msg->msg_flags & MSG_DONTWAIT, &err);
1297 return ERR_PTR(err);
1299 /* Create L2CAP header */
1300 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1301 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1302 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1304 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1305 if (unlikely(err < 0)) {
1307 return ERR_PTR(err);
/* Build an ERTM/streaming I-frame: header, 2-byte control, optional
 * 2-byte SDU length (when @sdulen, i.e. SAR start), payload, and a
 * zeroed FCS placeholder filled in at transmit time. */
1312 struct sk_buff *l2cap_create_iframe_pdu(struct sock *sk, struct msghdr *msg, size_t len, u16 control, u16 sdulen)
1314 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1315 struct sk_buff *skb;
1316 int err, count, hlen = L2CAP_HDR_SIZE + 2;
1317 struct l2cap_hdr *lh;
1319 BT_DBG("sk %p len %d", sk, (int)len);
1322 return ERR_PTR(-ENOTCONN);
1327 if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16)
1330 count = min_t(unsigned int, (conn->mtu - hlen), len);
1331 skb = bt_skb_send_alloc(sk, count + hlen,
1332 msg->msg_flags & MSG_DONTWAIT, &err);
1334 return ERR_PTR(err);
1336 /* Create L2CAP header */
1337 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1338 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1339 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1340 put_unaligned_le16(control, skb_put(skb, 2));
1342 put_unaligned_le16(sdulen, skb_put(skb, 2));
1344 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1345 if (unlikely(err < 0)) {
1347 return ERR_PTR(err);
/* FCS placeholder; real value is computed when the frame is sent. */
1350 if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16)
1351 put_unaligned_le16(0, skb_put(skb, 2));
1353 bt_cb(skb)->retries = 0;
/* Segment an SDU larger than remote_mps into a START frame (carrying the
 * total SDU length), CONTINUE frames, and an END frame, building them on
 * a local queue first so a mid-stream allocation failure leaves the
 * socket's TX queue untouched. */
1357 int l2cap_sar_segment_sdu(struct sock *sk, struct msghdr *msg, size_t len)
1359 struct l2cap_pinfo *pi = l2cap_pi(sk);
1360 struct sk_buff *skb;
1361 struct sk_buff_head sar_queue;
1365 skb_queue_head_init(&sar_queue);
1366 control = L2CAP_SDU_START;
/* The START frame's sdulen field carries the full SDU length (@len). */
1367 skb = l2cap_create_iframe_pdu(sk, msg, pi->remote_mps, control, len);
1369 return PTR_ERR(skb);
1371 __skb_queue_tail(&sar_queue, skb);
1372 len -= pi->remote_mps;
1373 size += pi->remote_mps;
1378 if (len > pi->remote_mps) {
1379 control = L2CAP_SDU_CONTINUE;
1380 buflen = pi->remote_mps;
1382 control = L2CAP_SDU_END;
1386 skb = l2cap_create_iframe_pdu(sk, msg, buflen, control, 0);
/* Allocation failed mid-SDU: discard all segments built so far. */
1388 skb_queue_purge(&sar_queue);
1389 return PTR_ERR(skb);
1392 __skb_queue_tail(&sar_queue, skb);
1396 skb_queue_splice_tail(&sar_queue, TX_QUEUE(sk));
1397 if (sk->sk_send_head == NULL)
1398 sk->sk_send_head = sar_queue.next;
/* Configuration finished: clear config state and wake either the
 * connecting socket (outgoing) or the listening parent (incoming). */
1403 static void l2cap_chan_ready(struct sock *sk)
1405 struct sock *parent = bt_sk(sk)->parent;
1407 BT_DBG("sk %p, parent %p", sk, parent);
1409 l2cap_pi(sk)->conf_state = 0;
1410 l2cap_sock_clear_timer(sk);
1413 /* Outgoing channel.
1414 * Wake up socket sleeping on connect.
1416 sk->sk_state = BT_CONNECTED;
1417 sk->sk_state_change(sk);
1419 /* Incoming channel.
1420 * Wake up socket sleeping on accept.
1422 parent->sk_data_ready(parent, 0);
1426 /* Copy frame to all raw sockets on that connection */
1427 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
1429 struct l2cap_chan_list *l = &conn->chan_list;
1430 struct sk_buff *nskb;
1431 struct l2cap_chan *chan;
1433 BT_DBG("conn %p", conn);
1435 read_lock(&l->lock);
1436 for (chan = l->head; chan; chan = chan->next_c) {
1437 struct sock *sk = chan->sk;
1438 if (sk->sk_type != SOCK_RAW)
1441 /* Don't send frame to the socket it came from */
/* Clone per raw socket so each receive queue owns its own skb. */
1444 nskb = skb_clone(skb, GFP_ATOMIC);
1448 if (sock_queue_rcv_skb(sk, nskb))
1451 read_unlock(&l->lock);
1454 /* ---- L2CAP signalling commands ---- */
/* Allocate and fill a signalling PDU: L2CAP header (signalling CID, or
 * the LE signalling CID on LE links), command header, then @dlen bytes of
 * @data — spilling into frag_list skbs when it exceeds the link MTU. */
1455 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
1456 u8 code, u8 ident, u16 dlen, void *data)
1458 struct sk_buff *skb, **frag;
1459 struct l2cap_cmd_hdr *cmd;
1460 struct l2cap_hdr *lh;
1463 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
1464 conn, code, ident, dlen);
1466 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
1467 count = min_t(unsigned int, conn->mtu, len);
1469 skb = bt_skb_alloc(count, GFP_ATOMIC);
1473 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1474 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
1476 if (conn->hcon->type == LE_LINK)
1477 lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
1479 lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);
1481 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
1484 cmd->len = cpu_to_le16(dlen);
/* Remaining room in the first skb after both headers. */
1487 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
1488 memcpy(skb_put(skb, count), data, count);
1494 /* Continuation fragments (no L2CAP header) */
1495 frag = &skb_shinfo(skb)->frag_list;
1497 count = min_t(unsigned int, conn->mtu, len);
1499 *frag = bt_skb_alloc(count, GFP_ATOMIC);
1503 memcpy(skb_put(*frag, count), data, count);
1508 frag = &(*frag)->next;
/* Decode one configuration option at *ptr, returning its total encoded
 * length and advancing the caller's cursor (advance itself is in elided
 * lines).  *type/*olen are filled from the option header; *val is the
 * option value widened to unsigned long, or (for over-long options) a
 * pointer to the raw bytes. */
1518 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
1520 struct l2cap_conf_opt *opt = *ptr;
1523 len = L2CAP_CONF_OPT_SIZE + opt->len;
/* 1-, 2- and 4-byte options are read by size; option values are not
 * guaranteed aligned, hence the unaligned accessors. */
1531 *val = *((u8 *) opt->val);
1535 *val = get_unaligned_le16(opt->val);
1539 *val = get_unaligned_le32(opt->val);
/* Larger options: hand back a pointer instead of a value. */
1543 *val = (unsigned long) opt->val;
1547 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
/* Append one configuration option (type/len/value) at *ptr and advance
 * the cursor past it.  `val` is either the scalar value (len 1/2/4) or a
 * pointer to `len` bytes to copy. */
1551 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
1553 struct l2cap_conf_opt *opt = *ptr;
1555 BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
1562 *((u8 *) opt->val) = val;
/* Option payloads may be unaligned within the request buffer. */
1566 put_unaligned_le16(val, opt->val);
1570 put_unaligned_le32(val, opt->val);
/* Lengths other than 1/2/4: val carries a pointer to the raw bytes. */
1574 memcpy(opt->val, (void *) val, len);
1578 *ptr += L2CAP_CONF_OPT_SIZE + len;
/* ERTM ack timer callback: when the timer fires, send any pending
 * acknowledgement for the socket stashed in the timer argument. */
1581 static void l2cap_ack_timeout(unsigned long arg)
1583 struct sock *sk = (void *) arg;
1586 l2cap_send_ack(l2cap_pi(sk));
/* Initialise per-socket ERTM state: zero the sequence/window counters,
 * arm the three ERTM timers, set up the SREJ/busy queues and the local
 * busy workqueue item, and route backlogged packets through the ERTM
 * receive path. */
1590 static inline void l2cap_ertm_init(struct sock *sk)
1592 l2cap_pi(sk)->expected_ack_seq = 0;
1593 l2cap_pi(sk)->unacked_frames = 0;
1594 l2cap_pi(sk)->buffer_seq = 0;
1595 l2cap_pi(sk)->num_acked = 0;
1596 l2cap_pi(sk)->frames_sent = 0;
/* All three timers get the socket pointer as their argument. */
1598 setup_timer(&l2cap_pi(sk)->retrans_timer,
1599 l2cap_retrans_timeout, (unsigned long) sk);
1600 setup_timer(&l2cap_pi(sk)->monitor_timer,
1601 l2cap_monitor_timeout, (unsigned long) sk);
1602 setup_timer(&l2cap_pi(sk)->ack_timer,
1603 l2cap_ack_timeout, (unsigned long) sk);
1605 __skb_queue_head_init(SREJ_QUEUE(sk));
1606 __skb_queue_head_init(BUSY_QUEUE(sk));
1608 INIT_WORK(&l2cap_pi(sk)->busy_work, l2cap_busy_work);
/* Frames queued on the socket backlog are replayed through ERTM. */
1610 sk->sk_backlog_rcv = l2cap_ertm_data_rcv;
/* Pick the channel mode to use: keep ERTM/streaming only when the remote
 * advertises support for it in its feature mask; otherwise fall back to
 * basic mode. */
1613 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
1616 case L2CAP_MODE_STREAMING:
1617 case L2CAP_MODE_ERTM:
1618 if (l2cap_mode_supported(mode, remote_feat_mask))
1622 return L2CAP_MODE_BASIC;
/* Build an outgoing Configure Request into `data`: MTU option when it
 * differs from the default, then a mode-specific RFC option, then an FCS
 * option when both sides support FCS and we want it off.  Returns the
 * length of the built request (return statements are elided here). */
1626 int l2cap_build_conf_req(struct sock *sk, void *data)
1628 struct l2cap_pinfo *pi = l2cap_pi(sk);
1629 struct l2cap_conf_req *req = data;
1630 struct l2cap_conf_rfc rfc = { .mode = pi->mode };
1631 void *ptr = req->data;
1633 BT_DBG("sk %p", sk);
/* Only renegotiate the mode on the very first request/response pair. */
1635 if (pi->num_conf_req || pi->num_conf_rsp)
1639 case L2CAP_MODE_STREAMING:
1640 case L2CAP_MODE_ERTM:
1641 if (pi->conf_state & L2CAP_CONF_STATE2_DEVICE)
1646 pi->mode = l2cap_select_mode(rfc.mode, pi->conn->feat_mask);
1651 if (pi->imtu != L2CAP_DEFAULT_MTU)
1652 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);
1655 case L2CAP_MODE_BASIC:
/* In basic mode the RFC option is only sent when the peer knows about
 * ERTM or streaming at all. */
1656 if (!(pi->conn->feat_mask & L2CAP_FEAT_ERTM) &&
1657 !(pi->conn->feat_mask & L2CAP_FEAT_STREAMING))
1660 rfc.mode = L2CAP_MODE_BASIC;
1662 rfc.max_transmit = 0;
1663 rfc.retrans_timeout = 0;
1664 rfc.monitor_timeout = 0;
1665 rfc.max_pdu_size = 0;
1667 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
1668 (unsigned long) &rfc);
1671 case L2CAP_MODE_ERTM:
1672 rfc.mode = L2CAP_MODE_ERTM;
1673 rfc.txwin_size = pi->tx_win;
1674 rfc.max_transmit = pi->max_tx;
1675 rfc.retrans_timeout = 0;
1676 rfc.monitor_timeout = 0;
/* Cap the PDU size so a maximal PDU plus ERTM overhead (10 bytes of
 * headers/FCS) still fits in the HCI connection MTU. */
1677 rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
1678 if (L2CAP_DEFAULT_MAX_PDU_SIZE > pi->conn->mtu - 10)
1679 rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);
1681 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
1682 (unsigned long) &rfc);
1684 if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
/* Request no-FCS only when we locally disabled it or the peer already
 * asked for no FCS. */
1687 if (pi->fcs == L2CAP_FCS_NONE ||
1688 pi->conf_state & L2CAP_CONF_NO_FCS_RECV) {
1689 pi->fcs = L2CAP_FCS_NONE;
1690 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);
1694 case L2CAP_MODE_STREAMING:
1695 rfc.mode = L2CAP_MODE_STREAMING;
1697 rfc.max_transmit = 0;
1698 rfc.retrans_timeout = 0;
1699 rfc.monitor_timeout = 0;
1700 rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
1701 if (L2CAP_DEFAULT_MAX_PDU_SIZE > pi->conn->mtu - 10)
1702 rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);
1704 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
1705 (unsigned long) &rfc);
1707 if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
1710 if (pi->fcs == L2CAP_FCS_NONE ||
1711 pi->conf_state & L2CAP_CONF_NO_FCS_RECV) {
1712 pi->fcs = L2CAP_FCS_NONE;
1713 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);
1718 req->dcid = cpu_to_le16(pi->dcid);
1719 req->flags = cpu_to_le16(0);
/* Parse the accumulated Configure Request in pi->conf_req and build the
 * Configure Response into `data`: walk the options, record MTU/RFC/FCS,
 * reject unknown non-hint options, settle the channel mode, and emit our
 * answering options.  Returns the response length (the return statements
 * are elided in this excerpt), or -ECONNREFUSED when the modes cannot be
 * reconciled. */
1724 static int l2cap_parse_conf_req(struct sock *sk, void *data)
1726 struct l2cap_pinfo *pi = l2cap_pi(sk);
1727 struct l2cap_conf_rsp *rsp = data;
1728 void *ptr = rsp->data;
1729 void *req = pi->conf_req;
1730 int len = pi->conf_len;
1731 int type, hint, olen;
1733 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
1734 u16 mtu = L2CAP_DEFAULT_MTU;
1735 u16 result = L2CAP_CONF_SUCCESS;
1737 BT_DBG("sk %p", sk);
/* First pass: decode every option the peer sent. */
1739 while (len >= L2CAP_CONF_OPT_SIZE) {
1740 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
/* Hint options may be ignored silently; others must be understood. */
1742 hint = type & L2CAP_CONF_HINT;
1743 type &= L2CAP_CONF_MASK;
1746 case L2CAP_CONF_MTU:
1750 case L2CAP_CONF_FLUSH_TO:
1754 case L2CAP_CONF_QOS:
1757 case L2CAP_CONF_RFC:
1758 if (olen == sizeof(rfc))
1759 memcpy(&rfc, (void *) val, olen);
1762 case L2CAP_CONF_FCS:
1763 if (val == L2CAP_FCS_NONE)
1764 pi->conf_state |= L2CAP_CONF_NO_FCS_RECV;
/* Unknown non-hint option: answer CONF_UNKNOWN and echo its type. */
1772 result = L2CAP_CONF_UNKNOWN;
1773 *((u8 *) ptr++) = type;
1778 if (pi->num_conf_rsp || pi->num_conf_req > 1)
1782 case L2CAP_MODE_STREAMING:
1783 case L2CAP_MODE_ERTM:
/* State-1 devices may still downgrade to what the remote supports;
 * state-2 devices insist on their configured mode. */
1784 if (!(pi->conf_state & L2CAP_CONF_STATE2_DEVICE)) {
1785 pi->mode = l2cap_select_mode(rfc.mode,
1786 pi->conn->feat_mask);
1790 if (pi->mode != rfc.mode)
1791 return -ECONNREFUSED;
1797 if (pi->mode != rfc.mode) {
1798 result = L2CAP_CONF_UNACCEPT;
1799 rfc.mode = pi->mode;
/* Second disagreement on mode is fatal. */
1801 if (pi->num_conf_rsp == 1)
1802 return -ECONNREFUSED;
1804 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
1805 sizeof(rfc), (unsigned long) &rfc);
1809 if (result == L2CAP_CONF_SUCCESS) {
1810 /* Configure output options and let the other side know
1811 * which ones we don't like. */
1813 if (mtu < L2CAP_DEFAULT_MIN_MTU)
1814 result = L2CAP_CONF_UNACCEPT;
1817 pi->conf_state |= L2CAP_CONF_MTU_DONE;
1819 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);
1822 case L2CAP_MODE_BASIC:
1823 pi->fcs = L2CAP_FCS_NONE;
1824 pi->conf_state |= L2CAP_CONF_MODE_DONE;
1827 case L2CAP_MODE_ERTM:
1828 pi->remote_tx_win = rfc.txwin_size;
1829 pi->remote_max_tx = rfc.max_transmit;
1831 if (le16_to_cpu(rfc.max_pdu_size) > pi->conn->mtu - 10)
1832 rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);
1834 pi->remote_mps = le16_to_cpu(rfc.max_pdu_size);
/* FIX: these rfc fields are little-endian on the wire (they are read
 * back with le16_to_cpu, see l2cap_parse_conf_rsp), so host-order
 * constants must be stored with cpu_to_le16.  The previous
 * le16_to_cpu() byte-swapped the value on big-endian hosts. */
1836 rfc.retrans_timeout =
1837 cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
1838 rfc.monitor_timeout =
1839 cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
1841 pi->conf_state |= L2CAP_CONF_MODE_DONE;
1843 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
1844 sizeof(rfc), (unsigned long) &rfc);
1848 case L2CAP_MODE_STREAMING:
1849 if (le16_to_cpu(rfc.max_pdu_size) > pi->conn->mtu - 10)
1850 rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);
1852 pi->remote_mps = le16_to_cpu(rfc.max_pdu_size);
1854 pi->conf_state |= L2CAP_CONF_MODE_DONE;
1856 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
1857 sizeof(rfc), (unsigned long) &rfc);
/* Any other mode: refuse and report our own mode back. */
1862 result = L2CAP_CONF_UNACCEPT;
1864 memset(&rfc, 0, sizeof(rfc));
1865 rfc.mode = pi->mode;
1868 if (result == L2CAP_CONF_SUCCESS)
1869 pi->conf_state |= L2CAP_CONF_OUTPUT_DONE;
1871 rsp->scid = cpu_to_le16(pi->dcid);
1872 rsp->result = cpu_to_le16(result);
1873 rsp->flags = cpu_to_le16(0x0000);
/* Parse a Configure Response carrying L2CAP_CONF_UNACCEPT and build a new
 * Configure Request into `data` that adopts the peer's counter-proposals
 * (MTU, flush timeout, RFC).  On success commits the negotiated mode and
 * ERTM/streaming timeouts to the socket.  Return value lines are elided. */
1878 static int l2cap_parse_conf_rsp(struct sock *sk, void *rsp, int len, void *data, u16 *result)
1880 struct l2cap_pinfo *pi = l2cap_pi(sk);
1881 struct l2cap_conf_req *req = data;
1882 void *ptr = req->data;
1885 struct l2cap_conf_rfc rfc;
1887 BT_DBG("sk %p, rsp %p, len %d, req %p", sk, rsp, len, data);
1889 while (len >= L2CAP_CONF_OPT_SIZE) {
1890 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
1893 case L2CAP_CONF_MTU:
/* Peer proposed an MTU below the spec minimum: mark unacceptable and
 * counter with the minimum. */
1894 if (val < L2CAP_DEFAULT_MIN_MTU) {
1895 *result = L2CAP_CONF_UNACCEPT;
1896 pi->imtu = L2CAP_DEFAULT_MIN_MTU;
1899 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);
1902 case L2CAP_CONF_FLUSH_TO:
1904 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
1908 case L2CAP_CONF_RFC:
1909 if (olen == sizeof(rfc))
1910 memcpy(&rfc, (void *)val, olen);
/* A state-2 device must not be talked out of its configured mode. */
1912 if ((pi->conf_state & L2CAP_CONF_STATE2_DEVICE) &&
1913 rfc.mode != pi->mode)
1914 return -ECONNREFUSED;
1918 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
1919 sizeof(rfc), (unsigned long) &rfc);
1924 if (pi->mode == L2CAP_MODE_BASIC && pi->mode != rfc.mode)
1925 return -ECONNREFUSED;
1927 pi->mode = rfc.mode;
1929 if (*result == L2CAP_CONF_SUCCESS) {
1931 case L2CAP_MODE_ERTM:
/* Adopt the peer's (little-endian) timeout and MPS values. */
1932 pi->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
1933 pi->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
1934 pi->mps = le16_to_cpu(rfc.max_pdu_size);
1936 case L2CAP_MODE_STREAMING:
1937 pi->mps = le16_to_cpu(rfc.max_pdu_size);
1941 req->dcid = cpu_to_le16(pi->dcid);
1942 req->flags = cpu_to_le16(0x0000);
/* Build a bare Configure Response (scid/result/flags, no options) into
 * `data`; the length returned to the caller comes from elided lines. */
1947 static int l2cap_build_conf_rsp(struct sock *sk, void *data, u16 result, u16 flags)
1949 struct l2cap_conf_rsp *rsp = data;
1950 void *ptr = rsp->data;
1952 BT_DBG("sk %p", sk);
1954 rsp->scid = cpu_to_le16(l2cap_pi(sk)->dcid);
1955 rsp->result = cpu_to_le16(result);
1956 rsp->flags = cpu_to_le16(flags);
/* Extract the RFC option from a successful Configure Response and commit
 * the negotiated ERTM/streaming parameters to the socket.  No-op for
 * basic-mode channels. */
1961 static void l2cap_conf_rfc_get(struct sock *sk, void *rsp, int len)
1963 struct l2cap_pinfo *pi = l2cap_pi(sk);
1966 struct l2cap_conf_rfc rfc;
1968 BT_DBG("sk %p, rsp %p, len %d", sk, rsp, len);
1970 if ((pi->mode != L2CAP_MODE_ERTM) && (pi->mode != L2CAP_MODE_STREAMING))
1973 while (len >= L2CAP_CONF_OPT_SIZE) {
1974 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val)#
1977 case L2CAP_CONF_RFC:
1978 if (olen == sizeof(rfc))
1979 memcpy(&rfc, (void *)val, olen);
1986 case L2CAP_MODE_ERTM:
1987 pi->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
1988 pi->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
1989 pi->mps = le16_to_cpu(rfc.max_pdu_size);
1991 case L2CAP_MODE_STREAMING:
1992 pi->mps = le16_to_cpu(rfc.max_pdu_size);
/* Handle an incoming Command Reject.  Only the "command not understood"
 * reason (0x0000) in reply to our outstanding Information Request matters:
 * treat it as "features not supported", finish feature discovery and let
 * pending channels proceed. */
1996 static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1998 struct l2cap_cmd_rej *rej = (struct l2cap_cmd_rej *) data;
2000 if (rej->reason != 0x0000)
2003 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
2004 cmd->ident == conn->info_ident) {
2005 del_timer(&conn->info_timer);
2007 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2008 conn->info_ident = 0;
2010 l2cap_conn_start(conn);
/* Handle an incoming Connection Request: find a listening socket for the
 * PSM, security-check the link, allocate a child socket + channel, add it
 * to the connection, and reply with a Connection Response (success,
 * pending, or an error result).  May also kick off feature-mask discovery
 * or the first Configure Request.  Several error/cleanup paths are elided
 * from this excerpt. */
2016 static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2018 struct l2cap_chan_list *list = &conn->chan_list;
2019 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
2020 struct l2cap_conn_rsp rsp;
2021 struct l2cap_chan *chan;
2022 struct sock *parent, *sk = NULL;
2023 int result, status = L2CAP_CS_NO_INFO;
2025 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
2026 __le16 psm = req->psm;
2028 BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);
2030 /* Check if we have socket listening on psm */
2031 parent = l2cap_get_sock_by_psm(BT_LISTEN, psm, conn->src);
2033 result = L2CAP_CR_BAD_PSM;
2037 bh_lock_sock(parent);
2039 /* Check if the ACL is secure enough (if not SDP) */
/* PSM 0x0001 is SDP, which is exempt from the link-mode check. */
2040 if (psm != cpu_to_le16(0x0001) &&
2041 !hci_conn_check_link_mode(conn->hcon)) {
2042 conn->disc_reason = 0x05;
2043 result = L2CAP_CR_SEC_BLOCK;
2047 result = L2CAP_CR_NO_MEM;
2049 /* Check for backlog size */
2050 if (sk_acceptq_is_full(parent)) {
2051 BT_DBG("backlog full %d", parent->sk_ack_backlog);
2055 sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP, GFP_ATOMIC);
2059 chan = l2cap_chan_alloc(sk);
2061 l2cap_sock_kill(sk);
2065 write_lock_bh(&list->lock);
2067 /* Check if we already have channel with that dcid */
/* Duplicate remote SCID: the peer is reusing a CID; drop the new sock. */
2068 if (__l2cap_get_chan_by_dcid(list, scid)) {
2069 write_unlock_bh(&list->lock);
2070 sock_set_flag(sk, SOCK_ZAPPED);
2071 l2cap_sock_kill(sk);
2075 hci_conn_hold(conn->hcon);
2077 l2cap_sock_init(sk, parent);
2078 bacpy(&bt_sk(sk)->src, conn->src);
2079 bacpy(&bt_sk(sk)->dst, conn->dst);
2080 l2cap_pi(sk)->psm = psm;
2081 l2cap_pi(sk)->dcid = scid;
2083 bt_accept_enqueue(parent, sk);
2085 __l2cap_chan_add(conn, chan);
2087 l2cap_pi(sk)->chan = chan;
/* Our local SCID becomes the DCID we report back to the initiator. */
2089 dcid = l2cap_pi(sk)->scid;
2091 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
2093 l2cap_pi(sk)->ident = cmd->ident;
2095 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
2096 if (l2cap_check_security(sk)) {
/* defer_setup: leave the accept decision to userspace, answer PEND. */
2097 if (bt_sk(sk)->defer_setup) {
2098 sk->sk_state = BT_CONNECT2;
2099 result = L2CAP_CR_PEND;
2100 status = L2CAP_CS_AUTHOR_PEND;
2101 parent->sk_data_ready(parent, 0);
2103 sk->sk_state = BT_CONFIG;
2104 result = L2CAP_CR_SUCCESS;
2105 status = L2CAP_CS_NO_INFO;
2108 sk->sk_state = BT_CONNECT2;
2109 result = L2CAP_CR_PEND;
2110 status = L2CAP_CS_AUTHEN_PEND;
/* Feature discovery still outstanding: answer PEND / NO_INFO. */
2113 sk->sk_state = BT_CONNECT2;
2114 result = L2CAP_CR_PEND;
2115 status = L2CAP_CS_NO_INFO;
2118 write_unlock_bh(&list->lock);
2121 bh_unlock_sock(parent);
2124 rsp.scid = cpu_to_le16(scid);
2125 rsp.dcid = cpu_to_le16(dcid);
2126 rsp.result = cpu_to_le16(result);
2127 rsp.status = cpu_to_le16(status);
2128 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
/* First connect while features unknown: start the info transaction. */
2130 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
2131 struct l2cap_info_req info;
2132 info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
2134 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
2135 conn->info_ident = l2cap_get_ident(conn);
2137 mod_timer(&conn->info_timer, jiffies +
2138 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
2140 l2cap_send_cmd(conn, conn->info_ident,
2141 L2CAP_INFO_REQ, sizeof(info), &info);
/* Accepted immediately: fire off our first Configure Request. */
2144 if (sk && !(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT) &&
2145 result == L2CAP_CR_SUCCESS) {
2147 l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
2148 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2149 l2cap_build_conf_req(sk, buf), buf);
2150 l2cap_pi(sk)->num_conf_req++;
/* Handle an incoming Connection Response: locate the channel by SCID (or
 * by command ident when still pending), then on success move to BT_CONFIG
 * and send our Configure Request; on pending just mark the state; on
 * failure tear the channel down (deferred when userspace holds the sock). */
2156 static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2158 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
2159 u16 scid, dcid, result, status;
2160 struct l2cap_chan *chan;
2164 scid = __le16_to_cpu(rsp->scid);
2165 dcid = __le16_to_cpu(rsp->dcid);
2166 result = __le16_to_cpu(rsp->result);
2167 status = __le16_to_cpu(rsp->status);
2169 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);
2172 chan = l2cap_get_chan_by_scid(&conn->chan_list, scid);
/* SCID 0 in the response: fall back to matching by our request ident. */
2176 chan = l2cap_get_chan_by_ident(&conn->chan_list, cmd->ident);
2184 case L2CAP_CR_SUCCESS:
2185 sk->sk_state = BT_CONFIG;
2186 l2cap_pi(sk)->ident = 0;
2187 l2cap_pi(sk)->dcid = dcid;
2188 l2cap_pi(sk)->conf_state &= ~L2CAP_CONF_CONNECT_PEND;
/* Don't send a second Configure Request if one is already out. */
2190 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)
2193 l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
2195 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2196 l2cap_build_conf_req(sk, req), req);
2197 l2cap_pi(sk)->num_conf_req++;
2201 l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
2205 /* don't delete l2cap channel if sk is owned by user */
2206 if (sock_owned_by_user(sk)) {
2207 sk->sk_state = BT_DISCONN;
2208 l2cap_sock_clear_timer(sk);
/* Retry the teardown shortly, once userspace releases the socket. */
2209 l2cap_sock_set_timer(sk, HZ / 5);
2213 l2cap_chan_del(chan, ECONNREFUSED);
2221 static inline void set_default_fcs(struct l2cap_pinfo *pi)
2223 /* FCS is enabled only in ERTM or streaming mode, if one or both
2226 if (pi->mode != L2CAP_MODE_ERTM && pi->mode != L2CAP_MODE_STREAMING)
2227 pi->fcs = L2CAP_FCS_NONE;
/* Neither side requested "no FCS" during config: default to CRC16. */
2228 else if (!(pi->conf_state & L2CAP_CONF_NO_FCS_RECV))
2229 pi->fcs = L2CAP_FCS_CRC16;
/* Handle an incoming Configure Request: accumulate option fragments in
 * pi->conf_req across continuation packets, then parse the complete set,
 * send the Configure Response, and — once both directions are configured —
 * bring the channel up (initialising ERTM state when that mode won). */
2232 static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
2234 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
2237 struct l2cap_chan *chan;
2241 dcid = __le16_to_cpu(req->dcid);
2242 flags = __le16_to_cpu(req->flags);
2244 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
2246 chan = l2cap_get_chan_by_scid(&conn->chan_list, dcid);
/* Config is only legal in BT_CONFIG; otherwise reject (reason 0x0002 =
 * invalid CID per the signalling spec). */
2252 if (sk->sk_state != BT_CONFIG) {
2253 struct l2cap_cmd_rej rej;
2255 rej.reason = cpu_to_le16(0x0002);
2256 l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
2261 /* Reject if config buffer is too small. */
2262 len = cmd_len - sizeof(*req);
2263 if (l2cap_pi(sk)->conf_len + len > sizeof(l2cap_pi(sk)->conf_req)) {
2264 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2265 l2cap_build_conf_rsp(sk, rsp,
2266 L2CAP_CONF_REJECT, flags), rsp);
/* Append this fragment's options to the per-socket staging buffer. */
2271 memcpy(l2cap_pi(sk)->conf_req + l2cap_pi(sk)->conf_len, req->data, len);
2272 l2cap_pi(sk)->conf_len += len;
2274 if (flags & 0x0001) {
2275 /* Incomplete config. Send empty response. */
2276 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2277 l2cap_build_conf_rsp(sk, rsp,
2278 L2CAP_CONF_SUCCESS, 0x0001), rsp);
2282 /* Complete config. */
2283 len = l2cap_parse_conf_req(sk, rsp);
2285 l2cap_send_disconn_req(conn, sk, ECONNRESET);
2289 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
2290 l2cap_pi(sk)->num_conf_rsp++;
2292 /* Reset config buffer. */
2293 l2cap_pi(sk)->conf_len = 0;
2295 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE))
/* Both directions configured: the channel becomes connected. */
2298 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_INPUT_DONE) {
2299 set_default_fcs(l2cap_pi(sk));
2301 sk->sk_state = BT_CONNECTED;
2303 l2cap_pi(sk)->next_tx_seq = 0;
2304 l2cap_pi(sk)->expected_tx_seq = 0;
2305 __skb_queue_head_init(TX_QUEUE(sk));
2306 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
2307 l2cap_ertm_init(sk);
2309 l2cap_chan_ready(sk);
/* Peer configured first: send our own Configure Request now. */
2313 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)) {
2315 l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
2316 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2317 l2cap_build_conf_req(sk, buf), buf);
2318 l2cap_pi(sk)->num_conf_req++;
/* Handle an incoming Configure Response: on success commit the RFC
 * parameters; on unacceptable-parameters retry with the peer's counter
 * proposals (bounded by L2CAP_CONF_MAX_CONF_RSP); otherwise disconnect.
 * When both directions finish, bring the channel up. */
2326 static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2328 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
2329 u16 scid, flags, result;
2330 struct l2cap_chan *chan;
2332 int len = cmd->len - sizeof(*rsp);
2334 scid = __le16_to_cpu(rsp->scid);
2335 flags = __le16_to_cpu(rsp->flags);
2336 result = __le16_to_cpu(rsp->result);
2338 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
2339 scid, flags, result);
2341 chan = l2cap_get_chan_by_scid(&conn->chan_list, scid);
2348 case L2CAP_CONF_SUCCESS:
2349 l2cap_conf_rfc_get(sk, rsp->data, len);
2352 case L2CAP_CONF_UNACCEPT:
/* Give up after the maximum number of negotiation rounds. */
2353 if (l2cap_pi(sk)->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
/* Guard against a response larger than our request buffer. */
2356 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
2357 l2cap_send_disconn_req(conn, sk, ECONNRESET);
2361 /* throw out any old stored conf requests */
2362 result = L2CAP_CONF_SUCCESS;
2363 len = l2cap_parse_conf_rsp(sk, rsp->data,
2366 l2cap_send_disconn_req(conn, sk, ECONNRESET);
2370 l2cap_send_cmd(conn, l2cap_get_ident(conn),
2371 L2CAP_CONF_REQ, len, req);
2372 l2cap_pi(sk)->num_conf_req++;
2373 if (result != L2CAP_CONF_SUCCESS)
/* Rejected or unknown result: tear the channel down. */
2379 sk->sk_err = ECONNRESET;
2380 l2cap_sock_set_timer(sk, HZ * 5);
2381 l2cap_send_disconn_req(conn, sk, ECONNRESET);
2388 l2cap_pi(sk)->conf_state |= L2CAP_CONF_INPUT_DONE;
2390 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE) {
2391 set_default_fcs(l2cap_pi(sk));
2393 sk->sk_state = BT_CONNECTED;
2394 l2cap_pi(sk)->next_tx_seq = 0;
2395 l2cap_pi(sk)->expected_tx_seq = 0;
2396 __skb_queue_head_init(TX_QUEUE(sk));
2397 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
2398 l2cap_ertm_init(sk);
2400 l2cap_chan_ready(sk);
/* Handle an incoming Disconnection Request: acknowledge with a
 * Disconnection Response, shut the socket down, and delete the channel —
 * deferred via a short timer when userspace currently owns the socket. */
2408 static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2410 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
2411 struct l2cap_disconn_rsp rsp;
2413 struct l2cap_chan *chan;
2416 scid = __le16_to_cpu(req->scid);
2417 dcid = __le16_to_cpu(req->dcid);
2419 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
/* The peer's DCID is our SCID, so look the channel up by dcid. */
2421 chan = l2cap_get_chan_by_scid(&conn->chan_list, dcid);
2427 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
2428 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
2429 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
2431 sk->sk_shutdown = SHUTDOWN_MASK;
2433 /* don't delete l2cap channel if sk is owned by user */
2434 if (sock_owned_by_user(sk)) {
2435 sk->sk_state = BT_DISCONN;
2436 l2cap_sock_clear_timer(sk);
2437 l2cap_sock_set_timer(sk, HZ / 5);
2442 l2cap_chan_del(chan, ECONNRESET);
2445 l2cap_sock_kill(sk);
/* Handle an incoming Disconnection Response: the peer has confirmed our
 * disconnect, so delete the channel (reason 0 = clean close), deferring
 * when userspace owns the socket. */
2449 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2451 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
2453 struct l2cap_chan *chan;
2456 scid = __le16_to_cpu(rsp->scid);
2457 dcid = __le16_to_cpu(rsp->dcid);
2459 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
2461 chan = l2cap_get_chan_by_scid(&conn->chan_list, scid);
2467 /* don't delete l2cap channel if sk is owned by user */
2468 if (sock_owned_by_user(sk)) {
2469 sk->sk_state = BT_DISCONN;
2470 l2cap_sock_clear_timer(sk);
2471 l2cap_sock_set_timer(sk, HZ / 5);
2476 l2cap_chan_del(chan, 0);
2479 l2cap_sock_kill(sk);
/* Handle an incoming Information Request.  We answer the feature-mask and
 * fixed-channels queries with success; anything else gets NOTSUPP. */
2483 static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2485 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
2488 type = __le16_to_cpu(req->type);
2490 BT_DBG("type 0x%4.4x", type);
2492 if (type == L2CAP_IT_FEAT_MASK) {
2494 u32 feat_mask = l2cap_feat_mask;
2495 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
2496 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
2497 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
/* Advertise ERTM + streaming on top of the static mask (the expression
 * continues on an elided line). */
2499 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
2501 put_unaligned_le32(feat_mask, rsp->data);
2502 l2cap_send_cmd(conn, cmd->ident,
2503 L2CAP_INFO_RSP, sizeof(buf), buf);
2504 } else if (type == L2CAP_IT_FIXED_CHAN) {
2506 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
2507 rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
2508 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
/* 8-byte fixed-channel bitmap follows the 4-byte response header. */
2509 memcpy(buf + 4, l2cap_fixed_chan, 8);
2510 l2cap_send_cmd(conn, cmd->ident,
2511 L2CAP_INFO_RSP, sizeof(buf), buf);
2513 struct l2cap_info_rsp rsp;
2514 rsp.type = cpu_to_le16(type);
2515 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
2516 l2cap_send_cmd(conn, cmd->ident,
2517 L2CAP_INFO_RSP, sizeof(rsp), &rsp);
/* Handle an incoming Information Response to our feature/fixed-channel
 * query: record the peer's feature mask, optionally chain a fixed-channel
 * query, and once discovery is complete restart pending channel setup. */
2523 static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2525 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
2528 type = __le16_to_cpu(rsp->type);
2529 result = __le16_to_cpu(rsp->result);
2531 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
2533 /* L2CAP Info req/rsp are unbound to channels, add extra checks */
2534 if (cmd->ident != conn->info_ident ||
2535 conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
2538 del_timer(&conn->info_timer);
/* Peer refused the query: treat discovery as done with no features. */
2540 if (result != L2CAP_IR_SUCCESS) {
2541 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2542 conn->info_ident = 0;
2544 l2cap_conn_start(conn);
2549 if (type == L2CAP_IT_FEAT_MASK) {
2550 conn->feat_mask = get_unaligned_le32(rsp->data);
/* If the peer supports fixed channels, ask which ones before we
 * declare discovery finished. */
2552 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
2553 struct l2cap_info_req req;
2554 req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
2556 conn->info_ident = l2cap_get_ident(conn);
2558 l2cap_send_cmd(conn, conn->info_ident,
2559 L2CAP_INFO_REQ, sizeof(req), &req);
2561 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2562 conn->info_ident = 0;
2564 l2cap_conn_start(conn);
2566 } else if (type == L2CAP_IT_FIXED_CHAN) {
2567 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2568 conn->info_ident = 0;
2570 l2cap_conn_start(conn);
/* Validate LE connection-parameter-update values against the ranges the
 * spec allows (interval 6..3200, timeout multiplier 10..3200, latency
 * small enough that the link cannot time out); the rejecting return
 * statements are on elided lines. */
2576 static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency,
2581 if (min > max || min < 6 || max > 3200)
2584 if (to_multiplier < 10 || to_multiplier > 3200)
/* The supervision timeout must exceed the maximum connection interval. */
2587 if (max >= to_multiplier * 8)
2590 max_latency = (to_multiplier * 8 / max) - 1;
2591 if (latency > 499 || latency > max_latency)
/* Handle an LE Connection Parameter Update Request (valid only when we
 * are master): validate the proposed parameters, send accept/reject, and
 * on accept program the controller with the new parameters. */
2597 static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
2598 struct l2cap_cmd_hdr *cmd, u8 *data)
2600 struct hci_conn *hcon = conn->hcon;
2601 struct l2cap_conn_param_update_req *req;
2602 struct l2cap_conn_param_update_rsp rsp;
2603 u16 min, max, latency, to_multiplier, cmd_len;
2606 if (!(hcon->link_mode & HCI_LM_MASTER))
/* Reject malformed requests before touching the payload. */
2609 cmd_len = __le16_to_cpu(cmd->len);
2610 if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
2613 req = (struct l2cap_conn_param_update_req *) data;
2614 min = __le16_to_cpu(req->min);
2615 max = __le16_to_cpu(req->max);
2616 latency = __le16_to_cpu(req->latency);
2617 to_multiplier = __le16_to_cpu(req->to_multiplier);
2619 BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
2620 min, max, latency, to_multiplier);
2622 memset(&rsp, 0, sizeof(rsp));
2624 err = l2cap_check_conn_param(min, max, latency, to_multiplier);
2626 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
2628 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
2630 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
/* Only reached on accept (the guard is on an elided line — TODO confirm):
 * push the new parameters down to the LE controller. */
2634 hci_le_conn_update(hcon, min, max, latency, to_multiplier);
/* Dispatch one BR/EDR signalling command to its handler; echo requests
 * are answered inline, unknown opcodes are logged (and, per the elided
 * lines, presumably rejected by the caller -- TODO confirm). */
2639 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
2640 struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
2644 switch (cmd->code) {
2645 case L2CAP_COMMAND_REJ:
2646 l2cap_command_rej(conn, cmd, data);
2649 case L2CAP_CONN_REQ:
2650 err = l2cap_connect_req(conn, cmd, data);
2653 case L2CAP_CONN_RSP:
2654 err = l2cap_connect_rsp(conn, cmd, data);
2657 case L2CAP_CONF_REQ:
2658 err = l2cap_config_req(conn, cmd, cmd_len, data);
2661 case L2CAP_CONF_RSP:
2662 err = l2cap_config_rsp(conn, cmd, data);
2665 case L2CAP_DISCONN_REQ:
2666 err = l2cap_disconnect_req(conn, cmd, data);
2669 case L2CAP_DISCONN_RSP:
2670 err = l2cap_disconnect_rsp(conn, cmd, data);
2673 case L2CAP_ECHO_REQ:
/* Echo: reflect the payload straight back with the same ident. */
2674 l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
2677 case L2CAP_ECHO_RSP:
2680 case L2CAP_INFO_REQ:
2681 err = l2cap_information_req(conn, cmd, data);
2684 case L2CAP_INFO_RSP:
2685 err = l2cap_information_rsp(conn, cmd, data);
2689 BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
/* Dispatch one LE signalling command: only the connection-parameter
 * update request is handled; rejects and update responses are ignored,
 * anything else is logged as unknown. */
2697 static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
2698 struct l2cap_cmd_hdr *cmd, u8 *data)
2700 switch (cmd->code) {
2701 case L2CAP_COMMAND_REJ:
2704 case L2CAP_CONN_PARAM_UPDATE_REQ:
2705 return l2cap_conn_param_update_req(conn, cmd, data);
2707 case L2CAP_CONN_PARAM_UPDATE_RSP:
2711 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
/* Process a frame received on the signalling CID: copy it to raw sockets,
 * then iterate the contained commands, dispatching each to the BR/EDR or
 * LE handler and answering handler errors with a Command Reject. */
2716 static inline void l2cap_sig_channel(struct l2cap_conn *conn,
2717 struct sk_buff *skb)
2719 u8 *data = skb->data;
2721 struct l2cap_cmd_hdr cmd;
2724 l2cap_raw_recv(conn, skb);
/* A single signalling frame may carry several commands back to back. */
2726 while (len >= L2CAP_CMD_HDR_SIZE) {
2728 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
2729 data += L2CAP_CMD_HDR_SIZE;
2730 len -= L2CAP_CMD_HDR_SIZE;
2732 cmd_len = le16_to_cpu(cmd.len);
2734 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);
/* Truncated command or reserved ident 0: stop parsing the frame. */
2736 if (cmd_len > len || !cmd.ident) {
2737 BT_DBG("corrupted command");
2741 if (conn->hcon->type == LE_LINK)
2742 err = l2cap_le_sig_cmd(conn, &cmd, data);
2744 err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);
2747 struct l2cap_cmd_rej rej;
2749 BT_ERR("Wrong link type (%d)", err);
2751 /* FIXME: Map err to a valid reason */
2752 rej.reason = cpu_to_le16(0);
2753 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
/* Verify the trailing CRC16 FCS of a received ERTM/streaming frame.
 * Only active when CRC16 is negotiated; the trim removes the 2 FCS bytes
 * from skb->len, after which skb->data + skb->len still points at them. */
2763 static int l2cap_check_fcs(struct l2cap_pinfo *pi, struct sk_buff *skb)
2765 u16 our_fcs, rcv_fcs;
/* FCS covers the L2CAP header plus the 2-byte control field. */
2766 int hdr_size = L2CAP_HDR_SIZE + 2;
2768 if (pi->fcs == L2CAP_FCS_CRC16) {
2769 skb_trim(skb, skb->len - 2);
2770 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
2771 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
2773 if (our_fcs != rcv_fcs)
/* After an F-bit exchange, resume transmission: send RNR if we are
 * locally busy, otherwise (re)transmit pending I-frames, and fall back to
 * an RR when nothing at all was sent so the peer still gets an ack. */
2779 static inline void l2cap_send_i_or_rr_or_rnr(struct sock *sk)
2781 struct l2cap_pinfo *pi = l2cap_pi(sk);
2784 pi->frames_sent = 0;
/* Every S-frame we build here acknowledges up to buffer_seq. */
2786 control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
2788 if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
2789 control |= L2CAP_SUPER_RCV_NOT_READY;
2790 l2cap_send_sframe(pi, control);
2791 pi->conn_state |= L2CAP_CONN_RNR_SENT;
2794 if (pi->conn_state & L2CAP_CONN_REMOTE_BUSY)
2795 l2cap_retransmit_frames(sk);
2797 l2cap_ertm_send(sk);
/* Nothing went out and we are not busy: send a plain RR as the ack. */
2799 if (!(pi->conn_state & L2CAP_CONN_LOCAL_BUSY) &&
2800 pi->frames_sent == 0) {
2801 control |= L2CAP_SUPER_RCV_READY;
2802 l2cap_send_sframe(pi, control);
/* Insert an out-of-sequence I-frame into the SREJ queue, keeping the
 * queue ordered by tx_seq relative to buffer_seq (modulo-64 sequence
 * space).  Duplicate tx_seq detection is on an elided line. */
2806 static int l2cap_add_to_srej_queue(struct sock *sk, struct sk_buff *skb, u8 tx_seq, u8 sar)
2808 struct sk_buff *next_skb;
2809 struct l2cap_pinfo *pi = l2cap_pi(sk);
2810 int tx_seq_offset, next_tx_seq_offset;
2812 bt_cb(skb)->tx_seq = tx_seq;
2813 bt_cb(skb)->sar = sar;
/* Empty queue: just append. */
2815 next_skb = skb_peek(SREJ_QUEUE(sk));
2817 __skb_queue_tail(SREJ_QUEUE(sk), skb);
/* Offsets are distances from buffer_seq in the mod-64 sequence space,
 * normalised to 0..63 so they compare linearly. */
2821 tx_seq_offset = (tx_seq - pi->buffer_seq) % 64;
2822 if (tx_seq_offset < 0)
2823 tx_seq_offset += 64;
2826 if (bt_cb(next_skb)->tx_seq == tx_seq)
2829 next_tx_seq_offset = (bt_cb(next_skb)->tx_seq -
2830 pi->buffer_seq) % 64;
2831 if (next_tx_seq_offset < 0)
2832 next_tx_seq_offset += 64;
2834 if (next_tx_seq_offset > tx_seq_offset) {
2835 __skb_queue_before(SREJ_QUEUE(sk), next_skb, skb);
2839 if (skb_queue_is_last(SREJ_QUEUE(sk), next_skb))
2842 } while ((next_skb = skb_queue_next(SREJ_QUEUE(sk), next_skb)));
2844 __skb_queue_tail(SREJ_QUEUE(sk), skb);
/* Reassemble an SDU from ERTM I-frames according to the SAR bits:
 * unsegmented frames go straight to the socket; START allocates the
 * reassembly skb from the announced SDU length; CONTINUE/END append and
 * validate lengths; END delivers a clone (retrying later via
 * SAR_RETRY if the receive queue is full).  Error paths that disconnect
 * are at the bottom. */
2849 static int l2cap_ertm_reassembly_sdu(struct sock *sk, struct sk_buff *skb, u16 control)
2851 struct l2cap_pinfo *pi = l2cap_pi(sk);
2852 struct sk_buff *_skb;
2855 switch (control & L2CAP_CTRL_SAR) {
2856 case L2CAP_SDU_UNSEGMENTED:
/* An unsegmented frame in the middle of a segmented SDU is a protocol
 * violation (handling on elided lines). */
2857 if (pi->conn_state & L2CAP_CONN_SAR_SDU)
2860 err = sock_queue_rcv_skb(sk, skb);
2866 case L2CAP_SDU_START:
2867 if (pi->conn_state & L2CAP_CONN_SAR_SDU)
/* START frames carry the total SDU length in their first two bytes. */
2870 pi->sdu_len = get_unaligned_le16(skb->data);
2872 if (pi->sdu_len > pi->imtu)
2875 pi->sdu = bt_skb_alloc(pi->sdu_len, GFP_ATOMIC);
2879 /* pull sdu_len bytes only after alloc, because of Local Busy
2880 * condition we have to be sure that this will be executed
2881 * only once, i.e., when alloc does not fail */
2884 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
2886 pi->conn_state |= L2CAP_CONN_SAR_SDU;
2887 pi->partial_sdu_len = skb->len;
2890 case L2CAP_SDU_CONTINUE:
2891 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
2897 pi->partial_sdu_len += skb->len;
2898 if (pi->partial_sdu_len > pi->sdu_len)
2901 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
/* SDU_END case (label on an elided line). */
2906 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
/* On a retry the final fragment was already appended; skip the
 * length bookkeeping and copy. */
2912 if (!(pi->conn_state & L2CAP_CONN_SAR_RETRY)) {
2913 pi->partial_sdu_len += skb->len;
2915 if (pi->partial_sdu_len > pi->imtu)
2918 if (pi->partial_sdu_len != pi->sdu_len)
2921 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
2924 _skb = skb_clone(pi->sdu, GFP_ATOMIC);
2926 pi->conn_state |= L2CAP_CONN_SAR_RETRY;
2930 err = sock_queue_rcv_skb(sk, _skb);
2933 pi->conn_state |= L2CAP_CONN_SAR_RETRY;
2937 pi->conn_state &= ~L2CAP_CONN_SAR_RETRY;
2938 pi->conn_state &= ~L2CAP_CONN_SAR_SDU;
/* Unrecoverable reassembly error: drop the link. */
2952 l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
/* Try to drain the local-busy queue into the socket.  Stops (re-queuing
 * the frame) when delivery fails again; once empty, exits the local busy
 * condition, sending an RR+Poll if we had announced RNR. */
2957 static int l2cap_try_push_rx_skb(struct sock *sk)
2959 struct l2cap_pinfo *pi = l2cap_pi(sk);
2960 struct sk_buff *skb;
2964 while ((skb = skb_dequeue(BUSY_QUEUE(sk)))) {
2965 control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
2966 err = l2cap_ertm_reassembly_sdu(sk, skb, control);
/* Still busy: put the frame back at the head and bail out. */
2968 skb_queue_head(BUSY_QUEUE(sk), skb);
2972 pi->buffer_seq = (pi->buffer_seq + 1) % 64;
2975 if (!(pi->conn_state & L2CAP_CONN_RNR_SENT))
/* We told the peer "not ready" earlier: poll it to resume, and wait
 * for the F-bit response under the monitor timer. */
2978 control = pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
2979 control |= L2CAP_SUPER_RCV_READY | L2CAP_CTRL_POLL;
2980 l2cap_send_sframe(pi, control);
2981 l2cap_pi(sk)->retry_count = 1;
2983 del_timer(&pi->retrans_timer);
2984 __mod_monitor_timer();
2986 l2cap_pi(sk)->conn_state |= L2CAP_CONN_WAIT_F;
2989 pi->conn_state &= ~L2CAP_CONN_LOCAL_BUSY;
2990 pi->conn_state &= ~L2CAP_CONN_RNR_SENT;
2992 BT_DBG("sk %p, Exit local busy", sk);
/* Workqueue item run while the channel is locally busy: repeatedly sleep
 * and retry pushing queued frames to the socket, giving up (and
 * disconnecting) after L2CAP_LOCAL_BUSY_TRIES attempts, on a pending
 * signal, or on a socket error. */
2997 static void l2cap_busy_work(struct work_struct *work)
2999 DECLARE_WAITQUEUE(wait, current);
3000 struct l2cap_pinfo *pi =
3001 container_of(work, struct l2cap_pinfo, busy_work);
/* struct sock is the first member of l2cap_pinfo, so this cast is the
 * file's usual pinfo->sock conversion. */
3002 struct sock *sk = (struct sock *)pi;
3003 int n_tries = 0, timeo = HZ/5, err;
3004 struct sk_buff *skb;
3008 add_wait_queue(sk_sleep(sk), &wait);
3009 while ((skb = skb_peek(BUSY_QUEUE(sk)))) {
3010 set_current_state(TASK_INTERRUPTIBLE);
3012 if (n_tries++ > L2CAP_LOCAL_BUSY_TRIES) {
3014 l2cap_send_disconn_req(pi->conn, sk, EBUSY);
3021 if (signal_pending(current)) {
3022 err = sock_intr_errno(timeo);
3027 timeo = schedule_timeout(timeo);
3030 err = sock_error(sk);
/* Success clears the busy state inside l2cap_try_push_rx_skb. */
3034 if (l2cap_try_push_rx_skb(sk) == 0)
3038 set_current_state(TASK_RUNNING);
3039 remove_wait_queue(sk_sleep(sk), &wait);
/*
 * Deliver a received I-frame towards the socket.  If the channel is
 * already locally busy, the frame is appended to BUSY_QUEUE and a drain
 * attempt is made.  Otherwise it is reassembled directly; on success
 * buffer_seq advances (mod 64).  If reassembly reports busy, the local
 * busy condition is entered: the frame is queued, an RNR S-frame is
 * sent, the ack timer is stopped and the busy work item is scheduled.
 */
3044 static int l2cap_push_rx_skb(struct sock *sk, struct sk_buff *skb, u16 control)
3046 struct l2cap_pinfo *pi = l2cap_pi(sk);
3049 if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
/* remember the SAR bits so reassembly can be replayed later */
3050 bt_cb(skb)->sar = control >> L2CAP_CTRL_SAR_SHIFT;
3051 __skb_queue_tail(BUSY_QUEUE(sk), skb);
3052 return l2cap_try_push_rx_skb(sk);
3057 err = l2cap_ertm_reassembly_sdu(sk, skb, control);
3059 pi->buffer_seq = (pi->buffer_seq + 1) % 64;
3063 /* Busy Condition */
3064 BT_DBG("sk %p, Enter local busy", sk);
3066 pi->conn_state |= L2CAP_CONN_LOCAL_BUSY;
3067 bt_cb(skb)->sar = control >> L2CAP_CTRL_SAR_SHIFT;
3068 __skb_queue_tail(BUSY_QUEUE(sk), skb);
/* tell the peer to stop sending: RNR carrying our buffer_seq */
3070 sctrl = pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3071 sctrl |= L2CAP_SUPER_RCV_NOT_READY;
3072 l2cap_send_sframe(pi, sctrl);
3074 pi->conn_state |= L2CAP_CONN_RNR_SENT;
3076 del_timer(&pi->ack_timer);
/* defer recovery to process context */
3078 queue_work(_busy_wq, &pi->busy_work);
/*
 * Reassemble an SDU from streaming-mode I-frames, dispatching on the
 * frame's SAR (segmentation) bits.  Unsegmented frames are queued to
 * the socket directly; START frames allocate a reassembly skb sized
 * from the SDU-length header field; CONTINUE/END frames append to it.
 * Streaming mode is unreliable, so inconsistent SAR state just drops
 * the partial SDU rather than disconnecting.
 */
3083 static int l2cap_streaming_reassembly_sdu(struct sock *sk, struct sk_buff *skb, u16 control)
3085 struct l2cap_pinfo *pi = l2cap_pi(sk);
3086 struct sk_buff *_skb;
3090 * TODO: We have to notify the userland if some data is lost with the
3094 switch (control & L2CAP_CTRL_SAR) {
3095 case L2CAP_SDU_UNSEGMENTED:
/* an unsegmented frame arriving mid-SDU means we lost frames */
3096 if (pi->conn_state & L2CAP_CONN_SAR_SDU) {
3101 err = sock_queue_rcv_skb(sk, skb);
3107 case L2CAP_SDU_START:
3108 if (pi->conn_state & L2CAP_CONN_SAR_SDU) {
/* first two payload bytes of a START frame carry the total SDU length */
3113 pi->sdu_len = get_unaligned_le16(skb->data);
3116 if (pi->sdu_len > pi->imtu) {
3121 pi->sdu = bt_skb_alloc(pi->sdu_len, GFP_ATOMIC);
3127 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3129 pi->conn_state |= L2CAP_CONN_SAR_SDU;
3130 pi->partial_sdu_len = skb->len;
3134 case L2CAP_SDU_CONTINUE:
3135 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
3138 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3140 pi->partial_sdu_len += skb->len;
/* received more than the START frame announced: discard */
3141 if (pi->partial_sdu_len > pi->sdu_len)
3149 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
3152 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3154 pi->conn_state &= ~L2CAP_CONN_SAR_SDU;
3155 pi->partial_sdu_len += skb->len;
3157 if (pi->partial_sdu_len > pi->imtu)
/* complete SDU: hand a clone to the socket receive queue */
3160 if (pi->partial_sdu_len == pi->sdu_len) {
3161 _skb = skb_clone(pi->sdu, GFP_ATOMIC);
/* NOTE(review): _skb is not visibly checked for NULL before being
 * queued — skb_clone() can fail under GFP_ATOMIC; confirm the elided
 * lines handle this. */
3162 err = sock_queue_rcv_skb(sk, _skb);
/*
 * After a missing frame has been recovered, flush consecutive in-order
 * frames from SREJ_QUEUE into SDU reassembly, starting at tx_seq and
 * stopping at the first gap.  buffer_seq_srej tracks the acknowledge
 * position while operating in SREJ recovery (sequence space is mod 64).
 */
3177 static void l2cap_check_srej_gap(struct sock *sk, u8 tx_seq)
3179 struct sk_buff *skb;
3182 while ((skb = skb_peek(SREJ_QUEUE(sk)))) {
/* stop at the first out-of-order frame — the gap is still open */
3183 if (bt_cb(skb)->tx_seq != tx_seq)
3186 skb = skb_dequeue(SREJ_QUEUE(sk));
3187 control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
3188 l2cap_ertm_reassembly_sdu(sk, skb, control);
3189 l2cap_pi(sk)->buffer_seq_srej =
3190 (l2cap_pi(sk)->buffer_seq_srej + 1) % 64;
3191 tx_seq = (tx_seq + 1) % 64;
/*
 * Re-send SREJ S-frames for every entry in SREJ_LIST up to and
 * including tx_seq.  Each matching entry is retransmitted and moved to
 * the tail of the list, preserving the outstanding-SREJ bookkeeping.
 * NOTE(review): the branch bodies around the tx_seq match are elided;
 * the freeing/removal of the matched entry is presumably in the hidden
 * lines — confirm against the full source.
 */
3195 static void l2cap_resend_srejframe(struct sock *sk, u8 tx_seq)
3197 struct l2cap_pinfo *pi = l2cap_pi(sk);
3198 struct srej_list *l, *tmp;
/* _safe iteration: entries are unlinked/re-linked while walking */
3201 list_for_each_entry_safe(l, tmp, SREJ_LIST(sk), list) {
3202 if (l->tx_seq == tx_seq) {
3207 control = L2CAP_SUPER_SELECT_REJECT;
3208 control |= l->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3209 l2cap_send_sframe(pi, control);
/* keep the entry outstanding: move it to the back of the list */
3211 list_add_tail(&l->list, SREJ_LIST(sk));
/*
 * Send one SREJ S-frame for every sequence number between the expected
 * tx_seq and the tx_seq actually received, recording each requested
 * frame in SREJ_LIST so retransmissions can be matched later.  On exit
 * expected_tx_seq is advanced one past the received frame.
 */
3215 static void l2cap_send_srejframe(struct sock *sk, u8 tx_seq)
3217 struct l2cap_pinfo *pi = l2cap_pi(sk);
3218 struct srej_list *new;
3221 while (tx_seq != pi->expected_tx_seq) {
3222 control = L2CAP_SUPER_SELECT_REJECT;
3223 control |= pi->expected_tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3224 l2cap_send_sframe(pi, control);
/* NOTE(review): kzalloc(GFP_ATOMIC) result is not checked before the
 * dereference on the next line — under memory pressure this is a NULL
 * pointer dereference.  Upstream later reworked this path to propagate
 * -ENOMEM; flagging rather than patching since surrounding lines are
 * not visible here. */
3226 new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC);
3227 new->tx_seq = pi->expected_tx_seq;
3228 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
3229 list_add_tail(&new->list, SREJ_LIST(sk));
3231 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
/*
 * Core ERTM receive-side state machine for I-frames.  Handles F-bit
 * resolution of a pending poll, acknowledges frames via req_seq,
 * validates tx_seq against the transmit window, and runs the SREJ
 * (selective reject) recovery protocol for out-of-sequence frames:
 * expected frames close gaps, unexpected ones either enter or extend
 * SREJ_SENT state.  In-sequence frames are pushed toward the socket
 * and acknowledged every num_to_ack frames.
 */
3234 static inline int l2cap_data_channel_iframe(struct sock *sk, u16 rx_control, struct sk_buff *skb)
3236 struct l2cap_pinfo *pi = l2cap_pi(sk);
3237 u8 tx_seq = __get_txseq(rx_control);
3238 u8 req_seq = __get_reqseq(rx_control);
3239 u8 sar = rx_control >> L2CAP_CTRL_SAR_SHIFT;
3240 int tx_seq_offset, expected_tx_seq_offset;
/* ack threshold: acknowledge after roughly a sixth of the tx window */
3241 int num_to_ack = (pi->tx_win/6) + 1;
3244 BT_DBG("sk %p len %d tx_seq %d rx_control 0x%4.4x", sk, skb->len, tx_seq,
/* F-bit answers our earlier poll: stop the monitor timer */
3247 if (L2CAP_CTRL_FINAL & rx_control &&
3248 l2cap_pi(sk)->conn_state & L2CAP_CONN_WAIT_F) {
3249 del_timer(&pi->monitor_timer);
3250 if (pi->unacked_frames > 0)
3251 __mod_retrans_timer();
3252 pi->conn_state &= ~L2CAP_CONN_WAIT_F;
/* req_seq acknowledges our transmitted frames up to that point */
3255 pi->expected_ack_seq = req_seq;
3256 l2cap_drop_acked_frames(sk);
3258 if (tx_seq == pi->expected_tx_seq)
3261 tx_seq_offset = (tx_seq - pi->buffer_seq) % 64;
3262 if (tx_seq_offset < 0)
3263 tx_seq_offset += 64;
3265 /* invalid tx_seq */
3266 if (tx_seq_offset >= pi->tx_win) {
3267 l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
/* NOTE(review): conn_state is a bitmask elsewhere in this file, yet
 * this compares with == rather than & — true only when LOCAL_BUSY is
 * the sole bit set.  Possibly intentional, but worth confirming. */
3271 if (pi->conn_state == L2CAP_CONN_LOCAL_BUSY)
3274 if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
3275 struct srej_list *first;
3277 first = list_first_entry(SREJ_LIST(sk),
3278 struct srej_list, list);
/* this frame fills the oldest outstanding gap */
3279 if (tx_seq == first->tx_seq) {
3280 l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
3281 l2cap_check_srej_gap(sk, tx_seq);
3283 list_del(&first->list);
/* all gaps closed: leave SREJ recovery */
3286 if (list_empty(SREJ_LIST(sk))) {
3287 pi->buffer_seq = pi->buffer_seq_srej;
3288 pi->conn_state &= ~L2CAP_CONN_SREJ_SENT;
3290 BT_DBG("sk %p, Exit SREJ_SENT", sk);
3293 struct srej_list *l;
3295 /* duplicated tx_seq */
3296 if (l2cap_add_to_srej_queue(sk, skb, tx_seq, sar) < 0)
/* already SREJ'd this seq: resend the request instead of a new one */
3299 list_for_each_entry(l, SREJ_LIST(sk), list) {
3300 if (l->tx_seq == tx_seq) {
3301 l2cap_resend_srejframe(sk, tx_seq);
3305 l2cap_send_srejframe(sk, tx_seq);
3308 expected_tx_seq_offset =
3309 (pi->expected_tx_seq - pi->buffer_seq) % 64;
3310 if (expected_tx_seq_offset < 0)
3311 expected_tx_seq_offset += 64;
3313 /* duplicated tx_seq */
3314 if (tx_seq_offset < expected_tx_seq_offset)
/* first out-of-sequence frame: enter SREJ recovery */
3317 pi->conn_state |= L2CAP_CONN_SREJ_SENT;
3319 BT_DBG("sk %p, Enter SREJ", sk);
3321 INIT_LIST_HEAD(SREJ_LIST(sk));
3322 pi->buffer_seq_srej = pi->buffer_seq;
3324 __skb_queue_head_init(SREJ_QUEUE(sk));
3325 __skb_queue_head_init(BUSY_QUEUE(sk));
3326 l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
3328 pi->conn_state |= L2CAP_CONN_SEND_PBIT;
3330 l2cap_send_srejframe(sk, tx_seq);
3332 del_timer(&pi->ack_timer);
/* expected in-sequence frame */
3337 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
/* during SREJ recovery, in-order frames also go via the SREJ queue */
3339 if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
3340 bt_cb(skb)->tx_seq = tx_seq;
3341 bt_cb(skb)->sar = sar;
3342 __skb_queue_tail(SREJ_QUEUE(sk), skb);
3346 err = l2cap_push_rx_skb(sk, skb, rx_control);
3350 if (rx_control & L2CAP_CTRL_FINAL) {
3351 if (pi->conn_state & L2CAP_CONN_REJ_ACT)
3352 pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
3354 l2cap_retransmit_frames(sk);
/* piggyback an ack once enough frames have accumulated */
3359 pi->num_acked = (pi->num_acked + 1) % num_to_ack;
3360 if (pi->num_acked == num_to_ack - 1)
/*
 * Handle a Receiver Ready (RR) supervisory frame.  The req_seq
 * acknowledges our frames; a P-bit demands a response (SREJ tail if in
 * recovery, otherwise I-frame/RR/RNR); an F-bit ends remote-busy and
 * may trigger retransmission; a plain RR resumes normal sending.
 */
3370 static inline void l2cap_data_channel_rrframe(struct sock *sk, u16 rx_control)
3372 struct l2cap_pinfo *pi = l2cap_pi(sk);
3374 BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk, __get_reqseq(rx_control),
3377 pi->expected_ack_seq = __get_reqseq(rx_control);
3378 l2cap_drop_acked_frames(sk);
3380 if (rx_control & L2CAP_CTRL_POLL) {
/* peer polled us: our next frame must carry the F-bit */
3381 pi->conn_state |= L2CAP_CONN_SEND_FBIT;
3382 if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
3383 if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
3384 (pi->unacked_frames > 0))
3385 __mod_retrans_timer();
3387 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3388 l2cap_send_srejtail(sk);
3390 l2cap_send_i_or_rr_or_rnr(sk);
3393 } else if (rx_control & L2CAP_CTRL_FINAL) {
3394 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3396 if (pi->conn_state & L2CAP_CONN_REJ_ACT)
3397 pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
3399 l2cap_retransmit_frames(sk);
3402 if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
3403 (pi->unacked_frames > 0))
3404 __mod_retrans_timer();
3406 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3407 if (pi->conn_state & L2CAP_CONN_SREJ_SENT)
3410 l2cap_ertm_send(sk);
/*
 * Handle a Reject (REJ) supervisory frame: the peer asks for
 * retransmission from req_seq onward.  Acked frames are dropped first;
 * if this REJ carries the F-bit while a REJ exception was active, the
 * exception is cleared before retransmitting.  While a poll (WAIT_F)
 * is outstanding, REJ_ACT marks the reject as already acted upon.
 */
3414 static inline void l2cap_data_channel_rejframe(struct sock *sk, u16 rx_control)
3416 struct l2cap_pinfo *pi = l2cap_pi(sk);
3417 u8 tx_seq = __get_reqseq(rx_control);
3419 BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk, tx_seq, rx_control);
3421 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3423 pi->expected_ack_seq = tx_seq;
3424 l2cap_drop_acked_frames(sk);
3426 if (rx_control & L2CAP_CTRL_FINAL) {
3427 if (pi->conn_state & L2CAP_CONN_REJ_ACT)
3428 pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
3430 l2cap_retransmit_frames(sk);
3432 l2cap_retransmit_frames(sk);
3434 if (pi->conn_state & L2CAP_CONN_WAIT_F)
3435 pi->conn_state |= L2CAP_CONN_REJ_ACT;
/*
 * Handle a Selective Reject (SREJ) supervisory frame: the peer asks
 * for retransmission of the single frame req_seq.  A P-bit also acks
 * everything up to req_seq and requires an F-bit response; an F-bit
 * clears a matching SREJ exception before retransmitting.  SREJ_ACT
 * plus srej_save_reqseq record the request when a poll is pending.
 */
3438 static inline void l2cap_data_channel_srejframe(struct sock *sk, u16 rx_control)
3440 struct l2cap_pinfo *pi = l2cap_pi(sk);
3441 u8 tx_seq = __get_reqseq(rx_control);
3443 BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk, tx_seq, rx_control);
3445 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3447 if (rx_control & L2CAP_CTRL_POLL) {
3448 pi->expected_ack_seq = tx_seq;
3449 l2cap_drop_acked_frames(sk);
3451 pi->conn_state |= L2CAP_CONN_SEND_FBIT;
3452 l2cap_retransmit_one_frame(sk, tx_seq);
3454 l2cap_ertm_send(sk);
3456 if (pi->conn_state & L2CAP_CONN_WAIT_F) {
3457 pi->srej_save_reqseq = tx_seq;
3458 pi->conn_state |= L2CAP_CONN_SREJ_ACT;
3460 } else if (rx_control & L2CAP_CTRL_FINAL) {
/* F-bit matching a saved SREJ: the exception is resolved */
3461 if ((pi->conn_state & L2CAP_CONN_SREJ_ACT) &&
3462 pi->srej_save_reqseq == tx_seq)
3463 pi->conn_state &= ~L2CAP_CONN_SREJ_ACT;
3465 l2cap_retransmit_one_frame(sk, tx_seq);
3467 l2cap_retransmit_one_frame(sk, tx_seq);
3468 if (pi->conn_state & L2CAP_CONN_WAIT_F) {
3469 pi->srej_save_reqseq = tx_seq;
3470 pi->conn_state |= L2CAP_CONN_SREJ_ACT;
/*
 * Handle a Receiver Not Ready (RNR) supervisory frame: the peer is
 * busy, so mark REMOTE_BUSY and drop newly-acked frames.  A P-bit must
 * be answered with an F-bit frame (RR/RNR, or SREJ tail while in SREJ
 * recovery); outside SREJ recovery the retransmission timer is stopped
 * since the peer cannot accept frames anyway.
 */
3475 static inline void l2cap_data_channel_rnrframe(struct sock *sk, u16 rx_control)
3477 struct l2cap_pinfo *pi = l2cap_pi(sk);
3478 u8 tx_seq = __get_reqseq(rx_control);
3480 BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk, tx_seq, rx_control);
3482 pi->conn_state |= L2CAP_CONN_REMOTE_BUSY;
3483 pi->expected_ack_seq = tx_seq;
3484 l2cap_drop_acked_frames(sk);
3486 if (rx_control & L2CAP_CTRL_POLL)
3487 pi->conn_state |= L2CAP_CONN_SEND_FBIT;
3489 if (!(pi->conn_state & L2CAP_CONN_SREJ_SENT)) {
3490 del_timer(&pi->retrans_timer);
3491 if (rx_control & L2CAP_CTRL_POLL)
3492 l2cap_send_rr_or_rnr(pi, L2CAP_CTRL_FINAL);
3496 if (rx_control & L2CAP_CTRL_POLL)
3497 l2cap_send_srejtail(sk);
3499 l2cap_send_sframe(pi, L2CAP_SUPER_RCV_READY);
/*
 * Dispatch a received supervisory (S-) frame.  First resolves a
 * pending poll if the F-bit answers our WAIT_F state (stops the
 * monitor timer, re-arms retransmission if frames are still unacked),
 * then routes by the supervise field to the RR/REJ/SREJ/RNR handlers.
 */
3502 static inline int l2cap_data_channel_sframe(struct sock *sk, u16 rx_control, struct sk_buff *skb)
3504 BT_DBG("sk %p rx_control 0x%4.4x len %d", sk, rx_control, skb->len);
3506 if (L2CAP_CTRL_FINAL & rx_control &&
3507 l2cap_pi(sk)->conn_state & L2CAP_CONN_WAIT_F) {
3508 del_timer(&l2cap_pi(sk)->monitor_timer);
3509 if (l2cap_pi(sk)->unacked_frames > 0)
3510 __mod_retrans_timer();
3511 l2cap_pi(sk)->conn_state &= ~L2CAP_CONN_WAIT_F;
3514 switch (rx_control & L2CAP_CTRL_SUPERVISE) {
3515 case L2CAP_SUPER_RCV_READY:
3516 l2cap_data_channel_rrframe(sk, rx_control);
3519 case L2CAP_SUPER_REJECT:
3520 l2cap_data_channel_rejframe(sk, rx_control);
3523 case L2CAP_SUPER_SELECT_REJECT:
3524 l2cap_data_channel_srejframe(sk, rx_control);
3527 case L2CAP_SUPER_RCV_NOT_READY:
3528 l2cap_data_channel_rnrframe(sk, rx_control);
/*
 * Validate and dispatch a raw ERTM frame.  Checks the FCS (corrupted
 * frames are silently dropped — ERTM recovery will request them
 * again), enforces the MPS payload limit, validates req_seq against
 * the window of frames we actually have in flight, and finally routes
 * the frame to the I-frame or S-frame handler.  Malformed frames
 * (invalid req_seq, wrong length for the frame type) disconnect.
 */
3536 static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb)
3538 struct l2cap_pinfo *pi = l2cap_pi(sk);
3541 int len, next_tx_seq_offset, req_seq_offset;
3543 control = get_unaligned_le16(skb->data);
3548 * We can just drop the corrupted I-frame here.
3549 * Receiver will miss it and start proper recovery
3550 * procedures and ask retransmission.
3552 if (l2cap_check_fcs(pi, skb))
/* a SAR-start I-frame carries an extra 2-byte SDU length field */
3555 if (__is_sar_start(control) && __is_iframe(control))
3558 if (pi->fcs == L2CAP_FCS_CRC16)
3561 if (len > pi->mps) {
3562 l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
3566 req_seq = __get_reqseq(control);
3567 req_seq_offset = (req_seq - pi->expected_ack_seq) % 64;
3568 if (req_seq_offset < 0)
3569 req_seq_offset += 64;
3571 next_tx_seq_offset =
3572 (pi->next_tx_seq - pi->expected_ack_seq) % 64;
3573 if (next_tx_seq_offset < 0)
3574 next_tx_seq_offset += 64;
3576 /* check for invalid req-seq */
/* peer acknowledged a frame we never sent: protocol violation */
3577 if (req_seq_offset > next_tx_seq_offset) {
3578 l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
3582 if (__is_iframe(control)) {
3584 l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
3588 l2cap_data_channel_iframe(sk, control, skb);
3592 l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
3596 l2cap_data_channel_sframe(sk, control, skb);
/*
 * Entry point for a frame arriving on a connection-oriented channel
 * (looked up by source CID).  Dispatches on the channel mode:
 * basic mode queues straight to the socket (dropping on overflow),
 * ERTM runs the full state machine (via the backlog if the socket is
 * owned by user context), and streaming mode validates FCS/length
 * and reassembles with loss tolerated.
 */
3606 static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
3608 struct l2cap_chan *chan;
3610 struct l2cap_pinfo *pi;
3615 chan = l2cap_get_chan_by_scid(&conn->chan_list, cid);
3617 BT_DBG("unknown cid 0x%4.4x", cid);
3624 BT_DBG("sk %p, len %d", sk, skb->len);
3626 if (sk->sk_state != BT_CONNECTED)
3630 case L2CAP_MODE_BASIC:
3631 /* If socket recv buffers overflows we drop data here
3632 * which is *bad* because L2CAP has to be reliable.
3633 * But we don't have any other choice. L2CAP doesn't
3634 * provide flow control mechanism. */
3636 if (pi->imtu < skb->len)
3639 if (!sock_queue_rcv_skb(sk, skb))
3643 case L2CAP_MODE_ERTM:
/* process directly unless user context holds the socket lock */
3644 if (!sock_owned_by_user(sk)) {
3645 l2cap_ertm_data_rcv(sk, skb);
3647 if (sk_add_backlog(sk, skb))
3653 case L2CAP_MODE_STREAMING:
3654 control = get_unaligned_le16(skb->data);
3658 if (l2cap_check_fcs(pi, skb))
3661 if (__is_sar_start(control))
3664 if (pi->fcs == L2CAP_FCS_CRC16)
/* streaming mode never carries S-frames */
3667 if (len > pi->mps || len < 0 || __is_sframe(control))
3670 tx_seq = __get_txseq(control);
/* lost frames are tolerated: just resync the expected sequence */
3672 if (pi->expected_tx_seq == tx_seq)
3673 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
3675 pi->expected_tx_seq = (tx_seq + 1) % 64;
3677 l2cap_streaming_reassembly_sdu(sk, skb, control);
3682 BT_DBG("sk %p: bad mode 0x%2.2x", sk, pi->mode);
/*
 * Deliver a connectionless-channel frame: look up the socket bound to
 * the PSM (for the connection's source address), verify socket state
 * and receive MTU, and queue the skb to the socket.
 */
3696 static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
3700 sk = l2cap_get_sock_by_psm(0, psm, conn->src);
3706 BT_DBG("sk %p, len %d", sk, skb->len);
3708 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_CONNECTED)
3711 if (l2cap_pi(sk)->imtu < skb->len)
3714 if (!sock_queue_rcv_skb(sk, skb))
/*
 * Demultiplex one complete L2CAP frame: strip the basic header,
 * sanity-check the advertised length against the skb, and route by
 * CID — signaling channels, the connectionless channel (which carries
 * a leading PSM), or a connection-oriented data channel.
 */
3726 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
3728 struct l2cap_hdr *lh = (void *) skb->data;
3732 skb_pull(skb, L2CAP_HDR_SIZE);
3733 cid = __le16_to_cpu(lh->cid);
3734 len = __le16_to_cpu(lh->len);
/* header length must match exactly what remains in the skb */
3736 if (len != skb->len) {
3741 BT_DBG("len %d, cid 0x%4.4x", len, cid);
3744 case L2CAP_CID_LE_SIGNALING:
3745 case L2CAP_CID_SIGNALING:
3746 l2cap_sig_channel(conn, skb);
3749 case L2CAP_CID_CONN_LESS:
/* connectionless frames start with a 2-byte PSM */
3750 psm = get_unaligned_le16(skb->data);
3752 l2cap_conless_channel(conn, psm, skb);
3756 l2cap_data_channel(conn, cid, skb);
3761 /* ---- L2CAP interface with lower layer (HCI) ---- */
/*
 * HCI connect-indication callback: decide whether to accept an
 * incoming ACL link.  Scans listening L2CAP sockets; an exact local
 * address match (lm1) takes precedence over a wildcard BDADDR_ANY
 * match (lm2).  The returned link-mode bits may include HCI_LM_MASTER
 * when the matching socket requested a role switch.
 */
3763 static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
3765 int exact = 0, lm1 = 0, lm2 = 0;
3766 register struct sock *sk;
3767 struct hlist_node *node;
3769 if (type != ACL_LINK)
3772 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
3774 /* Find listening sockets and check their link_mode */
3775 read_lock(&l2cap_sk_list.lock);
3776 sk_for_each(sk, node, &l2cap_sk_list.head) {
3777 if (sk->sk_state != BT_LISTEN)
/* socket bound to this adapter's own address: exact match */
3780 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
3781 lm1 |= HCI_LM_ACCEPT;
3782 if (l2cap_pi(sk)->role_switch)
3783 lm1 |= HCI_LM_MASTER;
/* socket listening on any address: wildcard match */
3785 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
3786 lm2 |= HCI_LM_ACCEPT;
3787 if (l2cap_pi(sk)->role_switch)
3788 lm2 |= HCI_LM_MASTER;
3791 read_unlock(&l2cap_sk_list.lock);
3793 return exact ? lm1 : lm2;
/*
 * HCI connect-confirmation callback: on success create/attach the
 * L2CAP connection object and mark it ready; on failure tear down any
 * existing L2CAP state for the link.  Only ACL and LE links are
 * handled.
 */
3796 static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
3798 struct l2cap_conn *conn;
3800 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
3802 if (!(hcon->type == ACL_LINK || hcon->type == LE_LINK))
3806 conn = l2cap_conn_add(hcon, status);
3808 l2cap_conn_ready(conn);
/* failure path: convert HCI status to errno and drop the conn */
3810 l2cap_conn_del(hcon, bt_err(status));
/*
 * HCI disconnect-indication callback: report the disconnect reason
 * recorded on the L2CAP connection (if any) for this ACL link.
 */
3817 static int l2cap_disconn_ind(struct hci_conn *hcon)
3819 struct l2cap_conn *conn = hcon->l2cap_data;
3821 BT_DBG("hcon %p", hcon);
3823 if (hcon->type != ACL_LINK || !conn)
3824 return conn->disc_reason;
/*
 * HCI disconnect-confirmation callback: tear down L2CAP state for the
 * ACL/LE link, converting the HCI reason code to an errno.
 */
3829 static int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
3831 BT_DBG("hcon %p reason %d", hcon, reason);
3833 if (!(hcon->type == ACL_LINK || hcon->type == LE_LINK))
3834 l2cap_conn_del(hcon, bt_err(reason));
/*
 * React to an encryption change on a SEQPACKET/STREAM channel.  If
 * encryption was dropped: medium-security channels get a 5-second
 * grace timer (re-encryption may follow), high-security channels are
 * closed immediately.  If encryption came up, a pending grace timer on
 * a medium-security channel is cancelled.
 */
3841 static inline void l2cap_check_encryption(struct sock *sk, u8 encrypt)
3843 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM)
3845 if (encrypt == 0x00) {
3846 if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM) {
3847 l2cap_sock_clear_timer(sk);
3848 l2cap_sock_set_timer(sk, HZ * 5);
3849 } else if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
3850 __l2cap_sock_close(sk, ECONNREFUSED);
3852 if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM)
3853 l2cap_sock_clear_timer(sk);
/*
 * HCI security-confirmation callback, invoked when authentication /
 * encryption completes for a link.  Walks every channel on the
 * connection: established channels get their encryption state checked;
 * channels in BT_CONNECT issue the deferred L2CAP Connect Request;
 * channels in BT_CONNECT2 answer the pending incoming connection with
 * success or a security-block response depending on the status.
 */
3858 static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
3860 struct l2cap_chan_list *l;
3861 struct l2cap_conn *conn = hcon->l2cap_data;
3862 struct l2cap_chan *chan;
3865 l = &conn->chan_list;
3867 BT_DBG("conn %p", conn);
3869 read_lock(&l->lock);
3871 for (chan = l->head; chan; chan = chan->next_c) {
3872 struct sock *sk = chan->sk;
3875 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_CONNECT_PEND) {
3880 if (!status && (sk->sk_state == BT_CONNECTED ||
3881 sk->sk_state == BT_CONFIG)) {
3882 l2cap_check_encryption(sk, encrypt);
/* security now complete: send the connect request we deferred */
3887 if (sk->sk_state == BT_CONNECT) {
3889 struct l2cap_conn_req req;
3890 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
3891 req.psm = l2cap_pi(sk)->psm;
3893 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
3894 l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
3896 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
3897 L2CAP_CONN_REQ, sizeof(req), &req);
3899 l2cap_sock_clear_timer(sk);
3900 l2cap_sock_set_timer(sk, HZ / 10);
/* incoming connection waiting on security result */
3902 } else if (sk->sk_state == BT_CONNECT2) {
3903 struct l2cap_conn_rsp rsp;
3907 sk->sk_state = BT_CONFIG;
3908 result = L2CAP_CR_SUCCESS;
/* security failed: reject and schedule disconnect */
3910 sk->sk_state = BT_DISCONN;
3911 l2cap_sock_set_timer(sk, HZ / 10);
3912 result = L2CAP_CR_SEC_BLOCK;
3915 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
3916 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
3917 rsp.result = cpu_to_le16(result);
3918 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
3919 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
3920 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
3926 read_unlock(&l->lock);
/*
 * HCI ACL-data callback: reassemble possibly-fragmented ACL packets
 * into complete L2CAP frames.  A start fragment (no ACL_CONT flag)
 * must begin with the basic L2CAP header; if it already contains the
 * whole frame it is processed directly, otherwise conn->rx_skb is
 * allocated to accumulate fragments and conn->rx_len tracks what is
 * still expected.  Continuation fragments are appended until rx_len
 * reaches zero, at which point the complete frame is dispatched.
 * Any inconsistency (short/long frames, unexpected fragment type,
 * MTU overflow) marks the connection unreliable.
 */
3933 static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
3935 struct l2cap_conn *conn = hcon->l2cap_data;
3936 conn = l2cap_conn_add(hcon, 0);
3941 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
3943 if (!(flags & ACL_CONT)) {
3944 struct l2cap_hdr *hdr;
3945 struct l2cap_chan *chan;
/* a new start fragment while one is pending: drop the stale one */
3950 BT_ERR("Unexpected start frame (len %d)", skb->len);
3951 kfree_skb(conn->rx_skb);
3952 conn->rx_skb = NULL;
3954 l2cap_conn_unreliable(conn, ECOMM);
3957 /* Start fragment always begin with Basic L2CAP header */
3958 if (skb->len < L2CAP_HDR_SIZE) {
3959 BT_ERR("Frame is too short (len %d)", skb->len);
3960 l2cap_conn_unreliable(conn, ECOMM);
3964 hdr = (struct l2cap_hdr *) skb->data;
3965 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
3966 cid = __le16_to_cpu(hdr->cid);
3968 if (len == skb->len) {
3969 /* Complete frame received */
3970 l2cap_recv_frame(conn, skb);
3974 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
3976 if (skb->len > len) {
3977 BT_ERR("Frame is too long (len %d, expected len %d)",
3979 l2cap_conn_unreliable(conn, ECOMM);
/* pre-check the destination channel's MTU before buffering */
3983 chan = l2cap_get_chan_by_scid(&conn->chan_list, cid);
3985 if (chan && chan->sk) {
3986 struct sock *sk = chan->sk;
3988 if (l2cap_pi(sk)->imtu < len - L2CAP_HDR_SIZE) {
3989 BT_ERR("Frame exceeding recv MTU (len %d, "
3991 l2cap_pi(sk)->imtu);
3993 l2cap_conn_unreliable(conn, ECOMM);
3999 /* Allocate skb for the complete frame (with header) */
/* NOTE(review): the NULL check for this GFP_ATOMIC allocation is not
 * visible here — confirm the elided lines bail out on failure. */
4000 conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
4004 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
4006 conn->rx_len = len - skb->len;
4008 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
/* continuation without a pending start frame */
4010 if (!conn->rx_len) {
4011 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
4012 l2cap_conn_unreliable(conn, ECOMM);
4016 if (skb->len > conn->rx_len) {
4017 BT_ERR("Fragment is too long (len %d, expected %d)",
4018 skb->len, conn->rx_len);
4019 kfree_skb(conn->rx_skb);
4020 conn->rx_skb = NULL;
4022 l2cap_conn_unreliable(conn, ECOMM);
4026 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
4028 conn->rx_len -= skb->len;
4030 if (!conn->rx_len) {
4031 /* Complete frame received */
4032 l2cap_recv_frame(conn, conn->rx_skb);
4033 conn->rx_skb = NULL;
/*
 * seq_file show callback for the "l2cap" debugfs entry: print one line
 * per L2CAP socket (addresses, state, PSM, CIDs, MTUs, security level)
 * while holding the socket-list lock with BHs disabled.
 */
4042 static int l2cap_debugfs_show(struct seq_file *f, void *p)
4045 struct hlist_node *node;
4047 read_lock_bh(&l2cap_sk_list.lock);
4049 sk_for_each(sk, node, &l2cap_sk_list.head) {
4050 struct l2cap_pinfo *pi = l2cap_pi(sk);
4052 seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
4053 batostr(&bt_sk(sk)->src),
4054 batostr(&bt_sk(sk)->dst),
4055 sk->sk_state, __le16_to_cpu(pi->psm),
4057 pi->imtu, pi->omtu, pi->sec_level,
4061 read_unlock_bh(&l2cap_sk_list.lock);
/* debugfs open: bind the seq_file single-show handler. */
4066 static int l2cap_debugfs_open(struct inode *inode, struct file *file)
4068 return single_open(file, l2cap_debugfs_show, inode->i_private);
/* File operations for the "l2cap" debugfs entry (seq_file based). */
4071 static const struct file_operations l2cap_debugfs_fops = {
4072 .open = l2cap_debugfs_open,
4074 .llseek = seq_lseek,
4075 .release = single_release,
/* dentry handle kept so l2cap_exit() can remove the debugfs file */
4078 static struct dentry *l2cap_debugfs;
/* HCI protocol registration: routes HCI events and ACL data to L2CAP. */
4080 static struct hci_proto l2cap_hci_proto = {
4082 .id = HCI_PROTO_L2CAP,
4083 .connect_ind = l2cap_connect_ind,
4084 .connect_cfm = l2cap_connect_cfm,
4085 .disconn_ind = l2cap_disconn_ind,
4086 .disconn_cfm = l2cap_disconn_cfm,
4087 .security_cfm = l2cap_security_cfm,
4088 .recv_acldata = l2cap_recv_acldata
/*
 * Module init: register the L2CAP socket family, create the
 * single-threaded busy workqueue, register with the HCI layer, and
 * create the optional debugfs entry.  Failure paths unwind in reverse
 * order (workqueue destroyed, sockets cleaned up).
 */
4091 int __init l2cap_init(void)
4095 err = l2cap_init_sockets();
4099 _busy_wq = create_singlethread_workqueue("l2cap");
4105 err = hci_register_proto(&l2cap_hci_proto);
4107 BT_ERR("L2CAP protocol registration failed");
4108 bt_sock_unregister(BTPROTO_L2CAP);
/* debugfs is best-effort: failure only logs an error */
4113 l2cap_debugfs = debugfs_create_file("l2cap", 0444,
4114 bt_debugfs, NULL, &l2cap_debugfs_fops);
4116 BT_ERR("Failed to create L2CAP debug file");
4122 destroy_workqueue(_busy_wq);
4123 l2cap_cleanup_sockets();
/*
 * Module exit: remove the debugfs entry, flush and destroy the busy
 * workqueue (must happen before sockets go away), unregister from HCI,
 * then tear down the socket family.
 */
4127 void l2cap_exit(void)
4129 debugfs_remove(l2cap_debugfs);
4131 flush_workqueue(_busy_wq);
4132 destroy_workqueue(_busy_wq);
4134 if (hci_unregister_proto(&l2cap_hci_proto) < 0)
4135 BT_ERR("L2CAP protocol unregistration failed");
4137 l2cap_cleanup_sockets();
/* Runtime knob: disable ERTM globally (writable via sysfs, mode 0644). */
4140 module_param(disable_ertm, bool, 0644);
4141 MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");