2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
7 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License version 2 as
11 published by the Free Software Foundation;
13 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
14 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
16 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
17 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
18 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
19 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
20 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
22 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
23 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
24 SOFTWARE IS DISCLAIMED.
27 /* Bluetooth L2CAP core. */
29 #include <linux/module.h>
31 #include <linux/types.h>
32 #include <linux/capability.h>
33 #include <linux/errno.h>
34 #include <linux/kernel.h>
35 #include <linux/sched.h>
36 #include <linux/slab.h>
37 #include <linux/poll.h>
38 #include <linux/fcntl.h>
39 #include <linux/init.h>
40 #include <linux/interrupt.h>
41 #include <linux/socket.h>
42 #include <linux/skbuff.h>
43 #include <linux/list.h>
44 #include <linux/device.h>
45 #include <linux/debugfs.h>
46 #include <linux/seq_file.h>
47 #include <linux/uaccess.h>
48 #include <linux/crc16.h>
51 #include <asm/system.h>
52 #include <asm/unaligned.h>
54 #include <net/bluetooth/bluetooth.h>
55 #include <net/bluetooth/hci_core.h>
56 #include <net/bluetooth/l2cap.h>
/* Local L2CAP feature mask advertised to peers; fixed-channel support
 * is always claimed. */
60 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
/* Fixed-channel bitmap sent in info responses; 0x02 presumably marks
 * the signalling channel -- TODO confirm against the L2CAP spec table. */
61 static u8 l2cap_fixed_chan[8] = { 0x02, };
/* Workqueue that drains the ERTM busy queue (see l2cap_busy_work). */
63 static struct workqueue_struct *_busy_wq;
/* Global list of all L2CAP sockets, protected by its embedded rwlock. */
65 struct bt_sock_list l2cap_sk_list = {
66 .lock = __RW_LOCK_UNLOCKED(l2cap_sk_list.lock)
/* Forward declarations for helpers defined later in this file. */
69 static void l2cap_busy_work(struct work_struct *work);
71 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
72 u8 code, u8 ident, u16 dlen, void *data);
73 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
75 static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb);
77 /* ---- L2CAP channels ---- */
/* Find a channel on @conn by its destination CID (the CID assigned by
 * the remote side).  Walks conn->chan_l; caller must hold conn->chan_lock. */
78 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn, u16 cid)
82 list_for_each_entry(c, &conn->chan_l, list) {
83 struct sock *s = c->sk;
84 if (l2cap_pi(s)->dcid == cid)
/* Find a channel on @conn by its source CID (the CID we allocated
 * locally).  Caller must hold conn->chan_lock. */
91 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
95 list_for_each_entry(c, &conn->chan_l, list) {
96 struct sock *s = c->sk;
97 if (l2cap_pi(s)->scid == cid)
103 /* Find channel with given SCID.
104 * Returns locked socket */
/* Locked wrapper around __l2cap_get_chan_by_scid(): takes the channel-list
 * read lock for the lookup.  NOTE(review): the socket-locking step implied
 * by the comment above is not visible here -- confirm in full source. */
105 static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
107 struct l2cap_chan *c;
109 read_lock(&conn->chan_lock);
110 c = __l2cap_get_chan_by_scid(conn, cid);
113 read_unlock(&conn->chan_lock);
/* Find a channel by the signalling-command identifier of its pending
 * request.  Caller must hold conn->chan_lock. */
117 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
119 struct l2cap_chan *c;
121 list_for_each_entry(c, &conn->chan_l, list) {
122 if (c->ident == ident)
/* Locked wrapper around __l2cap_get_chan_by_ident(): performs the lookup
 * under the channel-list read lock. */
128 static inline struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
130 struct l2cap_chan *c;
132 read_lock(&conn->chan_lock);
133 c = __l2cap_get_chan_by_ident(conn, ident);
136 read_unlock(&conn->chan_lock);
/* Allocate the first free dynamic source CID on @conn by linear scan of
 * [L2CAP_CID_DYN_START, L2CAP_CID_DYN_END).  Caller must hold chan_lock
 * so the chosen CID cannot race with a concurrent add. */
140 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
142 u16 cid = L2CAP_CID_DYN_START;
144 for (; cid < L2CAP_CID_DYN_END; cid++) {
145 if (!__l2cap_get_chan_by_scid(conn, cid))
/* Allocate a zeroed l2cap_chan bound to @sk.  GFP_ATOMIC because this
 * can run in softirq/bottom-half context. */
152 struct l2cap_chan *l2cap_chan_alloc(struct sock *sk)
154 struct l2cap_chan *chan;
156 chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
/* Attach @chan to @conn and assign CIDs/MTU by socket type:
 *  - SEQPACKET/STREAM: dynamic CID (or the fixed LE data CID on LE links)
 *  - DGRAM: connectionless CID
 *  - otherwise (raw): signalling CID
 * Caller must hold conn->chan_lock for write. */
165 static void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
167 struct sock *sk = chan->sk;
169 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
170 l2cap_pi(sk)->psm, l2cap_pi(sk)->dcid);
/* 0x13 = "remote user terminated connection" default disconnect reason. */
172 conn->disc_reason = 0x13;
174 l2cap_pi(sk)->conn = conn;
176 if (sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM) {
177 if (conn->hcon->type == LE_LINK) {
/* LE links use the fixed LE data channel in both directions. */
179 l2cap_pi(sk)->omtu = L2CAP_LE_DEFAULT_MTU;
180 l2cap_pi(sk)->scid = L2CAP_CID_LE_DATA;
181 l2cap_pi(sk)->dcid = L2CAP_CID_LE_DATA;
183 /* Alloc CID for connection-oriented socket */
184 l2cap_pi(sk)->scid = l2cap_alloc_cid(conn);
185 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
187 } else if (sk->sk_type == SOCK_DGRAM) {
188 /* Connectionless socket */
189 l2cap_pi(sk)->scid = L2CAP_CID_CONN_LESS;
190 l2cap_pi(sk)->dcid = L2CAP_CID_CONN_LESS;
191 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
193 /* Raw socket can send/recv signalling messages only */
194 l2cap_pi(sk)->scid = L2CAP_CID_SIGNALING;
195 l2cap_pi(sk)->dcid = L2CAP_CID_SIGNALING;
196 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
201 list_add(&chan->list, &conn->chan_l);
205 * Must be called on the locked socket. */
/* Detach @chan from its connection and tear it down: stop timers, unlink
 * from conn->chan_l, drop the hci_conn reference, mark the socket closed
 * and zapped, and purge any pending ERTM state.  @err is the error to
 * report to the socket (propagation path not visible in this view). */
206 void l2cap_chan_del(struct l2cap_chan *chan, int err)
208 struct sock *sk = chan->sk;
209 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
210 struct sock *parent = bt_sk(sk)->parent;
212 l2cap_sock_clear_timer(sk);
214 BT_DBG("chan %p, conn %p, err %d", chan, conn, err);
217 /* Delete from channel list */
218 write_lock_bh(&conn->chan_lock);
219 list_del(&chan->list);
220 write_unlock_bh(&conn->chan_lock);
223 l2cap_pi(sk)->conn = NULL;
224 hci_conn_put(conn->hcon);
227 sk->sk_state = BT_CLOSED;
228 sock_set_flag(sk, SOCK_ZAPPED);
/* If this was an unaccepted child, unlink it and wake the listener. */
234 bt_accept_unlink(sk);
235 parent->sk_data_ready(parent, 0);
237 sk->sk_state_change(sk);
/* Skip ERTM cleanup unless configuration completed in both directions. */
239 if (!(chan->conf_state & L2CAP_CONF_OUTPUT_DONE &&
240 chan->conf_state & L2CAP_CONF_INPUT_DONE))
243 skb_queue_purge(&chan->tx_q);
245 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
246 struct srej_list *l, *tmp;
/* Stop all ERTM timers and drop queued/SREJ'd frames. */
248 del_timer(&chan->retrans_timer);
249 del_timer(&chan->monitor_timer);
250 del_timer(&chan->ack_timer);
252 skb_queue_purge(&chan->srej_q);
253 skb_queue_purge(&chan->busy_q);
255 list_for_each_entry_safe(l, tmp, &chan->srej_l, list) {
/* Map the channel's security level to an HCI authentication type.
 * Raw sockets request dedicated bonding, PSM 0x0001 (SDP) never bonds
 * (and LOW is downgraded to the SDP security level), everything else
 * uses general bonding; MITM variants are used at BT_SECURITY_HIGH. */
265 static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
267 struct sock *sk = chan->sk;
269 if (sk->sk_type == SOCK_RAW) {
270 switch (chan->sec_level) {
271 case BT_SECURITY_HIGH:
272 return HCI_AT_DEDICATED_BONDING_MITM;
273 case BT_SECURITY_MEDIUM:
274 return HCI_AT_DEDICATED_BONDING;
276 return HCI_AT_NO_BONDING;
278 } else if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
279 if (chan->sec_level == BT_SECURITY_LOW)
280 chan->sec_level = BT_SECURITY_SDP;
282 if (chan->sec_level == BT_SECURITY_HIGH)
283 return HCI_AT_NO_BONDING_MITM;
285 return HCI_AT_NO_BONDING;
287 switch (chan->sec_level) {
288 case BT_SECURITY_HIGH:
289 return HCI_AT_GENERAL_BONDING_MITM;
290 case BT_SECURITY_MEDIUM:
291 return HCI_AT_GENERAL_BONDING;
293 return HCI_AT_NO_BONDING;
298 /* Service level security */
/* Ask the HCI layer to enforce the channel's security level on the
 * underlying ACL link; returns hci_conn_security()'s verdict. */
299 static inline int l2cap_check_security(struct l2cap_chan *chan)
301 struct l2cap_conn *conn = l2cap_pi(chan->sk)->conn;
304 auth_type = l2cap_get_auth_type(chan);
306 return hci_conn_security(conn->hcon, chan->sec_level, auth_type);
/* Allocate the next signalling-command identifier for @conn, wrapping
 * within the kernel-reserved range under conn->lock. */
309 u8 l2cap_get_ident(struct l2cap_conn *conn)
313 /* Get next available identifier.
314 * 1 - 128 are used by kernel.
315 * 129 - 199 are reserved.
316 * 200 - 254 are used by utilities like l2ping, etc.
319 spin_lock_bh(&conn->lock);
/* Wrap-around handling past 128 is in elided lines. */
321 if (++conn->tx_ident > 128)
326 spin_unlock_bh(&conn->lock);
/* Build an L2CAP signalling command (code/ident/payload) and transmit it
 * on @conn's ACL link, using non-flushable packets when the controller
 * supports them. */
331 void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
333 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
336 BT_DBG("code 0x%2.2x", code);
341 if (lmp_no_flush_capable(conn->hcon->hdev))
342 flags = ACL_START_NO_FLUSH;
346 hci_send_acl(conn->hcon, skb, flags);
/* Build and send an ERTM S-frame carrying @control.  Consumes any pending
 * Final/Poll bit state, appends a CRC16 FCS when negotiated, and hands the
 * frame to the ACL layer.  No-op unless the socket is BT_CONNECTED. */
349 static inline void l2cap_send_sframe(struct l2cap_chan *chan, u16 control)
352 struct l2cap_hdr *lh;
353 struct l2cap_pinfo *pi = l2cap_pi(chan->sk);
354 struct l2cap_conn *conn = pi->conn;
355 struct sock *sk = (struct sock *)pi;
356 int count, hlen = L2CAP_HDR_SIZE + 2;
359 if (sk->sk_state != BT_CONNECTED)
/* FCS adds 2 bytes to the header length (increment in elided line). */
362 if (pi->fcs == L2CAP_FCS_CRC16)
365 BT_DBG("chan %p, control 0x%2.2x", chan, control);
367 count = min_t(unsigned int, conn->mtu, hlen);
368 control |= L2CAP_CTRL_FRAME_TYPE;
/* Piggy-back a pending Final bit, then clear the request. */
370 if (chan->conn_state & L2CAP_CONN_SEND_FBIT) {
371 control |= L2CAP_CTRL_FINAL;
372 chan->conn_state &= ~L2CAP_CONN_SEND_FBIT;
/* Likewise for a pending Poll bit. */
375 if (chan->conn_state & L2CAP_CONN_SEND_PBIT) {
376 control |= L2CAP_CTRL_POLL;
377 chan->conn_state &= ~L2CAP_CONN_SEND_PBIT;
380 skb = bt_skb_alloc(count, GFP_ATOMIC);
384 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
385 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
386 lh->cid = cpu_to_le16(pi->dcid);
387 put_unaligned_le16(control, skb_put(skb, 2));
/* FCS covers header + control (everything but the FCS field itself). */
389 if (pi->fcs == L2CAP_FCS_CRC16) {
390 u16 fcs = crc16(0, (u8 *)lh, count - 2);
391 put_unaligned_le16(fcs, skb_put(skb, 2));
394 if (lmp_no_flush_capable(conn->hcon->hdev))
395 flags = ACL_START_NO_FLUSH;
399 hci_send_acl(pi->conn->hcon, skb, flags);
/* Send a Receiver Ready supervisory frame, or Receiver Not Ready when the
 * channel is locally busy (also records that an RNR went out).  The current
 * buffer_seq is stamped into the ReqSeq field as the acknowledgement. */
402 static inline void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, u16 control)
404 if (chan->conn_state & L2CAP_CONN_LOCAL_BUSY) {
405 control |= L2CAP_SUPER_RCV_NOT_READY;
406 chan->conn_state |= L2CAP_CONN_RNR_SENT;
408 control |= L2CAP_SUPER_RCV_READY;
410 control |= chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
412 l2cap_send_sframe(chan, control);
/* True when no Connection Request is outstanding for @chan. */
415 static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
417 return !(chan->conf_state & L2CAP_CONF_CONNECT_PEND);
/* Kick off channel establishment.  If the peer's feature mask is already
 * known (or being fetched), send a Connection Request once security allows
 * and none is pending; otherwise first issue an Information Request for
 * the feature mask and arm the info timer. */
420 static void l2cap_do_start(struct l2cap_chan *chan)
422 struct sock *sk = chan->sk;
423 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
425 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
/* Feature-mask exchange still in flight: wait for it to finish. */
426 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
429 if (l2cap_check_security(chan) &&
430 __l2cap_no_conn_pending(chan)) {
431 struct l2cap_conn_req req;
432 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
433 req.psm = l2cap_pi(sk)->psm;
435 chan->ident = l2cap_get_ident(conn);
436 chan->conf_state |= L2CAP_CONF_CONNECT_PEND;
438 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ,
442 struct l2cap_info_req req;
443 req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
445 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
446 conn->info_ident = l2cap_get_ident(conn);
448 mod_timer(&conn->info_timer, jiffies +
449 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
451 l2cap_send_cmd(conn, conn->info_ident,
452 L2CAP_INFO_REQ, sizeof(req), &req);
/* Non-zero when @mode (ERTM/streaming) is supported by both the local
 * feature mask and the remote @feat_mask.  The local mask is widened
 * here (condition guarding line 460 is elided). */
456 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
458 u32 local_feat_mask = l2cap_feat_mask;
460 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
463 case L2CAP_MODE_ERTM:
464 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
465 case L2CAP_MODE_STREAMING:
466 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
/* Send a Disconnection Request for @chan and move the socket to
 * BT_DISCONN.  ERTM timers are stopped first so no retransmission fires
 * while the channel is being torn down.  @err handling is in elided lines. */
472 void l2cap_send_disconn_req(struct l2cap_conn *conn, struct l2cap_chan *chan, int err)
475 struct l2cap_disconn_req req;
482 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
483 del_timer(&chan->retrans_timer);
484 del_timer(&chan->monitor_timer);
485 del_timer(&chan->ack_timer);
488 req.dcid = cpu_to_le16(l2cap_pi(sk)->dcid);
489 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
490 l2cap_send_cmd(conn, l2cap_get_ident(conn),
491 L2CAP_DISCONN_REQ, sizeof(req), &req);
493 sk->sk_state = BT_DISCONN;
497 /* ---- L2CAP connections ---- */
/* Drive every connection-oriented channel on @conn forward one step:
 * BT_CONNECT channels get a Connection Request (or are closed if their
 * mode is unsupported and STATE2_DEVICE is set); BT_CONNECT2 channels get
 * a Connection Response whose result depends on security and defer_setup,
 * followed by the first Configuration Request when appropriate. */
498 static void l2cap_conn_start(struct l2cap_conn *conn)
500 struct l2cap_chan *chan, *tmp;
502 BT_DBG("conn %p", conn);
504 read_lock(&conn->chan_lock);
506 list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
507 struct sock *sk = chan->sk;
/* Only SEQPACKET/STREAM channels take part in connect/config. */
511 if (sk->sk_type != SOCK_SEQPACKET &&
512 sk->sk_type != SOCK_STREAM) {
517 if (sk->sk_state == BT_CONNECT) {
518 struct l2cap_conn_req req;
520 if (!l2cap_check_security(chan) ||
521 !__l2cap_no_conn_pending(chan)) {
/* Required mode unavailable on a mode-mandatory channel: abort it. */
526 if (!l2cap_mode_supported(l2cap_pi(sk)->mode,
528 && chan->conf_state &
529 L2CAP_CONF_STATE2_DEVICE) {
530 /* __l2cap_sock_close() calls list_del(chan)
531 * so release the lock */
532 read_unlock_bh(&conn->chan_lock);
533 __l2cap_sock_close(sk, ECONNRESET);
534 read_lock_bh(&conn->chan_lock);
539 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
540 req.psm = l2cap_pi(sk)->psm;
542 chan->ident = l2cap_get_ident(conn);
543 chan->conf_state |= L2CAP_CONF_CONNECT_PEND;
545 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ,
548 } else if (sk->sk_state == BT_CONNECT2) {
549 struct l2cap_conn_rsp rsp;
/* Response carries our SCID/DCID swapped relative to the request. */
551 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
552 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
554 if (l2cap_check_security(chan)) {
555 if (bt_sk(sk)->defer_setup) {
/* Deferred accept: report PEND and let userspace decide. */
556 struct sock *parent = bt_sk(sk)->parent;
557 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
558 rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
559 parent->sk_data_ready(parent, 0);
562 sk->sk_state = BT_CONFIG;
563 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
564 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
/* Security not yet satisfied: answer pending/authentication. */
567 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
568 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
571 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
/* Skip config if one was already sent or the response failed. */
574 if (chan->conf_state & L2CAP_CONF_REQ_SENT ||
575 rsp.result != L2CAP_CR_SUCCESS) {
580 chan->conf_state |= L2CAP_CONF_REQ_SENT;
581 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
582 l2cap_build_conf_req(chan, buf), buf);
583 chan->num_conf_req++;
589 read_unlock(&conn->chan_lock);
592 /* Find socket with cid and source bdaddr.
593 * Returns closest match, locked.
/* Preference order: exact source-address match wins; a BDADDR_ANY
 * listener is kept in @sk1 as fallback.  NOTE(review): the locking step
 * implied by the comment above happens in elided lines. */
595 static struct sock *l2cap_get_sock_by_scid(int state, __le16 cid, bdaddr_t *src)
597 struct sock *sk = NULL, *sk1 = NULL;
598 struct hlist_node *node;
600 read_lock(&l2cap_sk_list.lock);
602 sk_for_each(sk, node, &l2cap_sk_list.head) {
603 if (state && sk->sk_state != state)
606 if (l2cap_pi(sk)->scid == cid) {
608 if (!bacmp(&bt_sk(sk)->src, src))
612 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
617 read_unlock(&l2cap_sk_list.lock);
/* node != NULL means the loop broke on an exact match. */
619 return node ? sk : sk1;
/* Handle an incoming LE link: if a socket is listening on the LE data
 * CID, clone a child socket + channel for it, attach them to @conn,
 * enqueue the child on the listener's accept queue and wake it up. */
622 static void l2cap_le_conn_ready(struct l2cap_conn *conn)
624 struct sock *parent, *sk;
625 struct l2cap_chan *chan;
629 /* Check if we have socket listening on cid */
630 parent = l2cap_get_sock_by_scid(BT_LISTEN, L2CAP_CID_LE_DATA,
635 bh_lock_sock(parent);
637 /* Check for backlog size */
638 if (sk_acceptq_is_full(parent)) {
639 BT_DBG("backlog full %d", parent->sk_ack_backlog);
643 sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP, GFP_ATOMIC);
647 chan = l2cap_chan_alloc(sk);
653 l2cap_pi(sk)->chan = chan;
655 write_lock_bh(&conn->chan_lock);
657 hci_conn_hold(conn->hcon);
659 l2cap_sock_init(sk, parent);
661 bacpy(&bt_sk(sk)->src, conn->src);
662 bacpy(&bt_sk(sk)->dst, conn->dst);
664 bt_accept_enqueue(parent, sk);
666 __l2cap_chan_add(conn, chan);
668 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
670 sk->sk_state = BT_CONNECTED;
/* Wake the listener so accept() can pick up the new child. */
671 parent->sk_data_ready(parent, 0);
673 write_unlock_bh(&conn->chan_lock);
676 bh_unlock_sock(parent);
/* Called when the underlying link is up.  For incoming LE links, spawn
 * the LE data channel; then for every channel: LE and non-stream sockets
 * become connected immediately, while BT_CONNECT channels proceed with
 * the L2CAP connect/config sequence via l2cap_do_start(). */
679 static void l2cap_conn_ready(struct l2cap_conn *conn)
681 struct l2cap_chan *chan;
683 BT_DBG("conn %p", conn);
685 if (!conn->hcon->out && conn->hcon->type == LE_LINK)
686 l2cap_le_conn_ready(conn);
688 read_lock(&conn->chan_lock);
690 list_for_each_entry(chan, &conn->chan_l, list) {
691 struct sock *sk = chan->sk;
695 if (conn->hcon->type == LE_LINK) {
696 l2cap_sock_clear_timer(sk);
697 sk->sk_state = BT_CONNECTED;
698 sk->sk_state_change(sk);
/* Raw/dgram sockets need no L2CAP-level handshake. */
701 if (sk->sk_type != SOCK_SEQPACKET &&
702 sk->sk_type != SOCK_STREAM) {
703 l2cap_sock_clear_timer(sk);
704 sk->sk_state = BT_CONNECTED;
705 sk->sk_state_change(sk);
706 } else if (sk->sk_state == BT_CONNECT)
707 l2cap_do_start(chan);
712 read_unlock(&conn->chan_lock);
715 /* Notify sockets that we cannot guarantee reliability anymore */
/* Walk all channels and (in elided lines) report @err to those that
 * requested force_reliable semantics. */
716 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
718 struct l2cap_chan *chan;
720 BT_DBG("conn %p", conn);
722 read_lock(&conn->chan_lock);
724 list_for_each_entry(chan, &conn->chan_l, list) {
725 struct sock *sk = chan->sk;
727 if (chan->force_reliable)
731 read_unlock(&conn->chan_lock);
/* Info-request timer callback: give up on the feature-mask exchange,
 * mark it done and let pending channels proceed. */
734 static void l2cap_info_timeout(unsigned long arg)
736 struct l2cap_conn *conn = (void *) arg;
738 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
739 conn->info_ident = 0;
741 l2cap_conn_start(conn);
/* Create (or return the existing) l2cap_conn for @hcon: allocate, pick
 * the MTU from the controller (LE MTU for LE links when set), record the
 * endpoint addresses, init locks/lists, and arm the info timer for
 * non-LE links.  GFP_ATOMIC: may run in bottom-half context. */
744 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
746 struct l2cap_conn *conn = hcon->l2cap_data;
751 conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
755 hcon->l2cap_data = conn;
758 BT_DBG("hcon %p conn %p", hcon, conn);
760 if (hcon->hdev->le_mtu && hcon->type == LE_LINK)
761 conn->mtu = hcon->hdev->le_mtu;
763 conn->mtu = hcon->hdev->acl_mtu;
765 conn->src = &hcon->hdev->bdaddr;
766 conn->dst = &hcon->dst;
770 spin_lock_init(&conn->lock);
771 rwlock_init(&conn->chan_lock);
773 INIT_LIST_HEAD(&conn->chan_l);
/* LE links have no info-request exchange, so no info timer. */
775 if (hcon->type != LE_LINK)
776 setup_timer(&conn->info_timer, l2cap_info_timeout,
777 (unsigned long) conn);
/* Default disconnect reason: 0x13, remote user terminated connection. */
779 conn->disc_reason = 0x13;
/* Tear down @hcon's L2CAP state: free any partially reassembled frame,
 * delete every channel with @err, stop the info timer if it was armed,
 * and detach from the hci_conn. */
784 static void l2cap_conn_del(struct hci_conn *hcon, int err)
786 struct l2cap_conn *conn = hcon->l2cap_data;
787 struct l2cap_chan *chan, *l;
793 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
795 kfree_skb(conn->rx_skb);
798 list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
801 l2cap_chan_del(chan, err);
806 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
807 del_timer_sync(&conn->info_timer);
809 hcon->l2cap_data = NULL;
/* Locked wrapper around __l2cap_chan_add(): adds @chan under the
 * channel-list write lock. */
813 static inline void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
815 write_lock_bh(&conn->chan_lock);
816 __l2cap_chan_add(conn, chan);
817 write_unlock_bh(&conn->chan_lock);
820 /* ---- Socket interface ---- */
822 /* Find socket with psm and source bdaddr.
823 * Returns closest match.
/* Same matching policy as l2cap_get_sock_by_scid() but keyed on PSM:
 * exact source-address match preferred, BDADDR_ANY kept as fallback. */
825 static struct sock *l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
827 struct sock *sk = NULL, *sk1 = NULL;
828 struct hlist_node *node;
830 read_lock(&l2cap_sk_list.lock);
832 sk_for_each(sk, node, &l2cap_sk_list.head) {
833 if (state && sk->sk_state != state)
836 if (l2cap_pi(sk)->psm == psm) {
838 if (!bacmp(&bt_sk(sk)->src, src))
842 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
847 read_unlock(&l2cap_sk_list.lock);
/* node != NULL means the loop broke on an exact match. */
849 return node ? sk : sk1;
/* Initiate an outgoing L2CAP connection for @chan: route to an HCI
 * device, create the ACL or LE link (LE if dcid is the LE data CID),
 * attach the channel to the resulting l2cap_conn, and either complete
 * immediately (link already up) or wait for l2cap_conn_ready().
 * Returns 0 or a negative errno (error paths mostly in elided lines). */
852 int l2cap_chan_connect(struct l2cap_chan *chan)
854 struct sock *sk = chan->sk;
855 bdaddr_t *src = &bt_sk(sk)->src;
856 bdaddr_t *dst = &bt_sk(sk)->dst;
857 struct l2cap_conn *conn;
858 struct hci_conn *hcon;
859 struct hci_dev *hdev;
863 BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst),
866 hdev = hci_get_route(dst, src);
868 return -EHOSTUNREACH;
870 hci_dev_lock_bh(hdev);
872 auth_type = l2cap_get_auth_type(chan);
874 if (l2cap_pi(sk)->dcid == L2CAP_CID_LE_DATA)
875 hcon = hci_connect(hdev, LE_LINK, dst,
876 chan->sec_level, auth_type);
878 hcon = hci_connect(hdev, ACL_LINK, dst,
879 chan->sec_level, auth_type);
886 conn = l2cap_conn_add(hcon, 0);
893 /* Update source addr of the socket */
894 bacpy(src, conn->src);
896 l2cap_chan_add(conn, chan);
898 sk->sk_state = BT_CONNECT;
899 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
901 if (hcon->state == BT_CONNECTED) {
/* Raw/dgram sockets connect as soon as security is satisfied. */
902 if (sk->sk_type != SOCK_SEQPACKET &&
903 sk->sk_type != SOCK_STREAM) {
904 l2cap_sock_clear_timer(sk);
905 if (l2cap_check_security(chan))
906 sk->sk_state = BT_CONNECTED;
908 l2cap_do_start(chan);
914 hci_dev_unlock_bh(hdev);
/* Block (interruptibly) until every ERTM frame on @sk has been acked or
 * the connection goes away.  Returns 0, a signal errno, or a pending
 * socket error.  Timeout setup is in elided lines. */
919 int __l2cap_wait_ack(struct sock *sk)
921 DECLARE_WAITQUEUE(wait, current);
925 add_wait_queue(sk_sleep(sk), &wait);
926 while ((l2cap_pi(sk)->chan->unacked_frames > 0 && l2cap_pi(sk)->conn)) {
927 set_current_state(TASK_INTERRUPTIBLE);
932 if (signal_pending(current)) {
933 err = sock_intr_errno(timeo);
938 timeo = schedule_timeout(timeo);
941 err = sock_error(sk);
945 set_current_state(TASK_RUNNING);
946 remove_wait_queue(sk_sleep(sk), &wait);
/* ERTM monitor timer: if the peer kept not responding past
 * remote_max_tx retries, kill the channel; otherwise re-arm the monitor
 * and poll again with an RR/RNR carrying the Poll bit. */
950 static void l2cap_monitor_timeout(unsigned long arg)
952 struct l2cap_chan *chan = (void *) arg;
953 struct sock *sk = chan->sk;
955 BT_DBG("chan %p", chan);
958 if (chan->retry_count >= chan->remote_max_tx) {
959 l2cap_send_disconn_req(l2cap_pi(sk)->conn, chan, ECONNABORTED);
965 __mod_monitor_timer();
967 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
/* ERTM retransmission timer: start the poll sequence -- reset the retry
 * count, switch to the monitor timer, mark that we are waiting for a
 * Final bit, and poll the peer. */
971 static void l2cap_retrans_timeout(unsigned long arg)
973 struct l2cap_chan *chan = (void *) arg;
974 struct sock *sk = chan->sk;
976 BT_DBG("chan %p", chan);
979 chan->retry_count = 1;
980 __mod_monitor_timer();
982 chan->conn_state |= L2CAP_CONN_WAIT_F;
984 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
/* Drop acknowledged I-frames from the head of tx_q up to (but not
 * including) expected_ack_seq, and stop the retransmission timer once
 * nothing remains unacked. */
988 static void l2cap_drop_acked_frames(struct l2cap_chan *chan)
992 while ((skb = skb_peek(&chan->tx_q)) &&
993 chan->unacked_frames) {
994 if (bt_cb(skb)->tx_seq == chan->expected_ack_seq)
997 skb = skb_dequeue(&chan->tx_q);
1000 chan->unacked_frames--;
1003 if (!chan->unacked_frames)
1004 del_timer(&chan->retrans_timer);
/* Transmit one fully built L2CAP frame on the channel's ACL link,
 * requesting a non-flushable packet when the channel is not flushable
 * and the controller supports it. */
1007 void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
1009 struct sock *sk = chan->sk;
1010 struct hci_conn *hcon = l2cap_pi(sk)->conn->hcon;
1013 BT_DBG("chan %p, skb %p len %d", chan, skb, skb->len);
1015 if (!chan->flushable && lmp_no_flush_capable(hcon->hdev))
1016 flags = ACL_START_NO_FLUSH;
1020 hci_send_acl(hcon, skb, flags);
/* Streaming mode: flush the whole tx queue.  For each frame, stamp the
 * next TxSeq into the control field, recompute the trailing CRC16 FCS if
 * negotiated, send it, and advance next_tx_seq modulo 64. */
1023 void l2cap_streaming_send(struct l2cap_chan *chan)
1025 struct sock *sk = chan->sk;
1026 struct sk_buff *skb;
1027 struct l2cap_pinfo *pi = l2cap_pi(sk);
1030 while ((skb = skb_dequeue(&chan->tx_q))) {
1031 control = get_unaligned_le16(skb->data + L2CAP_HDR_SIZE);
1032 control |= chan->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT;
1033 put_unaligned_le16(control, skb->data + L2CAP_HDR_SIZE);
1035 if (pi->fcs == L2CAP_FCS_CRC16) {
/* FCS covers everything except its own two trailing bytes. */
1036 fcs = crc16(0, (u8 *)skb->data, skb->len - 2);
1037 put_unaligned_le16(fcs, skb->data + skb->len - 2);
1040 l2cap_do_send(chan, skb);
/* ERTM/streaming sequence numbers are 6-bit, hence modulo 64. */
1042 chan->next_tx_seq = (chan->next_tx_seq + 1) % 64;
/* Retransmit the single I-frame with sequence number @tx_seq.  Locates
 * the frame in tx_q, aborts the channel if remote_max_tx retries are
 * exhausted, then clones it, refreshes ReqSeq/Final bits and the FCS,
 * and resends the clone. */
1046 static void l2cap_retransmit_one_frame(struct l2cap_chan *chan, u8 tx_seq)
1048 struct sock *sk = chan->sk;
1049 struct l2cap_pinfo *pi = l2cap_pi(sk);
1050 struct sk_buff *skb, *tx_skb;
1053 skb = skb_peek(&chan->tx_q);
1058 if (bt_cb(skb)->tx_seq == tx_seq)
1061 if (skb_queue_is_last(&chan->tx_q, skb))
1064 } while ((skb = skb_queue_next(&chan->tx_q, skb)));
1066 if (chan->remote_max_tx &&
1067 bt_cb(skb)->retries == chan->remote_max_tx) {
1068 l2cap_send_disconn_req(pi->conn, chan, ECONNABORTED);
/* Clone shares the data buffer; the original keeps the retry count. */
1072 tx_skb = skb_clone(skb, GFP_ATOMIC);
1073 bt_cb(skb)->retries++;
1074 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
/* Keep only the SAR bits; seq/ack fields are rebuilt below. */
1075 control &= L2CAP_CTRL_SAR;
1077 if (chan->conn_state & L2CAP_CONN_SEND_FBIT) {
1078 control |= L2CAP_CTRL_FINAL;
1079 chan->conn_state &= ~L2CAP_CONN_SEND_FBIT;
1082 control |= (chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1083 | (tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1085 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1087 if (pi->fcs == L2CAP_FCS_CRC16) {
1088 fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
1089 put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
1092 l2cap_do_send(chan, tx_skb);
/* ERTM transmit pump: send frames from tx_send_head while the tx window
 * has room.  Each frame is cloned, gets fresh ReqSeq/TxSeq/Final bits and
 * FCS, and the original stays queued for potential retransmission.
 * Aborts the channel when remote_max_tx is exhausted.  Returns a count
 * (frames_sent return path is in elided lines). */
1095 int l2cap_ertm_send(struct l2cap_chan *chan)
1097 struct sk_buff *skb, *tx_skb;
1098 struct sock *sk = chan->sk;
1099 struct l2cap_pinfo *pi = l2cap_pi(sk);
1103 if (sk->sk_state != BT_CONNECTED)
1106 while ((skb = chan->tx_send_head) && (!l2cap_tx_window_full(chan))) {
1108 if (chan->remote_max_tx &&
1109 bt_cb(skb)->retries == chan->remote_max_tx) {
1110 l2cap_send_disconn_req(pi->conn, chan, ECONNABORTED);
1114 tx_skb = skb_clone(skb, GFP_ATOMIC);
1116 bt_cb(skb)->retries++;
1118 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
/* Keep only the SAR bits; seq/ack fields are rebuilt below. */
1119 control &= L2CAP_CTRL_SAR;
1121 if (chan->conn_state & L2CAP_CONN_SEND_FBIT) {
1122 control |= L2CAP_CTRL_FINAL;
1123 chan->conn_state &= ~L2CAP_CONN_SEND_FBIT;
1125 control |= (chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1126 | (chan->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1127 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1130 if (pi->fcs == L2CAP_FCS_CRC16) {
/* skb and tx_skb share the same data buffer (skb_clone), so writing
 * the FCS through skb->data also updates the outgoing clone. */
1131 fcs = crc16(0, (u8 *)skb->data, tx_skb->len - 2);
1132 put_unaligned_le16(fcs, skb->data + tx_skb->len - 2);
1135 l2cap_do_send(chan, tx_skb);
1137 __mod_retrans_timer();
1139 bt_cb(skb)->tx_seq = chan->next_tx_seq;
/* 6-bit sequence space. */
1140 chan->next_tx_seq = (chan->next_tx_seq + 1) % 64;
/* First transmission of this frame: it is now awaiting an ack. */
1142 if (bt_cb(skb)->retries == 1)
1143 chan->unacked_frames++;
1145 chan->frames_sent++;
1147 if (skb_queue_is_last(&chan->tx_q, skb))
1148 chan->tx_send_head = NULL;
1150 chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);
/* Rewind the send pointer to the start of tx_q, reset next_tx_seq to the
 * last acknowledged sequence number, and resend via l2cap_ertm_send(). */
1158 static int l2cap_retransmit_frames(struct l2cap_chan *chan)
1162 if (!skb_queue_empty(&chan->tx_q))
1163 chan->tx_send_head = chan->tx_q.next;
1165 chan->next_tx_seq = chan->expected_ack_seq;
1166 ret = l2cap_ertm_send(chan);
/* Acknowledge received frames: send RNR if locally busy; otherwise try to
 * piggy-back the ack on pending I-frames via l2cap_ertm_send(), and only
 * fall back to an explicit RR S-frame when nothing was sent. */
1170 static void l2cap_send_ack(struct l2cap_chan *chan)
1174 control |= chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
1176 if (chan->conn_state & L2CAP_CONN_LOCAL_BUSY) {
1177 control |= L2CAP_SUPER_RCV_NOT_READY;
1178 chan->conn_state |= L2CAP_CONN_RNR_SENT;
1179 l2cap_send_sframe(chan, control);
/* >0 means I-frames went out carrying the ack already. */
1183 if (l2cap_ertm_send(chan) > 0)
1186 control |= L2CAP_SUPER_RCV_READY;
1187 l2cap_send_sframe(chan, control);
/* Send a SREJ S-frame with the Final bit, requesting the sequence number
 * of the last entry on the SREJ list. */
1190 static void l2cap_send_srejtail(struct l2cap_chan *chan)
1192 struct srej_list *tail;
1195 control = L2CAP_SUPER_SELECT_REJECT;
1196 control |= L2CAP_CTRL_FINAL;
1198 tail = list_entry((&chan->srej_l)->prev, struct srej_list, list);
1199 control |= tail->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
1201 l2cap_send_sframe(chan, control);
/* Copy @len bytes of user iovec data into @skb: the first @count bytes go
 * into the linear area, the remainder into MTU-sized fragment skbs chained
 * on frag_list.  Returns 0 or a negative errno (elided error paths). */
1204 static inline int l2cap_skbuff_fromiovec(struct sock *sk, struct msghdr *msg, int len, int count, struct sk_buff *skb)
1206 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1207 struct sk_buff **frag;
1210 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
1216 /* Continuation fragments (no L2CAP header) */
1217 frag = &skb_shinfo(skb)->frag_list;
1219 count = min_t(unsigned int, conn->mtu, len);
1221 *frag = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err);
1224 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
1230 frag = &(*frag)->next;
/* Build a connectionless (G-frame) PDU: L2CAP header + 2-byte PSM +
 * payload copied from the user iovec.  Returns the skb or ERR_PTR. */
1236 struct sk_buff *l2cap_create_connless_pdu(struct sock *sk, struct msghdr *msg, size_t len)
1238 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1239 struct sk_buff *skb;
/* hlen = L2CAP header + 2 bytes for the PSM field. */
1240 int err, count, hlen = L2CAP_HDR_SIZE + 2;
1241 struct l2cap_hdr *lh;
1243 BT_DBG("sk %p len %d", sk, (int)len);
1245 count = min_t(unsigned int, (conn->mtu - hlen), len);
1246 skb = bt_skb_send_alloc(sk, count + hlen,
1247 msg->msg_flags & MSG_DONTWAIT, &err);
1249 return ERR_PTR(err);
1251 /* Create L2CAP header */
1252 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1253 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1254 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1255 put_unaligned_le16(l2cap_pi(sk)->psm, skb_put(skb, 2));
1257 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1258 if (unlikely(err < 0)) {
1260 return ERR_PTR(err);
/* Build a basic-mode B-frame PDU: plain L2CAP header + payload from the
 * user iovec.  Returns the skb or ERR_PTR. */
1265 struct sk_buff *l2cap_create_basic_pdu(struct sock *sk, struct msghdr *msg, size_t len)
1267 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1268 struct sk_buff *skb;
1269 int err, count, hlen = L2CAP_HDR_SIZE;
1270 struct l2cap_hdr *lh;
1272 BT_DBG("sk %p len %d", sk, (int)len);
1274 count = min_t(unsigned int, (conn->mtu - hlen), len);
1275 skb = bt_skb_send_alloc(sk, count + hlen,
1276 msg->msg_flags & MSG_DONTWAIT, &err);
1278 return ERR_PTR(err);
1280 /* Create L2CAP header */
1281 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1282 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1283 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1285 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1286 if (unlikely(err < 0)) {
1288 return ERR_PTR(err);
/* Build an ERTM/streaming I-frame PDU: L2CAP header + 2-byte control
 * field, optional 2-byte SDU length (for SDU-start segments), payload,
 * and a zero FCS placeholder when CRC16 is negotiated (real FCS is
 * stamped at send time).  Returns the skb or ERR_PTR. */
1293 struct sk_buff *l2cap_create_iframe_pdu(struct sock *sk, struct msghdr *msg, size_t len, u16 control, u16 sdulen)
1295 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1296 struct sk_buff *skb;
1297 int err, count, hlen = L2CAP_HDR_SIZE + 2;
1298 struct l2cap_hdr *lh;
1300 BT_DBG("sk %p len %d", sk, (int)len);
1303 return ERR_PTR(-ENOTCONN);
/* FCS adds 2 bytes; sdulen handling is in elided lines. */
1308 if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16)
1311 count = min_t(unsigned int, (conn->mtu - hlen), len);
1312 skb = bt_skb_send_alloc(sk, count + hlen,
1313 msg->msg_flags & MSG_DONTWAIT, &err);
1315 return ERR_PTR(err);
1317 /* Create L2CAP header */
1318 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1319 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1320 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1321 put_unaligned_le16(control, skb_put(skb, 2));
1323 put_unaligned_le16(sdulen, skb_put(skb, 2));
1325 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1326 if (unlikely(err < 0)) {
1328 return ERR_PTR(err);
/* Reserve space for the FCS; the value is computed on transmit. */
1331 if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16)
1332 put_unaligned_le16(0, skb_put(skb, 2));
1334 bt_cb(skb)->retries = 0;
/* Segment an SDU larger than remote_mps into a START / CONTINUE... / END
 * sequence of I-frames on a temporary queue, then splice the queue onto
 * tx_q and point tx_send_head at it if transmission is idle.  Returns a
 * size/error value assembled in elided lines. */
1338 int l2cap_sar_segment_sdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
1340 struct sock *sk = chan->sk;
1341 struct sk_buff *skb;
1342 struct sk_buff_head sar_queue;
1346 skb_queue_head_init(&sar_queue);
/* First segment carries the total SDU length in the sdulen field. */
1347 control = L2CAP_SDU_START;
1348 skb = l2cap_create_iframe_pdu(sk, msg, chan->remote_mps, control, len);
1350 return PTR_ERR(skb);
1352 __skb_queue_tail(&sar_queue, skb);
1353 len -= chan->remote_mps;
1354 size += chan->remote_mps;
1359 if (len > chan->remote_mps) {
1360 control = L2CAP_SDU_CONTINUE;
1361 buflen = chan->remote_mps;
1363 control = L2CAP_SDU_END;
1367 skb = l2cap_create_iframe_pdu(sk, msg, buflen, control, 0);
/* On failure drop everything built so far. */
1369 skb_queue_purge(&sar_queue);
1370 return PTR_ERR(skb);
1373 __skb_queue_tail(&sar_queue, skb);
1377 skb_queue_splice_tail(&sar_queue, &chan->tx_q);
1378 if (chan->tx_send_head == NULL)
1379 chan->tx_send_head = sar_queue.next;
/* Configuration finished: clear config state and the channel timer, then
 * wake whoever is waiting -- the connecting socket for outgoing channels,
 * or the listening parent for incoming ones. */
1384 static void l2cap_chan_ready(struct sock *sk)
1386 struct sock *parent = bt_sk(sk)->parent;
1387 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
1389 BT_DBG("sk %p, parent %p", sk, parent);
1391 chan->conf_state = 0;
1392 l2cap_sock_clear_timer(sk);
1395 /* Outgoing channel.
1396 * Wake up socket sleeping on connect.
1398 sk->sk_state = BT_CONNECTED;
1399 sk->sk_state_change(sk);
1401 /* Incoming channel.
1402 * Wake up socket sleeping on accept.
1404 parent->sk_data_ready(parent, 0);
1408 /* Copy frame to all raw sockets on that connection */
/* Clone @skb to every SOCK_RAW channel on @conn (except the originator,
 * per the elided check) and queue the clone on each socket. */
1409 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
1411 struct sk_buff *nskb;
1412 struct l2cap_chan *chan;
1414 BT_DBG("conn %p", conn);
1416 read_lock(&conn->chan_lock);
1417 list_for_each_entry(chan, &conn->chan_l, list) {
1418 struct sock *sk = chan->sk;
1419 if (sk->sk_type != SOCK_RAW)
1422 /* Don't send frame to the socket it came from */
1425 nskb = skb_clone(skb, GFP_ATOMIC);
/* sock_queue_rcv_skb() failure: clone is dropped in elided lines. */
1429 if (sock_queue_rcv_skb(sk, nskb))
1432 read_unlock(&conn->chan_lock);
1435 /* ---- L2CAP signalling commands ---- */
/* Assemble a signalling PDU: L2CAP header (signalling CID, LE variant on
 * LE links) + command header + @dlen bytes of @data, fragmenting overflow
 * past the link MTU into frag_list skbs.  Returns the skb or NULL. */
1436 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
1437 u8 code, u8 ident, u16 dlen, void *data)
1439 struct sk_buff *skb, **frag;
1440 struct l2cap_cmd_hdr *cmd;
1441 struct l2cap_hdr *lh;
1444 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
1445 conn, code, ident, dlen);
1447 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
1448 count = min_t(unsigned int, conn->mtu, len);
1450 skb = bt_skb_alloc(count, GFP_ATOMIC);
1454 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1455 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
1457 if (conn->hcon->type == LE_LINK)
1458 lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
1460 lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);
1462 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
1465 cmd->len = cpu_to_le16(dlen);
/* First chunk of payload that fits in the linear skb. */
1468 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
1469 memcpy(skb_put(skb, count), data, count);
1475 /* Continuation fragments (no L2CAP header) */
1476 frag = &skb_shinfo(skb)->frag_list;
1478 count = min_t(unsigned int, conn->mtu, len);
1480 *frag = bt_skb_alloc(count, GFP_ATOMIC);
1484 memcpy(skb_put(*frag, count), data, count);
1489 frag = &(*frag)->next;
/* Parse one configuration option at *@ptr into @type/@olen/@val,
 * widening 1/2/4-byte values and passing larger ones by pointer.
 * Advancing *@ptr happens in elided lines; returns the option length. */
1499 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
1501 struct l2cap_conf_opt *opt = *ptr;
1504 len = L2CAP_CONF_OPT_SIZE + opt->len;
1512 *val = *((u8 *) opt->val);
1516 *val = get_unaligned_le16(opt->val);
1520 *val = get_unaligned_le32(opt->val);
/* Larger options are returned as a pointer to the raw bytes. */
1524 *val = (unsigned long) opt->val;
1528 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
/* Append one configuration option (type/len/value) at *ptr and advance
 * *ptr past it.  Mirrors l2cap_get_conf_opt(): 1/2/4-byte values are
 * stored little-endian, larger values are memcpy'd from (void *)val. */
1532 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
1534 struct l2cap_conf_opt *opt = *ptr;
1536 BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
1543 *((u8 *) opt->val) = val;
1547 put_unaligned_le16(val, opt->val);
1551 put_unaligned_le32(val, opt->val);
/* len > 4: val is actually a pointer to the option payload. */
1555 memcpy(opt->val, (void *) val, len);
1559 *ptr += L2CAP_CONF_OPT_SIZE + len;
/* ERTM ack timer callback: send a pending acknowledgement for the channel.
 * Runs in timer (softirq) context, hence bh_lock_sock(). */
1562 static void l2cap_ack_timeout(unsigned long arg)
1564 struct l2cap_chan *chan = (void *) arg;
1566 bh_lock_sock(chan->sk);
1567 l2cap_send_ack(chan);
1568 bh_unlock_sock(chan->sk);
/* Initialise per-channel ERTM state: reset sequence counters, arm the
 * retransmission/monitor/ack timers, set up the SREJ and busy queues,
 * and route backlogged packets through the ERTM receive path. */
1571 static inline void l2cap_ertm_init(struct l2cap_chan *chan)
1573 struct sock *sk = chan->sk;
1575 chan->expected_ack_seq = 0;
1576 chan->unacked_frames = 0;
1577 chan->buffer_seq = 0;
1578 chan->num_acked = 0;
1579 chan->frames_sent = 0;
1581 setup_timer(&chan->retrans_timer, l2cap_retrans_timeout,
1582 (unsigned long) chan);
1583 setup_timer(&chan->monitor_timer, l2cap_monitor_timeout,
1584 (unsigned long) chan);
1585 setup_timer(&chan->ack_timer, l2cap_ack_timeout, (unsigned long) chan);
1587 skb_queue_head_init(&chan->srej_q);
1588 skb_queue_head_init(&chan->busy_q);
1590 INIT_LIST_HEAD(&chan->srej_l);
1592 INIT_WORK(&chan->busy_work, l2cap_busy_work);
/* Packets queued on the socket backlog are processed by the ERTM rcv path. */
1594 sk->sk_backlog_rcv = l2cap_ertm_data_rcv;
/* Pick the channel mode to use: keep STREAMING/ERTM only if the remote
 * feature mask supports it, otherwise fall back to basic mode.
 * (The switch head and the supported-mode return are elided in this view.) */
1597 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
1600 case L2CAP_MODE_STREAMING:
1601 case L2CAP_MODE_ERTM:
1602 if (l2cap_mode_supported(mode, remote_feat_mask))
1606 return L2CAP_MODE_BASIC;
/* Build an outgoing Configuration Request into 'data' for this channel:
 * MTU option (when not the default), an RFC option describing the chosen
 * mode (basic/ERTM/streaming), and optionally an FCS option when the peer
 * advertises FCS support and no-FCS was negotiated.  Returns the request
 * length (the return expression itself is elided in this view). */
1610 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
1612 struct l2cap_pinfo *pi = l2cap_pi(chan->sk);
1613 struct l2cap_conf_req *req = data;
1614 struct l2cap_conf_rfc rfc = { .mode = pi->mode };
1615 void *ptr = req->data;
1617 BT_DBG("chan %p", chan);
/* Only (re)select the mode on the first configuration round. */
1619 if (chan->num_conf_req || chan->num_conf_rsp)
1623 case L2CAP_MODE_STREAMING:
1624 case L2CAP_MODE_ERTM:
/* STATE2 devices keep their configured mode instead of renegotiating. */
1625 if (chan->conf_state & L2CAP_CONF_STATE2_DEVICE)
1630 pi->mode = l2cap_select_mode(rfc.mode, pi->conn->feat_mask);
/* Advertise our receive MTU only when it differs from the default. */
1635 if (pi->imtu != L2CAP_DEFAULT_MTU)
1636 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);
1639 case L2CAP_MODE_BASIC:
/* If the peer supports neither ERTM nor streaming, basic mode needs
 * no RFC option at all (the skip/break is elided here). */
1640 if (!(pi->conn->feat_mask & L2CAP_FEAT_ERTM) &&
1641 !(pi->conn->feat_mask & L2CAP_FEAT_STREAMING))
1644 rfc.mode = L2CAP_MODE_BASIC;
1646 rfc.max_transmit = 0;
1647 rfc.retrans_timeout = 0;
1648 rfc.monitor_timeout = 0;
1649 rfc.max_pdu_size = 0;
1651 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
1652 (unsigned long) &rfc);
1655 case L2CAP_MODE_ERTM:
1656 rfc.mode = L2CAP_MODE_ERTM;
1657 rfc.txwin_size = pi->tx_win;
1658 rfc.max_transmit = pi->max_tx;
1659 rfc.retrans_timeout = 0;
1660 rfc.monitor_timeout = 0;
/* Cap the PDU size so a full PDU (plus 10 bytes of overhead —
 * presumably ERTM header + FCS; confirm) fits in the ACL MTU. */
1661 rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
1662 if (L2CAP_DEFAULT_MAX_PDU_SIZE > pi->conn->mtu - 10)
1663 rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);
1665 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
1666 (unsigned long) &rfc);
1668 if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
/* Offer "no FCS" when we prefer it or the peer already asked for it. */
1671 if (pi->fcs == L2CAP_FCS_NONE ||
1672 chan->conf_state & L2CAP_CONF_NO_FCS_RECV) {
1673 pi->fcs = L2CAP_FCS_NONE;
1674 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);
1678 case L2CAP_MODE_STREAMING:
1679 rfc.mode = L2CAP_MODE_STREAMING;
1681 rfc.max_transmit = 0;
1682 rfc.retrans_timeout = 0;
1683 rfc.monitor_timeout = 0;
1684 rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
1685 if (L2CAP_DEFAULT_MAX_PDU_SIZE > pi->conn->mtu - 10)
1686 rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);
1688 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
1689 (unsigned long) &rfc);
1691 if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
1694 if (pi->fcs == L2CAP_FCS_NONE ||
1695 chan->conf_state & L2CAP_CONF_NO_FCS_RECV) {
1696 pi->fcs = L2CAP_FCS_NONE;
1697 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);
1702 req->dcid = cpu_to_le16(pi->dcid);
1703 req->flags = cpu_to_le16(0);
/* Parse the accumulated peer Configuration Request (chan->conf_req) and
 * build our Configuration Response into 'data'.  Walks the option list,
 * records MTU/RFC/FCS, rejects unknown non-hint options, negotiates the
 * mode, and echoes back the options we require.  Returns the response
 * length, or -ECONNREFUSED when the mode cannot be agreed. */
1708 static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
1710 struct l2cap_pinfo *pi = l2cap_pi(chan->sk);
1711 struct l2cap_conf_rsp *rsp = data;
1712 void *ptr = rsp->data;
1713 void *req = chan->conf_req;
1714 int len = chan->conf_len;
1715 int type, hint, olen;
1717 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
1718 u16 mtu = L2CAP_DEFAULT_MTU;
1719 u16 result = L2CAP_CONF_SUCCESS;
1721 BT_DBG("chan %p", chan);
/* First pass: decode every option the peer sent. */
1723 while (len >= L2CAP_CONF_OPT_SIZE) {
1724 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
/* Hint options may be ignored silently; mandatory ones may not. */
1726 hint = type & L2CAP_CONF_HINT;
1727 type &= L2CAP_CONF_MASK;
1730 case L2CAP_CONF_MTU:
1734 case L2CAP_CONF_FLUSH_TO:
1738 case L2CAP_CONF_QOS:
1741 case L2CAP_CONF_RFC:
1742 if (olen == sizeof(rfc))
1743 memcpy(&rfc, (void *) val, olen);
1746 case L2CAP_CONF_FCS:
/* Peer asked for no FCS; remember it for our own request/response. */
1747 if (val == L2CAP_FCS_NONE)
1748 chan->conf_state |= L2CAP_CONF_NO_FCS_RECV;
/* Unknown mandatory option: reply CONF_UNKNOWN listing the type. */
1756 result = L2CAP_CONF_UNKNOWN;
1757 *((u8 *) ptr++) = type;
/* Mode selection happens only on the first req/rsp exchange. */
1762 if (chan->num_conf_rsp || chan->num_conf_req > 1)
1766 case L2CAP_MODE_STREAMING:
1767 case L2CAP_MODE_ERTM:
1768 if (!(chan->conf_state & L2CAP_CONF_STATE2_DEVICE)) {
1769 pi->mode = l2cap_select_mode(rfc.mode,
1770 pi->conn->feat_mask);
/* STATE2 device: peer must match our fixed mode or be refused. */
1774 if (pi->mode != rfc.mode)
1775 return -ECONNREFUSED;
1781 if (pi->mode != rfc.mode) {
1782 result = L2CAP_CONF_UNACCEPT;
1783 rfc.mode = pi->mode;
/* Only one round of mode renegotiation is allowed. */
1785 if (chan->num_conf_rsp == 1)
1786 return -ECONNREFUSED;
1788 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
1789 sizeof(rfc), (unsigned long) &rfc);
1793 if (result == L2CAP_CONF_SUCCESS) {
1794 /* Configure output options and let the other side know
1795 * which ones we don't like. */
/* Reject MTUs below the spec minimum. */
1797 if (mtu < L2CAP_DEFAULT_MIN_MTU)
1798 result = L2CAP_CONF_UNACCEPT;
1801 chan->conf_state |= L2CAP_CONF_MTU_DONE;
1803 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);
1806 case L2CAP_MODE_BASIC:
1807 pi->fcs = L2CAP_FCS_NONE;
1808 chan->conf_state |= L2CAP_CONF_MODE_DONE;
1811 case L2CAP_MODE_ERTM:
/* Adopt the peer's transmit window and retransmit limit. */
1812 chan->remote_tx_win = rfc.txwin_size;
1813 chan->remote_max_tx = rfc.max_transmit;
1815 if (le16_to_cpu(rfc.max_pdu_size) > pi->conn->mtu - 10)
1816 rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);
1818 chan->remote_mps = le16_to_cpu(rfc.max_pdu_size);
/* NOTE(review): rfc.retrans_timeout/monitor_timeout are wire (le16)
 * fields, yet host constants are converted with le16_to_cpu here —
 * this looks like it should be cpu_to_le16; confirm against the
 * upstream fix for this endianness bug. */
1820 rfc.retrans_timeout =
1821 le16_to_cpu(L2CAP_DEFAULT_RETRANS_TO);
1822 rfc.monitor_timeout =
1823 le16_to_cpu(L2CAP_DEFAULT_MONITOR_TO);
1825 chan->conf_state |= L2CAP_CONF_MODE_DONE;
1827 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
1828 sizeof(rfc), (unsigned long) &rfc);
1832 case L2CAP_MODE_STREAMING:
1833 if (le16_to_cpu(rfc.max_pdu_size) > pi->conn->mtu - 10)
1834 rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);
1836 chan->remote_mps = le16_to_cpu(rfc.max_pdu_size);
1838 chan->conf_state |= L2CAP_CONF_MODE_DONE;
1840 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
1841 sizeof(rfc), (unsigned long) &rfc);
/* Unsupported mode: unaccept and echo our mode back. */
1846 result = L2CAP_CONF_UNACCEPT;
1848 memset(&rfc, 0, sizeof(rfc));
1849 rfc.mode = pi->mode;
1852 if (result == L2CAP_CONF_SUCCESS)
1853 chan->conf_state |= L2CAP_CONF_OUTPUT_DONE;
1855 rsp->scid = cpu_to_le16(pi->dcid);
1856 rsp->result = cpu_to_le16(result);
1857 rsp->flags = cpu_to_le16(0x0000);
/* Parse the peer's Configuration Response ('rsp', 'len' bytes) and build a
 * follow-up Configuration Request into 'data', adjusting any options the
 * peer rejected (MTU, flush timeout, RFC).  On success, commit the agreed
 * ERTM/streaming timers and MPS to the socket state.  Returns the new
 * request length (final return elided) or -ECONNREFUSED on mode mismatch. */
1862 static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, void *data, u16 *result)
1864 struct sock *sk = chan->sk;
1865 struct l2cap_pinfo *pi = l2cap_pi(sk);
1866 struct l2cap_conf_req *req = data;
1867 void *ptr = req->data;
1870 struct l2cap_conf_rfc rfc;
1872 BT_DBG("sk %p, rsp %p, len %d, req %p", sk, rsp, len, data);
1874 while (len >= L2CAP_CONF_OPT_SIZE) {
1875 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
1878 case L2CAP_CONF_MTU:
/* Peer proposed an MTU below minimum: unaccept and use the minimum. */
1879 if (val < L2CAP_DEFAULT_MIN_MTU) {
1880 *result = L2CAP_CONF_UNACCEPT;
1881 pi->imtu = L2CAP_DEFAULT_MIN_MTU;
1884 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);
1887 case L2CAP_CONF_FLUSH_TO:
1889 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
1893 case L2CAP_CONF_RFC:
1894 if (olen == sizeof(rfc))
1895 memcpy(&rfc, (void *)val, olen);
/* STATE2 devices refuse any mode other than the configured one. */
1897 if ((chan->conf_state & L2CAP_CONF_STATE2_DEVICE) &&
1898 rfc.mode != pi->mode)
1899 return -ECONNREFUSED;
1903 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
1904 sizeof(rfc), (unsigned long) &rfc);
/* Basic mode cannot be renegotiated into another mode. */
1909 if (pi->mode == L2CAP_MODE_BASIC && pi->mode != rfc.mode)
1910 return -ECONNREFUSED;
1912 pi->mode = rfc.mode;
/* On success, latch the negotiated ERTM/streaming parameters. */
1914 if (*result == L2CAP_CONF_SUCCESS) {
1916 case L2CAP_MODE_ERTM:
1917 pi->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
1918 pi->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
1919 pi->mps = le16_to_cpu(rfc.max_pdu_size);
1921 case L2CAP_MODE_STREAMING:
1922 pi->mps = le16_to_cpu(rfc.max_pdu_size);
1926 req->dcid = cpu_to_le16(pi->dcid);
1927 req->flags = cpu_to_le16(0x0000);
/* Fill in a minimal Configuration Response header (scid/result/flags) in
 * 'data'.  Returns the response length (return statement elided here). */
1932 static int l2cap_build_conf_rsp(struct sock *sk, void *data, u16 result, u16 flags)
1934 struct l2cap_conf_rsp *rsp = data;
1935 void *ptr = rsp->data;
1937 BT_DBG("sk %p", sk);
1939 rsp->scid = cpu_to_le16(l2cap_pi(sk)->dcid);
1940 rsp->result = cpu_to_le16(result);
1941 rsp->flags = cpu_to_le16(flags);
/* Complete a deferred connection accept: send the success Connection
 * Response for the pending request (chan->ident), then kick off
 * configuration with our first Configuration Request unless one was
 * already sent. */
1946 void __l2cap_connect_rsp_defer(struct sock *sk)
1948 struct l2cap_conn_rsp rsp;
1949 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1950 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
1953 sk->sk_state = BT_CONFIG;
1955 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
1956 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
1957 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
1958 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
/* Reply with the ident of the original Connection Request. */
1959 l2cap_send_cmd(conn, chan->ident,
1960 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
/* Don't send a second Configuration Request if one is outstanding. */
1962 if (chan->conf_state & L2CAP_CONF_REQ_SENT)
1965 chan->conf_state |= L2CAP_CONF_REQ_SENT;
1966 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
1967 l2cap_build_conf_req(chan, buf), buf);
1968 chan->num_conf_req++;
/* Extract the RFC option from a successful Configuration Response and
 * commit the negotiated timers/MPS for ERTM (or MPS for streaming).
 * A no-op for basic-mode channels. */
1971 static void l2cap_conf_rfc_get(struct sock *sk, void *rsp, int len)
1973 struct l2cap_pinfo *pi = l2cap_pi(sk);
1976 struct l2cap_conf_rfc rfc;
1978 BT_DBG("sk %p, rsp %p, len %d", sk, rsp, len);
/* Only ERTM/streaming carry RFC parameters worth extracting. */
1980 if ((pi->mode != L2CAP_MODE_ERTM) && (pi->mode != L2CAP_MODE_STREAMING))
1983 while (len >= L2CAP_CONF_OPT_SIZE) {
1984 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
1987 case L2CAP_CONF_RFC:
1988 if (olen == sizeof(rfc))
1989 memcpy(&rfc, (void *)val, olen);
1996 case L2CAP_MODE_ERTM:
1997 pi->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
1998 pi->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
1999 pi->mps = le16_to_cpu(rfc.max_pdu_size);
2001 case L2CAP_MODE_STREAMING:
2002 pi->mps = le16_to_cpu(rfc.max_pdu_size);
/* Handle an incoming Command Reject.  If it answers our outstanding
 * Information Request (feature mask), treat the exchange as done and
 * resume starting queued channels. */
2006 static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2008 struct l2cap_cmd_rej *rej = (struct l2cap_cmd_rej *) data;
/* Only "command not understood" (0x0000) is acted upon. */
2010 if (rej->reason != 0x0000)
2013 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
2014 cmd->ident == conn->info_ident) {
2015 del_timer(&conn->info_timer);
2017 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2018 conn->info_ident = 0;
2020 l2cap_conn_start(conn);
/* Handle an incoming Connection Request: find a listening socket for the
 * PSM, enforce link security (except for SDP), allocate a child socket
 * and channel, attach it to the connection, and answer with success,
 * pending (authentication/authorisation), or an error result.  May also
 * trigger the initial Information Request / Configuration Request.
 * NOTE(review): several error-path gotos and labels are elided in this
 * fragmentary view. */
2026 static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2028 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
2029 struct l2cap_conn_rsp rsp;
2030 struct l2cap_chan *chan = NULL;
2031 struct sock *parent, *sk = NULL;
2032 int result, status = L2CAP_CS_NO_INFO;
2034 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
2035 __le16 psm = req->psm;
2037 BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);
2039 /* Check if we have socket listening on psm */
2040 parent = l2cap_get_sock_by_psm(BT_LISTEN, psm, conn->src);
2042 result = L2CAP_CR_BAD_PSM;
2046 bh_lock_sock(parent);
2048 /* Check if the ACL is secure enough (if not SDP) */
/* PSM 0x0001 is SDP and is exempt from the link-mode check. */
2049 if (psm != cpu_to_le16(0x0001) &&
2050 !hci_conn_check_link_mode(conn->hcon)) {
2051 conn->disc_reason = 0x05;
2052 result = L2CAP_CR_SEC_BLOCK;
2056 result = L2CAP_CR_NO_MEM;
2058 /* Check for backlog size */
2059 if (sk_acceptq_is_full(parent)) {
2060 BT_DBG("backlog full %d", parent->sk_ack_backlog);
2064 sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP, GFP_ATOMIC);
2068 chan = l2cap_chan_alloc(sk);
2070 l2cap_sock_kill(sk);
2074 l2cap_pi(sk)->chan = chan;
2076 write_lock_bh(&conn->chan_lock);
2078 /* Check if we already have channel with that dcid */
2079 if (__l2cap_get_chan_by_dcid(conn, scid)) {
2080 write_unlock_bh(&conn->chan_lock);
2081 sock_set_flag(sk, SOCK_ZAPPED);
2082 l2cap_sock_kill(sk);
/* Hold the ACL for the lifetime of the new channel. */
2086 hci_conn_hold(conn->hcon);
2088 l2cap_sock_init(sk, parent);
2089 bacpy(&bt_sk(sk)->src, conn->src);
2090 bacpy(&bt_sk(sk)->dst, conn->dst);
2091 l2cap_pi(sk)->psm = psm;
2092 l2cap_pi(sk)->dcid = scid;
2094 bt_accept_enqueue(parent, sk);
2096 __l2cap_chan_add(conn, chan);
/* Our source CID becomes the peer's destination CID in the response. */
2098 dcid = l2cap_pi(sk)->scid;
2100 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
2102 chan->ident = cmd->ident;
2104 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
2105 if (l2cap_check_security(chan)) {
/* Userspace asked to defer accept: report authorisation pending. */
2106 if (bt_sk(sk)->defer_setup) {
2107 sk->sk_state = BT_CONNECT2;
2108 result = L2CAP_CR_PEND;
2109 status = L2CAP_CS_AUTHOR_PEND;
2110 parent->sk_data_ready(parent, 0);
2112 sk->sk_state = BT_CONFIG;
2113 result = L2CAP_CR_SUCCESS;
2114 status = L2CAP_CS_NO_INFO;
/* Security check pending: report authentication pending. */
2117 sk->sk_state = BT_CONNECT2;
2118 result = L2CAP_CR_PEND;
2119 status = L2CAP_CS_AUTHEN_PEND;
/* Feature-mask exchange still in flight: answer pending for now. */
2122 sk->sk_state = BT_CONNECT2;
2123 result = L2CAP_CR_PEND;
2124 status = L2CAP_CS_NO_INFO;
2127 write_unlock_bh(&conn->chan_lock);
2130 bh_unlock_sock(parent);
2133 rsp.scid = cpu_to_le16(scid);
2134 rsp.dcid = cpu_to_le16(dcid);
2135 rsp.result = cpu_to_le16(result);
2136 rsp.status = cpu_to_le16(status);
2137 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
/* First connection on this link: start the feature-mask exchange. */
2139 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
2140 struct l2cap_info_req info;
2141 info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
2143 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
2144 conn->info_ident = l2cap_get_ident(conn);
2146 mod_timer(&conn->info_timer, jiffies +
2147 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
2149 l2cap_send_cmd(conn, conn->info_ident,
2150 L2CAP_INFO_REQ, sizeof(info), &info);
/* Accepted immediately: begin configuration right away. */
2153 if (chan && !(chan->conf_state & L2CAP_CONF_REQ_SENT) &&
2154 result == L2CAP_CR_SUCCESS) {
2156 chan->conf_state |= L2CAP_CONF_REQ_SENT;
2157 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2158 l2cap_build_conf_req(chan, buf), buf);
2159 chan->num_conf_req++;
/* Handle a Connection Response.  Locate the channel by our source CID (or
 * by the command ident while still pending), then: on SUCCESS move to
 * BT_CONFIG and send our first Configuration Request; on PEND mark the
 * connect as pending; otherwise tear the channel down (deferred via a
 * short timer when the socket is locked by userspace). */
2166 static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2167 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
2168 u16 scid, dcid, result, status;
2169 struct l2cap_chan *chan;
2173 scid = __le16_to_cpu(rsp->scid);
2174 dcid = __le16_to_cpu(rsp->dcid);
2175 result = __le16_to_cpu(rsp->result);
2176 status = __le16_to_cpu(rsp->status);
2178 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);
2181 chan = l2cap_get_chan_by_scid(conn, scid);
/* scid may be 0 in an error response; fall back to the request ident. */
2185 chan = l2cap_get_chan_by_ident(conn, cmd->ident);
2193 case L2CAP_CR_SUCCESS:
2194 sk->sk_state = BT_CONFIG;
2196 l2cap_pi(sk)->dcid = dcid;
2197 chan->conf_state &= ~L2CAP_CONF_CONNECT_PEND;
2199 if (chan->conf_state & L2CAP_CONF_REQ_SENT)
2202 chan->conf_state |= L2CAP_CONF_REQ_SENT;
2204 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2205 l2cap_build_conf_req(chan, req), req);
2206 chan->num_conf_req++;
2210 chan->conf_state |= L2CAP_CONF_CONNECT_PEND;
2214 /* don't delete l2cap channel if sk is owned by user */
2215 if (sock_owned_by_user(sk)) {
2216 sk->sk_state = BT_DISCONN;
2217 l2cap_sock_clear_timer(sk);
/* Retry deletion shortly (HZ/5) once userspace releases the sock. */
2218 l2cap_sock_set_timer(sk, HZ / 5);
2222 l2cap_chan_del(chan, ECONNREFUSED);
2230 static inline void set_default_fcs(struct l2cap_pinfo *pi)
2232 /* FCS is enabled only in ERTM or streaming mode, if one or both
/* Basic mode never uses a frame check sequence. */
2235 if (pi->mode != L2CAP_MODE_ERTM && pi->mode != L2CAP_MODE_STREAMING)
2236 pi->fcs = L2CAP_FCS_NONE;
/* Unless the peer explicitly negotiated "no FCS", default to CRC16. */
2237 else if (!(pi->chan->conf_state & L2CAP_CONF_NO_FCS_RECV))
2238 pi->fcs = L2CAP_FCS_CRC16;
/* Handle an incoming Configuration Request.  Accumulates (possibly
 * multi-fragment) option data in chan->conf_req, then parses the complete
 * request, sends the response, and — once both directions are configured —
 * moves the channel to BT_CONNECTED (initialising ERTM if negotiated).
 * Also sends our own Configuration Request if we have not yet done so. */
2241 static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
2243 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
2246 struct l2cap_chan *chan;
2250 dcid = __le16_to_cpu(req->dcid);
2251 flags = __le16_to_cpu(req->flags);
2253 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
2255 chan = l2cap_get_chan_by_scid(conn, dcid);
/* Config is only legal in BT_CONFIG; otherwise reject (invalid CID). */
2261 if (sk->sk_state != BT_CONFIG) {
2262 struct l2cap_cmd_rej rej;
2264 rej.reason = cpu_to_le16(0x0002);
2265 l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
2270 /* Reject if config buffer is too small. */
2271 len = cmd_len - sizeof(*req);
2272 if (chan->conf_len + len > sizeof(chan->conf_req)) {
2273 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2274 l2cap_build_conf_rsp(sk, rsp,
2275 L2CAP_CONF_REJECT, flags), rsp);
/* Stash this fragment of option data. */
2280 memcpy(chan->conf_req + chan->conf_len, req->data, len);
2281 chan->conf_len += len;
/* Continuation flag set: more fragments follow, ack with empty rsp. */
2283 if (flags & 0x0001) {
2284 /* Incomplete config. Send empty response. */
2285 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2286 l2cap_build_conf_rsp(sk, rsp,
2287 L2CAP_CONF_SUCCESS, 0x0001), rsp);
2291 /* Complete config. */
2292 len = l2cap_parse_conf_req(chan, rsp);
/* Parse failure (e.g. mode refused): disconnect the channel. */
2294 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2298 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
2299 chan->num_conf_rsp++;
2301 /* Reset config buffer. */
2304 if (!(chan->conf_state & L2CAP_CONF_OUTPUT_DONE))
/* Both directions configured: the channel is ready for data. */
2307 if (chan->conf_state & L2CAP_CONF_INPUT_DONE) {
2308 set_default_fcs(l2cap_pi(sk));
2310 sk->sk_state = BT_CONNECTED;
2312 chan->next_tx_seq = 0;
2313 chan->expected_tx_seq = 0;
2314 skb_queue_head_init(&chan->tx_q);
2315 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
2316 l2cap_ertm_init(chan);
2318 l2cap_chan_ready(sk);
/* We still owe the peer our own Configuration Request. */
2322 if (!(chan->conf_state & L2CAP_CONF_REQ_SENT)) {
2324 chan->conf_state |= L2CAP_CONF_REQ_SENT;
2325 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2326 l2cap_build_conf_req(chan, buf), buf);
2327 chan->num_conf_req++;
/* Handle a Configuration Response.  On SUCCESS record the negotiated RFC
 * values; on UNACCEPT re-parse the rejected options and send an amended
 * request (up to L2CAP_CONF_MAX_CONF_RSP rounds); otherwise disconnect.
 * When the final continuation flag clears and output config is done, the
 * channel goes BT_CONNECTED. */
2335 static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2337 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
2338 u16 scid, flags, result;
2339 struct l2cap_chan *chan;
/* NOTE(review): cmd->len is a wire-endian (__le16) field used here
 * without le16_to_cpu — correct only on little-endian hosts; confirm
 * against the upstream endianness fix. */
2341 int len = cmd->len - sizeof(*rsp);
2343 scid = __le16_to_cpu(rsp->scid);
2344 flags = __le16_to_cpu(rsp->flags);
2345 result = __le16_to_cpu(rsp->result);
2347 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
2348 scid, flags, result);
2350 chan = l2cap_get_chan_by_scid(conn, scid);
2357 case L2CAP_CONF_SUCCESS:
2358 l2cap_conf_rfc_get(sk, rsp->data, len);
2361 case L2CAP_CONF_UNACCEPT:
/* Renegotiate only a bounded number of times. */
2362 if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
2365 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
2366 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2370 /* throw out any old stored conf requests */
2371 result = L2CAP_CONF_SUCCESS;
2372 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
/* Parse failure: give up and disconnect. */
2375 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2379 l2cap_send_cmd(conn, l2cap_get_ident(conn),
2380 L2CAP_CONF_REQ, len, req);
2381 chan->num_conf_req++;
2382 if (result != L2CAP_CONF_SUCCESS)
/* Any other (reject/unknown) result: fail the channel. */
2388 sk->sk_err = ECONNRESET;
2389 l2cap_sock_set_timer(sk, HZ * 5);
2390 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2397 chan->conf_state |= L2CAP_CONF_INPUT_DONE;
/* Both directions done: bring the channel up (ERTM init if needed). */
2399 if (chan->conf_state & L2CAP_CONF_OUTPUT_DONE) {
2400 set_default_fcs(l2cap_pi(sk));
2402 sk->sk_state = BT_CONNECTED;
2403 chan->next_tx_seq = 0;
2404 chan->expected_tx_seq = 0;
2405 skb_queue_head_init(&chan->tx_q);
2406 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
2407 l2cap_ertm_init(chan);
2409 l2cap_chan_ready(sk);
/* Handle a Disconnection Request: acknowledge with a Disconnection
 * Response, shut the socket down, and delete the channel (deferred via a
 * short timer when the socket is locked by userspace). */
2417 static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2419 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
2420 struct l2cap_disconn_rsp rsp;
2422 struct l2cap_chan *chan;
2425 scid = __le16_to_cpu(req->scid);
2426 dcid = __le16_to_cpu(req->dcid);
2428 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
/* The peer's dcid is our scid: look the channel up by it. */
2430 chan = l2cap_get_chan_by_scid(conn, dcid);
2436 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
2437 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
2438 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
2440 sk->sk_shutdown = SHUTDOWN_MASK;
2442 /* don't delete l2cap channel if sk is owned by user */
2443 if (sock_owned_by_user(sk)) {
2444 sk->sk_state = BT_DISCONN;
2445 l2cap_sock_clear_timer(sk);
/* Retry teardown shortly once userspace releases the socket. */
2446 l2cap_sock_set_timer(sk, HZ / 5);
2451 l2cap_chan_del(chan, ECONNRESET);
2454 l2cap_sock_kill(sk);
/* Handle a Disconnection Response to our own request: delete the channel
 * (err 0 — this is a clean, locally initiated disconnect), deferring via
 * a short timer when the socket is locked by userspace. */
2458 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2460 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
2462 struct l2cap_chan *chan;
2465 scid = __le16_to_cpu(rsp->scid);
2466 dcid = __le16_to_cpu(rsp->dcid);
2468 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
2470 chan = l2cap_get_chan_by_scid(conn, scid);
2476 /* don't delete l2cap channel if sk is owned by user */
2477 if (sock_owned_by_user(sk)) {
2478 sk->sk_state = BT_DISCONN;
2479 l2cap_sock_clear_timer(sk);
2480 l2cap_sock_set_timer(sk, HZ / 5);
2485 l2cap_chan_del(chan, 0);
2488 l2cap_sock_kill(sk);
/* Handle an Information Request: answer with our extended feature mask,
 * our fixed-channel map, or NOTSUPP for any other information type. */
2492 static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2494 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
2497 type = __le16_to_cpu(req->type);
2499 BT_DBG("type 0x%4.4x", type);
2501 if (type == L2CAP_IT_FEAT_MASK) {
2503 u32 feat_mask = l2cap_feat_mask;
2504 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
2505 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
2506 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
/* Advertise ERTM and streaming support on top of the base mask. */
2508 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
2510 put_unaligned_le32(feat_mask, rsp->data);
2511 l2cap_send_cmd(conn, cmd->ident,
2512 L2CAP_INFO_RSP, sizeof(buf), buf);
2513 } else if (type == L2CAP_IT_FIXED_CHAN) {
2515 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
2516 rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
2517 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
/* 8-byte fixed-channel bitmap follows the 4-byte rsp header. */
2518 memcpy(buf + 4, l2cap_fixed_chan, 8);
2519 l2cap_send_cmd(conn, cmd->ident,
2520 L2CAP_INFO_RSP, sizeof(buf), buf);
/* Unknown information type: reply not-supported. */
2522 struct l2cap_info_rsp rsp;
2523 rsp.type = cpu_to_le16(type);
2524 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
2525 l2cap_send_cmd(conn, cmd->ident,
2526 L2CAP_INFO_RSP, sizeof(rsp), &rsp);
/* Handle an Information Response to our feature-mask / fixed-channel
 * queries.  Stores the peer feature mask, optionally follows up with a
 * fixed-channel query, and once the exchange completes kicks
 * l2cap_conn_start() to proceed with pending channels. */
2532 static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2534 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
2537 type = __le16_to_cpu(rsp->type);
2538 result = __le16_to_cpu(rsp->result);
2540 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
2542 /* L2CAP Info req/rsp are unbound to channels, add extra checks */
/* Ignore responses that don't match our outstanding request. */
2543 if (cmd->ident != conn->info_ident ||
2544 conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
2547 del_timer(&conn->info_timer);
/* Peer refused: treat the exchange as finished and move on. */
2549 if (result != L2CAP_IR_SUCCESS) {
2550 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2551 conn->info_ident = 0;
2553 l2cap_conn_start(conn);
2558 if (type == L2CAP_IT_FEAT_MASK) {
2559 conn->feat_mask = get_unaligned_le32(rsp->data);
/* Peer supports fixed channels: query its fixed-channel map next. */
2561 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
2562 struct l2cap_info_req req;
2563 req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
2565 conn->info_ident = l2cap_get_ident(conn);
2567 l2cap_send_cmd(conn, conn->info_ident,
2568 L2CAP_INFO_REQ, sizeof(req), &req);
2570 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2571 conn->info_ident = 0;
2573 l2cap_conn_start(conn);
2575 } else if (type == L2CAP_IT_FIXED_CHAN) {
2576 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2577 conn->info_ident = 0;
2579 l2cap_conn_start(conn);
/* Validate LE connection-parameter-update values against the spec ranges:
 * interval min/max in [6, 3200] with min <= max, supervision timeout
 * multiplier in [10, 3200] and large enough for the interval, and slave
 * latency within both the absolute cap (499) and the timeout-derived cap. */
2585 static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency,
2590 if (min > max || min < 6 || max > 3200)
2593 if (to_multiplier < 10 || to_multiplier > 3200)
/* Timeout must exceed 8 connection intervals or the link can't survive. */
2596 if (max >= to_multiplier * 8)
2599 max_latency = (to_multiplier * 8 / max) - 1;
2600 if (latency > 499 || latency > max_latency)
/* Handle an LE Connection Parameter Update Request (master role only):
 * validate the proposed parameters, send accept/reject, and when accepted
 * ask the controller to apply them via hci_le_conn_update(). */
2606 static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
2607 struct l2cap_cmd_hdr *cmd, u8 *data)
2609 struct hci_conn *hcon = conn->hcon;
2610 struct l2cap_conn_param_update_req *req;
2611 struct l2cap_conn_param_update_rsp rsp;
2612 u16 min, max, latency, to_multiplier, cmd_len;
/* Only the master may act on a slave's parameter-update request. */
2615 if (!(hcon->link_mode & HCI_LM_MASTER))
2618 cmd_len = __le16_to_cpu(cmd->len);
2619 if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
2622 req = (struct l2cap_conn_param_update_req *) data;
2623 min = __le16_to_cpu(req->min);
2624 max = __le16_to_cpu(req->max);
2625 latency = __le16_to_cpu(req->latency);
2626 to_multiplier = __le16_to_cpu(req->to_multiplier);
2628 BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
2629 min, max, latency, to_multiplier);
2631 memset(&rsp, 0, sizeof(rsp));
2633 err = l2cap_check_conn_param(min, max, latency, to_multiplier);
2635 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
2637 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
2639 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
/* Accepted: push the new parameters down to the controller. */
2643 hci_le_conn_update(hcon, min, max, latency, to_multiplier);
/* Dispatch one BR/EDR signalling command to its handler.  Echo requests
 * are answered inline; unknown opcodes are logged (and presumably
 * rejected by the caller via the returned error). */
2648 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
2649 struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
2653 switch (cmd->code) {
2654 case L2CAP_COMMAND_REJ:
2655 l2cap_command_rej(conn, cmd, data);
2658 case L2CAP_CONN_REQ:
2659 err = l2cap_connect_req(conn, cmd, data);
2662 case L2CAP_CONN_RSP:
2663 err = l2cap_connect_rsp(conn, cmd, data);
2666 case L2CAP_CONF_REQ:
2667 err = l2cap_config_req(conn, cmd, cmd_len, data);
2670 case L2CAP_CONF_RSP:
2671 err = l2cap_config_rsp(conn, cmd, data);
2674 case L2CAP_DISCONN_REQ:
2675 err = l2cap_disconnect_req(conn, cmd, data);
2678 case L2CAP_DISCONN_RSP:
2679 err = l2cap_disconnect_rsp(conn, cmd, data);
2682 case L2CAP_ECHO_REQ:
/* Echo: bounce the payload straight back with the same ident. */
2683 l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
2686 case L2CAP_ECHO_RSP:
2689 case L2CAP_INFO_REQ:
2690 err = l2cap_information_req(conn, cmd, data);
2693 case L2CAP_INFO_RSP:
2694 err = l2cap_information_rsp(conn, cmd, data);
2698 BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
/* Dispatch one LE signalling command.  Only the connection-parameter
 * update request is handled; rejects and update responses are ignored. */
2706 static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
2707 struct l2cap_cmd_hdr *cmd, u8 *data)
2709 switch (cmd->code) {
2710 case L2CAP_COMMAND_REJ:
2713 case L2CAP_CONN_PARAM_UPDATE_REQ:
2714 return l2cap_conn_param_update_req(conn, cmd, data);
2716 case L2CAP_CONN_PARAM_UPDATE_RSP:
2720 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
/* Process an skb received on the signalling channel: mirror it to raw
 * sockets, then walk the buffer command-by-command, validating each
 * header and dispatching to the LE or BR/EDR handler.  Handler errors
 * produce a Command Reject back to the peer. */
2725 static inline void l2cap_sig_channel(struct l2cap_conn *conn,
2726 struct sk_buff *skb)
2728 u8 *data = skb->data;
2730 struct l2cap_cmd_hdr cmd;
2733 l2cap_raw_recv(conn, skb);
/* Multiple commands may be packed into one signalling PDU. */
2735 while (len >= L2CAP_CMD_HDR_SIZE) {
2737 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
2738 data += L2CAP_CMD_HDR_SIZE;
2739 len -= L2CAP_CMD_HDR_SIZE;
2741 cmd_len = le16_to_cpu(cmd.len);
2743 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);
/* Truncated command or reserved ident 0: stop parsing this PDU. */
2745 if (cmd_len > len || !cmd.ident) {
2746 BT_DBG("corrupted command");
2750 if (conn->hcon->type == LE_LINK)
2751 err = l2cap_le_sig_cmd(conn, &cmd, data);
2753 err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);
2756 struct l2cap_cmd_rej rej;
2758 BT_ERR("Wrong link type (%d)", err);
2760 /* FIXME: Map err to a valid reason */
2761 rej.reason = cpu_to_le16(0);
2762 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
/* Verify the trailing CRC16 FCS of a received ERTM/streaming frame when
 * FCS is enabled.  The CRC covers the L2CAP header (+2 control bytes)
 * through the payload; the 2 FCS bytes are trimmed off the skb first but
 * remain readable just past skb->len. */
2772 static int l2cap_check_fcs(struct l2cap_pinfo *pi, struct sk_buff *skb)
2774 u16 our_fcs, rcv_fcs;
2775 int hdr_size = L2CAP_HDR_SIZE + 2;
2777 if (pi->fcs == L2CAP_FCS_CRC16) {
2778 skb_trim(skb, skb->len - 2);
/* After the trim, skb->data + skb->len points at the received FCS. */
2779 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
2780 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
2782 if (our_fcs != rcv_fcs)
/* After a poll, answer with whatever is appropriate: an RNR if we are
 * locally busy, otherwise (re)transmit pending I-frames, and fall back
 * to an RR when nothing was sent so the peer still gets an ack. */
2788 static inline void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
2792 chan->frames_sent = 0;
2794 control |= chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
2796 if (chan->conn_state & L2CAP_CONN_LOCAL_BUSY) {
2797 control |= L2CAP_SUPER_RCV_NOT_READY;
2798 l2cap_send_sframe(chan, control);
2799 chan->conn_state |= L2CAP_CONN_RNR_SENT;
/* Remote was busy: retransmit what it may have dropped. */
2802 if (chan->conn_state & L2CAP_CONN_REMOTE_BUSY)
2803 l2cap_retransmit_frames(chan);
2805 l2cap_ertm_send(chan);
/* Nothing went out: send a plain RR acknowledgement. */
2807 if (!(chan->conn_state & L2CAP_CONN_LOCAL_BUSY) &&
2808 chan->frames_sent == 0) {
2809 control |= L2CAP_SUPER_RCV_READY;
2810 l2cap_send_sframe(chan, control);
/* Insert an out-of-order I-frame into the SREJ queue, keeping the queue
 * sorted by tx_seq distance from buffer_seq (modulo-64 sequence space).
 * Duplicate tx_seq frames are detected (handling elided in this view). */
2814 static int l2cap_add_to_srej_queue(struct l2cap_chan *chan, struct sk_buff *skb, u8 tx_seq, u8 sar)
2816 struct sk_buff *next_skb;
2817 int tx_seq_offset, next_tx_seq_offset;
2819 bt_cb(skb)->tx_seq = tx_seq;
2820 bt_cb(skb)->sar = sar;
/* Empty queue: just append. */
2822 next_skb = skb_peek(&chan->srej_q);
2824 __skb_queue_tail(&chan->srej_q, skb);
/* Distance of the new frame from buffer_seq in mod-64 space. */
2828 tx_seq_offset = (tx_seq - chan->buffer_seq) % 64;
2829 if (tx_seq_offset < 0)
2830 tx_seq_offset += 64;
/* Same tx_seq already queued: duplicate. */
2833 if (bt_cb(next_skb)->tx_seq == tx_seq)
2836 next_tx_seq_offset = (bt_cb(next_skb)->tx_seq -
2837 chan->buffer_seq) % 64;
2838 if (next_tx_seq_offset < 0)
2839 next_tx_seq_offset += 64;
/* Found the first queued frame that sorts after us: insert before it. */
2841 if (next_tx_seq_offset > tx_seq_offset) {
2842 __skb_queue_before(&chan->srej_q, next_skb, skb);
2846 if (skb_queue_is_last(&chan->srej_q, next_skb))
2849 } while ((next_skb = skb_queue_next(&chan->srej_q, next_skb)));
/* Sorts after everything currently queued. */
2851 __skb_queue_tail(&chan->srej_q, skb);
/* Reassemble an ERTM SDU from its SAR fragments.  Unsegmented frames are
 * queued directly; START allocates chan->sdu sized from the embedded SDU
 * length; CONTINUE/END append, with END cloning the finished SDU onto the
 * socket queue.  The SAR_RETRY flag guards against double-appending when
 * a queue attempt must be retried under local-busy.  On protocol errors
 * (elided drop paths) the channel is disconnected. */
2856 static int l2cap_ertm_reassembly_sdu(struct l2cap_chan *chan, struct sk_buff *skb, u16 control)
2858 struct l2cap_pinfo *pi = l2cap_pi(chan->sk);
2859 struct sk_buff *_skb;
2862 switch (control & L2CAP_CTRL_SAR) {
2863 case L2CAP_SDU_UNSEGMENTED:
/* An unsegmented frame mid-reassembly is a protocol violation. */
2864 if (chan->conn_state & L2CAP_CONN_SAR_SDU)
2867 err = sock_queue_rcv_skb(chan->sk, skb);
2873 case L2CAP_SDU_START:
2874 if (chan->conn_state & L2CAP_CONN_SAR_SDU)
/* First two payload bytes carry the total SDU length. */
2877 chan->sdu_len = get_unaligned_le16(skb->data);
2879 if (chan->sdu_len > pi->imtu)
2882 chan->sdu = bt_skb_alloc(chan->sdu_len, GFP_ATOMIC);
2886 /* pull sdu_len bytes only after alloc, because of Local Busy
2887 * condition we have to be sure that this will be executed
2888 * only once, i.e., when alloc does not fail */
2891 memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);
2893 chan->conn_state |= L2CAP_CONN_SAR_SDU;
2894 chan->partial_sdu_len = skb->len;
2897 case L2CAP_SDU_CONTINUE:
2898 if (!(chan->conn_state & L2CAP_CONN_SAR_SDU))
/* Overflow beyond the announced SDU length aborts reassembly. */
2904 chan->partial_sdu_len += skb->len;
2905 if (chan->partial_sdu_len > chan->sdu_len)
2908 memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);
/* L2CAP_SDU_END (case label elided in this view). */
2913 if (!(chan->conn_state & L2CAP_CONN_SAR_SDU))
/* Skip the append on retry: it already happened before the busy. */
2919 if (!(chan->conn_state & L2CAP_CONN_SAR_RETRY)) {
2920 chan->partial_sdu_len += skb->len;
2922 if (chan->partial_sdu_len > pi->imtu)
2925 if (chan->partial_sdu_len != chan->sdu_len)
2928 memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);
2931 _skb = skb_clone(chan->sdu, GFP_ATOMIC);
/* Clone failed: mark for retry without re-appending next time. */
2933 chan->conn_state |= L2CAP_CONN_SAR_RETRY;
2937 err = sock_queue_rcv_skb(chan->sk, _skb);
2940 chan->conn_state |= L2CAP_CONN_SAR_RETRY;
2944 chan->conn_state &= ~L2CAP_CONN_SAR_RETRY;
2945 chan->conn_state &= ~L2CAP_CONN_SAR_SDU;
2947 kfree_skb(chan->sdu);
/* Error path: drop any partial SDU and disconnect the channel. */
2955 kfree_skb(chan->sdu);
2959 l2cap_send_disconn_req(pi->conn, chan, ECONNRESET);
/* Try to drain the busy queue into the ERTM reassembly path.  If a frame
 * still cannot be delivered it is put back and we stay busy.  Once the
 * queue drains, exit local-busy: poll the peer with RR+P, restart the
 * monitor timer, and clear the busy/RNR flags. */
2964 static int l2cap_try_push_rx_skb(struct l2cap_chan *chan)
2966 struct sk_buff *skb;
2970 while ((skb = skb_dequeue(&chan->busy_q))) {
2971 control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
2972 err = l2cap_ertm_reassembly_sdu(chan, skb, control);
/* Still busy: requeue at the head and bail out. */
2974 skb_queue_head(&chan->busy_q, skb);
/* Delivered: advance the receive window (mod-64). */
2978 chan->buffer_seq = (chan->buffer_seq + 1) % 64;
2981 if (!(chan->conn_state & L2CAP_CONN_RNR_SENT))
/* We had sent an RNR: poll the peer to resume its transmissions. */
2984 control = chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
2985 control |= L2CAP_SUPER_RCV_READY | L2CAP_CTRL_POLL;
2986 l2cap_send_sframe(chan, control);
2987 chan->retry_count = 1;
2989 del_timer(&chan->retrans_timer);
2990 __mod_monitor_timer();
2992 chan->conn_state |= L2CAP_CONN_WAIT_F;
2995 chan->conn_state &= ~L2CAP_CONN_LOCAL_BUSY;
2996 chan->conn_state &= ~L2CAP_CONN_RNR_SENT;
2998 BT_DBG("chan %p, Exit local busy", chan);
/* Workqueue handler for the local-busy condition: repeatedly sleep and
 * retry pushing queued rx frames until the busy queue drains, the retry
 * budget (L2CAP_LOCAL_BUSY_TRIES) is exhausted (then disconnect), a
 * signal arrives, or a socket error is raised. */
3003 static void l2cap_busy_work(struct work_struct *work)
3005 DECLARE_WAITQUEUE(wait, current);
3006 struct l2cap_chan *chan =
3007 container_of(work, struct l2cap_chan, busy_work);
3008 struct sock *sk = chan->sk;
3009 int n_tries = 0, timeo = HZ/5, err;
3010 struct sk_buff *skb;
3014 add_wait_queue(sk_sleep(sk), &wait);
3015 while ((skb = skb_peek(&chan->busy_q))) {
3016 set_current_state(TASK_INTERRUPTIBLE);
/* Give up after too many retries: the peer is stuck. */
3018 if (n_tries++ > L2CAP_LOCAL_BUSY_TRIES) {
3020 l2cap_send_disconn_req(l2cap_pi(sk)->conn, chan, EBUSY);
3027 if (signal_pending(current)) {
3028 err = sock_intr_errno(timeo);
/* Sleep between attempts (timeo starts at HZ/5). */
3033 timeo = schedule_timeout(timeo);
3036 err = sock_error(sk);
3040 if (l2cap_try_push_rx_skb(chan) == 0)
3044 set_current_state(TASK_RUNNING);
3045 remove_wait_queue(sk_sleep(sk), &wait);
/*
 * Deliver an in-sequence I-frame to the reassembly layer.  If the channel
 * is already in local-busy state the frame is appended to busy_q and a
 * drain attempt is made.  If reassembly reports busy, enter the
 * local-busy state: queue the frame, send an RNR S-frame, and kick the
 * busy workqueue.  NOTE(review): some statements between the numbered
 * lines are elided in this view.
 */
3050 static int l2cap_push_rx_skb(struct l2cap_chan *chan, struct sk_buff *skb, u16 control)
3054 if (chan->conn_state & L2CAP_CONN_LOCAL_BUSY) {
/* Stash the SAR bits so the frame can be reassembled later. */
3055 bt_cb(skb)->sar = control >> L2CAP_CTRL_SAR_SHIFT;
3056 __skb_queue_tail(&chan->busy_q, skb);
3057 return l2cap_try_push_rx_skb(chan);
3062 err = l2cap_ertm_reassembly_sdu(chan, skb, control);
3064 chan->buffer_seq = (chan->buffer_seq + 1) % 64;
3068 /* Busy Condition */
3069 BT_DBG("chan %p, Enter local busy", chan);
3071 chan->conn_state |= L2CAP_CONN_LOCAL_BUSY;
3072 bt_cb(skb)->sar = control >> L2CAP_CTRL_SAR_SHIFT;
3073 __skb_queue_tail(&chan->busy_q, skb);
/* Tell the peer to stop sending: RNR with our current buffer_seq. */
3075 sctrl = chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3076 sctrl |= L2CAP_SUPER_RCV_NOT_READY;
3077 l2cap_send_sframe(chan, sctrl);
3079 chan->conn_state |= L2CAP_CONN_RNR_SENT;
3081 del_timer(&chan->ack_timer);
/* Defer recovery to process context via the busy workqueue. */
3083 queue_work(_busy_wq, &chan->busy_work);
/*
 * Reassemble SDUs in streaming mode, driven by the SAR bits of the frame
 * control field.  Unlike ERTM there is no retransmission: on any
 * inconsistency the partial SDU is simply dropped.  NOTE(review): error
 * paths and `break`s between the numbered lines are elided in this view.
 */
3088 static int l2cap_streaming_reassembly_sdu(struct l2cap_chan *chan, struct sk_buff *skb, u16 control)
3090 struct l2cap_pinfo *pi = l2cap_pi(chan->sk);
3091 struct sk_buff *_skb;
3095 * TODO: We have to notify the userland if some data is lost with the
3099 switch (control & L2CAP_CTRL_SAR) {
3100 case L2CAP_SDU_UNSEGMENTED:
/* A new unsegmented SDU aborts any reassembly in progress. */
3101 if (chan->conn_state & L2CAP_CONN_SAR_SDU) {
3102 kfree_skb(chan->sdu);
3106 err = sock_queue_rcv_skb(chan->sk, skb);
3112 case L2CAP_SDU_START:
3113 if (chan->conn_state & L2CAP_CONN_SAR_SDU) {
3114 kfree_skb(chan->sdu);
/* SDU start frame carries the total SDU length first. */
3118 chan->sdu_len = get_unaligned_le16(skb->data);
3121 if (chan->sdu_len > pi->imtu) {
3126 chan->sdu = bt_skb_alloc(chan->sdu_len, GFP_ATOMIC);
3132 memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);
3134 chan->conn_state |= L2CAP_CONN_SAR_SDU;
3135 chan->partial_sdu_len = skb->len;
3139 case L2CAP_SDU_CONTINUE:
/* Continuation without a preceding start is a protocol violation. */
3140 if (!(chan->conn_state & L2CAP_CONN_SAR_SDU))
3143 memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);
3145 chan->partial_sdu_len += skb->len;
/* Received more than the advertised SDU length: drop the SDU. */
3146 if (chan->partial_sdu_len > chan->sdu_len)
3147 kfree_skb(chan->sdu);
3154 if (!(chan->conn_state & L2CAP_CONN_SAR_SDU))
3157 memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);
3159 chan->conn_state &= ~L2CAP_CONN_SAR_SDU;
3160 chan->partial_sdu_len += skb->len;
3162 if (chan->partial_sdu_len > pi->imtu)
/* Only deliver upstream if the length matches exactly. */
3165 if (chan->partial_sdu_len == chan->sdu_len) {
3166 _skb = skb_clone(chan->sdu, GFP_ATOMIC);
3167 err = sock_queue_rcv_skb(chan->sk, _skb);
3174 kfree_skb(chan->sdu);
/*
 * After a selectively-rejected frame has been received, flush the run of
 * consecutive frames now available in srej_q (by tx_seq) into the
 * reassembly path, advancing buffer_seq_srej for each.
 */
3182 static void l2cap_check_srej_gap(struct l2cap_chan *chan, u8 tx_seq)
3184 struct sk_buff *skb;
3187 while ((skb = skb_peek(&chan->srej_q))) {
/* Stop at the first gap: frames must go up strictly in sequence. */
3188 if (bt_cb(skb)->tx_seq != tx_seq)
3191 skb = skb_dequeue(&chan->srej_q);
3192 control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
3193 l2cap_ertm_reassembly_sdu(chan, skb, control);
3194 chan->buffer_seq_srej =
3195 (chan->buffer_seq_srej + 1) % 64;
3196 tx_seq = (tx_seq + 1) % 64;
/*
 * Re-send an SREJ S-frame for tx_seq: walk the pending srej list, re-emit
 * the SREJ for the matching entry and move it to the tail of the list.
 */
3200 static void l2cap_resend_srejframe(struct l2cap_chan *chan, u8 tx_seq)
3202 struct srej_list *l, *tmp;
3205 list_for_each_entry_safe(l, tmp, &chan->srej_l, list) {
3206 if (l->tx_seq == tx_seq) {
3211 control = L2CAP_SUPER_SELECT_REJECT;
3212 control |= l->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3213 l2cap_send_sframe(chan, control);
/* Re-queue at the tail so outstanding SREJs stay ordered. */
3215 list_add_tail(&l->list, &chan->srej_l);
/*
 * Send one SREJ per missing sequence number between expected_tx_seq and
 * the received tx_seq, recording each in srej_l, then advance
 * expected_tx_seq past the received frame.
 * NOTE(review): the kzalloc() result is dereferenced on the next visible
 * line without a NULL check — allocation failure under GFP_ATOMIC would
 * oops here; confirm against the elided lines / upstream fix.
 */
3219 static void l2cap_send_srejframe(struct l2cap_chan *chan, u8 tx_seq)
3221 struct srej_list *new;
3224 while (tx_seq != chan->expected_tx_seq) {
3225 control = L2CAP_SUPER_SELECT_REJECT;
3226 control |= chan->expected_tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3227 l2cap_send_sframe(chan, control);
3229 new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC);
3230 new->tx_seq = chan->expected_tx_seq;
3231 chan->expected_tx_seq = (chan->expected_tx_seq + 1) % 64;
3232 list_add_tail(&new->list, &chan->srej_l);
/* Skip over the frame we actually received. */
3234 chan->expected_tx_seq = (chan->expected_tx_seq + 1) % 64;
/*
 * ERTM I-frame receive path: validate tx_seq against the receive window,
 * handle out-of-sequence frames via the SREJ machinery, push in-sequence
 * frames to reassembly, and acknowledge every num_to_ack frames.
 * NOTE(review): many intermediate lines (gotos, returns, braces) are
 * elided in this view.
 */
3237 static inline int l2cap_data_channel_iframe(struct l2cap_chan *chan, u16 rx_control, struct sk_buff *skb)
3239 struct l2cap_pinfo *pi = l2cap_pi(chan->sk);
3240 u8 tx_seq = __get_txseq(rx_control);
3241 u8 req_seq = __get_reqseq(rx_control);
3242 u8 sar = rx_control >> L2CAP_CTRL_SAR_SHIFT;
3243 int tx_seq_offset, expected_tx_seq_offset;
/* Ack roughly every sixth of the tx window. */
3244 int num_to_ack = (pi->tx_win/6) + 1;
3247 BT_DBG("chan %p len %d tx_seq %d rx_control 0x%4.4x", chan, skb->len,
3248 tx_seq, rx_control);
/* F-bit while waiting: stop monitor timer, resume retransmission. */
3250 if (L2CAP_CTRL_FINAL & rx_control &&
3251 chan->conn_state & L2CAP_CONN_WAIT_F) {
3252 del_timer(&chan->monitor_timer);
3253 if (chan->unacked_frames > 0)
3254 __mod_retrans_timer();
3255 chan->conn_state &= ~L2CAP_CONN_WAIT_F;
3258 chan->expected_ack_seq = req_seq;
3259 l2cap_drop_acked_frames(chan);
3261 if (tx_seq == chan->expected_tx_seq)
3264 tx_seq_offset = (tx_seq - chan->buffer_seq) % 64;
3265 if (tx_seq_offset < 0)
3266 tx_seq_offset += 64;
3268 /* invalid tx_seq */
3269 if (tx_seq_offset >= pi->tx_win) {
3270 l2cap_send_disconn_req(pi->conn, chan, ECONNRESET);
/*
 * NOTE(review): `==` compares the whole conn_state word against a single
 * flag; a bitwise test (`&`) is presumably intended here — this matches
 * a known upstream "fix bitfield check" change.  Confirm before editing.
 */
3274 if (chan->conn_state == L2CAP_CONN_LOCAL_BUSY)
3277 if (chan->conn_state & L2CAP_CONN_SREJ_SENT) {
3278 struct srej_list *first;
3280 first = list_first_entry(&chan->srej_l,
3281 struct srej_list, list);
/* This is the frame we SREJ'ed first: absorb it and flush the gap. */
3282 if (tx_seq == first->tx_seq) {
3283 l2cap_add_to_srej_queue(chan, skb, tx_seq, sar);
3284 l2cap_check_srej_gap(chan, tx_seq);
3286 list_del(&first->list);
/* All outstanding SREJs satisfied: leave SREJ_SENT state. */
3289 if (list_empty(&chan->srej_l)) {
3290 chan->buffer_seq = chan->buffer_seq_srej;
3291 chan->conn_state &= ~L2CAP_CONN_SREJ_SENT;
3292 l2cap_send_ack(chan);
3293 BT_DBG("chan %p, Exit SREJ_SENT", chan);
3296 struct srej_list *l;
3298 /* duplicated tx_seq */
3299 if (l2cap_add_to_srej_queue(chan, skb, tx_seq, sar) < 0)
3302 list_for_each_entry(l, &chan->srej_l, list) {
3303 if (l->tx_seq == tx_seq) {
3304 l2cap_resend_srejframe(chan, tx_seq);
3308 l2cap_send_srejframe(chan, tx_seq);
3311 expected_tx_seq_offset =
3312 (chan->expected_tx_seq - chan->buffer_seq) % 64;
3313 if (expected_tx_seq_offset < 0)
3314 expected_tx_seq_offset += 64;
3316 /* duplicated tx_seq */
3317 if (tx_seq_offset < expected_tx_seq_offset)
/* First out-of-sequence frame: enter SREJ state. */
3320 chan->conn_state |= L2CAP_CONN_SREJ_SENT;
3322 BT_DBG("chan %p, Enter SREJ", chan);
3324 INIT_LIST_HEAD(&chan->srej_l);
3325 chan->buffer_seq_srej = chan->buffer_seq;
3327 __skb_queue_head_init(&chan->srej_q);
3328 __skb_queue_head_init(&chan->busy_q);
3329 l2cap_add_to_srej_queue(chan, skb, tx_seq, sar);
3331 chan->conn_state |= L2CAP_CONN_SEND_PBIT;
3333 l2cap_send_srejframe(chan, tx_seq);
3335 del_timer(&chan->ack_timer);
/* In-sequence frame: advance and deliver. */
3340 chan->expected_tx_seq = (chan->expected_tx_seq + 1) % 64;
3342 if (chan->conn_state & L2CAP_CONN_SREJ_SENT) {
3343 bt_cb(skb)->tx_seq = tx_seq;
3344 bt_cb(skb)->sar = sar;
3345 __skb_queue_tail(&chan->srej_q, skb);
3349 err = l2cap_push_rx_skb(chan, skb, rx_control);
3353 if (rx_control & L2CAP_CTRL_FINAL) {
3354 if (chan->conn_state & L2CAP_CONN_REJ_ACT)
3355 chan->conn_state &= ~L2CAP_CONN_REJ_ACT;
3357 l2cap_retransmit_frames(chan);
3362 chan->num_acked = (chan->num_acked + 1) % num_to_ack;
3363 if (chan->num_acked == num_to_ack - 1)
3364 l2cap_send_ack(chan);
/*
 * Handle a Receiver Ready (RR) S-frame: acknowledge frames, and react to
 * the P-bit (peer poll) or F-bit (final, possibly triggering
 * retransmission), otherwise resume normal sending.
 */
3373 static inline void l2cap_data_channel_rrframe(struct l2cap_chan *chan, u16 rx_control)
3375 BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, __get_reqseq(rx_control),
3378 chan->expected_ack_seq = __get_reqseq(rx_control);
3379 l2cap_drop_acked_frames(chan);
3381 if (rx_control & L2CAP_CTRL_POLL) {
/* Peer polled us: we must answer with the F-bit set. */
3382 chan->conn_state |= L2CAP_CONN_SEND_FBIT;
3383 if (chan->conn_state & L2CAP_CONN_SREJ_SENT) {
3384 if ((chan->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
3385 (chan->unacked_frames > 0))
3386 __mod_retrans_timer();
3388 chan->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3389 l2cap_send_srejtail(chan);
3391 l2cap_send_i_or_rr_or_rnr(chan);
3394 } else if (rx_control & L2CAP_CTRL_FINAL) {
3395 chan->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
/* F-bit answering an earlier REJ: the retransmit is done. */
3397 if (chan->conn_state & L2CAP_CONN_REJ_ACT)
3398 chan->conn_state &= ~L2CAP_CONN_REJ_ACT;
3400 l2cap_retransmit_frames(chan);
3403 if ((chan->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
3404 (chan->unacked_frames > 0))
3405 __mod_retrans_timer();
3407 chan->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3408 if (chan->conn_state & L2CAP_CONN_SREJ_SENT)
3409 l2cap_send_ack(chan);
3411 l2cap_ertm_send(chan);
/*
 * Handle a Reject (REJ) S-frame: acknowledge up to req_seq and
 * retransmit unacked frames.  REJ_ACT marks that we already reacted, to
 * suppress a second retransmission when the matching F-bit arrives.
 */
3415 static inline void l2cap_data_channel_rejframe(struct l2cap_chan *chan, u16 rx_control)
3417 u8 tx_seq = __get_reqseq(rx_control);
3419 BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, tx_seq, rx_control);
3421 chan->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3423 chan->expected_ack_seq = tx_seq;
3424 l2cap_drop_acked_frames(chan);
3426 if (rx_control & L2CAP_CTRL_FINAL) {
3427 if (chan->conn_state & L2CAP_CONN_REJ_ACT)
3428 chan->conn_state &= ~L2CAP_CONN_REJ_ACT;
3430 l2cap_retransmit_frames(chan);
3432 l2cap_retransmit_frames(chan);
/* Remember we answered this REJ while a poll exchange is pending. */
3434 if (chan->conn_state & L2CAP_CONN_WAIT_F)
3435 chan->conn_state |= L2CAP_CONN_REJ_ACT;
/*
 * Handle a Selective Reject (SREJ) S-frame: retransmit exactly the
 * requested frame.  P-bit additionally acknowledges and demands an
 * F-bit answer; F-bit clears a previously recorded SREJ_ACT for the
 * same sequence number.
 */
3438 static inline void l2cap_data_channel_srejframe(struct l2cap_chan *chan, u16 rx_control)
3440 u8 tx_seq = __get_reqseq(rx_control);
3442 BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, tx_seq, rx_control);
3444 chan->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3446 if (rx_control & L2CAP_CTRL_POLL) {
3447 chan->expected_ack_seq = tx_seq;
3448 l2cap_drop_acked_frames(chan);
3450 chan->conn_state |= L2CAP_CONN_SEND_FBIT;
3451 l2cap_retransmit_one_frame(chan, tx_seq);
3453 l2cap_ertm_send(chan);
/* Record which req_seq the pending F-bit exchange refers to. */
3455 if (chan->conn_state & L2CAP_CONN_WAIT_F) {
3456 chan->srej_save_reqseq = tx_seq;
3457 chan->conn_state |= L2CAP_CONN_SREJ_ACT;
3459 } else if (rx_control & L2CAP_CTRL_FINAL) {
3460 if ((chan->conn_state & L2CAP_CONN_SREJ_ACT) &&
3461 chan->srej_save_reqseq == tx_seq)
3462 chan->conn_state &= ~L2CAP_CONN_SREJ_ACT;
3464 l2cap_retransmit_one_frame(chan, tx_seq);
3466 l2cap_retransmit_one_frame(chan, tx_seq);
3467 if (chan->conn_state & L2CAP_CONN_WAIT_F) {
3468 chan->srej_save_reqseq = tx_seq;
3469 chan->conn_state |= L2CAP_CONN_SREJ_ACT;
/*
 * Handle a Receiver Not Ready (RNR) S-frame: mark the peer busy, stop
 * retransmitting, and answer a P-bit with RR+F (or the SREJ tail when in
 * SREJ recovery).
 */
3474 static inline void l2cap_data_channel_rnrframe(struct l2cap_chan *chan, u16 rx_control)
3476 u8 tx_seq = __get_reqseq(rx_control);
3478 BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, tx_seq, rx_control);
3480 chan->conn_state |= L2CAP_CONN_REMOTE_BUSY;
3481 chan->expected_ack_seq = tx_seq;
3482 l2cap_drop_acked_frames(chan);
3484 if (rx_control & L2CAP_CTRL_POLL)
3485 chan->conn_state |= L2CAP_CONN_SEND_FBIT;
3487 if (!(chan->conn_state & L2CAP_CONN_SREJ_SENT)) {
/* Peer is busy: no point in running the retransmission timer. */
3488 del_timer(&chan->retrans_timer);
3489 if (rx_control & L2CAP_CTRL_POLL)
3490 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_FINAL);
3494 if (rx_control & L2CAP_CTRL_POLL)
3495 l2cap_send_srejtail(chan);
3497 l2cap_send_sframe(chan, L2CAP_SUPER_RCV_READY);
/*
 * S-frame demultiplexer: handle the common F-bit bookkeeping, then
 * dispatch on the supervisory function (RR / REJ / SREJ / RNR).
 */
3500 static inline int l2cap_data_channel_sframe(struct l2cap_chan *chan, u16 rx_control, struct sk_buff *skb)
3502 BT_DBG("chan %p rx_control 0x%4.4x len %d", chan, rx_control, skb->len);
/* F-bit while waiting: stop monitor timer, resume retransmission. */
3504 if (L2CAP_CTRL_FINAL & rx_control &&
3505 chan->conn_state & L2CAP_CONN_WAIT_F) {
3506 del_timer(&chan->monitor_timer);
3507 if (chan->unacked_frames > 0)
3508 __mod_retrans_timer();
3509 chan->conn_state &= ~L2CAP_CONN_WAIT_F;
3512 switch (rx_control & L2CAP_CTRL_SUPERVISE) {
3513 case L2CAP_SUPER_RCV_READY:
3514 l2cap_data_channel_rrframe(chan, rx_control);
3517 case L2CAP_SUPER_REJECT:
3518 l2cap_data_channel_rejframe(chan, rx_control);
3521 case L2CAP_SUPER_SELECT_REJECT:
3522 l2cap_data_channel_srejframe(chan, rx_control);
3525 case L2CAP_SUPER_RCV_NOT_READY:
3526 l2cap_data_channel_rnrframe(chan, rx_control);
/*
 * Common ERTM receive entry: strip and validate the control field and
 * FCS, sanity-check req_seq against the unacked window, then dispatch to
 * the I-frame or S-frame handler.  Corrupt frames are dropped silently
 * (the peer's recovery will retransmit); protocol violations tear the
 * channel down with ECONNRESET.
 */
3534 static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb)
3536 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
3537 struct l2cap_pinfo *pi = l2cap_pi(sk);
3540 int len, next_tx_seq_offset, req_seq_offset;
3542 control = get_unaligned_le16(skb->data);
3547 * We can just drop the corrupted I-frame here.
3548 * Receiver will miss it and start proper recovery
3549 * procedures and ask retransmission.
3551 if (l2cap_check_fcs(pi, skb))
/* SAR-start I-frames carry an extra 2-byte SDU length field. */
3554 if (__is_sar_start(control) && __is_iframe(control))
3557 if (pi->fcs == L2CAP_FCS_CRC16)
/* Payload larger than the negotiated MPS: protocol violation. */
3560 if (len > pi->mps) {
3561 l2cap_send_disconn_req(pi->conn, chan, ECONNRESET);
3565 req_seq = __get_reqseq(control);
3566 req_seq_offset = (req_seq - chan->expected_ack_seq) % 64;
3567 if (req_seq_offset < 0)
3568 req_seq_offset += 64;
3570 next_tx_seq_offset =
3571 (chan->next_tx_seq - chan->expected_ack_seq) % 64;
3572 if (next_tx_seq_offset < 0)
3573 next_tx_seq_offset += 64;
3575 /* check for invalid req-seq */
3576 if (req_seq_offset > next_tx_seq_offset) {
3577 l2cap_send_disconn_req(pi->conn, chan, ECONNRESET);
3581 if (__is_iframe(control)) {
3583 l2cap_send_disconn_req(pi->conn, chan, ECONNRESET);
3587 l2cap_data_channel_iframe(chan, control, skb);
3591 l2cap_send_disconn_req(pi->conn, chan, ECONNRESET);
3595 l2cap_data_channel_sframe(chan, control, skb);
/*
 * Route a data frame to the channel identified by its source CID and
 * process it according to the channel mode (basic / ERTM / streaming).
 * Unknown CIDs and non-connected channels are handled on the elided
 * error paths.
 */
3605 static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
3607 struct l2cap_chan *chan;
3609 struct l2cap_pinfo *pi;
3614 chan = l2cap_get_chan_by_scid(conn, cid);
3616 BT_DBG("unknown cid 0x%4.4x", cid);
3623 BT_DBG("chan %p, len %d", chan, skb->len);
3625 if (sk->sk_state != BT_CONNECTED)
3629 case L2CAP_MODE_BASIC:
3630 /* If socket recv buffers overflows we drop data here
3631 * which is *bad* because L2CAP has to be reliable.
3632 * But we don't have any other choice. L2CAP doesn't
3633 * provide flow control mechanism. */
3635 if (pi->imtu < skb->len)
3638 if (!sock_queue_rcv_skb(sk, skb))
3642 case L2CAP_MODE_ERTM:
/* Process in-line if the socket isn't owned, else defer via backlog. */
3643 if (!sock_owned_by_user(sk)) {
3644 l2cap_ertm_data_rcv(sk, skb);
3646 if (sk_add_backlog(sk, skb))
3652 case L2CAP_MODE_STREAMING:
3653 control = get_unaligned_le16(skb->data);
3657 if (l2cap_check_fcs(pi, skb))
3660 if (__is_sar_start(control))
3663 if (pi->fcs == L2CAP_FCS_CRC16)
3666 if (len > pi->mps || len < 0 || __is_sframe(control))
3669 tx_seq = __get_txseq(control);
/* Streaming mode: out-of-sequence frames just resynchronize. */
3671 if (chan->expected_tx_seq == tx_seq)
3672 chan->expected_tx_seq = (chan->expected_tx_seq + 1) % 64;
3674 chan->expected_tx_seq = (tx_seq + 1) % 64;
3676 l2cap_streaming_reassembly_sdu(chan, skb, control);
3681 BT_DBG("chan %p: bad mode 0x%2.2x", chan, pi->mode);
/*
 * Deliver a connectionless (PSM-addressed) frame to a matching bound or
 * connected socket, subject to its receive MTU.
 */
3695 static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
3699 sk = l2cap_get_sock_by_psm(0, psm, conn->src);
3705 BT_DBG("sk %p, len %d", sk, skb->len);
3707 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_CONNECTED)
3710 if (l2cap_pi(sk)->imtu < skb->len)
3713 if (!sock_queue_rcv_skb(sk, skb))
/*
 * Deliver an ATT (LE fixed-channel) frame to a matching bound or
 * connected socket, subject to its receive MTU.  Mirrors
 * l2cap_conless_channel() but looks the socket up by CID.
 */
3725 static inline int l2cap_att_channel(struct l2cap_conn *conn, __le16 cid, struct sk_buff *skb)
3729 sk = l2cap_get_sock_by_scid(0, cid, conn->src);
3735 BT_DBG("sk %p, len %d", sk, skb->len);
3737 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_CONNECTED)
3740 if (l2cap_pi(sk)->imtu < skb->len)
3743 if (!sock_queue_rcv_skb(sk, skb))
/*
 * Top-level demux for a fully reassembled L2CAP frame: strip the basic
 * header, validate the length, and route by CID (signaling,
 * connectionless, LE/ATT, or a data channel).
 */
3755 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
3757 struct l2cap_hdr *lh = (void *) skb->data;
3761 skb_pull(skb, L2CAP_HDR_SIZE);
3762 cid = __le16_to_cpu(lh->cid);
3763 len = __le16_to_cpu(lh->len);
/* Header length must match what we actually reassembled. */
3765 if (len != skb->len) {
3770 BT_DBG("len %d, cid 0x%4.4x", len, cid);
3773 case L2CAP_CID_LE_SIGNALING:
3774 case L2CAP_CID_SIGNALING:
3775 l2cap_sig_channel(conn, skb);
3778 case L2CAP_CID_CONN_LESS:
/* Connectionless frames carry the PSM in the first payload bytes. */
3779 psm = get_unaligned_le16(skb->data);
3781 l2cap_conless_channel(conn, psm, skb);
3784 case L2CAP_CID_LE_DATA:
3785 l2cap_att_channel(conn, cid, skb);
3789 l2cap_data_channel(conn, cid, skb);
3794 /* ---- L2CAP interface with lower layer (HCI) ---- */
/*
 * HCI callback: an incoming ACL connection is being indicated.  Scan the
 * listening L2CAP sockets; an exact-address match (lm1) takes priority
 * over wildcard listeners (lm2).  Returns the accepted link-mode bits.
 */
3796 static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
3798 int exact = 0, lm1 = 0, lm2 = 0;
3799 register struct sock *sk;
3800 struct hlist_node *node;
/* Only classic ACL links are accepted here. */
3802 if (type != ACL_LINK)
3805 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
3807 /* Find listening sockets and check their link_mode */
3808 read_lock(&l2cap_sk_list.lock);
3809 sk_for_each(sk, node, &l2cap_sk_list.head) {
3810 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
3812 if (sk->sk_state != BT_LISTEN)
/* Listener bound to this adapter's own address: exact match. */
3815 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
3816 lm1 |= HCI_LM_ACCEPT;
3817 if (chan->role_switch)
3818 lm1 |= HCI_LM_MASTER;
/* Wildcard listener (BDADDR_ANY). */
3820 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
3821 lm2 |= HCI_LM_ACCEPT;
3822 if (chan->role_switch)
3823 lm2 |= HCI_LM_MASTER;
3826 read_unlock(&l2cap_sk_list.lock);
3828 return exact ? lm1 : lm2;
/*
 * HCI callback: connection establishment confirmed.  On success attach
 * an l2cap_conn and mark it ready; on failure tear the connection down
 * with the mapped error.
 */
3831 static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
3833 struct l2cap_conn *conn;
3835 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
/* L2CAP runs over ACL and LE links only. */
3837 if (!(hcon->type == ACL_LINK || hcon->type == LE_LINK))
3841 conn = l2cap_conn_add(hcon, status);
3843 l2cap_conn_ready(conn);
3845 l2cap_conn_del(hcon, bt_err(status));
/*
 * HCI callback: disconnection indicated.  Report the reason previously
 * recorded on the l2cap_conn (default handled on elided lines).
 */
3850 static int l2cap_disconn_ind(struct hci_conn *hcon)
3852 struct l2cap_conn *conn = hcon->l2cap_data;
3854 BT_DBG("hcon %p", hcon);
3856 if (hcon->type != ACL_LINK || !conn)
3859 return conn->disc_reason;
/*
 * HCI callback: disconnection confirmed — tear down the L2CAP state for
 * this ACL/LE link with the mapped error code.
 */
3862 static int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
3864 BT_DBG("hcon %p reason %d", hcon, reason);
3866 if (!(hcon->type == ACL_LINK || hcon->type == LE_LINK))
3869 l2cap_conn_del(hcon, bt_err(reason));
/*
 * React to an encryption change on a connected channel: losing
 * encryption gives MEDIUM-security channels a 5 s grace timer and closes
 * HIGH-security channels outright; gaining encryption clears the timer
 * for MEDIUM-security channels.
 */
3874 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
3876 struct sock *sk = chan->sk;
/* Only connection-oriented sockets carry a security requirement here. */
3878 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM)
3881 if (encrypt == 0x00) {
3882 if (chan->sec_level == BT_SECURITY_MEDIUM) {
3883 l2cap_sock_clear_timer(sk);
3884 l2cap_sock_set_timer(sk, HZ * 5);
3885 } else if (chan->sec_level == BT_SECURITY_HIGH)
3886 __l2cap_sock_close(sk, ECONNREFUSED);
3888 if (chan->sec_level == BT_SECURITY_MEDIUM)
3889 l2cap_sock_clear_timer(sk);
/*
 * HCI callback: authentication/encryption procedure completed.  Walk all
 * channels of the connection: established channels get their encryption
 * state re-checked; channels mid-connect either proceed with the
 * L2CAP Connect Request or answer a pending Connect with success or a
 * security block, depending on `status`.
 */
3893 static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
3895 struct l2cap_conn *conn = hcon->l2cap_data;
3896 struct l2cap_chan *chan;
3901 BT_DBG("conn %p", conn);
3903 read_lock(&conn->chan_lock);
3905 list_for_each_entry(chan, &conn->chan_l, list) {
3906 struct sock *sk = chan->sk;
/* Channels still waiting on connect completion are skipped here. */
3910 if (chan->conf_state & L2CAP_CONF_CONNECT_PEND) {
3915 if (!status && (sk->sk_state == BT_CONNECTED ||
3916 sk->sk_state == BT_CONFIG)) {
3917 l2cap_check_encryption(chan, encrypt);
/* Outgoing connect was gated on security: send the request now. */
3922 if (sk->sk_state == BT_CONNECT) {
3924 struct l2cap_conn_req req;
3925 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
3926 req.psm = l2cap_pi(sk)->psm;
3928 chan->ident = l2cap_get_ident(conn);
3929 chan->conf_state |= L2CAP_CONF_CONNECT_PEND;
3931 l2cap_send_cmd(conn, chan->ident,
3932 L2CAP_CONN_REQ, sizeof(req), &req);
3934 l2cap_sock_clear_timer(sk);
3935 l2cap_sock_set_timer(sk, HZ / 10);
/* Incoming connect was deferred: answer it now. */
3937 } else if (sk->sk_state == BT_CONNECT2) {
3938 struct l2cap_conn_rsp rsp;
3942 sk->sk_state = BT_CONFIG;
3943 result = L2CAP_CR_SUCCESS;
3945 sk->sk_state = BT_DISCONN;
3946 l2cap_sock_set_timer(sk, HZ / 10);
3947 result = L2CAP_CR_SEC_BLOCK;
3950 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
3951 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
3952 rsp.result = cpu_to_le16(result);
3953 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
3954 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
3961 read_unlock(&conn->chan_lock);
/*
 * HCI callback: raw ACL data received.  Reassembles L2CAP frames from
 * ACL start/continuation fragments in conn->rx_skb, validating the
 * length fields and the destination channel's MTU, then hands complete
 * frames to l2cap_recv_frame().  Malformed fragment streams mark the
 * connection unreliable (ECOMM).
 */
3966 static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
3968 struct l2cap_conn *conn = hcon->l2cap_data;
3971 conn = l2cap_conn_add(hcon, 0);
3976 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
3978 if (!(flags & ACL_CONT)) {
3979 struct l2cap_hdr *hdr;
3980 struct l2cap_chan *chan;
/* A start fragment while a reassembly is pending: drop the old one. */
3985 BT_ERR("Unexpected start frame (len %d)", skb->len);
3986 kfree_skb(conn->rx_skb);
3987 conn->rx_skb = NULL;
3989 l2cap_conn_unreliable(conn, ECOMM);
3992 /* Start fragment always begin with Basic L2CAP header */
3993 if (skb->len < L2CAP_HDR_SIZE) {
3994 BT_ERR("Frame is too short (len %d)", skb->len);
3995 l2cap_conn_unreliable(conn, ECOMM);
3999 hdr = (struct l2cap_hdr *) skb->data;
4000 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
4001 cid = __le16_to_cpu(hdr->cid);
4003 if (len == skb->len) {
4004 /* Complete frame received */
4005 l2cap_recv_frame(conn, skb);
4009 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
4011 if (skb->len > len) {
4012 BT_ERR("Frame is too long (len %d, expected len %d)",
4014 l2cap_conn_unreliable(conn, ECOMM);
4018 chan = l2cap_get_chan_by_scid(conn, cid);
/* Pre-check the destination channel's receive MTU before buffering. */
4020 if (chan && chan->sk) {
4021 struct sock *sk = chan->sk;
4023 if (l2cap_pi(sk)->imtu < len - L2CAP_HDR_SIZE) {
4024 BT_ERR("Frame exceeding recv MTU (len %d, "
4026 l2cap_pi(sk)->imtu);
4028 l2cap_conn_unreliable(conn, ECOMM);
4034 /* Allocate skb for the complete frame (with header) */
4035 conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
4039 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
4041 conn->rx_len = len - skb->len;
/* Continuation fragment path. */
4043 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
4045 if (!conn->rx_len) {
4046 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
4047 l2cap_conn_unreliable(conn, ECOMM);
4051 if (skb->len > conn->rx_len) {
4052 BT_ERR("Fragment is too long (len %d, expected %d)",
4053 skb->len, conn->rx_len);
4054 kfree_skb(conn->rx_skb);
4055 conn->rx_skb = NULL;
4057 l2cap_conn_unreliable(conn, ECOMM);
4061 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
4063 conn->rx_len -= skb->len;
4065 if (!conn->rx_len) {
4066 /* Complete frame received */
4067 l2cap_recv_frame(conn, conn->rx_skb);
4068 conn->rx_skb = NULL;
/*
 * debugfs seq_file show handler: dump one line per L2CAP socket with
 * addresses, state, PSM, MTUs and security level.
 */
4077 static int l2cap_debugfs_show(struct seq_file *f, void *p)
4080 struct hlist_node *node;
4082 read_lock_bh(&l2cap_sk_list.lock);
4084 sk_for_each(sk, node, &l2cap_sk_list.head) {
4085 struct l2cap_pinfo *pi = l2cap_pi(sk);
4087 seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
4088 batostr(&bt_sk(sk)->src),
4089 batostr(&bt_sk(sk)->dst),
4090 sk->sk_state, __le16_to_cpu(pi->psm),
4092 pi->imtu, pi->omtu, pi->chan->sec_level,
4096 read_unlock_bh(&l2cap_sk_list.lock);
/* debugfs open handler: bind the seq_file to l2cap_debugfs_show(). */
4101 static int l2cap_debugfs_open(struct inode *inode, struct file *file)
4103 return single_open(file, l2cap_debugfs_show, inode->i_private);
/* File operations for the /sys/kernel/debug/.../l2cap entry. */
4106 static const struct file_operations l2cap_debugfs_fops = {
4107 .open = l2cap_debugfs_open,
4109 .llseek = seq_lseek,
4110 .release = single_release,
/* debugfs dentry for the L2CAP debug file, created in l2cap_init(). */
4113 static struct dentry *l2cap_debugfs;
/* L2CAP's registration with the HCI core: event and data callbacks. */
4115 static struct hci_proto l2cap_hci_proto = {
4117 .id = HCI_PROTO_L2CAP,
4118 .connect_ind = l2cap_connect_ind,
4119 .connect_cfm = l2cap_connect_cfm,
4120 .disconn_ind = l2cap_disconn_ind,
4121 .disconn_cfm = l2cap_disconn_cfm,
4122 .security_cfm = l2cap_security_cfm,
4123 .recv_acldata = l2cap_recv_acldata
/*
 * Module init: register the socket family, create the busy-work
 * workqueue, register with the HCI core, and (best-effort) create the
 * debugfs entry.  Unwinds on failure via the elided error labels.
 */
4126 int __init l2cap_init(void)
4130 err = l2cap_init_sockets();
4134 _busy_wq = create_singlethread_workqueue("l2cap");
4140 err = hci_register_proto(&l2cap_hci_proto);
4142 BT_ERR("L2CAP protocol registration failed");
4143 bt_sock_unregister(BTPROTO_L2CAP);
/* debugfs failure is non-fatal: only log it. */
4148 l2cap_debugfs = debugfs_create_file("l2cap", 0444,
4149 bt_debugfs, NULL, &l2cap_debugfs_fops);
4151 BT_ERR("Failed to create L2CAP debug file");
4157 destroy_workqueue(_busy_wq);
4158 l2cap_cleanup_sockets();
/*
 * Module exit: remove debugfs entry, drain and destroy the busy
 * workqueue, unregister from HCI, and clean up the socket family —
 * the reverse of l2cap_init().
 */
4162 void l2cap_exit(void)
4164 debugfs_remove(l2cap_debugfs);
/* Flush before destroy so no busy_work runs after teardown. */
4166 flush_workqueue(_busy_wq);
4167 destroy_workqueue(_busy_wq);
4169 if (hci_unregister_proto(&l2cap_hci_proto) < 0)
4170 BT_ERR("L2CAP protocol unregistration failed");
4172 l2cap_cleanup_sockets();
/* Module parameter: set disable_ertm=1 to force basic mode only. */
4175 module_param(disable_ertm, bool, 0644);
4176 MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");