2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
6 Copyright (C) 2011 ProFUSION Embedded Systems
7 Copyright (c) 2012 Code Aurora Forum. All rights reserved.
9 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
11 This program is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License version 2 as
13 published by the Free Software Foundation;
15 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
16 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
18 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
19 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
20 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
21 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
22 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
24 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
25 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
26 SOFTWARE IS DISCLAIMED.
29 /* Bluetooth L2CAP core. */
31 #include <linux/module.h>
33 #include <linux/debugfs.h>
34 #include <linux/crc16.h>
36 #include <net/bluetooth/bluetooth.h>
37 #include <net/bluetooth/hci_core.h>
38 #include <net/bluetooth/l2cap.h>
39 #include <net/bluetooth/smp.h>
40 #include <net/bluetooth/a2mp.h>
/* Feature mask advertised in Information Responses; fixed channels on
 * by default. */
44 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
/* Supported fixed-channel bitmap; only the L2CAP signalling channel
 * bit is set here. */
45 static u8 l2cap_fixed_chan[8] = { L2CAP_FC_L2CAP, };
/* Global list of every L2CAP channel, guarded by chan_list_lock. */
47 static LIST_HEAD(chan_list);
48 static DEFINE_RWLOCK(chan_list_lock);
/* Forward declarations for helpers defined later in this file. */
50 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
51 u8 code, u8 ident, u16 dlen, void *data);
/* NOTE(review): the continuation of this prototype ("void *data);") is
 * not visible in this excerpt — interior lines appear elided. */
52 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
54 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
55 static void l2cap_send_disconn_req(struct l2cap_conn *conn,
56 struct l2cap_chan *chan, int err);
58 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
59 struct sk_buff_head *skbs, u8 event);
61 /* ---- L2CAP channels ---- */
63 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn, u16 cid)
67 list_for_each_entry(c, &conn->chan_l, list) {
74 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
78 list_for_each_entry(c, &conn->chan_l, list) {
85 /* Find channel with given SCID.
86 * Returns locked channel. */
87 static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
/* conn->chan_lock serializes the lookup against channel add/remove on
 * this connection.  NOTE(review): the declaration of c, the per-channel
 * lock taken on a hit, and the return are elided from this excerpt. */
91 mutex_lock(&conn->chan_lock);
92 c = __l2cap_get_chan_by_scid(conn, cid);
95 mutex_unlock(&conn->chan_lock);
100 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
102 struct l2cap_chan *c;
104 list_for_each_entry(c, &conn->chan_l, list) {
105 if (c->ident == ident)
111 static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
113 struct l2cap_chan *c;
115 list_for_each_entry(c, &chan_list, global_l) {
116 if (c->sport == psm && !bacmp(&bt_sk(c->sk)->src, src))
/* Bind @chan to a PSM for source address @src.  If @psm is zero, a free
 * dynamic PSM is auto-allocated instead.  The chan_list write lock is
 * held so the availability check and the assignment are atomic. */
122 int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
126 write_lock(&chan_list_lock);
/* Explicit PSM requested: fail if another channel already owns it on
 * the same source address. */
128 if (psm && __l2cap_global_chan_by_addr(psm, src)) {
/* Auto-allocation: scan 0x1001..0x10ff stepping by 2, so only odd
 * values are tried (valid PSMs have an odd least-significant octet). */
141 for (p = 0x1001; p < 0x1100; p += 2)
142 if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
143 chan->psm = cpu_to_le16(p);
144 chan->sport = cpu_to_le16(p);
151 write_unlock(&chan_list_lock);
155 int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
157 write_lock(&chan_list_lock);
161 write_unlock(&chan_list_lock);
/* Return the first dynamic source CID not already used by a channel on
 * this connection.  NOTE(review): the return statements (both the hit
 * and the exhausted-range case) are elided from this excerpt. */
166 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
168 u16 cid = L2CAP_CID_DYN_START;
170 for (; cid < L2CAP_CID_DYN_END; cid++) {
171 if (!__l2cap_get_chan_by_scid(conn, cid))
178 static void __l2cap_state_change(struct l2cap_chan *chan, int state)
180 BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
181 state_to_string(state));
184 chan->ops->state_change(chan, state);
187 static void l2cap_state_change(struct l2cap_chan *chan, int state)
189 struct sock *sk = chan->sk;
192 __l2cap_state_change(chan, state);
196 static inline void __l2cap_chan_set_err(struct l2cap_chan *chan, int err)
198 struct sock *sk = chan->sk;
203 static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
205 struct sock *sk = chan->sk;
208 __l2cap_chan_set_err(chan, err);
212 static void __set_retrans_timer(struct l2cap_chan *chan)
214 if (!delayed_work_pending(&chan->monitor_timer) &&
215 chan->retrans_timeout) {
216 l2cap_set_timer(chan, &chan->retrans_timer,
217 msecs_to_jiffies(chan->retrans_timeout));
221 static void __set_monitor_timer(struct l2cap_chan *chan)
223 __clear_retrans_timer(chan);
224 if (chan->monitor_timeout) {
225 l2cap_set_timer(chan, &chan->monitor_timer,
226 msecs_to_jiffies(chan->monitor_timeout));
230 static struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head,
235 skb_queue_walk(head, skb) {
236 if (bt_cb(skb)->control.txseq == seq)
243 /* ---- L2CAP sequence number lists ---- */
245 /* For ERTM, ordered lists of sequence numbers must be tracked for
246 * SREJ requests that are received and for frames that are to be
247 * retransmitted. These seq_list functions implement a singly-linked
248 * list in an array, where membership in the list can also be checked
249 * in constant time. Items can also be added to the tail of the list
250 * and removed from the head in constant time, without further memory
/* Allocate and reset a sequence list able to hold @size entries. */
254 static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size)
256 size_t alloc_size, i;
258 /* Allocated size is a power of 2 to map sequence numbers
259 * (which may be up to 14 bits) in to a smaller array that is
260 * sized for the negotiated ERTM transmit windows.
262 alloc_size = roundup_pow_of_two(size);
/* NOTE(review): the NULL check after this kmalloc is elided from this
 * excerpt — confirm the original returns -ENOMEM on failure. */
264 seq_list->list = kmalloc(sizeof(u16) * alloc_size, GFP_KERNEL);
/* Power-of-two size makes "seq & mask" a cheap modulo. */
268 seq_list->mask = alloc_size - 1;
269 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
270 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
/* Mark every slot empty so membership tests start out false. */
271 for (i = 0; i < alloc_size; i++)
272 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
/* Release the backing array allocated by l2cap_seq_list_init(). */
277 static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list)
279 kfree(seq_list->list);
282 static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
285 /* Constant-time check for list membership */
286 return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;
/* Remove @seq from the list and return it; returns L2CAP_SEQ_LIST_CLEAR
 * if the list is empty or @seq is not found.  Removing the head is
 * constant time; removing an interior entry walks the chain. */
289 static u16 l2cap_seq_list_remove(struct l2cap_seq_list *seq_list, u16 seq)
291 u16 mask = seq_list->mask;
293 if (seq_list->head == L2CAP_SEQ_LIST_CLEAR) {
294 /* In case someone tries to pop the head of an empty list */
295 return L2CAP_SEQ_LIST_CLEAR;
296 } else if (seq_list->head == seq) {
297 /* Head can be removed in constant time */
298 seq_list->head = seq_list->list[seq & mask];
299 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
/* If the head was also the tail, the list is now empty. */
301 if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
302 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
303 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
306 /* Walk the list to find the sequence number */
307 u16 prev = seq_list->head;
308 while (seq_list->list[prev & mask] != seq) {
309 prev = seq_list->list[prev & mask];
310 if (prev == L2CAP_SEQ_LIST_TAIL)
311 return L2CAP_SEQ_LIST_CLEAR;
314 /* Unlink the number from the list and clear it */
315 seq_list->list[prev & mask] = seq_list->list[seq & mask];
316 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
317 if (seq_list->tail == seq)
318 seq_list->tail = prev;
/* Pop and return the list head (L2CAP_SEQ_LIST_CLEAR when empty). */
323 static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
325 /* Remove the head in constant time */
326 return l2cap_seq_list_remove(seq_list, seq_list->head);
329 static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
333 if (seq_list->head == L2CAP_SEQ_LIST_CLEAR)
336 for (i = 0; i <= seq_list->mask; i++)
337 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
339 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
340 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
/* Append @seq to the tail of the list in constant time; duplicate
 * appends are silently ignored (membership is tracked per slot). */
343 static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
345 u16 mask = seq_list->mask;
347 /* All appends happen in constant time */
/* Already a member — nothing to do. */
349 if (seq_list->list[seq & mask] != L2CAP_SEQ_LIST_CLEAR)
/* Empty list: the new entry becomes the head as well. */
352 if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
353 seq_list->head = seq;
/* Link the old tail to the new entry, then terminate the chain. */
355 seq_list->list[seq_list->tail & mask] = seq;
357 seq_list->tail = seq;
358 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
/* Delayed-work handler for chan_timer: close the channel with a reason
 * derived from its state, then drop the timer's channel reference. */
361 static void l2cap_chan_timeout(struct work_struct *work)
363 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
365 struct l2cap_conn *conn = chan->conn;
368 BT_DBG("chan %p state %s", chan, state_to_string(chan->state));
/* Lock order: connection channel list first, then the channel. */
370 mutex_lock(&conn->chan_lock);
371 l2cap_chan_lock(chan);
/* A timeout on an established/configuring channel, or on an outgoing
 * connect above SDP security, is reported as ECONNREFUSED; the elided
 * else branch presumably selects a different errno — confirm. */
373 if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
374 reason = ECONNREFUSED;
375 else if (chan->state == BT_CONNECT &&
376 chan->sec_level != BT_SECURITY_SDP)
377 reason = ECONNREFUSED;
381 l2cap_chan_close(chan, reason);
383 l2cap_chan_unlock(chan);
385 chan->ops->close(chan);
386 mutex_unlock(&conn->chan_lock);
/* Balances the reference held while the timer was pending. */
388 l2cap_chan_put(chan);
/* Allocate a new channel, link it on the global channel list and hand
 * it back with one reference held.  NOTE(review): the NULL check after
 * kzalloc and the final return are elided from this excerpt. */
391 struct l2cap_chan *l2cap_chan_create(void)
393 struct l2cap_chan *chan;
395 chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
399 mutex_init(&chan->lock);
401 write_lock(&chan_list_lock);
402 list_add(&chan->global_l, &chan_list);
403 write_unlock(&chan_list_lock);
405 INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);
407 chan->state = BT_OPEN;
/* Initial reference owned by the caller. */
409 atomic_set(&chan->refcnt, 1);
411 /* This flag is cleared in l2cap_chan_ready() */
412 set_bit(CONF_NOT_COMPLETE, &chan->conf_state);
414 BT_DBG("chan %p", chan);
419 static void l2cap_chan_destroy(struct l2cap_chan *chan)
421 BT_DBG("chan %p", chan);
423 write_lock(&chan_list_lock);
424 list_del(&chan->global_l);
425 write_unlock(&chan_list_lock);
430 void l2cap_chan_hold(struct l2cap_chan *c)
432 BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->refcnt));
434 atomic_inc(&c->refcnt);
437 void l2cap_chan_put(struct l2cap_chan *c)
439 BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->refcnt));
441 if (atomic_dec_and_test(&c->refcnt))
442 l2cap_chan_destroy(c);
445 void l2cap_chan_set_defaults(struct l2cap_chan *chan)
447 chan->fcs = L2CAP_FCS_CRC16;
448 chan->max_tx = L2CAP_DEFAULT_MAX_TX;
449 chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
450 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
451 chan->ack_win = L2CAP_DEFAULT_TX_WINDOW;
452 chan->sec_level = BT_SECURITY_LOW;
454 set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
/* Attach @chan to @conn: assign CIDs/MTUs according to the channel
 * type, take a channel reference and link it on the connection's
 * channel list.  Caller holds conn->chan_lock (see l2cap_chan_add). */
457 static void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
459 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
460 __le16_to_cpu(chan->psm), chan->dcid);
461 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
466 switch (chan->chan_type) {
467 case L2CAP_CHAN_CONN_ORIENTED:
/* LE links use the fixed LE data channel CID in both directions. */
468 if (conn->hcon->type == LE_LINK) {
470 chan->omtu = L2CAP_DEFAULT_MTU;
471 chan->scid = L2CAP_CID_LE_DATA;
472 chan->dcid = L2CAP_CID_LE_DATA;
474 /* Alloc CID for connection-oriented socket */
475 chan->scid = l2cap_alloc_cid(conn);
476 chan->omtu = L2CAP_DEFAULT_MTU;
480 case L2CAP_CHAN_CONN_LESS:
481 /* Connectionless socket */
482 chan->scid = L2CAP_CID_CONN_LESS;
483 chan->dcid = L2CAP_CID_CONN_LESS;
484 chan->omtu = L2CAP_DEFAULT_MTU;
/* Fixed A2MP channel: dedicated CID and its own default MTUs. */
487 case L2CAP_CHAN_CONN_FIX_A2MP:
488 chan->scid = L2CAP_CID_A2MP;
489 chan->dcid = L2CAP_CID_A2MP;
490 chan->omtu = L2CAP_A2MP_DEFAULT_MTU;
491 chan->imtu = L2CAP_A2MP_DEFAULT_MTU;
495 /* Raw socket can send/recv signalling messages only */
496 chan->scid = L2CAP_CID_SIGNALING;
497 chan->dcid = L2CAP_CID_SIGNALING;
498 chan->omtu = L2CAP_DEFAULT_MTU;
/* Default best-effort flow spec for the local endpoint. */
501 chan->local_id = L2CAP_BESTEFFORT_ID;
502 chan->local_stype = L2CAP_SERV_BESTEFFORT;
503 chan->local_msdu = L2CAP_DEFAULT_MAX_SDU_SIZE;
504 chan->local_sdu_itime = L2CAP_DEFAULT_SDU_ITIME;
505 chan->local_acc_lat = L2CAP_DEFAULT_ACC_LAT;
506 chan->local_flush_to = L2CAP_DEFAULT_FLUSH_TO;
/* Reference owned by the connection's channel list. */
508 l2cap_chan_hold(chan);
510 list_add(&chan->list, &conn->chan_l);
513 void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
515 mutex_lock(&conn->chan_lock);
516 __l2cap_chan_add(conn, chan);
517 mutex_unlock(&conn->chan_lock);
/* Detach @chan from its connection and tear down per-mode state.
 * @err is propagated to the owner via ops->teardown. */
520 void l2cap_chan_del(struct l2cap_chan *chan, int err)
522 struct l2cap_conn *conn = chan->conn;
524 __clear_chan_timer(chan);
526 BT_DBG("chan %p, conn %p, err %d", chan, conn, err);
529 /* Delete from channel list */
530 list_del(&chan->list);
/* Drops the reference taken by __l2cap_chan_add(). */
532 l2cap_chan_put(chan);
/* A2MP channels do not hold their own hci_conn reference. */
536 if (chan->chan_type != L2CAP_CHAN_CONN_FIX_A2MP)
537 hci_conn_put(conn->hcon);
540 if (chan->ops->teardown)
541 chan->ops->teardown(chan, err);
542 if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state))
/* Per-mode cleanup; ERTM additionally owns timers, the SREJ queue and
 * both sequence lists. */
547 case L2CAP_MODE_BASIC:
550 case L2CAP_MODE_ERTM:
551 __clear_retrans_timer(chan);
552 __clear_monitor_timer(chan);
553 __clear_ack_timer(chan);
555 skb_queue_purge(&chan->srej_q);
557 l2cap_seq_list_free(&chan->srej_list);
558 l2cap_seq_list_free(&chan->retrans_list);
/* NOTE(review): ERTM presumably falls through to purge tx_q as well —
 * the intervening lines are elided from this excerpt. */
562 case L2CAP_MODE_STREAMING:
563 skb_queue_purge(&chan->tx_q);
570 void l2cap_chan_close(struct l2cap_chan *chan, int reason)
572 struct l2cap_conn *conn = chan->conn;
573 struct sock *sk = chan->sk;
575 BT_DBG("chan %p state %s sk %p", chan,
576 state_to_string(chan->state), sk);
578 switch (chan->state) {
580 if (chan->ops->teardown)
581 chan->ops->teardown(chan, 0);
586 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
587 conn->hcon->type == ACL_LINK) {
588 __set_chan_timer(chan, sk->sk_sndtimeo);
589 l2cap_send_disconn_req(conn, chan, reason);
591 l2cap_chan_del(chan, reason);
595 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
596 conn->hcon->type == ACL_LINK) {
597 struct l2cap_conn_rsp rsp;
600 if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags))
601 result = L2CAP_CR_SEC_BLOCK;
603 result = L2CAP_CR_BAD_PSM;
604 l2cap_state_change(chan, BT_DISCONN);
606 rsp.scid = cpu_to_le16(chan->dcid);
607 rsp.dcid = cpu_to_le16(chan->scid);
608 rsp.result = cpu_to_le16(result);
609 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
610 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
614 l2cap_chan_del(chan, reason);
619 l2cap_chan_del(chan, reason);
623 if (chan->ops->teardown)
624 chan->ops->teardown(chan, 0);
/* Map the channel type, PSM and security level to the HCI
 * authentication requirement used when securing the link. */
629 static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
/* Raw (signalling-only) channels use dedicated bonding. */
631 if (chan->chan_type == L2CAP_CHAN_RAW) {
632 switch (chan->sec_level) {
633 case BT_SECURITY_HIGH:
634 return HCI_AT_DEDICATED_BONDING_MITM;
635 case BT_SECURITY_MEDIUM:
636 return HCI_AT_DEDICATED_BONDING;
638 return HCI_AT_NO_BONDING;
/* SDP never requires bonding; LOW is promoted to the SDP level. */
640 } else if (chan->psm == __constant_cpu_to_le16(L2CAP_PSM_SDP)) {
641 if (chan->sec_level == BT_SECURITY_LOW)
642 chan->sec_level = BT_SECURITY_SDP;
644 if (chan->sec_level == BT_SECURITY_HIGH)
645 return HCI_AT_NO_BONDING_MITM;
647 return HCI_AT_NO_BONDING;
/* Everything else: general bonding, MITM-protected at HIGH. */
649 switch (chan->sec_level) {
650 case BT_SECURITY_HIGH:
651 return HCI_AT_GENERAL_BONDING_MITM;
652 case BT_SECURITY_MEDIUM:
653 return HCI_AT_GENERAL_BONDING;
655 return HCI_AT_NO_BONDING;
660 /* Service level security */
661 int l2cap_chan_check_security(struct l2cap_chan *chan)
663 struct l2cap_conn *conn = chan->conn;
666 auth_type = l2cap_get_auth_type(chan);
668 return hci_conn_security(conn->hcon, chan->sec_level, auth_type);
/* Return the next signalling-command identifier for @conn, wrapping
 * within the kernel-reserved 1..128 range under conn->lock. */
671 static u8 l2cap_get_ident(struct l2cap_conn *conn)
675 /* Get next available identificator.
676 * 1 - 128 are used by kernel.
677 * 129 - 199 are reserved.
678 * 200 - 254 are used by utilities like l2ping, etc.
681 spin_lock(&conn->lock);
/* NOTE(review): the wrap-around assignment after this check is elided
 * from this excerpt — presumably tx_ident resets to 1. */
683 if (++conn->tx_ident > 128)
688 spin_unlock(&conn->lock);
693 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
695 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
698 BT_DBG("code 0x%2.2x", code);
703 if (lmp_no_flush_capable(conn->hcon->hdev))
704 flags = ACL_START_NO_FLUSH;
708 bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
709 skb->priority = HCI_PRIO_MAX;
711 hci_send_acl(conn->hchan, skb, flags);
714 static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
716 struct hci_conn *hcon = chan->conn->hcon;
719 BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
722 if (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
723 lmp_no_flush_capable(hcon->hdev))
724 flags = ACL_START_NO_FLUSH;
728 bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
729 hci_send_acl(chan->conn->hchan, skb, flags);
732 static void __unpack_enhanced_control(u16 enh, struct l2cap_ctrl *control)
734 control->reqseq = (enh & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT;
735 control->final = (enh & L2CAP_CTRL_FINAL) >> L2CAP_CTRL_FINAL_SHIFT;
737 if (enh & L2CAP_CTRL_FRAME_TYPE) {
740 control->poll = (enh & L2CAP_CTRL_POLL) >> L2CAP_CTRL_POLL_SHIFT;
741 control->super = (enh & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT;
748 control->sar = (enh & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT;
749 control->txseq = (enh & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT;
756 static void __unpack_extended_control(u32 ext, struct l2cap_ctrl *control)
758 control->reqseq = (ext & L2CAP_EXT_CTRL_REQSEQ) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT;
759 control->final = (ext & L2CAP_EXT_CTRL_FINAL) >> L2CAP_EXT_CTRL_FINAL_SHIFT;
761 if (ext & L2CAP_EXT_CTRL_FRAME_TYPE) {
764 control->poll = (ext & L2CAP_EXT_CTRL_POLL) >> L2CAP_EXT_CTRL_POLL_SHIFT;
765 control->super = (ext & L2CAP_EXT_CTRL_SUPERVISE) >> L2CAP_EXT_CTRL_SUPER_SHIFT;
772 control->sar = (ext & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT;
773 control->txseq = (ext & L2CAP_EXT_CTRL_TXSEQ) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT;
780 static inline void __unpack_control(struct l2cap_chan *chan,
783 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
784 __unpack_extended_control(get_unaligned_le32(skb->data),
785 &bt_cb(skb)->control);
786 skb_pull(skb, L2CAP_EXT_CTRL_SIZE);
788 __unpack_enhanced_control(get_unaligned_le16(skb->data),
789 &bt_cb(skb)->control);
790 skb_pull(skb, L2CAP_ENH_CTRL_SIZE);
794 static u32 __pack_extended_control(struct l2cap_ctrl *control)
798 packed = control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT;
799 packed |= control->final << L2CAP_EXT_CTRL_FINAL_SHIFT;
801 if (control->sframe) {
802 packed |= control->poll << L2CAP_EXT_CTRL_POLL_SHIFT;
803 packed |= control->super << L2CAP_EXT_CTRL_SUPER_SHIFT;
804 packed |= L2CAP_EXT_CTRL_FRAME_TYPE;
806 packed |= control->sar << L2CAP_EXT_CTRL_SAR_SHIFT;
807 packed |= control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT;
813 static u16 __pack_enhanced_control(struct l2cap_ctrl *control)
817 packed = control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT;
818 packed |= control->final << L2CAP_CTRL_FINAL_SHIFT;
820 if (control->sframe) {
821 packed |= control->poll << L2CAP_CTRL_POLL_SHIFT;
822 packed |= control->super << L2CAP_CTRL_SUPER_SHIFT;
823 packed |= L2CAP_CTRL_FRAME_TYPE;
825 packed |= control->sar << L2CAP_CTRL_SAR_SHIFT;
826 packed |= control->txseq << L2CAP_CTRL_TXSEQ_SHIFT;
832 static inline void __pack_control(struct l2cap_chan *chan,
833 struct l2cap_ctrl *control,
836 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
837 put_unaligned_le32(__pack_extended_control(control),
838 skb->data + L2CAP_HDR_SIZE);
840 put_unaligned_le16(__pack_enhanced_control(control),
841 skb->data + L2CAP_HDR_SIZE);
845 static inline unsigned int __ertm_hdr_size(struct l2cap_chan *chan)
847 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
848 return L2CAP_EXT_HDR_SIZE;
850 return L2CAP_ENH_HDR_SIZE;
853 static struct sk_buff *l2cap_create_sframe_pdu(struct l2cap_chan *chan,
857 struct l2cap_hdr *lh;
858 int hlen = __ertm_hdr_size(chan);
860 if (chan->fcs == L2CAP_FCS_CRC16)
861 hlen += L2CAP_FCS_SIZE;
863 skb = bt_skb_alloc(hlen, GFP_KERNEL);
866 return ERR_PTR(-ENOMEM);
868 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
869 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
870 lh->cid = cpu_to_le16(chan->dcid);
872 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
873 put_unaligned_le32(control, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
875 put_unaligned_le16(control, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
877 if (chan->fcs == L2CAP_FCS_CRC16) {
878 u16 fcs = crc16(0, (u8 *)skb->data, skb->len);
879 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
882 skb->priority = HCI_PRIO_MAX;
/* Build and transmit an ERTM supervisory frame described by @control,
 * updating RNR/ack bookkeeping as a side effect. */
886 static void l2cap_send_sframe(struct l2cap_chan *chan,
887 struct l2cap_ctrl *control)
892 BT_DBG("chan %p, control %p", chan, control);
/* Only S-frames may be sent through this path. */
894 if (!control->sframe)
/* Consume a pending F-bit if one is owed to the peer. */
897 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state) &&
/* Track whether our last supervisory frame signalled local busy. */
901 if (control->super == L2CAP_SUPER_RR)
902 clear_bit(CONN_RNR_SENT, &chan->conn_state);
903 else if (control->super == L2CAP_SUPER_RNR)
904 set_bit(CONN_RNR_SENT, &chan->conn_state);
/* Any non-SREJ S-frame acknowledges reqseq, so the ack timer can be
 * cancelled. */
906 if (control->super != L2CAP_SUPER_SREJ) {
907 chan->last_acked_seq = control->reqseq;
908 __clear_ack_timer(chan);
911 BT_DBG("reqseq %d, final %d, poll %d, super %d", control->reqseq,
912 control->final, control->poll, control->super);
/* Pack into the extended or enhanced control-field layout as
 * negotiated for this channel. */
914 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
915 control_field = __pack_extended_control(control);
917 control_field = __pack_enhanced_control(control);
919 skb = l2cap_create_sframe_pdu(chan, control_field);
921 l2cap_do_send(chan, skb);
924 static void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, bool poll)
926 struct l2cap_ctrl control;
928 BT_DBG("chan %p, poll %d", chan, poll);
930 memset(&control, 0, sizeof(control));
934 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
935 control.super = L2CAP_SUPER_RNR;
937 control.super = L2CAP_SUPER_RR;
939 control.reqseq = chan->buffer_seq;
940 l2cap_send_sframe(chan, &control);
/* True when no Connect Request is outstanding for this channel. */
943 static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
945 return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
948 static void l2cap_send_conn_req(struct l2cap_chan *chan)
950 struct l2cap_conn *conn = chan->conn;
951 struct l2cap_conn_req req;
953 req.scid = cpu_to_le16(chan->scid);
956 chan->ident = l2cap_get_ident(conn);
958 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
960 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
963 static void l2cap_chan_ready(struct l2cap_chan *chan)
965 /* This clears all conf flags, including CONF_NOT_COMPLETE */
966 chan->conf_state = 0;
967 __clear_chan_timer(chan);
969 chan->state = BT_CONNECTED;
971 chan->ops->ready(chan);
974 static void l2cap_do_start(struct l2cap_chan *chan)
976 struct l2cap_conn *conn = chan->conn;
978 if (conn->hcon->type == LE_LINK) {
979 l2cap_chan_ready(chan);
983 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
984 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
987 if (l2cap_chan_check_security(chan) &&
988 __l2cap_no_conn_pending(chan))
989 l2cap_send_conn_req(chan);
991 struct l2cap_info_req req;
992 req.type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);
994 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
995 conn->info_ident = l2cap_get_ident(conn);
997 schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
999 l2cap_send_cmd(conn, conn->info_ident,
1000 L2CAP_INFO_REQ, sizeof(req), &req);
/* Non-zero when @mode is supported both locally and by the remote
 * feature mask @feat_mask.  NOTE(review): the condition guarding the
 * ERTM/streaming bits (likely a debugfs/module toggle) is elided. */
1004 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
1006 u32 local_feat_mask = l2cap_feat_mask;
1008 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
1011 case L2CAP_MODE_ERTM:
1012 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
1013 case L2CAP_MODE_STREAMING:
1014 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
/* Send a Disconnect Request for @chan (stopping ERTM timers first),
 * move it to BT_DISCONN and record @err on the socket.  A2MP channels
 * have no signalling exchange and just change state. */
1020 static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct l2cap_chan *chan, int err)
1022 struct sock *sk = chan->sk;
1023 struct l2cap_disconn_req req;
/* Stop all ERTM machinery before tearing the channel down. */
1028 if (chan->mode == L2CAP_MODE_ERTM) {
1029 __clear_retrans_timer(chan);
1030 __clear_monitor_timer(chan);
1031 __clear_ack_timer(chan);
1034 if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) {
1035 __l2cap_state_change(chan, BT_DISCONN);
1039 req.dcid = cpu_to_le16(chan->dcid);
1040 req.scid = cpu_to_le16(chan->scid);
1041 l2cap_send_cmd(conn, l2cap_get_ident(conn),
1042 L2CAP_DISCONN_REQ, sizeof(req), &req);
1045 __l2cap_state_change(chan, BT_DISCONN);
1046 __l2cap_chan_set_err(chan, err);
1050 /* ---- L2CAP connections ---- */
1051 static void l2cap_conn_start(struct l2cap_conn *conn)
1053 struct l2cap_chan *chan, *tmp;
1055 BT_DBG("conn %p", conn);
1057 mutex_lock(&conn->chan_lock);
1059 list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
1060 struct sock *sk = chan->sk;
1062 l2cap_chan_lock(chan);
1064 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1065 l2cap_chan_unlock(chan);
1069 if (chan->state == BT_CONNECT) {
1070 if (!l2cap_chan_check_security(chan) ||
1071 !__l2cap_no_conn_pending(chan)) {
1072 l2cap_chan_unlock(chan);
1076 if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
1077 && test_bit(CONF_STATE2_DEVICE,
1078 &chan->conf_state)) {
1079 l2cap_chan_close(chan, ECONNRESET);
1080 l2cap_chan_unlock(chan);
1084 l2cap_send_conn_req(chan);
1086 } else if (chan->state == BT_CONNECT2) {
1087 struct l2cap_conn_rsp rsp;
1089 rsp.scid = cpu_to_le16(chan->dcid);
1090 rsp.dcid = cpu_to_le16(chan->scid);
1092 if (l2cap_chan_check_security(chan)) {
1094 if (test_bit(BT_SK_DEFER_SETUP,
1095 &bt_sk(sk)->flags)) {
1096 struct sock *parent = bt_sk(sk)->parent;
1097 rsp.result = __constant_cpu_to_le16(L2CAP_CR_PEND);
1098 rsp.status = __constant_cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
1100 parent->sk_data_ready(parent, 0);
1103 __l2cap_state_change(chan, BT_CONFIG);
1104 rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
1105 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
1109 rsp.result = __constant_cpu_to_le16(L2CAP_CR_PEND);
1110 rsp.status = __constant_cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
1113 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
1116 if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
1117 rsp.result != L2CAP_CR_SUCCESS) {
1118 l2cap_chan_unlock(chan);
1122 set_bit(CONF_REQ_SENT, &chan->conf_state);
1123 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
1124 l2cap_build_conf_req(chan, buf), buf);
1125 chan->num_conf_req++;
1128 l2cap_chan_unlock(chan);
1131 mutex_unlock(&conn->chan_lock);
1134 /* Find socket with cid and source/destination bdaddr.
1135 * Returns closest match, locked.
1137 static struct l2cap_chan *l2cap_global_chan_by_scid(int state, u16 cid,
1141 struct l2cap_chan *c, *c1 = NULL;
1143 read_lock(&chan_list_lock);
1145 list_for_each_entry(c, &chan_list, global_l) {
1146 struct sock *sk = c->sk;
1148 if (state && c->state != state)
1151 if (c->scid == cid) {
1152 int src_match, dst_match;
1153 int src_any, dst_any;
1156 src_match = !bacmp(&bt_sk(sk)->src, src);
1157 dst_match = !bacmp(&bt_sk(sk)->dst, dst);
1158 if (src_match && dst_match) {
1159 read_unlock(&chan_list_lock);
1164 src_any = !bacmp(&bt_sk(sk)->src, BDADDR_ANY);
1165 dst_any = !bacmp(&bt_sk(sk)->dst, BDADDR_ANY);
1166 if ((src_match && dst_any) || (src_any && dst_match) ||
1167 (src_any && dst_any))
1172 read_unlock(&chan_list_lock);
1177 static void l2cap_le_conn_ready(struct l2cap_conn *conn)
1179 struct sock *parent, *sk;
1180 struct l2cap_chan *chan, *pchan;
1184 /* Check if we have socket listening on cid */
1185 pchan = l2cap_global_chan_by_scid(BT_LISTEN, L2CAP_CID_LE_DATA,
1186 conn->src, conn->dst);
1194 chan = pchan->ops->new_connection(pchan);
1200 hci_conn_hold(conn->hcon);
1201 conn->hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
1203 bacpy(&bt_sk(sk)->src, conn->src);
1204 bacpy(&bt_sk(sk)->dst, conn->dst);
1206 bt_accept_enqueue(parent, sk);
1208 l2cap_chan_add(conn, chan);
1210 l2cap_chan_ready(chan);
1213 release_sock(parent);
1216 static void l2cap_conn_ready(struct l2cap_conn *conn)
1218 struct l2cap_chan *chan;
1220 BT_DBG("conn %p", conn);
1222 if (!conn->hcon->out && conn->hcon->type == LE_LINK)
1223 l2cap_le_conn_ready(conn);
1225 if (conn->hcon->out && conn->hcon->type == LE_LINK)
1226 smp_conn_security(conn, conn->hcon->pending_sec_level);
1228 mutex_lock(&conn->chan_lock);
1230 list_for_each_entry(chan, &conn->chan_l, list) {
1232 l2cap_chan_lock(chan);
1234 if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) {
1235 l2cap_chan_unlock(chan);
1239 if (conn->hcon->type == LE_LINK) {
1240 if (smp_conn_security(conn, chan->sec_level))
1241 l2cap_chan_ready(chan);
1243 } else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1244 struct sock *sk = chan->sk;
1245 __clear_chan_timer(chan);
1247 __l2cap_state_change(chan, BT_CONNECTED);
1248 sk->sk_state_change(sk);
1251 } else if (chan->state == BT_CONNECT)
1252 l2cap_do_start(chan);
1254 l2cap_chan_unlock(chan);
1257 mutex_unlock(&conn->chan_lock);
1260 /* Notify sockets that we cannot guaranty reliability anymore */
1261 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
1263 struct l2cap_chan *chan;
1265 BT_DBG("conn %p", conn);
1267 mutex_lock(&conn->chan_lock);
1269 list_for_each_entry(chan, &conn->chan_l, list) {
1270 if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
1271 __l2cap_chan_set_err(chan, err);
1274 mutex_unlock(&conn->chan_lock);
1277 static void l2cap_info_timeout(struct work_struct *work)
1279 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1282 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
1283 conn->info_ident = 0;
1285 l2cap_conn_start(conn);
1288 static void l2cap_conn_del(struct hci_conn *hcon, int err)
1290 struct l2cap_conn *conn = hcon->l2cap_data;
1291 struct l2cap_chan *chan, *l;
1296 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
1298 kfree_skb(conn->rx_skb);
1300 mutex_lock(&conn->chan_lock);
1303 list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
1304 l2cap_chan_hold(chan);
1305 l2cap_chan_lock(chan);
1307 l2cap_chan_del(chan, err);
1309 l2cap_chan_unlock(chan);
1311 chan->ops->close(chan);
1312 l2cap_chan_put(chan);
1315 mutex_unlock(&conn->chan_lock);
1317 hci_chan_del(conn->hchan);
1319 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
1320 cancel_delayed_work_sync(&conn->info_timer);
1322 if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &hcon->flags)) {
1323 cancel_delayed_work_sync(&conn->security_timer);
1324 smp_chan_destroy(conn);
1327 hcon->l2cap_data = NULL;
1331 static void security_timeout(struct work_struct *work)
1333 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1334 security_timer.work);
1336 BT_DBG("conn %p", conn);
1338 if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &conn->hcon->flags)) {
1339 smp_chan_destroy(conn);
1340 l2cap_conn_del(conn->hcon, ETIMEDOUT);
/* Create (or return the existing) l2cap_conn for @hcon, including its
 * HCI channel, locks, channel list and the appropriate delayed work
 * (SMP security timer for LE, info timer for ACL).  NOTE(review): the
 * early-return for an existing conn, the NULL checks and the final
 * return are elided from this excerpt. */
1344 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
1346 struct l2cap_conn *conn = hcon->l2cap_data;
1347 struct hci_chan *hchan;
1352 hchan = hci_chan_create(hcon);
1356 conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
/* Allocation failed: release the HCI channel created above. */
1358 hci_chan_del(hchan);
1362 hcon->l2cap_data = conn;
1364 conn->hchan = hchan;
1366 BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);
/* LE links may advertise their own MTU; otherwise use the ACL MTU. */
1368 if (hcon->hdev->le_mtu && hcon->type == LE_LINK)
1369 conn->mtu = hcon->hdev->le_mtu;
1371 conn->mtu = hcon->hdev->acl_mtu;
1373 conn->src = &hcon->hdev->bdaddr;
1374 conn->dst = &hcon->dst;
1376 conn->feat_mask = 0;
1378 spin_lock_init(&conn->lock);
1379 mutex_init(&conn->chan_lock);
1381 INIT_LIST_HEAD(&conn->chan_l);
/* LE uses the SMP security timeout; BR/EDR uses the info-request
 * timeout instead. */
1383 if (hcon->type == LE_LINK)
1384 INIT_DELAYED_WORK(&conn->security_timer, security_timeout);
1386 INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);
1388 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
1393 /* ---- Socket interface ---- */
1395 /* Find socket with psm and source / destination bdaddr.
1396 * Returns closest match.
1398 static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
1402 struct l2cap_chan *c, *c1 = NULL;
1404 read_lock(&chan_list_lock);
1406 list_for_each_entry(c, &chan_list, global_l) {
1407 struct sock *sk = c->sk;
1409 if (state && c->state != state)
1412 if (c->psm == psm) {
1413 int src_match, dst_match;
1414 int src_any, dst_any;
1417 src_match = !bacmp(&bt_sk(sk)->src, src);
1418 dst_match = !bacmp(&bt_sk(sk)->dst, dst);
1419 if (src_match && dst_match) {
1420 read_unlock(&chan_list_lock);
1425 src_any = !bacmp(&bt_sk(sk)->src, BDADDR_ANY);
1426 dst_any = !bacmp(&bt_sk(sk)->dst, BDADDR_ANY);
1427 if ((src_match && dst_any) || (src_any && dst_match) ||
1428 (src_any && dst_any))
1433 read_unlock(&chan_list_lock);
/* Initiate an outgoing L2CAP connection on @chan to @dst.
 * Validates the PSM/CID, resolves the HCI route, creates (or reuses)
 * the ACL/LE link, attaches the channel to the connection and kicks off
 * the connect state machine.  Returns 0 or a negative errno.
 * (Error paths and several lines are elided in this view.)
 */
1438 int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
1439 bdaddr_t *dst, u8 dst_type)
1441 struct sock *sk = chan->sk;
1442 bdaddr_t *src = &bt_sk(sk)->src;
1443 struct l2cap_conn *conn;
1444 struct hci_conn *hcon;
1445 struct hci_dev *hdev;
1449 BT_DBG("%s -> %s (type %u) psm 0x%2.2x", batostr(src), batostr(dst),
1450 dst_type, __le16_to_cpu(chan->psm));
1452 hdev = hci_get_route(dst, src);
1454 return -EHOSTUNREACH;
1458 l2cap_chan_lock(chan);
1460 /* PSM must be odd and lsb of upper byte must be 0 */
1461 if ((__le16_to_cpu(psm) & 0x0101) != 0x0001 && !cid &&
1462 chan->chan_type != L2CAP_CHAN_RAW) {
/* Connection-oriented channels need either a PSM or a fixed CID */
1467 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !(psm || cid)) {
1472 switch (chan->mode) {
1473 case L2CAP_MODE_BASIC:
1475 case L2CAP_MODE_ERTM:
1476 case L2CAP_MODE_STREAMING:
1485 switch (chan->state) {
1489 /* Already connecting */
1494 /* Already connected */
1508 /* Set destination address and psm */
1510 bacpy(&bt_sk(sk)->dst, dst);
1516 auth_type = l2cap_get_auth_type(chan);
/* The LE data CID selects an LE link; everything else uses ACL */
1518 if (chan->dcid == L2CAP_CID_LE_DATA)
1519 hcon = hci_connect(hdev, LE_LINK, dst, dst_type,
1520 chan->sec_level, auth_type);
1522 hcon = hci_connect(hdev, ACL_LINK, dst, dst_type,
1523 chan->sec_level, auth_type);
1526 err = PTR_ERR(hcon);
1530 conn = l2cap_conn_add(hcon, 0);
/* LE links support only one channel per connection */
1537 if (hcon->type == LE_LINK) {
1540 if (!list_empty(&conn->chan_l)) {
1549 /* Update source addr of the socket */
1550 bacpy(src, conn->src);
/* l2cap_chan_add takes conn->chan_lock, so drop the channel lock around it
 * to preserve lock ordering.
 */
1552 l2cap_chan_unlock(chan);
1553 l2cap_chan_add(conn, chan);
1554 l2cap_chan_lock(chan);
1556 l2cap_state_change(chan, BT_CONNECT);
1557 __set_chan_timer(chan, sk->sk_sndtimeo);
/* If the link is already up, start channel setup immediately */
1559 if (hcon->state == BT_CONNECTED) {
1560 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1561 __clear_chan_timer(chan);
1562 if (l2cap_chan_check_security(chan))
1563 l2cap_state_change(chan, BT_CONNECTED);
1565 l2cap_do_start(chan);
1571 l2cap_chan_unlock(chan);
1572 hci_dev_unlock(hdev);
/* Sleep (interruptibly) until all outstanding ERTM I-frames on the
 * socket's channel have been acked, the connection goes away, a signal
 * arrives, or a socket error is pending.  Returns 0 or a negative errno.
 */
1577 int __l2cap_wait_ack(struct sock *sk)
1579 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
1580 DECLARE_WAITQUEUE(wait, current);
1584 add_wait_queue(sk_sleep(sk), &wait);
1585 set_current_state(TASK_INTERRUPTIBLE);
1586 while (chan->unacked_frames > 0 && chan->conn) {
1590 if (signal_pending(current)) {
1591 err = sock_intr_errno(timeo);
1596 timeo = schedule_timeout(timeo);
/* Re-arm the task state before re-checking the loop condition */
1598 set_current_state(TASK_INTERRUPTIBLE);
1600 err = sock_error(sk);
1604 set_current_state(TASK_RUNNING);
1605 remove_wait_queue(sk_sleep(sk), &wait);
/* Delayed-work handler for the ERTM monitor timer: feeds a MONITOR_TO
 * event into the TX state machine.  Drops the channel reference taken
 * when the timer was armed.  (Early-exit path lines are elided here.)
 */
1609 static void l2cap_monitor_timeout(struct work_struct *work)
1611 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1612 monitor_timer.work);
1614 BT_DBG("chan %p", chan);
1616 l2cap_chan_lock(chan);
/* Bail-out path: channel no longer usable, just release and return */
1619 l2cap_chan_unlock(chan);
1620 l2cap_chan_put(chan);
1624 l2cap_tx(chan, NULL, NULL, L2CAP_EV_MONITOR_TO);
1626 l2cap_chan_unlock(chan);
1627 l2cap_chan_put(chan);
/* Delayed-work handler for the ERTM retransmission timer: feeds a
 * RETRANS_TO event into the TX state machine and drops the timer's
 * channel reference.  (Early-exit path lines are elided here.)
 */
1630 static void l2cap_retrans_timeout(struct work_struct *work)
1632 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1633 retrans_timer.work);
1635 BT_DBG("chan %p", chan);
1637 l2cap_chan_lock(chan);
/* Bail-out path: channel no longer usable, just release and return */
1640 l2cap_chan_unlock(chan);
1641 l2cap_chan_put(chan);
1645 l2cap_tx(chan, NULL, NULL, L2CAP_EV_RETRANS_TO);
1646 l2cap_chan_unlock(chan);
1647 l2cap_chan_put(chan);
/* Transmit all queued PDUs in streaming mode: append @skbs to the TX
 * queue, then drain it, stamping each frame with the next TX sequence
 * number and an optional CRC16 FCS before handing it to the HCI layer.
 * Streaming mode has no retransmission, so frames are sent and forgotten.
 */
1650 static void l2cap_streaming_send(struct l2cap_chan *chan,
1651 struct sk_buff_head *skbs)
1653 struct sk_buff *skb;
1654 struct l2cap_ctrl *control;
1656 BT_DBG("chan %p, skbs %p", chan, skbs);
1658 skb_queue_splice_tail_init(skbs, &chan->tx_q);
1660 while (!skb_queue_empty(&chan->tx_q)) {
1662 skb = skb_dequeue(&chan->tx_q);
1664 bt_cb(skb)->control.retries = 1;
1665 control = &bt_cb(skb)->control;
/* Streaming frames never carry an ack (reqseq is always 0) */
1667 control->reqseq = 0;
1668 control->txseq = chan->next_tx_seq;
1670 __pack_control(chan, control, skb);
1672 if (chan->fcs == L2CAP_FCS_CRC16) {
1673 u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
1674 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
1677 l2cap_do_send(chan, skb);
1679 BT_DBG("Sent txseq %u", control->txseq);
1681 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
1682 chan->frames_sent++;
/* Transmit as many queued ERTM I-frames as the remote TX window allows.
 * Each frame is stamped with txseq/reqseq, gets an optional CRC16 FCS,
 * and a clone is sent so the original stays queued for retransmission.
 * Stops when the window is full, the queue is drained, or the TX state
 * machine leaves XMIT.  Returns the number of frames sent.
 */
1686 static int l2cap_ertm_send(struct l2cap_chan *chan)
1688 struct sk_buff *skb, *tx_skb;
1689 struct l2cap_ctrl *control;
1692 BT_DBG("chan %p", chan);
1694 if (chan->state != BT_CONNECTED)
/* Remote signalled Receiver-Not-Ready: hold all transmissions */
1697 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
1700 while (chan->tx_send_head &&
1701 chan->unacked_frames < chan->remote_tx_win &&
1702 chan->tx_state == L2CAP_TX_STATE_XMIT) {
1704 skb = chan->tx_send_head;
1706 bt_cb(skb)->control.retries = 1;
1707 control = &bt_cb(skb)->control;
1709 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
/* Piggy-back an acknowledgement for everything received so far */
1712 control->reqseq = chan->buffer_seq;
1713 chan->last_acked_seq = chan->buffer_seq;
1714 control->txseq = chan->next_tx_seq;
1716 __pack_control(chan, control, skb);
1718 if (chan->fcs == L2CAP_FCS_CRC16) {
1719 u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
1720 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
1723 /* Clone after data has been modified. Data is assumed to be
1724 read-only (for locking purposes) on cloned sk_buffs.
1726 tx_skb = skb_clone(skb, GFP_KERNEL);
1731 __set_retrans_timer(chan);
1733 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
1734 chan->unacked_frames++;
1735 chan->frames_sent++;
/* Advance tx_send_head to the next unsent frame (NULL at queue end) */
1738 if (skb_queue_is_last(&chan->tx_q, skb))
1739 chan->tx_send_head = NULL;
1741 chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);
1743 l2cap_do_send(chan, tx_skb);
1744 BT_DBG("Sent txseq %u", control->txseq);
1747 BT_DBG("Sent %d, %u unacked, %u in ERTM queue", sent,
1748 chan->unacked_frames, skb_queue_len(&chan->tx_q));
/* Retransmit every frame whose sequence number is on retrans_list.
 * Enforces the max_tx retry limit (disconnecting when exceeded),
 * copies cloned skbs so their headers can be rewritten, refreshes the
 * control field (reqseq/F-bit) and FCS, and re-sends each frame.
 */
1755 static void l2cap_ertm_resend(struct l2cap_chan *chan)
1756 struct l2cap_ctrl control;
1757 struct sk_buff *skb;
1758 struct sk_buff *tx_skb;
1760 BT_DBG("chan %p", chan);
1762 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
1765 while (chan->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) {
1766 seq = l2cap_seq_list_pop(&chan->retrans_list);
1768 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, seq);
1770 BT_DBG("Error: Can't retransmit seq %d, frame missing",
1775 bt_cb(skb)->control.retries++;
1776 control = bt_cb(skb)->control;
/* Give up and tear the channel down once the retry limit is hit */
1778 if (chan->max_tx != 0 &&
1779 bt_cb(skb)->control.retries > chan->max_tx) {
1780 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
1781 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
1782 l2cap_seq_list_clear(&chan->retrans_list);
1786 control.reqseq = chan->buffer_seq;
1787 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1792 if (skb_cloned(skb)) {
1793 /* Cloned sk_buffs are read-only, so we need a
1796 tx_skb = skb_copy(skb, GFP_ATOMIC);
1798 tx_skb = skb_clone(skb, GFP_ATOMIC);
/* Allocation failure: abandon the remaining retransmissions */
1802 l2cap_seq_list_clear(&chan->retrans_list);
1806 /* Update skb contents */
1807 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
1808 put_unaligned_le32(__pack_extended_control(&control),
1809 tx_skb->data + L2CAP_HDR_SIZE);
1811 put_unaligned_le16(__pack_enhanced_control(&control),
1812 tx_skb->data + L2CAP_HDR_SIZE);
/* FCS covers the rewritten header, so it must be recomputed */
1815 if (chan->fcs == L2CAP_FCS_CRC16) {
1816 u16 fcs = crc16(0, (u8 *) tx_skb->data, tx_skb->len);
1817 put_unaligned_le16(fcs, skb_put(tx_skb,
1821 l2cap_do_send(chan, tx_skb);
1823 BT_DBG("Resent txseq %d", control.txseq);
1825 chan->last_acked_seq = chan->buffer_seq;
/* Retransmit the single frame identified by control->reqseq by queueing
 * its sequence number and running the resend engine.
 */
1829 static void l2cap_retransmit(struct l2cap_chan *chan,
1830 struct l2cap_ctrl *control)
1832 BT_DBG("chan %p, control %p", chan, control);
1834 l2cap_seq_list_append(&chan->retrans_list, control->reqseq);
1835 l2cap_ertm_resend(chan);
/* Retransmit all unacked frames starting at control->reqseq: rebuild
 * retrans_list by walking the TX queue from the first matching frame up
 * to (but not including) tx_send_head, then run the resend engine.
 */
1838 static void l2cap_retransmit_all(struct l2cap_chan *chan,
1839 struct l2cap_ctrl *control)
1841 struct sk_buff *skb;
1843 BT_DBG("chan %p, control %p", chan, control);
1846 set_bit(CONN_SEND_FBIT, &chan->conn_state);
/* Start from a clean slate; the list is rebuilt below */
1848 l2cap_seq_list_clear(&chan->retrans_list);
1850 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
1853 if (chan->unacked_frames) {
/* First pass: locate the frame with txseq == reqseq (or tx_send_head) */
1854 skb_queue_walk(&chan->tx_q, skb) {
1855 if (bt_cb(skb)->control.txseq == control->reqseq ||
1856 skb == chan->tx_send_head)
/* Second pass: queue every already-sent frame from there onwards */
1860 skb_queue_walk_from(&chan->tx_q, skb) {
1861 if (skb == chan->tx_send_head)
1864 l2cap_seq_list_append(&chan->retrans_list,
1865 bt_cb(skb)->control.txseq);
1868 l2cap_ertm_resend(chan);
/* Acknowledge received I-frames.  Sends RNR when locally busy; otherwise
 * tries to piggy-back the ack on pending I-frames, sends an explicit RR
 * once ~3/4 of the ack window is outstanding, or (re)arms the ack timer
 * to ack later.
 */
1872 static void l2cap_send_ack(struct l2cap_chan *chan)
1874 struct l2cap_ctrl control;
1875 u16 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
1876 chan->last_acked_seq);
1879 BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
1880 chan, chan->last_acked_seq, chan->buffer_seq);
1882 memset(&control, 0, sizeof(control));
/* Locally busy: tell the peer to stop sending (RNR) */
1885 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
1886 chan->rx_state == L2CAP_RX_STATE_RECV) {
1887 __clear_ack_timer(chan);
1888 control.super = L2CAP_SUPER_RNR;
1889 control.reqseq = chan->buffer_seq;
1890 l2cap_send_sframe(chan, &control);
1892 if (!test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) {
1893 l2cap_ertm_send(chan);
1894 /* If any i-frames were sent, they included an ack */
1895 if (chan->buffer_seq == chan->last_acked_seq)
1899 /* Ack now if the window is 3/4ths full.
1900 * Calculate without mul or div
1902 threshold = chan->ack_win;
/* threshold + 2*threshold == 3*threshold; shifted down later it
 * yields 3/4 of ack_win without a multiply or divide
 */
1903 threshold += threshold << 1;
1906 BT_DBG("frames_to_ack %u, threshold %d", frames_to_ack,
1909 if (frames_to_ack >= threshold) {
1910 __clear_ack_timer(chan);
1911 control.super = L2CAP_SUPER_RR;
1912 control.reqseq = chan->buffer_seq;
1913 l2cap_send_sframe(chan, &control);
/* Below threshold: defer the ack via the ack timer */
1918 __set_ack_timer(chan);
/* Copy @len bytes of user data from @msg into @skb: the first @count
 * bytes go into skb's linear area, the remainder into a chain of
 * fragment skbs (each at most conn->mtu) allocated via the channel's
 * alloc_skb callback.  Returns 0 or a negative errno.
 */
1922 static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
1923 struct msghdr *msg, int len,
1924 int count, struct sk_buff *skb)
1926 struct l2cap_conn *conn = chan->conn;
1927 struct sk_buff **frag;
1930 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
1936 /* Continuation fragments (no L2CAP header) */
1937 frag = &skb_shinfo(skb)->frag_list;
1939 struct sk_buff *tmp;
1941 count = min_t(unsigned int, conn->mtu, len);
1943 tmp = chan->ops->alloc_skb(chan, count,
1944 msg->msg_flags & MSG_DONTWAIT);
1946 return PTR_ERR(tmp);
1950 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
1953 (*frag)->priority = skb->priority;
/* Keep the head skb's accounting in sync with the fragment chain */
1958 skb->len += (*frag)->len;
1959 skb->data_len += (*frag)->len;
1961 frag = &(*frag)->next;
/* Build a connectionless (G-frame) PDU: L2CAP header + PSM + payload
 * copied from @msg.  Returns the skb or an ERR_PTR on failure.
 */
1967 static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
1968 struct msghdr *msg, size_t len,
1971 struct l2cap_conn *conn = chan->conn;
1972 struct sk_buff *skb;
1973 int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
1974 struct l2cap_hdr *lh;
1976 BT_DBG("chan %p len %zu priority %u", chan, len, priority);
/* Linear part is capped by the connection MTU; the rest is fragmented */
1978 count = min_t(unsigned int, (conn->mtu - hlen), len);
1980 skb = chan->ops->alloc_skb(chan, count + hlen,
1981 msg->msg_flags & MSG_DONTWAIT);
1985 skb->priority = priority;
1987 /* Create L2CAP header */
1988 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1989 lh->cid = cpu_to_le16(chan->dcid);
1990 lh->len = cpu_to_le16(len + L2CAP_PSMLEN_SIZE);
1991 put_unaligned(chan->psm, skb_put(skb, L2CAP_PSMLEN_SIZE));
1993 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
1994 if (unlikely(err < 0)) {
1996 return ERR_PTR(err);
/* Build a basic-mode (B-frame) PDU: plain L2CAP header + payload copied
 * from @msg.  Returns the skb or an ERR_PTR on failure.
 */
2001 static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
2002 struct msghdr *msg, size_t len,
2005 struct l2cap_conn *conn = chan->conn;
2006 struct sk_buff *skb;
2008 struct l2cap_hdr *lh;
2010 BT_DBG("chan %p len %zu", chan, len);
2012 count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);
2014 skb = chan->ops->alloc_skb(chan, count + L2CAP_HDR_SIZE,
2015 msg->msg_flags & MSG_DONTWAIT);
2019 skb->priority = priority;
2021 /* Create L2CAP header */
2022 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2023 lh->cid = cpu_to_le16(chan->dcid);
2024 lh->len = cpu_to_le16(len);
2026 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2027 if (unlikely(err < 0)) {
2029 return ERR_PTR(err);
/* Build an ERTM/streaming I-frame PDU: L2CAP header, a zeroed control
 * field (filled in at transmit time), an optional SDU length field, the
 * payload, and room reserved for the FCS.  Returns the skb or ERR_PTR.
 */
2034 static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
2035 struct msghdr *msg, size_t len,
2038 struct l2cap_conn *conn = chan->conn;
2039 struct sk_buff *skb;
2040 int err, count, hlen;
2041 struct l2cap_hdr *lh;
2043 BT_DBG("chan %p len %zu", chan, len);
2046 return ERR_PTR(-ENOTCONN);
/* Header size depends on enhanced vs extended control field */
2048 hlen = __ertm_hdr_size(chan);
/* Only the first (START) segment of a segmented SDU carries sdulen */
2051 hlen += L2CAP_SDULEN_SIZE;
2053 if (chan->fcs == L2CAP_FCS_CRC16)
2054 hlen += L2CAP_FCS_SIZE;
2056 count = min_t(unsigned int, (conn->mtu - hlen), len);
2058 skb = chan->ops->alloc_skb(chan, count + hlen,
2059 msg->msg_flags & MSG_DONTWAIT);
2063 /* Create L2CAP header */
2064 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2065 lh->cid = cpu_to_le16(chan->dcid);
2066 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
2068 /* Control header is populated later */
2069 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
2070 put_unaligned_le32(0, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
2072 put_unaligned_le16(0, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
2075 put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
2077 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2078 if (unlikely(err < 0)) {
2080 return ERR_PTR(err);
2083 bt_cb(skb)->control.fcs = chan->fcs;
2084 bt_cb(skb)->control.retries = 0;
/* Split an outgoing SDU of @len bytes into I-frame PDUs sized for the
 * HCI MTU and remote MPS, tagging each with the proper SAR value
 * (UNSEGMENTED, or START/CONTINUE/END) and queueing them on @seg_queue.
 * Returns 0 or a negative errno (purging @seg_queue on failure).
 */
2088 static int l2cap_segment_sdu(struct l2cap_chan *chan,
2089 struct sk_buff_head *seg_queue,
2090 struct msghdr *msg, size_t len)
2092 struct sk_buff *skb;
2097 BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);
2099 /* It is critical that ERTM PDUs fit in a single HCI fragment,
2100 * so fragmented skbs are not used. The HCI layer's handling
2101 * of fragmented skbs is not compatible with ERTM's queueing.
2104 /* PDU size is derived from the HCI MTU */
2105 pdu_len = chan->conn->mtu;
2107 pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);
2109 /* Adjust for largest possible L2CAP overhead. */
2111 pdu_len -= L2CAP_FCS_SIZE;
2113 pdu_len -= __ertm_hdr_size(chan);
2115 /* Remote device may have requested smaller PDUs */
2116 pdu_len = min_t(size_t, pdu_len, chan->remote_mps);
2118 if (len <= pdu_len) {
2119 sar = L2CAP_SAR_UNSEGMENTED;
2123 sar = L2CAP_SAR_START;
/* The START segment also carries the 2-byte SDU length field */
2125 pdu_len -= L2CAP_SDULEN_SIZE;
2129 skb = l2cap_create_iframe_pdu(chan, msg, pdu_len, sdu_len);
2132 __skb_queue_purge(seg_queue);
2133 return PTR_ERR(skb);
2136 bt_cb(skb)->control.sar = sar;
2137 __skb_queue_tail(seg_queue, skb);
/* After START, subsequent segments regain the sdulen bytes */
2142 pdu_len += L2CAP_SDULEN_SIZE;
2145 if (len <= pdu_len) {
2146 sar = L2CAP_SAR_END;
2149 sar = L2CAP_SAR_CONTINUE;
/* Top-level send entry point: dispatch an outgoing message according to
 * the channel type and mode (connectionless, basic, ERTM, streaming).
 * For ERTM/streaming the SDU is segmented first, then handed to the TX
 * state machine or streamed directly.  Returns bytes sent or -errno.
 */
2156 int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
2159 struct sk_buff *skb;
2161 struct sk_buff_head seg_queue;
2163 /* Connectionless channel */
2164 if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
2165 skb = l2cap_create_connless_pdu(chan, msg, len, priority);
2167 return PTR_ERR(skb);
2169 l2cap_do_send(chan, skb);
2173 switch (chan->mode) {
2174 case L2CAP_MODE_BASIC:
2175 /* Check outgoing MTU */
2176 if (len > chan->omtu)
2179 /* Create a basic PDU */
2180 skb = l2cap_create_basic_pdu(chan, msg, len, priority);
2182 return PTR_ERR(skb);
2184 l2cap_do_send(chan, skb);
2188 case L2CAP_MODE_ERTM:
2189 case L2CAP_MODE_STREAMING:
2190 /* Check outgoing MTU */
2191 if (len > chan->omtu) {
2196 __skb_queue_head_init(&seg_queue);
2198 /* Do segmentation before calling in to the state machine,
2199 * since it's possible to block while waiting for memory
2202 err = l2cap_segment_sdu(chan, &seg_queue, msg, len);
2204 /* The channel could have been closed while segmenting,
2205 * check that it is still connected.
2207 if (chan->state != BT_CONNECTED) {
2208 __skb_queue_purge(&seg_queue);
2215 if (chan->mode == L2CAP_MODE_ERTM)
2216 l2cap_tx(chan, NULL, &seg_queue, L2CAP_EV_DATA_REQUEST);
2218 l2cap_streaming_send(chan, &seg_queue);
2222 /* If the skbs were not queued for sending, they'll still be in
2223 * seg_queue and need to be purged.
2225 __skb_queue_purge(&seg_queue);
2229 BT_DBG("bad state %1.1x", chan->mode);
/* Send SREJ S-frames for every missing sequence number between
 * expected_tx_seq and @txseq that is not already buffered in srej_q,
 * recording each on srej_list, then advance expected_tx_seq past @txseq.
 */
2236 static void l2cap_send_srej(struct l2cap_chan *chan, u16 txseq)
2238 struct l2cap_ctrl control;
2241 BT_DBG("chan %p, txseq %u", chan, txseq);
2243 memset(&control, 0, sizeof(control));
2245 control.super = L2CAP_SUPER_SREJ;
2247 for (seq = chan->expected_tx_seq; seq != txseq;
2248 seq = __next_seq(chan, seq)) {
/* Frames already held in srej_q don't need another SREJ */
2249 if (!l2cap_ertm_seq_in_queue(&chan->srej_q, seq)) {
2250 control.reqseq = seq;
2251 l2cap_send_sframe(chan, &control);
2252 l2cap_seq_list_append(&chan->srej_list, seq);
2256 chan->expected_tx_seq = __next_seq(chan, txseq);
/* Re-send an SREJ for the most recently requested (tail) sequence
 * number on srej_list; no-op when the list is empty.
 */
2259 static void l2cap_send_srej_tail(struct l2cap_chan *chan)
2261 struct l2cap_ctrl control;
2263 BT_DBG("chan %p", chan);
2265 if (chan->srej_list.tail == L2CAP_SEQ_LIST_CLEAR)
2268 memset(&control, 0, sizeof(control));
2270 control.super = L2CAP_SUPER_SREJ;
2271 control.reqseq = chan->srej_list.tail;
2272 l2cap_send_sframe(chan, &control);
/* Re-send SREJs for every outstanding sequence number on srej_list
 * except @txseq, rotating each entry back to the list's tail.  The
 * initial head is captured so the rotation makes exactly one pass.
 */
2275 static void l2cap_send_srej_list(struct l2cap_chan *chan, u16 txseq)
2277 struct l2cap_ctrl control;
2281 BT_DBG("chan %p, txseq %u", chan, txseq);
2283 memset(&control, 0, sizeof(control));
2285 control.super = L2CAP_SUPER_SREJ;
2287 /* Capture initial list head to allow only one pass through the list. */
2288 initial_head = chan->srej_list.head;
2291 seq = l2cap_seq_list_pop(&chan->srej_list);
/* @txseq has just been received, so it must not be re-requested */
2292 if (seq == txseq || seq == L2CAP_SEQ_LIST_CLEAR)
2295 control.reqseq = seq;
2296 l2cap_send_sframe(chan, &control);
2297 l2cap_seq_list_append(&chan->srej_list, seq);
2298 } while (chan->srej_list.head != initial_head);
/* Process an incoming acknowledgement: free every TX-queued frame with
 * a sequence number from expected_ack_seq up to (excluding) @reqseq,
 * update the ack bookkeeping, and stop the retransmission timer once
 * nothing is left unacked.
 */
2301 static void l2cap_process_reqseq(struct l2cap_chan *chan, u16 reqseq)
2303 struct sk_buff *acked_skb;
2306 BT_DBG("chan %p, reqseq %u", chan, reqseq);
/* Nothing outstanding, or a duplicate ack: nothing to do */
2308 if (chan->unacked_frames == 0 || reqseq == chan->expected_ack_seq)
2311 BT_DBG("expected_ack_seq %u, unacked_frames %u",
2312 chan->expected_ack_seq, chan->unacked_frames);
2314 for (ackseq = chan->expected_ack_seq; ackseq != reqseq;
2315 ackseq = __next_seq(chan, ackseq)) {
2317 acked_skb = l2cap_ertm_seq_in_queue(&chan->tx_q, ackseq);
2319 skb_unlink(acked_skb, &chan->tx_q);
2320 kfree_skb(acked_skb);
2321 chan->unacked_frames--;
2325 chan->expected_ack_seq = reqseq;
2327 if (chan->unacked_frames == 0)
2328 __clear_retrans_timer(chan);
2330 BT_DBG("unacked_frames %u", chan->unacked_frames);
/* Abort the SREJ_SENT receive state: drop the SREJ bookkeeping and any
 * out-of-order buffered frames, and fall back to plain RECV.
 */
2333 static void l2cap_abort_rx_srej_sent(struct l2cap_chan *chan)
2335 BT_DBG("chan %p", chan);
2337 chan->expected_tx_seq = chan->buffer_seq;
2338 l2cap_seq_list_clear(&chan->srej_list);
2339 skb_queue_purge(&chan->srej_q);
2340 chan->rx_state = L2CAP_RX_STATE_RECV;
/* ERTM TX state machine handler for the XMIT state.  New data is queued
 * and sent immediately; local-busy transitions emit RNR/RR as needed;
 * poll events (explicit or retransmission timeout) send an RR/RNR with
 * the P bit and move the machine to WAIT_F.
 */
2343 static void l2cap_tx_state_xmit(struct l2cap_chan *chan,
2344 struct l2cap_ctrl *control,
2345 struct sk_buff_head *skbs, u8 event)
2347 BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2351 case L2CAP_EV_DATA_REQUEST:
2352 if (chan->tx_send_head == NULL)
2353 chan->tx_send_head = skb_peek(skbs)
2355 skb_queue_splice_tail_init(skbs, &chan->tx_q);
2356 l2cap_ertm_send(chan);
2358 case L2CAP_EV_LOCAL_BUSY_DETECTED:
2359 BT_DBG("Enter LOCAL_BUSY");
2360 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2362 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2363 /* The SREJ_SENT state must be aborted if we are to
2364 * enter the LOCAL_BUSY state.
2366 l2cap_abort_rx_srej_sent(chan);
2369 l2cap_send_ack(chan);
2372 case L2CAP_EV_LOCAL_BUSY_CLEAR:
2373 BT_DBG("Exit LOCAL_BUSY");
2374 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
/* An RNR went out earlier; poll the peer with RR(P=1) to resume */
2376 if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2377 struct l2cap_ctrl local_control;
2379 memset(&local_control, 0, sizeof(local_control));
2380 local_control.sframe = 1;
2381 local_control.super = L2CAP_SUPER_RR;
2382 local_control.poll = 1;
2383 local_control.reqseq = chan->buffer_seq;
2384 l2cap_send_sframe(chan, &local_control);
2386 chan->retry_count = 1;
2387 __set_monitor_timer(chan);
2388 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2391 case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2392 l2cap_process_reqseq(chan, control->reqseq);
2394 case L2CAP_EV_EXPLICIT_POLL:
2395 l2cap_send_rr_or_rnr(chan, 1);
2396 chan->retry_count = 1;
2397 __set_monitor_timer(chan);
2398 __clear_ack_timer(chan);
2399 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2401 case L2CAP_EV_RETRANS_TO:
/* Retransmission timer fired: poll the peer and wait for F bit */
2402 l2cap_send_rr_or_rnr(chan, 1);
2403 chan->retry_count = 1;
2404 __set_monitor_timer(chan);
2405 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2407 case L2CAP_EV_RECV_FBIT:
2408 /* Nothing to process */
/* ERTM TX state machine handler for the WAIT_F state (a poll is
 * outstanding).  New data is queued but not transmitted; receiving the
 * F bit returns the machine to XMIT; the monitor timer re-polls up to
 * max_tx times before disconnecting the channel.
 */
2415 static void l2cap_tx_state_wait_f(struct l2cap_chan *chan,
2416 struct l2cap_ctrl *control,
2417 struct sk_buff_head *skbs, u8 event)
2419 BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2423 case L2CAP_EV_DATA_REQUEST:
2424 if (chan->tx_send_head == NULL)
2425 chan->tx_send_head = skb_peek(skbs);
2426 /* Queue data, but don't send. */
2427 skb_queue_splice_tail_init(skbs, &chan->tx_q);
2429 case L2CAP_EV_LOCAL_BUSY_DETECTED:
2430 BT_DBG("Enter LOCAL_BUSY");
2431 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2433 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2434 /* The SREJ_SENT state must be aborted if we are to
2435 * enter the LOCAL_BUSY state.
2437 l2cap_abort_rx_srej_sent(chan);
2440 l2cap_send_ack(chan);
2443 case L2CAP_EV_LOCAL_BUSY_CLEAR:
2444 BT_DBG("Exit LOCAL_BUSY");
2445 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
/* An RNR went out earlier; re-poll the peer with RR(P=1) */
2447 if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2448 struct l2cap_ctrl local_control;
2449 memset(&local_control, 0, sizeof(local_control));
2450 local_control.sframe = 1;
2451 local_control.super = L2CAP_SUPER_RR;
2452 local_control.poll = 1;
2453 local_control.reqseq = chan->buffer_seq;
2454 l2cap_send_sframe(chan, &local_control);
2456 chan->retry_count = 1;
2457 __set_monitor_timer(chan);
2458 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2461 case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2462 l2cap_process_reqseq(chan, control->reqseq);
2466 case L2CAP_EV_RECV_FBIT:
/* The final bit answers our poll: resume normal transmission */
2467 if (control && control->final) {
2468 __clear_monitor_timer(chan);
2469 if (chan->unacked_frames > 0)
2470 __set_retrans_timer(chan);
2471 chan->retry_count = 0;
2472 chan->tx_state = L2CAP_TX_STATE_XMIT;
2473 BT_DBG("recv fbit tx_state 0x2.2%x", chan->tx_state);
2476 case L2CAP_EV_EXPLICIT_POLL:
/* Already polling; ignore */
2479 case L2CAP_EV_MONITOR_TO:
/* No answer to the poll yet: retry until max_tx, then give up
 * (max_tx == 0 means retry forever).
 */
2480 if (chan->max_tx == 0 || chan->retry_count < chan->max_tx) {
2481 l2cap_send_rr_or_rnr(chan, 1);
2482 __set_monitor_timer(chan);
2483 chan->retry_count++;
2485 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
/* Dispatch a TX state-machine event to the handler for the channel's
 * current TX state (XMIT or WAIT_F).
 */
2493 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
2494 struct sk_buff_head *skbs, u8 event)
2496 BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
2497 chan, control, skbs, event, chan->tx_state);
2499 switch (chan->tx_state) {
2500 case L2CAP_TX_STATE_XMIT:
2501 l2cap_tx_state_xmit(chan, control, skbs, event);
2503 case L2CAP_TX_STATE_WAIT_F:
2504 l2cap_tx_state_wait_f(chan, control, skbs, event);
/* Forward a received frame's reqseq/F-bit information to the TX state
 * machine as a RECV_REQSEQ_AND_FBIT event.
 */
2512 static void l2cap_pass_to_tx(struct l2cap_chan *chan,
2513 struct l2cap_ctrl *control)
2515 BT_DBG("chan %p, control %p", chan, control);
2516 l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_REQSEQ_AND_FBIT);
/* Forward only the F-bit of a received frame to the TX state machine
 * as a RECV_FBIT event (no reqseq processing).
 */
2519 static void l2cap_pass_to_tx_fbit(struct l2cap_chan *chan,
2520 struct l2cap_ctrl *control)
2522 BT_DBG("chan %p, control %p", chan, control);
2523 l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_FBIT);
/* Copy frame to all raw sockets on that connection */
/* Clone @skb to every RAW-type channel on @conn (except the one the
 * frame originated from) and deliver the clone via the channel's recv
 * callback.  Runs under conn->chan_lock.
 */
2527 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
2529 struct sk_buff *nskb;
2530 struct l2cap_chan *chan;
2532 BT_DBG("conn %p", conn);
2534 mutex_lock(&conn->chan_lock);
2536 list_for_each_entry(chan, &conn->chan_l, list) {
2537 struct sock *sk = chan->sk;
2538 if (chan->chan_type != L2CAP_CHAN_RAW)
2541 /* Don't send frame to the socket it came from */
2544 nskb = skb_clone(skb, GFP_ATOMIC);
/* If the channel refuses the clone, free it ourselves */
2548 if (chan->ops->recv(chan, nskb))
2552 mutex_unlock(&conn->chan_lock);
2555 /* ---- L2CAP signalling commands ---- */
/* Build a signalling command PDU: L2CAP header (on the signalling CID
 * appropriate to the link type) + command header + @dlen bytes of @data,
 * fragmenting the payload across continuation skbs when it exceeds the
 * connection MTU.  Returns the skb or NULL on allocation failure.
 */
2556 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, u8 code,
2557 u8 ident, u16 dlen, void *data)
2559 struct sk_buff *skb, **frag;
2560 struct l2cap_cmd_hdr *cmd;
2561 struct l2cap_hdr *lh;
2564 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %u",
2565 conn, code, ident, dlen);
2567 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
2568 count = min_t(unsigned int, conn->mtu, len);
2570 skb = bt_skb_alloc(count, GFP_ATOMIC);
2574 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2575 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
/* LE and BR/EDR links use different fixed signalling channels */
2577 if (conn->hcon->type == LE_LINK)
2578 lh->cid = __constant_cpu_to_le16(L2CAP_CID_LE_SIGNALING);
2580 lh->cid = __constant_cpu_to_le16(L2CAP_CID_SIGNALING);
2582 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
2585 cmd->len = cpu_to_le16(dlen);
2588 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
2589 memcpy(skb_put(skb, count), data, count);
2595 /* Continuation fragments (no L2CAP header) */
2596 frag = &skb_shinfo(skb)->frag_list;
2598 count = min_t(unsigned int, conn->mtu, len);
2600 *frag = bt_skb_alloc(count, GFP_ATOMIC);
2604 memcpy(skb_put(*frag, count), data, count);
2609 frag = &(*frag)->next;
/* Parse one configuration option at *ptr: return its type/length and
 * decode the value by size (1/2/4 bytes little-endian, otherwise a
 * pointer to the raw bytes).  Advances past the option and returns the
 * number of bytes consumed.
 */
2619 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
2621 struct l2cap_conf_opt *opt = *ptr;
2624 len = L2CAP_CONF_OPT_SIZE + opt->len;
2632 *val = *((u8 *) opt->val);
2636 *val = get_unaligned_le16(opt->val);
2640 *val = get_unaligned_le32(opt->val);
/* Larger options are returned by reference, not by value */
2644 *val = (unsigned long) opt->val;
2648 BT_DBG("type 0x%2.2x len %u val 0x%lx", *type, opt->len, *val);
/* Append one configuration option (type, len, value) at *ptr, encoding
 * the value by size (1/2/4 bytes little-endian, otherwise memcpy from
 * the pointer in @val), and advance *ptr past the option.
 */
2652 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
2654 struct l2cap_conf_opt *opt = *ptr;
2656 BT_DBG("type 0x%2.2x len %u val 0x%lx", type, len, val);
2663 *((u8 *) opt->val) = val;
2667 put_unaligned_le16(val, opt->val);
2671 put_unaligned_le32(val, opt->val);
/* Larger values are passed as a pointer cast into @val */
2675 memcpy(opt->val, (void *) val, len);
2679 *ptr += L2CAP_CONF_OPT_SIZE + len;
/* Build an Extended Flow Specification option from the channel's local
 * parameters (mode-dependent defaults) and append it at *ptr.
 */
2682 static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
2684 struct l2cap_conf_efs efs;
2686 switch (chan->mode) {
2687 case L2CAP_MODE_ERTM:
2688 efs.id = chan->local_id;
2689 efs.stype = chan->local_stype;
2690 efs.msdu = cpu_to_le16(chan->local_msdu);
2691 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
2692 efs.acc_lat = __constant_cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
2693 efs.flush_to = __constant_cpu_to_le32(L2CAP_DEFAULT_FLUSH_TO);
2696 case L2CAP_MODE_STREAMING:
/* Streaming mode always advertises best-effort service */
2698 efs.stype = L2CAP_SERV_BESTEFFORT;
2699 efs.msdu = cpu_to_le16(chan->local_msdu);
2700 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
2709 l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
2710 (unsigned long) &efs);
/* Delayed-work handler for the ack timer: if any received frames are
 * still unacknowledged, send an RR/RNR ack now.  Drops the channel
 * reference taken when the timer was armed.
 */
2713 static void l2cap_ack_timeout(struct work_struct *work)
2715 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
2719 BT_DBG("chan %p", chan);
2721 l2cap_chan_lock(chan);
2723 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
2724 chan->last_acked_seq);
2727 l2cap_send_rr_or_rnr(chan, 0);
2729 l2cap_chan_unlock(chan);
2730 l2cap_chan_put(chan);
/* Reset the channel's sequence counters and queues for ERTM/streaming
 * operation.  For ERTM proper, also initialise the RX/TX state machines,
 * the three delayed-work timers and the SREJ/retransmission sequence
 * lists.  Returns 0 or a negative errno from list allocation.
 */
2733 int l2cap_ertm_init(struct l2cap_chan *chan)
2737 chan->next_tx_seq = 0;
2738 chan->expected_tx_seq = 0;
2739 chan->expected_ack_seq = 0;
2740 chan->unacked_frames = 0;
2741 chan->buffer_seq = 0;
2742 chan->frames_sent = 0;
2743 chan->last_acked_seq = 0;
2745 chan->sdu_last_frag = NULL;
2748 skb_queue_head_init(&chan->tx_q);
/* Streaming mode needs none of the ERTM machinery below */
2750 if (chan->mode != L2CAP_MODE_ERTM)
2753 chan->rx_state = L2CAP_RX_STATE_RECV;
2754 chan->tx_state = L2CAP_TX_STATE_XMIT;
2756 INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
2757 INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
2758 INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);
2760 skb_queue_head_init(&chan->srej_q);
2762 err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);
2766 err = l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
/* Second list failed: undo the first to avoid a leak */
2768 l2cap_seq_list_free(&chan->srej_list);
/* Keep the requested ERTM/streaming mode if the remote's feature mask
 * supports it; otherwise fall back to basic mode.
 */
2773 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
2776 case L2CAP_MODE_STREAMING:
2777 case L2CAP_MODE_ERTM:
2778 if (l2cap_mode_supported(mode, remote_feat_mask))
2782 return L2CAP_MODE_BASIC;
/* Extended Window Size is usable only with high-speed support enabled
 * and the remote advertising the feature.
 */
2786 static inline bool __l2cap_ews_supported(struct l2cap_chan *chan)
2788 return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_WINDOW;
/* Extended Flow Specification is usable only with high-speed support
 * enabled and the remote advertising the feature.
 */
2791 static inline bool __l2cap_efs_supported(struct l2cap_chan *chan)
2793 return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_FLOW;
/* Choose the TX window size and control-field format: use the extended
 * control field when a window larger than the default is requested and
 * the peer supports extended windows, otherwise clamp to the default.
 */
2796 static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
2798 if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
2799 __l2cap_ews_supported(chan)) {
2800 /* use extended control field */
2801 set_bit(FLAG_EXT_CTRL, &chan->flags);
2802 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
2804 chan->tx_win = min_t(u16, chan->tx_win,
2805 L2CAP_DEFAULT_TX_WINDOW);
2806 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
2808 chan->ack_win = chan->tx_win;
/* Build the outgoing Configuration Request for @chan into @data:
 * selects the channel mode (possibly downgrading via the remote feature
 * mask), then appends MTU, RFC, and mode-dependent EFS/FCS/EWS options.
 * Returns the total request length.  (Some lines are elided here.)
 */
2811 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
2813 struct l2cap_conf_req *req = data;
2814 struct l2cap_conf_rfc rfc = { .mode = chan->mode };
2815 void *ptr = req->data;
2818 BT_DBG("chan %p", chan);
/* Only negotiate the mode on the first config exchange */
2820 if (chan->num_conf_req || chan->num_conf_rsp)
2823 switch (chan->mode) {
2824 case L2CAP_MODE_STREAMING:
2825 case L2CAP_MODE_ERTM:
2826 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
2829 if (__l2cap_efs_supported(chan))
2830 set_bit(FLAG_EFS_ENABLE, &chan->flags);
2834 chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
/* MTU option is only needed when it differs from the default */
2839 if (chan->imtu != L2CAP_DEFAULT_MTU)
2840 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
2842 switch (chan->mode) {
2843 case L2CAP_MODE_BASIC:
/* A peer with no ERTM/streaming support gets no RFC option at all */
2844 if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
2845 !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
2848 rfc.mode = L2CAP_MODE_BASIC;
2850 rfc.max_transmit = 0;
2851 rfc.retrans_timeout = 0;
2852 rfc.monitor_timeout = 0;
2853 rfc.max_pdu_size = 0;
2855 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2856 (unsigned long) &rfc);
2859 case L2CAP_MODE_ERTM:
2860 rfc.mode = L2CAP_MODE_ERTM;
2861 rfc.max_transmit = chan->max_tx;
2862 rfc.retrans_timeout = 0;
2863 rfc.monitor_timeout = 0;
/* MPS must leave room for the largest possible frame overhead */
2865 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
2866 L2CAP_EXT_HDR_SIZE -
2869 rfc.max_pdu_size = cpu_to_le16(size);
2871 l2cap_txwin_setup(chan);
2873 rfc.txwin_size = min_t(u16, chan->tx_win,
2874 L2CAP_DEFAULT_TX_WINDOW);
2876 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2877 (unsigned long) &rfc);
2879 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
2880 l2cap_add_opt_efs(&ptr, chan);
2882 if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
/* Offer to drop the FCS when neither side needs it */
2885 if (chan->fcs == L2CAP_FCS_NONE ||
2886 test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
2887 chan->fcs = L2CAP_FCS_NONE;
2888 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
2891 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
2892 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
2896 case L2CAP_MODE_STREAMING:
2897 l2cap_txwin_setup(chan);
2898 rfc.mode = L2CAP_MODE_STREAMING;
2900 rfc.max_transmit = 0;
2901 rfc.retrans_timeout = 0;
2902 rfc.monitor_timeout = 0;
2904 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
2905 L2CAP_EXT_HDR_SIZE -
2908 rfc.max_pdu_size = cpu_to_le16(size);
2910 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2911 (unsigned long) &rfc);
2913 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
2914 l2cap_add_opt_efs(&ptr, chan);
2916 if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
2919 if (chan->fcs == L2CAP_FCS_NONE ||
2920 test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
2921 chan->fcs = L2CAP_FCS_NONE;
2922 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
2927 req->dcid = cpu_to_le16(chan->dcid);
2928 req->flags = __constant_cpu_to_le16(0);
/* Parse the peer's accumulated Configure Request options (chan->conf_req)
 * and build our Configure Response into @data.  Returns the response length
 * on success or -ECONNREFUSED when the requested configuration cannot be
 * accepted at all.
 * NOTE(review): this extraction has dropped lines (braces, break statements,
 * some option handling) — verify details against the upstream file.
 */
2933 static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
2935 struct l2cap_conf_rsp *rsp = data;
2936 void *ptr = rsp->data;
2937 void *req = chan->conf_req;
2938 int len = chan->conf_len;
2939 int type, hint, olen;
/* RFC option defaults to basic mode if the peer sends no RFC option */
2941 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
2942 struct l2cap_conf_efs efs;
2944 u16 mtu = L2CAP_DEFAULT_MTU;
2945 u16 result = L2CAP_CONF_SUCCESS;
2948 BT_DBG("chan %p", chan);
/* First pass: walk every option in the stored request */
2950 while (len >= L2CAP_CONF_OPT_SIZE) {
2951 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
/* Hint bit set means "ignore if unknown" rather than reject */
2953 hint = type & L2CAP_CONF_HINT;
2954 type &= L2CAP_CONF_MASK;
2957 case L2CAP_CONF_MTU:
2961 case L2CAP_CONF_FLUSH_TO:
2962 chan->flush_to = val;
2965 case L2CAP_CONF_QOS:
2968 case L2CAP_CONF_RFC:
/* Only copy when the option length matches exactly; a short or
 * oversized RFC option is silently ignored here */
2969 if (olen == sizeof(rfc))
2970 memcpy(&rfc, (void *) val, olen);
2973 case L2CAP_CONF_FCS:
2974 if (val == L2CAP_FCS_NONE)
2975 set_bit(CONF_NO_FCS_RECV, &chan->conf_state);
2978 case L2CAP_CONF_EFS:
2980 if (olen == sizeof(efs))
2981 memcpy(&efs, (void *) val, olen);
2984 case L2CAP_CONF_EWS:
/* presumably refused when extended window is unsupported —
 * the guarding condition is elided in this view; confirm upstream */
2986 return -ECONNREFUSED;
2988 set_bit(FLAG_EXT_CTRL, &chan->flags);
2989 set_bit(CONF_EWS_RECV, &chan->conf_state);
2990 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
2991 chan->remote_tx_win = val;
/* Unknown non-hint option: reply CONF_UNKNOWN and echo its type */
2998 result = L2CAP_CONF_UNKNOWN;
2999 *((u8 *) ptr++) = type;
/* Mode negotiation only happens on the first request/response exchange */
3004 if (chan->num_conf_rsp || chan->num_conf_req > 1)
3007 switch (chan->mode) {
3008 case L2CAP_MODE_STREAMING:
3009 case L2CAP_MODE_ERTM:
3010 if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
3011 chan->mode = l2cap_select_mode(rfc.mode,
3012 chan->conn->feat_mask);
3017 if (__l2cap_efs_supported(chan))
3018 set_bit(FLAG_EFS_ENABLE, &chan->flags);
3020 return -ECONNREFUSED;
3023 if (chan->mode != rfc.mode)
3024 return -ECONNREFUSED;
/* Mode mismatch: propose our mode back; refuse outright if the
 * peer already had one response round to converge */
3030 if (chan->mode != rfc.mode) {
3031 result = L2CAP_CONF_UNACCEPT;
3032 rfc.mode = chan->mode;
3034 if (chan->num_conf_rsp == 1)
3035 return -ECONNREFUSED;
3037 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3038 sizeof(rfc), (unsigned long) &rfc);
3041 if (result == L2CAP_CONF_SUCCESS) {
3042 /* Configure output options and let the other side know
3043 * which ones we don't like. */
3045 if (mtu < L2CAP_DEFAULT_MIN_MTU)
3046 result = L2CAP_CONF_UNACCEPT;
3049 set_bit(CONF_MTU_DONE, &chan->conf_state);
3051 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);
/* EFS service-type compatibility check against our local type */
3054 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3055 efs.stype != L2CAP_SERV_NOTRAFIC &&
3056 efs.stype != chan->local_stype) {
3058 result = L2CAP_CONF_UNACCEPT;
3060 if (chan->num_conf_req >= 1)
3061 return -ECONNREFUSED;
3063 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3065 (unsigned long) &efs);
3067 /* Send PENDING Conf Rsp */
3068 result = L2CAP_CONF_PENDING;
3069 set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
3074 case L2CAP_MODE_BASIC:
/* Basic mode never uses a frame check sequence */
3075 chan->fcs = L2CAP_FCS_NONE;
3076 set_bit(CONF_MODE_DONE, &chan->conf_state);
3079 case L2CAP_MODE_ERTM:
/* Remote TX window: EWS option (if seen) wins over RFC txwin */
3080 if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
3081 chan->remote_tx_win = rfc.txwin_size;
3083 rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;
3085 chan->remote_max_tx = rfc.max_transmit;
/* Clamp the peer's max PDU size to what fits in our ACL MTU */
3087 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
3089 L2CAP_EXT_HDR_SIZE -
3092 rfc.max_pdu_size = cpu_to_le16(size);
3093 chan->remote_mps = size;
/* We always dictate the standard retransmit/monitor timeouts */
3095 rfc.retrans_timeout =
3096 __constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
3097 rfc.monitor_timeout =
3098 __constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
3100 set_bit(CONF_MODE_DONE, &chan->conf_state);
3102 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3103 sizeof(rfc), (unsigned long) &rfc);
/* Record the peer's EFS parameters and echo them back */
3105 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3106 chan->remote_id = efs.id;
3107 chan->remote_stype = efs.stype;
3108 chan->remote_msdu = le16_to_cpu(efs.msdu);
3109 chan->remote_flush_to =
3110 le32_to_cpu(efs.flush_to);
3111 chan->remote_acc_lat =
3112 le32_to_cpu(efs.acc_lat);
3113 chan->remote_sdu_itime =
3114 le32_to_cpu(efs.sdu_itime);
3115 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3116 sizeof(efs), (unsigned long) &efs);
3120 case L2CAP_MODE_STREAMING:
3121 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
3123 L2CAP_EXT_HDR_SIZE -
3126 rfc.max_pdu_size = cpu_to_le16(size);
3127 chan->remote_mps = size;
3129 set_bit(CONF_MODE_DONE, &chan->conf_state);
3131 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3132 sizeof(rfc), (unsigned long) &rfc);
/* default: unsupported mode — reject with an empty RFC of our mode */
3137 result = L2CAP_CONF_UNACCEPT;
3139 memset(&rfc, 0, sizeof(rfc));
3140 rfc.mode = chan->mode;
3143 if (result == L2CAP_CONF_SUCCESS)
3144 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
/* Response header: scid field carries our dcid per the spec */
3146 rsp->scid = cpu_to_le16(chan->dcid);
3147 rsp->result = cpu_to_le16(result);
3148 rsp->flags = __constant_cpu_to_le16(0);
/* Parse a Configure Response from the peer (@rsp, @len) and build the
 * follow-up Configure Request into @data, adjusting our local channel
 * parameters to whatever the peer proposed.  *result may be downgraded
 * to UNACCEPT (e.g. too-small MTU).  Returns the new request length or
 * -ECONNREFUSED on an unacceptable mode/EFS combination.
 * NOTE(review): braces/breaks are elided in this view; verify upstream.
 */
3153 static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, void *data, u16 *result)
3155 struct l2cap_conf_req *req = data;
3156 void *ptr = req->data;
3159 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
3160 struct l2cap_conf_efs efs;
3162 BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
3164 while (len >= L2CAP_CONF_OPT_SIZE) {
3165 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3168 case L2CAP_CONF_MTU:
/* Peer offered an MTU below the spec minimum: mark unacceptable
 * and counter-propose the minimum */
3169 if (val < L2CAP_DEFAULT_MIN_MTU) {
3170 *result = L2CAP_CONF_UNACCEPT;
3171 chan->imtu = L2CAP_DEFAULT_MIN_MTU;
3174 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
3177 case L2CAP_CONF_FLUSH_TO:
3178 chan->flush_to = val;
3179 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
3183 case L2CAP_CONF_RFC:
3184 if (olen == sizeof(rfc))
3185 memcpy(&rfc, (void *)val, olen);
/* STATE2 devices never renegotiate their configured mode */
3187 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
3188 rfc.mode != chan->mode)
3189 return -ECONNREFUSED;
3193 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3194 sizeof(rfc), (unsigned long) &rfc);
3197 case L2CAP_CONF_EWS:
/* Never grow our ack window beyond what we already advertised */
3198 chan->ack_win = min_t(u16, val, chan->ack_win);
3199 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
3203 case L2CAP_CONF_EFS:
3204 if (olen == sizeof(efs))
3205 memcpy(&efs, (void *)val, olen);
3207 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3208 efs.stype != L2CAP_SERV_NOTRAFIC &&
3209 efs.stype != chan->local_stype)
3210 return -ECONNREFUSED;
3212 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3213 sizeof(efs), (unsigned long) &efs);
/* A basic-mode channel cannot be switched to another mode here */
3218 if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
3219 return -ECONNREFUSED;
3221 chan->mode = rfc.mode;
3223 if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
3225 case L2CAP_MODE_ERTM:
/* Adopt the peer's timers and MPS for ERTM operation */
3226 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3227 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3228 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3229 if (!test_bit(FLAG_EXT_CTRL, &chan->flags))
3230 chan->ack_win = min_t(u16, chan->ack_win,
3233 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3234 chan->local_msdu = le16_to_cpu(efs.msdu);
3235 chan->local_sdu_itime =
3236 le32_to_cpu(efs.sdu_itime);
3237 chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
3238 chan->local_flush_to =
3239 le32_to_cpu(efs.flush_to);
3243 case L2CAP_MODE_STREAMING:
3244 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3248 req->dcid = cpu_to_le16(chan->dcid);
3249 req->flags = __constant_cpu_to_le16(0);
/* Build a minimal (option-less) Configure Response header into @data
 * with the given @result and @flags.  Returns the number of bytes
 * written (the return statement is elided in this view).
 */
3254 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data, u16 result, u16 flags)
3256 struct l2cap_conf_rsp *rsp = data;
3257 void *ptr = rsp->data;
3259 BT_DBG("chan %p", chan);
/* scid field of the response carries our dcid (the peer's source CID) */
3261 rsp->scid = cpu_to_le16(chan->dcid);
3262 rsp->result = cpu_to_le16(result);
3263 rsp->flags = cpu_to_le16(flags);
/* Send the deferred Connection Response (success) for a channel whose
 * accept was postponed (deferred setup), then kick off configuration by
 * sending our first Configure Request — unless one was already sent.
 */
3268 void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
3270 struct l2cap_conn_rsp rsp;
3271 struct l2cap_conn *conn = chan->conn;
3274 rsp.scid = cpu_to_le16(chan->dcid);
3275 rsp.dcid = cpu_to_le16(chan->scid);
3276 rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
3277 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
/* Reply with the ident saved from the original Connection Request */
3278 l2cap_send_cmd(conn, chan->ident,
3279 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
/* test_and_set guarantees the Configure Request goes out only once */
3281 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
3284 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3285 l2cap_build_conf_req(chan, buf), buf);
3286 chan->num_conf_req++;
/* Extract final ERTM/streaming parameters (timeouts, MPS, ack window)
 * from a successful Configure Response.  Sane defaults are pre-loaded so
 * a peer that omits the RFC/EWS options still leaves us configured.
 */
3289 static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
3293 /* Use sane default values in case a misbehaving remote device
3294 * did not send an RFC or extended window size option.
3296 u16 txwin_ext = chan->ack_win;
3297 struct l2cap_conf_rfc rfc = {
3299 .retrans_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO),
3300 .monitor_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO),
3301 .max_pdu_size = cpu_to_le16(chan->imtu),
3302 .txwin_size = min_t(u16, chan->ack_win, L2CAP_DEFAULT_TX_WINDOW),
3305 BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
/* Nothing to do for basic-mode channels */
3307 if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
3310 while (len >= L2CAP_CONF_OPT_SIZE) {
3311 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3314 case L2CAP_CONF_RFC:
3315 if (olen == sizeof(rfc))
3316 memcpy(&rfc, (void *)val, olen);
3318 case L2CAP_CONF_EWS:
3325 case L2CAP_MODE_ERTM:
3326 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3327 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3328 chan->mps = le16_to_cpu(rfc.max_pdu_size);
/* With extended control the EWS value bounds the window,
 * otherwise the RFC txwin_size does */
3329 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3330 chan->ack_win = min_t(u16, chan->ack_win, txwin_ext)
3332 chan->ack_win = min_t(u16, chan->ack_win,
3335 case L2CAP_MODE_STREAMING:
3336 chan->mps = le16_to_cpu(rfc.max_pdu_size);
/* Handle an incoming Command Reject.  If it answers our outstanding
 * Information Request, treat the feature-mask exchange as finished and
 * resume any pending channel setup.
 */
3340 static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3342 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
3344 if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
/* Only act when the reject matches the ident of our info request */
3347 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
3348 cmd->ident == conn->info_ident) {
3349 cancel_delayed_work(&conn->info_timer);
3351 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3352 conn->info_ident = 0;
3354 l2cap_conn_start(conn);
/* Handle an incoming Connection Request: find a listening channel for the
 * PSM, run security checks, create the child channel, and reply with
 * success / pending / rejection.  May also trigger the initial feature
 * Information Request and the first Configure Request.
 * NOTE(review): lock/unlock pairs and goto labels are partially elided in
 * this view; verify the error paths against the upstream file.
 */
3360 static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3362 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
3363 struct l2cap_conn_rsp rsp;
3364 struct l2cap_chan *chan = NULL, *pchan;
3365 struct sock *parent, *sk = NULL;
3366 int result, status = L2CAP_CS_NO_INFO;
3368 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
3369 __le16 psm = req->psm;
3371 BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm), scid);
3373 /* Check if we have socket listening on psm */
3374 pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, conn->src, conn->dst);
3376 result = L2CAP_CR_BAD_PSM;
3382 mutex_lock(&conn->chan_lock);
3385 /* Check if the ACL is secure enough (if not SDP) */
3386 if (psm != __constant_cpu_to_le16(L2CAP_PSM_SDP) &&
3387 !hci_conn_check_link_mode(conn->hcon)) {
3388 conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
3389 result = L2CAP_CR_SEC_BLOCK;
3393 result = L2CAP_CR_NO_MEM;
3395 /* Check if we already have channel with that dcid */
3396 if (__l2cap_get_chan_by_dcid(conn, scid))
3399 chan = pchan->ops->new_connection(pchan);
/* Keep the ACL alive while the channel exists */
3405 hci_conn_hold(conn->hcon);
3407 bacpy(&bt_sk(sk)->src, conn->src);
3408 bacpy(&bt_sk(sk)->dst, conn->dst);
3412 bt_accept_enqueue(parent, sk);
3414 __l2cap_chan_add(conn, chan);
3418 __set_chan_timer(chan, sk->sk_sndtimeo);
/* Save the ident so a deferred response can reuse it later */
3420 chan->ident = cmd->ident;
3422 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
3423 if (l2cap_chan_check_security(chan)) {
/* Deferred setup: stay in CONNECT2 and wake the listener */
3424 if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags)) {
3425 __l2cap_state_change(chan, BT_CONNECT2);
3426 result = L2CAP_CR_PEND;
3427 status = L2CAP_CS_AUTHOR_PEND;
3428 parent->sk_data_ready(parent, 0);
3430 __l2cap_state_change(chan, BT_CONFIG);
3431 result = L2CAP_CR_SUCCESS;
3432 status = L2CAP_CS_NO_INFO;
3435 __l2cap_state_change(chan, BT_CONNECT2);
3436 result = L2CAP_CR_PEND;
3437 status = L2CAP_CS_AUTHEN_PEND;
/* Feature mask not yet known: answer "pending" for now */
3440 __l2cap_state_change(chan, BT_CONNECT2);
3441 result = L2CAP_CR_PEND;
3442 status = L2CAP_CS_NO_INFO;
3446 release_sock(parent);
3447 mutex_unlock(&conn->chan_lock);
3450 rsp.scid = cpu_to_le16(scid);
3451 rsp.dcid = cpu_to_le16(dcid);
3452 rsp.result = cpu_to_le16(result);
3453 rsp.status = cpu_to_le16(status);
3454 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
/* First connection on this link: query the peer's feature mask */
3456 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
3457 struct l2cap_info_req info;
3458 info.type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);
3460 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
3461 conn->info_ident = l2cap_get_ident(conn);
3463 schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
3465 l2cap_send_cmd(conn, conn->info_ident,
3466 L2CAP_INFO_REQ, sizeof(info), &info);
/* On immediate success, start configuration right away */
3469 if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
3470 result == L2CAP_CR_SUCCESS) {
3472 set_bit(CONF_REQ_SENT, &chan->conf_state);
3473 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3474 l2cap_build_conf_req(chan, buf), buf);
3475 chan->num_conf_req++;
/* Handle a Connection Response to one of our Connection Requests.
 * Success moves the channel into BT_CONFIG and sends the first Configure
 * Request; pending just records the state; any other result tears the
 * channel down with ECONNREFUSED.
 */
3481 static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3483 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
3484 u16 scid, dcid, result, status;
3485 struct l2cap_chan *chan;
3489 scid = __le16_to_cpu(rsp->scid);
3490 dcid = __le16_to_cpu(rsp->dcid);
3491 result = __le16_to_cpu(rsp->result);
3492 status = __le16_to_cpu(rsp->status);
3494 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
3495 dcid, scid, result, status);
3497 mutex_lock(&conn->chan_lock);
/* Look up by our scid first; a pending response (scid 0) is matched
 * by the command ident instead */
3500 chan = __l2cap_get_chan_by_scid(conn, scid);
3506 chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
3515 l2cap_chan_lock(chan);
3518 case L2CAP_CR_SUCCESS:
3519 l2cap_state_change(chan, BT_CONFIG);
3522 clear_bit(CONF_CONNECT_PEND, &chan->conf_state);
/* Only send the initial Configure Request once */
3524 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
3527 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3528 l2cap_build_conf_req(chan, req), req);
3529 chan->num_conf_req++;
3533 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
/* Any non-success/non-pending result refuses the connection */
3537 l2cap_chan_del(chan, ECONNREFUSED);
3541 l2cap_chan_unlock(chan);
3544 mutex_unlock(&conn->chan_lock);
/* Choose the channel's FCS setting after configuration: no FCS outside
 * ERTM/streaming; CRC16 otherwise, unless the peer explicitly asked for
 * no FCS (CONF_NO_FCS_RECV) during negotiation.
 */
3549 static inline void set_default_fcs(struct l2cap_chan *chan)
3551 /* FCS is enabled only in ERTM or streaming mode, if one or both
3554 if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
3555 chan->fcs = L2CAP_FCS_NONE;
3556 else if (!test_bit(CONF_NO_FCS_RECV, &chan->conf_state))
3557 chan->fcs = L2CAP_FCS_CRC16;
/* Handle an incoming Configure Request.  Options may arrive split over
 * several requests (continuation flag); fragments accumulate in
 * chan->conf_req until complete, then l2cap_parse_conf_req() builds the
 * response.  Also finalizes the channel (FCS, ERTM init, ready) once
 * both input and output configuration are done.
 */
3560 static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
3562 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
3565 struct l2cap_chan *chan;
3568 dcid = __le16_to_cpu(req->dcid);
3569 flags = __le16_to_cpu(req->flags);
3571 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
3573 chan = l2cap_get_chan_by_scid(conn, dcid);
/* Config is only legal in BT_CONFIG/BT_CONNECT2; otherwise reject
 * with an invalid-CID command reject */
3577 if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
3578 struct l2cap_cmd_rej_cid rej;
3580 rej.reason = __constant_cpu_to_le16(L2CAP_REJ_INVALID_CID);
3581 rej.scid = cpu_to_le16(chan->scid);
3582 rej.dcid = cpu_to_le16(chan->dcid);
3584 l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
3589 /* Reject if config buffer is too small. */
3590 len = cmd_len - sizeof(*req);
/* Bounds check prevents overflowing the fixed-size conf_req buffer */
3591 if (len < 0 || chan->conf_len + len > sizeof(chan->conf_req)) {
3592 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
3593 l2cap_build_conf_rsp(chan, rsp,
3594 L2CAP_CONF_REJECT, flags), rsp);
3599 memcpy(chan->conf_req + chan->conf_len, req->data, len);
3600 chan->conf_len += len;
3602 if (flags & L2CAP_CONF_FLAG_CONTINUATION) {
3603 /* Incomplete config. Send empty response. */
3604 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
3605 l2cap_build_conf_rsp(chan, rsp,
3606 L2CAP_CONF_SUCCESS, flags), rsp);
3610 /* Complete config. */
3611 len = l2cap_parse_conf_req(chan, rsp);
/* Negative length means the request was unacceptable: disconnect */
3613 l2cap_send_disconn_req(conn, chan, ECONNRESET);
3617 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
3618 chan->num_conf_rsp++;
3620 /* Reset config buffer. */
3623 if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
/* Both directions configured: finalize FCS/ERTM and go ready */
3626 if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
3627 set_default_fcs(chan);
3629 if (chan->mode == L2CAP_MODE_ERTM ||
3630 chan->mode == L2CAP_MODE_STREAMING)
3631 err = l2cap_ertm_init(chan);
3634 l2cap_send_disconn_req(chan->conn, chan, -err);
3636 l2cap_chan_ready(chan);
3641 if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
3643 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3644 l2cap_build_conf_req(chan, buf), buf);
3645 chan->num_conf_req++;
3648 /* Got Conf Rsp PENDING from remote side and asume we sent
3649 Conf Rsp PENDING in the code above */
3650 if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
3651 test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
3653 /* check compatibility */
3655 clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
3656 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
3658 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
3659 l2cap_build_conf_rsp(chan, rsp,
3660 L2CAP_CONF_SUCCESS, flags), rsp);
3664 l2cap_chan_unlock(chan);
/* Handle a Configure Response from the peer.  SUCCESS records the final
 * parameters; PENDING may trigger a compatibility check and response;
 * UNACCEPT re-parses the peer's counter-proposal into a new Configure
 * Request (up to L2CAP_CONF_MAX_CONF_RSP rounds); anything else tears
 * the channel down.  Finalizes the channel once both sides are done.
 */
3668 static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3670 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
3671 u16 scid, flags, result;
3672 struct l2cap_chan *chan;
3673 int len = le16_to_cpu(cmd->len) - sizeof(*rsp);
3676 scid = __le16_to_cpu(rsp->scid);
3677 flags = __le16_to_cpu(rsp->flags);
3678 result = __le16_to_cpu(rsp->result);
3680 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid, flags,
3683 chan = l2cap_get_chan_by_scid(conn, scid);
3688 case L2CAP_CONF_SUCCESS:
3689 l2cap_conf_rfc_get(chan, rsp->data, len);
3690 clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
3693 case L2CAP_CONF_PENDING:
3694 set_bit(CONF_REM_CONF_PEND, &chan->conf_state);
/* We also answered PENDING: parse now and settle the exchange */
3696 if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
3699 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
3702 l2cap_send_disconn_req(conn, chan, ECONNRESET);
3706 /* check compatibility */
3708 clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
3709 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
3711 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
3712 l2cap_build_conf_rsp(chan, buf,
3713 L2CAP_CONF_SUCCESS, 0x0000), buf);
3717 case L2CAP_CONF_UNACCEPT:
3718 if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
/* Guard: the peer's option list must fit our request buffer */
3721 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
3722 l2cap_send_disconn_req(conn, chan, ECONNRESET);
3726 /* throw out any old stored conf requests */
3727 result = L2CAP_CONF_SUCCESS;
3728 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
3731 l2cap_send_disconn_req(conn, chan, ECONNRESET);
3735 l2cap_send_cmd(conn, l2cap_get_ident(conn),
3736 L2CAP_CONF_REQ, len, req);
3737 chan->num_conf_req++;
3738 if (result != L2CAP_CONF_SUCCESS)
/* default: fatal result — error the channel and disconnect */
3744 l2cap_chan_set_err(chan, ECONNRESET);
3746 __set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
3747 l2cap_send_disconn_req(conn, chan, ECONNRESET);
/* More option fragments still to come */
3751 if (flags & L2CAP_CONF_FLAG_CONTINUATION)
3754 set_bit(CONF_INPUT_DONE, &chan->conf_state);
3756 if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
3757 set_default_fcs(chan);
3759 if (chan->mode == L2CAP_MODE_ERTM ||
3760 chan->mode == L2CAP_MODE_STREAMING)
3761 err = l2cap_ertm_init(chan);
3764 l2cap_send_disconn_req(chan->conn, chan, -err);
3766 l2cap_chan_ready(chan);
3770 l2cap_chan_unlock(chan);
/* Handle a Disconnection Request: acknowledge with a Disconnection
 * Response, shut down the owning socket, and remove the channel.  The
 * extra hold/put pair keeps the channel alive across ops->close() after
 * l2cap_chan_del() drops the connection's reference.
 */
3774 static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3776 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
3777 struct l2cap_disconn_rsp rsp;
3779 struct l2cap_chan *chan;
3782 scid = __le16_to_cpu(req->scid);
3783 dcid = __le16_to_cpu(req->dcid);
3785 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
3787 mutex_lock(&conn->chan_lock);
/* The peer's dcid is our scid — look the channel up by it */
3789 chan = __l2cap_get_chan_by_scid(conn, dcid);
3791 mutex_unlock(&conn->chan_lock);
3795 l2cap_chan_lock(chan);
3799 rsp.dcid = cpu_to_le16(chan->scid);
3800 rsp.scid = cpu_to_le16(chan->dcid);
3801 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
3804 sk->sk_shutdown = SHUTDOWN_MASK;
3807 l2cap_chan_hold(chan);
3808 l2cap_chan_del(chan, ECONNRESET);
3810 l2cap_chan_unlock(chan);
3812 chan->ops->close(chan);
3813 l2cap_chan_put(chan);
3815 mutex_unlock(&conn->chan_lock);
/* Handle a Disconnection Response to our earlier request: remove the
 * channel (no error) and close it, using hold/put to keep it valid
 * through ops->close() after deletion.
 */
3820 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3822 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
3824 struct l2cap_chan *chan;
3826 scid = __le16_to_cpu(rsp->scid);
3827 dcid = __le16_to_cpu(rsp->dcid);
3829 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
3831 mutex_lock(&conn->chan_lock);
3833 chan = __l2cap_get_chan_by_scid(conn, scid);
3835 mutex_unlock(&conn->chan_lock);
3839 l2cap_chan_lock(chan);
3841 l2cap_chan_hold(chan);
/* err == 0: this is a clean, locally-initiated disconnect */
3842 l2cap_chan_del(chan, 0);
3844 l2cap_chan_unlock(chan);
3846 chan->ops->close(chan);
3847 l2cap_chan_put(chan);
3849 mutex_unlock(&conn->chan_lock);
/* Answer an Information Request: report our feature mask, our fixed
 * channel map, or NOT_SUPPORTED for any other info type.
 */
3854 static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3856 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
3859 type = __le16_to_cpu(req->type);
3861 BT_DBG("type 0x%4.4x", type);
3863 if (type == L2CAP_IT_FEAT_MASK) {
3865 u32 feat_mask = l2cap_feat_mask;
3866 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
3867 rsp->type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);
3868 rsp->result = __constant_cpu_to_le16(L2CAP_IR_SUCCESS);
/* Advertised features beyond the static mask (conditions on the
 * surrounding lines are elided in this view) */
3870 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
3873 feat_mask |= L2CAP_FEAT_EXT_FLOW
3874 | L2CAP_FEAT_EXT_WINDOW;
/* Feature mask goes on the wire little-endian and unaligned */
3876 put_unaligned_le32(feat_mask, rsp->data);
3877 l2cap_send_cmd(conn, cmd->ident,
3878 L2CAP_INFO_RSP, sizeof(buf), buf);
3879 } else if (type == L2CAP_IT_FIXED_CHAN) {
3881 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
/* A2MP bit in the fixed-channel map tracks HS support */
3884 l2cap_fixed_chan[0] |= L2CAP_FC_A2MP;
3886 l2cap_fixed_chan[0] &= ~L2CAP_FC_A2MP;
3888 rsp->type = __constant_cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3889 rsp->result = __constant_cpu_to_le16(L2CAP_IR_SUCCESS);
3890 memcpy(rsp->data, l2cap_fixed_chan, sizeof(l2cap_fixed_chan));
3891 l2cap_send_cmd(conn, cmd->ident,
3892 L2CAP_INFO_RSP, sizeof(buf), buf);
3894 struct l2cap_info_rsp rsp;
3895 rsp.type = cpu_to_le16(type);
3896 rsp.result = __constant_cpu_to_le16(L2CAP_IR_NOTSUPP);
3897 l2cap_send_cmd(conn, cmd->ident,
3898 L2CAP_INFO_RSP, sizeof(rsp), &rsp);
/* Handle an Information Response to our query.  After the feature mask
 * arrives, chain a fixed-channel query if supported; once everything is
 * known (or on failure), mark the exchange done and start pending
 * channels via l2cap_conn_start().
 */
3904 static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3906 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
3909 type = __le16_to_cpu(rsp->type);
3910 result = __le16_to_cpu(rsp->result);
3912 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
3914 /* L2CAP Info req/rsp are unbound to channels, add extra checks */
3915 if (cmd->ident != conn->info_ident ||
3916 conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
3919 cancel_delayed_work(&conn->info_timer);
/* Peer refused: give up on discovery but still start channels */
3921 if (result != L2CAP_IR_SUCCESS) {
3922 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3923 conn->info_ident = 0;
3925 l2cap_conn_start(conn);
3931 case L2CAP_IT_FEAT_MASK:
3932 conn->feat_mask = get_unaligned_le32(rsp->data);
/* Follow up with a fixed-channel query when supported */
3934 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
3935 struct l2cap_info_req req;
3936 req.type = __constant_cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3938 conn->info_ident = l2cap_get_ident(conn);
3940 l2cap_send_cmd(conn, conn->info_ident,
3941 L2CAP_INFO_REQ, sizeof(req), &req);
3943 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3944 conn->info_ident = 0;
3946 l2cap_conn_start(conn);
3950 case L2CAP_IT_FIXED_CHAN:
3951 conn->fixed_chan_mask = rsp->data[0];
3952 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3953 conn->info_ident = 0;
3955 l2cap_conn_start(conn);
/* Handle an AMP Create Channel Request.  Currently a placeholder that
 * always replies with L2CAP_CR_NO_MEM (rejection); validates the PDU
 * length first.
 */
3962 static inline int l2cap_create_channel_req(struct l2cap_conn *conn,
3963 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3966 struct l2cap_create_chan_req *req = data;
3967 struct l2cap_create_chan_rsp rsp;
/* Malformed PDU: length must match the request structure exactly */
3970 if (cmd_len != sizeof(*req))
3976 psm = le16_to_cpu(req->psm);
3977 scid = le16_to_cpu(req->scid);
3979 BT_DBG("psm 0x%2.2x, scid 0x%4.4x, amp_id %d", psm, scid, req->amp_id);
3981 /* Placeholder: Always reject */
3983 rsp.scid = cpu_to_le16(scid);
3984 rsp.result = __constant_cpu_to_le16(L2CAP_CR_NO_MEM);
3985 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
3987 l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
/* AMP Create Channel Response shares the wire format of a Connection
 * Response, so delegate to l2cap_connect_rsp().
 */
3993 static inline int l2cap_create_channel_rsp(struct l2cap_conn *conn,
3994 struct l2cap_cmd_hdr *cmd, void *data)
3996 BT_DBG("conn %p", conn);
3998 return l2cap_connect_rsp(conn, cmd, data);
/* Send a Move Channel Response with the given initiator CID and result,
 * reusing @ident from the request being answered.
 */
4001 static void l2cap_send_move_chan_rsp(struct l2cap_conn *conn, u8 ident,
4002 u16 icid, u16 result)
4004 struct l2cap_move_chan_rsp rsp;
4006 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
4008 rsp.icid = cpu_to_le16(icid);
4009 rsp.result = cpu_to_le16(result);
4011 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_RSP, sizeof(rsp), &rsp);
/* Send a Move Channel Confirmation with a freshly allocated ident, which
 * is also stored in chan->ident so the confirmation response can be
 * matched later.
 */
4014 static void l2cap_send_move_chan_cfm(struct l2cap_conn *conn,
4015 struct l2cap_chan *chan,
4016 u16 icid, u16 result)
4018 struct l2cap_move_chan_cfm cfm;
4021 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
4023 ident = l2cap_get_ident(conn);
/* NOTE(review): chan may be NULL here (see l2cap_move_channel_rsp
 * caller) — confirm the elided guard around this store upstream */
4025 chan->ident = ident;
4027 cfm.icid = cpu_to_le16(icid);
4028 cfm.result = cpu_to_le16(result);
4030 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM, sizeof(cfm), &cfm);
/* Send a Move Channel Confirmation Response echoing @icid, using the
 * @ident of the confirmation being acknowledged.
 */
4033 static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
4036 struct l2cap_move_chan_cfm_rsp rsp;
4038 BT_DBG("icid 0x%4.4x", icid);
4040 rsp.icid = cpu_to_le16(icid);
4041 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
/* Handle a Move Channel Request (AMP channel move).  Placeholder: after
 * validating the PDU length, always responds NOT_ALLOWED.
 */
4044 static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
4045 struct l2cap_cmd_hdr *cmd,
4046 u16 cmd_len, void *data)
4048 struct l2cap_move_chan_req *req = data;
4050 u16 result = L2CAP_MR_NOT_ALLOWED;
4052 if (cmd_len != sizeof(*req))
4055 icid = le16_to_cpu(req->icid);
4057 BT_DBG("icid 0x%4.4x, dest_amp_id %d", icid, req->dest_amp_id);
4062 /* Placeholder: Always refuse */
4063 l2cap_send_move_chan_rsp(conn, cmd->ident, icid, result);
/* Handle a Move Channel Response.  Placeholder: always confirms the move
 * as UNCONFIRMED (with a NULL chan — see the note in
 * l2cap_send_move_chan_cfm).
 */
4068 static inline int l2cap_move_channel_rsp(struct l2cap_conn *conn,
4069 struct l2cap_cmd_hdr *cmd,
4070 u16 cmd_len, void *data)
4072 struct l2cap_move_chan_rsp *rsp = data;
4075 if (cmd_len != sizeof(*rsp))
4078 icid = le16_to_cpu(rsp->icid);
4079 result = le16_to_cpu(rsp->result);
4081 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
4083 /* Placeholder: Always unconfirmed */
4084 l2cap_send_move_chan_cfm(conn, NULL, icid, L2CAP_MC_UNCONFIRMED);
/* Handle a Move Channel Confirmation: validate the length and always
 * acknowledge with a Confirmation Response (move logic not implemented).
 */
4089 static inline int l2cap_move_channel_confirm(struct l2cap_conn *conn,
4090 struct l2cap_cmd_hdr *cmd,
4091 u16 cmd_len, void *data)
4093 struct l2cap_move_chan_cfm *cfm = data;
4096 if (cmd_len != sizeof(*cfm))
4099 icid = le16_to_cpu(cfm->icid);
4100 result = le16_to_cpu(cfm->result);
4102 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
4104 l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
/* Handle a Move Channel Confirmation Response: length check and debug
 * logging only — no further action is taken here.
 */
4109 static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
4110 struct l2cap_cmd_hdr *cmd,
4111 u16 cmd_len, void *data)
4113 struct l2cap_move_chan_cfm_rsp *rsp = data;
4116 if (cmd_len != sizeof(*rsp))
4119 icid = le16_to_cpu(rsp->icid);
4121 BT_DBG("icid 0x%4.4x", icid);
/* Validate LE connection parameter update values against the allowed
 * ranges (min/max interval 6..3200, supervision timeout multiplier
 * 10..3200, latency bounded by the timeout).  The failing-return lines
 * are elided in this view; success falls through to return 0.
 */
4126 static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency,
4131 if (min > max || min < 6 || max > 3200)
4134 if (to_multiplier < 10 || to_multiplier > 3200)
/* Supervision timeout must exceed the max connection interval */
4137 if (max >= to_multiplier * 8)
4140 max_latency = (to_multiplier * 8 / max) - 1;
4141 if (latency > 499 || latency > max_latency)
/* Handle an LE Connection Parameter Update Request.  Only the master
 * side may process it; the parameters are validated, a reply is sent,
 * and on acceptance the controller is asked to update the link.
 */
4147 static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
4148 struct l2cap_cmd_hdr *cmd, u8 *data)
4150 struct hci_conn *hcon = conn->hcon;
4151 struct l2cap_conn_param_update_req *req;
4152 struct l2cap_conn_param_update_rsp rsp;
4153 u16 min, max, latency, to_multiplier, cmd_len;
/* Slaves must not act on this request */
4156 if (!(hcon->link_mode & HCI_LM_MASTER))
4159 cmd_len = __le16_to_cpu(cmd->len);
4160 if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
4163 req = (struct l2cap_conn_param_update_req *) data;
4164 min = __le16_to_cpu(req->min);
4165 max = __le16_to_cpu(req->max);
4166 latency = __le16_to_cpu(req->latency);
4167 to_multiplier = __le16_to_cpu(req->to_multiplier);
4169 BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
4170 min, max, latency, to_multiplier);
4172 memset(&rsp, 0, sizeof(rsp));
4174 err = l2cap_check_conn_param(min, max, latency, to_multiplier);
4176 rsp.result = __constant_cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
4178 rsp.result = __constant_cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
4180 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
/* Only reached on acceptance (guard elided in this view) */
4184 hci_le_conn_update(hcon, min, max, latency, to_multiplier);
/* Dispatch one BR/EDR signaling command to its handler by opcode.
 * Unknown opcodes are logged; Echo Requests are answered inline by
 * reflecting the payload.
 */
4189 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
4190 struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
4194 switch (cmd->code) {
4195 case L2CAP_COMMAND_REJ:
4196 l2cap_command_rej(conn, cmd, data);
4199 case L2CAP_CONN_REQ:
4200 err = l2cap_connect_req(conn, cmd, data);
4203 case L2CAP_CONN_RSP:
4204 err = l2cap_connect_rsp(conn, cmd, data);
4207 case L2CAP_CONF_REQ:
4208 err = l2cap_config_req(conn, cmd, cmd_len, data);
4211 case L2CAP_CONF_RSP:
4212 err = l2cap_config_rsp(conn, cmd, data);
4215 case L2CAP_DISCONN_REQ:
4216 err = l2cap_disconnect_req(conn, cmd, data);
4219 case L2CAP_DISCONN_RSP:
4220 err = l2cap_disconnect_rsp(conn, cmd, data);
4223 case L2CAP_ECHO_REQ:
/* Echo: send the received payload straight back */
4224 l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
4227 case L2CAP_ECHO_RSP:
4230 case L2CAP_INFO_REQ:
4231 err = l2cap_information_req(conn, cmd, data);
4234 case L2CAP_INFO_RSP:
4235 err = l2cap_information_rsp(conn, cmd, data);
4238 case L2CAP_CREATE_CHAN_REQ:
4239 err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
4242 case L2CAP_CREATE_CHAN_RSP:
4243 err = l2cap_create_channel_rsp(conn, cmd, data);
4246 case L2CAP_MOVE_CHAN_REQ:
4247 err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
4250 case L2CAP_MOVE_CHAN_RSP:
4251 err = l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
4254 case L2CAP_MOVE_CHAN_CFM:
4255 err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
4258 case L2CAP_MOVE_CHAN_CFM_RSP:
4259 err = l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
4263 BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
/* Dispatch one LE signaling command.  Only the connection parameter
 * update request does real work; rejects and update responses are
 * accepted silently, anything else is logged as unknown.
 */
4271 static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
4272 struct l2cap_cmd_hdr *cmd, u8 *data)
4274 switch (cmd->code) {
4275 case L2CAP_COMMAND_REJ:
4278 case L2CAP_CONN_PARAM_UPDATE_REQ:
4279 return l2cap_conn_param_update_req(conn, cmd, data);
4281 case L2CAP_CONN_PARAM_UPDATE_RSP:
4285 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
/* Process an skb received on the signaling channel: iterate over the
 * packed command PDUs, dispatch each to the LE or BR/EDR handler, and
 * answer failed commands with a Command Reject.
 */
4290 static inline void l2cap_sig_channel(struct l2cap_conn *conn,
4291 struct sk_buff *skb)
4293 u8 *data = skb->data;
4295 struct l2cap_cmd_hdr cmd;
/* Give raw (SOCK_RAW) listeners a copy of the signaling traffic */
4298 l2cap_raw_recv(conn, skb);
4300 while (len >= L2CAP_CMD_HDR_SIZE) {
4302 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
4303 data += L2CAP_CMD_HDR_SIZE;
4304 len -= L2CAP_CMD_HDR_SIZE;
4306 cmd_len = le16_to_cpu(cmd.len);
4308 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);
/* A command claiming more payload than remains, or ident 0
 * (reserved), means the PDU stream is corrupted: stop parsing */
4310 if (cmd_len > len || !cmd.ident) {
4311 BT_DBG("corrupted command");
4315 if (conn->hcon->type == LE_LINK)
4316 err = l2cap_le_sig_cmd(conn, &cmd, data);
4318 err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);
4321 struct l2cap_cmd_rej_unk rej;
4323 BT_ERR("Wrong link type (%d)", err);
4325 /* FIXME: Map err to a valid reason */
4326 rej.reason = __constant_cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
4327 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
/* Verify and strip the trailing CRC16 FCS of a received ERTM/streaming
 * frame.  The CRC covers the L2CAP header (which sits just before
 * skb->data) plus the payload.  Returns 0 when the FCS matches or is
 * not in use; non-zero return on mismatch is elided in this view.
 */
4337 static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
4339 u16 our_fcs, rcv_fcs;
/* Header length depends on the control-field format in use */
4342 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
4343 hdr_size = L2CAP_EXT_HDR_SIZE;
4345 hdr_size = L2CAP_ENH_HDR_SIZE;
4347 if (chan->fcs == L2CAP_FCS_CRC16) {
/* Trim the FCS off first; it still sits readable past the new
 * skb->len, which is where rcv_fcs is fetched from */
4348 skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
4349 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
4350 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
4352 if (our_fcs != rcv_fcs)
/* Poll the peer with the F-bit set: send RNR if we are locally busy,
 * otherwise flush pending I-frames (which may carry the F-bit), and
 * fall back to an RR S-frame if no frame carried it.
 */
4358 static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
4360 struct l2cap_ctrl control;
4362 BT_DBG("chan %p", chan);
4364 memset(&control, 0, sizeof(control));
4367 control.reqseq = chan->buffer_seq;
/* Request that the next outgoing frame carries the F-bit */
4368 set_bit(CONN_SEND_FBIT, &chan->conn_state);
4370 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4371 control.super = L2CAP_SUPER_RNR;
4372 l2cap_send_sframe(chan, &control);
/* Peer just un-busied: restart the retransmission timer if
 * frames are still awaiting acknowledgement */
4375 if (test_and_clear_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
4376 chan->unacked_frames > 0)
4377 __set_retrans_timer(chan);
4379 /* Send pending iframes */
4380 l2cap_ertm_send(chan);
4382 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
4383 test_bit(CONN_SEND_FBIT, &chan->conn_state)) {
4384 /* F-bit wasn't sent in an s-frame or i-frame yet, so
4387 control.super = L2CAP_SUPER_RR;
4388 l2cap_send_sframe(chan, &control);
/* Append @new_frag to @skb's frag_list, tracking the list tail through
 * *@last_frag to avoid walking the list, and update the aggregate
 * length/truesize accounting on the head skb.
 */
4392 static void append_skb_frag(struct sk_buff *skb,
4393 struct sk_buff *new_frag, struct sk_buff **last_frag)
4395 /* skb->len reflects data in skb as well as all fragments
4396 * skb->data_len reflects only data in fragments
4398 if (!skb_has_frag_list(skb))
4399 skb_shinfo(skb)->frag_list = new_frag;
4401 new_frag->next = NULL;
4403 (*last_frag)->next = new_frag;
4404 *last_frag = new_frag;
/* Head skb accounts for all fragment bytes */
4406 skb->len += new_frag->len;
4407 skb->data_len += new_frag->len;
4408 skb->truesize += new_frag->truesize;
/* Reassemble an SDU from I-frames according to the SAR bits and deliver
 * it to the channel's ->recv() callback.  Unsegmented frames go up
 * directly; START frames pull the SDU length and begin accumulation in
 * chan->sdu; CONTINUE/END frames are appended via append_skb_frag().
 * NOTE(review): several error-path and state-reset lines fall outside
 * this excerpt.
 */
4411 static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb,
4412 struct l2cap_ctrl *control)
4416 switch (control->sar) {
4417 case L2CAP_SAR_UNSEGMENTED:
4421 err = chan->ops->recv(chan, skb);
4424 case L2CAP_SAR_START:
/* First fragment carries the total SDU length in its first 2 bytes. */
4428 chan->sdu_len = get_unaligned_le16(skb->data);
4429 skb_pull(skb, L2CAP_SDULEN_SIZE);
/* SDU larger than our MTU cannot be accepted. */
4431 if (chan->sdu_len > chan->imtu) {
4436 if (skb->len >= chan->sdu_len)
4440 chan->sdu_last_frag = skb;
4446 case L2CAP_SAR_CONTINUE:
4450 append_skb_frag(chan->sdu, skb,
4451 &chan->sdu_last_frag);
4454 if (chan->sdu->len >= chan->sdu_len)
4464 append_skb_frag(chan->sdu, skb,
4465 &chan->sdu_last_frag);
/* On the final fragment the accumulated length must match exactly. */
4468 if (chan->sdu->len != chan->sdu_len)
4471 err = chan->ops->recv(chan, chan->sdu);
4474 /* Reassembly complete */
4476 chan->sdu_last_frag = NULL;
/* Error path: drop the partially reassembled SDU. */
4484 kfree_skb(chan->sdu);
4486 chan->sdu_last_frag = NULL;
/* Feed a local-busy transition (detected/cleared) into the ERTM tx
 * state machine.  No-op for channels not in ERTM mode.
 */
4493 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
4497 if (chan->mode != L2CAP_MODE_ERTM)
4500 event = busy ? L2CAP_EV_LOCAL_BUSY_DETECTED : L2CAP_EV_LOCAL_BUSY_CLEAR;
4501 l2cap_tx(chan, NULL, NULL, event);
/* Drain the SREJ queue: deliver queued out-of-order I-frames to the
 * reassembler in sequence, stopping at the first gap or if local busy
 * is raised.  When the queue empties, the channel returns to the
 * normal RECV rx state and acks what was consumed.
 */
4504 static int l2cap_rx_queued_iframes(struct l2cap_chan *chan)
4507 /* Pass sequential frames to l2cap_reassemble_sdu()
4508 * until a gap is encountered.
4511 BT_DBG("chan %p", chan);
4513 while (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4514 struct sk_buff *skb;
4515 BT_DBG("Searching for skb with txseq %d (queue len %d)",
4516 chan->buffer_seq, skb_queue_len(&chan->srej_q));
4518 skb = l2cap_ertm_seq_in_queue(&chan->srej_q, chan->buffer_seq);
/* Found the next in-sequence frame: consume it and advance. */
4523 skb_unlink(skb, &chan->srej_q);
4524 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
4525 err = l2cap_reassemble_sdu(chan, skb, &bt_cb(skb)->control);
4530 if (skb_queue_empty(&chan->srej_q)) {
4531 chan->rx_state = L2CAP_RX_STATE_RECV;
4532 l2cap_send_ack(chan);
/* Handle an incoming SREJ S-frame: retransmit the single requested
 * frame.  A reqseq equal to next_tx_seq is invalid (nothing with that
 * number was ever sent) and forces a disconnect, as does exceeding the
 * per-channel max_tx retry limit.  P/F bits steer CONN_SREJ_ACT
 * bookkeeping so duplicate SREJs are not re-served.
 */
4538 static void l2cap_handle_srej(struct l2cap_chan *chan,
4539 struct l2cap_ctrl *control)
4541 struct sk_buff *skb;
4543 BT_DBG("chan %p, control %p", chan, control);
4545 if (control->reqseq == chan->next_tx_seq) {
4546 BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
4547 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4551 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);
4554 BT_DBG("Seq %d not available for retransmission",
/* max_tx == 0 means unlimited retries; otherwise enforce the cap. */
4559 if (chan->max_tx != 0 && bt_cb(skb)->control.retries >= chan->max_tx) {
4560 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
4561 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4565 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
/* SREJ with P-bit set: respond with F-bit and retransmit now. */
4567 if (control->poll) {
4568 l2cap_pass_to_tx(chan, control);
4570 set_bit(CONN_SEND_FBIT, &chan->conn_state);
4571 l2cap_retransmit(chan, control);
4572 l2cap_ertm_send(chan);
4574 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
4575 set_bit(CONN_SREJ_ACT, &chan->conn_state);
4576 chan->srej_save_reqseq = control->reqseq;
4579 l2cap_pass_to_tx_fbit(chan, control);
/* F-bit set: only retransmit if this isn't the SREJ we already
 * acted on (tracked via CONN_SREJ_ACT/srej_save_reqseq). */
4581 if (control->final) {
4582 if (chan->srej_save_reqseq != control->reqseq ||
4583 !test_and_clear_bit(CONN_SREJ_ACT,
4585 l2cap_retransmit(chan, control);
4587 l2cap_retransmit(chan, control);
4588 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
4589 set_bit(CONN_SREJ_ACT, &chan->conn_state);
4590 chan->srej_save_reqseq = control->reqseq;
/* Handle an incoming REJ S-frame: retransmit all unacked frames
 * starting at reqseq.  Invalid reqseq (== next_tx_seq) or exceeding
 * max_tx retries disconnects the channel.  CONN_REJ_ACT prevents
 * re-serving a REJ that was already acted upon when the F-bit arrives.
 */
4596 static void l2cap_handle_rej(struct l2cap_chan *chan,
4597 struct l2cap_ctrl *control)
4599 struct sk_buff *skb;
4601 BT_DBG("chan %p, control %p", chan, control);
4603 if (control->reqseq == chan->next_tx_seq) {
4604 BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
4605 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4609 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);
/* max_tx of zero means unlimited retransmissions. */
4611 if (chan->max_tx && skb &&
4612 bt_cb(skb)->control.retries >= chan->max_tx) {
4613 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
4614 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4618 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4620 l2cap_pass_to_tx(chan, control);
4622 if (control->final) {
/* Only retransmit on F-bit if we had not already handled this REJ. */
4623 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
4624 l2cap_retransmit_all(chan, control);
4626 l2cap_retransmit_all(chan, control);
4627 l2cap_ertm_send(chan);
4628 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F)
4629 set_bit(CONN_REJ_ACT, &chan->conn_state);
/* Classify a received I-frame's txseq relative to the rx window and
 * current rx state: expected, duplicate, unexpected (gap), one of the
 * SREJ-specific classes while in SREJ_SENT, or invalid.  "Invalid"
 * frames either get silently ignored (small tx window, see the double-
 * poll note below) or force a disconnect.
 */
4633 static u8 l2cap_classify_txseq(struct l2cap_chan *chan, u16 txseq)
4635 BT_DBG("chan %p, txseq %d", chan, txseq);
4637 BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan->last_acked_seq,
4638 chan->expected_tx_seq);
/* Extra classes only apply while SREJs are outstanding. */
4640 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
4641 if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
4643 /* See notes below regarding "double poll" and
4646 if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
4647 BT_DBG("Invalid/Ignore - after SREJ");
4648 return L2CAP_TXSEQ_INVALID_IGNORE;
4650 BT_DBG("Invalid - in window after SREJ sent");
4651 return L2CAP_TXSEQ_INVALID;
/* Head of the SREJ list is the retransmission we want next. */
4655 if (chan->srej_list.head == txseq) {
4656 BT_DBG("Expected SREJ");
4657 return L2CAP_TXSEQ_EXPECTED_SREJ;
4660 if (l2cap_ertm_seq_in_queue(&chan->srej_q, txseq)) {
4661 BT_DBG("Duplicate SREJ - txseq already stored");
4662 return L2CAP_TXSEQ_DUPLICATE_SREJ;
4665 if (l2cap_seq_list_contains(&chan->srej_list, txseq)) {
4666 BT_DBG("Unexpected SREJ - not requested");
4667 return L2CAP_TXSEQ_UNEXPECTED_SREJ;
4671 if (chan->expected_tx_seq == txseq) {
4672 if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
4674 BT_DBG("Invalid - txseq outside tx window");
4675 return L2CAP_TXSEQ_INVALID;
4678 return L2CAP_TXSEQ_EXPECTED;
/* txseq before expected_tx_seq (mod window) was already received. */
4682 if (__seq_offset(chan, txseq, chan->last_acked_seq) <
4683 __seq_offset(chan, chan->expected_tx_seq,
4684 chan->last_acked_seq)){
4685 BT_DBG("Duplicate - expected_tx_seq later than txseq");
4686 return L2CAP_TXSEQ_DUPLICATE;
4689 if (__seq_offset(chan, txseq, chan->last_acked_seq) >= chan->tx_win) {
4690 /* A source of invalid packets is a "double poll" condition,
4691 * where delays cause us to send multiple poll packets. If
4692 * the remote stack receives and processes both polls,
4693 * sequence numbers can wrap around in such a way that a
4694 * resent frame has a sequence number that looks like new data
4695 * with a sequence gap. This would trigger an erroneous SREJ
4698 * Fortunately, this is impossible with a tx window that's
4699 * less than half of the maximum sequence number, which allows
4700 * invalid frames to be safely ignored.
4702 * With tx window sizes greater than half of the tx window
4703 * maximum, the frame is invalid and cannot be ignored. This
4704 * causes a disconnect.
4707 if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
4708 BT_DBG("Invalid/Ignore - txseq outside tx window");
4709 return L2CAP_TXSEQ_INVALID_IGNORE;
4711 BT_DBG("Invalid - txseq outside tx window");
4712 return L2CAP_TXSEQ_INVALID;
4715 BT_DBG("Unexpected - txseq indicates missing frames");
4716 return L2CAP_TXSEQ_UNEXPECTED;
/* ERTM receiver state machine handler for the normal RECV state.
 * Dispatches on the rx event: in-sequence I-frames are reassembled and
 * acked; a sequence gap queues the frame and sends SREJs (moving to
 * SREJ_SENT state); RR/RNR/REJ/SREJ S-frames update the tx side.  Any
 * skb not queued for later (skb_in_use) is freed at the end.
 * NOTE(review): the skb_in_use assignments and some break/goto lines
 * fall outside this excerpt.
 */
4720 static int l2cap_rx_state_recv(struct l2cap_chan *chan,
4721 struct l2cap_ctrl *control,
4722 struct sk_buff *skb, u8 event)
4725 bool skb_in_use = 0;
4727 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
4731 case L2CAP_EV_RECV_IFRAME:
4732 switch (l2cap_classify_txseq(chan, control->txseq)) {
4733 case L2CAP_TXSEQ_EXPECTED:
4734 l2cap_pass_to_tx(chan, control);
/* While locally busy we drop even in-sequence data; it will be
 * recovered via retransmission once busy clears. */
4736 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4737 BT_DBG("Busy, discarding expected seq %d",
4742 chan->expected_tx_seq = __next_seq(chan,
4745 chan->buffer_seq = chan->expected_tx_seq;
4748 err = l2cap_reassemble_sdu(chan, skb, control);
4752 if (control->final) {
4753 if (!test_and_clear_bit(CONN_REJ_ACT,
4754 &chan->conn_state)) {
4756 l2cap_retransmit_all(chan, control);
4757 l2cap_ertm_send(chan);
4761 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
4762 l2cap_send_ack(chan);
4764 case L2CAP_TXSEQ_UNEXPECTED:
4765 l2cap_pass_to_tx(chan, control);
4767 /* Can't issue SREJ frames in the local busy state.
4768 * Drop this frame, it will be seen as missing
4769 * when local busy is exited.
4771 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4772 BT_DBG("Busy, discarding unexpected seq %d",
4777 /* There was a gap in the sequence, so an SREJ
4778 * must be sent for each missing frame. The
4779 * current frame is stored for later use.
4781 skb_queue_tail(&chan->srej_q, skb);
4783 BT_DBG("Queued %p (queue len %d)", skb,
4784 skb_queue_len(&chan->srej_q));
4786 clear_bit(CONN_SREJ_ACT, &chan->conn_state);
4787 l2cap_seq_list_clear(&chan->srej_list);
4788 l2cap_send_srej(chan, control->txseq);
4790 chan->rx_state = L2CAP_RX_STATE_SREJ_SENT;
4792 case L2CAP_TXSEQ_DUPLICATE:
4793 l2cap_pass_to_tx(chan, control);
4795 case L2CAP_TXSEQ_INVALID_IGNORE:
4797 case L2CAP_TXSEQ_INVALID:
4799 l2cap_send_disconn_req(chan->conn, chan,
4804 case L2CAP_EV_RECV_RR:
4805 l2cap_pass_to_tx(chan, control);
4806 if (control->final) {
4807 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4809 if (!test_and_clear_bit(CONN_REJ_ACT,
4810 &chan->conn_state)) {
4812 l2cap_retransmit_all(chan, control);
4815 l2cap_ertm_send(chan);
4816 } else if (control->poll) {
4817 l2cap_send_i_or_rr_or_rnr(chan);
4819 if (test_and_clear_bit(CONN_REMOTE_BUSY,
4820 &chan->conn_state) &&
4821 chan->unacked_frames)
4822 __set_retrans_timer(chan);
4824 l2cap_ertm_send(chan);
4827 case L2CAP_EV_RECV_RNR:
4828 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4829 l2cap_pass_to_tx(chan, control);
4830 if (control && control->poll) {
4831 set_bit(CONN_SEND_FBIT, &chan->conn_state);
4832 l2cap_send_rr_or_rnr(chan, 0);
4834 __clear_retrans_timer(chan);
4835 l2cap_seq_list_clear(&chan->retrans_list);
4837 case L2CAP_EV_RECV_REJ:
4838 l2cap_handle_rej(chan, control);
4840 case L2CAP_EV_RECV_SREJ:
4841 l2cap_handle_srej(chan, control);
/* Frames that were not queued/consumed above are freed here. */
4847 if (skb && !skb_in_use) {
4848 BT_DBG("Freeing %p", skb);
/* ERTM receiver state machine handler for the SREJ_SENT state (one or
 * more SREJs are outstanding).  Received I-frames are queued in
 * srej_q; an expected SREJ retransmission pops the srej_list and, once
 * sequential frames are available, l2cap_rx_queued_iframes() drains
 * them.  RR/RNR handling differs from RECV state: polls are answered
 * with the SREJ tail rather than a plain RR.
 * NOTE(review): skb_in_use assignments and break lines are not visible
 * in this excerpt.
 */
4855 static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan,
4856 struct l2cap_ctrl *control,
4857 struct sk_buff *skb, u8 event)
4860 u16 txseq = control->txseq;
4861 bool skb_in_use = 0;
4863 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
4867 case L2CAP_EV_RECV_IFRAME:
4868 switch (l2cap_classify_txseq(chan, txseq)) {
4869 case L2CAP_TXSEQ_EXPECTED:
4870 /* Keep frame for reassembly later */
4871 l2cap_pass_to_tx(chan, control);
4872 skb_queue_tail(&chan->srej_q, skb);
4874 BT_DBG("Queued %p (queue len %d)", skb,
4875 skb_queue_len(&chan->srej_q));
4877 chan->expected_tx_seq = __next_seq(chan, txseq);
4879 case L2CAP_TXSEQ_EXPECTED_SREJ:
/* This is the retransmission we asked for; drop it from the
 * outstanding-SREJ list and try to drain the queue. */
4880 l2cap_seq_list_pop(&chan->srej_list);
4882 l2cap_pass_to_tx(chan, control);
4883 skb_queue_tail(&chan->srej_q, skb);
4885 BT_DBG("Queued %p (queue len %d)", skb,
4886 skb_queue_len(&chan->srej_q));
4888 err = l2cap_rx_queued_iframes(chan);
4893 case L2CAP_TXSEQ_UNEXPECTED:
4894 /* Got a frame that can't be reassembled yet.
4895 * Save it for later, and send SREJs to cover
4896 * the missing frames.
4898 skb_queue_tail(&chan->srej_q, skb);
4900 BT_DBG("Queued %p (queue len %d)", skb,
4901 skb_queue_len(&chan->srej_q));
4903 l2cap_pass_to_tx(chan, control);
4904 l2cap_send_srej(chan, control->txseq);
4906 case L2CAP_TXSEQ_UNEXPECTED_SREJ:
4907 /* This frame was requested with an SREJ, but
4908 * some expected retransmitted frames are
4909 * missing. Request retransmission of missing
4912 skb_queue_tail(&chan->srej_q, skb);
4914 BT_DBG("Queued %p (queue len %d)", skb,
4915 skb_queue_len(&chan->srej_q));
4917 l2cap_pass_to_tx(chan, control);
4918 l2cap_send_srej_list(chan, control->txseq);
4920 case L2CAP_TXSEQ_DUPLICATE_SREJ:
4921 /* We've already queued this frame. Drop this copy. */
4922 l2cap_pass_to_tx(chan, control);
4924 case L2CAP_TXSEQ_DUPLICATE:
4925 /* Expecting a later sequence number, so this frame
4926 * was already received. Ignore it completely.
4929 case L2CAP_TXSEQ_INVALID_IGNORE:
4931 case L2CAP_TXSEQ_INVALID:
4933 l2cap_send_disconn_req(chan->conn, chan,
4938 case L2CAP_EV_RECV_RR:
4939 l2cap_pass_to_tx(chan, control);
4940 if (control->final) {
4941 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4943 if (!test_and_clear_bit(CONN_REJ_ACT,
4944 &chan->conn_state)) {
4946 l2cap_retransmit_all(chan, control);
4949 l2cap_ertm_send(chan);
4950 } else if (control->poll) {
4951 if (test_and_clear_bit(CONN_REMOTE_BUSY,
4952 &chan->conn_state) &&
4953 chan->unacked_frames) {
4954 __set_retrans_timer(chan);
/* Answer the poll by re-requesting the tail of the SREJ list
 * with the F-bit set. */
4957 set_bit(CONN_SEND_FBIT, &chan->conn_state);
4958 l2cap_send_srej_tail(chan);
4960 if (test_and_clear_bit(CONN_REMOTE_BUSY,
4961 &chan->conn_state) &&
4962 chan->unacked_frames)
4963 __set_retrans_timer(chan);
4965 l2cap_send_ack(chan);
4968 case L2CAP_EV_RECV_RNR:
4969 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4970 l2cap_pass_to_tx(chan, control);
4971 if (control->poll) {
4972 l2cap_send_srej_tail(chan);
/* No poll: acknowledge the RNR with a plain RR S-frame. */
4974 struct l2cap_ctrl rr_control;
4975 memset(&rr_control, 0, sizeof(rr_control));
4976 rr_control.sframe = 1;
4977 rr_control.super = L2CAP_SUPER_RR;
4978 rr_control.reqseq = chan->buffer_seq;
4979 l2cap_send_sframe(chan, &rr_control);
4983 case L2CAP_EV_RECV_REJ:
4984 l2cap_handle_rej(chan, control);
4986 case L2CAP_EV_RECV_SREJ:
4987 l2cap_handle_srej(chan, control);
/* Frames that were not queued above are freed here. */
4991 if (skb && !skb_in_use) {
4992 BT_DBG("Freeing %p", skb);
/* Return true if reqseq acknowledges a frame that has actually been
 * sent and is not yet acked, i.e. it lies within [expected_ack_seq,
 * next_tx_seq] under modular sequence arithmetic.
 */
4999 static bool __valid_reqseq(struct l2cap_chan *chan, u16 reqseq)
5001 /* Make sure reqseq is for a packet that has been sent but not acked */
5004 unacked = __seq_offset(chan, chan->next_tx_seq, chan->expected_ack_seq);
5005 return __seq_offset(chan, chan->next_tx_seq, reqseq) <= unacked;
/* Top-level ERTM rx entry point: validate the frame's reqseq, then
 * dispatch to the handler for the current rx state (RECV or
 * SREJ_SENT).  An out-of-range reqseq disconnects the channel.
 */
5008 static int l2cap_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
5009 struct sk_buff *skb, u8 event)
5013 BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan,
5014 control, skb, event, chan->rx_state);
5016 if (__valid_reqseq(chan, control->reqseq)) {
5017 switch (chan->rx_state) {
5018 case L2CAP_RX_STATE_RECV:
5019 err = l2cap_rx_state_recv(chan, control, skb, event);
5021 case L2CAP_RX_STATE_SREJ_SENT:
5022 err = l2cap_rx_state_srej_sent(chan, control, skb,
/* reqseq acknowledges something never sent: protocol violation. */
5030 BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d",
5031 control->reqseq, chan->next_tx_seq,
5032 chan->expected_ack_seq);
5033 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
/* Streaming-mode receive: only in-sequence frames are reassembled;
 * anything else (a gap) discards any partial SDU and drops the frame.
 * Streaming mode never retransmits, so sequence state is simply
 * advanced past the received txseq.
 */
5039 static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
5040 struct sk_buff *skb)
5044 BT_DBG("chan %p, control %p, skb %p, state %d", chan, control, skb,
5047 if (l2cap_classify_txseq(chan, control->txseq) ==
5048 L2CAP_TXSEQ_EXPECTED) {
5049 l2cap_pass_to_tx(chan, control);
5051 BT_DBG("buffer_seq %d->%d", chan->buffer_seq,
5052 __next_seq(chan, chan->buffer_seq));
5054 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
5056 l2cap_reassemble_sdu(chan, skb, control);
/* Out-of-sequence frame: abandon any partially reassembled SDU. */
5059 kfree_skb(chan->sdu);
5062 chan->sdu_last_frag = NULL;
5066 BT_DBG("Freeing %p", skb);
/* Unconditionally advance past this txseq; no retransmissions. */
5071 chan->last_acked_seq = control->txseq;
5072 chan->expected_tx_seq = __next_seq(chan, control->txseq);
/* Parse and validate an ERTM/streaming data frame, then route it:
 * checks FCS, payload length against MPS, and F/P-bit legality, maps
 * S-frame "super" codes to rx events, and feeds the result into
 * l2cap_rx() (ERTM) or l2cap_stream_rx() (streaming).  Malformed
 * frames either get dropped or force a disconnect.
 */
5077 static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
5079 struct l2cap_ctrl *control = &bt_cb(skb)->control;
5083 __unpack_control(chan, skb);
5088 * We can just drop the corrupted I-frame here.
5089 * Receiver will miss it and start proper recovery
5090 * procedures and ask for retransmission.
5092 if (l2cap_check_fcs(chan, skb))
/* Compute payload length net of SDU-length and FCS fields. */
5095 if (!control->sframe && control->sar == L2CAP_SAR_START)
5096 len -= L2CAP_SDULEN_SIZE;
5098 if (chan->fcs == L2CAP_FCS_CRC16)
5099 len -= L2CAP_FCS_SIZE;
/* Payload exceeding the negotiated MPS is a protocol violation. */
5101 if (len > chan->mps) {
5102 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
5106 if (!control->sframe) {
5109 BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
5110 control->sar, control->reqseq, control->final,
5113 /* Validate F-bit - F=0 always valid, F=1 only
5114 * valid in TX WAIT_F
5116 if (control->final && chan->tx_state != L2CAP_TX_STATE_WAIT_F)
5119 if (chan->mode != L2CAP_MODE_STREAMING) {
5120 event = L2CAP_EV_RECV_IFRAME;
5121 err = l2cap_rx(chan, control, skb, event);
5123 err = l2cap_stream_rx(chan, control, skb);
5127 l2cap_send_disconn_req(chan->conn, chan,
/* Maps the 2-bit "super" field to the corresponding rx event. */
5130 const u8 rx_func_to_event[4] = {
5131 L2CAP_EV_RECV_RR, L2CAP_EV_RECV_REJ,
5132 L2CAP_EV_RECV_RNR, L2CAP_EV_RECV_SREJ
5135 /* Only I-frames are expected in streaming mode */
5136 if (chan->mode == L2CAP_MODE_STREAMING)
5139 BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
5140 control->reqseq, control->final, control->poll,
5145 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
5149 /* Validate F and P bits */
5150 if (control->final && (control->poll ||
5151 chan->tx_state != L2CAP_TX_STATE_WAIT_F))
5154 event = rx_func_to_event[control->super];
5155 if (l2cap_rx(chan, control, skb, event))
5156 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
/* Deliver a data packet to the channel identified by scid.  Unknown
 * CIDs (other than the A2MP CID, which can create a channel on demand)
 * drop the packet.  Basic mode delivers directly with an MTU check;
 * ERTM/streaming go through l2cap_data_rcv().
 */
5166 static void l2cap_data_channel(struct l2cap_conn *conn, u16 cid,
5167 struct sk_buff *skb)
5169 struct l2cap_chan *chan;
5171 chan = l2cap_get_chan_by_scid(conn, cid);
/* A2MP data may arrive before its channel exists; create it here. */
5173 if (cid == L2CAP_CID_A2MP) {
5174 chan = a2mp_channel_create(conn, skb);
5180 l2cap_chan_lock(chan);
5182 BT_DBG("unknown cid 0x%4.4x", cid);
5183 /* Drop packet and return */
5189 BT_DBG("chan %p, len %d", chan, skb->len);
5191 if (chan->state != BT_CONNECTED)
5194 switch (chan->mode) {
5195 case L2CAP_MODE_BASIC:
5196 /* If socket recv buffers overflows we drop data here
5197 * which is *bad* because L2CAP has to be reliable.
5198 * But we don't have any other choice. L2CAP doesn't
5199 * provide flow control mechanism. */
5201 if (chan->imtu < skb->len)
5204 if (!chan->ops->recv(chan, skb))
5208 case L2CAP_MODE_ERTM:
5209 case L2CAP_MODE_STREAMING:
5210 l2cap_data_rcv(chan, skb);
5214 BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
5222 l2cap_chan_unlock(chan);
/* Deliver a connectionless-channel packet to the global channel bound
 * to the given PSM; drops it unless the channel is bound/connected and
 * the payload fits the channel's incoming MTU.
 */
5225 static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm,
5226 struct sk_buff *skb)
5228 struct l2cap_chan *chan;
5230 chan = l2cap_global_chan_by_psm(0, psm, conn->src, conn->dst);
5234 BT_DBG("chan %p, len %d", chan, skb->len);
5236 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
5239 if (chan->imtu < skb->len)
5242 if (!chan->ops->recv(chan, skb))
/* Deliver an ATT (LE fixed channel) packet to the global channel
 * registered for that scid, with the same state and MTU checks as the
 * connectionless path.
 */
5249 static void l2cap_att_channel(struct l2cap_conn *conn, u16 cid,
5250 struct sk_buff *skb)
5252 struct l2cap_chan *chan;
5254 chan = l2cap_global_chan_by_scid(0, cid, conn->src, conn->dst);
5258 BT_DBG("chan %p, len %d", chan, skb->len);
5260 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
5263 if (chan->imtu < skb->len)
5266 if (!chan->ops->recv(chan, skb))
/* Demultiplex a complete L2CAP frame by CID after stripping the basic
 * header: signaling, connectionless (PSM-addressed), LE ATT data, SMP,
 * or a regular data channel.  Frames whose header length disagrees
 * with the skb length are discarded.
 */
5273 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
5275 struct l2cap_hdr *lh = (void *) skb->data;
5279 skb_pull(skb, L2CAP_HDR_SIZE);
5280 cid = __le16_to_cpu(lh->cid);
5281 len = __le16_to_cpu(lh->len);
5283 if (len != skb->len) {
5288 BT_DBG("len %d, cid 0x%4.4x", len, cid);
5291 case L2CAP_CID_LE_SIGNALING:
5292 case L2CAP_CID_SIGNALING:
5293 l2cap_sig_channel(conn, skb);
5296 case L2CAP_CID_CONN_LESS:
/* Connectionless frames carry the target PSM before the payload. */
5297 psm = get_unaligned((__le16 *) skb->data);
5298 skb_pull(skb, L2CAP_PSMLEN_SIZE);
5299 l2cap_conless_channel(conn, psm, skb);
5302 case L2CAP_CID_LE_DATA:
5303 l2cap_att_channel(conn, cid, skb);
/* SMP failure tears down the whole connection. */
5307 if (smp_sig_channel(conn, skb))
5308 l2cap_conn_del(conn->hcon, EACCES);
5312 l2cap_data_channel(conn, cid, skb);
5317 /* ---- L2CAP interface with lower layer (HCI) ---- */
/* HCI callback: decide whether to accept an incoming ACL connection.
 * Scans listening channels; an exact local-address match (lm1) takes
 * precedence over BDADDR_ANY wildcard listeners (lm2).  Returns the
 * accept/role-switch link-mode bits, or 0 to reject.
 */
5319 int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
5321 int exact = 0, lm1 = 0, lm2 = 0;
5322 struct l2cap_chan *c;
5324 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
5326 /* Find listening sockets and check their link_mode */
5327 read_lock(&chan_list_lock);
5328 list_for_each_entry(c, &chan_list, global_l) {
5329 struct sock *sk = c->sk;
5331 if (c->state != BT_LISTEN)
5334 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
5335 lm1 |= HCI_LM_ACCEPT;
5336 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
5337 lm1 |= HCI_LM_MASTER;
5339 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
5340 lm2 |= HCI_LM_ACCEPT;
5341 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
5342 lm2 |= HCI_LM_MASTER;
5345 read_unlock(&chan_list_lock);
5347 return exact ? lm1 : lm2;
/* HCI callback: an outgoing/incoming connection completed.  On success
 * attach an l2cap_conn and mark it ready; on failure tear down any
 * L2CAP state with the HCI status mapped to an errno.
 */
5350 void l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
5352 struct l2cap_conn *conn;
5354 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
5357 conn = l2cap_conn_add(hcon, status);
5359 l2cap_conn_ready(conn);
5361 l2cap_conn_del(hcon, bt_to_errno(status));
/* HCI callback: report the disconnect reason L2CAP wants used for this
 * link; defaults to "remote user terminated" when no l2cap_conn state
 * exists.
 */
5365 int l2cap_disconn_ind(struct hci_conn *hcon)
5367 struct l2cap_conn *conn = hcon->l2cap_data;
5369 BT_DBG("hcon %p", hcon);
5372 return HCI_ERROR_REMOTE_USER_TERM;
5373 return conn->disc_reason;
/* HCI callback: the link went down — tear down all L2CAP state for it,
 * translating the HCI reason code to an errno.
 */
5376 void l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
5378 BT_DBG("hcon %p reason %d", hcon, reason);
5380 l2cap_conn_del(hcon, bt_to_errno(reason));
/* React to an encryption change on a connection-oriented channel:
 * losing encryption starts a grace timer (MEDIUM security) or closes
 * the channel outright (HIGH security); regaining it clears the timer.
 */
5383 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
5385 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
5388 if (encrypt == 0x00) {
5389 if (chan->sec_level == BT_SECURITY_MEDIUM) {
5390 __set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
5391 } else if (chan->sec_level == BT_SECURITY_HIGH)
5392 l2cap_chan_close(chan, ECONNREFUSED);
5394 if (chan->sec_level == BT_SECURITY_MEDIUM)
5395 __clear_chan_timer(chan);
/* HCI callback: authentication/encryption completed with the given
 * status.  For LE links this feeds SMP key distribution.  For BR/EDR
 * it walks every channel on the connection and advances channels that
 * were waiting on security: BT_CONNECT channels send their connect
 * request, BT_CONNECT2 channels answer the pending connect response
 * (success, pending-authorization, or security-block) and kick off
 * configuration.
 * NOTE(review): several condition and label lines fall outside this
 * excerpt.
 */
5399 int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
5401 struct l2cap_conn *conn = hcon->l2cap_data;
5402 struct l2cap_chan *chan;
5407 BT_DBG("conn %p status 0x%2.2x encrypt %u", conn, status, encrypt);
5409 if (hcon->type == LE_LINK) {
5410 if (!status && encrypt)
5411 smp_distribute_keys(conn, 0);
5412 cancel_delayed_work(&conn->security_timer);
5415 mutex_lock(&conn->chan_lock);
5417 list_for_each_entry(chan, &conn->chan_l, list) {
5418 l2cap_chan_lock(chan);
5420 BT_DBG("chan %p scid 0x%4.4x state %s", chan, chan->scid,
5421 state_to_string(chan->state));
/* A2MP fixed channels do not participate in link security. */
5423 if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) {
5424 l2cap_chan_unlock(chan);
5428 if (chan->scid == L2CAP_CID_LE_DATA) {
5429 if (!status && encrypt) {
5430 chan->sec_level = hcon->sec_level;
5431 l2cap_chan_ready(chan);
5434 l2cap_chan_unlock(chan);
5438 if (test_bit(CONF_CONNECT_PEND, &chan->conf_state)) {
5439 l2cap_chan_unlock(chan);
/* Already-established channels just get their suspend flag
 * cleared and their encryption state re-evaluated. */
5443 if (!status && (chan->state == BT_CONNECTED ||
5444 chan->state == BT_CONFIG)) {
5445 struct sock *sk = chan->sk;
5447 clear_bit(BT_SK_SUSPEND, &bt_sk(sk)->flags);
5448 sk->sk_state_change(sk);
5450 l2cap_check_encryption(chan, encrypt);
5451 l2cap_chan_unlock(chan);
5455 if (chan->state == BT_CONNECT) {
5457 l2cap_send_conn_req(chan);
5459 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
5461 } else if (chan->state == BT_CONNECT2) {
5462 struct sock *sk = chan->sk;
5463 struct l2cap_conn_rsp rsp;
5469 if (test_bit(BT_SK_DEFER_SETUP,
5470 &bt_sk(sk)->flags)) {
/* Deferred setup: keep the peer pending and wake the
 * listening parent so userspace can authorize. */
5471 struct sock *parent = bt_sk(sk)->parent;
5472 res = L2CAP_CR_PEND;
5473 stat = L2CAP_CS_AUTHOR_PEND;
5475 parent->sk_data_ready(parent, 0);
5477 __l2cap_state_change(chan, BT_CONFIG);
5478 res = L2CAP_CR_SUCCESS;
5479 stat = L2CAP_CS_NO_INFO;
/* Security failed: refuse the connection as blocked. */
5482 __l2cap_state_change(chan, BT_DISCONN);
5483 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
5484 res = L2CAP_CR_SEC_BLOCK;
5485 stat = L2CAP_CS_NO_INFO;
5490 rsp.scid = cpu_to_le16(chan->dcid);
5491 rsp.dcid = cpu_to_le16(chan->scid);
5492 rsp.result = cpu_to_le16(res);
5493 rsp.status = cpu_to_le16(stat);
5494 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
5497 if (!test_bit(CONF_REQ_SENT, &chan->conf_state) &&
5498 res == L2CAP_CR_SUCCESS) {
5500 set_bit(CONF_REQ_SENT, &chan->conf_state);
5501 l2cap_send_cmd(conn, l2cap_get_ident(conn),
5503 l2cap_build_conf_req(chan, buf),
5505 chan->num_conf_req++;
5509 l2cap_chan_unlock(chan);
5512 mutex_unlock(&conn->chan_lock);
/* HCI callback: receive an ACL data fragment.  Start fragments
 * (!ACL_CONT) are validated against the basic L2CAP header length; a
 * complete frame is processed immediately, otherwise an rx_skb is
 * allocated and filled by subsequent continuation fragments until
 * rx_len reaches zero.  Length mismatches mark the connection
 * unreliable (ECOMM) and discard the partial frame.
 */
5517 int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
5519 struct l2cap_conn *conn = hcon->l2cap_data;
5522 conn = l2cap_conn_add(hcon, 0);
5527 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
5529 if (!(flags & ACL_CONT)) {
5530 struct l2cap_hdr *hdr;
/* A new start fragment while reassembly is in progress means the
 * previous frame was truncated — drop it. */
5534 BT_ERR("Unexpected start frame (len %d)", skb->len);
5535 kfree_skb(conn->rx_skb);
5536 conn->rx_skb = NULL;
5538 l2cap_conn_unreliable(conn, ECOMM);
5541 /* Start fragment always begin with Basic L2CAP header */
5542 if (skb->len < L2CAP_HDR_SIZE) {
5543 BT_ERR("Frame is too short (len %d)", skb->len);
5544 l2cap_conn_unreliable(conn, ECOMM);
5548 hdr = (struct l2cap_hdr *) skb->data;
5549 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
5551 if (len == skb->len) {
5552 /* Complete frame received */
5553 l2cap_recv_frame(conn, skb);
5557 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
5559 if (skb->len > len) {
5560 BT_ERR("Frame is too long (len %d, expected len %d)",
5562 l2cap_conn_unreliable(conn, ECOMM);
5566 /* Allocate skb for the complete frame (with header) */
5567 conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
5571 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
5573 conn->rx_len = len - skb->len;
/* Continuation fragment path. */
5575 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
5577 if (!conn->rx_len) {
5578 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
5579 l2cap_conn_unreliable(conn, ECOMM);
5583 if (skb->len > conn->rx_len) {
5584 BT_ERR("Fragment is too long (len %d, expected %d)",
5585 skb->len, conn->rx_len);
5586 kfree_skb(conn->rx_skb);
5587 conn->rx_skb = NULL;
5589 l2cap_conn_unreliable(conn, ECOMM);
5593 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
5595 conn->rx_len -= skb->len;
5597 if (!conn->rx_len) {
5598 /* Complete frame received */
5599 l2cap_recv_frame(conn, conn->rx_skb);
5600 conn->rx_skb = NULL;
/* debugfs seq_file show handler: dump one line per registered channel
 * (addresses, state, PSM, CIDs, MTUs, security level, mode) under the
 * global channel-list read lock.
 */
5609 static int l2cap_debugfs_show(struct seq_file *f, void *p)
5611 struct l2cap_chan *c;
5613 read_lock(&chan_list_lock);
5615 list_for_each_entry(c, &chan_list, global_l) {
5616 struct sock *sk = c->sk;
5618 seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
5619 batostr(&bt_sk(sk)->src),
5620 batostr(&bt_sk(sk)->dst),
5621 c->state, __le16_to_cpu(c->psm),
5622 c->scid, c->dcid, c->imtu, c->omtu,
5623 c->sec_level, c->mode);
5626 read_unlock(&chan_list_lock);
/* debugfs open handler: bind the seq_file single-show callback. */
5631 static int l2cap_debugfs_open(struct inode *inode, struct file *file)
5633 return single_open(file, l2cap_debugfs_show, inode->i_private);
/* File operations for the /sys/kernel/debug/bluetooth/l2cap entry. */
5636 static const struct file_operations l2cap_debugfs_fops = {
5637 .open = l2cap_debugfs_open,
5639 .llseek = seq_lseek,
5640 .release = single_release,
/* debugfs dentry for the "l2cap" file; created in l2cap_init(). */
5643 static struct dentry *l2cap_debugfs;
/* Module init: register the L2CAP socket layer and create the debugfs
 * file (failure to create the file is logged but non-fatal).
 */
5645 int __init l2cap_init(void)
5649 err = l2cap_init_sockets();
5654 l2cap_debugfs = debugfs_create_file("l2cap", 0444,
5655 bt_debugfs, NULL, &l2cap_debugfs_fops);
5657 BT_ERR("Failed to create L2CAP debug file");
/* Module exit: remove the debugfs file and unregister the sockets. */
5663 void l2cap_exit(void)
5665 debugfs_remove(l2cap_debugfs);
5666 l2cap_cleanup_sockets();
/* Runtime-tunable module parameter to disable ERTM (mode 0644). */
5669 module_param(disable_ertm, bool, 0644);
5670 MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");